input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# # convert the distance to cm
# distance_mouse = distance_mouse*(np.abs(ref_corners[0][1] -
# ref_corners[1][1])/np.abs(corners[0][0] - corners[2][0]))
# # allocate an empty array
# distance_new = np.zeros_like(distance_mouse)
# # find the first number
# first_number = distance_mouse[np.isnan(distance_mouse) == False][0]
# first_idx = np.argwhere(distance_mouse == first_number)[0][0]
# distance_new[:first_idx] = first_number
# # fill in the nan gaps with the latest distance
# for idx, el in enumerate(distance_mouse):
# if np.isnan(el):
# distance_new[idx] = first_number
# else:
# distance_new[idx] = el
# first_number = el
# # overwrite the distance array
# distance_mouse = distance_new
# identify the points that are within the threshold and are missing
# target_points = np.argwhere((distance_mouse < threshold) & np.isnan(cricket_coordinates['cricket_0_x'])).flatten()
target_points = np.argwhere(np.all(
np.isnan(cricket_coordinates.loc[:, cricket_columns]), axis=1) &
np.all(~np.isnan(
data_in.loc[:, ['mouse_x', 'mouse_y', 'mouse_head_x', 'mouse_head_y']]), axis=1)).flatten()
# if target points is empty, skip
if target_points.shape[0] > 0:
# assign the position of the mouse to those points
data_out.loc[target_points, ['cricket_0_x', 'cricket_0_y']] = \
data_in.loc[target_points, ['mouse_x', 'mouse_y']].to_numpy()
data_out.loc[target_points, ['cricket_0_head_x', 'cricket_0_head_y']] = \
data_in.loc[target_points, ['mouse_head_x', 'mouse_head_y']].to_numpy()
return data_out
def interpolate_segments(files, target_value):
    """Interpolate across missing samples in every column of a dataframe.

    :param files: dataframe of traces, one trace per column
    :param target_value: the value marking missing samples (NaN or a sentinel)
    :return: copy of the dataframe with the missing samples interpolated
    """
    # output frame, same shape as the input
    interpolated_traces = files.copy()
    # process each column independently
    for column_idx in np.arange(files.shape[1]):
        trace = files.iloc[:, column_idx].to_numpy()
        # mask of the missing samples, depending on the kind of target
        if np.isnan(target_value):
            missing_mask = np.isnan(trace)
        else:
            missing_mask = trace == target_value
        # nothing missing in this column: keep it untouched
        if not missing_mask.any():
            interpolated_traces.iloc[:, column_idx] = trace
            continue
        # x coordinates of the known samples
        x_known = np.squeeze(np.argwhere(~missing_mask))
        # full x range to evaluate the interpolant on
        x_target = np.arange(trace.shape[0])
        # known y values as a column vector
        y_known = np.expand_dims(trace[x_known], 1)
        # run the interpolation and store the result
        interpolated_traces.iloc[:, column_idx] = np.squeeze(interp_trace(y_known, x_known, x_target))
    return interpolated_traces
def interpolate_animals(files, target_values):
    """Interpolate the mouse NaNs and prolong the cricket ones.

    Mouse columns are interpolated with interpolate_segments; cricket columns
    are instead forward-filled: each missing stretch is replaced by the last
    valid value before it.

    :param files: dataframe with 'mouse*' and 'cricket*' coordinate columns
    :param target_values: value marking missing samples (NaN or a sentinel)
    :return: dataframe with the processed mouse and cricket columns concatenated
    """
    # extract the mouse coordinates
    mouse_columns = [el for el in files.columns if 'mouse' in el]
    mouse_coordinates = files[mouse_columns]
    # interpolate the mouse with the usual method
    # (BUGFIX: this call was commented out, leaving mouse_interpolated
    # undefined and raising NameError at the concat below)
    mouse_interpolated = interpolate_segments(mouse_coordinates, target_values)
    # get the cricket coordinates
    cricket_columns = [el for el in files.columns if 'cricket' in el]
    cricket_coordinates = files[cricket_columns]
    # copy the data
    cricket_interpolated = cricket_coordinates.copy()
    # for all the columns
    for col in cricket_coordinates.columns:
        # get the data
        data = cricket_coordinates[col].to_numpy()
        # label the contiguous stretches of missing samples
        if np.isnan(target_values):
            nan_locations, nan_numbers = label(np.isnan(data))
        else:
            nan_locations, nan_numbers = label(data == target_values)
        # for all the segments (label ids run 1..nan_numbers inclusive)
        for segment in np.arange(1, nan_numbers + 1):
            # get the start of the segment
            segment_start = np.argwhere(nan_locations == segment).flatten()[0]
            # a stretch starting at index 0 has no previous value to prolong;
            # leave it as-is (indexing with segment_start-1 would wrap to data[-1])
            if segment_start == 0:
                continue
            # replace the segment by the last valid value before it
            data[nan_locations == segment] = data[segment_start - 1]
        # add to the output frame
        cricket_interpolated.loc[:, col] = data
    return pd.concat([mouse_interpolated, cricket_interpolated], axis=1)
def eliminate_singles(files):
    """Eliminate points from each column that have no valid neighbors.

    A valid sample surrounded by NaNs on both sides is almost certainly a
    tracking glitch, so it is set to NaN as well.

    :param files: dataframe of traces, one trace per column
    :return: copy of the dataframe with isolated samples NaN'ed
    """
    # allocate memory for the output
    filtered_traces = files.copy()
    # for all the columns
    for col in np.arange(files.shape[1]):
        # get the target trace
        original_trace = files.iloc[:, col]
        # second derivative of the NaN indicator: an isolated valid sample
        # (indicator pattern 1,0,1) yields a value of 2 one position before it
        nan_positions = np.diff(np.isnan(original_trace).astype(np.int32), n=2)
        # coordinates of the singles; flatten because .iloc needs a 1-d row
        # indexer (the original 2-d argwhere output breaks on current pandas)
        single_positions = np.argwhere(nan_positions == 2).flatten() + 1
        # nan the singles
        filtered_traces.iloc[single_positions, col] = np.nan
    return filtered_traces
def nan_large_jumps(files, tar_columns, max_step, max_length):
    """NaN discontinuities in the trace (for later interpolation).

    :param files: dataframe with the traces
    :param tar_columns: iterable of column labels to process
    :param max_step: absolute one-sample change above which a jump is flagged
    :param max_length: stretches between two flagged jumps shorter than this
        are NaN'ed; longer ones are left alone (too big to fix by interpolation)
    :return: copy of the dataframe with the short inter-jump stretches NaN'ed
    """
    # allocate memory for the output
    corrected_trace = files.copy()
    # for the mouse and the cricket columns
    for animal in tar_columns:
        # for idx, col in enumerate(animal):
        # get the data
        curr_data = files[animal].copy()
        # take the derivative trace, padded with a leading 0 to keep the length
        result = np.diff(curr_data[:])
        result = np.hstack((0, result))
        # find the places of threshold crossing
        jumps = np.argwhere(np.abs(result) > max_step)
        # get the distance between jumps
        distance_between = np.diff(jumps, axis=0)
        # go through each of the jumps
        # NOTE(review): consecutive flagged jumps are paired with overlap
        # (jump i with i+1, then i+1 with i+2), so adjacent short stretches
        # are all NaN'ed — confirm this is the intended behavior
        for index, jump in enumerate(distance_between):
            # if the jump is smaller than the max_length allowed, NaN it (if bigger, that's a larger error in tracing
            # than can be fixed with just interpolation)
            if jump[0] < max_length:
                curr_data[jumps[index, 0]:jumps[index+1, 0]] = np.nan
        # ends = np.argwhere(result < -max_step)
        # # if they're empty, skip the iteration
        # if (starts.shape[0] == 0) | (ends.shape[0] == 0):
        #     continue
        # else:
        #     starts = starts[:, 0]
        #     ends = ends[:, 0]
        #
        # # match their sizes and order
        # if starts[0] > ends[0]:
        #     ends = ends[1:]
        #     if ends.shape[0] == 0:
        #         continue
        # if starts[-1] > ends[-1]:
        #     starts = starts[:-1]
        # # NaN the in-betweens
        # # for all the starts
        # for start, end in zip(starts, ends):
        #     curr_data[start:end] = np.nan
        corrected_trace[animal] = curr_data
    return corrected_trace
def find_frozen_tracking(files, margin=0.5, stretch_length=10):
    """Find places where the trajectory is too steady (i.e. no single pixel movement) and NaN them since it's probably
    not actually tracking.

    :param files: dataframe with the traces (an optional 'index' column is skipped)
    :param margin: minimum per-frame displacement for a frame to count as moving
    :param stretch_length: minimum number of consecutive still frames for a
        stretch to be treated as frozen tracking
    :return: copy of the dataframe with frozen stretches NaN'ed
    """
    # create a copy of the data
    corrected_trace = files.copy()
    # get the column names
    column_names = corrected_trace.columns
    # run through the columns
    for column in column_names:
        # skip the index column
        if column == 'index':
            continue
        # get the derivative of the traces
        delta_trace = abs(np.diff(corrected_trace[column], axis=0))
        # find the places that don't pass the criterion
        no_movement, number_no_movement = label(delta_trace < margin)
        # add a zero at the beginning to match the size of corrected traces
        no_movement = np.hstack([0, no_movement])
        # go through the stretches; label ids run 1..number_no_movement
        # inclusive (BUGFIX: arange(1, number_no_movement) skipped the last
        # stretch — cf. the equivalent loop in interpolate_animals)
        for stretch in np.arange(1, number_no_movement + 1):
            # if the stretch is long enough to be suspicious
            if np.sum(no_movement == stretch) >= stretch_length:
                # nan it
                corrected_trace.loc[no_movement == stretch, column] = np.nan
    return corrected_trace
def nan_jumps_dlc(files, max_jump=200):
    """Nan stretches in between large jumps, assuming most of the trace is correct.

    A jump away from the trace followed by a jump of opposite sign back is
    treated as a mistracked segment and NaN'ed.

    :param files: dataframe with the traces (an optional 'index' column is skipped)
    :param max_jump: absolute one-sample change above which a jump is flagged
    :return: copy of the dataframe with the in-between segments NaN'ed
    """
    # copy the data
    corrected_trace = files.copy()
    # get the column names
    column_names = corrected_trace.columns
    # run through the columns
    for column in column_names:
        # skip the index column if it's there
        if column == 'index':
            continue
        # find the jumps
        jump_length = np.diff(corrected_trace[column], axis=0)
        jump_location = np.argwhere(abs(jump_length) > max_jump)
        if jump_location.shape[0] == 0:
            continue
        jump_location = [el[0] for el in jump_location]
        # initialize a flag
        pair_flag = True
        # go through pairs of jumps
        for idx, jump in enumerate(jump_location[:-1]):
            # if this is the second member of a pair, skip
            if not pair_flag:
                # reset the pair flag
                pair_flag = True
                continue
            # if this jump and the next have the same sign, skip
            # (a real pair is a jump out followed by a jump back)
            if (jump_length[jump]*jump_length[jump_location[idx+1]]) > 0:
                continue
            # nan the segment in between
            # NOTE(review): .loc slicing is end-inclusive, so the +1 on the end
            # label also NaNs the first sample after the return jump — confirm
            # that is intended rather than an off-by-one
            corrected_trace.loc[jump+1:jump_location[idx+1]+1, column] = np.nan
            # set the pair flag
            pair_flag = False
    return corrected_trace
def rescale_pixels(traces, db_data, reference, manual_coordinates=None):
"""Use OpenCV to find corners in the image and rescale the data"""
# # set up the looping flag
# valid_corners = False
# set the crop flag
crop_flag = False if 'miniscope' in db_data['rig'] else True
# # loop until proper corners are found
# while not valid_corners:
# get the corners
if manual_coordinates is None:
try:
corner_coordinates = find_corners(db_data['avi_path'], num_frames=50, crop_flag=crop_flag)
except IndexError:
corner_coordinates = find_corners(db_data['avi_path'], num_frames=150, crop_flag=crop_flag)
else:
corner_coordinates = np.array(manual_coordinates)
# get the transformation between the reference and the real corners
perspective_matrix = cv2.getPerspectiveTransform(corner_coordinates.astype('float32'),
np.array(reference).astype('float32'))
# get the new corners
new_corners = np.concatenate((corner_coordinates, np.ones((corner_coordinates.shape[0], 1))), axis=1)
new_corners = np.matmul(perspective_matrix, new_corners.T).T
new_corners = np.array([el[:2] / el[2] for el in new_corners])
# copy the traces
new_traces = traces.copy()
# transform the traces
# get the unique column names, excluding the letter at the end
column_names = np.unique([el[:-1] for el in traces.columns])
# for all the unique names
for column in column_names:
# if the name + x exists, transform
if column+'x' in traces.columns:
# get the x and y data
original_data = traces[[column + 'x', column + 'y']].to_numpy()
| |
mechanism to avoid empries up in the high hills
# This is largely tuned in China; the Alps are < 2km
if t.agricultural and t.Elevation >= 3000:
region_territories_i.append(t.t_index)
self.region_territories_i['Agr_Mountains'] = region_territories_i
# Turchin's PNAS incremental includes
# NOTE: we assume 1:1 here and that the includes are the *same* as territory indices!
geo = self.geography
self.region_territories_i['Agr_expand1'] = setdiff(geo.PNAS_includes['Agr2'],geo.PNAS_includes['Agr1'])
self.region_territories_i['Agr_expand2'] = setdiff(geo.PNAS_includes['Agr3'],geo.PNAS_includes['Agr2'])
def update_intensification(self,year,announce=True,adjust_hinterland=True):
    """Apply the intensification schedule for the given year.

    Looks up <year> in the per-year schedules and, when an entry exists,
    rescales the hinterland population density and/or the per-territory
    intensification factors.

    :param year: simulation year to look up in the schedules
    :param announce: if True, print each change applied
    :param adjust_hinterland: if True, also rescale Hinterland.density
    """
    if adjust_hinterland:
        # EAFP: most years have no entry in the schedule
        try:
            hinterland_factor = self.hinterland_density_factor[year]
        except KeyError:
            pass
        else:
            self.Hinterland.density = self.hinterland_density*hinterland_factor
            if announce:
                print("%s: Changing hinterland density to %.2f" % (self.this_pretty_year,self.Hinterland.density))
    if self.use_regional_k:
        try:
            instructions = self.regional_intensifications[year]
        except KeyError:
            pass # nothing to do
        else:
            # instructions is a list of (region name, factor) pairs
            for region,factor in instructions:
                regions_i = self.region_territories_i[region]
                n_total_regions = len(regions_i)
                # only touch territories whose factor actually changes
                regions_i = [reg_i for reg_i in regions_i if self.intensification_factor[reg_i] != factor]
                if len(regions_i):
                    if announce:
                        print("%s: Changing intensification in %s(%d/%d) to %.2f" % (self.this_pretty_year,region,len(regions_i),n_total_regions,factor))
                    self.intensification_factor[regions_i] = factor
def set_intensification(self,factor,regions_i):
    """Directly set the intensification factor for the given territory indices.

    :param factor: the intensification value to assign
    :param regions_i: territory indices (fancy-indexes the per-territory array)
    """
    self.intensification_factor[regions_i] = factor
def setup_for_trial(self):
    """Initialize all per-trial state before the simulation runs.

    Sets up (in this order, which matters): the regional intensification
    schedule, the per-territory numpy arrays (intensification, population, k,
    birth rates), the first application of the intensification schedule, the
    year range, the random seed, per-territory setup, the polity flag
    generator, and finally the Unoccupied and Hinterland quasi-states.
    """
    # Do these calculations before Unoccupied and Hinterland are established
    # so the globals are setup for reference by polities
    self.report_regional_population = self.use_regional_k
    # NOTE(review): these two locals are read here but not used below — confirm dead
    intensification_year = self.intensification_year
    intensification_improvement = self.intensification_improvement
    # if this is already set, skip setting up here
    if len(list(self.regional_intensifications.keys())) == 0:
        if self.use_regional_k:
            print('Using region K(%d)' % self.use_regional_k)
            x2 = 2
            agr_m = ('Agr_Mountains',0.05)
            # schedule: year -> list of (region, factor) pairs, applied in order
            self.regional_intensifications = {-1500: [('Agr_Europe', 1.0),
                                                      ('Agr_Asia', 1.0),
                                                      ('Agr_India', 1.0),
                                                      ('Agr_Mesopotamia', 0.6),
                                                      ('Agr_Africa', 0.66),
                                                      # order matters: these overlap Agr_Europe so these are set lower last
                                                      ('Agr_expand1', 0.5),
                                                      ('Agr_expand2', 0.5),
                                                      # Details on east Asian improvements; order is important in case you use MYRV as 'shape' since it overlaps w/ PearlRV
                                                      ('Agr_LiaoRV', 0.5),
                                                      ('Agr_PearlRV', 0.5),
                                                      ('Agr_MYRV', 1.0),# must be after PearlRV (should intersect w/ Agr_expand1)
                                                      ('Agr_MekongRV',0.5),
                                                      ('Agr_BurmaRV',0.5),
                                                      agr_m],
                                              # TODO after every assertion we should reassert Agr_Mountains
                                              -200: [('Agr_PearlRV', 1.0),agr_m], # Will also 'update' part of MYRV is shape
                                              0: [('Agr_MekongRV',1.0),agr_m],
                                              # TODO delay this opening until 500?
                                              300: [('Agr_expand1', 0.65),agr_m], # turns Japan on...
                                              400: [('Agr_LiaoRV',0.65),agr_m], # already turned on via expand1... (could drop)
                                              600: [('Agr_BurmaRV',1.00),agr_m],
                                              # Medieval doubling in certain locations (including now all of Asia)
                                              1000: [('Agr_Europe', x2*1.0),
                                                     ('Agr_Asia', x2*1.0), # All of Asia, including the river valleys above
                                                     ('Agr_India', x2*1.0),
                                                     ('Agr_Mesopotamia', 0.6), # No change in Agr_Mesopotamia
                                                     ('Agr_Africa', x2*0.66),
                                                     ('Agr_expand1', 0.65), # No change in Agr_expand1
                                                     # Open the Rus, southern Africa, Kamchatka, etc.
                                                     ('Agr_expand2', 0.65),
                                                     agr_m
                                                     ]
                                              }
        else:
            print('Using uniform K')
    # print 'Regional intensifications: %s' % self.regional_intensifications # DEBUG
    # ONCE
    # setup numpy global arrays and indexing schemes
    n_t = len(self.territories)
    # These are the current assignments per territory
    # their values can change as polity types change or dxm methods run
    # ODDITY/BUG these calls create np arrays of the proper type BUT
    # if you (or pdb) try to print them you get
    # *** AttributeError: 'NoneType' object has no attribute 'logical_or'
    # In pdb if you try p self.k it does not complain but shows nothing (as it does for None)
    # type(self.k) shows an numpy.ndarray and len(self.k) reports n_t
    # Possible problem in pdb? Works fine in ipython
    # reflect a climate/biome difference; see setup_for_trial() per territory
    # NOTE: The entire world (not just ag) is normally intensified
    self.intensification_factor = np.ones(n_t,dtype=float ) # biome/crop/climate boost (over ME)
    # These are the current values per territory depending on polity and history
    self.population = np.zeros(n_t,dtype=float ) # see territory.set_polity() for initialization
    # see polity.update_arable_k() for changes
    self.k = np.ones(n_t,dtype=float ) # ensure always positive to avoid divide-by-zero
    self.birth_rate = np.zeros(n_t,dtype=float )
    # Scale to the effective birth per time step BUT NOT migration fraction since that is a per time step AMOUNT, not a rate!
    self.state_birth_rate = self.base_birth_rate*self.time_step
    self.hinterland_birth_rate = self.state_birth_rate/self.hinterland_birth_rate_factor
    # TODO at this point self.Hinterland and self.Unoccupied are set up
    #DEBUG print('Popuation density ratio: %.1f\n' % (self.state_density/self.hinterland_density))
    # CRITICAL: Must initialize the intensification factor before we compute any initial population
    # which happens when we set_polity() for Hinterland in super().setup_for_trial() below
    # This is important since we might base population of starting states on surrounding strength of hinterland
    # Hinterland has not been created here so don't update density here
    self.update_intensification(self.first_year,announce=False,adjust_hinterland=False)
    if self.report_regional_population:
        header = 'Population: Year K Total'
        report_i = [] # DEBUG which territories are NOT 'residual'?
        for region in self.region_list:
            region_i = self.region_territories_i[region]
            report_i.extend(region_i)
            header ='%s %s(%d)' % (header,region,len(region_i))
        print('%s Residual' % header)
    # CRITICAL must do this after setting parameters above because this creates Unoccupied and Hinterland
    # which are created using DemographicPolity() which wants to cache them
    # Of course we reset their properties just below
    ##s WHM.py:WorldHistoricalModel:setup_for_trial
    self.tracking_biomes = biome_types(self.track_polity_biomes) # so saved data know...
    self.this_year = self.first_year
    self.this_pretty_year = pretty_year(self.this_year)
    # integer time steps can use range(); fractional ones need np.arange()
    if isinstance(self.time_step, int) :
        self.years = range(self.first_year,
                           self.last_year+self.time_step,
                           self.time_step)
    else:
        self.years = np.arange(self.first_year,
                               self.last_year+self.time_step,
                               self.time_step)
    print('Running from %s to %s in %.1f year increments' % (pretty_year(self.first_year),
                                                             pretty_year(self.last_year),
                                                             self.time_step))
    self.random_seed = self.options.seed
    if self.using_mp:
        self.random_seed = None # force recomputation
    if self.random_seed:
        if use_set_versions:
            print('WARNING: Using set functions but setting random seed: Trial will not replicate!')
        seed = self.random_seed
    else: # None
        # explicitly set and report it so we can repeat any bugs we find
        # HACK! delay some number of seconds so the random number seed is not the same in each instance
        time.sleep(self.trial)
        seed = int(time.time())
    # NOTE: Any calls to random before this are not impacted
    # See, for example, the polity flag generator which is initialized on load below
    # BUG: even after we set this, with A2, subsequent runs or trials are NOT identical
    # WHY??? (use of set())
    random.seed(seed)
    print('%s random seed: %d (%f)' % ('Assigned' if self.random_seed else 'Computed', seed,urandom_random()))
    self.random_seed = seed # record
    # DEAD self.area_km2 = np.ones(len(self.territories),dtype=float ) # area of the territory km^2 (initially assume 1km2 per region)
    global polity_counter
    polity_counter = 1; # reset
    for territory in self.territories:
        territory.setup_for_trial()
    self.polities = list() # preserve order of creation always
    # restart the generator before we create polities
    # but after we set the seed so flags will be the same
    if True:
        # several of the markers like '+' and 'x' do not show up well
        open_markers = 'o^v<>ph' # only 'open' markers (not d which looks thin)
        self.flag_generator = polity_flag(markers=open_markers) # initialize the generator
    else:
        self.flag_generator = polity_flag() # initialize the generator with all markers
    # Initialize the starting non-polities
    self.Unoccupied = self.PolityType(name='Unoccupied')
    self.Unoccupied.make_quasi_state(arising_polity_type=None) # explicitly None here
    self.Unoccupied.flag = {'marker':'.','color':'Silver'} # overwrite flag
    self.Hinterland = self.PolityType(name='Hinterland')
    self.Hinterland.make_quasi_state(arising_polity_type=self.PolityType)
    self.Hinterland.flag = {'marker':'.','color':'Green'} # overwrite flag
    self.create_states = None
    # TODO supers().setup_for_trial() here? so setup_for_trial() methods can complete
    # TODO move print_scalar_members() call to setup_chronicler_for_trial() and add to trail_data
    self.setup_chronicler_for_trial()
    # Where we actually run a world in a trial
    # These methods are the heart of the simulation and are common to all specializations
    # which override or extend (via super()) inherited behavior
    ##e WHM.py:WorldHistoricalModel:setup_for_trial
    # CRITICAL *reset* k and br once for these special polities
    self.Unoccupied.density = self.unoccupied_density
    self.Unoccupied.br = 0 # no growth with this 'polity'
    self.Hinterland.density = self.hinterland_density
    self.Hinterland.br = self.hinterland_birth_rate
def deus_ex_machina(self):
    """Hook for exogenous, scripted world changes in the current year.

    The base implementation does nothing; specializations override it.
    """
    ##s WHM.py:WorldHistoricalModel:deus_ex_machina
    # Acts of 'god': possibly over multiple territories in this year;
    # Most of the time this is a pass.
    pass
    ##e WHM.py:WorldHistoricalModel:deus_ex_machina
# If actual history we want to use the initial states and hinterland distribution
# to discover a hinterland density, then scaled, to match the observed total population
# We then set_population NOT density and let the parameteric density take over
# This ensures that the population estimate matches the observed population at the start
# ADSM also uses this code to get an initial population start
# However, it has an additional constraint that the local power around the state has to be large enough
# to ensure it doesn't collapse right away.
# There are three cases:
# PNAS - actual history, actual population, no initial creation of polities (create_geography with PNAS tag)
| |
import _io
from bin import spl_token_lib as stl
import os
# SPL_PATH = os.getcwd()
# operator character sets (presumably used by the parser/lexer helpers)
PLUS_MINUS = {"+", "-"}
OTHER_ARITHMETIC = {"*", "/", "%"}
# char_type() codes whose runs merge into a single token when repeated
# (0=digit, 1=alpha per char_type; others are operator classes — see char_type)
SELF_CONCATENATE = {0, 1, 8, 9, 10, 11, 14, 17}
# ordered (previous, current) char_type pairs that merge across classes,
# e.g. (1, 0): a digit following a letter stays inside the identifier
CROSS_CONCATENATE = {(8, 9), (1, 0), (0, 12), (12, 0), (15, 9), (17, 9), (20, 9), (16, 9), (10, 9), (11, 9),
                     (9, 8), (1, 14), (14, 1), (0, 14), (14, 0)}
# placeholder (line, file) tag for tokens synthesized by the tokenizer itself
LINE_FILE = 0, "TOKENIZER"
class Tokenizer:
    """
    Lexer for spl source code: turns text into the flat token list in
    self.tokens, expanding 'import' statements inline as they are found.

    :type tokens: list of Token
    """

    def __init__(self):
        # accumulated output token stream
        self.tokens = []
        # whether lib.lang.sp is imported automatically
        self.import_lang = True
        # directory of the spl interpreter (system libs live under it)
        self.spl_path = ""
        # directory of the main script (user libs are resolved against it)
        self.script_dir = ""
        # recorded in tokens for error reporting only
        self.file_name = ""
        # if True, the token stream is also serialized to a file
        self.link = False

    def setup(self, spl_path: str, file_name: str, script_dir: str, link: bool = False, import_lang: bool = True):
        """
        Sets up the parameters of this lexer.

        The <file_name> will be recorded in tokens and ast nodes, which is used for properly displaying
        the error message, if any error occurs. This parameter does not contribute to the actual interpreting.

        The <script_dir> is used to find the importing files, which is important to run the script correctly.

        :param spl_path: the directory path of spl interpreter
        :param file_name: the name of the main script
        :param script_dir: the directory of the main script
        # :param imported: the set of imported file names
        :param link: whether to write the result to file
        :param import_lang: whether to import lib.lang.sp automatically
        :return:
        """
        self.spl_path = spl_path
        self.file_name = file_name
        self.script_dir = script_dir
        # self.imported = imported
        self.link = link
        self.import_lang = import_lang

    def tokenize(self, source):
        """
        Tokenize the source spl source code into a list of tokens, stored in the memory of this Lexer.

        :param source: the source code, whether an opened file or a list of lines.
        :return: None
        """
        self.tokens.clear()
        if self.import_lang and self.file_name[-7:] != "lang.sp":
            # prepend 'import namespace "lang"' so the base library is always available
            self.tokens += [stl.IdToken(LINE_FILE, "import"), stl.IdToken(LINE_FILE, "namespace"),
                            stl.LiteralToken(LINE_FILE, "lang")]
            self.find_import(0, 3)
        if isinstance(source, list):
            self.tokenize_text(source)
        else:
            self.tokenize_file(source)

    def tokenize_file(self, file: _io.TextIOWrapper):
        """
        Tokenize an opened source file line by line.

        :param file: the opened text file
        :return: None
        """
        line = file.readline()
        line_num = 1
        in_doc = False
        doc = ""
        while line:
            tup = (line_num, self.file_name)
            last_index = len(self.tokens)
            in_doc, doc = self.proceed_line(line, tup, in_doc, doc)
            # resolve any import statement among the tokens just produced
            self.find_import(last_index, len(self.tokens))
            line = file.readline()
            line_num += 1
        self.tokens.append(stl.Token((stl.EOF, None)))
        if self.link:
            self._write_to_file()

    def tokenize_text(self, lines):
        """Tokenize source given as a list of lines (same flow as tokenize_file)."""
        doc = ""
        in_doc = False
        for i in range(len(lines)):
            line_num = i + 1
            tup = (line_num, self.file_name)
            line = lines[i]
            last_index = len(self.tokens)
            in_doc, doc = self.proceed_line(line, tup, in_doc, doc)
            self.find_import(last_index, len(self.tokens))
        self.tokens.append(stl.Token((stl.EOF, None)))

    def restore_tokens(self, file: _io.BytesIO):
        """Rebuild the token list from a binary stream written by _write_to_file."""
        self.tokens.clear()
        while True:
            # one leading flag byte identifies the token type; 0 terminates
            flag = int.from_bytes(file.read(1), "big")
            if flag == 0:
                self.tokens.append(stl.Token((stl.EOF, None)))
                break
            else:
                line = int(stl.read_string(file))
                file_name = stl.read_string(file)
                lf = line, file_name
                if flag == 1:
                    token: stl.NumToken = stl.NumToken(lf, stl.read_string(file))
                elif flag == 2:
                    token: stl.LiteralToken = stl.LiteralToken(lf, stl.read_string(file))
                elif flag == 3:
                    token: stl.IdToken = stl.IdToken(lf, stl.read_string(file))
                elif flag == 4:
                    token: stl.DocToken = stl.DocToken(lf, stl.read_string(file))
                else:
                    raise stl.ParseException("Unknown flag: {}".format(flag))
                self.tokens.append(token)

    def proceed_line(self, line: str, line_num: (int, str), in_doc: bool, doc: str) -> (bool, str):
        """ Tokenize a line.

        :param line: line to be proceed
        :param line_num: the line number and the name of source file
        :param in_doc: whether it is currently in docstring, before proceed this line
        :param doc: the current doc
        :return: whether it is currently in docstring, after proceed this line
        """
        in_single = False
        in_double = False
        literal = ""
        non_literal = ""
        length = len(line)
        i = -1
        while i < length - 1:
            i += 1
            ch = line[i]
            # doc-comment delimiters are only recognized outside string literals
            if not in_double and not in_single:
                if in_doc:
                    # closing '*/' of a /* ... */ doc comment
                    if ch == "*" and i < length - 1 and line[i + 1] == "/":
                        in_doc = False
                        # NOTE(review): i += 2 plus the loop's own i += 1 also skips
                        # the character immediately after '*/' — confirm intended
                        i += 2
                        continue
                else:
                    # opening '/*' of a doc comment
                    if ch == "/" and i < length - 1 and line[i + 1] == "*":
                        in_doc = True
                        i += 1
            if not in_doc:
                # flush any accumulated doc text as a DocToken
                if len(doc) > 0:
                    self.tokens.append(stl.DocToken(line_num, doc[2:]))
                    doc = ""
                if in_double:
                    if ch == '"':
                        # closing double quote: emit the string literal
                        in_double = False
                        self.tokens.append(stl.LiteralToken(line_num, literal))
                        literal = ""
                        continue
                elif in_single:
                    if ch == "'":
                        # closing single quote: emit the string literal
                        in_single = False
                        self.tokens.append(stl.LiteralToken(line_num, literal))
                        literal = ""
                        continue
                else:
                    if ch == '"':
                        # opening double quote: tokenize the text before it first
                        in_double = True
                        self.line_tokenize(non_literal, line_num)
                        non_literal = ""
                        continue
                    elif ch == "'":
                        # opening single quote: tokenize the text before it first
                        in_single = True
                        self.line_tokenize(non_literal, line_num)
                        non_literal = ""
                        continue
                if in_single or in_double:
                    literal += ch
                else:
                    non_literal += ch
                    # '//' starts a line comment: tokenize what precedes it and stop
                    if len(non_literal) > 1 and non_literal[-2:] == "//":
                        self.line_tokenize(non_literal[:-2], line_num)
                        non_literal = ""
                        break
            else:
                # inside a doc comment: accumulate its text
                doc += ch
        if len(non_literal) > 0:
            self.line_tokenize(non_literal, line_num)
        return in_doc, doc

    def line_tokenize(self, non_literal, line_num):
        """
        Tokenize a line, with string literals removed.

        :param non_literal: text to be tokenize, no string literal
        :param line_num: the line number
        :return: None
        """
        lst = normalize(non_literal)
        for part in lst:
            if part.isidentifier():
                self.tokens.append(stl.IdToken(line_num, part))
            elif is_float(part):
                self.tokens.append(stl.NumToken(line_num, part))
            elif is_integer(part):
                self.tokens.append(stl.NumToken(line_num, part))
            elif part in stl.ALL:
                self.tokens.append(stl.IdToken(line_num, part))
            elif part[:-1] in stl.OP_EQ:
                self.tokens.append(stl.IdToken(line_num, part))
            elif part == stl.EOL:
                self.tokens.append(stl.IdToken(line_num, stl.EOL))
            elif part == "=>":
                self.tokens.append(stl.IdToken(line_num, part))
            elif part in stl.OMITS:
                pass
            else:
                raise stl.ParseException("Unknown symbol: '{}', at line {}".format(part, line_num))

    def find_import(self, from_, to):
        """
        Looks for import statement between the given slice of the tokens list.

        :param from_: the beginning position of search
        :param to: the end position of search
        :return: None
        """
        for i in range(from_, to, 1):
            token = self.tokens[i]
            if isinstance(token, stl.IdToken) and token.symbol == "import":
                next_token: stl.Token = self.tokens[i + 1]
                namespace_token = None
                if isinstance(next_token, stl.IdToken) and next_token.symbol == "namespace":
                    # 'import namespace "x"': remember the namespace token, drop
                    # it from the stream and look at the path literal after it
                    namespace_token = next_token
                    self.tokens.pop(i + 1)
                    path_token: stl.LiteralToken = self.tokens[i + 1]
                elif isinstance(next_token, stl.LiteralToken):
                    path_token: stl.LiteralToken = self.tokens[i + 1]
                else:
                    raise stl.ParseException("Unexpected token in file '{}', at line {}"
                                             .format(next_token.file, next_token.line))
                name = path_token.text
                if name[-3:] == ".sp":  # user lib
                    if len(self.script_dir) == 0:
                        file_name = name[:-3].replace(".", "/") + ".sp"
                    else:
                        file_name = "{}{}{}".format(self.script_dir, os.sep, name[:-3]).replace(".", "/") + ".sp"
                    import_name = name[:-3]
                else:  # system lib
                    file_name = "{}{}lib{}{}.sp".format(self.spl_path, os.sep, os.sep, name)
                    import_name = name
                if len(self.tokens) > i + 2:
                    # optional 'as <name>' alias
                    as_token: stl.IdToken = self.tokens[i + 2]
                    if as_token.symbol == "as":
                        if namespace_token is not None:
                            raise stl.ParseException("Unexpected combination 'import namespace ... as ...'")
                        name_token: stl.IdToken = self.tokens[i + 3]
                        import_name = name_token.symbol
                        self.tokens.pop(i + 1)
                        self.tokens.pop(i + 1)
                self.tokens.pop(i + 1)  # remove the import name token
                self.import_file(file_name, import_name)
                if namespace_token:
                    # re-emit 'namespace <name> ;' after the imported tokens
                    lf = namespace_token.line, namespace_token.file
                    self.tokens.append(namespace_token)
                    self.tokens.append(stl.IdToken(lf, import_name))
                    self.tokens.append(stl.IdToken(lf, stl.EOL))
                # the token list was mutated; stop scanning this slice
                break

    def import_file(self, full_path, import_name):
        """
        Imports an external sp file.

        This method tokenize the imported file, and inserts all tokens except the EOF token of the imported
        file into the current file.

        :param full_path: the path of the file to be imported
        :param import_name: the name to be used
        """
        with open(full_path, "r") as file:
            lexer = Tokenizer()
            lexer.setup(self.spl_path, full_path, get_dir(full_path), False)
            # lexer.script_dir = get_dir(full_path)
            lexer.tokenize(file)
            # print(lexer.tokens)
            # wrap the imported tokens as: <import_name> <full_path> { ... }
            self.tokens.append(stl.IdToken(LINE_FILE, import_name))
            self.tokens.append(stl.IdToken(LINE_FILE, full_path))
            self.tokens.append(stl.IdToken(LINE_FILE, "{"))
            self.tokens += lexer.tokens
            self.tokens.pop()  # remove the EOF token
            self.tokens.append(stl.IdToken(LINE_FILE, "}"))

    def get_tokens(self):
        """
        Returns the tokens list.

        :return: the tokens list
        """
        return self.tokens

    def _write_to_file(self):
        # serialize every token to the sibling '.lsp' binary file
        name = stl.replace_extension(self.file_name, "lsp")
        with open(name, "wb") as wf:
            for token in self.tokens:
                wf.write(token.to_binary())
def normalize(string):
    """
    Splits a line of non-literal text into raw token strings.

    Characters are grouped greedily: a character joins the current group when
    its char_type either self-concatenates with an identical previous type or
    forms an allowed cross-type pair with it.

    :type string: str
    :param string: the text to split
    :return: the list of token strings
    :type: list
    """
    # a plain identifier is already a single token
    if string.isidentifier():
        return [string]
    parts = []
    if string:
        current = string[0]
        prev_type = char_type(current)
        for ch in string[1:]:
            cur_type = char_type(ch)
            if (cur_type in SELF_CONCATENATE and cur_type == prev_type) or ((prev_type, cur_type) in CROSS_CONCATENATE):
                # same group: extend the pending token
                current += ch
            else:
                # group boundary: flush the pending token and start a new one
                put_string(parts, current)
                current = ch
            prev_type = cur_type
        put_string(parts, current)
    return parts
def put_string(lst: list, s: str):
    """Append token string *s* to *lst*, splitting off a trailing dot.

    A multi-character token ending in "." is a name that ran into a dot
    (e.g. member access), so the dot becomes its own token.
    """
    if len(s) > 1 and s.endswith("."):
        lst.append(s[:-1])
        lst.append(".")
    else:
        lst.append(s)
def char_type(ch):
"""
:type ch: string
:param ch:
:return:
"""
if ch.isdigit():
return 0
elif ch.isalpha():
return 1
elif ch == "{":
return 2
elif ch == "}":
return 3
elif ch == "(":
return 4
elif ch == ")":
return 5
elif ch == ";":
return 6
elif ch == "\n":
return 7
elif ch == ">" or | |
<gh_stars>1-10
#==================================================================================
# BSD License
#
# Copyright (c)2020, ww3-opentools developers, all rights reserved
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
#
#==================================================================================
# gebco_reduce.py
#
# PURPOSE:
# Functions library for generating a reduced version of the GEBCO bathymetry
# product, either based on averaging over an integer number of GEBCO grid cells
# or using scipy interpolate functions. The reduction makes subsequent work for
# gridgen quicker and easier, since the cell finding loops in that code then get
# to work with smaller source grid files and arrays.
#
# Functions are also used to correct sea levels and/or remove large inland lakes
# and areas of land below mean sea level from the source bathymetry, and provide
# a percentage land value for each of the coarsened cells.
#
# REVISION HISTORY:
#
# <NAME>; Met Office; May-2020
# Code prepared for initial release on github
#
#==================================================================================
import netCDF4 as nc
import numpy as np
import scipy.interpolate as interp
import matplotlib.pyplot as plt
def correctLakesBathy(latout, lonout, depout, mskout, depthmin=0.0, removesmall=None, caspianonly=False):
    """ Apply known height corrections to inland locations.

    Parameters
    ----------
    latout, lonout : 1D arrays of grid cell latitudes / longitudes
    depout : 2D elevation array (lat, lon); negative values below sea level
    mskout : 2D land-sea mask; 1.0 appears to mark land and 0.0 water
             (inferred from the removal branch below -- confirm with caller)
    depthmin : elevation threshold separating land from sea in the mask
    removesmall : if not None, also fill in isolated water bodies of up to
                  this many grid cells on a side
    caspianonly : if True, only the Caspian Sea keeps a level correction;
                  every other listed lake is removed (filled in as land)

    Returns
    -------
    depout, mskout : the corrected arrays (also modified in place)
    """
    # Each entry defines a lon/lat bounding box plus a 'correction' (metres
    # added to the elevations in the box) and/or a 'removal' (large positive
    # offset that lifts the area above sea level, i.e. deletes the feature).
    lakecorrections = {}
    # lakes: correct or remove
    lakecorrections['CaspianSea'] = {'lonw':44.0,'lone':55.6,'lats':36.0,'latn':51.0,'correction':27.5,'removal':1000.0}
    if caspianonly:
        # remove (fill to land) every lake except the Caspian
        lakecorrections['LakeEyre'] = {'lonw':136.0,'lats':-31.9,'lone':140.9,'latn':-26.7,'removal':1000.0}
        lakecorrections['LakeSuperior'] = {'lonw':-92.4,'lats':46.4,'lone':-84.2,'latn':49.1,'removal':1000.0}
        lakecorrections['LakeMichigan'] = {'lonw':-88.15,'lats':41.55,'lone':-84.73,'latn':49.1,'removal':1000.0}
        lakecorrections['LakeHuron'] = {'lonw':-84.73,'lats':42.96,'lone':-79.65,'latn':46.4,'removal':1000.0}
        lakecorrections['LakeErie'] = {'lonw':-83.58,'lats':41.35,'lone':-78.82,'latn':42.94,'removal':1000.0}
        lakecorrections['LakeOntario'] = {'lonw':-79.89,'lats':43.14,'lone':-75.99,'latn':44.34,'removal':1000.0}
        # NOTE(review): LakeLadoga has 'latn' (51.78) south of 'lats' (59.89)
        # in both branches -- bounds look swapped; with latn < lats the box
        # index range below is empty, so this entry is likely a no-op. TODO confirm.
        lakecorrections['LakeLadoga'] = {'lonw':30.27,'lats':59.89,'lone':33.02,'latn':51.78,'removal':100.0}
        lakecorrections['LakeOnega'] = {'lonw':33.64,'lats':60.84,'lone':36.54,'latn':62.95,'removal':100.0}
    else:
        # apply known lake surface-level corrections relative to mean sea level
        lakecorrections['LakeEyre'] = {'lonw':136.0,'lats':-31.9,'lone':140.9,'latn':-26.7,'correction':12,'removal':1000.0}
        lakecorrections['LakeSuperior'] = {'lonw':-92.4,'lats':46.4,'lone':-84.2,'latn':49.1,'correction':-183.0,'removal':1000.0}
        lakecorrections['LakeMichigan'] = {'lonw':-88.15,'lats':41.55,'lone':-84.73,'latn':49.1,'correction':-176.0,'removal':1000.0}
        lakecorrections['LakeHuron'] = {'lonw':-84.73,'lats':42.96,'lone':-79.65,'latn':46.4,'correction':-176.0,'removal':1000.0}
        lakecorrections['LakeErie'] = {'lonw':-83.58,'lats':41.35,'lone':-78.82,'latn':42.94,'correction':-173.0,'removal':1000.0}
        lakecorrections['LakeOntario'] = {'lonw':-79.89,'lats':43.14,'lone':-75.99,'latn':44.34,'correction':-73.0,'removal':1000.0}
        lakecorrections['LakeVictoria'] = {'lonw':31.465,'lats':-3.1,'lone':35.0,'latn':0.5,'correction':-1135.0,'removal':0.0}
        lakecorrections['LakeTanganyika'] = {'lonw':29.00,'lats':-8.88,'lone':31.3,'latn':-3.25,'correction':-773.0,'removal':0.0}
        lakecorrections['LakeBaikal'] = {'lonw':103.56,'lats':51.45,'lone':110.0,'latn':55.95,'correction':-455.0,'removal':0.0}
        lakecorrections['GreatBearLake'] = {'lonw':-125.2,'lats':64.76,'lone':-117.4,'latn':67.07,'correction':-156.0,'removal':1000.0}
        lakecorrections['LakeMalawi'] = {'lonw':33.85,'lats':-14.44,'lone':35.31,'latn':-9.45,'correction':-468.0,'removal':0.0}
        lakecorrections['GreatSlaveLake'] = {'lonw':-117.31,'lats':60.81,'lone':-108.8,'latn':62.94,'correction':-156.0,'removal':1000.0}
        lakecorrections['LakeWinnipeg'] = {'lonw':-99.32,'lats':50.15,'lone':-96.21,'latn':54.09,'correction':-217.0,'removal':1000.0}
        lakecorrections['LakeLadoga'] = {'lonw':30.27,'lats':59.89,'lone':33.02,'latn':51.78,'correction':-5.0,'removal':100.0}
        lakecorrections['LakeOnega'] = {'lonw':33.64,'lats':60.84,'lone':36.54,'latn':62.95,'correction':-33.0,'removal':100.0}
        lakecorrections['LakeBalkash'] = {'lonw':73.32,'lats':44.76,'lone':79.36,'latn':46.92,'correction':-341.4,'removal':0.0}
    # depressions: remove only (dry land below mean sea level that would
    # otherwise appear as sea in a threshold-based mask)
    lakecorrections['TurfanDepression'] = {'lonw':87.5,'lats':41.5,'lone':90.7,'latn':43.9,'removal':1000.0} # 154m below sea level
    lakecorrections['CaspianDepression'] = {'lonw':62.3,'lats':41.2,'lone':63.8,'latn':42.8,'removal':1000.0} # lake next to Caspian, 27.5m below sea level
    lakecorrections['ChottMelrhir'] = {'lonw':5.9,'lats':33.8,'lone':7.3,'latn':34.75,'removal':1000.0} # 40m below sea level
    lakecorrections['ShattAlGharsah'] = {'lonw':7.35,'lats':34.0,'lone':8.15,'latn':34.2,'removal':1000.0} # 17m below sea level
    lakecorrections['SabkhatGhuzayyil'] = {'lonw':7.35,'lats':34.0,'lone':8.15,'latn':34.2,'removal':1000.0} # 17m below sea level
    lakecorrections['QattaraDepression'] = {'lonw':23.90,'lats':28.33,'lone':31.58,'latn':30.50,'removal':1000.0} # 133m below sea level
    lakecorrections['DeadSeaDepression'] = {'lonw':35.2,'lats':30.5,'lone':36.1,'latn':32.95,'removal':1000.0} # 413m below sea level
    lakecorrections['SaltonTrough'] = {'lonw':-116.4,'lats':31.85,'lone':-115.1,'latn':33.7,'removal':1000.0} # 69m below sea level
    lakecorrections['DeathValley'] = {'lonw':-117.3,'lats':35.5,'lone':-116.3,'latn':36.65,'removal':1000.0} # 83m below sea level
    lakecorrections['UnknownNorthernCalifornia'] = {'lonw':-122.09,'lats':37.75,'lone':-121.25,'latn':38.50,'removal':1000.0} # unknown
    lakecorrections['StJuliansGreatDepression'] = {'lonw':-68.55,'lats':-49.7,'lone':-68.0,'latn':-49.35,'removal':1000.0} # 105m below sea level
    lakecorrections['GuanBajoDelGualicho'] = {'lonw':-65.75,'lats':-40.6,'lone':-64.7,'latn':-40.00,'removal':1000.0} # 73m below sea level
    lakecorrections['UnknownBuenosAires'] = {'lonw':-64.4,'lats':-38.9,'lone':-62.7,'latn':-38.2,'removal':1000.0} # unknown
    lakecorrections['AmazonDelta'] = {'lonw':-55.0,'lats':-2.5,'lone':-49.5,'latn':-0.35,'removal':1000.0} # unknown
    lakecorrections['AfarDepression'] = {'lonw':42.36,'lats':11.5,'lone':42.68,'latn':11.82,'removal':1000.0} # 155m below sea level
    lakecorrections['DenakilDepression1'] = {'lonw':40.00,'lats':12.65,'lone':40.65,'latn':14.85,'removal':1000.0} # 125m below sea level
    lakecorrections['DenakilDepression2'] = {'lonw':40.65,'lats':12.65,'lone':41.1,'latn':14.36,'removal':1000.0} # 125m below sea level
    lakecorrections['DenakilDepression3'] = {'lonw':41.1,'lats':12.65,'lone':41.5,'latn':13.7,'removal':1000.0} # 125m below sea level
    lakecorrections['UnknownKuwait'] = {'lonw':46.6,'lats':30.34,'lone':48.45,'latn':31.1,'removal':1000.0} # unknown
    lakecorrections['UnknownSaudiArabia1'] = {'lonw':51.3,'lats':23.7,'lone':51.6,'latn':23.95,'removal':1000.0} # unknown
    lakecorrections['UnknownSaudiArabia2'] = {'lonw':50.85,'lats':24.45,'lone':51.05,'latn':24.65,'removal':1000.0} # unknown
    lakecorrections['DonRiver'] = {'lonw':39.7,'lats':47.15,'lone':40.35,'latn':47.5,'removal':1000.0} # unknown
    lakecorrections['UnknownKazakhstan'] = {'lonw':54.76,'lats':45.4,'lone':56.57,'latn':46.5,'removal':1000.0} # unknown
    lakecorrections['UnknownTurkmenistan'] = {'lonw':54.2,'lats':42.5,'lone':54.9,'latn':43.3,'removal':1000.0} # unknown
    lakecorrections['AkdzhakayaDepression'] = {'lonw':55.2,'lats':40.0,'lone':58.3,'latn':41.15,'removal':1000.0} # 81m below sea level; wider area
    lakecorrections['LakeTaiYangtze'] = {'lonw':119.0,'lats':30.92,'lone':120.75,'latn':32.9,'removal':1000.0} # unknown
    lakecorrections['Yancheng1'] = {'lonw':119.1,'lats':32.9,'lone':120.25,'latn':34.27,'removal':1000.0} # unknown
    lakecorrections['Yancheng2'] = {'lonw':120.25,'lats':32.9,'lone':120.5,'latn':33.9,'removal':1000.0} # unknown
    lakecorrections['KaragiyeDepression1'] = {'lonw':51.38,'lats':43.37,'lone':51.98,'latn':43.9,'removal':1000.0} # 132m below sea level
    lakecorrections['KaragiyeDepression2'] = {'lonw':51.67,'lats':43.21,'lone':51.98,'latn':43.37,'removal':1000.0} # 132m below sea level
    lakecorrections['SzczecinLagoon'] = {'lonw':13.85,'lats':53.6,'lone':14.63,'latn':53.92,'removal':1000.0} # unknown
    lakecorrections['Halligen'] = {'lonw':8.58,'lats':54.25,'lone':9.0,'latn':55.10,'removal':1000.0} # unknown
    lakecorrections['Elbe'] = {'lonw':9.06,'lats':53.7,'lone':9.9,'latn':53.9,'removal':1000.0} # unknown
    lakecorrections['UnknownNLGer1'] = {'lonw':7.6,'lats':53.4,'lone':10.2,'latn':53.7,'removal':1000.0} # unknown
    lakecorrections['UnknownNLGer2'] = {'lonw':5.53,'lats':52.9,'lone':9.3,'latn':53.4,'removal':1000.0} # unknown
    lakecorrections['EastAnglia'] = {'lonw':-0.5,'lats':52.34,'lone':0.6,'latn':52.7,'removal':1000.0} # unknown
    for name, lake in lakecorrections.items():
        # skip boxes wholly outside the domain covered by this grid
        if(lake['lone'] < np.min(lonout) or lake['latn'] < np.min(latout) or
           lake['lonw'] > np.max(lonout) or lake['lats'] > np.max(latout)):
            continue
        print('[INFO] Correcting heights/depths for %s' % name)
        # index range of grid cells strictly inside the bounding box
        llx = np.min(np.where(lonout > lake['lonw']))
        urx = np.max(np.where(lonout < lake['lone'])) + 1
        lly = np.min(np.where(latout > lake['lats']))
        ury = np.max(np.where(latout < lake['latn'])) + 1
        if 'correction' in lake:
            # shift the lake's elevations by the known surface-level offset
            depout[lly:ury, llx:urx] = depout[lly:ury, llx:urx] + lake['correction']
            # NOTE(review): the mask updates below index the WHOLE grid, not
            # just the lake's box -- presumably a deliberate global re-sync
            # of mask against depths, but worth confirming.
            if lake['correction'] < 0.0:
                mskout[depout <= depthmin] = 0.0
            else:
                mskout[depout > depthmin] = 1.0
        else:
            # no level correction known: lift the area above sea level ('removal')
            depout[lly:ury, llx:urx] = depout[lly:ury, llx:urx] + lake['removal']
            mskout[depout > depthmin] = 1.0
    if removesmall is not None:
        print('[INFO] Checking through grid for small isolated water bodies at grid size %d' % removesmall)
        # slide a (removesmall+2) window over the mask; the one-cell ring
        # around the interior tests whether the water is fully land-enclosed
        chksize = removesmall + 2
        delcounter = 0
        for iy in range(np.shape(mskout)[0] - chksize):
            if np.mod(iy,100) == 0:
                print('[INFO] ..processed rows for %d y-cells and removed %d small water bodies' %(iy,delcounter))
            for ix in range(np.shape(mskout)[1] - chksize):
                chktmp = np.copy(mskout[iy:iy+chksize,ix:ix+chksize])
                # only mixed land/water windows can contain an enclosed pond
                if (not np.all(chktmp == 1.0)) and (not np.all(chktmp == 0.0)):
                    # if filling the interior makes the window all land, the
                    # interior water was isolated -> fill it in the real mask
                    chktmp[1:1+removesmall,1:1+removesmall] = 1.0
                    if np.all(chktmp == 1.0):
                        mskout[iy+1:iy+1+removesmall,ix+1:ix+1+removesmall] = 1.0
                        delcounter = delcounter + 1
        print('[INFO] Removed %d small water bodies from land-sea mask' %delcounter)
    return depout, mskout
def correctLakesBathyfromfile(ncfile, datadir='.', depthmin=0.0, removesmall=2, caspianonly=True, pltchk=True):
    """Apply the inland height/depth corrections directly to a netCDF file.

    Reads lon/lat/elevation/landmask from the file, runs correctLakesBathy,
    writes the corrected fields back in place, optionally shows a check
    plot, and stamps a 'corrected_land_lakes' global attribute.
    """
    print('[INFO] Applying inland height/depth corrections direct to netCDF file')
    print('[WARN] This action will overwrite existing depth and land-sea mask data')
    ncpath = datadir + '/' + ncfile
    with nc.Dataset(ncpath,'a') as dataset:
        lons = dataset.variables['lon'][:]
        lats = dataset.variables['lat'][:]
        elevs = dataset.variables['elevation'][:,:]
        lsmask = dataset.variables['landmask'][:,:]
        elevs, lsmask = correctLakesBathy(lats, lons, elevs, lsmask,
                                          depthmin=depthmin, removesmall=removesmall,
                                          caspianonly=caspianonly)
        # write the corrected fields back over the originals
        dataset.variables['elevation'][:,:] = elevs[:,:]
        dataset.variables['landmask'][:,:] = lsmask[:,:]
        if pltchk:
            plotGrid(lats, lons, depths=elevs, landsea=lsmask, depthmin=5.0, depthmax=-500.0)
        # record that corrections have been applied
        dataset.corrected_land_lakes = 'True'
def writeReducedNC(outfile, scalefac, depthmin, latout, lonout,
                   depout, mskout, datadir='.'):
    """ Write out the reduced dataset to a new netCDF file.

    Creates lat/lon coordinate variables plus 2D 'elevation' (m) and
    'landmask' fields, and records the reduction scale factor and
    minimum depth as global attributes.
    """
    loresfile = datadir + '/' + outfile
    print('[INFO] Writing reduced grid data to %s' %loresfile)
    with nc.Dataset(loresfile, 'w') as nbg:
        nbg.createDimension('lon',size=np.size(lonout))
        nbg.createDimension('lat',size=np.size(latout))
        nlat = nbg.createVariable('lat','f8',dimensions=('lat'))
        # bug fix: units were swapped between lat and lon
        # (CF convention: latitude is 'degrees_north', longitude 'degrees_east')
        nlat.units = 'degrees_north'
        nlat[:] = latout[:]
        nlon = nbg.createVariable('lon','f8',dimensions=('lon'))
        nlon.units = 'degrees_east'
        nlon[:] = lonout[:]
        nelev = nbg.createVariable('elevation','f8',dimensions=('lat','lon'))
        nelev.units = 'm'
        nelev[:,:] = depout[:,:]
        nmask = nbg.createVariable('landmask','f8',dimensions=('lat','lon'))
        nmask.units = '1'
        nmask[:,:] = mskout[:,:]
        # add global attributes to describe processing
        nbg.description = 'Reduced GEBCO bathymetry grid: mean depth values over cell'
        nbg.reduction_scale_factor = scalefac
        nbg.minimum_depth = depthmin
def writeInterpolatedNC(outfile, dx, dy, depthmin, latout, lonout,
                        depout, mskout, datadir='.'):
    """ Write out the interpolated dataset to a new netCDF file.

    Same layout as writeReducedNC, but records the interpolation grid
    spacings (dx, dy) instead of a reduction scale factor.
    """
    loresfile = datadir + '/' + outfile
    print('[INFO] Writing reduced grid data to %s' %loresfile)
    with nc.Dataset(loresfile, 'w') as nbg:
        nbg.createDimension('lon',size=np.size(lonout))
        nbg.createDimension('lat',size=np.size(latout))
        nlat = nbg.createVariable('lat','f8',dimensions=('lat'))
        # bug fix: units were swapped between lat and lon
        # (CF convention: latitude is 'degrees_north', longitude 'degrees_east')
        nlat.units = 'degrees_north'
        nlat[:] = latout[:]
        nlon = nbg.createVariable('lon','f8',dimensions=('lon'))
        nlon.units = 'degrees_east'
        nlon[:] = lonout[:]
        nelev = nbg.createVariable('elevation','f8',dimensions=('lat','lon'))
        nelev.units = 'm'
        nelev[:,:] = depout[:,:]
        nmask = nbg.createVariable('landmask','f8',dimensions=('lat','lon'))
        nmask.units = '1'
        nmask[:,:] = mskout[:,:]
        # add global attributes to describe processing
        nbg.description = 'Interpolated GEBCO bathymetry grid: mean depth values over cell'
        nbg.interpolation_dx = dx
        nbg.interpolation_dy = dy
        nbg.minimum_depth = depthmin
def plotGrid(latout, lonout, depths=None, landsea=None, depthmin=5.0, depthmax=-500.0):
    """Quick-look plot of the grid for visual checking.

    Shows the depth field (elevations above depthmin masked out, colour
    scale clipped at depthmax) and/or the land-sea mask; when both are
    given they share a 1x2 subplot figure.
    """
    print('[INFO] Plotting depths and/or land-sea mask for checking..')
    both = (depths is not None) and (landsea is not None)
    if depths is not None:
        if both:
            plt.subplot(1,2,1)
        # mask out land so only water depths are shaded
        masked_depths = np.ma.masked_greater(depths, depthmin)
        plt.pcolormesh(lonout, latout, masked_depths, vmin=depthmax)
        plt.colorbar()
    if landsea is not None:
        if both:
            plt.subplot(1,2,2)
        plt.pcolormesh(lonout, latout, landsea)
        plt.colorbar()
    plt.show()
def plotGridfromfile(ncfile, datadir='.', usedepths=True, uselandsea=True, depthmin=5.0, depthmax=-500.0):
""" Plots out the reduced grid as | |
# <gh_stars>10-100  (stray dataset/scraper artifact -- not valid Python; kept as a comment)
from flask import Flask, request, abort
from linebot import (LineBotApi, WebhookHandler)
from linebot.exceptions import (InvalidSignatureError)
from linebot.models import *
import json
import os
from linebot.exceptions import LineBotApiError
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import IntegrityError
import random
import csv
import re
import requests
from msg_template import Gps,Weather,Flex_template,Text_template
#---------------- LINE bot setup (credentials read from Heroku env vars) -----------------
app = Flask(__name__)
USER = os.environ.get('CHANNEL_ACCESS_TOKEN')
PASS = os.environ.get('CHANNEL_SECRET')
line_bot_api = LineBotApi(USER)
handler = WebhookHandler(PASS)
#---------------- database configuration -----------------
# ENV toggles between a local .env-supplied URI ('dev') and the
# DATABASE_URL provided by the hosting platform ('prod').
ENV = 'prod'
if ENV == 'dev':
    from dotenv import load_dotenv
    load_dotenv()
    SQLALCHEMY_DATABASE_URI_PRIVATE = os.getenv("SQLALCHEMY_DATABASE_URI_PRIVATE")
    app.debug = True
    app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI_PRIVATE
else:
    DATABASE_URL = os.environ.get('DATABASE_URL')
    app.debug = False
    app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URL
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# # #https://flask-sqlalchemy.palletsprojects.com/en/2.x/models/
# #---------------------------------initialize tables--------------------------
class Main_store(db.Model):
    """Brand-level store table: one row per ramen brand/chain."""
    __tablename__ = 'main_store'
    # surrogate primary key
    store_id = db.Column (db.Integer, primary_key = True)
    # brand name, unique across the table
    main_store = db.Column(db.String(50), nullable=False, unique = True)
    # one-to-many links to branch rows (Store) and crawled posts (Post)
    detail_store_relationship = db.relationship('Store', backref= 'main_store', lazy=True)
    post_relationship = db.relationship('Post', backref= 'main_store', lazy=True)
    def __init__(self, main_store):
        self.main_store = main_store
class Store(db.Model):
    """Branch-level store table: one row per physical ramen shop.

    NOTE: the 'discription' and 'longtitute' misspellings are part of the
    persisted schema -- renaming them requires a DB migration, so they are
    kept as-is throughout the code base.
    """
    __tablename__ = 'store'
    # branch primary key (string code)
    detail_store_id = db.Column (db.String(10), primary_key = True)
    # parent brand
    store_id = db.Column (db.Integer, db.ForeignKey('main_store.store_id'), nullable=False, onupdate ='CASCADE')
    # full branch name, unique
    store = db.Column (db.String(50), nullable=False, unique = True)
    # False once the shop has closed; queries filter on this flag
    still_there = db.Column (db.Boolean, nullable=False)
    address = db.Column (db.String(200))
    discription = db.Column (db.String(500))
    open_time = db.Column (db.String(200))
    latitude = db.Column (db.Numeric(10,8))
    longtitute = db.Column (db.Numeric(10,7))
    # Google Maps review text
    map_review = db.Column (db.Text())
    # single-character region code; presumably '北'/'中'/'南'/'東'
    # (see the region lookup dicts defined later in this module) -- confirm
    region = db.Column (db.String(1))
    # city/county name
    province = db.Column (db.String(3))
    # soup-style tag string
    soup = db.Column (db.String(200))
    transport = db.Column (db.String(100))
    store_favorite_relationship = db.relationship('Favorite', backref= 'store', lazy=True)
    def __init__(self, store_id, store, still_there, address, discription,\
        open_time, latitude, longtitute, map_review, region, province, soup, transport):
        self.store_id = store_id
        self.store = store
        self.still_there = still_there
        self.address = address
        self.discription = discription
        self.open_time = open_time
        self.latitude = latitude
        self.longtitute = longtitute
        self.map_review = map_review
        self.region = region
        self.province = province
        self.soup = soup
        self.transport = transport
class Post(db.Model):
    """Crawled Facebook review posts, keyed by post id string.

    NOTE(review): post_id is not set by __init__ -- presumably assigned
    elsewhere before insert; confirm against the data-loading code.
    """
    __tablename__ = 'post'
    post_id = db.Column (db.String(10), primary_key = True)
    # parent brand
    store_id = db.Column (db.Integer, db.ForeignKey('main_store.store_id'), nullable=False, onupdate ='CASCADE')
    stores = db.Column (db.String(30))
    # post creation timestamp
    create_on = db.Column (db.DateTime)
    # ramen dish named in the post
    ramen_name = db.Column (db.String(100))
    # full Facebook review text
    fb_review = db.Column (db.Text())
    def __init__(self, store_id, stores, create_on, ramen_name, fb_review):
        self.store_id = store_id
        self.stores = stores
        self.create_on = create_on
        self.ramen_name = ramen_name
        self.fb_review = fb_review
class Favorite(db.Model):
    """Join table recording which LINE user favourited which store branch."""
    __tablename__ = 'favorite'
    id = db.Column (db.Integer, primary_key = True)
    # LINE user id of the owner of this favourite
    line_id = db.Column (db.String(34), nullable = False)
    # the favourited branch
    detail_store_id = db.Column (db.String(10), db.ForeignKey('store.detail_store_id'), nullable = False, onupdate ='CASCADE')
    def __init__(self, line_id, detail_store_id):
        self.line_id = line_id
        self.detail_store_id = detail_store_id
def get_data_str(lst):
    """Serialise query rows into one '%'-separated string of records.

    Each element of *lst* is a (Main_store, Store, Post) row tuple; fields
    are emitted as 'KEY:value' pairs separated by commas, with records
    separated by '%'.  Rows without a Post (r[2] is None) fall back to the
    Google Maps review fields; rows whose Post object lacks the expected
    attributes are also routed to that fallback via AttributeError.

    NOTE(review): the backslash-continued f-strings embed the next source
    line's leading whitespace into the output string; the downstream
    parsing appears to tolerate this, but be careful when reformatting
    these literals -- their exact layout is part of the output.
    """
    output_before_random = ''
    for r in lst:
        if r[2] is None:
            # no FB post for this row: use the Google Maps review fields
            output_before_random += f'STORE:{r[1].store},ADDRESS:{r[1].address},DISCRIPTION:{r[1].discription},TRANSPORT:{r[1].transport},\
            MAP_REVIEW:{r[1].map_review},\
            LONGITUDE:{r[1].longtitute},LATITUDE:{r[1].latitude},OPEN_TIME:{r[1].open_time},\
            CHECK_TAG:{r[1].soup},CHECK_CITY:{r[1].province}%'
        else:
            try:
                # FB post present: include its creation time, dish and review
                output_before_random += f'STORE:{r[1].store},ADDRESS:{r[1].address},DISCRIPTION:{r[1].discription},TRANSPORT:{r[1].transport},\
                FB_R_CREATE:{r[2].create_on},FB_R_RAMEN:{r[2].ramen_name},FB_R_CONTENT:{r[2].fb_review},\
                LONGITUDE:{r[1].longtitute},LATITUDE:{r[1].latitude},OPEN_TIME:{r[1].open_time},\
                CHECK_TAG:{r[1].soup},CHECK_CITY:{r[1].province}%'
            except AttributeError as error:
                # malformed Post object: fall back to the Maps review fields
                output_before_random += f'STORE:{r[1].store},ADDRESS:{r[1].address},DISCRIPTION:{r[1].discription},TRANSPORT:{r[1].transport},\
                MAP_REVIEW:{r[1].map_review},\
                LONGITUDE:{r[1].longtitute},LATITUDE:{r[1].latitude},OPEN_TIME:{r[1].open_time},\
                CHECK_TAG:{r[1].soup},CHECK_CITY:{r[1].province}%'
    return output_before_random
def query_province_soup(p, s):
    """Query open stores in province *p* whose soup tags contain *s*.

    Returns the (Main_store, Store, Post) SQLAlchemy query, not a list.
    """
    query = db.session.query(Main_store, Store, Post)
    query = query.outerjoin(Post, Post.store_id == Main_store.store_id)
    query = query.outerjoin(Store, Store.store_id == Main_store.store_id)
    query = query.filter(Store.province == p)
    query = query.filter(Store.soup.contains(s))
    query = query.filter(Store.still_there == True)
    return query
def query_province_direct(p):
    """Query all open stores in province *p*, regardless of soup style.

    Returns the (Main_store, Store, Post) SQLAlchemy query, not a list.
    """
    query = db.session.query(Main_store, Store, Post)
    query = query.outerjoin(Post, Post.store_id == Main_store.store_id)
    query = query.outerjoin(Store, Store.store_id == Main_store.store_id)
    query = query.filter(Store.province == p)
    query = query.filter(Store.still_there == True)
    return query
#---------------- used when the user types a store name directly -----------------
def query_store(store_k1,store_k2):
    """Query open stores whose name contains both keywords.

    Returns the (Main_store, Store, Post) SQLAlchemy query, not a list.
    """
    query = db.session.query(Main_store, Store, Post)
    query = query.outerjoin(Post, Post.store_id == Main_store.store_id)
    query = query.outerjoin(Store, Store.store_id == Main_store.store_id)
    query = query.filter(Store.store.contains(store_k1))
    query = query.filter(Store.store.contains(store_k2))
    query = query.filter(Store.still_there == True)
    return query
#---------------- used by the GPS / location lookup feature -----------------
def query_region_by_store_table(r):
    """Query all open stores in region *r* (region code on the Store table).

    Returns the (Main_store, Store) SQLAlchemy query, not a list.
    """
    query = db.session.query(Main_store, Store)
    query = query.outerjoin(Store, Store.store_id == Main_store.store_id)
    query = query.filter(Store.region == r)
    query = query.filter(Store.still_there == True)
    return query
#--------------------- string formatting helpers -------------------------------
def convert_string_to_lst(string, c):
    """Split *string* on separator *c* and return the pieces as a list.

    str.split already returns a list, so the previous extra list() wrapper
    was redundant and has been removed; behaviour is unchanged.
    """
    return string.split(c)
def divide_map_review(comment_s):
    """Tokenise a Google Maps review string into a list of words.

    Normalises spaced hyphens (' - ', '- ', ' -') to bare '-', splits on
    spaces, and merges a 'Xxxa' token with a following capitalised token
    (a heuristic to rejoin names that the scrape split apart).

    Bug fix: the original removed list items by value while iterating with
    enumerate -- remove() deletes the *first* occurrence, which corrupts
    the result when the review contains duplicate tokens.  This version
    deletes by index with an explicit cursor, preserving the intended
    merge behaviour for all inputs.
    """
    comment_clean = comment_s.replace(" - ", "-").replace("- ", "-").replace(" -", "-")
    comment_clean_split = re.split('[ ]', comment_clean)
    comment_lst = [tok for tok in comment_clean_split if tok]
    if len(comment_lst) <= 1:
        return comment_lst
    comment_final_list = []
    i = 0
    while i < len(comment_lst):
        val = comment_lst[i]
        # merge 'Word' + 'Next' when val ends lowercase, starts uppercase,
        # and the following token also starts uppercase
        if (i != len(comment_lst) - 1 and val[-1].islower()
                and val[0].isupper() and comment_lst[i + 1][0].isupper()):
            val = val + comment_lst[i + 1]
            del comment_lst[i + 1]
        comment_final_list.append(val)
        i += 1
    return comment_final_list
##---------------- favourites: look up a user's saved store names -----------------
def get_love_list_from_user_id(user_id):
    """Return the list of favourite store names saved by a LINE user.

    Store names are joined with '%' separators, cleaned of non-breaking
    and ordinary spaces, then split back into a list of non-empty names
    (mirroring the serialisation used elsewhere in this module).
    """
    rows = db.session.query(Store,Favorite)\
        .outerjoin(Favorite, Favorite.detail_store_id == Store.detail_store_id)\
        .filter(Favorite.line_id == user_id)
    joined = ''.join(f'{row[0].store}%' for row in rows)
    cleaned = joined.replace(u'\xa0', u' ').replace(' ','')
    return [name for name in cleaned.split('%') if name]
def count_store_in_table(store_name):
    """Return how many rows in the store table match *store_name* exactly."""
    matches = db.session.query(Store).filter(Store.store == store_name)
    return matches.count()
def get_store_id(store_name):
    """Return the concatenated detail_store_id values of rows matching *store_name*.

    With the unique constraint on Store.store this is normally a single id;
    an unmatched name yields the empty string.
    """
    rows = db.session.query(Store).filter(Store.store == store_name)
    return ''.join(row.detail_store_id for row in rows)
def store_exist(get_user_line_id, store_name):
    """Count favourite entries linking this LINE user to this store name."""
    matches = db.session.query(Store, Favorite)\
        .join(Favorite, Favorite.detail_store_id == Store.detail_store_id)\
        .filter(Favorite.line_id == get_user_line_id)\
        .filter(Store.store == store_name)
    return matches.count()
def count_love_list(user_id):
    """Return the number of favourites saved by this LINE user."""
    favourites = db.session.query(Favorite).filter(Favorite.line_id == user_id)
    return favourites.count()
##---------------- query the favourites rows for a LINE user id ----------------
def get_list_from_user_id(user_id):
    """Return the (Store, Favorite) query for a user's favourites list."""
    query = db.session.query(Store,Favorite)
    query = query.outerjoin(Favorite, Favorite.detail_store_id == Store.detail_store_id)
    return query.filter(Favorite.line_id == user_id)
def query_map_review_by_full_name(s):
    """Serialise the open store whose name equals *s* into the same
    '%'-separated 'KEY:value' record format used by get_data_str.

    NOTE(review): the backslash-continued f-string embeds the next source
    line's leading whitespace into the output, and the cleanup below only
    strips newlines and non-breaking spaces -- preserve the literal's
    exact layout when editing.
    """
    review = db.session.query(Store).filter(Store.store == s).filter(Store.still_there == True)
    love_list = ''
    for l in review:
        love_list += f'STORE:{l.store},ADDRESS:{l.address},DISCRIPTION:{l.discription},TRANSPORT:{l.transport},MAP_REVIEW:{l.map_review},CITY:{l.province},LONGITUDE:{l.longtitute},LATITUDE:{l.latitude},\
    OPEN_TIME:{l.open_time},CHECK_TAG:{l.soup}%'
    # normalise non-breaking spaces and drop newlines from the review text
    love_list = love_list.replace(u'\xa0', u' ').replace('\n','')
    return love_list
#---------------- Flex template for the favourites list -----------------
def favorite_list_generator(favorite_list):
    """Build the 'favourites' Flex bubble listing each saved store.

    Each store gets a primary button that triggers a search and a small
    secondary button that removes it from the list.

    Bug fix: BubbleContainer was constructed with the misspelled keyword
    'director', which the SDK silently ignored; the correct parameter is
    'direction'.
    """
    rows = [BoxComponent(
        layout="vertical",
        margin="sm",
        spacing="sm",
        contents=[
            TextComponent(text="最愛清單",weight="bold",size="xl",margin="sm",wrap=True,),
            SeparatorComponent(margin = "xxl")
        ])]
    for store_name in favorite_list:
        # tapping the store name searches it; '-' deletes it from the list
        search_button = ButtonComponent(style="primary", color="#997B66", size="sm", margin="sm",
                                        action=MessageAction(label=store_name, text=f'搜尋你的清單♡{store_name}'),)
        delete_button = ButtonComponent(style="secondary", color="#F1DCA7", size="sm", margin="sm", flex=0,
                                        action=MessageAction(label="-", text="刪除最愛清單♡"+store_name),)
        rows.append(BoxComponent(layout="horizontal", margin="md", spacing="sm",
                                 contents=[search_button, delete_button]))
    bubble = BubbleContainer(
        direction='ltr',
        body=BoxComponent(
            layout="vertical",
            contents=rows
        )
    )
    return bubble
#----------------tag functions-----------
def tags_button_generator(tag_lst, append_obj, city):
    """Append message buttons for each soup tag to a Flex-JSON dict, two per row.

    Mutates append_obj["body"]["contents"] in place and also returns
    *append_obj*.  Each button sends the message "{city}:{tag}".

    Idiom fix: grouping now slices with a step of 2, which never produces
    an empty trailing group, so the old filtering pass over
    range(len//2 + 1) slices is no longer needed.  Output is unchanged.
    """
    lst_to_append_tags = append_obj["body"]["contents"]
    tag_btn_lst = []
    for item in tag_lst:
        tag_btn = {
            "type": "button",
            "action": {
                "type": "message",
                "label": item,
                "text": f"{city}:{item}"
            },
            "color": "#D08C60"
        }
        tag_btn_lst.append(tag_btn)
    # pair the buttons two per row
    tag_btn_group = [tag_btn_lst[i:i + 2] for i in range(0, len(tag_btn_lst), 2)]
    for sub in tag_btn_group:
        tag_btn_layout = {
            "type": "box",
            "layout": "vertical",
            "margin": "sm",
            "contents": sub
        }
        lst_to_append_tags.append(tag_btn_layout)
    return append_obj
def store_query_tags(s):
    """Return the soup-tag string of stores whose name equals *s*.

    Matching rows' soup fields are concatenated; a None soup is rendered
    as the literal string 'None' (same as the original += of f-strings).
    """
    rows = db.session.query(Store).filter(Store.store == s)
    return ''.join(f"{row.soup}" for row in rows)
#---------------- city -> region lookup tables -----------------
# Taiwanese city/county names grouped by region; both the '台' and '臺'
# spellings are listed where incoming data may use either form.
north = ["台北市","新北市","基隆市","桃園市","苗栗縣","新竹縣","新竹市","臺北市"]
center = ["台中市","彰化縣","南投縣","雲林縣","臺中市"]
south = ["嘉義市","台南市","高雄市","屏東縣","臺南市"]
east = ["宜蘭縣","花蓮縣","台東縣","臺東縣"]
# map every city name to a (Chinese region label, region key) pair
n_dict = dict.fromkeys(north, ("北","north"))
c_dict = dict.fromkeys(center, ("中","center"))
s_dict = dict.fromkeys(south, ("南","south"))
e_dict = dict.fromkeys(east, ("東","east"))
#---------------- LINE webhook endpoints -----------------
@app.route("/", methods=['GET'])
def hello():
    # Simple liveness endpoint (also answers LINE's URL verification GET).
    return "Hello <NAME>!"
@app.route("/", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the signature and dispatch events."""
    # get X-Line-Signature header value
    signature = request.headers['X-Line-Signature']
    # get request body as text
    body = request.get_data(as_text=True)
    print("Request body: " + body, "Signature: " + signature)
    # handle webhook body
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        # signature does not match the channel secret -> reject the request
        abort(400)
    return 'OK RAMEN'
#----------------設定回覆訊息介面-----------------
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
#----------------取得userid-----------------
user_id = event.source.user_id
if user_id == '':
user_id = event.source.user_id
TWregion = ["北部","中部","南部","東部"]
city_name = ["台北市","新北市","基隆市","桃園市","苗栗縣","新竹縣","新竹市","台中市","彰化縣","南投縣","雲林縣","嘉義市","台南市","高雄市","屏東縣","宜蘭縣","花蓮縣","台東縣"]
city_name_dic = {**n_dict, **c_dict, **s_dict, **e_dict}
city_region_dict = dict(zip(["north","center","south","east"], [north,center,south,east]))
#----------------拉麵推薦介面-----------------
if event.message.text == "拉麵推薦":
flex_message0 = Flex_template.main_panel_flex()
line_bot_api.reply_message(event.reply_token,flex_message0)
#----------------不同區域的介面設定-----------------
elif event.message.text in TWregion:
#讀需要的json資料
f_region = open('json_files_for_robot/json_for_app.json', encoding="utf8")
data_region = json.load(f_region)
for i,v in enumerate(TWregion):
if event.message.text == v:
flex_message1 = FlexSendMessage(
alt_text= v + '的縣市',
contents= data_region[i]
)
line_bot_api.reply_message(event.reply_token,flex_message1)
f_region.close()
#----------------選擇湯頭介面-----------------
elif "湯頭推薦:" in event.message.text:
user_choice = event.message.text
city_choice = user_choice[user_choice.index(':')+1:]
#用迴圈去讀湯頭選單
#讀需要的推薦介面json資料
f_city_soup = open('json_files_for_robot/soup_'+city_name_dic[city_choice][1]+'_city.json', encoding="utf8")
data_city_soup = json.load(f_city_soup)
#---------------------get province list----------------#
for i, v in enumerate(city_region_dict[city_name_dic[city_choice][1]]):
if v == city_choice:
flex_message2 = FlexSendMessage(
alt_text='快回來看看我幫你找到的湯頭!',
contents= data_city_soup[i]
)
line_bot_api.reply_message(event.reply_token,flex_message2)
f_city_soup.close()
elif event.message.text in city_name:
flex_message5 = Flex_template.soup_direct_flex(event.message.text)
line_bot_api.reply_message(event.reply_token,flex_message5)
elif event.message.text == "嘉義縣":
line_bot_api.reply_message(event.reply_token, TextSendMessage(text = "抱歉!\uDBC0\uDC7c 這邊尚未有拉麵店,請至附近其他縣市看看!"))
elif ('湯頭推薦:'not in event.message.text and '評論' not in event.message.text and ':' in event.message.text) and ':' not in event.message.text[0] and ':' not in event.message.text[-1] and '最愛清單' not in event.message.text:
user_choice = event.message.text
select_first_param = user_choice[:user_choice.index(':')]
select_second_param = user_choice[user_choice.index(':')+1:]
result = ''
if ((select_first_param == '直接推薦') or (select_first_param == '看更多推薦')) and select_second_param in city_name:
result = query_province_direct(select_second_param)
elif select_first_param in city_name:
result = query_province_soup(select_first_param, select_second_param)
else:
result = ''
# #---------------------------------put all data to a string--------------------------
if result == '':
line_bot_api.reply_message(event.reply_token, TextSendMessage(text = "\uDBC0\uDC7c輸入的字串不合法,查詢不到你想要的東西"))
output_before_random_clear = get_data_str(result)
if output_before_random_clear == None or output_before_random_clear == '':
line_bot_api.reply_message(event.reply_token, TextSendMessage(text = "\uDBC0\uDC7c輸入的字串不合法,查詢不到你想要的東西"))
else:
output_before_random_clear = output_before_random_clear.replace(u'\xa0', u' ').replace('\n','')
#---------------------------------change data to a list of datas--------------------------
output_whole_lst = convert_string_to_lst(output_before_random_clear,'%')
output_whole_lst = [i for i in output_whole_lst if i]
#---------------------------------random(everytime renew can auto random)--------------------------
output_s = random.choice(output_whole_lst)
output_lst = convert_string_to_lst(output_s, ',')
if len(output_lst) == 12 or len(output_lst) == 10:
store_n = output_lst[0][output_lst[0].index(':')+1:]
address = output_lst[1][output_lst[1].index(':')+1:]
descrip = output_lst[2][output_lst[2].index(':')+1:]
trans = output_lst[3][output_lst[3].index(':')+1:]
f_city = output_lst[-1][output_lst[-1].index(':')+1:]
if len(output_lst) == 12:
#FB評論
c1 = output_lst[4][output_lst[4].index(':')+1:]
c2 = output_lst[5][output_lst[5].index(':')+1:]
c3 = output_lst[6][output_lst[6].index(':')+1:]
comment = f'貼文時間:\n{c1}\n\n品項:\n{c2}\n\n評論:\n{c3}'
lon = output_lst[7][output_lst[7].index(':')+1:]
lat = output_lst[8][output_lst[8].index(':')+1:]
op = output_lst[9][output_lst[9].index(':')+1:]
elif len(output_lst) == 10:
#googleMap
comment = output_lst[4][output_lst[4].index(':')+1:]
lon = output_lst[5][output_lst[5].index(':')+1:]
lat = output_lst[6][output_lst[6].index(':')+1:]
op = output_lst[7][output_lst[7].index(':')+1:]
else:
line_bot_api.reply_message(event.reply_token, TextSendMessage(text = Text_template.error_warning_text('O1')) )
flex_message9 = Flex_template.double_flex("快回來看看我幫你找到的店家!", store_n, address, lon, lat, descrip, trans, op, "看同類推薦", user_choice, f_city, comment, "+到最愛", "加到最愛清單")
line_bot_api.reply_message(event.reply_token,flex_message9)
else:
line_bot_api.reply_message(event.reply_token, TextSendMessage(text = f"資料庫有誤"))
elif ' ' in event.message.text and ' ' not in event.message.text[-1] and ' ' not in | |
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeIPProductInfo(argv, arglist):
    """CLI entry point for the DescribeIPProductInfo API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeIPProductInfo", g_param[OptionsDefine.Version])
        return
    # request parameters collected from the command line
    param = {
        "Business": argv.get("--Business"),
        "IpList": Utils.try_to_json(argv, "--IpList"),
    }
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    client = CLIENT_MAP[version].DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeIPProductInfoRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeIPProductInfo(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # some SDK versions return bytes under python3.3
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCCSelfDefinePolicy(argv, arglist):
    """CLI entry point for the DescribeCCSelfDefinePolicy API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeCCSelfDefinePolicy", g_param[OptionsDefine.Version])
        return
    # request parameters collected from the command line
    param = {
        "Business": argv.get("--Business"),
        "Id": argv.get("--Id"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
    }
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    client = CLIENT_MAP[version].DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeCCSelfDefinePolicyRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeCCSelfDefinePolicy(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # some SDK versions return bytes under python3.3
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyCCFrequencyRulesStatus(argv, arglist):
    """CLI action: enable or disable a CC frequency-limiting rule.

    Builds a ModifyCCFrequencyRulesStatus request from the
    --Business/--Id/--RuleId/--Method flags, sends it through the Dayu
    client and prints the JSON response via FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyCCFrequencyRulesStatus", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Id": argv.get("--Id"),
        "RuleId": argv.get("--RuleId"),
        "Method": argv.get("--Method"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyCCFrequencyRulesStatusRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.ModifyCCFrequencyRulesStatus(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyCCThreshold(argv, arglist):
    """CLI action: modify the CC protection threshold of a Dayu resource.

    Builds a ModifyCCThreshold request from the command-line flags
    (including the Basic* fields used for basic-protection IPs), sends it
    through the Dayu client and prints the JSON response via FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyCCThreshold", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Threshold": Utils.try_to_json(argv, "--Threshold"),
        "Id": argv.get("--Id"),
        "Protocol": argv.get("--Protocol"),
        "RuleId": argv.get("--RuleId"),
        "BasicIp": argv.get("--BasicIp"),
        "BasicRegion": argv.get("--BasicRegion"),
        "BasicBizType": argv.get("--BasicBizType"),
        "BasicDeviceType": argv.get("--BasicDeviceType"),
        "BasicIpInstance": argv.get("--BasicIpInstance"),
        "BasicIspCode": Utils.try_to_json(argv, "--BasicIspCode"),
        "Domain": argv.get("--Domain"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyCCThresholdRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.ModifyCCThreshold(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribleL4Rules(argv, arglist):
    """CLI action: list layer-4 forwarding rules of a Dayu resource.

    (The API action name "DescribleL4Rules" is misspelled upstream and must
    be kept as-is to match the service.)  Builds the request from the
    --Business/--Id/--RuleIdList/--Limit/--Offset flags, sends it through
    the Dayu client and prints the JSON response via FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribleL4Rules", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Id": argv.get("--Id"),
        "RuleIdList": Utils.try_to_json(argv, "--RuleIdList"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribleL4RulesRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribleL4Rules(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyNewDomainRules(argv, arglist):
    """CLI action: modify layer-7 (domain) forwarding rules of a Dayu resource.

    Builds a ModifyNewDomainRules request from the --Business/--Id/--Rule
    flags, sends it through the Dayu client and prints the JSON response
    via FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyNewDomainRules", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Id": argv.get("--Id"),
        "Rule": Utils.try_to_json(argv, "--Rule"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyNewDomainRulesRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.ModifyNewDomainRules(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCCUrlAllow(argv, arglist):
    """CLI action: query the CC URL allow-list of a Dayu resource.

    Builds a DescribeCCUrlAllow request from the
    --Business/--Id/--Type/--Limit/--Offset/--Protocol flags, sends it
    through the Dayu client and prints the JSON response via FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeCCUrlAllow", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Id": argv.get("--Id"),
        "Type": Utils.try_to_json(argv, "--Type"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Protocol": argv.get("--Protocol"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeCCUrlAllowRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeCCUrlAllow(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeL7HealthConfig(argv, arglist):
    """CLI action: query layer-7 health-check configuration.

    Builds a DescribeL7HealthConfig request from the
    --Business/--Id/--RuleIdList flags, sends it through the Dayu client
    and prints the JSON response via FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeL7HealthConfig", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Id": argv.get("--Id"),
        "RuleIdList": Utils.try_to_json(argv, "--RuleIdList"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeL7HealthConfigRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeL7HealthConfig(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCCTrend(argv, arglist):
    """CLI action: query CC-attack metric trend data for an IP or domain.

    Builds a DescribeCCTrend request from the command-line flags
    (business, IP, metric name, period and time range), sends it through
    the Dayu client and prints the JSON response via FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeCCTrend", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Ip": argv.get("--Ip"),
        "MetricName": argv.get("--MetricName"),
        "Period": Utils.try_to_json(argv, "--Period"),
        "StartTime": argv.get("--StartTime"),
        "EndTime": argv.get("--EndTime"),
        "Id": argv.get("--Id"),
        "Domain": argv.get("--Domain"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeCCTrendRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeCCTrend(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateCCFrequencyRules(argv, arglist):
    """CLI action: create a CC frequency-limiting rule.

    Builds a CreateCCFrequencyRules request from the command-line flags
    (rule matching criteria such as URI/User-Agent/Cookie plus the
    period/request-count/action settings), sends it through the Dayu
    client and prints the JSON response via FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateCCFrequencyRules", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Id": argv.get("--Id"),
        "RuleId": argv.get("--RuleId"),
        "Mode": argv.get("--Mode"),
        "Period": Utils.try_to_json(argv, "--Period"),
        "ReqNumber": Utils.try_to_json(argv, "--ReqNumber"),
        "Act": argv.get("--Act"),
        "ExeDuration": Utils.try_to_json(argv, "--ExeDuration"),
        "Uri": argv.get("--Uri"),
        "UserAgent": argv.get("--UserAgent"),
        "Cookie": argv.get("--Cookie"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateCCFrequencyRulesRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.CreateCCFrequencyRules(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyL7Rules(argv, arglist):
    """CLI action: modify a layer-7 forwarding rule of a Dayu resource.

    Builds a ModifyL7Rules request from the --Business/--Id/--Rule flags,
    sends it through the Dayu client and prints the JSON response via
    FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyL7Rules", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Id": argv.get("--Id"),
        "Rule": Utils.try_to_json(argv, "--Rule"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyL7RulesRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.ModifyL7Rules(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeBasicCCThreshold(argv, arglist):
    """CLI action: query the CC threshold of a basic-protection IP.

    Builds a DescribeBasicCCThreshold request from the Basic* flags
    (IP, region, business type, device type, instance, ISP code), sends
    it through the Dayu client and prints the JSON response via
    FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeBasicCCThreshold", g_param[OptionsDefine.Version])
        return
    param = {
        "BasicIp": argv.get("--BasicIp"),
        "BasicRegion": argv.get("--BasicRegion"),
        "BasicBizType": argv.get("--BasicBizType"),
        "BasicDeviceType": argv.get("--BasicDeviceType"),
        "BasicIpInstance": argv.get("--BasicIpInstance"),
        "BasicIspCode": Utils.try_to_json(argv, "--BasicIspCode"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeBasicCCThresholdRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeBasicCCThreshold(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateL7HealthConfig(argv, arglist):
    """CLI action: upload layer-7 health-check configuration.

    Builds a CreateL7HealthConfig request from the
    --Business/--Id/--HealthConfig flags, sends it through the Dayu
    client and prints the JSON response via FormatOutput.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateL7HealthConfig", g_param[OptionsDefine.Version])
        return
    param = {
        "Business": argv.get("--Business"),
        "Id": argv.get("--Id"),
        "HealthConfig": Utils.try_to_json(argv, "--HealthConfig"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile)
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateL7HealthConfigRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.CreateL7HealthConfig(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # Older interpreters (pre-3.6) cannot json.loads() bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeResIpList(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeResIpList", g_param[OptionsDefine.Version])
return
param = {
"Business": argv.get("--Business"),
"IdList": Utils.try_to_json(argv, "--IdList"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile)
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DayuClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion | |
a Field from a document_pb.Field protocol buffer."""
name = pb.name()
value = _GetValue(pb.value())
lang = None
if pb.value().has_language():
lang = pb.value().language()
val_type = pb.value().type()
if val_type == document_pb.FieldValue.TEXT:
return TextField(name, value, lang)
elif val_type == document_pb.FieldValue.HTML:
return HtmlField(name, value, lang)
elif val_type == document_pb.FieldValue.ATOM:
return AtomField(name, value, lang)
elif val_type == document_pb.FieldValue.DATE:
return DateField(name, value)
elif val_type == document_pb.FieldValue.NUMBER:
return NumberField(name, value)
return InvalidRequest('Unknown field value type %d' % val_type)
class Document(object):
    """A user-generated document to be indexed by the search service.

    A document bundles an optional printable-ASCII identifier, a list of
    Field instances, a language code, and an order id used to rank the
    document in search results.  Example:

        Document(doc_id='document id',
                 fields=[TextField(name='subject', value='going for dinner'),
                         HtmlField(name='body',
                                   value='<html>I found a place.</html>',
                         TextField(name='signature', value='brzydka pogoda',
                                   language='pl')],
                 language='en')
    """

    # Epoch used when deriving a default order id (seconds since this date).
    _FIRST_JAN_2011 = datetime.datetime(2011, 1, 1)

    def __init__(self, doc_id=None, fields=None, language='en', order_id=None):
        """Initializer.

        Args:
          doc_id: Visible printable ASCII identifier not starting with '!'
            and containing no whitespace.  If omitted, the search service
            assigns one.
          fields: Iterable of Field instances making up the document content.
          language: Language code of the field values.
          order_id: Rank id with 0 <= order_id <= sys.maxint; defaults to the
            number of seconds since 1st Jan 2011.  Results are returned in
            descending order id.

        Raises:
          TypeError: On invalid parameter types or unknown attributes.
          ValueError: On invalid parameter values.
        """
        if doc_id is not None:
            _CheckDocumentId(doc_id)
        self._doc_id = doc_id
        self._fields = _GetList(fields)
        self._language = _CheckLanguage(language)
        self._order_id = self._CheckOrderId(
            self._GetDefaultOrderId() if order_id is None else order_id)

    @property
    def doc_id(self):
        """The document identifier."""
        return self._doc_id

    @property
    def fields(self):
        """The list of fields of the document."""
        return self._fields

    @property
    def language(self):
        """The code of the language the document fields are written in."""
        return self._language

    @property
    def order_id(self):
        """The id used to return documents in a defined order."""
        return self._order_id

    def _CheckOrderId(self, order_id):
        """Validates the order id and returns it."""
        return _CheckInteger(order_id, 'order_id', upper_bound=sys.maxint)

    def _GetDefaultOrderId(self):
        """Default order id: whole seconds elapsed since 1st Jan 2011."""
        delta = datetime.datetime.now() - Document._FIRST_JAN_2011
        return delta.days * 24 * 3600 + delta.seconds

    def __repr__(self):
        return _Repr(
            self, [('doc_id', self.doc_id), ('fields', self.fields),
                   ('language', self.language), ('order_id', self.order_id)])

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        if (self.doc_id, self.order_id, self.language) != (
                other.doc_id, other.order_id, other.language):
            return False
        return (len(self.fields) == len(other.fields) and
                sorted(self.fields) == sorted(other.fields))

    def __ne__(self, other):
        return not self == other

    def __key(self):
        return self.doc_id

    def __hash__(self):
        # Hash on doc_id only; equal documents share a doc_id, so this is
        # consistent with __eq__.
        return hash(self.__key())

    def __str__(self):
        return repr(self)
def _CopyDocumentToProtocolBuffer(document, pb):
    """Fills a document_pb.Document protocol buffer from a Document and returns it."""
    pb.set_storage(document_pb.Document.DISK)
    if document.doc_id:
        pb.set_id(document.doc_id)
    if document.language:
        pb.set_language(document.language)
    for field in document.fields:
        _CopyFieldToProtocolBuffer(field, pb.add_field())
    pb.set_order_id(document.order_id)
    return pb
def _NewFieldsFromPb(field_list):
    """Builds a list of Field objects, one per document_pb.Field proto in field_list."""
    return [_NewFieldFromPb(field_pb) for field_pb in field_list]
def _NewDocumentFromPb(doc_pb):
    """Builds a Document from a document_pb.Document protocol buffer."""
    language = doc_pb.language() if doc_pb.has_language() else None
    return Document(doc_id=doc_pb.id(),
                    fields=_NewFieldsFromPb(doc_pb.field_list()),
                    language=language,
                    order_id=doc_pb.order_id())
def _QuoteString(argument):
return '"' + argument.replace('"', '\\\"') + '"'
class FieldExpression(object):
    """A named expression computed for each search result.

    For example,

        FieldExpression(name='content_snippet',
                        expression='snippet("very important", content)')

    asks the service to return a computed field 'content_snippet' with each
    result, holding HTML snippets of the 'content' field matching the query
    'very important'.
    """

    _MAXIMUM_EXPRESSION_LENGTH = 1000
    _MAXIMUM_OPERATOR_LENGTH = 100

    def __init__(self, name, expression):
        """Initializer.

        Args:
          name: Name of the computed field the expression is returned under.
          expression: Expression evaluated for each result.

        Raises:
          TypeError: On invalid parameter types or unknown attributes.
          ValueError: On invalid parameter values.
        """
        self._name = _CheckFieldName(name)
        if expression is None:
            raise ValueError('expression in FieldExpression cannot be null')
        if not isinstance(expression, basestring):
            raise TypeError('expression expected in FieldExpression, but got %s' %
                            type(expression))
        self._expression = _CheckExpression(expression)

    @property
    def name(self):
        """Name of the expression to return in search results."""
        return self._name

    @property
    def expression(self):
        """String containing an expression returned in search results."""
        return self._expression

    def __repr__(self):
        return _Repr(self, [('name', self.name), ('expression', self.expression)])
def _CopyFieldExpressionToProtocolBuffer(field_expression, pb):
    """Fills a search_service_pb.FieldSpec_Expression from a FieldExpression."""
    pb.set_name(field_expression.name)
    pb.set_expression(field_expression.expression)
class SortOption(object):
    """One dimension to sort search results on.

    Do not use directly; use the subclasses MatchScorer,
    RescoringMatchScorer or SortExpression instead.
    """

    ASCENDING = 'ASCENDING'
    DESCENDING = 'DESCENDING'
    _DIRECTIONS = frozenset((ASCENDING, DESCENDING))

    def __init__(self, limit=1000):
        """Initializer.

        Args:
          limit: Cap on the number of documents to score; only relevant
            when a scorer is used, ignored otherwise.

        Raises:
          TypeError: On invalid parameter types or unknown attributes.
          ValueError: On invalid parameter values.
        """
        self._limit = _CheckSortLimit(limit)

    @property
    def limit(self):
        """Cap on the number of documents to score."""
        return self._limit

    @property
    def direction(self):
        """Direction to sort documents based on score (always DESCENDING here)."""
        return self.DESCENDING

    def _CheckDirection(self, direction):
        """Validates direction against the allowed SortOption directions."""
        return _CheckEnum(direction, 'direction', values=self._DIRECTIONS)

    def __repr__(self):
        return _Repr(self, [('direction', self.direction), ('limit', self.limit)])
class MatchScorer(SortOption):
    """Sorts documents in ascending order of a term-based score.

    The match scorer scores each document by term frequency divided by
    document frequency.
    """

    def __init__(self, limit=1000):
        """Initializer.

        Args:
          limit: Cap on the number of documents to score; only relevant
            when a scorer is used, ignored otherwise.

        Raises:
          TypeError: On invalid parameter types or unknown attributes.
          ValueError: On invalid parameter values.
        """
        super(MatchScorer, self).__init__(limit=limit)
class RescoringMatchScorer(SortOption):
    """Sorts documents in ascending order of a part-weighted match score.

    A rescoring match scorer starts from the basic match score and then
    weights contributions from different parts of the document.
    """

    def __init__(self, limit=1000):
        """Initializer.

        Args:
          limit: Cap on the number of documents to score; only relevant
            when a scorer is used, ignored otherwise.

        Raises:
          TypeError: On invalid parameter types or unknown attributes.
          ValueError: On invalid parameter values.
        """
        super(RescoringMatchScorer, self).__init__(limit=limit)
def _CopySortExpressionToProtocolBuffer(sort_expression, pb):
    """Fills a search_service_pb.SortSpec from a SortExpression and returns it."""
    pb.set_sort_expression(sort_expression.expression)
    if sort_expression.direction == SortOption.ASCENDING:
        pb.set_sort_descending(False)
    default = sort_expression.default_value
    if default is not None:
        if isinstance(default, basestring):
            pb.set_default_value_text(default)
        else:
            pb.set_default_value_numeric(default)
    return pb
def _CopySortOptionToScorerSpecProtocolBuffer(sort_option, pb):
    """Fills a search_service_pb.ScorerSpec from a SortOption and returns it.

    Accepts a RescoringMatchScorer, a MatchScorer, or a SortSpec (which is
    delegated to _CopySortSpecToScorerSpecProtocolBuffer); anything else
    raises TypeError.
    """
    if isinstance(sort_option, RescoringMatchScorer):
        pb.set_scorer(search_service_pb.ScorerSpec.RESCORING_MATCH_SCORER)
    elif isinstance(sort_option, MatchScorer):
        pb.set_scorer(search_service_pb.ScorerSpec.MATCH_SCORER)
    elif isinstance(sort_option, SortSpec):
        _CopySortSpecToScorerSpecProtocolBuffer(sort_option, pb)
    else:
        # Fixed error message: previously misspelled "RescoringMatchRescorer"
        # and omitted the accepted SortSpec branch.
        raise TypeError(
            'Expected MatchScorer, RescoringMatchScorer or SortSpec but got %s'
            % type(sort_option))
    pb.set_limit(sort_option.limit)
    return pb
class SortExpression(SortOption):
"""Sort by a user specified scoring expression."""
try:
MAX_FIELD_VALUE = unichr(0x10ffff) * 80
except ValueError:
MAX_FIELD_VALUE = unichr(0xffff) * 80
MIN_FIELD_VALUE = ''
def __init__(self, expression=None, direction=SortOption.DESCENDING,
default_value=None, limit=1000):
"""Initializer.
Args:
expression: An expression to be evaluated on each matching document
to sort by. The expression can simply be a field name,
or some compound expression such as "score + count(likes) * 0.1"
which will add the score from a scorer to a count of the values
of a likes field times 0.1.
direction: The direction to sort the search results, either ASCENDING
or DESCENDING
default_value: The default value of the expression, if no field
present nor can be calculated for a document. A text value must
be specified for text sorts. A numeric value must be specified for
numeric sorts.
limit: The limit on the number of documents to score.
Raises:
TypeError: If any of the parameters has an invalid type, or an unknown
attribute is passed.
ValueError: If any of the parameters has an invalid value.
"""
super(SortExpression, self).__init__(limit=limit)
self._expression = expression
self._direction = self._CheckDirection(direction)
self._default_value = default_value
if expression is None:
raise TypeError('expression required for SortExpression')
_CheckExpression(expression)
if isinstance(self.default_value, basestring):
_CheckText(self._default_value, 'default_value')
elif self._default_value is not None:
_CheckNumber(self._default_value, 'default_value')
@property
def expression(self):
"""Returns the expression to sort by."""
return self._expression
@property
def direction(self):
"""Returns the | |
:type orderby: list[str or ~devices_cloud_print.models.Enum32]
:param select: Select properties to be returned.
:type select: list[str or ~devices_cloud_print.models.Enum33]
:param expand: Expand related entities.
:type expand: list[str or ~devices_cloud_print.models.Enum34]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfPrinter or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfPrinter]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfPrinter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_printers.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfPrinter', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_printers.metadata = {'url': '/print/printers'} # type: ignore
def create_printers(
    self,
    body,  # type: "models.MicrosoftGraphPrinter"
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrinter"
    """Create new navigation property to printers for print.

    Create new navigation property to printers for print.

    :param body: New navigation property.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrinter
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrinter, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrinter
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrinter"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")

    # Request pieces: URL, (empty) query string, and content headers.
    url = self.create_printers.metadata['url']  # type: ignore
    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", "application/json", 'str'),
    }  # type: Dict[str, Any]

    serialized_body = self._serialize.body(body, 'MicrosoftGraphPrinter')
    request = self._client.post(url, query_parameters, header_parameters, content=serialized_body)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrinter', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_printers.metadata = {'url': '/print/printers'}  # type: ignore
def get_printers(
    self,
    printer_id,  # type: str
    select=None,  # type: Optional[List[Union[str, "models.Enum35"]]]
    expand=None,  # type: Optional[List[Union[str, "models.Enum36"]]]
    **kwargs  # type: Any
):
    # type: (...) -> "models.MicrosoftGraphPrinter"
    """Get printers from print.

    Get printers from print.

    :param printer_id: key: id of printer.
    :type printer_id: str
    :param select: Select properties to be returned.
    :type select: list[str or ~devices_cloud_print.models.Enum35]
    :param expand: Expand related entities.
    :type expand: list[str or ~devices_cloud_print.models.Enum36]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MicrosoftGraphPrinter, or the result of cls(response)
    :rtype: ~devices_cloud_print.models.MicrosoftGraphPrinter
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphPrinter"]
    # Default status-code handlers, extended by any caller-supplied overrides.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Expand the {printer-id} template in the operation URL.
    url = self._client.format_url(
        self.get_printers.metadata['url'],  # type: ignore
        **{'printer-id': self._serialize.url("printer_id", printer_id, 'str')})

    # Optional OData query options.
    query_parameters = {}  # type: Dict[str, Any]
    if select is not None:
        query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')

    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # Translate well-known status codes, then surface the OData payload.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MicrosoftGraphPrinter', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_printers.metadata = {'url': '/print/printers/{printer-id}'}  # type: ignore
def update_printers(
    self,
    printer_id,  # type: str
    body,  # type: "models.MicrosoftGraphPrinter"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Update the navigation property printers in print.

    Update the navigation property printers in print.

    :param printer_id: key: id of printer.
    :type printer_id: str
    :param body: New navigation property values.
    :type body: ~devices_cloud_print.models.MicrosoftGraphPrinter
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status-code handlers, extended by any caller-supplied overrides.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the {printer-id} template in the operation URL.
    url = self._client.format_url(
        self.update_printers.metadata['url'],  # type: ignore
        **{'printer-id': self._serialize.url("printer_id", printer_id, 'str')})

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the request body and issue the PATCH.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(body, 'MicrosoftGraphPrinter')
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # A successful update returns 204 No Content.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
update_printers.metadata = {'url': '/print/printers/{printer-id}'}  # type: ignore
def delete_printers(
    self,
    printer_id,  # type: str
    if_match=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete navigation property printers for print.

    Delete navigation property printers for print.

    :param printer_id: key: id of printer.
    :type printer_id: str
    :param if_match: ETag.
    :type if_match: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status-code handlers, extended by any caller-supplied overrides.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Expand the {printer-id} template in the operation URL.
    url = self._client.format_url(
        self.delete_printers.metadata['url'],  # type: ignore
        **{'printer-id': self._serialize.url("printer_id", printer_id, 'str')})

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {}  # type: Dict[str, Any]
    # Pass the caller's ETag through for optimistic concurrency, if given.
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # A successful delete returns 204 No Content.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize(models.OdataError, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete_printers.metadata = {'url': '/print/printers/{printer-id}'}  # type: ignore
def list_printer_shares(
self,
orderby=None, # type: Optional[List[Union[str, "models.Enum59"]]]
select=None, # type: Optional[List[Union[str, "models.Enum60"]]]
expand=None, # type: Optional[List[Union[str, "models.Enum61"]]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfPrinterShare0"]
"""Get printerShares from print.
Get printerShares from print.
:param orderby: Order items by property values.
:type orderby: list[str or ~devices_cloud_print.models.Enum59]
:param select: Select properties to be returned.
:type select: list[str or ~devices_cloud_print.models.Enum60]
:param expand: Expand related entities.
:type expand: list[str or ~devices_cloud_print.models.Enum61]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfPrinterShare0 or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~devices_cloud_print.models.CollectionOfPrinterShare0]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfPrinterShare0"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_printer_shares.metadata['url'] # type: | |
import json
import random
import time
from lib.Cb_constants.CBServer import CbServer
from lib.membase.api.rest_client import RestConnection
from lib.remote.remote_util import RemoteMachineShellConnection
from pytests.basetestcase import BaseTestCase
from pytests.security.ntonencryptionBase import ntonencryptionBase
from pytests.security.x509_multiple_CA_util import x509main, Validation
class MultipleCA(BaseTestCase):
def setUp(self):
    """Build the multi-CA x509 fixture: read cert-generation knobs from the
    test input, clean every node's cert inbox, load a sample bucket, set the
    replica count, and optionally enable node-to-node encryption."""
    super(MultipleCA, self).setUp()
    # Cert-generation knobs, overridable per test run.
    self.standard = self.input.param("standard", "pkcs8")
    self.passphrase_type = self.input.param("passphrase_type", "script")
    self.encryption_type = self.input.param("encryption_type", "aes256")
    self.wildcard_dns = self.input.param("wildcard_dns", None)
    self.x509 = x509main(host=self.master, standard=self.standard,
                         encryption_type=self.encryption_type,
                         passphrase_type=self.passphrase_type,
                         wildcard_dns=self.wildcard_dns)
    # Start every node from a clean inbox (no stale certs from prior runs).
    for server in self.servers:
        self.x509.delete_inbox_folder_on_server(server=server)
    sample_bucket = self.input.param("sample_bucket", "travel-sample")
    # NOTE(review): the default is "travel-sample", so loading is only
    # skipped when the test explicitly passes sample_bucket=None.
    if sample_bucket is not None:
        self.load_sample_bucket(self.master, sample_bucket)
    self.buckets = RestConnection(self.master).get_buckets()
    rest = RestConnection(self.master)
    for bucket in self.buckets:
        rest.change_bucket_props(bucket, replicaNumber=self.num_replicas)
    # Rebalance with no adds/removes so the replica change takes effect.
    task = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
    self.wait_for_rebalance_to_complete(task)
    self.n2n_encryption_level_multiple_CA = self.input.param("n2n_encryption_level_multiple_CA", None)
    if self.n2n_encryption_level_multiple_CA:
        ntonencryptionBase().setup_nton_cluster([self.master],
                                                clusterEncryptionLevel=self.n2n_encryption_level_multiple_CA)
        # Mirrors tearDown, which resets use_https under the same condition.
        CbServer.use_https = True
def tearDown(self):
    """Undo what setUp did: disable n2n encryption if it was enabled,
    remove all generated certs from the nodes, then defer to the base."""
    if self.input.param("n2n_encryption_level_multiple_CA", None):
        ntonencryptionBase().disable_nton_cluster([self.master])
        CbServer.use_https = False
    # Fresh handle: the per-test x509 state is not needed for teardown.
    self.x509 = x509main(host=self.master)
    self.x509.teardown_certs(servers=self.servers)
    super(MultipleCA, self).tearDown()
def auth(self, client_certs=None, api=None, servers=None):
    """
    Performs client-cert and Basic authentication via:
    1. Making a rest call to servers at api
    2. Making a sdk connection and doing some creates

    :client_certs: (list) - list of tuples. Each tuple being client cert,
                            client private key
    :api: - full url to make a rest call
    :servers: a list of servers to make sdk connection/rest connection against
    """
    if client_certs is None:
        # Default trio of client certs signed by different intermediates.
        client_certs = [
            self.x509.get_client_cert(int_ca_name=name)
            for name in ("i1_r1", "iclient1_r1", "iclient1_clientroot")
        ]
    if servers is None:
        servers = [self.servers[0]]
    ca_bundle = x509main.ALL_CAs_PATH + x509main.ALL_CAs_PEM_NAME
    for cert_tuple in client_certs:
        for server in servers:
            if api is None:
                endpoint = "https://" + server.ip + ":18091/pools/default/"
            else:
                endpoint = api
            # 1) client-certificate auth: REST call, then SDK writes.
            self.x509_validation = Validation(server=server,
                                              cacert=ca_bundle,
                                              client_cert_path_tuple=cert_tuple)
            status, content, response = self.x509_validation.urllib_request(api=endpoint)
            if not status:
                self.fail("Could not login using client cert auth {0}".format(content))
            sdk_client = self.x509_validation.sdk_connection()
            self.x509_validation.creates_sdk(sdk_client)
            # 2) basic auth: REST call only (no client cert supplied).
            self.x509_validation = Validation(server=server,
                                              cacert=ca_bundle,
                                              client_cert_path_tuple=None)
            status, content, response = self.x509_validation.urllib_request(api=endpoint)
            if not status:
                self.fail("Could not login using basic auth {0}".format(content))
def wait_for_rebalance_to_complete(self, task):
    """Block on an async rebalance/failover task; fail the test if it
    reports an unsuccessful result."""
    if not task.result():
        self.fail("rebalance/failover failed")
def wait_for_failover_or_assert(self, expected_failover_count, timeout=180):
    """Poll the cluster (every 20s, up to `timeout` seconds) until exactly
    `expected_failover_count` nodes report as failed over; assert otherwise."""
    start = time.time()
    deadline = start + timeout
    actual = 0
    while time.time() < deadline:
        actual = self.get_failover_count()
        if actual == expected_failover_count:
            break
        time.sleep(20)
    elapsed = time.time() - start
    self.assertTrue(actual == expected_failover_count,
                    "{0} nodes failed over, expected : {1}".
                    format(actual, expected_failover_count))
    self.log.info("{0} nodes failed over as expected in {1} seconds".
                  format(actual, elapsed))
def get_failover_count(self):
    """Return how many nodes the cluster currently reports as
    'inactiveFailed' (i.e. failed over but not yet ejected/recovered)."""
    cluster_status = RestConnection(self.master).cluster_status()
    return sum(1 for node in cluster_status['nodes']
               if node['clusterMembership'] == "inactiveFailed")
def load_sample_bucket(self, server, bucket_name="travel-sample"):
    """Install a sample bucket on `server` via its sample-loader REST
    endpoint, then wait a fixed 60s for the load to complete.

    The curl runs on the node itself over SSH, so plain
    http://localhost:8091 is reachable regardless of the cluster's TLS
    settings.
    """
    shell = RemoteMachineShellConnection(server)
    shell.execute_command("""curl -v -u Administrator:password \
-X POST http://localhost:8091/sampleBuckets/install \
-d '["{0}"]'""".format(bucket_name))
    shell.disconnect()
    # NOTE(review): fixed sleep rather than polling the bucket's item
    # count — large samples may need longer on slow machines.
    self.sleep(60)
def test_basic_rebalance(self):
    """
    1. Init node cluster. Generate x509 certs
    2. Rebalance-in all the remaining nodes
    3. Client cert auth
    """
    self.x509.generate_multiple_x509_certs(servers=self.servers)
    self.log.info("Manifest #########\n {0}".format(json.dumps(self.x509.manifest, indent=4)))
    # Trust the root CAs on every node before swapping in node certs.
    for server in self.servers:
        _ = self.x509.upload_root_certs(server)
    self.x509.upload_node_certs(servers=self.servers)
    self.x509.delete_unused_out_of_the_box_CAs(server=self.master)
    self.x509.upload_client_cert_settings(server=self.servers[0])
    # Temporarily force https so that add_node during rebalance uses TLS.
    https_val = CbServer.use_https  # so that add_node uses https
    CbServer.use_https = True
    task = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                        self.servers[self.nodes_init:], [])
    self.wait_for_rebalance_to_complete(task)
    CbServer.use_https = https_val
    # Client-cert + basic auth must work against every node post-rebalance.
    self.log.info("Checking authentication ...")
    self.auth(servers=self.servers)
    content = self.x509.get_trusted_CAs()
    self.log.info("Trusted CAs: {0}".format(content))
    self.log.info("Active Root CAs names {0}".format(self.x509.root_ca_names))
def test_rebalance_out_and_add_back(self):
    """
    1. Init node cluster. Generate x509 certs
    2. Upload root certs only to master node
    3. Rebalance-out master node
    4. Client cert auth
    5. Failover any node and recover
    6. Rebalance-in back the step3's node
    """
    self.x509.generate_multiple_x509_certs(servers=self.servers)
    self.log.info("Manifest #########\n {0}".format(json.dumps(self.x509.manifest, indent=4)))
    self.x509.upload_root_certs(self.master)
    self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
    self.x509.delete_unused_out_of_the_box_CAs(server=self.master)
    self.x509.upload_client_cert_settings(server=self.servers[0])
    # The original master is about to be ejected, so repoint self.master
    # at the next node first.
    self.master = self.servers[:self.nodes_init][1]
    https_val = CbServer.use_https  # so that add_node uses https
    CbServer.use_https = True
    task = self.cluster.async_rebalance(self.servers[1:self.nodes_init],
                                        [], [self.servers[0]])
    self.wait_for_rebalance_to_complete(task)
    CbServer.use_https = https_val
    self.log.info("Checking authentication ...")
    self.auth(servers=self.servers[1:self.nodes_init])
    # Gracefully fail over one random remaining node, then delta-recover it.
    failover_nodes = random.sample(self.servers[1:self.nodes_init], 1)
    _ = self.cluster.async_failover(self.servers[1:self.nodes_init], failover_nodes,
                                    graceful=True)
    self.wait_for_failover_or_assert(1)
    rest = RestConnection(self.master)
    for node in failover_nodes:
        rest.set_recovery_type("ns_1@" + node.ip, recoveryType="delta")
    https_val = CbServer.use_https  # so that add_node uses https
    CbServer.use_https = True
    task = self.cluster.async_rebalance(self.servers[1:self.nodes_init], [], [])
    CbServer.use_https = https_val
    self.wait_for_rebalance_to_complete(task)
    # Re-trust the CAs on the ejected node and reload its node cert before
    # adding it back into the cluster.
    self.x509.load_trusted_CAs(server=self.servers[0])
    self.x509.reload_node_certificates(servers=[self.servers[0]])
    https_val = CbServer.use_https  # so that add_node uses https
    CbServer.use_https = True
    task = self.cluster.async_rebalance(self.servers[1:self.nodes_init], [self.servers[0]], [])
    CbServer.use_https = https_val
    self.wait_for_rebalance_to_complete(task)
    self.auth(servers=self.servers[:self.nodes_init])
def test_failover_and_recovery(self):
    """
    1. Init node cluster. Generate x509 certs
    2. multiple node failover (graceful and hard) and recover(delta and full)
    3. Client cert auth
    """
    self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])
    self.x509.upload_root_certs(self.master)
    self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
    self.x509.delete_unused_out_of_the_box_CAs(server=self.master)
    self.x509.upload_client_cert_settings(server=self.master)
    # Exercise all four failover-mode x recovery-type combinations.
    for graceful in [True, False]:
        for recovery_type in ["delta", "full"]:
            failover_nodes = random.sample(self.servers[1:self.nodes_init], 2)
            failover_count = 0
            # Fail the nodes one at a time, waiting for each to register.
            for node in failover_nodes:
                _ = self.cluster.async_failover(self.servers[:self.nodes_init], [node],
                                                graceful=graceful)
                failover_count = failover_count + 1
                self.wait_for_failover_or_assert(failover_count)
            rest = RestConnection(self.master)
            for node in failover_nodes:
                rest.set_recovery_type("ns_1@" + node.ip, recoveryType=recovery_type)
            https_val = CbServer.use_https  # so that add_node uses https
            CbServer.use_https = True
            task = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
            CbServer.use_https = https_val
            self.wait_for_rebalance_to_complete(task)
            # Auth must still work after every recover-and-rebalance cycle.
            self.auth(servers=self.servers[:self.nodes_init])
def test_failover_and_rebalance_out(self):
    """
    1. Init node cluster. Generate x509 certs
    2. single node failover (graceful and hard) and rebalance-out
    3. Client cert auth
    """
    self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])
    self.x509.upload_root_certs(self.master)
    self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
    self.x509.delete_unused_out_of_the_box_CAs(server=self.master)
    self.x509.upload_client_cert_settings(server=self.master)
    out_nodes = list()
    nodes_in_cluster = self.servers[:self.nodes_init]
    # One pass per failover mode; each pass ejects one more node.
    for graceful in [True, False]:
        failover_nodes = random.sample(nodes_in_cluster[1:], 1)
        _ = self.cluster.async_failover(nodes_in_cluster, failover_nodes,
                                        graceful=graceful)
        self.wait_for_failover_or_assert(1)
        https_val = CbServer.use_https  # so that add_node uses https
        CbServer.use_https = True
        task = self.cluster.async_rebalance(nodes_in_cluster, [], failover_nodes)
        self.wait_for_rebalance_to_complete(task)
        CbServer.use_https = https_val
        for node in failover_nodes:
            out_nodes.append(node)
        # Recompute membership from scratch after the eject.
        nodes_in_cluster = [node for node in self.servers[:self.nodes_init] if node not in out_nodes]
        self.auth(servers=nodes_in_cluster)
def test_rotate_certificates(self):
    """
    1. Init node cluster. Generate x509 certs
    2. Rotate all certs
    3. Rebalance-in nodes
    4. Client cert auth
    """
    self.x509.generate_multiple_x509_certs(servers=self.servers)
    self.log.info("Manifest before rotating certs #########\n {0}".
                  format(json.dumps(self.x509.manifest, indent=4)))
    for server in self.servers[:self.nodes_init]:
        _ = self.x509.upload_root_certs(server)
    self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
    self.x509.delete_unused_out_of_the_box_CAs(server=self.master)
    self.x509.upload_client_cert_settings(server=self.servers[0])
    # Sanity-check auth with the initial certs before rotating.
    self.log.info("Checking authentication ...")
    self.auth(servers=self.servers[:self.nodes_init])
    # Rotate every cert, then confirm the cluster can still rebalance in
    # the remaining nodes and authenticate clients.
    self.x509.rotate_certs(self.servers, "all")
    self.log.info("Manifest after rotating certs #########\n {0}".
                  format(json.dumps(self.x509.manifest, indent=4)))
    https_val = CbServer.use_https  # so that add_node uses https
    CbServer.use_https = True
    task = self.cluster.async_rebalance(self.servers[:self.nodes_init],
                                        self.servers[self.nodes_init:], [])
    self.wait_for_rebalance_to_complete(task)
    CbServer.use_https = https_val
    self.log.info("Checking authentication ...")
    self.auth(servers=self.servers)
    content = self.x509.get_trusted_CAs()
    self.log.info("Trusted CAs: {0}".format(content))
    self.log.info("Active Root CAs names {0}".format(self.x509.root_ca_names))
def test_cluster_works_fine_after_deleting_CA_folder(self):
    """
    1. Init node cluster. Generate x509 certs
    2. Upload root certs from any random node of the cluster
    3. Delete CA folder from that node
    4. Verify that cluster continues to operate fine by checking
        a) Failover & delta recovery of that node
        b) Failover & rebalance-out of that node
        c) Client authentication & sdk writes
    """
    self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])
    random_nodes = random.sample(self.servers[1:self.nodes_init], 1)
    self.log.info("Uploading root certs from {0}".format(random_nodes[0]))
    self.x509.upload_root_certs(random_nodes[0])
    self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
    self.x509.delete_unused_out_of_the_box_CAs(server=self.master)
    self.x509.upload_client_cert_settings(server=self.master)
    # Remove the trusted-CA folder from the node the certs were uploaded
    # from; the cluster should keep working without it on disk.
    shell = RemoteMachineShellConnection(random_nodes[0])
    shell.remove_directory(self.x509.install_path + x509main.CHAINFILEPATH +
                           "/" + x509main.TRUSTEDCAPATH)
    shell.disconnect()
    failover_nodes = random_nodes
    nodes_in_cluster = self.servers[:self.nodes_init]
    for operation in ["recovery", "out"]:
        # Stop the server hard so the failover is a genuine hard failover.
        shell = RemoteMachineShellConnection(failover_nodes[0])
        shell.stop_server()
        self.cluster.async_failover(self.servers[:self.nodes_init],
                                    failover_nodes,
                                    graceful=False)
        self.wait_for_failover_or_assert(1)
        if operation == "out":
            https_val = CbServer.use_https  # so that add_node uses https
            CbServer.use_https = True
            rest = RestConnection(self.master)
            otp_nodes = []
            ejected_nodes = []
            for node in nodes_in_cluster:
                otp_nodes.append('ns_1@' + node.ip)
            for node in failover_nodes:
                ejected_nodes.append('ns_1@' + node.ip)
            status = rest.rebalance(otpNodes=otp_nodes, ejectedNodes=ejected_nodes)
            if not status:
                # Restart the stopped server before failing so later tests
                # don't inherit a dead node.
                shell.start_server(failover_nodes[0])
                self.fail("rebalance/failover failed")
            CbServer.use_https = https_val
            # BUG FIX: list.remove() mutates in place and returns None.
            # The original code assigned its result back
            # (nodes_in_cluster = nodes_in_cluster.remove(...)), leaving
            # nodes_in_cluster = None, so the auth check below silently
            # fell back to auth()'s default server list.
            nodes_in_cluster.remove(failover_nodes[0])
        shell.start_server(failover_nodes[0])
        if operation == "recovery":
            rest = RestConnection(self.master)
            for node in failover_nodes:
                rest.set_recovery_type("ns_1@" + node.ip, recoveryType="delta")
            https_val = CbServer.use_https  # so that add_node uses https
            CbServer.use_https = True
            task = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
            self.wait_for_rebalance_to_complete(task)
            CbServer.use_https = https_val
        # Verify client-cert and basic auth against the current membership.
        self.auth(servers=nodes_in_cluster)
def test_CA_upload_from_all_nodes(self):
    """
    1. Init node cluster
    2. Upload CAs: [ca1] from inbox/CA folder of first node
    3. Upload CAs: [ca2] from inbox/CA folder of second node
    4. Upload CAs: [ca3, ca4] from inbox/CA folder of third node
    5. Verify that the net trusted CAs for the cluster is now: [ca1, ca2, ca3, ca4]
    """
    self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])
    # Spread the root-CA uploads across three different nodes.
    self.x509.upload_root_certs(server=self.master, root_ca_names=[self.x509.root_ca_names[0]])
    self.x509.upload_root_certs(server=self.servers[:self.nodes_init][1],
                                root_ca_names=[self.x509.root_ca_names[1]])
    self.x509.upload_root_certs(server=self.servers[:self.nodes_init][2],
                                root_ca_names=self.x509.root_ca_names[2:])
    self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
    self.x509.delete_unused_out_of_the_box_CAs(server=self.master)
    self.x509.upload_client_cert_settings(server=self.master)
    self.auth(servers=self.servers[:self.nodes_init])
    content = self.x509.get_trusted_CAs()
    self.log.info("Trusted CAs: {0}".format(content))
    # The cluster-wide trusted set must equal the union of everything
    # uploaded, ignoring the built-in "Couchbase Server" CA.
    expected_root_ca_names = self.x509.root_ca_names
    actual_root_ca_names = list()
    for ca_dict in content:
        subject = ca_dict["subject"]
        root_ca_name = subject.split("CN=")[1]
        if "Couchbase Server" not in root_ca_name:
            actual_root_ca_names.append(root_ca_name)
    if set(actual_root_ca_names) != set(expected_root_ca_names):
        self.fail("Expected {0} Actual {1}".format(expected_root_ca_names,
                                                   actual_root_ca_names))
def test_restart_node_with_encrypted_pkeys(self):
"""
1. Init node cluster, with encrypted node pkeys
2. Restart a node
3. Failover and delta recover that node
4. Restart the node again and rebalance-out this time
5. Repeat steps 2 to 5 until you are left with master node
"""
self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])
self.x509.upload_root_certs(self.master)
self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
rest = RestConnection(self.master)
nodes_in_cluster = [node for node in self.servers[:self.nodes_init]]
for node in self.servers[1:self.nodes_init]:
shell = RemoteMachineShellConnection(node)
shell.restart_couchbase()
shell.disconnect()
self.sleep(10, "Wait after restart")
self.cluster.async_failover(nodes_in_cluster,
[node],
graceful=False)
self.wait_for_failover_or_assert(1)
rest.set_recovery_type("ns_1@" + node.ip, recoveryType="delta")
https_val = CbServer.use_https # so that add_node uses https
CbServer.use_https = True
| |
with value
-999999999999999999.
"""
assert_bindings(
schema="nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-minExclusive-1.xsd",
instance="nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-minExclusive-1-1.xml",
class_name="NistschemaSvIvAtomicIntegerMinExclusive1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_integer_min_exclusive_nistxml_sv_iv_atomic_integer_min_exclusive_1_2(mode, save_output, output_format):
    """
    Type atomic/integer is restricted by facet minExclusive with value
    -999999999999999999.
    """
    # Fixed fixture data; runtime knobs are forwarded from the fixtures.
    fixture = dict(
        schema="nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-minExclusive-1.xsd",
        instance="nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-minExclusive-1-2.xml",
        class_name="NistschemaSvIvAtomicIntegerMinExclusive1",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_integer_min_exclusive_nistxml_sv_iv_atomic_integer_min_exclusive_1_3(mode, save_output, output_format):
    """
    Type atomic/integer is restricted by facet minExclusive with value
    -999999999999999999.
    """
    # Fixed fixture data; runtime knobs are forwarded from the fixtures.
    fixture = dict(
        schema="nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-minExclusive-1.xsd",
        instance="nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-minExclusive-1-3.xml",
        class_name="NistschemaSvIvAtomicIntegerMinExclusive1",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_integer_min_exclusive_nistxml_sv_iv_atomic_integer_min_exclusive_1_4(mode, save_output, output_format):
    """
    Type atomic/integer is restricted by facet minExclusive with value
    -999999999999999999.
    """
    # Fixed fixture data; runtime knobs are forwarded from the fixtures.
    fixture = dict(
        schema="nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-minExclusive-1.xsd",
        instance="nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-minExclusive-1-4.xml",
        class_name="NistschemaSvIvAtomicIntegerMinExclusive1",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_integer_min_exclusive_nistxml_sv_iv_atomic_integer_min_exclusive_1_5(mode, save_output, output_format):
    """
    Type atomic/integer is restricted by facet minExclusive with value
    -999999999999999999.
    """
    # Fixed fixture data; runtime knobs are forwarded from the fixtures.
    fixture = dict(
        schema="nistData/atomic/integer/Schema+Instance/NISTSchema-SV-IV-atomic-integer-minExclusive-1.xsd",
        instance="nistData/atomic/integer/Schema+Instance/NISTXML-SV-IV-atomic-integer-minExclusive-1-5.xml",
        class_name="NistschemaSvIvAtomicIntegerMinExclusive1",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_decimal_white_space_nistxml_sv_iv_atomic_decimal_white_space_1_1(mode, save_output, output_format):
    """
    Type atomic/decimal is restricted by facet whiteSpace with value
    collapse.
    """
    # Fixed fixture data; runtime knobs are forwarded from the fixtures.
    fixture = dict(
        schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-whiteSpace-1.xsd",
        instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-whiteSpace-1-1.xml",
        class_name="NistschemaSvIvAtomicDecimalWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_decimal_white_space_nistxml_sv_iv_atomic_decimal_white_space_1_2(mode, save_output, output_format):
    """
    Type atomic/decimal is restricted by facet whiteSpace with value
    collapse.
    """
    # Fixed fixture data; runtime knobs are forwarded from the fixtures.
    fixture = dict(
        schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-whiteSpace-1.xsd",
        instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-whiteSpace-1-2.xml",
        class_name="NistschemaSvIvAtomicDecimalWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_decimal_white_space_nistxml_sv_iv_atomic_decimal_white_space_1_3(mode, save_output, output_format):
    """
    Type atomic/decimal is restricted by facet whiteSpace with value
    collapse.
    """
    # Fixed fixture data; runtime knobs are forwarded from the fixtures.
    fixture = dict(
        schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-whiteSpace-1.xsd",
        instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-whiteSpace-1-3.xml",
        class_name="NistschemaSvIvAtomicDecimalWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_decimal_white_space_nistxml_sv_iv_atomic_decimal_white_space_1_4(mode, save_output, output_format):
    """
    Type atomic/decimal is restricted by facet whiteSpace with value
    collapse.
    """
    # Fixed fixture data; runtime knobs are forwarded from the fixtures.
    fixture = dict(
        schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-whiteSpace-1.xsd",
        instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-whiteSpace-1-4.xml",
        class_name="NistschemaSvIvAtomicDecimalWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_decimal_white_space_nistxml_sv_iv_atomic_decimal_white_space_1_5(mode, save_output, output_format):
    """
    Type atomic/decimal is restricted by facet whiteSpace with value
    collapse.
    """
    # Fixed fixture data; runtime knobs are forwarded from the fixtures.
    fixture = dict(
        schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-whiteSpace-1.xsd",
        instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-whiteSpace-1-5.xml",
        class_name="NistschemaSvIvAtomicDecimalWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_decimal_enumeration_4_nistxml_sv_iv_atomic_decimal_enumeration_5_1(mode, save_output, output_format):
    """
    Type atomic/decimal is restricted by facet enumeration.
    """
    # NOTE: generated name says "enumeration_4" but the fixture exercises
    # the enumeration-5 schema; name kept for pytest-collection stability.
    fixture = dict(
        schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-enumeration-5.xsd",
        instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-enumeration-5-1.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration5",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_decimal_enumeration_4_nistxml_sv_iv_atomic_decimal_enumeration_5_2(mode, save_output, output_format):
    """
    Type atomic/decimal is restricted by facet enumeration.
    """
    # NOTE: generated name says "enumeration_4" but the fixture exercises
    # the enumeration-5 schema; name kept for pytest-collection stability.
    fixture = dict(
        schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-enumeration-5.xsd",
        instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-enumeration-5-2.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration5",
        version="1.1",
        structure_style="filenames",
    )
    assert_bindings(mode=mode, save_output=save_output,
                    output_format=output_format, **fixture)
def test_atomic_decimal_enumeration_4_nistxml_sv_iv_atomic_decimal_enumeration_5_3(mode, save_output, output_format):
"""
Type atomic/decimal is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-enumeration-5.xsd",
instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-enumeration-5-3.xml",
class_name="NistschemaSvIvAtomicDecimalEnumeration5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_decimal_enumeration_4_nistxml_sv_iv_atomic_decimal_enumeration_5_4(mode, save_output, output_format):
"""
Type atomic/decimal is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-enumeration-5.xsd",
instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-enumeration-5-4.xml",
class_name="NistschemaSvIvAtomicDecimalEnumeration5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_decimal_enumeration_4_nistxml_sv_iv_atomic_decimal_enumeration_5_5(mode, save_output, output_format):
"""
Type atomic/decimal is restricted by facet enumeration.
"""
assert_bindings(
schema="nistData/atomic/decimal/Schema+Instance/NISTSchema-SV-IV-atomic-decimal-enumeration-5.xsd",
instance="nistData/atomic/decimal/Schema+Instance/NISTXML-SV-IV-atomic-decimal-enumeration-5-5.xml",
class_name="NistschemaSvIvAtomicDecimalEnumeration5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_atomic_decimal_enumeration_3_nistxml_sv_iv_atomic_decimal_enumeration_4_1(mode, save_output, output_format):
    """NIST fixture 4-1: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-4-1.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_3_nistxml_sv_iv_atomic_decimal_enumeration_4_2(mode, save_output, output_format):
    """NIST fixture 4-2: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-4-2.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_3_nistxml_sv_iv_atomic_decimal_enumeration_4_3(mode, save_output, output_format):
    """NIST fixture 4-3: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-4-3.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_3_nistxml_sv_iv_atomic_decimal_enumeration_4_4(mode, save_output, output_format):
    """NIST fixture 4-4: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-4-4.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_3_nistxml_sv_iv_atomic_decimal_enumeration_4_5(mode, save_output, output_format):
    """NIST fixture 4-5: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-4-5.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_atomic_decimal_enumeration_2_nistxml_sv_iv_atomic_decimal_enumeration_3_1(mode, save_output, output_format):
    """NIST fixture 3-1: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-3-1.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_2_nistxml_sv_iv_atomic_decimal_enumeration_3_2(mode, save_output, output_format):
    """NIST fixture 3-2: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-3-2.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_2_nistxml_sv_iv_atomic_decimal_enumeration_3_3(mode, save_output, output_format):
    """NIST fixture 3-3: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-3-3.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_2_nistxml_sv_iv_atomic_decimal_enumeration_3_4(mode, save_output, output_format):
    """NIST fixture 3-4: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-3-4.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_2_nistxml_sv_iv_atomic_decimal_enumeration_3_5(mode, save_output, output_format):
    """NIST fixture 3-5: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-3-5.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_atomic_decimal_enumeration_1_nistxml_sv_iv_atomic_decimal_enumeration_2_1(mode, save_output, output_format):
    """NIST fixture 2-1: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-2.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-2-1.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_1_nistxml_sv_iv_atomic_decimal_enumeration_2_2(mode, save_output, output_format):
    """NIST fixture 2-2: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-2.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-2-2.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_1_nistxml_sv_iv_atomic_decimal_enumeration_2_3(mode, save_output, output_format):
    """NIST fixture 2-3: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-2.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-2-3.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_1_nistxml_sv_iv_atomic_decimal_enumeration_2_4(mode, save_output, output_format):
    """NIST fixture 2-4: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-2.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-2-4.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_1_nistxml_sv_iv_atomic_decimal_enumeration_2_5(mode, save_output, output_format):
    """NIST fixture 2-5: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-2.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-2-5.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_atomic_decimal_enumeration_nistxml_sv_iv_atomic_decimal_enumeration_1_1(mode, save_output, output_format):
    """NIST fixture 1-1: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-1.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-1-1.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_nistxml_sv_iv_atomic_decimal_enumeration_1_2(mode, save_output, output_format):
    """NIST fixture 1-2: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-1.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-1-2.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_nistxml_sv_iv_atomic_decimal_enumeration_1_3(mode, save_output, output_format):
    """NIST fixture 1-3: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-1.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-1-3.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_nistxml_sv_iv_atomic_decimal_enumeration_1_4(mode, save_output, output_format):
    """NIST fixture 1-4: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-1.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-1-4.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_enumeration_nistxml_sv_iv_atomic_decimal_enumeration_1_5(mode, save_output, output_format):
    """NIST fixture 1-5: atomic/decimal restricted by facet enumeration."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-enumeration-1.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-enumeration-1-5.xml",
        class_name="NistschemaSvIvAtomicDecimalEnumeration1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_atomic_decimal_pattern_4_nistxml_sv_iv_atomic_decimal_pattern_5_1(mode, save_output, output_format):
    r"""NIST fixture 5-1: atomic/decimal restricted by facet pattern \d{5}\.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-5.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-5-1.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_4_nistxml_sv_iv_atomic_decimal_pattern_5_2(mode, save_output, output_format):
    r"""NIST fixture 5-2: atomic/decimal restricted by facet pattern \d{5}\.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-5.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-5-2.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_4_nistxml_sv_iv_atomic_decimal_pattern_5_3(mode, save_output, output_format):
    r"""NIST fixture 5-3: atomic/decimal restricted by facet pattern \d{5}\.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-5.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-5-3.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_4_nistxml_sv_iv_atomic_decimal_pattern_5_4(mode, save_output, output_format):
    r"""NIST fixture 5-4: atomic/decimal restricted by facet pattern \d{5}\.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-5.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-5-4.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_4_nistxml_sv_iv_atomic_decimal_pattern_5_5(mode, save_output, output_format):
    r"""NIST fixture 5-5: atomic/decimal restricted by facet pattern \d{5}\.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-5.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-5-5.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern5",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_atomic_decimal_pattern_3_nistxml_sv_iv_atomic_decimal_pattern_4_1(mode, save_output, output_format):
    r"""NIST fixture 4-1: atomic/decimal restricted by facet pattern \.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-4-1.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_3_nistxml_sv_iv_atomic_decimal_pattern_4_2(mode, save_output, output_format):
    r"""NIST fixture 4-2: atomic/decimal restricted by facet pattern \.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-4-2.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_3_nistxml_sv_iv_atomic_decimal_pattern_4_3(mode, save_output, output_format):
    r"""NIST fixture 4-3: atomic/decimal restricted by facet pattern \.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-4-3.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_3_nistxml_sv_iv_atomic_decimal_pattern_4_4(mode, save_output, output_format):
    r"""NIST fixture 4-4: atomic/decimal restricted by facet pattern \.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-4-4.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_3_nistxml_sv_iv_atomic_decimal_pattern_4_5(mode, save_output, output_format):
    r"""NIST fixture 4-5: atomic/decimal restricted by facet pattern \.\d{13}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-4.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-4-5.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_atomic_decimal_pattern_2_nistxml_sv_iv_atomic_decimal_pattern_3_1(mode, save_output, output_format):
    r"""NIST fixture 3-1: atomic/decimal restricted by facet pattern \-\d{1}\.\d{8}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-3-1.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_2_nistxml_sv_iv_atomic_decimal_pattern_3_2(mode, save_output, output_format):
    r"""NIST fixture 3-2: atomic/decimal restricted by facet pattern \-\d{1}\.\d{8}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-3-2.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_2_nistxml_sv_iv_atomic_decimal_pattern_3_3(mode, save_output, output_format):
    r"""NIST fixture 3-3: atomic/decimal restricted by facet pattern \-\d{1}\.\d{8}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-3-3.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_2_nistxml_sv_iv_atomic_decimal_pattern_3_4(mode, save_output, output_format):
    r"""NIST fixture 3-4: atomic/decimal restricted by facet pattern \-\d{1}\.\d{8}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-3-4.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_2_nistxml_sv_iv_atomic_decimal_pattern_3_5(mode, save_output, output_format):
    r"""NIST fixture 3-5: atomic/decimal restricted by facet pattern \-\d{1}\.\d{8}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-3.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-3-5.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_atomic_decimal_pattern_1_nistxml_sv_iv_atomic_decimal_pattern_2_1(mode, save_output, output_format):
    r"""NIST fixture 2-1: atomic/decimal restricted by facet pattern \-\d{2}\.\d{3}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-2.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-2-1.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_1_nistxml_sv_iv_atomic_decimal_pattern_2_2(mode, save_output, output_format):
    r"""NIST fixture 2-2: atomic/decimal restricted by facet pattern \-\d{2}\.\d{3}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-2.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-2-2.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_1_nistxml_sv_iv_atomic_decimal_pattern_2_3(mode, save_output, output_format):
    r"""NIST fixture 2-3: atomic/decimal restricted by facet pattern \-\d{2}\.\d{3}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-2.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-2-3.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_atomic_decimal_pattern_1_nistxml_sv_iv_atomic_decimal_pattern_2_4(mode, save_output, output_format):
    r"""NIST fixture 2-4: atomic/decimal restricted by facet pattern \-\d{2}\.\d{3}."""
    data_dir = "nistData/atomic/decimal/Schema+Instance/"
    assert_bindings(
        schema=data_dir + "NISTSchema-SV-IV-atomic-decimal-pattern-2.xsd",
        instance=data_dir + "NISTXML-SV-IV-atomic-decimal-pattern-2-4.xml",
        class_name="NistschemaSvIvAtomicDecimalPattern2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_atomic_decimal_pattern_1_nistxml_sv_iv_atomic_decimal_pattern_2_5(mode, save_output, output_format):
r"""
Type atomic/decimal is restricted by facet pattern with value
\-\d{2}\.\d{3}.
"""
assert_bindings(
| |
' ')
r3c12 = request.POST.get('r3c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c13 = request.POST.get('r3c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c14 = request.POST.get('r3c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c15 = request.POST.get('r3c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c16 = request.POST.get('r3c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c17 = request.POST.get('r3c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c18 = request.POST.get('r3c18').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c19 = request.POST.get('r3c19').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c20 = request.POST.get('r3c20').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c21 = request.POST.get('r3c21').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c22 = request.POST.get('r3c22').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c23 = request.POST.get('r3c23').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c24 = request.POST.get('r3c24').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c25 = request.POST.get('r3c25').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c26 = request.POST.get('r3c26').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c27 = request.POST.get('r3c27').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c4 = request.POST.get('r4c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c5 = request.POST.get('r4c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c6 = request.POST.get('r4c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c7 = request.POST.get('r4c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c8 = request.POST.get('r4c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c9 = request.POST.get('r4c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c10 = request.POST.get('r4c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c11 = request.POST.get('r4c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c12 = request.POST.get('r4c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c13 = request.POST.get('r4c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c14 = request.POST.get('r4c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c15 = request.POST.get('r4c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c16 = request.POST.get('r4c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c17 = request.POST.get('r4c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c18 = request.POST.get('r4c18').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c19 = request.POST.get('r4c19').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c20 = request.POST.get('r4c20').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c21 = request.POST.get('r4c21').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c22 = request.POST.get('r4c22').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c23 = request.POST.get('r4c23').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c24 = request.POST.get('r4c24').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c25 = request.POST.get('r4c25').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c26 = request.POST.get('r4c26').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c27 = request.POST.get('r4c27').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c4 = request.POST.get('r5c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c5 = request.POST.get('r5c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c6 = request.POST.get('r5c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c7 = request.POST.get('r5c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c8 = request.POST.get('r5c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c9 = request.POST.get('r5c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c10 = request.POST.get('r5c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c11 = request.POST.get('r5c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c12 = request.POST.get('r5c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c13 = request.POST.get('r5c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c14 = request.POST.get('r5c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c15 = request.POST.get('r5c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c16 = request.POST.get('r5c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c17 = request.POST.get('r5c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c18 = request.POST.get('r5c18').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c19 = request.POST.get('r5c19').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c20 = request.POST.get('r5c20').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c21 = request.POST.get('r5c21').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c22 = request.POST.get('r5c22').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c23 = request.POST.get('r5c23').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c24 = request.POST.get('r5c24').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c25 = request.POST.get('r5c25').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c26 = request.POST.get('r5c26').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c27 = request.POST.get('r5c27').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c4 = request.POST.get('r6c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c5 = request.POST.get('r6c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c6 = request.POST.get('r6c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c7 = request.POST.get('r6c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c8 = request.POST.get('r6c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c9 = request.POST.get('r6c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c10 = request.POST.get('r6c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c11 = request.POST.get('r6c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c12 = request.POST.get('r6c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c13 = request.POST.get('r6c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c14 = request.POST.get('r6c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c15 = request.POST.get('r6c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c16 = request.POST.get('r6c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c17 = request.POST.get('r6c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c18 = request.POST.get('r6c18').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c19 = request.POST.get('r6c19').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c20 = request.POST.get('r6c20').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c21 = request.POST.get('r6c21').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c22 = request.POST.get('r6c22').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c23 = request.POST.get('r6c23').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c24 = request.POST.get('r6c24').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c25 = request.POST.get('r6c25').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c26 = request.POST.get('r6c26').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c27 = request.POST.get('r6c27').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c4 = request.POST.get('r7c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c5 = request.POST.get('r7c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c6 = request.POST.get('r7c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c7 = request.POST.get('r7c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c8 = request.POST.get('r7c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c9 = request.POST.get('r7c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c10 = request.POST.get('r7c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c11 = request.POST.get('r7c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c12 = request.POST.get('r7c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c13 = request.POST.get('r7c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c14 = request.POST.get('r7c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c15 = request.POST.get('r7c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c16 = request.POST.get('r7c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c17 = request.POST.get('r7c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c18 = request.POST.get('r7c18').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c19 = request.POST.get('r7c19').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c20 = request.POST.get('r7c20').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c21 = request.POST.get('r7c21').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c22 = request.POST.get('r7c22').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c23 = request.POST.get('r7c23').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c24 = request.POST.get('r7c24').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c25 = request.POST.get('r7c25').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c26 = request.POST.get('r7c26').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c27 = request.POST.get('r7c27').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c4 = request.POST.get('r8c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c5 = request.POST.get('r8c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c6 = request.POST.get('r8c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c7 = request.POST.get('r8c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c8 = request.POST.get('r8c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c9 = request.POST.get('r8c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c10 = request.POST.get('r8c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c11 = request.POST.get('r8c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c12 = request.POST.get('r8c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c13 = request.POST.get('r8c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c14 = request.POST.get('r8c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c15 = request.POST.get('r8c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c16 = request.POST.get('r8c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c17 = request.POST.get('r8c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c18 = request.POST.get('r8c18').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c19 = request.POST.get('r8c19').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c20 = request.POST.get('r8c20').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c21 = request.POST.get('r8c21').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c22 = request.POST.get('r8c22').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c23 = request.POST.get('r8c23').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c24 = request.POST.get('r8c24').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c25 = request.POST.get('r8c25').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c26 = request.POST.get('r8c26').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c27 = request.POST.get('r8c27').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c4 = request.POST.get('r9c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c5 = request.POST.get('r9c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c6 = request.POST.get('r9c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c7 = request.POST.get('r9c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c8 = request.POST.get('r9c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c9 = request.POST.get('r9c9').replace('\t', ' ').replace('\n', ' ').replace('\r', | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module is part of the opsi PackageBuilder
see: https://forum.opsi.org/viewforum.php?f=22
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = '<NAME>'
__copyright__ = "Copyright 2013-2015, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import os
from spur.ssh import MissingHostKey
from time import sleep
from pathlib import PurePath, Path
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtSlot, pyqtSignal
from PyQt5.QtWidgets import QApplication
import oPB
from oPB.core.confighandler import ConfigHandler
from oPB.core.datadefinition import changelog_footer, ControlFileData, ChangelogEntry, ProductDependency, ProductProperty
from oPB.core.processing import OpsiProcessing
from oPB.core.tools import Helper, LogMixin
translate = QtCore.QCoreApplication.translate
class BaseController(LogMixin):
    # NOTE(review): the triple-quoted description at the bottom of this attribute
    # block is not the first statement in the class body, so Python does NOT pick
    # it up as __doc__ — consider moving it to the top of the class.
    # --- Qt signals emitted towards the UI layer ---
    closeAppRequested = pyqtSignal(int)
    msgSend = pyqtSignal(str) # emit message text, params: header and text
    processingStarted = pyqtSignal()
    processingEnded = pyqtSignal(bool)
    dataAboutToBeAquired = pyqtSignal(object)
    dataAquired = pyqtSignal()
    # --- class-level attributes, shared by ALL instances (class attributes) ---
    # presumably caches of opsi backend query results — verify against users
    clientlist_dict = None
    clientsondepotslist_dict = None
    clientgroups_dict = None
    groups_dict = None
    depotlist_dict = None
    productlist_dict = None
    lockedproductlist_dict = None
    productsondepotslist = None
    # NOTE(review): mutable class attribute — one list shared across instances;
    # confirm that is intended.
    joblist = []
    """
    Base class for handling application requests. It contains necessary methods and attributes
    used for GUI and non-GUI (console) operations.
    """
def __init__(self, args):
super().__init__()
print("controller/BaseController parent: None -> self: ", self) if oPB.PRINTHIER else None
self.args = None
self.ui = None
self.controlData = None
self._dataSaved = None # True = success, False = failure, None = unset
self._dataLoaded = None # True = success, False = failure, None = unset
self._actionDesc = "" # action description for status bar
self.args = args
self.controlData = ControlFileData()
self.controlData.dataLoaded.connect(self.check_backend_data_loaded)
self.controlData.dataSaved.connect(self.check_backend_data_saved)
@pyqtSlot(bool)
def check_backend_data_saved(self, retval):
"""Set marker to see if backend data has been saved successfully"""
self.logger.debug("Retrieved backend data saved status: " + str(retval))
self._dataSaved = retval
@pyqtSlot(bool)
def check_backend_data_loaded(self, retval):
"""Set marker to see if backend data has been saved successfully"""
self.logger.debug("Retrieved backend data loaded status: " + str(retval))
self._dataLoaded = retval
    def save_backend(self):
        """Save backend data.

        Optionally asks for (or auto-generates) a changelog comment first, then
        triggers the save and busy-waits until ``check_backend_data_saved`` has
        recorded the outcome in ``self._dataSaved``.
        """
        # do we have to add a forced build comment?
        if ConfigHandler.cfg.chlog_on_save == "True":
            comment = ""
            if not self.args.nogui:
                # keep prompting until a non-empty comment was entered
                while comment == "":
                    (comment, accept) = self.msgbox(translate("baseController","Please enter a short comment:"), oPB.MsgEnum.MS_QUEST_PHRASE)
            else:
                comment = "Auto-save project from command line"
            self.add_changelog_entry(" * SAVE COMMENT: " + comment)
        # reset the marker, then busy-wait for the dataSaved signal to flip it.
        # NOTE(review): this spin loop relies on the signal being delivered while
        # it runs — confirm the connection type makes that possible.
        self._dataSaved = None
        self.controlData.save_data()
        while self._dataSaved is None: # _dataSaved has to be True or False
            pass
        if not self._dataSaved:
            self.logger.error("Backend data could not be saved")
            self.msgbox(translate("baseController", "Project could not be saved successfully!"), oPB.MsgEnum.MS_ERR)
            oPB.EXITCODE = oPB.RET_BSAVE
        else:
            self.logger.info("Data saved successfully")
            # NOTE(review): translation context "mainController" differs from the
            # "baseController" context used elsewhere here — confirm intended.
            self.msgbox(translate("mainController", "Project saved successfully!"), oPB.MsgEnum.MS_INFO)
def reset_backend(self):
"""Reset backend data to initial values"""
self._dataLoaded = None
self._dataSaved = None
self.controlData.init_data()
def create_backend(self, project_folder):
path = PurePath(project_folder)
try:
self.create_project_paths(project_folder)
project_name = path.name.replace(" ","_")
self.controlData.init_data(project_name)
self.controlData.projectfolder = project_folder
self.add_changelog_entry("Project created with opsi Package Builder " + oPB.PROGRAM_VERSION)
self.save_backend()
except Exception:
self.logger.error("Initial backend data could not be created/loaded.")
self.msgbox(translate("baseController", "Project could not be created!"), oPB.MsgEnum.MS_ERR)
self.reset_backend()
raise
def add_changelog_entry(self, text = ""):
if text == "":
text = "Please add a short description."
newentry = ChangelogEntry(self.controlData.id)
newentry.version = "(" + self.controlData.productversion + "-" + self.controlData.packageversion + ")"
newentry.status = oPB.CHLOG_STATI[0]
newentry.urgency = oPB.CHLOG_BLOCKMARKER + oPB.CHLOG_URGENCIES[0]
newentry.text = "\n" + text + changelog_footer()
self.controlData.changelog_append(newentry)
def create_project_paths(self, base):
for elem in oPB.BASE_FOLDERS:
try:
path = Path(Helper.concat_path_native(base, elem))
if not path.exists():
path.mkdir(parents=True)
except OSError:
raise
    def load_backend(self, project_name):
        """Load project data.

        Triggers the backend load and busy-waits until
        ``check_backend_data_loaded`` has recorded the outcome in
        ``self._dataLoaded``; sets ``oPB.EXITCODE`` on failure.
        """
        # itemChanged signal has to be disconnected temporarily, because
        # if not, dataChanged would be set after loading
        self.logger.info("Load project: " + project_name)
        # reset the marker, then busy-wait for the dataLoaded signal to flip it.
        # NOTE(review): spin loop relies on signal delivery while it runs —
        # confirm the connection type makes that possible.
        self._dataLoaded = None
        self.controlData.load_data(Helper.concat_path_native(ConfigHandler.cfg.dev_dir, project_name))
        while self._dataLoaded is None: # _dataLoaded has to be True or False
            pass
        if not self._dataLoaded:
            self.logger.error("Backend data could not be loaded.")
            self.logger.debug("Set exitcode RET_EOPEN")
            oPB.EXITCODE = oPB.RET_EOPEN
            self.msgbox(translate("baseController", "Project could not be loaded!"), oPB.MsgEnum.MS_ERR)
        else:
            self.logger.info("Backend data loaded")
            self.msgbox(translate("baseController", "Project loaded successfully!"), oPB.MsgEnum.MS_STAT)
def _do(self, jobtype, msg, **kwargs):
self.logger.debug("Dispatch job")
"""Call OpsiProcessing engine"""
# check, if depot dialog canceled -> depot==None
if "depot" in kwargs:
if kwargs.get("depot") is None:
self.logger.info("Processing canceled via depot selection")
self.msgbox(translate("baseController", "Processing canceled!"), oPB.MsgEnum.MS_ALWAYS)
return []
if self.args.quiet:
proc = OpsiProcessing(self.controlData, MissingHostKey.accept)
else:
proc = OpsiProcessing(self.controlData, MissingHostKey.warn)
proc.progressChanged.connect(self.msgSend, type=QtCore.Qt.DirectConnection)
# run build job
self.processingStarted.emit()
self.msgSend.emit(msg)
result = proc.run(jobtype, **kwargs)
oPB.EXITCODE = result[0]
self.logger.debug("Exitcode after job processing: " + str(oPB.EXITCODE))
# remove "..." at end
msg = msg.rstrip("...")
if result[0] == oPB.RET_OK:
self.msgbox(msg + ": " + translate("baseController", "Action completed successfully!"), oPB.MsgEnum.MS_INFO)
self.processingEnded.emit(True)
else:
self.msgbox(msg + ": " + result[2], result[1])
self.processingEnded.emit(False)
proc.progressChanged.disconnect(self.msgSend)
return result[3]
@pyqtSlot()
def do_build(self, dest = ""):
if (self.controlData.packagename in os.listdir(self.controlData.projectfolder)):
if self.args.build_mode is None:
self.logger.debug("Package exits. Ask for further step (cancel, rebuild, add version)")
reply = self.msgbox(translate("BaseController", "This package version already exists! You have three possibilities:@@Rebuild@TAB@TAB= rebuild (overwrite) the existing one@Add version@TAB= auto-correct package version and build new one@Cancel@TAB= cancel build process"),
oPB.MsgEnum.MS_QUEST_CTC)
else:
if self.args.build_mode.upper() == "CANCEL":
reply = 0
if self.args.build_mode.upper() == "REBUILD":
reply = 1
if self.args.build_mode.upper() == "ADD":
reply = 2
# cancel
if reply == 0:
self.logger.debug("Process choice: cancel")
oPB.EXITCODE = oPB.RET_BCANCEL
return
# rebuild
if reply == 1:
self.logger.debug("Process choice: rebuild")
try:
self.logger.debug("Deleting existing package: " + self.controlData.local_package_path)
os.unlink(self.controlData.local_package_path)
except Exception:
self.logger.error("Could not delete old package!")
self.msgbox(translate("baseController", "Could not delete old package!"), oPB.MsgEnum.MS_ERR)
oPB.EXITCODE = oPB.RET_BFILEDEL
return
# add version
if reply == 2:
self.logger.debug("Process choice: add version")
self.controlData.inc_packageversion()
self.save_backend()
while self._dataSaved is None: # _dataSaved has to be True or False
pass
if not self._dataSaved:
self.logger.error("Backend data could not be saved")
self._dataSaved = None
oPB.EXITCODE = oPB.RET_BSAVE
return
# do we have to add a forced build comment?
if ConfigHandler.cfg.chlog_on_build == "True":
comment = ""
if self.args.build_mode is None:
while comment == "":
(comment, accept) = self.msgbox(translate("baseController","Please enter a short build comment:"), oPB.MsgEnum.MS_QUEST_PHRASE)
else:
comment = "Automatic entry: command line building initiated"
self.add_changelog_entry(" * BUILD COMMENT: " + comment)
self.save_backend()
while self._dataSaved is None: # _dataSaved has to be True or False
pass
if not self._dataSaved:
self.logger.error("Backend data could not be saved")
self._dataSaved = None
oPB.EXITCODE = oPB.RET_BSAVE
return
# reset any manipulation
# if we don't do this, normale save operation can not be
# recognized from outside
self._dataSaved = None
self._do(oPB.OpEnum.DO_BUILD, translate("baseController", "Build running..."), alt_destination = dest)
@pyqtSlot()
def do_install(self, dest = "", depot = ""):
self._do(oPB.OpEnum.DO_INSTALL, translate("baseController", "Installation running..."), alt_destination = dest, depot = depot)
@pyqtSlot()
def do_quickinstall(self, dest = "", pack = "", depot = ""):
self._do(oPB.OpEnum.DO_QUICKINST, translate("baseController", "Installation running..."), alt_destination = dest, packagefile = pack, depot = depot)
@pyqtSlot()
def do_quickuninstall(self, dest = "", packs = [], depot = ""):
self._do(oPB.OpEnum.DO_QUICKUNINST, translate("baseController", "Deinstallation running..."), alt_destination = dest, productlist = packs, depot = depot)
@pyqtSlot()
def do_getlockedproducts(self, dest = "", packs = [], depot = ""):
self._do(oPB.OpEnum.DO_GETLOCKEDPRODUCTS, translate("baseController", "Get locked products running..."), alt_destination = dest, productlist = packs,
depot = depot)
@pyqtSlot()
def do_upload(self, packagefile, dest = "", depot = ""):
self._do(oPB.OpEnum.DO_UPLOAD, translate("baseController", "Upload in progress..."), alt_destination = dest, packagefile = packagefile, depot = depot)
@pyqtSlot()
def do_installsetup(self, dest = "", depot = ""):
self._do(oPB.OpEnum.DO_INSTSETUP, translate("baseController", "Installation + setup running..."), alt_destination = dest, depot = depot)
@pyqtSlot()
def do_uninstall(self, dest = "", depot = ""):
self._do(oPB.OpEnum.DO_UNINSTALL, translate("baseController", "Deinstallation running..."), alt_destination = dest, depot = depot)
@pyqtSlot()
def do_setrights(self, dest = "", package = ""):
if package == "":
package = self.controlData.path_on_server
self._do(oPB.OpEnum.DO_SETRIGHTS,
translate("baseController", "Setting package rights on:") + " " + self.controlData.path_on_server, alt_destination = dest, package=package)
@pyqtSlot()
def do_setrights_on_repo(self, dest = ""):
self._do(oPB.OpEnum.DO_SETRIGHTS_REPO, translate("baseController", "Setting package rights on:") + " " + oPB.REPO_PATH, | |
<reponame>gadio/moma-django
#==========================================================================
# Copyright 2012 Lucidel, Inc., 2013 Cloudoscope Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==========================================================================
from django.db.models import Q
from django.test import TestCase
from models import Profile, AnalyticsAccount, TestModel1, TestModel2, TestModel3, UniqueVisit
from datetime import datetime
from django.utils import timezone
class TestMongoQuerySetWithUniqueVisits(TestCase):
def setUp(self):
self.user = Profile()
self.user.username = 'testuser'
self.user.company_name = 'Test Company'
self.user.set_password('<PASSWORD>')
self.user.save()
self.acc = AnalyticsAccount(user = self.user)
self.acc.pk = 10000
self.acc.name = 'Test'
self.acc.analytics_id = 'asd324sf456m6n76b5b'
self.acc.history_fetched = False
self.acc.save()
self.other_acc = AnalyticsAccount(user = self.user)
self.other_acc.pk = 10001
self.other_acc.name = 'Test1'
self.other_acc.analytics_id = 'asd324sf456m111111'
self.other_acc.history_fetched = False
self.other_acc.save()
self.list_of_accounts = [self.acc, self.other_acc]
records = []
class NumberLong():
def __init__(self, num):
self.n = num
records.append({ "goal_starts" : { }, "time_on_site" : 17, "user_id" : 650825334, "account_id" : NumberLong(5),
"campaign" : "(not set)", "location" : { "cr" : "United States", "rg" : "Tennessee",
"ct" : "Clarksville" }, "demographics" :
{ "age" : "GenX (25-44)", "education" : "High School" }, "first_visit_date" :
ISODate("2012-08-04T21:18:17Z"), "referral_path" : "(not set)", "source" : "google", "exit_page_path" :
"/some-analysis/g1iar-daisy/", "landing_page_path" : "(not set)", "keyword" :
"g1iar daisy sparknotes", "date" : ISODate("2012-08-04T00:00:00Z"), "goal_completions" : { },
"visit_count" : 1, "visit_id" : "0---30----0---------------650825334.1344115097",
"page_views" : 3, "goal_values" : { } })
records.append({ "goal_starts" : { }, "time_on_site" : 36, "user_id" : 277227593, "account_id" : NumberLong(5),
"campaign" : "(not set)", "location" : { "cr" : "United States", "rg" : "Tennessee",
"ct" : "Sevierville" }, "demographics" :
{ "gender" : "Men", "education" : "High School" }, "first_visit_date" :
ISODate("2012-08-06T14:46:23Z"), "referral_path" : "(not set)", "source" : "google", "exit_page_path" :
"/some-analysis/looking-at-my-ancestors/", "landing_page_path" : "(not set)", "keyword" :
"(not provided)", "date" : ISODate("2012-08-06T00:00:00Z"), "goal_completions" : { }, "visit_count" : 1,
"visit_id" : "0-2--0-1---1---------------277227593.1344264383", "page_views" : 1,
"goal_values" : { } })
records.append({ "goal_starts" : { }, "time_on_site" : 27, "user_id" : 1429730596, "account_id" : NumberLong(5),
"campaign" : "(not set)", "location" : { "cr" : "United States", "rg" : "Virginia",
"ct" : "Burke" }, "demographics" :
{ "gender" : "Men", "age" : "Baby Boomers (45-70)", "education" : "High School" },
"first_visit_date" : ISODate("2012-08-06T04:28:22Z"), "referral_path" : "(not set)", "source" :
"google", "exit_page_path" : "/some-analysis/the_double-blind/", "landing_page_path" : "(not set)",
"keyword" : "(not provided)", "date" : ISODate("2012-08-06T00:00:00Z"), "goal_completions" :
{ }, "visit_count" : 1, "visit_id" : "0-2-90-1--0---------------1429730596.1344227302",
"page_views" : 3, "goal_values" : { } })
records.append({ "goal_starts" : { }, "time_on_site" : 27, "user_id" : 2110905334, "account_id" : NumberLong(5),
"campaign" : "(not set)", "location" : { "cr" : "United States", "rg" : "New York",
"ct" : "Poughkeepsie" }, "demographics" :
{ "gender" : "Men", "education" : "High School", "political" : "Democrats" }, "first_visit_date" :
ISODate("2012-08-06T18:21:11Z"), "referral_path" : "(not set)", "source" : "yahoo", "exit_page_path" :
"/some-analysis/joe-and-bobby/", "landing_page_path" : "(not set)", "keyword" :
"book summaries", "date" : ISODate("2012-08-06T00:00:00Z"), "goal_completions" : { },
"visit_count" : 1, "visit_id" : "0-0--0-1---------------2110905334.1344277271",
"page_views" : 1, "goal_values" : { } })
records.append({ "goal_starts" : { }, "time_on_site" : 120, "user_id" : 952676001, "account_id" : NumberLong(5),
"campaign" : "(not set)", "location" : { "cr" : "United States", "rg" : "Massachusetts",
"ct" : "Abington" }, "demographics" :
{ "age" : "GenX (25-44)", "education" : "High School" },
"first_visit_date" : ISODate("2012-08-06T05:53:52Z"), "referral_path" : "(not set)",
"source" : "google", "exit_page_path" : "/some-analysis/the-blue-bush/",
"landing_page_path" : "(not set)", "keyword" : "(not provided)",
"date" : ISODate("2012-08-06T00:00:00Z"), "goal_completions" : { }, "visit_count" : 1,
"visit_id" : "0---30---------------952676001.1344232432", "page_views" : 3,
"goal_values" : { } })
records.append({ "goal_starts" : { }, "time_on_site" : 10, "user_id" : 805172613, "account_id" : NumberLong(5),
"campaign" : "(not set)", "location" : { "cr" : "United States", "rg" : "New York",
"ct" : "Peekskill" }, "demographics" :
{ "age" : "GenX (25-44)", "education" : "High School" }, "first_visit_date" :
ISODate("2012-08-06T16:27:55Z"), "referral_path" : "(not set)", "source" : "bing", "exit_page_path" :
"/wikipedia/rama-ii/", "landing_page_path" : "(not set)", "keyword" : "research rama ii", "date" :
ISODate("2012-08-06T00:00:00Z"), "goal_completions" : { }, "visit_count" : 2, "visit_id" :
"0---30---------------805172613.1344270475", "page_views" : 2, "goal_values" : { } })
records.append({ "goal_starts" : { }, "time_on_site" : 147, "user_id" : 2060101123, "account_id" : NumberLong(5),
"campaign" : "(not set)", "location" : { "cr" : "United States", "rg" : "South Carolina", "ct" :
"Charleston" }, "demographics" : { "age" : "GenX (25-44)", "education" : "High School" },
"first_visit_date" : ISODate("2012-08-06T02:04:05Z"), "referral_path" : "(not set)", "source" :
"google", "exit_page_path" : "/search", "landing_page_path" : "(not set)", "keyword" :
"a short reference", "date" : ISODate("2012-08-06T00:00:00Z"), "goal_completions" : { },
"visit_count" : 1, "visit_id" : "0-2-30---------------2060101123.1344218645",
"page_views" : 3, "goal_values" : { } })
records.append({ "goal_starts" : { }, "time_on_site" : 25, "user_id" : 1525231829, "account_id" : NumberLong(5),
"campaign" : "(not set)", "location" : { "cr" : "United States", "rg" : "New York", "ct" :
"Massapequa" }, "demographics" : { "age" : "GenX (25-44)", "education" : "High School" },
"first_visit_date" : ISODate("2012-08-05T14:57:17Z"), "referral_path" : "(not set)", "source" :
"google", "exit_page_path" : "/wikipedia/indian-reservations/", "landing_page_path" : "(not set)", "keyword" :
"indian reservations in north america", "date" : ISODate("2012-08-08T00:00:00Z"), "goal_completions" : { },
"visit_count" : 2, "visit_id" : "0---30---------------1525231829.1344178637",
"page_views" : 2, "goal_values" : { } })
records.append({ "_id" : ObjectId("502abdabf7f16836f1002847"), "goal_starts" : { }, "time_on_site" : 32,
"user_id" : 1322541271, "account_id" : NumberLong(5), "campaign" : "(not set)", "location" :
{ "cr" : "United States", "rg" : "Ohio", "ct" : "Amherst" }, "demographics" : { },
"first_visit_date" : ISODate("2012-07-31T12:33:33Z"), "referral_path" : "(not set)", "source" :
"google", "exit_page_path" : "/some-analysis/the-language-barrier/", "landing_page_path" :
"(not set)", "keyword" : "the language barrier", "date" : ISODate("2012-07-31T00:00:00Z"),
"goal_completions" : { }, "visit_count" : 1, "visit_id" :
"false---------------1322541271.1343738013", "page_views" : 2, "goal_values" : { } })
records.append({ "_id" : ObjectId("502abdabf7f16836f1002848"), "goal_starts" : { }, "time_on_site" : 26,
"user_id" : 1044048896, "account_id" : NumberLong(5), "campaign" : "(not set)", "location" :
{ "cr" : "United States", "rg" : "New York", "ct" : "New York" }, "demographics" : { },
"first_visit_date" : ISODate("2012-07-31T16:42:23Z"), "referral_path" : "(not set)", "source" :
"google", "exit_page_path" : "/some-analysis/mercy/", "landing_page_path" : 12, "keyword" :
"mercy discussion", "date" : ISODate("2012-07-31T00:00:00Z"), "goal_completions" : { }, "visit_count" :
1, "visit_id" : "false---------------1044048896.1343752943", "page_views" : 1, "goal_values" :
{ } })
records.append({ "_id" : ObjectId("502abdabf7f16836f100284b"), "goal_starts" : { }, "time_on_site" : 38,
"user_id" : 184868780, "account_id" : NumberLong(5), "campaign" : "(not set)", "location" :
{ "cr" : "Ireland", "rg" : "Dublin", "ct" : "Dublin" }, "demographics" : { }, "first_visit_date" :
ISODate("2012-07-30T14:47:47Z"), "referral_path" : "(not set)", "source" : "google", "exit_page_path" :
"/some-analysis/jewelery-cutting/", "landing_page_path" : "(not set)", "keyword" :
"practical jewel setting and cutting", "date" : ISODate("2012-07-30T00:00:00Z"), "goal_completions" :
{ }, "visit_count" : 1, "visit_id" : "false---------------184868780.1343659667",
"page_views" : 3, "goal_values" : { } })
records.append({ "_id" : ObjectId("502abdabf7f16836f100284c"), "goal_starts" : { }, "time_on_site" : 196,
"user_id" : 1805411236, "account_id" : NumberLong(5), "campaign" : "(not set)", "location" :
{ "cr" : "India", "rg" : "Kerala", "ct" : "Cochin" }, "demographics" : { }, "first_visit_date" :
ISODate("2012-07-30T12:29:05Z"), "referral_path" : "/analysis-nama-ecological-expert/", "source" :
"articlemyriad.com", "exit_page_path" : "/some-analysis/the-financial-expert/", "landing_page_path" :
"/analysis-nama-ecological-expert/", "keyword" : "(not set)", "date" : ISODate("2012-07-30T00:00:00Z"),
"goal_completions" : { }, "visit_count" : 1, "visit_id" :
"false---------------1805411236.1343651345", "page_views" : 11, "goal_values" : { } })
records.append({ "_id" : ObjectId("502abdabf7f16836f100284d"), "goal_starts" : { }, "time_on_site" : 11,
"user_id" : 169596409, "account_id" : NumberLong(5), "campaign" : "(not set)", "location" :
{ "cr" : "United States", "rg" : "Pennsylvania", "ct" : "Willow Grove" }, "demographics" :
{ "education" : "College" }, "first_visit_date" : ISODate("2012-07-31T12:16:26Z"), "referral_path" :
"(not set)", "source" : "yahoo", "exit_page_path" : "/some-analysis/the-maincoon-cat/", "landing_page_path" :
"(not set)", "keyword" : "the maincoon cat", "date" : ISODate("2012-07-31T00:00:00Z"),
"goal_completions" : { }, "visit_count" : 1, | |
<reponame>eriknw/pygraphblas
import sys
from operator import mod
from itertools import product, repeat
import re
import pytest
from pygraphblas import *
from pygraphblas.base import ffi, lib, _check
def test_iseq():
    """Matrices compare equal only when type, shape and entries all match."""
    a = Matrix.sparse(INT8, 10, 10)
    a[0, 0] = 1
    b = Matrix.sparse(INT8, 10, 10)
    b[0, 0] = 1
    # same entries, different element type
    c = Matrix.sparse(INT64, 10, 10)
    c[0, 0] = 1
    # same type, different shape
    d = Matrix.sparse(INT64, 10, 9)
    d[0, 0] = 1
    # same type and shape, different entry position
    e = Matrix.sparse(INT64, 10, 9)
    e[0, 1] = 1
    assert a.iseq(b)
    assert b.isne(c)
    assert c.isne(d)
    assert d.isne(e)
def test_matrix_init_without_type():
    """Wrapping a raw GrB_Matrix pointer infers the element type."""
    original = Matrix.sparse(INT8)
    # duplicate the underlying GrB_Matrix through the low-level C API
    raw = ffi.new("GrB_Matrix*")
    _check(lib.GrB_Matrix_dup(raw, original._matrix[0]))
    wrapped = Matrix(raw)
    assert original.type == wrapped.type
def test_matrix_create():
    """Exercise the sparse/dense/from_lists constructors and their defaults."""
    # default dimensions are the library maximum
    mat = Matrix.sparse(INT8)
    assert mat.nrows == lib.GxB_INDEX_MAX
    assert mat.ncols == lib.GxB_INDEX_MAX
    assert mat.nvals == 0
    # explicit dimensions, still holding no values
    mat = Matrix.sparse(INT8, 10, 10)
    assert mat.nrows == 10
    assert mat.ncols == 10
    assert mat.nvals == 0
    assert len(mat) == 0
    # a dense 1x1 matrix stores exactly one value
    mat = Matrix.dense(INT8, 1, 1)
    assert mat.nrows == 1
    assert mat.ncols == 1
    assert mat.nvals == 1
    # zero-sized dense matrices are rejected
    with pytest.raises(AssertionError):
        mat = Matrix.dense(INT8, 0, 0)
    # complex literals infer the FC64 type
    mat = Matrix.from_lists([0], [0], [0j])
    assert mat.type == FC64
    assert mat.nrows == 1
    assert mat.ncols == 1
    assert mat.nvals == 1
    # sparsity control flags are accepted by the dense constructor
    mat = Matrix.dense(INT8, 1, 1, sparsity=lib.GxB_FULL + lib.GxB_BITMAP)
def test_matrix_get_set_element():
    """Element access uses (row, col) tuples; any other index raises."""
    mat = Matrix.sparse(INT8, 10, 10)
    mat[3, 3] = 3
    assert mat.nrows == 10
    assert mat.ncols == 10
    assert mat.nvals == 1
    assert len(mat) == 1
    assert mat[3, 3] == 3
    # non-tuple indices are rejected for both set and get
    with pytest.raises(TypeError):
        mat[2.0] = 3
    with pytest.raises(TypeError):
        _ = mat[2.0]
def test_matrix_slice_vector():
    """Indexing a matrix with a single row index yields that row as a Vector."""
    mat = Matrix.from_lists(list(range(10)), list(range(10)), list(range(10)))
    assert mat[5] == Vector.from_lists([5], [5], 10)
def test_clear():
    """clear() drops every stored value while keeping the dimensions."""
    mat = Matrix.from_lists(list(range(10)), list(range(10)), list(range(10)))
    assert mat.nvals == 10
    assert len(mat) == 10
    mat.clear()
    assert mat.nvals == 0
    assert len(mat) == 0
def test_resize():
    """resize() grows the matrix in place without touching stored values."""
    mat = Matrix.from_lists(list(range(10)), list(range(10)), list(range(10)))
    assert mat.nrows == 10
    assert mat.ncols == 10
    assert mat.nvals == 10
    mat.resize(20, 20)
    assert mat.nrows == 20
    assert mat.ncols == 20
    assert mat.nvals == 10
    # every original triple survives the resize
    expected = list(range(10))
    assert list(mat.rows) == expected
    assert list(mat.cols) == expected
    assert list(mat.vals) == expected
def test_matrix_create_dup():
    """Matrix.dup() copies both shape and contents."""
    src = Matrix.sparse(INT8, 10, 10)
    src[3, 3] = 3
    copy = Matrix.dup(src)
    assert copy.nrows == 10
    assert copy.ncols == 10
    assert copy.nvals == 1
    assert copy[3, 3] == 3
def test_matrix_to_from_lists():
    """from_lists/to_lists round-trip the (rows, cols, vals) triples."""
    seq = list(range(10))
    mat = Matrix.from_lists(seq, seq, seq)
    assert mat.nrows == 10
    assert mat.ncols == 10
    assert mat.nvals == 10
    assert mat.to_lists() == [seq, seq, seq]
def test_matrix_gb_type():
    """gb_type exposes the matching low-level GrB type constant."""
    cases = ((BOOL, lib.GrB_BOOL), (INT8, lib.GrB_INT8), (FP64, lib.GrB_FP64))
    for py_type, grb_type in cases:
        mat = Matrix.sparse(py_type, 10)
        assert mat.gb_type == grb_type
def test_matrix_eadd():
    """eadd unions two matrices, applying the operator only where both have values."""
    idx = list(range(10))
    left = Matrix.from_lists(idx, idx, idx)
    left[0, 1] = 1
    right = Matrix.from_lists(idx, idx, idx)
    right[1, 0] = 1
    # default operator is PLUS; single-sided entries pass through unchanged
    expected_sum = Matrix.from_lists(idx, idx, list(range(0, 20, 2)))
    expected_sum[0, 1] = 1
    expected_sum[1, 0] = 1
    assert left.eadd(right).iseq(expected_sum)
    assert (left + right).iseq(expected_sum)
    # `|` is eadd with the SECOND binary op, including the in-place form
    second_sum = left.eadd(right, left.type.SECOND)
    or_sum = left | right
    assert second_sum.iseq(or_sum)
    in_place = left.dup()
    in_place |= right
    assert in_place.iseq(or_sum)
    # eadd also accepts an operator name as a string
    expected_prod = Matrix.from_lists(idx, idx, [i * i for i in idx])
    expected_prod[0, 1] = 1
    expected_prod[1, 0] = 1
    string_prod = left.eadd(right, "*")
    assert string_prod.iseq(expected_prod)
    assert string_prod.isne(second_sum)
def test_sub():
    """`-` is eadd with MINUS: entries present in only one operand pass through."""
    I = list(range(10))
    v = Matrix.from_lists(I, I, I)
    v[0, 1] = 1
    w = Matrix.from_lists(I, I, I)
    w[1, 0] = 1
    # subtraction (explicit zeros, if same numbers are subtracted)
    subtraction_ref = Matrix.from_lists(I, I, [0] * 10)
    # present only in v: 1 - empty passes through as 1
    subtraction_ref[0, 1] = 1
    # present only in w: empty - 1 ALSO passes through as 1, not -1 —
    # eWiseAdd copies a single-sided value unchanged rather than assuming
    # an implicit zero (which is why the reference stores 1 here)
    subtraction_ref[1, 0] = 1
    diff1 = v - w
    assert diff1.iseq(subtraction_ref)
    diff2 = v.dup()
    diff2 -= w
    assert diff2.iseq(subtraction_ref)
def test_matrix_emult():
    """emult intersects two matrices, applying the operator where both have values."""
    idx = list(range(10))
    vals = list(range(1, 11))
    a = Matrix.from_lists(idx, idx, vals)
    b = Matrix.from_lists(idx, idx, vals)
    # default operator is TIMES
    assert a.emult(b).iseq(Matrix.from_lists(idx, idx, [x * x for x in vals]))
    # `&` is emult with the SECOND binary op, including the in-place form
    second = a.emult(b, a.type.SECOND)
    anded = a & b
    assert anded.iseq(second)
    in_place = a.dup()
    in_place &= b
    assert in_place.iseq(anded)
    # `/` divides element-wise; identical operands yield all ones
    ones = Matrix.from_lists(idx, idx, [1] * 10)
    quotient = a / b
    assert quotient.iseq(ones)
    in_place_div = a.dup()
    in_place_div /= b
    assert in_place_div.iseq(ones)
def test_matrix_reduce_bool():
    """reduce_bool ORs all stored values by default; a monoid context overrides it."""
    v = Matrix.sparse(BOOL, 10, 10)
    # an empty matrix reduces to False
    assert not v.reduce_bool()
    v[3, 3] = True
    v[4, 4] = False
    # default LOR monoid: any stored True wins
    # (idiom fix: assert the truth value directly instead of `== True`)
    assert v.reduce_bool()
    with BOOL.LAND_MONOID:
        # LAND monoid: the stored False makes the conjunction False
        assert not v.reduce_bool()
def test_matrix_reduce_int():
    """reduce_int sums all stored values by default; a monoid context overrides it."""
    mat = Matrix.sparse(INT8, 10, 10)
    empty_total = mat.reduce_int()
    assert type(empty_total) is int
    assert empty_total == 0
    mat[3, 3] = 3
    mat[4, 4] = 4
    assert mat.reduce_int() == 7
    with INT8.TIMES_MONOID:
        assert mat.reduce_int() == 12
def test_matrix_reduce_float():
    """reduce_float sums by default; a monoid context or argument overrides it."""
    mat = Matrix.sparse(FP64, 10, 10)
    empty_total = mat.reduce_float()
    assert type(empty_total) is float
    assert empty_total == 0.0
    mat[3, 3] = 3.3
    mat[4, 4] = 4.4
    assert mat.reduce_float() == 7.7
    with FP64.TIMES_MONOID:
        assert mat.reduce_float() == 14.52
    # the monoid can also be passed explicitly, outside any context
    assert mat.reduce_float(FP64.TIMES_MONOID) == 14.52
def test_matrix_reduce_vector():
    """reduce_vector collapses each row into one vector entry (PLUS by default)."""
    m = Matrix.from_lists(list(range(10)), list(range(10)), list(range(10)))
    v = m.reduce_vector()
    # bug fix: the comparison result was previously computed and discarded,
    # so the test could never fail — assert it explicitly
    assert v == Vector.from_list(list(range(10)))
def test_mxm():
    """Matrix-matrix multiply via mxm(), `@`, in-place `@=`, and a boolean semiring."""
    m = Matrix.from_lists([0, 1, 2], [1, 2, 0], [1, 2, 3])
    n = Matrix.from_lists([0, 1, 2], [1, 2, 0], [2, 3, 4])
    o = m.mxm(n)
    assert o.nrows == 3
    assert o.ncols == 3
    assert o.nvals == 3
    r = Matrix.from_lists([0, 1, 2], [2, 0, 1], [3, 8, 6])
    assert o.iseq(r)
    assert r.iseq(m @ n)
    # NOTE: `@=` mutates m in place — every use of m below sees the product r
    m @= n
    assert r.iseq(m)
    # LOR_LAND semiring over the *mutated* m yields the identity pattern
    o = m.mxm(n, semiring=BOOL.LOR_LAND)
    assert o.iseq(Matrix.from_lists([0, 1, 2], [0, 1, 2], [True, True, True]))
def test_mxm_context():
    """Semiring and descriptor context managers change what `@` computes."""
    m = Matrix.from_lists([0, 1, 2], [1, 2, 0], [1, 2, 3])
    n = Matrix.from_lists([0, 1, 2], [1, 2, 0], [2, 3, 4])
    # PLUS_PLUS semiring: add (instead of multiply) at each matched pair
    with INT64.PLUS_PLUS:
        o = m @ n
        assert o.iseq(Matrix.from_lists([0, 1, 2], [2, 0, 1], [4, 6, 5]))
    with BOOL.LOR_LAND:
        o = m @ n
        assert o.iseq(Matrix.from_lists([0, 1, 2], [2, 0, 1], [True, True, True]))
    # T0 descriptor context matches passing desc=descriptor.T0 explicitly
    with descriptor.T0:
        o = m @ n
        assert o.iseq(m.mxm(n, desc=descriptor.T0))
    # re-entering the boolean semiring context gives the same result again
    with BOOL.LOR_LAND:
        o = m @ n
        assert o.iseq(Matrix.from_lists([0, 1, 2], [2, 0, 1], [True, True, True]))
    # `@` only supports matrix/vector operands
    with pytest.raises(TypeError):
        m @ 3
    with pytest.raises(TypeError):
        m @ Scalar.from_value(3)
def test_mxv():
    """mxv multiplies a matrix by a vector; `@` and the T0 descriptor agree."""
    mat = Matrix.from_lists([0, 1, 2, 3], [1, 2, 0, 1], [1, 2, 3, 4])
    vec = Vector.from_lists([0, 1, 2], [2, 3, 4])
    result = mat.mxv(vec)
    assert result.iseq(Vector.from_lists([0, 1, 2, 3], [3, 8, 6, 12]))
    assert result.iseq(mat @ vec)
    # explicit transpose plus the T0 (transpose-input) descriptor cancel out
    assert result.iseq(mat.transpose().mxv(vec, desc=descriptor.T0))
    with INT64.PLUS_PLUS:
        plus_result = mat.mxv(vec)
        assert plus_result.iseq(Vector.from_lists([0, 1, 2, 3], [4, 6, 5, 7]))
        assert plus_result.iseq(mat @ vec)
def test_matrix_pattern():
    """pattern() yields a BOOL matrix marking the stored positions."""
    mat = Matrix.from_lists(list(range(10)), list(range(10)), list(range(10)))
    pat = mat.pattern()
    assert pat.gb_type == lib.GrB_BOOL
    assert pat.nrows == 10
    assert pat.ncols == 10
    assert pat.nvals == 10
def test_matrix_transpose():
    """transpose() swaps rows/columns; the T0 descriptor cancels it out."""
    mat = Matrix.from_lists(
        list(range(2, -1, -1)), list(range(3)), list(range(3)), nrows=3, ncols=4
    )
    flipped = mat.transpose()
    assert flipped.iseq(
        Matrix.from_lists([0, 1, 2], [2, 1, 0], [0, 1, 2], nrows=4, ncols=3)
    )
    # transposing with the transpose-input descriptor returns the original
    unchanged = mat.transpose(desc=descriptor.T0)
    assert unchanged.iseq(mat)
@pytest.mark.skip
def test_matrix_mm_read_write(tmp_path):
    """Round-trip a matrix through the Matrix Market text format."""
    path = tmp_path / "mmwrite_test.mm"
    path.touch()
    mat = Matrix.from_lists([0, 1, 2], [0, 1, 2], [2, 3, 4])
    with path.open("w") as out:
        mat.to_mm(out)
    # the written file uses 1-based Matrix Market coordinates
    expected_lines = [
        "%%MatrixMarket matrix coordinate integer symmetric\n",
        "%%GraphBLAS GrB_INT64\n",
        "3 3 3\n",
        "1 1 2\n",
        "2 2 3\n",
        "3 3 4\n",
    ]
    with path.open() as inp:
        assert inp.readlines() == expected_lines
    reloaded = Matrix.from_mm(path)
    assert reloaded.iseq(mat)
def test_matrix_tsv_read(tmp_path):
    """from_tsv parses a header line plus tab-separated 1-based triples."""
    path = tmp_path / "tsv_test.mm"
    path.touch()
    lines = ["3\t3\t3\n", "1\t1\t2\n", "2\t2\t3\n", "3\t3\t4\n"]
    with path.open("w") as out:
        out.writelines(lines)
    mat = Matrix.from_tsv(path, INT8, 3, 3)
    assert mat.to_lists() == [[0, 1, 2], [0, 1, 2], [2, 3, 4]]
def test_matrix_random():
    """random() fills a matrix with up to nvals randomly-placed entries."""
    seeded = Matrix.random(INT8, 5, 10, 10, seed=42)
    assert seeded.nrows == 10
    assert seeded.ncols == 10
    assert len(list(seeded)) == 5
    # without a seed, at most 5 entries are stored (positions may collide)
    unseeded = Matrix.random(INT8, 5, 10, 10)
    assert unseeded.nrows == 10
    assert unseeded.ncols == 10
    assert 0 < len(list(unseeded)) <= 5
    # default dimensions are the library maximum
    huge = Matrix.random(INT8, 5)
    assert huge.nrows == lib.GxB_INDEX_MAX
    assert huge.ncols == lib.GxB_INDEX_MAX
    assert huge.nvals == 5
def test_matrix_slicing():
I, J = tuple(map(list, zip(*product(range(3), repeat=2))))
V = list(range(9))
m = Matrix.from_lists(I, J, V, 3, 3)
# slice out row vector
v = m[2]
assert v == Vector.from_lists([0, 1, 2], [6, 7, 8])
# slice out row vector from rectangular matrix
v = m[:, 0:1][2]
assert v == Vector.from_lists([0, 1], [6, 7])
# slice out row vector
v = m[2, :]
assert v == Vector.from_lists([0, 1, 2], [6, 7, 8])
# slice out column vector
v = m[:, 2]
assert v == Vector.from_lists([0, 1, 2], [2, 5, 8])
# | |
<gh_stars>0
# organization/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import full_domain_string_available, merge_these_two_organizations,\
move_organization_followers_to_another_organization, move_organization_membership_link_to_another_organization, \
organizations_import_from_master_server, \
push_organization_data_to_other_table_caches, subdomain_string_available
from .models import GROUP, INDIVIDUAL, Organization, OrganizationReservedDomain, ORGANIZATION_UNIQUE_IDENTIFIERS
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaign, CandidateCampaignListManager, CandidateCampaignManager
from config.base import get_environment_variable
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from donate.models import MasterFeaturePackage
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_deleted_exception, handle_record_not_found_exception
from election.controllers import retrieve_upcoming_election_id_list
from election.models import Election, ElectionManager
from import_export_twitter.controllers import refresh_twitter_organization_details
from import_export_vote_smart.models import VoteSmartSpecialInterestGroupManager
from issue.models import ALPHABETICAL_ASCENDING, IssueListManager, IssueManager, \
OrganizationLinkToIssueList, OrganizationLinkToIssueManager, MOST_LINKED_ORGANIZATIONS
from measure.models import ContestMeasure, ContestMeasureListManager, ContestMeasureManager
from office.models import ContestOfficeManager
import operator
from organization.models import OrganizationListManager, OrganizationManager, ORGANIZATION_TYPE_MAP, UNKNOWN
from organization.controllers import figure_out_organization_conflict_values, \
organization_retrieve_tweets_from_twitter, organization_analyze_tweets
from position.models import PositionEntered, PositionForFriends, PositionListManager, PositionManager, \
INFORMATION_ONLY, OPPOSE, STILL_DECIDING, SUPPORT
from twitter.models import TwitterUserManager
from voter.models import retrieve_voter_authority, voter_has_authority, VoterManager
from voter_guide.models import VoterGuideManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, extract_twitter_handle_from_text_string, positive_value_exists, \
STATE_CODE_MAP
from django.http import HttpResponse
import json
# Stance options an organization can take on a position, shown in admin forms.
ORGANIZATION_STANCE_CHOICES = (
    (SUPPORT, 'We Support'),
    (OPPOSE, 'We Oppose'),
    (INFORMATION_ONLY, 'Information Only - No stance'),
    (STILL_DECIDING, 'We Are Still Deciding Our Stance'),
)
# Master-server endpoint this instance syncs organizations from.
ORGANIZATIONS_SYNC_URL = get_environment_variable("ORGANIZATIONS_SYNC_URL")  # organizationsSyncOut
# Root URL of this server; compared against the sync URL to detect the master.
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
logger = wevote_functions.admin.get_logger(__name__)
@login_required
def compare_two_organizations_for_merge_view(request):
    """
    Admin page that loads two organizations side-by-side so a human can
    resolve field conflicts before merging them.

    Reads organization1_we_vote_id, organization2_we_vote_id and an optional
    google_civic_election_id from the query string; redirects back to the
    organization list (or position list) with an error message if either
    lookup fails.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'political_data_manager'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    organization1_we_vote_id = request.GET.get('organization1_we_vote_id', 0)
    organization2_we_vote_id = request.GET.get('organization2_we_vote_id', 0)
    google_civic_election_id = request.GET.get('google_civic_election_id', 0)
    google_civic_election_id = convert_to_int(google_civic_election_id)
    organization_manager = OrganizationManager()
    organization_results = organization_manager.retrieve_organization_from_we_vote_id(organization1_we_vote_id)
    if not organization_results['organization_found']:
        messages.add_message(request, messages.ERROR, "Organization1 not found.")
        return HttpResponseRedirect(reverse('organization:organization_list', args=()) +
                                    "?google_civic_election_id=" + str(google_civic_election_id))
    organization_option1_for_template = organization_results['organization']
    # the same results variable is reused for the second lookup
    organization_results = organization_manager.retrieve_organization_from_we_vote_id(organization2_we_vote_id)
    if not organization_results['organization_found']:
        messages.add_message(request, messages.ERROR, "Organization2 not found.")
        return HttpResponseRedirect(reverse('organization:organization_position_list',
                                            args=(organization_option1_for_template.id,)) +
                                    "?google_civic_election_id=" + str(google_civic_election_id))
    organization_option2_for_template = organization_results['organization']
    organization_merge_conflict_values = figure_out_organization_conflict_values(
        organization_option1_for_template, organization_option2_for_template)
    # This view function takes us to displaying a template
    remove_duplicate_process = False  # Do not try to find another office to merge after finishing
    return render_organization_merge_form(
        request, organization_option1_for_template,
        organization_option2_for_template,
        organization_merge_conflict_values,
        remove_duplicate_process)
@login_required
def organization_analyze_tweets_view(request, organization_we_vote_id):
    """
    Analyze the locally-cached tweets for one organization (hashtag counts and
    organization-to-hashtag links), report the counts in a Django message, and
    redirect back to the organization's position list.
    :param request:
    :param organization_we_vote_id:
    :return:
    """
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', False)
    org_hashtags = organization_analyze_tweets(organization_we_vote_id)
    messages.add_message(request, messages.INFO, 'Tweets stored locally: {cached_tweets}, '
                                                 'Hash tags retrieved: {hash_tags_retrieved}, '
                                                 'Number of unique hashtags found in cached tweets: '
                                                 '{unique_hashtags}, '
                                                 'Organization links to hashtags: '
                                                 '{organization_link_to_hashtag_results}'
                                                 ''.format(cached_tweets=org_hashtags['cached_tweets'],
                                                           hash_tags_retrieved=org_hashtags['hash_tags_retrieved'],
                                                           unique_hashtags=org_hashtags['unique_hashtags'],
                                                           organization_link_to_hashtag_results=
                                                           org_hashtags['organization_link_to_hashtag_results']))
    return HttpResponseRedirect(reverse('organization:organization_we_vote_id_position_list',
                                        args=(organization_we_vote_id,)) +
                                "?google_civic_election_id=" + str(google_civic_election_id) + "&state_code=" +
                                str(state_code))
@login_required
def organization_retrieve_tweets_view(request, organization_we_vote_id):
    """
    For one organization, retrieve X Tweets, and capture all #Hashtags used.
    Reports saved/not-saved counts in a Django message, then redirects back
    to the organization's position list.
    :param request:
    :param organization_we_vote_id:
    :return:
    """
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', False)
    org_tweets_results = organization_retrieve_tweets_from_twitter(organization_we_vote_id)
    messages.add_message(request, messages.INFO, 'Organization retrieve tweets executed, '
                                                 'Tweets retrieved: {tweets_saved}, '
                                                 'Tweets not retrieved: {tweets_not_saved}, '
                                                 ''.format(tweets_saved=org_tweets_results['tweets_saved'],
                                                           tweets_not_saved=org_tweets_results['tweets_not_saved'],))
    return HttpResponseRedirect(reverse('organization:organization_we_vote_id_position_list',
                                        args=(organization_we_vote_id,)) +
                                "?google_civic_election_id=" + str(google_civic_election_id) + "&state_code=" +
                                str(state_code))
# This page does not need to be protected.
def organizations_sync_out_view(request):  # organizationsSyncOut
    """
    Export organizations as JSON for another We Vote server to import.

    Individuals are excluded; an optional ?state_served_code= narrows the
    result to organizations serving one state. On any failure (or an empty
    result) a JSON error payload is returned instead of raising.
    """
    state_served_code = request.GET.get('state_served_code', '')
    try:
        organization_queryset = Organization.objects.using('readonly').all()
        # individual people are stored as organizations too; don't export them
        organization_queryset = organization_queryset.exclude(organization_type__iexact=INDIVIDUAL)
        if positive_value_exists(state_served_code):
            organization_queryset = organization_queryset.filter(state_served_code__iexact=state_served_code)
        organization_list_dict = organization_queryset.values(
            'we_vote_id', 'organization_name', 'organization_type',
            'organization_description', 'state_served_code',
            'organization_website', 'organization_email',
            'organization_image', 'organization_twitter_handle',
            'twitter_user_id', 'twitter_followers_count',
            'twitter_description', 'twitter_location', 'twitter_name',
            'twitter_profile_image_url_https',
            'twitter_profile_background_image_url_https',
            'twitter_profile_banner_url_https', 'organization_facebook',
            'vote_smart_id', 'organization_contact_name',
            'organization_address', 'organization_city',
            'organization_state', 'organization_zip',
            'organization_phone1', 'organization_phone2',
            'organization_fax', 'wikipedia_page_title',
            'wikipedia_page_id', 'wikipedia_photo_url',
            'wikipedia_thumbnail_url', 'wikipedia_thumbnail_width',
            'wikipedia_thumbnail_height', 'ballotpedia_page_title',
            'ballotpedia_photo_url', 'we_vote_hosted_profile_image_url_large',
            'we_vote_hosted_profile_image_url_medium', 'we_vote_hosted_profile_image_url_tiny'
        )
        if organization_list_dict:
            organization_list_json = list(organization_list_dict)
            return HttpResponse(json.dumps(organization_list_json), content_type='application/json')
    except Exception as e:
        # fix: the exception used to be swallowed silently ("except ... pass"),
        # making sync failures undiagnosable; log it and fall through to the
        # same error payload as before
        logger.error("organizations_sync_out_view failed: %s", e)
    json_data = {
        'success': False,
        'status': 'ORGANIZATION_LIST_MISSING'
    }
    return HttpResponse(json.dumps(json_data), content_type='application/json')
@login_required
def organizations_import_from_master_server_view(request):
    """
    Pull organization data from the master We Vote server into this instance.
    Refuses to run when this server IS the master (the sync URL contains our
    own root URL), then reports saved/updated/skipped counts in a message and
    redirects to the sync dashboard.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'admin'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    if WE_VOTE_SERVER_ROOT_URL in ORGANIZATIONS_SYNC_URL:
        messages.add_message(request, messages.ERROR, "Cannot sync with Master We Vote Server -- "
                                                      "this is the Master We Vote Server.")
        return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')
    results = organizations_import_from_master_server(request, state_code)
    if not results['success']:
        messages.add_message(request, messages.ERROR, results['status'])
    else:
        messages.add_message(request, messages.INFO, 'Organizations import completed. '
                                                     'Saved: {saved}, Updated: {updated}, '
                                                     'Duplicates skipped: '
                                                     '{duplicates_removed}, '
                                                     'Not processed: {not_processed}'
                                                     ''.format(saved=results['saved'],
                                                               updated=results['updated'],
                                                               duplicates_removed=results['duplicates_removed'],
                                                               not_processed=results['not_processed']))
    return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
                                str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def organization_list_view(request):
    """Admin page listing organizations, filterable by state, type, linked
    issues and free-text search.

    Shows at most 200 organizations unless show_more (1000) or show_all is
    requested. With no search, placeholder organizations (empty names,
    "Voter-..." and "wv..." names) are excluded; individuals are excluded
    unless explicitly selected via organization_type_filter.
    """
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = \
        {'partner_organization', 'political_data_manager', 'political_data_viewer', 'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    candidate_we_vote_id = request.GET.get('candidate_we_vote_id', '')
    google_civic_election_id = request.GET.get('google_civic_election_id', '')
    organization_search = request.GET.get('organization_search', '')
    organization_type_filter = request.GET.get('organization_type_filter', '')
    selected_issue_vote_id_list = request.GET.getlist('selected_issues', '')
    sort_by = request.GET.get('sort_by', '')
    state_code = request.GET.get('state_code', '')
    show_all = request.GET.get('show_all', False)
    show_more = request.GET.get('show_more', False)  # Show up to 1,000 organizations
    show_issues = request.GET.get('show_issues', '')
    messages_on_stage = get_messages(request)
    organization_list_query = Organization.objects.all()
    if positive_value_exists(sort_by):
        if sort_by == "twitter":
            organization_list_query = \
                organization_list_query.order_by('organization_name').order_by('-twitter_followers_count')
        else:
            organization_list_query = organization_list_query.order_by('organization_name')
    else:
        organization_list_query = organization_list_query.order_by('organization_name')
    if positive_value_exists(state_code):
        organization_list_query = organization_list_query.filter(state_served_code__iexact=state_code)
    if positive_value_exists(organization_type_filter):
        if organization_type_filter == UNKNOWN:
            # Make sure to also show organizations that are not specified
            organization_list_query = organization_list_query.filter(
                Q(organization_type__iexact=organization_type_filter) |
                Q(organization_type__isnull=True) |
                Q(organization_type__exact='')
            )
        else:
            organization_list_query = organization_list_query.filter(organization_type__iexact=organization_type_filter)
    else:
        # By default, don't show individuals
        organization_list_query = organization_list_query.exclude(organization_type__iexact=INDIVIDUAL)
    link_issue_list_manager = OrganizationLinkToIssueList()
    issue_list_manager = IssueListManager()
    # Only show organizations linked to specific issues
    # 2017-12-12 DALE I'm not sure this is being used yet...
    issues_selected = False
    issue_list = []
    if positive_value_exists(selected_issue_vote_id_list):
        issues_selected = True
        new_issue_list = []
        # Cleanup: issue_list_manager was needlessly re-instantiated here;
        # reuse the instance created above.
        issue_list_results = issue_list_manager.retrieve_issues()
        if issue_list_results["issue_list_found"]:
            issue_list = issue_list_results["issue_list"]
            for issue in issue_list:
                if issue.we_vote_id in selected_issue_vote_id_list:
                    issue.selected = True
                new_issue_list.append(issue)
            issue_list = new_issue_list
        organization_we_vote_id_list_result = link_issue_list_manager.\
            retrieve_organization_we_vote_id_list_from_issue_we_vote_id_list(selected_issue_vote_id_list)
        organization_we_vote_id_list = organization_we_vote_id_list_result['organization_we_vote_id_list']
        # we decided to not deal with case-insensitivity, in favor of using '__in'
        organization_list_query = organization_list_query.filter(we_vote_id__in=organization_we_vote_id_list)
    if positive_value_exists(organization_search):
        search_words = organization_search.split()
        # Each word must match at least one field ("OR" within a word,
        # successive .filter() calls "AND" across words).
        for one_word in search_words:
            filters = []
            new_filter = Q(organization_name__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(organization_twitter_handle__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(organization_website__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(twitter_description__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(we_vote_id__icontains=one_word)
            filters.append(new_filter)
            new_filter = Q(vote_smart_id__icontains=one_word)
            filters.append(new_filter)
            # Add the first query
            if filters:
                final_filters = filters.pop()
                # ...and "OR" the remaining items in the list
                for item in filters:
                    final_filters |= item
                organization_list_query = organization_list_query.filter(final_filters)
    else:
        # This is the default organization list
        filters = []
        new_filter = Q(organization_name="")
        filters.append(new_filter)
        new_filter = Q(organization_name__startswith="Voter-")
        filters.append(new_filter)
        new_filter = Q(organization_name__startswith="wv")
        filters.append(new_filter)
        # Add the first query
        if filters:
            final_filters = filters.pop()
            # ...and "OR" the remaining items in the list
            for item in filters:
                final_filters |= item
            # NOTE this is "exclude"
            organization_list_query = organization_list_query.exclude(final_filters)
    organization_count = organization_list_query.count()
    messages.add_message(request, messages.INFO,
                         '{organization_count:,} organizations found.'.format(organization_count=organization_count))
    # Limit to only showing 200 on screen
    if positive_value_exists(show_more):
        organization_list = organization_list_query[:1000]
    elif positive_value_exists(show_all):
        organization_list = organization_list_query
    else:
        organization_list = organization_list_query[:200]
    # Now loop through these organizations and add on the linked_issues_count
    modified_organization_list = []
    special_interest_group_manager = VoteSmartSpecialInterestGroupManager()
    for one_organization in organization_list:
        # Turned off for now
        # one_organization.linked_issues_count = \
        #     link_issue_list_manager.fetch_issue_count_for_organization(0, one_organization.we_vote_id)
        if positive_value_exists(show_issues):
            # We want to look up the issues retrieved from Vote Smart and display them
            # if positive_value_exists(one_organization.linked_issues_count):
            show_hidden_issues = True
            one_organization.display_we_vote_issues = \
                issue_list_manager.fetch_organization_issues_for_display(
                    one_organization.we_vote_id, MOST_LINKED_ORGANIZATIONS, show_hidden_issues)
            if positive_value_exists(one_organization.vote_smart_id):
                one_organization.display_vote_smart_issues = \
                    special_interest_group_manager.fetch_vote_smart_organization_issues_for_display(
                        one_organization.vote_smart_id)
        modified_organization_list.append(one_organization)
    state_list = STATE_CODE_MAP
    sorted_state_list = sorted(state_list.items())
    organization_types_map = ORGANIZATION_TYPE_MAP
    # Sort by organization_type value (instead of key)
    organization_types_list = sorted(organization_types_map.items(), key=operator.itemgetter(1))
    template_values = {
        'messages_on_stage': messages_on_stage,
        'candidate_we_vote_id': candidate_we_vote_id,
        'google_civic_election_id': google_civic_election_id,
        'issue_list': issue_list,
        'issues_selected': issues_selected,
        'organization_type_filter': organization_type_filter,
        'organization_types': organization_types_list,
        'organization_list': modified_organization_list,
        'organization_search': organization_search,
        'show_all': show_all,
        'show_issues': show_issues,
        'show_more': show_more,
        'sort_by': sort_by,
        'state_code': state_code,
        'state_list': sorted_state_list,
    }
    return render(request, 'organization/organization_list.html', template_values)
@login_required
def organization_merge_process_view(request):
"""
Process the merging of two organizations using the Admin tool
:param request:
:return:
"""
# admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
organization_manager = OrganizationManager()
merge = request.POST.get('merge', False)
skip = request.POST.get('skip', False)
# Candidate 1 is the one we keep, and Candidate 2 is the one we will merge into Candidate 1
organization1_we_vote_id = request.POST.get('organization1_we_vote_id', 0)
organization2_we_vote_id = request.POST.get('organization2_we_vote_id', 0)
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
redirect_to_organization_list = request.POST.get('redirect_to_organization_list', False)
remove_duplicate_process = request.POST.get('remove_duplicate_process', False)
state_code = request.POST.get('state_code', '')
status = ''
if positive_value_exists(skip):
messages.add_message(request, messages.ERROR, 'Skip is not implemented for organizations yet.')
# results = organization_manager.update_or_create_organizations_are_not_duplicates(
# organization1_we_vote_id, organization2_we_vote_id)
# if not results['new_organizations_are_not_duplicates_created']:
# messages.add_message(request, messages.ERROR, 'Could not save organizations_are_not_duplicates entry: ' +
# results['status'])
# messages.add_message(request, messages.INFO, 'Prior organizations skipped, and not merged.')
# return HttpResponseRedirect(reverse('organization:find_and_merge_duplicate_organizations', args=()) +
# "?google_civic_election_id=" + str(google_civic_election_id) +
# "&state_code=" + str(state_code))
return HttpResponseRedirect(reverse('organization:compare_two_organizations_for_merge', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code) +
"&organization1_we_vote_id=" + str(organization1_we_vote_id) +
"&organization2_we_vote_id=" + | |
"""
Make html galleries from media directories. Organize by dates, by subdirs or by
the content of a diary file. The diary file is a markdown file organized by
dates, each day described by a text and some medias (photos and movies).
The diary file can be exported to:
* an html file with the text and subset of medias associated with each day,
* the previous html file extended with all medias in the media directory,
* an html file ready to import into Blogger.
"""
import sys
import os
import argparse
import glob
import shutil
import re
import io
import bisect
import locale
import textwrap
import base64
import datetime
import urllib
from configparser import ConfigParser
from collections import defaultdict
from subprocess import check_output, CalledProcessError, STDOUT
from urllib.request import urlopen
import colorama
import clipboard
import PIL
from PIL import Image, ImageChops
from lxml import objectify
import markdown
USAGE = """
galerie --gallery <root-dir> [--sourcedir <media-dir>]
[--bydir true|false*]
[--bydate true|false*]
[--diary true|false*]
[--recursive true|false*]
[--dates source*|diary|<yyyymmdd-yyyymmdd>]
[--github_pages true|false]
[--dest <directory>]
[--forcethumb]
galerie --update <root-dir>
galerie --create <root-dir> --sourcedir <media-dir>
[--recursive true|false*]
[--dates source*|<yyyymmdd-yyyymmdd>]
galerie --blogger <root-dir> --url <url>
[--check]
[--full]
[--dest <filename>]
Notes:
- * gives default
- all options can be abbreviated if there is no conflict with other options (--gallery --> --gal)
"""
# -- Post objects -------------------------------------------------------------
# Inline style letting captioned medias sit alongside plain ones.
CAPTION_IMAGE_STYLE = '''\
<style type="text/css">
span { display:inline-table; }
</style>\
'''
# Paragraph/header style. The %% survives the f-string interpolation into
# START and is reduced to a single % by the later "START % title" formatting.
STYLE = '''\
<style type="text/css">
p { margin-top:0px; margin-bottom:0px; }
h3 { font-size: 100%%; font-weight: bold; margin-top:0px; margin-bottom:0px; }
</style>
'''
# Html prologue; %s is the page title (filled with "START % title").
START = f'''\
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>%s</title>
<link rel="icon" href="favicon.ico" />
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="photobox/photobox.css">
<script src="photobox/jquery.min.js"></script>
<script src="photobox/jquery.photobox.js"></script>
{CAPTION_IMAGE_STYLE}
{STYLE}
</head>
<body>\
'''
# Diary pages get three fixed buttons toggling full / diary-only / text-only
# views by showing or hiding the gallery-blog, gallery-dcim and extra divs.
BUTTONS = '''\
<button id="btn_full" type="button" style="position: fixed; width: 50px; top: 20px; right: 20px; background-color:white">Full</button>
<button id="btn_blog" type="button" style="position: fixed; width: 50px; top: 40px; right: 20px; background-color:white">Diary</button>
<button id="btn_text" type="button" style="position: fixed; width: 50px; top: 60px; right: 20px; background-color:white">Text</button>
<script>
$('#btn_full').click(function() {
$("[id^=gallery-blog]").show();
$("[id^=gallery-dcim]").show();
$("div.extra").show();
});
$('#btn_text').click(function() {
$("[id^=gallery-blog]").hide();
$("[id^=gallery-dcim]").hide();
$("div.extra").hide();
});
$('#btn_blog').click(function() {
$("[id^=gallery-blog]").show();
$("[id^=gallery-dcim]").hide();
$("div.extra").hide();
});
</script>
'''
# Background colour of subdirectory thumbnails.
SUBDIR_BACKCOL = '#eee'
END = '</body>\n</html>'
SEP = '<hr color="#C0C0C0" size="1" />'
# Templates for diary medias: href, thumb src, width, height, title[, caption].
IMGPOST = '<a href="%s"><img src="%s" width="%d" height="%d" title="%s"/></a>'
VIDPOST = '<a href="%s" rel="video"><img src="%s" width="%d" height="%d" title="%s"/></a>'
IMGPOSTCAPTION = '''\
<span>
<a href="%s"><img src=%s width="%d" height="%d" title="%s"/></a>
<p>%s</p>
</span>
'''
VIDPOSTCAPTION = '''\
<span>
<a href="%s" rel="video"><img src=%s width="%d" height="%d" title="%s"/></a>
<p>%s</p>
</span>
'''
# Templates for medias coming from the media directory (DCIM galleries).
IMGDCIM = '<a href="%s"><img src="%s" width="%d" height="%d" title="%s"/></a>'
VIDDCIM = '<a href="%s" rel="video"><img src="%s" width="%d" height="%d" title="%s"/></a>'
# To reduce the space between images we use:
# "display: block;", "margin-bottom: 0em;" and "font-size: 0;"
# "display: block;" on img: correct spacing on desktop but not centered on phone
# "display: block;" on a: ok
DIRPOST = '<a href="%s"><img src="%s" width="%d" height="%d" style="border: 1px solid #C0C0C0;" /></a>'
DIRPOSTCAPTION = f'''
<span style="background-color:{SUBDIR_BACKCOL}; margin-bottom: 8px; border: 1px solid #C0C0C0;">
<a href="%s"><img src="%s" width="%d" height="%d" style="border: 1px solid #C0C0C0;" /></a>
<p style="margin-left:2px;">%s</p>
</span>
'''
# Blogger export templates: image resized to width 640, optional caption below.
BIMGPAT = '''\
<div class="separator" style="clear: both; text-align: center;">
<a href="%s" style="clear: left; margin-bottom: 0em; margin-right: 1em; font-size: 0; display: block;">
<img border="0" src="%s" width="640" />
</a></div>
'''
CAPTION_PAT = '''\
<div class="separator" style="clear: both; text-align: center;">
%s
</div>
'''
class Post:
    """One diary entry: a date, a markdown text and its attached medias.

    `medias` holds the items explicitly listed in the diary file; `dcim`
    holds items coming from the media directory and is filled externally.
    """
    def __init__(self, date, text, medias):
        # date: yyyymmdd
        self.date = date
        self.text = text
        self.medias = medias
        # medias found in the media directory for this day, attached later
        self.dcim = []
        # 1-based rank of this post among posts sharing the same date
        # (set by parse_markdown / from_date; 0 until then)
        self.daterank = 0
        # True marks a post whose content is hidden in the diary-only view
        # NOTE(review): assigned externally, not in this class -- confirm.
        self.extra = False
    def __lt__(self, other):
        """Posts sort chronologically by date string (yyyymmdd compares ok)."""
        return self.date < other.date
    @classmethod
    def from_markdown(cls, post):
        """Build a Post from the lines of one markdown record.

        Expects a '[yyyy/mm/dd]' first line, then optional text, then media
        lines '![](uri)' (image) or '[](uri)' (video), each optionally
        followed by a caption line. Consumes `post` (a list of lines) in
        place. Calls error() (fatal) on a missing or invalid date.
        """
        m = re.match(r'\[(\d\d\d\d/\d\d/\d\d)\]\n*', post[0])
        if m:
            date = m.group(1).replace('/', '')
            if not validate_date(date):
                error('Incorrect date value:', date)
            del post[0]
        else:
            error('No date in post', ' '.join(post))
        # skip blank lines between the date and the text
        while post and not post[0].strip():
            del post[0]
        text = ''
        while post and not re.match(r'!?\[\]', post[0]):
            text += post[0]
            del post[0]
        # remove empty lines at end
        text = re.sub(r'\n\n$', '\n', text)
        medias = list()
        while post and (match := re.match(r'!?\[\]\((.*)\)', post[0])):
            media = match.group(1)
            caption = None
            del post[0]
            # a non-media line right after a media line is its caption
            if post and not re.match(r'!?\[\]', post[0]):
                caption = post[0].strip()
                del post[0]
            # leading '!' distinguishes an image from a video link
            if match.group(0)[0] == '!':
                medias.append(PostImage(caption, media))
            else:
                medias.append(PostVideo(caption, media))
        return cls(date, text, medias)
    @classmethod
    def from_date(cls, date):
        """Build an empty Post for `date` whose text is the locale-formatted date."""
        dt = datetime.datetime.strptime(date, '%Y%m%d')
        datetext = dt.strftime("%A %d %B %Y").capitalize()
        post = cls(date, text=datetext, medias=[])
        post.daterank = 1
        return post
    def to_html(self, args, target='regular'):
        """Return the html lines for this post for `target` ('regular' or 'blogger')."""
        if target == 'regular':
            if args.diary:
                return self.to_html_diary(args)
            else:
                return self.to_html_regular(args)
        if target == 'blogger':
            return self.to_html_blogger()
    def to_html_regular(self, args):
        """Html for a gallery page: text, then subdir and dcim thumbnails."""
        html = list()
        if self.text:
            # possible with --bydate
            html.append(markdown.markdown(self.text))
        subdirs, dcim = dispatch_post_items(self.dcim)
        if self.dcim:
            html.append(SEP)
        for media in subdirs:
            html.append(media.to_html_dcim(args))
        if dcim:
            html.append(f'<div id="gallery-dcim-{self.date}-{self.daterank}">')
            for media in dcim:
                html.append(media.to_html_dcim(args))
            html.append('</div>')
        html.append(SEP)
        return html
    def to_html_diary(self, args):
        """Html for a diary page: text, diary medias, then extra dcim medias.

        The gallery-blog/gallery-dcim div ids and the extra wrapper are what
        the BUTTONS script shows and hides.
        """
        html = list()
        if self.extra:
            html.append('<div class="extra">')
        if self.text:
            html.append(markdown.markdown(self.text))
        if self.medias:
            html.append(f'<div id="gallery-blog-{self.date}-{self.daterank}">')
            for media in self.medias:
                html.append(media.to_html_post(args))
            html.append('</div>')
        _, dcim = dispatch_post_items(self.dcim)
        if dcim:
            html.append(f'<div id="gallery-dcim-{self.date}-{self.daterank}">')
            html.append(SEP)
            for media in dcim:
                html.append(media.to_html_dcim(args))
            html.append('</div>')
        html.append(SEP)
        if self.extra:
            html.append('</div>')
        return html
    def to_html_blogger(self):
        """Html for blogger export: text then blogger-ready media markup."""
        html = list()
        html.append(markdown.markdown(self.text))
        for image in self.medias:
            html.append(image.to_html_blogger())
        html.append(SEP)
        return html
class PostItem:
    """Base class for the medias attached to a post.

    Records the caption, the media URI (plus its basename) and thumbnail
    information; `resized_url` starts as None and is read later by the
    blogger export path (see PostImage.to_html_blogger).
    """
    def __init__(self, caption, uri, thumb=None, thumbsize=None, descr=''):
        self.caption = caption
        self.thumb = thumb
        self.thumbsize = thumbsize
        self.descr = descr
        self.uri = uri
        self.basename = os.path.basename(uri)
        self.resized_url = None
class PostImage(PostItem):
    """An image item of a post."""
    def to_markdown(self):
        """Return the diary markdown for the image: '![](uri)', plus a
        caption line when a caption is present.

        Bug fix: the image link pattern was missing from both format
        strings ('' % (self.uri,) raised TypeError). Post.from_markdown
        parses images as '![](uri)' (leading '!'), so that is the form
        that must be written back (cf. PostVideo.to_markdown).
        """
        if not self.caption:
            return '![](%s)' % (self.uri,)
        else:
            return '![](%s)\n%s' % (self.uri, self.caption)
    def to_html_post(self, args):
        """Return the html thumbnail for the diary part of a page."""
        descr = self.descr if args.thumbnails.media_description else ''
        if not self.caption:
            return IMGPOST % (self.uri, self.thumb, *self.thumbsize, descr)
        else:
            return IMGPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)
    def to_html_dcim(self, args):
        """Return the html thumbnail for the media-directory part of a page."""
        descr = self.descr if args.thumbnails.media_description else ''
        return IMGDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)
    def to_html_blogger(self):
        """Return the blogger-ready html (image resized to width 640)."""
        if not self.caption:
            return BIMGPAT % (self.uri, self.resized_url)
        else:
            return f'{BIMGPAT}\n{CAPTION_PAT}' % (self.uri, self.resized_url, self.caption)
class PostVideo(PostItem):
    """A video item of a post."""
    def to_markdown(self):
        """Return the diary markdown for the video: '[](uri)', plus a
        caption line when a caption is present."""
        md = '[](%s)' % (self.uri,)
        if self.caption:
            md += '\n%s' % (self.caption,)
        return md
    def to_html_post(self, args):
        """Return the html thumbnail for the diary part of a page."""
        descr = self.descr if args.thumbnails.media_description else ''
        if self.caption:
            return VIDPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)
        return VIDPOST % (self.uri, self.thumb, *self.thumbsize, descr)
    def to_html_dcim(self, args):
        """Return the html thumbnail for the media-directory part of a page."""
        descr = self.descr if args.thumbnails.media_description else ''
        return VIDDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)
    def to_html_blogger(self):
        """Return the blogger-ready html for the video.

        NOTE(review): relies on self.iframe, which is not assigned anywhere
        in this class -- presumably set during blogger preparation; confirm.
        """
        centered = f'<p style="text-align: center;">{self.iframe}</p>'
        if self.caption:
            return f'%s\n{CAPTION_PAT}' % (centered, self.caption)
        return centered
class PostSubdir(PostItem):
    """A subdirectory of the media directory, shown as a single thumbnail.

    The attributes `htmname` (path of the subdirectory's own html page),
    `posts` (the posts it contains) and `caption` are set externally after
    construction (NOTE(review): not assigned in this class -- confirm
    against the gallery builder).
    """
    def to_html_dcim(self, args):
        """Generate the subdirectory's html page, then return the thumbnail
        linking to it (captioned variant when a caption is set)."""
        basename = os.path.basename(self.htmname)
        posts = self.posts
        title = self.caption
        print_html(args, posts, title, self.htmname)
        if not self.caption:
            return DIRPOST % (basename, self.thumb, *self.thumbsize)
        else:
            return DIRPOSTCAPTION % (basename, self.thumb, *self.thumbsize, self.caption)
def relative_url(path, root):
    """
    returns a normalized url to path relative from root

    Backslashes are converted to forward slashes on Windows and the result
    is percent-quoted for embedding in html.
    """
    try:
        url = os.path.relpath(path, root)
    except (TypeError, ValueError):
        # Bug fix: the previous bare `except:` then referenced the not-yet
        # assigned name `url` in the message (UnboundLocalError); report the
        # offending inputs instead. error() is assumed to abort.
        error('Unable to make a relative url:', path, root)
    url = url.replace('\\', '/') if os.sep == '\\' else url
    return urllib.parse.quote(url)
# -- Markdown parser ----------------------------------------------------------
def parse_markdown(filename):
    """
    Generate Post objects from markdown. Date must be present in each post and
    posts must be ordered by date.

    Returns a (title, posts) tuple. The title comes from a leading '# ...'
    line when present, otherwise None. Records are separated by lines
    starting with '___'. Calls error() (fatal) on a missing file or
    out-of-order posts.
    """
    if not os.path.exists(filename):
        error('File not found', filename)
    posts = list()
    with open(filename, encoding='utf-8') as f:
        line = next(f)
        if line.startswith('# '):
            title = line[2:].strip()
            record = []
            # skip the line after the title
            # NOTE(review): a file ending right after the title line would
            # raise StopIteration here -- confirm whether that can happen.
            next(f)
        else:
            title = None
            record = [line]
        for line in f:
            if not line.startswith('___'):
                record.append(line)
            else:
                posts.append(Post.from_markdown(record))
                record = []
    # set rank of posts in date
    daterank = defaultdict(int)
    for post in posts:
        daterank[post.date] += 1
        post.daterank = daterank[post.date]
    # check post order
    for post1, post2 in zip(posts[:-1], posts[1:]):
        if post1.date > post2.date:
            error('Posts are not ordered', f'{post1.date} > {post2.date}')
    return title, posts
# -- Markdown printer ---------------------------------------------------------
def print_markdown(posts, title, fullname):
    """Write the diary back to a markdown file.

    Emits '# title', then for each post its '[yyyy/mm/dd]' date line, the
    text wrapped at 78 columns, the media lines, and a '______' separator.
    """
    with open(fullname, 'wt', encoding='utf-8') as fdst:
        fdst.write(f'# {title}\n\n')
        for post in posts:
            fdst.write(f'[{post.date[0:4]}/{post.date[4:6]}/{post.date[6:8]}]\n')
            if post.text:
                fdst.write('\n')
                for line in post.text.splitlines():
                    if line:
                        for chunk in textwrap.wrap(line, width=78):
                            fdst.write(chunk + '\n')
                    else:
                        fdst.write('\n')
            if post.medias:
                fdst.write('\n')
                for media in post.medias:
                    fdst.write(media.to_markdown() + '\n')
            fdst.write('______\n')
# -- html printer -------------------------------------------------------------
def compose_html_reduced(args, posts, title, target):
    """Return the html lines of a reduced page (no buttons, no gallery script)."""
    lines = [START % title]
    for post in posts:
        lines.extend(fragment.strip() for fragment in post.to_html(args, target))
        lines.append('')
    lines.append(END)
    return lines
def compose_html_full(args, posts, title, target):
    """Return the html lines of a full page, with view-toggle buttons for a
    diary and the photobox activation script for every gallery div."""
    lines = [START % title]
    if args.diary:
        lines.append(BUTTONS)
    for post in posts:
        for fragment in post.to_html(args, target):
            lines.append(fragment.strip())
        lines.append('')
    # activate photobox on each gallery div emitted above
    lines.append('<script>')
    for post in posts:
        if post.medias:
            lines.append(gallery_call(args, f'gallery-blog-{post.date}-{post.daterank}'))
        if post.dcim:
            lines.append(gallery_call(args, f'gallery-dcim-{post.date}-{post.daterank}'))
    lines.append('</script>')
    lines.append(END)
    return lines
def print_html_to_stream(args, posts, title, stream, target):
    """Write the composed html page to an open text stream.

    'regular' pages get the full layout; anything else (blogger) the
    reduced one.
    """
    composer = compose_html_full if target == 'regular' else compose_html_reduced
    for line in composer(args, posts, title, target):
        print(line, file=stream)
def print_html(args, posts, title, html_name, target='regular'):
assert target in ('regular', 'blogger')
with io.StringIO() as f:
print_html_to_stream(args, posts, title, f, target)
html = f.getvalue()
if html_name:
if os.path.exists(html_name):
# test if the generated html is identical to the one already on disk
| |
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
plt.plot(t_range, self.wbc_feet_acc_target[:, i % 3, np.int(i/3)], color='r', linewidth=3, marker='')
plt.legend([lgd_Y[i % 3] + " " + lgd_X[np.int(i/3)]+" Ref"], prop={'size': 8})
plt.suptitle("Reference feet accelerations (base frame)")
# LOG_Q
lgd = ["Position X", "Position Y", "Position Z", "Position Roll", "Position Pitch", "Position Yaw"]
plt.figure()
for i in range(6):
if i == 0:
ax0 = plt.subplot(3, 2, index6[i])
else:
plt.subplot(3, 2, index6[i], sharex=ax0)
if i in [0, 1]:
plt.plot(t_range, self.loop_pos_virtual_world[:, i], "b", linewidth=3)
plt.plot(t_range, self.loop_pos_virtual_world[:, i], "r", linewidth=3)
elif i == 5:
plt.plot(t_range, self.loop_pos_virtual_world[:, 2], "b", linewidth=3)
plt.plot(t_range, self.loop_pos_virtual_world[:, 2], "r", linewidth=3)
else:
plt.plot(t_range, self.planner_xref[:, i, 0], "b", linewidth=2)
plt.plot(t_range, self.planner_xref[:, i, 1], "r", linewidth=3)
if i < 3:
plt.plot(t_range, loggerSensors.mocapPosition[:, i], "k", linewidth=3)
else:
plt.plot(t_range, self.mocap_RPY[:, i-3], "k", linewidth=3)
# plt.plot(t_range, self.log_q[i, :], "grey", linewidth=4)
# plt.plot(t_range[:-2], self.log_x_invkin[i, :-2], "g", linewidth=2)
# plt.plot(t_range[:-2], self.log_x_ref_invkin[i, :-2], "violet", linewidth=2, linestyle="--")
plt.legend(["Robot state", "Robot reference state", "Ground truth"], prop={'size': 8})
plt.ylabel(lgd[i])
plt.suptitle("Measured & Reference position and orientation")
# LOG_V
lgd = ["Linear vel X", "Linear vel Y", "Linear vel Z",
"Angular vel Roll", "Angular vel Pitch", "Angular vel Yaw"]
plt.figure()
for i in range(6):
if i == 0:
ax0 = plt.subplot(3, 2, index6[i])
else:
plt.subplot(3, 2, index6[i], sharex=ax0)
plt.plot(t_range, self.loop_h_v[:, i], "b", linewidth=2)
plt.plot(t_range, self.joy_v_ref[:, i], "r", linewidth=3)
if i < 3:
plt.plot(t_range, self.mocap_b_v[:, i], "k", linewidth=3)
# plt.plot(t_range, self.esti_FK_lin_vel[:, i], "violet", linewidth=3, linestyle="--")
plt.plot(t_range, self.esti_filt_lin_vel[:, i], "violet", linewidth=3, linestyle="--")
else:
plt.plot(t_range, self.mocap_b_w[:, i-3], "k", linewidth=3)
"""N = 2000
y = np.convolve(self.mocap_b_w[:, i-3], np.ones(N)/N, mode='valid')
plt.plot(t_range[int(N/2)-1:-int(N/2)], y, linewidth=3, linestyle="--")"""
# plt.plot(t_range, self.log_dq[i, :], "g", linewidth=2)
# plt.plot(t_range[:-2], self.log_dx_invkin[i, :-2], "g", linewidth=2)
# plt.plot(t_range[:-2], self.log_dx_ref_invkin[i, :-2], "violet", linewidth=2, linestyle="--")
plt.legend(["Robot state", "Robot reference state", "Ground truth"], prop={'size': 8})
plt.ylabel(lgd[i])
plt.suptitle("Measured & Reference linear and angular velocities")
"""plt.figure()
plt.plot(t_range[:-2], self.log_x[6, :-2], "b", linewidth=2)
plt.plot(t_range[:-2], self.log_x_cmd[6, :-2], "r", linewidth=2)
plt.plot(t_range[:-2], self.log_dx_invkin[0, :-2], "g", linewidth=2)
plt.plot(t_range[:-2], self.log_dx_ref_invkin[0, :-2], "violet", linewidth=2)
plt.legend(["WBC integrated output state", "Robot reference state",
"Task current state", "Task reference state"])"""
# Analysis of the footstep locations (current and future) with a slider to move along time
# self.slider_predicted_footholds()
# Analysis of the footholds locations during the whole experiment
"""import utils_mpc
import pinocchio as pin
f_c = ["r", "b", "forestgreen", "rebeccapurple"]
quat = np.zeros((4, 1))
steps = np.zeros((12, 1))
o_step = np.zeros((3, 1))
plt.figure()
plt.plot(self.loop_o_q_int[:, 0], self.loop_o_q_int[:, 1], linewidth=2, color="k")
for i in range(self.planner_fsteps.shape[0]):
fsteps = self.planner_fsteps[i]
RPY = utils_mpc.quaternionToRPY(self.loop_o_q_int[i, 3:7])
quat[:, 0] = utils_mpc.EulerToQuaternion([0.0, 0.0, RPY[2]])
oRh = pin.Quaternion(quat).toRotationMatrix()
for j in range(4):
#if np.any(fsteps[k, (j*3):((j+1)*3)]) and not np.array_equal(steps[(j*3):((j+1)*3), 0],
# fsteps[k, (j*3):((j+1)*3)]):
# steps[(j*3):((j+1)*3), 0] = fsteps[k, (j*3):((j+1)*3)]
# o_step[:, 0:1] = oRh @ steps[(j*3):((j+1)*3), 0:1] + self.loop_o_q_int[i:(i+1), 0:3].transpose()
o_step[:, 0:1] = oRh @ fsteps[0:1, (j*3):((j+1)*3)].transpose() + self.loop_o_q_int[i:(i+1), 0:3].transpose()
plt.plot(o_step[0, 0], o_step[1, 0], linestyle=None, linewidth=1, marker="o", color=f_c[j])
"""
lgd1 = ["HAA", "HFE", "Knee"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
tau_fb = self.wbc_P[:, i] * (self.wbc_q_des[:, i] - self.esti_q_filt[:, 7+i]) + \
self.wbc_D[:, i] * (self.wbc_v_des[:, i] - self.esti_v_filt[:, 6+i])
h1, = plt.plot(t_range, self.wbc_tau_ff[:, i], "r", linewidth=3)
h2, = plt.plot(t_range, tau_fb, "b", linewidth=3)
h3, = plt.plot(t_range, self.wbc_tau_ff[:, i] + tau_fb, "g", linewidth=3)
h4, = plt.plot(t_range[:-1], loggerSensors.torquesFromCurrentMeasurment[1:, i],
"violet", linewidth=3, linestyle="--")
plt.xlabel("Time [s]")
plt.ylabel(lgd1[i % 3]+" "+lgd2[int(i/3)]+" [Nm]")
tmp = lgd1[i % 3]+" "+lgd2[int(i/3)]
plt.legend([h1, h2, h3, h4], ["FF "+tmp, "FB "+tmp, "PD+ "+tmp, "Meas "+tmp], prop={'size': 8})
plt.ylim([-8.0, 8.0])
plt.suptitle("FF torques & FB torques & Sent torques & Meas torques")
lgd1 = ["Ctct force X", "Ctct force Y", "Ctct force Z"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
h1, = plt.plot(t_range, self.mpc_x_f[:, 12+i, 0], "r", linewidth=3)
h2, = plt.plot(t_range, self.wbc_f_ctc[:, i], "b", linewidth=3, linestyle="--")
plt.xlabel("Time [s]")
plt.ylabel(lgd1[i % 3]+" "+lgd2[int(i/3)]+" [N]")
plt.legend([h1, h2], ["MPC " + lgd1[i % 3]+" "+lgd2[int(i/3)],
"WBC " + lgd1[i % 3]+" "+lgd2[int(i/3)]], prop={'size': 8})
if (i % 3) == 2:
plt.ylim([-0.0, 26.0])
else:
plt.ylim([-26.0, 26.0])
plt.suptitle("Contact forces (MPC command) & WBC QP output")
lgd1 = ["HAA", "HFE", "Knee"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
h1, = plt.plot(t_range, self.wbc_q_des[:, i], color='r', linewidth=3)
h2, = plt.plot(t_range, self.esti_q_filt[:, 7+i], color='b', linewidth=3)
plt.xlabel("Time [s]")
plt.ylabel(lgd1[i % 3]+" "+lgd2[int(i/3)]+" [rad]")
plt.legend([h1, h2], ["Ref "+lgd1[i % 3]+" "+lgd2[int(i/3)],
lgd1[i % 3]+" "+lgd2[int(i/3)]], prop={'size': 8})
plt.suptitle("Desired actuator positions & Measured actuator positions")
# Evolution of predicted trajectory along time
log_t_pred = np.array([k*self.dt*10 for k in range(self.mpc_x_f.shape[2])])
log_t_ref = np.array([k*self.dt*10 for k in range(self.planner_xref.shape[2])])
"""from IPython import embed
embed()"""
titles = ["X", "Y", "Z", "Roll", "Pitch", "Yaw"]
step = 1000
plt.figure()
for j in range(6):
plt.subplot(3, 2, index6[j])
c = [[i/(self.mpc_x_f.shape[0]+5), 0.0, i/(self.mpc_x_f.shape[0]+5)]
for i in range(0, self.mpc_x_f.shape[0], step)]
for i in range(0, self.mpc_x_f.shape[0], step):
h1, = plt.plot(log_t_pred+(i+10)*self.dt,
self.mpc_x_f[i, j, :], "b", linewidth=2, color=c[int(i/step)])
h2, = plt.plot(log_t_ref+i*self.dt,
self.planner_xref[i, j, :], linestyle="--", marker='x', color="g", linewidth=2)
#h3, = plt.plot(np.array([k*self.dt for k in range(self.mpc_x_f.shape[0])]),
# self.planner_xref[:, j, 0], linestyle=None, marker='x', color="r", linewidth=1)
plt.xlabel("Time [s]")
plt.legend([h1, h2, h3], ["Output trajectory of MPC",
"Input trajectory of planner"]) #, "Actual robot trajectory"])
plt.title("Predicted trajectory for " + titles[j])
plt.suptitle("Analysis of trajectories in position and orientation computed by the MPC")
plt.figure()
for j in range(6):
plt.subplot(3, 2, index6[j])
c = [[i/(self.mpc_x_f.shape[0]+5), 0.0, i/(self.mpc_x_f.shape[0]+5)]
for i in range(0, self.mpc_x_f.shape[0], step)]
for i in range(0, self.mpc_x_f.shape[0], step):
h1, = plt.plot(log_t_pred+(i+10)*self.dt,
self.mpc_x_f[i, j+6, :], "b", linewidth=2, color=c[int(i/step)])
h2, = plt.plot(log_t_ref+i*self.dt,
self.planner_xref[i, j+6, :], linestyle="--", marker='x', color="g", linewidth=2)
h3, = plt.plot(np.array([k*self.dt for k in range(self.mpc_x_f.shape[0])]),
self.planner_xref[:, j+6, 0], linestyle=None, marker='x', color="r", linewidth=1)
plt.xlabel("Time [s]")
plt.legend([h1, h2, h3], ["Output trajectory of MPC",
"Input trajectory of planner", "Actual robot trajectory"])
plt.title("Predicted trajectory for velocity in " + titles[j])
plt.suptitle("Analysis of trajectories of linear and angular velocities computed by the MPC")
step = 1000
lgd1 = ["Ctct force X", "Ctct force Y", "Ctct force Z"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, index12[i])
else:
plt.subplot(3, 4, index12[i], sharex=ax0)
h1, = plt.plot(t_range, self.mpc_x_f[:, 12+i, 0], "r", linewidth=3)
h2, = plt.plot(t_range, self.wbc_f_ctc[:, i], "b", linewidth=3, linestyle="--")
plt.xlabel("Time [s]")
plt.ylabel(lgd1[i % 3]+" "+lgd2[int(i/3)]+" [N]")
plt.legend([h1, h2], ["MPC " + lgd1[i % 3]+" "+lgd2[int(i/3)],
"WBC " + lgd1[i % 3]+" "+lgd2[int(i/3)]], prop={'size': 8})
if (i % 3) == 2:
plt.ylim([-0.0, 26.0])
else:
plt.ylim([-26.0, 26.0])
plt.suptitle("Contact forces (MPC command) & WBC QP output")
lgd1 = ["Ctct force X", "Ctct force Y", "Ctct force Z"]
lgd2 = ["FL", "FR", "HL", "HR"]
plt.figure()
for i in range(4):
if i == 0:
ax0 = plt.subplot(1, 4, i+1)
else:
plt.subplot(1, 4, i+1, sharex=ax0)
for k in range(0, self.mpc_x_f.shape[0], step):
h2, = plt.plot(log_t_pred+k*self.dt, self.mpc_x_f[k, 12+(3*i+2), :], linestyle="--", marker='x', linewidth=2)
h1, = plt.plot(t_range, self.mpc_x_f[:, 12+(3*i+2), 0], "r", linewidth=3)
# h3, = plt.plot(t_range, self.wbc_f_ctc[:, i], "b", linewidth=3, linestyle="--")
plt.plot(t_range, self.esti_feet_status[:, i], "k", linestyle="--")
plt.xlabel("Time [s]")
plt.ylabel(lgd2[i]+" [N]")
plt.legend([h1, h2], ["MPC "+lgd2[i],
"MPC "+lgd2[i]+" trajectory"])
plt.ylim([-1.0, 26.0])
plt.suptitle("Contact forces trajectories & Actual forces trajectories")
# Analysis of the complementary filter behaviour
clr = ["b", "darkred", "forestgreen"]
# Velocity complementary filter
lgd_Y = ["dx", "ddx", "alpha dx", "dx_out", "dy", "ddy", "alpha dy", "dy_out", "dz", "ddz", "alpha dz", "dz_out"]
plt.figure()
for i in range(12):
if i == 0:
ax0 = plt.subplot(3, 4, i+1)
else:
plt.subplot(3, 4, i+1, sharex=ax0)
if i % 4 == 0:
plt.plot(t_range, self.esti_HP_x[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # x input of the velocity complementary filter
elif i % 4 == 1:
plt.plot(t_range, self.esti_HP_dx[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # dx input of the velocity complementary filter
elif i % 4 == 2:
plt.plot(t_range, self.esti_HP_alpha[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # alpha parameter of the velocity complementary filter
else:
plt.plot(t_range, self.esti_HP_filt_x[:, int(i/4)], color=clr[int(i/4)], linewidth=3, marker='') # | |
import flask
from flask import g
from flask import render_template
from flask import request
from flask import url_for
import uuid
import hashlib
import sys
import json
import logging
from datetime import datetime
# Mongo database
from pymongo import MongoClient
from bson.objectid import ObjectId
# Date handling
import arrow # Replacement for datetime, based on moment.js
# import datetime # But we still need time
from dateutil import tz # For interpreting local times
# OAuth2 - Google library implementation for convenience
from oauth2client import client
import httplib2 # used in oauth2 flow
# Google API for services
from apiclient import discovery
###
# Globals
###
import config
# Load configuration differently depending on whether we run directly
# (development server) or behind a proxy (e.g. gunicorn behind nginx).
if __name__ == "__main__":
    CONFIG = config.configuration()
else:
    CONFIG = config.configuration(proxied=True)

# Connection URL for the Mongo database that holds meeting records.
MONGO_CLIENT_URL = "mongodb://{}:{}@{}:{}/{}".format(
    CONFIG.DB_USER,
    CONFIG.DB_USER_PW,
    CONFIG.DB_HOST,
    CONFIG.DB_PORT,
    CONFIG.DB)
print("Using URL '{}'".format(MONGO_CLIENT_URL))

app = flask.Flask(__name__)
app.debug = CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
app.secret_key = CONFIG.SECRET_KEY

SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = CONFIG.GOOGLE_KEY_FILE  ## You'll need this
APPLICATION_NAME = 'MeetMe class project'

try:
    dbclient = MongoClient(MONGO_CLIENT_URL)
    db = getattr(dbclient, CONFIG.DB)
    collection = db.meetings
except Exception as err:
    # Fix: narrowed from a bare 'except:' (which also swallowed
    # SystemExit/KeyboardInterrupt) and report the actual cause.
    print("Failure opening database. Is Mongo running? Correct password?")
    print("Error: {}".format(err))
    sys.exit(1)
#############################
#
# Pages (routed from URLs)
#
#############################
@app.route("/")
@app.route("/index")
def index():
    """Render the main meeting page."""
    app.logger.debug("Index page entry")
    # NOTE(review): get_memos is not defined anywhere in this module's
    # visible code, and 'meeting.html' may not use g.memos -- this looks
    # like a leftover from the memos example app; confirm before removing.
    g.memos = get_memos()
    return flask.render_template('meeting.html')
@app.route("/choose")
def choose():
    """List the user's Google calendars, authorizing first if needed.

    The OAuth2 redirect must be issued as a 'return' from this view,
    which is why the credential check cannot be factored out into a
    helper function.
    """
    app.logger.debug("Checking credentials for Google calendar access")
    creds = valid_credentials()
    if creds:
        gcal_service = get_gcal_service(creds)
        app.logger.debug("Returned from get_gcal_service")
        flask.g.calendars = list_calendars(gcal_service)
        return render_template('meeting.html')
    app.logger.debug("Redirecting to authorization")
    return flask.redirect(flask.url_for('oauth2callback'))
@app.route("/addMeeting")
def addMeeting():
    """Show the form for creating a new meeting.

    TODO: ask the user for a unique name (and verify uniqueness) plus an
    optional password, then send them to a time-selection page whose
    choice is saved to the DB; free times for the current user are
    computed and stored ready for the other participants to use.
    """
    app.logger.debug("Add page entered")
    return flask.render_template('addMeeting.html')
@app.route("/viewMeeting")
def viewMeeting():
    """Display a meeting once the user supplies its id and password.

    Commenting (handled the same way memos were added) is only allowed
    after the user has entered their availability.
    """
    app.logger.debug("View page entered")
    app.logger.debug(flask.session['meeting'])
    return flask.render_template('viewMeeting.html')
@app.route("/_view_Meeting")
def _viewMeeting():
    """AJAX endpoint: load the requested meeting record into the session.

    Commenting is only allowed after the user adds their availability.
    """
    # Fix: this is a GET route, so parameters arrive in the query string
    # (request.args); request.form is always empty here.
    meeting_id = request.args.get("id", type=str)
    # Fix: query by the id the client sent instead of a hard-coded
    # ObjectId left over from debugging. Also avoids shadowing the
    # builtin 'id'.
    for record in collection.find({"_id": ObjectId(meeting_id)}):
        flask.session['meeting'] = record
    app.logger.debug(flask.session['meeting'])
    app.logger.debug("_View page entered")
    return 'Done'
@app.route("/_add_Meeting")
def addMemo():
    """Create a meeting record from form/session data and store it in Mongo.

    Also called directly from setrange(), so it must work whenever the
    begin/end date/time and freeTime session keys are populated.
    Requires Google credentials; redirects to the OAuth dance otherwise.
    """
    app.logger.debug("Checking credentials for Google calendar access")
    credentials = valid_credentials()
    if not credentials:
        app.logger.debug("Redirecting to authorization")
        return flask.redirect(flask.url_for('oauth2callback'))
    gcal_service = get_gcal_service(credentials)
    app.logger.debug("Returned from get_gcal_service")
    flask.g.calendars = list_calendars(gcal_service)
    app.logger.debug("Got a JSON request")
    # NOTE(review): title/pw are read from request.form while comment is
    # read from request.args -- on a GET route the form is normally
    # empty; confirm which one the client actually sends.
    title = request.form.get("title", type=str)
    pw = request.form.get("pw", type=str)
    comment = request.args.get("comment", type=str)
    record = {"name": title,
              'pw': hash_password(pw),
              "comments": [comment],
              "dateRange": [arrow.get(flask.session['begin_date']).format("YYYY-MM-DD"),
                            arrow.get(flask.session['end_date']).format("YYYY-MM-DD")],
              "timeRange": [arrow.get(flask.session['begin_time']).format("HH:mm"),
                            arrow.get(flask.session['end_time']).format("HH:mm")],
              "freeTime": flask.session["freeTime"]
              }
    print(record)
    collection.insert(record)
    # Fix: a Flask view must return a response; falling off the end
    # (returning None) raises an error. Mirror the other AJAX endpoints.
    return 'Done'
def hash_password(password):
    """Return 'sha256hex:salt' for *password*, salted with a random uuid4."""
    salt = uuid.uuid4().hex
    digest = hashlib.sha256(salt.encode() + password.encode()).hexdigest()
    return '{}:{}'.format(digest, salt)
def check_password(hashed_password, user_password):
    """Return True iff *user_password* matches a hash_password() result."""
    expected_digest, salt = hashed_password.split(':')
    candidate = hashlib.sha256(salt.encode() + user_password.encode()).hexdigest()
    return candidate == expected_digest
@app.route("/_delete")
def _delete():
    """AJAX endpoint: remove a meeting record given its ObjectId string."""
    app.logger.debug("Starting to delete")
    target_id = request.args.get("delete", type=str)
    collection.remove({"_id": ObjectId(target_id)})
    return "Nothing"
####
#
# Google calendar authorization:
# Returns us to the main /choose screen after inserting
# the calendar_service object in the session state. May
# redirect to OAuth server first, and may take multiple
# trips through the oauth2 callback function.
#
# Protocol for use ON EACH REQUEST:
# First, check for valid credentials
# If we don't have valid credentials
# Get credentials (jump to the oauth2 protocol)
# (redirects back to /choose, this time with credentials)
# If we do have valid credentials
# Get the service object
#
# The final result of successful authorization is a 'service'
# object. We use a 'service' object to actually retrieve data
# from the Google services. Service objects are NOT serializable ---
# we can't stash one in a cookie. Instead, on each request we
# get a fresh serivce object from our credentials, which are
# serializable.
#
# Note that after authorization we always redirect to /choose;
# If this is unsatisfactory, we'll need a session variable to use
# as a 'continuation' or 'return address' to use instead.
#
####
def valid_credentials():
    """
    Return OAuth2 credentials from the session if they are valid
    (a 'truthy' value). Return None (a 'falsy' value) when we have no
    credentials or they have expired or are otherwise invalid.
    """
    stored = flask.session.get('credentials')
    if stored is None:
        return None
    credentials = client.OAuth2Credentials.from_json(stored)
    if credentials.invalid or credentials.access_token_expired:
        return None
    return credentials
def get_gcal_service(credentials):
    """
    Build and return a Google calendar 'service' object used to obtain
    the list of calendars, busy times, etc. Requires authorization: if
    it is not yet in effect, control flow is interrupted by the OAuth
    dance and we come back to /choose *without* a service object; the
    second call then succeeds without additional authorization.
    """
    app.logger.debug("Entering get_gcal_service")
    authorized_http = credentials.authorize(httplib2.Http())
    service = discovery.build('calendar', 'v3', http=authorized_http)
    app.logger.debug("Returning service")
    return service
@app.route('/oauth2callback')
def oauth2callback():
    """
    Single callback target for the OAuth2 'flow'. We pass through here
    more than once as the flow's steps complete: the first visit (no
    'code' URL parameter yet) sends the user to Google's authorization
    page; the second visit (with 'code' set) exchanges the code for
    credentials, stores them in the session, and returns to /choose.
    """
    app.logger.debug("Entering oauth2callback")
    flow = client.flow_from_clientsecrets(
        CLIENT_SECRET_FILE,
        scope=SCOPES,
        redirect_uri=flask.url_for('oauth2callback', _external=True))
    # The redirect_uri above is where Google will send us back -- this
    # very function. We are not redirecting yet.
    app.logger.debug("Got flow")
    if 'code' not in flask.request.args:
        # First trip through: no 'code' yet, so do step 1 of the dance.
        app.logger.debug("Code not in flask.request.args")
        auth_uri = flow.step1_get_authorize_url()
        return flask.redirect(auth_uri)
    # Second trip through: Google redirected back with 'code' set.
    app.logger.debug("Code was in flask.request.args")
    auth_code = flask.request.args.get('code')
    credentials = flow.step2_exchange(auth_code)
    flask.session['credentials'] = credentials.to_json()
    app.logger.debug("Got credentials")
    return flask.redirect(flask.url_for('choose'))
#####
#
# Option setting: Buttons or forms that add some
# information into session state. Don't do the
# computation here; use of the information might
# depend on what other information we have.
# Setting an option sends us back to the main display
# page, where we may put the new information to use.
#
#####
@app.route('/setrange', methods=['POST'])
def setrange():
    """
    Handle the bootstrap daterange widget: stash the chosen date range
    and daily time range in the session, then create the meeting record.
    """
    app.logger.debug("Entering setrange")
    flask.flash("Setrange gave us '{}'".format(
        request.form.get('daterange')))
    daterange = request.form.get('daterange')
    start = request.form.get('start')
    end = request.form.get('end')
    flask.session['daterange'] = daterange
    # The widget sends "MM/DD/YYYY - MM/DD/YYYY": after split() the begin
    # date is element 0, the '-' separator element 1, the end date element 2.
    daterange_parts = daterange.split()
    flask.session['begin_date'] = interpret_date(daterange_parts[0])
    flask.session['end_date'] = interpret_date(daterange_parts[2])
    flask.session["begin_time"] = interpret_time(start)
    flask.session["end_time"] = interpret_time(end)
    flask.session["start"] = start
    flask.session["end"] = end
    # Fix: log the end date (element 2); element 1 is just the '-'.
    app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format(
        daterange_parts[0], daterange_parts[2],
        flask.session['begin_date'], flask.session['end_date']))
    # NOTE(review): addMemo()'s return value (possibly an OAuth redirect)
    # is discarded here -- confirm credentials are always valid by now.
    addMemo()
    return flask.render_template('meeting.html')
    # return flask.redirect(flask.url_for("choose"))
####
#
# Initialize session variables
#
####
def init_session_values():
    """
    Start with some reasonable defaults for date and time ranges.
    Note this must be run in app context ... can't call from main.
    """
    # Default date span = tomorrow to 1 week from now
    now = arrow.now('local')     # We really should be using tz from browser
    # NOTE(review): arrow >= 0.15 removed relative replace(); these calls
    # would need shift(days=+1/+7) there -- confirm the pinned version.
    tomorrow = now.replace(days=+1)
    nextweek = now.replace(days=+7)
    flask.session["begin_date"] = tomorrow.floor('day').isoformat()
    flask.session["end_date"] = nextweek.ceil('day').isoformat()
    flask.session["daterange"] = "{} - {}".format(
        tomorrow.format("MM/DD/YYYY"),
        nextweek.format("MM/DD/YYYY"))
    # Default time span each day, 8 to 5
def interpret_time( text ):
"""
Read time in a human-compatible format and
interpret as ISO format with local timezone.
May throw exception if time can't be interpreted. In that
case it will also flash a message explaining accepted formats.
"""
app.logger.debug("Decoding time '{}'".format(text))
time_formats | |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .. import robotapi, utils
from .basecontroller import _BaseController
from .cellinfo import (CellPosition, CellType, CellInfo, CellContent, ContentType)
from ..namespace.local_namespace import LocalNamespace
from ..utils import variablematcher
class StepController(_BaseController):
    """Controller wrapping a single test/keyword step (one grid row).

    Mediates between the grid editor, which addresses cells by column
    index, and the underlying robotapi step object, whose row is the
    concatenation assign-cells + keyword + argument-cells (+ comment).
    """

    # Matches BDD prefixes so e.g. 'Given My KW' still matches 'My KW'.
    _GIVEN_WHEN_THEN_MATCHER = re.compile(r'^(given|when|then|and|but)\s*',
                                          re.I)

    def __init__(self, parent, step):
        self._init(parent, step)
        # Preserve a trailing empty argument cell explicitly, otherwise
        # it would be lost when the row is serialized.
        self._step.args = self._change_last_empty_to_empty_var(
            self._step.args, self._step.comment)

    def _init(self, parent, step):
        # Shared by __init__ and subclasses that skip the args rewrite.
        self.parent = parent
        self._step = step
        # get_cell_info results are cached per column index.
        self._cell_info_cache = {}

    @property
    def display_name(self):
        return 'Step'

    @property
    def datafile_controller(self):
        return self.parent.datafile_controller

    def _change_last_empty_to_empty_var(self, args, comment):
        # With a trailing comment the empty cell is already protected.
        if comment:
            return args
        return args[:-1] + ['${EMPTY}'] if args and args[-1] == '' else args

    def get_keyword_info(self, kw):
        if not kw:
            return None
        return self.parent.get_keyword_info(kw)

    def __eq__(self, other):
        # NOTE(review): assumes 'other' has a _step attribute; comparing
        # against a non-StepController raises AttributeError. No __hash__
        # is defined either -- confirm instances are never hashed.
        if self is other:
            return True
        return self._steps_are_equal(self._step, other._step)

    def _steps_are_equal(self, fst, snd):
        if fst is snd:
            return True
        if not snd:
            return False
        return (fst.assign == snd.assign and
                fst.name == snd.name and
                fst.args == snd.args)

    def get_value(self, col):
        """Return the cell text at *col*, or '' past the end of the row."""
        values = self.as_list()
        if len(values) <= col:
            return ''
        return values[col]

    def get_cell_info(self, col):
        """Return (cached) CellInfo describing content/position of *col*."""
        if col not in self._cell_info_cache:
            position = self._get_cell_position(col)
            content = self._get_content_with_type(col, position)
            self._cell_info_cache[col] = self._build_cell_info(
                content, position)
        return self._cell_info_cache[col]

    @property
    def assignments(self):
        return self._step.assign

    def is_assigning(self, value):
        # '=' suffixes are optional in assignments, so strip them from
        # both sides before comparing.
        for assignment in self.assignments:
            if assignment.replace('=', '').strip() == \
                    value.replace('=', '').strip():
                return True
        return False

    def _build_cell_info(self, content, position):
        return CellInfo(content, position)

    def _get_cell_position(self, col):
        """Classify column *col* against the keyword's argument spec."""
        # TODO: refactor
        if self.parent.has_template():
            return CellPosition(CellType.UNKNOWN, None)
        # Shift so col 0 is the keyword cell (assign cells sit before it).
        col -= len(self._step.assign)
        if col < 0:
            return CellPosition(CellType.ASSIGN, None)
        if col == 0:
            return CellPosition(CellType.KEYWORD, None)
        info = self.get_keyword_info(self._step.name)
        if not info:
            return CellPosition(CellType.UNKNOWN, None)
        args = info.arguments
        args_amount = len(args)
        if args_amount == 0:
            return CellPosition(CellType.MUST_BE_EMPTY, None)
        if col >= args_amount and self._last_argument_is_varargs(args):
            return CellPosition(CellType.OPTIONAL, args[-1])
        # An earlier list/dict variable may consume any number of slots,
        # so nothing definite can be said about this column.
        if self._has_list_or_dict_var_value_before(col - 1):
            return CellPosition(CellType.UNKNOWN, None)
        if col > args_amount:
            return CellPosition(CellType.MUST_BE_EMPTY, None)
        if col <= self._number_of_mandatory_arguments(args, args_amount):
            return CellPosition(CellType.MANDATORY, args[col - 1])
        return CellPosition(CellType.OPTIONAL, args[col - 1])

    def _number_of_mandatory_arguments(self, args, args_amount):
        # Arguments with '=' have defaults; a trailing varargs is never
        # mandatory either.
        defaults = [arg for arg in args if '=' in arg]
        n = args_amount - len(defaults)
        if self._last_argument_is_varargs(args):
            n -= 1
        return n

    def _last_argument_is_varargs(self, args):
        return args[-1].startswith('*')

    def _has_list_or_dict_var_value_before(self, arg_index):
        """True if any arg up to *arg_index* is a whole list/dict variable."""
        for idx, value in enumerate(self.args):
            if idx > arg_index:
                return False
            if variablematcher.is_list_variable(value) and \
                    not variablematcher.is_list_variable_subitem(value):
                return True
            if robotapi.is_dict_var(value) and \
                    not variablematcher.is_dict_var_access(value):
                return True
        return False

    def _get_content_with_type(self, col, position):
        """Build CellContent (content type + value + source) for *col*."""
        value = self.get_value(col)
        if self._is_commented(col):
            return CellContent(ContentType.COMMENTED, value)
        last_none_empty = self._get_last_none_empty_col_idx()
        if isinstance(last_none_empty, int) and last_none_empty < col:
            return CellContent(ContentType.EMPTY, value)
        if variablematcher.is_variable(value):
            if self._is_unknow_variable(value, position):
                return CellContent(ContentType.UNKNOWN_VARIABLE, value)
            return CellContent(ContentType.VARIABLE, value)
        if self.is_user_keyword(value):
            return CellContent(
                ContentType.USER_KEYWORD, value,
                self.get_keyword_info(value).source)
        if self.is_library_keyword(value):
            return CellContent(
                ContentType.LIBRARY_KEYWORD, value,
                self.get_keyword_info(value).source)
        if col == 0 and value == 'END':
            return CellContent(ContentType.END, value)
        return CellContent(ContentType.STRING, value)

    def _is_unknow_variable(self, value, position):
        # Assignment targets are by definition not 'unknown'.
        if position.type == CellType.ASSIGN:
            return False
        is_known = self._get_local_namespace().has_name(value)
        if is_known:
            return False
        # The variable may be an item access such as ${var}[0]; retry
        # with only the base name (first run of word chars inside {}).
        inner_value = value[2:-1]
        modified = re.split(r'\W', inner_value, 1)[0]
        return not self._get_local_namespace().has_name(
            '%s{%s}' % (value[0], modified))

    def _get_local_namespace(self):
        index = self.parent.index_of_step(self._step)
        return LocalNamespace(
            self.parent, self.datafile_controller._namespace, index)

    def _get_last_none_empty_col_idx(self):
        # Index of the last cell with visible content, or None.
        values = self.as_list()
        for i in reversed(range(len(values))):
            if values[i].strip() != '':
                return i
        return None

    def is_modifiable(self):
        return self.datafile_controller.is_modifiable()

    def is_user_keyword(self, value):
        return self.parent.is_user_keyword(value)

    def is_library_keyword(self, value):
        return self.parent.is_library_keyword(value)

    def as_list(self):
        return self._step.as_list()

    def contains_variable(self, name):
        return any(variablematcher.value_contains_variable(item, name)
                   for item in self.as_list())

    def contains_variable_assignment(self, name):
        return any(variablematcher.value_contains_variable(item, "%s=" % name)
                   for item in self.as_list())

    def contains_keyword(self, name):
        return any(self._kw_name_match(item, name)
                   for item in [self.keyword or ''] + self.args)

    def _kw_name_match(self, item, expected):
        # *expected* may be a plain string or a compiled regex; for
        # strings the BDD prefix (Given/When/Then/And/But) is ignored.
        if isinstance(expected, str):
            return utils.eq(item, expected) or (
                self._GIVEN_WHEN_THEN_MATCHER.match(item) and
                utils.eq(
                    self._GIVEN_WHEN_THEN_MATCHER.sub('', item), expected))
        return expected.match(item)

    def replace_keyword(self, new_name, old_name):
        """Rename usages of *old_name* in this step (keyword and args)."""
        if self._kw_name_match(self.keyword or '', old_name):
            self._step.name = self._kw_name_replace(
                self.keyword, new_name, old_name)
        for index, value in enumerate(self.args):
            if self._kw_name_match(value, old_name):
                self._step.args[index] = self._kw_name_replace(
                    value, new_name, old_name)

    def _kw_name_replace(self, old_value, new_match, old_match):
        # Keep the BDD prefix of the old value unless the old match
        # itself already carried that same prefix.
        old_prefix_matcher = self._GIVEN_WHEN_THEN_MATCHER.match(old_value)
        if not old_prefix_matcher:
            return new_match
        old_prefix = old_prefix_matcher.group(0)
        old_match_matcher = self._GIVEN_WHEN_THEN_MATCHER.match(old_match)
        if old_match_matcher and old_match_matcher.group(0) == old_prefix:
            return new_match
        return old_prefix + new_match

    @property
    def datafile(self):
        return self.parent.datafile

    @property
    def keyword(self):
        return self._step.name

    @property
    def assign(self):
        return self._step.assign

    @property
    def args(self):
        return self._step.args

    @property
    def vars(self):
        return self._step.vars

    def change(self, col, new_value):
        """Set cell *col* to *new_value*, growing the row if needed."""
        cells = self.as_list()
        if col >= len(cells):
            cells = cells + ['' for _ in range(col - len(cells) + 1)]
        cells[col] = new_value
        comment = self._get_comment(cells)
        if comment:
            cells.pop()
        self._recreate(cells, comment)

    def comment(self):
        # Commenting a row == prefixing it with the 'Comment' keyword.
        self.shift_right(0)
        self.change(0, 'Comment')

    def _is_commented(self, col):
        """True if column *col* falls inside a commented-out region."""
        if self._has_comment_keyword():
            return col > self._keyword_column
        for i in range(min(col + 1, len(self.as_list()))):
            if self.get_value(i).strip().startswith('#'):
                return True
        return False

    @property
    def _keyword_column(self):
        return 0

    def _has_comment_keyword(self):
        if self.keyword is None:
            return False
        return self.keyword.strip().lower() == "comment"

    def uncomment(self):
        if self._step.name == 'Comment':
            self.shift_left(0)

    def shift_right(self, from_column):
        """Insert an empty cell at *from_column* (comment stays last)."""
        cells = self.as_list()
        comment = self._get_comment(cells)
        if len(cells) > from_column:
            if comment:
                cells.pop()
            cells = cells[:from_column] + [''] + cells[from_column:]
            self._recreate(cells, comment)

    def shift_left(self, from_column):
        """Delete the cell at *from_column*."""
        cells = self.as_list()
        # DEBUG No need to not delete comment = self._get_comment(cells)
        if len(cells) > from_column:
            # if comment: # DEBUG No need to not delete comment
            #     cells.pop()
            cells = cells[:from_column] + cells[from_column + 1:]
            self._recreate(cells) #, comment) # DEBUG No need to not delete

    def insert_before(self, new_step):
        """Insert *new_step* before this one, adjusting indentation."""
        steps = self.parent.get_raw_steps()
        index = steps.index(self._step)
        if self._is_end_step(new_step.as_list()):
            if self._is_intended_step(steps[index].as_list()):
                self.parent.step(index).shift_left(1) # DEBUG Hard coded!
                steps = self.parent.get_raw_steps()
        elif not self._is_intended_step(new_step.as_list()) and\
                self._is_intended_step(steps[index].as_list()) and\
                isinstance(new_step, StepController):
            new_step.shift_right(1) # DEBUG Hard coded!
        self.parent.set_raw_steps(steps[:index] + [new_step] + steps[index:])

    def insert_after(self, new_step):
        """Insert *new_step* after this one, adjusting indentation."""
        steps = self.parent.get_raw_steps()
        index = steps.index(self._step) + 1
        if not self._is_end_step(new_step.as_list()):
            if self._is_intended_step(steps[index-1].as_list()):
                if not self._is_intended_step(new_step.as_list()):
                    new_step.shift_right(0) # DEBUG Hard coded!
        else:
            if self._is_intended_step(new_step.as_list()) and isinstance(new_step, StepController):
                new_step.shift_left(1) # DEBUG Hard coded!
        self.parent.set_raw_steps(steps[:index] + [new_step] + steps[index:])

    def remove_empty_columns_from_end(self):
        cells = self.as_list()
        while cells != [] and cells[-1].strip() == '':
            cells.pop()
        self._recreate(cells)

    def remove_empty_columns_from_beginning(self):
        cells = self._step.as_list()
        while cells != [] and cells[0].strip() == '':
            cells = cells[1:]
        self._recreate(cells)

    def remove(self):
        self.parent.data.steps.remove(self._step)
        self.parent._clear_cached_steps()

    def move_up(self):
        previous_step = self.parent.step(self._index() - 1)
        self.remove()
        previous_step.insert_before(self._step)

    def move_down(self):
        next_step = self.parent.step(self._index() + 1)
        self.remove()
        next_step.insert_after(self._step)

    def _index(self):
        return self.parent.index_of_step(self._step)

    def has_only_comment(self):
        non_empty_cells = [cell for cell
                           in self._step.as_list() if cell.strip() != '']
        return len(non_empty_cells) == 1 and \
            non_empty_cells[0].startswith('#')

    def _get_comment(self, cells):
        # Only a final cell written as '# ...' counts as the row comment.
        if not cells:
            return None
        return cells[-1][2:].strip() if cells[-1].startswith('# ') else None

    def _recreate(self, cells, comment=None):
        """Rebuild the underlying step object from *cells*.

        Partial for-loop headers and indented (loop-body) rows are
        converted to the matching specialized step types; the following
        row may need to be recreated as well when this row changes type.
        """
        if self._is_partial_for_loop_step(cells):
            self._recreate_as_partial_for_loop(cells, comment)
        elif self._is_intended_step(cells):
            i = self._index()
            previous_step = self.parent.step(i - 1)
            if type(previous_step) == ForLoopStepController:
                self._recreate_as_intended_step(
                    previous_step, cells, comment, i)
            elif type(previous_step) == IntendedStepController:
                self._recreate_as_intended_step(
                    previous_step.parent, cells, comment, i)
            else:
                self._step.__init__(cells, comment)
        else:
            self._step.__init__(cells, comment)

    def _is_partial_for_loop_step(self, cells):
        return cells and (cells[0].replace(' ', '').upper() == ':FOR'
                          or cells[0] == 'FOR')

    def _is_intended_step(self, cells):
        # 'Intended' here means indented: first cell empty but the row
        # not empty, and not the first row, and not an END marker.
        return cells and not cells[0].strip() and not self._is_end_step(cells)\
            and any(c.strip() for c in cells) and self._index() > 0

    def _is_end_step(self, cells):
        return cells and ('END' in cells) # cells[0] == 'END' # TODO Improve check

    def _recreate_as_partial_for_loop(self, cells, comment):
        index = self._index()
        self.parent.replace_step(index, PartialForLoop(
            cells[1:], first_cell=cells[0], comment=comment))
        self._recreate_next_step(index)

    def _recreate_as_intended_step(self, for_loop_step, cells, comment, index):
        self.remove()
        for_loop_step.add_step(robotapi.Step(cells[1:], comment))
        self._recreate_next_step(index)

    def _recreate_next_step(self, index):
        # The row after the edited one may need a type change too.
        if len(self.parent.steps) > index + 1:
            next_step = self.parent.step(index + 1)
            next_step._recreate(next_step.as_list())

    def notify_value_changed(self):
        self.parent.notify_steps_changed()
class PartialForLoop(robotapi.ForLoop):
    """A for-loop header row that is still incomplete / being edited."""

    def __init__(self, cells, first_cell='FOR', comment=None):
        # first_cell preserves the original loop marker (':FOR' or 'FOR').
        self._cells = cells
        self._first_cell = first_cell
        try:
            robotapi.ForLoop.__init__(self, cells, comment)
        except TypeError:  # New RF 3.1 syntax
            # NOTE(review): self.parent is never assigned before this
            # point, so this fallback raises AttributeError -- confirm
            # where the parent is supposed to come from.
            robotapi.ForLoop.__init__(self, self.parent, cells, comment)

    def as_list(self, indent=False, include_comment=False):
        # indent/include_comment are accepted for interface compatibility
        # but ignored: the comment is always appended.
        return [self._first_cell] + self._cells + self.comment.as_list()
class ForLoopStepController(StepController):
def __init__(self, parent, step):
self._init(parent, step)
@property
def name(self):
return self.parent.name
@property
def assignments(self):
return self._step.vars
def move_up(self):
previous_step = self.parent.step(self._index() - 1)
if isinstance(previous_step, ForLoopStepController):
self._swap_forloop_headers(previous_step)
else:
self.get_raw_steps().insert(0, previous_step._step)
previous_step.remove()
def _swap_forloop_headers(self, previous_step):
previous_step._step.steps = self._step.steps
self._step.steps = []
steps = self.parent.get_raw_steps()
i = steps.index(self._step)
steps[i - 1] = self._step
steps[i] = previous_step._step
self.parent.set_raw_steps(steps)
def | |
the command returens a ramdisk id before return it
Return:
ramdisk id -- ramdisk id is the 9th element of returned command
result
'''
command_result = [x for x in
self.describe_images(image_id).split()]
if len(command_result) >= 9:
self.debug("Ramdisk ID %s" % command_result[8])
return command_result[8]
def save_node(
self,
image_id,
instance_ip,
bucket_name,
image_name,
image_size,
node_type,
out_queue
):
'''
Saves nodem, upload and register
Parameters:
image_id -- image id of instance
instance_ip -- instance public IP address
bucket_name -- bucket name for bundled image
image_name -- image name for bundled image
image_size -- image size for bundled image
node_type -- control node or compute node
out_queue -- queue in which puts output
Logic:
After gets all the necessary infomraion to save bundle a node,
then saves the node, uploads the bundle
Return:
bundled image id -- Parses the command result, gets the bundled
image id, then returns it
'''
bundled_image_id = {}
kernel_id = self.get_kernel_id(image_id)
self.debug('Kernel ID %s' % kernel_id)
ramdisk_id = self.get_ramdisk_id(image_id)
self.debug("Ramdisk ID %s" % ramdisk_id)
# get manifest from the last unit
try:
manifest = [x for x in self.save_instance(kernel_id,
ramdisk_id, instance_ip, image_name, image_size).split()].pop()
self.msg('\nManifest generated: %s' % manifest)
self.msg('\nUploading bundle')
# upload image
image = [x for x in self.upload_bundle(instance_ip, bucket_name,
manifest).split()].pop()
self.debug(image)
self.msg('\nUploading done')
self.msg('\nRegistering image')
# register image, and return image id
bundled_image_id[node_type] = \
self.euca_register(image).split('\t')[1].strip()
except:
self.msg('\nERROR: Failed to save instance, please check if instance has'
' enough space left or the size for image to bundle is proper')
os._exit(1)
out_queue.put(bundled_image_id)
def euca_register(self, image):
'''register image'''
return self.get_command_result('euca-register %s' % image)
    def checkpoint_cluster(self, args):
        '''
        Method for saving virtual cluster

        Parameters:
            args -- this method deals with
                args.name -- virtual cluster name
                args.controlb -- control node bucket name
                args.controln -- control node image name
                args.computeb -- compute node bucket name
                args.computen -- compute node image name
                args.size -- bundled image size

        Logic:
            Checks existence before saving virtual cluster
            Only saves cluster which is running (Because saved and
            terminated clusters are not currently running)
            Only saves control node and one compute node into images
            Saves new control node image id and new compute node image id
            into backup file for later restore. Change status to SAVED after
            the saving. Then terminates cluster before deletes host information
            after each termination

        No returns
        '''
        if self.interface == 'boto':
            self.msg('UNIMPLEMENTED')
            sys.exit()
        self.debug('Checking if %s is existed' % args.name)
        # check if cluster exists
        if self.cloud_instances.if_exist(args.name):
            # get cluster by name
            self.debug('Getting cloud instance %s' % args.name)
            self.cloud_instances.get_cloud_instances_by_name(args.name)
            # if cluster is down, terminate the program
            if not self.cloud_instances.if_status(self.cloud_instances.RUN):
                self.msg('Error in locating virtual cluster %s, not running?'
                         % args.name)
                sys.exit()
        else:
            self.msg('Error in locating virtual cluster %s, not created?'
                     % args.name)
            sys.exit()
        self.print_section('Saving virtual cluster')
        self.msg('Virtual cluster name -- %s' % args.name)
        self.msg('control node bucket -- %s' % args.controlb)
        self.msg('control node name -- %s' % args.controln)
        self.msg('compute node bucket -- %s' % args.computeb)
        self.msg('compute node name -- %s' % args.computen)
        # by convention id 0 is the control node, id 1 a compute node
        control = self.cloud_instances.get_by_id(0)
        compute = self.cloud_instances.get_by_id(1)
        self.debug('Control node %s, compute node %s'
                   % (control, compute))
        # copy necessary files to instances, and source profile
        for instance in [control, compute]:
            self.copyto(instance, os.environ['EC2_CERT'])
            self.copyto(instance, os.environ['EC2_PRIVATE_KEY'])
            self.copyto(instance, os.environ['EUCALYPTUS_CERT'])
            self.copyto(instance, self.enrc)
            if self.cloud == 'eucalyptus':
                # self.copyto(instance, os.environ['EC2_JVM_ARGS'])
                self.copyto(instance, os.environ['AWS_CREDENTIAL_FILE'])
            self.execute(instance, 'cat %s >> ~/.profile'
                         % self.enrc.split('/')[-1])
            self.execute(instance, 'source ~/.profile')
        # bundle both nodes in parallel; workers report via this queue
        save_queue = Queue.Queue()
        self.msg('\nSaving control node %s' % control['id'])
        threading.Thread(target=self.save_node, args=[control['image'],
                                                      control['ip'],
                                                      args.controlb,
                                                      args.controln,
                                                      args.size,
                                                      'control',
                                                      save_queue
                                                      ]).start()
        self.msg('\nSaving compute node %s' % compute['id'])
        threading.Thread(target=self.save_node, args=[compute['image'],
                                                      compute['ip'],
                                                      args.computeb,
                                                      args.computen,
                                                      args.size,
                                                      'compute',
                                                      save_queue
                                                      ]).start()
        # wait until both worker threads are finished
        while threading.activeCount() > 1:
            time.sleep(1)
        # return values has the format:
        # {'control':'', 'compute':''}, but not sure the order
        for bundled_image_id in [save_queue.get(), save_queue.get()]:
            if 'control' in bundled_image_id:
                control_node_id = bundled_image_id['control']
            elif 'compute' in bundled_image_id:
                compute_node_id = bundled_image_id['compute']
        # get instance type
        instance_type = control['type']
        self.debug('Instance type %s' % instance_type)
        # get compute node number (total minus the control node)
        cluster_size = self.cloud_instances.get_cluster_size() - 1
        self.debug('Number of computation nodes %d' % cluster_size)
        # copy list for termination
        temp_instance_list = list(self.cloud_instances.get_list().values())
        # delete old info
        self.debug('Deleting %s from backup file' % args.name)
        self.cloud_instances.del_by_name(args.name)
        self.debug('Setting save info')
        # set saved cloud info, and change status to saved
        self.cloud_instances.checkpoint_cloud_instances(args.name,
                                                        control_node_id,
                                                        compute_node_id,
                                                        instance_type,
                                                        cluster_size)
        # save cluster
        self.debug('Saving cluster into backup file')
        self.cloud_instances.save_instances()
        # terminate instances
        for instance in temp_instance_list:
            if type(instance) is dict:
                self.terminate_instance(instance['id'])
                self.del_known_host(instance['ip'])
# ---------------------------------------------------------------------
# METHODS TO RESTORE VIRTUAL CLUSTER
# ---------------------------------------------------------------------
def check_image_availability(self, image_id):
'''
Checks if image is available
Parameters:
image_id -- image id
Logic:
Gets command result of euca-describe-images, parses it.
Returns:
True -- if image is avaliable
False -- if image if not avaliable
'''
command_result = [x for x in
self.describe_images(image_id).split()]
return command_result[3] == 'available'
def euca_deregister(self, image_id):
'''
Deregisters image after restore
Parameters:
image_id -- image id
No returns
'''
self.execute_local('euca-deregister %s' % image_id)
    def restore_cluster(self, args):
        '''
        Method for restoring a previously saved cluster
        Parameters:
            args -- parsed CLI arguments; this method uses
            args.name -- virtual cluster name
        Logic:
            Loads control node image id, compute node image id, instance
            type and cluster size from the backup file (only clusters in
            SAVED status can be restored).
            Creates one instance from the control image and `cluster size`
            instances from the compute image, then associates IP addresses
            with all created instances.
            Since the cluster was saved before, software is already
            installed, but SLURM must be reconfigured because all host
            names changed. No need to create a munge-key because the
            munge-key was saved inside each instance image during the
            saving process.
            Changes status to RUN in the end, and saves this restored
            cluster information into the backup file.
        No returns
        '''
        # restore via the boto interface is not implemented
        if self.interface == 'boto':
            self.msg('UNIMPLEMENTED')
            sys.exit()
        # exactly one control node is launched from the control image
        control_node_num = 1
        # only restore cluster which is saved
        self.debug('Checking if %s is existed' % args.name)
        if self.cloud_instances.if_exist(args.name):
            self.debug('Getting cloud %s' % args.name)
            self.cloud_instances.get_cloud_instances_by_name(args.name)
            if self.cloud_instances.if_status(self.cloud_instances.SAVED):
                self.debug('Cloud status: %s' % self.cloud_instances.SAVED)
                # cluster is saved: pull the saved image ids and sizing
                # out of the backup record, then reset the in-memory list
                # so it can be repopulated for the new deployment
                control_node_id = self.cloud_instances.get_list()['control']
                self.debug('control node %s' % control_node_id)
                compute_node_id = self.cloud_instances.get_list()['compute']
                self.debug('compute node %s' % compute_node_id)
                instance_type = self.cloud_instances.get_list()['type']
                self.debug('instance type %s' % instance_type)
                cluster_size = self.cloud_instances.get_list()['size']
                self.debug('cluster size %s' % cluster_size)
                self.debug('Creating new cloud instance list')
                self.cloud_instances.clear()
                self.debug('Deleting old info from backup file')
                self.cloud_instances.set_cloud_instances_by_name(args.name)
            else:
                self.msg('Error in restoring virtual cluster %s, not saved?'
                         % args.name)
                sys.exit()
        else:
            self.msg('Error in locating virtual cluster %s, not created?'
                     % args.name)
            sys.exit()
        # bundled images may still be in 'pending' state right after a
        # save; bail out rather than launching from an unusable image
        self.msg('Checking image availability')
        if not self.check_image_availability(control_node_id) or\
            not self.check_image_availability(compute_node_id):
            self.msg('Bundled Images are not avaliable right now, try later')
            sys.exit()
        # saved size counts compute nodes only; add the control node
        cluster_size = int(cluster_size) + control_node_num
        self.print_section('Restoring virtual cluster')
        self.msg('cluster name -- %s' % args.name)
        self.msg('number of nodes -- %s' % cluster_size)
        self.msg('instance type -- %s' % instance_type)
        self.msg('control image -- %s' % control_node_id)
        self.msg('compute image -- %s' % compute_node_id)
        # run control node
        self.debug('Creating control node %s' % control_node_id)
        self.euca_run_instance(self.user, control_node_num,
                               control_node_id, instance_type)
        # run compute nodes given number
        self.debug('Creating compute node %s' % compute_node_id)
        self.euca_run_instance(self.user, cluster_size - control_node_num,
                               compute_node_id, instance_type)
        # get free ip list
        self.debug('Getting free IP list')
        ip_lists = self.euca_describe_addresses()
        time.sleep(5)
        self.msg('\nAssociating IPs')
        for i in range(cluster_size):
            self.debug('Getting cloud from index %d' % i)
            instance = self.cloud_instances.get_by_id(i)
            time.sleep(1)
            if self.cloud == 'nova':
                # nova: explicitly associate a free floating IP; retry
                # until the association succeeds
                if len(ip_lists) < cluster_size:
                    self.msg('ERROR: Not enough public IP addresses')
                    self.terminate_all(cluster_size)
                    sys.exit()
                while not self.euca_associate_address(instance,
                                                      ip_lists[i]):
                    self.msg('Error in associating IP %s '
                             'with instance %s, trying again'
                             % (ip_lists[i], instance['id']))
            elif self.cloud == 'eucalyptus':
                # eucalyptus: poll until the instance has been assigned a
                # public IP distinct from its private IP (up to ~200s)
                ip_asso_count = 0
                while True:
                    addresses = self.euca_get_ip(instance['id'])
                    public_ip_address = addresses['public']
                    private_ip_address = addresses['private']
                    if not public_ip_address == private_ip_address:
                        break
                    else:
                        ip_asso_count += 1
                        time.sleep(1)
                    if ip_asso_count > 200:
                        self.msg('ERROR: Not enough public IP addresses')
                        self.terminate_all(cluster_size)
                        sys.exit()
                self.msg('ADDRESS %s' % public_ip_address)
                self.cloud_instances.set_ip_by_id(instance['id'],
                                                  public_ip_address,
                                                  private_ip_address)
        # check ssh port but do not reinstall software (False flag):
        # the saved images already contain everything
        self.debug('Checking alive instance for deploying')
        for instance in self.cloud_instances.get_list().values():
            if type(instance) is dict:
                threading.Thread(target=self.installation,
                                 args=[instance, 60, False]).start()
        # wait for all installation/check threads to finish
        while threading.activeCount() > 1:
            time.sleep(1)
        # configure SLURM but do not regenerate munge-keys (False flag)
        self.debug('Configuating SLURM')
        self.config_slurm(False)
        # set status to run and save
        self.debug('Setting status to %s' % self.cloud_instances.RUN)
        self.cloud_instances.set_status(self.cloud_instances.RUN)
        self.debug('Deleting old cloud instance info')
        self.cloud_instances.del_by_name(args.name)
        self.debug('Saving cloud instance info')
        self.cloud_instances.save_instances()
| |
"Warning: `dataProviderArgs` will be ignored if `dataProvider = 'OSRM-online'`.\n"
return [valFlag, errorMsg, warningMsg]
def _valGeoDataProvider(dataProvider, dataProviderArgs):
valFlag = True
errorMsg = ""
warningMsg = ""
try:
dataProvider = dataProvider.lower()
except:
pass
if (dataProvider == None):
pass
elif (dataProvider not in geoDataProviderDictionary.keys()):
errorMsg = "Error: Invalid `dataProvider` value. Valid options include 'MapQuest', and 'ORS-online'."
valFlag = False
else:
if (geoDataProviderDictionary[dataProvider] == "mapquest"):
if ('APIkey' not in dataProviderArgs):
valFlag = False
errorMsg = "Error: 'APIkey' is a required key in `dataProviderArgs` if `dataProvider = 'MapQuest'`."
if (geoDataProviderDictionary[dataProvider] == "ors-online"):
if ('APIkey' not in dataProviderArgs):
valFlag = False
errorMsg = "Error: 'APIkey' is a required key in `dataProviderArgs` if `dataProvider = 'ORS-online'`."
return [valFlag, errorMsg, warningMsg]
def _valGetElevationDataProvider(dataProvider, dataProviderArgs):
    """
    Validate an elevation data provider and its arguments.

    Parameters
    ----------
    dataProvider: string
        Name of the elevation service (case-insensitive).
    dataProviderArgs: dict or None
        Provider-specific options (e.g., an 'APIkey').

    Returns
    -------
    [valFlag, errorMsg, warningMsg]
    """
    valFlag = True
    errorMsg = ""
    warningMsg = ""
    # Normalize case; catch only AttributeError (non-string input),
    # not a bare except.
    try:
        dataProvider = dataProvider.lower()
    except AttributeError:
        pass
    if (dataProvider not in elevDataProviderDictionary.keys()):
        valFlag = False
        errorMsg = "Error: Invalid `dataProvider` value. Currently, the only valid option is 'ORS-online'."
    else:
        if (elevDataProviderDictionary[dataProvider] == "ors-online"):
            # Guard against dataProviderArgs=None, which would otherwise
            # raise TypeError on the membership test.
            if (dataProviderArgs is None or 'APIkey' not in dataProviderArgs):
                valFlag = False
                errorMsg = "Error: 'APIkey' is a required key in `dataProviderArgs` if `dataProvider = 'ORS-online'`."
    return [valFlag, errorMsg, warningMsg]
def _valIsoDataProvider(travelMode, dataProvider, dataProviderArgs):
    """
    Validate a travel mode plus an isochrone data provider and arguments.

    Parameters
    ----------
    travelMode: string
        Travel mode (case-insensitive); must be in isoTravelModeList.
    dataProvider: string
        Name of the isochrone service (case-insensitive).
    dataProviderArgs: dict or None
        Provider-specific options ('APIkey' for ORS-online, 'port' for
        ORS-local).

    Returns
    -------
    [valFlag, errorMsg, warningMsg]
    """
    valFlag = True
    errorMsg = ""
    warningMsg = ""
    # Normalize case; catch only AttributeError (non-string input).
    try:
        travelMode = travelMode.lower()
    except AttributeError:
        pass
    if (travelMode not in isoTravelModeList):
        valFlag = False
        errorMsg = "Error: Invalid `travelMode` value."
    if (valFlag):
        try:
            dataProvider = dataProvider.lower()
        except AttributeError:
            pass
        if (dataProvider not in isoDataProviderDictionary.keys()):
            valFlag = False
            # TYPO FIX: the closing quote on 'ors-local' was missing.
            errorMsg = "Error: Invalid `dataProvider` value. Currently, the only valid options are 'ORS-online' and 'ors-local'."
        else:
            # Guard against dataProviderArgs=None (TypeError on `in`).
            if (isoDataProviderDictionary[dataProvider] == "ors-online"):
                if (dataProviderArgs is None or 'APIkey' not in dataProviderArgs):
                    valFlag = False
                    errorMsg = "Error: 'APIkey' is a required key in `dataProviderArgs` if `dataProvider = 'ORS-online'`."
            elif (isoDataProviderDictionary[dataProvider] == "ors-local"):
                if (dataProviderArgs is None or 'port' not in dataProviderArgs):
                    valFlag = False
                    errorMsg = "Error: 'port' is a required key in `dataProviderArgs` if `dataProvider = 'ORS-local'`."
    return [valFlag, errorMsg, warningMsg]
def _valIso(iso):
valFlag = True
errorMsg = ""
warningMsg = ""
if (type(iso) is not dict):
valFlag = False
errorMsg = "Error: `iso` must be a dictionary."
if (valFlag):
if ('location' not in iso):
valFlag = False
errorMsg = "Error: `iso` dictionary must include `location` key."
if (valFlag):
if ('boundingRegion' not in iso):
valFlag = False
errorMsg = "Error: `iso` dictionary must include `boundingRegion` key."
if (valFlag):
if (type(iso['boundingRegion']) is not list):
valFlag = False
errorMsg = "Error: `iso['boundingRegion]` must be a list."
if (valFlag):
if (len(iso['boundingRegion']) != 5):
valFlag = False
errorMsg = "Error: `iso['boundingRegion]` must be a list with 5 [lat, lon] pairs."
if (valFlag):
if ('isochrones' not in iso):
valFlag = False
errorMsg = "Error: `iso` dictionary must include `isochrones` key."
if (valFlag):
if (type(iso['isochrones']) is not list):
valFlag = False
errorMsg = "Error: `iso['isochrones]` must be a list."
if (valFlag):
for i in range(0, len(iso['isochrones'])):
if (type(iso['isochrones'][i]) is not dict):
valFlag = False
errorMsg = "Error: Each element of `iso['isochrones]` must be a dictionary."
break
if ('value' not in iso['isochrones'][i]):
valFlag = False
errorMsg = "Error: Each element of `iso['isochrones]` must include a 'value' key."
break
if ('valueUnits' not in iso['isochrones'][i]):
valFlag = False
errorMsg = "Error: Each element of `iso['isochrones]` must include a 'valueUnits' key."
break
if ('area' not in iso['isochrones'][i]):
valFlag = False
errorMsg = "Error: Each element of `iso['isochrones]` must include an 'area' key."
break
if ('pop' not in iso['isochrones'][i]):
valFlag = False
errorMsg = "Error: Each element of `iso['isochrones]` must include a 'pop' key."
break
if ('reachfactor' not in iso['isochrones'][i]):
valFlag = False
errorMsg = "Error: Each element of `iso['isochrones]` must include a 'reachfactor' key."
break
if ('poly' not in iso['isochrones'][i]):
valFlag = False
errorMsg = "Error: Each element of `iso['isochrones]` must include a 'poly' key."
break
if (type(iso['isochrones'][i]['poly']) is not list):
valFlag = False
errorMsg = "Error: Each element of `iso['isochrones]` must include a 'poly' key with a list structure."
break
# FIXME -- Could continue to drill down into poly structure,
# but this already seems like overkill at this point.
return [valFlag, errorMsg, warningMsg]
def _valGetWeatherDataProvider(dataProvider, dataProviderArgs):
    """
    Validate a weather data provider and its arguments.

    Parameters
    ----------
    dataProvider: string
        Name of the weather service (case-insensitive).
    dataProviderArgs: dict or None
        Provider-specific options (e.g., an 'APIkey').

    Returns
    -------
    [valFlag, errorMsg, warningMsg]
    """
    valFlag = True
    errorMsg = ""
    warningMsg = ""
    # Normalize case; catch only AttributeError (non-string input).
    try:
        dataProvider = dataProvider.lower()
    except AttributeError:
        pass
    if (dataProvider not in weatherDataProviderDictionary.keys()):
        valFlag = False
        errorMsg = "Error: Invalid `dataProvider` value. Currently, the only valid option is 'openweather'."
    else:
        if (weatherDataProviderDictionary[dataProvider] == "openweather"):
            # Guard against dataProviderArgs=None (TypeError on `in`).
            if (dataProviderArgs is None or 'APIkey' not in dataProviderArgs):
                valFlag = False
                errorMsg = "Error: 'APIkey' is a required key in `dataProviderArgs` if `dataProvider = 'openweather'`."
    return [valFlag, errorMsg, warningMsg]
def _valRouteType2DForScalar(routeType, speedMPS, dataProvider):
    """
    Validate the (routeType, speedMPS, dataProvider) combination for
    scalar 2D travel time/distance queries.

    Parameters
    ----------
    routeType: string
        One of routeType2DList (case-insensitive).
    speedMPS: float or None
        Constant vehicle speed; required for 'euclidean2D'/'manhattan',
        ignored (with a warning) for provider-based route types.
    dataProvider: string or None
        Name of the routing service (case-insensitive).

    Returns
    -------
    [valFlag, errorMsg, warningMsg]
    """
    valFlag = True
    errorMsg = ""
    warningMsg = ""
    # Normalize case; catch only AttributeError (non-string input).
    try:
        dataProvider = dataProvider.lower()
    except AttributeError:
        pass
    try:
        routeType = routeType.lower()
    except AttributeError:
        pass
    # BUG FIX: look up the provider with .get() so an invalid or None
    # dataProvider produces the proper error message below instead of a
    # KeyError crash (the original indexed the dictionary directly for
    # every routeType except 'fastest').
    providerName = dataProviderDictionary.get(dataProvider)
    if (routeType not in routeType2DList):
        errorMsg = "Error: Invalid `routeType` value. Valid options include 'euclidean2D', 'manhattan', 'fastest', 'shortest', 'pedestrian', 'cycling', 'truck', and 'wheelchair'."
        valFlag = False
    else:
        if (routeType == 'euclidean2d'):
            if (speedMPS is None):
                valFlag = False
                errorMsg = "Error: For 'euclidean2D' routeType, speedMPS is required."
        elif (routeType == 'manhattan'):
            if (speedMPS is None):
                valFlag = False
                errorMsg = "Error: For 'manhattan' routeType, speedMPS is required."
        elif (routeType == 'fastest'):
            if (dataProvider not in dataProviderDictionary.keys()):
                # TYPO FIX: 'MapQuest' was missing its opening quote.
                errorMsg = "Error: A valid dataProvider is required if routeType = 'fastest'. Valid data providers supporting the 'fastest' routeType are 'ORS-online', 'OSRM-online', 'pgRouting', 'MapQuest', and 'ORS-local'."
                valFlag = False
            elif (speedMPS is not None):
                warningMsg += "Warning: An explicit constant vehicle speed was specified by speedMPS. Speeds from the data provider will be ignored. \n"
        elif (routeType == 'shortest'):
            if (providerName not in ['ors-online', 'mapquest']):
                errorMsg = "Error: 'ors-online' and 'MapQuest' are currently the only dataProvider options for routeType = 'shortest'."
                valFlag = False
            elif (speedMPS is not None):
                warningMsg += "Warning: An explicit constant vehicle speed was specified by speedMPS. Speeds from the data provider will be ignored.\n"
        elif (routeType == 'pedestrian'):
            if (providerName not in ['ors-online', 'mapquest', 'ors-local']):
                errorMsg = "Error: 'ors-online', 'MapQuest', and 'ORS-local' are currently the only dataProvider options for routeType = 'pedestrian'."
                valFlag = False
            elif (speedMPS is not None):
                warningMsg += "Warning: An explicit constant vehicle speed was specified by speedMPS. Speeds from the data provider will be ignored.\n"
        elif (routeType == 'cycling'):
            if (providerName not in ['ors-online', 'ors-local']):
                errorMsg = "Error: 'ORS-online' and 'ORS-local' are currently the only dataProvider options for routeType = 'cycling'."
                valFlag = False
            elif (speedMPS is not None):
                warningMsg += "Warning: An explicit constant vehicle speed was specified by speedMPS. Speeds from the data provider will be ignored.\n"
        elif (routeType == 'truck'):
            if (providerName not in ['ors-online', 'ors-local']):
                errorMsg = "Error: 'ORS-online' and 'ORS-local' are currently the only dataProvider options for routeType = 'truck'."
                valFlag = False
            elif (speedMPS is not None):
                warningMsg += "Warning: An explicit constant vehicle speed was specified by speedMPS. Speeds used by the data provider will be ignored.\n"
        elif (routeType == 'wheelchair'):
            if (providerName not in ['ors-online']):
                errorMsg = "Error: 'ORS-online' is currently the only dataProvider option for routeType = 'wheelchair'."
                valFlag = False
            elif (speedMPS is not None):
                warningMsg += "Warning: An explicit constant vehicle speed was specified by speedMPS. Speeds used by the data provider will be ignored.\n"
    return [valFlag, errorMsg, warningMsg]
def _valRouteType2DForShapepoints(routeType, speedMPS, expDurationSec, dataProvider):
valFlag = True
errorMsg = ""
warningMsg = ""
try:
dataProvider = dataProvider.lower()
except:
pass
try:
routeType = routeType.lower()
except:
pass
if (routeType not in routeType2DList):
errorMsg = "Error: Invalid `routeType` value. Valid options include 'euclidean2D', 'manhattan', 'fastest', 'shortest', 'pedestrian', 'cycling', 'truck', and 'wheelchair'."
valFlag = False
else:
if (valFlag and speedMPS is not None):
[valFlag, errorMsg, newWarningMsg] = _valGreaterThanZeroFloat(speedMPS, 'speedMPS')
warningMsg += newWarningMsg
if (valFlag and expDurationSec is not None):
[valFlag, errorMsg, newWarningMsg] = _valGreaterThanZeroFloat(expDurationSec, 'expDurationSec')
warningMsg += newWarningMsg
if (routeType in ['euclidean2d', 'manhattan']):
if (speedMPS is None and expDurationSec is None):
valFlag = False
errorMsg = "Error: Please provide `expDurationSec` or `speedMPS` for calculating shapepoints."
elif (speedMPS is not None and expDurationSec is not None):
warningMsg += "Warning: `speedMPS` and `expDurationSec` are both provided, but `expDurationSec` will override `speedMPS`. To calculate by `speedMPS` (rather than by an expected duration), leave `expDurationSec` at its default value (None).\n"
if (valFlag and dataProvider is not None):
warningMsg += "Warning: For 'euclidean2d' and 'manhattan', it is not using data provider, therefore `dataProvider` is ignored.\n"
elif (routeType in ['greatcircle', 'curve']):
# FIXMELP -- Where are these new "routeType" options defined?
# Also, these are not in `routeType2DList`, so they will throw an error above.
if (expDurationSec is None):
valFlag = False
errorMsg = "Error: Please provide `expDurationSec` to be evenly distributed to the arc"
if (speedMPS is not None):
warningMsg += "Warning: `speedMPS` will not be used for calculation. \n"
elif (routeType in ['fastest', 'shortest', 'pedestrian', 'cycling', 'truck', 'wheelchair']):
if (routeType == 'fastest'):
if (dataProvider not in dataProviderDictionary.keys()):
errorMsg = "Error: A valid dataProvider is required if routeType = 'fastest'. Valid data providers supporting the 'fastest' routeType are 'ORS-online', 'OSRM-online', 'pgRouting', 'MapQuest', and 'ORS-local'."
valFlag = False
elif (routeType == 'shortest'):
if (dataProviderDictionary[dataProvider] not in ['ors-online', 'mapquest']):
errorMsg = "Error: 'ors-online' and 'MapQuest' are currently the only dataProvider options for routeType = 'shortest'."
valFlag = False
elif (routeType == 'pedestrian'):
if (dataProviderDictionary[dataProvider] not in ['ors-online', 'mapquest', 'ors-local']):
errorMsg = "Error: Invalid `dataProvider` value. 'ORS-online', 'MapQuest', and 'ORS-local' are currently the only data providers supporting the 'pedestrian' routeType option."
valFlag = False
elif (routeType == 'cycling'):
if (dataProviderDictionary[dataProvider] not in ['ors-online', 'ors-local']):
errorMsg = "Error: Invalid `dataProvider` value. 'ORS-online' and 'ORS-local' are currently the only data providers supporting the 'cycling' routeType option."
valFlag = False
elif (routeType == 'truck'):
if (dataProviderDictionary[dataProvider] not in ['ors-online', 'ors-local']):
errorMsg = "Error: Invalid `dataProvider` value. 'ORS-online' | |
'''
Custom file dialogs for whipFTP. Copyright <NAME>.
'''
import os
from os.path import expanduser
from os import listdir
from os.path import isfile, join
import platform
import psutil
from tkinter import *
from tkinter import font
from tkinter import ttk
from tkinter import PhotoImage
from tkinter import messagebox
from TkDND_wrapper import *
# BUG FIX: `is` compares object identity, not string equality.
# platform.system() returns a freshly built string, so
# `platform.system() is 'Windows'` was effectively always False and the
# win32 modules were never imported on Windows. Use `==` instead.
if(platform.system() == 'Windows'):
    import win32api
    import win32con
class floating_message_dialog:
    """Borderless (overrideredirect) pop-up showing an icon and a message.

    Has no buttons and no window decorations, so the caller must dismiss
    it explicitly via destroy(). The window is centered over *master*,
    made transient, and grabs input focus while it is shown.
    """
    def __init__(self, master, Title, icon, message):
        #Create a new dialog box window
        self.floating_message_dialog_window = Toplevel()
        #Make it non-resizable, set title
        self.floating_message_dialog_window.resizable(False, False)
        self.floating_message_dialog_window.title(Title)
        #Create frames: icon on the left, message text beside it
        self.icon_frame = ttk.Frame(self.floating_message_dialog_window)
        self.icon_frame.pack(side = 'left', fill = Y)
        self.entry_frame = ttk.Frame(self.floating_message_dialog_window)
        self.entry_frame.pack(side = 'left', fill = Y)
        #Create the label showing the icon
        ttk.Label(self.icon_frame, image = icon).pack(padx = 3, pady = 3)
        #Create the message label
        ttk.Label(self.entry_frame, text = message, anchor = 'w').pack(padx = 3, fill = X, expand = True)
        #No window border
        self.floating_message_dialog_window.overrideredirect(1)
        #Center the window over master: withdraw/update first so the
        #requested size is known before computing the position
        self.floating_message_dialog_window.withdraw()
        self.floating_message_dialog_window.update()
        x = master.winfo_rootx()
        y = master.winfo_rooty()
        main_height =master.winfo_height()
        main_width = master.winfo_width()
        window_height = self.floating_message_dialog_window.winfo_reqheight()
        window_width = self.floating_message_dialog_window.winfo_reqwidth()
        geom = '+%d+%d' % ((x + main_width//2 - window_width//2), (y + main_height//2 - window_height//2))
        self.floating_message_dialog_window.geometry(geom)
        self.floating_message_dialog_window.deiconify()
        #Prevent a new task appearing in the taskbar
        self.floating_message_dialog_window.transient(master)
        #Focus on the dialog box, freeze control of the main window;
        #grab_set can fail while another grab is active, so retry until
        #it succeeds
        self.floating_message_dialog_window.focus_force()
        while True:
            try:
                self.floating_message_dialog_window.grab_set()
                break
            except: continue
    def destroy(self):
        """Close the pop-up and release the input grab."""
        self.floating_message_dialog_window.destroy()
class message_dialog:
    """Modal message box with an icon, a message and a single OK button.

    Centered over *master*, made transient, and input-grabbing while
    shown; OK simply destroys the window.
    """
    def __init__(self, master, Title, icon, message):
        #Create a new dialog box window
        self.message_dialog_window = Toplevel()
        #Make it non-resizable, set title
        self.message_dialog_window.resizable(False, False)
        self.message_dialog_window.title(Title)
        #Create frames: icon on the left, message and button beside it
        self.icon_frame = ttk.Frame(self.message_dialog_window)
        self.icon_frame.pack(side = 'left', fill = Y)
        self.entry_frame = ttk.Frame(self.message_dialog_window)
        self.entry_frame.pack(side = 'left', fill = Y)
        #Create the label showing the icon
        ttk.Label(self.icon_frame, image = icon).pack(padx = 3, pady = 3)
        #Create the message label
        ttk.Label(self.entry_frame, text = message, anchor = 'w').pack(padx = 3, fill = X, expand = True)
        #Create the OK button (closes the dialog)
        self.rename_ok_button = ttk.Button(self.entry_frame, text = 'OK', command = self.destroy)
        self.rename_ok_button.pack(side = 'right', pady = 3, padx = 3 )
        #Center the window over master: withdraw/update first so the
        #requested size is known before computing the position
        self.message_dialog_window.withdraw()
        self.message_dialog_window.update()
        x = master.winfo_rootx()
        y = master.winfo_rooty()
        main_height =master.winfo_height()
        main_width = master.winfo_width()
        window_height = self.message_dialog_window.winfo_reqheight()
        window_width = self.message_dialog_window.winfo_reqwidth()
        geom = '+%d+%d' % ((x + main_width//2 - window_width//2), (y + main_height//2 - window_height//2))
        self.message_dialog_window.geometry(geom)
        self.message_dialog_window.deiconify()
        #Prevent a new task appearing in the taskbar
        self.message_dialog_window.transient(master)
        #Focus on the dialog box, freeze control of the main window;
        #grab_set can fail while another grab is active, so retry
        self.message_dialog_window.focus_force()
        while True:
            try:
                self.message_dialog_window.grab_set()
                break
            except: continue
    def destroy(self):
        """Close the dialog."""
        self.message_dialog_window.destroy()
class warning_dialog:
    """Modal OK/Cancel confirmation box.

    OK invokes *func_command* (which is expected to close the dialog
    itself); Cancel destroys the window. Centered over *master*,
    transient, and input-grabbing while shown.
    """
    def __init__(self, master, Title, func_command, icon, message):
        #Create a new dialog box window
        self.warning_dialog_window = Toplevel(master)
        #Make it non-resizable, set title
        self.warning_dialog_window.resizable(False, False)
        self.warning_dialog_window.title(Title)
        #Create frames: icon on the left, message and buttons beside it
        self.icon_frame = ttk.Frame(self.warning_dialog_window)
        self.icon_frame.pack(side = 'left', fill = Y)
        self.entry_frame = ttk.Frame(self.warning_dialog_window)
        self.entry_frame.pack(side = 'left', fill = Y)
        #Create the label showing the icon
        ttk.Label(self.icon_frame, image = icon).pack()
        #Create the message label
        ttk.Label(self.entry_frame, text = message, anchor = 'w').pack(padx = 3, fill = X, expand = True)
        #Create buttons: Cancel closes, OK runs the supplied callback
        self.cancel_ok_button = ttk.Button(self.entry_frame, text = 'Cancel', command = self.warning_dialog_window.destroy)
        self.cancel_ok_button.pack(side = 'right', pady = 3, padx = 3 )
        self.rename_ok_button = ttk.Button(self.entry_frame, text = 'OK', command = func_command)
        self.rename_ok_button.pack(side = 'right', pady = 3, padx = 3 )
        #Center the window over master: withdraw/update first so the
        #requested size is known before computing the position
        self.warning_dialog_window.withdraw()
        self.warning_dialog_window.update()
        x = master.winfo_rootx()
        y = master.winfo_rooty()
        main_height =master.winfo_height()
        main_width = master.winfo_width()
        window_height = self.warning_dialog_window.winfo_reqheight()
        window_width = self.warning_dialog_window.winfo_reqwidth()
        geom = '+%d+%d' % ((x + main_width//2 - window_width//2), (y + main_height//2 - window_height//2))
        self.warning_dialog_window.geometry(geom)
        self.warning_dialog_window.deiconify()
        #Prevent a new task appearing in the taskbar
        self.warning_dialog_window.transient(master)
        #Focus on the dialog box, freeze control of the main window;
        #grab_set can fail while another grab is active, so retry
        self.warning_dialog_window.focus_force()
        while True:
            try:
                self.warning_dialog_window.grab_set()
                break
            except: continue
    def destroy(self):
        """Close the dialog."""
        self.warning_dialog_window.destroy()
class name_dialog:
    """Modal text-entry dialog (e.g. for renaming).

    The entry widget is exposed as self.rename_entry so *func_command*
    can read the typed value; pressing Return triggers the same
    callback as the OK button. Centered over *master*, transient, and
    input-grabbing while shown.
    """
    def __init__(self, master, Title, func_command, icon, message = 'Enter new name:'):
        #Create a new dialog box window
        self.name_dialog_window = Toplevel(master)
        #Make it non-resizable, set title
        self.name_dialog_window.resizable(False, False)
        self.name_dialog_window.title(Title)
        #Create frames: icon on the left, prompt/entry/buttons beside it
        self.icon_frame = ttk.Frame(self.name_dialog_window)
        self.icon_frame.pack(side = 'left', fill = Y)
        self.entry_frame = ttk.Frame(self.name_dialog_window)
        self.entry_frame.pack(side = 'left', fill = Y)
        #Create the label showing the icon
        ttk.Label(self.icon_frame, image = icon).pack(padx = 3, pady = 3)
        #Create the prompt label
        ttk.Label(self.entry_frame, text = message, anchor = 'w').pack(padx = 3, fill = X, expand = True)
        #Create the entry and set keyboard focus on it
        self.rename_entry = ttk.Entry(self.entry_frame)
        self.rename_entry.pack(padx = 3, pady = 3, fill = X, expand = True)
        self.rename_entry.focus()
        #Create buttons: Cancel closes, OK runs the supplied callback
        self.cancel_ok_button = ttk.Button(self.entry_frame, text = 'Cancel', command = self.name_dialog_window.destroy)
        self.cancel_ok_button.pack(side = 'right', pady = 3, padx = 3 )
        self.rename_ok_button = ttk.Button(self.entry_frame, text = 'OK', command = func_command)
        self.rename_ok_button.pack(side = 'right', pady = 3, padx = 3 )
        #Center the window over master: withdraw/update first so the
        #requested size is known before computing the position
        self.name_dialog_window.withdraw()
        self.name_dialog_window.update()
        x = master.winfo_rootx()
        y = master.winfo_rooty()
        main_height =master.winfo_height()
        main_width = master.winfo_width()
        window_height = self.name_dialog_window.winfo_reqheight()
        window_width = self.name_dialog_window.winfo_reqwidth()
        geom = '+%d+%d' % ((x + main_width//2 - window_width//2), (y + main_height//2 - window_height//2))
        self.name_dialog_window.geometry(geom)
        self.name_dialog_window.deiconify()
        #Pressing Return behaves like clicking OK
        self.rename_entry.bind('<Return>', func_command)
        #Prevent a new task appearing in the taskbar
        self.name_dialog_window.transient(master)
        #Focus on the dialog box, freeze control of the main window;
        #grab_set can fail while another grab is active, so retry
        self.name_dialog_window.focus_force()
        while True:
            try:
                self.name_dialog_window.grab_set()
                break
            except: continue
    def destroy(self):
        """Close the dialog."""
        self.name_dialog_window.destroy()
class replace_dialog:
    """Modal Skip / Replace / Skip all / Replace all chooser.

    After the dialog closes, self.command holds the user's choice:
    1 = skip, 2 = replace, 3 = skip all, 4 = replace all
    (0 means no choice was recorded). Closing via the [x] button is
    treated as Skip.
    """
    def __init__(self, master, Title, icon, message):
        #Records which button was pressed (see class docstring)
        self.command = 0
        #Create a new dialog box window
        self.replace_dialog_window = Toplevel(master)
        #Make it non-resizable, set title
        self.replace_dialog_window.resizable(False, False)
        self.replace_dialog_window.title(Title)
        #Override the [x] button so closing the window counts as Skip
        self.replace_dialog_window.protocol('WM_DELETE_WINDOW', self.skip)
        #Create frames: icon on the left, message and buttons beside it
        self.icon_frame = ttk.Frame(self.replace_dialog_window)
        self.icon_frame.pack(side = 'left', fill = Y)
        self.entry_frame = ttk.Frame(self.replace_dialog_window)
        self.entry_frame.pack(side = 'left', fill = Y)
        #Create the label showing the icon
        ttk.Label(self.icon_frame, image = icon).pack(padx = 3, pady = 3)
        #Create the message label
        ttk.Label(self.entry_frame, text = message, anchor = 'w').pack(padx = 3, fill = X, expand = True)
        #Create the four choice buttons
        self.skip_button = ttk.Button(self.entry_frame, text = 'Skip', command = self.skip)
        self.skip_button.pack(side = 'left', pady = 3, padx = 3 )
        self.replace_button = ttk.Button(self.entry_frame, text = 'Replace', command = self.replace)
        self.replace_button.pack(side = 'left', pady = 3, padx = 3 )
        self.skip_all_button = ttk.Button(self.entry_frame, text = 'Skip all', command = self.skip_all)
        self.skip_all_button.pack(side = 'left', pady = 3, padx = 3 )
        self.replace_all_button = ttk.Button(self.entry_frame, text = 'Replace all', command = self.replace_all)
        self.replace_all_button.pack(side = 'left', pady = 3, padx = 3 )
        #Center the window over master: withdraw/update first so the
        #requested size is known before computing the position
        self.replace_dialog_window.withdraw()
        self.replace_dialog_window.update()
        x = master.winfo_rootx()
        y = master.winfo_rooty()
        main_height =master.winfo_height()
        main_width = master.winfo_width()
        window_height = self.replace_dialog_window.winfo_reqheight()
        window_width = self.replace_dialog_window.winfo_reqwidth()
        geom = '+%d+%d' % ((x + main_width//2 - window_width//2), (y + main_height//2 - window_height//2))
        self.replace_dialog_window.geometry(geom)
        self.replace_dialog_window.deiconify()
        #Prevent a new task appearing in the taskbar
        self.replace_dialog_window.transient(master)
        #Focus on the dialog box, freeze control of the main window;
        #grab_set can fail while another grab is active, so retry
        self.replace_dialog_window.focus_force()
        while True:
            try:
                self.replace_dialog_window.grab_set()
                break
            except: continue
    def skip(self):
        """Record choice 1 (skip this item) and close."""
        self.command = 1
        self.replace_dialog_window.destroy()
    def replace(self):
        """Record choice 2 (replace this item) and close."""
        self.command = 2
        self.replace_dialog_window.destroy()
    def skip_all(self):
        """Record choice 3 (skip all remaining items) and close."""
        self.command = 3
        self.replace_dialog_window.destroy()
    def replace_all(self):
        """Record choice 4 (replace all remaining items) and close."""
        self.command = 4
        self.replace_dialog_window.destroy()
    def destroy(self):
        """Close the dialog without recording a choice."""
        self.replace_dialog_window.destroy()
class file_properties_dialog:
    """Modal properties box with Rename / Chmod / Close buttons.

    *rename_command* and *chmod_command* are caller-supplied callbacks;
    Close destroys the window. Centered over *master*, transient, and
    input-grabbing while shown.
    """
    def __init__(self, master, Title, rename_command, chmod_command, icon, message):
        #Create a new dialog box window
        self.file_properties_dialog_window = Toplevel(master)
        #Make it non-resizable, set title
        self.file_properties_dialog_window.resizable(False, False)
        self.file_properties_dialog_window.title(Title)
        #Create frames: icon on the left, details and buttons beside it
        self.icon_frame = ttk.Frame(self.file_properties_dialog_window)
        self.icon_frame.pack(side = 'left', fill = Y)
        self.entry_frame = ttk.Frame(self.file_properties_dialog_window)
        self.entry_frame.pack(side = 'left', fill = Y)
        #Create the label showing the icon
        ttk.Label(self.icon_frame, image = icon).pack()
        #Create the properties text label
        ttk.Label(self.entry_frame, text = message, anchor = 'w').pack(padx = 3, fill = X, expand = True)
        #Create buttons: Close destroys, Chmod/Rename run the callbacks
        self.cancel_ok_button = ttk.Button(self.entry_frame, text = 'Close', command = self.file_properties_dialog_window.destroy)
        self.cancel_ok_button.pack(side = 'right', pady = 3, padx = 3 )
        self.chmod_ok_button = ttk.Button(self.entry_frame, text = 'Chmod', command = chmod_command)
        self.chmod_ok_button.pack(side = 'right', pady = 3, padx = 3 )
        self.rename_ok_button = ttk.Button(self.entry_frame, text = 'Rename', command = rename_command)
        self.rename_ok_button.pack(side = 'right', pady = 3, padx = 3 )
        #Center the window over master: withdraw/update first so the
        #requested size is known before computing the position
        self.file_properties_dialog_window.withdraw()
        self.file_properties_dialog_window.update()
        x = master.winfo_rootx()
        y = master.winfo_rooty()
        main_height =master.winfo_height()
        main_width = master.winfo_width()
        window_height = self.file_properties_dialog_window.winfo_reqheight()
        window_width = self.file_properties_dialog_window.winfo_reqwidth()
        geom = '+%d+%d' % ((x + main_width//2 - window_width//2), (y + main_height//2 - window_height//2))
        self.file_properties_dialog_window.geometry(geom)
        self.file_properties_dialog_window.deiconify()
        #Prevent a new task appearing in the taskbar
        self.file_properties_dialog_window.transient(master)
        #Focus on the dialog box, freeze control of the main window;
        #grab_set can fail while another grab is active, so retry
        self.file_properties_dialog_window.focus_force()
        while True:
            try:
                self.file_properties_dialog_window.grab_set()
                break
            except: continue
    def destroy(self):
        """Close the dialog."""
        self.file_properties_dialog_window.destroy()
class terminal_dialog:
def __init__(self, master, icon, func_command, terminal_prompt_name, destroy_func = None):
#Save reference to destroy function
self.destroy_function = destroy_func
#Save reference to terminal prompt name and command
self.terminal_prompt_name = terminal_prompt_name
self.func_command = func_command
#Save reference to icon
self.icon = icon
#Create a new dialog box window
self.terminal_dialog_window = | |
# simsg/metrics.py
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from scipy import signal
from scipy.ndimage.filters import convolve
from PIL import Image
def intersection(bbox_pred, bbox_gt):
    """Per-row intersection area of two [N, 4] (x1, y1, x2, y2) box tensors."""
    # Overlap rectangle: max of the two top-left corners, min of the two
    # bottom-right corners; negative extents mean no overlap.
    top_left = torch.max(bbox_pred[:, :2], bbox_gt[:, :2])
    bottom_right = torch.min(bbox_pred[:, 2:], bbox_gt[:, 2:])
    width_height = (bottom_right - top_left).clamp(min=0)
    return width_height[:, 0] * width_height[:, 1]
def jaccard(bbox_pred, bbox_gt):
    """Sum of per-row IoU (Jaccard index) between two [N, 4] box tensors."""
    # Intersection computed inline: clamp the overlap rectangle to
    # non-negative extents.
    top_left = torch.max(bbox_pred[:, :2], bbox_gt[:, :2])
    bottom_right = torch.min(bbox_pred[:, 2:], bbox_gt[:, 2:])
    width_height = torch.clamp(bottom_right - top_left, min=0)
    overlap = width_height[:, 0] * width_height[:, 1]
    # Union = area(pred) + area(gt) - intersection.
    pred_area = (bbox_pred[:, 2] - bbox_pred[:, 0]) * (bbox_pred[:, 3] - bbox_pred[:, 1])
    gt_area = (bbox_gt[:, 2] - bbox_gt[:, 0]) * (bbox_gt[:, 3] - bbox_gt[:, 1])
    iou = overlap / (pred_area + gt_area - overlap)
    return iou.sum()
def get_total_norm(parameters, norm_type=2):
    """
    Compute the total gradient norm over an iterable of parameters.

    Parameters whose gradient is unavailable (p.grad is None) are
    skipped. With norm_type == float('inf') the maximum absolute
    gradient entry is returned (this branch, as before, requires every
    parameter to have a gradient).

    Args:
        parameters: iterable of tensors carrying a .grad attribute.
        norm_type: order of the norm (default 2); float('inf') = max-norm.

    Returns:
        The total norm (scalar tensor, or 0 if no parameter had a grad).
    """
    if norm_type == float('inf'):
        total_norm = max(p.grad.data.abs().max() for p in parameters)
    else:
        total_norm = 0
        for p in parameters:
            # Skip parameters without gradients explicitly instead of
            # swallowing every exception with a bare `except`.
            if p.grad is None:
                continue
            param_norm = p.grad.data.norm(norm_type)
            total_norm += param_norm ** norm_type
        # BUG FIX: the 1/norm_type root used to be applied INSIDE the
        # loop, corrupting the accumulation whenever more than one
        # parameter had a gradient. It must be taken once, after the sum.
        total_norm = total_norm ** (1. / norm_type)
    return total_norm
def _FSpecialGauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
radius = size // 2
offset = 0.0
start, stop = -radius, radius + 1
if size % 2 == 0:
offset = 0.5
stop -= 1
x, y = np.mgrid[offset + start:stop, offset + start:stop]
assert len(x) == size
g = np.exp(-((x**2 + y**2) / (2.0 * sigma**2)))
return g / g.sum()
def _SSIMForMultiScale(img1,
img2,
max_val=255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03):
"""Return the Structural Similarity Map between `img1` and `img2`.
This function attempts to match the functionality of ssim_index_new.m by
<NAME>: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
Arguments:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
max_val: the dynamic range of the images (i.e., the difference between the
maximum the and minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
Returns:
Pair containing the mean SSIM and contrast sensitivity between `img1` and
`img2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
raise RuntimeError(
'Input images must have the same shape (%s vs. %s).', img1.shape,
img2.shape)
if img1.ndim != 4:
raise RuntimeError('Input images must have four dimensions, not %d',
img1.ndim)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
_, height, width, _ = img1.shape
# Filter size can't be larger than height or width of images.
size = min(filter_size, height, width)
# Scale down sigma if a smaller filter size is used.
sigma = size * filter_sigma / filter_size if filter_size else 0
if filter_size:
window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))
mu1 = signal.fftconvolve(img1, window, mode='valid')
mu2 = signal.fftconvolve(img2, window, mode='valid')
sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
else:
# Empty blur kernel so no need to convolve.
mu1, mu2 = img1, img2
sigma11 = img1 * img1
sigma22 = img2 * img2
sigma12 = img1 * img2
mu11 = mu1 * mu1
mu22 = mu2 * mu2
mu12 = mu1 * mu2
sigma11 -= mu11
sigma22 -= mu22
sigma12 -= mu12
# Calculate intermediate values used by both ssim and cs_map.
c1 = (k1 * max_val)**2
c2 = (k2 * max_val)**2
v1 = 2.0 * sigma12 + c2
v2 = sigma11 + sigma22 + c2
ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))
cs = np.mean(v1 / v2)
return ssim, cs
def MultiScaleSSIM(img1,
                   img2,
                   max_val=255,
                   filter_size=11,
                   filter_sigma=1.5,
                   k1=0.01,
                   k2=0.03,
                   weights=None):
    """Return the MS-SSIM score between `img1` and `img2`.

    Multi-Scale Structural Similarity (MS-SSIM) Image Quality Assessment,
    "Multi-scale structural similarity for image quality assessment" (2003).
    Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf
    Author's MATLAB implementation:
    http://www.cns.nyu.edu/~lcv/ssim/msssim.zip

    Arguments:
      img1: Numpy array holding the first RGB image batch [batch, h, w, depth].
      img2: Numpy array holding the second RGB image batch.
      max_val: the dynamic range of the images (i.e., the difference between
        the maximum and the minimum allowed values).
      filter_size: Size of blur kernel to use (reduced for small images).
      filter_sigma: Standard deviation for Gaussian blur kernel (reduced for
        small images).
      k1: Constant used to maintain stability in the SSIM calculation (0.01
        in the original paper).
      k2: Constant used to maintain stability in the SSIM calculation (0.03
        in the original paper).
      weights: List of weights for each level; if none, use five levels and
        the weights from the original paper.

    Returns:
      MS-SSIM score between `img1` and `img2`.

    Raises:
      RuntimeError: If input images don't have the same shape or don't have
        four dimensions: [batch_size, height, width, depth].
    """
    if img1.shape != img2.shape:
        # Bug fix: format the exception message instead of passing
        # logging-style positional args to RuntimeError.
        raise RuntimeError(
            'Input images must have the same shape (%s vs. %s).' %
            (img1.shape, img2.shape))
    if img1.ndim != 4:
        raise RuntimeError(
            'Input images must have four dimensions, not %d' % img1.ndim)
    # Note: default weights don't sum to 1.0 but do match the paper / matlab code.
    weights = np.array(weights if weights else
                       [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
    levels = weights.size
    downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
    im1, im2 = [x.astype(np.float64) for x in [img1, img2]]
    mssim = np.array([])
    mcs = np.array([])
    for _ in range(levels):
        ssim, cs = _SSIMForMultiScale(
            im1,
            im2,
            max_val=max_val,
            filter_size=filter_size,
            filter_sigma=filter_sigma,
            k1=k1,
            k2=k2)
        mssim = np.append(mssim, ssim)
        mcs = np.append(mcs, cs)
        filtered = [
            convolve(im, downsample_filter, mode='reflect')
            for im in [im1, im2]
        ]
        # 2x2 box blur followed by keeping every other pixel: downsample both
        # spatial dimensions by 2 for the next scale. (This line was corrupted
        # in the original source into non-Python text.)
        im1, im2 = [x[:, ::2, ::2, :] for x in filtered]
    return (np.prod(mcs[0:levels - 1]**weights[0:levels - 1]) *
            (mssim[levels - 1]**weights[levels - 1]))
def msssim(original, compared):
    """MS-SSIM between two images, each given as an RGB array or a file path."""
    def _as_batch(img):
        # Load from disk when a path is given, then ensure a leading batch axis.
        if isinstance(img, str):
            img = np.array(Image.open(img).convert('RGB'), dtype=np.float32)
        return img[None, ...] if img.ndim == 3 else img

    return MultiScaleSSIM(_as_batch(original), _as_batch(compared), max_val=255)
def psnr(original, compared):
    """Peak signal-to-noise ratio (dB) between two images, clipped to [0, 99.99].

    Arguments are RGB numpy arrays or image file paths.

    Bug fix: the original computed a scalar MSE with ``np.mean`` and then
    boolean-indexed it (``mse[mse > 0.]``) and subscripted the clip result
    with ``[0]`` — both raise TypeError on a scalar — and had no guard for
    identical images (zero MSE). Identical images now return the 99.99 cap.
    """
    if isinstance(original, str):
        original = np.array(Image.open(original).convert('RGB'), dtype=np.float32)
    if isinstance(compared, str):
        compared = np.array(Image.open(compared).convert('RGB'), dtype=np.float32)
    mse = np.mean(np.square(original - compared))
    if mse == 0.0:
        # Zero error: PSNR is infinite; return the clip ceiling instead.
        return 99.99
    return float(np.clip(10. * np.log10(255. * 255. / mse), 0., 99.99))
def match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):
"""Match each prior box with the ground truth box of the highest jaccard
overlap, encode the bounding boxes, then return the matched indices
corresponding to both confidence and location preds.
Args:
threshold: (float) The overlap threshold used when mathing boxes.
truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].
priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
variances: (tensor) Variances corresponding to each prior coord,
Shape: [num_priors, 4].
labels: (tensor) All the class labels for the image, Shape: [num_obj].
loc_t: (tensor) Tensor to be filled w/ endcoded location targets.
conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
idx: (int) current batch index
Return:
The matched indices corresponding to 1)location and 2)confidence preds.
"""
# jaccard index
overlaps = jaccard(
truths,
point_form(priors)
)
# (Bipartite Matching)
# [1,num_objects] best prior for each ground truth
best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
# [1,num_priors] best ground truth for each prior
best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
best_truth_idx.squeeze_(0)
best_truth_overlap.squeeze_(0)
best_prior_idx.squeeze_(1)
best_prior_overlap.squeeze_(1)
best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior
# TODO refactor: index best_prior_idx with long tensor
# ensure every gt matches with its prior of max overlap
for j in range(best_prior_idx.size(0)):
best_truth_idx[best_prior_idx[j]] = j
matches = truths[best_truth_idx] # Shape: [num_priors,4]
conf = labels[best_truth_idx] + 1 # Shape: [num_priors]
conf[best_truth_overlap < | |
== 2;
/* Namespace replication-factor and cross-node config consistency checks. */
ASSERT(r, True, "Non-default namespace replication-factor configuration.", "OPERATIONS", INFO,
"Listed namespace[s] have non-default replication-factor configuration. Please run 'show config namespace like repl' to check value. It may be non-issue in case namespace are configured for user requirement. Ignore those.",
"Non-default namespace replication-factor check.");
s = select * from NAMESPACE.CONFIG ignore "rack-id", like(".*device"), like(".*file") save;
r = group by CLUSTER, NAMESPACE, KEY do NO_MATCH(s, ==, MAJORITY) save;
ASSERT(r, False, "Different namespace configurations.", "OPERATIONS", WARNING,
"Listed namespace configuration[s] are different across multiple nodes in cluster. Please run 'show config namespace diff' to get actual difference. It may be non-issue in case namespace are configured with different device or file name etc. Ignore those.",
"Namespace configurations difference check.");
/* Errors */
s = select like(".*_err.*") from SERVICE.STATISTICS save;
u = select "uptime" from SERVICE.STATISTICS;
u = group by CLUSTER, NODE do MAX(u);
s = do s / u;
r = group by KEY do SD_ANOMALY(s, ==, 3);
ASSERT(r, False, "Skewed cluster service errors count.", "ANOMALY", INFO,
"Listed service errors[s] show skew in error count patterns (for listed node[s]). Please run 'show statistics service like err' for details.",
"Service errors count anomaly check.");
/* High-water-mark breach check. */
e = select "hwm_breached", "hwm-breached" from NAMESPACE.STATISTICS;
e = group by CLUSTER, NAMESPACE e;
r = do e == False;
ASSERT(r, True, "Namespace HWM breached.", "OPERATIONS", WARNING,
"Listed namespace[s] show HWM breached for memory or Disks.",
"Namespace HWM breach check.");
/*
Following query collects master_objects, prole_objects and replication_factor, and computes proles for one replication (prole_objects/(replication_factor-1)).
After that it find out master and prole distribution is in correct range with each other or not,
this last result will 'AND' with replication_enabled and migration_in_progress bools to avoid wrong assert failure
*/
m = select "master_objects" as "cnt", "master-objects" as "cnt" from NAMESPACE.STATISTICS;
p = select "prole_objects" as "cnt", "prole-objects" as "cnt" from NAMESPACE.STATISTICS;
r = select "effective_replication_factor", "repl-factor" from NAMESPACE.STATISTICS;
mg = select "migrate_rx_partitions_active", "migrate_progress_recv", "migrate-rx-partitions-active" from NAMESPACE.STATISTICS;
mt = group by NAMESPACE do SUM(m) save as "master_objects";
pt = group by NAMESPACE do SUM(p);
r = group by NAMESPACE do MAX(r);
mg = group by NAMESPACE do MAX(mg);
no_migration = do mg == 0;
replication_enabled = do r > 1;
r = do r - 1;
pt = do pt / r save as "unique prole_objects";
discounted_pt = do 95 %% pt save as "95% of unique prole_objects";
d = do discounted_pt > mt;
d = do d && replication_enabled;
d = do d && no_migration;
ASSERT(d, False, "Skewed namespace data distribution, prole objects exceed master objects by > 5%.", "DATA", INFO,
"Listed namespace[s] show abnormal object distribution. It may not be an issue if migrations are in progress. Please run 'show statistics namespace like object' for actual counts.",
"Namespace data distribution check (prole objects exceed master objects by > 5%).");
discounted_mt = do 95 %% mt save as "95% of master_objects";
d = group by NAMESPACE do discounted_mt > pt;
d = do d && replication_enabled;
d = do d && no_migration;
ASSERT(d, False, "Skewed namespace data distribution, master objects exceed prole objects by > 5%.", "DATA", INFO,
"Listed namespace[s] show abnormal object distribution. It may not be an issue if migrations are in progress. Please run 'show statistics namespace like object' for actual counts.",
"Namespace data distribution check (master objects exceed prole objects by > 5%).");
/* Per-set consistency and skew checks. */
s = select "set-delete", "deleting" as "set-delete" from SET save;
r = group by CLUSTER, NAMESPACE, SET do NO_MATCH(s, ==, MAJORITY) save;
ASSERT(r, False, "Different set delete status.", "OPERATIONS", INFO,
"Listed set[s] have different set delete status across multiple nodes in cluster. This is non-issue if set-delete is being performed. Nodes reset the status asynchronously. Please check if nsup is still delete data for the set.",
"Set delete status check.");
s = select like ("disable-eviction") from SET save;
r = group by CLUSTER, NAMESPACE, SET do NO_MATCH(s, ==, MAJORITY) save;
ASSERT(r, False, "Different set eviction configuration.", "OPERATIONS", WARNING,
"Listed set[s] have different eviction setting across multiple nodes in cluster. Please run 'show statistics set like disable-eviction' to check values. Possible operational misconfiguration.",
"Set eviction configuration difference check.");
s = select "n_objects", "objects" from SET save;
r = group by CLUSTER, NAMESPACE, SET do SD_ANOMALY(s, ==, 3);
ASSERT(r, False, "Skewed cluster set object count.", "ANOMALY", WARNING,
"Listed set[s] have skewed object distribution. Please run 'show statistics set like object' to check counts. It may be non-issue if cluster is undergoing migrations.",
"Set object count anomaly check.");
/* XDR < 5 */
SET CONSTRAINT VERSION < 5.0;
s = select like ("set-enable-xdr") from SET save;
r = group by CLUSTER, NAMESPACE, SET do NO_MATCH(s, ==, MAJORITY) save;
ASSERT(r, False, "Different set xdr configuration.", "OPERATIONS", WARNING,
"Listed set[s] have different XDR replication setting across multiple nodes in cluster. Please run 'show statistics set like set-enable-xdr' to check values. Possible operational misconfiguration.",
"Set xdr configuration difference check.");
/* Cross-node XDR config/statistics checks (apply while version constraint < 5.0 holds). */
s = select * from XDR.CONFIG save;
r = GROUP by CLUSTER, KEY do NO_MATCH(s, ==, MAJORITY) save;
ASSERT(r, False, "Different XDR configurations.", "OPERATIONS", WARNING,
"Listed XDR configuration[s] are different across multiple nodes in cluster. Please run 'show config xdr diff' to get difference. Possible operational misconfiguration.",
"XDR configurations difference check.");
s = select * from XDR.STATISTICS save;
u = select "uptime" from SERVICE.STATISTICS;
u = group by CLUSTER, NODE do MAX(u);
s = do s / u;
r = group by CLUSTER, KEY do SD_ANOMALY(s, ==, 3);
ASSERT(r, False, "Skewed cluster XDR statistics.", "ANOMALY", WARNING,
"Listed XDR statistic[s] show skew for the listed node[s]. It may or may not be an issue depending on the statistic type.",
"XDR statistics anomaly check.");
s = select * from DC.STATISTICS ignore "dc_size", "dc_state" save;
u = select "uptime" from SERVICE.STATISTICS;
u = group by CLUSTER, NODE do MAX(u);
s = do s / u on common;
r = group by CLUSTER, DC, KEY do SD_ANOMALY(s, ==, 3);
ASSERT(r, False, "Skewed cluster remote DC statistics.", "ANOMALY", WARNING,
"Listed DC statistic[s] show skew for the listed node[s]. Please run 'show statistics dc' to get all DC stats. May be non-issue if remote Data center connectivity behavior for nodes is not same.",
"Remote DC statistics anomaly check.");
/*
Following xdr queries are example of assert level check condition. We are considering assert only if provided condition is true (at least for one key).
Also we use same condition variable to filter keys for output. So we are using group by (CLUSTER, NODE), it makes condition variable values matching with
assert input data structure, only exceptions are data which grouped by DC, in that case key filtration will not be possible.
*/
xdr_enabled = select "enable-xdr" from XDR.CONFIG;
xdr_enabled = group by CLUSTER, NODE do OR(xdr_enabled);
cluster_xdr_enabled = group by CLUSTER do OR(xdr_enabled);
s = select "xdr-dc-state", "dc_state" from DC.STATISTICS save;
r = group by CLUSTER, DC do NO_MATCH(s, ==, MAJORITY) save;
ASSERT(r, False, "Different remote DC states.", "OPERATIONS", WARNING,
"Listed DC[s] have a different remote DC visibility. Please run 'show statistics dc like state' to see DC state. Possible network issue between data centers.",
"Remote DC state check.",
xdr_enabled);
s = select "dc_size" from DC.STATISTICS save;
r = group by CLUSTER, DC do NO_MATCH(s, ==, MAJORITY) save;
ASSERT(r, False, "Different remote DC sizes.", "OPERATIONS", WARNING,
"Listed DC[s] have a different remote DC size. Please run 'show statistics dc like size' to see DC size. Possible network issue between data centers.",
"Remote DC size check.");
/* Digest-log space and XDR ship-lag checks. */
s = select "free-dlog-pct", "dlog_free_pct", "free_dlog_pct" from XDR save;
r = do s < 95;
ASSERT(r, False, "Low XDR free digest log space.", "OPERATIONS", INFO,
"Listed node[s] have lower than ideal (95%) free digest log space. Please run 'show statistics xdr like free' to see digest log space. Probable cause - low XDR throughput or a failed node processing in progress.",
"XDR free digest log space check.",
xdr_enabled);
r = group by CLUSTER do SD_ANOMALY(s, ==, 3);
ASSERT(r, False, "Skewed cluster XDR free digest log space.", "ANOMALY", WARNING,
"Listed node[s] have different digest log free size pattern. Please run 'show statistics xdr like free' to see digest log space. May not be an issue if the nodes are newly added or have been restarted with noresume or if remote Datacenter connectivity behavior differs for nodes.",
"XDR free digest log space anomaly check.",
cluster_xdr_enabled);
/* Needs normalization but not sure on what ?? */
s = select "timediff_lastship_cur_secs", "xdr_timelag" from XDR.STATISTICS save;
r = do s > 10;
ASSERT(r, False, "High XDR shipping lag (> 10s).", "PERFORMANCE", WARNING,
"Listed node[s] have higher than healthy ( > 10 sec) ship lag to remote data center. | |
app_service)
if browser_type is not None:
pulumi.set(__self__, "browser_type", browser_type)
if browser_version is not None:
pulumi.set(__self__, "browser_version", browser_version)
if case_insensitive is not None:
pulumi.set(__self__, "case_insensitive", case_insensitive)
if case_sensitive is not None:
pulumi.set(__self__, "case_sensitive", case_sensitive)
if cipher is not None:
pulumi.set(__self__, "cipher", cipher)
if cipher_bits is not None:
pulumi.set(__self__, "cipher_bits", cipher_bits)
if client_ssl is not None:
pulumi.set(__self__, "client_ssl", client_ssl)
if code is not None:
pulumi.set(__self__, "code", code)
if common_name is not None:
pulumi.set(__self__, "common_name", common_name)
if contains is not None:
pulumi.set(__self__, "contains", contains)
if continent is not None:
pulumi.set(__self__, "continent", continent)
if country_code is not None:
pulumi.set(__self__, "country_code", country_code)
if country_name is not None:
pulumi.set(__self__, "country_name", country_name)
if cpu_usage is not None:
pulumi.set(__self__, "cpu_usage", cpu_usage)
if device_make is not None:
pulumi.set(__self__, "device_make", device_make)
if device_model is not None:
pulumi.set(__self__, "device_model", device_model)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if ends_with is not None:
pulumi.set(__self__, "ends_with", ends_with)
if equals is not None:
pulumi.set(__self__, "equals", equals)
if expiry is not None:
pulumi.set(__self__, "expiry", expiry)
if extension is not None:
pulumi.set(__self__, "extension", extension)
if external is not None:
pulumi.set(__self__, "external", external)
if geoip is not None:
pulumi.set(__self__, "geoip", geoip)
if greater is not None:
pulumi.set(__self__, "greater", greater)
if greater_or_equal is not None:
pulumi.set(__self__, "greater_or_equal", greater_or_equal)
if host is not None:
pulumi.set(__self__, "host", host)
if http_basic_auth is not None:
pulumi.set(__self__, "http_basic_auth", http_basic_auth)
if http_cookie is not None:
pulumi.set(__self__, "http_cookie", http_cookie)
if http_header is not None:
pulumi.set(__self__, "http_header", http_header)
if http_host is not None:
pulumi.set(__self__, "http_host", http_host)
if http_method is not None:
pulumi.set(__self__, "http_method", http_method)
if http_referer is not None:
pulumi.set(__self__, "http_referer", http_referer)
if http_set_cookie is not None:
pulumi.set(__self__, "http_set_cookie", http_set_cookie)
if http_status is not None:
pulumi.set(__self__, "http_status", http_status)
if http_uri is not None:
pulumi.set(__self__, "http_uri", http_uri)
if http_user_agent is not None:
pulumi.set(__self__, "http_user_agent", http_user_agent)
if http_version is not None:
pulumi.set(__self__, "http_version", http_version)
if index is not None:
pulumi.set(__self__, "index", index)
if internal is not None:
pulumi.set(__self__, "internal", internal)
if isp is not None:
pulumi.set(__self__, "isp", isp)
if last15secs is not None:
pulumi.set(__self__, "last15secs", last15secs)
if last1min is not None:
pulumi.set(__self__, "last1min", last1min)
if last5mins is not None:
pulumi.set(__self__, "last5mins", last5mins)
if less is not None:
pulumi.set(__self__, "less", less)
if less_or_equal is not None:
pulumi.set(__self__, "less_or_equal", less_or_equal)
if local is not None:
pulumi.set(__self__, "local", local)
if major is not None:
pulumi.set(__self__, "major", major)
if matches is not None:
pulumi.set(__self__, "matches", matches)
if minor is not None:
pulumi.set(__self__, "minor", minor)
if missing is not None:
pulumi.set(__self__, "missing", missing)
if mss is not None:
pulumi.set(__self__, "mss", mss)
if not_ is not None:
pulumi.set(__self__, "not_", not_)
if org is not None:
pulumi.set(__self__, "org", org)
if password is not None:
pulumi.set(__self__, "password", password)
if path is not None:
pulumi.set(__self__, "path", path)
if path_segment is not None:
pulumi.set(__self__, "path_segment", path_segment)
if port is not None:
pulumi.set(__self__, "port", port)
if present is not None:
pulumi.set(__self__, "present", present)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if query_parameter is not None:
pulumi.set(__self__, "query_parameter", query_parameter)
if query_string is not None:
pulumi.set(__self__, "query_string", query_string)
if region_code is not None:
pulumi.set(__self__, "region_code", region_code)
if region_name is not None:
pulumi.set(__self__, "region_name", region_name)
if remote is not None:
pulumi.set(__self__, "remote", remote)
if request is not None:
pulumi.set(__self__, "request", request)
if response is not None:
pulumi.set(__self__, "response", response)
if route_domain is not None:
pulumi.set(__self__, "route_domain", route_domain)
if rtt is not None:
pulumi.set(__self__, "rtt", rtt)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
if server_name is not None:
pulumi.set(__self__, "server_name", server_name)
if ssl_cert is not None:
pulumi.set(__self__, "ssl_cert", ssl_cert)
if ssl_client_hello is not None:
pulumi.set(__self__, "ssl_client_hello", ssl_client_hello)
if ssl_extension is not None:
pulumi.set(__self__, "ssl_extension", ssl_extension)
if ssl_server_handshake is not None:
pulumi.set(__self__, "ssl_server_handshake", ssl_server_handshake)
if ssl_server_hello is not None:
pulumi.set(__self__, "ssl_server_hello", ssl_server_hello)
if starts_with is not None:
pulumi.set(__self__, "starts_with", starts_with)
if tcp is not None:
pulumi.set(__self__, "tcp", tcp)
if text is not None:
pulumi.set(__self__, "text", text)
if tm_name is not None:
pulumi.set(__self__, "tm_name", tm_name)
if unnamed_query_parameter is not None:
pulumi.set(__self__, "unnamed_query_parameter", unnamed_query_parameter)
if user_agent_token is not None:
pulumi.set(__self__, "user_agent_token", user_agent_token)
if username is not None:
pulumi.set(__self__, "username", username)
if value is not None:
pulumi.set(__self__, "value", value)
if values is not None:
pulumi.set(__self__, "values", values)
if version is not None:
pulumi.set(__self__, "version", version)
if vlan is not None:
pulumi.set(__self__, "vlan", vlan)
if vlan_id is not None:
pulumi.set(__self__, "vlan_id", vlan_id)
    @property
    @pulumi.getter
    def address(self) -> Optional[bool]:
        """The configured ``address`` value, if any."""
        return pulumi.get(self, "address")
    @property
    @pulumi.getter
    def all(self) -> Optional[bool]:
        """The configured ``all`` value, if any."""
        return pulumi.get(self, "all")
    @property
    @pulumi.getter(name="appService")
    def app_service(self) -> Optional[str]:
        """The configured ``app_service`` value (API name ``appService``), if any."""
        return pulumi.get(self, "app_service")
    @property
    @pulumi.getter(name="browserType")
    def browser_type(self) -> Optional[bool]:
        """The configured ``browser_type`` value (API name ``browserType``), if any."""
        return pulumi.get(self, "browser_type")
    @property
    @pulumi.getter(name="browserVersion")
    def browser_version(self) -> Optional[bool]:
        """The configured ``browser_version`` value (API name ``browserVersion``), if any."""
        return pulumi.get(self, "browser_version")
    @property
    @pulumi.getter(name="caseInsensitive")
    def case_insensitive(self) -> Optional[bool]:
        """The configured ``case_insensitive`` value (API name ``caseInsensitive``), if any."""
        return pulumi.get(self, "case_insensitive")
    @property
    @pulumi.getter(name="caseSensitive")
    def case_sensitive(self) -> Optional[bool]:
        """The configured ``case_sensitive`` value (API name ``caseSensitive``), if any."""
        return pulumi.get(self, "case_sensitive")
    @property
    @pulumi.getter
    def cipher(self) -> Optional[bool]:
        """The configured ``cipher`` value, if any."""
        return pulumi.get(self, "cipher")
    @property
    @pulumi.getter(name="cipherBits")
    def cipher_bits(self) -> Optional[bool]:
        """The configured ``cipher_bits`` value (API name ``cipherBits``), if any."""
        return pulumi.get(self, "cipher_bits")
    @property
    @pulumi.getter(name="clientSsl")
    def client_ssl(self) -> Optional[bool]:
        """The configured ``client_ssl`` value (API name ``clientSsl``), if any."""
        return pulumi.get(self, "client_ssl")
    @property
    @pulumi.getter
    def code(self) -> Optional[bool]:
        """The configured ``code`` value, if any."""
        return pulumi.get(self, "code")
    @property
    @pulumi.getter(name="commonName")
    def common_name(self) -> Optional[bool]:
        """The configured ``common_name`` value (API name ``commonName``), if any."""
        return pulumi.get(self, "common_name")
    @property
    @pulumi.getter
    def contains(self) -> Optional[bool]:
        """The configured ``contains`` value, if any."""
        return pulumi.get(self, "contains")
    @property
    @pulumi.getter
    def continent(self) -> Optional[bool]:
        """The configured ``continent`` value, if any."""
        return pulumi.get(self, "continent")
    @property
    @pulumi.getter(name="countryCode")
    def country_code(self) -> Optional[bool]:
        """The configured ``country_code`` value (API name ``countryCode``), if any."""
        return pulumi.get(self, "country_code")
    @property
    @pulumi.getter(name="countryName")
    def country_name(self) -> Optional[bool]:
        """The configured ``country_name`` value (API name ``countryName``), if any."""
        return pulumi.get(self, "country_name")
    @property
    @pulumi.getter(name="cpuUsage")
    def cpu_usage(self) -> Optional[bool]:
        """The configured ``cpu_usage`` value (API name ``cpuUsage``), if any."""
        return pulumi.get(self, "cpu_usage")
    @property
    @pulumi.getter(name="deviceMake")
    def device_make(self) -> Optional[bool]:
        """The configured ``device_make`` value (API name ``deviceMake``), if any."""
        return pulumi.get(self, "device_make")
    @property
    @pulumi.getter(name="deviceModel")
    def device_model(self) -> Optional[bool]:
        """The configured ``device_model`` value (API name ``deviceModel``), if any."""
        return pulumi.get(self, "device_model")
    @property
    @pulumi.getter
    def domain(self) -> Optional[bool]:
        """The configured ``domain`` value, if any."""
        return pulumi.get(self, "domain")
    @property
    @pulumi.getter(name="endsWith")
    def ends_with(self) -> Optional[bool]:
        """The configured ``ends_with`` value (API name ``endsWith``), if any."""
        return pulumi.get(self, "ends_with")
    @property
    @pulumi.getter
    def equals(self) -> Optional[bool]:
        """The configured ``equals`` value, if any."""
        return pulumi.get(self, "equals")
    @property
    @pulumi.getter
    def expiry(self) -> Optional[bool]:
        """The configured ``expiry`` value, if any."""
        return pulumi.get(self, "expiry")
    @property
    @pulumi.getter
    def extension(self) -> Optional[bool]:
        """The configured ``extension`` value, if any."""
        return pulumi.get(self, "extension")
    @property
    @pulumi.getter
    def external(self) -> Optional[bool]:
        """The configured ``external`` value, if any."""
        return pulumi.get(self, "external")
    @property
    @pulumi.getter
    def geoip(self) -> Optional[bool]:
        """The configured ``geoip`` value, if any."""
        return pulumi.get(self, "geoip")
    @property
    @pulumi.getter
    def greater(self) -> Optional[bool]:
        """The configured ``greater`` value, if any."""
        return pulumi.get(self, "greater")
    @property
    @pulumi.getter(name="greaterOrEqual")
    def greater_or_equal(self) -> Optional[bool]:
        """The configured ``greater_or_equal`` value (API name ``greaterOrEqual``), if any."""
        return pulumi.get(self, "greater_or_equal")
    @property
    @pulumi.getter
    def host(self) -> Optional[bool]:
        """The configured ``host`` value, if any."""
        return pulumi.get(self, "host")
    @property
    @pulumi.getter(name="httpBasicAuth")
    def http_basic_auth(self) -> Optional[bool]:
        """The configured ``http_basic_auth`` value (API name ``httpBasicAuth``), if any."""
        return pulumi.get(self, "http_basic_auth")
    @property
    @pulumi.getter(name="httpCookie")
    def http_cookie(self) -> Optional[bool]:
        """The configured ``http_cookie`` value (API name ``httpCookie``), if any."""
        return pulumi.get(self, "http_cookie")
    @property
    @pulumi.getter(name="httpHeader")
    def http_header(self) -> Optional[bool]:
        """The configured ``http_header`` value (API name ``httpHeader``), if any."""
        return pulumi.get(self, "http_header")
    @property
    @pulumi.getter(name="httpHost")
    def http_host(self) -> Optional[bool]:
        """The configured ``http_host`` value (API name ``httpHost``), if any."""
        return pulumi.get(self, "http_host")
    @property
    @pulumi.getter(name="httpMethod")
    def http_method(self) -> Optional[bool]:
        """The configured ``http_method`` value (API name ``httpMethod``), if any."""
        return pulumi.get(self, "http_method")
    @property
    @pulumi.getter(name="httpReferer")
    def http_referer(self) -> Optional[bool]:
        """The configured ``http_referer`` value (API name ``httpReferer``), if any."""
        return pulumi.get(self, "http_referer")
    @property
    @pulumi.getter(name="httpSetCookie")
    def http_set_cookie(self) -> Optional[bool]:
        """The configured ``http_set_cookie`` value (API name ``httpSetCookie``), if any."""
        return pulumi.get(self, "http_set_cookie")
    @property
    @pulumi.getter(name="httpStatus")
    def http_status(self) -> Optional[bool]:
        """The configured ``http_status`` value (API name ``httpStatus``), if any."""
        return pulumi.get(self, "http_status")
    @property
    @pulumi.getter(name="httpUri")
    def http_uri(self) -> Optional[bool]:
        """The configured ``http_uri`` value (API name ``httpUri``), if any."""
        return pulumi.get(self, "http_uri")
    @property
    @pulumi.getter(name="httpUserAgent")
    def http_user_agent(self) -> Optional[bool]:
        """The configured ``http_user_agent`` value (API name ``httpUserAgent``), if any."""
        return pulumi.get(self, "http_user_agent")
    @property
    @pulumi.getter(name="httpVersion")
    def http_version(self) -> Optional[bool]:
        """The configured ``http_version`` value (API name ``httpVersion``), if any."""
        return pulumi.get(self, "http_version")
    @property
    @pulumi.getter
    def index(self) -> Optional[int]:
        """The configured ``index`` value, if any. Note: this one is an int, not a bool."""
        return pulumi.get(self, "index")
    @property
    @pulumi.getter
    def internal(self) -> Optional[bool]:
        """The configured ``internal`` value, if any."""
        return pulumi.get(self, "internal")
    @property
    @pulumi.getter
    def isp(self) -> Optional[bool]:
        """The configured ``isp`` value, if any."""
        return pulumi.get(self, "isp")
    @property
    @pulumi.getter
    def last15secs(self) -> Optional[bool]:
        """The configured ``last15secs`` value, if any."""
        return pulumi.get(self, "last15secs")
    @property
    @pulumi.getter
    def last1min(self) -> Optional[bool]:
        """The configured ``last1min`` value, if any."""
        return pulumi.get(self, "last1min")
    @property
    @pulumi.getter
    def last5mins(self) -> Optional[bool]:
        """The configured ``last5mins`` value, if any."""
        return pulumi.get(self, "last5mins")
    @property
    @pulumi.getter
    def less(self) -> Optional[bool]:
        """The configured ``less`` value, if any."""
        return pulumi.get(self, "less")
    @property
    @pulumi.getter(name="lessOrEqual")
    def less_or_equal(self) -> Optional[bool]:
        """The configured ``less_or_equal`` value (API name ``lessOrEqual``), if any."""
        return pulumi.get(self, "less_or_equal")
    @property
    @pulumi.getter
    def local(self) -> Optional[bool]:
        """The configured ``local`` value, if any."""
        return pulumi.get(self, "local")
    @property
    @pulumi.getter
    def major(self) -> Optional[bool]:
        """The configured ``major`` value, if any."""
        return pulumi.get(self, "major")
    @property
    @pulumi.getter
    def matches(self) -> Optional[bool]:
        """The configured ``matches`` value, if any."""
        return pulumi.get(self, "matches")
    @property
    @pulumi.getter
    def minor(self) -> Optional[bool]:
        """The configured ``minor`` value, if any."""
        return pulumi.get(self, "minor")
    @property
    @pulumi.getter
    def missing(self) -> Optional[bool]:
        """The configured ``missing`` value, if any."""
        return pulumi.get(self, "missing")
    @property
    @pulumi.getter
    def mss(self) -> Optional[bool]:
        """The configured ``mss`` value, if any."""
        return pulumi.get(self, "mss")
    @property
    @pulumi.getter(name="not")
    def not_(self) -> Optional[bool]:
        """The configured ``not`` value (trailing underscore avoids the Python keyword)."""
        return pulumi.get(self, "not_")
    @property
    @pulumi.getter
    def org(self) -> Optional[bool]:
        """The configured ``org`` value, if any."""
        return pulumi.get(self, "org")
    @property
    @pulumi.getter
    def password(self) -> Optional[bool]:
        """The configured ``password`` value, if any."""
        return pulumi.get(self, "password")
    @property
    @pulumi.getter
    def path(self) -> Optional[bool]:
        """The configured ``path`` value, if any."""
        return pulumi.get(self, "path")
    @property
    @pulumi.getter(name="pathSegment")
    def path_segment(self) -> Optional[bool]:
        """The configured ``path_segment`` value (API name ``pathSegment``), if any."""
        return pulumi.get(self, "path_segment")
    @property
    @pulumi.getter
    def port(self) -> Optional[bool]:
        """The configured ``port`` value, if any."""
        return pulumi.get(self, "port")
    @property
    @pulumi.getter
    def present(self) -> Optional[bool]:
        """The configured ``present`` value, if any."""
        return pulumi.get(self, "present")
    @property
    @pulumi.getter
    def protocol(self) -> Optional[bool]:
        """The configured ``protocol`` value, if any."""
        return pulumi.get(self, "protocol")
    @property
    @pulumi.getter(name="queryParameter")
    def query_parameter(self) -> Optional[bool]:
        """The configured ``query_parameter`` value (API name ``queryParameter``), if any."""
        return pulumi.get(self, "query_parameter")
    @property
    @pulumi.getter(name="queryString")
    def query_string(self) -> Optional[bool]:
        """The configured ``query_string`` value (API name ``queryString``), if any."""
        return pulumi.get(self, "query_string")
@property
@pulumi.getter(name="regionCode")
def region_code(self) -> Optional[bool]:
return | |
<reponame>cjoakim/azure-cosmos-graph<gh_stars>1-10
"""
Usage:
python wrangle.py scan_title_basics
Options:
-h --help Show this screen.
--version Show version.
"""
# <NAME>, Microsoft, 2018/03/11
import csv
import json
import os
import sys
import time
import traceback
from docopt import docopt
from pysrc.joakim import config
from pysrc.joakim import values
VERSION='2018/03/11a'
# IMDb-style identifiers used as spot-check values in the wrangling steps
# ('tt...' title ids, 'nm...' name ids); e.g. FOOTLOOSE is logged when selected.
FOOTLOOSE='tt0087277'
PRETTYWOMAN='tt0100405'
KEVINBACON='nm0000102'
JULIAROBERTS='nm0000210'
class Main:
    def __init__(self):
        """Capture the start time, CLI args, config, and curated-favorites helpers."""
        self.start_time = time.time()  # used for elapsed-time reporting
        self.args = sys.argv
        self.c = config.Config()  # filenames and extraction thresholds
        self.favorites = values.Favorites()  # curated actor/movie id lists
        self.roles = self.selected_roles()  # NOTE(review): defined elsewhere in this class
def execute(self):
if len(sys.argv) > 1:
f = sys.argv[1].lower()
print('function: {}'.format(f))
if f == 'extract_top_ratings':
self.extract_top_ratings()
elif f == 'identify_candidate_movies':
self.identify_candidate_movies()
elif f == 'extract_movies':
self.extract_movies()
elif f == 'extract_principals':
self.extract_principals()
elif f == 'extract_people':
self.extract_people()
elif f == 'derive_people_edges':
self.derive_people_edges()
else:
self.print_options('Error: invalid function: {}'.format(f))
else:
self.print_options('Error: no function argument provided.')
def extract_top_ratings(self):
# Identify and extract the top (movie) ratings with at least n-votes
infile = self.c.data_filename_raw('title.ratings.tsv')
outfile = self.c.top_ratings_csv_filename()
min_votes = self.c.extract_min_votes()
min_rating = self.c.extract_min_rating()
selected = dict()
row_count = 0
with open(infile) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
for row in reader:
# OrderedDict([('tconst', 'tt0087277'), ('averageRating', '6.5'), ('numVotes', '58820')])
try:
row_count = row_count + 1
votes = int(row['numVotes'])
rating = float(row['averageRating'])
id = row['tconst']
if votes >= min_votes:
if rating >= min_rating:
selected[id] = votes
if id == FOOTLOOSE:
print('FOOTLOOSE SELECTED: {}'.format(row))
except:
print('exception on row {} {}'.format(row_count, row))
print("extract_top_ratings - selected count: {}".format(len(selected.keys())))
with open(outfile, "w", newline="\n") as out:
out.write("id|votes\n")
for id in sorted(selected.keys()):
votes = selected[id]
line = '{}|{}'.format(id.strip(), votes)
out.write(line + "\n")
print('file written: {}'.format(outfile))
elapsed_time = time.time() - self.start_time
print('lines_read: {} elapsed: {}'.format(row_count, elapsed_time))
# lines_read: 4832632 elapsed: 25.33212375640869
def identify_candidate_movies(self):
infile = self.c.data_filename_raw('title.principals.tsv')
outfile = self.c.candidate_movies_json_filename()
print('identify_candidate_movies; infile: {}'.format(infile))
print('identify_candidate_movies; outfile: {}'.format(outfile))
time.sleep(3)
actors = self.favorites.actors_for_candidate_movies()
row_count = 0
required = dict()
curr_movie_mid = ''
curr_movie_actors = list()
with open(infile) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
for row in reader:
# OrderedDict([('tconst', 'tt0000032'), ('ordering', '1'), ('nconst', 'nm3692479'),
# ('category', 'actress'), ('job', '\\N'), ('characters', '["The dancer"]')])
try:
row_count = row_count + 1
if row_count < 10:
print(row)
mid = row['tconst']
nid = row['nconst']
role = row['category']
if mid != curr_movie_mid:
if (len(curr_movie_actors)) > 0:
required[curr_movie_mid] = curr_movie_actors
print('{} {}'.format(curr_movie_mid, curr_movie_actors))
curr_movie_mid = mid
curr_movie_actors = list()
if nid in actors:
person = dict()
person['id'] = nid
person['name'] = actors[nid]
curr_movie_actors.append(person)
except:
print('exception on row {} {}'.format(row_count, row))
traceback.print_exc()
print('required count: {}'.format(len(required)))
jstr = json.dumps(required, sort_keys=True, indent=2)
with open(outfile, 'wt') as f:
f.write(jstr)
print('file written: {}'.format(outfile))
elapsed_time = time.time() - self.start_time
print('lines_read: {} elapsed: {}'.format(row_count, elapsed_time))
# lines_read: 27211583 elapsed: 130.86662912368774
def extract_movies(self):
infile = self.c.data_filename_raw('title.basics.tsv')
outfile1 = self.c.movies_csv_filename()
outfile2 = self.c.movies_json_filename()
selected = dict()
row_count = 0
#top_rated = self.load_top_ratings()
candidate_movies = self.load_candidate_movies()
with open(infile) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
for row in reader:
# OrderedDict([('tconst', 'tt0000009'), ('titleType', 'movie'), ('primaryTitle', '<NAME>'),
# ('originalTitle', '<NAME>'), ('isAdult', '0'), ('startYear', '1894'), ('endYear', '\\N'),
# ('runtimeMinutes', '45'), ('genres', 'Romance')])
try:
row_count = row_count + 1
id = row['tconst']
if id in candidate_movies: # top_rated:
if 'movie' == row['titleType']:
if '0' == row['isAdult']:
title = row['primaryTitle']
try:
ystr = row['startYear']
yint = int(ystr)
if yint > 1983:
selected[id] = title
print('selected top_rated item {} {} {}'.format(id, title, ystr))
except:
pass
except:
print('exception on row {} {}'.format(row_count, row))
print("extract_movies - selected count: {}".format(len(selected.keys())))
with open(outfile1, "w", newline="\n") as out:
out.write("id|title\n")
for id in sorted(selected.keys()):
title = selected[id]
line = '{}|{}'.format(id.strip(), title.strip())
out.write(line + "\n")
print('file written: {}'.format(outfile1))
jstr = json.dumps(selected, sort_keys=True, indent=2)
with open(outfile2, 'wt') as f:
f.write(jstr)
print('file written: {}'.format(outfile2))
def extract_principals(self):
infile = self.c.data_filename_raw('title.principals.tsv')
outfile1 = self.c.principals_csv_filename()
principals = list()
row_count = 0
movies = self.load_movies()
with open(infile) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
for row in reader:
# OrderedDict([('tconst', 'tt0000032'), ('ordering', '1'), ('nconst', 'nm3692479'),
# ('category', 'actress'), ('job', '\\N'), ('characters', '["The dancer"]')])
try:
row_count = row_count + 1
# if row_count < 10:
# print(row)
id = row['tconst']
if id in movies:
role = row['category']
if role in self.roles:
nid = row['nconst']
line = '{}|{}|{}'.format(id, nid, role)
principals.append(line)
except:
print('exception on row {} {}'.format(row_count, row))
traceback.print_exc()
with open(outfile1, "w", newline="\n") as out:
out.write("id|nid|role\n")
for line in principals:
out.write(line + "\n")
print('file written: {}'.format(outfile1))
def movies_for_person(self, pid):
movie_ids = dict()
for p in self.principals_list:
if p[1] == pid:
mid = p[0]
movie_ids[mid] = pid
return sorted(movie_ids.keys())
    def extract_people(self):
        """Extract the people referenced by the principals CSV.

        Reads the previously written principals CSV (movie|person|role) and
        the raw IMDb 'name.basics.tsv', then writes one pipe-delimited line
        and one JSON object per principal person.  Also populates
        self.principals_list as a side effect (used by movies_for_person).
        """
        # wc -l name.basics.tsv -> 8449645 name.basics.tsv
        infile1 = self.c.principals_csv_filename()
        infile2 = self.c.data_filename_raw('name.basics.tsv')
        outfile1 = self.c.people_csv_filename()
        outfile2 = self.c.people_json_filename()
        self.principals_list = list()
        people_list = list()   # csv output lines
        people_dict = dict()   # json output objects, keyed by person id
        row_count = 0
        principal_ids = self.unique_principal_ids()
        movies = self.load_movies()
        with open(infile1, 'rt') as f:
            # infile1 looks like this:
            # id|nid|role
            # tt0086927|nm0000158|actor
            # NOTE(review): the header row is also appended as a triple;
            # harmless for the lookups below, but worth confirming.
            for idx, line in enumerate(f):
                self.principals_list.append(line.strip().split('|'))
        with open(infile2) as tsvfile:
            reader = csv.DictReader(tsvfile, dialect='excel-tab')
            for row in reader:
                # Example row:
                # OrderedDict([('nconst', 'nm0000001'), ('primaryName', '...'), ('birthYear', '1899'),
                #              ('deathYear', '1987'), ('primaryProfession', 'soundtrack,actor,miscellaneous'),
                #              ('knownForTitles', 'tt0043044,tt0050419,tt0053137,tt0072308')])
                # NOTE: the knownForTitles field is not fully populated in the IMDb data!
                try:
                    row_count = row_count + 1
                    if row_count < 10:
                        print(row)
                    nid = row['nconst']
                    if nid in principal_ids:
                        # create a csv line
                        # NOTE(review): known4 is fetched but unused; titles
                        # are derived from the principals data instead.
                        known4 = row['knownForTitles']
                        #titles = self.filter_titles(movies, known4)
                        titles = self.movies_for_person(nid)
                        tstr = ','.join(titles)
                        name = row['primaryName']
                        birth = row['birthYear']
                        prof = row['primaryProfession']
                        line = '{}|{}|{}|{}|{}'.format(nid, name, birth, tstr, prof)
                        people_list.append(line)
                        # also create a corresponding person Object for the JSON
                        person = {}
                        person['nid'] = nid
                        person['name'] = name
                        person['birth'] = birth
                        person['prof'] = prof
                        person['titles'] = titles
                        m = dict()  # movie id -> movie title, for this person
                        for id in titles:
                            mname = movies[id]
                            m[id] = mname
                        person['movies'] = m
                        people_dict[nid] = person
                except:
                    print('exception on row {} {}'.format(row_count, row))
                    traceback.print_exc()
        with open(outfile1, "w", newline="\n") as out:
            out.write("nid|name|birth|titles|profession\n")
            for line in people_list:
                out.write(line + "\n")
        print('file written: {}'.format(outfile1))
        jstr = json.dumps(people_dict, sort_keys=True, indent=2)
        with open(outfile2, 'wt') as f:
            f.write(jstr)
        print('file written: {}'.format(outfile2))
    def derive_people_edges(self):
        """Derive person<->person co-appearance edges from the principals CSV.

        Writes two JSON files: the principals dict (movie id -> title +
        people list) and the people-edges dict ('nid1:nid2' -> {movie_id: 0}),
        with each edge recorded under both key orderings.
        """
        # NOTE(review): infile1/infile2 are computed but unused; the same
        # files are re-opened below via self.c.* directly.
        infile1 = self.c.movies_json_filename()
        infile2 = self.c.people_json_filename()
        infile3 = self.c.principals_csv_filename()
        outfile1 = self.c.principals_json_filename()
        outfile2 = self.c.people_edges_json_filename()
        movies = json.load(open(self.c.movies_json_filename()))
        people = json.load(open(self.c.people_json_filename()))
        people_keys = sorted(people.keys())  # NOTE(review): unused
        print('movies: {}'.format(len(movies.keys())))
        print('people: {}'.format(len(people.keys())))
        principals, people_edges, row_count = dict(), dict(), 0
        # collect the principals dictionary, keyed by movie id, with a
        # dict as the value with title and list of people, like this:
        # "tt0087089": {
        #   "people": [
        #     {"id": "nm0000152", "name": "..."},
        #     {"id": "nm0002138", "name": "..."}
        #   ],
        #   "title": "The Cotton Club"
        # },
        with open(infile3) as csvfile:
            reader = csv.reader(csvfile, delimiter='|')
            for row in reader:
                row_count = row_count + 1
                if row_count > 1:  # skip the 'id|nid|role' header row
                    prin_obj, mid, pid = None, row[0], row[1]
                    if mid in principals:
                        prin_obj = principals[mid]
                    else:
                        prin_obj = dict()
                        prin_obj['title'] = movies[mid]
                        prin_obj['people'] = list()
                    pers_obj = dict()
                    pers_obj['id'] = pid
                    pers_obj['name'] = people[pid]['name']
                    prin_obj['people'].append(pers_obj)
                    principals[mid] = prin_obj
        jstr = json.dumps(principals, sort_keys=True, indent=2)
        with open(outfile1, 'wt') as f:
            f.write(jstr)
        print('file written: {}'.format(outfile1))
        for mid in sorted(principals.keys()):
            # NOTE(review): rebinds 'people' (the dict loaded above) to this
            # movie's people list; OK only because the loaded dict is not
            # needed past this point.
            people = principals[mid]['people']
            title = movies[mid]  # NOTE(review): assigned but unused
            for person1 in people:
                for person2 in people:
                    pid1 = person1['id']
                    pid2 = person2['id']
                    if pid1 != pid2:
                        # Record the movie under both 'a:b' and 'b:a' keys.
                        pair = sorted([pid1, pid2])
                        concat_key = '{}:{}'.format(pair[0], pair[1])
                        if concat_key in people_edges:
                            people_edges[concat_key][mid] = 0
                        else:
                            d = dict()
                            d[mid] = 0
                            people_edges[concat_key] = d
                        concat_key = '{}:{}'.format(pair[1], pair[0])
                        if concat_key in people_edges:
                            people_edges[concat_key][mid] = 0
                        else:
                            d = dict()
                            d[mid] = 0
                            people_edges[concat_key] = d
        jstr = json.dumps(people_edges, sort_keys=True, indent=2)
        with open(outfile2, 'wt') as f:
            f.write(jstr)
        print('file written: {}'.format(outfile2))
# private
def selected_roles(self):
# This is the range of roles; but we're only extracting a subset of these:
# actor,actress,animation_department,art_department,art_director,assistant,assistant_director,
# camera_department,casting_department,casting_director,cinematographer,composer,costume_department,
# costume_designer,director,editor,editorial_department,electrical_department,executive,legal,
# location_management,make_up_department,manager,miscellaneous,music_department,producer,
# production_department,production_designer,production_manager,publicist,script_department,
# set_decorator,sound_department,soundtrack,special_effects,stunts,talent_agent,
# transportation_department,visual_effects,writer
#return 'actor,actress,director,producer'.split(',')
return 'actor,actress'.split(',')
def filter_by_profession(self, prof):
professions = prof.split(',')
for p in professions:
if p in self.roles:
return True
return False
def filter_titles(self, movies, known_for):
titles = list()
for id in known_for.split(','):
if id in movies:
titles.append(id)
return ' '.join(titles).strip()
def load_candidate_movies(self):
infile = self.c.candidate_movies_json_filename()
with open(infile, 'r') as f:
return json.loads(f.read())
def load_top_ratings(self):
infile1 = self.c.top_ratings_csv_filename()
top_rated = dict()
row_count = 0
with | |
1
18 HT Pkt Hi HT 1 0
19 HT IPC Low HT 2 3
20 HT IPC Hi HT 3 2
21 CC4 Low SPI0 16 1
22 CC4 Hi SPI0 17 0
23 CC5 Low SPI0 18 1
24 CC5 Hi SPI0 19 0
'''
}
def test_golden_active_ipm(self):
self.device = Mock(**self.golden_output_active_ipm)
obj = ShowPlatformHardwareQfpBqsIpmMapping(device=self.device)
parsed_output = obj.parse(status='active', slot='0')
self.maxDiff = None
self.assertEqual(parsed_output, self.golden_parsed_output_active_ipm)
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowPlatformHardwareQfpBqsIpmMapping(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(status='active', slot='0')
class TestShowPlatformHardwareSerdesStatistics(unittest.TestCase):
    """Tests for the 'show platform hardware slot <slot> serdes statistics'
    parser (ShowPlatformHardwareSerdes)."""

    device = Device(name='aDevice')
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}

    # Expected structured result for the golden CLI output further below,
    # keyed by serdes link name.
    golden_parsed_output_serdes = {
        'link': {
            '0-Link A': {
                'from': {
                    'bytes': {
                        'bad': 0,
                        'dropped': 0,
                        'high': 53361379,
                        'looped': 0,
                        'low': 199330758,
                    },
                    'flow_ctrl_count': 3680,
                    'pkts': {
                        'bad': 0,
                        'dropped': 0,
                        'errored': 0,
                        'high': 63052,
                        'looped': 0,
                        'low': 2703601,
                    },
                    'qstat_count': 331199,
                },
                'to': {
                    'pkts': {
                        'high': 0,
                        'low': 2787636,
                    }
                }
            },
            '0-Link B': {
                'from': {
                    'bytes': {
                        'bad': 0,
                        'dropped': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 0,
                    },
                    'flow_ctrl_count': 3680,
                    'pkts': {
                        'bad': 0,
                        'dropped': 0,
                        'errored': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 0,
                    },
                    'qstat_count': 331199,
                },
                'to': {
                    'pkts': {
                        'high': 0,
                        'low': 0,
                    }
                }
            },
            '1-Link A': {
                'from': {
                    'bytes': {
                        'bad': 0,
                        'dropped': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 0,
                    },
                    'flow_ctrl_count': 3680,
                    'pkts': {
                        'bad': 0,
                        'dropped': 0,
                        'errored': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 0,
                    },
                    'qstat_count': 294400,
                },
                'to': {
                    'pkts': {
                        'high': 0,
                        'low': 0,
                    }
                }
            },
            '1-Link B': {
                'from': {
                    'bytes': {
                        'bad': 0,
                        'dropped': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 0,
                    },
                    'flow_ctrl_count': 3680,
                    'pkts': {
                        'bad': 0,
                        'dropped': 0,
                        'errored': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 0,
                    },
                    'qstat_count': 0,
                },
                'to': {
                    'pkts': {
                        'high': 0,
                        'low': 0,
                    }
                }
            },
            'F1-Link A': {
                'from': {
                    'bytes': {
                        'bad': 0,
                        'dropped': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 18648,
                    },
                    'flow_ctrl_count': 3680,
                    'pkts': {
                        'bad': 0,
                        'dropped': 0,
                        'errored': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 518,
                    },
                    'qstat_count': 0,
                },
                'to': {
                    'pkts': {
                        'high': 0,
                        'low': 518,
                    }
                }
            },
            'R0-Link A': {
                'from': {
                    'bytes': {
                        'bad': 0,
                        'dropped': 0,
                        'high': 1614284,
                        'looped': 0,
                        'low': 298734735,
                    },
                    'flow_ctrl_count': 3700,
                    'pkts': {
                        'bad': 0,
                        'dropped': 0,
                        'errored': 0,
                        'high': 19461,
                        'looped': 0,
                        'low': 2777099,
                    },
                    'qstat_count': 0,
                },
                'to': {
                    'pkts': {
                        'high': 1018101,
                        'low': 1719353,
                    }
                }
            },
            'R1-Link A': {
                'from': {
                    'bytes': {
                        'bad': 0,
                        'dropped': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 0,
                    },
                    'flow_ctrl_count': 3501,
                    'pkts': {
                        'bad': 0,
                        'dropped': 0,
                        'errored': 0,
                        'high': 0,
                        'looped': 0,
                        'low': 0,
                    },
                    'qstat_count': 0,
                },
                'to': {
                    'pkts': {
                        'high': 0,
                        'low': 0,
                    }
                }
            }
        }
    }

    # Raw CLI capture driving test_golden_serdes.
    # NOTE(review): the leading/column whitespace of this golden output must
    # match the device's real output for the parser's regexes — verify the
    # spacing against an actual capture.
    golden_output_serdes = {'execute.return_value': '''\
Router#show platform hardware slot F0 serdes statistics
Load for five secs: 22%/1%; one minute: 8%; five minutes: 9%
Time source is NTP, 07:42:08.304 EST Thu Sep 8 2016
From Slot R1-Link A
Pkts High: 0 Low: 0 Bad: 0 Dropped: 0
Bytes High: 0 Low: 0 Bad: 0 Dropped: 0
Pkts Looped: 0 Error: 0
Bytes Looped 0
Qstat count: 0 Flow ctrl count: 3501
To Slot R1-Link A
Pkts High: 0 Low: 0
From Slot R0-Link A
Pkts High: 19461 Low: 2777099 Bad: 0 Dropped: 0
Bytes High: 1614284 Low: 298734735 Bad: 0 Dropped: 0
Pkts Looped: 0 Error: 0
Bytes Looped 0
Qstat count: 0 Flow ctrl count: 3700
To Slot R0-Link A
Pkts High: 1018101 Low: 1719353
From Slot F1-Link A
Pkts High: 0 Low: 518 Bad: 0 Dropped: 0
Bytes High: 0 Low: 18648 Bad: 0 Dropped: 0
Pkts Looped: 0 Error: 0
Bytes Looped 0
Qstat count: 0 Flow ctrl count: 3680
To Slot F1-Link A
Pkts High: 0 Low: 518
From Slot 1-Link A
Pkts High: 0 Low: 0 Bad: 0 Dropped: 0
Bytes High: 0 Low: 0 Bad: 0 Dropped: 0
Pkts Looped: 0 Error: 0
Bytes Looped 0
Qstat count: 294400 Flow ctrl count: 3680
To Slot 1-Link A
Pkts High: 0 Low: 0
From Slot 0-Link A
Pkts High: 63052 Low: 2703601 Bad: 0 Dropped: 0
Bytes High: 53361379 Low: 199330758 Bad: 0 Dropped: 0
Pkts Looped: 0 Error: 0
Bytes Looped 0
Qstat count: 331199 Flow ctrl count: 3680
To Slot 0-Link A
Pkts High: 0 Low: 2787636
From Slot 0-Link B
Pkts High: 0 Low: 0 Bad: 0 Dropped: 0
Bytes High: 0 Low: 0 Bad: 0 Dropped: 0
Pkts Looped: 0 Error: 0
Bytes Looped 0
Qstat count: 331199 Flow ctrl count: 3680
To Slot 0-Link B
Pkts High: 0 Low: 0
From Slot 1-Link B
Pkts High: 0 Low: 0 Bad: 0 Dropped: 0
Bytes High: 0 Low: 0 Bad: 0 Dropped: 0
Pkts Looped: 0 Error: 0
Bytes Looped 0
Qstat count: 0 Flow ctrl count: 3680
To Slot 1-Link B
Pkts High: 0 Low: 0
'''
    }

    def test_golden_serdes(self):
        """Golden output parses into the expected structure."""
        self.device = Mock(**self.golden_output_serdes)
        obj = ShowPlatformHardwareSerdes(device=self.device)
        parsed_output = obj.parse(slot='0')
        self.maxDiff = None
        self.assertEqual(parsed_output, self.golden_parsed_output_serdes)

    def test_empty(self):
        """Empty output must raise SchemaEmptyParserError."""
        self.device1 = Mock(**self.empty_output)
        obj = ShowPlatformHardwareSerdes(device=self.device1)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse(slot='0')
class TestShowPlatformHardwareSerdesStatisticsInternal(unittest.TestCase):
    """Tests for the 'show platform hardware slot <slot> serdes statistics
    internal' parser (ShowPlatformHardwareSerdesInternal)."""

    device = Device(name='aDevice')
    # Empty CLI output: the parser must raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}

    # Expected structured result for the golden CLI output further below.
    golden_parsed_output_serdes_internal = {
        'link': {
            'Encryption Processor': {
                'errors': {
                    'rx_parity': 0,
                    'rx_process': 0,
                    'rx_schedule': 0,
                    'rx_statistics': 0,
                    'tx_process': 0,
                    'tx_schedule': 0,
                    'tx_statistics': 0,
                },
                'from': {
                    'bytes': {
                        'dropped': 0,
                        'errored': 0,
                        'total': 0},
                    'pkts': {
                        'dropped': 0,
                        'errored': 0,
                        'total': 0,
                    },
                },
                'local_rx_in_sync': True,
                'local_tx_in_sync': True,
                'remote_rx_in_sync': True,
                'remote_tx_in_sync': True,
                'to': {
                    'bytes': {
                        'dropped': 0,
                        'total': 0,
                    },
                    'pkts': {
                        'dropped': 0,
                        'total': 0,
                    },
                },
            },
            'Network-Processor-0': {
                'from': {
                    'bytes': {
                        'total': 7397920802,
                    },
                    'pkts': {
                        'total': 21259012,
                    },
                },
                'local_rx_in_sync': True,
                'local_tx_in_sync': True,
                'to': {
                    'bytes': {
                        'total': 7343838083,
                    },
                    'pkts': {
                        'total': 21763844,
                    },
                },
            },
        },
        # Per-subsystem exception counters; only 'cilink' has entries in
        # this golden capture.
        'serdes_exception_counts': {
            'c2w': {},
            'cfg': {},
            'cilink': {
                'link': {
                    '0': {
                        'chicoEvent': 5,
                        'msgEccError': 5,
                        'msgTypeError': 5,
                    },
                    '1': {
                        'chicoEvent': 1,
                        'msgEccError': 1,
                        'msgTypeError': 1,
                    },
                    '2': {
                        'chicoEvent': 3,
                        'msgEccError': 3,
                        'msgTypeError': 3,
                    },
                },
            },
            'edh-hi': {},
            'edh-lo': {},
            'edm': {},
            'eqs/fc': {},
            'idh-hi': {},
            'idh-lo': {},
            'idh-shared': {},
            'ilak': {},
            'isch': {},
            'pcie': {},
            'slb': {},
            'spi link': {},
        },
    }

    # Raw CLI capture driving test_golden.
    # NOTE(review): leading/column whitespace must match the real device
    # output for the parser's regexes — verify against an actual capture.
    golden_output_serdes_internal = {'execute.return_value': '''\
Router#show platform hardware slot F0 serdes statistics internal
Load for five secs: 5%/1%; one minute: 8%; five minutes: 9%
Time source is NTP, 07:42:13.752 EST Thu Sep 8 2016
Warning: Clear option may not clear all the counters
Network-Processor-0 Link:
Local TX in sync, Local RX in sync
From Network-Processor Packets: 21259012 Bytes: 7397920802
To Network-Processor Packets: 21763844 Bytes: 7343838083
Encryption Processor Link:
Local TX in sync, Local RX in sync
Remote TX in sync, Remote RX in sync
To Encryption Processor Packets: 0 Bytes: 0
Drops Packets: 0 Bytes: 0
From Encryption Processor Packets: 0 Bytes: 0
Drops Packets: 0 Bytes: 0
Errors Packets: 0 Bytes: 0
Errors:
RX/TX process: 0/0, RX/TX schedule: 0/0
RX/TX statistics: 0/0, RX parity: 0
Serdes Exception Counts:
spi link:
cilink:
link 0: msgTypeError: 5
link 0: msgEccError: 5
link 0: chicoEvent: 5
link 1: msgTypeError: 1
link 1: msgEccError: 1
link 1: chicoEvent: 1
link 2: msgTypeError: 3
link 2: msgEccError: 3
link 2: chicoEvent: 3
ilak:
slb:
edm:
isch:
cfg:
c2w:
pcie:
eqs/fc:
idh-hi:
idh-lo:
idh-shared:
edh-hi:
edh-lo:
'''
    }

    def test_golden(self):
        """Golden output parses into the expected structure."""
        self.device = Mock(**self.golden_output_serdes_internal)
        obj = ShowPlatformHardwareSerdesInternal(device=self.device)
        parsed_output = obj.parse(slot='0')
        self.maxDiff = None
        self.assertEqual(parsed_output, self.golden_parsed_output_serdes_internal)

    def test_empty(self):
        """Empty output must raise SchemaEmptyParserError."""
        self.device1 = Mock(**self.empty_output)
        obj = ShowPlatformHardwareSerdesInternal(device=self.device1)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse(slot='0')
class TestShowPlatformPower(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'allocation_status': 'Sufficient',
'chassis': 'ASR1006-X',
'excess_capacity_percent': | |
<filename>tests/unit/test_typeset_default.py
import datetime
import os
import pathlib
import uuid
from ipaddress import IPv4Address, IPv6Address
from pathlib import PurePosixPath, PureWindowsPath
from urllib.parse import urlparse
import numpy as np
import pandas as pd
import pytest
# from visions.test.series import get_series
from visions.test.utils import (
contains,
convert,
get_contains_cases,
get_convert_cases,
get_inference_cases,
infers,
)
from visions.types.email_address import FQDA
from pandas_profiling.config import config
from pandas_profiling.model.typeset import (
Boolean,
Categorical,
DateTime,
Numeric,
ProfilingTypeSet,
Unsupported,
)
# Pandas < 1.0 needs visions' backported nullable-boolean dtype; the native
# "boolean" extension dtype exists from pandas 1.0 onward.
_PANDAS_MAJOR = int(pd.__version__.split(".")[0])
if _PANDAS_MAJOR < 1:
    from visions.dtypes.boolean import BoolDtype

    btype = "Bool"
else:
    btype = "boolean"

# Directory of this test module, used to build file-fixture paths below.
base_path = os.path.abspath(os.path.dirname(__file__))
# Workaround pending release https://github.com/dylan-profiler/visions/issues/162
def get_series():
    """Build the catalogue of fixture series used by the typeset tests.

    Covers every flavour the profiler can meet: int/float/complex numerics,
    categoricals, strings, booleans, datetimes/timedeltas, paths, URLs,
    UUIDs, IPs, files/images, email addresses, mixed objects and empties.
    Returns a list of named pandas Series.
    """
    test_series = [
        # Int Series
        pd.Series([1, 2, 3], name="int_series"),
        pd.Series(range(10), name="int_range"),
        pd.Series([1, 2, 3], name="Int64_int_series", dtype="Int64"),
        pd.Series([1, 2, 3, np.nan], name="Int64_int_nan_series", dtype="Int64"),
        pd.Series([1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0], name="int_series_boolean"),
        # Count
        pd.Series(np.array([1, 2, 3, 4], dtype=np.uint32), name="np_uint32"),
        pd.Series([1, 2, 3, 4], dtype="UInt32", name="pd_uint32"),
        # Categorical
        pd.Series([1, 2, 3], name="categorical_int_series", dtype="category"),
        pd.Series(
            pd.Categorical(
                ["A", "B", "C", "C", "B", "A"],
                categories=["A", "B", "C"],
                ordered=False,
            ),
            name="categorical_char",
        ),
        pd.Series([1.0, 2.0, 3.1], dtype="category", name="categorical_float_series"),
        pd.Series(
            ["Georgia", "Sam"], dtype="category", name="categorical_string_series"
        ),
        pd.Series(
            [complex(0, 0), complex(1, 2), complex(3, -1)],
            name="categorical_complex_series",
            dtype="category",
        ),
        # Ordinal
        pd.Series(
            pd.Categorical(
                ["A", "B", "C", "C", "B", "A"], categories=["A", "B", "C"], ordered=True
            ),
            name="ordinal",
        ),
        # Float Series
        pd.Series([1.0, 2.1, 3.0], name="float_series"),
        pd.Series([1.0, 2.5, np.nan], name="float_nan_series"),
        pd.Series([1.0, 2.0, 3.0, 4.0], name="float_series2"),
        pd.Series(np.array([1.2, 2, 3, 4], dtype=np.float64), name="float_series3"),
        pd.Series([1, 2, 3.05, 4], dtype=np.float64, name="float_series4"),
        pd.Series([np.nan, 1.2], name="float_series5"),
        pd.Series([np.nan, 1.1], dtype=np.single, name="float_series6"),
        pd.Series([np.inf, np.NINF, np.PINF, 1000000.0, 5.5], name="float_with_inf"),
        pd.Series([np.inf, np.NINF, np.Infinity, np.PINF], name="inf_series"),
        pd.Series([1, 2, np.nan], name="int_nan_series"),
        # Nan Series
        pd.Series([np.nan], name="nan_series"),
        pd.Series([np.nan, np.nan, np.nan, np.nan], name="nan_series_2"),
        # String Series
        pd.Series(["Patty", "Valentine"], name="string_series"),
        pd.Series(["mack", "the", "finger"], name="string_unicode_series"),
        pd.Series(
            np.array(["upper", "hall"], dtype=np.unicode_),
            name="string_np_unicode_series",
        ),
        pd.Series(["1.0", "2.0", np.nan], name="string_num_nan"),
        pd.Series(["1,000.0", "2.1", np.nan], name="string_with_sep_num_nan"),
        pd.Series(["1.0", "2.0", "3.0"], name="string_num"),
        pd.Series(["1.0", "45.67", np.nan], name="string_flt_nan"),
        pd.Series(["1.0", "45.67", "3.5"], name="string_flt"),
        pd.Series(
            [
                "I was only robbing the register,",
                "I hope you understand",
                "One of us had better call up the cops",
                "In the hot New Jersey night",
                np.nan,
            ],
            name="string_str_nan",
        ),
        pd.Series(["True", "False", None], name="string_bool_nan"),
        pd.Series(range(20), name="int_str_range").astype("str"),
        pd.Series(
            [
                "http://www.cwi.nl:80/%7Eguido/Python.html",
                "https://github.com/dylan-profiling/hurricane",
            ],
            name="str_url",
        ),
        pd.Series(
            [r"C:\\home\\user\\file.txt", r"C:\\home\\user\\test2.txt"],
            name="path_series_windows_str",
        ),
        pd.Series(
            [r"/home/user/file.txt", r"/home/user/test2.txt"],
            name="path_series_linux_str",
        ),
        pd.Series(["0011", "12"], name="str_int_leading_zeros"),
        pd.Series(["0.0", "0.04", "0"], name="str_float_non_leading_zeros"),
        pd.Series(["0.0", "0.000", "0", "2"], name="str_int_zeros"),
        # Bool Series
        pd.Series([True, False], name="bool_series"),
        pd.Series([True, False, None], name="bool_nan_series"),
        pd.Series([True, False, None], name="nullable_bool_series", dtype=btype),
        pd.Series([True, False, False, True], name="bool_series2", dtype=bool),
        pd.Series([True, False, False, True], name="bool_series2", dtype=bool),
        pd.Series(np.array([1, 0, 0, 1], dtype=bool), name="bool_series3"),
        # Complex Series
        pd.Series(
            [complex(0, 0), complex(1, 2), complex(3, -1)],
            name="complex_series",
        ),
        pd.Series(
            [
                complex(0, 0),
                complex(1, 2),
                complex(3, -1),
                complex(np.nan, np.nan),
            ],
            name="complex_series_nan",
        ),
        pd.Series(["(1+1j)", "(2+2j)", "(10+100j)"], name="str_complex"),
        pd.Series(["(1+1j)", "(2+2j)", "(10+100j)", "NaN"], name="str_complex_nan"),
        pd.Series(
            [complex(0, 0), complex(1, 2), complex(3, -1), np.nan],
            name="complex_series_nan_2",
        ),
        pd.Series(
            [complex(0, 0), complex(1, 2), complex(3, -1), np.nan],
            name="complex_series_py_nan",
        ),
        pd.Series(
            [complex(0, 0), complex(1, 2), complex(3, -1)], name="complex_series_py"
        ),
        pd.Series(
            [
                complex(0, 0),
                complex(1, 0),
                complex(3, 0),
                complex(-1, 0),
            ],
            name="complex_series_float",
        ),
        # Datetime Series
        pd.Series(["1937-05-06", "20/4/2014"], name="string_date"),
        pd.Series(["1941-05-24", "13/10/2016"], name="timestamp_string_series"),
        pd.to_datetime(
            pd.Series(
                [datetime.datetime(2017, 3, 5, 12, 2), datetime.datetime(2019, 12, 4)],
                name="timestamp_series",
            )
        ),
        pd.to_datetime(
            pd.Series(
                [
                    datetime.datetime(2017, 3, 5),
                    datetime.datetime(2019, 12, 4, 3, 2, 0),
                    pd.NaT,
                ],
                name="timestamp_series_nat",
            )
        ),
        pd.to_datetime(
            pd.Series(
                [datetime.datetime(2017, 3, 5), datetime.datetime(2019, 12, 4), pd.NaT],
                name="date_series_nat",
            )
        ),
        pd.Series(
            pd.date_range(
                start="2013-05-18 12:00:01",
                periods=2,
                freq="H",
                tz="Europe/Brussels",
                name="timestamp_aware_series",
            )
        ),
        pd.to_datetime(
            pd.Series(
                [
                    datetime.date(2011, 1, 1),
                    datetime.date(2012, 1, 2),
                    datetime.date(2013, 1, 1),
                ],
                name="datetime",
            )
        ),
        # Date series
        pd.Series(
            [
                datetime.date(2011, 1, 1),
                datetime.date(2012, 1, 2),
                datetime.date(2013, 1, 1),
            ],
            name="date",
        ),
        # Time series
        pd.Series(
            [
                datetime.time(8, 43, 12),
                datetime.time(9, 43, 12),
                datetime.time(10, 43, 12),
            ],
            name="time",
        ),
        # http://pandas-docs.github.io/pandas-docs-travis/user_guide/timeseries.html#timestamp-limitations
        # pd.to_datetime(
        #     pd.Series(
        #         [
        #             datetime.datetime(year=1, month=1, day=1, hour=8, minute=43, second=12),
        #             datetime.datetime(year=1, month=1, day=1, hour=9, minute=43, second=12),
        #             datetime.datetime(
        #                 year=1, month=1, day=1, hour=10, minute=43, second=12
        #             ),
        #         ],
        #         name="datetime_to_time",
        #     )
        # ),
        # Timedelta Series
        pd.Series([pd.Timedelta(days=i) for i in range(3)], name="timedelta_series"),
        pd.Series(
            [pd.Timedelta(days=i) for i in range(3)] + [pd.NaT],
            name="timedelta_series_nat",
        ),
        pd.Series(
            [
                pd.Timedelta("1 days 00:03:43"),
                pd.Timedelta("5 days 12:33:57"),
                pd.Timedelta("0 days 01:25:07"),
                pd.Timedelta("-2 days 13:46:56"),
                pd.Timedelta("1 days 23:49:25"),
            ],
            name="timedelta_negative",
        ),
        # Path Series
        pd.Series(
            [
                PurePosixPath("/home/user/file.txt"),
                PurePosixPath("/home/user/test2.txt"),
            ],
            name="path_series_linux",
        ),
        pd.Series(
            [
                PurePosixPath("/home/user/file.txt"),
                PurePosixPath("/home/user/test2.txt"),
                None,
            ],
            name="path_series_linux_missing",
        ),
        pd.Series(
            [
                PureWindowsPath("C:\\home\\user\\file.txt"),
                PureWindowsPath("C:\\home\\user\\test2.txt"),
            ],
            name="path_series_windows",
        ),
        # Url Series
        pd.Series(
            [
                urlparse("http://www.cwi.nl:80/%7Eguido/Python.html"),
                urlparse("https://github.com/dylan-profiling/hurricane"),
            ],
            name="url_series",
        ),
        pd.Series(
            [
                urlparse("http://www.cwi.nl:80/%7Eguido/Python.html"),
                urlparse("https://github.com/dylan-profiling/hurricane"),
                np.nan,
            ],
            name="url_nan_series",
        ),
        pd.Series(
            [
                urlparse("http://www.cwi.nl:80/%7Eguido/Python.html"),
                urlparse("https://github.com/dylan-profiling/hurricane"),
                None,
            ],
            name="url_none_series",
        ),
        # UUID Series
        pd.Series(
            [
                uuid.UUID("0b8a22ca-80ad-4df5-85ac-fa49c44b7ede"),
                uuid.UUID("aaa381d6-8442-4f63-88c8-7c900e9a23c6"),
                uuid.UUID("00000000-0000-0000-0000-000000000000"),
            ],
            name="uuid_series",
        ),
        pd.Series(
            [
                uuid.UUID("0b8a22ca-80ad-4df5-85ac-fa49c44b7ede"),
                uuid.UUID("aaa381d6-8442-4f63-88c8-7c900e9a23c6"),
                uuid.UUID("00000000-0000-0000-0000-000000000000"),
                None,
            ],
            name="uuid_series_missing",
        ),
        pd.Series(
            [
                "0b8a22ca-80ad-4df5-85ac-fa49c44b7ede",
                "aaa381d6-8442-4f63-88c8-7c900e9a23c6",
                "00000000-0000-0000-0000-000000000000",
            ],
            name="uuid_series_str",
        ),
        # Object Series
        pd.Series([[1, ""], [2, "Rubin"], [3, "Carter"]], name="mixed_list[str,int]"),
        pd.Series(
            [{"why": "did you"}, {"bring him": "in for he"}, {"aint": "the guy"}],
            name="mixed_dict",
        ),
        pd.Series(
            [pd.to_datetime, pd.to_timedelta, pd.read_json, pd.to_pickle],
            name="callable",
        ),
        pd.Series([pd, np], name="module"),
        pd.Series(["1.1", "2"], name="textual_float"),
        pd.Series(["1.1", "2", "NAN"], name="textual_float_nan"),
        # Object (Mixed, https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.types.infer_dtype.html)
        pd.Series(["a", 1], name="mixed_integer"),
        pd.Series([True, False, np.nan], name="mixed"),
        pd.Series([[True], [False], [False]], name="mixed_list"),
        pd.Series([[1, ""], [2, "Rubin"], [3, "Carter"]], name="mixed_list[str,int]"),
        pd.Series(
            [{"why": "did you"}, {"bring him": "in for he"}, {"aint": "the guy"}],
            name="mixed_dict",
        ),
        # IP
        pd.Series([IPv4Address("127.0.0.1"), IPv4Address("127.0.0.1")], name="ip"),
        pd.Series(["127.0.0.1", "127.0.0.1"], name="ip_str"),
        # Empty
        pd.Series([], name="empty", dtype=np.float64),
        pd.Series([], name="empty_float", dtype=float),
        pd.Series([], name="empty_int64", dtype="Int64"),
        pd.Series([], name="empty_object", dtype="object"),
        pd.Series([], name="empty_bool", dtype=bool),
        # IP
        pd.Series([IPv4Address("127.0.0.1"), IPv4Address("127.0.0.1")], name="ip"),
        pd.Series(
            [IPv4Address("127.0.0.1"), None, IPv4Address("127.0.0.1")],
            name="ip_missing",
        ),
        pd.Series(
            [IPv6Address("0:0:0:0:0:0:0:1"), IPv4Address("127.0.0.1")],
            name="ip_mixed_v4andv6",
        ),
        pd.Series(["127.0.0.1", "127.0.0.1"], name="ip_str"),
        # File
        pd.Series(
            [
                pathlib.Path(os.path.join(base_path, "series.py")).absolute(),
                pathlib.Path(os.path.join(base_path, "__init__.py")).absolute(),
                pathlib.Path(os.path.join(base_path, "utils.py")).absolute(),
            ],
            name="file_test_py",
        ),
        pd.Series(
            [
                pathlib.Path(os.path.join(base_path, "..", "py.typed")).absolute(),
                pathlib.Path(
                    os.path.join(
                        base_path, "..", "visualisation", "circular_packing.html"
                    )
                ).absolute(),
                pathlib.Path(os.path.join(base_path, "series.py")).absolute(),
            ],
            name="file_mixed_ext",
        ),
        pd.Series(
            [
                pathlib.Path(os.path.join(base_path, "series.py")).absolute(),
                None,
                pathlib.Path(os.path.join(base_path, "__init__.py")).absolute(),
                None,
                pathlib.Path(os.path.join(base_path, "utils.py")).absolute(),
            ],
            name="file_test_py_missing",
        ),
        # Image
        pd.Series(
            [
                pathlib.Path(
                    os.path.join(
                        base_path,
                        "../visualisation/typesets/typeset_complete.png",
                    )
                ).absolute(),
                pathlib.Path(
                    os.path.join(
                        base_path,
                        r"../visualisation/typesets/typeset_standard.png",
                    )
                ).absolute(),
                pathlib.Path(
                    os.path.join(
                        base_path,
                        r"../visualisation/typesets/typeset_geometry.png",
                    )
                ).absolute(),
            ],
            name="image_png",
        ),
        pd.Series(
            [
                pathlib.Path(
                    os.path.join(
                        base_path,
                        r"../visualisation/typesets/typeset_complete.png",
                    )
                ).absolute(),
                pathlib.Path(
                    os.path.join(
                        base_path,
                        r"../visualisation/typesets/typeset_standard.png",
                    )
                ).absolute(),
                None,
                pathlib.Path(
                    os.path.join(
                        base_path,
                        r"../visualisation/typesets/typeset_geometry.png",
                    )
                ).absolute(),
                None,
            ],
            name="image_png_missing",
        ),
        # Email
        pd.Series(
            [FQDA("test", "example.com"), FQDA("info", "example.eu")],
            name="email_address",
        ),
        pd.Series(
            [FQDA("test", "<EMAIL>"), FQDA("info", "example.eu"), None],
            name="email_address_missing",
        ),
        pd.Series(["<EMAIL>", "<EMAIL>"], name="email_address_str"),
    ]
    # pandas >= 1.0: also exercise the native "string" extension dtype.
    if int(pd.__version__.split(".")[0]) >= 1:
        pandas_1_series = [
            pd.Series(
                ["Patty", "Valentine"], dtype="string", name="string_dtype_series"
            )
        ]
        test_series.extend(pandas_1_series)
    return test_series
# Materialise the fixture series once, and build the profiling typeset the
# parametrised tests below exercise.
series = get_series()
typeset = ProfilingTypeSet()

# Which visions type each named fixture series must be *contained* in.
contains_map = {
    Numeric: {
        "int_series",
        "Int64_int_series",
        "int_range",
        "Int64_int_nan_series",
        "int_series_boolean",
        "np_uint32",
        "pd_uint32",
        "float_series",
        "float_series2",
        "float_series3",
        "float_series4",
        "inf_series",
        "float_nan_series",
        "float_series5",
        "int_nan_series",
        "float_with_inf",
        "float_series6",
        "complex_series",
        "complex_series_py",
        "complex_series_nan",
        "complex_series_py_nan",
        "complex_series_nan_2",
        "complex_series_float",
    },
    Categorical: {
        "categorical_float_series",
        "categorical_int_series",
        "categorical_string_series",
        "categorical_complex_series",
        "categorical_char",
        "ordinal",
        "timestamp_string_series",
        "string_with_sep_num_nan",
        "string_series",
        "string_unicode_series",
        "string_np_unicode_series",
        "path_series_linux_str",
        "path_series_windows_str",
        "int_str_range",
        "string_date",
        "textual_float",
        "textual_float_nan",
        "ip_str",
        "string_flt",
        "string_num",
        "str_url",
        "string_str_nan",
        "string_num_nan",
        "string_bool_nan",
        "string_flt_nan",
        "str_complex",
        "uuid_series_str",
        "str_int_leading_zeros",
        "email_address_str",
        "str_float_non_leading_zeros",
        "str_int_zeros",
        "str_complex_nan",
    },
    Boolean: {
        "bool_series",
        "bool_series2",
        "bool_series3",
        "nullable_bool_series",
        "mixed",
        "bool_nan_series",
    },
    DateTime: {
        "timestamp_series",
        "timestamp_aware_series",
        "datetime",
        "timestamp_series_nat",
        "date_series_nat",
    },
}

# The "string" extension-dtype fixture only exists on pandas >= 1.0.
# NOTE(review): this checks the first *character* of the version string
# (fine for 0.x/1.x) while get_series uses split(".") — the two checks
# could be unified.
if int(pd.__version__[0]) >= 1:
    contains_map[Categorical].add("string_dtype_series")

# Everything the profiling typeset does not model ends up Unsupported.
contains_map[Unsupported] = {
    "module",
    "nan_series",
    "nan_series_2",
    "timedelta_series",
    "timedelta_series_nat",
    "timedelta_negative",
    "path_series_linux",
    "path_series_linux_missing",
    "path_series_windows",
    "url_series",
    "url_nan_series",
    "url_none_series",
    "file_test_py",
    "file_mixed_ext",
    "file_test_py_missing",
    "image_png",
    "image_png_missing",
    "image_png",
    "image_png_missing",
    "uuid_series",
    "uuid_series_missing",
    "mixed_list[str,int]",
    "mixed_dict",
    "callable",
    "mixed_integer",
    "mixed_list",
    "date",
    "time",
    "empty",
    "empty_bool",
    "empty_float",
    "empty_object",
    "empty_int64",
    "ip",
    "ip_missing",
    "ip_mixed_v4andv6",
    "email_address_missing",
    "email_address",
}
@pytest.mark.parametrize(**get_contains_cases(series, contains_map, typeset))
def test_contains(series, type, member):
    """Test the generated combinations for "series in type"
    Args:
        series: the series to test
        type: the type to test against
        member: the result
    """
    # Threshold 0 disables recasting low-cardinality numeric series as
    # categorical -- presumably so type membership is deterministic across
    # the generated cases; TODO confirm against the profiling config docs.
    config["vars"]["num"]["low_categorical_threshold"].set(0)
    result, message = contains(series, type, member)
    assert result, message
inference_map = {
"int_series": Numeric,
"categorical_int_series": Numeric,
"int_nan_series": Numeric,
"Int64_int_series": Numeric,
"Int64_int_nan_series": Numeric,
"np_uint32": Numeric,
"pd_uint32": Numeric,
"int_range": Numeric,
"float_series": Numeric,
"float_nan_series": Numeric,
"int_series_boolean": Numeric,
"float_series2": Numeric,
"float_series3": Numeric,
"float_series4": Numeric,
"float_series5": Numeric,
"float_series6": Numeric,
"complex_series_float": Numeric,
"categorical_float_series": Numeric,
"float_with_inf": Numeric,
"inf_series": Numeric,
"nan_series": Unsupported,
"nan_series_2": Unsupported,
"string_series": Categorical,
"categorical_string_series": Categorical,
"timestamp_string_series": Categorical,
"string_with_sep_num_nan": Categorical, # TODO: Introduce thousands separator
"string_unicode_series": Categorical,
"string_np_unicode_series": Categorical,
"string_num_nan": Numeric,
"string_num": Numeric,
"string_flt_nan": Numeric,
"string_flt": Numeric,
"string_str_nan": Categorical,
"string_bool_nan": Boolean,
"int_str_range": Numeric,
"string_date": Categorical,
"str_url": Categorical,
"bool_series": Boolean,
"bool_nan_series": Boolean,
"nullable_bool_series": Boolean,
"bool_series2": Boolean,
"bool_series3": Boolean,
"complex_series": Numeric,
"complex_series_nan": Numeric,
"complex_series_nan_2": Numeric,
"complex_series_py_nan": Numeric,
"complex_series_py": Numeric,
"categorical_complex_series": Numeric,
"timestamp_series": DateTime,
"timestamp_series_nat": DateTime,
"timestamp_aware_series": DateTime,
"datetime": DateTime,
"timedelta_series": Unsupported,
"timedelta_series_nat": Unsupported,
"timedelta_negative": Unsupported,
"geometry_series_missing": Unsupported,
"geometry_series": Unsupported,
"path_series_linux": Unsupported,
"path_series_linux_missing": Unsupported,
"path_series_linux_str": Categorical,
"path_series_windows": Unsupported,
"path_series_windows_str": Categorical,
"url_series": Unsupported,
"url_nan_series": Unsupported,
"url_none_series": Unsupported,
| |
<reponame>manwithadodla/quality-assessment-protocol
base_test_dir = "/tdata/QAP/qc_test"
def anatomical_reorient_workflow(workflow, resource_pool, config, name="_"):
    """Build a Nipype workflow to deoblique and reorient an anatomical scan
    from a NIFTI file.

    This is a seminal workflow that can only take an input directly from
    disk (no Nipype workflow connections/pointers) -- this is where the
    pipeline actually begins. For the sake of building the pipeline in
    reverse, if this is called with no input file available it returns the
    workflow and resource pool unmodified, letting each builder above it do
    the same so the pipeline builder can keep "searching" for a base-level
    input without crashing.

    Expected Resources in Resource Pool
      - anatomical_scan: The raw anatomical scan in a NIFTI image.
    New Resources Added to Resource Pool
      - anatomical_reorient: The deobliqued, reoriented anatomical scan.
    Workflow Steps
      1. AFNI's 3drefit to deoblique the anatomical scan.
      2. AFNI's 3dresample to reorient the deobliqued scan to RPI.

    :type workflow: Nipype workflow object
    :param workflow: A Nipype workflow object which can already contain
                     other connected nodes; this function inserts its
                     sub-workflow into the one provided.
    :type resource_pool: dict
    :param resource_pool: Input files and pointers to Nipype node outputs /
                          workflow connections, keyed by resource name.
    :type config: dict
    :param config: Configuration settings (directory paths, toggles).
    :type name: str
    :param name: (default: "_") Suffix appended to each node name.
    :rtype: (Nipype workflow object, dict)
    :return: The workflow with this sub-workflow connected, and the
             resource pool updated (if applicable) with new connections.
    """
    import nipype.pipeline.engine as pe
    from nipype.interfaces.afni import preprocess

    # No disk input available yet: hand everything back untouched so the
    # caller can keep building in reverse. ("in resource_pool" instead of
    # the redundant ".keys()" call.)
    if "anatomical_scan" not in resource_pool:
        return workflow, resource_pool

    anat_deoblique = pe.Node(interface=preprocess.Refit(),
                             name='anat_deoblique%s' % name)
    anat_deoblique.inputs.in_file = resource_pool["anatomical_scan"]
    anat_deoblique.inputs.deoblique = True

    anat_reorient = pe.Node(interface=preprocess.Resample(),
                            name='anat_reorient%s' % name)
    anat_reorient.inputs.orientation = 'RPI'
    anat_reorient.inputs.outputtype = 'NIFTI_GZ'

    workflow.connect(anat_deoblique, 'out_file', anat_reorient, 'in_file')
    resource_pool["anatomical_reorient"] = (anat_reorient, 'out_file')
    return workflow, resource_pool
def run_anatomical_reorient(anatomical_scan, out_dir=None, run=True):
    """Run the 'anatomical_reorient_workflow' function to execute the
    modular workflow with the provided inputs.

    :type anatomical_scan: str
    :param anatomical_scan: Filepath to the raw anatomical NIFTI image.
    :type out_dir: str
    :param out_dir: (default: None) Output directory; defaults to the
                    current working directory.
    :type run: bool
    :param run: (default: True) Execute the workflow; if False, return the
                connected Nipype workflow object and its base directory
                instead.
    :rtype: str
    :return: (if run=True) Filepath of the generated anatomical_reorient.
    :rtype: (Nipype workflow object, str)
    :return: (if run=False) The connected workflow and its base directory.
    """
    import os
    import glob

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    output = "anatomical_reorient"
    workflow = pe.Workflow(name='anatomical_reorient_workflow')

    if not out_dir:
        out_dir = os.getcwd()
    workflow_dir = os.path.join(out_dir, "workflow_output", output)
    workflow.base_dir = workflow_dir

    resource_pool = {}
    config = {}
    num_cores_per_subject = 1

    resource_pool["anatomical_scan"] = anatomical_scan
    workflow, resource_pool = \
        anatomical_reorient_workflow(workflow, resource_pool, config)

    ds = pe.Node(nio.DataSink(), name='datasink_anatomical_reorient')
    ds.inputs.base_directory = workflow_dir
    node, out_file = resource_pool["anatomical_reorient"]
    workflow.connect(node, out_file, ds, 'anatomical_reorient')

    # Idiom fix: "if run:" rather than comparing against True.
    if run:
        workflow.run(plugin='MultiProc', plugin_args= \
            {'n_procs': num_cores_per_subject})
        outpath = glob.glob(os.path.join(workflow_dir, "anatomical_reorient",
                                         "*"))[0]
        return outpath
    else:
        return workflow, workflow.base_dir
def anatomical_skullstrip_workflow(workflow, resource_pool, config, name="_"):
    """Build a Nipype workflow to skullstrip an anatomical image using
    AFNI's 3dSkullStrip.

    If any resources/outputs required by this workflow are missing from the
    resource pool, the pre-requisite workflow builders are invoked first to
    populate the pipeline with the workflows that generate them.

    Expected Resources in Resource Pool
      - anatomical_reorient: The deobliqued, reoriented anatomical scan.
    New Resources Added to Resource Pool
      - anatomical_brain: The skull-stripped anatomical image (brain only).
    Workflow Steps
      1. AFNI 3dSkullStrip to create a binary brain mask.
      2. AFNI 3dcalc to multiply the anatomical image with this mask.

    :type workflow: Nipype workflow object
    :param workflow: Workflow to insert this sub-workflow into.
    :type resource_pool: dict
    :param resource_pool: Input files and pointers to node outputs, keyed
                          by resource name.
    :type config: dict
    :param config: Configuration settings for the workflow.
    :type name: str
    :param name: (default: "_") Suffix appended to each node name.
    :rtype: (Nipype workflow object, dict)
    :return: The workflow with this sub-workflow connected, and the updated
             resource pool.
    """
    import copy

    import nipype.pipeline.engine as pe
    from nipype.interfaces.afni import preprocess

    if "anatomical_reorient" not in resource_pool:
        from anatomical_preproc import anatomical_reorient_workflow
        old_rp = copy.copy(resource_pool)
        # Fix: the original bound the returned pool to an unused
        # `new_resource_pool` name and relied on in-place mutation; bind it
        # back to `resource_pool` so the data flow is explicit.
        workflow, resource_pool = \
            anatomical_reorient_workflow(workflow, resource_pool, config,
                                         name)
        if resource_pool == old_rp:
            # Pre-requisite could not be built (no base input found yet).
            return workflow, resource_pool

    anat_skullstrip = pe.Node(interface=preprocess.SkullStrip(),
                              name='anat_skullstrip%s' % name)
    anat_skullstrip.inputs.outputtype = 'NIFTI_GZ'

    anat_skullstrip_orig_vol = pe.Node(interface=preprocess.Calc(),
                                       name='anat_skullstrip_orig_vol%s' % name)
    anat_skullstrip_orig_vol.inputs.expr = 'a*step(b)'
    anat_skullstrip_orig_vol.inputs.outputtype = 'NIFTI_GZ'

    # The resource is either a (node, output-name) connection tuple or a
    # filepath string read straight from disk. The original tested
    # len(...) == 2 twice; an explicit isinstance check in one place is
    # both clearer and immune to two-character path strings.
    anat_input = resource_pool["anatomical_reorient"]
    if isinstance(anat_input, tuple):
        node, out_file = anat_input
        workflow.connect(node, out_file, anat_skullstrip, 'in_file')
        workflow.connect(node, out_file,
                         anat_skullstrip_orig_vol, 'in_file_a')
    else:
        anat_skullstrip.inputs.in_file = anat_input
        anat_skullstrip_orig_vol.inputs.in_file_a = anat_input

    workflow.connect(anat_skullstrip, 'out_file',
                     anat_skullstrip_orig_vol, 'in_file_b')
    resource_pool["anatomical_brain"] = (anat_skullstrip_orig_vol, 'out_file')
    return workflow, resource_pool
def run_anatomical_skullstrip(anatomical_reorient, out_dir=None, run=True):
    """Run the 'anatomical_skullstrip_workflow' function to execute the
    modular workflow with the provided inputs.

    :type anatomical_reorient: str
    :param anatomical_reorient: Filepath of the deobliqued, reoriented
                                anatomical NIFTI image.
    :type out_dir: str
    :param out_dir: (default: None) Output directory; defaults to the
                    current working directory.
    :type run: bool
    :param run: (default: True) Execute the workflow; if False, return the
                connected Nipype workflow object and its base directory
                instead.
    :rtype: str
    :return: (if run=True) Filepath of the generated anatomical_brain.
    :rtype: (Nipype workflow object, str)
    :return: (if run=False) The connected workflow and its base directory.
    """
    import os
    import glob

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    output = "anatomical_brain"
    workflow = pe.Workflow(name='anatomical_skullstrip_workflow')

    if not out_dir:
        out_dir = os.getcwd()
    workflow_dir = os.path.join(out_dir, "workflow_output", output)
    workflow.base_dir = workflow_dir

    resource_pool = {}
    config = {}
    num_cores_per_subject = 1

    resource_pool["anatomical_reorient"] = anatomical_reorient
    workflow, resource_pool = \
        anatomical_skullstrip_workflow(workflow, resource_pool, config)

    ds = pe.Node(nio.DataSink(), name='datasink_anatomical_skullstrip')
    ds.inputs.base_directory = workflow_dir
    node, out_file = resource_pool["anatomical_brain"]
    workflow.connect(node, out_file, ds, 'anatomical_brain')

    # Idiom fix: "if run:" rather than comparing against True.
    if run:
        workflow.run(plugin='MultiProc', plugin_args= \
            {'n_procs': num_cores_per_subject})
        outpath = glob.glob(os.path.join(workflow_dir, "anatomical_brain",
                                         "*"))[0]
        return outpath
    else:
        return workflow, workflow.base_dir
def afni_anatomical_linear_registration(workflow, resource_pool, \
config, name="_"):
"""Build Nipype workflow to calculate the linear registration (participant
to template) of an anatomical image using AFNI's 3dAllineate.
- If any resources/outputs required by this workflow are not in the
resource pool, this workflow will call pre-requisite workflow builder
functions to further populate the pipeline with workflows which will
calculate/generate these necessary pre-requisites.
Expected Settings in Configuration
- skull_on_registration: (optional- default: True) Whether or not to
accept anatomical_reorient or anatomical_brain
as the input for registration.
- template_head_for_anat: (for skull-on registration) The reference
template of the whole head.
- template_brain_for_anat: (for skull-off registration) The reference
template of the brain without skull.
Expected Resources in Resource Pool
- anatomical_reorient: The deobliqued, reoriented anatomical scan.
OR
- anatomical_brain: The skull-stripped anatomical image (brain only).
New Resources Added to Resource Pool
- afni_linear_warped_image: The anatomical image transformed to the
template (using linear warps).
- allineate_linear_xfm: The text file containing the linear warp matrix
produced by AFNI's 3dAllineate.
Workflow Steps
1. | |
if self._has_problems():
self._print(self._get_field(data, "problem_msg"))
self._list_problems()
self._print(self._get_field(data, "finish_msg"))
choice = self._input(None, None, ["(q)uit", "(b)ack"])
if choice == 1:
self.current_action_idx -= 2
return
else:
self._print(self._get_field(data, "complete_msg"))
metro = "./metroae"
if self.in_container:
metro = "metroae"
deployment = ""
if ("deployment_name" in self.state and
self.state["deployment_name"] != "default"):
deployment = self.state["deployment_name"]
if "upgrade" in self.state:
self._print(
self._get_field(data, "upgrade_msg").format(
metro=metro,
deployment=deployment))
else:
self._print(
self._get_field(data, "install_msg").format(
metro=metro,
deployment=deployment))
exit(0)
#
# Private class internals
#
    def _print(self, msg):
        # Central output funnel (Python 2 print statement). Encoding to
        # UTF-8 keeps unicode messages printable on terminals whose default
        # encoding is ASCII.
        print msg.encode("utf-8")
    def _print_progress(self):
        # Emit one dot every `progress_display_rate` calls so long-running
        # shell commands show liveness without flooding the terminal.
        if self.progress_display_count % self.progress_display_rate == 0:
            sys.stdout.write(".")
            sys.stdout.flush()
        self.progress_display_count += 1
    def _input(self, prompt=None, default=None, choices=None, datatype=""):
        """Prompt the user and return a validated value.

        When NON_INTERACTIVE is set in the environment, the next scripted
        argument from self.args is consumed instead; an invalid or missing
        argument raises rather than re-prompting. Interactively, the prompt
        repeats until _validate_input accepts the entry. datatype
        "password" switches to getpass so the entry is not echoed.
        """
        input_prompt = self._get_input_prompt(prompt, default, choices)
        value = None
        if "NON_INTERACTIVE" in os.environ:
            if len(self.args) < 1:
                raise Exception(
                    "Out of args for non-interactive input for %s" %
                    input_prompt)
            user_value = self.args.pop(0)
            if prompt is not None:
                self._print(prompt)
            self._print("From args: " + user_value)
            value = self._validate_input(user_value, default,
                                         choices, datatype)
            if value is None:
                raise Exception("Invalid non-interactive input for %s%s" %
                                (input_prompt, user_value))
        else:
            while value is None:
                if datatype == "password":
                    user_value = getpass.getpass(input_prompt)
                else:
                    user_value = raw_input(input_prompt)
                value = self._validate_input(user_value, default, choices,
                                             datatype)
        return value
def _get_input_prompt(self, prompt=None, default=None, choices=None):
input_prompt = ""
if choices is not None:
input_prompt += "\n".join(choices)
input_prompt += "\n\n"
short_choices = self._get_short_choices(choices, default)
if prompt is not None:
input_prompt += prompt
input_prompt += " "
input_prompt += "[%s]" % ("/".join(short_choices))
else:
default_sep = ""
if prompt is not None:
input_prompt += prompt
default_sep = " "
if default is not None:
input_prompt += "%s[%s]" % (default_sep, default)
input_prompt += ": "
return input_prompt
def _validate_input(self, user_value, default=None, choices=None,
datatype=""):
value = None
if user_value == "":
if default is not None:
return default
else:
self._print("\nRequired field, please enter a value\n")
return None
if choices is not None:
value = self._match_choice(user_value, choices)
if value is None:
self._print(
"\nValue is not a valid choice, please reenter\n")
elif datatype == "ipaddr":
value = self._validate_ipaddr(user_value)
if value is None:
self._print("\nValue is not a valid ipaddress\n")
elif datatype == "int":
try:
value = int(user_value)
except ValueError:
self._print("\nValue is not a valid integer\n")
return None
elif datatype == "hostname":
value = self._validate_hostname(user_value)
if value is None:
self._print("\nValue is not a valid hostname\n")
elif datatype == "version":
allowed = re.compile("^[\d]+[.][\d]+[.]([A-Z\d]+)$", re.IGNORECASE)
if not allowed.match(user_value):
self._print("\nValue is not a valid version\n")
return None
value = user_value
else:
value = user_value
return value
    def _validate_ipaddr(self, user_value):
        """Return the value if it parses as an IP address, else None.

        When the third-party netaddr library is unavailable, the value is
        accepted unvalidated with a warning (best-effort fallback).
        """
        try:
            import netaddr
            try:
                netaddr.IPAddress(user_value)
                return user_value
            except netaddr.core.AddrFormatError:
                return None
        except ImportError:
            self._print("\nWarning: Python netaddr library not installed. "
                        "Cannot validate IP address. This library is also "
                        "required for MetroAE to run properly.")
            return user_value
def _validate_hostname(self, hostname):
if len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1]
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
if all(allowed.match(x) for x in hostname.split(".")):
return hostname
else:
return None
def _get_short_choices(self, choices, default=None):
short_choices = list()
for i, choice in enumerate(choices):
start_paren_idx = choice.find("(")
if start_paren_idx >= 0:
short_value = choice[start_paren_idx + 1]
else:
short_value = choice[0]
if default == i:
short_value = short_value.upper()
else:
short_value = short_value.lower()
short_choices.append(short_value)
return short_choices
def _match_choice(self, user_value, choices):
short_choices = self._get_short_choices(choices)
for i, short_choice in enumerate(short_choices):
if user_value.lower() == short_choice:
return i
return None
    def _import_yaml(self):
        """Verify PyYAML is importable, offering to install it if not."""
        try:
            import yaml
            # Exercise the module, not just the import.
            yaml.safe_load("")
        except ImportError:
            self._install_yaml()
    def _install_yaml(self):
        """Offer to sudo-pip-install PyYAML; exit the wizard on decline or
        on installation failure."""
        self._print("This wizard requires PyYAML library to be installed."
                    " Running this command requires sudo access. You may be "
                    "asked for the sudo password.\n")
        choice = self._input("Install it now?", 0,
                             ["(Y)es", "(n)o"])
        # choice is an index into the list above: 1 means "(n)o".
        if choice == 1:
            self._print("Please install PyYAML and run the wizard again.")
            exit(1)
        rc, output_lines = self._run_shell("sudo pip install " + YAML_LIBRARY)
        if rc != 0:
            self._print("\n".join(output_lines))
            self._print("Could not install PyYAML, exit code: %d" % rc)
            self._print("Please install PyYAML and run the wizard again.")
            exit(1)
def _get_value(self, deployment, field):
value = deployment.get(field)
if value == "":
value = None
return value
    def _set_container(self):
        """Decide whether the wizard is running inside a container."""
        # For Dalston container
        if "RUN_MODE" in os.environ:
            self.in_container = (os.environ["RUN_MODE"] == "INSIDE")
        else:
            self.in_container = False
        # NOTE(review): this assignment unconditionally overwrites the
        # RUN_MODE result above, making the env-var branch dead code. It
        # looks like a later switch to detecting the bind-mounted source
        # directory -- confirm intent before deleting the old branch.
        self.in_container = os.path.isdir("/source/nuage-metroae")
    def _set_directories(self):
        """Set metro_path (this file's directory, also made the cwd) and
        the base deployments directory."""
        self.metro_path = os.path.dirname(os.path.abspath(__file__))
        os.chdir(self.metro_path)
        if self.in_container:
            # NOTE(review): the "/data" assignment below is immediately
            # overwritten by the "/metroae_data" one -- dead code left from
            # an older container layout; confirm before removing.
            self.base_deployment_path = os.path.join("/data",
                                                     "deployments")
            # Dalston container
            self.base_deployment_path = os.path.join("/metroae_data",
                                                     "deployments")
        else:
            self.base_deployment_path = os.path.join(self.metro_path,
                                                     "deployments")
    def _validate_actions(self):
        # Fail fast on a malformed wizard script before any step runs.
        for action in self.script:
            self._validate_action(action)
def _validate_action(self, action):
if type(action) == dict:
action_name = self._get_action_name(action)
if action_name.startswith("_") or not hasattr(self, action_name):
raise Exception(
"Invalid wizard script format - %s not a valid action" %
action_name)
else:
raise Exception("Invalid wizard script format - action not a dict")
    def _run_script(self):
        """Main wizard loop: walk self.script, showing each step banner and
        dispatching its action; supports back/skip/quit navigation."""
        self.current_action_idx = 0
        while self.current_action_idx < len(self.script):
            current_action = self.script[self.current_action_idx]
            if "step" in current_action:
                self._display_step(current_action)
                # Choice indices: 0 continue, 1 back, 2 skip, 3 quit.
                choice = self._input(None, 0, ["(C)ontinue",
                                               "(b)ack",
                                               "(s)kip",
                                               "(q)uit"])
                if choice == 1:
                    # -1 here plus the +1 at loop end nets one step back.
                    self.current_action_idx -= 1
                    continue
                if choice == 2:
                    self.current_action_idx += 1
                    continue
                if choice == 3:
                    self._print("Exiting MetroAE wizard. All progress made "
                                "has been saved.")
                    exit(0)
            try:
                self._run_action(current_action)
            except KeyboardInterrupt:
                self._print("\n\nInterrupt signal received. All progress made "
                            "before current step has been saved.\n")
                choice = self._input(
                    "Would you like to quit?",
                    1, ["(y)es", "(N)o"])
                # 0 is "(y)es"; anything else resumes at the next step.
                if choice != 1:
                    exit(1)
            self.current_action_idx += 1
def _display_step(self, action):
self._print("")
if "step" in action:
self._print("**** " + action["step"] + " ****\n")
if "description" in action:
self._print(action["description"])
    def _run_action(self, action):
        # Dispatch: the single non-standard key of the action dict names
        # the method to call; its value is that action's data payload.
        action_name = self._get_action_name(action)
        action_func = getattr(self, action_name)
        data = action[action_name]
        action_func(action, data)
def _get_action_name(self, action):
keys = action.keys()
for standard_field in STANDARD_FIELDS:
if standard_field in keys:
keys.remove(standard_field)
if len(keys) != 1:
raise Exception(
"Invalid wizard script format - could not deterimine action")
return keys[0]
def _get_field(self, data, field_name):
if type(data) == dict:
if field_name in data:
return data[field_name]
else:
raise Exception(
"Invalid wizard script format - data has no %s field" %
field_name)
else:
raise Exception("Invalid wizard script format - data not a dict")
    def _run_shell(self, cmd_str):
        """Run a shell command from metro_path, streaming dots as progress.

        Returns (exit_code, output_lines) with stderr merged into stdout.
        shell=True is acceptable here because cmd_str is always built from
        wizard-internal constants, not raw user input.
        """
        process = subprocess.Popen(cmd_str,
                                   shell=True,
                                   cwd=self.metro_path,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        output_lines = list()
        rc = self._capture_output(process, output_lines)
        # Reset the dot rate in case a caller slowed it down (_verify_yum).
        self.progress_display_rate = 1
        return rc, output_lines
    def _capture_output(self, process, output_lines):
        """Drain a subprocess's stdout into output_lines until it exits.

        Echoes each line when DEBUG_WIZARD is set, otherwise prints
        progress dots. Returns the process exit code.
        """
        while True:
            retcode = process.poll()
            if retcode is None:
                # Still running: consume one line at a time.
                line = process.stdout.readline().rstrip("\n")
                output_lines.append(line)
                if "DEBUG_WIZARD" in os.environ:
                    self._print(line)
                else:
                    self._print_progress()
            else:
                # Flush stdout buffer
                lines = process.stdout.read()
                for line in lines.split("\n"):
                    output_lines.append(line)
                    if "DEBUG_WIZARD" in os.environ:
                        self._print(line)
                    else:
                        self._print_progress()
                return retcode
def _record_problem(self, problem_name, problem_descr):
if "problems" not in self.state:
self.state["problems"] = dict()
self.state["problems"][problem_name] = problem_descr
def _unrecord_problem(self, problem_name):
if "problems" not in self.state:
return
if problem_name in self.state["problems"]:
del self.state["problems"][problem_name]
def _has_problems(self):
return "problems" in self.state and len(self.state["problems"]) != 0
    def _list_problems(self):
        # Print each recorded problem description as a bullet line.
        if self._has_problems():
            for descr in self.state["problems"].values():
                self._print(" - " + descr)
    def _verify_pip(self):
        """Compare `pip freeze` output against pip_requirements.txt.

        Returns a list of human-readable discrepancy strings (empty when
        everything required is installed at the required version).
        """
        try:
            rc, output_lines = self._run_shell("pip freeze")
            if rc != 0:
                self._print("\n".join(output_lines))
                raise Exception("pip freeze exit-code: %d" % rc)
            with open("pip_requirements.txt", "r") as f:
                required_libraries = f.read().split("\n")
        except Exception as e:
            self._print("\nAn error occurred while reading pip libraries: " +
                        str(e))
            self._print("Please contact: " + METROAE_CONTACT)
            return ["Could not deterimine pip libraries"]
        return self._compare_libraries(required_libraries, output_lines)
    def _verify_yum(self):
        """Compare `yum list` output against yum_requirements.txt.

        Returns a list of human-readable discrepancy strings. The progress
        dot rate is slowed because `yum list` prints thousands of lines.
        """
        try:
            self.progress_display_rate = 30
            rc, output_lines = self._run_shell("yum list")
            if rc != 0:
                self._print("\n".join(output_lines))
                raise Exception("yum list exit-code: %d" % rc)
            with open("yum_requirements.txt", "r") as f:
                required_libraries = f.read().split("\n")
        except Exception as e:
            self._print("\nAn error occurred while reading yum libraries: " +
                        str(e))
            self._print("Please contact: " + METROAE_CONTACT)
            return ["Could not deterimine yum libraries"]
        return self._compare_libraries(required_libraries, output_lines)
    def _run_setup(self):
        """Run setup.sh under sudo and clear the install_libraries problem
        on success; failures are reported but not fatal."""
        cmd = "sudo ./setup.sh"
        self._print("Command: " + cmd)
        self._print("Running setup (may ask for sudo password)")
        try:
            rc, output_lines = self._run_shell(cmd)
            if rc != 0:
                self._print("\n".join(output_lines))
                raise Exception("setup.sh exit-code: %d" % rc)
            self._unrecord_problem("install_libraries")
            self._print(u"\nMetroAE setup completed successfully!")
        except Exception as e:
            self._print("\nAn error occurred while running setup: " +
                        str(e))
            self._print("Please contact: " + METROAE_CONTACT)
def _compare_libraries(self, required_libraries, installed_libraries):
missing = list()
for req_lib in required_libraries:
if req_lib.startswith("@"):
continue
req_lib_name = req_lib.split("=")[0]
found = False
for inst_lib in installed_libraries:
if inst_lib.lower().startswith(req_lib_name.lower()):
found = True
if not inst_lib.lower().startswith(req_lib.lower()):
missing.append("Requires %s, %s was found" %
(req_lib, inst_lib))
break
if not found:
missing.append("Requires " + req_lib)
return missing
def _run_unzip(self, zip_dir, unzip_dir):
cmd = "./nuage-unzip.sh %s %s" % (zip_dir, unzip_dir)
self._print("Command: " + cmd)
self._print("Unzipping files from %s to %s" % (zip_dir, unzip_dir))
for | |
<filename>Adafruit_GFX/Adafruit_GFX.py
'''******************************************************************
This is the core graphics library for all our displays, providing
basic graphics primitives (points, lines, circles, etc.). It needs
to be paired with a hardware-specific library for each display
device we carry (handling the lower-level functions).
Adafruit invests time and resources providing this open
source code, please support Adafruit and open-source hardware
by purchasing products from Adafruit!
Written by <NAME>/Ladyada for Adafruit Industries.
BSD license, check license.txt for more information.
All text above must be included in any redistribution.
******************************************************************'''
#include "Adafruit_GFX.h"
#include "glcdfont.c"
#include <avr/pgmspace.h>
class Adafruit_GFX:
_width = 0
_height = 0
    def __init__(self, width, height):
        # Display dimensions in pixels. Subclasses are expected to supply
        # the hardware-specific drawPixel() primitive.
        self._width = width
        self._height = height
rotation = 0
cursor_y = cursor_x = 0
textsize = 1
textcolor = textbgcolor = 0xFFFF
wrap = true
''' draw a circle outline '''
def drawCircle(self, x0, y0, r, color):
f = 1 - r
ddF_x = 1
ddF_y = -2 * r
x = 0
y = r
self.drawPixel(x0, y0+r, color)
self.drawPixel(x0, y0-r, color)
self.drawPixel(x0+r, y0, color)
self.drawPixel(x0-r, y0, color)
while (x<y) {
if (f >= 0) {
y--
ddF_y += 2
f += ddF_y
}
x++
ddF_x += 2
f += ddF_x
self.drawPixel(x0 + x, y0 + y, color)
self.drawPixel(x0 - x, y0 + y, color)
self.drawPixel(x0 + x, y0 - y, color)
self.drawPixel(x0 - x, y0 - y, color)
self.drawPixel(x0 + y, y0 + x, color)
self.drawPixel(x0 - y, y0 + x, color)
self.drawPixel(x0 + y, y0 - x, color)
self.drawPixel(x0 - y, y0 - x, color)
def drawCircleHelper(self, x0, y0, r, cornername, ucolor):
f = 1 - r
ddF_x = 1
ddF_y = -2 * r
x = 0
y = r
while (x<y):
if (f >= 0):
y--
ddF_y += 2
f += ddF_y
x+=1
ddF_x += 2
f += ddF_x
if (cornername & 0x4):
self.drawPixel(x0 + x, y0 + y, color)
self.drawPixel(x0 + y, y0 + x, color)
if (cornername & 0x2) :
self.drawPixel(x0 + x, y0 - y, color)
self.drawPixel(x0 + y, y0 - x, color)
if (cornername & 0x8) :
self.drawPixel(x0 - y, y0 + x, color)
self.drawPixel(x0 - x, y0 + y, color)
if (cornername & 0x1) :
self.drawPixel(x0 - y, y0 - x, color)
self.drawPixel(x0 - x, y0 - y, color)
def fillCircle(self, x0, y0, r, ucolor):
drawFastVLine(x0, y0-r, 2*r+1, color)
fillCircleHelper(x0, y0, r, 3, 0, color)
''' used to do circles and roundrects!'''
def fillCircleHelper(self, x0, y0, r, cornername, delta, ucolor):
f = 1 - r
ddF_x = 1
ddF_y = -2 * r
x = 0
y = r
while (x<y) :
if (f >= 0) :
y -= 1
ddF_y += 2
f += ddF_y
x += 1
ddF_x += 2
f += ddF_x
if (cornername & 0x1) :
self.drawFastVLine(x0+x, y0-y, 2*y+1+delta, color)
self.drawFastVLine(x0+y, y0-x, 2*x+1+delta, color)
if (cornername & 0x2) :
self.drawFastVLine(x0-x, y0-y, 2*y+1+delta, color)
self.drawFastVLine(x0-y, y0-x, 2*x+1+delta, color)
''' bresenham's algorithm - thx wikpedia'''
def drawLine(self, x0, y0, x1, y1, ucolor):
steep = abs(y1 - y0) > abs(x1 - x0)
if (steep) :
swap(x0, y0)
swap(x1, y1)
if (x0 > x1) :
swap(x0, x1)
swap(y0, y1)
dx, dy
dx = x1 - x0
dy = abs(y1 - y0)
err = dx / 2
ystep = 0
if (y0 < y1) :
ystep = 1
else:
ystep = -1
for ( x0<=x1 x0++) :
if (steep) :
self.drawPixel(y0, x0, color)
else
self.drawPixel(x0, y0, color)
err -= dy
if (err < 0) :
y0 += ystep
err += dx
''' draw a rectangle'''
def drawRect(self, x, y, w, h, ucolor):
self.drawFastHLine(x, y, w, color)
self.drawFastHLine(x, y+h-1, w, color)
self.drawFastVLine(x, y, h, color)
self.drawFastVLine(x+w-1, y, h, color)
def drawFastVLine(self, x, y, h, ucolor) :
''' stupidest version - update in subclasses if desired!'''
self.drawLine(x, y, x, y+h-1, color)
def drawFastHLine(self, x, y, w, ucolor) :
''' stupidest version - update in subclasses if desired!'''
self.drawLine(x, y, x+w-1, y, color)
def fillRect(self, x, y, w, h, ucolor) :
''' stupidest version - update in subclasses if desired!'''
for (i=x i<x+w i++) :
self.drawFastVLine(i, y, h, color)
def fillScreen(self, ucolor) :
fillRect(0, 0, _width, _height, color)
''' draw a rounded rectangle!'''
def drawRoundRect(self, x, y, w, h, r, ucolor) :
''' smarter version'''
self.drawFastHLine(x+r , y , w-2*r, color) ''' Top'''
self.drawFastHLine(x+r , y+h-1, w-2*r, color) ''' Bottom'''
self.drawFastVLine( x , y+r , h-2*r, color) ''' Left'''
self.drawFastVLine( x+w-1, y+r , h-2*r, color) ''' Right'''
''' draw four corners'''
self.drawCircleHelper(x+r , y+r , r, 1, color)
self.drawCircleHelper(x+w-r-1, y+r , r, 2, color)
self.drawCircleHelper(x+w-r-1, y+h-r-1, r, 4, color)
self.drawCircleHelper(x+r , y+h-r-1, r, 8, color)
''' fill a rounded rectangle!'''
def fillRoundRect(self, x, y, w, h, r, ucolor):
''' smarter version'''
draw.fillRect(x+r, y, w-2*r, h, color)
''' draw four corners'''
draw.fillCircleHelper(x+w-r-1, y+r, r, 1, h-2*r-1, color)
fillCircleHelper(x+r , y+r, r, 2, h-2*r-1, color)
''' draw a triangle!'''
def drawTriangle(self, x0, y0, x1, y1, x2, y2, ucolor):
self.drawLine(x0, y0, x1, y1, color)
self.drawLine(x1, y1, x2, y2, color)
self.drawLine(x2, y2, x0, y0, color)
''' fill a triangle!'''
def fillTriangle (self, x0, y0, x1, y1, x2, y2, ucolor):
a, b, y, last
''' Sort coordinates by Y order (y2 >= y1 >= y0)'''
if (y0 > y1) :
swap(y0, y1) swap(x0, x1)
if (y1 > y2) :
swap(y2, y1) swap(x2, x1)
if (y0 > y1) :
swap(y0, y1) swap(x0, x1)
if(y0 == y2) : ''' Handle awkward all-on-same-line case as its own thing'''
a = b = x0
if(x1 < a) a = x1
else if(x1 > b) b = x1
if(x2 < a) a = x2
else if(x2 > b) b = x2
self.drawFastHLine(a, y0, b-a+1, color)
return
dx01 = x1 - x0,
dy01 = y1 - y0,
dx02 = x2 - x0,
dy02 = y2 - y0,
dx12 = x2 - x1,
dy12 = y2 - y1,
sa = 0,
sb = 0
''' For upper part of triangle, find scanline crossings for segments'''
''' 0-1 and 0-2. If y1=y2 (flat-bottomed triangle), the scanline y1'''
''' is included here (and second loop will be skipped, avoiding a /0'''
''' error there), otherwise scanline y1 is skipped here and handled'''
''' in the second loop...which also avoids a /0 error here if y0=y1'''
''' (flat-topped triangle).'''
if(y1 == y2) last = y1 ''' Include y1 scanline'''
else last = y1-1 ''' Skip it'''
for(y=y0 y<=last y++) :
a = x0 + sa / dy01
b = x0 + sb / dy02
sa += dx01
sb += dx02
''' longhand:
a = x0 + (x1 - x0) * (y - y0) / (y1 - y0)
b = x0 + (x2 - x0) * (y - y0) / (y2 - y0)
'''
if(a > b): swap(a,b)
self.drawFastHLine(a, y, b-a+1, color)
''' For lower part of triangle, find scanline crossings for segments'''
''' 0-2 and 1-2. This loop is skipped if y1=y2.'''
sa = dx12 * (y - y1)
sb = dx02 * (y - y0)
for( y<=y2 y++) :
a = x1 + sa / dy12
b = x0 + sb / dy02
sa += dx12
sb += dx02
''' longhand:
a = x1 + (x2 - x1) * (y - y1) / (y2 - y1)
b = x0 + (x2 - x0) * (y - y0) / (y2 - y0)
'''
if(a > b) swap(a,b)
self.drawFastHLine(a, y, b-a+1, color)
def drawBitmap(self, x, y, const *bitmap, w, h, ucolor) :
i, j, byteWidth = (w + 7) / 8
for(j=0 j<h j++) :
for(i=0 i<w i++ ) :
if(pgm_read_byte(bitmap + j * byteWidth + i / 8) & (128 >> (i & 7))) :
self.drawPixel(x+i, y+j, color)
def write(self, c) :
if (c == '\n') :
cursor_y += textsize*8
cursor_x = 0
elif (c == '\r') :
''' skip em'''
else:
self.drawChar(cursor_x, cursor_y, c, | |
<filename>access_eval/analysis/plotting.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import altair as alt
import pandas as pd
from .constants import ComputedFields, DatasetFields
from .core import flatten_access_eval_2021_dataset, load_access_eval_2021_dataset
###############################################################################
PLOTTING_DIR = Path("plots/").resolve()
###############################################################################
def plot_computed_fields_over_vote_share(
    data: Optional[pd.DataFrame] = None,
    save_path: Optional[Union[str, Path]] = None,
) -> Path:
    """Scatter every computed error measure against vote share.

    One panel per computed field (diffs plus pre/post averages at each
    severity), colored and shaped by whether the campaign was contacted.

    :param data: Wide-form evaluation dataset; the default 2021 dataset is
        loaded when None.
    :param save_path: Output image path; defaults to plots/vote-share.png.
    :return: The resolved path the figure was written to.
    """
    # Load default data
    if data is None:
        data = load_access_eval_2021_dataset()
    # Apply default save path
    if save_path is None:
        save_path = PLOTTING_DIR / "vote-share.png"
    # Ensure save path is Path object
    save_path = Path(save_path).resolve()
    save_path.parent.mkdir(parents=True, exist_ok=True)
    # Generate chart
    vote_share = (
        alt.Chart(data)
        .mark_point()
        .encode(
            alt.X(f"{DatasetFields.vote_share}:Q"),
            alt.Y(alt.repeat("column"), type="quantitative"),
            color=f"{DatasetFields.contacted}:N",
            shape=f"{DatasetFields.contacted}:N",
        )
        .repeat(
            column=[
                ComputedFields.diff_errors.name,
                ComputedFields.diff_critical_errors.name,
                ComputedFields.diff_serious_errors.name,
                ComputedFields.diff_moderate_errors.name,
                ComputedFields.diff_minor_errors.name,
                ComputedFields.avg_errors_per_page_pre.name,
                ComputedFields.avg_errors_per_page_post.name,
                ComputedFields.avg_critical_errors_per_page_pre.name,
                ComputedFields.avg_critical_errors_per_page_post.name,
                ComputedFields.avg_serious_errors_per_page_pre.name,
                ComputedFields.avg_serious_errors_per_page_post.name,
                ComputedFields.avg_moderate_errors_per_page_pre.name,
                ComputedFields.avg_moderate_errors_per_page_post.name,
                ComputedFields.avg_minor_errors_per_page_pre.name,
                ComputedFields.avg_minor_errors_per_page_post.name,
            ],
        )
    )
    # save_path was already resolved above; the redundant second .resolve()
    # has been dropped.
    vote_share.save(str(save_path))
    return save_path
def plot_pre_post_fields_compare(
    data: Optional[pd.DataFrame] = None,
    save_path: Optional[Union[str, Path]] = None,
) -> Path:
    """Scatter each pre-campaign avg-errors-per-page measure against its
    post-campaign counterpart, one panel per severity level, colored and
    shaped by whether the campaign was contacted.

    :param data: Wide-form evaluation dataset; the default 2021 dataset is
        loaded when None.
    :param save_path: Output image path; defaults to plots/pre-post.png.
    :return: The resolved path the figure was written to.
    """
    # Load default data
    if data is None:
        data = load_access_eval_2021_dataset()
    # Apply default save path
    if save_path is None:
        save_path = PLOTTING_DIR / "pre-post.png"
    # Ensure save path is Path object
    save_path = Path(save_path).resolve()
    save_path.parent.mkdir(parents=True, exist_ok=True)
    # Build one scatter panel per (pre, post) pair and concatenate them
    # horizontally.
    pre_post = alt.hconcat()
    for pre, post in [
        (
            ComputedFields.avg_errors_per_page_pre.name,
            ComputedFields.avg_errors_per_page_post.name,
        ),
        (
            ComputedFields.avg_critical_errors_per_page_pre.name,
            ComputedFields.avg_critical_errors_per_page_post.name,
        ),
        (
            ComputedFields.avg_serious_errors_per_page_pre.name,
            ComputedFields.avg_serious_errors_per_page_post.name,
        ),
        (
            ComputedFields.avg_moderate_errors_per_page_pre.name,
            ComputedFields.avg_moderate_errors_per_page_post.name,
        ),
        (
            ComputedFields.avg_minor_errors_per_page_pre.name,
            ComputedFields.avg_minor_errors_per_page_post.name,
        ),
    ]:
        pre_post |= (
            alt.Chart(data)
            .mark_point()
            .encode(
                x=f"{post}:Q",
                y=f"{pre}:Q",
                color=f"{DatasetFields.contacted}:N",
                shape=f"{DatasetFields.contacted}:N",
            )
        )
    pre_post.save(str(save_path.resolve()))
    return save_path
def plot_categorical_against_errors_boxplots(
    data: Optional[pd.DataFrame] = None,
) -> List[Path]:
    """Box-plot each post-campaign error measure split by each categorical
    variable (electoral position, candidate position, election result),
    saving one figure per categorical variable.

    Input data should be the "flattened" dataset.

    :return: Paths of the saved figures, one per categorical variable.
    """
    # Load default data
    if data is None:
        data = flatten_access_eval_2021_dataset()
    # Only work against the post data for summary stats as there was no difference
    # pre and post (trial / contact)
    data = data[data[DatasetFields.trial] == "B - Post"]
    # Set of categorical variables to use for box plot generation
    categorical_variables = [
        DatasetFields.electoral_position,
        DatasetFields.candidate_position,
        DatasetFields.election_result,
    ]
    # For each categorical variable, create a row of the different error measures
    save_paths = []
    for cat_var in categorical_variables:
        # Break down the categorical variable into all errors and subsets of error type
        error_types = alt.hconcat()
        for err in [
            ComputedFields.avg_errors_per_page_post.name,
            ComputedFields.avg_minor_errors_per_page_post.name,
            ComputedFields.avg_moderate_errors_per_page_post.name,
            ComputedFields.avg_serious_errors_per_page_post.name,
            ComputedFields.avg_critical_errors_per_page_post.name,
        ]:
            # The flattened dataset drops the "_post" suffix from column names.
            feature_name = err.replace("_post", "")
            # Use the overall avg-errors column's range for every panel so all
            # severities share one comparable y-scale.
            scale_name = ComputedFields.avg_errors_per_page_post.name.replace(
                "_post", ""
            )
            error_types |= (
                alt.Chart(data)
                .mark_boxplot(ticks=True)
                .encode(
                    y=alt.Y(
                        f"{feature_name}:Q",
                        scale=alt.Scale(
                            domain=(
                                data[scale_name].min(),
                                data[scale_name].max(),
                            ),
                            padding=1,
                        ),
                    ),
                    column=alt.Column(
                        f"{cat_var}:N", spacing=40, header=alt.Header(orient="bottom")
                    ),
                )
            )
        save_path = PLOTTING_DIR / f"{cat_var}-errors-split.png"
        save_path.parent.mkdir(parents=True, exist_ok=True)
        error_types.save(str(save_path))
        save_paths.append(save_path)
    return save_paths
def plot_locations_against_errors_boxplots(
    data: Optional[pd.DataFrame] = None,
) -> Path:
    """Box-plot post-campaign error measures per location, split by candidate
    position, stacking one row of panels per location with enough campaigns.

    Input data should be the "flattened" dataset.

    :return: Path of the saved figure.
    """
    # Load default data
    if data is None:
        data = flatten_access_eval_2021_dataset()
    # Only work against the post data for summary stats as there was no difference
    # pre and post (trial / contact)
    data = data[data[DatasetFields.trial] == "B - Post"]
    # Drop any locations with less than two campaigns.
    # Fix: the original passed the value_counts Series itself to isin(), which
    # matches against the Series *values* (the counts), not the location
    # names; take .index so we filter by location.
    location_counts = data[DatasetFields.location].value_counts()
    sparse_locations = location_counts[location_counts < 2].index
    # NOTE(review): the sibling plot_location_based_summary_stats uses <= 2
    # for the same filter -- confirm which threshold is intended.
    data = data[~data[DatasetFields.location].isin(sparse_locations)]
    location_plots = alt.vconcat()
    for location in data[DatasetFields.location].unique():
        location_subset = data.loc[data[DatasetFields.location] == location]
        # Only plot locations with at least five campaigns.
        if len(location_subset) > 4:
            error_types = alt.hconcat()
            for err in [
                ComputedFields.avg_errors_per_page_post.name,
                ComputedFields.avg_minor_errors_per_page_post.name,
                ComputedFields.avg_moderate_errors_per_page_post.name,
                ComputedFields.avg_serious_errors_per_page_post.name,
                ComputedFields.avg_critical_errors_per_page_post.name,
            ]:
                # The flattened dataset drops the "_post" suffix.
                feature_name = err.replace("_post", "")
                # Share the overall avg-errors range across all panels.
                scale_name = ComputedFields.avg_errors_per_page_post.name.replace(
                    "_post", ""
                )
                error_types |= (
                    alt.Chart(location_subset)
                    .mark_boxplot(ticks=True)
                    .encode(
                        y=alt.Y(
                            f"{feature_name}:Q",
                            scale=alt.Scale(
                                domain=(
                                    data[scale_name].min(),
                                    data[scale_name].max(),
                                ),
                                padding=1,
                            ),
                        ),
                        column=alt.Column(
                            f"{DatasetFields.candidate_position}:N",
                            spacing=60,
                            header=alt.Header(orient="bottom"),
                        ),
                    )
                )
            location_plots &= error_types
    save_path = PLOTTING_DIR / "location-errors-split.png"
    save_path.parent.mkdir(parents=True, exist_ok=True)
    location_plots.save(str(save_path))
    return save_path
def plot_error_types_boxplots(
    data: Optional[pd.DataFrame] = None,
) -> Path:
    """Box-plot every pre-computed average error-type column against each
    categorical variable, one row of panels per error type.

    Input data should be the "flattened" dataset.

    :return: Path of the saved figure.
    """
    # Load default data
    if data is None:
        data = flatten_access_eval_2021_dataset()
    # Only work against the post data for summary stats as there was no difference
    # pre and post (trial / contact)
    data = data[data[DatasetFields.trial] == "B - Post"]
    # Use all pre-computed avg error type features
    common_error_cols = [col for col in data.columns if "avg_error-type_" in col]
    # Create plot
    err_type_plots = alt.vconcat()
    for err_type in common_error_cols:
        cat_var_plot = alt.hconcat()
        for cat_var in [
            DatasetFields.electoral_position,
            DatasetFields.candidate_position,
            DatasetFields.election_result,
        ]:
            # One boxplot panel per (error type, categorical variable); the
            # y-scale spans that error type's full observed range.
            cat_var_plot |= (
                alt.Chart(data)
                .mark_boxplot(ticks=True)
                .encode(
                    y=alt.Y(
                        f"{err_type}:Q",
                        scale=alt.Scale(
                            domain=(
                                data[err_type].min(),
                                data[err_type].max(),
                            ),
                            padding=1,
                        ),
                    ),
                    column=alt.Column(
                        f"{cat_var}:N", spacing=60, header=alt.Header(orient="bottom")
                    ),
                )
            )
        err_type_plots &= cat_var_plot
    save_path = PLOTTING_DIR / "error-types-by-category-splits.png"
    save_path.parent.mkdir(parents=True, exist_ok=True)
    err_type_plots.save(str(save_path))
    return save_path
def _plot_and_fig_text(
    data: pd.DataFrame,
    plot_cols: List[str],
    fig_text_prefix: str,
    subset_name: str,
    column: Optional[alt.Column] = None,
    consistent_scale: bool = False,
) -> None:
    """Render one boxplot per column and write a companion stats text file.

    :param data: Flattened dataset subset containing ``plot_cols``.
    :param plot_cols: Columns to plot, one horizontal panel each.
    :param fig_text_prefix: Caption text; per-column mean/std/min/max summary
        sentences are appended to it.
    :param subset_name: Basename for the saved ``.png`` / ``.txt`` pair.
    :param column: Optional faceting column applied to every panel.
    :param consistent_scale: When True, all panels share one y-scale spanning
        the min/max of every plotted column.
    """
    if consistent_scale:
        # Share one y-scale across all panels so distributions compare.
        scale_min = min([data[col].min() for col in plot_cols])
        scale_max = max([data[col].max() for col in plot_cols])
        scale = alt.Scale(
            domain=(scale_min, scale_max),
            padding=1,
        )
    else:
        scale = alt.Scale()
    chart = alt.hconcat(spacing=40)
    for col in plot_cols:
        # Build the shared encoding once instead of duplicating the chart
        # construction in both branches.
        encode_kwargs: Dict[str, Any] = {"y": alt.Y(col, scale=scale)}
        if column is not None:
            encode_kwargs["column"] = column
        chart |= alt.Chart(data).mark_boxplot().encode(**encode_kwargs)
        # Append this column's summary stats to the caption text.
        fig_text_prefix += (
            f" {col} "
            f"mean: {round(data[col].mean(), 2)}, "
            f"std: {round(data[col].std(), 2)}, "
            f"min: {round(data[col].min(), 2)}, "
            f"max: {round(data[col].max(), 2)}."
        )
    # Fix: Altair charts are immutable -- .properties() returns a new chart,
    # so the original call silently discarded the title; keep the result.
    chart = chart.properties(title="Campaign Website Content")
    # Save fig and text
    fig_save_path = PLOTTING_DIR / f"{subset_name}.png"
    fig_save_path.parent.mkdir(parents=True, exist_ok=True)
    chart.save(str(fig_save_path))
    with open(fig_save_path.with_suffix(".txt"), "w") as open_f:
        open_f.write(fig_text_prefix)
def plot_summary_stats(
    data: Optional[pd.DataFrame] = None,
    subset_name: str = "",
    keep_cols: Optional[List[str]] = None,
    plot_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
    """Plot and caption summary statistics for content, normalized error
    severities, and error types, saving one figure+text pair per group.

    Input data should be the "flattened" dataset.

    :param subset_name: Prefix for the saved file names.
    :param keep_cols: Extra columns to carry along (e.g. a facet field).
    :param plot_kwargs: Extra keyword args forwarded to _plot_and_fig_text.
    """
    # Fix: the original used mutable default arguments ([] / {}), which are
    # shared across calls; None sentinels are backward compatible.
    if keep_cols is None:
        keep_cols = []
    if plot_kwargs is None:
        plot_kwargs = {}
    # Load default data
    if data is None:
        data = flatten_access_eval_2021_dataset()
    # Only work against the post data for summary stats as there was no difference
    # pre and post (trial / contact)
    data = data[data[DatasetFields.trial] == "B - Post"]
    # Split into different commonly grouped stats
    # Content is the actual website content
    content_cols = [
        DatasetFields.number_of_pages_post.replace("_post", ""),
        DatasetFields.ease_of_reading,
        DatasetFields.number_of_words,
        DatasetFields.number_of_unique_words,
    ]
    # Error count norm stats
    error_counts_normed_cols = [
        c.replace("_post", "")
        for c in [
            ComputedFields.avg_errors_per_page_post.name,
            ComputedFields.avg_minor_errors_per_page_post.name,
            ComputedFields.avg_moderate_errors_per_page_post.name,
            ComputedFields.avg_serious_errors_per_page_post.name,
            ComputedFields.avg_critical_errors_per_page_post.name,
        ]
    ]
    # Error types are the actual error value (what was the error)
    error_types_cols = [c for c in data.columns if "avg_error-type_" in c]
    # Create content plots
    _plot_and_fig_text(
        data=data[[*content_cols, *keep_cols]],
        plot_cols=content_cols,
        fig_text_prefix=(
            "Distributions for key content statistics "
            "gathered while scraping campaign websites."
        ),
        subset_name=f"{subset_name}content-stats",
        **plot_kwargs,
    )
    # Create norm stats plots
    _plot_and_fig_text(
        data=data[[*error_counts_normed_cols, *keep_cols]],
        plot_cols=error_counts_normed_cols,
        fig_text_prefix=(
            "Distributions for normalized error severity counts "
            "(counts for each error severity / number of pages) "
            "statistics gathered from scraping campaign websites."
        ),
        subset_name=f"{subset_name}error-severity",
        consistent_scale=True,
        **plot_kwargs,
    )
    # Create error types plots
    _plot_and_fig_text(
        data=data[[*error_types_cols, *keep_cols]],
        plot_cols=error_types_cols,
        fig_text_prefix=(
            "Distributions for normalized error types counts "
            "(counts for each error type / number of pages) "
            "statistics gathered from scraping campaign websites."
        ),
        subset_name=f"{subset_name}error-types",
        consistent_scale=True,
        **plot_kwargs,
    )
def plot_location_based_summary_stats(
    data: Optional[pd.DataFrame] = None,
) -> None:
    """Plot summary statistics faceted by location, excluding locations with
    too few campaigns.

    Input data should be the "flattened" dataset.
    """
    # Load default data
    if data is None:
        data = flatten_access_eval_2021_dataset()
    # Only work against the post data for summary stats as there was no difference
    # pre and post (trial / contact)
    data = data[data[DatasetFields.trial] == "B - Post"]
    # Drop any locations with less than two campaigns
    # NOTE(review): the filter below actually drops counts <= 2 (i.e. keeps
    # locations with three or more), and the sibling
    # plot_locations_against_errors_boxplots uses < 2 -- confirm which
    # threshold is intended.
    location_counts = data[DatasetFields.location].value_counts()
    viable_locations = location_counts[location_counts <= 2].index
    data = data[~data[DatasetFields.location].isin(viable_locations)]
    # Plot basic stats
    plot_summary_stats(
        data,
        subset_name="location-split-",
        keep_cols=[DatasetFields.location],
        plot_kwargs={"column": alt.Column(DatasetFields.location, spacing=60)},
    )
def plot_election_result_based_summary_stats(
    data: Optional[pd.DataFrame] = None,
) -> None:
    """Plot summary statistics faceted by election result.

    Input data should be the "flattened" dataset.
    """
    # Fall back to the default flattened 2021 dataset.
    if data is None:
        data = flatten_access_eval_2021_dataset()
    # Restrict to post-contact trial rows; pre/post showed no difference
    # for summary statistics.
    post_only = data[data[DatasetFields.trial] == "B - Post"]
    # Delegate plotting, carrying the election-result column for faceting.
    plot_summary_stats(
        post_only,
        subset_name="election-result-split-",
        keep_cols=[DatasetFields.election_result],
        plot_kwargs={"column": alt.Column(DatasetFields.election_result, spacing=40)},
    )
def plot_electoral_position_based_summary_stats(
    data: Optional[pd.DataFrame] = None,
) -> None:
    """Plot summary statistics faceted by electoral position.

    Input data should be the "flattened" dataset.
    """
    # Load default data
    if data is None:
        data = flatten_access_eval_2021_dataset()
    # Only work against the post data for summary stats as there was no difference
    # pre and post (trial / contact)
    data = data[data[DatasetFields.trial] == "B - Post"]
    # Plot basic stats
    plot_summary_stats(
        data,
        subset_name="election-position-split-",
        keep_cols=[DatasetFields.electoral_position],
        plot_kwargs={
            "column": alt.Column(DatasetFields.electoral_position, spacing=40)
        },
    )
def plot_candidate_position_based_summary_stats(
data: Optional[pd.DataFrame] = None,
) -> None:
"""
Input data should be the "flattened" dataset.
"""
# Load default data
if data is None:
data = flatten_access_eval_2021_dataset()
# Only work against | |
<gh_stars>1-10
"""The tests for the Template fan platform."""
import logging
import pytest
import voluptuous as vol
from homeassistant import setup
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_OSCILLATING,
ATTR_SPEED,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
)
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from tests.common import assert_setup_component, async_mock_service
from tests.components.fan import common
_LOGGER = logging.getLogger(__name__)
_TEST_FAN = "fan.test_fan"
# Represent for fan's state
_STATE_INPUT_BOOLEAN = "input_boolean.state"
# Represent for fan's state
_STATE_AVAILABILITY_BOOLEAN = "availability_boolean.state"
# Represent for fan's speed
_SPEED_INPUT_SELECT = "input_select.speed"
# Represent for fan's oscillating
_OSC_INPUT = "input_select.osc"
# Represent for fan's direction
_DIRECTION_INPUT_SELECT = "input_select.direction"
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Registers a mock "test.automation" service and returns the list that
    # accumulates its service-call objects.
    return async_mock_service(hass, "test", "automation")
# Configuration tests #
async def test_missing_optional_config(hass, calls):
    """Test: missing optional template is ok."""
    # assert_setup_component(1, "fan") expects exactly one fan entity to load.
    with assert_setup_component(1, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "value_template": "{{ 'on' }}",
                            "turn_on": {"service": "script.fan_on"},
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # Speed/oscillating/direction templates were omitted, so all are None.
    _verify(hass, STATE_ON, None, None, None)
async def test_missing_value_template_config(hass, calls):
    """Test: missing 'value_template' will fail."""
    # assert_setup_component(0, "fan") expects setup to yield zero entities.
    with assert_setup_component(0, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "turn_on": {"service": "script.fan_on"},
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.async_all() == []
async def test_missing_turn_on_config(hass, calls):
    """Test: missing 'turn_on' will fail."""
    # assert_setup_component(0, "fan") expects setup to yield zero entities.
    with assert_setup_component(0, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "value_template": "{{ 'on' }}",
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.async_all() == []
async def test_missing_turn_off_config(hass, calls):
    """Test: missing 'turn_off' will fail."""
    # assert_setup_component(0, "fan") expects setup to yield zero entities.
    with assert_setup_component(0, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "value_template": "{{ 'on' }}",
                            "turn_on": {"service": "script.fan_on"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.async_all() == []
async def test_invalid_config(hass, calls):
    """Test: invalid top-level config (no 'fan' domain wrapper) will fail."""
    # The previous docstring ("missing 'turn_off' will fail") was a
    # copy-paste error: here the platform config is placed at the top level
    # instead of under a "fan" key, so setup creates no entities.
    with assert_setup_component(0, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "platform": "template",
                "fans": {
                    "test_fan": {
                        "value_template": "{{ 'on' }}",
                        "turn_on": {"service": "script.fan_on"},
                    }
                },
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.async_all() == []
# End of configuration tests #
# Template tests #
async def test_templates_with_entities(hass, calls):
    """Test templates with values from other entities."""
    # State template keys off input_boolean.state; note the comparison is
    # against the *string* 'True', matching async_set(..., True) below.
    value_template = """
        {% if is_state('input_boolean.state', 'True') %}
            {{ 'on' }}
        {% else %}
            {{ 'off' }}
        {% endif %}
    """
    with assert_setup_component(1, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "value_template": value_template,
                            "speed_template": "{{ states('input_select.speed') }}",
                            "oscillating_template": "{{ states('input_select.osc') }}",
                            "direction_template": "{{ states('input_select.direction') }}",
                            "turn_on": {"service": "script.fan_on"},
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # Before the source entities exist, the fan reports off with no attrs.
    _verify(hass, STATE_OFF, None, None, None)
    hass.states.async_set(_STATE_INPUT_BOOLEAN, True)
    hass.states.async_set(_SPEED_INPUT_SELECT, SPEED_MEDIUM)
    hass.states.async_set(_OSC_INPUT, "True")
    hass.states.async_set(_DIRECTION_INPUT_SELECT, DIRECTION_FORWARD)
    await hass.async_block_till_done()
    _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD)
async def test_template_with_unavailable_entities(hass, calls):
    """Test unavailability with value_template."""
    # A value_template rendering 'unavailable' is not a valid on/off value,
    # so the fan should fall back to off rather than becoming unavailable.
    with assert_setup_component(1, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "value_template": "{{ 'unavailable' }}",
                            "turn_on": {"service": "script.fan_on"},
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.get(_TEST_FAN).state == STATE_OFF
async def test_template_with_unavailable_parameters(hass, calls):
    """Test unavailability of speed, direction and oscillating parameters."""
    # Invalid ('unavailable') attribute templates should be dropped to None
    # while the valid value_template still turns the fan on.
    with assert_setup_component(1, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "value_template": "{{ 'on' }}",
                            "speed_template": "{{ 'unavailable' }}",
                            "oscillating_template": "{{ 'unavailable' }}",
                            "direction_template": "{{ 'unavailable' }}",
                            "turn_on": {"service": "script.fan_on"},
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    _verify(hass, STATE_ON, None, None, None)
async def test_availability_template_with_entities(hass, calls):
    """Test availability templates with values from other entities."""
    # The availability_template tracks availability_boolean.state; toggling
    # that entity should flip the fan between available and unavailable.
    with assert_setup_component(1, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "availability_template": "{{ is_state('availability_boolean.state', 'on') }}",
                            "value_template": "{{ 'on' }}",
                            "speed_template": "{{ 'medium' }}",
                            "oscillating_template": "{{ 1 == 1 }}",
                            "direction_template": "{{ 'forward' }}",
                            "turn_on": {"service": "script.fan_on"},
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # When template returns true..
    hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_ON)
    await hass.async_block_till_done()
    # Device State should not be unavailable
    assert hass.states.get(_TEST_FAN).state != STATE_UNAVAILABLE
    # When Availability template returns false
    hass.states.async_set(_STATE_AVAILABILITY_BOOLEAN, STATE_OFF)
    await hass.async_block_till_done()
    # device state should be unavailable
    assert hass.states.get(_TEST_FAN).state == STATE_UNAVAILABLE
async def test_templates_with_valid_values(hass, calls):
    """Test templates with valid values."""
    # All templates render constant, valid values, so the fan should expose
    # them directly after startup.
    with assert_setup_component(1, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "value_template": "{{ 'on' }}",
                            "speed_template": "{{ 'medium' }}",
                            "oscillating_template": "{{ 1 == 1 }}",
                            "direction_template": "{{ 'forward' }}",
                            "turn_on": {"service": "script.fan_on"},
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    _verify(hass, STATE_ON, SPEED_MEDIUM, True, DIRECTION_FORWARD)
async def test_templates_invalid_values(hass, calls):
    """Test templates with invalid values."""
    # Every template renders a value outside its allowed domain, so the fan
    # should report off with all attributes None.
    with assert_setup_component(1, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "value_template": "{{ 'abc' }}",
                            "speed_template": "{{ '0' }}",
                            "oscillating_template": "{{ 'xyz' }}",
                            "direction_template": "{{ 'right' }}",
                            "turn_on": {"service": "script.fan_on"},
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    _verify(hass, STATE_OFF, None, None, None)
async def test_invalid_availability_template_keeps_component_available(hass, caplog):
    """Test that an invalid availability keeps the device available."""
    # The availability_template raises (undefined 'x'); the entity must stay
    # available and the error must be logged.
    with assert_setup_component(1, "fan"):
        assert await setup.async_setup_component(
            hass,
            "fan",
            {
                "fan": {
                    "platform": "template",
                    "fans": {
                        "test_fan": {
                            "value_template": "{{ 'on' }}",
                            "availability_template": "{{ x - 12 }}",
                            "speed_template": "{{ states('input_select.speed') }}",
                            "oscillating_template": "{{ states('input_select.osc') }}",
                            "direction_template": "{{ states('input_select.direction') }}",
                            "turn_on": {"service": "script.fan_on"},
                            "turn_off": {"service": "script.fan_off"},
                        }
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    assert hass.states.get("fan.test_fan").state != STATE_UNAVAILABLE
    assert ("Could not render availability_template template") in caplog.text
    assert ("UndefinedError: 'x' is undefined") in caplog.text
# End of template tests #
# Function tests #
async def test_on_off(hass, calls):
    """Test turn on and turn off."""
    # _register_components / _verify are helpers defined later in this file.
    await _register_components(hass)
    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)
    # verify
    assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON
    _verify(hass, STATE_ON, None, None, None)
    # Turn off fan
    await common.async_turn_off(hass, _TEST_FAN)
    # verify
    assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_OFF
    _verify(hass, STATE_OFF, None, None, None)
async def test_on_with_speed(hass, calls):
    """Test turn on with speed."""
    await _register_components(hass)
    # Turn on fan with high speed
    await common.async_turn_on(hass, _TEST_FAN, SPEED_HIGH)
    # verify both the backing entities and the fan state reflect the speed
    assert hass.states.get(_STATE_INPUT_BOOLEAN).state == STATE_ON
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)
async def test_set_speed(hass, calls):
    """Test set valid speed."""
    await _register_components(hass)
    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)
    # Set fan's speed to high
    await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH)
    # verify
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)
    # Set fan's speed to medium — a second change should also propagate
    await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM)
    # verify
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_MEDIUM
    _verify(hass, STATE_ON, SPEED_MEDIUM, None, None)
async def test_set_invalid_speed_from_initial_stage(hass, calls):
    """Test set invalid speed when fan is in initial state."""
    await _register_components(hass)
    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)
    # Set fan's speed to 'invalid'
    await common.async_set_speed(hass, _TEST_FAN, "invalid")
    # verify speed is unchanged (still the initial empty string)
    assert hass.states.get(_SPEED_INPUT_SELECT).state == ""
    _verify(hass, STATE_ON, None, None, None)
async def test_set_invalid_speed(hass, calls):
    """Test set invalid speed when fan has valid speed."""
    await _register_components(hass)
    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)
    # Set fan's speed to high
    await common.async_set_speed(hass, _TEST_FAN, SPEED_HIGH)
    # verify
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)
    # Set fan's speed to 'invalid'
    await common.async_set_speed(hass, _TEST_FAN, "invalid")
    # verify speed is unchanged (invalid values must be rejected silently)
    assert hass.states.get(_SPEED_INPUT_SELECT).state == SPEED_HIGH
    _verify(hass, STATE_ON, SPEED_HIGH, None, None)
async def test_custom_speed_list(hass, calls):
    """Test set custom speed list."""
    # Register the fan with a custom speed list; standard names like
    # 'medium' then become invalid.
    await _register_components(hass, ["1", "2", "3"])
    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)
    # Set fan's speed to '1'
    await common.async_set_speed(hass, _TEST_FAN, "1")
    # verify
    assert hass.states.get(_SPEED_INPUT_SELECT).state == "1"
    _verify(hass, STATE_ON, "1", None, None)
    # Set fan's speed to 'medium' which is invalid
    await common.async_set_speed(hass, _TEST_FAN, SPEED_MEDIUM)
    # verify that speed is unchanged
    assert hass.states.get(_SPEED_INPUT_SELECT).state == "1"
    _verify(hass, STATE_ON, "1", None, None)
async def test_set_osc(hass, calls):
    """Test set oscillating."""
    await _register_components(hass)
    # Turn on fan
    await common.async_turn_on(hass, _TEST_FAN)
    # Set fan's osc to True
    await common.async_oscillate(hass, _TEST_FAN, True)
    # verify (the input_select stores the stringified boolean)
    assert hass.states.get(_OSC_INPUT).state == "True"
    _verify(hass, STATE_ON, None, True, None)
    # Set fan's osc to False
    await common.async_oscillate(hass, _TEST_FAN, False)
    # verify
    assert hass.states.get(_OSC_INPUT).state == "False"
    _verify(hass, STATE_ON, None, False, None)
async def test_set_invalid_osc_from_initial_state(hass, calls):
"""Test set invalid oscillating when fan is in initial state."""
await _register_components(hass)
# Turn on fan
await common.async_turn_on(hass, _TEST_FAN)
# Set fan's osc | |
""" Includes two functions which use shortest path policies
1) run_sss_curriculum - trains a PyMARL agent using experiences gathered
while following an epsilon greedy shortest path policy.
2) mean_sss_time - returns the mean time taken to complete a map while following
an epsilon greedy shortest path policy.
"""
import datetime
import os
from os.path import dirname, abspath
import time
#from sympy import EX
import yaml
import torch as th
from types import SimpleNamespace as SN
from utils.logging import Logger, log_mac_weights
import numpy as np
import random
from logging import getLogger, INFO
from rapport_topological.navigation import construct_shortest_path_policy
from rapport_models.markov.state import State
from learners import REGISTRY as le_REGISTRY
from runners import REGISTRY as r_REGISTRY
from controllers import REGISTRY as mac_REGISTRY
from components.episode_buffer import ReplayBuffer
from components.transforms import OneHot
from src.components.episode_buffer import EpisodeBatch
from runners import AsyncEpisodeRunner
from main import recursive_dict_update
from run import args_sanity_check
from torch.utils.tensorboard import SummaryWriter
def load_configs():
    """Load and merge the default, algorithm (qmix) and environment (camas)
    PyMARL configuration dictionaries from their default locations.

    :return: merged configuration dict (env/alg values override defaults).
    """
    config_dir = os.path.join(os.path.dirname(__file__), "config")

    def _load_yaml(*path_parts):
        # Read one YAML config, failing with the actual file name.
        # Fix: the original reported "default.yaml error" for qmix.yaml and
        # camas.yaml too (copy-paste).  The assert-on-failure pattern is kept
        # so callers see the same AssertionError as before.
        path = os.path.join(config_dir, *path_parts)
        with open(path, "r") as f:
            try:
                return yaml.load(f, Loader=yaml.FullLoader)
            except yaml.YAMLError as exc:
                assert False, "{} error: {}".format(os.path.basename(path), exc)

    # Get the defaults from default.yaml
    config_dict = _load_yaml("default.yaml")
    # Get qmix params from qmix.yaml
    alg_dict = _load_yaml("algs", "qmix.yaml")
    # Get camas params from camas.yaml
    env_dict = _load_yaml("envs", "camas.yaml")
    config_dict = recursive_dict_update(config_dict, alg_dict)
    config_dict = recursive_dict_update(config_dict, env_dict)
    return config_dict
class SSS_Runner(AsyncEpisodeRunner):
""" PyMARL Episode Runner for gathering shortest path based experience episodes
"""
debug = False
def __init__(self, args, logger, epsilon_mean=0.15, epsilon_var=0.1):
super().__init__(args, logger)
self.epsilon_mean = epsilon_mean
self.epsilon_var = epsilon_var
self.epsilon = self._draw_epsilon()
self.env.reset()
self.policies = {agent: construct_shortest_path_policy(self.env._tm, self.env._goal_states[agent])
for agent in self.env.agents}
def run(self) -> EpisodeBatch:
""" Returns an transistions for one episode for an agent acting in an
epsilon greedy fashion while following its shortest path.
"""
if self.debug: print('*** reset environment ***')
self.reset()
self.epsilon = self._draw_epsilon()
terminated = False
episode_return = 0
#self.mac.init_hidden(batch_size=self.batch_size) # NOTE not sure what this is
obs, reward, done, info = self.env.last()
k = 0
while not terminated:
k += 1
pre_transition_data = self.env.get_pretran_data()
if self.debug:
print(f'-- step {k} \nState: {self.env.state()}, Agent: {self.env.agent_selection}, Time: {self.env.sim_time()}')
print(f"Pre transition data: {pre_transition_data}")
self.batch.update(pre_transition_data, ts=self.t)
#actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=False)
#print(f'actions {actions}, type {type(actions)}, size {actions.size()}')
#print('my selector: ', self.select_actions())
actions = self.select_actions()
action = actions[0][self.env.agent_idx()].item()
if action == 4:
self.env.step(None) # terminated action to update env correctly
else:
self.env.step(action)
obs, reward, done, env_info = self.env.last()
if done:
if self.debug: print(f'{self.env.agent_selection} done!')
if len(self.env.agents) == 1: terminated = True
if self.debug: print(f'Actions: {actions}\nReward {reward}, Time {self.env.sim_time()}')
episode_return += reward
post_transition_data = {
"actions": actions,
"reward": [(reward,)],
"terminated": [[(terminated),]], # NOTE used to be: [(terminated != env_info.get("episode_limit", False),)] # env info here is info from step()
}
self.batch.update(post_transition_data, ts=self.t)
self.t += 1
if self.t == self.episode_limit:
terminated = True
pre_transition_data = self.env.get_pretran_data()
self.batch.update(pre_transition_data, ts=self.t)
actions = self.select_actions()
self.batch.update({"actions": actions}, ts=self.t)
self.t_env += self.t
return self.batch
    def select_actions(self) -> th.Tensor:
        """Choose the action that stays on the shortest path, or a random
        available action, depending on an epsilon test.

        Returns a (1, n_agents) int tensor where every non-acting agent is
        assigned the no-op action 4 and the currently selected agent gets
        its chosen action.
        """
        # Default every agent to the no-op/"done" action (4).
        acts = th.ones(1, self.args.n_agents, dtype=int)*4
        # choose action for agent acting
        agent = self.env.agent_selection
        agent_loc = self.env._agent_location[agent]
        agent_idx = self.env.agent_name_mapping[self.env.agent_selection]
        if self.debug: print(f'choosing action for {agent}, loc: {agent_loc}, idx: {agent_idx}')
        if random.uniform(0, 1) > self.epsilon: # exploit
            # Follow the precomputed shortest-path policy for this location.
            camas_act = self.policies[agent]._state_action_map[State({'loc': agent_loc})]
            if self.debug: print(f'exploiting, camas act {camas_act}')
            if camas_act is None:
                # No shortest-path action from here: emit the no-op action.
                action = 4
            else:
                action = self.env.to_gym_action(agent_loc, camas_act)
        else: # explore
            # Sample uniformly among the actions currently marked available.
            avail_actions = self.batch["avail_actions"][:, self.t]
            action = random.choice([i for i, x in enumerate(avail_actions[0, agent_idx]) if x==1])
            if self.debug: print(f'random, action {action}, avail agent acts {avail_actions[0, agent_idx]}')
        acts[0, agent_idx] = action
        if self.debug: print(f'acts {acts}')
        return acts
def _draw_epsilon(self):
epsilon = np.random.normal(self.epsilon_mean, self.epsilon_var)
if epsilon < 0: epsilon = 0
return epsilon
    def episode_makespan(self):
        """Return the simulated time taken by the most recent episode."""
        return self.env.sim_time()
def run_sss_curriculum(args,
                       logger,
                       num_episodes,
                       cycle_after,
                       max_train_steps,
                       test_makespan_cutoff,
                       test_episodes=20,
                       epsilon_mean=0.25,
                       epsilon_var=0.15,
                       log_freq=10000,
                       agent_weight_log_freq=20000):
    """Trains a PyMARL method using shortest path experiences and saves the result
    to the results/model directory

    Args:
        args: experiment configuration namespace (env args, runner, mac, learner, ...)
        logger: PyMARL logger wrapper (console + tensorboard)
        num_episodes (int): number of experience episodes to gather
        cycle_after (int): training steps between gathering a fresh batch of experience
        max_train_steps (int): number of steps to train the model for
        test_makespan_cutoff (float): mean test sim-time under which training is
            considered passed (must be met on two consecutive evaluations)
        test_episodes (int): number of episodes to evaluate the model on once training is complete
        epsilon_mean (float): mean of the per-episode exploration rate used by the SSS runner
        epsilon_var (float): std-dev of the per-episode exploration rate
        log_freq (int): frequency (in train steps) of evaluation and stat logging
        agent_weight_log_freq (int): frequency (in train steps) of agent weight logging
    """
    def _gather_data(_num_episodes, _buffer, _sss_runner, _logger, _iteration=0):
        # Roll out shortest-path episodes with the SSS runner, insert them into
        # the replay buffer, and log reward/epsilon/makespan statistics.
        _start_time = time.time()
        _logger.console_logger.info(f'...gathering {_num_episodes} of data, iteration: {_iteration}...')
        ep_rewards = np.zeros(_num_episodes)
        ep_epsilons = np.zeros(_num_episodes)
        ep_times = np.zeros(_num_episodes)
        ep_step_count = np.zeros(_num_episodes)
        for k in range(_num_episodes):
            episode_batch = _sss_runner.run()
            _buffer.insert_episode_batch(episode_batch)
            ep_rewards[k] = th.sum(episode_batch["reward"])
            ep_epsilons[k] = _sss_runner.epsilon
            ep_times[k] = _sss_runner.episode_makespan()
            ep_step_count[k] = _sss_runner.t
            if k % log_freq == 0:
                _logger.console_logger.info(f'...{k} episodes complete, mean time {np.mean(ep_times)} ({np.std(ep_times)}), mean step count {np.mean(ep_step_count)} ({np.std(ep_step_count)})...')
                _logger.console_logger.info(f'...mean rewards {np.mean(ep_rewards)} ({np.std(ep_rewards)}), mean epsilon {np.mean(ep_epsilons)} ({np.std(ep_epsilons)})')
        save_curriculum_data([ep_rewards, ep_epsilons, ep_times, ep_step_count], _iteration)
        data_gathering_time = time.time() - _start_time
        _logger.console_logger.info(f'...time to gather {_num_episodes} episodes: {datetime.timedelta(seconds=data_gathering_time)}, mean time {np.mean(ep_times)} ({np.std(ep_times)}), mean step count {np.mean(ep_step_count)} ({np.std(ep_step_count)})...')
        _logger.console_logger.info(f'...mean rewards {np.mean(ep_rewards)} ({np.std(ep_rewards)}), mean epsilon {np.mean(ep_epsilons)} ({np.std(ep_epsilons)})')

    def _test_env(_runner, _test_episdoes):
        """ Test environment using `_runner`
        Returns:
            tt: test sim times
            sc: test step counts
            gc: test reached goal %'s
        """
        tt, sc, gc = [], [], []
        for _ in range(_test_episdoes):
            _runner.run(test_mode=True)
            tt.append(_runner.env.sim_time())
            sc.append(_runner.env.step_count())
            gc.append(_runner.env.agents_at_goal())
        return tt, sc, gc

    def _save_model(_args, _logger, _learner, label):
        # Save learner weights under curriculum/<unique_token>/<label>.
        _logger.console_logger.info('...saving model...')
        _save_path = os.path.join("curriculum", _args.unique_token, str(label))
        os.makedirs(_save_path, exist_ok=True)
        _logger.console_logger.info("Saving models to {}".format(_save_path))
        _learner.save_models(_save_path)

    print(' -- Env args', args.env_args)
    start_time = time.time()
    # Tensorboard output lands in results/curriculum_tb/<unique_token>.
    tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), "results", "curriculum_tb")
    tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(args.unique_token)
    logger.setup_tb(tb_exp_direc)
    args.log_interval = log_freq
    args.learner_log_interval = log_freq
    # The main runner is used for evaluation; the SSS runner gathers
    # shortest-path experience for the replay buffer.
    main_runner = r_REGISTRY[args.runner](args=args, logger=logger)
    sss_runner = SSS_Runner(args, logger, epsilon_mean=epsilon_mean, epsilon_var=epsilon_var)
    # Set up schemes and groups
    env_info = sss_runner.get_env_info()
    args.n_agents = env_info["n_agents"]
    args.n_actions = env_info["n_actions"]
    args.state_shape = env_info["state_shape"]
    scheme = {
        "state": {"vshape": env_info["state_shape"]},
        "obs": {"vshape": env_info["obs_shape"], "group": "agents"},
        "actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
        "avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
        "reward": {"vshape": (1,)},
        "terminated": {"vshape": (1,), "dtype": th.uint8},
    }
    groups = {
        "agents": args.n_agents
    }
    preprocess = {
        "actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
    }
    # +1 timestep so the closing state/actions slot fits (see SSS_Runner.run).
    buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
                          preprocess=preprocess,
                          device="cpu" if args.buffer_cpu_only else args.device)
    # Setup multiagent controller here
    mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
    # Give runners the scheme
    main_runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
    sss_runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
    # Learner
    learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
    if args.use_cuda:
        learner.cuda()

    ## --- Save config ---
    config_save_path = os.path.join("curriculum", args.unique_token)
    os.makedirs(config_save_path, exist_ok=True)
    with open(os.path.join(config_save_path, "config.yaml"), 'w') as outp: # NOTE this has not been tested
        yaml.dump(args, outp)

    ## --- Gather Data ---
    _gather_data(num_episodes, buffer, sss_runner, logger)

    ## --- Train Network ---
    logger.console_logger.info(f'...training network...')
    for i in range(max_train_steps):
        episode_sample = buffer.sample(args.batch_size)
        # Truncate batch to only filled timesteps
        max_ep_t = episode_sample.max_t_filled()
        episode_sample = episode_sample[:, :max_ep_t]
        if episode_sample.device != args.device:
            episode_sample.to(args.device)
        learner.train(episode_sample, i, i)
        if (i % cycle_after == 0) and (i > 0): # Gather new data with freq `cycle_after`
            _gather_data(num_episodes, buffer, sss_runner, logger, i/cycle_after)
        if i % log_freq == 0:
            # Periodic evaluation with the main (greedy) runner.
            tt, sc, gc = _test_env(main_runner, test_episodes)
            logger.log_stat("Test_mean_sim_time", np.mean(tt), i)
            logger.log_stat("Test_mean_step_count", np.mean(sc), i)
            logger.log_stat("Test_mean_goal_found", np.mean(gc), i)
            logger.console_logger.info(f'...logging at step {i}, mean sim time {np.mean(tt)}...')
            if np.mean(tt) < test_makespan_cutoff:
                # Require the cutoff to be met on a second, independent
                # evaluation before stopping early.
                tt, _, _ = _test_env(main_runner, test_episodes)
                if np.mean(tt) < test_makespan_cutoff:
                    logger.console_logger.info(f'Training passed evaluation at step {i}. Mean makespan: {np.mean(tt)}, cutoff: {test_makespan_cutoff}')
                    break
        if i % agent_weight_log_freq == 0:
            log_mac_weights(logger, mac, i)
    _save_model(args, logger, learner, i)
    _gather_data(num_episodes, buffer, sss_runner, logger, 1000)
    # -- Train for 20e3 more steps --
    for i in range(20000):
        episode_sample = buffer.sample(args.batch_size)
        # Truncate batch to only filled timesteps
        max_ep_t = episode_sample.max_t_filled()
        episode_sample = episode_sample[:, :max_ep_t]
        if episode_sample.device != args.device:
            episode_sample.to(args.device)
        learner.train(episode_sample, i, i)
        if i % log_freq == 0:
            tt, sc, gc = _test_env(main_runner, test_episodes)
            logger.log_stat("Test_mean_sim_time", np.mean(tt), i)
            logger.log_stat("Test_mean_step_count", np.mean(sc), i)
            logger.log_stat("Test_mean_goal_found", np.mean(gc), i)
            logger.console_logger.info(f'...logging at step {i}, mean sim time {np.mean(tt)}...')
        if i % agent_weight_log_freq == 0:
            log_mac_weights(logger, mac, i)
| |
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_gray",
}
]
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "National ASTER Map",
"title": "National ASTER Map of Australia",
"abstract": """
This dataset comprises a set of 14+ geoscience products made up of mosaiced ASTER scenes across Australia.
The individual geoscience products are a combination of bands and band ratios to highlight different mineral groups and parameters including:
False colour composite
CSIRO Landsat TM Regolith Ratios
Green vegetation content
Ferric oxide content
Ferric oxide composition
Ferrous iron index
Opaque index
AlOH group content
AlOH group composition
Kaolin group index
FeOH group content
MgOH group content
MgOH group composition
Ferrous iron content in MgOH/carbonate""",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the label is used
# to describe the label to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "False Colour Mosaic",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
False colour RGB composite
Red: B3
Green: B2
Blue: B1
(red = green vegetation)
Use this image to help understand non-geological differences within and between ASTER scenes caused by green vegetation (red), fire scars, thin and thick cloud and cloud shadows.
Use band 2 only for a gray-scale background to the content, composition and index colour products.""",
# The WMS name for the layer
"name": "aster_false_colour",
# The Datacube name for the associated data product
"product_name": "aster_false_colour",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"wcs_default_bands": ["Band_1", "Band_2", "Band_3"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "false_colour",
"title": "False Colour",
"abstract": "Simple false-colour image using ASTER Bands 3 as red, 2 as green and 1 as blue",
"components": {
"red": {
"Band_1": 1.0
},
"green": {
"Band_2": 1.0
},
"blue": {
"Band_3": 1.0
}
},
"scale_range": [0.0, 255.0]
},
{
"name": "gray",
"title": "B2 Grayscale",
"abstract": "Simple grayscale image using ASTER Band 2",
"components": {
"red": {
"Band_2": 1.0
},
"green": {
"Band_2": 1.0
},
"blue": {
"Band_2": 1.0
}
},
"scale_range": [0.0, 255.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "false_colour",
}, # ASTER False Colour
{
# Included as a keyword for the layer
"label": "Regolith Ratios",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
3 band RGB composite
Red: B3/B2
Green: B3/B7
Blue: B4/B7
(white = green vegetation)
Use this image to help interpret:
(1) the amount of green vegetation cover (appears as white);
(2) basic spectral separation (colour) between different regolith and geological units and regions/provinces; and
(3) evidence for unmasked cloud (appears as green).""",
# The WMS name for the layer
"name": "aster_regolith_ratios",
# The Datacube name for the associated data product
"product_name": "aster_regolith_ratios",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative)
# Used for rounding off scene times to a date.
# 9 is good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines what portions of dataset is potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
# Define layer wide legend graphic if no style is passed
# to GetLegendGraphic
"wcs_default_bands": ["Band_1", "Band_2", "Band_3"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"styles": [
# Examples of styles which are linear combinations of the available spectral bands.
#
{
"name": "simple_rgb",
"title": "Simple RGB",
"abstract": "Simple true-colour image, using the red, green and blue bands",
"components": {
"red": {
"Band_1": 1.0
},
"green": {
"Band_2": 1.0
},
"blue": {
"Band_3": 1.0
}
},
"scale_range": [0.0, 255.0]
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "simple_rgb",
}, # ASTER Regolith Ratios
{
# Included as a keyword for the layer
"label": "AlOH Group Composition",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "",
"abstract": """
Band ratio: B5/B7
Blue is well ordered kaolinite, Al-rich muscovite/illite, paragonite, pyrophyllite
Red is Al-poor (Si-rich) muscovite (phengite)
Useful for mapping:
(1) exposed saprolite/saprock is often white mica or Al-smectite (warmer colours) whereas transported materials are often kaolin-rich (cooler colours);
(2) clays developed over carbonates, especially Al-smectite (montmorillonite, beidellite) will produce middle to warmers colours;
(3) stratigraphic mapping based on different clay-types; and
(4) lithology-overprinting hydrothermal alteration, e.g. Si-rich and K-rich phengitic mica (warmer colours).
Combine with Ferrous iron in MgOH and FeOH content products to look for evidence of overlapping/juxtaposed potassic metasomatism in ferromagnesian parents rocks (e.g. Archaean greenstone associated Au mineralisation) +/- associated distal propyllitic alteration (e.g. chlorite, amphibole).""",
# The WMS name for the layer
"name": "aster_aloh_group_composition",
# The Datacube name for the associated data product
"product_name": "aster_aloh_group_composition",
# The Datacube name for the associated pixel-quality product (optional)
# The name of the associated Datacube pixel-quality product
# "pq_dataset": "s2b_ard_granule",
# The name of the measurement band for the pixel-quality product
# (Only required if pq_dataset is set)
# "pq_band": "pixel_quality",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 10.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. | |
<filename>hpc_acm/api/default_api.py<gh_stars>1-10
# coding: utf-8
"""
HPC Web API
Preview # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hpc_acm.api_client import ApiClient
class DefaultApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def cancel_clusrun_job(self, id, **kwargs): # noqa: E501
"""Cancel a clusrun # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.cancel_clusrun_job(id, async=True)
>>> result = thread.get()
:param async bool
:param int id: Job id (required)
:param JobUpdate job:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.cancel_clusrun_job_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.cancel_clusrun_job_with_http_info(id, **kwargs) # noqa: E501
return data
def cancel_clusrun_job_with_http_info(self, id, **kwargs): # noqa: E501
"""Cancel a clusrun # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.cancel_clusrun_job_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
:param int id: Job id (required)
:param JobUpdate job:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'job'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cancel_clusrun_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `cancel_clusrun_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'job' in params:
body_params = params['job']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['aad'] # noqa: E501
return self.api_client.call_api(
'/clusrun/{id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def cancel_diagnostic_job(self, id, **kwargs): # noqa: E501
"""Cancel a diagnostic test run # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.cancel_diagnostic_job(id, async=True)
>>> result = thread.get()
:param async bool
:param int id: Job id (required)
:param JobUpdate job:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.cancel_diagnostic_job_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.cancel_diagnostic_job_with_http_info(id, **kwargs) # noqa: E501
return data
def cancel_diagnostic_job_with_http_info(self, id, **kwargs): # noqa: E501
"""Cancel a diagnostic test run # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.cancel_diagnostic_job_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
:param int id: Job id (required)
:param JobUpdate job:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'job'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cancel_diagnostic_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `cancel_diagnostic_job`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'job' in params:
body_params = params['job']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['aad'] # noqa: E501
return self.api_client.call_api(
'/diagnostics/{id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_clusrun_job(self, **kwargs): # noqa: E501
"""Create a clusrun # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_clusrun_job(async=True)
>>> result = thread.get()
:param async bool
:param Job job:
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_clusrun_job_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_clusrun_job_with_http_info(**kwargs) # noqa: E501
return data
def create_clusrun_job_with_http_info(self, **kwargs): # noqa: E501
"""Create a clusrun # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_clusrun_job_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param Job job:
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['job'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_clusrun_job" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'job' in params:
body_params = params['job']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['aad'] # noqa: E501
return self.api_client.call_api(
'/clusrun', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Job', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_diagnostic_job(self, **kwargs): # noqa: E501
"""Create a diagnostic test run # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_diagnostic_job(async=True)
>>> result = thread.get()
:param async bool
:param Job job:
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_diagnostic_job_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_diagnostic_job_with_http_info(**kwargs) # noqa: E501
return data
def create_diagnostic_job_with_http_info(self, **kwargs): # noqa: E501
"""Create a diagnostic test run # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_diagnostic_job_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param Job job:
:return: Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['job'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_diagnostic_job" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'job' in params:
body_params = params['job']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['aad'] # noqa: E501
return self.api_client.call_api(
'/diagnostics', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Job', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_clus_run_job_summary(self, **kwargs): # noqa: E501
"""Get summary of ClusRun jobs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_clus_run_job_summary(async=True)
>>> result = thread.get()
:param async bool
:return: JobSummary
If the method is | |
<reponame>Afoucaul/cairocffi
"""
cairocffi.fonts
~~~~~~~~~~~~~~~
Bindings for font-related objects.
:copyright: Copyright 2013-2019 by <NAME>
:license: BSD, see LICENSE for details.
"""
from . import _check_status, _keepref, cairo, constants, ffi
from .matrix import Matrix
def _encode_string(string):
    """Return a C ``char[]`` cdata holding *string*, encoding text as UTF-8."""
    data = string if isinstance(string, bytes) else string.encode('utf8')
    return ffi.new('char[]', data)
class FontFace(object):
    """The base class for all font face types.

    Should not be instantiated directly, but see :doc:`cffi_api`.
    An instance may be returned for cairo font face types
    that are not (yet) defined in cairocffi.
    """
    def __init__(self, pointer):
        # Take ownership of the pointer: register cairo's destroy function so
        # the underlying cairo_font_face_t is released when this wrapper is
        # garbage-collected.
        self._pointer = ffi.gc(
            pointer, _keepref(cairo, cairo.cairo_font_face_destroy))
        self._check_status()

    def _check_status(self):
        # Raise the appropriate cairocffi exception if cairo reports an
        # error status on this font face.
        _check_status(cairo.cairo_font_face_status(self._pointer))

    @staticmethod
    def _from_pointer(pointer, incref):
        """Wrap an existing :c:type:`cairo_font_face_t *` cdata pointer.

        :type incref: bool
        :param incref:
            Whether to increase the :ref:`reference count <refcounting>` now.
        :return:
            A new instance of :class:`FontFace` or one of its sub-classes,
            depending on the face's type.
        """
        if pointer == ffi.NULL:
            raise ValueError('Null pointer')
        if incref:
            cairo.cairo_font_face_reference(pointer)
        # Dispatch on the cairo font type so callers get the most specific
        # wrapper class available, falling back to the FontFace base class.
        self = object.__new__(FONT_TYPE_TO_CLASS.get(
            cairo.cairo_font_face_get_type(pointer), FontFace))
        FontFace.__init__(self, pointer)  # Skip the subclass's __init__
        return self
class ToyFontFace(FontFace):
    """A font face built from a (family, slant, weight) triplet.

    These font faces are used in implementation of cairo's "toy" font API.
    Passing the zero-length string ``""`` as the family selects the
    platform-specific default family, which can then be queried with
    :meth:`get_family`.

    The :meth:`Context.select_font_face` method uses this to create font
    faces. See that method for limitations and other details of toy font
    faces.

    :param family: a font family name, as an Unicode or UTF-8 string.
    :param slant: The :ref:`FONT_SLANT` string for the font face.
    :param weight: The :ref:`FONT_WEIGHT` string for the font face.
    """
    def __init__(self, family='', slant=constants.FONT_SLANT_NORMAL,
                 weight=constants.FONT_WEIGHT_NORMAL):
        pointer = cairo.cairo_toy_font_face_create(
            _encode_string(family), slant, weight)
        FontFace.__init__(self, pointer)

    def get_family(self):
        """Return this font face's family name as a Unicode string."""
        c_family = cairo.cairo_toy_font_face_get_family(self._pointer)
        return ffi.string(c_family).decode('utf8', 'replace')

    def get_slant(self):
        """Return this font face's :ref:`FONT_SLANT` string."""
        return cairo.cairo_toy_font_face_get_slant(self._pointer)

    def get_weight(self):
        """Return this font face's :ref:`FONT_WEIGHT` string."""
        return cairo.cairo_toy_font_face_get_weight(self._pointer)
# Maps cairo font-face type constants to the Python wrapper class used by
# FontFace._from_pointer; unknown types fall back to the FontFace base class.
FONT_TYPE_TO_CLASS = {
    constants.FONT_TYPE_TOY: ToyFontFace,
}
class ScaledFont(object):
    """Creates a :class:`ScaledFont` object from a font face and matrices
    that describe the size of the font
    and the environment in which it will be used.

    :param font_face: A :class:`FontFace` object.
    :type font_matrix: Matrix
    :param font_matrix:
        Font space to user space transformation matrix for the font.
        In the simplest case of a N point font,
        this matrix is just a scale by N,
        but it can also be used to shear the font
        or stretch it unequally along the two axes.
        If omitted, a scale by 10 matrix is assumed (ie. a 10 point font size).
        See :class:`Context.set_font_matrix`.
    :type ctm: Matrix
    :param ctm:
        User to device transformation matrix with which the font will be used.
        If omitted, an identity matrix is assumed.
    :param options:
        The :class:`FontOptions` object to use
        when getting metrics for the font and rendering with it.
        If omitted, the default options are assumed.

    """
    def __init__(self, font_face, font_matrix=None, ctm=None, options=None):
        if font_matrix is None:
            font_matrix = Matrix()
            font_matrix.scale(10)  # Default font size
        if ctm is None:
            ctm = Matrix()
        if options is None:
            options = FontOptions()
        self._init_pointer(cairo.cairo_scaled_font_create(
            font_face._pointer, font_matrix._pointer,
            ctm._pointer, options._pointer))

    def _init_pointer(self, pointer):
        # Tie the cairo object's lifetime to this wrapper: destroy it when
        # the Python object is garbage-collected.
        self._pointer = ffi.gc(
            pointer, _keepref(cairo, cairo.cairo_scaled_font_destroy))
        self._check_status()

    def _check_status(self):
        # Raise the matching cairo exception if the font is in an error state.
        _check_status(cairo.cairo_scaled_font_status(self._pointer))

    @staticmethod
    def _from_pointer(pointer, incref):
        """Wrap an existing :c:type:`cairo_scaled_font_t *` cdata pointer.

        :type incref: bool
        :param incref:
            Whether increase the :ref:`reference count <refcounting>` now.
        :return: A new :class:`ScaledFont` instance.

        """
        if pointer == ffi.NULL:
            raise ValueError('Null pointer')
        if incref:
            cairo.cairo_scaled_font_reference(pointer)
        # Bypass __init__ (which would create a brand-new cairo object)
        # and initialize from the existing pointer instead.
        self = object.__new__(ScaledFont)
        ScaledFont._init_pointer(self, pointer)
        return self

    def get_font_face(self):
        """Return the font face that this scaled font uses.

        :returns:
            A new instance of :class:`FontFace` (or one of its sub-classes).
            Might be the same font face passed to :class:`ScaledFont`,
            but this does not hold true for all possible cases.

        """
        return FontFace._from_pointer(
            cairo.cairo_scaled_font_get_font_face(self._pointer), incref=True)

    def get_font_options(self):
        """Copies the scaled font’s options.

        :returns: A new :class:`FontOptions` object.

        """
        font_options = FontOptions()
        cairo.cairo_scaled_font_get_font_options(
            self._pointer, font_options._pointer)
        return font_options

    def get_font_matrix(self):
        """Copies the scaled font’s font matrix.

        :returns: A new :class:`Matrix` object.

        """
        matrix = Matrix()
        cairo.cairo_scaled_font_get_font_matrix(self._pointer, matrix._pointer)
        self._check_status()
        return matrix

    def get_ctm(self):
        """Copies the scaled font’s font current transform matrix.

        Note that the translation offsets ``(x0, y0)`` of the CTM
        are ignored by :class:`ScaledFont`.
        So, the matrix this method returns always has 0 as ``x0`` and ``y0``.

        :returns: A new :class:`Matrix` object.

        """
        matrix = Matrix()
        cairo.cairo_scaled_font_get_ctm(self._pointer, matrix._pointer)
        self._check_status()
        return matrix

    def get_scale_matrix(self):
        """Copies the scaled font’s scaled matrix.

        The scale matrix is product of the font matrix
        and the ctm associated with the scaled font,
        and hence is the matrix mapping from font space to device space.

        :returns: A new :class:`Matrix` object.

        """
        matrix = Matrix()
        cairo.cairo_scaled_font_get_scale_matrix(
            self._pointer, matrix._pointer)
        self._check_status()
        return matrix

    def extents(self):
        """Return the scaled font’s extents.

        See :meth:`Context.font_extents`.

        :returns:
            A ``(ascent, descent, height, max_x_advance, max_y_advance)``
            tuple of floats.

        """
        extents = ffi.new('cairo_font_extents_t *')
        cairo.cairo_scaled_font_extents(self._pointer, extents)
        self._check_status()
        return (
            extents.ascent, extents.descent, extents.height,
            extents.max_x_advance, extents.max_y_advance)

    def text_extents(self, text):
        """Returns the extents for a string of text.

        The extents describe a user-space rectangle
        that encloses the "inked" portion of the text,
        (as it would be drawn by :meth:`show_text`).
        Additionally, the :obj:`x_advance` and :obj:`y_advance` values
        indicate the amount by which the current point would be advanced
        by :meth:`show_text`.

        :param text: The text to measure, as an Unicode or UTF-8 string.
        :returns:
            A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
            tuple of floats.
            See :meth:`Context.text_extents` for details.

        """
        extents = ffi.new('cairo_text_extents_t *')
        cairo.cairo_scaled_font_text_extents(
            self._pointer, _encode_string(text), extents)
        self._check_status()
        return (
            extents.x_bearing, extents.y_bearing,
            extents.width, extents.height,
            extents.x_advance, extents.y_advance)

    def glyph_extents(self, glyphs):
        """Returns the extents for a list of glyphs.

        The extents describe a user-space rectangle
        that encloses the "inked" portion of the glyphs,
        (as it would be drawn by :meth:`show_glyphs`).
        Additionally, the :obj:`x_advance` and :obj:`y_advance` values
        indicate the amount by which the current point would be advanced
        by :meth:`show_glyphs`.

        :param glyphs:
            A list of glyphs, as returned by :meth:`text_to_glyphs`.
            Each glyph is a ``(glyph_id, x, y)`` tuple
            of an integer and two floats.
        :returns:
            A ``(x_bearing, y_bearing, width, height, x_advance, y_advance)``
            tuple of floats.
            See :meth:`Context.text_extents` for details.

        """
        # ffi converts the list of (id, x, y) tuples into a C glyph array.
        glyphs = ffi.new('cairo_glyph_t[]', glyphs)
        extents = ffi.new('cairo_text_extents_t *')
        cairo.cairo_scaled_font_glyph_extents(
            self._pointer, glyphs, len(glyphs), extents)
        self._check_status()
        return (
            extents.x_bearing, extents.y_bearing,
            extents.width, extents.height,
            extents.x_advance, extents.y_advance)

    def text_to_glyphs(self, x, y, text, with_clusters):
        """Converts a string of text to a list of glyphs,
        optionally with cluster mapping,
        that can be used to render later using this scaled font.

        The output values can be readily passed to
        :meth:`Context.show_text_glyphs`, :meth:`Context.show_glyphs`
        or related methods,
        assuming that the exact same :class:`ScaledFont`
        is used for the operation.

        :type x: float
        :type y: float
        :type with_clusters: bool
        :param x: X position to place first glyph.
        :param y: Y position to place first glyph.
        :param text: The text to convert, as an Unicode or UTF-8 string.
        :param with_clusters: Whether to compute the cluster mapping.
        :returns:
            A ``(glyphs, clusters, clusters_flags)`` tuple
            if :obj:`with_clusters` is true, otherwise just :obj:`glyphs`.
            See :meth:`Context.show_text_glyphs` for the data structure.

        .. note::

            This method is part of
            what the cairo designers call the "toy" text API.
            It is convenient for short demos and simple programs,
            but it is not expected to be adequate
            for serious text-using applications.
            See :ref:`fonts` for details
            and :meth:`Context.show_glyphs`
            for the "real" text display API in cairo.

        """
        glyphs = ffi.new('cairo_glyph_t **', ffi.NULL)
        num_glyphs = ffi.new('int *')
        if with_clusters:
            clusters = ffi.new('cairo_text_cluster_t **', ffi.NULL)
            num_clusters = ffi.new('int *')
            cluster_flags = ffi.new('cairo_text_cluster_flags_t *')
        else:
            clusters = ffi.NULL
            num_clusters = ffi.NULL
            cluster_flags = ffi.NULL
        # TODO: Pass len_utf8 explicitly to support NULL bytes?
        # -1 tells cairo the UTF-8 string is NUL-terminated.
        status = cairo.cairo_scaled_font_text_to_glyphs(
            self._pointer, x, y, _encode_string(text), -1,
            glyphs, num_glyphs, clusters, num_clusters, cluster_flags)
        # Attach the cairo-allocated arrays to the GC before checking the
        # status, so they are freed even if an error is raised below.
        glyphs = ffi.gc(glyphs[0], _keepref(cairo, cairo.cairo_glyph_free))
        if with_clusters:
            clusters = ffi.gc(
                clusters[0], _keepref(cairo, cairo.cairo_text_cluster_free))
        _check_status(status)
        # Copy the C arrays into plain Python tuples.
        glyphs = [
            (glyph.index, glyph.x, glyph.y)
            for i in range(num_glyphs[0])
            for glyph in [glyphs[i]]]
        if with_clusters:
            clusters = [
                (cluster.num_bytes, cluster.num_glyphs)
                for i in range(num_clusters[0])
                for cluster in [clusters[i]]]
            return glyphs, clusters, cluster_flags[0]
        else:
            return glyphs
class FontOptions(object):
"""An opaque object holding all options that are used when rendering fonts.
Individual features of a :class:`FontOptions`
can | |
COMPUTINGSITE,JOBSTATUS,GSHARE,SUM(NJOBS) FROM ATLAS_PANDA.{} " \
"WHERE workqueue_id NOT IN " \
"(SELECT queue_id FROM ATLAS_PANDA.jedi_work_queue WHERE queue_function = 'Resource') " \
"AND computingsite NOT IN " \
"(SELECT pandaqueuename FROM ATLAS_PANDA.HARVESTER_Slots) GROUP BY COMPUTINGSITE,JOBSTATUS,GSHARE "
statsPerShare = {}
statsPerPQ = {}
for table in ['JOBS_SHARE_STATS', 'JOBSDEFINED_SHARE_STATS']:
status, res = taskBuffer.querySQLS(sql.format(table), {})
for computingSite, jobStatus, gshare, nJobs in res:
statsPerShare.setdefault(gshare, {'nq': 0, 'nr': 0})
statsPerPQ.setdefault(computingSite, {})
statsPerPQ[computingSite].setdefault(gshare, {'nq': 0, 'nr': 0})
if jobStatus in ['definied', 'assigned', 'activated', 'starting']:
statsPerPQ[computingSite][gshare]['nq'] += nJobs
statsPerShare[gshare]['nq'] += nJobs
elif jobStatus == 'running':
statsPerPQ[computingSite][gshare]['nr'] += nJobs
statsPerShare[gshare]['nr'] += nJobs
    # check
    # SQL selecting the oldest queued JEDI jobs of a (site, gshare) pair,
    # used below to pick the excess jobs to kill.
    sql = "SELECT * FROM ("\
          "SELECT * FROM ("\
          "SELECT PandaID FROM ATLAS_PANDA.jobsDefined4 "\
          "WHERE computingSite=:computingSite "\
          "AND gshare=:gshare AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) "\
          "UNION "\
          "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 "\
          "WHERE computingSite=:computingSite "\
          "AND gshare=:gshare AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) "\
          ") ORDER BY PandaID "\
          ") WHERE rownum<:nRows "
    nQueueLimitMap = {}
    for computingSite, shareStat in six.iteritems(statsPerPQ):
        for gshare, nStat in six.iteritems(shareStat):
            # get limit (cached per gshare to avoid repeated config lookups)
            if gshare not in nQueueLimitMap:
                key = 'FAST_REBRO_THRESHOLD_NQUEUE_{}'.format(gshare)
                nQueueLimitMap[gshare] = taskBuffer.getConfigValue('rebroker', key)
            nQueueLimit = nQueueLimitMap[gshare]
            if not nQueueLimit:
                # no threshold configured: evaluate with a default of 10,
                # but only log — do not actually kill anything
                dry_run = True
                nQueueLimit = 10
            else:
                dry_run = False
            # three independent overshoot conditions: queue/run ratio,
            # absolute queue threshold, and fraction of the share's total
            ratioCheck = nStat['nr'] * ratioLimit < nStat['nq']
            statCheck = nStat['nq'] > nQueueLimit
            fracCheck = nStat['nq'] > statsPerShare[gshare]['nq'] * fractionLimit
            _logger.debug("{} in {} : nQueue({})>nRun({})*{}: {},"
                          " nQueue>nQueueThreshold({}):{}, nQueue>nQueue_total({})*{}:{}".format(
                              computingSite, gshare, nStat['nq'],
                              nStat['nr'], ratioLimit, ratioCheck,
                              nQueueLimit, statCheck,
                              statsPerShare[gshare]['nq'],
                              fractionLimit, fracCheck))
            if ratioCheck and statCheck and fracCheck:
                _logger.debug('{} overshoot in {}'.format(computingSite, gshare))
                if not dry_run:
                    # calculate excess: the smallest amount by which any of
                    # the three limits is exceeded
                    excess = min(nStat['nq'] - nStat['nr'] * ratioLimit, nStat['nq'] - nQueueLimit)
                    excess = min(excess, nStat['nq'] - statsPerShare[gshare]['nq'] * fractionLimit)
                    excess = int(math.ceil(excess))
                    varMap = {}
                    varMap[':computingSite'] = computingSite
                    varMap[':gshare'] = gshare
                    varMap[':jobStatus1'] = 'defined'
                    varMap[':jobStatus2'] = 'assigned'
                    varMap[':jobStatus3'] = 'activated'
                    varMap[':jobStatus4'] = 'starting'
                    varMap[':nRows'] = excess
                    status, res = taskBuffer.querySQLS(sql, varMap)
                    jediJobs = [p for p, in res]
                    _logger.debug('got {} jobs to kill excess={}'.format(len(jediJobs), excess))
                    if jediJobs:
                        # kill in chunks of 100; code 10 = fast rebrokerage
                        nJob = 100
                        iJob = 0
                        while iJob < len(jediJobs):
                            _logger.debug('reassignJobs for JEDI at Nq/Nr overshoot site {} ({})'.format(
                                computingSite, str(jediJobs[iJob:iJob + nJob])))
                            Client.killJobs(jediJobs[iJob:iJob + nJob], 10, keepUnmerged=True)
                            iJob += nJob
except Exception as e:
    # best-effort section: log and continue with the remaining steps
    _logger.error('failed with {} {}'.format(str(e), traceback.format_exc()))
# reassign activated jobs in inactive sites
inactiveTimeLimitSite = 2   # hours without a pilot start to call a site inactive
inactiveTimeLimitJob = 4    # hours a job may sit activated/starting
inactivePrioLimit = 800     # only touch jobs at or above this priority
timeLimitSite = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitSite)
timeLimitJob = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitJob)
# get PandaIDs
sql = 'SELECT distinct computingSite FROM ATLAS_PANDA.jobsActive4 '
sql += 'WHERE prodSourceLabel=:prodSourceLabel '
sql += 'AND ((modificationTime<:timeLimit AND jobStatus=:jobStatus1) '
sql += 'OR (stateChangeTime<:timeLimit AND jobStatus=:jobStatus2)) '
sql += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sql += 'AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'starting'
varMap[':lockedby'] = 'jedi'
varMap[':timeLimit'] = timeLimitJob
varMap[':prioLimit'] = inactivePrioLimit
varMap[':pType1'] = 'pmerge'
varMap[':rFlag1'] = 2
stDS,resDS = taskBuffer.querySQLS(sql,varMap)
# a site counts as inactive when its last pilot start is older than the limit
sqlSS = 'SELECT laststart FROM ATLAS_PANDAMETA.siteData '
sqlSS += 'WHERE site=:site AND flag=:flag AND hours=:hours AND laststart<:laststart '
sqlPI = 'SELECT PandaID,eventService,attemptNr FROM ATLAS_PANDA.jobsActive4 '
sqlPI += 'WHERE prodSourceLabel=:prodSourceLabel AND jobStatus IN (:jobStatus1,:jobStatus2) '
sqlPI += 'AND (modificationTime<:timeLimit OR stateChangeTime<:timeLimit) '
sqlPI += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sqlPI += 'AND computingSite=:site AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
for tmpSite, in resDS:
    if tmpSite in sitesToDisableReassign:
        _logger.debug('skip reassignJobs at inactive site %s since reassign is disabled' % (tmpSite))
        continue
    # check if the site is inactive
    varMap = {}
    varMap[':site'] = tmpSite
    varMap[':flag'] = 'production'
    varMap[':hours'] = 3
    varMap[':laststart'] = timeLimitSite
    stSS,resSS = taskBuffer.querySQLS(sqlSS,varMap)
    if stSS is not None and len(resSS) > 0:
        # get jobs stuck at this inactive site
        varMap = {}
        varMap[':prodSourceLabel'] = 'managed'
        varMap[':jobStatus1'] = 'activated'
        varMap[':jobStatus2'] = 'starting'
        varMap[':lockedby'] = 'jedi'
        varMap[':timeLimit'] = timeLimitJob
        varMap[':prioLimit'] = inactivePrioLimit
        varMap[':site'] = tmpSite
        varMap[':pType1'] = 'pmerge'
        varMap[':rFlag1'] = 2
        stPI,resPI = taskBuffer.querySQLS(sqlPI,varMap)
        jediJobs = []
        # reassign
        _logger.debug('reassignJobs for JEDI at inactive site %s laststart=%s' % (tmpSite,resSS[0][0]))
        if resPI is not None:
            for pandaID, eventService, attemptNr in resPI:
                # event-service merge jobs get a recoverable retry first
                if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
                    _logger.debug('retrying es merge %s at inactive site %s' % (pandaID,tmpSite))
                    taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
                                        recoverableEsMerge=True)
                jediJobs.append(pandaID)
        if len(jediJobs) != 0:
            # kill in chunks of 100; code 51 = reassigned by JEDI
            nJob = 100
            iJob = 0
            while iJob < len(jediJobs):
                _logger.debug('reassignJobs for JEDI at inactive site %s (%s)' % (tmpSite,jediJobs[iJob:iJob+nJob]))
                Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
                iJob += nJob
# reassign defined jobs in defined table
timeoutValue = taskBuffer.getConfigValue('job_timeout', 'TIMEOUT_defined', 'pandaserver')
if not timeoutValue:
    # default: 4 hours when no config value is set
    timeoutValue = 4 * 60
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=timeoutValue)
# get PandaIDs
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",
                                            timeLimit, ['defined'],
                                            ['managed', 'test'],
                                            [], [], [],
                                            True)
jobs = []
jediJobs = []
if res is not None:
    # split by owner: JEDI-managed jobs are killed (code 51), the rest
    # go through the classic reassignment path
    for (id,lockedby) in res:
        if lockedby == 'jedi':
            jediJobs.append(id)
        else:
            jobs.append(id)
# reassign
_logger.debug('reassignJobs for defined jobs with timeout={}min -> {} jobs'.format(timeoutValue, len(jobs)))
if len(jobs) > 0:
    nJob = 100
    iJob = 0
    while iJob < len(jobs):
        _logger.debug('reassignJobs for defined jobs (%s)' % jobs[iJob:iJob+nJob])
        taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
        iJob += nJob
_logger.debug('reassignJobs for JEDI defined jobs -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
    nJob = 100
    iJob = 0
    while iJob < len(jediJobs):
        _logger.debug('reassignJobs for JEDI defined jobs (%s)' % jediJobs[iJob:iJob+nJob])
        Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
        iJob += nJob
# reassign long-waiting jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
# empty status list -> any status is eligible after 12 hours
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",timeLimit,[],['managed'],[],[],[],
                                            True)
jobs = []
jediJobs = []
if res is not None:
    # split by owner: JEDI-managed jobs are killed (code 51), the rest
    # go through the classic reassignment path
    for (id,lockedby) in res:
        if lockedby == 'jedi':
            jediJobs.append(id)
        else:
            jobs.append(id)
# reassign
_logger.debug('reassignJobs for long in defined table -> #%s' % len(jobs))
if len(jobs) > 0:
    nJob = 100
    iJob = 0
    while iJob < len(jobs):
        _logger.debug('reassignJobs for long in defined table (%s)' % jobs[iJob:iJob+nJob])
        taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
        iJob += nJob
_logger.debug('reassignJobs for long JEDI in defined table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
    nJob = 100
    iJob = 0
    while iJob < len(jediJobs):
        _logger.debug('reassignJobs for long JEDI in defined table (%s)' % jediJobs[iJob:iJob+nJob])
        Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
        iJob += nJob
# reassign too long activated jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=2)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],[],[],[],True,
                                            onlyReassignable=True,getEventService=True)
jobs = []
jediJobs = []
if res is not None:
    for pandaID, lockedby, eventService, attemptNr, computingSite in res:
        if computingSite in sitesToDisableReassign:
            _logger.debug('skip reassignJobs for long activated PandaID={0} since disabled at {1}'.format(pandaID,computingSite))
            continue
        if lockedby == 'jedi':
            # event-service merge jobs get a recoverable retry first
            if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
                # FIX: was "'retrying {0} in long activated' % pandaID",
                # mixing a str.format placeholder with the % operator,
                # which raises TypeError at runtime.
                _logger.debug('retrying {0} in long activated'.format(pandaID))
                taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
                                    recoverableEsMerge=True)
            jediJobs.append(pandaID)
        else:
            jobs.append(pandaID)
_logger.debug('reassignJobs for long activated in active table -> #%s' % len(jobs))
if len(jobs) != 0:
    nJob = 100
    iJob = 0
    while iJob < len(jobs):
        _logger.debug('reassignJobs for long activated in active table (%s)' % jobs[iJob:iJob+nJob])
        taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
        iJob += nJob
_logger.debug('reassignJobs for long activated JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
    nJob = 100
    iJob = 0
    while iJob < len(jediJobs):
        _logger.debug('reassignJobs for long activated JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
        Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
        iJob += nJob
# reassign too long starting jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=48)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['starting'],['managed'],[],[],[],True,
                                            onlyReassignable=True,useStateChangeTime=True,getEventService=True)
jobs = []
jediJobs = []
if res is not None:
    # split by owner: JEDI-managed jobs are killed (code 51), the rest
    # go through the classic reassignment path
    for pandaID, lockedby, eventService, attemptNr, computingSite in res:
        if computingSite in sitesToDisableReassign:
            _logger.debug('skip reassignJobs for long starting PandaID={0} since disabled at {1}'.format(pandaID,computingSite))
            continue
        if lockedby == 'jedi':
            jediJobs.append(pandaID)
        else:
            jobs.append(pandaID)
_logger.debug('reassignJobs for long starting in active table -> #%s' % len(jobs))
if len(jobs) != 0:
    nJob = 100
    iJob = 0
    while iJob < len(jobs):
        _logger.debug('reassignJobs for long starting in active table (%s)' % jobs[iJob:iJob+nJob])
        taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
        iJob += nJob
_logger.debug('reassignJobs for long starting JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
    nJob = 100
    iJob = 0
    while iJob < len(jediJobs):
        # FIX: log message said 'long stating' (typo) -> 'long starting'
        _logger.debug('reassignJobs for long starting JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
        Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
        iJob += nJob
# kill too long-standing analysis jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':prodSourceLabel1'] = 'test'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':prodSourceLabel3'] = 'user'
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2 OR prodSourceLabel=:prodSourceLabel3) AND modificationTime<:modificationTime ORDER BY PandaID",
                                  varMap)
jobs = []
if res is not None:
    # NOTE(review): `id` shadows the builtin here and elsewhere in this
    # module — kept for consistency with the surrounding code.
    for (id,) in res:
        jobs.append(id)
# kill
if | |
# compiler/bitproto/renderer/block.py
"""
bitproto.renderer.block
~~~~~~~~~~~~~~~~~~~~~~~
Block class base.
Block
|- BlockBindDefinition
| |- BlockBindAlias
| |- BlockBindConstant
| |- BlockBindEnum
| |- BlockBindEnumField
| |- BlockBindMessage
| |- BlockBindMessageField
| |- BlockBindProto
|- BlockDeferable
|- BlockComposition
|- BlockWrapper
|- BlockConditional
"""
from abc import abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Generic, Iterator, List, Optional
from typing import Type as T
from typing import TypeVar, Union, cast
from bitproto._ast import (
Alias,
BoundDefinition,
Comment,
Constant,
D,
Definition,
Enum,
EnumField,
Message,
MessageField,
Proto,
)
from bitproto.errors import InternalError
from bitproto.renderer.formatter import F, Formatter
from bitproto.utils import (
cached_property,
final,
overridable,
override,
snake_case,
upper_case,
)
#############
# Block base
#############
@dataclass
class BlockRenderContext(Generic[F]):
    """Context for block's render()."""

    # Formatter used to produce language-specific output.
    formatter: F
    # The proto this render pass is bound to.
    bound: Proto
    # Optional list of message names to restrict rendering to
    # in optimization mode; None means no filtering.
    optimization_mode_filter_messages: Optional[List[str]] = None
class Block(Generic[F]):
    """Block maintains formatted strings to render.

    :param indent: Number of characters to indent for this block.
    """

    def __init__(self, indent: int = 0) -> None:
        self.indent = indent
        # Lines of rendered output; joined with separator() in __str__.
        self._strings: List[str] = []
        # Render context; set only while render()/defer() is executing.
        self._ctx: Optional[BlockRenderContext[F]] = None

    ####
    # Renderer implementation level api
    ####

    @abstractmethod
    def render(self) -> None:
        """Render processor for this block."""
        raise NotImplementedError

    @overridable
    def separator(self) -> str:
        """Separator to join managed strings, defaults to '\n'."""
        return "\n"

    @property
    def formatter(self) -> F:
        """Returns the formatter of current block."""
        return self._get_ctx_or_raise().formatter

    @property
    def bound(self) -> Proto:
        """Returns the bound proto of current block."""
        return self._get_ctx_or_raise().bound

    @final
    def push_string(self, s: str, separator: str = " ") -> None:
        """Append an inline string `s` onto current string.

        :param separator: The separator between current string with given `s`,
           defaults to " ".
        """
        # NOTE(review): raises IndexError if no line was pushed yet —
        # callers must push() at least once first.
        self._strings[-1] = separator.join([self._strings[-1], s])

    @final
    def push(self, line: str, indent: Optional[int] = None) -> None:
        """Append a line of string.

        :param line: A line of string (without ending newline character).
        :param indent: Number of indent characters to insert at the head of given line,
           defaults to this block's indent.
        """
        indent = self.indent if indent is None else indent
        if indent > 0:
            line = indent * self.formatter.indent_character() + line
        self._strings.append(line)

    @final
    def push_empty_line(self) -> None:
        """Append an empty line."""
        self.push("")

    @final
    def push_comment(
        self, comment: Union[Comment, str], indent: Optional[int] = None
    ) -> None:
        """Push a line of comment.

        :param comment: A line of comment or str.
        """
        self.push(self.formatter.format_comment(str(comment)), indent=indent)

    @final
    def push_docstring(
        self, *comments: Union[Comment, str], indent: Optional[int] = None
    ) -> None:
        """Push one or more lines of comment as docstring.

        Does nothing if given comments is empty.

        :param comments: List of comment or str.
        """
        if not comments:
            return
        strings = [i.content() if isinstance(i, Comment) else i for i in comments]
        for string in self.formatter.format_docstring(*strings):
            self.push(string, indent=indent)

    ####
    # Framework-level api
    ####

    @final
    def _get_ctx_or_raise(self) -> BlockRenderContext[F]:
        """Returns current render context, raise if None."""
        # NOTE(review): assert is stripped under -O; relies on the framework
        # always rendering through _maintain_ctx().
        assert self._ctx is not None, InternalError("block._ctx not set")
        return self._ctx

    @final
    @contextmanager
    def _maintain_ctx(self, ctx: BlockRenderContext[F]) -> Iterator[None]:
        """Maintain given ctx as current block's render context."""
        try:
            self._ctx = ctx
            yield
        finally:
            # Always clear, even if render()/defer() raised.
            self._ctx = None

    @final
    def __str__(self) -> str:
        """Returns the joined managed strings with separator."""
        return self.separator().join(self._strings)

    @final
    def _is_empty(self) -> bool:
        """Returns True if this block is empty."""
        return len(self._strings) == 0

    @final
    def _clear(self) -> None:
        """Clears internal managed strings."""
        self._strings = []

    @final
    def _collect(self) -> str:
        """Clear and returns the joined string."""
        s = str(self)
        self._clear()
        return s

    @final
    def _push_from_block(self, b: "Block") -> None:
        """Push strings onto this block from given block `b`."""
        # indent=0: `b` already applied its own indentation when rendering.
        if not b._is_empty():
            self.push(b._collect(), indent=0)

    @final
    def _render_with_ctx(self, ctx: BlockRenderContext) -> None:
        """Call this block's render() processor with given render context `ctx`."""
        with self._maintain_ctx(ctx):
            self.render()

    @final
    def _render_from_block(
        self, b: "Block[F]", ctx: Optional[BlockRenderContext[F]] = None
    ) -> None:
        """Render given block `b` with given render context `ctx`,
        and push its strings onto this block.
        Uses the render context of current block by default.
        """
        ctx = ctx or self._get_ctx_or_raise()
        b._render_with_ctx(ctx)
        self._push_from_block(b)

    @final
    def _defer_from_block(
        self, b: "BlockDeferable[F]", ctx: Optional[BlockRenderContext[F]] = None
    ) -> None:
        """Call defer() on given block `b` with given render context `ctx`,
        and push its strings onto this block.
        Uses the render context of current block by default.
        """
        ctx = ctx or self._get_ctx_or_raise()
        b._defer_with_ctx(ctx)
        self._push_from_block(b)
@final
class BlockAheadNotice(Block):
    """Block emitting the standard bitproto generation notice comment."""

    def render(self) -> None:
        """Push a single comment line warning that the file is generated."""
        text = "Code generated by bitproto. DO NOT EDIT."
        self.push(self.formatter.format_comment(text))
#############
# Block that bind a definition ast.
#############
@dataclass
class BlockBindDefinitionBase(Generic[D]):
    """Base class of BlockBindDefinition.

    :param d: The associated definition instance.
    :param name: The name of associated definition, defaults to `d.name`.
    """

    # The bound definition ast node.
    d: D
    # Optional display name; None means fall back to `d.name`.
    name: Optional[str] = None
class BlockBindDefinition(Block[F], BlockBindDefinitionBase[D]):
    """Block that binds a definition ast node.

    :param d: The associated definition instance.
    :param name: Optional display name, defaults to `d.name`.
    :param indent: Number of characters to indent for this block.
    """

    def __init__(
        self,
        d: D,
        name: Optional[str] = None,
        indent: int = 0,
    ) -> None:
        BlockBindDefinitionBase.__init__(self, d, name=name)
        Block.__init__(self, indent=indent)

    @final
    def push_definition_comments(self, indent: Optional[int] = None) -> None:
        """Format the comment_block of this definition as lines of comments,
        and push them.
        """
        # FIX: dropped unused local `comment_string` (comment.content() was
        # computed and discarded); also added the missing `-> None`
        # annotation for consistency with the sibling methods.
        for comment in self.d.comment_block:
            self.push_comment(comment, indent)

    @final
    def push_definition_docstring(self, indent: Optional[int] = None) -> None:
        """Format the comment_block of this definition as lines of docstring,
        and push them.
        """
        self.push_docstring(*self.d.comment_block, indent=indent)

    @final
    def push_typing_hint_inline_comment(self) -> None:
        """Push an inline comment to hint the original defined bitproto type."""
        comment = self.formatter.format_comment(f"{self.nbits()}bit")
        self.push_string(comment)

    @overridable
    def nbits(self) -> int:
        """Returns the number of bits this definition occupy."""
        return 0
class BlockBindAlias(BlockBindDefinition[F, Alias]):
    """BlockBindDefinition specialization bound to an Alias definition."""

    @override(BlockBindDefinition)
    def nbits(self) -> int:
        """Number of bits occupied by the aliased type."""
        return self.d.nbits()

    @cached_property
    def alias_name(self) -> str:
        """Formatted name of the bound alias."""
        fmt = self.formatter
        return fmt.format_alias_name(self.d)

    @cached_property
    def aliased_type(self) -> str:
        """Formatted representation of the type this alias points to."""
        fmt = self.formatter
        return fmt.format_type(self.d.type, name=self.alias_name)
class BlockBindConstant(BlockBindDefinition[F, Constant]):
    """BlockBindDefinition specialization bound to a Constant definition."""

    @cached_property
    def constant_name(self) -> str:
        """Formatted name of the bound constant."""
        fmt = self.formatter
        return fmt.format_constant_name(self.d)

    @cached_property
    def constant_value(self) -> str:
        """Formatted value of the bound constant."""
        fmt = self.formatter
        return fmt.format_value(self.d.value)

    @cached_property
    def constant_value_type(self) -> str:
        """Formatted type of the bound constant's value."""
        fmt = self.formatter
        return fmt.format_constant_type(self.d)
class BlockBindEnum(BlockBindDefinition[F, Enum]):
    """BlockBindDefinition specialization bound to an Enum definition."""

    @override(BlockBindDefinition)
    def nbits(self) -> int:
        """Number of bits occupied by this enum."""
        return self.d.nbits()

    @cached_property
    def enum_name(self) -> str:
        """Formatted name of the bound enum."""
        fmt = self.formatter
        return fmt.format_enum_name(self.d)

    @cached_property
    def enum_uint_type(self) -> str:
        """Formatted representation of the enum's underlying uint type."""
        fmt = self.formatter
        return fmt.format_uint_type(self.d.type)
class BlockBindEnumField(BlockBindDefinition[F, EnumField]):
    """BlockBindDefinition specialization bound to an EnumField."""

    @override(BlockBindDefinition)
    def nbits(self) -> int:
        """Number of bits occupied by the owning enum."""
        return self.d.enum.nbits()

    @cached_property
    def enum_field_name(self) -> str:
        """Formatted name of the bound enum field."""
        fmt = self.formatter
        return fmt.format_enum_field_name(self.d)

    @cached_property
    def enum_field_value(self) -> str:
        """Formatted integer value of the bound enum field."""
        fmt = self.formatter
        return fmt.format_int_value(self.d.value)

    @cached_property
    def enum_field_type(self) -> str:
        """Formatted type of the bound enum field, i.e. the enum type itself."""
        fmt = self.formatter
        return fmt.format_enum_type(self.d.enum)
class BlockBindMessage(BlockBindDefinition[F, Message]):
    """Implements the BlockBindDefinition for Message."""

    @override(BlockBindDefinition)
    def nbits(self) -> int:
        """Returns the number of bits this message occupies."""
        return self.d.nbits()

    @cached_property
    def message_name(self) -> str:
        """Returns the formatted name of this message."""
        return self.formatter.format_message_name(self.d)

    @cached_property
    def message_type(self) -> str:
        """Returns the formatted representation of this message type."""
        return self.formatter.format_message_type(self.d)

    @cached_property
    def message_nbytes(self) -> str:
        """Returns the formatted representation of the number of bytes of this message."""
        return self.formatter.format_int_value(self.d.nbytes())

    @overridable
    @cached_property
    def message_size_constant_name(self) -> str:
        """Return the formatted name of the message size constant."""
        # FIX: was `f"BYTES_LENGTH_" + upper_case(...)` — an f-string with
        # no placeholders (lint F541) concatenated with `+`; use a single
        # f-string instead.
        return f"BYTES_LENGTH_{upper_case(snake_case(self.message_name))}"
class BlockBindMessageField(BlockBindDefinition[F, MessageField]):
    """BlockBindDefinition specialization bound to a MessageField."""

    @override(BlockBindDefinition)
    def nbits(self) -> int:
        """Number of bits occupied by this field's type."""
        return self.d.type.nbits()

    @cached_property
    def message_field_name(self) -> str:
        """Formatted name of the bound message field."""
        fmt = self.formatter
        return fmt.format_message_field_name(self.d)

    @cached_property
    def message_field_type(self) -> str:
        """Formatted type representation of the bound message field."""
        fmt = self.formatter
        return fmt.format_type(self.d.type, name=self.message_field_name)
class BlockBindProto(BlockBindDefinition[F, Proto]):
    """Implements the BlockBindDefinition for Proto.

    No extra behavior beyond the base; exists so proto-level blocks have
    a concretely typed `d` attribute.
    """
#############
# Block that defines a defer().
#############
class BlockDeferable(Block[F]):
    """Block with a defer method defined."""

    @abstractmethod
    def defer(self) -> None:
        """Defer is a hook function, invoked by BlockComposition's render().
        Called reversively over the order of the composition of blocks.
        """
        raise NotImplementedError

    def _defer_with_ctx(self, ctx: BlockRenderContext[F]) -> None:
        """Call this block's defer() processor with given ctx."""
        # Mirrors Block._render_with_ctx: ctx is only available while the
        # hook runs, and is cleared afterwards.
        with self._maintain_ctx(ctx):
            self.defer()
#############
# Block that join a list of blocks.
#############
class BlockComposition(Block[F]):
"""Composition of a list of blocks as a block."""
@overridable
@override(Block)
def separator(self) -> str:
"""Overrides upper method `separator()`, for separator between blocks,
defaults to '\n\n'.
"""
return "\n\n"
@final
@override(Block)
def render(self) -> None:
"""Overrides upper method `render()`.
It will firstly call each block's render method.
And | |
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29273049-p29300904.7z"),
page_ids=range(29273049, 29300905),
darus_id=94648,
sha1="aa87ed8a05a7ca3930726b5498a9704e5504a29c",
size=89523493,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29300905-p29318649.7z"),
page_ids=range(29300905, 29318650),
darus_id=94649,
sha1="12caa33da0b65f327c7fbef9102d3a837f7bc59d",
size=87302456,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29318650-p29349438.7z"),
page_ids=range(29318650, 29349439),
darus_id=94650,
sha1="19e591b3143108d39f836cb8041b9cebeede4c95",
size=98718119,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29349439-p29379870.7z"),
page_ids=range(29349439, 29379871),
darus_id=94651,
sha1="b878373e6a92ef83bf21faf60bdd9f15e4bc5f83",
size=119833800,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29379871-p29407549.7z"),
page_ids=range(29379871, 29407550),
darus_id=94652,
sha1="af6460667f263a0d092c946e66ae996909182a15",
size=130071564,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29407550-p29468454.7z"),
page_ids=range(29407550, 29468455),
darus_id=94654,
sha1="c14d86a719090e1d5bf127b3cf73416719b84359",
size=168063616,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29468455-p29525983.7z"),
page_ids=range(29468455, 29525984),
darus_id=94655,
sha1="cec747f5a933fb7bdfa5fb9c6e70178cb51fa2b7",
size=133547663,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29525984-p29578205.7z"),
page_ids=range(29525984, 29578206),
darus_id=94656,
sha1="db79412b58b302a1c5712d4dafd2b179b9de3774",
size=101911493,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29578206-p29620794.7z"),
page_ids=range(29578206, 29620795),
darus_id=94657,
sha1="b0b5ffa94fb8791d2b4bc92b47f815d851abcf9c",
size=119893288,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29620795-p29659762.7z"),
page_ids=range(29620795, 29659763),
darus_id=94658,
sha1="190cae11f7304c1c711676eb133d079db0719749",
size=132399238,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29659763-p29693940.7z"),
page_ids=range(29659763, 29693941),
darus_id=94659,
sha1="00797e89b51307b551a61c518cec9183a7c64e1a",
size=79430757,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29693941-p29751648.7z"),
page_ids=range(29693941, 29751649),
darus_id=94661,
sha1="1d075e553cbe842da8d4961754514459871249b7",
size=135650591,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29751649-p29777525.7z"),
page_ids=range(29751649, 29777526),
darus_id=94662,
sha1="056a94c960e7644b75a4f288d03d0cf0cfc1b3c4",
size=104279924,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29777526-p29794561.7z"),
page_ids=range(29777526, 29794562),
darus_id=94663,
sha1="b606a946b1240e809b5fb9820547bb597abcfa0c",
size=98983399,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29794562-p29844600.7z"),
page_ids=range(29794562, 29844601),
darus_id=94664,
sha1="210f5d46a9feb91fd187700f1079fbe2de7edda6",
size=134395291,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29844601-p29900420.7z"),
page_ids=range(29844601, 29900421),
darus_id=94665,
sha1="b220d6eb497f6ccc249a6a0d163f6b8411d1a1b7",
size=138200176,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29900421-p29947183.7z"),
page_ids=range(29900421, 29947184),
darus_id=94666,
sha1="c89f1204da995c136ce704a56541c4e131e9b451",
size=135920376,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29947184-p29981338.7z"),
page_ids=range(29947184, 29981339),
darus_id=94667,
sha1="e6fd2584a583d23a5fd94c468bc627f45b5037d9",
size=136715724,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p29981339-p30021298.7z"),
page_ids=range(29981339, 30021299),
darus_id=94668,
sha1="f92ce0a9bf0068b9a46ccea4dbca41676b3b9cbe",
size=131705752,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30021299-p30066793.7z"),
page_ids=range(30021299, 30066794),
darus_id=94670,
sha1="b38d7b7aef33f7122265ed6a1c6845789f49da94",
size=131309231,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30066794-p30104915.7z"),
page_ids=range(30066794, 30104916),
darus_id=94671,
sha1="b42bb50737f78f63dc00f31fb91d34611217c4f2",
size=145918987,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30104916-p30174135.7z"),
page_ids=range(30104916, 30174136),
darus_id=94672,
sha1="523d00ff9ff0695fbd5ce99426585067ce62c9d0",
size=122247565,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30174136-p30175497.7z"),
page_ids=range(30174136, 30175498),
darus_id=94673,
sha1="5dae18237c5b6e3fdb6a732530a1914f4bfde646",
size=23647193,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30175498-p30229614.7z"),
page_ids=range(30175498, 30229615),
darus_id=94674,
sha1="a1e6f8d11c9b2a545c70543a5ab4d0735544d033",
size=149196947,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30229615-p30250907.7z"),
page_ids=range(30229615, 30250908),
darus_id=94676,
sha1="b333b8d56e28709349e475433031e13186fb5c34",
size=113922627,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30250908-p30280988.7z"),
page_ids=range(30250908, 30280989),
darus_id=94677,
sha1="cec0b859b9a3b809543c98681a6e45f9c23f7dbc",
size=146719853,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30280989-p30311654.7z"),
page_ids=range(30280989, 30311655),
darus_id=94678,
sha1="2f5842eb8c04c8ef2376375e26c8449fc97e4b3b",
size=135466949,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30311655-p30381082.7z"),
page_ids=range(30311655, 30381083),
darus_id=94680,
sha1="505cd7d2548e4ef0e40db0f8163a9a38f9402a8b",
size=137662704,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30381083-p30421946.7z"),
page_ids=range(30381083, 30421947),
darus_id=94681,
sha1="33ffc7ae2c777230e92b9062fe7a416ffce643b6",
size=139486940,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30421947-p30477537.7z"),
page_ids=range(30421947, 30477538),
darus_id=94682,
sha1="3039589fa966f8f0b603490b74669c429e544a37",
size=137624615,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30477538-p30541616.7z"),
page_ids=range(30477538, 30541617),
darus_id=94684,
sha1="2f2f5c89cbeb4dd7de13089b96c24ad0758b7e64",
size=123404777,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30541617-p30609219.7z"),
page_ids=range(30541617, 30609220),
darus_id=94685,
sha1="05462cd5d3b1439470d1e55ff982dd279876f38d",
size=112551818,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30609220-p30694110.7z"),
page_ids=range(30609220, 30694111),
darus_id=94687,
sha1="751c6d4fd2c0acfcc947669201d146566a1467f1",
size=169277779,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30694111-p30825028.7z"),
page_ids=range(30694111, 30825029),
darus_id=94688,
sha1="c8e9dca188dada438c04e5ab2fbedeaa9c9e11a1",
size=144227480,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30825029-p30998180.7z"),
page_ids=range(30825029, 30998181),
darus_id=94689,
sha1="0b46ccaf8adcd7cca00cccbf3215c2d0f1ad4f06",
size=140872332,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p30998181-p31091689.7z"),
page_ids=range(30998181, 31091690),
darus_id=94693,
sha1="47cf6d60dc5e4444f3aed81355d195b776516a3f",
size=125903861,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31091690-p31169773.7z"),
page_ids=range(31091690, 31169774),
darus_id=94694,
sha1="4b29758aa8a6eea809ae6ba75cc4331a768951da",
size=156012775,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31169774-p31244863.7z"),
page_ids=range(31169774, 31244864),
darus_id=94695,
sha1="b01485c85a7d8c86381a9ab1cf9ff4754e7f6dd6",
size=149152346,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31244864-p31337389.7z"),
page_ids=range(31244864, 31337390),
darus_id=94696,
sha1="0013973e4a745adaba62162f766b30ddd3e1ae80",
size=218706668,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31337390-p31433856.7z"),
page_ids=range(31337390, 31433857),
darus_id=94697,
sha1="79923b12ffe2c5544e2027535be1c52ff9461270",
size=354558898,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31433857-p31473402.7z"),
page_ids=range(31433857, 31473403),
darus_id=94699,
sha1="f25e3beed77008b79cf1998a72c78c57c5e64d4f",
size=242366138,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31473403-p31544826.7z"),
page_ids=range(31473403, 31544827),
darus_id=94700,
sha1="8e8c9bdbf84520b61847c43a30bd70f50a3db7ac",
size=111026851,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31544827-p31629211.7z"),
page_ids=range(31544827, 31629212),
darus_id=94701,
sha1="59f622f8e7557c4030b7628e91d432c54816bae6",
size=128998094,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31629212-p31696496.7z"),
page_ids=range(31629212, 31696497),
darus_id=94703,
sha1="68e9385436888a16363b0d8d5eb6357eb96ee364",
size=120996577,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31696497-p31710876.7z"),
page_ids=range(31696497, 31710877),
darus_id=94705,
sha1="eaad4a823f616a821fae46d467df7235755d0f1a",
size=30344396,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31710877-p31783719.7z"),
page_ids=range(31710877, 31783720),
darus_id=94706,
sha1="a5ce6cd0dbfb713f7f557be596288342bc8527e1",
size=125923980,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31783720-p31876861.7z"),
page_ids=range(31783720, 31876862),
darus_id=94707,
sha1="fdbbba1c36c473a8adbdec43d9a0832c1b7f7926",
size=153284519,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31876862-p31891890.7z"),
page_ids=range(31876862, 31891891),
darus_id=94708,
sha1="bce0c925bd6d960756ec1c117f0a16bca562a5e6",
size=30513318,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31891891-p31922187.7z"),
page_ids=range(31891891, 31922188),
darus_id=94709,
sha1="e393fcfedfded7ec151897ca1601b1ae512fbb32",
size=44832551,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31922188-p31975981.7z"),
page_ids=range(31922188, 31975982),
darus_id=94710,
sha1="dce0c1fdfa53905ce9c37dcd1634a9f10f430926",
size=119611407,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p31975982-p32010441.7z"),
page_ids=range(31975982, 32010442),
darus_id=94712,
sha1="7c2d712d1286df21a1c5080d7dae878706c00e4d",
size=112226580,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32010442-p32027660.7z"),
page_ids=range(32010442, 32027661),
darus_id=94713,
sha1="5c2529edd03e6df1bc2ce39f6d8837f7fc29eefb",
size=55745127,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32027661-p32065687.7z"),
page_ids=range(32027661, 32065688),
darus_id=94714,
sha1="58345d14a072c0c5ef62d9431ac704ee0285680e",
size=128218404,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32065688-p32105614.7z"),
page_ids=range(32065688, 32105615),
darus_id=94715,
sha1="45d8d16ea16f82936a69d5ed06f8b5ca7ce642e5",
size=133686390,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32105615-p32180440.7z"),
page_ids=range(32105615, 32180441),
darus_id=94716,
sha1="116437ff067b6697ff6fad59c7b8a9e09b5e91bc",
size=201352314,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32180441-p32282735.7z"),
page_ids=range(32180441, 32282736),
darus_id=94717,
sha1="20ef4b5b9c0bf7803833c9dba53c31a0d13162a3",
size=196075912,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32282736-p32430736.7z"),
page_ids=range(32282736, 32430737),
darus_id=94718,
sha1="8737146195b3c4b9b077e190a83389f397c2c1cc",
size=181102335,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32430737-p32546009.7z"),
page_ids=range(32430737, 32546010),
darus_id=94720,
sha1="f54df8a85e2b568891bd2718aa91bea7bc845f8b",
size=198361049,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32546010-p32680208.7z"),
page_ids=range(32546010, 32680209),
darus_id=94721,
sha1="dfdfa81d6f18c86ed11f6fdd641eb5e213d53537",
size=206795456,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32680209-p32798533.7z"),
page_ids=range(32680209, 32798534),
darus_id=94722,
sha1="a724bb5ee884a8abe1a1e41c4c339511f9546720",
size=202710018,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32798534-p32939949.7z"),
page_ids=range(32798534, 32939950),
darus_id=94724,
sha1="f1b0439e7c42475ee46200402d387245b7bfa371",
size=136158931,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p32939950-p33072183.7z"),
page_ids=range(32939950, 33072184),
darus_id=94725,
sha1="2f67ad30b293af7c31d82f49680f113f3d828b47",
size=133427787,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p33072184-p33193107.7z"),
page_ids=range(33072184, 33193108),
darus_id=94728,
sha1="8e4eaf397033993d504f913c46b1a26f6dc626ef",
size=135928869,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p33193108-p33342079.7z"),
page_ids=range(33193108, 33342080),
darus_id=94729,
sha1="3b5961379a230dd1112e7acc1f22b4dd75a6472b",
size=139651185,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p33342080-p33522182.7z"),
page_ids=range(33342080, 33522183),
darus_id=94730,
sha1="9879ea9c67d4630389575da8dd39084ad610b43a",
size=175609157,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p33522183-p33719177.7z"),
page_ids=range(33522183, 33719178),
darus_id=94733,
sha1="7d8d303846309886103fc0ea373f5c14fb6e6675",
size=178354562,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p33719178-p33977209.7z"),
page_ids=range(33719178, 33977210),
darus_id=94734,
sha1="b3aeb931350a35c97f8e518ac98590d0de16a510",
size=185880545,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p33977210-p34253026.7z"),
page_ids=range(33977210, 34253027),
darus_id=94736,
sha1="006b0f523cba9b67a62ce1295c5066e7b2f9d712",
size=189699929,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p34253027-p34452394.7z"),
page_ids=range(34253027, 34452395),
darus_id=94738,
sha1="ab6807a9c7077e34dd0f6f82f650147688ad44dd",
size=161017724,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p34452395-p34607949.7z"),
page_ids=range(34452395, 34607950),
darus_id=94740,
sha1="3143b9d9750a7882235293842a1314728bc54e88",
size=152248916,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p34607950-p34720755.7z"),
page_ids=range(34607950, 34720756),
darus_id=94742,
sha1="7ef0693ae6b4bff49b3ef20a0f296a90ebc3fefa",
size=210578495,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p34720756-p34798448.7z"),
page_ids=range(34720756, 34798449),
darus_id=94744,
sha1="2fd4e07ac190f6ca8167cea31b9d5e32a044b4ca",
size=165520111,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p34798449-p34862402.7z"),
page_ids=range(34798449, 34862403),
darus_id=94745,
sha1="bcbf9d58248d71af0abe4aff692837576203890d",
size=148196722,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p34862403-p34941985.7z"),
page_ids=range(34862403, 34941986),
darus_id=94746,
sha1="ac24c97bb15954d02b43b3e3d02f90595011961a",
size=164083847,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p34941986-p35007673.7z"),
page_ids=range(34941986, 35007674),
darus_id=94747,
sha1="d65572ca5b41996af30670e1c58823eb4e01549a",
size=150073588,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35007674-p35047468.7z"),
page_ids=range(35007674, 35047469),
darus_id=94749,
sha1="2fe9fe7b14a971161ac09d92eeabc4503cabbc07",
size=115639621,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35047469-p35088932.7z"),
page_ids=range(35047469, 35088933),
darus_id=94750,
sha1="8ed08db0b7706b7b5d8c88650fa9ebd38a864f95",
size=118120952,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35088933-p35140935.7z"),
page_ids=range(35088933, 35140936),
darus_id=94751,
sha1="50b45f020c465f97a1a1e5cab8d04bc0c4f2b25f",
size=133802313,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35140936-p35184536.7z"),
page_ids=range(35140936, 35184537),
darus_id=94752,
sha1="94cd09f63f59a1a6b63dcd0660591348ab56c4a7",
size=125212911,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35184537-p35225319.7z"),
page_ids=range(35184537, 35225320),
darus_id=94753,
sha1="eae938a4a1e7b558c0860f831903b9e171a07c03",
size=119708141,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35225320-p35267687.7z"),
page_ids=range(35225320, 35267688),
darus_id=94754,
sha1="6ff8c831eb7696b313afac2391af1362911dad21",
size=124386921,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35267688-p35309790.7z"),
page_ids=range(35267688, 35309791),
darus_id=94755,
sha1="dbaa669d61ed7a27556fd58981336fc6f38e8fcc",
size=123123908,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35309791-p35352491.7z"),
page_ids=range(35309791, 35352492),
darus_id=94756,
sha1="a3a7a3232704ff53ea867cf92983c33d6b6cd269",
size=126828664,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35352492-p35395082.7z"),
page_ids=range(35352492, 35395083),
darus_id=94757,
sha1="a072ce9816436bac207244af56d4d6b682af9c0f",
size=124924365,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35395083-p35445039.7z"),
page_ids=range(35395083, 35445040),
darus_id=94759,
sha1="1a8f45fdb7aea0e28b0c7df4a2f8c1452c760b62",
size=138039183,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35445040-p35487332.7z"),
page_ids=range(35445040, 35487333),
darus_id=94760,
sha1="f0a58053fa9bbe07219853d7d080149abc9cf3b0",
size=120047290,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35487333-p35531156.7z"),
page_ids=range(35487333, 35531157),
darus_id=94761,
sha1="9a4acaedf1b86ed1fe88c9adc83df6aac324f9db",
size=126222335,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35531157-p35576550.7z"),
page_ids=range(35531157, 35576551),
darus_id=94762,
sha1="51b7a6500aa029e1097fbb241470ba86572c1ba2",
size=130320439,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35576551-p35622151.7z"),
page_ids=range(35576551, 35622152),
darus_id=94763,
sha1="62750ae64f05c0922fb04d9f9ffa536c6c22f55c",
size=126113884,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35622152-p35673552.7z"),
page_ids=range(35622152, 35673553),
darus_id=94764,
sha1="eeee912fbc5ea20d496556c11ce879086f5bf4d9",
size=130319542,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35673553-p35722415.7z"),
page_ids=range(35673553, 35722416),
darus_id=94765,
sha1="db773f66ea7f9f09f09629ab222461bc78802e00",
size=130166401,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35722416-p35767633.7z"),
page_ids=range(35722416, 35767634),
darus_id=94766,
sha1="5e2f7e47673da1b5a23ab8a548846b4e4c7ed003",
size=127769874,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35767634-p35812100.7z"),
page_ids=range(35767634, 35812101),
darus_id=94768,
sha1="cc244e1e5d5d7e3e7169615c6ee7456a89d7a60c",
size=121061353,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35812101-p35860168.7z"),
page_ids=range(35812101, 35860169),
darus_id=94769,
sha1="e98402631bb93cba067e8bc5e067138db1bc2949",
size=129811057,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35860169-p35904950.7z"),
page_ids=range(35860169, 35904951),
darus_id=94770,
sha1="b614347a6213f1c0bd242ec1c01b84a86d196c60",
size=124355534,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35904951-p35947863.7z"),
page_ids=range(35904951, 35947864),
darus_id=94771,
sha1="f34ed7a88424f9ef611a8198563f62bb95183ebb",
size=120034815,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35947864-p35995145.7z"),
page_ids=range(35947864, 35995146),
darus_id=94772,
sha1="7799a993e4b49b786533eab24cb45ead5008f754",
size=127359162,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + "p35995146-p36039268.7z"),
page_ids=range(35995146, 36039269),
darus_id=94773,
sha1="89425efe342b711df282bb826433cf251ccc4e92",
size=126431118,
auto_download=auto_download,
),
WikidatedV1_0SortedEntityStreamsFile(
archive_path=dataset_dir / (prefix + | |
import ast
import collections
import glob
import inspect
import math
import os
import random
import shutil
import subprocess
import time
import warnings
from concurrent.futures import ThreadPoolExecutor
from contextlib import suppress
from datetime import datetime
from typing import Any, Dict, Tuple, Sequence, List, Optional, Callable, Union
import adaptive
import toolz
from adaptive.notebook_integration import in_ipynb
from ipyparallel import Client
from tqdm import tqdm, tqdm_notebook
MAX_LINE_LENGTH = 100
def shuffle_list(*lists, seed=0):
    """Shuffle several lists with one shared random permutation.

    Every input list is permuted identically, so corresponding elements
    stay aligned across lists. The permutation is deterministic for a
    given ``seed``.
    """
    rows = [row for row in zip(*lists)]
    rng = random.Random(seed)
    rng.shuffle(rows)
    return zip(*rows)
def _split(seq, n_parts):
lst = list(seq)
n = math.ceil(len(lst) / n_parts)
return toolz.partition_all(n, lst)
def split_in_balancing_learners(
    learners: List[adaptive.BaseLearner],
    fnames: List[str],
    n_parts: int,
    strategy: str = "npoints",
) -> Tuple[List[adaptive.BaseLearner], List[str]]:
    r"""Group learners and fnames into `n_parts` `adaptive.BalancingLearner`\s.

    Parameters
    ----------
    learners : list
        List of learners.
    fnames : list
        List of filenames corresponding to ``learners``.
    n_parts : int
        Total number of `~adaptive.BalancingLearner`\s.
    strategy : str
        Learning strategy of the `~adaptive.BalancingLearner`.

    Returns
    -------
    new_learners, new_fnames
    """
    new_learners = []
    new_fnames = []
    for part in _split(zip(learners, fnames), n_parts):
        part_learners, part_fnames = zip(*part)
        balancing = adaptive.BalancingLearner(part_learners, strategy=strategy)
        new_learners.append(balancing)
        new_fnames.append(part_fnames)
    return new_learners, new_fnames
def _progress(seq: Sequence, with_progress_bar: bool = True, desc: str = ""):
    """Wrap *seq* in a tqdm progress bar, unless progress bars are disabled."""
    if not with_progress_bar:
        return seq
    # Pick the notebook-flavoured bar when running inside IPython/Jupyter.
    wrapper = tqdm_notebook if in_ipynb() else tqdm
    return wrapper(list(seq), desc=desc)
def _cancel_function(cancel_cmd: str, queue_function: Callable) -> Callable:
def cancel(
job_names: List[str], with_progress_bar: bool = True, max_tries: int = 5
) -> Callable:
"""Cancel all jobs in `job_names`.
Parameters
----------
job_names : list
List of job names.
with_progress_bar : bool, default: True
Display a progress bar using `tqdm`.
max_tries : int, default: 5
Maximum number of attempts to cancel a job.
"""
def to_cancel(job_names):
return [
job_id
for job_id, info in queue_function().items()
if info["name"] in job_names
]
def cancel_jobs(job_ids):
for job_id in _progress(job_ids, with_progress_bar, "Canceling jobs"):
cmd = f"{cancel_cmd} {job_id}".split()
returncode = subprocess.run(cmd, stderr=subprocess.PIPE).returncode
if returncode != 0:
warnings.warn(f"Couldn't cancel '{job_id}'.", UserWarning)
job_names = set(job_names)
for _ in range(max_tries):
job_ids = to_cancel(job_names)
if not job_ids:
# no more running jobs
break
cancel_jobs(job_ids)
return cancel
def combo_to_fname(combo: Dict[str, Any], folder: Optional[str] = None) -> str:
    """Build a human-readable ``.pickle`` filename from a parameter combo."""
    parts = [f"{key}_{value}" for key, value in combo.items()]
    fname = "__".join(parts) + ".pickle"
    return fname if folder is None else os.path.join(folder, fname)
def cleanup_files(
    job_names: List[str],
    extensions: List[str] = ("sbatch", "out", "batch", "e*", "o*"),
    with_progress_bar: bool = True,
    move_to: Optional[str] = None,
    log_file_folder: str = "",
) -> None:
    """Cleanup the scheduler log-files files.

    Parameters
    ----------
    job_names : list
        List of job names.
    extensions : list
        List of file extensions to be removed.
    with_progress_bar : bool, default: True
        Display a progress bar using `tqdm`.
    move_to : str, default: None
        Move the file to a different directory.
        If None the file is removed.
    log_file_folder : str, default: ''
        The folder in which to delete the log-files.
    """
    # Collect every matching file for each (job, extension) pair.
    fnames = []
    for job_name in job_names:
        for extension in extensions:
            pattern = f"{job_name}*.{extension}"
            fnames.extend(glob.glob(pattern))
            if log_file_folder:
                # The log-files might live in a separate folder; reuse the
                # same pattern there.
                fnames.extend(glob.glob(os.path.join(log_file_folder, pattern)))
    _remove_or_move_files(fnames, with_progress_bar, move_to)
def _remove_or_move_files(
    fnames: List[str], with_progress_bar: bool = True, move_to: Optional[str] = None
) -> None:
    """Remove files by filename.

    Parameters
    ----------
    fnames : list
        List of filenames.
    with_progress_bar : bool, default: True
        Display a progress bar using `tqdm`.
    move_to : str, default None
        Move the file to a different directory.
        If None the file is removed.
    """
    failures = 0
    for fname in _progress(fnames, with_progress_bar, "Removing files"):
        try:
            if move_to is None:
                os.remove(fname)
            else:
                os.makedirs(move_to, exist_ok=True)
                shutil.move(fname, move_to)
        except Exception:
            # Best-effort cleanup: count the failure and keep going.
            failures += 1
    if failures:
        warnings.warn(f"Failed to remove {failures} files.")
def load_parallel(
    learners: List[adaptive.BaseLearner],
    fnames: List[str],
    *,
    with_progress_bar: bool = True,
) -> None:
    r"""Load a sequence of learners in parallel.

    Parameters
    ----------
    learners : sequence of `adaptive.BaseLearner`\s
        The learners to be loaded.
    fnames : sequence of str
        A list of filenames corresponding to `learners`.
    with_progress_bar : bool, default True
        Display a progress bar using `tqdm`.
    """

    def load(learner, fname):
        learner.load(fname)

    with ThreadPoolExecutor() as ex:
        # (removed a dead `futs = []` that was immediately overwritten)
        iterator = zip(learners, fnames)
        pbar = _progress(iterator, with_progress_bar, "Submitting loading tasks")
        futs = [ex.submit(load, *args) for args in pbar]
        for fut in _progress(futs, with_progress_bar, "Finishing loading"):
            fut.result()
def save_parallel(
    learners: List[adaptive.BaseLearner],
    fnames: List[str],
    *,
    with_progress_bar: bool = True,
) -> None:
    r"""Save a sequence of learners in parallel.

    Parameters
    ----------
    learners : sequence of `adaptive.BaseLearner`\s
        The learners to be saved.
    fnames : sequence of str
        A list of filenames corresponding to `learners`.
    with_progress_bar : bool, default True
        Display a progress bar using `tqdm`.
    """

    def save(learner, fname):
        learner.save(fname)

    with ThreadPoolExecutor() as ex:
        # (removed a dead `futs = []` that was immediately overwritten)
        iterator = zip(learners, fnames)
        pbar = _progress(iterator, with_progress_bar, "Submitting saving tasks")
        futs = [ex.submit(save, *args) for args in pbar]
        for fut in _progress(futs, with_progress_bar, "Finishing saving"):
            fut.result()
def _get_status_prints(fname: str, only_last: bool = True):
status_lines = []
with open(fname) as f:
lines = f.readlines()
if not lines:
return status_lines
for line in reversed(lines):
if "current status" in line:
status_lines.append(line)
if only_last:
return status_lines
return status_lines
def parse_log_files(
    job_names: List[str],
    only_last: bool = True,
    db_fname: Optional[str] = None,
    log_file_folder: str = "",
):
    """Parse the log-files and convert it to a `~pandas.core.frame.DataFrame`.

    This only works if you use `adaptive_scheduler.client_support.log_info`
    inside your ``run_script``.

    Parameters
    ----------
    job_names : list
        List of job names.
    only_last : bool, default: True
        Only use the last printed status message.
    db_fname : str, optional
        The database filename. If passed, ``fname`` will be populated.
    log_file_folder : str, default: ""
        The folder in which the log-files are.

    Returns
    -------
    `~pandas.core.frame.DataFrame`
    """
    # XXX: it could be that the job_id and the logfile don't match up ATM! This
    # probably happens when a job got canceled and is pending now.
    try:
        import pandas as pd

        with_pandas = True
    except ImportError:
        with_pandas = False
        warnings.warn("`pandas` is not installed, a list of dicts will be returned.")

    # import here to avoid circular imports
    from adaptive_scheduler.server_support import queue, get_database

    def convert_type(k, v):
        # Status values are logged as strings; convert them back to objects.
        if k == "elapsed_time":
            return pd.to_timedelta(v)
        elif k == "overhead":
            # e.g. "12.3%" -> 12.3
            return float(v[:-1])
        else:
            return ast.literal_eval(v)

    def join_str(info):
        """Turns an incorrectly split string

        ["elapsed_time=1", "day,", "0:20:57.330515", "nlearners=31"]

        back the correct thing

        ['elapsed_time=1 day, 0:20:57.330515', 'nlearners=31']
        """
        _info = []
        for x in info:
            if "=" in x:
                _info.append(x)
            else:
                _info[-1] += f" {x}"
        return _info

    infos = []
    for job in job_names:
        fnames = glob.glob(os.path.join(log_file_folder, f"{job}-*.out"))
        if not fnames:
            continue
        fname = fnames[-1]  # take the last file
        statuses = _get_status_prints(fname, only_last)
        # `_get_status_prints` always returns a list (never None); skip
        # log-files without any status lines.
        if not statuses:
            continue
        for status in statuses:
            # `timestamp` (was `time`) avoids shadowing the `time` module.
            timestamp, info = status.split("current status")
            info = join_str(info.strip().split(" "))
            info = dict([x.split("=") for x in info])
            info = {k: convert_type(k, v) for k, v in info.items()}
            info["job"] = job
            info["time"] = datetime.strptime(timestamp.strip(), "%Y-%m-%d %H:%M.%S")
            info["log_file"] = fname
            infos.append(info)

    # Populate state and job_id from the queue
    mapping = {
        info["name"]: (job_id, info["state"]) for job_id, info in queue().items()
    }

    for info in infos:
        info["job_id"], info["state"] = mapping.get(info["job"], (None, None))

    if db_fname is not None:
        # populate fname and is_done from the database
        db = get_database(db_fname)
        fnames = {info["job_id"]: info["fname"] for info in db}
        id_done = {info["job_id"]: info["is_done"] for info in db}
        for info in infos:
            info["fname"] = fnames.get(info["job_id"], "UNKNOWN")
            info["is_done"] = id_done.get(info["job_id"], "UNKNOWN")

    return pd.DataFrame(infos) if with_pandas else infos
def logs_with_string_or_condition(
job_names: List[str], error: Union[str, callable]
) -> Dict[str, list]:
"""Get jobs that have `string` (or apply a callable) inside their log-file.
Either use `string` or `error`.
Parameters
----------
job_names : list
List of job names.
error : str or callable
String that is searched for or callable that is applied
to the log text. Must take a single argument, a list of
strings, and return True if the job has to be killed, or
False if not.
Returns
-------
has_string : list
List with jobs that have the string inside their log-file.
"""
if isinstance(error, str):
has_error = lambda lines: error in "".join(lines) # noqa: E731
elif isinstance(error, callable):
has_error = error
has_string = collections.defaultdict(list)
for job in job_names:
fnames = glob.glob(f"{job}-*.out")
if not fnames:
continue
for fname in fnames:
job_id = fname.split(f"{job}-")[1].split(".out")[0]
with open(fname) | |
from __future__ import print_function
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import pool
from theano.tensor.nnet import conv2d
from fashion_mnist import load_mnist
import scipy.io
import six.moves.cPickle as pickle
class LogisticRegression(object):
    """Multi-class logistic-regression (softmax) output layer.

    Computes ``p(y|x) = softmax(x . W + b)`` and predicts the argmax class.

    NOTE(review): several methods below (`negative_log_likelihood_vector`,
    `sigmoid_cost_function`, `mse_cost_function`, `errors1`) reference
    ``self.output``, but the assignment of ``self.output`` is commented out
    in ``__init__`` — calling any of them would raise AttributeError as the
    code stands.  Only `negative_log_likelihood` and `errors` are usable.
    """

    def __init__(self, input, n_in, n_out):
        # input: symbolic matrix of examples (one row per sample)
        # n_in: dimensionality of the input; n_out: number of classes
        # start-snippet-1
        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        self.W = theano.shared(
            value=numpy.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            ),
            name='W',
            borrow=True
        )
        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared(
            value=numpy.zeros(
                (n_out,),
                dtype=theano.config.floatX
            ),
            name='b',
            borrow=True
        )
        # symbolic expression for computing the matrix of class-membership
        # probabilities
        # Where:
        # W is a matrix where column-k represent the separation hyperplane for
        # class-k
        # x is a matrix where row-j represents input training sample-j
        # b is a vector where element-k represent the free parameter of
        # hyperplane-k
        #self.output = T.nnet.relu(T.dot(input, self.W) + self.b)
        # self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
        # predicted class = argmax of the per-class probabilities
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        # self.y_pred = T.round(self.output)
        # T.dot(input, self.W) + self.b
        # T.argmax(self.p_y_given_x, axis=1)
        # end-snippet-1
        # parameters of the model
        self.params = [self.W, self.b]
        # keep track of model input
        self.input = input

    def negative_log_likelihood_vector(self, y):
        # Binary cross-entropy against self.output.
        # NOTE(review): self.output is never set (see class docstring).
        return -T.mean(y*T.log(self.output) + (1-y)*T.log(1-self.output))

    def sigmoid_cost_function(self, y):
        # Cross-entropy written with a switch on the label value.
        # NOTE(review): self.output is never set (see class docstring).
        return T.mean(T.switch(T.eq(y, 1), -T.log(self.output), -T.log(1-self.output)))

    def mse_cost_function(self, y):
        # Mean squared error against self.output.
        # NOTE(review): self.output is never set (see class docstring).
        return T.mean(T.square(y - self.output))

    def negative_log_likelihood(self, y):
        # Mean NLL of the correct class: picks p_y_given_x[i, y[i]] per row.
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
        #T.mean(T.square(y - self.output))
        #- 0.01 * T.mean(y * T.log(self.output))
        #-T.mean(y * T.log(y / self.output))
        # end-snippet-2

    def errors1(self, y):
        # Regression-style error (MSE) for float targets.
        # NOTE(review): self.output is never set (see class docstring).
        if y.ndim != self.output.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.output.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('float'):
            return T.mean(T.square(y - self.output))
            #1 - T.mean(T.all(T.isclose(y, self.output, rtol=0, atol=0.02), axis=1))
            #T.mean(T.sqr(y - self.output))
            #1 - T.mean(T.all(T.isclose(y, self.output, rtol=0, atol=0.5), axis=1))
            #1 - T.mean(T.all(T.isclose(y, self.output, rtol=0, atol=0.2), axis=1))
            # T.abs_(T.mean(T.invert(T.all(T.isclose(self.output, y, rtol=0.005, atol=0.3), axis=1))))
        else:
            raise NotImplementedError()

    def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
class HiddenLayer(object):
    """Fully-connected layer: ``output = activation(input . W + b)``.

    When no weights are supplied, ``W`` is drawn uniformly from
    ``[-sqrt(6/(n_in+n_out)), sqrt(6/(n_in+n_out))]`` (the [Xavier10]
    heuristic for tanh); for sigmoid activations the initial weights are
    scaled by 4.  ``b`` starts at zero.
    """

    def __init__(self, rng, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh):
        self.input = input
        if W is None:
            # Uniform init in +/- sqrt(6/(fan_in+fan_out)), cast to floatX
            # so the graph can run on GPU.
            bound = numpy.sqrt(6. / (n_in + n_out))
            initial_W = numpy.asarray(
                rng.uniform(low=-bound, high=bound, size=(n_in, n_out)),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                # Sigmoid units want 4x larger initial weights than tanh.
                initial_W *= 4
            W = theano.shared(value=initial_W, name='W', borrow=True)
        if b is None:
            b = theano.shared(
                value=numpy.zeros((n_out,), dtype=theano.config.floatX),
                name='b',
                borrow=True
            )
        self.W = W
        self.b = b
        pre_activation = T.dot(input, self.W) + self.b
        self.output = (
            pre_activation if activation is None else activation(pre_activation)
        )
        # parameters of the model
        self.params = [self.W, self.b]
class LeNetConvPoolLayer(object):
    """Convolution followed by max-pooling and an activation.

    ``filter_shape`` is (n_filters, n_input_maps, h, w); ``image_shape`` is
    (batch, n_input_maps, img_h, img_w); the two must agree on the number of
    input feature maps.
    """

    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), activation=T.tanh):
        assert image_shape[1] == filter_shape[1]
        self.input = input
        # Fan-in/fan-out for the Xavier-style uniform weight bound; pooling
        # divides the number of output connections per unit.
        fan_in = numpy.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
                   numpy.prod(poolsize))
        weight_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-weight_bound, high=weight_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )
        # One bias per output feature map.
        self.b = theano.shared(
            value=numpy.zeros((filter_shape[0],), dtype=theano.config.floatX),
            borrow=True
        )
        # Convolve the input feature maps with the filters, then max-pool
        # each feature map independently.
        conv_out = conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            input_shape=image_shape
        )
        pooled = pool.pool_2d(
            input=conv_out,
            ws=poolsize,
            ignore_border=True
        )
        # Broadcast the bias over (batch, width, height) by reshaping it to
        # (1, n_filters, 1, 1) before adding.
        pre_activation = pooled + self.b.dimshuffle('x', 0, 'x', 'x')
        self.output = (
            pre_activation if activation is None else activation(pre_activation)
        )
        # store parameters of this layer
        self.params = [self.W, self.b]
def shared_dataset(data_x, data_y, borrow=True):
    """Load the dataset into Theano shared variables.

    Inputs and labels are both stored as floatX (required for GPU storage);
    the labels are returned cast back to int32 for use as class indices.
    """
    x_shared = theano.shared(
        numpy.asarray(data_x, dtype=theano.config.floatX), borrow=borrow
    )
    y_shared = theano.shared(
        numpy.asarray(data_y, dtype=theano.config.floatX), borrow=borrow
    )
    return x_shared, T.cast(y_shared, 'int32')
def evaluate_lenet5(learning_rate=0.05, weight_decay=0.001, n_epochs=200,
nkerns=[20, 30], batch_size=500):
name = 'FashionMnist_'+str(learning_rate)+'_'+str(weight_decay) + '_' + str(nkerns) + 'pt_wd'
rng = numpy.random.RandomState(23455)
path = os.getcwd()
train_set_x, train_set_y = load_mnist(path=path, kind='train')
valid_set_x, valid_set_y = shared_dataset(train_set_x[50000:60000], train_set_y[50000:60000])
train_set_x, train_set_y = shared_dataset(train_set_x[0:50000], train_set_y[0:50000])
test_set_x, test_set_y = load_mnist(path=path, kind='t10k')
test_set_x, test_set_y = shared_dataset(test_set_x, test_set_y)
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches //= batch_size
n_valid_batches //= batch_size
n_test_batches //= batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
#print(x.type)
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
layer0_input = x.reshape((batch_size, 1, 28, 28))
layer0 = LeNetConvPoolLayer(
rng,
input=layer0_input,
image_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
)
layer1 = LeNetConvPoolLayer(
rng,
input=layer0.output,
image_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
)
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 4 * 4,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
with open(name + '_Initial.pkl', 'wb') as f:
pickle.dump([layer0, layer1, layer2_input, layer2, layer3], f)
# the cost we minimize during training is the NLL of the model
cost = layer3.negative_log_likelihood(y)
params = layer3.params + layer2.params + layer1.params + layer0.params
grads = T.grad(cost, params)
updates = [
(param_i, param_i - learning_rate * (grad_i + weight_decay * param_i))
for param_i, grad_i in zip(params, grads)]
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
[index],
layer3.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
train_model = theano.function(
[index],
cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
print('... training')
patience = 200000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience // 2)
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
error_line = numpy.zeros(n_epochs)
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
iter = (epoch - 1) * n_train_batches + minibatch_index
if iter % 100 == 0:
print('training @ iter = ', iter)
cost_ij = train_model(minibatch_index)
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in range(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
| |
for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Event]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_event_list = _Endpoint(
settings={
'response_type': ([Event],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/event',
'operation_id': 'recovery_project_plan_event_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'limit',
'skip',
],
'required': [
'project_id',
'location_id',
'plan_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'limit',
]
},
root_map={
'validations': {
('limit',): {
'inclusive_maximum': 1000,
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
'limit':
(float,),
'skip':
(float,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
'limit': '$limit',
'skip': '$skip',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
'limit': 'query',
'skip': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_event_list
)
def __recovery_project_plan_get(
self,
project_id,
location_id,
plan_id,
**kwargs
):
"""Get recovery/plan # noqa: E501
Returns a single plan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_get(project_id, location_id, plan_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Plan
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_get = _Endpoint(
settings={
'response_type': (Plan,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}',
'operation_id': 'recovery_project_plan_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'plan_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'plan_id': 'planId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'plan_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_get
)
def __recovery_project_plan_list(
self,
project_id,
location_id,
**kwargs
):
"""List recovery/plan # noqa: E501
List plan # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_list(project_id, location_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
Keyword Args:
name (str): Filter by name. [optional]
tag_value (str): Filter by tag.value. [optional]
tag_key (str): Filter by tag.key. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Plan]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_list = _Endpoint(
settings={
'response_type': ([Plan],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan',
'operation_id': 'recovery_project_plan_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'name',
'tag_value',
'tag_key',
],
'required': [
'project_id',
'location_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'name':
(str,),
'tag_value':
(str,),
'tag_key':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'name': 'name',
'tag_value': 'tag.value',
'tag_key': 'tag.key',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'name': 'query',
'tag_value': 'query',
'tag_key': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__recovery_project_plan_list
)
def __recovery_project_plan_service_get(
self,
project_id,
location_id,
plan_id,
service_id,
**kwargs
):
"""Get recovery/plan.service # noqa: E501
Get recovery/plan.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.recovery_project_plan_service_get(project_id, location_id, plan_id, service_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
plan_id (str): Plan Id
service_id (str): serviceId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ResourceService
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['plan_id'] = \
plan_id
kwargs['service_id'] = \
service_id
return self.call_with_http_info(**kwargs)
self.recovery_project_plan_service_get = _Endpoint(
settings={
'response_type': (ResourceService,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/recovery/{locationId}/project/{projectId}/plan/{planId}/service/{serviceId}',
'operation_id': 'recovery_project_plan_service_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'plan_id',
'service_id',
],
'required': [
'project_id',
'location_id',
'plan_id',
'service_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
| |
= None,
contents: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.class_ = "File"
self.location = location
self.path = path
self.basename = basename
self.dirname = dirname
self.nameroot = nameroot
self.nameext = nameext
self.checksum = checksum
self.size = size
self.secondaryFiles = secondaryFiles
self.format = format
self.contents = contents
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "File":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if _doc.get("class") != "File":
raise ValidationException("Not a File")
if "location" in _doc:
try:
location = load_field(
_doc.get("location"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `location` field is not valid because:",
SourceLine(_doc, "location", str),
[e],
)
)
else:
location = None
if "path" in _doc:
try:
path = load_field(
_doc.get("path"),
uri_union_of_None_type_or_strtype_False_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `path` field is not valid because:",
SourceLine(_doc, "path", str),
[e],
)
)
else:
path = None
if "basename" in _doc:
try:
basename = load_field(
_doc.get("basename"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `basename` field is not valid because:",
SourceLine(_doc, "basename", str),
[e],
)
)
else:
basename = None
if "dirname" in _doc:
try:
dirname = load_field(
_doc.get("dirname"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `dirname` field is not valid because:",
SourceLine(_doc, "dirname", str),
[e],
)
)
else:
dirname = None
if "nameroot" in _doc:
try:
nameroot = load_field(
_doc.get("nameroot"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `nameroot` field is not valid because:",
SourceLine(_doc, "nameroot", str),
[e],
)
)
else:
nameroot = None
if "nameext" in _doc:
try:
nameext = load_field(
_doc.get("nameext"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `nameext` field is not valid because:",
SourceLine(_doc, "nameext", str),
[e],
)
)
else:
nameext = None
if "checksum" in _doc:
try:
checksum = load_field(
_doc.get("checksum"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `checksum` field is not valid because:",
SourceLine(_doc, "checksum", str),
[e],
)
)
else:
checksum = None
if "size" in _doc:
try:
size = load_field(
_doc.get("size"),
union_of_None_type_or_inttype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `size` field is not valid because:",
SourceLine(_doc, "size", str),
[e],
)
)
else:
size = None
if "secondaryFiles" in _doc:
try:
secondaryFiles = load_field(
_doc.get("secondaryFiles"),
union_of_None_type_or_array_of_union_of_FileLoader_or_DirectoryLoader,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `secondaryFiles` field is not valid because:",
SourceLine(_doc, "secondaryFiles", str),
[e],
)
)
else:
secondaryFiles = None
if "format" in _doc:
try:
format = load_field(
_doc.get("format"),
uri_union_of_None_type_or_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `format` field is not valid because:",
SourceLine(_doc, "format", str),
[e],
)
)
else:
format = None
if "contents" in _doc:
try:
contents = load_field(
_doc.get("contents"),
union_of_None_type_or_strtype,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `contents` field is not valid because:",
SourceLine(_doc, "contents", str),
[e],
)
)
else:
contents = None
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `class`, `location`, `path`, `basename`, `dirname`, `nameroot`, `nameext`, `checksum`, `size`, `secondaryFiles`, `format`, `contents`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'File'", None, _errors__)
return cls(
location=location,
path=path,
basename=basename,
dirname=dirname,
nameroot=nameroot,
nameext=nameext,
checksum=checksum,
size=size,
secondaryFiles=secondaryFiles,
format=format,
contents=contents,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
r["class"] = "File"
if self.location is not None:
u = save_relative_uri(self.location, base_url, False, None, relative_uris)
if u:
r["location"] = u
if self.path is not None:
u = save_relative_uri(self.path, base_url, False, None, relative_uris)
if u:
r["path"] = u
if self.basename is not None:
r["basename"] = save(
self.basename, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.dirname is not None:
r["dirname"] = save(
self.dirname, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.nameroot is not None:
r["nameroot"] = save(
self.nameroot, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.nameext is not None:
r["nameext"] = save(
self.nameext, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.checksum is not None:
r["checksum"] = save(
self.checksum, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.size is not None:
r["size"] = save(
self.size, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.secondaryFiles is not None:
r["secondaryFiles"] = save(
self.secondaryFiles,
top=False,
base_url=base_url,
relative_uris=relative_uris,
)
if self.format is not None:
u = save_relative_uri(self.format, base_url, True, None, relative_uris)
if u:
r["format"] = u
if self.contents is not None:
r["contents"] = save(
self.contents, top=False, base_url=base_url, relative_uris=relative_uris
)
# top refers to the directory level
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(
[
"class",
"location",
"path",
"basename",
"dirname",
"nameroot",
"nameext",
"checksum",
"size",
"secondaryFiles",
"format",
"contents",
]
)
class Directory(Savable):
"""
Represents a directory to present to a command line tool.
Directories are represented as objects with `class` of `Directory`. Directory objects have
a number of properties that provide metadata about the directory.
The `location` property of a Directory is a URI that uniquely identifies
the directory. Implementations must support the file:// URI scheme and may
support other schemes such as http://. Alternately to `location`,
implementations must also accept the `path` property on Directory, which
must be a filesystem path available on the same host as the CWL runner (for
inputs) or the runtime environment of a command line tool execution (for
command line tool outputs).
A Directory object may have a `listing` field. This is a list of File and
Directory objects that are contained in the Directory. For each entry in
`listing`, the `basename` property defines the name of the File or
Subdirectory when staged to disk. If `listing` is not provided, the
implementation must have some way of fetching the Directory listing at
runtime based on the `location` field.
If a Directory does not have `location`, it is a Directory literal. A
Directory literal must provide `listing`. Directory literals must be
created on disk at runtime as needed.
The resources in a Directory literal do not need to have any implied
relationship in their `location`. For example, a Directory listing may
contain two files located on different hosts. It is the responsibility of
the runtime to ensure that those files are staged to disk appropriately.
Secondary files associated with files in `listing` must also be staged to
the same Directory.
When executing a CommandLineTool, Directories must be recursively staged
    first and have local values of `path` assigned.
Directory objects in CommandLineTool output must provide either a
`location` URI or a `path` property in the context of the tool execution
runtime (local to the compute node, or within the executing container).
An ExpressionTool may forward file references from input to output by using
the same value for `location`.
Name conflicts (the same `basename` appearing multiple times in `listing`
or in any entry in `secondaryFiles` in the listing) is a fatal error.
"""
def __init__(
self,
location: Optional[Any] = None,
path: Optional[Any] = None,
basename: Optional[Any] = None,
listing: Optional[Any] = None,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.class_ = "Directory"
self.location = location
self.path = path
self.basename = basename
self.listing = listing
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "Directory":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
if _doc.get("class") != "Directory":
raise ValidationException("Not a Directory")
if "location" | |
#!/usr/bin/env python
#ref: https://github.com/googleapis/python-iot
from google.cloud import storage
import paho.mqtt.client as mqtt
import jwt
import time
import ssl
import random
import os
import logging
import datetime
import argparse
import random
import time
import datetime
import json
# Raise the threshold for the noisy googleapiclient discovery-cache logger.
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.CRITICAL)
# The initial backoff time after a disconnection occurs, in seconds.
# NOTE: module-level mutable state — reset in on_connect and doubled in the
# publish loops below.
minimum_backoff_time = 1
# The maximum backoff time before giving up, in seconds.
MAXIMUM_BACKOFF_TIME = 32
# Whether to wait with exponential backoff before publishing.
# Set True by on_disconnect, cleared by on_connect.
should_backoff = False
# [START iot_mqtt_jwt]
def create_jwt(project_id, private_key_file, algorithm):
    """Build a JWT (https://jwt.io) used to authenticate the MQTT connection.

    Args:
        project_id: The cloud project ID this device belongs to.
        private_key_file: Path to a file containing either an RSA256 or
            ES256 private key.
        algorithm: The encryption algorithm to use, 'RS256' or 'ES256'.

    Returns:
        An encoded JWT tied to project_id, valid for 20 minutes. After it
        expires the client is disconnected and a new token must be minted.

    Raises:
        ValueError: If the private_key_file does not contain a known key.
    """
    claims = {
        # Issued-at time.
        'iat': datetime.datetime.utcnow(),
        # Expiry: the broker disconnects the device after this point.
        'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=20),
        # The audience field should always be set to the GCP project id.
        'aud': project_id,
    }
    with open(private_key_file, 'r') as key_file:
        private_key = key_file.read()
    print('Creating JWT using {} from private key file {}'.format(
        algorithm, private_key_file))
    return jwt.encode(claims, private_key, algorithm=algorithm)
# [END iot_mqtt_jwt]
# [START iot_mqtt_config]
def error_str(rc):
    """Render a Paho result code alongside its human-readable description."""
    description = mqtt.error_string(rc)
    return '{}: {}'.format(rc, description)
def on_connect(unused_client, unused_userdata, unused_flags, rc):
    """Paho callback invoked once a connection attempt has completed."""
    print('on_connect', mqtt.connack_string(rc))
    # A successful connect clears any pending exponential backoff state.
    global should_backoff, minimum_backoff_time
    should_backoff = False
    minimum_backoff_time = 1
def on_disconnect(unused_client, unused_userdata, rc):
    """Paho callback invoked when the connection is lost."""
    print('on_disconnect', error_str(rc))
    # Tell the publish loop to wait with exponential backoff before the
    # next reconnect attempt.
    global should_backoff
    should_backoff = True
def on_publish(unused_client, unused_userdata, unused_mid):
    """Paho callback invoked after a message has been handed to the broker."""
    print('on_publish')
def on_message(unused_client, unused_userdata, message):
    """Paho callback fired for every message received on a subscription."""
    decoded_payload = str(message.payload.decode('utf-8'))
    # Log the payload together with its topic and QoS for visibility.
    print('Received message \'{}\' on topic \'{}\' with Qos {}'.format(
        decoded_payload, message.topic, str(message.qos)))
def get_client(
        project_id, cloud_region, registry_id, device_id, private_key_file,
        algorithm, ca_certs, mqtt_bridge_hostname, mqtt_bridge_port):
    """Create an MQTT client connected to the Google Cloud IoT Core bridge.

    Args:
        project_id: GCP project the device belongs to.
        cloud_region: Cloud region of the device registry.
        registry_id: Device registry identifier.
        device_id: Device identifier within the registry.
        private_key_file: Path to the RS256/ES256 private key for the JWT.
        algorithm: JWT signing algorithm, 'RS256' or 'ES256'.
        ca_certs: Path to the CA certificates file (e.g. Google roots.pem).
        mqtt_bridge_hostname: Hostname of the MQTT bridge.
        mqtt_bridge_port: Port of the MQTT bridge.

    Returns:
        A connected paho Client subscribed to the device's config and
        commands topics.
    """
    # For Cloud IoT Core the client_id must follow this exact format.
    client_id = 'projects/{}/locations/{}/registries/{}/devices/{}'.format(
        project_id, cloud_region, registry_id, device_id)
    print('Device client_id is \'{}\''.format(client_id))
    client = mqtt.Client(client_id=client_id)
    # With Google Cloud IoT Core, the username field is ignored, and the
    # password field is used to transmit a JWT to authorize the device.
    # BUG FIX: the password was a broken '<PASSWORD>(' placeholder; it must
    # be a freshly minted JWT built from the device's private key.
    client.username_pw_set(
        username='unused',
        password=create_jwt(
            project_id, private_key_file, algorithm))
    # Enable SSL/TLS support.
    client.tls_set(ca_certs=ca_certs, tls_version=ssl.PROTOCOL_TLSv1_2)
    # Register message callbacks. https://eclipse.org/paho/clients/python/docs/
    # describes additional callbacks that Paho supports. In this example, the
    # callbacks just print to standard out.
    client.on_connect = on_connect
    client.on_publish = on_publish
    client.on_disconnect = on_disconnect
    client.on_message = on_message
    # Connect to the Google MQTT bridge.
    client.connect(mqtt_bridge_hostname, mqtt_bridge_port)
    # This is the topic that the device will receive configuration updates on.
    mqtt_config_topic = '/devices/{}/config'.format(device_id)
    # Subscribe to the config topic with QoS 1 (at-least-once delivery).
    client.subscribe(mqtt_config_topic, qos=1)
    # The topic that the device will receive commands on.
    mqtt_command_topic = '/devices/{}/commands/#'.format(device_id)
    # Subscribe to the commands topic with QoS 0 (at-most-once delivery).
    # The previous comment claimed QoS 1, which did not match the code.
    print('Subscribing to {}'.format(mqtt_command_topic))
    client.subscribe(mqtt_command_topic, qos=0)
    return client
# [END iot_mqtt_config]
def cloudstorage_demo(args):
    """Upload every .jpg file under ./data to an existing Storage bucket.

    Args:
        args: Parsed CLI arguments (unused here; kept for a uniform demo
            entry-point signature).

    NOTE(review): every image is uploaded to the same blob name
    ('sjsu/img1.jpg'), so each upload overwrites the previous one —
    presumably intentional for this single-object demo; confirm.
    """
    data_dir = './data'
    # Instantiates a google cloud client
    storage_client = storage.Client()
    # Upload images to an existing bucket.
    bucketexist = storage_client.bucket('cmpelkk_imagetest')
    blobexist = bucketexist.blob('sjsu/img1.jpg')
    for filename in os.listdir(data_dir):
        if not filename.endswith(".jpg"):
            continue
        print(filename)
        # BUG FIX: os.listdir returns bare names; join with the directory so
        # the upload works regardless of the current working directory.
        blobexist.upload_from_filename(os.path.join(data_dir, filename))
def mqtt_device_demo(args):
    """Connect a device to the MQTT bridge and publish simple text payloads.

    Publishes ``args.num_messages`` messages (one per second) to the events
    or state topic, backing off exponentially after disconnects and
    refreshing the JWT when it nears expiry.
    """
    # [START iot_mqtt_run]
    global minimum_backoff_time
    global MAXIMUM_BACKOFF_TIME
    # Publish to the events or state topic based on the flag.
    sub_topic = 'events' if args.message_type == 'event' else 'state'
    mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
    # Remember when the JWT was issued so it can be refreshed before expiry.
    jwt_iat = datetime.datetime.utcnow()
    jwt_exp_mins = args.jwt_expires_minutes
    client = get_client(
        args.project_id, args.cloud_region, args.registry_id,
        args.device_id, args.private_key_file, args.algorithm,
        args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
    # Publish num_messages messages to the MQTT bridge once per second.
    for i in range(1, args.num_messages + 1):
        # Process network events.
        client.loop()
        # Wait if backoff is required (flag is set by on_disconnect).
        if should_backoff:
            # If backoff time is too large, give up.
            if minimum_backoff_time > MAXIMUM_BACKOFF_TIME:
                print('Exceeded maximum backoff time. Giving up.')
                break
            # Otherwise, wait (with up-to-1s jitter) and connect again.
            delay = minimum_backoff_time + random.randint(0, 1000) / 1000.0
            print('Waiting for {} before reconnecting.'.format(delay))
            time.sleep(delay)
            minimum_backoff_time *= 2
            client.connect(args.mqtt_bridge_hostname, args.mqtt_bridge_port)
        payload = '{}/{}-payload-{}'.format(
            args.registry_id, args.device_id, i)
        print('Publishing message {}/{}: \'{}\''.format(
            i, args.num_messages, payload))
        # [START iot_mqtt_jwt_refresh]
        # NOTE(review): timedelta.seconds ignores whole days; this is fine
        # only while tokens are refreshed well within 24h — confirm.
        seconds_since_issue = (datetime.datetime.utcnow() - jwt_iat).seconds
        if seconds_since_issue > 60 * jwt_exp_mins:
            print('Refreshing token after {}s'.format(seconds_since_issue))
            jwt_iat = datetime.datetime.utcnow()
            # Disconnect and rebuild the client so a fresh JWT is sent.
            client.loop()
            client.disconnect()
            client = get_client(
                args.project_id, args.cloud_region,
                args.registry_id, args.device_id, args.private_key_file,
                args.algorithm, args.ca_certs, args.mqtt_bridge_hostname,
                args.mqtt_bridge_port)
        # [END iot_mqtt_jwt_refresh]
        # Publish "payload" to the MQTT topic. qos=1 means at least once
        # delivery. Cloud IoT Core also supports qos=0 for at most once
        # delivery.
        client.publish(mqtt_topic, payload, qos=1)
        # Send events every second. State should not be updated as often
        time.sleep(1)
    # for i in range(0, 60):
    #     time.sleep(1)
    #     client.loop()
    # [END iot_mqtt_run]
def read_sensor(count):
    """Simulate one sensor reading.

    Temperature and humidity trend upward with ``count`` plus random noise;
    latitude/longitude jitter around a fixed San Jose location.

    Args:
        count: Reading index; scales the simulated temperature/humidity.

    Returns:
        Tuple of (temp, hum, zip_code, lat, lon) where everything except
        zip_code is a formatted string.
    """
    simulated_temp = 20 + 0.2 * count + (random.random() * 15)
    simulated_humidity = 60 + 0.3 * count + (random.random() * 20)
    temp = '{0:0.2f}'.format(simulated_temp)
    hum = '{0:0.2f}'.format(simulated_humidity)
    sensorZipCode = 95192  # "94043"
    # Jitter the fixed base coordinates by up to 0.01 degrees.
    lat_degrees = 37.3382082 + (random.random() / 100)  # "37.421655"
    lon_degrees = -121.8863286 + (random.random() / 100)  # "-122.085637"
    lat_formatted = '{0:0.6f}'.format(lat_degrees)
    lon_formatted = '{0:0.6f}'.format(lon_degrees)
    return (temp, hum, sensorZipCode, lat_formatted, lon_formatted)
def createJSON(reg_id, dev_id, timestamp, zip, lat, long, temperature, humidity, img_file):
    """Serialize one telemetry sample as a JSON string.

    Parameter names (including ``zip``/``long``, which shadow builtins) are
    kept unchanged for backward compatibility with keyword callers.
    """
    return json.dumps({
        'registry_id' : reg_id,
        'device_id' : dev_id,
        'timecollected' : timestamp,
        'zipcode' : zip,
        'latitude' : lat,
        'longitude' : long,
        'temperature' : temperature,
        'humidity' : humidity,
        'image_file' : img_file
    })
def storage_mqtt_device_demo(args):
    """Upload local .jpg files to Cloud Storage and publish telemetry JSON.

    For every .jpg in ``args.imagefolder_path``, uploads the image to the
    'cmpelkk_imagetest' bucket as img<N>.jpg, then publishes a JSON payload
    (simulated sensor reading plus the blob name) to the device's MQTT topic.
    """
    # [START iot_mqtt_run]
    global minimum_backoff_time
    global MAXIMUM_BACKOFF_TIME
    # Publish to the events or state topic based on the flag.
    sub_topic = 'events' if args.message_type == 'event' else 'state'
    mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
    jwt_iat = datetime.datetime.utcnow()
    jwt_exp_mins = args.jwt_expires_minutes
    client = get_client(
        args.project_id, args.cloud_region, args.registry_id,
        args.device_id, args.private_key_file, args.algorithm,
        args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
    # Instantiates a google cloud client
    # Instantiates a client (authenticated via an explicit service account)
    storage_client = storage.Client.from_service_account_json(args.service_account_json)
    #storage_client = storage.Client()
    #upload image to an existing bucket
    bucketexist = storage_client.bucket('cmpelkk_imagetest')
    # Running image counter; also feeds the simulated sensor trend.
    i = 0
    path = args.imagefolder_path#'/Users/lkk/Documents/GoogleCloud/iotpython/data'
    for filename in os.listdir(path):
        if filename.endswith(".jpg"):
            print(filename)#os.path.join(directory, filename))
            i+=1
            # Blobs are named sequentially in the bucket: img1.jpg, img2.jpg, ...
            bucketfilename= "img%s.jpg" % i
            print(bucketfilename)
            blobexist = bucketexist.blob(bucketfilename)
            filepathlocal = os.path.join(path, filename)
            print(filepathlocal)
            blobexist.upload_from_filename(filepathlocal)
            # Process network events.
            client.loop()
            currentTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
            (temp, hum, sensorZipCode, sensorLat, sensorLong) = read_sensor(i)
            #(id, timestamp, zip, lat, long, temperature, humidity, img_file)
            payloadJSON = createJSON(args.registry_id, args.device_id, currentTime, sensorZipCode, sensorLat, sensorLong, temp, hum, bucketfilename)
            #payload = '{}/{}-image-{}'.format(args.registry_id, args.device_id, i)
            print('Publishing message {}/: \'{}\''.format(
                i, payloadJSON))
            # Publish "payload" to the MQTT topic. qos=1 means at least once
            # delivery. Cloud IoT Core also supports qos=0 for at most once
            # delivery.
            client.publish(mqtt_topic, payloadJSON, qos=1)
            # Send events every second. State should not be updated as often
            time.sleep(1)
            continue
        else:
            continue
def bigquery_mqtt_device_demo(args):
"""Connects a device, sends data, and receives data."""
# [START iot_mqtt_run]
global minimum_backoff_time
global MAXIMUM_BACKOFF_TIME
# Publish to the events or state topic based on the flag.
sub_topic = 'events' if args.message_type == 'event' else 'state'
mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)
jwt_iat = datetime.datetime.utcnow()
jwt_exp_mins = args.jwt_expires_minutes
client = get_client(
args.project_id, args.cloud_region, args.registry_id,
args.device_id, args.private_key_file, args.algorithm,
args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)
# Instantiates a google cloud client
# Instantiates a client
#storage_client = storage.Client.from_service_account_json(args.service_account_json)
#bucketexist = storage_client.bucket('cmpelkk_imagetest')
i = 0
for i in range(1, args.num_messages + 1):
bucketfilename= "img%s.jpg" % i
client.loop()
currentTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
(temp, hum, sensorZipCode, sensorLat, sensorLong) = read_sensor(i)
#(id, timestamp, zip, lat, long, temperature, humidity, img_file)
payloadJSON = createJSON(args.registry_id, args.device_id, currentTime, sensorZipCode, sensorLat, sensorLong, temp, hum, bucketfilename)
#payload = '{}/{}-image-{}'.format(args.registry_id, args.device_id, i)
print('Publishing message {}/: \'{}\''.format(
i, payloadJSON))
# Publish "payload" to the MQTT topic. qos=1 means at least once
# delivery. Cloud IoT Core also supports qos=0 for at most once
# delivery.
client.publish(mqtt_topic, payloadJSON, qos=1)
# Send | |
# -*- coding: utf-8 -*-
import functools
import inspect
import os
import sys
from types import MethodType
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union
import six
from brewtils.choices import process_choices
from brewtils.display import resolve_form, resolve_schema, resolve_template
from brewtils.errors import PluginParamError, _deprecate
from brewtils.models import Command, Parameter, Resolvable
if sys.version_info.major == 2:
from funcsigs import signature, Parameter as InspectParameter # noqa
else:
from inspect import signature, Parameter as InspectParameter # noqa
# Explicit public API: the decorators exported by this module.
__all__ = [
    "client",
    "command",
    "parameter",
    "parameters",
    "system",
]
def client(
    _wrapped=None,  # type: Type
    bg_name=None,  # type: Optional[str]
    bg_version=None,  # type: Optional[str]
):
    # type: (...) -> Type
    """Class decorator that marks a class as a beer-garden Client

    Using this decorator is no longer strictly necessary: the parsing logic
    it once held now lives in ``parse_client``, and this exists mainly for
    back-compatibility. It does keep linters quiet about the special
    attributes below, which are placeholders until the client instance is
    assigned to a Plugin:

    * ``_bg_name``: an optional system name
    * ``_bg_version``: an optional system version
    * ``_bg_commands``: holds all registered commands
    * ``_current_request``: Reference to the currently executing request

    Args:
        _wrapped: The class to decorate. Positional; don't set explicitly.
        bg_name: Optional plugin name
        bg_version: Optional plugin version

    Returns:
        The decorated class
    """
    # Invoked with keyword arguments only: defer until the class arrives.
    if _wrapped is None:
        return functools.partial(client, bg_name=bg_name, bg_version=bg_version)  # noqa
    # Pre-declare the special attributes so linters don't complain.
    for attr_name, attr_value in (
        ("_bg_name", bg_name),
        ("_bg_version", bg_version),
        ("_bg_commands", []),
        ("_current_request", None),
    ):
        setattr(_wrapped, attr_name, attr_value)
    return _wrapped
def command(
    _wrapped=None,  # type: Union[Callable, MethodType]
    description=None,  # type: Optional[str]
    parameters=None,  # type: Optional[List[Parameter]]
    command_type="ACTION",  # type: str
    output_type="STRING",  # type: str
    schema=None,  # type: Optional[Union[dict, str]]
    form=None,  # type: Optional[Union[dict, list, str]]
    template=None,  # type: Optional[str]
    icon_name=None,  # type: Optional[str]
    hidden=False,  # type: Optional[bool]
    metadata=None,  # type: Optional[Dict]
):
    """Decorator for specifying Command details

    For example:

    .. code-block:: python

        @command(output_type='JSON')
        def echo_json(self, message):
            return message

    Args:
        _wrapped: The function to decorate. Positional; don't set explicitly.
        description: The command description. Defaults to the first line of
            the method docstring.
        parameters: A list of Command parameters. Prefer @parameter
            decorators; anything given here is merged with decorator-sourced
            and signature-inferred Parameters.
        command_type: The command type. Valid options are Command.COMMAND_TYPES.
        output_type: The output type. Valid options are Command.OUTPUT_TYPES.
        schema: A custom schema definition.
        form: A custom form definition.
        template: A custom template definition.
        icon_name: The icon name (FontAwesome or Glyphicon).
        hidden: Whether the command is hidden on the user interface.
        metadata: Free-form dictionary

    Returns:
        The decorated function
    """
    # The same keyword set feeds both the deferred partial and the Command.
    command_kwargs = dict(
        description=description,
        parameters=parameters,
        command_type=command_type,
        output_type=output_type,
        schema=schema,
        form=form,
        template=template,
        icon_name=icon_name,
        hidden=hidden,
        metadata=metadata,
    )
    # Invoked with arguments only: defer until the function arrives.
    if _wrapped is None:
        return functools.partial(command, **command_kwargs)
    # Python 2 compatibility: attach to the underlying function of a method.
    target = _wrapped.__func__ if hasattr(_wrapped, "__func__") else _wrapped
    target._command = Command(**command_kwargs)
    return _wrapped
def parameter(
    _wrapped=None,  # type: Union[Callable, MethodType, Type]
    key=None,  # type: str
    type=None,  # type: Optional[Union[str, Type]]
    multi=None,  # type: Optional[bool]
    display_name=None,  # type: Optional[str]
    optional=None,  # type: Optional[bool]
    default=None,  # type: Optional[Any]
    description=None,  # type: Optional[str]
    choices=None,  # type: Optional[Union[Callable, Dict, Iterable, str]]
    parameters=None,  # type: Optional[List[Parameter]]
    nullable=None,  # type: Optional[bool]
    maximum=None,  # type: Optional[int]
    minimum=None,  # type: Optional[int]
    regex=None,  # type: Optional[str]
    form_input_type=None,  # type: Optional[str]
    type_info=None,  # type: Optional[dict]
    is_kwarg=None,  # type: Optional[bool]
    model=None,  # type: Optional[Type]
):
    """Decorator for specifying Parameter details

    For example::

        @parameter(
            key="message",
            description="Message to echo",
            optional=True,
            type="String",
            default="Hello, World!",
        )
        def echo(self, message):
            return message

    Args:
        _wrapped: The function to decorate. Positional; don't set explicitly.
        key: Parameter identifier. Must match an argument name when the
            decorated object is a method.
        type: String indicating the type to use for this parameter.
        multi: Whether this parameter is a multi (see documentation).
        display_name: Label shown in the user interface.
        optional: Whether this parameter may be omitted.
        default: Value assigned when not overridden in a request.
        description: Additional text shown in the user interface.
        choices: List or dictionary of allowed values (see documentation).
        parameters: Any nested parameters. See also: the 'model' argument.
        nullable: Whether this parameter is allowed to be null.
        maximum: Maximum value of the parameter.
        minimum: Minimum value of the parameter.
        regex: Regular-expression constraint on the parameter.
        form_input_type: Form input field type (e.g. textarea); strings only.
        type_info: Type-specific information. Mostly reserved for future use.
        is_kwarg: Whether this parameter is part of the decorated function's
            kwargs. Only applies when the decorated object is a method.
        model: Class used as a model for this parameter. Must be a Python
            type object, not an instance.

    Returns:
        The decorated function
    """
    # The same keyword set feeds both the deferred partial and the Parameter.
    parameter_kwargs = dict(
        key=key,
        type=type,
        multi=multi,
        display_name=display_name,
        optional=optional,
        default=default,
        description=description,
        choices=choices,
        parameters=parameters,
        nullable=nullable,
        maximum=maximum,
        minimum=minimum,
        regex=regex,
        form_input_type=form_input_type,
        type_info=type_info,
        is_kwarg=is_kwarg,
        model=model,
    )
    # Invoked with arguments only: defer until the decorated object arrives.
    if _wrapped is None:
        return functools.partial(parameter, **parameter_kwargs)
    new_parameter = Parameter(**parameter_kwargs)
    # Python 2 compatibility: attach to the underlying function of a method.
    target = _wrapped.__func__ if hasattr(_wrapped, "__func__") else _wrapped
    target.parameters = getattr(_wrapped, "parameters", [])
    target.parameters.insert(0, new_parameter)
    return _wrapped
def parameters(*args, **kwargs):
    """
    .. deprecated:: 3.0
        Will be removed in version 4.0. Please use ``@command`` instead.

    Decorator for specifying multiple Parameter definitions at once

    This can be useful for commands which have a large number of complicated
    parameters but aren't good candidates for a Model.

    .. code-block:: python

        @parameter(**params[cmd1][param1])
        @parameter(**params[cmd1][param2])
        @parameter(**params[cmd1][param3])
        def cmd1(self, **kwargs):
            pass

    Can become:

    .. code-block:: python

        @parameters(params[cmd1])
        def cmd1(self, **kwargs):
            pass

    Args:
        *args (iterable): Positional arguments
            The first (and only) positional argument must be a list containing
            dictionaries that describe parameters.
        **kwargs: Used for bookkeeping. Don't set any of these yourself!

    Returns:
        func: The decorated function
    """
    # Two-phase protocol: the first call receives only the parameter list and
    # returns a partial; applying that partial to the function triggers the
    # second call with (param_list, func) and the _partial marker set.
    # This is the first invocation
    if not kwargs.get("_partial"):
        # Need the callable check to prevent applying the decorator with no parenthesis
        if len(args) == 1 and not callable(args[0]):
            return functools.partial(parameters, args[0], _partial=True)
        raise PluginParamError("@parameters takes a single argument")
    # This is the second invocation
    else:
        if len(args) != 2:
            raise PluginParamError(
                "Incorrect number of arguments for parameters partial call. Did you "
                "set _partial=True? If so, please don't do that. If not, please let "
                "the Beergarden team know how you got here!"
            )
    _deprecate(
        "Looks like you're using the '@parameters' decorator. This is now deprecated - "
        "for passing bulk parameter definitions it's recommended to use the @command "
        "decorator parameters kwarg, like this: @command(parameters=[...])"
    )
    params = args[0]
    _wrapped = args[1]
    if not callable(_wrapped):
        raise PluginParamError("@parameters must be applied to a callable")
    # Delegate each definition dict to the single-parameter decorator; a
    # non-iterable (or non-dict entries) surfaces as TypeError here.
    try:
        for param in params:
            parameter(_wrapped, **param)
    except TypeError:
        raise PluginParamError("@parameters arg must be an iterable of dictionaries")
    return _wrapped
def _parse_client(client):
    # type: (object) -> List[Command]
    """Collect Beer-garden Commands from a client object.

    Walks everything ``dir()`` reports on the client, parses each attribute
    with ``_parse_method``, and keeps the ones that yield a Command (i.e.
    methods carrying decorator-added metadata).
    """
    parsed = (_parse_method(getattr(client, attr)) for attr in dir(client))
    return [cmd for cmd in parsed if cmd]
def _parse_method(method):
# type: (MethodType) -> Optional[Command]
"""Parse a method object as a Beer-garden command | |
(conv6.size())
print (len(ret['gt_bboxes']))
print (ret['gt_bboxes'][0].size())
print (ret['gt_bboxes'][1].size())
batch_size = len(ret['gt_bboxes'])
ys_list = ret["gt_bboxes"]
y_samples_list = []
q_y_samples_list = []
q_ys_list = []
for i in range(batch_size):
# (ys_list[i] has shape: (num_gt_bboxes_i, 7))
# print (ys_list[i].size())
y_samples_zero, q_y_samples, q_ys = sample_gmm_centered(stds, num_samples=self.num_samples)
y_samples_zero = y_samples_zero.cuda() # (shape: (num_samples, 7))
q_y_samples = q_y_samples.cuda() # (shape: (num_samples))
y_samples = ys_list[i].unsqueeze(1) + y_samples_zero.unsqueeze(0) # (shape: (num_gt_bboxes_i, num_samples, 7))
y_samples[:, :, 3:6] = min_hwl + F.relu(y_samples[:, :, 3:6] - min_hwl)
y_samples[:, :, 6] = wrapToPi(y_samples[:, :, 6])
q_y_samples = q_y_samples.unsqueeze(0)*torch.ones(y_samples.size(0), y_samples.size(1)).cuda() # (shape: (num_gt_bboxes_i, num_samples))
q_ys = q_ys[0]*torch.ones(ys_list[i].size(0)).cuda() # (shape: (num_gt_bboxes_i))
# print (ys_list[i][0])
# print (y_samples_list[i][0, 0:5])
y_samples = y_samples.view(-1, 7) # (shape: (num_gt_bboxes_i*num_samples, 7)))
q_y_samples = q_y_samples.view(-1) # (shape: (num_gt_bboxes_i*num_samples)))
y_samples_list.append(y_samples)
q_y_samples_list.append(q_y_samples)
q_ys_list.append(q_ys)
# print (y_samples_list[i].size())
# print (q_y_samples_list[i].size())
# print (q_ys_list[i].size())
# print ("%%%%%")
ys_features_list = []
y_samples_features_list = []
for i in range(batch_size):
# (conv6 has shape: (batch_size, 256, 200, 176))
# (ys_list[i] has shape: (num_gt_bboxes_i, 7))
# (y_samples_list[i] has shape: (num_gt_bboxes_i*num_samples, 7))
# print (conv6.size())
# print (ys_list[i].size())
# print (y_samples_list[i].size())
(ys_pixel_xs, ys_pixel_ys) = self.gen_grid_fn(ys_list[i][:, [0, 1, 3, 4, 6]])
# (both have shape: (4, 7, num_gt_bboxes_i))
# print (ys_pixel_xs.size())
# print (ys_pixel_ys.size())
ys_pixel_xs = ys_pixel_xs.permute(2, 0, 1).contiguous() # (shape: (num_gt_bboxes_i, 4, 7))
ys_pixel_ys = ys_pixel_ys.permute(2, 0, 1).contiguous() # (shape: (num_gt_bboxes_i, 4, 7))
# print (ys_pixel_xs.size())
# print (ys_pixel_ys.size())
ys_pixel_coords = torch.cat([ys_pixel_xs.unsqueeze(3), ys_pixel_ys.unsqueeze(3)], 3)
# (shape: (num_gt_bboxes_i, 4, 7, 2))
# print (ys_pixel_coords.size())
(y_samples_pixel_xs, y_samples_pixel_ys) = self.gen_grid_fn(y_samples_list[i][:, [0, 1, 3, 4, 6]])
# (both have shape: (4, 7, num_gt_bboxes_i*num_samples))
# print (y_samples_pixel_xs.size())
# print (y_samples_pixel_ys.size())
y_samples_pixel_xs = y_samples_pixel_xs.permute(2, 0, 1).contiguous() # (shape: (num_gt_bboxes_i*num_samples, 4, 7))
y_samples_pixel_ys = y_samples_pixel_ys.permute(2, 0, 1).contiguous() # (shape: (num_gt_bboxes_i*num_samples, 4, 7))
# print (y_samples_pixel_xs.size())
# print (y_samples_pixel_ys.size())
y_samples_pixel_coords = torch.cat([y_samples_pixel_xs.unsqueeze(3), y_samples_pixel_ys.unsqueeze(3)], 3)
# (shape: (num_gt_bboxes_i*num_samples, 4, 7, 2))
# print (y_samples_pixel_coords.size())
conv6_i = conv6[i].unsqueeze(0) # (shape: (1, 256, 200, 176))
# print (conv6_i.size())
conv6_i_ys = conv6_i.expand(ys_pixel_coords.size(0), -1, -1, -1)
# (shape: (num_gt_bboxes_i, 256, 200, 176))
# print (conv6_i_ys.size())
ys_feature_maps = bilinear_interpolate_torch_gridsample(conv6_i_ys, ys_pixel_coords)
# (shape: (num_gt_bboxes_i, 256, 4, 7))
# print (ys_feature_maps.size())
ys_features = ys_feature_maps.view(ys_feature_maps.size(0), -1)
# (shape: (num_gt_bboxes_i, 7168)) (7168 = 256*4*7)
# print (ys_features.size())
ys_features_list.append(ys_features)
conv6_i_y_samples = conv6_i.expand(y_samples_pixel_coords.size(0), -1, -1, -1)
# (shape: (num_gt_bboxes_i*num_samples, 256, 200, 176))
# print (conv6_i_y_samples.size())
y_samples_feature_maps = bilinear_interpolate_torch_gridsample(conv6_i_y_samples, y_samples_pixel_coords)
# (shape: (num_gt_bboxes_i*num_samples, 256, 4, 7))
# print (y_samples_feature_maps.size())
y_samples_features = y_samples_feature_maps.view(y_samples_feature_maps.size(0), -1)
# (shape: (num_gt_bboxes_i*num_samples, 7168)) (7168 = 256*4*7)
# print (y_samples_features.size())
y_samples_features_list.append(y_samples_features)
# print (ys_features_list[0].size())
# print (ys_features_list[1].size())
ys_features = torch.cat(ys_features_list, 0)
# (shape: (num_gt_bboxes_in_batch, 7168))
# print (ys_features.size())
# print (y_samples_features_list[0].size())
# print (y_samples_features_list[1].size())
y_samples_features = torch.cat(y_samples_features_list, 0)
# (shape: (num_gt_bboxes_in_batch*num_samples, 7168))
# print (y_samples_features.size())
features = torch.cat([ys_features, y_samples_features], 0)
# (shape: (num_gt_bboxes_in_batch + num_gt_bboxes_in_batch*num_samples, 7168))
# print (features.size())
features = F.relu(self.ebm_fc1(features)) # (shape: (num_gt_bboxes_in_batch + num_gt_bboxes_in_batch*num_samples, 1024))
# print (features.size())
features = F.relu(self.ebm_fc2(features)) # (shape: (num_gt_bboxes_in_batch + num_gt_bboxes_in_batch*num_samples, 1024))
# print (features.size())
fs = self.ebm_fc3(features) # (shape: (num_gt_bboxes_in_batch + num_gt_bboxes_in_batch*num_samples, 1))
# print (fs.size())
fs = fs.squeeze(1) # (shape: (num_gt_bboxes_in_batch + num_gt_bboxes_in_batch*num_samples))
# print (fs.size())
ys_fs = fs[0:ys_features.size(0)]
# (shape: (num_gt_bboxes_in_batch))
# print (ys_fs.size())
y_samples_fs = fs[ys_features.size(0):]
# (shape: (num_gt_bboxes_in_batch*num_samples))
# print (y_samples_fs.size())
y_samples_fs = y_samples_fs.view(-1, self.num_samples)
# (shape: (num_gt_bboxes_in_batch, num_samples))
# print (y_samples_fs.size())
q_ys = torch.cat(q_ys_list, 0)
# (shape: (num_gt_bboxes_in_batch))
# print (q_ys.size())
q_y_samples = torch.cat(q_y_samples_list, 0)
# (shape: (num_gt_bboxes_in_batch*num_samples))
# print (q_y_samples.size())
q_y_samples = q_y_samples.view(-1, self.num_samples)
# (shape: (num_gt_bboxes_in_batch, num_samples))
# print (q_y_samples.size())
# print ("//////////////////")
# (ys_fs has shape: (num_gt_bboxes_in_batch))
# (y_samples_fs has shape: (num_gt_bboxes_in_batch, num_samples))
# (q_ys has shape: (num_gt_bboxes_in_batch))
# (q_y_samples has shape: (num_gt_bboxes_in_batch, num_samples))
print (ys_fs.size())
print (y_samples_fs.size())
print (q_ys.size())
print (q_y_samples.size())
# print (ys_fs[0])
# print (y_samples_fs[0])
# print (q_ys)
# print (q_y_samples[0])
print ("ys_fs - mean: %f, max: %f, min: %f" % (torch.mean(ys_fs).item(), torch.max(ys_fs).item(), torch.min(ys_fs).item()))
print ("y_samples_fs - mean: %f, max: %f, min: %f" % (torch.mean(y_samples_fs).item(), torch.max(y_samples_fs).item(), torch.min(y_samples_fs).item()))
f_samples = y_samples_fs # (shape: (num_gt_bboxes_in_batch, num_samples))
p_N_samples = q_y_samples # (shape: (num_gt_bboxes_in_batch, num_samples))
f_0 = ys_fs # (shape: (num_gt_bboxes_in_batch))
p_N_0 = q_ys # (shape: (num_gt_bboxes_in_batch))
exp_vals_0 = f_0-torch.log(p_N_0 + 0.0) # (shape: (num_gt_bboxes_in_batch))
exp_vals_samples = f_samples-torch.log(p_N_samples + 0.0) # (shape: (num_gt_bboxes_in_batch, num_samples))
exp_vals = torch.cat([exp_vals_0.unsqueeze(1), exp_vals_samples], dim=1) # (shape: (num_gt_bboxes_in_batch, 1+num_samples))
ebm_loss = -torch.mean(exp_vals_0 - torch.logsumexp(exp_vals, dim=1))
losses.update(dict(loss_ebm=ebm_loss,))
print ("{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}")
return losses
def forward_test(self, img, img_meta, **kwargs):
with torch.no_grad():
batch_size = len(img_meta) # (batch_size = 1)
# print (batch_size)
ret = self.merge_second_batch(kwargs)
# (ret["voxels"] has shape: (num_voxels, 4)) (num_voxels is different for different examples) (for batch_size = 2, num_voxels is typically 35000 - 45000)
# (ret["coordinates"] has shape: (num_voxels, 4))
# print (ret["voxels"].size())
# print (ret["coordinates"].size())
vx = self.backbone(ret['voxels'], ret['num_points'])
# (vx has shape: (num_voxels, 4)) (vx is just identical to ret["voxels"]? seems so)
# print (vx.size())
(x, conv6) = self.neck(vx, ret['coordinates'], batch_size, is_test=True)
# (x has shape: (batch_size, 256, 200, 176))
# (conv6 has shape: (batch_size, 256, 200, 176))
# print (x.size())
# print (conv6.size())
rpn_outs = self.rpn_head.forward(x)
# (rpn_outs is a list of 3 elements)
# (rpn_outs[0] has shape: (batch_size, 200, 176, 14)) (14 = 7*num_anchor_per_loc) (x, y, z, h, w, l, theta)
# (rpn_outs[1] has shape: (batch_size, 200, 176, 2)) (2 = 1*num_anchor_per_loc) (conf_score) (just one class (Car))
# (rpn_outs[2] has shape: (batch_size, 200, 176, 4)) (4 = 2*num_anchor_per_loc) (classification of heading direction (forward or backward))
# print (len(rpn_outs))
# print (rpn_outs[0].size())
# print (rpn_outs[1].size())
# print (rpn_outs[2].size())
guided_anchors = self.rpn_head.get_guided_anchors(*rpn_outs, ret['anchors'], ret['anchors_mask'], None, thr=.1)
# (guided_anchors is a list of batch_size tensors)
# (guided_anchors[i] has shape: (num_guided_anchors_in_pc_i, 7))
# # (num_guided_anchors_in_pc_i is different for different i:s and for different examples)
# # (these are the predicted bboxes (with residuals added to the anchors) with conf_score > 0.1?)
# print (len(guided_anchors))
# print (guided_anchors[0].size())
bbox_score, guided_anchors = self.extra_head(conv6, guided_anchors, is_test=True)
# (bbox_score is a list of batch_size tensors)
# # (bbox_score[i] has shape: (num_guided_anchors_in_pc_i))
# (guided_anchors is a list of batch_size tensors)
# # (guided_anchors[i] has shape: (num_guided_anchors_in_pc_i, 7))
# print (len(bbox_score))
# print (bbox_score[0].size())
# print (bbox_score[0])
# print (len(guided_anchors))
# print (guided_anchors[0].size())
if self.full_cfg is not None:
if self.full_cfg.USE_EBM and (self.test_cfg.extra.EBM_guided or self.test_cfg.extra.EBM_refine):
# print ("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
# (conv6 has shape: (batch_size, 256, 200, 176))
# (guided_anchors is a list of batch_size tensors)
# (guided_anchors[i] has shape: (num_guided_anchors_in_pc_i, 7))
batch_size = len(guided_anchors)
ys_list = guided_anchors
ys_features_list = []
for i in range(batch_size):
# (conv6 has shape: (batch_size, 256, 200, 176))
# (ys_list[i] has shape: (num_guided_anchors_in_pc_i, 7))
# print (conv6.size())
# print (ys_list[i].size())
(ys_pixel_xs, ys_pixel_ys) = self.gen_grid_fn(ys_list[i][:, [0, 1, 3, 4, 6]])
# (both have shape: (4, 7, num_guided_anchors_in_pc_i))
# print (ys_pixel_xs.size())
# print (ys_pixel_ys.size())
ys_pixel_xs = ys_pixel_xs.permute(2, 0, 1).contiguous() # (shape: (num_guided_anchors_in_pc_i, 4, 7))
ys_pixel_ys = ys_pixel_ys.permute(2, 0, 1).contiguous() # (shape: (num_guided_anchors_in_pc_i, 4, 7))
# print (ys_pixel_xs.size())
# print (ys_pixel_ys.size())
ys_pixel_coords = torch.cat([ys_pixel_xs.unsqueeze(3), ys_pixel_ys.unsqueeze(3)], 3)
# (shape: (num_guided_anchors_in_pc_i, 4, 7, 2))
# print (ys_pixel_coords.size())
conv6_i = conv6[i].unsqueeze(0) # (shape: (1, 256, 200, 176))
# print (conv6_i.size())
conv6_i_ys = conv6_i.expand(ys_pixel_coords.size(0), -1, -1, -1)
# (shape: (num_guided_anchors_in_pc_i, 256, 200, 176))
# print (conv6_i_ys.size())
if conv6_i_ys.size(0) < 150:
ys_feature_maps = bilinear_interpolate_torch_gridsample(conv6_i_ys, ys_pixel_coords)
# (shape: (num_guided_anchors_in_pc_i, 256, 4, 7))
# print (ys_feature_maps.size())
else:
num_iters = int(math.floor(conv6_i_ys.size(0)/150.0))
ys_feature_maps_list = []
for iter in range(num_iters):
ys_feature_maps_list.append(bilinear_interpolate_torch_gridsample(conv6_i_ys[(150*iter):(150*(iter+1))], ys_pixel_coords[(150*iter):(150*(iter+1))]))
ys_feature_maps_list.append(bilinear_interpolate_torch_gridsample(conv6_i_ys[(150*num_iters):], ys_pixel_coords[(150*num_iters):]))
ys_feature_maps = torch.cat(ys_feature_maps_list, 0)
# (shape: (num_guided_anchors_in_pc_i, 256, 4, 7))
ys_features = ys_feature_maps.view(ys_feature_maps.size(0), -1)
# (shape: (num_guided_anchors_in_pc_i, 7168)) (7168 = 256*4*7)
# print (ys_features.size())
ys_features_list.append(ys_features)
fs_list = []
for i in range(batch_size):
features = F.relu(self.ebm_fc1(ys_features_list[i])) # (shape: (num_guided_anchors_in_pc_i, 1024))
# print (features.size())
features = F.relu(self.ebm_fc2(features)) # (shape: (num_guided_anchors_in_pc_i, 1024))
# print (features.size())
fs = self.ebm_fc3(features) # (shape: (num_guided_anchors_in_pc_i, 1))
# print (fs.size())
fs = fs.squeeze(1) # (shape: (num_guided_anchors_in_pc_i))
# print (fs.size())
fs_list.append(fs)
# (fs_list is a list of batch_size tensors)
# # (fs_list[i] has shape: (num_guided_anchors_in_pc_i))
# print (len(fs_list))
# print (fs_list[0].size())
# print (fs_list[0])
if self.test_cfg.extra.EBM_guided:
det_bboxes, det_scores, det_fs = self.extra_head.get_rescore_bboxes_ebm_guided(
guided_anchors, bbox_score, fs_list, img_meta, self.test_cfg.extra)
else:
det_bboxes, det_scores = self.extra_head.get_rescore_bboxes(
guided_anchors, bbox_score, img_meta, self.test_cfg.extra)
# (det_scores is a list of batch_size | |
# Repository: saidelike/abyss
from abyss import abyss_filter_t
import ida_lines, ida_kernwin
import sys, re
# experimental code cloned from https://github.com/pfalcon/ctopy
"""
ctopy -- a quick and dirty C-to-Python translator.
Libraries not mapped that theoretically could be: curses.panel, dbm,
md5, popen2, pty, resource, sha, subprocess, syslog, time. Some of these
would need more elaborate machinery for method translation.
Python library bindings are as of 2.6a0.
"""
# Verbosity flag for the translator (0 = quiet).
debug = 0
# Identifiers to be treated as strings during translation.
# NOTE(review): left empty here — presumably populated elsewhere at runtime; confirm.
stringify = []
# C functions that take a printf-style format string followed by varargs.
printflike = ["printf", "sprintf", "vfprintf",
              "printw", "mvprintw", "wprintw", "mvwprintw"]
# C type names recognized by the translator (also used to build the 'type' shorthand below).
types = ["void", "int", "bool", "char", "short", "double", "long", "float",
         "time_t", "FILE", "WINDOW", "uint8_t", "uint16_t", "uint32_t"]
# Named regex fragments interpolated into the translation patterns via
# "%(name)s" string formatting (see the funmappings table below).
shorthands = {
    'id' : r"[a-zA-Z_][a-zA-Z0-9._]*",  # C identifier (dots allowed for member access)
    'exp' : r"[a-zA-Z0-9._\-+\*/]+",  # simple arithmetic expression
    'type' : r"\b" + r"\b|\b".join(types) + r"\b",  # any known C type name
    'class' : r"\b__dummyclass__\b",  # placeholder class marker
    'ind' : r"(\n[ \t]*)", # Whitespace at start of line
    'eol' : r"[ \t]*(?=\n|\Z)", # Whitespace to end of line or comment
    'arg' : r"([^,]+),\s*", # initial or medial argument
    'farg' : r"([^)]+)", # final argument
}
# C functions and constants to be mapped into Python standard library bindings.
funmappings = (
# File object methods (from C stdlib).
(r"\bfclose\(%(farg)s\)", r"\1.close()", None),
(r"\bfflush\(%(farg)s\)", r"\1.flush()", None),
(r"\bfileno\(%(farg)s\)", r"\1.fileno()", None),
(r"\bfprintf\(%(arg)s\)", r"\1.write()", None),
(r"\bfseek\(%(arg)s", r"\1.seek(", None),
(r"\bftell\(%(farg)s\)", r"\1.tell()", None),
(r"\bftruncate\(%(arg)s", r"\1.truncate(", None),
# Python atexit library
(r"\batexit\(", r"atexit.register(", "atexit"),
# Python crypt library
(r"\bcrypt\(", r"crypt.crypt(", "crypt"),
# Curses library. Below are all function calls listed in the
# ncurses(3) version 5.5 manual page in the order they are listed.
# The ones we don't translate have been left in as comments,
# pending future improvements. The largest category of things not
# translated is the wide-character support; it's possible there's
# an easy way to map this, but I don't have a need for it.
# Mappings marked "EXCEPTION" violate the convention that the C names
# of window methods begin with 'w'. Mappings marked "NONTRIVIAL"
# map a C entry point into a Python entry point with a different name,
# usually to throw away a length argument Python doesn't need or because
# the Python method handles a position as the first two arguments.
(r"\bCOLOR_PAIR\(", "curses.color_pair(", "curses"),
(r"\bPAIR_NUMBER\(", "curses.pair_number(", "curses"),
#_nc_tracebits curs_trace(3X)*
#_traceattr curs_trace(3X)*
#_traceattr2 curs_trace(3X)*
#_tracechar curs_trace(3X)*
#_tracechtype curs_trace(3X)*
#_tracechtype2 curs_trace(3X)*
#_tracedump curs_trace(3X)*
#_tracef curs_trace(3X)*
#_tracemouse curs_trace(3X)*
#add_wch curs_add_wch(3X)
#add_wchnstr curs_add_wchstr(3X)
#add_wchstr curs_add_wchstr(3X)
(r"\baddch\(", r"stdscr.addch(", "curses"),
#addchnstr curs_addchstr(3X)
#addchstr curs_addchstr(3X)
(r"\baddnstr\(", r"stdscr.addnstr(", "curses"),
#addnwstr curs_addwstr(3X)
(r"\baddstr\(", r"stdscr.addstr(", "curses"),
#addwstr curs_addwstr(3X)
#assume_default_colors default_colors(3X)*
#attr_get curs_attr(3X)
(r"\battr_off\(", r"stdscr.attrof(", r"curses"),
(r"\battr_on\(", r"stdscr.attron(", r"curses"),
(r"\battr_set\(", r"stdscr.attrset(", r"curses"),
(r"\battroff\(", r"stdscr.attrof(", r"curses"),
(r"\battron\(", r"stdscr.attron(", r"curses"),
(r"\battrset\(", r"stdscr.attrset(", r"curses"),
(r"\bbaudrate\(", r"curses.baudrate(", r"curses"),
(r"\bbeep\(", r"curses.beep(", r"curses"),
(r"\bbkgd\(", r"stdscr.bkgd(", r"curses"),
(r"\bbkgdset\(", r"stsdcr.bkgdset(", r"curses"),
#bkgrnd curs_bkgrnd(3X)
#bkgrndset curs_bkgrnd(3X)
(r"\bborder\(", r"stdscr.border(", r"curses"),
#border_set curs_border_set(3X)
(r"\bbox\(%(arg)s", r"\1.box(", r"curses"), # EXCEPTION
#box_set curs_border_set(3X)
(r"\bcan_change_color\(", r"curses.can_change_color(", r"curses"),
(r"\bcbreak\(", r"curses.cbreak(", r"curses"),
#chgat curs_attr(3X)
(r"\bclear\(", r"stdscr.clear(", r"curses"),
(r"\bclearok\(", r"stdscr.clearok(", r"curses"),
(r"\bclrtobot\(", r"stdscr.clrtobot(", r"curses"),
(r"\bclrtoeol\(", r"stdscr.clrtoeol(", r"curses"),
(r"\bcolor_content\(", r"curses.color_content(", r"curses"),
#color_set curs_attr(3X)
#copywin curs_overlay(3X)
(r"\bcurs_set\(", r"curses.curs_set(", r"curses"),
#curses_version curs_extend(3X)*
(r"\bdef_prog_mode\(", r"curses.def_prog_mode(", r"curses"),
(r"\bdef_shell_mode\(", r"curses.def_shell_mode(", r"curses"),
#define_key define_key(3X)*
#del_curterm curs_terminfo(3X)
(r"\bdelay_output\(", r"curses.delay_output(", r"curses"),
(r"\bdelch\(", r"stdscr.delch(", r"curses"),
(r"\bdeleteln\(\)", r"stdscr.deleteln()", r"curses"),
#delscreen curs_initscr(3X)
#delwin curs_window(3X)
(r"\bderwin\(%(arg)s", r"\1.derwin(", "curses"), # EXCEPTION
(r"\bdoupdate\(\)", r"curses.doupdate()", "curses"),
#dupwin curs_window(3X)
(r"\becho\(", r"curses.echo(", "curses"),
#echo_wchar curs_add_wch(3X)
(r"\bechochar\(", r"stdscr.echochar(", "curses"),
(r"\bendwin\(", r"curses.endwin(", "curses"),
(r"\berase\(", r"stdscr.erase(", "curses"),
#erasechar curs_termattrs(3X)
#erasewchar curs_termattrs(3X)
(r"\bfilter\(\)", r"curses.filter()", "curses"),
(r"\bflash\(\)", r"curses.flash()", "curses"),
(r"\bflushinp\(\)", r"curses.flushinp()", "curses"),
#get_wch curs_get_wch(3X)
#get_wstr curs_get_wstr(3X)
(r"\bgetbegyx\(%(arg)s%(arg)s%(farg)s\)", # EXCEPTION
r"(\2, \3) = \1.getbegyx()", "curses"),
(r"\bgetbkgd\(", r"\1.getbkgd(", "curses"), # EXCEPTION
#getbkgrnd curs_bkgrnd(3X)
#getcchar curs_getcchar(3X)
(r"\bgetch\(", r"stdscr.getch(", "curses"),
(r"\bgetmaxyx\(%(arg)s", r"\1.getmaxyx(", "curses"), # EXCEPTION
(r"\bgetmouse\(%(farg)s\)", r"\1 = curses.getmouse()", "curses"),
#getn_wstr curs_get_wstr(3X)
(r"\bgetnstr\(%(arg)s%(farg)s\)", # NONTRIVIAL
r"\1 = stdscr.getstr()", "curses"),
(r"\bgetparyx\(%(arg)s%(arg)s%(farg)s\)", # EXCEPTION
r"(\2, \3) = \1.getparyx()", "curses"),
(r"\bgetsyx\(%(arg)s%(farg)s\)", r"(\1, \2) = curses.getsyx()", "curses"),
(r"\bgetstr\(\)", r"stdscr.getstr()", "curses"),
(r"\bgetyx\(%(arg)s%(arg)s%(farg)s\)", #EXCEPTION
r"(\2, \3) = \1.getyx()", "curses"),
(r"\bgetwin\(", r"curses.getwin(", "curses"),
(r"\bhalfdelay\(", r"curses.halfdelay(", "curses"),
(r"\bhas_colors\(", r"curses.has_colors(", "curses"),
(r"\bhas_ic\(", r"curses.has_ic(", "curses"),
(r"\bhas_il\(", r"curses.has_il(", "curses"),
(r"\bhas_key\(", r"curses.has_key(", "curses"),
(r"\bhline\(", r"stdscr.hline(", "curses"),
#hline_set curs_border_set(3X)
(r"\bidcok\(%(arg)s", r"\1.idcok(", "curses"), # EXCEPTION
(r"\bidlok\(%(arg)s", r"\1.idlok(", "curses"), # EXCEPTION
(r"\bimmedok\(%(arg)s", r"\1.immedok(", "curses"), # EXCEPTION
# in_wch curs_in_wch(3X)
# in_wchnstr curs_in_wchstr(3X)
# in_wchstr curs_in_wchstr(3X)
(r"\binch\(", r"stdscr.inch(", "curses"),
#inchnstr curs_inchstr(3X)
#inchstr curs_inchstr(3X)
(r"\binit_color\(", r"curses.init_color(", "curses"),
(r"\binit_pair\(", r"curses.init_pair(", "curses"),
(r"\binitscr\(", r"curses.initscr(", "curses"),
(r"\binnstr\(%(arg)s", r"\1.instr(", "curses"), # NONTRIVIAL
#innwstr curs_inwstr(3X)
#ins_nwstr curs_ins_wstr(3X)
#ins_wch curs_ins_wch(3X)
#ins_wstr curs_ins_wstr(3X)
(r"\binsch\(", r"stdscr.insch(", "curses"),
(r"\binsdelln\(", r"stdscr.insdelln(", "curses"),
(r"\binsertln\(", r"stdscr.insertln(", "curses"),
(r"\binsnstr\(", r"stdscr.insnstr(", "curses"),
(r"\binsstr\(", r"stdscr.insstr(", "curses"),
(r"\binstr\(%(farg)s\)", r"\1 = stdscr.instr()", "curses"),
(r"\bintrflush\(\)", r"curses.intrflush()", "curses"),
#inwstr curs_inwstr(3X)
(r"\bis_linetouched\(%(arg)s", r"\1.is_linetouched(", "curses"),# EXCEPTION
(r"\bis_wintouched\(%(farg)s\)", r"\1.is_wintouched()", "curses"),# EXCEPTION
(r"\bisendwin\(\)", r"curses.isendwin()", "curses"),
#key_defined key_defined(3X)*
#key_name curs_util(3X)
#keybound keybound(3X)*
(r"\bkeyname\(", r"curses.keyname(", "curses"),
#keyok keyok(3X)*
(r"\bkeypad\(%(arg)s", r"\1.keypad(", "curses"), # EXCEPTION
(r"\bkillchar\(\)", r"curses.killchar()", "curses"),
#killwchar curs_termattrs(3X)
(r"\bleaveok\(%(arg)s", r"\1.leaveok(", "curses"), # EXCEPTION
(r"\blongname\(\)", r"curses.longname()", "curses"),
#mcprint curs_print(3X)*
(r"\bmeta\(", r"curses.meta(", "curses"),
#mouse_trafo curs_mouse(3X)*
(r"\bmouseinterval\(", r"curses.mouseinterval(", "curses"),
(r"\bmousemask\(", r"curses.mousemask(", "curses"),
(r"\bmove\(", r"stdscr.move(", "curses"),
#mvadd_wch curs_add_wch(3X)
#mvadd_wchnstr curs_add_wchstr(3X)
#mvadd_wchstr curs_add_wchstr(3X)
(r"\bmvaddch\(", r"stdscr.addch(", "curses"), # NONTRIVIAL
#mvaddchnstr curs_addchstr(3X)
#mvaddchstr curs_addchstr(3X)
(r"\bmvaddnstr\(", r"stdscr.addnstr(", "curses"), # NONTRIVIAL
#mvaddnwstr curs_addwstr(3X)
(r"\bmvaddstr\(", r"stdscr.addstr(", "curses"), # NONTRIVIAL
#mvaddwstr curs_addwstr(3X)
#mvchgat curs_attr(3X)
#mvcur curs_terminfo(3X)
(r"\bmvdelch\(", r"stdscr.delch(", "curses"), # NONTRIVIAL
(r"\bmvderwin\(%(arg)s", r"\1.derwin(", "curses"), # NONTRIVIAL,EXCEPTION
#mvget_wch curs_get_wch(3X)
#mvget_wstr curs_get_wstr(3X)
(r"\bmvgetch\(%(arg)s", r"stdscr.getch(", "curses"), # NONTRIVIAL
#mvgetn_wstr curs_get_wstr(3X)
(r"\bmvgetnstr\(%(arg)s%(arg)s%(arg)s%(farg)s\)", # NONTRIVIAL
r"\3 = stdscr.getstr(\1, \2)", "curses"),
(r"\bmvgetstr\(%(arg)s%(arg)s%(farg)s\)", # NONTRIVIAL
r"\3 = stdscr.getstr(\1, \2)", "curses"),
(r"\bmvhline\(", r"stdscr.hline(", "curses"), # NONTRIVIAL
#mvhline_set curs_border_set(3X)
#mvin_wch curs_in_wch(3X)
#mvin_wchnstr curs_in_wchstr(3X)
#mvin_wchstr curs_in_wchstr(3X)
(r"\bmvinch\(", r"stdscr.inch(", "curses"), # NONTRIVIAL
(r"\bmvinchnstr\(", r"stdscr.instr(", "curses"), # NONTRIVIAL
(r"\bmvinchstr\(", r"stdscr.instr(", "curses"), # NONTRIVIAL
(r"\bmvinnstr\(", r"stdscr.instr(", "curses"), # NONTRIVIAL
#mvinnwstr curs_inwstr(3X)
#mvins_nwstr curs_ins_wstr(3X)
#mvins_wch curs_ins_wch(3X)
#mvins_wstr curs_ins_wstr(3X)
(r"\bmvinsch\(", r"stdscr.insch(", "curses"), # NONTRIVIAL
(r"\bmvinsnstr\(", r"stdscr.instr(", "curses"), # NONTRIVIAL
(r"\bmvinsstr\(", r"stdscr.instr(", "curses"), # NONTRIVIAL
(r"\bmvinstr\(", r"stdscr.instr(", "curses"), # NONTRIVIAL
#mvwinwstr curs_inwstr(3X)
(r"\bmvprintw\(", r"stdscr.addstr(", "curses"), # NONTRIVIAL
# mvscanw curs_scanw(3X)
(r"\bmvvline\(", r"stdscr.vline(", "curses"), # NONTRIVIAL
#mvvline_set curs_border_set(3X)
#mvwadd_wch curs_add_wch(3X)
#mvwadd_wchnstr curs_add_wchstr(3X)
#mvwadd_wchstr curs_add_wchstr(3X)
(r"\bmvwaddch\(%(arg)s", r"\1.addch(", "curses"), # NONTRIVIAL
#mvwaddchnstr curs_addchstr(3X)
#mvwaddchstr curs_addchstr(3X)
(r"\bmvwaddnstr\(%(arg)s", r"\1.addnstr(", "curses"), # NONTRIVIAL
#mvwaddnwstr curs_addwstr(3X)
(r"\bmvwaddstr\(%(arg)s", r"\1.addstr(", "curses"), # NONTRIVIAL
#mvwaddwstr curs_addwstr(3X)
#mvwchgat curs_attr(3X)
(r"\bmvwdelch\(%(arg)s", r"\1.delch(", "curses"), # NONTRIVIAL
#mvwget_wch curs_get_wch(3X)
#mvwget_wstr curs_get_wstr(3X)
(r"\bmvwgetch\(%(arg)s", r"\1.getch(", "curses"), # NONTRIVIAL
#mvwgetn_wstr curs_get_wstr(3X)
(r"\bmvwgetnstr\(%(arg)s", r"\1.getstr(", "curses"), # NONTRIVIAL
(r"\bmvwgetstr\(%(arg)s", r"\1.getstr(", "curses"), # NONTRIVIAL
(r"\bmvwhline\(%(arg)s", r"\1.hline(", "curses"), # NONTRIVIAL
#mvwhline_set curs_border_set(3X)
(r"\bmvwin\(%(arg)s", r"\1.mvwin(", "curses"), # EXCEPTION
#mvwin_wch curs_in_wch(3X)
#mvwin_wchnstr curs_in_wchstr(3X)
#mvwin_wchstr curs_in_wchstr(3X)
(r"\bmvwinch\(%(arg)s", r"\1.inch(", "curses"), # NONTRIVIAL
#mvwinchnstr curs_inchstr(3X)
#mvwinchstr curs_inchstr(3X)
(r"\bmvwinnstr\(%(arg)s%(arg)s%(arg)s", # NONTRIVIAL
r"\3 = \1.instr(\1, \2", "curses"),
#mvwinnwstr curs_inwstr(3X)
#mvwins_nwstr curs_ins_wstr(3X)
#mvwins_wch curs_ins_wch(3X)
#mvwins_wstr curs_ins_wstr(3X)
(r"\bmvwinsch\(%(arg)s", r"\1.insch(", "curses"), # NONTRIVIAL
(r"\bmvwinsnstr\(%(arg)s", r"\1.insnstr(", "curses"), # NONTRIVIAL
(r"\bmvwinsstr\(%(arg)s", r"\1.insstr(", "curses"), # NONTRIVIAL
(r"\bmvwinstr\(%(arg)s", r"\1.instr(", "curses"), # NONTRIVIAL
#mvwinwstr curs_inwstr(3X)
(r"\bmvwprintw\(%(arg)s", r"\1.addstr(", "curses"), # NONTRIVIAL
#mvwscanw curs_scanw(3X)
(r"\bmvwvline\(%(arg)s", r"\1.vline(", "curses"), # NONTRIVIAL
#mvwvline_set curs_border_set(3X)
(r"\bnapms\(", r"curses.napms(", "curses"),
(r"\bnewpad\(", r"curses.newpad(", "curses"),
#newterm curs_initscr(3X)
(r"\bnewwin\(", r"curses.newwin(", "curses"),
(r"\bnl\(", r"curses.nl(", "curses"),
(r"\bnocbreak\(", r"curses.nocbreak(", "curses"),
(r"\bnodelay\(%(arg)s", r"\1.nodelay(", "curses"), # EXCEPTION
(r"\bnoecho\(", r"curses.noecho(", "curses"),
(r"\bnonl\(", r"curses.nonl(", "curses"),
(r"\bnoqiflush\(", r"curses.noqiflush(", "curses"),
(r"\bnoraw\(", r"curses.noraw(", "curses"),
(r"\bnotimeout\(%(arg)s", r"\1.notimeout(", "curses"),
(r"\boverlay\(%(arg)s", r"\1.overlay(", "curses"),
(r"\boverwrite\(%(arg)s", r"\1.overwrite(", "curses"),
(r"\bpair_content\(", r"curses.pair_content(", "curses"),
#pechochar curs_pad(3X)
#pnoutrefresh curs_pad(3X)
#prefresh curs_pad(3X)
(r"\bprintw\(%(arg)s", r"\1.addstr(", "curses"), # NONTRIVIAL
#putp curs_terminfo(3X)
#putwin curs_util(3X)
(r"\bqiflush\(", r"curses.qiflush(", "curses"),
(r"\braw\(", r"curses.raw(", "curses"),
(r"\bredrawwin\(%(farg)s\)", r"\1.redrawwin()", "curses"), # EXCEPTION
(r"\brefresh\(\)", r"stdscr.refresh()", "curses"),
(r"\breset_prog_mode\(", r"curses.reset_prog_mode(", "curses"),
(r"\breset_shell_mode\(", r"curses.reset_shell_mode(", "curses"),
#resetty curs_kernel(3X)
#resizeterm resizeterm(3X)*
#restartterm curs_terminfo(3X)
#ripoffline curs_kernel(3X)
#savetty curs_kernel(3X)
#scanw curs_scanw(3X)
#scr_dump curs_scr_dump(3X)
#scr_init curs_scr_dump(3X)
#scr_restore curs_scr_dump(3X)
#scr_set curs_scr_dump(3X)
(r"\bscrl\(", r"stdscr.scroll(", "curses"), # NONTRIVIAL
(r"\bscroll\(%(farg)s\)", r"\1.scroll(1)", "curses"), # NONTRIVIAL
(r"\bscrollok\(%(arg)s", r"\1.scrollok(", "curses"), # EXCEPTION
#set_curterm curs_terminfo(3X)
#set_term curs_initscr(3X)
#setcchar curs_getcchar(3X)
(r"\bsetscrreg\(", r"stdscr.setscrreg(", "curses"),
(r"\bsetsyx\(", r"curses.setsyx(", "curses"),
#setterm curs_terminfo(3X)
(r"\bsetupterm\(", r"curses.setupterm(", "curses"),
#slk_attr curs_slk(3X)*
#slk_attr_off curs_slk(3X)
#slk_attr_on curs_slk(3X)
#slk_attr_set curs_slk(3X)
#slk_attroff curs_slk(3X)
#slk_attron curs_slk(3X)
#slk_attrset curs_slk(3X)
#slk_clear curs_slk(3X)
#slk_color curs_slk(3X)
#slk_init curs_slk(3X)
#slk_label curs_slk(3X)
#slk_noutrefresh curs_slk(3X)
#slk_refresh curs_slk(3X)
#slk_restore curs_slk(3X)
#slk_set curs_slk(3X)
#slk_touch curs_slk(3X)
(r"\bstandend\(", r"stdscr.standend(", "curses"),
(r"\bstandout\(", r"stdscr.standout(", "curses"),
(r"\bstart_color\(", r"curses.start_color(", "curses"),
(r"\bsubpad\(%(arg)s", r"\1.subpad(", "curses"), # EXCEPTION
(r"\bsubwin\(%(arg)s", r"\1.subwin(", "curses"), # EXCEPTION
(r"\bsyncok\(%(arg)s", r"\1.syncok(", "curses"), # EXCEPTION
#term_attrs curs_termattrs(3X)
(r"\btermattrs\(", r"curses.termattrs(", "curses"),
(r"\btermname\(", r"curses.termname(", "curses"),
#tgetent curs_termcap(3X)
#tgetflag curs_termcap(3X)
#tgetnum curs_termcap(3X)
#tgetstr curs_termcap(3X)
#tgoto curs_termcap(3X)
(r"\btigetflag\(", r"curses.tigetflag(", "curses"),
(r"\btigetnum\(", r"curses.tigetnum(", "curses"),
(r"\btigetstr\(", r"curses.tigetstr(", "curses"),
(r"\btimeout\(", r"stdscr.timeout(", "curses"),
(r"\btouchline\(%(arg)s", r"\1.touchline(", "curses"), # EXCEPTION
(r"\btouchwin\(%(farg)s\)", r"\1.touchwin()", "curses"), # EXCEPTION
(r"\btparm\(", r"curses.tparm(", "curses"),
#tputs curs_termcap(3X)
#tputs curs_terminfo(3X)
#trace curs_trace(3X)*
(r"\btypeahead\(", r"curses.typeahead(", "curses"),
(r"\bunctrl\(", r"curses.unctrl(", "curses"),
#unget_wch curs_get_wch(3X)
(r"\bungetch\(", r"curses.ungetch(", "curses"),
(r"\bungetmouse\(%(arg)s", r"\1.ungetmouse(", "curses"), # False friend
(r"\buntouchwin\(%(farg)s\)", r"\1.untouchwin()", "curses"),
(r"\buse_default_colors\(", r"curses.use_default_colors(", "curses"),
(r"use_env\(", r"use_env(", "curses"),
| |
import os
import pathlib
import re
from copy import deepcopy
import warnings
import numpy as np
import rasterio
import rasterio.features
import rasterio.warp
import xarray as xr
from xgeo.crs import XCRS
from xgeo.utils import (X_DIMS, Y_DIMS, Z_DIMS, T_DIMS, DEFAULT_DIMS)
@xr.register_dataset_accessor('geo')
class XGeoDatasetAccessor(object):
"""
XGeoDatasetAccessor adds the geospatial functionalities to the xarray Dataset. The accessor makes use of the
versatility of xarray together with the geospatial operations provided by rasterio together with many custom
operations that are used in general day to day task in the geospatial world.
"""
def __init__(self, xarray_obj):
    """Attach the accessor to *xarray_obj* and prime its geo metadata.

    Ensures the geo attribute keys exist (defaulting to None) and, when the
    Dataset contains at least one raster DataArray, computes the geo
    attributes and restructures the Dataset.
    """
    self._obj = xarray_obj
    # Make sure every geo attribute key is present without clobbering
    # values that are already set on the Dataset.
    existing = self._obj.attrs
    self._obj.attrs.update(
        transform=existing.get('transform', None),
        crs=existing.get('crs', None),
        bounds=existing.get('bounds', None),
        origin=existing.get('origin', None),
        resolutions=existing.get('resolutions', None)
    )
    holds_raster = self._obj.data_vars and any(
        self._is_raster_data_array(candidate)
        for candidate in self._obj.data_vars.values()
    )
    if holds_raster:
        # Compute transform/origin/bounds/etc. and normalize the layout.
        self.initialize_geo_attributes()
        self.validate_and_restructure()
def initialize_geo_attributes(self):
    """Recompute all derived geo attributes from the current coordinates.

    Runs, in order: resolutions, transform, origin, bounds, then projection
    discovery. The order matters — later steps read the earlier results.
    """
    for recompute in (self._compute_resolutions,
                      self._compute_transform,
                      self._compute_origin,
                      self._compute_bounds,
                      self._find_projection):
        recompute()
def _is_raster_data_array(self, value: xr.DataArray):
    """
    Report whether *value* is a raster DataArray.

    A raster here is an ``xarray.DataArray`` that carries both the x and the
    y dimension of this accessor.

    Parameters
    ----------
    value: xarray.DataArray
        Object to test.

    Returns
    -------
    is_raster_array: bool
        True when *value* is a DataArray with x and y dimensions.
    """
    return (isinstance(value, xr.DataArray)
            and self.x_dim in value.dims
            and self.y_dim in value.dims)
def validate_and_restructure(self):
    """
    Validates and restructures the dataset to make full utilization of GeoDataset.

    - Validates that x and y dimensions exist.
    - Validates that at least one raster DataArray is present.
    - Ensures band and time dimensions exist; if missing, they are added to
      every raster DataArray with a single default coordinate value of 0.

    Note: the Dataset is modified in place; this method returns None.
    """
    for dim in ['x_dim', 'y_dim']:
        # x_dim / y_dim are resolved by accessor properties defined elsewhere;
        # both must be discoverable for any geo operation to work.
        assert getattr(self, dim) is not None
    assert any([self._is_raster_data_array(data_var) for data_var in self._obj.data_vars.values()]), \
        "There are no raster DataArray in the Dataset."
    # NOTE: iterating a set literal — the band/time processing order is
    # arbitrary (both branches are independent, so this is harmless).
    for dim in {'band_dim', 'time_dim'}:
        try:
            # Accessing the property raises AttributeError when the
            # dimension is absent from the Dataset.
            getattr(self, dim)
        except AttributeError:
            warnings.warn(
                "There is no {0} dimension in the DataArray. It will be added to the dataarray.".format(dim)
            )
            for data_var, data_values in self._obj.data_vars.items():
                # Expand the dimension if the DataArray is a raster.
                if self._is_raster_data_array(data_values):
                    self._obj[data_var] = data_values.expand_dims(DEFAULT_DIMS.get(dim))
            # Give the new dimension a single coordinate value of 0.
            self._obj = self._obj.assign_coords(**{DEFAULT_DIMS.get(dim): [0]})
def _compute_resolutions(self):
    """
    Calculates the resolutions from the current coordinates of the Dataset and
    stores them in the Dataset attribute named ``resolutions`` as a tuple
    (x resolution, y resolution).

    Raises
    ------
    AssertionError
        If coordinates are missing, or the spacing along either axis is
        non-uniform or zero.
    """
    assert self.x_coords is not None and self.y_coords is not None
    x_resolutions = self.x_coords.diff(self.x_dim).values
    y_resolutions = self.y_coords.diff(self.y_dim).values
    # BUG FIX: the original assert only rejected zero-valued steps
    # (`.any()`/`.all()` test non-zeroness) and never actually detected
    # non-uniform spacing, despite its error message. Require every step
    # along each axis to be (approximately) equal and non-zero.
    consistent = (
        x_resolutions.size > 0 and y_resolutions.size > 0 and
        np.allclose(x_resolutions, x_resolutions[0]) and
        np.allclose(y_resolutions, y_resolutions[0]) and
        x_resolutions[0] != 0 and y_resolutions[0] != 0
    )
    assert consistent, \
        "The resolutions are inconsistent. The library isn't designed to handle inconsistent resolutions"
    self._obj.attrs.update({
        "resolutions": (x_resolutions.min(), y_resolutions.min())
    })
@property
def resolutions(self):
    """
    Gets the resolutions of the DataArrays in Dataset. If the resolutions
    don't exist yet, they are computed from the current coordinates.

    Returns
    -------
    resolutions: (float, float)
        x and y resolutions of the DataArrays.
    """
    # BUG FIX: the original condition was inverted (`is not None`), so the
    # resolutions were recomputed on every access when already cached, and
    # never computed (None returned) when missing.
    if self._obj.attrs.get('resolutions') is None:
        self._compute_resolutions()
    return self._obj.attrs.get('resolutions')
def _compute_transform(self):
    """
    Derive the affine geo-transform from the coordinate axes and store it in
    the ``transform`` attribute of the Dataset and of every raster DataArray.
    """
    res_x, res_y = self.resolutions
    # PixelAsArea convention: the origin sits half a pixel before the
    # first coordinate (which marks the pixel center).
    origin_x = self.x_coords.values[0] - res_x / 2.0
    origin_y = self.y_coords.values[0] - res_y / 2.0
    geo_transform = (res_x, 0, origin_x, 0, res_y, origin_y)
    self._obj.attrs.update(transform=geo_transform)
    # Mirror the transform onto each raster DataArray so their metadata
    # stays in sync with the Dataset.
    for candidate in self._obj.data_vars.values():
        if self._is_raster_data_array(candidate):
            candidate.attrs.update(transform=geo_transform)
@property
def transform(self):
    """
    Gets the geo-transform of the Dataset, computing it from the current
    coordinates when it is not already present.

    Returns
    -------
    transform: tuple
        Geo-transform (x resolution, 0, x origin, 0, y resolution, y origin)
    """
    cached = self._obj.attrs.get("transform", None)
    if not cached:
        self._compute_transform()
        cached = self._obj.attrs.get('transform')
    return cached
def _compute_coords_from_transform(self):
    """
    Rebuild the x and y pixel-center coordinates from the geo-transform and
    assign them to the Dataset.
    """
    res_x, _, origin_x, _, res_y, origin_y = self.transform
    # Coordinates mark pixel centers: start half a pixel in from the origin.
    new_x = origin_x + res_x / 2.0 + np.arange(0, self.x_size) * res_x
    new_y = origin_y + res_y / 2.0 + np.arange(0, self.y_size) * res_y
    self._obj.coords.update({self.x_dim: new_x, self.y_dim: new_y})
@transform.setter
def transform(self, trans: tuple or list):
    """
    Sets the geo-transform on the Dataset, propagates it to every raster
    DataArray, and recomputes the x/y coordinates accordingly.

    Parameters
    ----------
    trans: list or tuple
        Geo-Transform (x resolution, 0, x origin, 0, y resolution, y origin)
    """
    assert type(trans) in [tuple, list, np.ndarray] and len(trans) == 6, \
        "`trans` should be either tuple or list with 6 numbers"
    self._obj.attrs.update(transform=tuple(trans))
    # Keep every raster DataArray's metadata in sync with the Dataset.
    for candidate in self._obj.data_vars.values():
        if self._is_raster_data_array(value=candidate):
            candidate.attrs.update(transform=self._obj.attrs["transform"])
    # The coordinate axes must follow the newly assigned transform.
    self._compute_coords_from_transform()
def _compute_origin(self):
    """
    Derive the human-readable origin of the Dataset from the sign of the
    resolutions and store it in the ``origin`` attribute.

    The origin is one of: top_left, bottom_left, top_right, bottom_right.
    """
    x_res, y_res = self.resolutions
    # A non-negative y step means rows grow upward (origin at the bottom);
    # a non-negative x step means columns grow rightward (origin at the left).
    vertical = 'bottom' if y_res >= 0 else 'top'
    horizontal = 'left' if x_res >= 0 else 'right'
    self._obj.attrs.update(origin="{0}_{1}".format(vertical, horizontal))
@property
def origin(self):
    """
    Gets the origin of the Dataset in human readable format, computing it
    from the resolutions when not already present.

    Returns
    -------
    origin: str
        Origin of the Dataset.
    """
    cached = self._obj.attrs.get('origin')
    if not cached:
        self._compute_origin()
        cached = self._obj.attrs.get('origin')
    return cached
def _update_on_origin(self, origin):
    """
    Updates the Dataset (coordinate systems, transforms, DataArrays etc.) according to the provided origin.

    The data of every raster DataArray is reordered in place along each axis
    whose origin side changes, the coordinates are flipped to match, and all
    derived geo attributes are recomputed.

    Parameters
    ----------
    origin: str
        Origin to assign to the Dataset, formatted "<vertical>_<horizontal>"
        (e.g. "top_left").
    """
    # Current and requested origin, split into vertical/horizontal parts.
    yo, xo = self.origin.split('_')
    nyo, nxo = origin.split('_')
    y_coords = self.y_coords
    x_coords = self.x_coords
    # Reverse an axis' coordinates only when its origin side changes.
    if yo != nyo:
        y_coords = self.y_coords[::-1]
    if xo != nxo:
        x_coords = self.x_coords[::-1]
    for data_var, data_value in self._obj.data_vars.items():
        if not self._is_raster_data_array(data_value):
            continue
        # Re-read the data in the flipped coordinate order and overwrite the
        # array contents in place (labels are updated afterwards, below).
        if yo != nyo:
            data_value[:] = data_value.loc[{self.y_dim: y_coords}].values
        if xo != nxo:
            data_value[:] = data_value.loc[{self.x_dim: x_coords}].values
        self._obj[data_var] = data_value
    self._obj.coords.update({self.x_dim: x_coords, self.y_dim: y_coords})
    # Transform, bounds, resolutions etc. must follow the new ordering.
    self.initialize_geo_attributes()
@origin.setter
def origin(self, value):
    """
    Sets the origin of the Dataset and updates the Dataset with respect to the new origin.

    Parameters
    ----------
    value: str
        Origin to be assigned to Dataset. It can be one of top_left, bottom_left, top_right, bottom_right

    Raises
    ------
    IOError
        If `value` is not a string or is not one of the allowed origins.
    """
    allowed_origins = ['top_left', 'bottom_left', 'top_right', 'bottom_right']
    # BUG FIX: the original combined the checks with `and`, so any string —
    # even an invalid origin like "middle" — slipped through. Reject when
    # EITHER check fails.
    if not isinstance(value, str) or value not in allowed_origins:
        raise IOError("Either provided value is not string or doesn't belong to one of {}".format(allowed_origins))
    self._update_on_origin(value)
    self._obj.attrs.update(origin=value)
def _compute_bounds(self):
    """Compute the (left, bottom, right, top) bounds from the transform and
    raster size, and store them in the ``bounds`` attribute."""
    # TODO: Validate this
    x_res, _, x_origin, _, y_res, y_origin = self.transform
    # The two candidate edges per axis; min/max sorts out the sign of the
    # resolution (y_res is typically negative for north-up data).
    x_edges = np.array([x_origin, x_origin + self.x_size * x_res])
    y_edges = np.array([y_origin, y_origin + self.y_size * y_res])
    self._obj.attrs.update(
        bounds=(x_edges.min(), y_edges.min(), x_edges.max(), y_edges.max()))
@property
def bounds(self):
    """
    Gets the bounds of the data, computing them when not already cached.

    Returns
    -------
    bounds: tuple
        Bounds of the data (left, bottom, right, top)
    """
    cached = self._obj.attrs.get('bounds', None)
    if not cached:
        self._compute_bounds()
        cached = self._obj.attrs.get('bounds')
    return cached
def _find_projection(self):
    """
    Finds the projection system of the Dataset. The method searches whether there exist any value to attribute
    `crs` in Dataset or any DataArray or if grid mapping as in netCDF exists for any DataArray. The method then
    assigns the found CRS through the ``projection`` setter.

    Raises
    ------
    AssertionError
        If no projection information can be found anywhere in the Dataset.
    """
    # Search for the projection.
    # 1. Search on Dataset level.
    crs = self._obj.attrs.get("crs", None)
    if not crs:
        # 2. Fall back to the raster DataArrays.
        for data_array in self._obj.data_vars.values():
            if not self._is_raster_data_array(data_array):
                continue
            # If the DataArray inside the DataSet has the crs, use it.
            crs = data_array.attrs.get("crs")
            if crs:
                break
            # Otherwise look for a netCDF-style grid mapping; its variable's
            # attrs dict then serves as the CRS description.
            grid_mapping = data_array.attrs.pop('grid_mapping', None)
            if grid_mapping:
                crs = self._obj.variables.get(grid_mapping).attrs
                # NOTE(review): Dataset.drop returns a new Dataset; the result
                # is discarded here, so the grid-mapping variable is likely
                # not actually removed. Possibly intended:
                # self._obj = self._obj.drop(grid_mapping) — confirm.
                self._obj.drop(grid_mapping)
                break
    # If crs is found assign it to Dataset and all DataArrays to maintain consistency
    assert crs is not None, "The projection information isn't present in the Dataset."
    self.projection = crs
@property
def projection(self):
    """
    Gets the projection/CRS system of the Dataset, searching for it first
    when no `crs` attribute is present.

    Returns
    -------
    projection: str
        Projection/CRS in proj4 string
    """
    try:
        if self._obj.attrs.get("crs", None) is None:
            self._find_projection()
    except Exception as aep:
        # NOTE(review): failures during projection discovery are printed and
        # swallowed; the return below may then call XCRS.from_any(None) —
        # confirm this best-effort behavior is intended.
        print(aep)
    return XCRS.from_any(self._obj.attrs.get("crs")).to_proj4()
@projection.setter
def projection(self, proj: str or int or dict):
"""
Sets the projection system of the Dataset to the provided projection system. This doesn't reproject the
Dataset to the assigned projection | |
# tspwplib/problem.py
"""Functions and classes for datasets"""
import random
from typing import List, Optional, Union
import networkx as nx
import pandas as pd
import pydantic
import tsplib95
from .types import (
DisplayDataType,
EdgeDataFormat,
EdgeFunction,
EdgeList,
EdgeWeightFormat,
EdgeWeightType,
NodeCoords,
NodeCoordType,
Vertex,
VertexFunction,
VertexFunctionName,
VertexList,
VertexLookup,
)
from .utils import edge_attribute_names, node_attribute_names
from .walk import is_simple_cycle, walk_from_edge_list, total_prize
# pylint: disable=too-few-public-methods
class BaseTSP(pydantic.BaseModel):
    """A pydantic model for tsplib95.

    Each field is validated with type hinting.
    """

    # pylint: disable=too-many-arguments
    capacity: Optional[Union[int, float]]
    comment: str
    demands: Optional[VertexFunction]
    depots: VertexList
    dimension: int
    display_data: Optional[NodeCoords]
    display_data_type: DisplayDataType
    edge_data: EdgeList
    edge_data_format: EdgeDataFormat
    edge_weights: Optional[EdgeFunction]
    edge_weight_format: EdgeWeightFormat
    edge_weight_type: EdgeWeightType
    fixed_edges: EdgeList
    name: str
    node_coords: Optional[NodeCoords]
    node_coord_type: NodeCoordType
    problem_type: str
    tours: Optional[List[VertexList]]

    class Config:
        """Pydantic configuration"""

        arbitrary_types_allowed = True

    @classmethod
    def from_networkx(
        cls,
        name: str,
        comment: str,
        problem_type: str,
        G: nx.Graph,
        capacity: Optional[Union[int, float]] = None,
        display_data: Optional[NodeCoords] = None,
        display_data_type: DisplayDataType = DisplayDataType.NO_DISPLAY,
        edge_weight_format: EdgeWeightFormat = EdgeWeightFormat.FULL_MATRIX,
        weight_attr_name: str = "weight",
    ):
        """Get a base TSP model from a networkx graph

        Notes:
            Only explicit edge weights (the ``weight_attr_name`` edge
            attribute) are supported; 3D node coordinates are not.
        """
        edge_attr_names = edge_attribute_names(G)
        node_attr_names = node_attribute_names(G)
        if weight_attr_name not in edge_attr_names:
            message = f"{weight_attr_name} is required to be an edge attribute, "
            message += "but was not found in graph. "
            message += "This function only supports an explicit weight function. "
            raise NotImplementedError(message)
        is_2d = "x" in node_attr_names and "y" in node_attr_names
        is_3d = is_2d and "z" in node_attr_names
        if is_3d:
            raise NotImplementedError("3D coords are not supported")
            # node_coord_type = NodeCoordType.THREED_COORDS
            # node_coords = {
            #     node: (float(data["x"]), float(data["y"]), float(data["z"]))
            #     for node, data in G.nodes(data=True)
            # }
        if is_2d:
            node_coord_type = NodeCoordType.TWOD_COORDS
            node_coords = {
                node: (float(data["x"]), float(data["y"]))
                for node, data in G.nodes(data=True)
            }
        else:
            node_coord_type = NodeCoordType.NO_COORDS
            node_coords = {}
        demands = None
        if "demand" in node_attr_names:
            demands = nx.get_node_attributes(G, "demand")
        if display_data_type == DisplayDataType.COORD_DISPLAY:
            display_data = node_coords
        fixed_edges = []
        if "is_fixed" in edge_attr_names:
            fixed_edges = [
                edge for edge, data in G.edges(data=True) if data["is_fixed"]
            ]
        depots = []
        if "is_depot" in node_attr_names:
            depots = [node for node, data in G.nodes(data=True) if data["is_depot"]]
        edge_data = list(G.edges())
        edge_weights = nx.get_edge_attributes(G, weight_attr_name)
        return cls(
            capacity=capacity,
            comment=comment,
            demands=demands,
            depots=depots,
            dimension=G.number_of_nodes(),
            display_data=display_data,
            display_data_type=display_data_type,
            edge_data=edge_data,
            edge_data_format=EdgeDataFormat.EDGE_LIST,
            edge_weights=edge_weights,
            edge_weight_format=edge_weight_format,
            edge_weight_type=EdgeWeightType.EXPLICIT,
            fixed_edges=fixed_edges,
            name=name,
            node_coords=node_coords,
            node_coord_type=node_coord_type,
            problem_type=problem_type,
            tours=None,
        )

    @classmethod
    def from_dataframes(
        cls,
        name: str,
        comment: str,
        problem_type: str,
        edges_df: pd.DataFrame,
        nodes_df: pd.DataFrame,
        capacity: Optional[Union[int, float]] = None,
        display_data: Optional[NodeCoords] = None,
        display_data_type: DisplayDataType = DisplayDataType.NO_DISPLAY,
        edge_weight_format: EdgeWeightFormat = EdgeWeightFormat.FULL_MATRIX,
    ):
        """Get a TSP base model from edge and node dataframes

        Notes:
            Essential edge columns: [source, target, weight].
            Optional edge columns: [is_fixed].
            Essential node columns: [node, is_depot].
            Optional node columns: [x, y, z, demand].
            The edge weight function is explicitly given by the 'weight' column.
        """
        if "weight" not in edges_df:
            message = "'weight' is not a column in edges_df. "
            message += "This function only supports an explicit weight function. "
            message += "If you have a column that can be used as the weight function, "
            message += "please rename the column to 'weight'."
            raise NotImplementedError(message)
        is_2d = "x" in nodes_df.columns and "y" in nodes_df.columns
        is_3d = is_2d and "z" in nodes_df.columns
        if is_3d:
            raise NotImplementedError("3D coords not supported")
        if is_2d:
            node_coord_type = NodeCoordType.TWOD_COORDS
            node_coords = dict(zip(nodes_df["node"], zip(nodes_df["x"], nodes_df["y"])))
        else:
            node_coord_type = NodeCoordType.NO_COORDS
            node_coords = {}
        demands = None
        if "demand" in nodes_df.columns:
            demands = dict(zip(nodes_df["node"], nodes_df["demand"]))
        if display_data_type == DisplayDataType.COORD_DISPLAY:
            display_data = node_coords
        fixed_edges = []
        if "is_fixed" in edges_df.columns:
            fixed_edges_df = edges_df.loc[edges_df["is_fixed"]]
            fixed_edges = list(zip(fixed_edges_df["source"], fixed_edges_df["target"]))
        depots = nodes_df.loc[nodes_df["is_depot"]]["node"].to_list()
        edge_data = list(zip(edges_df["source"], edges_df["target"]))
        edge_weights = dict(zip(edge_data, edges_df["weight"]))
        return cls(
            capacity=capacity,
            comment=comment,
            demands=demands,
            depots=depots,
            dimension=len(nodes_df["node"]),
            display_data=display_data,
            display_data_type=display_data_type,
            edge_data=edge_data,
            edge_data_format=EdgeDataFormat.EDGE_LIST,
            edge_weights=edge_weights,
            edge_weight_format=edge_weight_format,
            edge_weight_type=EdgeWeightType.EXPLICIT,
            fixed_edges=fixed_edges,
            name=name,
            node_coords=node_coords,
            node_coord_type=node_coord_type,
            problem_type=problem_type,
            tours=None,
        )

    @classmethod
    def from_tsplib95(cls, problem: tsplib95.models.StandardProblem):
        """Get a TSP base model from a StandardProblem object"""
        display_data_type = (
            problem.display_data_type
            if problem.display_data_type
            else DisplayDataType.NO_DISPLAY
        )
        edge_data_format = (
            problem.edge_data_format
            if problem.edge_data_format
            else EdgeDataFormat.EDGE_LIST
        )
        edge_weight_type = problem.edge_weight_type
        # edge weight format: infer FUNCTION for non-explicit weight types,
        # otherwise an explicit format must be present in the problem.
        edge_weight_format = problem.edge_weight_format
        if (
            not edge_weight_format
            and edge_weight_type in EdgeWeightType.__members__
            and edge_weight_type != EdgeWeightType.EXPLICIT
        ):
            edge_weight_format = EdgeWeightFormat.FUNCTION
        elif not edge_weight_format and edge_weight_type == EdgeWeightType.EXPLICIT:
            raise ValueError(
                "Edge weight type is set to EXPLICIT but no edge weight format is given"
            )
        elif not edge_weight_format:
            raise ValueError(
                "Edge weight format in StandardProblem is not set - cannot assign edge weights."
            )
        node_coord_type = (
            problem.node_coord_type
            if problem.node_coord_type
            else NodeCoordType.NO_COORDS
        )
        node_coords = None
        if node_coord_type == NodeCoordType.TWOD_COORDS:
            node_coords = {i: problem.node_coords.get(i) for i in problem.get_nodes()}
        elif node_coord_type == NodeCoordType.THREED_COORDS:
            raise NotImplementedError("3D coords not yet supported")
        return cls(
            capacity=problem.capacity,
            comment=problem.comment if problem.comment else "",
            demands=problem.demands,
            depots=problem.depots,
            dimension=problem.dimension,
            display_data=problem.display_data,
            display_data_type=display_data_type,
            edge_data=list(problem.get_edges()),
            edge_data_format=edge_data_format,
            edge_weights={
                (i, j): problem.get_weight(i, j) for i, j in problem.get_edges()
            },
            edge_weight_format=edge_weight_format,
            edge_weight_type=edge_weight_type,
            fixed_edges=problem.fixed_edges,
            name=problem.name,
            node_coords=node_coords,
            node_coord_type=node_coord_type,
            problem_type=problem.type,
            tours=problem.tours,
        )

    def to_tsplib95(self) -> tsplib95.models.StandardProblem:
        """Convert to a tsplib95 standard model"""
        weights = self.edge_weights
        if self.edge_weight_type == EdgeWeightType.EXPLICIT:
            # create a graph
            G = nx.Graph(incoming_graph_data=self.edge_data)
            nx.set_edge_attributes(G, self.edge_weights, name="weight")
            # then get the weighted adjacency matrix.
            # BUGFIX: the original passed `nodelist=list(G.nodes()).sort()`;
            # list.sort() sorts in place and returns None, so nodelist was
            # always None (arbitrary node order). sorted() gives the intended
            # deterministic ordering.
            weights = nx.to_numpy_array(
                G, nodelist=sorted(G.nodes()), weight="weight", dtype=int
            )
        return tsplib95.models.StandardProblem(
            # capacity=self.capacity,
            comment=self.comment,
            demands=self.demands,
            depots=self.depots,
            dimension=self.dimension,
            # display_data=self.display_data,
            display_data_type=self.display_data_type,
            edge_data=self.edge_data,
            edge_data_format=self.edge_data_format,
            edge_weights=weights,
            edge_weight_format=self.edge_weight_format,
            edge_weight_type=self.edge_weight_type,
            # fixed_edges=self.fixed_edges,
            name=self.name,
            node_coords=self.node_coords,
            node_coord_type=self.node_coord_type,
            type=self.problem_type,
            # tours=self.tours,
        )

    def __set_graph_attributes(self, graph: nx.Graph) -> None:
        """Set graph attributes such as 'name' and 'comment'"""
        graph.graph["name"] = self.name
        graph.graph["comment"] = self.comment
        graph.graph["problem_type"] = self.problem_type
        graph.graph["dimension"] = self.dimension
        if self.capacity is not None:
            graph.graph["capacity"] = self.capacity

    def __set_node_attributes(self, graph: nx.Graph) -> None:
        """Set node attributes such as 'is_depot', 'demand', 'display' and coords"""
        for vertex in graph.nodes():
            graph.nodes[vertex]["is_depot"] = vertex in self.depots
            if self.demands:
                graph.nodes[vertex]["demand"] = self.demands[vertex]
            if self.display_data:
                graph.nodes[vertex]["display"] = self.display_data[vertex]
            if self.node_coords:
                coords = self.node_coords[vertex]
                graph.nodes[vertex]["x"] = coords[0]
                graph.nodes[vertex]["y"] = coords[1]

    def __add_edges(self, graph: nx.Graph) -> None:
        """Add edges from edge data

        Args:
            graph: Input graph
        """
        for edge in self.edge_data:
            graph.add_edge(edge[0], edge[1])

    def __set_edge_attributes(self, graph: nx.Graph) -> None:
        """Set edge attributes for 'weight' and 'is_fixed'

        Args:
            graph: Input graph
        """
        nx.set_edge_attributes(graph, self.edge_weights, name="weight")
        fixed = {(u, v): (u, v) in self.fixed_edges for u, v in graph.edges()}
        nx.set_edge_attributes(graph, fixed, name="is_fixed")

    def get_graph(self) -> nx.Graph:
        """Get a networkx graph

        Returns:
            Undirected networkx graph with node attributes such as 'is_depot'
            and edge attributes such as 'weight' and 'is_fixed'.
        """
        G = nx.Graph()
        self.__set_graph_attributes(G)
        self.__add_edges(G)
        self.__set_edge_attributes(G)
        self.__set_node_attributes(G)
        return G
class PrizeCollectingTSP(BaseTSP):
    """Prize-collecting TSP pydantic model"""

    def get_root_vertex(self) -> Vertex:
        """Get the root vertex from the 'depots' attribute

        Returns:
            Root vertex

        Raises:
            ValueError: If the number of depots to choose from is zero or greater than 1
        """
        if len(self.depots) > 1:
            raise ValueError(
                "More than 1 depot to choose from: which depot should I choose?"
            )
        try:
            # pylint: disable=unsubscriptable-object
            return self.depots[0]
        except IndexError as index_error:
            # BUGFIX: indexing an empty list raises IndexError, not KeyError,
            # so the original handler could never fire and an empty depot
            # list leaked a bare IndexError to callers.
            raise ValueError("The list of depots is empty") from index_error

    def get_total_prize(self) -> Union[int, float]:
        """Get the total prize (demand) of all vertices"""
        if self.demands:
            return sum(self.demands.values())
        return 0
class ProfitsProblem(tsplib95.models.StandardProblem):
"""TSP with Profits Problem
You can set `edge_removal_probability` to remove edges with this probability.
"""
# Maximum distance of the total route in a OP.
cost_limit = tsplib95.fields.IntegerField("COST_LIMIT")
# The scores of the nodes of a OP are given in the form (per line)
node_score = tsplib95.fields.DemandsField("NODE_SCORE_SECTION")
# The optimal solution to the TSP
tspsol = tsplib95.fields.IntegerField("TSPSOL")
def __init__(
self, edge_removal_probability: float = 0.0, seed: int = 0, special=None, **data
):
super().__init__(special=special, **data)
self._edge_removal_probability = edge_removal_probability
self._seed = seed
@property
def edge_removal_probability(self) -> float:
"""Probability of removing an edge from the graph.
Returns:
Edge removal probability.
Notes:
It is strongly recommended to only set this value in the constructor.
"""
return self._edge_removal_probability
def __set_edge_attributes(self, graph: nx.Graph, names: VertexLookup) -> None:
"""Set edge attributes"""
# add every edge with some associated metadata
for edge in self.get_edges():
cost: int = self.get_weight(edge[0], edge[1])
# pylint: disable=unsupported-membership-test
# is_fixed: bool = (u, v) in self.fixed_edges
graph.add_edge(names[edge[0]], names[edge[1]], cost=cost)
def __set_graph_attributes(self, graph: nx.Graph) -> None:
"""Set attributes of the graph such as the name"""
graph.graph["name"] = self.name
graph.graph["comment"] = self.comment
graph.graph["type"] = self.type
graph.graph["dimension"] = self.dimension
graph.graph["capacity"] = self.capacity
graph.graph["root"] = self.get_root_vertex()
def __set_node_attributes(self, graph: nx.Graph, names: VertexLookup) -> None:
"""Add node attributes"""
node_score = self.get_node_score()
| |
# Source repository: asb/opentitan
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''Generate Markdown documentation for the instructions in insns.yml'''
import argparse
import os
import sys
from typing import Dict, List, Optional, TextIO, Tuple
from shared.bool_literal import BoolLiteral
from shared.encoding import Encoding
from shared.insn_yaml import Insn, InsnsFile, InsnGroup, load_file
from shared.operand import EnumOperandType, OptionOperandType, Operand
from docs.get_impl import read_implementation
_O2EDicts = Tuple[Dict[str, List[str]], Dict[int, str]]
def render_operand_row(operand: Operand,
                       op_ranges: Optional[List[str]]) -> str:
    '''Generate the single row of a markdown table for an operand'''
    # Emit <tr><td> HTML directly: markdown tables only allow inline
    # content, and we want arbitrary markup without escaping '<'. The blank
    # lines around each cell's contents switch GitHub-flavoured markdown
    # back into "markdown mode".
    row = ['<tr><td>\n\n', '`{}`'.format(operand.name), '\n\n</td><td>']

    # Description cell: any documentation supplied in the yaml file, then
    # any extra documentation implied by the operand's type.
    if operand.doc is not None:
        row += ['\n\n', operand.doc]
    if operand.op_type is not None:
        type_doc = operand.op_type.markdown_doc()
        if type_doc is not None:
            row += ['\n\n', type_doc]
    # When we know the encoding fields, describe how to decode the operand.
    if op_ranges is not None:
        row.append('\n\n')
        row.append('Decode as `{}`\n\n'
                   .format(operand.op_type.describe_decode(op_ranges)))

    row.append('\n\n</td></tr>')
    return ''.join(row)
def render_operand_table(operands: List[Operand],
                         o2e: Optional[Dict[str, List[str]]]) -> str:
    '''Generate the operand table for an instruction'''
    # Hand-rolled <tr><td> HTML: the cells hold block-level elements, which
    # markdown tables (inline-only) cannot express.
    chunks = ['<table><thead>'
              '<tr><th>Operand</th><th>Description</th></tr>'
              '</thead>'
              '<tbody>']
    for operand in operands:
        ranges = None if o2e is None else o2e.get(operand.name)
        if o2e is not None:
            # If we had an encoding, it should have encoded every operand,
            # so name_op_enc_fields should have picked up this one.
            assert ranges is not None
        chunks.append(render_operand_row(operand, ranges))
    chunks.append('</tbody></table>\n\n')
    return ''.join(chunks)
def render_encoding(mnemonic: str,
                    encoding: Encoding,
                    e2o: Dict[int, str]) -> str:
    '''Generate a table displaying an instruction encoding'''
    parts = []
    parts.append('<table style="font-size: 75%">')
    # Header row: bit indices 31..0, with an empty cell above the mnemonic
    # column on the left.
    parts.append('<tr>')
    parts.append('<td></td>')
    for bit in range(31, -1, -1):
        parts.append('<td>{}</td>'.format(bit))
    parts.append('</tr>')

    # Build dictionary of bit ranges, keyed by the msb and with value a pair
    # (width, desc) where width is the width of the range in bits and desc is a
    # string describing what is stored in the range.
    by_msb = {}

    for field_name, field in encoding.fields.items():
        scheme_field = field.scheme_field

        # If this field is a literal value, explode it into single bits. To do
        # so, we walk the ranges and match up with ranges in the value.
        if isinstance(field.value, BoolLiteral):
            assert field.value.width > 0
            assert field.value.width == scheme_field.bits.width
            bits_seen = 0
            for msb, lsb in scheme_field.bits.ranges:
                # Map this physical bit range onto the matching slice of the
                # literal, counting down from the literal's top bit.
                val_msb = scheme_field.bits.width - 1 - bits_seen
                val_lsb = val_msb - msb + lsb
                bits_seen += msb - lsb + 1

                for idx in range(0, msb - lsb + 1):
                    desc = field.value.char_for_bit(val_lsb + idx)
                    # 'x' (don't care) bits render as an empty cell.
                    by_msb[lsb + idx] = (1, '' if desc == 'x' else desc)
            continue

        # Otherwise this field's value is an operand name. name_op_enc_fields
        # should have added the MSBs of its ranges to e2o.
        assert isinstance(field.value, str)
        for msb, lsb in scheme_field.bits.ranges:
            assert msb in e2o
            by_msb[msb] = (msb - lsb + 1, e2o[msb])

    parts.append('<tr>')
    parts.append('<td>{}</td>'.format(mnemonic.upper()))

    # Now run down the ranges in descending order of msb to get the table cells
    next_bit = 31
    for msb in sorted(by_msb.keys(), reverse=True):
        # Check to make sure we have a dense table (this should be guaranteed
        # because encoding objects ensure they hit every bit).
        assert msb == next_bit
        width, desc = by_msb[msb]
        next_bit = msb - width
        parts.append('<td colspan="{}">{}</td>'.format(width, desc))
    assert next_bit == -1
    parts.append('</tr>')
    parts.append('</table>\n\n')
    return ''.join(parts)
def render_literal_pseudo_op(rewrite: List[str]) -> str:
    '''Generate documentation with expansion of a pseudo op'''
    # Render the expansion as a fenced code block, one instruction per line.
    lines = ['This instruction is a pseudo-operation and expands to the '
             'following instruction sequence:\n```\n']
    for insn_line in rewrite:
        lines.append(insn_line)
        lines.append('\n')
    lines.append('```\n\n')
    return ''.join(lines)
def name_op_enc_fields(name_to_operand: Dict[str, Operand],
                       encoding: Encoding) -> _O2EDicts:
    '''Name the encoding fields corresponding to operators

    In the generated documentation, we name encoding fields based on the
    operand that they encode. For example, if the operand "foo" is encoded in
    a field, the field will be labelled "FOO" in the table. If the field is
    split over multiple bit ranges, they will be labelled like "FOO_0",
    "FOO_1" etc, counting from the LSB. If an operand has an abbreviated
    name, this will be used for the field instead of the full operand name.

    Returns a pair of dicts: (o2e, e2o). o2e maps an operand name to the list
    of (our names for) encoding fields that contribute to it, MSB first. e2o
    maps the MSB of a bit range in an encoding field to the name that should
    appear for that range in the documentation.
    '''
    o2e = {}  # type: Dict[str, List[str]]
    e2o = {}  # type: Dict[int, str]

    for field in encoding.fields.values():
        # Literal fields don't correspond to operands: skip them.
        if isinstance(field.value, BoolLiteral):
            continue

        # Otherwise this field's value names an operand.
        assert isinstance(field.value, str)
        op_name = field.value

        # An encoding should never use an operand more than once.
        assert op_name not in o2e

        # Field names use the operand's abbreviation when available,
        # upper-cased.
        operand = name_to_operand.get(op_name)
        assert operand is not None
        base = (op_name if operand.abbrev is None else operand.abbrev).upper()

        # There should always be at least one bit range for the field.
        ranges = field.scheme_field.bits.ranges
        assert ranges

        if len(ranges) == 1:
            # Single contiguous range: no numeric suffix needed.
            msb = ranges[0][0]
            assert msb not in e2o
            e2o[msb] = base
            o2e[op_name] = [base]
        else:
            # Split field: number the ranges from the LSB end (so index 0 is
            # the least significant chunk), then store them MSB-first in o2e.
            names_lsb_first = []
            for idx, (msb, lsb) in enumerate(reversed(ranges)):
                part = '{}_{}'.format(base, idx)
                assert msb not in e2o
                e2o[msb] = part
                names_lsb_first.append(part)
            o2e[op_name] = names_lsb_first[::-1]

    return (o2e, e2o)
def render_insn(insn: Insn, impl: Optional[str], heading_level: int) -> str:
'''Generate the documentation for an instruction
heading_level is the current Markdown heading level. It should be greater
than zero. For example, if it is 3, then the instruction will be introduced
with "### <insn_name>".
'''
assert heading_level > 0
parts = []
mnem = insn.mnemonic.upper()
subhead = '#' * (heading_level + 1) + ' '
# Heading, based on mnemonic (upper-cased)
parts.append('{} {}\n'.format('#' * heading_level, mnem))
# If there's a note, render it as a callout
if insn.note is not None:
parts.append('<div class="bd-callout bd-callout-warning">'
'<h5>Note</h5>\n\n')
parts.append(insn.note)
parts.append('\n\n</div>\n\n')
# Optional synopsis: some bold-face text expanding the mnemonic to
# something more understandable.
if insn.synopsis is not None:
parts.append('**{}.**\n'.format(insn.synopsis))
# Optional documentation (using existing markdown formatting). Add a blank
# line afterwards to separate from the syntax and operand table.
if insn.doc is not None:
parts.append(insn.doc + '\n')
parts.append('\n')
# If this came from the RV32I instruction set, say so.
if insn.rv32i:
parts.append('This instruction is defined in the '
'RV32I instruction set.\n\n')
# A list of errors that the instruction might cause.
if insn.errs is not None:
parts.append(subhead + 'Errors\n')
if not insn.errs:
parts.append('{} cannot cause any software errors.\n'.format(mnem))
else:
parts.append('{} might cause the following software errors:\n'
.format(mnem))
for desc in insn.errs:
parts.append('- {}\n'.format(desc))
parts.append('\n')
# Syntax example: either given explicitly or figured out from operands
parts.append(subhead + 'Syntax\n')
parts.append("```\n")
parts.append(insn.mnemonic.upper() + ('' if insn.glued_ops else ' '))
parts.append(insn.syntax.render_doc())
parts.append("\n```\n\n")
is_pseudo = insn.literal_pseudo_op or insn.python_pseudo_op
# | |
import pandas as pd
from lifelines import KaplanMeierFitter, CoxPHFitter
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from multiprocessing import Pool
import numpy as np
import functools
from .correlation import intersection, header_list
import plotly
import plotly.offline as opy
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ShuffleSplit, GridSearchCV
import warnings
#######################
### Sklearn Survival ##
#######################
class EarlyStoppingMonitor:
    """Early-stopping callback for gradient-boosted survival models.

    Signals the fit loop to stop once the mean out-of-bag improvement over
    the trailing ``window_size`` iterations has not been positive for
    ``max_iter_without_improvement`` iterations.
    """

    def __init__(self, window_size, max_iter_without_improvement):
        self.window_size = window_size
        self.max_iter_without_improvement = max_iter_without_improvement
        # Iteration index of the most recent improvement (-1 = none yet).
        self._best_step = -1

    def __call__(self, iteration, estimator, args):
        """Return True when fitting should stop at `iteration`."""
        # Always continue during the first window_size iterations.
        if iteration < self.window_size:
            return False

        # Mean improvement over the trailing window; oob_improvement_ holds
        # the change in negative log partial likelihood per iteration.
        start = iteration - self.window_size + 1
        if np.mean(estimator.oob_improvement_[start:iteration + 1]) > 1e-6:
            self._best_step = iteration
            return False  # continue fitting

        # Stop once no improvement has been seen for too many iterations.
        return iteration - self._best_step >= self.max_iter_without_improvement
def IPC_RIDGE(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
    """Fit an IPCRidge accelerated-failure-time model, tuning alpha by grid search.

    Features are standardised (scaler fitted on the training split only) and
    survival times are log1p-transformed, as IPCRidge regresses on log time.

    Returns:
        (training score, test score, coefficient Series indexed by lFeature)
    """
    from sksurv.linear_model import IPCRidge

    # let's normalize, anyway
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    seed = np.random.RandomState(seed)

    # IPCRidge works on log survival times.
    y_train_log = y_train.copy()
    y_train_log["time"] = np.log1p(y_train["time"])
    y_test_log = y_test.copy()
    y_test_log["time"] = np.log1p(y_test["time"])

    # https://github.com/sebp/scikit-survival/issues/41
    n_alphas = 50
    alphas = np.logspace(-10, 1, n_alphas)
    # BUGFIX: honour the n_core parameter instead of a hard-coded n_jobs=10,
    # consistent with the other model functions in this module.
    gcv = GridSearchCV(IPCRidge(max_iter=100000),
                       {"alpha": alphas},
                       cv = 2,
                       n_jobs = n_core).fit(X_train, y_train_log)

    # BUGFIX: the estimator was fitted directly (not wrapped in a Pipeline),
    # so `gcv.best_estimator_` has no `named_steps`; the original lookup
    # raised AttributeError. The best estimator itself is the model. The
    # bogus `best_model.alphas_` read (IPCRidge has no such attribute) is
    # dropped as well.
    best_model = gcv.best_estimator_

    scoreTraining = best_model.score(X_train, y_train_log)
    scoreTest = best_model.score(X_test, y_test_log)
    feature = pd.DataFrame(best_model.coef_, index=lFeature)[0]
    return scoreTraining, scoreTest, feature
def score_survival_model(model, X, y):
    """Scorer returning Harrell's concordance index of `model` on (X, y)."""
    from sksurv.metrics import concordance_index_censored

    # concordance_index_censored returns a tuple; the c-index is element 0.
    risk_scores = model.predict(X)
    cindex, *_ = concordance_index_censored(y['event'], y['time'], risk_scores)
    return cindex
def SurvivalSVM(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
    """Fit a FastSurvivalSVM, tuning alpha by cross-validated concordance index.

    Returns:
        (training score, test score, coefficient Series indexed by lFeature)
    """
    from sksurv.svm import FastSurvivalSVM

    # Normalise features; the scaler is fitted on the training split only.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    rng = np.random.RandomState(seed)
    ssvm = FastSurvivalSVM(max_iter=100, tol=1e-5, random_state=rng)

    # Tune alpha over powers of two, then refit manually with the winner
    # (refit=False keeps the grid search itself cheap).
    param_grid = {'alpha': 2. ** np.arange(-12, 13, 4)}
    splitter = ShuffleSplit(n_splits=10, test_size=0.2, random_state=rng)
    gcv = GridSearchCV(ssvm, param_grid, scoring=score_survival_model,
                       n_jobs = n_core, refit=False,
                       cv=splitter)
    warnings.filterwarnings("ignore", category=FutureWarning)
    gcv = gcv.fit(X_train, y_train)

    ssvm.set_params(**gcv.best_params_)
    ssvm.fit(X_train, y_train)

    scoreTraining = ssvm.score(X_train, y_train)
    scoreTest = ssvm.score(X_test, y_test)
    feature = pd.Series(ssvm.coef_, index=lFeature)
    return scoreTraining, scoreTest, feature
def PenaltyCox(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
    """Fit an elastic-net penalised Cox model (Coxnet), selecting alpha by CV.

    A first fit estimates a path of alphas; a grid search then picks the best
    single alpha. Returns (training score, test score, coefficient Series
    indexed by lFeature).
    """
    from sksurv.linear_model import CoxnetSurvivalAnalysis
    from sklearn.pipeline import make_pipeline

    # let's normalize, anyway
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # BUGFIX: suppress ConvergenceWarning *before* the first fit; the
    # original installed the filter only after fitting, so the
    # path-estimation fit still emitted warnings.
    warnings.simplefilter("ignore", ConvergenceWarning)

    # https://github.com/sebp/scikit-survival/issues/41
    model = CoxnetSurvivalAnalysis(alpha_min_ratio=0.12, l1_ratio=0.9, max_iter=100)
    model.set_params(max_iter = 100, n_alphas = 50)
    model.fit(X_train, y_train)
    alphas = model.alphas_

    # Grid-search over the estimated alpha path with a fresh estimator.
    gcv = GridSearchCV(
        make_pipeline(CoxnetSurvivalAnalysis(l1_ratio=0.9, max_iter=1000)),
        param_grid={"coxnetsurvivalanalysis__alphas": [[v] for v in alphas]},
        cv = 2,
        n_jobs= n_core).fit(X_train, y_train)
    best_model = gcv.best_estimator_.named_steps["coxnetsurvivalanalysis"]

    scoreTraining = best_model.score(X_train, y_train)
    scoreTest = best_model.score(X_test, y_test)
    feature = pd.DataFrame(best_model.coef_, index=lFeature)[0]
    return scoreTraining, scoreTest, feature
def SurvivalForest(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
    """Fit a RandomSurvivalForest and rank features by permutation importance.

    Returns:
        (training score, test score, permutation-importance Series)
    """
    from sksurv.ensemble import RandomSurvivalForest
    from eli5.formatters import format_as_dataframe
    from eli5.sklearn import explain_weights_sklearn
    from eli5.sklearn import PermutationImportance

    # Normalise features; the scaler is fitted on the training split only.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    rng = np.random.RandomState(seed)
    forest = RandomSurvivalForest(n_estimators=300,
                                  min_samples_split=10,
                                  min_samples_leaf=15,
                                  max_features="sqrt",
                                  n_jobs= n_core,
                                  random_state=rng)
    forest.fit(X_train, y_train)

    scoreTraining = forest.score(X_train, y_train)
    scoreTest = forest.score(X_test, y_test)

    # Permutation importance on the held-out fold, extracted through eli5.
    perm = PermutationImportance(forest, n_iter=3, random_state=rng)
    perm.fit(X_test, y_test)
    weights = format_as_dataframe(
        explain_weights_sklearn(perm, feature_names=lFeature, top = len(lFeature) ))
    feature = pd.Series(weights["weight"].tolist(), index=weights["feature"].tolist())
    #feature = pd.DataFrame(forest.feature_importances_, index=lFeature)
    return scoreTraining, scoreTest, feature
def gradient_boosted_models(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
    """Fit a gradient-boosted survival model with OOB early stopping.

    Returns:
        (training score, test score, feature-importance Series)
    """
    from sksurv.ensemble import GradientBoostingSurvivalAnalysis

    # Normalise features; the scaler is fitted on the training split only.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    rng = np.random.RandomState(seed)
    booster = GradientBoostingSurvivalAnalysis(
        n_estimators=1000, learning_rate=0.05, subsample=0.5,
        max_depth=1, random_state=rng
    )
    # Stop early once the 25-iteration mean OOB improvement has stalled
    # for 100 consecutive iterations.
    booster.fit(X_train, y_train, monitor=EarlyStoppingMonitor(25, 100))

    scoreTraining = booster.score(X_train, y_train)
    scoreTest = booster.score(X_test, y_test)
    feature = pd.Series(booster.feature_importances_, index=lFeature)
    return scoreTraining, scoreTest, feature
def survival_selection(data, k = 10, topk = 100, event = "event", n_core = 2, seed = 123):
    """Rank features by how often survival models select them across CV folds.

    Runs every model in `modelList` on `k` stratified folds of `data`, counts
    how often each feature lands in a model's top-`topk` non-zero
    coefficients, and averages the per-fold coefficients per model.

    Returns:
        feature_per: % of (model, fold) runs selecting each feature (top `topk`).
        dAll: per-model selection percentages across folds.
        DictScore: mean test concordance per model.
        dfTopCoef: per-model mean coefficient for each feature.
    """
    from sksurv.datasets import get_x_y
    from sklearn.model_selection import StratifiedKFold
    import copy
    from miopy.feature_selection import sort_abs
    # list of classifiers, selected on the basis of our previous paper "
    modelList = [
        [gradient_boosted_models,"Gradient Boosted Models"],
        [SurvivalSVM,"Support Vector Machine"],
        #[SurvivalForest,"Random Forest",],
        [PenaltyCox,"Penalized Cox",]
    ]
    print("Loading dataset...")
    # Split into a feature frame X and a structured survival target Y.
    X, Y = get_x_y(data, attr_labels = [event,"time"], pos_label=0)
    skf = StratifiedKFold(n_splits=k, shuffle=True, random_state = np.random.RandomState(seed))
    indexes = [ (training, test) for training, test in skf.split(X, Y) ]
    lFeature = X.columns.tolist()
    # Global selection counter across every model and fold.
    topFeatures = pd.Series(dtype='float64', index=lFeature).fillna(0)
    lAll = []
    DictScore = {}
    dfTopCoef = pd.DataFrame(dtype='float64', index=lFeature).fillna(0)
    for model, name in modelList :
        print("\nClassifier " + name)
        ListScore = []
        # Per-model selection counter and per-fold coefficient store.
        classifierTopFeatures = pd.Series(dtype='float64', name = name, index=lFeature).fillna(0)
        dfTopCoefTemp = pd.DataFrame(dtype='float64', index=lFeature).fillna(0)
        i = 1
        # iterate over all folds
        for train_index, test_index in indexes :
            X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:]
            y_train, y_test = Y[train_index], Y[test_index]
            try:
                classifier = copy.deepcopy(model)
                scoreTraining, scoreTest, features = classifier(X_train, y_train,\
                    X_test, y_test, lFeature = lFeature, n_core = n_core, seed = seed)
            except Exception as error:
                # A failing fold is reported and skipped; `i` still advances
                # in `finally`, so its coefficient column is simply absent.
                print(error)
            else:
                print("\ttraining: %.4f, test: %.4f" % (scoreTraining, scoreTest))
                ListScore.append( scoreTest )
                # now, let's get a list of the most important features, then mark the ones in the top X
                orderedFeatures = sort_abs(features[features != 0]).round(3)
                if topk <= len(orderedFeatures):
                    lF = orderedFeatures.index[0:topk].tolist()
                else:
                    lF = orderedFeatures.index.tolist()
                dfTopCoefTemp.loc[:, i] = orderedFeatures
                for f in lF:
                    if orderedFeatures[f] != 0:
                        topFeatures[f] += 1
                        classifierTopFeatures[ f ] += 1
            finally:
                i +=1
        # Mean coefficient per feature over the folds that succeeded.
        dfTopCoef[name] = dfTopCoefTemp.apply(lambda row: row.mean(), axis=1)
        print("\ttest mean: %.4f" % (np.mean(ListScore)))
        DictScore[name] = np.mean(ListScore)
        lAll.append(classifierTopFeatures)
    # Convert raw counts into percentages of the total number of runs.
    feature_per = topFeatures.div(len(modelList)*k)*100
    feature_per = feature_per.sort_values(ascending=False)[:topk]
    dAll = pd.DataFrame(lAll).div(k)*100
    return feature_per, dAll, DictScore, dfTopCoef
########################
### Survival Analysis ##
########################
def get_exprs_cutoff(exprDF, target="hsa-miR-223-3p", q = 0.5, treshold = None, optimal = True):
    """Return a (quantile, threshold) pair for splitting samples by expression.

    :param exprDF: expression dataframe; must contain column `target` (and
        "time"/"event" columns when optimal=True).
    :param target: name of the expression column to split on.
    :param q: quantile to use when neither `optimal` nor `treshold` is given.
    :param treshold: explicit expression cutoff; when given (and optimal is
        False), the matching quantile is computed from the data.
    :param optimal: when True, delegate to get_survival_cutoff to search for
        the survival-optimal cutpoint (ignores q and treshold).
    :return: tuple (q, treshold), both filled in.
    """
    from scipy import stats
    if optimal:
        # search the survival-optimal cutpoint instead of using q/treshold
        q, treshold = get_survival_cutoff(exprDF = exprDF, time = "time", event = "event", target = target)
    elif treshold is not None:
        # fix: was `treshold != None` — identity comparison is the correct idiom
        # derive the quantile that corresponds to the given cutoff
        q = stats.percentileofscore(exprDF[target], treshold) / 100
    else:
        # derive the cutoff from the requested quantile
        treshold = exprDF[target].quantile(q)
    return q, treshold
def split_by_exprs(exprDF, target="hsa-miR-223-3p", treshold = 0.5):
exprDF["exprs"] = None
is_higher = exprDF[target] >= float(treshold)
exprDF["exprs"] = exprDF["exprs"].mask(is_higher, 1)
exprDF["exprs"] = exprDF["exprs"].mask(~is_higher, 0)
#print("Splitted")
return exprDF
def get_survival_cutoff(exprDF = "exprDF", time = "time", event = "event", target = "target"):
    """Scan every observed value of `target` as a candidate cutpoint and
    return the split with the best multiplicity-adjusted Cox p-value.

    :param exprDF: expression dataframe with survival columns.
    :param time: name of the survival-time column (downstream helpers use
        the default "time" regardless — keep the default).
    :param event: name of the event-indicator column (same caveat).
    :param target: column whose optimal cutpoint is sought.
    :return: tuple (quantile, cutpoint) of the best-ranked split.
    """
    # candidate cutpoints: every distinct expression value of the target
    lPoint = exprDF[target].unique().tolist()
    df = pd.DataFrame()
    for point in lPoint:
        # translate the candidate cutpoint into its quantile;
        # optimal=False prevents recursing back into this function
        q, treshold = get_exprs_cutoff(exprDF, target=target, treshold = point, optimal = False)
        # only keep splits leaving at least ~10% of samples in each group
        if 0.1 < q < 0.9:
            try:
                tRes = get_hazard_ratio(split_by_exprs(exprDF, target=target, treshold = treshold))
            except Exception as error:
                print(error)
                # neutral fallback: HR component 0, p-value 1
                tRes = (0, 1,)
            dfTemp = pd.Series({"Target":target,"Q":q,"Cutpoint":treshold,"HR":tRes[0],"pval":tRes[1]})
            # one column per candidate (transposed to rows below)
            df = pd.concat([df,dfTemp], axis = 1)
    df = df.transpose()
    # minimum-p-value correction for testing many cutpoints
    # (looks like the Altman et al. formula p = -1.63*p*(1 + 2.35*ln p) —
    # TODO confirm against the reference)
    df["P_ADJ"] = df.pval.apply(lambda x: -1.63 * x * (1 + 2.35 * np.log(x)))
    df = df.query("0.001 < pval < 0.1")
    df = df.sort_values("P_ADJ")
    # NOTE(review): raises IndexError when no candidate survives the query
    # above — callers should be prepared for that.
    row = df.iloc[0,:]
    print(df)
    return row["Q"], row["Cutpoint"]
def get_hazard_ratio(exprDF, target = "exprs"):
    """Fit a univariate Cox proportional-hazards model on `target`.

    :param exprDF: dataframe with `target`, "time" and "event" columns
        (rows with NaN in any of the three are dropped before fitting).
    :param target: covariate column to model.
    :return: tuple (hr, pval, hr_high, hr_low) — the hazard ratio
        (exp(coef), NOT its log), its p-value, and the 95% CI bounds.
    """
    cph = CoxPHFitter()
    # scope the floating-point error suppression to the fit only
    # (fix: np.seterr changed the error state globally and never restored it)
    with np.errstate(divide='ignore', invalid='ignore'):
        cph.fit(exprDF[[target,"time","event"]].dropna(), "time", event_col = "event")
    pval = cph.summary["p"][target]
    hr_high = cph.summary["exp(coef) upper 95%"][target]
    hr_low = cph.summary["exp(coef) lower 95%"][target]
    # fix: this was named `log_hr`, but "exp(coef)" is the hazard ratio itself
    hr = cph.summary["exp(coef)"][target]
    return (hr, pval, hr_high, hr_low)
def obatin_hr(ltarget, exprDF = None):
    """For each target, median-split the samples and collect the resulting
    hazard ratio; any failure contributes a neutral HR of 1.

    :param ltarget: iterable of expression column names.
    :param exprDF: expression dataframe with survival columns.
    :return: dataframe with columns "target" and "log(hr)".
    """
    lhr = []
    for target in ltarget:
        hr = 1
        try:
            q, treshold = get_exprs_cutoff(exprDF, target=target, q=0.5, optimal = False)
            print(q)
            print(treshold)
            result = get_hazard_ratio(split_by_exprs(exprDF, target=target, treshold = treshold))
            print("%s" % (target))
            print(result)
            hr = result[0]
        except Exception as error:
            print(error)
            hr = 1
        lhr.append(hr)
    return pd.DataFrame({"target":ltarget,"log(hr)":lhr})
def obatin_hr_by_exprs(ltarget, exprDF = None):
    """Collect the hazard ratio of each target column directly from the
    expression dataframe (no median split); failures contribute 0.

    :param ltarget: iterable of covariate column names.
    :param exprDF: dataframe with survival columns.
    :return: dataframe with columns "target" and "log(hr)".
    """
    lhr = []
    for target in ltarget:
        hr = 0
        try:
            hr = get_hazard_ratio(exprDF, target = target)[0]
        except Exception as error:
            print(error)
            hr = 0
        lhr.append(hr)
    print("Lista HR")
    print(lhr)
    print(len(ltarget))
    print(len(lhr))
    df = pd.DataFrame({"target":ltarget,"log(hr)":lhr})
    print("DF inside obtain_hr")
    print(df)
    return df
def same_length(list_lists, filler="foo"):
    """Pad all lists to the length of the longest one.

    :param list_lists: iterable of lists.
    :param filler: padding value appended to the short lists (default kept
        backward-compatible with the previous hard-coded "foo").
    :return: a new list of new lists, each padded to the maximum length.
        Fix: the inputs are no longer mutated in place (the previous
        version extended the caller's own list objects).
    """
    lmax = max((len(l) for l in list_lists), default=0)
    return [list(l) + [filler] * (lmax - len(l)) for l in list_lists]
def hazard_ratio(lGeneUser = None, lMirUser = None, exprDF = None, n_core = 4):
### Intersect with Gene and Mir from table##
lMir, lGene = header_list(exprDF=exprDF)
if lGeneUser is not None:
lGene = intersection(lGene, lGeneUser)
if lMirUser is not None:
lMir = intersection(lMir, lMirUser)
lTarget = lGene+lMir
print(exprDF)
| |
# -*- coding: utf-8 -*-
"""
Evaluation Models
=================
"""
from __future__ import division
from copy import copy
from itertools import izip
from collections import defaultdict
import numpy as np
import pandas as pd
import tools
__all__ = (
'DummyPriorModel',
'EloModel',
'EloResponseTime',
'PFAModel',
'PFAResponseTime',
'PFAExt',
'PFAExtTiming',
'PFAExtStaircase',
'PFAExtSpacing',
'PFAGong',
'PFAGongTiming',
'PFATiming',
)
#: Dictionary of the most commonly used time effect functions in this thesis.
time_effect_funcs = {}
def register_time_effect(name):
    """Returns a decorator that registers a time effect function under *name*."""
    def register(time_effect):
        time_effect_funcs[name] = time_effect
        # fix: return the function so the decorated module-level name stays
        # bound to it (previously the decorator returned None, which left
        # names like `time_effect_log` bound to None after decoration)
        return time_effect
    return register
@register_time_effect('log')
def time_effect_log(t, a=1.8, c=0.123):
    """Logarithmic time effect: a - c*ln(t)."""
    return a - np.log(t) * c
@register_time_effect('pow')
def time_effect_div(t, a=2, c=0.2):
    """Power-law time effect: a / (t + 1)**c."""
    denominator = (t+1) ** c
    return a / denominator
@register_time_effect('exp')
def time_effect_exp(t, a=1.6, c=0.01):
    """Exponential time effect decaying in sqrt(t)."""
    decay = np.exp(-c * np.sqrt(t))
    return a * decay
def init_time_effect(obj, name, parameters=('a', 'c')):
    """Prepares time effect function based on name. Initializes
    the given object with default parameters `a` and `c`.

    :param obj: Object to initialize with time effect function.
    :param name: Name of the time effect function.
    :param parameters: Attribute names on *obj* holding the `a` and `c`
        parameters.
    :return: Closure evaluating the registered function with the *current*
        attribute values of *obj*, so parameters may be tuned after creation.
    """
    time_effect_fun = time_effect_funcs[name]
    # fix: `func_defaults` is Python-2-only; `__defaults__` is available on
    # Python 2.6+ and is the only spelling that works on Python 3
    defaults = time_effect_fun.__defaults__
    a, c = parameters
    # fill in any missing parameter from the function's own defaults
    if getattr(obj, a, None) is None:
        setattr(obj, a, defaults[0])
    if getattr(obj, c, None) is None:
        setattr(obj, c, defaults[1])
    def time_effect(t):
        # read the attributes at call time so later tuning is respected
        a_val, c_val = getattr(obj, a), getattr(obj, c)
        return time_effect_fun(t, a_val, c_val)
    return time_effect
class Question(object):
    """Representation of a question.

    Required keyword arguments: ``id``, ``user_id``, ``place_id``,
    ``type``, ``inserted`` and ``options``.
    """
    def __init__(self, **kwargs):
        # pop (not get) so subclasses can consume their own kwargs afterwards
        for attr in ('id', 'user_id', 'place_id',
                     'type', 'inserted', 'options'):
            setattr(self, attr, kwargs.pop(attr))
class Answer(Question):
    """Answer to a question.

    Adds ``place_answered``, ``response_time`` and ``is_correct`` on top
    of the :class:`Question` attributes.
    """
    def __init__(self, **kwargs):
        # let Question consume its share of the keyword arguments first
        super(Answer, self).__init__(**kwargs)
        for attr in ('place_answered', 'response_time', 'is_correct'):
            setattr(self, attr, kwargs.pop(attr))
class User(object):
    """Returns a user with given ID.

    :param user_id: ID of the user.
    :type user_id: int
    """
    def __init__(self, user_id):
        self.id = user_id
        self.skill_increments = []

    @property
    def skill(self):
        """Current skill: the sum of all recorded increments."""
        return sum(self.skill_increments)

    @property
    def answers_count(self):
        """Number of answers of the user (one skill increment per answer)."""
        return len(self.skill_increments)

    def inc_skill(self, increment):
        """Records a skill increment (or decrement).

        :param increment: Increment (or decrement) of the skill.
        :type increment: int
        """
        self.skill_increments.append(increment)
class Place(object):
    """Returns a place with given ID.

    :param place_id: ID of the place.
    :type place_id: int
    """
    def __init__(self, place_id):
        self.id = place_id
        self.difficulty_increments = []

    @property
    def difficulty(self):
        """Current difficulty: the sum of all recorded increments."""
        return sum(self.difficulty_increments)

    @property
    def answers_count(self):
        """Number of answers for the place (one increment per answer)."""
        return len(self.difficulty_increments)

    def inc_difficulty(self, increment):
        """Records a difficulty increment (or decrement).

        :param increment: Increment (or decrement) of the difficulty.
        :type increment: int
        """
        self.difficulty_increments.append(increment)
class Item(object):
    """Item representation.

    :param prior: Prior skills of users and difficulties of places.
    :type prior: dictionary
    :param user_id: ID of the user.
    :type user_id: int
    :param place_id: ID of the place.
    :type place_id: int
    """
    def __init__(self, prior, user_id, place_id):
        self.prior = prior
        self.user_id = user_id
        self.place_id = place_id
        self.practices = []
        self.knowledge_increments = []

    @property
    def user(self):
        """User answering the item."""
        return self.prior.users[self.user_id]

    @property
    def place(self):
        """Place of the item being asked."""
        return self.prior.places[self.place_id]

    @property
    def knowledge(self):
        """Knowledge of the item by the user: the prior skill/difficulty
        gap plus all accumulated increments."""
        prior_gap = self.user.skill - self.place.difficulty
        return prior_gap + sum(self.knowledge_increments)

    @property
    def correct(self):
        """List of correct answers."""
        return [answer for answer in self.practices if answer.is_correct]

    @property
    def incorrect(self):
        """List of incorrect answers."""
        return [answer for answer in self.practices if not answer.is_correct]

    @property
    def last_inserted(self):
        """Time of the last answer for this item, or :obj:`None` if the
        item was never answered before."""
        return self.practices[-1].inserted if self.practices else None

    @property
    def any_incorrect(self):
        """:obj:`True` if at least one practice was answered incorrectly,
        otherwise :obj:`False`."""
        return not all(answer.is_correct for answer in self.practices)

    def get_diffs(self, current):
        """Returns the number of seconds between the *current* practice
        and each previous practice.

        :param current: Datetime of the current practice.
        """
        return [
            tools.time_diff(current, practice.inserted)
            for practice in self.practices
        ]

    def inc_knowledge(self, increment):
        """Records a knowledge increment (or decrement).

        :param increment: Increment (or decrement) of the knowledge.
        :type increment: int
        """
        self.knowledge_increments.append(increment)

    def add_practice(self, answer):
        """Registers new practice of the item.

        :param answer: Information about the answer.
        :type answer: :class:`pandas.Series` or :class:`Answer`
        """
        if isinstance(answer, pd.Series):
            practice = Answer(**answer.to_dict())
        else:
            practice = copy(answer)
        self.practices.append(practice)
class Model(object):
    """Abstract model class."""
    ABBR = None

    def respect_guess(self, prediction, options):
        """Updates prediction with respect to the guessing parameter.

        :param prediction: Prediction calculated so far.
        :type prediction: float
        :param options: Options of the multiple-choice question
            (empty or None for open questions).
        """
        if not options:
            return prediction
        guess = 1 / len(options)
        return guess + (1 - guess) * prediction

    def predict(self, question):
        """Returns probability of correct answer for given question.

        :param question: Asked question.
        :type question: :class:`pandas.Series` or :class:`Question`
        """
        raise NotImplementedError()

    def update(self, answer):
        """Performs an update of skills, difficulties or knowledge.

        :param answer: Asked question.
        :type answer: :class:`pandas.Series` or :class:`Answer`
        """
        raise NotImplementedError()

    def train(self, data):
        """Trains the model on given data set.

        :param data: Data set on which to train the model.
        :type data: :class:`pandas.DataFrame`
        """
        raise NotImplementedError()

    @classmethod
    def split_data(cls, data, ratio=0.7):
        """Splits data into a training set and a test set.

        :param data: The object containing data.
        :type data: :class:`pandas.DataFrame`.
        :param ratio: Portion of the data included in the training set
            (:obj:`0.5` distributes the data equally).
        :type ratio: float
        """
        raise NotImplementedError()
class DummyPriorModel(Model):
    """Dummy model that assigns the same fixed skill to every user and the
    same fixed difficulty to every place (zero by default).
    """
    class _User(object):
        """Stub user carrying only a fixed skill."""
        def __init__(self, skill):
            self.skill = skill
    class _Place(object):
        """Stub place carrying only a fixed difficulty."""
        def __init__(self, difficulty):
            self.difficulty = difficulty
    def __init__(self, skill=0.0, difficulty=0.0):
        # defaultdicts: any user/place id yields a stub with the fixed value
        self.users = defaultdict(lambda: self._User(skill))
        self.places = defaultdict(lambda: self._Place(difficulty))
    def update(self, answer):
        # static prior: nothing to learn from answers
        pass
    def train(self, data):
        # static prior: nothing to train
        pass
class EloModel(Model):
    """Predicts correctness of answers using Elo Rating System.
    The model is parametrized with `alpha` and `beta`. These parameters
    affect the uncertainty function.
    """
    ABBR = 'Elo'
    def __init__(self, alpha=1, beta=0.05):
        # uncertainty-function parameters (see :meth:`uncertainty`)
        self.alpha = alpha
        self.beta = beta
        self.init_model()
    def init_model(self):
        """Initializes the model's containers: per-place difficulties,
        per-user skills and a cache of per-answer predictions.
        """
        # keydefaultdict passes the missing key (the id) to the factory
        self.places = tools.keydefaultdict(Place)
        self.users = tools.keydefaultdict(User)
        self.predictions = {}
    def uncertainty(self, n):
        """Uncertainty function. The purpose is to make each update on
        the model trained with sequence of `n` answers less and less
        significant as the number of prior answers is bigger.
        :param n: Number of user's answers or total answers to a place.
        :type n: int
        """
        return self.alpha / (1 + self.beta * n)
    def predict(self, question):
        """Returns probability of correct answer for given question.
        :param question: Asked question.
        :type question: :class:`pandas.Series` or :class:`Question`
        """
        user = self.users[question.user_id]
        place = self.places[question.place_id]
        prediction = tools.sigmoid(user.skill - place.difficulty)
        # adjust for the chance of guessing in multiple-choice questions
        return self.respect_guess(prediction, question.options)
    def update(self, answer):
        """Updates skills of users and difficulties of places according
        to given answer.
        :param answer: Answer to a question.
        :type answer: :class:`pandas.Series`
        """
        user = self.users[answer.user_id]
        place = self.places[answer.place_id]
        prediction = self.predict(answer)
        # bool minus probability: positive on a surprising success,
        # negative on a surprising failure
        shift = answer.is_correct - prediction
        # each update shrinks as the user/place accumulates answers
        user.inc_skill(self.uncertainty(user.answers_count) * shift)
        place.inc_difficulty(-(self.uncertainty(place.answers_count) * shift))
        self.predictions[answer.id] = prediction
    def train(self, data):
        """Trains the model on given data set.
        :param data: Data set on which to train the model.
        :type data: :class:`pandas.DataFrame`
        """
        self.init_model()
        # Elo is trained only on each student's first answer per place
        data = tools.first_answers(data)
        # NOTE(review): DataFrame.sort was removed in pandas 0.20; on a
        # modern pandas this must be sort_values('inserted') — confirm the
        # pinned pandas version before changing.
        data.sort(['inserted']).apply(self.update, axis=1)
    @classmethod
    def split_data(cls, data, ratio=0.7):
        """Classmethod that splits data into training set and test set.
        :param data: The object containing data.
        :type data: :class:`pandas.DataFrame`.
        :param ratio: What portion of data to include in the training set
            and the test set. :obj:`0.5` means that the data will be
            distributed equaly.
        :type ratio: float
        """
        data = tools.first_answers(data)
        return tools.split_data(data, ratio=ratio)
class EloResponseTime(EloModel):
"""Extension of the Elo model that takes response time of user
into account.
"""
ABBR = 'Elo/RT'
def __init__(self, *args, **kwargs):
self.zeta = kwargs.pop('zeta', 3)
super(EloResponseTime, self).__init__(*args, **kwargs)
def update(self, answer):
"""Updates skills of users and difficulties of places according
to given answer.
:param answer: Answer to a question.
:type answer: :class:`pandas.Series` or :class:`Answer`
"""
user = self.users[answer.user_id]
place = self.places[answer.place_id]
prediction = self.predict(answer)
level = tools.automaticity_level(answer.response_time)
prob = (prediction * self.zeta | |
# astropy/visualization/lupton_rgb.py (from REMeyer/astropy)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Combine 3 images to produce a properly-scaled RGB image following Lupton et al. (2004).
The three images must be aligned and have the same pixel scale and size.
For details, see : http://adsabs.harvard.edu/abs/2004PASP..116..133L
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from . import ZScaleInterval
__all__ = ['make_lupton_rgb']
def compute_intensity(image_r, image_g=None, image_b=None):
    """
    Return a naive total intensity from the red, blue, and green intensities.

    Parameters
    ----------
    image_r : `~numpy.ndarray`
        Intensity of image to be mapped to red; or total intensity if
        ``image_g`` and ``image_b`` are None.
    image_g : `~numpy.ndarray`, optional
        Intensity of image to be mapped to green.
    image_b : `~numpy.ndarray`, optional
        Intensity of image to be mapped to blue.

    Returns
    -------
    intensity : `~numpy.ndarray`
        Mean of the three intensities (cast back to ``image_r``'s dtype),
        or ``image_r`` itself if green and blue images are not provided.
    """
    missing = (image_g is None, image_b is None)
    if any(missing):
        # either one image or all three must be supplied
        if not all(missing):
            raise ValueError("please specify either a single image "
                             "or red, green, and blue images.")
        return image_r
    mean_rgb = (image_r + image_g + image_b)/3.0
    # repack into whatever type was passed to us
    return np.asarray(mean_rgb, dtype=image_r.dtype)
class Mapping(object):
    """
    Baseclass to map red, blue, green intensities into uint8 values.

    Parameters
    ----------
    minimum : float or sequence(3)
        Intensity that should be mapped to black (a scalar or array for R, G, B).
    image : `~numpy.ndarray`, optional
        An image used to calculate some parameters of some mappings.
    """
    def __init__(self, minimum=None, image=None):
        # 255.0: top of the uint8 output range
        self._uint8Max = float(np.iinfo(np.uint8).max)
        # accept a scalar minimum by expanding it to one value per band (EAFP)
        try:
            len(minimum)
        except TypeError:
            minimum = 3*[minimum]
        if len(minimum) != 3:
            raise ValueError("please provide 1 or 3 values for minimum.")
        self.minimum = minimum
        # NOTE(review): np.asarray(None) yields a 0-d object array when no
        # image is given — presumably subclasses only use _image when an
        # image was actually passed; confirm.
        self._image = np.asarray(image)
    def make_rgb_image(self, image_r, image_g, image_b):
        """
        Convert 3 arrays, image_r, image_g, and image_b into an 8-bit RGB image.

        Parameters
        ----------
        image_r : `~numpy.ndarray`
            Image to map to red.
        image_g : `~numpy.ndarray`
            Image to map to green.
        image_b : `~numpy.ndarray`
            Image to map to blue.

        Returns
        -------
        RGBimage : `~numpy.ndarray`
            RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array.
        """
        image_r = np.asarray(image_r)
        image_g = np.asarray(image_g)
        image_b = np.asarray(image_b)
        if (image_r.shape != image_g.shape) or (image_g.shape != image_b.shape):
            msg = "The image shapes must match. r: {}, g: {} b: {}"
            raise ValueError(msg.format(image_r.shape, image_g.shape, image_b.shape))
        # stack the three uint8 planes along a new last axis
        return np.dstack(self._convert_images_to_uint8(image_r, image_g, image_b)).astype(np.uint8)
    def intensity(self, image_r, image_g, image_b):
        """
        Return the total intensity from the red, blue, and green intensities.
        This is a naive computation, and may be overridden by subclasses.

        Parameters
        ----------
        image_r : `~numpy.ndarray`
            Intensity of image to be mapped to red; or total intensity if
            ``image_g`` and ``image_b`` are None.
        image_g : `~numpy.ndarray`, optional
            Intensity of image to be mapped to green.
        image_b : `~numpy.ndarray`, optional
            Intensity of image to be mapped to blue.

        Returns
        -------
        intensity : `~numpy.ndarray`
            Total intensity from the red, blue and green intensities, or
            ``image_r`` if green and blue images are not provided.
        """
        return compute_intensity(image_r, image_g, image_b)
    def map_intensity_to_uint8(self, I):
        """
        Return an array which, when multiplied by an image, returns that image
        mapped to the range of a uint8, [0, 255] (but not converted to uint8).
        The intensity is assumed to have had minimum subtracted (as that can be
        done per-band).

        Parameters
        ----------
        I : `~numpy.ndarray`
            Intensity to be mapped.

        Returns
        -------
        mapped_I : `~numpy.ndarray`
            ``I`` mapped to uint8
        """
        with np.errstate(invalid='ignore', divide='ignore'):
            return np.clip(I, 0, self._uint8Max)
    def _convert_images_to_uint8(self, image_r, image_g, image_b):
        """Use the mapping to convert images image_r, image_g, and image_b to a triplet of uint8 images"""
        image_r = image_r - self.minimum[0]  # n.b. makes copy
        image_g = image_g - self.minimum[1]
        image_b = image_b - self.minimum[2]
        # scale factor derived from the combined intensity
        fac = self.map_intensity_to_uint8(self.intensity(image_r, image_g, image_b))
        image_rgb = [image_r, image_g, image_b]
        for c in image_rgb:
            c *= fac  # in-place: scales the arrays held in image_rgb
            c[c < 0] = 0  # individual bands can still be < 0, even if fac isn't
        pixmax = self._uint8Max
        # aliases of the pre-rescale bands used in the comparisons below
        # (the loop rebinds c to new arrays, so these keep the old values)
        r0, g0, b0 = image_rgb
        with np.errstate(invalid='ignore', divide='ignore'):  # n.b. np.where can't and doesn't short-circuit
            for i, c in enumerate(image_rgb):
                # rescale each band by the dominant band so saturated
                # pixels keep their hue instead of clipping to white
                c = np.where(r0 > g0,
                             np.where(r0 > b0,
                                      np.where(r0 >= pixmax, c*pixmax/r0, c),
                                      np.where(b0 >= pixmax, c*pixmax/b0, c)),
                             np.where(g0 > b0,
                                      np.where(g0 >= pixmax, c*pixmax/g0, c),
                                      np.where(b0 >= pixmax, c*pixmax/b0, c))).astype(np.uint8)
                # NOTE(review): after astype(np.uint8) values cannot exceed
                # 255, so this looks like a no-op — confirm before removing
                c[c > pixmax] = pixmax
                image_rgb[i] = c
        return image_rgb
class LinearMapping(Mapping):
    """
    A linear map of red, blue, green intensities into uint8 values.
    A linear stretch from [minimum, maximum].
    If one or both are omitted use image min and/or max to set them.

    Parameters
    ----------
    minimum : float
        Intensity that should be mapped to black (a scalar or array for R, G, B).
    maximum : float
        Intensity that should be mapped to white (a scalar).
    """
    def __init__(self, minimum=None, maximum=None, image=None):
        # derive missing bounds from the image, which must then be provided
        if minimum is None or maximum is None:
            if image is None:
                raise ValueError("you must provide an image if you don't "
                                 "set both minimum and maximum")
            minimum = image.min() if minimum is None else minimum
            maximum = image.max() if maximum is None else maximum
        Mapping.__init__(self, minimum=minimum, image=image)
        self.maximum = maximum
        if maximum is None:
            self._range = None
        elif maximum == minimum:
            raise ValueError("minimum and maximum values must not be equal")
        else:
            self._range = float(maximum - minimum)

    def map_intensity_to_uint8(self, I):
        """Linearly rescale intensity ``I`` onto the uint8 range."""
        # np.where evaluates both branches, hence the suppressed warnings
        with np.errstate(invalid='ignore', divide='ignore'):
            saturated = np.where(I >= self._range, self._uint8Max/I, self._uint8Max/self._range)
            return np.where(I <= 0, 0, saturated)
class AsinhMapping(Mapping):
    """
    A mapping for an asinh stretch (preserving colours independent of brightness)

    x = asinh(Q (I - minimum)/stretch)/Q

    This reduces to a linear stretch if Q == 0

    See http://adsabs.harvard.edu/abs/2004PASP..116..133L

    Parameters
    ----------
    minimum : float
        Intensity that should be mapped to black (a scalar or array for R, G, B).
    stretch : float
        The linear stretch of the image.
    Q : float
        The asinh softening parameter.
    """
    def __init__(self, minimum, stretch, Q=8):
        Mapping.__init__(self, minimum)
        # 32-bit floating point machine epsilon; sys.float_info.epsilon is the 64-bit one
        epsilon = 1.0/2**23
        if abs(Q) < epsilon:
            # too close to zero for the slope computation below
            Q = 0.1
        elif Q > 1e10:
            Q = 1e10
        # slope estimated at I = frac * stretch
        frac = 0.1
        self._slope = frac*self._uint8Max/np.arcsinh(frac*Q)
        self._soften = Q/float(stretch)

    def map_intensity_to_uint8(self, I):
        """Apply the asinh stretch to intensity ``I``."""
        # np.where evaluates both branches, hence the suppressed warnings
        with np.errstate(invalid='ignore', divide='ignore'):
            return np.where(I <= 0, 0, np.arcsinh(I*self._soften)*self._slope/I)
class AsinhZScaleMapping(AsinhMapping):
    """
    A mapping for an asinh stretch, estimating the linear stretch by zscale.

    x = asinh(Q (I - z1)/(z2 - z1))/Q

    Parameters
    ----------
    image1 : `~numpy.ndarray` or a list of arrays
        The image to analyse, or a list of 3 images to be converted to
        an intensity image.
    image2 : `~numpy.ndarray`, optional
        the second image to analyse (must be specified with image3).
    image3 : `~numpy.ndarray`, optional
        the third image to analyse (must be specified with image2).
    Q : float, optional
        The asinh softening parameter. Default is 8.
    pedestal : float or sequence(3), optional
        The value, or array of 3 values, to subtract from the images; or None.

    Notes
    -----
    pedestal, if not None, is removed from the images when calculating the
    zscale stretch, and added back into Mapping.minimum[]
    """
    def __init__(self, image1, image2=None, image3=None, Q=8, pedestal=None):
        # either a single intensity image or all three bands must be given
        if image2 is None or image3 is None:
            if not (image2 is None and image3 is None):
                raise ValueError("please specify either a single image "
                                 "or three images.")
            image = [image1]
        else:
            image = [image1, image2, image3]
        if pedestal is not None:
            # accept a scalar pedestal by expanding it to one value per band
            try:
                len(pedestal)
            except TypeError:
                pedestal = 3*[pedestal]
            if len(pedestal) != 3:
                raise ValueError("please provide 1 or 3 pedestals.")
            image = list(image)  # needs to be mutable
            for i, im in enumerate(image):
                if pedestal[i] != 0.0:
                    image[i] = im - pedestal[i]  # n.b. a copy
        else:
            pedestal = len(image)*[0.0]
        # collapse the band(s) into a single intensity image for zscale
        image = compute_intensity(*image)
        zscale_limits = ZScaleInterval().get_limits(image)
        # a linear zscale mapping supplies the (z1, z2) stretch estimate
        zscale = LinearMapping(*zscale_limits, image=image)
        stretch = zscale.maximum - zscale.minimum[0]  # zscale.minimum is always a triple
        minimum = zscale.minimum
        # add the pedestals back so minimum refers to the original images
        for i, level in enumerate(pedestal):
            minimum[i] += level
        AsinhMapping.__init__(self, minimum, stretch, Q)
        self._image = image
def make_lupton_rgb(image_r, image_g, image_b, minimum=0, | |
import os
import math
import random
import time
from code.menu.menu import Menu
from code.tools.eventqueue import EventQueue
from code.tools.xml import XMLParser
from code.utils.common import coalesce, intersect, offset_rect, log, log2, xml_encode, xml_decode, translate_rgb_to_string
from code.constants.common import SCREEN_WIDTH, SCREEN_HEIGHT, PAUSE_MENU_X, PAUSE_MENU_Y, PAUSE_MENU_WIDTH, PAUSE_MENU_HEIGHT, MODE_GAME, TILE_WIDTH, TILE_HEIGHT, DIR_UP, DIR_RIGHT, DIR_DOWN, DIR_LEFT, SPLASH_MODE_GREYSCALE_ANIMATED
from code.constants.states import STATUS_ACTIVE, STATUS_INACTIVE, GAME_STATE_ACTIVE, GAME_STATE_NOT_READY
from code.constants.newsfeeder import *
class NetSessionBrowser(Menu):
    def __init__(self):
        """Create the browser and immediately queue a "build" event so the
        session list page is constructed on the first event pass."""
        Menu.__init__(self)
        # Raw http response data that contains all active sessions
        self.http_data = ""
        # Fire a build event
        self.fire_event("build")
# Configure the net session browser; we have to feed it the raw session data it'll use to populate the session view
def configure(self, options):
# Common menu configuration
self.__std_configure__(options)
if ( "http-data" in options ):
self.http_data = options["http-data"]
# For chaining
return self
def handle_event(self, event, control_center, universe):#params, user_input, network_controller, universe, active_map, session, widget_dispatcher, text_renderer, save_controller, refresh = False):
# Events that result from event handling
results = EventQueue()
results.inject_event(event)
# Convenience
(action, params) = (
event.get_action(),
event.get_params()
)
log2( action, params )
# Build root menu
if ( action == "build" ):
results.append(
self.handle_build_event(event, control_center, universe)
)
elif ( action == "show:keyboard.password" ):
results.append(
self.handle_show_keyboard_password_event(event, control_center, universe)
)
elif ( action == "submit:keyboard.password" ):
results.append(
self.handle_submit_keyboard_password_event(event, control_center, universe)
)
elif ( action == "finish:submit:keyboard.password" ):
results.append(
self.handle_finish_submit_keyboard_password_event(event, control_center, universe)
)
elif ( action == "show:message" ):
results.append(
self.handle_show_message_event(event, control_center, universe)
)
elif ( action == "back" ):
results.append(
self.handle_back_event(event, control_center, universe)
)
elif ( action == "page-back" ):
results.append(
self.handle_page_back_event(event, control_center, universe)
)
# Resume puzzle
elif (action == "return-to-lobby"):
results.append(
self.handle_return_to_lobby_event(event, control_center, universe)
)
# Retry puzzle - commit
elif (action == "fwd.finish:return-to-lobby"):
results.append(
self.handle_fwd_finish_return_to_lobby_event(event, control_center, universe)
)
elif ( action == "leave-game" ):
results.append(
self.handle_leave_game_event(event, control_center, universe)
)
elif ( action == "fwd:finish:leave-game" ):
results.append(
self.handle_fwd_finish_leave_game_event(event, control_center, universe)
)
# Discard this menu
elif ( action == "kill" ):
results.append(
self.handle_kill_event(event, control_center, universe)
)
# Return events
return results
# Build the puzzle pause menu
def handle_build_event(self, event, control_center, universe):
# Events that result from handling this event (on-birth events, etc.)
results = EventQueue()
# Convenience
params = event.get_params()
# Fetch widget dispatcher
widget_dispatcher = control_center.get_widget_dispatcher()
# Compile it into a node
#print self.http_data
# Scope
node = None
error_data = None
# Try to get the sessions node
try:
# Look for sessions node
node = XMLParser().create_node_from_xml(self.http_data).find_node_by_tag("sessions")
# If we did not find the <sessions /> node, we did not receive the
# expected xml markup. Throw an exception to force
# a fallback to empty sessions node.
if (node == None):
raise Exception("Could not find expected xml markup.")
# If failure occurs, emulate an empty response.
# Perhaps the server is not available?
except:
node = XMLParser().create_node_from_xml("<sessions />").find_node_by_tag("sessions")
error_data = True # Forces session browser to list a vague error message
# Count the number of active sessions
active_session_count = len( node.get_nodes_by_tag("session") )
# Fetch the template we need (i.e. normal or "no games found" version).
template = self.fetch_xml_template( "mainmenu.root.coop.browser", version = "normal" if (active_session_count > 0) else "no-games" ).add_parameters({
"@x": xml_encode( "%d" % PAUSE_MENU_X ),
"@y": xml_encode( "%d" % PAUSE_MENU_Y ),
"@width": xml_encode( "%d" % PAUSE_MENU_WIDTH ),
"@height": xml_encode( "%d" % PAUSE_MENU_HEIGHT ),
"@error": xml_encode( "%s" % ( control_center.get_localization_controller().get_label("no-sessions:message") if (error_data == None) else control_center.get_localization_controller().get_label("invalid-sessions-data:message") ) )
})
# Compile template
root = template.compile_node_by_id("menu")
for i in range(0, 1):
# Loop through any active session, adding it to the available list...
for ref_session in node.get_nodes_by_tag("session"):
# Does this session require a password?
requires_password = ( ref_session.find_node_by_tag("requires-password").innerText == "yes" )
# Fetch insert template
template = self.fetch_xml_template( "mainmenu.root.coop.browser.insert", version = "public" if (not requires_password) else "private" ).add_parameters({
"@session-id": xml_encode( ref_session.find_node_by_tag("session-id").innerText ),
"@server-name": xml_encode( ref_session.find_node_by_tag("server-name").innerText ),
"@universe-name": xml_encode( ref_session.find_node_by_tag("universe-name").innerText ),
"@universe-version": xml_encode( ref_session.find_node_by_tag("universe-version").innerText ),
"@universe-title": xml_encode( ref_session.find_node_by_tag("universe-title").innerText ),
"@player-count": xml_encode( ref_session.find_node_by_tag("player-count").innerText ),
"@current-level": xml_encode( ref_session.find_node_by_tag("current-level").innerText ),
"@max-players": xml_encode( ref_session.find_node_by_tag("max-players").innerText ),
"@game-type": xml_encode( ref_session.find_node_by_tag("game-type").innerText )
})
# Inject compiled insert
root.find_node_by_id("ext.sessions").add_node(
template.compile_node_by_id("insert")
)
# Create widget
widget = control_center.get_widget_dispatcher().convert_node_to_widget(root, control_center, universe = None) # (?) no universe needed?
widget.set_id("coop-session-browser")
# Add the page
self.add_widget_via_event(widget, event)
# Return events
return results
    # If the player tries to join a password-protected (i.e. private) game, we'll give them
    # a keyboard so that they can confirm the password.
    def handle_show_keyboard_password_event(self, event, control_center, universe):
        """Show the password-entry keyboard page for a private session."""
        # Fetch keyboard template
        template = self.fetch_xml_template("mainmenu.root.coop.browser.keyboard").add_parameters({
            "@x": xml_encode( "%d" % int(SCREEN_WIDTH / 2) ),
            "@y": xml_encode( "%d" % (SCREEN_HEIGHT - PAUSE_MENU_Y) ),
            "@width": xml_encode( "%d" % PAUSE_MENU_WIDTH ),
            "@height": xml_encode( "%d" % SCREEN_HEIGHT )
        })
        # Compile template
        root = template.compile_node_by_id("keyboard")
        # Convert to widget
        widget = control_center.get_widget_dispatcher().convert_node_to_widget(root, control_center, universe)
        widget.set_id("session-password-keyboard")
        # The new keyboard needs to "remember" the session id we clicked on, allowing us to recall
        # it later on when the user submits the keyboard data.
        widget.set_attribute( "-session-id", event.get_params()["session-id"] )
        # Add the new page to this menu (non-exclusive: browser stays beneath)
        self.add_widget_via_event(widget, event, exclusive = False)
        # No event worth returning
        return EventQueue()
# Submit the keyboard data. Attempt to join the game at this point...
def handle_submit_keyboard_password_event(self, event, control_center, universe):
# Hide the keyboard, raising a finish event when it's gone
self.get_active_page().hide(
on_complete = "finish:submit:keyboard.password"
)
# Once the keyboard disappears, we'll fire off the join request
def handle_finish_submit_keyboard_password_event(self, event, control_center, universe):
# Events that result from handling this event
results = EventQueue()
# Get a handle to the keyboard
keyboard = self.get_active_page()
# Ge tthe password the user entered
password = keyboard.get_value()
# Recall the session id the player selected; it's stored in the keyboard at the moment.
session_id = keyboard.get_attribute("-session-id")
# Now we can get rid of the keyboard...
self.page_back(1)
# Add a new join game event
results.add(
action = "app:join-coop-session",
params = {
"session-id": session_id,
"session-password": password
}
)
# Return resultant events
return results
    # Show a message to the user (typically an error message, e.g. "cannot load that level set")
    def handle_show_message_event(self, event, control_center, universe):
        """Build and show a message page using params["message"]."""
        # Resultant events
        results = EventQueue()
        # Convenience
        params = event.get_params()
        # Fetch message template
        template = self.fetch_xml_template("mainmenu.root.coop.browser.message").add_parameters({
            "@x": xml_encode( "%d" % int(SCREEN_WIDTH / 2) ),
            "@y": xml_encode( "%d" % (SCREEN_HEIGHT - PAUSE_MENU_Y) ),
            "@width": xml_encode( "%d" % int(PAUSE_MENU_WIDTH / 2) ),
            "@height": xml_encode( "%d" % int(PAUSE_MENU_HEIGHT / 2) ),
            "@message": xml_encode( "%s" % params["message"] )
        })
        # Compile template
        root = template.compile_node_by_id("message")
        # Convert to widget
        widget = control_center.get_widget_dispatcher().convert_node_to_widget(root, control_center, universe)
        widget.set_id("session-browser-message")
        # Add the new page to this menu (non-exclusive: browser stays beneath)
        self.add_widget_via_event(widget, event, exclusive = False)
        # Return events
        return results
# HIde the current page, firing a page-back event when it's gone...
def handle_back_event(self, event, control_center, universe):
# Hide active page
self.get_active_page().hide(
on_complete = "page-back" # Page back once it's gone
)
# No event worth returning
return EventQueue()
# Page back by one page
def handle_page_back_event(self, event, control_center, universe):
# Page back by 1 page
self.page_back(1)
# Return no event
return EventQueue()
# Resume the co-op session
def handle_return_to_lobby_event(self, event, control_center, universe):
# Events that result from handling this event (on-birth events, etc.)
results = EventQueue()
# Convenience
params = event.get_params()
# Fetch the window controller
window_controller = control_center.get_window_controller()
# Hook into it to receive forwarded events
window_controller.hook(self)
# App fade, after which we'll restart level
window_controller.fade_out(
on_complete = "fwd.finish:return-to-lobby"
)
# Return events
return results
# (Forwarded) Restart the level, "returning" to the pregame lobby
def handle_fwd_finish_return_to_lobby_event(self, event, control_center, universe):
# Events that result from handling this event (on-birth events, etc.)
results = EventQueue()
# Convenience
params = event.get_params()
# Fetch the window controller
window_controller = control_center.get_window_controller()
# Unhook; we don't need any more forwarded events
window_controller.unhook(self)
# (?) Hack in the current map's name as our transition destination
universe.set_session_variable("net.transition.target", universe.get_active_map().name)
# (?) Let's just fake this for now. Hack.
universe.handle_fwd_net_transition_finalize_event(event, control_center, universe)
# Return events
return results
# Leave the co-op session
def handle_leave_game_event(self, event, control_center, universe):
# Events that result from handling this event | |
else bed_data["blocks"]
)
exons_pos = {} # contain exon_num : positions
s_sites = []
exon_flanks = {}
for num, exon in enumerate(exons_raw):
# start, end if strand == + end, start otherwise
# need to know start and end to extract from 2bit file
exons_pos[num] = (
(int(exon[0]), int(exon[1]))
if bed_data["strand"]
else (int(exon[1]), int(exon[0]))
)
max_exon_num = max(exons_pos.keys())
_all_positions = sorted(flatten(exons_pos.values()))
gene_borders = {_all_positions[0], _all_positions[-1]}
# extract sequences
exons_seq = {} # exon number: sequence dict
target_genome = TwoBitFile(get_2bit_path(t_db)) # use 2bitreader library
get_chr = bed_data["chrom"]
try:
chrom_seq = target_genome[get_chr]
except KeyError:
chrom_seq = [] # to suppress PyCharm analyzer
die(f"Error! Cannot find chrom {get_chr} in 2bit file {t_db}")
verbose("\nExons sequences ####\n")
for num, pos in exons_pos.items():
is_first_exon = num == 0
is_last_exon = num == max_exon_num
# for twoBitToFa start must be < end
# determine search start and end
# do not subtract/add SS_SIZE if gene border: no splice sites then
min_pos = min(pos)
max_pos = max(pos)
start = min_pos - SS_SIZE if min_pos not in gene_borders else min_pos
end = max_pos + SS_SIZE if max_pos not in gene_borders else max_pos
# get exon 10 bp flanks:
left_brd_ = min_pos - EXON_SEQ_FLANK if min_pos - EXON_SEQ_FLANK > 0 else 0
left_flank_coord = (left_brd_, min_pos)
right_flank_coord = (max_pos, max_pos + EXON_SEQ_FLANK)
left_flank = chrom_seq[left_flank_coord[0] : left_flank_coord[1]].upper()
right_flank = chrom_seq[right_flank_coord[0] : right_flank_coord[1]].upper()
# correct for strand:
left_flank = left_flank if bed_data["strand"] else revert(left_flank)
right_flank = right_flank if bed_data["strand"] else revert(right_flank)
if not bed_data["strand"]:
left_flank, right_flank = right_flank, left_flank
# placeholder in case we coudld not extract flanks
left_flank = left_flank if len(left_flank) > 0 else "X"
right_flank = right_flank if len(right_flank) > 0 else "X"
exon_seq_raw = chrom_seq[start:end].upper()
# revert if negative strand
exon_seq_w_ss = revert(exon_seq_raw) if not bed_data["strand"] else exon_seq_raw
# trim splice sites
if not is_first_exon and not is_last_exon:
# both splice sites are here
exon_seq = exon_seq_w_ss[SS_SIZE:-SS_SIZE]
acc_ = exon_seq_w_ss[:SS_SIZE]
don_ = exon_seq_w_ss[-SS_SIZE:]
elif is_first_exon and is_last_exon:
# no splice sites
exon_seq = exon_seq_w_ss
acc_, don_ = "NN", "NN"
elif is_first_exon:
exon_seq = exon_seq_w_ss[:-SS_SIZE]
acc_ = "NN"
don_ = exon_seq_w_ss[-SS_SIZE:]
elif is_last_exon:
exon_seq = exon_seq_w_ss[SS_SIZE:]
acc_ = exon_seq_w_ss[:SS_SIZE]
don_ = "NN"
else:
raise RuntimeError("Unreachable branch reached")
s_sites.append(acc_)
s_sites.append(don_)
exons_seq[num] = exon_seq
exon_flanks[num] = {"L": left_flank, "R": right_flank}
verbose(f"Exon {num} in the range {pos}; sequence:\n{exon_seq}")
return exons_pos, exons_seq, s_sites, exon_flanks
def check_ref_exons(exon_seqs, mask_stops):
    """Check that the reference coding sequence is well-formed.

    The sequence should start with ATG and end with a stop codon.
    In-frame stop codons are masked with NNN when mask_stops is True;
    otherwise the program aborts after reporting ALL of them.

    Returns a tuple of:
        dict: exon number -> (possibly masked) exon sequence
        set: codon indexes holding TGA (possible selenocysteine codons)
    """
    sec_codons = set()  # in case there are TGA codons in the ref seq -> collect them
    gene_seq = "".join([exon_seqs[i] for i in range(len(exon_seqs.keys()))])
    codons = parts(gene_seq, n=3)  # split a seq of letters in chunks of len == 3
    # These sanity checks are independent: report each problem separately.
    # (Previously an `elif` skipped the stop-codon check if the ATG check failed.)
    if codons[0] != "ATG":
        eprint("Input is corrupted! Reference sequence should start with ATG!")
    if codons[-1] not in STOPS:
        eprint("Input is corrupted! Reference sequence should end with a stop codon!")
    stop_codons = [(n, c) for n, c in enumerate(codons[:-1]) if c in STOPS]
    if len(stop_codons) == 0:  # no stop codons -> nothing else to do
        return exon_seqs, set()
    # there are stop codons in reference sequence:
    eprint("Warning! There are inframe stop codons!")
    for stop in stop_codons:
        eprint(f"Codon num {stop[0] + 1} - {stop[1]}")
        if mask_stops:
            # rewrite the codon as NNN, otherwise CESAR will show an error
            codons[stop[0]] = NNN_CODON
        if stop[1] == "TGA":
            # maybe a selenocysteine codon
            sec_codons.add(stop[0])
    if not mask_stops:
        # previously this aborted inside the loop, reporting only the first stop
        eprint(">>>STOP_CODON>>>")
        die("Abort, there are inframe stop codons.", 0)
    # re-split the masked sequence back into per-exon chunks of the same lengths
    safe_seq = "".join(codons)
    stop_masked = {}
    prev_index = 0
    for num, exon_seq in exon_seqs.items():
        exon_len = len(exon_seq)
        stop_masked[num] = safe_seq[prev_index : prev_index + exon_len]
        prev_index += exon_len
    return stop_masked, sec_codons
def prepare_exons_for_cesar(exon_seqs):
    """Format reference exons the way CESAR expects.

    CESAR requires split codons (codon bases shared between neighbouring
    exons) to be written in lowercase, e.g.:
    ATGTTTa ctGTAAAGTGCc ttAGTTGA
    """
    verbose("prepare_exons_for_cesar")
    formatted = {}  # exon number -> CESAR-ready sequence
    carry = 0  # bases of a split codon carried over from the previous exon
    for num, seq in exon_seqs.items():
        # number of bases at the right edge that belong to a split codon
        tail = (len(seq) - carry) % 3
        # lowercase the carried-over bases on the left (a carry of 3 means none)
        if carry != 3:
            left_fixed = seq[:carry].lower() + seq[carry:].upper()
        else:
            left_fixed = seq
        # how many bases the NEXT exon must lowercase on its left edge
        carry = 3 - tail
        # lowercase the split-codon bases on the right edge, if any
        if tail != 0:
            both_fixed = left_fixed[:-tail] + left_fixed[-tail:].lower()
        else:
            both_fixed = left_fixed
        formatted[num] = both_fixed
    return formatted
def get_chain(chain_file, chain_id):
    """Extract a single chain (as text) from a chain file.

    The container format is chosen by file extension:
        .bst  -> indexed BDB file, read via chain_extract_id()
        .gz   -> gzipped chain file, streamed through chain_filter_by_id
        other -> plain chain file, filtered with chain_filter_by_id
    Dies with an error message if the extraction command fails.
    """
    chain = None  # to calm IDE down
    if chain_file.endswith(".bst"):
        # we have bdb file; extract with BDB extractor
        return chain_extract_id(chain_file, chain_id)
    elif chain_file.endswith(".gz"):  # a gzipped chain file was given
        # gzip and redirect stream to chain_filter_by_id binary
        extract_by_id_cmd = (
            f"gzip -dc {chain_file} | ./modules/chain_filter_by_id stdin {chain_id}"
        )
    else:  # just a chain file: the same as above, but without the gzip stage
        extract_by_id_cmd = f"./modules/chain_filter_by_id {chain_file} {chain_id}"
    try:  # check that output is OK
        chain = subprocess.check_output(extract_by_id_cmd, shell=True).decode(
            "utf-8"
        )
    except subprocess.CalledProcessError:
        # die if the command died
        # (the message was previously missing its f-prefix, so the command
        # placeholder was printed literally)
        die(
            f"Error! Process {extract_by_id_cmd} died! Please check if input data is correct",
            1,
        )
    return chain
def range_corrector(g_range):
    """Normalise a "chrom:start-end" genomic range so that start <= end."""
    chrom, coords = g_range.split(":")
    left, right = (int(x) for x in coords.split("-"))
    # already ordered (strictly ascending): return the string untouched
    if left < right:
        return g_range
    # otherwise swap the two coordinates
    return f"{chrom}:{right}-{left}"
def chain_cut(chain_str, gene_range, gene_flank, extra_flank=0):
"""Call chain_cut binary.
Project reference gene coordinates to query through a chain.
Also add flanks if shift is > 0.
"""
# need to get genomic region for the gene
# also need to translate python data types to C
# to call the shared library; I do it 2 times here
# for shift = 0 and shifts = 2 (add flanks around gene)
c_chain = ctypes.c_char_p(chain_str.encode())
c_shift_2 = ctypes.c_int(2)
c_shift_0 = ctypes.c_int(0)
granges_num = 1
c_granges_num = ctypes.c_int(granges_num) # we need only one grange to analyze
granges_arr = (ctypes.c_char_p * (granges_num + 1))() # granges_num + 1
granges_bytes = [gene_range.encode("utf-8")]
# need to do this tricks to pass strings array to C
granges_arr[:-1] = granges_bytes
granges_arr[granges_num] = None
raw_ch_conv_s2 = ch_lib.chain_coords_converter(
c_chain, c_shift_2, c_granges_num, granges_arr
)
chain_coords_conv_out_s2 = [] # keep lines here
# convert C output to python-readable type
for i in range(granges_num + 1):
chain_coords_conv_out_s2.append(raw_ch_conv_s2[i].decode("utf-8"))
# chain', 'chr5', '+', '137889395', '148245211', 'chr18', '+', '34409342', '44120958
chain_data = chain_coords_conv_out_s2[0].split(" ")
t_strand = True if chain_data[2] == "+" else False
q_strand = True if chain_data[7] == "+" else False
t_size = int(chain_data[3])
q_size = int(chain_data[8])
# re-define arrays to avoid segfault
c_chain = ctypes.c_char_p(chain_str.encode())
granges_arr = (ctypes.c_char_p * (granges_num + 1))() # granges_num + 1
granges_bytes = [gene_range.encode("utf-8")]
granges_arr[:-1] = granges_bytes
granges_arr[granges_num] = None
raw_ch_conv_s0 = ch_lib.chain_coords_converter(
c_chain, c_shift_0, c_granges_num, granges_arr
)
chain_coords_conv_out_s0 = [] # keep lines here
# convert C output to python-readable type
for i in range(granges_num + 1):
chain_coords_conv_out_s0.append(raw_ch_conv_s0[i].decode("utf-8"))
# another approach to detect range
# sometimes blocks go so far
# ------------------genegene-------------------
# block-------------blockblock------------block
# to avoid very huge query sequences program controls it's size
search_region_shift_str = range_corrector(
chain_coords_conv_out_s2[1].split("\t")[1]
)
search_region_abs_str = range_corrector(chain_coords_conv_out_s0[1].split("\t")[1])
chrom = search_region_shift_str.split(":")[0]
search_reg_shift = [
int(x) for x in search_region_shift_str.split(":")[1].split("-")
]
search_reg_abs = [int(x) for x in search_region_abs_str.split(":")[1].split("-")]
search_reg_flanked = [
search_reg_abs[0] - gene_flank,
search_reg_abs[1] + gene_flank,
]
| |
<gh_stars>1-10
import time
import pkgutil
import json
import ssl
import asyncio
import websockets
import time
from web3.auto import w3
from eth_account.messages import encode_defunct
from web3 import Web3
class WsAPI:
    """Asynchronous websocket client for the dex.blue exchange API.

    Manages the websocket connection on the asyncio event loop, keeps a
    registry of callbacks keyed by server event name or request id ("rid"),
    performs optional account/delegate authentication by signing a nonce,
    and exposes all configured client methods through ``self.methods``.
    """
    def __init__(self, endpoint="wss://api.dex.blue/ws/v1", network="mainnet", web3Provider="https://mainnet.infura.io/", account=None, delegate=None, autoAuth=True):
        """Store configuration and immediately schedule the connection."""
        self.utils = Utils()
        self.methods = Methods(self, self.utils)
        self._ws = None  # live websocket connection; set in _connect()
        self.account = account  # account private key (optional)
        self.delegate = delegate  # delegate private key (optional)
        self.websocketAddress = endpoint
        self.network = network  # may be overwritten by the server "config" message
        self.web3Provider = web3Provider
        self.contractAddress = None  # filled from the server "config" message
        self.listed = None  # listed tokens/markets; filled from the "listed" message
        self.tokensByContract = None  # contract address -> token dict
        self._callbacks = {}  # event name or rid (as str) -> list of callback dicts
        self._ridCount = 0  # monotonically increasing request id counter
        self._stop = False  # when True the listener loop stops
        self.connect(account, delegate, autoAuth)
    def connect(self, account=None, delegate=None, autoAuth=True):
        """Schedule connection (and optional authentication) on the event loop."""
        asyncio.ensure_future(
            self._starter(account, delegate, autoAuth))
    def close(self):
        """Schedule closing of the websocket connection."""
        asyncio.ensure_future(self._close())
    def authenticate(self, privateKey, callback=None, *args, **kwargs):
        """Authenticate as an account by signing "Authenticate <nonce>" with the private key."""
        nonce = self.utils.getNonce()
        msg = f"Authenticate {nonce}"
        message = encode_defunct(text=msg)
        signedMessage = w3.eth.account.sign_message(
            message, private_key=privateKey)
        packet = [{"c": "authenticate",
                   "message": msg,
                   "nonce": nonce,
                   "signature": Web3.toHex(signedMessage["signature"])
                   }]
        if callback:
            # register the callback under a fresh request id
            rid = self._newRID()
            self._setCallback(rid, callback, *args, **kwargs)
            packet[0]["rid"] = rid
        self._sendWrapper(packet)
    def authenticateDelegate(self, privateKey, callback=None, *args, **kwargs):
        """Authenticate with a delegate key; same flow as authenticate()."""
        nonce = self.utils.getNonce()
        msg = f"Authenticate {nonce}"
        message = encode_defunct(text=msg)
        signedMessage = w3.eth.account.sign_message(
            message, private_key=privateKey)
        packet = [{"c": "authenticateDelegate",
                   "message": msg,
                   "nonce": nonce,
                   "signature": Web3.toHex(signedMessage["signature"])
                   }]
        if callback:
            rid = self._newRID()
            self._setCallback(rid, callback, *args, **kwargs)
            packet[0]["rid"] = rid
        self._sendWrapper(packet)
    def _cbPlaceOrder(self, packet, parameters, callback=None, *args, **kwargs):
        # retry entry point used once the "listed" data has arrived
        self.placeOrder(parameters, callback, *args, **kwargs)
    def placeOrder(self, parameters, callback=None, *args, **kwargs):
        """Normalise order parameters, sign the order hash, and send "placeOrder".

        Accepts either a "market" symbol or a buyToken/sellToken pair
        (by symbol or contract address), plus amount/rate or
        buyAmount/sellAmount.  Mutates ``parameters`` in place while
        normalising.  If the listed markets are not known yet, requests
        them first and retries via _cbPlaceOrder.
        """
        if not self.contractAddress:
            self.utils.error("Contract Adress not set. Not connected yet?")
        if not self.listed:
            # fetch listed tokens/markets first, then retry this call
            self.methods.getListed(None, self._cbPlaceOrder, parameters, callback)
            return
        if "market" in parameters:
            # resolve the market symbol to the full market dict
            cm = parameters["market"] = self.listed["listed"]["markets"][parameters["market"]]
            if not cm:
                self.utils.error("Unkown Token")
        else:
            # resolve buy/sell tokens by contract address or by symbol
            if not "buyToken" in parameters or not "sellToken" in parameters:
                self.utils.error("Please provide either the market or the buyToken and sellToken parameters")
            if parameters["buyToken"] in self.tokensByContract:
                buyToken = self.tokensByContract[parameters["buyToken"]]
            elif parameters["buyToken"] in self.listed["listed"]["tokens"]:
                buyToken = self.listed["listed"]["tokens"][parameters["buyToken"]]
            else:
                self.utils.error("Unknown buy token")
            if parameters["sellToken"] in self.tokensByContract:
                sellToken = self.tokensByContract[parameters["sellToken"]]
            elif parameters["sellToken"] in self.listed["listed"]["tokens"]:
                sellToken = self.listed["listed"]["tokens"][parameters["sellToken"]]
            else:
                self.utils.error("Unknown sell token")
            if not buyToken or not sellToken:
                self.utils.error("Unknown token")
            parameters["sellToken"] = sellToken["contract"]
            parameters["buyToken"] = buyToken["contract"]
            # a market may exist for either token ordering; that ordering decides the side
            buy = buyToken["symbol"] + sellToken["symbol"] in self.listed["listed"]["markets"]
            sell = sellToken["symbol"] + buyToken["symbol"] in self.listed["listed"]["markets"]
            if buy:
                parameters["market"] = self.listed["listed"]["markets"][buyToken["symbol"] + sellToken["symbol"]]
                parameters["direction"] = "buy"
            elif sell:
                parameters["market"] = self.listed["listed"]["markets"][sellToken["symbol"] + buyToken["symbol"]]
                parameters["direction"] = "sell"
            else:
                self.utils.error("Unknown market")
        if "amount" in parameters:
            # scale the human-readable amount by the traded token's decimals
            parameters["amount"] = parameters["amount"] * pow(10, self.listed["listed"]["tokens"][parameters["market"]["traded"]]["decimals"])
        if not "direction" in parameters:
            if "side" in parameters:
                parameters["direction"] = parameters["side"]
            elif "amount" in parameters:
                # a negative amount means "sell"
                parameters["direction"] = "buy" if parameters["amount"] > 0 else "sell"
        if "amount" in parameters and parameters["amount"] < 0:
            parameters["amount"] = parameters["amount"] * -1
        if not "buyToken" in parameters or not "sellToken" in parameters:
            # derive the token contracts from the market dict and the direction
            if parameters["direction"] == "buy":
                parameters["buyToken"] = self.listed["listed"]["tokens"][parameters["market"]["traded"]]["contract"]
                parameters["sellToken"] = self.listed["listed"]["tokens"][parameters["market"]["quote"]]["contract"]
            else:
                parameters["buyToken"] = self.listed["listed"]["tokens"][parameters["market"]["quote"]]["contract"]
                parameters["sellToken"] = self.listed["listed"]["tokens"][parameters["market"]["traded"]]["contract"]
        if not "buyAmount" in parameters or not "sellAmount" in parameters:
            if not "amount" in parameters or not "rate" in parameters:
                self.utils.error("Please the amount and rate or buyAmount and sellAmount parameters")
            # convert amount/rate into explicit buy/sell amounts (in token base units)
            if parameters["direction"] == "buy":
                parameters["buyAmount"] = parameters["amount"]
                parameters["sellAmount"] = parameters["amount"] / pow(10, self.listed["listed"]["tokens"][parameters["market"]["traded"]]["decimals"]) * parameters["rate"] * pow(10, self.listed["listed"]["tokens"][parameters["market"]["quote"]]["decimals"])
            else:
                parameters["sellAmount"] = parameters["amount"]
                parameters["buyAmount"] = parameters["amount"] / pow(10, self.listed["listed"]["tokens"][parameters["market"]["traded"]]["decimals"]) * parameters["rate"] * pow(10, self.listed["listed"]["tokens"][parameters["market"]["quote"]]["decimals"])
        parameters["nonce"] = self.utils.getNonce()
        parameters["buyAmount"] = str(int(parameters["buyAmount"]))
        parameters["sellAmount"] = str(int(parameters["sellAmount"]))
        parameters["market"] = parameters["market"]["symbol"]
        # drop the helper keys that are not part of the wire format
        try:
            del parameters["amount"]
        except:
            pass
        try:
            del parameters["direction"]
        except:
            pass
        try:
            del parameters["rate"]
        except:
            pass
        try:
            del parameters["side"]
        except:
            pass
        if not "expiry" in parameters:
            parameters["expiry"] = int(time.time()) + 5184000 # default 3 months
        msg = {}
        if not "signature" in parameters and (self.account or self.delegate):
            # sign the order hash with the account key (preferred) or the delegate key
            privateKey = ""
            if self.account != None:
                privateKey = self.account
            elif self.delegate != None:
                privateKey = self.delegate
            orderHash = self.utils.hashOrder(
                parameters["sellToken"],
                int(parameters["sellAmount"]),
                parameters["buyToken"],
                int(parameters["buyAmount"]),
                parameters["expiry"],
                parameters["nonce"],
                self.contractAddress
            )
            message = encode_defunct(hexstr=orderHash)
            signedMessage = w3.eth.account.sign_message(
                message, private_key=privateKey)
        # NOTE(review): if "signature" was already in parameters (or no key is
        # set), signedMessage is unbound on the line below — confirm that path
        # is unreachable in practice.
        for param in parameters:
            msg[param] = parameters[param]
        msg["signature"] = Web3.toHex(signedMessage["signature"])
        msg["signatureFormat"] = "sign"
        self.utils.validateClientMethods("placeOrder", msg)
        msg["c"] = "placeOrder"
        packet = [msg]
        if callback:
            rid = self._newRID()
            self._setCallback(rid, callback, *args, **kwargs)
            packet[0]["rid"] = rid
        self._sendWrapper(packet)
    def on(self, event, callback, *args, **kwargs):
        """Register a persistent callback for a named server event."""
        if self.utils.validateServerEventName(event):
            # NOTE(review): args/kwargs are passed positionally here (not
            # star-expanded); _callback() compensates by unwrapping nested tuples.
            self._setCallback(event, callback, args, kwargs)
    def clear(self, event):
        """Remove all callbacks registered for an event."""
        if self._hasCallback(event):
            self._removeCallback(event)
    def once(self, event, callback, *args, **kwargs):
        """Register a callback that is removed after the event fires once."""
        self.utils.validateServerEventName(event)
        # set user callback and _removeCallback
        callbacks = [{"cb": callback, "args": args, "kwargs": kwargs}, {
            "cb": lambda msg, event: self.clear(event), "args": (event, ), "kwargs": {}}]
        self._setCallback(event, callbacks)
    def _setContractAdress(self, connectMessage):
        # pull the exchange contract address out of the server "config" message
        if isinstance(connectMessage, dict):
            if "config" in connectMessage:
                self.contractAddress = connectMessage["config"]["contractAddress"]
    def _setNetworkFromConfig(self, connectMessage):
        # adopt the network name announced by the server
        if isinstance(connectMessage, dict):
            if "config" in connectMessage:
                self.network = connectMessage["config"]["network"]
    def _setListed(self, listedMessage):
        """Cache the "listed" message and build symbol/contract lookup tables."""
        if isinstance(listedMessage, dict):
            if "listed" in listedMessage:
                self.listed = listedMessage
                self.tokensByContract = {}
                # annotate each token/market dict with its own symbol for convenience
                for symbol in listedMessage["listed"]["tokens"]:
                    token = listedMessage["listed"]["tokens"][symbol]
                    token["symbol"] = symbol
                    self.tokensByContract[token["contract"]] = token
                for symbol in self.listed["listed"]["markets"]:
                    self.listed["listed"]["markets"][symbol]["symbol"] = symbol
    async def _starter(self, account=None, delegate=None, autoAuth=True):
        """Run the connect and listener tasks until both complete."""
        self._stop = False  # when True this var stops the listener
        self._ridCount = 0
        connectTask = asyncio.ensure_future(
            self._connect(account, delegate, autoAuth))
        self._listenerTask = asyncio.ensure_future(
            self._listener())
        done, pending = await asyncio.wait(
            [connectTask, self._listenerTask],
            return_when=asyncio.ALL_COMPLETED,)
    async def _listener(self):
        """Receive packets until the connection closes or _stop is set."""
        # busy-wait (with a small sleep) until the connection is up
        while not self._isCon():
            await asyncio.sleep(0.01)
        while self._isCon() and not self._stop:
            packet = await self._recv()
            asyncio.ensure_future(self._packetHandler(packet))
    def _isCon(self):
        """Return True while the websocket exists and is open."""
        if not self._ws or self._ws.closed:
            return False
        return True
    def _newRID(self):
        """Return the next request id (monotonically increasing)."""
        self._ridCount += 1
        return self._ridCount
    def _removeCallback(self, identifier):
        # drop all callbacks stored under this identifier
        self._callbacks.pop(identifier)
    def _setCallback(self, identifier, callback, *args, **kwargs):
        """Append callback(s) for an identifier (event name or rid).

        ``callback`` may be a single callable or a pre-built list of
        callback dicts (as used by once()).  Existing callbacks for the
        same identifier are preserved.
        """
        callbacks = []
        if isinstance(callback, list):
            callbacks += callback
        else:
            callbacks.append({"cb": callback, "args": args, "kwargs": kwargs})
        if self._hasCallback(identifier):
            cb = self._getCallback(identifier)
            callbacks += cb
        # keys are always stored as strings (rids come back from the wire as ints)
        self._callbacks[str(identifier)] = callbacks
    def _getCallback(self, identifier):
        # returns None when the identifier is unknown
        if not identifier == None and identifier in self._callbacks:
            return self._callbacks[identifier]
    def _hasCallback(self, identifier):
        if identifier in self._callbacks:
            return True
        return False
    def _callback(self, identifier, data):
        """Invoke every callback registered for an identifier with ``data``."""
        callbacks = self._getCallback(str(identifier))
        for obj in callbacks:
            kwargs = obj["kwargs"]
            args = obj["args"]
            # unwrap one level of tuple nesting introduced by on() passing
            # args/kwargs positionally into _setCallback()
            if isinstance(args, tuple) and len(args) > 0:
                if isinstance(args[0], tuple):
                    args = obj["args"][0]
            # treat empty/placeholder arg containers as "no extra arguments"
            if (not args and not kwargs) or ((args == ((), {}) or args == (((), {}), {})) and not kwargs):
                obj["cb"](data)
            elif args and kwargs:
                obj["cb"](data, *args, **kwargs)
            elif args and not kwargs:
                obj["cb"](data, *args)
            else:
                obj["cb"](data, **kwargs)
    def _callbackWrapper(self, packet, identifier, message):
        # throws the packet away and just calls the callback
        if self._hasCallback(identifier):
            self._callback(identifier, message)
    def _throwIfNoCon(self):
        # guard helper: raise a client error if there is no live connection
        if not self._isCon():
            self.utils.error("WebSocket isn't connected.")
    async def _connect(self, account=None, delegate=None, autoAuth=True):
        """Open the websocket, wire up config/listed handlers, optionally authenticate."""
        self.on("listed", self._setListed)
        callbacks = [{"cb": self._setContractAdress, "args": (), "kwargs": {}}, {
            "cb": self._setNetworkFromConfig, "args": (), "kwargs": {}}]
        self._setCallback("config", callbacks)
        # NOTE(review): TLS certificate verification is disabled here — confirm intentional
        self._ws = await websockets.connect(self.websocketAddress, ssl=ssl._create_unverified_context())
        if autoAuth and (account or delegate):
            # "wsOpen" fires via the auth callback once authentication completes
            if account:
                self.authenticate(account,
                    self._callbackWrapper, "wsOpen", None)
            else:
                self.authenticateDelegate(delegate,
                    self._callbackWrapper, "wsOpen", None)
        else:
            self._callback("wsOpen", None)
    def _sendWrapper(self, msg):
        # fire-and-forget send on the event loop
        asyncio.ensure_future(self._send(msg))
    async def _send(self, msg):
        """JSON-encode and send a message; fires the "wsSend"/"wsClose" hooks."""
        jsonDump = json.dumps(msg)
        if self._hasCallback("wsSend"):
            self._callback("wsSend", msg)
        # NOTE(review): the send below still runs even after the wsClose
        # callback fires — confirm whether an early return was intended.
        if not self._isCon() or self._stop:
            if self._hasCallback("wsClose"):
                self._callback("wsClose", msg)
        await self._ws.send(jsonDump)
    async def _recv(self):
        """Await one raw packet; fires the "message" hook; returns None on failure."""
        try:
            packet = await self._ws.recv()
            if self._hasCallback("message"):
                self._callback("message", packet)
        except Exception as e:
            print(f"Couldn't receive message because of {e}")
            if "connection is closed" in str(e) and self._hasCallback("wsClose"):
                self._callback("wsClose", "died on recv")
            return
        return packet
    async def _close(self):
        """Stop the listener and close the websocket."""
        # NOTE(review): websockets' close() is a coroutine; this call is not
        # awaited, so the close handshake may not complete — confirm.
        self._ws.close()
        self._stop = True
        print("Connection closed by API!")
    async def _packetHandler(self, packet):
        """Decode a packet and dispatch each contained message.

        Messages look like [channel, eventId, payload] with an optional
        trailing rid.  Both the rid-specific callback (if any) and the
        named event callback are invoked with a parsed server event dict.
        """
        if packet == "[]" or not packet:
            return
        parsedPacket = self._parsePacket(packet)
        for message in parsedPacket:
            eventId = str(message[1])
            event = self.utils.matchServerEvent(eventId)
            if len(message) == 4:
                # a rid is attached: fire the request-specific callback first
                rid = str(message[3])
                if self._hasCallback(rid):
                    parsedServerEvent = {"chan": message[0], event: self.utils.parseServerPacket(
                        self.utils.serverEvents["events"][event], message[2]), "packet": message}
                    self._callback(rid, parsedServerEvent)
            eventId = message[1]
            if event and self._hasCallback(event):
                parsedServerEvent = {"chan": message[0], event: self.utils.parseServerPacket(
                    self.utils.serverEvents["events"][event], message[2]), "packet": message}
                self._callback(event, parsedServerEvent)
    def _parsePacket(self, packet):
        # decode the raw JSON frame; the wire format is always a list of messages
        packet = json.loads(packet)
        if not isinstance(packet, list):
            self.utils.error("Received Package was malformed!")
        return packet
class Methods:
    """Dynamically exposes every configured client method as an attribute.

    For each name in ``utils.clientMethods`` an attribute of the same name
    is attached that validates parameters, builds the wire packet, and
    sends it through the owning WsAPI instance.
    """

    def __init__(self, api, utils):
        self.api = api
        self.utils = utils
        self._setMethods()

    def _make_method(self, name):
        """Build a sender function for one client method (binds ``name`` via the factory)."""
        def _method(parameters=None, callback=None, *args, **kwargs):
            message = {"c": name}
            if parameters and isinstance(parameters, dict):
                # validate first, then merge the user parameters into the message
                self.utils.validateClientMethods(name, parameters)
                message.update(parameters)
            packet = [message]
            if callback:
                # register the callback under a fresh request id
                rid = self.api._newRID()
                self.api._setCallback(rid, callback, *args, **kwargs)
                message["rid"] = rid
            self.api._sendWrapper(packet)
        return _method

    def _setMethods(self):
        """Attach one generated sender per configured client method name."""
        for name in self.utils.clientMethods:
            setattr(self, name, self._make_method(name))
class Utils:
def __init__(self):
self.clientMethods = self._readJsonConfig("config/clientMethods.json")
self.serverEvents = self._readJsonConfig("config/serverEvents.json")
@staticmethod
def _readJsonConfig(jsonFile):
return json.loads(pkgutil.get_data(__package__, jsonFile))
def validateClientMethods(self, method, parameters):
if not method in self.clientMethods:
self.error(f"Method doesn't exist: {method}")
if "c" in parameters:
del parameters["c"]
methodObj = self.clientMethods[method]
if not parameters:
if methodObj == {}:
return True
for attr in methodObj:
if not "optional" in attr:
self.error(
f"Parameter '{attr}' in | |
wish to replace this file please first delete it.')
else:
self.ready = False
self.logger.error('Unable to init_directory_substitutions_yaml. ' +
'names: {}'.format(names))
else:
self.ready = False
self.logger.error('Unable to init_directory_substitutions_yaml. ' +
'self.filepaths: {}'.format(self.filepaths))
    def init_filename_substitutions_yaml(self,
        filename='filename_substitutions_datamodel_init.yaml'):
        '''Initialize a yaml file used to create a filename/text
        substitution dictionary.

        Queries Intro rows whose heading mentions "Naming Convention",
        resolves each row's file -> location -> env chain to a full path,
        and writes one `path: "description"` line per intro into the yaml
        file.  Refuses to overwrite an existing file.
        '''
        if self.ready:
            try:
                intros = (Intro.query
                            .filter(Intro.heading_title.ilike('%Naming Convention%'))
                            .all()
                            )
            except: intros = None
            if intros:
                yaml_str = str()
                for intro in intros:
                    if self.ready:
                        # walk intro -> file -> location -> env to assemble the path
                        file = (datamodel_File.load(file_id = intro.file_id)
                                if intro else None)
                        location = (Location.load(location_id = file.location_id)
                                    if file else None)
                        env = (Env.load(env_id = location.env_id)
                               if location else None)
                        intro_description = intro.description if intro else None
                        file_name = file.name if file else None
                        location_path = location.path if location else None
                        env_variable = env.variable if env else None
                        self.ready = env_variable and file_name # location_path can be empty
                        if self.ready:
                            path = (join(env_variable,location_path,file_name)
                                    if location_path else
                                    join(env_variable,file_name)
                                    )
                            if intro_description is not None:
                                # strip newlines so each entry stays on one yaml line
                                yaml_str += ('{0}: "{1}"\n'
                                    .format(path,intro_description.replace('\n',str())))
                if self.ready:
                    # write yaml file
                    self.util.set_yaml_dir()
                    yaml_file = (join(self.util.yaml_dir,filename)
                                 if self.util.yaml_dir else None)
                    # # DEBUG #
                    # with open(yaml_file,'w') as file:
                    #     file.write(yaml_str)
                    # # DEBUG #
                    if not exists(yaml_file):
                        with open(yaml_file,'w') as file:
                            file.write(yaml_str)
                    else:
                        self.ready = False
                        self.logger.error(
                            'Unable to init_filename_substitutions_yaml. ' +
                            'The file already exists: {}. '.format(yaml_file) +
                            'If you wish to replace this file please first delete it.')
            else:
                self.ready = False
                self.logger.error('Unable to init_filename_substitutions_yaml. ' +
                                  'intros: {}'.format(intros))
def init_filename_search_strings_yaml(self,
filename='filename_search_strings_init.yaml'):
'''Initialize a yaml file used to create a filename search string
dictionary.'''
if self.ready:
filespec = Filespec(logger=self.logger,options=self.options)
if self.filepaths and filespec and filespec.ready:
yaml_str = str()
for filepath in self.filepaths:
if self.ready:
print('filepath: %r' % filepath)
filespec.set_substitution_filename(datamodel_filepath=filepath)
filespec.set_substitution_filename_search_string()
self.ready = filespec.ready
if self.ready:
search_string = (filespec.substitution_filename_search_string
if self.ready else None)
yaml_str += ('{0}: "{1}"\n'
.format(filepath,search_string))
if self.ready:
# write yaml file
self.util.set_yaml_dir()
yaml_file = (join(self.util.yaml_dir,filename)
if self.util.yaml_dir else None)
# # DEBUG #
# with open(yaml_file,'w') as file:
# file.write(yaml_str)
# # DEBUG #
if not exists(yaml_file):
with open(yaml_file,'w') as file:
file.write(yaml_str)
else:
self.ready = False
self.logger.error(
'Unable to init_directory_substitutions_yaml. ' +
'The file already exists: {}. '.format(yaml_file) +
'If you wish to replace this file please first delete it.')
else:
self.ready = False
self.logger.error('Unable to init_filename_search_strings_yaml. ' +
'self.filepaths: {}, '.format(self.filepaths) +
'filespec: {}, '.format(filespec)
)
def populate_filespec_table_yaml(self):
'''Populate the filespec table row for each dict in filespec_dicts.'''
if self.ready:
filespec_dicts = (self.filespec_dict_yaml['datamodels']
if self.filespec_dict_yaml
and 'datamodels' in self.filespec_dict_yaml
else None)
for self.filespec_dict in filespec_dicts:
self.populate_filespec_table()
def populate_filespec_table(self):
'''Populate Database.filespec table from self.filespec_dict'''
if self.ready:
if self.filespec_dict:
self.set_filespec_tree_id()
self.database.set_file_id(tree_edition = self.tree_edition,
env_variable = self.env_variable,
location_path = self.location_path,
file_name = self.file_name)
self.ready = self.ready and self.database.ready
if self.ready:
self.database.set_filespec_columns(
tree_id = self.filespec_tree_id,
env_label = self.filespec_dict['env_label'],
location = self.filespec_dict['location'],
name = self.filespec_dict['name'],
ext = self.filespec_dict['ext'],
path_example = self.filespec_dict['path_example'],
note = self.filespec_dict['note'],
)
self.database.populate_filespec_table()
self.ready = self.database.ready
else:
self.ready = False
self.logger.error('Unable to populate_filespec_table_yaml. ' +
'self.filespec_dict: {}, '.format(self.filespec_dict)
)
    def set_filespec_tree_id(self):
        '''Set self.filespec_tree_id for the current self.filespec_dict.

        Splits the dict's 'path' and populates the file path tables, then
        populates/looks up the tree table row for the dict's 'tree_edition'
        and stores the resulting tree id.  Marks self.ready False (and
        logs) on any failure.
        '''
        self.filespec_tree_id = None
        if self.ready:
            if self.filespec_dict:
                # split_path and populate_file_path_tables if necessary
                path = (self.filespec_dict['path']
                        if self.filespec_dict and 'path' in self.filespec_dict
                        else None)
                self.set_path(path=path)
                self.split_path()
                self.populate_file_path_tables()
                if self.ready:
                    # the filespec may reference a different tree edition than the current one
                    filespec_tree_edition = (self.filespec_dict['tree_edition']
                                             if self.filespec_dict
                                             and 'tree_edition' in self.filespec_dict
                                             else None)
                    self.populate_tree_table(tree_edition=filespec_tree_edition)
                    self.database.set_tree_id(tree_edition=filespec_tree_edition)
                    self.filespec_tree_id = (self.database.tree_id
                                             if self.database.ready else None)
            else:
                self.ready = False
                self.logger.error('Unable to set_filespec_tree_id. ' +
                                  'self.filespec_dict: {}, '.format(self.filespec_dict)
                                  )
            if not self.filespec_tree_id:
                self.ready = False
                self.logger.error('Unable to set_filespec_tree_id. ' +
                                  'self.filespec_tree_id: {}, '
                                  .format(self.filespec_tree_id)
                                  )
def set_tree_edition(self):
'''Set the datamodel edition.'''
self.tree_edition = None
if self.ready:
if not self.datamodel_dir: self.set_datamodel_dir()
if self.ready:
self.tree_edition = (self.datamodel_dir.split('/')[-1]
if self.datamodel_dir else None)
if not self.tree_edition:
self.ready = False
self.logger.error('Unable to set_tree_edition. ' +
'self.tree_edition: {}'
.format(self.tree_edition))
def set_datamodel_dir(self):
'''Set the DATAMODEL_DIR file path on cypher.'''
self.datamodel_dir = None
if self.ready:
try: self.datamodel_dir = environ['DATAMODEL_DIR']
except:
self.ready = False
self.logger.error(
'Unable to populate_tree_table from the ' +
'environmental variable DATAMODEL_DIR. ' +
'Try loading a datamodel module file.')
def set_path(self, path=None):
self.path = None
if self.ready:
self.path = path if path else None
if not self.path: self.logger.error('Unable to set_path.')
def split_path(self,path=None):
'''Extract information from the given file path.'''
if self.ready:
path = path if path else self.path
if path:
split = path.split('/')
self.env_variable = split[0] if split else None
self.file_name = split[-1] if split else None
self.location_path = '/'.join(split[1:-1]) if split else None
self.directory_names = list()
self.directory_depths = list()
for name in split[1:-1]:
self.directory_names.append(name)
self.directory_depths.append(self.directory_names.index(name))
else:
self.ready = False
self.logger.error('Unable to split_path. ' +
'path: {}'.format(path))
if not (
self.env_variable and
self.file_name
#self.location_path # can be None
#self.directory_names # can be None
#self.directory_depths # can be None
):
self.ready = False
self.logger.error(
'Unable to split_path. ' +
'self.env_variable: {}, ' .format(self.env_variable) +
'self.file_name: {}, ' .format(self.file_name)
)
def set_filepaths(self):
'''Set a list of all files for the current tree edition.'''
self.filepaths = list()
if self.ready:
self.set_file_path_skip_list()
if not self.datamodel_dir:
self.set_datamodel_dir()
if self.ready:
root_dir = join(self.datamodel_dir,'datamodel/files/')
for (dirpath, dirnames, filenames) in walk(root_dir):
if filenames:
for filename in filenames:
if (filename.endswith('.html') and
'readme' not in filename.lower()
):
file = join(dirpath,filename)
if exists(file):
filepath = file.replace(root_dir,str())
if (filepath and
filepath not in self.file_path_skip_list
):
self.filepaths.append(filepath)
else:
self.ready = False
self.logger.error('File does not exist: '
'file: {}'.format(file))
else: pass # it's okay to have empty filenames
else:
self.ready = False
self.logger.error('Unable to set_filepaths. ' +
'self.datamodel_dir: {}'.format(self.datamodel_dir))
if not self.filepaths:
self.ready = False
self.logger.error('Unable to set_filepaths. ' +
'self.filepaths: {}'.format(self.filepaths))
def set_file_path_skip_list(self):
'''Set a list of file paths that don't conform to the database schema.'''
self.file_path_skip_list = []
# self.file_path_skip_list = [
# 'MANGA_SPECTRO_REDUX/DRPVER/PLATE4/MJD5/mgFrame.html',
# 'MANGA_PIPE3D/MANGADRP_VER/PIPE3D_VER/PLATE/manga.Pipe3D.cube.html',
# 'PHOTO_REDUX/RERUN/RUN/objcs/CAMCOL/fpC.html',
# 'PHOTO_REDUX/RERUN/RUN/astrom/asTrans.html',
# 'BOSS_PHOTOOBJ/photoz-weight/pofz.html',
# ]
def set_svn_products(self,root_dir=None):
'''Set a list of directories containing the subdirectories:
branches, tags, and trunk'''
if self.ready:
if root_dir and self.svn_products is not None:
command = ['svn','list',root_dir]
(stdout,stderr,proc_returncode) = self.util.execute_command(command)
self.logger.info('Traversing directory: %r' % root_dir)
if proc_returncode == 0:
basenames = ([d.replace('/',str())
for d in str(stdout.decode("utf-8")).split('\n')
if d and d.endswith('/')]
if stdout else None)
if basenames:
# if {'branches','tags','trunk'}.issubset(set(basenames)):
if 'trunk' in basenames:
self.svn_products.append(root_dir)
root_dir = dirname(root_dir)
else:
for basename in basenames:
if self.ready:
sub_dir = join(root_dir,basename)
self.set_svn_products(root_dir=sub_dir)
else:
self.ready = False
self.logger.error(
'Unable to get_svn_products. ' +
'An error has occured while executing the command, ' +
'command: {}, '.format(command) +
'proc_returncode: {}, '.format(proc_returncode)
)
else:
self.ready = False
self.logger.error(
'Unable to get_svn_products. ' +
'root_dir: {}'.format(root_dir) +
'self.svn_product: {}'.format(self.svn_product)
)
    def get_column_tag(self):
        '''Prepare the file body, then delegate to self.file.get_column_tag().'''
        if self.ready:
            self.set_file_body()
            self.file.get_column_tag()
    def get_db_column_tags(self):
        '''Prepare the file body, then delegate to self.file.get_db_column_tags().'''
        if self.ready:
            self.set_file_body()
            self.file.get_db_column_tags()
    def get_db_keyword_tags(self):
        '''Prepare the file body, then delegate to self.file.get_db_keyword_tags().'''
        if self.ready:
            self.set_file_body()
            self.file.get_db_keyword_tags()
def set_file_body(self):
if self.ready:
self.set_file_path_info()
self.set_html_text()
self.set_file()
if self.file_path_info and self.html_text and self.file:
self.file.set_file_path_info(file_path_info=self.file_path_info)
self.file.set_html_text(html_text=self.html_text)
self.file.set_body()
else:
self.ready = False
self.logger.error(
'Unable to populate_file_html_tables. ' +
'self.file_path_info: {}, '.format(self.file_path_info) +
'self.html_text: {}, '.format(self.html_text) +
'self.file: {}.'.format(self.file) )
def populate_database(self):
'''Populate the database with file information.'''
if self.ready:
if self.ready: self.populate_file_path_tables()
if self.ready: self.populate_file_html_tables()
self.ready = self.ready and self.database.ready and self.file.ready
if self.ready:
intro_type = self.file.intro_type
file_type = self.file.file_type
self.database.update_file_table_status(ready=self.ready,
intro_type=intro_type,
file_type=file_type)
def populate_file_path_tables(self):
'''Populate tables comprised of file path information.'''
if self.ready:
self.populate_tree_table()
self.populate_env_table()
self.populate_location_table()
self.populate_directory_table()
self.populate_file_table()
    def populate_tree_table(self,tree_edition=None):
        '''Populate the tree table.

        :param tree_edition: edition to record; falls back to
            self.tree_edition when not given.
        '''
        if self.ready:
            # Fall back to the instance-wide edition when none is supplied.
            tree_edition = tree_edition if tree_edition else self.tree_edition
            if self.database and tree_edition:
                self.database.set_tree_columns(edition=tree_edition)
                self.database.populate_tree_table()
                self.ready = self.database.ready
            else:
                self.ready = False
                self.logger.error(
                    'Unable to populate_tree_table. ' +
                    'self.database: {}.'.format(self.database) +
                    'tree_edition: {}.'.format(tree_edition))
    def populate_env_table(self):
        '''Populate the env table.

        Requires self.tree_edition and self.env_variable to be set.
        '''
        if self.ready:
            if self.database and self.tree_edition and self.env_variable:
                # The env row is keyed by the current tree edition.
                self.database.set_tree_id(tree_edition=self.tree_edition)
                self.database.set_env_columns(variable=self.env_variable)
                self.database.populate_env_table()
                self.ready = self.database.ready
            else:
                self.ready = False
                # NOTE(review): the condition also tests self.tree_edition but
                # the message omits it — consider adding it for debuggability.
                self.logger.error(
                    'Unable to populate_env_table. ' +
                    'self.database: {}, '.format(self.database) +
                    'self.env_variable: {}.' .format(self.env_variable))
    def populate_location_table(self):
        '''Populate the location table.

        Requires tree_edition and env_variable; location_path may be None
        (a file directly under the env root).
        '''
        if self.ready:
            if (self.database and
                self.tree_edition and
                self.env_variable
                # self.location_path # can be None
                ):
                # The location row hangs off the env row for this edition.
                self.database.set_env_id(tree_edition = self.tree_edition,
                                         env_variable = self.env_variable)
                self.database.set_location_columns(path = self.location_path)
                self.database.populate_location_table()
                self.ready = self.database.ready
            else:
                self.ready = False
                self.logger.error(
                    'Unable to populate_location_table. ' +
                    'self.tree_edition: {}, '.format(self.tree_edition) +
                    'self.env_variable: {}, '.format(self.env_variable)
                    )
def populate_directory_table(self):
'''Populate the | |
i, 'objective with modified lamination parameter weightings'] \
= np.NaN
# Inhomogeneity factor
table_result.loc[i, 'target inhomogeneity factor'] = \
np.linalg.norm(lampam_target[0:4] - lampam_target[8:12])
# objectives
for k in range(parameters.n_outer_step):
table_result.loc[i, f'objective iteration {k+1}'] = np.NaN
# lampam_target - lampamRetrieved
table_result.loc[i, 'error1 = abs(lampam_target[1]-lampam[1])'] \
= np.NaN
table_result.loc[i, 'error2'] = np.NaN
table_result.loc[i, 'error3'] = np.NaN
table_result.loc[i, 'error4'] = np.NaN
table_result.loc[i, 'error5'] = np.NaN
table_result.loc[i, 'error6'] = np.NaN
table_result.loc[i, 'error7'] = np.NaN
table_result.loc[i, 'error8'] = np.NaN
table_result.loc[i, 'error9'] = np.NaN
table_result.loc[i, 'error10'] = np.NaN
table_result.loc[i, 'error11'] = np.NaN
table_result.loc[i, 'error12'] = np.NaN
# lampam_target
table_result.loc[i, 'lampam_target[1]'] = lampam_target[0]
table_result.loc[i, 'lampam_target[2]'] = lampam_target[1]
table_result.loc[i, 'lampam_target[3]'] = lampam_target[2]
table_result.loc[i, 'lampam_target[4]'] = lampam_target[3]
table_result.loc[i, 'lampam_target[5]'] = lampam_target[4]
table_result.loc[i, 'lampam_target[6]'] = lampam_target[5]
table_result.loc[i, 'lampam_target[7]'] = lampam_target[6]
table_result.loc[i, 'lampam_target[8]'] = lampam_target[7]
table_result.loc[i, 'lampam_target[9]'] = lampam_target[8]
table_result.loc[i, 'lampam_target[10]'] = lampam_target[9]
table_result.loc[i, 'lampam_target[11]'] = lampam_target[10]
table_result.loc[i, 'lampam_target[12]'] = lampam_target[11]
# Retrieved stacking sequence at step 1
table_result.loc[i, 'ss retrieved at step 1'] = np.NaN
# Retrieved stacking sequence
table_result.loc[i, 'ss retrieved'] = np.NaN
# Target stacking sequence
ss_flatten = np.array(ss_target, dtype=str)
#ss_flatten = ' '.join(ss_flatten)
table_result.loc[i, 'ss target'] = ss_flatten
# # Ply counts
# table_result.loc[i, 'N0_target'] = N0_Target
# table_result.loc[i, 'N90_target'] = N90_Target
# table_result.loc[i, 'N45_target'] = N45_Target
# table_result.loc[i, 'N-45_target'] = N135_Target
# table_result.loc[i, 'N0 - N0_target'] = np.NaN
# table_result.loc[i, 'N90 - N90_target'] = np.NaN
# table_result.loc[i, 'N45 - N45_target'] = np.NaN
# table_result.loc[i, 'N-45 - N-45_target'] = np.NaN
# table_result.loc[i, 'penalty value for the 10% rule'] = np.NaN
for ind in range(n_outer_step):
# numbers of stacks at the last level of the last group search
table_result.loc[i, 'n_designs_last_level ' + str(ind + 1)] \
= np.NaN
# numbers of repaired stacks at the last group search
table_result.loc[i, 'n_designs_repaired ' + str(ind + 1)] \
= np.NaN
# numbers of unique repaired stacks at the last group search
table_result.loc[i, 'n_designs_repaired_unique ' + str(ind + 1)] \
= np.NaN
# in-plane orthotropy
table_result.loc[i, 'In-plane orthotropy parameter 1'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 2'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 3'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 4'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 5'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 6'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 7'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 8'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 9'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 10'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 11'] = np.NaN
table_result.loc[i, 'In-plane orthotropy parameter 12'] = np.NaN
table_result.loc[i, 'diff A11 percentage'] = np.NaN
table_result.loc[i, 'diff A22 percentage'] = np.NaN
table_result.loc[i, 'diff A12 percentage'] = np.NaN
table_result.loc[i, 'diff A66 percentage'] = np.NaN
table_result.loc[i, 'diff A16 percentage'] = np.NaN
table_result.loc[i, 'diff A26 percentage'] = np.NaN
table_result.loc[i, 'diff B11 percentage'] = np.NaN
table_result.loc[i, 'diff B22 percentage'] = np.NaN
table_result.loc[i, 'diff B12 percentage'] = np.NaN
table_result.loc[i, 'diff B66 percentage'] = np.NaN
table_result.loc[i, 'diff B16 percentage'] = np.NaN
table_result.loc[i, 'diff B26 percentage'] = np.NaN
table_result.loc[i, 'diff D11 percentage'] = np.NaN
table_result.loc[i, 'diff D22 percentage'] = np.NaN
table_result.loc[i, 'diff D12 percentage'] = np.NaN
table_result.loc[i, 'diff D66 percentage'] = np.NaN
table_result.loc[i, 'diff D16 percentage'] = np.NaN
table_result.loc[i, 'diff D26 percentage'] = np.NaN
else:
print('Time', elapsed1)
print('objective with modified lamination parameter weightings',
result.objective)
# Laminate ply count
table_result.loc[i, 'Ply count'] = n_plies_lam
# number of the outer loop with the best results
table_result.loc[i, 'best outer loop'] \
= result.n_outer_step_best_solution
# Computational time in s
table_result.loc[i, 'time (s)'] = elapsed1
# # Number of objective function evaluations
# table_result.loc[i, 'Number of objective function evaluations'] \
# = " ".join(result.n_obj_func_calls_tab.astype(str))
# Number of iterations
table_result.loc[i, 'n_outer_step_performed'] \
= result.number_of_outer_steps_performed
# objective
table_result.loc[
i, 'objective with initial lamination parameter weightings'] \
= objectives(
lampam=result.lampam,
targets=targets,
lampam_weightings=parameters.lampam_weightings_ini,
constraints=constraints,
parameters=parameters)
table_result.loc[
i, 'objective with modified lamination parameter weightings'] \
= result.objective
# Inhomogeneity factor
table_result.loc[i, 'target inhomogeneity factor'] \
= np.linalg.norm(lampam_target[0:4] - lampam_target[8:12])
# objectives
for k in range(parameters.n_outer_step):
table_result.loc[
i, f'objective iteration {k+1}'] = result.obj_tab[k]
# lampam_target - lampamRetrieved
table_result.loc[i, 'error1 = abs(lampam_target[1]-lampam[1])'] \
= abs(lampam_target[0] - result.lampam[0])
table_result.loc[i, 'error2'] = abs(
lampam_target[1] - result.lampam[1])
table_result.loc[i, 'error3'] = abs(
lampam_target[2]- result.lampam[2])
table_result.loc[i, 'error4'] = abs(
lampam_target[3]- result.lampam[3])
table_result.loc[i, 'error5'] = abs(
lampam_target[4]- result.lampam[4])
table_result.loc[i, 'error6'] = abs(
lampam_target[5]- result.lampam[5])
table_result.loc[i, 'error7'] = abs(
lampam_target[6]- result.lampam[6])
table_result.loc[i, 'error8'] = abs(
lampam_target[7]- result.lampam[7])
table_result.loc[i, 'error9'] = abs(
lampam_target[8]- result.lampam[8])
table_result.loc[i, 'error10'] = abs(
lampam_target[9]- result.lampam[9])
table_result.loc[i, 'error11'] = abs(
lampam_target[10]- result.lampam[10])
table_result.loc[i, 'error12'] = abs(
lampam_target[11]- result.lampam[11])
# lampam_target
table_result.loc[i, 'lampam_target[1]'] = lampam_target[0]
table_result.loc[i, 'lampam_target[2]'] = lampam_target[1]
table_result.loc[i, 'lampam_target[3]'] = lampam_target[2]
table_result.loc[i, 'lampam_target[4]'] = lampam_target[3]
table_result.loc[i, 'lampam_target[5]'] = lampam_target[4]
table_result.loc[i, 'lampam_target[6]'] = lampam_target[5]
table_result.loc[i, 'lampam_target[7]'] = lampam_target[6]
table_result.loc[i, 'lampam_target[8]'] = lampam_target[7]
table_result.loc[i, 'lampam_target[9]'] = lampam_target[8]
table_result.loc[i, 'lampam_target[10]'] = lampam_target[9]
table_result.loc[i, 'lampam_target[11]'] = lampam_target[10]
table_result.loc[i, 'lampam_target[12]'] = lampam_target[11]
# Retrieved stacking sequence at step 1
ss_flatten = np.array(result.ss_tab[0], dtype=str)
ss_flatten = ' '.join(ss_flatten)
table_result.loc[i, 'ss retrieved at step 1'] = ss_flatten
# Retrieved stacking sequence
ss_flatten = np.array(result.ss, dtype=str)
ss_flatten = ' '.join(ss_flatten)
table_result.loc[i, 'ss retrieved'] = ss_flatten
# Target stacking sequence
ss_flatten = np.array(ss_target, dtype=str)
ss_flatten = ' '.join(ss_flatten)
table_result.loc[i, 'ss target'] = ss_flatten
# # Ply counts
# table_result.loc[i, 'N0_target'] = N0_Target
# table_result.loc[i, 'N90_target'] = N90_Target
# table_result.loc[i, 'N45_target'] = N45_Target
# table_result.loc[i, 'N-45_target'] = N135_Target
# N0 = sum(result.ss == 0)
# N90 = sum(result.ss == 90)
# N45 = sum(result.ss == 45)
# N135 = sum(result.ss == -45)
# table_result.loc[i, 'N0 - N0_target'] = N0 - N0_Target
# table_result.loc[i, 'N90 - N90_target'] = N90 - N90_Target
# table_result.loc[i, 'N45 - N45_target'] = N45 - N45_Target
# table_result.loc[i, 'N-45 - N-45_target'] = N135 - N135_Target
# table_result.loc[i, 'penalty value for the 10% rule'] \
# = calc_penalty_10_ss(result.ss, constraints)
for ind in range(n_outer_step):
# numbers of stacks at the last level of the last group search
table_result.loc[i, 'n_designs_last_level ' + str(ind + 1)] \
= result.n_designs_last_level_tab[ind]
# numbers of repaired stacks at the last group search
table_result.loc[i, 'n_designs_repaired ' + str(ind + 1)] \
= result.n_designs_repaired_tab[ind]
# numbers of unique repaired stacks at the last group search
table_result.loc[i, 'n_designs_repaired_unique ' + str(ind + 1)] \
= result.n_designs_repaired_unique_tab[ind]
# in-plane orthotropy
ipo_now = ipo_param_1_12(result.lampam, mat_prop, constraints.sym)
table_result.loc[i, 'In-plane orthotropy parameter 1'] = ipo_now[0]
table_result.loc[i, 'In-plane orthotropy parameter 2'] = ipo_now[1]
table_result.loc[i, 'In-plane orthotropy parameter 3'] = ipo_now[2]
table_result.loc[i, 'In-plane orthotropy parameter 4'] = ipo_now[3]
table_result.loc[i, 'In-plane orthotropy parameter 5'] = ipo_now[4]
table_result.loc[i, 'In-plane orthotropy parameter 6'] = ipo_now[5]
table_result.loc[i, 'In-plane orthotropy parameter 7'] = ipo_now[6]
table_result.loc[i, 'In-plane orthotropy parameter 8'] = ipo_now[7]
table_result.loc[i, 'In-plane orthotropy parameter 9'] = ipo_now[8]
table_result.loc[i, 'In-plane orthotropy parameter 10'] = ipo_now[9]
table_result.loc[i, 'In-plane orthotropy parameter 11'] = ipo_now[10]
table_result.loc[i, 'In-plane orthotropy parameter 12'] = ipo_now[11]
A = A_from_lampam(result.lampam, mat_prop)
A11 = A[0, 0]
A22 = A[1, 1]
A12 = A[0, 1]
A66 = A[2, 2]
A16 = A[0, 2]
A26 = A[1, 2]
B = B_from_lampam(result.lampam, mat_prop)
B11 = B[0, 0]
B22 = B[1, 1]
B12 = B[0, 1]
B66 = B[2, 2]
B16 = B[0, 2]
B26 = B[1, 2]
D = D_from_lampam(result.lampam, mat_prop)
D11 = D[0, 0]
D22 = D[1, 1]
D12 = D[0, 1]
D66 = D[2, 2]
D16 = D[0, 2]
D26 = D[1, 2]
table_result.loc[i, 'diff A11 percentage'] \
= abs((A11 - A11_target)/A11_target)
table_result.loc[i, 'diff A22 percentage'] \
= abs((A22 - A22_target)/A22_target)
if abs(A12_target/A11_target) > 1e-8:
table_result.loc[i, 'diff A12 percentage'] \
= abs((A12 - A12_target)/A12_target)
else:
table_result.loc[i, 'diff A12 percentage'] = np.NaN
if abs(A66_target/A11_target) > 1e-8:
table_result.loc[i, 'diff A66 percentage'] \
= abs((A66 - A66_target)/A66_target)
else:
table_result.loc[i, 'diff A66 percentage'] = np.NaN
if abs(A16_target/A11_target) > 1e-8:
table_result.loc[i, 'diff A16 percentage'] \
= abs((A16 - A16_target)/A16_target)
else:
table_result.loc[i, 'diff A16 percentage'] = np.NaN
if abs(A26_target/A11_target) > 1e-8:
table_result.loc[i, 'diff A26 percentage'] \
= abs((A26 - A26_target)/A26_target)
else:
table_result.loc[i, 'diff A26 percentage'] = np.NaN
if B11_target:
table_result.loc[i, 'diff B11 percentage'] \
= abs((B11 - B11_target)/B11_target)
else:
table_result.loc[i, 'diff B11 percentage'] = np.NaN
if B22_target:
table_result.loc[i, 'diff B22 percentage'] \
= abs((B22 - B22_target)/B22_target)
else:
table_result.loc[i, 'diff B22 percentage'] = np.NaN
if B12_target:
table_result.loc[i, 'diff B12 percentage'] \
= abs((B12 - B12_target)/B12_target)
else:
table_result.loc[i, | |
# sdk/python/pulumi_azure/dataprotection/backup_policy_disk.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['BackupPolicyDiskArgs', 'BackupPolicyDisk']
@pulumi.input_type
class BackupPolicyDiskArgs:
    # NOTE: generated resource-args class. @pulumi.input_type introspects the
    # decorated properties below, so property names, getter names and the
    # decorator arguments must stay exactly as generated.
    def __init__(__self__, *,
                 backup_repeating_time_intervals: pulumi.Input[Sequence[pulumi.Input[str]]],
                 default_retention_duration: pulumi.Input[str],
                 vault_id: pulumi.Input[str],
                 name: Optional[pulumi.Input[str]] = None,
                 retention_rules: Optional[pulumi.Input[Sequence[pulumi.Input['BackupPolicyDiskRetentionRuleArgs']]]] = None):
        """
        The set of arguments for constructing a BackupPolicyDisk resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] backup_repeating_time_intervals: Specifies a list of repeating time interval. It should follow `ISO 8601` repeating time interval . Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[str] default_retention_duration: The duration of default retention rule. It should follow `ISO 8601` duration format. Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[str] vault_id: The ID of the Backup Vault within which the Backup Policy Disk should exist. Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[str] name: The name which should be used for this Backup Policy Disk. Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[Sequence[pulumi.Input['BackupPolicyDiskRetentionRuleArgs']]] retention_rules: One or more `retention_rule` blocks as defined below. Changing this forces a new Backup Policy Disk to be created.
        """
        pulumi.set(__self__, "backup_repeating_time_intervals", backup_repeating_time_intervals)
        pulumi.set(__self__, "default_retention_duration", default_retention_duration)
        pulumi.set(__self__, "vault_id", vault_id)
        # Optional inputs are stored only when supplied.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if retention_rules is not None:
            pulumi.set(__self__, "retention_rules", retention_rules)
    @property
    @pulumi.getter(name="backupRepeatingTimeIntervals")
    def backup_repeating_time_intervals(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Specifies a list of repeating time interval. It should follow `ISO 8601` repeating time interval . Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "backup_repeating_time_intervals")
    @backup_repeating_time_intervals.setter
    def backup_repeating_time_intervals(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "backup_repeating_time_intervals", value)
    @property
    @pulumi.getter(name="defaultRetentionDuration")
    def default_retention_duration(self) -> pulumi.Input[str]:
        """
        The duration of default retention rule. It should follow `ISO 8601` duration format. Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "default_retention_duration")
    @default_retention_duration.setter
    def default_retention_duration(self, value: pulumi.Input[str]):
        pulumi.set(self, "default_retention_duration", value)
    @property
    @pulumi.getter(name="vaultId")
    def vault_id(self) -> pulumi.Input[str]:
        """
        The ID of the Backup Vault within which the Backup Policy Disk should exist. Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "vault_id")
    @vault_id.setter
    def vault_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "vault_id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Backup Policy Disk. Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="retentionRules")
    def retention_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackupPolicyDiskRetentionRuleArgs']]]]:
        """
        One or more `retention_rule` blocks as defined below. Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "retention_rules")
    @retention_rules.setter
    def retention_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackupPolicyDiskRetentionRuleArgs']]]]):
        pulumi.set(self, "retention_rules", value)
@pulumi.input_type
class _BackupPolicyDiskState:
    # NOTE: generated state class (all fields optional — used for lookups and
    # filtering). @pulumi.input_type introspects the decorated properties, so
    # their names and decorators must stay exactly as generated.
    def __init__(__self__, *,
                 backup_repeating_time_intervals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 default_retention_duration: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 retention_rules: Optional[pulumi.Input[Sequence[pulumi.Input['BackupPolicyDiskRetentionRuleArgs']]]] = None,
                 vault_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering BackupPolicyDisk resources.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] backup_repeating_time_intervals: Specifies a list of repeating time interval. It should follow `ISO 8601` repeating time interval . Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[str] default_retention_duration: The duration of default retention rule. It should follow `ISO 8601` duration format. Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[str] name: The name which should be used for this Backup Policy Disk. Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[Sequence[pulumi.Input['BackupPolicyDiskRetentionRuleArgs']]] retention_rules: One or more `retention_rule` blocks as defined below. Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[str] vault_id: The ID of the Backup Vault within which the Backup Policy Disk should exist. Changing this forces a new Backup Policy Disk to be created.
        """
        # Every field is optional; only supplied values are stored.
        if backup_repeating_time_intervals is not None:
            pulumi.set(__self__, "backup_repeating_time_intervals", backup_repeating_time_intervals)
        if default_retention_duration is not None:
            pulumi.set(__self__, "default_retention_duration", default_retention_duration)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if retention_rules is not None:
            pulumi.set(__self__, "retention_rules", retention_rules)
        if vault_id is not None:
            pulumi.set(__self__, "vault_id", vault_id)
    @property
    @pulumi.getter(name="backupRepeatingTimeIntervals")
    def backup_repeating_time_intervals(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a list of repeating time interval. It should follow `ISO 8601` repeating time interval . Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "backup_repeating_time_intervals")
    @backup_repeating_time_intervals.setter
    def backup_repeating_time_intervals(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "backup_repeating_time_intervals", value)
    @property
    @pulumi.getter(name="defaultRetentionDuration")
    def default_retention_duration(self) -> Optional[pulumi.Input[str]]:
        """
        The duration of default retention rule. It should follow `ISO 8601` duration format. Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "default_retention_duration")
    @default_retention_duration.setter
    def default_retention_duration(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "default_retention_duration", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Backup Policy Disk. Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="retentionRules")
    def retention_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackupPolicyDiskRetentionRuleArgs']]]]:
        """
        One or more `retention_rule` blocks as defined below. Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "retention_rules")
    @retention_rules.setter
    def retention_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackupPolicyDiskRetentionRuleArgs']]]]):
        pulumi.set(self, "retention_rules", value)
    @property
    @pulumi.getter(name="vaultId")
    def vault_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Backup Vault within which the Backup Policy Disk should exist. Changing this forces a new Backup Policy Disk to be created.
        """
        return pulumi.get(self, "vault_id")
    @vault_id.setter
    def vault_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vault_id", value)
class BackupPolicyDisk(pulumi.CustomResource):
    @overload
    # Overload: keyword-argument form of the constructor (each input passed
    # individually). The real implementation is the untyped __init__ below.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 backup_repeating_time_intervals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 default_retention_duration: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 retention_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackupPolicyDiskRetentionRuleArgs']]]]] = None,
                 vault_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a Backup Policy Disk.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        rg = azure.core.ResourceGroup("rg", location="West Europe")
        example_backup_vault = azure.dataprotection.BackupVault("exampleBackupVault",
            resource_group_name=rg.name,
            location=rg.location,
            datastore_type="VaultStore",
            redundancy="LocallyRedundant")
        example_backup_policy_disk = azure.dataprotection.BackupPolicyDisk("exampleBackupPolicyDisk",
            vault_id=example_backup_vault.id,
            backup_repeating_time_intervals=["R/2021-05-19T06:33:16+00:00/PT4H"],
            default_retention_duration="P7D",
            retention_rules=[
                azure.dataprotection.BackupPolicyDiskRetentionRuleArgs(
                    name="Daily",
                    duration="P7D",
                    priority=25,
                    criteria=azure.dataprotection.BackupPolicyDiskRetentionRuleCriteriaArgs(
                        absolute_criteria="FirstOfDay",
                    ),
                ),
                azure.dataprotection.BackupPolicyDiskRetentionRuleArgs(
                    name="Weekly",
                    duration="P7D",
                    priority=20,
                    criteria=azure.dataprotection.BackupPolicyDiskRetentionRuleCriteriaArgs(
                        absolute_criteria="FirstOfWeek",
                    ),
                ),
            ])
        ```
        ## Import
        Backup Policy Disks can be imported using the `resource id`, e.g.
        ```sh
         $ pulumi import azure:dataprotection/backupPolicyDisk:BackupPolicyDisk example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DataProtection/backupVaults/vault1/backupPolicies/backupPolicy1
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] backup_repeating_time_intervals: Specifies a list of repeating time interval. It should follow `ISO 8601` repeating time interval . Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[str] default_retention_duration: The duration of default retention rule. It should follow `ISO 8601` duration format. Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[str] name: The name which should be used for this Backup Policy Disk. Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BackupPolicyDiskRetentionRuleArgs']]]] retention_rules: One or more `retention_rule` blocks as defined below. Changing this forces a new Backup Policy Disk to be created.
        :param pulumi.Input[str] vault_id: The ID of the Backup Vault within which the Backup Policy Disk should exist. Changing this forces a new Backup Policy Disk to be created.
        """
        ...
    @overload
    # Overload: args-object form of the constructor (a single
    # BackupPolicyDiskArgs). The real implementation is the untyped
    # __init__ below.
    def __init__(__self__,
                 resource_name: str,
                 args: BackupPolicyDiskArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Backup Policy Disk.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azure as azure
        rg = azure.core.ResourceGroup("rg", location="West Europe")
        example_backup_vault = azure.dataprotection.BackupVault("exampleBackupVault",
            resource_group_name=rg.name,
            location=rg.location,
            datastore_type="VaultStore",
            redundancy="LocallyRedundant")
        example_backup_policy_disk = azure.dataprotection.BackupPolicyDisk("exampleBackupPolicyDisk",
            vault_id=example_backup_vault.id,
            backup_repeating_time_intervals=["R/2021-05-19T06:33:16+00:00/PT4H"],
            default_retention_duration="P7D",
            retention_rules=[
                azure.dataprotection.BackupPolicyDiskRetentionRuleArgs(
                    name="Daily",
                    duration="P7D",
                    priority=25,
                    criteria=azure.dataprotection.BackupPolicyDiskRetentionRuleCriteriaArgs(
                        absolute_criteria="FirstOfDay",
                    ),
                ),
                azure.dataprotection.BackupPolicyDiskRetentionRuleArgs(
                    name="Weekly",
                    duration="P7D",
                    priority=20,
                    criteria=azure.dataprotection.BackupPolicyDiskRetentionRuleCriteriaArgs(
                        absolute_criteria="FirstOfWeek",
                    ),
                ),
            ])
        ```
        ## Import
        Backup Policy Disks can be imported using the `resource id`, e.g.
        ```sh
         $ pulumi import azure:dataprotection/backupPolicyDisk:BackupPolicyDisk example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DataProtection/backupVaults/vault1/backupPolicies/backupPolicy1
        ```
        :param str resource_name: The name of the resource.
        :param BackupPolicyDiskArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    # Route the call to whichever overload the caller used: when a
    # BackupPolicyDiskArgs object was supplied, its fields are expanded into
    # keyword arguments; otherwise the raw positional/keyword form is
    # forwarded unchanged.
    resource_args, opts = _utilities.get_resource_args_opts(BackupPolicyDiskArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup_repeating_time_intervals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
default_retention_duration: Optional[pulumi.Input[str]] | |
# -*- coding: utf-8 -*-
'''
@author: <NAME>
May 2018
'''
# import code
# code.interact(local=locals())
import os
import pickle
# from fordclassifier.classifier.classifier import Classifier
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, auc
import json
import matplotlib.pyplot as plt
import operator
import itertools
from sklearn.metrics import confusion_matrix
from collections import OrderedDict
import pyemd
# Local imports
from fordclassifier.evaluator.predictorClass import Predictor
from fordclassifier.evaluator.rbo import *
import pdb
class Evaluator(object):
'''
Class to evaluate the performance of the classifiers
============================================================================
Methods:
============================================================================
_recover: if a variable is not in memory, tries to recover it from disk
_get_folder: returns full path to a subfolder
_exists_file: check if the file exists in disk
draw_rocs: draws the Xval ROCs and saves them as png files
load_Xtfidf: Loads from disk Xtfidf and tags
load_test_data: Loads from disk test Xtfidf and tags
load_train_data: Loads from disk train Xtfidf and tags
compute_average_xval_AUC: computes the average AUC on xval
compute_average_test_AUC: computes the average AUC on test
obtain_labels_from_Preds: Produces the multilabel tag prediction from
individual predictions of every classifier
compute_confussion_matrix: computes the confusion matrix on test
(multiclass case)
compute_confusion_matrix_multilabel: computes the confussion matrix for a
multilabel set (multilabel case)
draw_confussion_matrix: draws the CM and saves it as a png file
draw_ROCS_tst: draws the ROC curves for the test data
draw_anyROC: draws the ROC curves
compute_thresholds: computes the thresholds
compute_cardinality: computes the cardinality of the tags
compute_label_density: Computes the label density
JaccardIndex: Computes the Jaccard index
compute_multilabel_threshold: Computes the multilabel threshold
draw_costs_on_test: draws the multilabel cost for the test data
load_multilabel_threshold: Loads the multilabel thresholds
Jaccard_RBO_cost: Computes a convex combination of the Jaccard and
RBO costs
align_strings: Aligns strings into columns
get_pred_weights: Returns the normalized predictions
write_prediction_report: writes a simple prediction report in text format
============================================================================
'''
def __init__(self, project_path, subfolders, categories=None, verbose=True):
'''
Initialization: Creates the initial object data
Inputs:
- project_path: path to the working project
- subfolders: subfolder structure
'''
self._project_path = project_path # working directory
self._verbose = verbose # messages are printed on screen when True
self.models2evaluate = None # models to evaluate (classif, params)
self._subfolders = None # subfolders structure
self.best_auc = None # Best AUC
self.best_models = None # Best models
self.Xtfidf_tr = None # Xtfidf for training
self.tags_tr = None # Training tags
self.tags = None # All tags
self.ths_dict = None # dict with the thresholds for every classifier
self.Preds = None # Prediction matrix, one column per category
self.Preds_tr = None # Pred. matrix, one column per category, train
self.Preds_tst = None # Pred. matrix, one column per category, test
self.index_tst = None # Index for tags test
self.categories = categories # List of categories
self.Xtfidf_tst = None # Xtfidf for test
self.tags_tst = None # Test tags
self.CONF = None # Confusion matrix
self.multilabel_th = None # Multilabel Threshold
self._subfolders = subfolders
def _get_folder(self, subfolder):
'''
gets full path to a folder
Inputs:
- subfolder: target subfolder
'''
return os.path.join(self._project_path, self._subfolders[subfolder])
def _exists_file(self, filename):
'''
Checks if the file exists
Inputs:
- filename
'''
try:
f = open(filename, 'r')
existe = True
f.close()
except:
existe = False
pass
return existe
def _recover(self, field):
    '''
    Loads from disk a previously stored variable, to avoid recomputing it

    Inputs:
        - field: name of the attribute to restore; one of 'best_auc',
          'best_models', 'Xtfidf_tr', 'Xtfidf_tst', 'tags', 'ths_dict',
          'Preds', 'Preds_tr', 'Preds_tst', 'CONF', 'tags_index',
          'categories', 'models2evaluate', 'multilabel_th'.
          Unknown names are silently ignored (no branch matches).

    Side effects:
        - sets the corresponding attribute(s) on self
    '''
    # NOTE(review): several branches build paths with string concatenation
    # (self._project_path + self._subfolders[...]) while 'Preds_tr' and
    # 'Preds_tst' use os.path.join(self._project_path, self._subfolders[...]);
    # both forms only agree if the subfolder values embed their own
    # separators -- confirm against the project configuration.
    if field == 'best_auc':
        input_file = os.path.join(self._get_folder('results'),
                                  'best_auc.json')
        with open(input_file, 'r') as f:
            self.best_auc = json.load(f)
    if field == 'best_models':
        # Look in 'results' first; on any failure fall back to 'export'
        try:
            input_file = os.path.join(self._get_folder('results'),
                                      'best_models.json')
            with open(input_file, 'r') as f:
                self.best_models = json.load(f)
        except:
            input_file = os.path.join(self._get_folder('export'),
                                      'best_models.json')
            with open(input_file, 'r') as f:
                self.best_models = json.load(f)
            pass
    if field == 'Xtfidf_tr':
        filetoload_Xtfidf = os.path.join(
            self._project_path + self._subfolders['training_data'],
            'train_data.pkl')
        # The pickle holds a 4-element list; the 2nd and 4th entries (raw
        # tags and refs) are unpacked into locals and discarded here.
        with open(filetoload_Xtfidf, 'rb') as f:
            [self.Xtfidf_tr, tags_tr, self.tags_tr,
             refs_tr] = pickle.load(f)
    if field == 'Xtfidf_tst':
        filetoload_Xtfidf = os.path.join(
            self._project_path + self._subfolders['test_data'],
            'test_data.pkl')
        with open(filetoload_Xtfidf, 'rb') as f:
            [self.Xtfidf_tst, tags_tst, self.tags_tst,
             refs_tst] = pickle.load(f)
    if field == 'tags':
        filetoload_tags = os.path.join(
            self._project_path + self._subfolders['training_data'],
            'tags.pkl')
        with open(filetoload_tags, 'rb') as f:
            self.tags = pickle.load(f)
    if field == 'ths_dict':
        # Per-classifier thresholds: 'results' first, 'export' as fallback
        try:
            filename = os.path.join(
                self._project_path + self._subfolders['results'],
                'ths_dict.pkl')
            with open(filename, 'rb') as f:
                self.ths_dict = pickle.load(f)
        except:
            filename = os.path.join(
                self._project_path + self._subfolders['export'],
                'ths_dict.pkl')
            with open(filename, 'rb') as f:
                self.ths_dict = pickle.load(f)
            pass
    if field == 'Preds':
        filename = os.path.join(
            self._project_path + self._subfolders['results'], 'Preds.pkl')
        with open(filename, 'rb') as f:
            self.Preds = pickle.load(f)
    if field == 'Preds_tr':
        filename = os.path.join(
            self._project_path, self._subfolders['results'],
            'Preds_tr.pkl')
        with open(filename, 'rb') as f:
            self.Preds_tr = pickle.load(f)
    if field == 'Preds_tst':
        # Note: the on-disk name is 'Preds_test.pkl', not 'Preds_tst.pkl'
        filename = os.path.join(
            self._project_path, self._subfolders['results'],
            'Preds_test.pkl')
        with open(filename, 'rb') as f:
            self.Preds_tst = pickle.load(f)
    if field == 'CONF':
        filename = os.path.join(
            self._project_path + self._subfolders['results'], 'CONF.pkl')
        with open(filename, 'rb') as f:
            self.CONF = pickle.load(f)
    if field == 'tags_index':
        # Restores two attributes at once: test tags and their index
        filename = os.path.join(
            self._project_path + self._subfolders['test_data'],
            'tags_index.pkl')
        with open(filename, 'rb') as f:
            [self.tags_tst, self.index_tst] = pickle.load(f)
    if field == 'categories':
        try:
            filename = os.path.join(
                self._project_path + self._subfolders['training_data'],
                'categories.pkl')
            with open(filename, 'rb') as f:
                self.categories = pickle.load(f)
        except:
            filename = os.path.join(
                self._project_path + self._subfolders['export'],
                'categories.pkl')
            with open(filename, 'rb') as f:
                self.categories = pickle.load(f)
            pass
    if field == 'models2evaluate':
        try:
            filename = os.path.join(
                self._project_path + self._subfolders['training_data'],
                'models2evaluate.pkl')
            with open(filename, 'rb') as f:
                self.models2evaluate = pickle.load(f)
        except:
            filename = os.path.join(
                self._project_path + self._subfolders['export'],
                'models2evaluate.pkl')
            with open(filename, 'rb') as f:
                self.models2evaluate = pickle.load(f)
            pass
    if field == 'multilabel_th':
        try:
            filename = os.path.join(
                self._project_path + self._subfolders['training_data'],
                'multilabel_th.pkl')
            with open(filename, 'rb') as f:
                self.multilabel_th = pickle.load(f)
        except:
            filename = os.path.join(
                self._project_path + self._subfolders['export'],
                'multilabel_th.pkl')
            with open(filename, 'rb') as f:
                self.multilabel_th = pickle.load(f)
            pass
    return
def draw_rocs(self, verbose=True):
    '''
    Draws the Xval ROCs and saves them as png files

    Inputs:
        - verbose: when True, progress messages are printed on screen

    Side effects:
        - writes one '<category>_ROC_xval.png' file per category into the
          'ROCS_tr' folder
    '''
    if verbose:
        print("Saving ROC figures ...")
    # Lazily restore the category list and evaluated models from disk
    if self.categories is None:
        self._recover('categories')
    if self.models2evaluate is None:
        self._recover('models2evaluate')
    # get the evaluated models
    models = list(self.models2evaluate.keys())
    Nclass = len(models)
    Ncats = len(self.categories)
    for kcat in range(0, Ncats):
        plt.figure(figsize=(15, 12))
        aucs = []
        cat = self.categories[kcat]
        # Collect (model_name, AUC) pairs; models whose ROC pickle is
        # missing or malformed are silently skipped by the bare except.
        for kclass in range(0, Nclass):
            try:
                model_name = models[kclass]
                file_input_ROC = os.path.join(
                    self._get_folder('eval_ROCs'),
                    'ROC_' + model_name + '_' + cat + '.pkl')
                with open(file_input_ROC, 'rb') as f:
                    mdict = pickle.load(f)
                # NOTE(review): this local `auc` shadows the
                # sklearn.metrics.auc function imported at module level
                auc = mdict['roc_auc_loo']
                aucs.append((model_name, auc))
            except:
                pass
        # Sorting by AUC
        aucs.sort(key=operator.itemgetter(1), reverse=True)
        colors = ['k', 'r', 'g', 'b', 'm', 'c', 'r--', 'g--', 'b--', 'm--',
                  'c--', 'k--']
        # drawing the best 10 models
        for k in range(0, 10):
            try:
                # IndexError here (fewer than 10 models) just ends the loop
                # body early via the except below
                model_name = aucs[k][0]
                auc = aucs[k][1]
                file_input_ROC = os.path.join(
                    self._get_folder('eval_ROCs'),
                    'ROC_' + model_name + '_' + cat + '.pkl')
                with open(file_input_ROC, 'rb') as f:
                    mdict = pickle.load(f)
                fpr = mdict['fpr_loo']
                tpr = mdict['tpr_loo']
                text = model_name + ', AUC= ' + str(auc)[0:6]
                # Only curves with AUC above 0.6 are drawn
                if auc > 0.6:
                    if k == 0:
                        # drawing the best model with thicker line
                        plt.plot(fpr, tpr, colors[k], label=text,
                                 linewidth=6.0)
                    else:
                        plt.plot(fpr, tpr, colors[k], label=text,
                                 linewidth=2.0)
            except:
                pass
        plt.xlabel('FPR')
        plt.ylabel('TPR')
        plt.title('ROC curves for category ' + cat)
        plt.grid(True)
        plt.legend(loc="lower right")
        filename = os.path.join(self._get_folder('ROCS_tr'),
                                cat + '_ROC_xval.png')
        plt.savefig(filename)
        plt.close()
        if verbose:
            print(cat, )
    return
def load_Xtfidf(self, verbose=True):
'''
Loads from disk Xtfidf and tags
Inputs:
- None, it operates on self values
'''
if self.Xtfidf is None:
self._recover('Xtfidf')
if self.tags is None:
self._recover('tags')
return self.Xtfidf, self.tags
def load_test_data(self, verbose=True):
'''
Loads from disk test Xtfidf and tags
Inputs:
- None, it operates on self values
'''
filename = os.path.join(
self._project_path + self._subfolders['test_data'],
'test_data.pkl')
with open(filename, 'rb') as f:
[self.Xtfidf_tst, self.tags_tst, refs_tst] = pickle.load(f)
new_tags_tst = []
for tags in self.tags_tst:
unique_tags = sorted(set(tags), key=tags.index)
new_tags_tst.append(unique_tags)
return self.Xtfidf_tst, new_tags_tst, refs_tst
def load_train_data(self, verbose=True):
'''
Loads from disk train Xtfidf and tags
Inputs:
- None, it operates on self values
'''
filename = os.path.join(
self._project_path + self._subfolders['training_data'],
'train_data.pkl')
with open(filename, 'rb') as f:
[self.Xtfidf_tr, self.tags_tr, refs_tr] = pickle.load(f)
new_tags_tr = []
for tags in self.tags_tr:
unique_tags = sorted(set(tags), key=tags.index)
new_tags_tr.append(unique_tags)
return self.Xtfidf_tr, new_tags_tr, refs_tr
def compute_average_xval_AUC(self, verbose=True):
'''
Computes the average AUC on xval
Inputs:
- None, it operates on self values
'''
if self.best_auc is None:
self._recover('best_auc')
aucs = list(self.best_auc.values())
average_auc = np.mean(aucs)
return average_auc
def obtain_labels_from_Preds(self, Preds, threshold,
categories=None, verbose=True):
'''
Produces the multilabel tag prediction | |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List, Optional, Union
import editdistance
import torch
from torchmetrics import Metric
from nemo.collections.asr.parts.submodules import rnnt_beam_decoding as beam_decode
from nemo.collections.asr.parts.submodules import rnnt_greedy_decoding as greedy_decode
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis, NBestHypotheses
from nemo.utils import logging
__all__ = ['RNNTDecoding', 'RNNTWER']
class AbstractRNNTDecoding(ABC):
"""
Used for performing RNN-T auto-regressive decoding of the Decoder+Joint network given the encoder state.
Args:
decoding_cfg: A dict-like object which contains the following key-value pairs.
strategy: str value which represents the type of decoding that can occur.
Possible values are :
- greedy, greedy_batch (for greedy decoding).
- beam, tsd, alsd (for beam search decoding).
compute_hypothesis_token_set: A bool flag, which determines whether to compute a list of decoded
tokens as well as the decoded string. Default is False in order to avoid double decoding
unless required.
preserve_alignments: Bool flag which preserves the history of logprobs generated during
decoding (sample / batched). When set to true, the Hypothesis will contain
the non-null value for `logprobs` in it. Here, `logprobs` is a List of torch.Tensors.
In order to obtain this hypothesis, please utilize `rnnt_decoder_predictions_tensor` function
with the `return_hypotheses` flag set to True.
The length of the list corresponds to the Acoustic Length (T).
Each value in the list (Ti) is a torch.Tensor (U), representing 1 or more targets from a vocabulary.
U is the number of target tokens for the current timestep Ti.
The config may further contain the following sub-dictionaries:
"greedy":
max_symbols: int, describing the maximum number of target tokens to decode per
timestep during greedy decoding. Setting to larger values allows longer sentences
to be decoded, at the cost of increased execution time.
"beam":
beam_size: int, defining the beam size for beam search. Must be >= 1.
If beam_size == 1, will perform cached greedy search. This might be slightly different
results compared to the greedy search above.
score_norm: optional bool, whether to normalize the returned beam score in the hypotheses.
Set to True by default.
return_best_hypothesis: optional bool, whether to return just the best hypothesis or all of the
hypotheses after beam search has concluded. This flag is set by default.
tsd_max_sym_exp: optional int, determines number of symmetric expansions of the target symbols
per timestep of the acoustic model. Larger values will allow longer sentences to be decoded,
at increased cost to execution time.
alsd_max_target_len: optional int or float, determines the potential maximum target sequence length.
If an integer is provided, it can decode sequences of that particular maximum length.
If a float is provided, it can decode sequences of int(alsd_max_target_len * seq_len),
where seq_len is the length of the acoustic model output (T).
NOTE:
If a float is provided, it can be greater than 1!
By default, a float of 2.0 is used so that a target sequence can be at most twice
as long as the acoustic model output length T.
maes_num_steps: Number of adaptive steps to take. From the paper, 2 steps is generally sufficient,
and can be reduced to 1 to improve decoding speed while sacrificing some accuracy. int > 0.
maes_prefix_alpha: Maximum prefix length in prefix search. Must be an integer, and is advised to keep this as 1
in order to reduce expensive beam search cost later. int >= 0.
maes_expansion_beta: Maximum number of prefix expansions allowed, in addition to the beam size.
Effectively, the number of hypothesis = beam_size + maes_expansion_beta. Must be an int >= 0,
and affects the speed of inference since large values will perform large beam search in the next step.
maes_expansion_gamma: Float pruning threshold used in the prune-by-value step when computing the expansions.
The default (2.3) is selected from the paper. It performs a comparison (max_log_prob - gamma <= log_prob[v])
where v is all vocabulary indices in the Vocab set and max_log_prob is the "most" likely token to be
predicted. Gamma therefore provides a margin of additional tokens which can be potential candidates for
expansion apart from the "most likely" candidate.
Lower values will reduce the number of expansions (by increasing pruning-by-value, thereby improving speed
but hurting accuracy). Higher values will increase the number of expansions (by reducing pruning-by-value,
thereby reducing speed but potentially improving accuracy). This is a hyper parameter to be experimentally
tuned on a validation set.
softmax_temperature: Scales the logits of the joint prior to computing log_softmax.
decoder: The Decoder/Prediction network module.
joint: The Joint network module.
blank_id: The id of the RNNT blank token.
"""
def __init__(self, decoding_cfg, decoder, joint, blank_id: int):
    """
    Build the decoding module selected by `decoding_cfg.strategy`.

    Args:
        decoding_cfg: dict-like decoding configuration (see class docstring
            for the supported keys and sub-dictionaries).
        decoder: The Decoder/Prediction network module.
        joint: The Joint network module.
        blank_id: The id of the RNNT blank token.

    Raises:
        ValueError: if `decoding_cfg.strategy` is not a supported strategy.
    """
    super(AbstractRNNTDecoding, self).__init__()
    self.cfg = decoding_cfg
    self.blank_id = blank_id
    self.compute_hypothesis_token_set = self.cfg.get("compute_hypothesis_token_set", False)
    self.preserve_alignments = self.cfg.get('preserve_alignments', False)
    possible_strategies = ['greedy', 'greedy_batch', 'beam', 'tsd', 'alsd', 'maes']
    if self.cfg.strategy not in possible_strategies:
        raise ValueError(f"Decoding strategy must be one of {possible_strategies}")
    # Instantiate the concrete inference module.  The beam variants all use
    # BeamRNNTInfer and differ only in `search_type` plus their
    # strategy-specific keyword arguments.
    if self.cfg.strategy == 'greedy':
        self.decoding = greedy_decode.GreedyRNNTInfer(
            decoder_model=decoder,
            joint_model=joint,
            blank_index=self.blank_id,
            # 'max_symbols' is the preferred key; 'max_symbols_per_step'
            # is accepted as a legacy fallback
            max_symbols_per_step=(
                self.cfg.greedy.get('max_symbols', None) or self.cfg.greedy.get('max_symbols_per_step', None)
            ),
            preserve_alignments=self.preserve_alignments,
        )
    elif self.cfg.strategy == 'greedy_batch':
        self.decoding = greedy_decode.GreedyBatchedRNNTInfer(
            decoder_model=decoder,
            joint_model=joint,
            blank_index=self.blank_id,
            max_symbols_per_step=(
                self.cfg.greedy.get('max_symbols', None) or self.cfg.greedy.get('max_symbols_per_step', None)
            ),
            preserve_alignments=self.preserve_alignments,
        )
    elif self.cfg.strategy == 'beam':
        self.decoding = beam_decode.BeamRNNTInfer(
            decoder_model=decoder,
            joint_model=joint,
            beam_size=self.cfg.beam.beam_size,
            return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
            search_type='default',
            score_norm=self.cfg.beam.get('score_norm', True),
            softmax_temperature=self.cfg.beam.get('softmax_temperature', 1.0),
            preserve_alignments=self.preserve_alignments,
        )
    elif self.cfg.strategy == 'tsd':
        # Time-synchronous decoding
        self.decoding = beam_decode.BeamRNNTInfer(
            decoder_model=decoder,
            joint_model=joint,
            beam_size=self.cfg.beam.beam_size,
            return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
            search_type='tsd',
            score_norm=self.cfg.beam.get('score_norm', True),
            tsd_max_sym_exp_per_step=self.cfg.beam.get('tsd_max_sym_exp', 10),
            softmax_temperature=self.cfg.beam.get('softmax_temperature', 1.0),
            preserve_alignments=self.preserve_alignments,
        )
    elif self.cfg.strategy == 'alsd':
        # Alignment-length synchronous decoding
        self.decoding = beam_decode.BeamRNNTInfer(
            decoder_model=decoder,
            joint_model=joint,
            beam_size=self.cfg.beam.beam_size,
            return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
            search_type='alsd',
            score_norm=self.cfg.beam.get('score_norm', True),
            alsd_max_target_len=self.cfg.beam.get('alsd_max_target_len', 2),
            softmax_temperature=self.cfg.beam.get('softmax_temperature', 1.0),
            preserve_alignments=self.preserve_alignments,
        )
    elif self.cfg.strategy == 'maes':
        # Modified adaptive expansion search
        self.decoding = beam_decode.BeamRNNTInfer(
            decoder_model=decoder,
            joint_model=joint,
            beam_size=self.cfg.beam.beam_size,
            return_best_hypothesis=decoding_cfg.beam.get('return_best_hypothesis', True),
            search_type='maes',
            score_norm=self.cfg.beam.get('score_norm', True),
            maes_num_steps=self.cfg.beam.get('maes_num_steps', 2),
            maes_prefix_alpha=self.cfg.beam.get('maes_prefix_alpha', 1),
            maes_expansion_gamma=self.cfg.beam.get('maes_expansion_gamma', 2.3),
            maes_expansion_beta=self.cfg.beam.get('maes_expansion_beta', 2.0),
            softmax_temperature=self.cfg.beam.get('softmax_temperature', 1.0),
            preserve_alignments=self.preserve_alignments,
        )
    else:
        # Unreachable in practice: strategy was validated above
        raise ValueError(
            f"Incorrect decoding strategy supplied. Must be one of {possible_strategies}\n"
            f"but was provided {self.cfg.strategy}"
        )
def rnnt_decoder_predictions_tensor(
    self,
    encoder_output: torch.Tensor,
    encoded_lengths: torch.Tensor,
    return_hypotheses: bool = False,
    partial_hypotheses: Optional[List[Hypothesis]] = None,
) -> (List[str], Optional[List[List[str]]], Optional[Union[Hypothesis, NBestHypotheses]]):
    """
    Decode an encoder output by autoregressive decoding of the Decoder+Joint networks.

    Args:
        encoder_output: torch.Tensor of shape [B, D, T].
        encoded_lengths: torch.Tensor containing lengths of the padded encoder outputs. Shape [B].
        return_hypotheses: bool. If set to True it will return list of Hypothesis or NBestHypotheses
        partial_hypotheses: optional list of partial hypotheses (one per batch element) forwarded
            to the decoding strategy to seed/continue decoding.

    Returns:
        If `return_best_hypothesis` is set:
            A tuple (hypotheses, None):
                hypotheses - list of Hypothesis (best hypothesis per sample).
                    Look at rnnt_utils.Hypothesis for more information.

        If `return_best_hypothesis` is not set:
            A tuple(hypotheses, all_hypotheses)
                hypotheses - list of Hypothesis (best hypothesis per sample).
                    Look at rnnt_utils.Hypothesis for more information.
                all_hypotheses - list of NBestHypotheses. Each NBestHypotheses further contains a sorted
                    list of all the hypotheses of the model per sample.
                    Look at rnnt_utils.NBestHypotheses for more information.
    """
    # Compute hypotheses
    with torch.no_grad():
        hypotheses_list = self.decoding(
            encoder_output=encoder_output, encoded_lengths=encoded_lengths, partial_hypotheses=partial_hypotheses
        )  # type: [List[Hypothesis]]
        # extract the hypotheses
        hypotheses_list = hypotheses_list[0]  # type: List[Hypothesis]
    prediction_list = hypotheses_list
    # Beam search configured with return_best_hypothesis=False yields
    # NBestHypotheses objects; every other configuration yields plain
    # Hypothesis objects, handled in the else branch.
    if isinstance(prediction_list[0], NBestHypotheses):
        hypotheses = []
        all_hypotheses = []
        for nbest_hyp in prediction_list:  # type: NBestHypotheses
            n_hyps = nbest_hyp.n_best_hypotheses  # Extract all hypotheses for this sample
            decoded_hyps = self.decode_hypothesis(n_hyps)  # type: List[str]
            hypotheses.append(decoded_hyps[0])  # best hypothesis
            all_hypotheses.append(decoded_hyps)
        if return_hypotheses:
            return hypotheses, all_hypotheses
        best_hyp_text = [h.text for h in hypotheses]
        all_hyp_text = [h.text for hh in all_hypotheses for h in hh]
        return best_hyp_text, all_hyp_text
    else:
        hypotheses = self.decode_hypothesis(prediction_list)  # type: List[str]
        if return_hypotheses:
            return hypotheses, None
        best_hyp_text = [h.text for h in hypotheses]
        return best_hyp_text, None
def decode_hypothesis(self, hypotheses_list: List[Hypothesis]) -> List[Union[Hypothesis, NBestHypotheses]]:
"""
Decode a list of hypotheses into a list of strings.
Args:
hypotheses_list: List of Hypothesis.
Returns:
A list of strings.
"""
for ind | |
import numpy as np
from os import listdir
import pickle
import os
import scipy
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from config_args import parse_args
def losses_all(args):
    """Plot train/validation loss curves for four experiments on one figure.

    Reads the pickled loss histories stored under
    ``<cwd>/plotting/Losses/<exp_name>_chkpts/`` for four fixed experiment
    names and writes ``loss_plot.pdf`` into the same ``plotting/Losses``
    folder.

    Inputs:
        - args: parsed arguments (unused, kept for interface compatibility)
    """
    n_epochs = 30

    def _last_checkpoint(exp_name, name):
        # Each pickle file holds successive dumps (one per save); only the
        # most recent {epoch: loss} dict matters.
        path = (str(os.getcwd()) + '/plotting/Losses/' + exp_name +
                '_chkpts/' + name + '.pickle')
        dumps = []
        with open(path, 'rb') as fr:
            try:
                while True:
                    dumps.append(pickle.load(fr))
            except EOFError:
                pass
        return dumps[-1]

    def _truncated(history):
        # Keep the first n_epochs values (dicts preserve insertion order).
        return list(history.values())[:n_epochs]

    def _padded(history):
        # Runs shorter than n_epochs are padded by repeating the last value
        # plus a tiny jitter so the flat tail stays visible in the plot.
        values = list(history.values())
        while len(values) < n_epochs:
            values.append(values[-1] + np.random.uniform(0, 0.00001))
        return values

    # (experiment folder, legend label, line color, series extractor)
    experiments = [
        ('4D_15L_0.4Dr_No3D_64', '64x64 s=4', 'firebrick', _truncated),
        ('4D_15L_0.4Dr_No3D_32', '32x32 s=4', 'royalblue', _padded),
        ('2D_15L_0.4Dr_No3D_32', '32x32 s=2', 'darkviolet', _truncated),
        ('1D_15L_0.4Dr_No3D_32', '32x32 s=1', 'seagreen', _truncated),
    ]

    epoch = [i for i in range(n_epochs)]
    fig = go.Figure()
    for exp_name, label, color, extract in experiments:
        train_vals = extract(_last_checkpoint(exp_name, 'training_losses'))
        valid_vals = extract(_last_checkpoint(exp_name, 'valid_losses'))
        # Fix: the original legend prefixes were inconsistent
        # ('Train:' vs 'Training:'); normalized to 'Train:'.
        fig.add_trace(go.Scatter(x=epoch, y=train_vals,
                                 name='Train: ' + label,
                                 line=dict(color=color, width=2)
                                 ))
        fig.add_trace(go.Scatter(x=epoch, y=valid_vals,
                                 name='Validation: ' + label,
                                 line=dict(color=color, width=2, dash='dash')
                                 ))

    fig.update_layout(
        title="Training metrics",
        xaxis_title="<b> Training Epoch </b>",
        yaxis_title="<b> Loss Values </b>",
        legend_title="Loss",
        font=dict(
            family="Times New Roman, monospace",
            size=18,
            color="black"
        )
    )
    # Fix: write next to the input data instead of the hard-coded
    # '/home/tago/...' absolute path, matching the sibling losses().
    fig.write_image(str(os.getcwd()) + '/plotting/Losses/' + 'loss_plot.pdf')
    return
def losses(args):
    """Plot training vs. validation loss curves for one experiment.

    Reads the pickled loss histories stored under
    ``<cwd>/models/<args.exp_name>_chkpts/`` and writes ``loss_plot.pdf``
    next to them.

    Inputs:
        - args: parsed arguments; only ``args.exp_name`` is used
    """
    def _last_dump(name):
        # The pickle file holds successive dumps; only the latest matters.
        path = (str(os.getcwd()) + '/models/' + args.exp_name +
                '_chkpts/' + name + '.pickle')
        dumps = []
        with open(path, 'rb') as fr:
            try:
                while True:
                    dumps.append(pickle.load(fr))
            except EOFError:
                pass
        return dumps[-1]

    train_hist = _last_dump('training_losses')
    valid_hist = _last_dump('valid_losses')
    epochs = list(range(len(train_hist)))

    fig = go.Figure()
    fig.add_trace(go.Scatter(x=epochs, y=list(train_hist.values()),
                             mode='lines',
                             name='Training Loss'))
    fig.add_trace(go.Scatter(x=epochs, y=list(valid_hist.values()),
                             mode='lines',
                             name='Validation Loss'))
    fig.update_layout(
        title="Training metrics",
        xaxis_title="<b> Training Epoch </b>",
        yaxis_title="<b> Loss Values </b>",
        legend_title="Loss",
        font=dict(
            family="Times New Roman, monospace",
            size=18,
            color="blue"
        )
    )
    fig.write_image(str(os.getcwd()) + '/models/' + args.exp_name + '_chkpts/loss_plot.pdf')
def iowa_heights():
    """Plot average pasture height curves from the Iowa simulation dataset.

    Reads 'Fertilizer1dAnnual.csv' from the current working directory and
    writes two figures: 'simulated_data_iowa.pdf' (one curve per year,
    1980-2009) and 'simulated_data_std_iowa.pdf' (mean +/- std band).
    """
    # NOTE(review): assumes the CSV provides at least the columns 'date',
    # 'drymatter', 'heightchange', 'cover', 'day', 'year', 'height' --
    # confirm against the data file.
    df = pd.DataFrame()
    df = pd.read_csv('Fertilizer1dAnnual.csv')
    df = df.drop(['date', 'drymatter', 'heightchange', 'cover'], axis=1)
    # Drop leap-year day 366 so all years share the same day range
    df.drop(df[df.day == 366].index, inplace=True)
    # df.set_index('day')
    df_plot = pd.DataFrame()
    # Seed the wide table with the 1980 rows; each later year is appended
    # as its own column named after the year
    df_plot = df[df['year'].isin([1980])][['day', 'height']]
    #print(df_plot.head())
    df_plot = df_plot.rename({'height': '1980'}, axis=1)
    #print(df_plot.head())
    df_plot.set_index('day')
    for i in range(1981, 2010):
        temp_df = pd.DataFrame()
        temp_df = df[df['year'].isin([i])][['height']]
        # Re-index so this year's rows align positionally with the 1980
        # rows.  NOTE(review): assumes every year has the same row count
        # after dropping day 366 -- confirm.
        temp_df.index = df_plot.index
        df_plot['height'] = temp_df
        df_plot.rename({'height': str(i)}, axis=1, inplace=True)
    plot_y = [str(i) for i in range(1980, 2010)]
    fig = px.line(df_plot, x='day', y=plot_y, title='Average Pasture Height: Iowa Dataset')
    fig.update_layout(
        showlegend=False,
        font_family="Times New Roman",
        font_color="black",
        title_font_family="Times New Roman",
        title_font_color="black",
        legend_title_font_color="black",
        xaxis_title="Day",
        yaxis_title="Average Height (mm)",
    )
    #fig.update_xaxes(title)
    fig.show()
    fig.write_image('simulated_data_iowa.pdf')
    # Build the cross-year mean and std band from the 30 yearly columns
    df_err_bnd = df_plot.drop(['day'], axis=1)
    df_err_bnd.index = df_plot.index
    df_err_bnd = df_err_bnd.assign(mean=df_err_bnd.mean(axis=1))
    df_err_bnd = df_err_bnd.assign(std=df_err_bnd.std(axis=1))
    df_err_bnd['day'] = df_plot['day']
    df_err_bnd = df_err_bnd.drop(plot_y, axis=1)
    # Mean line plus an upper/lower band filled with 'tonexty'
    fig = go.Figure([
        go.Scatter(
            name='Mean & Std. Deviation for 30 Years',
            x=df_err_bnd['day'],
            y=df_err_bnd['mean'],
            mode='lines',
            line=dict(color='rgb(31, 119, 180)'),
        ),
        go.Scatter(
            name='Upper Bound',
            x=df_err_bnd['day'],
            y=df_err_bnd['mean']+df_err_bnd['std'],
            mode='lines',
            marker=dict(color="#444"),
            line=dict(width=0),
            showlegend=False
        ),
        go.Scatter(
            name='Lower Bound',
            x=df_err_bnd['day'],
            y=df_err_bnd['mean']-df_err_bnd['std'],
            marker=dict(color="#444"),
            line=dict(width=0),
            mode='lines',
            fillcolor='rgba(68, 68, 68, 0.3)',
            fill='tonexty',
            showlegend=False
        )
    ])
    fig.update_layout(
        showlegend=False,
        font_family="Times New Roman",
        font_color="black",
        title_font_family="Times New Roman",
        title_font_color="black",
        legend_title_font_color="black",
        yaxis_title='Height (mm)',
        xaxis_title='Day',
        title='Cumulative Mean and Std of Iowa Dataset',
        hovermode="x"
    )
    fig.show()
    fig.write_image('simulated_data_std_iowa.pdf')
def error_time_gazebo(args):
    """Plot per-step prediction-error quartiles of the 32x32 and 64x64 models
    against target-value quartiles (secondary axis), then save the figure.

    Loads precomputed .mat result files from plotting/error/ and writes
    error_time_gazebo.pdf back to the same directory.  `args` is accepted for
    interface parity with the other plotting entry points but is not used.
    """
    def load_results(name, exp_name):
        # local import: scipy is only needed by this plotting routine
        import scipy.io
        mat = scipy.io.loadmat(str(os.getcwd()) + '/plotting/error/' + name + '_' + exp_name + '.mat')
        return mat

    results_64 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_64')
    results_32 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_32')
    error_64 = results_64['y_predict_err']
    error_32 = results_32['y_predict_err']
    # BUG FIX: target_64 was previously read from results_32
    target_64 = results_64['y_target']
    target_32 = results_32['y_target']

    def plot_error(error, error64, target):
        """Build and save the two-axis quartile-band figure."""
        import numpy as np
        from plotly.subplots import make_subplots

        def quartiles(arr):
            """Collapse all trailing dims; return per-step (median, q75, q25)."""
            flat = arr.reshape(arr.shape[0], -1)
            return (np.quantile(flat, 0.50, axis=1),
                    np.quantile(flat, 0.75, axis=1),
                    np.quantile(flat, 0.25, axis=1))

        def add_band(fig, x, med, upper, lower, name, color, fill, line, secondary):
            """Add a median line plus a shaded interquartile band to one axis."""
            fig.add_trace(go.Scatter(name=name, x=x, y=med, mode='lines', line=line),
                          secondary_y=secondary)
            fig.add_trace(go.Scatter(name=name + ' Upper Bound', x=x, y=upper,
                                     mode='lines', marker=dict(color=color),
                                     line=dict(width=0), showlegend=False),
                          secondary_y=secondary)
            fig.add_trace(go.Scatter(name=name + ' Lower Bound', x=x, y=lower,
                                     mode='lines', marker=dict(color=color),
                                     line=dict(width=0), fillcolor=fill,
                                     fill='tonexty', showlegend=False),
                          secondary_y=secondary)

        steps = np.arange(1, error.shape[0] + 1)
        med32, hi32, lo32 = quartiles(error)
        med64, hi64, lo64 = quartiles(error64)
        tmed, thi, tlo = quartiles(target)

        fig = make_subplots(specs=[[{"secondary_y": True}]])
        # prediction errors share the primary axis
        add_band(fig, steps, med32, hi32, lo32, '32x32 Error', '#9b2f2f',
                 'rgba(239,76,76, 0.45)', dict(color='#9b2f2f', width=2), False)
        add_band(fig, steps, med64, hi64, lo64, '64x64 Error', '#6a6084',
                 'rgba(140,134,155,0.45)', dict(color='#6a6084', width=2), False)
        # targets live on the secondary axis with a dashed median line
        add_band(fig, steps, tmed, thi, tlo, 'Target', '#8b9a71',
                 'rgba(159,177,128,0.25)', dict(color='#8b9a71', width=2, dash='dash'), True)

        fig.update_layout(
            title_text="<b> Prediction Error vs. Target Values </b>"
        )
        # Set x-axis title
        fig.update_xaxes(title_text="<b> Prediction Step </b>")
        # Set y-axes titles
        fig.update_yaxes(title_text="<b> Prediction Error (mm) </b>", secondary_y=False)
        fig.update_yaxes(title_text="<b> Target Values (mm) </b>", secondary_y=True)
        fig.show()
        fig.write_image(str(os.getcwd()) + '/plotting/error/' + 'error_time_gazebo.pdf')

    plot_error(error_32, error_64, target_32)
def std_time_gazebo(args):
def load_results(name, exp_name):
import scipy.io
mat = scipy.io.loadmat(str(os.getcwd()) + '/plotting/error/'+ name + '_' + exp_name + '.mat')
return mat
results_64 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_64')
results_32 = load_results('3D_predict_data_0', '4D_15L_0.4Dr_No3D_32')
std_64 = results_64['y_predict_std']
std_32 = results_32['y_predict_std']
target_64 = results_32['y_target']
target_32 = results_32['y_target']
def plot_std(error, error64, target):
import numpy as np
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
df = pd.DataFrame()
step = []
# for i in range(error.shape[0]):
# for _ in range(error.shape[1]):
# step.append(i+1)
df['Step'] = [i+1 for i in range(error.shape[0])]
error = error.reshape(error.shape[0], -1)
error_med = np.quantile(error, 0.50, axis=1)
error_75 = np.quantile(error, 0.75, axis=1)
error_25 = np.quantile(error, 0.25, axis=1)
error64 = error64.reshape(error64.shape[0], -1)
error_med_64 = np.quantile(error64, 0.50, axis=1)
error_75_64 = np.quantile(error64, 0.75, axis=1)
error_25_64 = np.quantile(error64, 0.25, axis=1)
target = target.reshape(target.shape[0], -1)
target_med = np.quantile(target, 0.5, axis=1)
target_75 = np.quantile(target, 0.75, axis=1)
target_25 = np.quantile(target, 0.25, axis=1)
df['Std 50'] = error_med.flatten()
df['Std 75'] = error_75.flatten()
df['Std 25'] = error_25.flatten()
df['Std 50 64'] = error_med_64.flatten()
df['Std 75 64'] = error_75_64.flatten()
df['Std 25 64'] = error_25_64.flatten()
df['Target 50'] = target_med.flatten()
df['Target 75'] = target_75.flatten()
df['Target 25'] = target_25.flatten()
from plotly.subplots import make_subplots
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Scatter(
name='32x32 Std. Dev.',
x=df['Step'],
y=df['Std 50'],
mode='lines',
line=dict(color='#9b2f2f', width=2),
),
secondary_y=False,
)
| |
<filename>Python_Projects/Global_Model/Working/calculation.py
#!/usr/bin/env python
# coding: utf-8
import xs
import numpy as np
from scipy.integrate import odeint
from math import isclose
from constants import *
class Global_model:
def __init__(self, p, input_power, duty, period, time_resolution=1e-8):
self.p = p
self.input_power = input_power*6.241509e18 # [J/s] to [eV/s]
self.duty = duty
self.period = period
self.time_resolution = time_resolution
self.ng = (p/7.5)/(Tg*kB)*1e-6 #[cm^-3]
lambda_i = 1/(self.ng*sigma_i) #[cm] ion-neutral mean free path
hl = 0.86*(3+l/2/lambda_i)**-0.5
hR = 0.8*(4+ro/lambda_i)**-0.5
self.Aeff = 2*np.pi*ro*(l*hR+ro*hl) #[cm^2] effective area
self.deff = V/self.Aeff #[cm]
print('Condition : {}mTorr, {}W, {}ms, {}'.format(self.p, self.input_power/6.241509e18, self.period*1000, self.duty))
    def balance_equations(self, calculation_array, t, power):
        """Right-hand side of the global-model rate equations for a pulsed H2 plasma.

        calculation_array holds, in order: Te [eV] and the densities of H,
        H(2s), H2(v=1..9), H+, H2+, H3+ and H- [cm^-3].  `power` is a callable
        returning the absorbed power at time t.  Returns the time derivatives
        in the same order, suitable for scipy.integrate.odeint.

        NOTE(review): work in progress -- many rate coefficients (k3_*_inv,
        k4_*, k16, k18, k23-k27), several energy losses (E3_*, E4_*) and all
        vibrational balance equations are left as empty assignments, so this
        method does not currently parse.  The TODO markers below flag each gap.
        """
        Te, nH, nH_2s, nH2_v1, nH2_v2, nH2_v3, nH2_v4, nH2_v5, nH2_v6, nH2_v7, nH2_v8, nH2_v9, nHp, nH2p, nH3p, nHm = calculation_array
        # Bohm velocities for H+, H2+ and H3+ (masses M, 2M, 3M)
        uB = np.sqrt(e*Te/M)*100 #[cm/s]
        uB2 = np.sqrt(e*Te/2/M)*100
        uB3 = np.sqrt(e*Te/3/M)*100
        #Vs = -Te*np.log(4/ne/np.sqrt(8*e*Te/np.pi/m)*(nHp*uB+nH2p*uB2+nH3p*uB3))
        # sheath voltage used in the electron power balance below
        Vs = Te*np.log(np.sqrt(M/(2*np.pi*m)))
        t0 = V/self.Aeff*np.sqrt(M/(e*Te))/100 #[s] Characteristic transit time of H+ ion
        # TODO: need cross-section values of k8, k9, k11 for very small Te (k2 is fine)
        ##### Rate coefficient calculation #####
        k1_0 = xs.rate_constant_with_analytic_xs(Te, 'reaction1_0')
        k1_1 = xs.rate_constant_with_analytic_xs(Te, 'reaction1_1')
        k1_2 = xs.rate_constant_with_analytic_xs(Te, 'reaction1_2')
        k1_3 = xs.rate_constant_with_analytic_xs(Te, 'reaction1_3')
        k1_4 = xs.rate_constant_with_analytic_xs(Te, 'reaction1_4')
        # log-polynomial fit in ln(Te)
        k2 = np.exp(-2.858072836568e+01+1.038543976082e+01*np.log(Te)-5.383825026583e+00*(np.log(Te))**2+1.950636494405e+00*(np.log(Te))**3-5.393666392407e-01*(np.log(Te))**4+1.006916814453e-01*(np.log(Te))**5-1.160758573972e-02*(np.log(Te))**6+7.411623859122e-04*(np.log(Te))**7-2.001369618807e-05*(np.log(Te))**8)
        k3_1 = xs.rate_constant_with_point_xs(Te, 'reaction3_1')
        k3_2 = xs.rate_constant_with_point_xs(Te, 'reaction3_2')
        k3_3 = xs.rate_constant_with_point_xs(Te, 'reaction3_3')
        k3_4 = xs.rate_constant_with_point_xs(Te, 'reaction3_4')
        k3_5 = xs.rate_constant_with_point_xs(Te, 'reaction3_5')
        k3_6 = xs.rate_constant_with_point_xs(Te, 'reaction3_6')
        # TODO: the inverse reaction-3 and all reaction-4 rate coefficients
        # below are not implemented yet (incomplete assignments)
        k3_1_inv =
        k3_2_inv =
        k3_3_inv =
        k3_4_inv =
        k3_5_inv =
        k3_6_inv =
        k4_0 =
        k4_1 =
        k4_2 =
        k4_3 =
        k4_4 =
        k4_5 =
        k4_6 =
        k4_7 =
        k4_8 =
        k4_0_inv =
        k4_1_inv =
        k4_2_inv =
        k4_3_inv =
        k4_4_inv =
        k4_5_inv =
        k4_6_inv =
        k4_7_inv =
        k4_8_inv =
        k5_0 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_0')
        k5_1 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_1')
        k5_2 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_2')
        k5_3 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_3')
        k5_4 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_4')
        k5_5 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_5')
        k5_6 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_6')
        k5_7 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_7')
        k5_8 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_8')
        k5_9 = xs.rate_constant_with_analytic_xs(Te, 'reaction5_9')
        k6_0 = xs.rate_constant_with_point_xs(Te, 'reaction6_0')
        k6_1 = xs.rate_constant_with_point_xs(Te, 'reaction6_1')
        k6_2 = xs.rate_constant_with_point_xs(Te, 'reaction6_2')
        k6_3 = xs.rate_constant_with_point_xs(Te, 'reaction6_2') # XS data needs supplementing (reuses reaction6_2 for now)
        k6_4 = xs.rate_constant_with_point_xs(Te, 'reaction6_5') # XS data needs supplementing (reuses reaction6_5 for now)
        k6_5 = xs.rate_constant_with_point_xs(Te, 'reaction6_5')
        k6_6 = xs.rate_constant_with_point_xs(Te, 'reaction6_6')
        k6_7 = xs.rate_constant_with_point_xs(Te, 'reaction6_7')
        k6_8 = xs.rate_constant_with_point_xs(Te, 'reaction6_8')
        k6_9 = xs.rate_constant_with_point_xs(Te, 'reaction6_9')
        k7 = np.exp(-3.834597006782e+01+1.426322356722e+01*np.log(Te)-5.826468569506e+00*(np.log(Te))**2+1.727940947913e+00*(np.log(Te))**3-3.598120866343e-01*(np.log(Te))**4+4.822199350494e-02*(np.log(Te))**5-3.909402993006e-03*(np.log(Te))**6+1.738776657690e-04*(np.log(Te))**7-3.252844486351e-06*(np.log(Te))**8)
        k8 = np.exp(-3.271396786375e+01+1.353655609057e+01*np.log(Te)-5.739328757388e+00*(np.log(Te))**2+1.563154982022e+00*(np.log(Te))**3-2.877056004391e-01*(np.log(Te))**4+3.482559773737e-02*(np.log(Te))**5-2.631976175590e-03*(np.log(Te))**6+1.119543953861e-04*(np.log(Te))**7-2.039149852002e-06*(np.log(Te))**8)
        k9 = np.exp(-1.781416067709e+01+2.277799785711e+00*np.log(Te)-1.266868411626e+00*(np.log(Te))**2+4.296170447419e-01*(np.log(Te))**3-9.609908013189e-02*(np.log(Te))**4+1.387958040699e-02*(np.log(Te))**5-1.231349039470e-03*(np.log(Te))**6+6.042383126281e-05*(np.log(Te))**7-1.247521040900e-06*(np.log(Te))**8)
        k10 = 2.1e-9
        k11 = np.exp(-1.700270758355e+01-4.050073042947e-01*np.log(Te)+1.018733477232e-08*(np.log(Te))**2-1.695586285687e-08*(np.log(Te))**3+1.564311217508e-10*(np.log(Te))**4+1.979725412288e-09*(np.log(Te))**5-4.395545994733e-10*(np.log(Te))**6+3.584926377078e-11*(np.log(Te))**7-1.024189019465e-12*(np.log(Te))**8)
        k12 = np.exp(-3.078408636631e+01+1.509421488513e+01*np.log(Te)-7.349167207324e+00*(np.log(Te))**2+2.320966107642e+00*(np.log(Te))**3-4.818077551719e-01*(np.log(Te))**4+6.389229162737e-02*(np.log(Te))**5-5.161880953089e-03*(np.log(Te))**6+2.303985092606e-04*(np.log(Te))**7-4.344846146197e-06*(np.log(Te))**8)
        k13 = xs.rate_constant_with_point_xs(Te, 'reaction13')
        k14 = np.exp(-1.801849334273e+01+2.360852208681e+00*np.log(Te)-2.827443061704e-01*(np.log(Te))**2+1.623316639567e-02*(np.log(Te))**3-3.365012031363e-02*(np.log(Te))**4+1.178329782711e-02*(np.log(Te))**5-1.656194699504e-03*(np.log(Te))**6+1.068275202678e-04*(np.log(Te))**7-2.631285809207e-06*(np.log(Te))**8)
        k15 = 1.7e-9
        # TODO: k16 and k18 are not implemented yet
        k16 =
        k17 = 4.4e-16 # at ion 0.02eV
        k18 =
        k19 = np.exp(-3.454175591367e+01+1.412655911280e+01*np.log(Te)-6.004466156761e+00*(np.log(Te))**2+1.589476697488e+00*(np.log(Te))**3-2.775796909649e-01*(np.log(Te))**4+3.152736888124e-02*(np.log(Te))**5-2.229578042005e-03*(np.log(Te))**6+8.890114963166e-05*(np.log(Te))**7-1.523912962346e-06*(np.log(Te))**8)
        k20 = np.exp(-2.833259375256e+01+9.587356325603e+00*np.log(Te)-4.833579851041e+00*(np.log(Te))**2+1.415863373520e+00*(np.log(Te))**3-2.537887918825e-01*(np.log(Te))**4+2.800713977946e-02*(np.log(Te))**5-1.871408172571e-03*(np.log(Te))**6+6.986668318407e-05*(np.log(Te))**7-1.123758504195e-06*(np.log(Te))**8)
        k21 = np.exp(-1.973476726029e+01+3.992702671457e+00*np.log(Te)-1.773436308973e+00*(np.log(Te))**2+5.331949621358e-01*(np.log(Te))**3-1.181042453190e-01*(np.log(Te))**4+1.763136575032e-02*(np.log(Te))**5-1.616005335321e-03*(np.log(Te))**6+8.093908992682e-05*(np.log(Te))**7-1.686664454913e-06*(np.log(Te))**8)
        # k22_v_w: fixed rate constants indexed by vibrational levels (v, w)
        k22_1_0 = 0.42e-13 #non-reactive assumption
        k22_2_0 = 0.59e-12
        k22_2_1 = 0.30e-12
        k22_3_0 = 0.15e-11
        k22_3_1 = 0.16e-11
        k22_3_2 = 0.20e-11
        k22_4_0 = 0.43e-11
        k22_4_1 = 0.42e-11
        k22_4_2 = 0.49e-11
        k22_4_3 = 0.55e-11
        k22_5_0 = 0.16e-11
        k22_5_1 = 0.37e-11
        k22_5_2 = 0.69e-11
        k22_5_3 = 0.74e-11
        k22_5_4 = 0.89e-11
        k22_6_0 = 0.33e-11
        k22_6_1 = 0.51e-11
        k22_6_2 = 0.53e-11
        k22_6_3 = 0.69e-11
        k22_6_4 = 0.11e-10
        k22_6_5 = 0.12e-10
        k22_7_0 = 0.24e-11
        k22_7_1 = 0.38e-11
        k22_7_2 = 0.68e-11
        k22_7_3 = 0.57e-11
        k22_7_4 = 0.70e-11
        k22_7_5 = 0.11e-10
        k22_7_6 = 0.12e-10
        k22_8_0 = 0.30e-11
        k22_8_1 = 0.29e-11
        k22_8_2 = 0.29e-11
        k22_8_3 = 0.35e-11
        k22_8_4 = 0.56e-11
        k22_8_5 = 0.82e-11
        k22_8_6 = 0.12e-10
        k22_8_7 = 0.14e-10
        k22_9_0 = 0.52e-12
        k22_9_1 = 0.14e-11
        k22_9_2 = 0.30e-11
        k22_9_3 = 0.37e-11
        k22_9_4 = 0.48e-11
        k22_9_5 = 0.53e-11
        k22_9_6 = 0.92e-11
        k22_9_7 = 0.13e-10
        k22_9_8 = 0.14e-10
        # TODO: k23-k27 are not implemented yet
        k23 =
        k24 =
        k25 =
        k26 =
        k27 =
        # k28_v_w: dimensionless factors per (v, w); values for each v sum to
        # roughly 1 -- presumably branching fractions, TODO confirm
        k28_1_0 = 1
        k28_2_0 = 0.6535
        k28_2_1 = 0.35
        k28_3_0 = 0.30023
        k28_3_1 = 0.40221
        k28_3_2 = 0.30023
        k28_4_0 = 0.17949
        k28_4_1 = 0.25373
        k28_4_2 = 0.32389
        k28_4_3 = 0.24312
        k28_5_0 = 0.15093
        k28_5_1 = 0.17867
        k28_5_2 = 0.22844
        k28_5_3 = 0.23986
        k28_5_4 = 0.19662
        k28_6_0 = 0.12483
        k28_6_1 = 0.13462
        k28_6_2 = 0.16399
        k28_6_3 = 0.1958
        k28_6_4 = 0.20478
        k28_6_5 = 0.17541
        k28_7_0 = 0.10035
        k28_7_1 = 0.11096
        k28_7_2 = 0.13054
        k28_7_3 = 0.15991
        k28_7_4 = 0.17949
        k28_7_5 = 0.17051
        k28_7_6 = 0.15093
        k28_8_0 = 0.08648
        k28_8_1 = 0.09056
        k28_8_2 = 0.10688
        k28_8_3 = 0.12483
        k28_8_4 = 0.16888
        k28_8_5 = 0.15991
        k28_8_6 = 0.14033
        k28_8_7 = 0.12564
        k28_9_0 = 0.07506
        k28_9_1 = 0.07832
        k28_9_2 = 0.08974
        k28_9_3 = 0.11014
        k28_9_4 = 0.13951
        k28_9_5 = 0.14359
        k28_9_6 = 0.12483
        k28_9_7 = 0.12238
        k28_9_8 = 0.11503
        ##### Energy Loss per Reaction #####
        # E values are the electron energy lost per reaction [eV]
        E1_0 = 15.42
        E1_1 = 15.42
        E1_2 = 15.42
        E1_3 = 15.42
        E1_4 = 15.42
        E2 = 8.5
        # TODO: E3_* and E4_* are not implemented yet
        E3_1 =
        E3_2 =
        E3_3 =
        E3_4 =
        E3_5 =
        E3_6 =
        E4_0 =
        E4_1 =
        E4_2 =
        E4_3 =
        E4_4 =
        E4_5 =
        E4_6 =
        E4_7 =
        E4_8 =
        E5_0 = Te
        E5_1 = Te
        E5_2 = Te
        E5_3 = Te
        E5_4 = Te
        E5_5 = Te
        E5_6 = Te
        E5_7 = Te
        E5_8 = Te
        E5_9 = Te
        E6_0 = 20 # all XS data for reaction 6 starts from 20 eV
        E6_1 = 20
        E6_2 = 20
        E6_3 = 20
        E6_4 = 20
        E6_5 = 20
        E6_6 = 20
        E6_7 = 20
        E6_8 = 20
        E6_9 = 20
        E7 = 18
        E8 = 13.6
        E9 = 10.5
        E10 = 0
        E11 = Te
        E12 = 14
        E13 = Te
        E14 = 0.75
        E15 = 0
        E16 = 0
        E17 = 0
        E18 = 0
        E19 = 15.3
        E20 = 10.2
        E21 = 3.4
        E22 = 0
        # Quasi-neutrality equation (done): electron density from the ion/negative-ion densities
        ne = nHp + nH2p + nH3p - nHm
        # Hydrogen atom conservation equation (done): ground-state H2 density
        # from the fixed total gas density; calculation_array[3:12] is nH2_v1..v9
        nH2_v0 = self.ng - (0.5*(nH + nHp + nH_2s + nHm) + sum(calculation_array[3:12]) + nH2p + 1.5*nH3p)
        # Particle balance equation for electrons (done)
        # NOTE(review): 'k21ne' on the continuation line looks like a typo for
        # 'k21*ne', and bare 'Aeff'/'V' should probably be 'self.Aeff' -- confirm.
        dne_dt = (k1_0*ne*nH2_v0) + (k1_1*ne*nH2_v1) + (k1_2*ne*nH2_v2) + (k1_3*ne*nH2_v3) + (k1_4*ne*nH2_v4) \
        - (k5_0*ne*nH2_v0) - (k5_1*ne*nH2_v1) - (k5_2*ne*nH2_v2) - (k5_3*ne*nH2_v3) - (k5_4*ne*nH2_v4) - (k5_5*ne*nH2_v5) - (k5_6*ne*nH2_v6) - (k5_7*ne*nH2_v7) - (k5_8*ne*nH2_v8) - (k5_9*ne*nH2_v9) + (k7*ne*nH2_v0) + (k8*ne*nH) - (k11*ne*nH3p) - (k13*ne*nH3p) + (k14*ne*nHm) + (k15*nH*nHm) + (k21ne*nH_2s) - ne*uB*Aeff/V
        # Power balance equation for electrons (still incomplete)
        # NOTE(review): references k1/E1, k3/E3, k4/E4, k5/E5, k6/E6 and bare
        # Aeff, none of which are defined with these exact names in this scope.
        dTe_dt = 2/(3*ne)*(power(t)/V - (Vs+2.5*Te)*ne*uB*Aeff/V - 3/2*Te*dne_dt - (k1*nH*E1*ne + k2*nHp*E2*ne + k3*nH2_v0*E3*ne + k4*nH2_v0*E4*ne + k5*nH2_v0*E5*ne + k6*nH2p*E6*ne + k7*nH2p*E7*ne + k8*nH2p*E8*ne + k9*nH3p*E9*ne + k10*nH3p*E10*ne + k11*nH2p*E11*nH2_v0))
        # Particle balance equations for species other than electrons (still incomplete)
        dnH_dt = 2*(k2*ne*nH2_v0) + (k5_0*ne*nH2_v0) + (k5_1*ne*nH2_v1) + (k5_2*ne*nH2_v2) + (k5_3*ne*nH2_v3) + (k5_4*ne*nH2_v4) + (k5_5*ne*nH2_v5) + (k5_6*ne*nH2_v6) + (k5_7*ne*nH2_v7) + (k5_8*ne*nH2_v8) + (k5_9*ne*nH2_v9) + (k7*ne*nH2_v0) - (k8*ne*nH) + (k9*ne*nH2p) + (k10*nH2p*nH2_v0) + (k11*ne*nH3p) + 2*(k12*ne*nH3p) + (k14*ne*nHm) - (k15*nH*nHm) + (k16*nHp*nHm) + (k17*nH2p*nHm) + 2*(k18*nH3p*nHm) + (k19*ne*nH2_v0) - (k20*ne*nH) +k23 + k24 + k26 + k27
        dnH_2s_dt = (k16*nHp*nHm) + (k19*ne*nH2_v0) + (k20*ne*nH) - (k21*ne*nH_2s) - k27
        # TODO: vibrational level balance equations are not written yet
        dnH2_v1_dt =
        dnH2_v2_dt =
        dnH2_v3_dt =
        dnH2_v4_dt =
        dnH2_v5_dt =
        dnH2_v6_dt =
        dnH2_v7_dt =
        dnH2_v8_dt =
        dnH2_v9_dt =
        dnHp_dt = (k7*ne*nH2_v0) + (k8*ne*nH) + (k9*ne*nH2p) + (k12*ne*nH3p) - (k16*nHp*nHm) + (k21*ne*nH_2s) - k24
        dnH2p_dt = (k1_0*ne*nH2_v0) + (k1_1*ne*nH2_v1) + (k1_2*ne*nH2_v2) + (k1_3*ne*nH2_v3) + (k1_4*ne*nH2_v4) - (k9*ne*nH2p) - (k10*nH2p*nH2_v0) + (k13*ne*nH3p) - (k17*nH2p*nHm) - k25
        dnH3p_dt = (k10*nH2p*nH2_v0) - (k12*ne*nH3p) - (k13*ne*nH3p) - (k18*nH3p*nHm) - k26
        dnHm_dt = (k5_1*ne*nH2_v1) + (k5_2*ne*nH2_v2) + (k5_3*ne*nH2_v3) + (k5_4*ne*nH2_v4) + (k5_5*ne*nH2_v5) + (k5_6*ne*nH2_v6) + (k5_7*ne*nH2_v7) + (k5_8*ne*nH2_v8) + (k5_9*ne*nH2_v9) + (k13*ne*nH3p) - (k14*ne*nHm) - (k15*nH*nHm) - (k16*nHp*nHm) - (k17*nH2p*nHm) - (k18*nH3p*nHm)
        return [dTe_dt, dnH_dt, dnH_2s_dt, dnH2_v1_dt, dnH2_v2_dt, dnH2_v3_dt, dnH2_v4_dt, dnH2_v5_dt, dnH2_v6_dt, dnH2_v7_dt, dnH2_v8_dt, dnH2_v9_dt, dnHp_dt, dnH2p_dt, dnH3p_dt, dnHm_dt]
#Pulsed power generate function
def pulse_power(self, t):
if t <= self.duty*self.period:
return self.input_power
else:
return 0
#Temperature & Density Calculation
def routine(self, init_value):
routine_time_interval = np.linspace(0, self.period, int(self.period/self.time_resolution))
routine_result = odeint(balance_equations, init_value, routine_time_interval, args=(pulse_power,), rtol=10**-3, mxstep=10**6)
#routine_result = np.transpose(routine_result)
return routine_result
def iteration(self):
init_value = [2, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10, 1e10]
routine_result = routine(init_value)
count = 0
Hm_compare = 1
while True:
init_value = routine_result[-1]
if not isclose(routine_result[:,15][-1], Hm_compare, rel_tol=1e-2):
if iteration_number > 100:
print('did not converge')
break
Hm_compare = | |
"""
Filename: financial.py
Author: <NAME>
Current Status: In Development
"""
__author__ = "<NAME>"
__version__ = "1.000"
import openpyxl
import os
import time
import mysql.connector as mysql
from openpyxl.utils import column_index_from_string
from cellStyle import *
import sshtunnel
class ExcelSheet():
    """Thin wrapper around the budgeting workbook, exposing each sheet as an attribute."""

    # Sheet titles exactly as they appear in the workbook ('Febuary' is
    # intentionally misspelled to match the spreadsheet tab name).
    _MONTHS = ('January', 'Febuary', 'March', 'April', 'May', 'June',
               'July', 'August', 'September', 'October', 'November', 'December')

    def __init__(self):
        # workbook is expected next to the script; adjust the working
        # directory before constructing if it lives elsewhere
        self.filename = 'Example Spreadsheet.xlsx'
        self.wb = openpyxl.load_workbook(self.filename)
        # non-month sheets
        self.home = self.wb['HOME']
        self.paychecks = self.wb['Paychecks']
        self.dataTrack = self.wb['databaseTrack']
        # expose every month sheet as a lowercase attribute (self.january, ...)
        for title in self._MONTHS:
            setattr(self, title.lower(), self.wb[title])

    def saveFile(self):
        """Persist the workbook back to its original file."""
        self.wb.save(self.filename)

    def cellStyle(self, align, color, cell):
        """Apply fill, the standard full border and alignment to one cell."""
        cell.fill = color
        cell.border = Style.ALLBORDER
        cell.alignment = align
class DatabaseQuery():
#initialzing database connection
def __init__(self, db):
self.db = db
self.cursor = self.db.cursor()
    def closeConnection(self):
        """Close the underlying database connection; the instance is unusable afterwards."""
        self.db.close()
def testDBQuery(self): # works
cursor = self.cursor
query = "select * from income"
cursor.execute(query)
result = cursor.fetchall()
for x in result:
print(x)
query = 'call newIncome(350.00, "rent", "Mom", "2021-12-01", "Housing")'
cursor.execute(query)
self.db.commit()
# ----------------------------------------------HOME sheet----------------------------------
def updateSubscription(self):
"""
Downloads Subscription info from database to spreadsheet
Implemented
"""
cursor = self.cursor
wb = ExcelSheet()
home = wb.home
"""
getting all subscriptions from database
"""
query = """select * from subscription"""
cursor.execute(query)
result = cursor.fetchall() #array of 5 (not using first column)
"""
Clearing all old data
"""
for i in range(18, 22):
home.cell(row = i, column= column_index_from_string("A")).value = None
home.cell(row = i, column= column_index_from_string("B")).value = None
home.cell(row = i, column= column_index_from_string("C")).value = None
home.cell(row = i, column= column_index_from_string("D")).value = None
"""
downloading/updating all subscription data to spreadsheet
"""
i = 18 # starting row [FIX FOR FUTURE USE]
for tracked in result:
item = tracked[1]
amount = tracked[2]
start = tracked[3]
status = tracked[4]
date = start.strftime("%m/%d/%Y")
home.cell(row = i, column= column_index_from_string("A")).value = item
home.cell(row = i, column= column_index_from_string("B")).value = date
home.cell(row = i, column= column_index_from_string("C")).value = status
home.cell(row = i, column= column_index_from_string("D")).value = amount
wb.cellStyle(Style.LEFTALIGN, HomeColor.SUBSCRIPTIONFILL,
home.cell(row = i, column= column_index_from_string("A")))
wb.cellStyle(Style.CENTERALIGN, HomeColor.SUBSCRIPTIONFILL,
home.cell(row = i, column= column_index_from_string("B")))
wb.cellStyle(Style.CENTERALIGN, HomeColor.SUBSCRIPTIONFILL,
home.cell(row = i, column= column_index_from_string("C")))
wb.cellStyle(Style.RIGHTALIGN, HomeColor.SUBSCRIPTIONFILL,
home.cell(row = i, column= column_index_from_string("D")))
i += 1
if i > 22:
raise ValueError("\tSIZE ERROR: Number of subscriptions have exceeded the total allowed on %s\n" % wb.filename)
self.closeConnection()
wb.saveFile()
def updateDesiredPurchase(self):
"""
Downloads Desired Purchase info from database to spreadsheet"""
cursor = self.cursor
wb = ExcelSheet()
home = wb.home
"""
getting all desired purchases from database
"""
query = """select * from desiredPur"""
cursor.execute(query)
result = cursor.fetchall() #array of 4 (not using first column)
"""
Clearing all old data
"""
for i in range(28, 32):
home.cell(row = i, column= column_index_from_string("A")).value = None
home.cell(row = i, column= column_index_from_string("B")).value = None
home.cell(row = i, column= column_index_from_string("C")).value = None
"""
downloading/updating all subscription data to spreadsheet
"""
i = 28 # starting row [FIX FOR FUTURE USE]
for tracked in result:
item = tracked[1]
amount = tracked[2]
status = tracked[3]
home.cell(row = i, column= column_index_from_string("A")).value = item
home.cell(row = i, column= column_index_from_string("B")).value = status
home.cell(row = i, column= column_index_from_string("C")).value = amount
wb.cellStyle(Style.LEFTALIGN, HomeColor.DESIREDPURFILL,
home.cell(row = i, column= column_index_from_string("A")))
wb.cellStyle(Style.CENTERALIGN, HomeColor.DESIREDPURFILL,
home.cell(row = i, column= column_index_from_string("B")))
wb.cellStyle(Style.RIGHTALIGN, HomeColor.DESIREDPURFILL,
home.cell(row = i, column= column_index_from_string("C")))
i += 1
if i > 32:
raise ValueError("\tSIZE ERROR: Number of desired purchases have exceeded the total allowed on %s\n" % wb.filename)
self.closeConnection()
wb.saveFile()
def updateForSale(self):
"""
Downloads For Sale info from database to spreadsheet
Not Implemented
"""
cursor = self.cursor
wb = ExcelSheet()
home = wb.home
"""
getting all sales from database
"""
query = """select * from forSale"""
cursor.execute(query)
result = cursor.fetchall() #array of 5 (not using first column)
"""
Clearing all old data
"""
for i in range(38, 40):
home.cell(row = i, column= column_index_from_string("A")).value = None
home.cell(row = i, column= column_index_from_string("B")).value = None
home.cell(row = i, column= column_index_from_string("C")).value = None
"""
downloading/updating all for sale data to spreadsheet
"""
i = 38 # starting row [FIX FOR FUTURE USE]
for tracked in result:
item = tracked[1]
amount = tracked[2]
status = tracked[3]
home.cell(row = i, column= column_index_from_string("A")).value = item
home.cell(row = i, column= column_index_from_string("B")).value = status
home.cell(row = i, column= column_index_from_string("C")).value = amount
wb.cellStyle(Style.LEFTALIGN, HomeColor.FORSALEFILL,
home.cell(row = i, column= column_index_from_string("A")))
wb.cellStyle(Style.CENTERALIGN, HomeColor.FORSALEFILL,
home.cell(row = i, column= column_index_from_string("B")))
wb.cellStyle(Style.RIGHTALIGN, HomeColor.FORSALEFILL,
home.cell(row = i, column= column_index_from_string("C")))
i += 1
if i > 40:
raise ValueError("\tSIZE ERROR: Number of for-sale items have exceeded the total allowed on %s\n" % wb.filename)
self.closeConnection()
wb.saveFile()
def updateNetWorth(self):
"""
Updates Net Worth Table in HOME spreadsheet
Not Implemented
"""
cursor = self.cursor
wb = ExcelSheet()
home = wb.home
"""
Getting most recent net worth data from database
"""
query = "call netWorth"
cursor.execute(query)
result = cursor.fetchall() #array of 3
"""
downloading all account data to spreadsheet
"""
for tracked in result:
amount = tracked[0]
account = tracked[1]
day = tracked[2]
date = day.strftime("%m/%d/%Y")
# if account is bank account (total in bank is autocalculated in excel)
if "Bank Account" in account:
if "Spend" in account:
home.cell(row = 22, column= column_index_from_string("L")).value = amount
elif "Reserve" in account:
home.cell(row = 23, column= column_index_from_string("L")).value = amount
elif "Growth" in account:
home.cell(row = 24, column= column_index_from_string("L")).value = amount
# refresh date
home.cell(row = 5, column= column_index_from_string("C")).value = date
for i in range(22-24):
wb.cellStyle(Style.RIGHTALIGN, HomeColor.BANKFILL,
home.cell(row=i, column= column_index_from_string("L")))
elif "Invest" in account:
home.cell(row = 6, column= column_index_from_string("B")).value = amount
home.cell(row = 6, column= column_index_from_string("C")).value = date
elif "Safe" in account:
home.cell(row = 7, column= column_index_from_string("B")).value = amount
home.cell(row = 7, column= column_index_from_string("C")).value = date
elif "Wallet" in account:
home.cell(row = 8, column= column_index_from_string("B")).value = amount
home.cell(row = 8, column= column_index_from_string("C")).value = date
elif "Gift Card 1" in account:
home.cell(row = 9, column= column_index_from_string("B")).value = amount
home.cell(row = 9, column= column_index_from_string("C")).value = date
elif "Gift Card 2" in account:
home.cell(row = 10, column= column_index_from_string("B")).value = amount
home.cell(row = 10, column= column_index_from_string("C")).value = date
else:
# need to find a way to get the sum of all "other" (within mysql???)
home.cell(row = 11, column= column_index_from_string("B")).value = amount
home.cell(row = 11, column= column_index_from_string("C")).value = date
#applying styles to all Net Worth cells
for i in range(5,12):
wb.cellStyle(Style.RIGHTALIGN, HomeColor.NETWORTHFILL,
home.cell(row=i, column=column_index_from_string("B")))
wb.cellStyle(Style.CENTERALIGN, HomeColor.NETWORTHFILL,
home.cell(row=i, column=column_index_from_string("C")))
self.closeConnection()
wb.saveFile()
# ----------------------------------------------databaseTrack sheet----------------------------------
def downloadAccount(self):
"""
Downloads Account Tracking from database to spreadsheet
Implemented
"""
cursor = self.cursor
wb = ExcelSheet()
dataTrack = wb.dataTrack
"""
getting last account_id
"""
lastId = lastRow = 0
firstId = dataTrack.cell(row = 3, column= column_index_from_string("A")).value
# if first cell is empty
if firstId == None:
lastId = 1000
lastRow = 3
else:
# look for first empty cell
for i in range(2, dataTrack.max_row):
nextID = dataTrack.cell(row = i+1, column= column_index_from_string("A")).value
if nextID == None:
lastId = dataTrack.cell(row = i, column= column_index_from_string("A")).value
lastRow = i+1
break
"""
getting all account data from database
"""
query = """select * from account where acct_id > %d""" % (lastId)
cursor.execute(query)
result = cursor.fetchall() #array of 4
"""
downloading all account data to spreadsheet
"""
for tracked in result:
id = tracked[0]
name = tracked[1]
value = tracked[2]
day = tracked[3]
date = day.strftime("%m/%d/%Y")
dataTrack.cell(row = lastRow, column= column_index_from_string("A")).value = id
dataTrack.cell(row = lastRow, column= column_index_from_string("B")).value = name
dataTrack.cell(row = lastRow, column= column_index_from_string("C")).value = value
dataTrack.cell(row = lastRow, column= column_index_from_string("D")).value = date
wb.cellStyle(Style.LEFTALIGN, DatabaseTrackColor.ACCOUNTFILL,
dataTrack.cell(row=lastRow, column= column_index_from_string("A")))
wb.cellStyle(Style.LEFTALIGN, DatabaseTrackColor.ACCOUNTFILL,
dataTrack.cell(row=lastRow, column= column_index_from_string("B")))
wb.cellStyle(Style.RIGHTALIGN, DatabaseTrackColor.ACCOUNTFILL,
dataTrack.cell(row=lastRow, column= column_index_from_string("C")))
wb.cellStyle(Style.CENTERALIGN, DatabaseTrackColor.ACCOUNTFILL,
dataTrack.cell(row=lastRow, column= column_index_from_string("D")))
lastRow += 1
self.closeConnection()
wb.saveFile()
def downloadProfit(self):
"""
Downloads Profit Trackign from database to spreadsheet
Implemented
note: cells H1 and I1 are filled in and have borders
"""
cursor = self.cursor
wb = ExcelSheet()
dataTrack = wb.dataTrack
"""
getting last profit_id
"""
lastId = lastRow = 0
firstId = firstId = dataTrack.cell(row = 3, column= column_index_from_string("F")).value
# if first cell is empty
if firstId == None:
lastId = | |
"""
Data Visualisation toolkit
--------------------------
Data-vis library: mainly wrappers around common plotting libraries (plotly, seaborn).
EXAMPLES
--------
histogram with color
--------------------
fig = px.histogram(df, 'value', color='variable', barmode='overlay', opacity=0.7)
plotly geo scatter
------------------
fig = px.scatter_geo(stores_targ, lat='LAT', lon='LONG', hover_name='NAME', color='index1', hover_data=hd,
title='cash strapped stores', size='point1')
fig.update_layout(geo_scope='europe')
plot(fig)
plotly bar
----------
fig = px.bar(df, x=, y=, color=)
fig.update_layout({'yaxis': {'tickformat': '.1%', 'title': 'title'}}, yaxis_type='category')
plotly scatter
--------------
hd = {col1: ':,', col2: ':.2%', col3: True}
fig = px.scatter_3d(df, x=, y=, z=, color=, hover_name=, hover_data=hd)
fig.update_traces(marker=dict(size=3))
plot(fig, filename=path.as_posix())
plotly line
-----------
px.line(df, x=, y=, color=, line_shape='spline')
fig.update_traces(line=dict(size=3))
fig.update_xaxes(rangeslider_visible=True)
facet line
----------
fig = px.line(df, y='value', x='variable', color='desc', facet_row='level_0', **kwargs)
fig.update_traces(mode='lines+markers')
fig.update_xaxes(matches=None, showticklabels=True)
fig.update_yaxes(matches=None, showticklabels=True)
combine two figures
-------------------
fig1.add_trace(fig2.data[0])
for data in fig2.data:
fig1.add_trace(data)
secondary axis
--------------
fig1 = px.line(res_now, 'date', 'lb_all')
fig2 = px.line(res_now, 'date', 'calories')
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(fig1.data[0], secondary_y=False)
fig.add_trace(fig2.data[0], secondary_y=True)
axis titles
-----------
fig.update_layout({'xaxis': {'title': 'Cash Strapped Spend Allocation Index'},
'yaxis': {'tickformat': '.2%', 'title': f'% of spend in '}})
facet col wrap
--------------
px.line(facet_col=col, facet_col_wrap=5)
category_orders
"""
import itertools
import random
from itertools import cycle
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
from pandas._libs.tslibs.timestamps import Timestamp
from plotly.offline import plot
from plotly.subplots import make_subplots
from slibtk import slibtk
from tqdm import tqdm
from dstk.core import *
# colors ###############################################################################################################
def make_rgb() -> str:
    """Build a random CSS-style colour string of the form ``rgb(R,G,B)``."""
    # draw three independent channel values in [0, 255]
    red = random.randint(0, 255)
    green = random.randint(0, 255)
    blue = random.randint(0, 255)
    return f'rgb({red},{green},{blue})'
# my fave colors
# Categorical colour palette keyed by loose colour names; values are hex RGB strings.
default_cat_colors = {'magenta_pink': '#FC0FC0',
                      'electric blue': '#7df9ff',
                      'b': '#adc1c9',
                      'c': '#052d3f',
                      'd': '#6FFFE9',
                      'e': '#316379',
                      'f': '#84a2af'}
# Same palette as a flat list (minus '#7df9ff') for color_discrete_sequence-style args.
colorlst = ['#FC0FC0', '#adc1c9', '#052d3f', '#6FFFE9', '#316379', '#84a2af']
# Default single colour used by plots in this module.
color: str = '#FC0FC0'
# Semi-transparent RGBA colours keyed by theme/category name.
rgba = {
    'green main': 'rgba(112, 182, 88, 0.6)',
    'green dark': 'rgba(33, 84, 37, 0.6)',
    'grey dark': 'rgba(49, 45, 49, 0.6)',
    'dog': 'rgba(191, 209, 67, 0.6)',
    'cat': 'rgba(232, 132, 65, 0.6)',
    'small pet': 'rgba(212, 153, 59, 0.6)',
    'fish': 'rgba(40, 58, 140, 0.6)',
    'bird': 'rgba(109, 173, 218, 0.6)',
    'reptile': 'rgba(101, 38, 57, 0.6)',
}
# Materialised list of the RGBA values, and an infinite cycle over them.
rgba_vals = list(rgba.values())
rgba_inf = cycle(rgba.values())
def plot_histograms(df: pd.DataFrame, columns: Optional[List[str]] = None) -> None:
    """Render a histogram per column of ``df`` (all columns when ``columns`` is falsy)."""
    targets = columns or df.columns
    for name in targets:
        px.histogram(df, name, title=name).plot()
# plotly ###############################################################################################################
def line_seconday_axis(df: pd.DataFrame, x: str, y_primary: str, y_secondary: str, title: Optional[str] = None,
                       path: Optional[Path] = None) -> None:
    """Plot two line traces sharing an x axis, with the second series on its own (secondary) y axis."""
    primary_fig = px.line(df, x, y_primary, color_discrete_sequence=['blue'])
    secondary_fig = px.line(df, x, y_secondary, color_discrete_sequence=['magenta'])
    combined = make_subplots(specs=[[{'secondary_y': True}]])
    combined.update_layout(title=title)
    # first trace on the left axis, second on the right
    combined.add_trace(primary_fig.data[0], secondary_y=False)
    combined.add_trace(secondary_fig.data[0], secondary_y=True)
    # colour each axis title to match its trace
    combined.update_yaxes(title_text=y_primary, color='blue', secondary_y=False)
    combined.update_yaxes(title_text=y_secondary, color='magenta', secondary_y=True)
    combined.plot(path)
def px_scatter3d_by_colors(df: pd.DataFrame, colors: List, path: Path, fname: str, hover_name: str, hover_data,
                           marker_size: int = 3, title: Optional[str] = None) -> None:
    """Write one 3d scatter html file per colour column named in ``colors``."""
    for color in tqdm(colors):
        scatter = px.scatter_3d(df, x='dim0', y='dim1', z='dim2', hover_name=hover_name, color=color,
                                hover_data=hover_data, title=f'{title}{color}')
        scatter.update_traces(marker=dict(size=marker_size))
        out_file = path / f'{fname}_{color}.html'
        plot(scatter, filename=out_file.as_posix())
def add_vertline(fig: go.Figure, y1: float, x: int = 1) -> go.Figure:
    """Add a dotted red vertical line of height ``y1`` at position ``x`` to ``fig``."""
    fig.add_shape(
        type='line',
        x0=x, y0=0,
        x1=x, y1=y1,
        line=dict(width=5, dash='dot', color='red'),
    )
    return fig
def scatter_median_lines(fig: go.Figure, df: pd.DataFrame, x: str, y: str) -> go.Figure:
    """Overlay dotted red cross-hairs at the medians of ``x`` and ``y``, spanning the data range."""
    style = dict(width=3, dash='dot', color='red')
    # horizontal line at the median of y, spanning the x extent
    fig.add_shape(type='line',
                  x0=df[x].min(), y0=df[y].median(),
                  x1=df[x].max(), y1=df[y].median(),
                  line=style)
    # vertical line at the median of x, spanning the y extent
    fig.add_shape(type='line',
                  x0=df[x].median(), y0=df[y].min(),
                  x1=df[x].median(), y1=df[y].max(),
                  line=style)
    return fig
def px_scatter_with_lines(df: pd.DataFrame, x: str, y: str, **kwargs) -> go.Figure:
    """Scatter ``y`` against ``x`` with box marginals plus median cross-hair lines."""
    scatter = px.scatter(df, x, y, marginal_y='box', marginal_x='box', **kwargs)
    return scatter_median_lines(scatter, df, x, y)
def add_periodic_vertical_lines(fig: go.Figure, start: Union[str, Timestamp], end: Union[str, Timestamp], freq: str,
                                y1: float, y0: float = 0) -> go.Figure:
    """Draw a dotted vertical line at every date matching ``freq`` between ``start`` and ``end``.

    Args:
        fig: plotly figure to annotate
        start: first date of the period
        end: last date of the period
        freq: pandas frequency alias, e.g. ``W-THU`` for every Thursday
        y1: top of each line
        y0: bottom of each line
    Returns:
        fig: the same figure with one line shape per matching date
    """
    for tick in pd.date_range(start, end, freq=freq):
        fig.add_shape(
            type='line',
            x0=tick, y0=y0,
            x1=tick, y1=y1,
            line=dict(width=1, dash='dot'),
        )
    return fig
def make_sankey_fig_from_df(df: pd.DataFrame, source: str = 'source', target: str = 'target',
                            values: str = 'values', title=None,
                            valueformat: str = ',.1%') -> go.Figure:
    """
    Build a plotly Sankey diagram from a tidy DataFrame of flows.

    Args:
        df: DataFrame with required columns [source, target, values]
        source: column name of the flow origin
        target: column name of the flow destination
        values: column name of the flow size
        title: chart title string
        valueformat: number formatting of the values in the Sankey diagram
    Returns:
        fig: populated plotly figure object
    """
    node_link_args = _df_to_args_for_sankey(df, source=source, target=target, values=values)
    fig = make_sankey_fig(valueformat=valueformat, title=title, **node_link_args)
    return fig
def _df_to_args_for_sankey(df: pd.DataFrame, source: str = 'source', target: str = 'target',
                           values: str = 'values', color_scheme=rgba_inf) -> Dict[str, List]:
    """
    transform df into args required for plotly sankey figure. df must be of format: col_pre=source col_post=destination
    and col_values is the size of the flow between them.

    Args:
        df: DataFrame with required columns [source, target, values]
        source: column name
        target: column name
        values: column name
        color_scheme: An infinite generator of RGB colour strings eg 'rgba(101, 38, 57, 0.6)'
    Returns:
        dict of args required to make sankey diagram with plotly
    """
    # node labels: unique sources followed by unique targets (order preserved)
    index = list(np.concatenate([df[source].unique(), df[target].unique()]))
    # O(1) label -> position map instead of list.index per row (which was O(n) each,
    # O(n^2) overall); setdefault keeps the FIRST occurrence, matching list.index
    position = {}
    for i, label in enumerate(index):
        position.setdefault(label, i)
    source = [position[x] for x in df[source]]
    target = [position[x] for x in df[target]]
    values = df[values].tolist()
    # one colour per link, drawn from the infinite colour generator
    colors = [next(color_scheme) for _ in range(len(values))]
    return {
        'index': index,
        'source': source,
        'target': target,
        'values': values,
        'colors': colors,
    }
def make_sankey_fig(index: List, source: List, target: List, values: List, colors: List, title: str,
                    valueformat: str = ',.1%') -> go.Figure:
    """
    Assemble a plotly Sankey figure from pre-computed node/link lists (see _df_to_args_for_sankey);
    arguments should be passed in as kwargs.
    """
    nodes = dict(
        pad=15,
        thickness=20,
        line=dict(color="black", width=0.5),
        label=index,
        color=rgba['green main'],
    )
    links = dict(
        source=source,
        target=target,
        value=values,
        color=colors,
    )
    fig = go.Figure(data=[go.Sankey(valueformat=valueformat, node=nodes, link=links)])
    fig.update_layout(title_text=title, font_size=10)
    return fig
def px_scatter_geo(df: pd.DataFrame, color, hover_name, title, path: Optional[Path] = None, lat='latitude',
                   lon='longitude', **kwargs) -> go.Figure:
    """Scatter-map wrapper around px.scatter_geo zoomed to the bounding box of the data (europe scope)."""
    fig = px.scatter_geo(df, lat=lat, lon=lon, hover_name=hover_name, color=color, title=title, **kwargs)
    # clamp both axes to the extent of the points so the map starts zoomed to the data
    geo_settings = dict(
        scope='europe',
        showland=True,
        lonaxis=dict(range=[df[lon].min(), df[lon].max()]),
        lataxis=dict(range=[df[lat].min(), df[lat].max()]),
    )
    fig.update_layout(geo=geo_settings)
    return fig
# seaborn ##############################################################################################################
def heatmap(df, width=1.5, height=0.4, cmap=plt.cm.Blues, *args, **kwargs):
    """Render a DataFrame as an annotated seaborn heatmap sized to its shape; returns (fig, ax)."""
    # scale the figure with the number of columns/rows so cells stay readable
    figsize = (width * df.shape[1], height * df.shape[0])
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    sns.heatmap(df.round(2), annot=True, fmt='g', cmap=cmap, *args, **kwargs, ax=ax)
    plt.tight_layout()
    plt.show()
    return fig, ax
def heatmap_corr(df: pd.DataFrame, thresh=None, width=14, height=8, **kwargs):
    """
    Annotated heatmap of the pairwise correlation matrix of ``df``.

    Args:
        df: input data; correlations are computed over its columns
        thresh: if given, correlations with absolute value <= thresh are blanked (NaN)
        width: figure width in inches
        height: figure height in inches
        **kwargs: forwarded to sns.heatmap
    Returns:
        (fig, ax): matplotlib figure and axes handles
    """
    corr = df.corr().round(2)
    if thresh:
        # DataFrame.where keeps values where the mask holds and NaNs the rest —
        # same result as the previous element-wise lambda, without the
        # deprecated DataFrame.applymap
        corr = corr.where(corr.abs() > thresh)
    fig, ax = plt.subplots(1, 1, figsize=(width, height))
    sns.heatmap(corr, ax=ax, annot=True, **kwargs)
    return (fig, ax)
def violin_and_box(*args, width=12, height=11, **kwargs):
    """Show the same data as a violin plot (top) and a box plot (bottom) on a 2x1 grid."""
    fig, axes = plt.subplots(2, 1, figsize=(width, height))
    sns.violinplot(*args, **kwargs, ax=axes[0])
    sns.boxplot(*args, **kwargs, ax=axes[1])
    plt.show()
# visualising decision boundaries and regression surfaces ##############################################################
def px_scatter3d_regression(df: pd.DataFrame, x: str, y: str, z: str, model, path: Path, title: Optional[str] = None,
                            color_discrete_sequence=None, resolution: int = 200, marker_size: int = 5, *args,
                            **kwargs) -> None:
    """
    plot a 3 dimensional scatter plot where z is the target variable and super impose a regression surface
    corresponding to model, where model has a sklearn style api (fit, predict)

    NOTE(review): mutates the caller's ``df`` in place by adding a 'pred' column — confirm callers expect this.

    Args:
        df: DataFrame where x and y are features and z is a continuous label.
        x: column name of feature input 1
        y: column name of feature input 2
        z: column name of continuous label
        model: model object with fit and predict methods ie sklearn
        path: save location of plotly html output
        title: chart title string
        color_discrete_sequence: marker colours; falls back to the module-level colorlst
        resolution: number of points per axis of the predicted surface grid
        marker_size: scatter marker size
        *args: passed to px.scatter_3d
        **kwargs: passed to px.scatter_3d
    Returns:
        None
    """
    # fit on the two features, then predict over them to populate the 'pred' column
    model.fit(df[[x, y]], df[z])
    df['pred'] = model.predict(df[[x, y]])
    # grid of predictions spanning the observed x/y range, rendered as a surface
    surface = make_predicted_surface(df[x], df[y], predictor=model.predict, resolution=resolution)
    # melted = df.melt(id_vars=[x, y], value_vars=[z, 'pred'])
    color_discrete_sequence = color_discrete_sequence if color_discrete_sequence else colorlst
    fig = px.scatter_3d(df, x=x, y=y, z=z, color_discrete_sequence=color_discrete_sequence, title=title, *args,
                        **kwargs)
    fig.update_traces(marker=dict(size=marker_size))
    fig.add_trace(surface)
    plot(fig, filename=path.as_posix())
def make_predicted_surface(x: pd.Series, y: pd.Series, predictor: Callable, resolution: int = 200) -> go.Surface:
"""
for a given set of values x and y, and a trained model, estimate the grid surface for all permutations
Args:
x: first independent variable
y: Second independent variable
predictor: Function that is applied to the nx2 array of grid coordinates and returns an nx1 array of predictions
resolution: number of points on each axis
Returns:
surface: plotly surface trace object
"""
# setting up | |
import logging
import os
import simplejson as json
import yaml
from deepmerge import always_merger
from .exceptions import TinConfigNotFound, TinError
# We only do JSON APIs right now
DEFAULT_CONTENT_TYPE = "application/json"
# Baseline API settings; file/env-var configuration is merged on top of these.
DEFAULTS = {
    "scheme": "https",
    "port": 443,
    "use_session": True,
    "ssl": {"verify": True},
    "content_type": DEFAULT_CONTENT_TYPE,
    "auth_type": "basic",
}
# NOTE(review): basicConfig at import time configures the root logger for the
# whole process — consider leaving this to the application entry point.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class TinConfig(object):
"""Class which represents the configuration of an API
Configuration can be loaded from a YAML or JSON file, from YAML or JSON in environment vars, or directly from environment vars.
Configuration has an order of precedence for loading data:
1. config file path passed as an argument
2. config file path from the TIN_CONFIG env var
3. config data as JSON from the TIN_CONFIG env var
4. config data as YAML from the TIN_CONFIG env var
Configurations may be organized in as multi-environment or single environment.
Multi-environment configs look like
```yaml
---
environments:
myenv:
key: value
common:
key: value
```
Whereas single environment configs look like
```yaml
key: value
otherkey: othervalue
```
Aftr config data is loaded, environment variables will be loaded, which will override config file values if set.
For multi-environment configs, the first key must always be 'ENVIRONMENTS'. Note the double underscores.
TIN__ENVIRONMENTS__BASIC__HOST corresponds to config_data['environments']['basic']['host']
Common vars are similar
TIN__COMMON__BASEPATH corresponds to config_data['common']['basepath']
In single-environment configs, env vars still use double underscores, but without environment name or "COMMON":
TIN__HOST corresponds to config_data['host']
And so on. This also leaves open the possiblity of loading the entire
config from individual env vars
Args:
config_file (str): Relative or absolute path to the YAML or JSON config file
environment (str): Optional name of the API environment to load
"""
    def __init__(self, config_file=None, environment=None, env_prefix=None):
        """Load, merge and validate the API configuration.

        Resolution order for config data: explicit ``config_file`` argument,
        then the <PREFIX>_CONFIG env var (as a file path, JSON blob or YAML
        blob), then individual <PREFIX>__* env vars merged on top.

        Args:
            config_file (str): Relative or absolute path to a YAML or JSON config file
            environment (str): Optional name of the API environment to load
            env_prefix (str): Optional extra env var prefix (yields TIN_<PREFIX>)
        Raises:
            TinError: on empty config, unknown/missing environment, missing
                api_name or api_file, or a non-integer port
        """
        self.api_name = None
        self.config_src = None
        self.config_dir = None
        self.config_data = None
        self.environment = None
        # start from the library defaults; everything else merges on top
        self._api_config = dict(DEFAULTS)
        ######################
        # Env prefix
        if env_prefix:
            self._env_prefix = "TIN_{}".format(env_prefix).upper()
        else:
            self._env_prefix = "TIN"
        # Environment is required regardless of where config data comes from
        if environment is None:
            if f"{self._env_prefix}_ENV" in os.environ:
                self.environment = os.environ.get(f"{self._env_prefix}_ENV")
            else:
                self.environment = None
        else:
            self.environment = environment
        ######################
        # Config loading
        # Handle being passed config data from the environment, as a file or as JSON or
        # YAML, if a config file path was not given.
        if config_file is None:
            if f"{self._env_prefix}_CONFIG" in os.environ:
                config = os.environ.get(f"{self._env_prefix}_CONFIG")
                if os.path.isfile(config):
                    config_data = self._load_main_config_from_file(config)
                else:
                    try:
                        config_data = self._load_json_or_yaml(config)
                    except ValueError:
                        # Don't die here, as we might load config from individual env
                        # vars
                        config_data = {}
                self.config_src = "ENV"
            else:
                config_data = {}
                self.config_src = "ENV"
        else:
            config_data = self._load_main_config_from_file(config_file)
        logger.info(
            "Using config: {} Environment: {}".format(
                self.config_src,
                self.environment if self.environment else "default (none)",
            )
        )
        ######################
        # Load from environment variables
        # Update from env vars as described above
        self.config_data = self._update_from_env(config_data, environment)
        if not self.config_data:
            raise TinError("Empty config!")
        # If we have an an environment based config, but no environment OR
        # an environment was specified but we still don't have environment data, it's
        # a problem
        if self.environment is None and "environments" in self.config_data:
            raise TinError("I have an environment-based config but environment is None")
        elif (
            self.environment is not None
            and self.environment not in self.config_data.get("environments", {})
        ):
            raise TinError(
                "Environment set but not found in config: {}".format(self.environment)
            )
        ######################
        # Determine API name for type naming
        # Priority: top-level api_name, then common.api_name, then the config
        # file's basename.
        if self.config_data.get("api_name", None):
            self.api_name = self.config_data["api_name"]
        elif self.config_data.get("common", {}).get("api_name", None):
            self.api_name = self.config_data["common"]["api_name"]
        elif os.path.isfile(self.config_src):
            self.api_name = os.path.splitext(os.path.basename(self.config_src))[0]
        else:
            raise TinError(
                """Cannot determine the API name! Either set TIN__API_NAME env
                var or set 'api_name' in the common settings."""
            )
        ######################
        # Build the final _api_config
        if self.environment:
            # Merge common env config into _api_config
            self._api_config = always_merger.merge(
                self._api_config, self.config_data.get("common", {})
            )
            # Now merge env-specific settings into that result
            self._api_config = always_merger.merge(
                self._api_config, self.config_data["environments"][self.environment]
            )
        else:
            # If there's no environment, all the config keys should already be top-level
            self._api_config = always_merger.merge(self._api_config, self.config_data)
        # At this point, we must have an api_file or there's no point in continuing
        if self._api_config.get("api_file", None) is None:
            raise TinError("No api_file specified in the config. Cannot continue.")
        ######################
        # Determine a config_dir, if any
        if "config_dir" not in self._api_config and self.config_src != "ENV":
            # If config_dir is in the api_config, it will be accessible via
            # self.config_dir already due to __getattr__. If not, set it based on
            # the main config path *IF* there is one
            self._api_config["config_dir"] = os.path.dirname(
                os.path.abspath(self.config_src)
            )
            self.config_dir = self._api_config["config_dir"]
        elif "config_dir" in self._api_config:
            self.config_dir = self._api_config["config_dir"]
        ######################
        # Credentials
        if (
            self._api_config.get("auth_type") in [None, "none"]
            or "credentials" not in self._api_config
        ):
            # If auth_type is None, set credentials to None, otherwise, if auth_type is set
            # but credentials are absent, continue instead of dying as creds may be set after
            # instantiation
            self._api_config["credentials"] = None
        else:
            # credentials value may be a file path, an inline JSON/YAML blob,
            # or an opaque string — try each in turn
            try:
                self.credentials = self._load_config_from_file(
                    self._api_config["credentials"]
                )
            except TinConfigNotFound:
                try:
                    self.credentials = self._load_json_or_yaml(
                        self._api_config["credentials"]
                    )
                except ValueError:
                    # doesn't load as json or yaml, may be a custom string
                    self.credentials = self._api_config["credentials"]
        ######################
        # Headers
        self.headers = {
            "Content-type": self._api_config.get("content_type", False)
            or DEFAULT_CONTENT_TYPE,
            "Accept": self._api_config.get("content_type", False)
            or DEFAULT_CONTENT_TYPE,
        }
        # Merge in any headers from the config
        if self._api_config.get("headers", None) is not None:
            self.headers.update(self._api_config["headers"])
        ######################
        # Minor data checks
        try:
            self._api_config["port"] = int(self._api_config["port"])
        except ValueError:
            raise TinError("Invalid port, must be an integer")
        ######################
        # Additional file-based configs
        # API and Model configs must be files
        self.apidata = self._load_config_from_file(self.api_file)
        self.models = (
            self._load_config_from_file(self._api_config.get("model_file", None))
            if "model_file" in self._api_config
            else {}
        )
    def _update_from_env(self, config_data, environment=None):
        """Read configuration from environment variables

        Reads config keys and values from env vars following a particular naming scheme:

        TIN__[ENVIRONMENTS__]<ENVIRONMENT|KEY>__<KEY>.. = <VALUE>

        See the class docs for more detail.

        Arguments:
            config_data (dict): A dict into which keys and values will be loaded
            environment (string|None): Optional environment name
        Returns:
            dict: config_data with any matching env vars deep-merged on top
        """
        for var, val in os.environ.items():
            if var.startswith(
                "{}__{}".format(
                    self._env_prefix,
                    "ENVIRONMENTS__{}".format(environment.upper())
                    if environment is not None
                    else "",
                )
            ):
                # drop the prefix and lowercase the remaining name segments
                env_parts = [v.lower() for v in var.split("__")[1:]]
                dict_from_list = current = {}
                # Some confusing iteration that turns a list into nested dict keys
                for i in range(0, len(env_parts)):
                    part = env_parts[i]
                    if i == len(env_parts) - 1:
                        # last segment holds the actual value
                        current[part] = val
                    else:
                        current[part] = {}
                        current = current[part]
                config_data = always_merger.merge(config_data, dict_from_list)
        return config_data
def __getattr__(self, item):
"""Look up referenced attrs in _api_config before __dict__
Arguments:
item (str): attr or method name
Returns:
Value of the attribute key
"""
if item in self._api_config:
return self._api_config[item]
elif item in self.__dict__:
return self.__dict__[item]
else:
self.method_missing(item)
def method_missing(self, method_name, *args, **kwargs):
"""Handle references to missing attrs
Arguments:
method_name (str): Name of referenced attr
Raises:
AttributeError
"""
e = "type object '%s' has no attribute '%s'" % (
self.__class__.__name__,
method_name,
)
raise AttributeError(e)
def _load_config_from_file(self, filename):
"""Load an arbitrary configuration from a file.
Update config_src and api_name.
Arguments:
filename (str): Relative or absolute path to a file
Returns:
see _loadfile()
"""
return self._loadfile(self.find_config(filename))
def _load_main_config_from_file(self, filename):
"""Load main configuration from a file.
Update config_src.
Arguments:
filename (str): Relative or absolute path to a file
Returns:
see _loadfile()
"""
self.config_src = self.find_config(filename)
return self._load_config_from_file(self.config_src)
def _loadfile(self, filepath):
"""Parses the conf file as YAML or JSON based on file extension
Arguments:
filepath (str): Path to the file
Returns
dict: Contents of the file parsed to a dict
"""
with open(filepath, "rb") as fh:
if filepath.endswith(".yml") or filepath.endswith(".yaml"):
return yaml.safe_load(fh.read())
elif filepath.endswith(".json"):
return json.loads(fh.read())
    def _load_json_or_yaml(self, data):
        """Given a chunk of data, attempts to load it as JSON or YAML, in that order

        Arguments:
            data (str): Data presumed to be JSON or YAML
        Returns
            dict: The data parsed to a dict

        NOTE(review): when the YAML result is a scalar, dict() raises — callers
        catch ValueError only, so a TypeError (e.g. from dict(None)) would
        propagate; confirm that is acceptable.
        """
        try:
            loaded = json.loads(data)
        except json.decoder.JSONDecodeError:
            # Explicitly making this a dict works around the fact that
            # pyyaml will load a single plain string without error
            loaded = dict(yaml.safe_load(data))
        return loaded
def set(self, key, value):
"""Config attribute setter
Arguments:
key (str): Name of the attribute
value (str): Value to set. Presumed to be a string but this isn't enforced.
| |
# Define a class to receive the characteristics of each line detection
class Line():
    """Book-keeping for one lane line tracked across successive frames."""

    def __init__(self):
        # detection status of the last iteration
        self.detected = False
        # history of fitted x values (last n fits) and their running average
        self.recent_xfitted = []
        self.bestx = None
        # polynomial coefficients: averaged over last n fits vs most recent fit
        self.best_fit = None
        self.current_fit = [np.array([False])]
        # radius of curvature of the line in some units
        self.radius_of_curvature = None
        # distance in meters of vehicle center from the line
        self.line_base_pos = None
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0.0, 0.0, 0.0])
        # pixel coordinates (x and y) of the detected line
        self.allx = None
        self.ally = None
def pipeline(img, s_thresh=(170, 255), sx_thresh=(30, 100)):
    """Threshold a BGR road image into a colour binary for lane finding.

    Combines a LAB L-channel brightness mask ANDed with an HLS S-channel mask,
    plus an x-direction Sobel gradient mask on the HLS lightness channel.

    Args:
        img: BGR image array
        s_thresh: (low, high) inclusive threshold on the HLS saturation channel
        sx_thresh: (low, high) inclusive threshold on the scaled Sobel-x gradient
    Returns:
        3-channel image with the combined masks in the blue channel, scaled to 255
    """
    img = np.copy(img)
    # Convert to HLS and LAB colour spaces and pull out the channels we threshold
    # (removed unused gray / t_binary computations — dead code)
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    L_channel = lab[:,:,0]
    l_channel = hls[:,:,1]
    s_channel = hls[:,:,2]
    # Sobel x: derivative in x accentuates lines away from horizontal
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    # Threshold LAB brightness
    labbinary = np.zeros_like(L_channel)
    labbinary[(L_channel >= 100) & (L_channel <= 255)] = 1
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold saturation channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # keep pixels that are both bright (LAB) and saturated (HLS)
    img_bwa = cv2.bitwise_and(labbinary,s_binary)
    # Stack each channel: combined colour mask + gradient mask in the blue plane
    color_binary = np.dstack(( np.zeros_like(sxbinary), np.zeros_like(sxbinary), img_bwa + sxbinary)) * 255
    return color_binary
def find_lane_pixels(binary_warped):
    """Locate left/right lane-line pixels in a warped binary image via sliding windows.

    Args:
        binary_warped: top-down (birds-eye) binary image of the road
    Returns:
        (leftx, lefty, rightx, righty, out_img): pixel coordinates of each
        line and an RGB visualisation with the search windows drawn in green.
    """
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    # (np.int was removed in NumPy 1.24 — use the builtin int)
    midpoint = int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # HYPERPARAMETERS
    # Choose the number of sliding windows
    nwindows = 15
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 50
    # Set height of windows - based on nwindows above and image shape
    window_height = int(binary_warped.shape[0]//nwindows)
    # Identify the x and y positions of all nonzero (i.e. activated) pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated later for each window in nwindows
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img,(win_xleft_low,win_y_low),
        (win_xleft_high,win_y_high),(0,255,0), 2)
        cv2.rectangle(out_img,(win_xright_low,win_y_low),
        (win_xright_high,win_y_high),(0,255,0), 2)
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
        (nonzerox >= win_xleft_low) &  (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
        (nonzerox >= win_xright_low) &  (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If we found > minpix pixels, recenter the next window on their mean x
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices (previously was a list of lists of pixels)
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
def fit_polynomial(binary_warped):
    """Run the sliding-window search and fit a quadratic x(y) to each lane line.

    Returns:
        (out_img, left_fit, right_fit): visualisation image with lane pixels
        coloured (left red, right blue) and the second-order polynomial
        coefficients of each line.
    """
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    try:
        left_fitx = np.polyval(left_fit, ploty)
        right_fitx = np.polyval(right_fit, ploty)
    except TypeError:
        # Avoids an error if `left` and `right_fit` are still none or incorrect
        print('The function failed to fit a line!')
        left_fitx = 1*ploty**2 + 1*ploty
        right_fitx = 1*ploty**2 + 1*ploty
    ## Visualization ##
    # Colors in the left and right lane regions
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    # Plots the left and right polynomials on the lane lines
    #plt.plot(left_fitx, ploty, color='yellow')
    #plt.plot(right_fitx, ploty, color='yellow')
    return out_img, left_fit, right_fit
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    """Fit a quadratic x(y) to each lane's pixels and evaluate it over the image height.

    Args:
        img_shape: (height, width) of the warped image
        leftx, lefty: pixel coordinates of the left line
        rightx, righty: pixel coordinates of the right line
    Returns:
        (left_fitx, right_fitx, ploty): evaluated x positions per row and the row grid
    """
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # one sample per pixel row
    ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
    left_fitx = np.polyval(left_fit, ploty)
    right_fitx = np.polyval(right_fit, ploty)
    return left_fitx, right_fitx, ploty
def compute_lane_curvature(leftx, rightx, ploty):
    """
    Returns the pair (left_curverad, right_curverad) of curvature radii, in meters.

    NOTE(review): the original docstring promised a lane_center_offset third
    element, but only the two radii are returned.

    Relies on module-level ym_per_px / xm_per_px pixel-to-meter conversion
    factors defined elsewhere in this file.
    """
    # Evaluate curvature at the bottom of the image (largest y, nearest the vehicle)
    y_eval = np.max(ploty)
    # Fit new polynomials: find x for y in real-world space
    left_fit_cr = np.polyfit(ploty * ym_per_px, leftx * xm_per_px, 2)
    right_fit_cr = np.polyfit(ploty * ym_per_px, rightx * xm_per_px, 2)
    # Radius of curvature: ((1 + (2*A*y + B)^2)^1.5) / |2*A|
    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_px + left_fit_cr[1])**2)**1.5) / np.absolute(2 * left_fit_cr[0])
    right_curverad = ((1 + (2 *right_fit_cr[0] * y_eval * ym_per_px + right_fit_cr[1])**2)**1.5) / np.absolute(2 * right_fit_cr[0])
    # Now our radius of curvature is in meters
    return left_curverad, right_curverad
def search_around_poly(binary_warped, Minv, left_fit, right_fit, undist):
# HYPERPARAMETER
# Choose the width of the margin around the previous polynomial to search
# The quiz grader expects 100 here, but feel free to tune on your own!
margin = 100
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
### TO-DO: Set the area of search based on activated x-values ###
### within the +/- margin of our polynomial function ###
### Hint: consider the window areas for the similarly named variables ###
### in the previous quiz, but change the windows to our new search area ###
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit new polynomials
left_fitx, right_fitx, ploty = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
## Visualization ##
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate | |
0, 0, 0, 0],
[1240, 283.224983, 0, 9999, -9999, 1.0, 100, 1, 339.51051, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1241, 253.35193, 0, 9999, -9999, 1.0, 100, 1, 385.361595, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1242, 2.719992, 0, 9999, -9999, 1.0, 100, 1, 27.074038, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1243, 12.040673, 0, 9999, -9999, 1.0, 100, 1, 83.079842, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1244, 251.666614, 0, 9999, -9999, 1.0, 100, 1, 323.472536, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1245, 3.200779, 0, 9999, -9999, 1.0, 100, 1, 8.080896, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1246, 50.595153, 0, 9999, -9999, 1.0, 100, 1, 57.127825, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1247, 9.304883, 0, 9999, -9999, 1.0, 100, 1, 21.833396, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1248, 76.165884, 0, 9999, -9999, 1.0, 100, 1, 91.958275, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1249, 69.490338, 0, 9999, -9999, 1.0, 100, 1, 76.135177, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1250, 20.511911, 0, 9999, -9999, 1.0, 100, 1, 30.830519, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1251, 14.061535, 0, 9999, -9999, 1.0, 100, 1, 23.404345, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1252, 10.700771, 0, 9999, -9999, 1.0, 100, 1, 14.887727, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1253, 44.535521, 0, 9999, -9999, 1.0, 100, 1, 64.502694, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1254, 52.839564, 0, 9999, -9999, 1.0, 100, 1, 82.278695, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1255, 3.240495, 0, 9999, -9999, 1.0, 100, 1, 3.818419, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1256, 12.635874, 0, 9999, -9999, 1.0, 100, 1, 15.091842, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1257, 67.766856, 0, 9999, -9999, 1.0, 100, 1, 88.95288, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1258, 167.277827, 0, 9999, -9999, 1.0, 100, 1, 235.487329, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1259, 83.758149, 0, 9999, -9999, 1.0, 100, 1, 109.288719, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1260, 7.929404, 0, 9999, -9999, 1.0, 100, 1, 20.168717, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1261, 29.280009, 0, 9999, -9999, 1.0, 100, 1, 201.699555, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1262, 0.094095, 0, 9999, -9999, 1.0, 100, 1, 0.524108, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1263, 0.064969, 0, 9999, -9999, 1.0, 100, 1, 0.352421, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1264, 4.036094, 0, 9999, -9999, 1.0, 100, 1, 82.035361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1265, 0.819286, 0, 9999, -9999, 1.0, 100, 1, 6.654727, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1266, 17.645436, 0, 9999, -9999, 1.0, 100, 1, 119.710849, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1267, 25.363539, 0, 9999, -9999, 1.0, 100, 1, 39.469006, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1268, 0.167997, 0, 9999, -9999, 1.0, 100, 1, 3.4295, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1269, 0.365776, 0, 9999, -9999, 1.0, 100, 1, 5.105829, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1270, 3.846381, 0, 9999, -9999, 1.0, 100, 1, 38.950511, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1271, 6.722278, 0, 9999, -9999, 1.0, 100, 1, 47.371792, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1272, 0.241761, 0, 9999, -9999, 1.0, 100, 1, 1.23166, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1273, 0.695021, 0, 9999, -9999, 1.0, 100, 1, 2.169201, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1274, 41.501889, 0, 9999, -9999, 1.0, 100, 1, 53.095629, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1275, 82.59712, 0, 9999, -9999, 1.0, 100, 1, 99.0753, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1276, 15.517213, 0, 9999, -9999, 1.0, 100, 1, 25.655641, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1277, 56.731843, 0, 9999, -9999, 1.0, 100, 1, 65.611252, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1278, 148.999451, 0, 9999, -9999, 1.0, 100, 1, 170.437781, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1279, 3.8e-05, 0, 9999, -9999, 1.0, 100, 1, 0.004344, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1280, 0.022161, 0, 9999, -9999, 1.0, 100, 1, 0.626494, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1281, 0.194381, 0, 9999, -9999, 1.0, 100, 1, 2.51246, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1282, 0.229809, 0, 9999, -9999, 1.0, 100, 1, 4.363037, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1283, 1106.411555, 0, 9999, -9999, 1.0, 100, 1, 1297.764428, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1284, 4.815237, 0, 9999, -9999, 1.0, 100, 1, 28.426322, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1285, 0.172746, 0, 9999, -9999, 1.0, 100, 1, 2.937048, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1286, 0.837665, 0, 9999, -9999, 1.0, 100, 1, 17.872201, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1287, 43.47072, 0, 9999, -9999, 1.0, 100, 1, 93.199628, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1288, 86.027492, 0, 9999, -9999, 1.0, 100, 1, 148.402692, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1289, 83.540969, 0, 9999, -9999, 1.0, 100, 1, 184.149235, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1290, 1.100419, 0, 9999, -9999, 1.0, 100, 1, 4.901974, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1291, 84.661755, 0, 9999, -9999, 1.0, 100, 1, 98.293351, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1292, 24.11481, 0, 9999, -9999, 1.0, 100, 1, 41.682074, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1293, 1.340697, 0, 9999, -9999, 1.0, 100, 1, 2.402107, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1294, 2.762165, 0, 9999, -9999, 1.0, 100, 1, 5.39743, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1295, 2.875549, 0, 9999, -9999, 1.0, 100, 1, 5.873666, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1296, 2.441674, 0, 9999, -9999, 1.0, 100, 1, 27.356489, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1297, 24.334889, 0, 9999, -9999, 1.0, 100, 1, 177.778742, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1298, 0.262582, 0, 9999, -9999, 1.0, 100, 1, 4.014603, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1299, 0.180635, 0, 9999, -9999, 1.0, 100, 1, 2.158207, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1300, 8.661859, 0, 9999, -9999, 1.0, 100, 1, 23.74405, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1301, 21.376415, 0, 9999, -9999, 1.0, 100, 1, 60.863304, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1302, 1.173793, 0, 9999, -9999, 1.0, 100, 1, 4.877299, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1303, 0.961116, 0, 9999, -9999, 1.0, 100, 1, 4.335516, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1304, 1.534479, 0, 9999, -9999, 1.0, 100, 1, 9.594319, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1305, 0.000409, 0, 9999, -9999, 1.0, 100, 1, 0.004567, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1306, 1.400254, 0, 9999, -9999, 1.0, 100, 1, 1.827014, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1307, 0.202727, 0, 9999, -9999, 1.0, 100, 1, 0.29894, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1308, 1.83875, 0, 9999, -9999, 1.0, 100, 1, 3.278321, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1309, 0.153497, 0, 9999, -9999, 1.0, 100, 1, 3.34909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1310, 0.075116, 0, 9999, -9999, 1.0, 100, 1, 1.64589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1311, 2.484534, 0, 9999, -9999, 1.0, 100, 1, 11.854004, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1312, 212.454641, 0, 9999, -9999, 1.0, 100, 1, 262.264924, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1313, 4.885407, 0, 9999, -9999, 1.0, 100, 1, 30.836748, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1314, 1.844354, 0, 9999, -9999, 1.0, 100, 1, 12.003987, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1315, 1.103371, 0, 9999, -9999, 1.0, 100, 1, 7.879027, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1316, 0.198826, 0, 9999, -9999, 1.0, 100, 1, 2.757497, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1317, 18.699532, 0, 9999, -9999, 1.0, 100, 1, 23.958574, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1318, 0.101079, 0, 9999, -9999, 1.0, 100, 1, 1.956332, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1319, 12.575611, 0, 9999, -9999, 1.0, 100, 1, 17.708276, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1320, 3.813162, 0, 9999, -9999, 1.0, 100, 1, 20.75859, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1321, 0.033533, 0, 9999, -9999, 1.0, 100, 1, 0.161123, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1322, 0.174584, 0, 9999, -9999, 1.0, 100, 1, 0.929763, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1323, 125.247907, 0, 9999, -9999, 1.0, 100, 1, 199.111909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1324, 0.582413, 0, 9999, -9999, 1.0, 100, 1, 13.063258, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1325, 9.008711, 0, 9999, -9999, 1.0, 100, 1, 90.497559, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1326, 40.081034, 0, 9999, -9999, 1.0, 100, 1, 56.928865, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1327, 36.371597, 0, 9999, -9999, 1.0, 100, 1, 50.796895, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1328, 11.765551, 0, 9999, -9999, 1.0, 100, 1, 16.063343, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1329, 11.62544, 0, 9999, -9999, 1.0, 100, 1, 218.675424, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1330, 7.941464, 0, 9999, -9999, 1.0, 100, 1, 30.131028, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1331, 0.244515, 0, 9999, -9999, 1.0, 100, 1, 0.289238, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1332, 2.022899, 0, 9999, -9999, 1.0, 100, 1, 26.293088, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1333, 7.067332, 0, 9999, -9999, 1.0, 100, 1, 45.650254, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1334, 0.021456, 0, 9999, -9999, 1.0, 100, 1, 1.215341, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1335, 0.248908, 0, 9999, -9999, 1.0, 100, 1, 3.306939, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1336, 4.222701, 0, 9999, -9999, 1.0, 100, 1, 29.773035, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1337, 86.888509, 0, 9999, -9999, 1.0, 100, 1, 121.31241, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1338, 0.140261, 0, 9999, -9999, 1.0, 100, 1, 0.832524, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1339, 7.268574, 0, 9999, -9999, 1.0, 100, 1, 10.086482, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1340, 54.945336, 0, 9999, -9999, 1.0, 100, 1, 70.098327, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1341, 137.567376, 0, 9999, -9999, 1.0, 100, 1, 205.513321, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1342, 0.062871, 0, 9999, -9999, 1.0, 100, 1, 0.734589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1343, 0.011702, 0, 9999, -9999, 1.0, 100, 1, 1.102108, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1344, 0.038081, 0, 9999, -9999, 1.0, 100, 1, 0.226057, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1345, 0.312649, 0, 9999, -9999, 1.0, 100, 1, 3.971188, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1346, 144.547401, 0, 9999, -9999, 1.0, 100, 1, 214.719215, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1347, 326.035913, 0, 9999, -9999, 1.0, 100, 1, 414.115976, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1348, 17.008016, 0, 9999, -9999, 1.0, 100, 1, 22.707927, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1349, 32.784331, 0, 9999, -9999, 1.0, 100, 1, 42.352342, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1350, 0.027817, 0, 9999, -9999, 1.0, 100, 1, 0.094971, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1351, 0.00031, 0, 9999, -9999, 1.0, 100, 1, 0.015958, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1352, 0.016451, 0, 9999, -9999, 1.0, 100, 1, 0.83726, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1354, 0.002097, 0, 9999, -9999, 1.0, 100, 1, 0.147716, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1355, 0.089497, 0, 9999, -9999, 1.0, 100, 1, 1.688324, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1356, 66.515287, 0, 9999, -9999, 1.0, 100, 1, 73.486231, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1357, 49.20443, 0, 9999, -9999, 1.0, 100, 1, 56.459913, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1358, 0.013333, 0, 9999, -9999, 1.0, 100, 1, 0.247293, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1359, 36.516677, 0, 9999, -9999, 1.0, 100, 1, 70.633589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1360, 7.419108, 0, 9999, -9999, 1.0, 100, 1, 17.135983, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1361, 26.148309, 0, 9999, -9999, 1.0, 100, 1, 63.207173, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1362, 34.345179, 0, 9999, -9999, 1.0, 100, 1, 79.107216, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1363, 0.013004, 0, 9999, -9999, 1.0, 100, 1, 0.036158, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1364, 0.018633, 0, 9999, -9999, 1.0, 100, 1, 0.061068, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1365, 0.000163, 0, 9999, -9999, 1.0, 100, 1, 0.000456, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1366, 0.613465, 0, 9999, -9999, 1.0, 100, 1, 1.229992, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1367, 13.509235, 0, 9999, -9999, 1.0, 100, 1, 43.863891, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1368, 0.234281, 0, 9999, -9999, 1.0, 100, 1, 3.298243, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
<filename>jax/_src/lax/control_flow/loops.py
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the loop primitives."""
from functools import partial
import itertools
import operator
from typing import Any, Callable, List, Optional, Sequence, Tuple, TypeVar
import jax
from jax import core
from jax import linear_util as lu
from jax.config import config
from jax.core import ConcreteArray, ShapedArray, raise_to_shaped
from jax.interpreters import ad
from jax.interpreters import batching
from jax.interpreters import masking
from jax.interpreters import mlir
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.tree_util import (tree_flatten, tree_unflatten, treedef_is_leaf,
tree_map)
from jax._src import ad_checkpoint
from jax._src import ad_util
from jax._src import api
from jax._src import dtypes
from jax._src import source_info_util
from jax._src import util
from jax._src.lax import lax
from jax._src.lax import slicing
from jax._src.lax import windowed_reductions
from jax._src.lib.mlir import ir
from jax._src.lib.mlir.dialects import mhlo
from jax._src.traceback_util import api_boundary
from jax._src.util import (
cache,
extend_name_stack,
safe_map,
safe_zip,
split_list,
unzip2,
)
import numpy as np
from jax._src.lax.control_flow.common import (
_abstractify,
_avals_short,
_check_tree_and_avals,
_initial_style_jaxpr,
_make_closed_jaxpr,
_prune_zeros,
_typecheck_param,
allowed_effects,
)
# Shadow map/zip with the length-checked safe_* variants imported from
# jax._src.util above.
_map = safe_map
zip = safe_zip
# Generic type variable used in annotations throughout this module.
T = TypeVar('T')
# Loose aliases; precise array types are not expressed here.
Array = Any
BooleanNumeric = Any  # A bool, or a Boolean array.
### Helper functions
def _promote_weak_typed_inputs(in_vals, in_avals, out_avals):
"""Promote weakly-typed in_vals to be compatible with out_avals.
Args:
in_vals : flattened list of input values.
in_avals : corresponding list of avals.
out_avals : list of target output avals.
Returns:
in_vals_new : flattened list of modified in_vals with no weak types.
changed : bool; true if in_vals required modification.
"""
if len(in_vals) != len(in_avals) or len(in_avals) != len(out_avals):
# Calling function is responsible for catching this.
return in_vals, False
weak_mismatches = [i for i, (a1, a2) in enumerate(zip(in_avals, out_avals))
if getattr(a1, 'weak_type', False) and not core.typematch(a1, a2)]
if not weak_mismatches:
return in_vals, False
for i in weak_mismatches:
new_dtype = dtypes.result_type(in_vals[i], out_avals[i])
in_vals[i] = lax.convert_element_type(in_vals[i], new_dtype)
return in_vals, True
### scan
# Type variables for scan's Haskell-like signature,
# scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])
Carry = TypeVar('Carry')  # the loop-carried state type ``c``
X = TypeVar('X')  # per-iteration input slice type ``a``
Y = TypeVar('Y')  # per-iteration output slice type ``b``
@api_boundary
def scan(f: Callable[[Carry, X], Tuple[Carry, Y]],
init: Carry,
xs: X,
length: Optional[int] = None,
reverse: bool = False,
unroll: int = 1) -> Tuple[Carry, Y]:
"""Scan a function over leading array axes while carrying along state.
The `Haskell-like type signature`_ in brief is
.. code-block:: haskell
scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])
where we use [t] here to denote the type t with an additional leading axis.
That is, if t is an array type then [t] represents the type with an additional
leading axis, and if t is a pytree (container) type with array leaves then [t]
represents the type with the same pytree structure and corresponding leaves
each with an additional leading axis.
When ``a`` is an array type or None, and ``b`` is an array type, the semantics
of ``scan`` are given roughly by this Python implementation::
def scan(f, init, xs, length=None):
if xs is None:
xs = [None] * length
carry = init
ys = []
for x in xs:
carry, y = f(carry, x)
ys.append(y)
return carry, np.stack(ys)
Unlike that Python version, both ``a`` and ``b`` may be arbitrary pytree
types, and so multiple arrays can be scanned over at once and produce multiple
output arrays. (None is actually an empty pytree.)
Also unlike that Python version, ``scan`` is a JAX primitive and is lowered to
a single XLA While HLO. That makes it useful for reducing compilation times
for jit-compiled functions, since native Python loop constructs in an ``@jit``
function are unrolled, leading to large XLA computations.
Finally, the loop-carried value ``carry`` must hold a fixed shape and dtype
across all iterations (and not just be consistent up to NumPy rank/shape
broadcasting and dtype promotion rules, for example). In other words, the type
``c`` in the type signature above represents an array with a fixed shape and
dtype (or a nested tuple/list/dict container data structure with a fixed
structure and arrays with fixed shape and dtype at the leaves).
.. note::
:py:func:`scan` compiles ``f``, so while it can be combined with
:py:func:`jit`, it's usually unnecessary.
Args:
f: a Python function to be scanned of type ``c -> a -> (c, b)``, meaning
that ``f`` accepts two arguments where the first is a value of the loop
carry and the second is a slice of ``xs`` along its leading axis, and that
``f`` returns a pair where the first element represents a new value for
the loop carry and the second represents a slice of the output.
init: an initial loop carry value of type ``c``, which can be a scalar,
array, or any pytree (nested Python tuple/list/dict) thereof, representing
the initial loop carry value. This value must have the same structure as
the first element of the pair returned by ``f``.
xs: the value of type ``[a]`` over which to scan along the leading axis,
where ``[a]`` can be an array or any pytree (nested Python
tuple/list/dict) thereof with consistent leading axis sizes.
length: optional integer specifying the number of loop iterations, which
must agree with the sizes of leading axes of the arrays in ``xs`` (but can
be used to perform scans where no input ``xs`` are needed).
reverse: optional boolean specifying whether to run the scan iteration
forward (the default) or in reverse, equivalent to reversing the leading
axes of the arrays in both ``xs`` and in ``ys``.
unroll: optional positive int specifying, in the underlying operation of the
scan primitive, how many scan iterations to unroll within a single
iteration of a loop.
Returns:
A pair of type ``(c, [b])`` where the first element represents the final
loop carry value and the second element represents the stacked outputs of
the second output of ``f`` when scanned over the leading axis of the inputs.
.. _Haskell-like type signature: https://wiki.haskell.org/Type_signature
"""
if not callable(f):
raise TypeError("lax.scan: f argument should be a callable.")
xs_flat, xs_tree = tree_flatten(xs)
try:
lengths = [x.shape[0] for x in xs_flat]
except AttributeError as err:
msg = "scan got value with no leading axis to scan over: {}."
raise ValueError(
msg.format(', '.join(str(x) for x in xs_flat
if not hasattr(x, 'shape')))) from err
if length is not None:
length = int(length)
if not all(length == l for l in lengths):
msg = ("scan got `length` argument of {} which disagrees with "
"leading axis sizes {}.")
raise ValueError(msg.format(length, [x.shape[0] for x in xs_flat]))
else:
unique_lengths = set(lengths)
if len(unique_lengths) > 1:
msg = "scan got values with different leading axis sizes: {}."
raise ValueError(msg.format(', '.join(str(x.shape[0]) for x in xs_flat)))
elif len(unique_lengths) == 0:
msg = "scan got no values to scan over and `length` not provided."
raise ValueError(msg)
else:
length, = unique_lengths
if config.jax_disable_jit:
if length == 0:
raise ValueError("zero-length scan is not supported in disable_jit() mode because the output type is unknown.")
carry = init
ys = []
maybe_reversed = reversed if reverse else lambda x: x
for i in maybe_reversed(range(length)):
xs_slice = [_index_array(i, core.get_aval(x), x) for x in xs_flat]
carry, y = f(carry, tree_unflatten(xs_tree, xs_slice))
ys.append(y)
stack = lambda *ys: jax.numpy.stack(ys)
stacked_y = tree_map(stack, *maybe_reversed(ys))
return carry, stacked_y
x_shapes = [masking.padded_shape_as_value(x.shape[1:]) for x in xs_flat]
x_dtypes = [dtypes.canonicalize_dtype(x.dtype) for x in xs_flat]
x_avals = tuple(_map(ShapedArray, x_shapes, x_dtypes))
def _create_jaxpr(init):
init_flat, init_tree = tree_flatten(init)
in_flat, in_tree = tree_flatten((init, xs))
carry_avals = tuple(_map(_abstractify, init_flat))
jaxpr, consts, out_tree = _initial_style_jaxpr(
f, in_tree, carry_avals + x_avals, "scan")
out_tree_children = out_tree.children()
if len(out_tree_children) != 2:
msg = "scan body output must be a pair, got {}."
raise TypeError(msg.format(tree_unflatten(out_tree, jaxpr.out_avals)))
carry_avals_out = jaxpr.out_avals[:out_tree_children[0].num_leaves]
return init_flat, carry_avals, carry_avals_out, init_tree, in_flat, jaxpr, consts, out_tree, out_tree_children
# The carry input and output avals must match exactly. However, we | |
return _ida_hexrays.lvar_locator_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool

        Ordering comparison with another lvar_locator_t; delegates to
        the native _ida_hexrays extension module.
        """
        return _ida_hexrays.lvar_locator_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int

        Compare with another lvar_locator_t, returning an int
        (presumably <0/0/>0 ordering -- confirm against the SDK).
        Delegates to the native _ida_hexrays extension module.
        """
        return _ida_hexrays.lvar_locator_t_compare(self, *args)
__swig_destroy__ = _ida_hexrays.delete_lvar_locator_t
__del__ = lambda self : None;
# Register the Python proxy class with the SWIG runtime so native
# lvar_locator_t objects are wrapped by this class.
lvar_locator_t_swigregister = _ida_hexrays.lvar_locator_t_swigregister
lvar_locator_t_swigregister(lvar_locator_t)
class lvar_t(lvar_locator_t):
"""
Proxy of C++ lvar_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
name = _swig_property(_ida_hexrays.lvar_t_name_get, _ida_hexrays.lvar_t_name_set)
cmt = _swig_property(_ida_hexrays.lvar_t_cmt_get, _ida_hexrays.lvar_t_cmt_set)
tif = _swig_property(_ida_hexrays.lvar_t_tif_get, _ida_hexrays.lvar_t_tif_set)
width = _swig_property(_ida_hexrays.lvar_t_width_get, _ida_hexrays.lvar_t_width_set)
defblk = _swig_property(_ida_hexrays.lvar_t_defblk_get, _ida_hexrays.lvar_t_defblk_set)
divisor = _swig_property(_ida_hexrays.lvar_t_divisor_get, _ida_hexrays.lvar_t_divisor_set)
def used(self, *args):
"""
used(self) -> bool
"""
return _ida_hexrays.lvar_t_used(self, *args)
def typed(self, *args):
"""
typed(self) -> bool
"""
return _ida_hexrays.lvar_t_typed(self, *args)
def mreg_done(self, *args):
"""
mreg_done(self) -> bool
"""
return _ida_hexrays.lvar_t_mreg_done(self, *args)
def has_nice_name(self, *args):
"""
has_nice_name(self) -> bool
"""
return _ida_hexrays.lvar_t_has_nice_name(self, *args)
def is_unknown_width(self, *args):
"""
is_unknown_width(self) -> bool
"""
return _ida_hexrays.lvar_t_is_unknown_width(self, *args)
def has_user_info(self, *args):
"""
has_user_info(self) -> bool
"""
return _ida_hexrays.lvar_t_has_user_info(self, *args)
def has_user_name(self, *args):
"""
has_user_name(self) -> bool
"""
return _ida_hexrays.lvar_t_has_user_name(self, *args)
def has_user_type(self, *args):
"""
has_user_type(self) -> bool
"""
return _ida_hexrays.lvar_t_has_user_type(self, *args)
def is_result_var(self, *args):
"""
is_result_var(self) -> bool
"""
return _ida_hexrays.lvar_t_is_result_var(self, *args)
def is_arg_var(self, *args):
"""
is_arg_var(self) -> bool
"""
return _ida_hexrays.lvar_t_is_arg_var(self, *args)
def is_fake_var(self, *args):
"""
is_fake_var(self) -> bool
"""
return _ida_hexrays.lvar_t_is_fake_var(self, *args)
def is_overlapped_var(self, *args):
"""
is_overlapped_var(self) -> bool
"""
return _ida_hexrays.lvar_t_is_overlapped_var(self, *args)
def is_floating_var(self, *args):
"""
is_floating_var(self) -> bool
"""
return _ida_hexrays.lvar_t_is_floating_var(self, *args)
def is_spoiled_var(self, *args):
"""
is_spoiled_var(self) -> bool
"""
return _ida_hexrays.lvar_t_is_spoiled_var(self, *args)
def is_noptr_var(self, *args):
"""
is_noptr_var(self) -> bool
"""
return _ida_hexrays.lvar_t_is_noptr_var(self, *args)
def is_mapdst_var(self, *args):
"""
is_mapdst_var(self) -> bool
"""
return _ida_hexrays.lvar_t_is_mapdst_var(self, *args)
def is_thisarg(self, *args):
"""
is_thisarg(self) -> bool
"""
return _ida_hexrays.lvar_t_is_thisarg(self, *args)
def is_forced_var(self, *args):
"""
is_forced_var(self) -> bool
"""
return _ida_hexrays.lvar_t_is_forced_var(self, *args)
def has_regname(self, *args):
"""
has_regname(self) -> bool
"""
return _ida_hexrays.lvar_t_has_regname(self, *args)
def is_dummy_arg(self, *args):
"""
is_dummy_arg(self) -> bool
"""
return _ida_hexrays.lvar_t_is_dummy_arg(self, *args)
def is_notarg(self, *args):
"""
is_notarg(self) -> bool
"""
return _ida_hexrays.lvar_t_is_notarg(self, *args)
def set_used(self, *args):
"""
set_used(self)
"""
return _ida_hexrays.lvar_t_set_used(self, *args)
def clear_used(self, *args):
"""
clear_used(self)
"""
return _ida_hexrays.lvar_t_clear_used(self, *args)
def set_typed(self, *args):
"""
set_typed(self)
"""
return _ida_hexrays.lvar_t_set_typed(self, *args)
def set_non_typed(self, *args):
"""
set_non_typed(self)
"""
return _ida_hexrays.lvar_t_set_non_typed(self, *args)
def clr_user_info(self, *args):
"""
clr_user_info(self)
"""
return _ida_hexrays.lvar_t_clr_user_info(self, *args)
def set_user_name(self, *args):
"""
set_user_name(self)
"""
return _ida_hexrays.lvar_t_set_user_name(self, *args)
def set_user_type(self, *args):
"""
set_user_type(self)
"""
return _ida_hexrays.lvar_t_set_user_type(self, *args)
def clr_user_type(self, *args):
"""
clr_user_type(self)
"""
return _ida_hexrays.lvar_t_clr_user_type(self, *args)
def clr_user_name(self, *args):
"""
clr_user_name(self)
"""
return _ida_hexrays.lvar_t_clr_user_name(self, *args)
def set_mreg_done(self, *args):
"""
set_mreg_done(self)
"""
return _ida_hexrays.lvar_t_set_mreg_done(self, *args)
def clr_mreg_done(self, *args):
"""
clr_mreg_done(self)
"""
return _ida_hexrays.lvar_t_clr_mreg_done(self, *args)
def set_unknown_width(self, *args):
"""
set_unknown_width(self)
"""
return _ida_hexrays.lvar_t_set_unknown_width(self, *args)
def clr_unknown_width(self, *args):
"""
clr_unknown_width(self)
"""
return _ida_hexrays.lvar_t_clr_unknown_width(self, *args)
def set_arg_var(self, *args):
"""
set_arg_var(self)
"""
return _ida_hexrays.lvar_t_set_arg_var(self, *args)
def clr_arg_var(self, *args):
"""
clr_arg_var(self)
"""
return _ida_hexrays.lvar_t_clr_arg_var(self, *args)
def set_fake_var(self, *args):
"""
set_fake_var(self)
"""
return _ida_hexrays.lvar_t_set_fake_var(self, *args)
def clr_fake_var(self, *args):
"""
clr_fake_var(self)
"""
return _ida_hexrays.lvar_t_clr_fake_var(self, *args)
def set_overlapped_var(self, *args):
"""
set_overlapped_var(self)
"""
return _ida_hexrays.lvar_t_set_overlapped_var(self, *args)
def clr_overlapped_var(self, *args):
"""
clr_overlapped_var(self)
"""
return _ida_hexrays.lvar_t_clr_overlapped_var(self, *args)
def set_floating_var(self, *args):
"""
set_floating_var(self)
"""
return _ida_hexrays.lvar_t_set_floating_var(self, *args)
def clr_floating_var(self, *args):
"""
clr_floating_var(self)
"""
return _ida_hexrays.lvar_t_clr_floating_var(self, *args)
def set_spoiled_var(self, *args):
"""
set_spoiled_var(self)
"""
return _ida_hexrays.lvar_t_set_spoiled_var(self, *args)
def clr_spoiled_var(self, *args):
"""
clr_spoiled_var(self)
"""
return _ida_hexrays.lvar_t_clr_spoiled_var(self, *args)
def set_mapdst_var(self, *args):
"""
set_mapdst_var(self)
"""
return _ida_hexrays.lvar_t_set_mapdst_var(self, *args)
def clr_mapdst_var(self, *args):
"""
clr_mapdst_var(self)
"""
return _ida_hexrays.lvar_t_clr_mapdst_var(self, *args)
def set_noptr_var(self, *args):
"""
set_noptr_var(self)
"""
return _ida_hexrays.lvar_t_set_noptr_var(self, *args)
def clr_noptr_var(self, *args):
"""
clr_noptr_var(self)
"""
return _ida_hexrays.lvar_t_clr_noptr_var(self, *args)
def set_thisarg(self, *args):
"""
set_thisarg(self)
"""
return _ida_hexrays.lvar_t_set_thisarg(self, *args)
def clr_thisarg(self, *args):
"""
clr_thisarg(self)
"""
return _ida_hexrays.lvar_t_clr_thisarg(self, *args)
def set_forced_var(self, *args):
"""
set_forced_var(self)
"""
return _ida_hexrays.lvar_t_set_forced_var(self, *args)
def clr_forced_var(self, *args):
"""
clr_forced_var(self)
"""
return _ida_hexrays.lvar_t_clr_forced_var(self, *args)
def set_dummy_arg(self, *args):
"""
set_dummy_arg(self)
"""
return _ida_hexrays.lvar_t_set_dummy_arg(self, *args)
def clr_dummy_arg(self, *args):
"""
clr_dummy_arg(self)
"""
return _ida_hexrays.lvar_t_clr_dummy_arg(self, *args)
def set_notarg(self, *args):
"""
set_notarg(self)
"""
return _ida_hexrays.lvar_t_set_notarg(self, *args)
def clr_notarg(self, *args):
"""
clr_notarg(self)
"""
return _ida_hexrays.lvar_t_clr_notarg(self, *args)
def has_common(self, *args):
"""
has_common(self, v) -> bool
"""
return _ida_hexrays.lvar_t_has_common(self, *args)
def has_common_bit(self, *args):
"""
has_common_bit(self, loc, width2) -> bool
"""
return _ida_hexrays.lvar_t_has_common_bit(self, *args)
def type(self, *args):
"""
type(self) -> tinfo_t
type(self) -> tinfo_t
"""
return _ida_hexrays.lvar_t_type(self, *args)
def accepts_type(self, *args):
"""
accepts_type(self, t, may_change_thisarg=False) -> bool
"""
return _ida_hexrays.lvar_t_accepts_type(self, *args)
def set_lvar_type(self, *args):
"""
set_lvar_type(self, t, may_fail=False) -> bool
"""
return _ida_hexrays.lvar_t_set_lvar_type(self, *args)
def set_final_lvar_type(self, *args):
"""
set_final_lvar_type(self, t)
"""
return _ida_hexrays.lvar_t_set_final_lvar_type(self, *args)
def set_width(self, *args):
"""
set_width(self, w, svw_flags=0) -> bool
"""
return _ida_hexrays.lvar_t_set_width(self, *args)
def append_list(self, *args):
"""
append_list(self, lst, pad_if_scattered=False)
"""
return _ida_hexrays.lvar_t_append_list(self, *args)
    def is_aliasable(self, *args):
        """
        is_aliasable(self, mba) -> bool

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_t_is_aliasable(self, *args)
__swig_destroy__ = _ida_hexrays.delete_lvar_t
__del__ = lambda self : None;
lvar_t_swigregister = _ida_hexrays.lvar_t_swigregister
lvar_t_swigregister(lvar_t)
SVW_INT = _ida_hexrays.SVW_INT
SVW_FLOAT = _ida_hexrays.SVW_FLOAT
SVW_SOFT = _ida_hexrays.SVW_SOFT
class lvars_t(qvector_lvar_t):
    """
    Proxy of C++ lvars_t class

    A vector of local variables (inherits qvector_lvar_t) with native
    lookup helpers by argument location, stack offset or location.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def find_input_lvar(self, *args):
        """
        find_input_lvar(self, argloc, _size) -> int

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvars_t_find_input_lvar(self, *args)
    def find_stkvar(self, *args):
        """
        find_stkvar(self, spoff, width) -> int

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvars_t_find_stkvar(self, *args)
    def find(self, *args):
        """
        find(self, ll) -> lvar_t

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvars_t_find(self, *args)
    def find_lvar(self, *args):
        """
        find_lvar(self, location, width, defblk=-1) -> int

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvars_t_find_lvar(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> lvars_t
        """
        this = _ida_hexrays.new_lvars_t(*args)
        # Standard SWIG ownership dance: attach the native object to self.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_lvars_t
    __del__ = lambda self : None;
# Register the lvars_t proxy class with the SWIG runtime.
lvars_t_swigregister = _ida_hexrays.lvars_t_swigregister
lvars_t_swigregister(lvars_t)
class lvar_saved_info_t(object):
    """
    Proxy of C++ lvar_saved_info_t class

    User-saved information about one local variable: its location (ll),
    name, type, comment, size and flag bits (see the LVINF_* constants
    defined after this class).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Saved per-variable data exposed as SWIG properties.
    ll = _swig_property(_ida_hexrays.lvar_saved_info_t_ll_get, _ida_hexrays.lvar_saved_info_t_ll_set)
    name = _swig_property(_ida_hexrays.lvar_saved_info_t_name_get, _ida_hexrays.lvar_saved_info_t_name_set)
    type = _swig_property(_ida_hexrays.lvar_saved_info_t_type_get, _ida_hexrays.lvar_saved_info_t_type_set)
    cmt = _swig_property(_ida_hexrays.lvar_saved_info_t_cmt_get, _ida_hexrays.lvar_saved_info_t_cmt_set)
    size = _swig_property(_ida_hexrays.lvar_saved_info_t_size_get, _ida_hexrays.lvar_saved_info_t_size_set)
    flags = _swig_property(_ida_hexrays.lvar_saved_info_t_flags_get, _ida_hexrays.lvar_saved_info_t_flags_set)
    def __init__(self, *args):
        """
        __init__(self) -> lvar_saved_info_t
        """
        this = _ida_hexrays.new_lvar_saved_info_t(*args)
        # Standard SWIG ownership dance: attach the native object to self.
        try: self.this.append(this)
        except: self.this = this
    def has_info(self, *args):
        """
        has_info(self) -> bool

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_has_info(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.lvar_saved_info_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.lvar_saved_info_t___ne__(self, *args)
    def is_kept(self, *args):
        """
        is_kept(self) -> bool

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_is_kept(self, *args)
    def clear_keep(self, *args):
        """
        clear_keep(self)

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_clear_keep(self, *args)
    def set_keep(self, *args):
        """
        set_keep(self)

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_set_keep(self, *args)
    def is_forced_lvar(self, *args):
        """
        is_forced_lvar(self) -> bool

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_is_forced_lvar(self, *args)
    def set_forced_lvar(self, *args):
        """
        set_forced_lvar(self)

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_set_forced_lvar(self, *args)
    def clr_forced_lvar(self, *args):
        """
        clr_forced_lvar(self)

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_clr_forced_lvar(self, *args)
    def is_noptr_lvar(self, *args):
        """
        is_noptr_lvar(self) -> bool

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_is_noptr_lvar(self, *args)
    def set_noptr_lvar(self, *args):
        """
        set_noptr_lvar(self)

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_set_noptr_lvar(self, *args)
    def clr_noptr_lvar(self, *args):
        """
        clr_noptr_lvar(self)

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_saved_info_t_clr_noptr_lvar(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_lvar_saved_info_t
    __del__ = lambda self : None;
# Register the lvar_saved_info_t proxy class with the SWIG runtime.
lvar_saved_info_t_swigregister = _ida_hexrays.lvar_saved_info_t_swigregister
lvar_saved_info_t_swigregister(lvar_saved_info_t)
# Flag bits mirrored from the native module
# (presumably stored in lvar_saved_info_t.flags — confirm in SDK).
LVINF_KEEP = _ida_hexrays.LVINF_KEEP
"""
preserve saved user settings regardless of vars for example, if a var
loses all its user-defined attributes or even gets destroyed, keep its
'lvar_saved_info_t' . this is used for ephemeral variables that get
destroyed by macro recognition.
"""
LVINF_FORCE = _ida_hexrays.LVINF_FORCE
"""
force allocation of a new variable. forces the decompiler to create a
new variable at ll.defea
"""
LVINF_NOPTR = _ida_hexrays.LVINF_NOPTR
"""
variable type should not be a pointer
"""
class lvar_uservec_t(object):
    """
    Proxy of C++ lvar_uservec_t class

    Container for user-specified local variable settings of a function
    (see restore_user_lvar_settings()).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Saved settings exposed as SWIG properties.
    lvvec = _swig_property(_ida_hexrays.lvar_uservec_t_lvvec_get, _ida_hexrays.lvar_uservec_t_lvvec_set)
    lmaps = _swig_property(_ida_hexrays.lvar_uservec_t_lmaps_get, _ida_hexrays.lvar_uservec_t_lmaps_set)
    stkoff_delta = _swig_property(_ida_hexrays.lvar_uservec_t_stkoff_delta_get, _ida_hexrays.lvar_uservec_t_stkoff_delta_set)
    ulv_flags = _swig_property(_ida_hexrays.lvar_uservec_t_ulv_flags_get, _ida_hexrays.lvar_uservec_t_ulv_flags_set)
    def __init__(self, *args):
        """
        __init__(self) -> lvar_uservec_t
        """
        this = _ida_hexrays.new_lvar_uservec_t(*args)
        # Standard SWIG ownership dance: attach the native object to self.
        try: self.this.append(this)
        except: self.this = this
    def swap(self, *args):
        """
        swap(self, r)

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_uservec_t_swap(self, *args)
    def clear(self, *args):
        """
        clear(self)

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_uservec_t_clear(self, *args)
    def find_info(self, *args):
        """
        find_info(self, vloc) -> lvar_saved_info_t

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_uservec_t_find_info(self, *args)
    def keep_info(self, *args):
        """
        keep_info(self, v)

        SWIG-generated wrapper; delegates to the native Hex-Rays module.
        """
        return _ida_hexrays.lvar_uservec_t_keep_info(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_lvar_uservec_t
    __del__ = lambda self : None;
# Register the lvar_uservec_t proxy class with the SWIG runtime.
lvar_uservec_t_swigregister = _ida_hexrays.lvar_uservec_t_swigregister
lvar_uservec_t_swigregister(lvar_uservec_t)
# ulv_flags bit mirrored from the native module.
ULV_PRECISE_DEFEA = _ida_hexrays.ULV_PRECISE_DEFEA
"""
Use precise defea's for lvar locations.
"""
def restore_user_lvar_settings(*args):
    """
    restore_user_lvar_settings(lvinf, func_ea) -> bool
    Restore user defined local variable settings in the database.
    @param lvinf: ptr to output buffer (C++: lvar_uservec_t *)
    @param func_ea: entry address of the function (C++: ea_t)
    @return: success
    """
    # SWIG-generated wrapper; the actual work happens in the native module.
    return _ida_hexrays.restore_user_lvar_settings(*args)
def | |
<reponame>consciencia/VizTools
# MIT License
#
# Copyright (c) 2020 Consciencia <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import six
import time
import copy
import json
import math
import random
import chardet
import datetime
import threading
import http.server
__author__ = "Consciencia"  # module author tag
def thisdir():
    """Return the absolute directory that contains this module file."""
    here = os.path.realpath(__file__)
    return os.path.dirname(here)
def stringToColor(chars):
    """Deterministically map a string to a '#rrggbb' color.

    Uses the classic Java-style rolling hash (h = ord(c) + 31 * h,
    written as (h << 5) - h) and takes the low three bytes of the
    result as the red, green and blue components.
    The empty string maps to "#000000".
    """
    # Renamed the accumulator: the original shadowed the builtin hash().
    acc = 0
    for char in chars:
        acc = ord(char) + ((acc << 5) - acc)
    color = "#"
    for i in range(3):
        component = (acc >> (i * 8)) & 0xFF
        color += format(component, "02x")
    return color
def randomColor():
    """Return a random '#rrggbb' color string."""
    # Same RNG calls as before (one uniform draw per channel), just
    # assembled via a comprehension instead of string concatenation.
    channels = [hex(math.floor(random.uniform(0, 255)))[2:].zfill(2)
                for _ in range(3)]
    return "#" + "".join(channels)
def sanitizeHtml(code):
    """Escape double quotes and turn newlines into <br> tags.

    CRLF is handled before bare LF so "\r\n" yields a single <br>.
    """
    replacements = (("\"", "\\\""),
                    ("\r\n", "<br>"),
                    ("\n", "<br>"))
    for old, new in replacements:
        code = code.replace(old, new)
    # TODO: Remove <script> and <style> tags.
    return code
class Series:
    """A single labelled x/y data series with colors and point radiuses.

    x values may be numbers, strings or datetime.datetime objects
    (datetimes are normalized to "%d.%m.%Y" strings when set through
    x()); y values are numeric.  Unless explicit colors are supplied,
    border and fill colors are derived deterministically from the
    y-label; passing "random" picks a random color instead.
    """
    def __init__(self,
                 x=None,
                 y=None,
                 xlabel="X",
                 ylabel="Y",
                 borderColor=None,
                 fillColor=None):
        if x is None:
            x = []
        if y is None:
            y = []
        if len(x) != len(y):
            raise Exception("Invalid input!")
        self._x = copy.deepcopy(x)
        self._y = copy.deepcopy(y)
        self._xlabel = xlabel
        self._ylabel = ylabel
        self._borderColor = borderColor
        self._fillColor = fillColor
        self._pad = 15  # column width used by __str__
        self._radiuses = []
        if self._borderColor is None:
            self._borderColor = stringToColor(self._ylabel)
        elif self._borderColor == "random":
            self._borderColor = randomColor()
        if self._fillColor is None:
            self._fillColor = stringToColor(self._ylabel)
        elif self._fillColor == "random":
            # NOTE(review): _borderColor was already resolved to a concrete
            # color above, so this inner test always falls through to
            # randomColor(); kept as-is for behavioral compatibility.
            if self._borderColor == "random":
                self._fillColor = self._borderColor
            else:
                self._fillColor = randomColor()
    def xlabel(self, val=None):
        """Get the x axis label, or set it when val is given."""
        if val is not None:
            if not isinstance(val, (type(""), type(u""))):
                raise Exception("Invalid input!")
            self._xlabel = val
        else:
            return self._xlabel
    def ylabel(self, val=None):
        """Get the y axis label, or set it when val is given."""
        if val is not None:
            if not isinstance(val, (type(""), type(u""))):
                raise Exception("Invalid input!")
            self._ylabel = val
        else:
            return self._ylabel
    def borderColor(self, val=None):
        """Get the border color, or set it when val is given."""
        if val is not None:
            if not isinstance(val, (list, type(""), type(u""))):
                raise Exception("Invalid input!")
            self._borderColor = val
        else:
            return self._borderColor
    def fillColor(self, val=None):
        """Get the fill color, or set it when val is given."""
        if val is not None:
            if not isinstance(val, (list, type(""), type(u""))):
                raise Exception("Invalid input!")
            self._fillColor = val
        else:
            return self._fillColor
    def x(self, vals=None, idx=None):
        """Get or set x values.

        With no arguments, return a copy of all x values; with idx only,
        return a single value.  With vals (list or scalar), validate and
        store; datetime values are converted to "%d.%m.%Y" strings.
        """
        if vals is not None:
            if idx is None:
                if not isinstance(vals, list):
                    raise Exception("Invalid vals!")
                # BUGFIX: the original comprehension *filtered out* every
                # non-datetime entry (so plain numeric lists were rejected
                # by the length check below); convert datetimes in place
                # and keep all other values.
                vals = [v.strftime("%d.%m.%Y")
                        if isinstance(v, datetime.datetime) else v
                        for v in vals]
                for v in vals:
                    if not isinstance(v, (int, float, type(""), type(u""))):
                        raise Exception("Invalid vals!")
                if len(vals) == len(self._y) or len(self._y) == 0:
                    self._x = copy.deepcopy(vals)
                else:
                    raise Exception("Invalid vals!")
            else:
                if isinstance(vals, datetime.datetime):
                    # BUGFIX: the original line ended with a stray comma,
                    # producing a 1-tuple that always failed the
                    # isinstance() check below.
                    vals = vals.strftime("%d.%m.%Y")
                if not isinstance(vals, (int, float, type(""), type(u""))):
                    raise Exception("Invalid vals!")
                self._x[idx] = vals
        elif idx is None:
            return copy.deepcopy(self._x)
        else:
            return self._x[idx]
    def y(self, vals=None, idx=None):
        """Get or set y values (all of them, or one index)."""
        if vals is not None:
            if idx is None:
                if not isinstance(vals, list):
                    raise Exception("Invalid vals!")
                for v in vals:
                    if not isinstance(v, (int, float)):
                        raise Exception("Invalid vals!")
                if len(vals) == len(self._x):
                    self._y = copy.deepcopy(vals)
                else:
                    raise Exception("Invalid vals!")
            else:
                if not isinstance(vals, (int, float)):
                    raise Exception("Invalid vals!")
                self._y[idx] = vals
        elif idx is None:
            return copy.deepcopy(self._y)
        else:
            return self._y[idx]
    def radiuses(self, vals=None, idx=None):
        """Get or set per-point radiuses (length must match x when set)."""
        if vals is not None:
            if idx is None:
                if not isinstance(vals, list):
                    raise Exception("Invalid vals!")
                for v in vals:
                    if not isinstance(v, int):
                        raise Exception("Invalid vals!")
                if len(vals) == len(self._x):
                    self._radiuses = copy.deepcopy(vals)
                else:
                    raise Exception("Invalid vals!")
            else:
                if not isinstance(vals, int):
                    raise Exception("Invalid vals!")
                self._radiuses[idx] = vals
        elif idx is None:
            return copy.deepcopy(self._radiuses)
        else:
            return self._radiuses[idx]
    def arraysInPair(self):
        """Return (x values, y values) as a pair of lists."""
        return (self.x(), self.y())
    def pairsInArray(self):
        """Return the data as a list of (x, y) tuples."""
        result = []
        for i in range(len(self._x)):
            result.append((self._x[i], self._y[i]))
        return result
    def push(self, x, y):
        """Append one (x, y) point to the end of the series."""
        self._x.append(x)
        self._y.append(y)
    def pop(self):
        """Remove and return the last (x, y) point, or None when empty."""
        if len(self._x) == 0:
            return None
        return (self._x.pop(-1), self._y.pop(-1))
    def clone(self):
        """Return a deep copy of this series."""
        return copy.deepcopy(self)
    def clear(self):
        """Drop all data points (labels, colors and radiuses are kept)."""
        self._x = []
        self._y = []
    def __len__(self):
        return len(self._x)
    def __iter__(self):
        for i in range(len(self)):
            yield (self.x(None, i), self.y(None, i))
    def __getitem__(self, idx):
        return (self.x(None, idx), self.y(None, idx))
    def __setitem__(self, idx, pair):
        self.x(pair[0], idx)
        self.y(pair[1], idx)
    def __str__(self):
        acc = ""
        acc += self._xlabel.ljust(self._pad) + "| " + self._ylabel + "\n"
        for i in range(len(self._x)):
            acc += str(self._x[i]).ljust(self._pad)
            acc += "| " + str(self._y[i]) + "\n"
        return acc
class MultiSeries:
    """A bundle of Series objects that share one x axis.

    Unless noCheck is set, every contained series must agree on the
    x label and x values; check() enforces this invariant.
    """
    def __init__(self, serieses=None, noCheck=False):
        # Accept a list of Series, a single Series, or nothing.
        if serieses is None:
            serieses = []
        if type(serieses) is list:
            for series in serieses:
                if not isinstance(series, Series):
                    raise Exception("Invalid input")
        elif isinstance(serieses, Series):
            serieses = [serieses]
        else:
            raise Exception("Invalid input")
        if not isinstance(noCheck, bool):
            raise Exception("Invalid input")
        self._serieses = serieses
        self._pad = 15  # column width used by __str__
        self._noCheck = noCheck
        self.check()
    def content(self):
        """Return the underlying list of Series objects."""
        return self._serieses
    def check(self):
        """Raise when the contained series disagree on x label/values."""
        if not self._noCheck and len(self._serieses) > 1:
            # Compare every series against the first one.
            x = (self._serieses[0].xlabel(), self._serieses[0].x())
            for series in self._serieses[1:]:
                if x != (series.xlabel(), series.x()):
                    raise Exception("Non matching series found (%s != %s)!"
                                    % (x, (series.xlabel(), series.x())))
    def add(self, series):
        """Append one Series and re-validate the x axis invariant."""
        if not isinstance(series, Series):
            raise Exception("Invalid input")
        self._serieses.append(series)
        self.check()
    def xlabel(self, val=None):
        """Get the shared x label, or set it on every series."""
        if val is not None:
            for series in self._serieses:
                series.xlabel(val)
        elif len(self._serieses):
            return self._serieses[0].xlabel()
        else:
            return "<no data>"
    def ylabels(self, vals=None, idx=None):
        """Get or set the y labels (all as a list, or one by index)."""
        if vals is not None:
            if idx is None:
                if not isinstance(vals, list):
                    raise Exception("Invalid input!")
                for i, series in enumerate(self._serieses):
                    series.ylabel(vals[i])
            else:
                self._serieses[idx].ylabel(vals)
        elif idx is not None:
            return self._serieses[idx].ylabel()
        else:
            return [series.ylabel() for series in self._serieses]
    def borderColors(self, vals=None, idx=None):
        """Get or set border colors (all as a list, or one by index)."""
        if vals is not None:
            if idx is None:
                if not isinstance(vals, list):
                    raise Exception("Invalid input!")
                for i, series in enumerate(self._serieses):
                    series.borderColor(vals[i])
            else:
                self._serieses[idx].borderColor(vals)
        elif idx is not None:
            return self._serieses[idx].borderColor()
        else:
            return [series.borderColor() for series in self._serieses]
    def fillColors(self, vals=None, idx=None):
        """Get or set fill colors (all as a list, or one by index)."""
        if vals is not None:
            if idx is None:
                if not isinstance(vals, list):
                    raise Exception("Invalid input!")
                for i, series in enumerate(self._serieses):
                    series.fillColor(vals[i])
            else:
                self._serieses[idx].fillColor(vals)
        elif idx is not None:
            return self._serieses[idx].fillColor()
        else:
            return [series.fillColor() for series in self._serieses]
    def x(self, vals=None, idx=None):
        """Set x values on every series, or read them from the first."""
        if vals is not None:
            for series in self._serieses:
                series.x(vals, idx)
        else:
            return self._serieses[0].x(None, idx)
    def y(self, vals=None, idx=None):
        """Get or set y values across series.

        When setting, vals holds one entry per series; when getting,
        returns a list with one value (or value list) per series.
        """
        if vals is not None:
            for i, series in enumerate(self._serieses):
                series.y(vals[i], idx)
        else:
            return [series.y(None, idx)
                    for series in self._serieses]
    def arraysInPair(self):
        """Return (x values, per-series y values) as a pair."""
        return (self.x(), self.y())
    def pairsInArray(self):
        """Return a list of (x, [y per series]) tuples."""
        result = []
        for i, x in enumerate(self.x()):
            result.append((x, self.y(None, i)))
        return result
    def push(self, x, y):
        """Append one x value plus one y value per series."""
        for i, series in enumerate(self._serieses):
            series.push(x, y[i])
    def pop(self):
        """Pop the last point of every series.

        Returns (x, [y per series]) or None when there is nothing to pop.
        """
        result = []
        for series in self._serieses:
            result.append(series.pop())
        if len(result) == 0 or result[0] is None:
            return None
        vals = []
        for _, y in result:
            vals.append(y)
        # All series share the same x, so take it from the first pop.
        return (result[0][0], vals)
    def clone(self):
        """Return a deep copy of this multi-series."""
        return copy.deepcopy(self)
    def clear(self):
        """Drop all data points from every contained series."""
        for series in self._serieses:
            series.clear()
    def __len__(self):
        if len(self._serieses) > 0:
            return len(self._serieses[0])
        return 0
    def __iter__(self):
        for i in range(len(self)):
            yield (self.x(None, i), self.y(None, i))
    def __getitem__(self, idx):
        return (self.x(None, idx), self.y(None, idx))
    def __setitem__(self, idx, pair):
        self.x(pair[0], idx)
        self.y(pair[1], idx)
    def __str__(self):
        acc = ""
        # memo disambiguates duplicate y labels by suffixing "[n]".
        memo = {}
        if len(self._serieses):
            acc += self._serieses[0].xlabel().ljust(self._pad)
        for series in self._serieses:
            label = series.ylabel()
            if label in memo:
                memo[label] += 1
                label = label + ("[%s]" % memo[label])
            else:
                memo[label] = 0
            acc += ("| " + label).ljust(self._pad)
        acc += "\n"
        for i, x in enumerate(self._serieses[0].x()):
            acc += str(x).ljust(self._pad)
            for series in self._serieses:
                acc += ("| %s" % series.y(None, i)).ljust(self._pad)
            acc += "\n"
        return acc
class HttpHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
# self.headers
if self.path not in HttpServer.ROUTES:
self.send_error(404,
"Unknown route",
"Not found handler for '%s'!" % self.path)
else:
route = HttpServer.ROUTES[self.path]
if hasattr(route, "clone"):
route = route.clone()
response = route.render()
self.send_response(200)
self.send_header("Content-type", "text/html")
| |
# timestamp: 1542899030043,
# total_ask_size: 109.57065201,
# total_bid_size: 125.74430631,
# orderbook_units: [{ask_price: 0.02926679,
# bid_price: 0.02919904,
# ask_size: 4.20293961,
# bid_size: 11.65043576},
# ...,
# {ask_price: 0.02938209,
# bid_price: 0.0291231,
# ask_size: 0.05135782,
# bid_size: 13.5595 } ]},
# { market: "KRW-BTC",
# timestamp: 1542899034662,
# total_ask_size: 12.89790974,
# total_bid_size: 4.88395783,
# orderbook_units: [{ask_price: 5164000,
# bid_price: 5162000,
# ask_size: 2.57606495,
# bid_size: 0.214 },
# ...,
# {ask_price: 5176000,
# bid_price: 5152000,
# ask_size: 2.752,
# bid_size: 0.4650305} ]} ]
#
result = {}
for i in range(0, len(response)):
orderbook = response[i]
marketId = self.safe_string(orderbook, 'market')
symbol = self.safe_symbol(marketId, None, '-')
timestamp = self.safe_integer(orderbook, 'timestamp')
result[symbol] = {
'bids': self.sort_by(self.parse_bids_asks(orderbook['orderbook_units'], 'bid_price', 'bid_size'), 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook['orderbook_units'], 'ask_price', 'ask_size'), 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
return result
    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for a single symbol.

        Delegates to fetch_order_books() and picks out the requested
        symbol's entry (None when the exchange returned nothing for it).
        """
        orderbooks = await self.fetch_order_books([symbol], limit, params)
        return self.safe_value(orderbooks, symbol)
    def parse_ticker(self, ticker, market=None):
        """Translate one raw exchange ticker payload (sample below) into
        the unified ccxt ticker structure."""
        #
        # { market: "BTC-ETH",
        #     trade_date: "20181122",
        #     trade_time: "104543",
        #     trade_date_kst: "20181122",
        #     trade_time_kst: "194543",
        #     trade_timestamp: 1542883543097,
        #     opening_price: 0.02976455,
        #     high_price: 0.02992577,
        #     low_price: 0.02934283,
        #     trade_price: 0.02947773,
        #     prev_closing_price: 0.02966,
        #     change: "FALL",
        #     change_price: 0.00018227,
        #     change_rate: 0.0061453136,
        #     signed_change_price: -0.00018227,
        #     signed_change_rate: -0.0061453136,
        #     trade_volume: 1.00000005,
        #     acc_trade_price: 100.95825586,
        #     acc_trade_price_24h: 289.58650166,
        #     acc_trade_volume: 3409.85311036,
        #     acc_trade_volume_24h: 9754.40510513,
        #     highest_52_week_price: 0.12345678,
        #     highest_52_week_date: "2018-02-01",
        #     lowest_52_week_price: 0.023936,
        #     lowest_52_week_date: "2017-12-08",
        #     timestamp: 1542883543813 }
        #
        timestamp = self.safe_integer(ticker, 'trade_timestamp')
        # WebSocket payloads use 'code' where REST uses 'market'.
        marketId = self.safe_string_2(ticker, 'market', 'code')
        symbol = self.safe_symbol(marketId, market, '-')
        previous = self.safe_float(ticker, 'prev_closing_price')
        last = self.safe_float(ticker, 'trade_price')
        change = self.safe_float(ticker, 'signed_change_price')
        percentage = self.safe_float(ticker, 'signed_change_rate')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high_price'),
            'low': self.safe_float(ticker, 'low_price'),
            'bid': None,
            'bidVolume': None,
            'ask': None,
            'askVolume': None,
            'vwap': None,
            'open': self.safe_float(ticker, 'opening_price'),
            'close': last,
            'last': last,
            'previousClose': previous,
            'change': change,
            'percentage': percentage,
            'average': None,
            'baseVolume': self.safe_float(ticker, 'acc_trade_volume_24h'),
            'quoteVolume': self.safe_float(ticker, 'acc_trade_price_24h'),
            'info': ticker,
        }
    async def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for the given symbols.

        When symbols is None, all known market ids are requested in one
        call, guarded by the configured maximum request-URL length.
        """
        await self.load_markets()
        ids = None
        if symbols is None:
            ids = ','.join(self.ids)
            # max URL length is 2083 symbols, including http schema, hostname, tld, etc...
            if len(ids) > self.options['fetchTickersMaxLength']:
                numIds = len(self.ids)
                raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchTickers')
        else:
            ids = self.market_ids(symbols)
            ids = ','.join(ids)
        request = {
            'markets': ids,
        }
        response = await self.publicGetTicker(self.extend(request, params))
        #
        # [{                market: "BTC-ETH",
        #             trade_date: "20181122",
        #             trade_time: "104543",
        #         trade_date_kst: "20181122",
        #         trade_time_kst: "194543",
        #        trade_timestamp: 1542883543097,
        #          opening_price: 0.02976455,
        #             high_price: 0.02992577,
        #              low_price: 0.02934283,
        #            trade_price: 0.02947773,
        #     prev_closing_price: 0.02966,
        #                 change: "FALL",
        #           change_price: 0.00018227,
        #            change_rate: 0.0061453136,
        #    signed_change_price: -0.00018227,
        #     signed_change_rate: -0.0061453136,
        #           trade_volume: 1.00000005,
        #        acc_trade_price: 100.95825586,
        #    acc_trade_price_24h: 289.58650166,
        #       acc_trade_volume: 3409.85311036,
        #   acc_trade_volume_24h: 9754.40510513,
        #  highest_52_week_price: 0.12345678,
        #   highest_52_week_date: "2018-02-01",
        #   lowest_52_week_price: 0.023936,
        #    lowest_52_week_date: "2017-12-08",
        #              timestamp: 1542883543813  }]
        #
        result = {}
        for t in range(0, len(response)):
            ticker = self.parse_ticker(response[t])
            symbol = ticker['symbol']
            result[symbol] = ticker
        return self.filter_by_array(result, 'symbol', symbols)
    async def fetch_ticker(self, symbol, params={}):
        """Fetch the ticker for a single symbol via fetch_tickers()."""
        tickers = await self.fetch_tickers([symbol], params)
        return self.safe_value(tickers, symbol)
    def parse_trade(self, trade, market=None):
        """Translate one raw trade into the unified ccxt trade structure.

        Handles both public fetchTrades payloads and private order-trade
        payloads (samples below), which use different field names.
        """
        #
        # fetchTrades
        #
        #       {             market: "BTC-ETH",
        #               trade_date_utc: "2018-11-22",
        #               trade_time_utc: "13:55:24",
        #                    timestamp:  1542894924397,
        #                  trade_price:  0.02914289,
        #                 trade_volume:  0.20074397,
        #           prev_closing_price:  0.02966,
        #                 change_price:  -0.00051711,
        #                      ask_bid: "ASK",
        #                sequential_id:  15428949259430000}
        #
        # fetchOrder trades
        #
        #         {
        #             "market": "KRW-BTC",
        #             "uuid": "78162304-1a4d-4524-b9e6-c9a9e14d76c3",
        #             "price": "101000.0",
        #             "volume": "0.77368323",
        #             "funds": "78142.00623",
        #             "ask_fee": "117.213009345",
        #             "bid_fee": "117.213009345",
        #             "created_at": "2018-04-05T14:09:15+09:00",
        #             "side": "bid",
        #         }
        #
        id = self.safe_string_2(trade, 'sequential_id', 'uuid')
        orderId = None
        timestamp = self.safe_integer(trade, 'timestamp')
        if timestamp is None:
            # Order trades carry an ISO8601 'created_at' instead.
            timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
        side = None
        askOrBid = self.safe_string_lower_2(trade, 'ask_bid', 'side')
        if askOrBid == 'ask':
            side = 'sell'
        elif askOrBid == 'bid':
            side = 'buy'
        cost = self.safe_float(trade, 'funds')
        price = self.safe_float_2(trade, 'trade_price', 'price')
        amount = self.safe_float_2(trade, 'trade_volume', 'volume')
        if cost is None:
            if amount is not None:
                if price is not None:
                    cost = price * amount
        marketId = self.safe_string_2(trade, 'market', 'code')
        market = self.safe_market(marketId, market)
        fee = None
        feeCurrency = None
        symbol = None
        if market is not None:
            symbol = market['symbol']
            feeCurrency = market['quote']
        else:
            # Unknown market: reconstruct the symbol from the raw id.
            baseId, quoteId = marketId.split('-')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            feeCurrency = quote
        # The fee field name depends on the trade side ('ask_fee'/'bid_fee').
        feeCost = self.safe_string(trade, askOrBid + '_fee')
        if feeCost is not None:
            fee = {
                'currency': feeCurrency,
                'cost': feeCost,
            }
        return {
            'id': id,
            'info': trade,
            'order': orderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': None,
            'side': side,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch up to `limit` (default 200) recent public trades for symbol."""
        await self.load_markets()
        market = self.market(symbol)
        if limit is None:
            limit = 200
        request = {
            'market': market['id'],
            'count': limit,
        }
        response = await self.publicGetTradesTicks(self.extend(request, params))
        #
        #  [{             market: "BTC-ETH",
        #          trade_date_utc: "2018-11-22",
        #          trade_time_utc: "13:55:24",
        #               timestamp:  1542894924397,
        #             trade_price:  0.02914289,
        #            trade_volume:  0.20074397,
        #      prev_closing_price:  0.02966,
        #            change_price:  -0.00051711,
        #                 ask_bid: "ASK",
        #           sequential_id:  15428949259430000           },
        #    {             market: "BTC-ETH",
        #          trade_date_utc: "2018-11-22",
        #          trade_time_utc: "13:03:10",
        #               timestamp:  1542891790123,
        #             trade_price:  0.02917,
        #            trade_volume:  7.392,
        #      prev_closing_price:  0.02966,
        #            change_price:  -0.00049,
        #                 ask_bid: "ASK",
        #           sequential_id:  15428917910540000           }  ]
        #
        return self.parse_trades(response, market, since, limit)
    def parse_ohlcv(self, ohlcv, market=None):
        """Map one raw candle (sample below) to the unified OHLCV list:
        [timestamp_ms, open, high, low, close, base_volume]."""
        #
        #     {
        #         market: "BTC-ETH",
        #         candle_date_time_utc: "2018-11-22T13:47:00",
        #         candle_date_time_kst: "2018-11-22T22:47:00",
        #         opening_price: 0.02915963,
        #         high_price: 0.02915963,
        #         low_price: 0.02915448,
        #         trade_price: 0.02915448,
        #         timestamp: 1542894473674,
        #         candle_acc_trade_price: 0.0981629437535248,
        #         candle_acc_trade_volume: 3.36693173,
        #         unit: 1
        #     }
        #
        return [
            self.parse8601(self.safe_string(ohlcv, 'candle_date_time_utc')),
            self.safe_float(ohlcv, 'opening_price'),
            self.safe_float(ohlcv, 'high_price'),
            self.safe_float(ohlcv, 'low_price'),
            self.safe_float(ohlcv, 'trade_price'),
            self.safe_float(ohlcv, 'candle_acc_trade_volume'),  # base volume
        ]
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles for a symbol (default limit 200).

        The endpoint only supports an end timestamp, so `since` is
        converted into a 'to' parameter: since + timeframe * limit.
        """
        await self.load_markets()
        market = self.market(symbol)
        timeframePeriod = self.parse_timeframe(timeframe)
        timeframeValue = self.timeframes[timeframe]
        if limit is None:
            limit = 200
        request = {
            'market': market['id'],
            'timeframe': timeframeValue,
            'count': limit,
        }
        method = 'publicGetCandlesTimeframe'
        if timeframeValue == 'minutes':
            # Minute candles need an explicit unit and a different endpoint.
            numMinutes = int(round(timeframePeriod / 60))
            request['unit'] = numMinutes
            method += 'Unit'
        if since is not None:
            # convert `since` to `to` value
            request['to'] = self.iso8601(self.sum(since, timeframePeriod * limit * 1000))
        response = await getattr(self, method)(self.extend(request, params))
        #
        #     [
        #         {
        #             market: "BTC-ETH",
        #             candle_date_time_utc: "2018-11-22T13:47:00",
        #             candle_date_time_kst: "2018-11-22T22:47:00",
        #             opening_price: 0.02915963,
        #             high_price: 0.02915963,
        #             low_price: 0.02915448,
        #             trade_price: 0.02915448,
        #             timestamp: 1542894473674,
        #             candle_acc_trade_price: 0.0981629437535248,
        #             candle_acc_trade_volume: 3.36693173,
        #             unit: 1
        #         },
        #         {
        #             market: "BTC-ETH",
        #             candle_date_time_utc: "2018-11-22T10:06:00",
        #             candle_date_time_kst: "2018-11-22T19:06:00",
        #             opening_price: 0.0294,
        #             high_price: 0.02940882,
        #             low_price: 0.02934283,
        #             trade_price: 0.02937354,
        #             timestamp: 1542881219276,
        #             candle_acc_trade_price: 0.0762597110943884,
        #             candle_acc_trade_volume: 2.5949617,
        #             unit: 1
        #         }
        #     ]
        #
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
amount = amount * price
orderSide = None
if side == 'buy':
orderSide = 'bid'
elif side == 'sell':
orderSide = 'ask'
else:
raise InvalidOrder(self.id + ' createOrder allows buy or sell side only!')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': orderSide,
}
if type == 'limit':
request['volume'] = self.amount_to_precision(symbol, amount)
request['price'] = self.price_to_precision(symbol, price)
request['ord_type'] = type
elif type == 'market':
if side == 'buy':
request['ord_type'] = 'price'
request['price'] = self.price_to_precision(symbol, amount)
elif side == 'sell':
request['ord_type'] = type
request['volume'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrders(self.extend(request, params))
#
| |
<filename>whatstyle.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 <NAME> ( http://krause-software.com/ ).
#
# You are free to use this code under the MIT license:
# http://opensource.org/licenses/MIT
# This program looks at source code and generates a style definition for code formatters
# like clang-format, indent, etc. so that the reformatted source matches its current
# formatting as closely as possible.
# It should help programmers to begin using a formatting tool right away
# without the need to invest hours of reading the formatting tool
# documentation while still preserving as much of their existing formatting
# style as possible.
#
# The currently supported formatters are clang-format, YAPF, HTML tidy,
# indent, Artistic Style and uncrustify.
#
# The program basically works by reformatting the source with many
# combinations of options and running a diff between the original source
# and the reformatted source code.
# The program chooses the options resulting in the least amount of changes
# between the source and the reformatted version.
# Among a number of candidate styles with the same diff quality
# the one with the least number of explicit options is chosen
# to keep the style definition brief.
r"""
whatstyle can help you with various tasks regarding source code formatting styles.
Here is a list of examples that you can try out to become familiar with whatstyle, you need
the directory tests/examples in addition to whatstyle.py if you want to try the examples.
Substitute the example sources with your own sources when you are ready.
Basic usage: Find the best style for a source file using a suitable formatter that is
installed on your machine:
$ ./whatstyle.py tests/examples/xv6/printf.c
Show which formatters are installed on your machine:
$ ./whatstyle.py --supported # show all formatters
$ ./whatstyle.py --supported .m .cpp # show formatters that support certain extensions
Choosing a specific formatter:
$ ./whatstyle.py --formatter uncrustify tests/examples/xv6/printf.c
Make the style more resilient against formatting variations, this is advised when the best
style is actually going to be used in a project:
$ ./whatstyle.py --formatter clang-format --mode resilient tests/examples/lua/lstate.[ch]
Generate an overview of the effects of every option value variation. This shows style
option differences together with the source code differences that they cause.
$ ./whatstyle.py --variants tests/examples/lua/lstate.[ch]
Remove uninteresting options from the variants:
$ ./whatstyle.py --variants --ignoreopts ColumnLimit,IndentWidth,UseTab \
tests/examples/lua/lstate.[ch]
Show more differences per style variant:
$ ./whatstyle.py --variants --numhunks 5 tests/examples/lua/lstate.[ch]
Show the variants in a browser:
$ ./whatstyle.py --variants --html tests/examples/lua/lstate.[ch]
Show the variants with a dark theme in a browser resembling the ANSI terminal output:
$ ./whatstyle.py --variants --ansihtml tests/examples/lua/lstate.[ch]
You are looking for a specific formatter option without knowing its name. Create a small
file with a code fragment that your current formatter produces but you are unhappy with.
Then create a modified copy (e.g. hello_ref.c) that looks exactly how you would prefer it.
The options below cause whatstyle to find the differences between the style in hello.c
and hello_ref.c that you can add to your style configuration:
$ ./whatstyle.py --references --mode stylediff tests/examples/ref_space_brace/hello.c \
tests/examples/ref_space_brace/hello_ref.c
Show the differences between your current sources and these sources reformatted after
computing their best matching style:
$ ./whatstyle.py --diff tests/examples/xv6/printf.c
You have a code formatter (e.g. clang-format) that supports commonly known styles
(e.g. Mozilla or WebKit) and you'd prefer the closest common style instead of many
individual options:
$ ./whatstyle.py --maxrounds 1 tests/examples/xv6/printf.c
If you want to help the code formatter developers to reproduce formatter crashes, you could
run several formatters and search for negative return codes, the option '--debug popenio'
tells whatstyle to log the interaction with the formatter processes:
$ for f in $(./whatstyle.py --supported .c)
do
python whatstyle.py --formatter "$f" --mode resilient --cache memory \
--keeptempfiles --debug popenio tests/examples/xv6/printf.c >> dump.txt
done ; grep -C 2 "returncode:-" dump.txt
You think 'git diff' can produce superior diffs for the optimization:
$ ./whatstyle.py --difftool gitdiff tests/examples/xv6/printf.c
"""
from __future__ import print_function
__version__ = '0.1.9'
import sys
# Enforce the minimum supported interpreter versions (2.7 on the Python 2
# line, 3.2 on the Python 3 line) before any further module code runs.
if (((sys.version_info[0] == 2) and (sys.version_info[1] < 7)) or (
        (sys.version_info[0] == 3) and (sys.version_info[1] < 2))):
    sys.stderr.write('Error: Python 2.7 or when running on Python 3 at least Python 3.2'
                     ' is required to run whatstyle\n')
    sys.exit(1)
import argparse
import cgi
import codecs
import copy
import difflib
import errno
import hashlib
import heapq
import itertools
import json
try:
import multiprocessing.pool # type: ignore
except ImportError:
multiprocessing = None # type: Optional[module]
import operator
import os
import re
import signal
import shutil
try:
import sqlite3
except ImportError:
sqlite3 = None # type: ignore
import subprocess
import tempfile
import threading
import time
try:
from urlparse import urljoin
from urllib import pathname2url # type: ignore
from cgi import escape
except ImportError:
from urllib.parse import urljoin # type: ignore
from urllib.request import pathname2url
from html import escape
import traceback
import types
import warnings
import webbrowser
import zlib
try:
import xml.etree.cElementTree as ETree
except ImportError:
import xml.etree.ElementTree as ETree # type: ignore
from contextlib import contextmanager
from collections import Counter, OrderedDict, defaultdict, namedtuple
from io import BytesIO
try:
from itertools import izip # type: ignore
from itertools import izip_longest # type: ignore
except ImportError:
from itertools import zip_longest as izip_longest # type: ignore
izip = zip
try:
from typing import TypeVar
from typing import Any, AnyStr, Callable, Dict, Generator, Iterator, Iterable, List
from typing import Optional, Sequence, Text, Tuple, Union, Match, Pattern
from typing import IO
TextPair = Tuple[str, str]
BytesPair = Tuple[bytes, bytes]
OptionValue = Union[str, bool, int, 'Style']
Option = Tuple[str, str, List[OptionValue], Optional['StyleDef']]
StyleDist = Tuple[Optional['Style'], Optional[Sequence[int]]]
CallArgs = Tuple[Sequence[Any], Dict[Any, Any]]
except ImportError:
pass
from pprint import pprint
# True when running on Windows NT (the OS environment variable is set there).
WINOS = os.getenv('OS') == 'Windows_NT'
if WINOS:
    # Enable utf-8 output on Windows
    def codec_search(name):
        """Map the Windows code page alias 'cp65001' to the utf-8 codec.

        Returns None for every other name so the regular codec lookup
        machinery keeps searching, per the codecs.register contract.
        """
        if name == 'cp65001':
            return codecs.lookup('utf-8')
        return None
    codecs.register(codec_search)
MAX_FILESIZE_FOR_MULTIPROCESSING = 256 * 1024
TIMEOUT_SECONDS = 30
CONTEXTLINES = 2
LOWER_COLUMN_LIMIT = 79
UPPER_COLUMN_LIMIT = 120
HUGE_DISTANCE = 2**31 - 1
UNLIMITED = -1
USE_THREADS = False
HASHFUNC = hashlib.sha1
OK = 0
ERROR = 1
PARSING_FAILED = 1
STDERR_OUTPUT = False
CEXTS = '.c .h'
CPPEXTS = '.c++ .h++ .cxx .hxx .cpp .hpp .cc .hh'
CPPCEXTS = CEXTS + ' ' + CPPEXTS
SCALAEXTS = '.sc .scala'
REXTS = '.r .R .RData .rds .rda'
RUSTEXTS = '.rs'
SUPPORTED_EXTS = [
['clang-format', '.m .mm .java .js .ts .proto .protodevel .td ' + CPPCEXTS],
['yapf', '.py'],
['uncrustify', '.cs .m .mm .d .java .p .pawn .sma .vala .sqc ' + CPPCEXTS],
['astyle', '.m .java ' + CPPCEXTS],
['indent', '.c .h'],
['tidy', '.html .htm'],
['scalariform', SCALAEXTS],
['scalafmt', SCALAEXTS],
['rfmt', REXTS],
['rustfmt', RUSTEXTS],
]
FILENAME_SUBST = '#FILENAME#'
DIFF_SPECS = [
# difftoolname, executable, command line arguments
('difflib', sys.executable, ['-u', __file__, '--stdindiff', '--', FILENAME_SUBST]),
('diff', 'diff', ['--text', '--unified=0', '--', FILENAME_SUBST, '-']),
('gitdiff', 'git', ['--no-pager', 'diff', '--text', '--no-ext-diff', '--no-index',
'--unified=0', '--', FILENAME_SUBST, '-']),
]
BUILTIN_DIFF = DIFF_SPECS[-1]
PAGER_SPECS = [('less', ['-F', '-r', '-S', '-X']),
('more', [])] # type: List[Tuple[str, List[str]]]
STTY_CMD = '/bin/stty'
# We use a "Hello, World!" source to which we apply some modifications
# to check for the presence of a usable diff tool.
HELLOWORLD = """\
#include <stdio.h>
int main(int argc, char *argv[]) {
printf("Hello, World!\n");
return 0;
}
"""
LANG_OBJECTIVE_C = 'Objective-C'
OPTION_PRESENT = '<True>'
MODE_NORMAL = 'normal'
MODE_MAXDIFF = 'maxdiff'
MODE_RESILIENT = 'resilient'
MODE_STYLEDIFF = 'stylediff'
MODE_MINIMIZE = 'minimize'
MODE_MAXIMIZE = 'maximize'
METRIC_MINDIFF = 0
METRIC_MAXDIFF = 1
METRIC_MIN = 2
METRIC_MAX = 3
CC_OFF = 'off'
CC_THREADS = 'threads'
CC_PROCESSES = 'processes'
GLOBALTMP = 1
LOCALTMP = 0
args_info = set() # type: Set[str]
args_debug = set() # type: Set[str]
args_verbose = set() # type: Set[str]
LOGFILE = None # type: Optional[str]
LOGFILEFP = None # type: Optional[IO[Any]]
LOGSPLITDIR = None # type: Optional[str]
MESSAGE_CATEGORY_FILES = None # type: Optional[Dict[str, IO[Any]]]
NO_PROGRESS = False
# ----------------------------------------------------------------------
INFO_RESULT = 'result'
INFO_USER = 'user'
INFO_HEURISTICS = 'heuristics'
INFO_TIME = 'time'
INFO_PERF = 'perf'
INFO_ATTEMPT = 'attempt'
INFO_SKIP = 'skip'
INFO_INVALIDS = 'invalids'
INFO_PROCERRORS = 'procerrors'
INFO_PROCEXCEPTIONS = 'procexc'
# INFO_TIME is not included in INFO_ALL to produce easily diffable debug output
# between different runs of whatstyle.
INFO_ALL = [INFO_RESULT, INFO_USER, INFO_HEURISTICS, INFO_PERF, INFO_ATTEMPT, INFO_SKIP,
INFO_INVALIDS, INFO_PROCERRORS, INFO_PROCEXCEPTIONS]
INFO_IMPLIES_DICT = {} # type: Dict[str, List[str]]
DEBUG_OPTIONS = 'options'
DEBUG_STYLEDEF = 'styledef'
DEBUG_POPEN = 'popen'
DEBUG_POPENIO = 'popenio'
DEBUG_ALL = [DEBUG_OPTIONS, DEBUG_STYLEDEF, DEBUG_POPEN, DEBUG_POPENIO]
DEBUG_RUNTIME = 'runtime'
DEBUG_IMPLIES_DICT = {DEBUG_POPENIO: [DEBUG_POPEN]} # type: Dict[str, List[str]]
verbose_categories = {0: [INFO_USER],
1: [INFO_HEURISTICS, INFO_TIME, INFO_PERF],
2: [INFO_ATTEMPT, INFO_SKIP, INFO_PROCERRORS, INFO_PROCEXCEPTIONS],
3: [DEBUG_OPTIONS, DEBUG_STYLEDEF],
4: [DEBUG_POPEN]}
# ----------------------------------------------------------------------
# yapf: disable
# ----------------------------------------------------------------------
# Some functions from the MIT licensed six (https://pypi.python.org/pypi/six/)
# Copyright (c) 2010-2016 <NAME> <<EMAIL>>
# Interpreter major-version flags used to select the compat shims below.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
else:
    # Python 2 branch only: basestring, long, unicode and types.ClassType
    # do not exist on Python 3 (this branch is never compiled there at
    # runtime because names are resolved lazily).
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
try:
if | |
from abc import ABC, abstractmethod
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import os
import json
import copy
import utils
OUT_PATH = './out/'
class ForecastModel(ABC):
"""
Abstract superclass of all probabilistic forecasting models.
"""
    @abstractmethod
    def __init__(self, y, t, u=None, ID='', seed=0, global_model=False):
        """Store the data and initialize bookkeeping shared by all models.

        Parameters
        ----------
        y : measurements; 2-D with one column per series when
            global_model is True, otherwise 1-D
        t : timestamps belonging to y (half-hourly resolution)
        u : optional exogenous inputs; a 1-D array is promoted to 2-D
        ID : identifier (iterable of identifiers for a global model)
            used to label the per-series result dictionaries
        seed : random seed, stored for use by subclasses
        global_model : if True, one model is fit jointly over several
            time series
        """
        self.seed = seed
        self.global_model = global_model
        # Number of half-hour steps per day and per week.
        self.s_d = 48
        self.s_w = self.s_d * 7
        # Maximum forecast horizon
        self.max_horizon = self.s_w
        self.y = y
        self.t = t
        # Promote a 1-D input array to a single-column 2-D array.
        if u is not None and u.ndim == 1:
            u = u[:, np.newaxis]
        self.u = u
        # Mean, maximum and minimum value of the measurements
        self.y_mean = np.nanmean(y, axis=0)
        self.y_max = np.nanmax(y, axis=0)
        self.y_min = np.nanmin(y, axis=0)
        # Maximum, minimum, mean and std value of the input
        if u is not None:
            self.u_max = np.nanmax(u, axis=0, keepdims=True)
            self.u_min = np.nanmin(u, axis=0, keepdims=True)
            self.u_mean = np.nanmean(u, axis=0, keepdims=True)
            self.u_std = np.nanstd(u, axis=0, keepdims=True)
        # Timestamp of first forecast
        self.t_f = t[-1] + dt.timedelta(minutes=30)
        # Last timestamp where a forecast is available
        self.t_l = t[-1]
        # Dictionary of forecast results
        results_dict = {
            't0': [],
            'fit_time': 0,
            'prediction_time': [],
            'mean': [],
            'var': [],
            'p05': [],
            'p25': [],
            'p50': [],
            'p75': [],
            'p95': [],
            'PIT': [],
            'CRPS': [],
            'mCRPS': [],
            'rCRPS': [],
            # 'AE': [],
            'MAE': [],
            'MASE': [],
            'MAPE': [],
            'rMAE': [],
            # 'SE': [],
            'RMSE': [],
            'rRMSE': [],
        }
        self.results = []
        if self.global_model:
            # One result dict per series, labelled '<model>_<ts_ID>'.
            self.n = y.shape[1]
            for i, ts_ID in enumerate(ID):
                self.results.append(copy.deepcopy(results_dict))
                self.results[i]['ID'] = f'{self}_{ts_ID}'
        else:
            self.n = 1
            self.results.append(copy.deepcopy(results_dict))
            self.results[0]['ID'] = f'{self}_{ID}'
    @abstractmethod
    def __str__(self):
        """Return the model's name; it is used to build the result IDs
        (see __init__) and the output directory (see get_out_dir)."""
        pass
def idx(self, t, relative=True):
"""
Returns the index/indices of the timestamp(s) t. Either with respect to the first forecast timestamp
(relative=True) or with respect to the timestamp t_0 (relative=False).
"""
if isinstance(t, dt.datetime):
return int((t - (self.t_f if relative else self.t[0])).total_seconds() / (60 * 30))
else:
return np.array([
int((hh - (self.t_f if relative else self.t[0])).total_seconds() / (60 * 30)) for hh in t
])
    @abstractmethod
    def fit(self):
        """
        Abstract method (to be implemented by subclasses) that fits the model parameters to the data.

        The base implementation is a no-op.
        """
        pass
def validate_timestamps(self, t):
"""
Validates whether predictions are available for the timestamps t.
"""
if t[0] < self.t_f or t[-1] > self.t_l:
raise ValueError('No prediction available for the timestamps t.')
def validate_input(self, u):
"""
Validates whether the input u is consistently missing or consistently not missing.
"""
if self.u is None and u is not None:
raise ValueError('No initial input u available.')
if self.u is not None and u is None:
raise ValueError('Missing input u.')
    def add_measurements(self, y, t, u=None):
        """
        Appends measurements y (and optionally covariates u) for the timestamps t. Note that the timestamps t
        must be subsequent to the timestamps self.t.
        """
        # New data must start exactly one half-hour after the last stored timestamp.
        if t[0] != self.t[-1] + dt.timedelta(minutes=30):
            raise ValueError('No subsequent measurements.')
        if self.global_model:
            # 2-D storage: rows are time steps, columns are the individual series.
            self.y = np.vstack([self.y, y])
        else:
            self.y = np.hstack([self.y, y])
        # NOTE(review): relies on self.t providing a union() method
        # (presumably a pandas DatetimeIndex) — confirm against callers.
        self.t = self.t.union(t)
        self.t_l = t[-1]
        self.validate_input(u)
        if u is not None:
            # Promote 1-D input to a single-column 2-D array before stacking.
            if u.ndim == 1:
                u = u[:, np.newaxis]
            self.u = np.vstack([self.u, u])
    @abstractmethod
    def predict(self, t, u=None):
        """
        Abstract method (to be implemented by subclasses) that predicts the distribution of observations y for
        the timestamps t, optionally given covariates u. Note that the prediction has to start right after the
        last measurement and that forecasts must be subsequent.

        The base implementation returns True when the requested window is
        already covered by existing forecasts (so the subclass can skip
        recomputation) and False after successfully validating that a new
        prediction may be executed.
        """
        if t[0] >= self.t_f and t[-1] <= self.t_l:
            # Prediction already available
            return True
        if t[0] != self.t[-1] + dt.timedelta(minutes=30):
            raise ValueError('Prediction has to start right after last measurement.')
        if t[0] != self.t_l + dt.timedelta(minutes=30):
            raise ValueError('Forecasts must be subsequent.')
        if len(t) > self.max_horizon:
            raise ValueError(f'The maximum forecast horizon is {self.max_horizon} half-hours.')
        self.validate_input(u)
        # Execute prediction
        # Record the new last-forecast timestamp; the subclass computes the actual forecasts.
        self.t_l = t[-1]
        return False
    @abstractmethod
    def get_mean(self, t):
        """
        Abstract method (to be implemented by subclasses) that returns the mean forecasts for the timestamps t.

        Subclasses are expected to call this base implementation so the
        timestamps are validated before any values are returned.
        """
        self.validate_timestamps(t)
    @abstractmethod
    def get_var(self, t):
        """
        Abstract method (to be implemented by subclasses) that returns the variance forecasts for the timestamps t.

        Subclasses are expected to call this base implementation so the
        timestamps are validated before any values are returned.
        """
        self.validate_timestamps(t)
    @abstractmethod
    def get_percentile(self, p, t):
        """
        Abstract method (to be implemented by subclasses) that returns the p-percentile forecasts for the timestamps t.

        p is given in percent (e.g. 50 for the median). Subclasses are
        expected to call this base implementation so the timestamps are
        validated before any values are returned.
        """
        self.validate_timestamps(t)
def get_median(self, t):
"""
Returns the median forecasts for the timestamps t.
"""
return self.get_percentile(50, t)
    @abstractmethod
    def get_pit(self, y_true, t):
        """
        Abstract method (to be implemented by subclasses) that returns the Probability Integral Transform (PIT)
        for the timestamps t, given the true observations y_true.

        Subclasses are expected to call this base implementation so the
        timestamps are validated before any values are returned.
        """
        self.validate_timestamps(t)
    @abstractmethod
    def get_crps(self, y_true, t):
        """
        Abstract method (to be implemented by subclasses) that returns the Continuous Ranked Probability Score (CRPS)
        for the timestamps t, given the true observations y_true.

        Subclasses are expected to call this base implementation so the
        timestamps are validated before any values are returned.
        """
        self.validate_timestamps(t)
def mcrps(self, y_true, t):
"""
Computes the mean CRPS for the timestamps t.
"""
return np.nanmean(self.get_crps(y_true, t), axis=0)
def rcrps(self, y_true, t):
"""
Computes the relative CRPS for the timestamps t.
"""
return 100 * self.mcrps(y_true, t) / self.y_mean
def ae(self, y_true, t):
"""
Computes the absolute error for the timestamps t. Note that the median forecast is used as a point estimate.
"""
return np.abs(y_true - self.get_median(t))
def mae(self, y_true, t):
"""
Computes the Mean Absolute Error (MAE) for the timestamps t.
"""
return np.nanmean(self.ae(y_true, t), axis=0)
def mase(self, y_true, t):
"""
Computes the Mean Absolute Scaled Error (MASE) for the timestamps t.
"""
mae = self.mae(y_true, t)
return mae / np.nanmean(np.abs(y_true[1:] - y_true[:-1]), axis=0)
def mape(self, y_true, t):
"""
Computes the Mean Absolute Percentage Error (MAPE) for the timestamps t.
"""
ape = 100 * self.ae(y_true, t) / y_true
return np.nanmean(ape, axis=0)
def rmae(self, y_true, t):
"""
Computes the relative Mean Absolute Error (rMAE) for the timestamps t.
"""
return 100 * self.mae(y_true, t) / self.y_mean
def se(self, y_true, t):
"""
Computes the squared error for the timestamps t. Note that the mean forecast is used as a point estimate.
"""
return (y_true - self.get_mean(t)) ** 2
def rmse(self, y_true, t):
"""
Computes the Root Mean Squared Error (RMSE) for the timestamps t.
"""
return np.sqrt(np.nanmean(self.se(y_true, t), axis=0))
def rrmse(self, y_true, t):
"""
Computes the relative Root Mean Squared Error (rRMSE) for the timestamps t.
"""
return 100 * self.rmse(y_true, t) / self.y_mean
    def evaluate(self, y_true, t):
        """
        Evaluates all metrics for the true observations y_true and the timestamps t and
        saves the results to a dictionary.
        """
        # Distribution forecasts for the requested window.
        mean = self.get_mean(t)
        var = self.get_var(t)
        p_05 = self.get_percentile(5, t)
        p_25 = self.get_percentile(25, t)
        p_50 = self.get_median(t)
        p_75 = self.get_percentile(75, t)
        p_95 = self.get_percentile(95, t)
        # Probabilistic and point-forecast evaluation metrics.
        pit = self.get_pit(y_true, t)
        crps = self.get_crps(y_true, t)
        mcrps = self.mcrps(y_true, t)
        rcrps = self.rcrps(y_true, t)
        # ae = self.ae(y_true, t)
        mae = self.mae(y_true, t)
        mase = self.mase(y_true, t)
        mape = self.mape(y_true, t)
        rmae = self.rmae(y_true, t)
        # se = self.se(y_true, t)
        rmse = self.rmse(y_true, t)
        rrmse = self.rrmse(y_true, t)
        if not self.global_model:
            # Promote the single-series results to the per-column layout
            # used by the loop below (one column / list entry per series).
            mean = mean[:, np.newaxis]
            var = var[:, np.newaxis]
            p_05 = p_05[:, np.newaxis]
            p_25 = p_25[:, np.newaxis]
            p_50 = p_50[:, np.newaxis]
            p_75 = p_75[:, np.newaxis]
            p_95 = p_95[:, np.newaxis]
            pit = pit[:, np.newaxis]
            crps = crps[:, np.newaxis]
            mcrps = [mcrps]
            rcrps = [rcrps]
            # ae = ae[:, np.newaxis]
            mae = [mae]
            mase = [mase]
            mape = [mape]
            rmae = [rmae]
            # se = se[:, np.newaxis]
            rmse = [rmse]
            rrmse = [rrmse]
        for i in range(self.n):
            # Append this evaluation window's results for series i.
            self.results[i]['t0'].append(t[0].strftime('%Y-%m-%d, %H:%M'))
            self.results[i]['mean'].append(mean[:, i].tolist())
            self.results[i]['var'].append(var[:, i].tolist())
            self.results[i]['p05'].append(p_05[:, i].tolist())
            self.results[i]['p25'].append(p_25[:, i].tolist())
            self.results[i]['p50'].append(p_50[:, i].tolist())
            self.results[i]['p75'].append(p_75[:, i].tolist())
            self.results[i]['p95'].append(p_95[:, i].tolist())
            self.results[i]['PIT'].append(pit[:, i].tolist())
            self.results[i]['CRPS'].append(crps[:, i].tolist())
            self.results[i]['mCRPS'].append(mcrps[i])
            self.results[i]['rCRPS'].append(rcrps[i])
            # self.results[i]['AE'].append(ae[:, i].tolist())
            self.results[i]['MAE'].append(mae[i])
            self.results[i]['MASE'].append(mase[i])
            self.results[i]['MAPE'].append(mape[i])
            self.results[i]['rMAE'].append(rmae[i])
            # self.results[i]['SE'].append(se[:, i].tolist())
            self.results[i]['RMSE'].append(rmse[i])
            self.results[i]['rRMSE'].append(rrmse[i])
def get_out_dir(self):
"""
Returns the directory where results will be saved to.
"""
out_dir = os.path.join(OUT_PATH, self.__str__())
if not os.path.exists(out_dir):
os.makedirs(out_dir)
return out_dir
    def save_results(self):
        """
        Saves the evaluation results as a JSON file to the directory specified by get_out_dir().
        """
        out_dir = self.get_out_dir()
        for i in range(self.n):
            # One file per series, named after the result's ID.
            with open(os.path.join(out_dir, self.results[i]['ID'] + '.json'), 'w') as fp:
                # utils.round_floats presumably reduces float precision to keep
                # the JSON compact — TODO confirm against the utils module.
                json.dump(utils.round_floats(self.results[i]), fp)
def plot_forecast(self, y_true, t, plot_median=True, plot_percentiles=True, save_fig=False):
"""
Plots the median forecasts, the 50% confidence intervals and the 90% confidence intervals along with
the true observations y_true for the timestamps t. | |
# Source repository: shilpiprd/sympy
from sympy import I, log, apart, exp
from sympy.core.symbol import Dummy
from sympy.external import import_module
from sympy.functions import arg, Abs
from sympy.integrals.transforms import _fast_inverse_laplace
from sympy.physics.control.lti import SISOLinearTimeInvariant
from sympy.plotting.plot import LineOver1DRangeSeries
from sympy.polys.polytools import Poly
from sympy.printing.latex import latex
# Public API of this plotting module.
__all__ = ['pole_zero_numerical_data', 'pole_zero_plot',
    'step_response_numerical_data', 'step_response_plot',
    'impulse_response_numerical_data', 'impulse_response_plot',
    'ramp_response_numerical_data', 'ramp_response_plot',
    'bode_magnitude_numerical_data', 'bode_phase_numerical_data',
    'bode_magnitude_plot', 'bode_phase_plot', 'bode_plot']
# matplotlib and numpy are optional dependencies: import_module returns
# None when the package is unavailable instead of raising.
matplotlib = import_module(
    'matplotlib', import_kwargs={'fromlist': ['pyplot']},
    catch=(RuntimeError,))
numpy = import_module('numpy')
if matplotlib:
    plt = matplotlib.pyplot
if numpy:
    np = numpy  # Matplotlib already has numpy as a compulsory dependency. No need to install it separately.
def _check_system(system):
    """Validate that *system* can be handled by this module's plot helpers.

    Raises NotImplementedError for non-SISO systems and for systems that
    contain time-delay (exponential) terms, and ValueError when the
    transfer function has more than one free symbol.
    """
    if not isinstance(system, SISOLinearTimeInvariant):
        raise NotImplementedError("Only SISO LTI systems are currently supported.")
    expr = system.to_expr()
    if len(expr.free_symbols) > 1:
        raise ValueError("Extra degree of freedom found. Make sure"
            " that there are no free symbols in the dynamical system other"
            " than the variable of Laplace transform.")
    if expr.has(exp):
        raise NotImplementedError("Time delay terms are not supported.")
def pole_zero_numerical_data(system):
    """
    Returns the numerical data of poles and zeros of the system.

    It is internally used by ``pole_zero_plot`` to get the data
    for plotting poles and zeros. Users can use this data to further
    analyse the dynamics of the system or plot using a different
    backend/plotting-module.

    Parameters
    ==========

    system : SISOLinearTimeInvariant
        The system for which the pole-zero data is to be computed.

    Returns
    =======

    tuple : (zeros, poles)
        zeros = Zeros of the system. NumPy array of complex numbers.
        poles = Poles of the system. NumPy array of complex numbers.

    Raises
    ======

    NotImplementedError
        When a SISO LTI system is not passed.

        When time delay terms are present in the system.

    ValueError
        When more than one free symbol is present in the system.
        The only variable in the transfer function should be
        the variable of the Laplace transform.

    Examples
    ========

    >>> from sympy.abc import s
    >>> from sympy.physics.control.lti import TransferFunction
    >>> from sympy.physics.control.control_plots import pole_zero_numerical_data
    >>> tf1 = TransferFunction(s**2 + 1, s**4 + 4*s**3 + 6*s**2 + 5*s + 2, s)
    >>> pole_zero_numerical_data(tf1)   # doctest: +SKIP
    ([-0.+1.j  0.-1.j], [-2. +0.j -0.5+0.8660254j -0.5-0.8660254j -1. +0.j ])

    See Also
    ========

    pole_zero_plot

    """
    _check_system(system)
    system = system.doit()  # Get the equivalent TransferFunction object.

    # Polynomial coefficients (highest power first) as floats so that
    # numpy.roots can operate on them.
    numerator_coeffs = np.array(
        Poly(system.num, system.var).all_coeffs(), dtype=np.float64)
    denominator_coeffs = np.array(
        Poly(system.den, system.var).all_coeffs(), dtype=np.float64)

    return np.roots(numerator_coeffs), np.roots(denominator_coeffs)
def pole_zero_plot(system, pole_color='blue', pole_markersize=10,
    zero_color='orange', zero_markersize=7, grid=True, show_axes=True,
    show=True, **kwargs):
    r"""
    Returns the Pole-Zero plot (also known as PZ Plot or PZ Map) of a system.

    A Pole-Zero plot is a graphical representation of a system's poles and
    zeros. It is plotted on a complex plane, with circular markers representing
    the system's zeros and 'x' shaped markers representing the system's poles.

    Parameters
    ==========

    system : SISOLinearTimeInvariant type systems
        The system for which the pole-zero plot is to be computed.
    pole_color : str, tuple, optional
        The color of the pole points on the plot. Default color
        is blue. The color can be provided as a matplotlib color string,
        or a 3-tuple of floats each in the 0-1 range.
    pole_markersize : Number, optional
        The size of the markers used to mark the poles in the plot.
        Default pole markersize is 10.
    zero_color : str, tuple, optional
        The color of the zero points on the plot. Default color
        is orange. The color can be provided as a matplotlib color string,
        or a 3-tuple of floats each in the 0-1 range.
    zero_markersize : Number, optional
        The size of the markers used to mark the zeros in the plot.
        Default zero markersize is 7.
    grid : boolean, optional
        If ``True``, the plot will have a grid. Defaults to True.
    show_axes : boolean, optional
        If ``True``, the coordinate axes will be shown. Defaults to True.
    show : boolean, optional
        If ``True``, the plot will be displayed otherwise
        the equivalent matplotlib ``plot`` object will be returned.
        Defaults to True.

    Examples
    ========

    .. plot::
        :context: close-figs
        :format: doctest
        :include-source: True

        >>> from sympy.abc import s
        >>> from sympy.physics.control.lti import TransferFunction
        >>> from sympy.physics.control.control_plots import pole_zero_plot
        >>> tf1 = TransferFunction(s**2 + 1, s**4 + 4*s**3 + 6*s**2 + 5*s + 2, s)
        >>> pole_zero_plot(tf1)   # doctest: +SKIP

    See Also
    ========

    pole_zero_numerical_data

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Pole%E2%80%93zero_plot

    """
    zeros, poles = pole_zero_numerical_data(system)
    # Split the complex roots into their real and imaginary parts for plotting.
    zero_real = np.real(zeros)
    zero_imag = np.imag(zeros)
    pole_real = np.real(poles)
    pole_imag = np.imag(poles)
    # Poles as 'x' markers, zeros as circles.
    plt.plot(pole_real, pole_imag, 'x', mfc='none',
        markersize=pole_markersize, color=pole_color)
    plt.plot(zero_real, zero_imag, 'o', markersize=zero_markersize,
        color=zero_color)
    plt.xlabel('Real Axis')
    plt.ylabel('Imaginary Axis')
    plt.title(f'Poles and Zeros of ${latex(system)}$', pad=20)
    if grid:
        plt.grid()
    if show_axes:
        plt.axhline(0, color='black')
        plt.axvline(0, color='black')
    if show:
        plt.show()
        return
    # Caller renders/saves the figure; hand back the pyplot module.
    return plt
def step_response_numerical_data(system, prec=8, lower_limit=0,
    upper_limit=10, **kwargs):
    """
    Returns the numerical values of the points in the step response plot
    of a SISO continuous-time system. By default, adaptive sampling
    is used. If the user wants to instead get an uniformly
    sampled response, then ``adaptive`` kwarg should be passed ``False``
    and ``nb_of_points`` must be passed as additional kwargs.
    Refer to the parameters of class :class:`sympy.plotting.plot.LineOver1DRangeSeries`
    for more details.

    Parameters
    ==========

    system : SISOLinearTimeInvariant
        The system for which the unit step response data is to be computed.
    prec : int, optional
        The decimal point precision for the point coordinate values.
        Defaults to 8.
    lower_limit : Number, optional
        The lower limit of the plot range. Defaults to 0.
    upper_limit : Number, optional
        The upper limit of the plot range. Defaults to 10.
    kwargs :
        Additional keyword arguments are passed to the underlying
        :class:`sympy.plotting.plot.LineOver1DRangeSeries` class.

    Returns
    =======

    tuple : (x, y)
        x = Time-axis values of the points in the step response. NumPy array.
        y = Amplitude-axis values of the points in the step response. NumPy array.

    Raises
    ======

    NotImplementedError
        When a SISO LTI system is not passed.

        When time delay terms are present in the system.

    ValueError
        When more than one free symbol is present in the system.
        The only variable in the transfer function should be
        the variable of the Laplace transform.

        When ``lower_limit`` parameter is less than 0.

    Examples
    ========

    >>> from sympy.abc import s
    >>> from sympy.physics.control.lti import TransferFunction
    >>> from sympy.physics.control.control_plots import step_response_numerical_data
    >>> tf1 = TransferFunction(s, s**2 + 5*s + 8, s)
    >>> step_response_numerical_data(tf1)   # doctest: +SKIP
    ([0.0, 0.025413462339411542, 0.0484508722725343, ... , 9.670250533855183, 9.844291913708725, 10.0],
    [0.0, 0.023844582399907256, 0.042894276802320226, ..., 6.828770759094287e-12, 6.456457160755703e-12])

    See Also
    ========

    step_response_plot

    """
    # A step response is only defined for non-negative time.
    if lower_limit < 0:
        raise ValueError("Lower limit of time must be greater "
            "than or equal to zero.")
    _check_system(system)
    _x = Dummy("x")
    # Unit step input corresponds to dividing the transfer function by s,
    # i.e. the Laplace variable of the system.
    expr = system.to_expr()/(system.var)
    # Partial-fraction decomposition so the inverse Laplace transform can
    # be taken term by term.
    expr = apart(expr, system.var, full=True)
    _y = _fast_inverse_laplace(expr, system.var, _x).evalf(prec)
    return LineOver1DRangeSeries(_y, (_x, lower_limit, upper_limit),
        **kwargs).get_points()
def step_response_plot(system, color='b', prec=8, lower_limit=0,
upper_limit=10, show_axes=False, grid=True, show=True, **kwargs):
r"""
Returns the unit step response of a continuous-time system. It is
the response of the system when the input signal is a step function.
Parameters
==========
system : SISOLinearTimeInvariant type
The LTI SISO system for which the Step Response is to be computed.
color : str, tuple, optional
The color of the line. Default is Blue.
show : boolean, optional
If ``True``, the plot will be displayed otherwise
the equivalent matplotlib ``plot`` object will be returned.
Defaults to True.
lower_limit : Number, optional
The lower limit of the plot range. Defaults to 0.
upper_limit : Number, optional
The upper limit of the plot range. Defaults to 10.
prec : int, optional
The decimal point precision for the point coordinate values.
Defaults to 8.
show_axes : boolean, optional
If ``True``, the coordinate axes will be shown. Defaults to False.
grid : boolean, optional
If ``True``, the plot will have a grid. Defaults to True.
Examples
========
.. plot::
:context: close-figs
:format: doctest
:include-source: True
>>> from sympy.abc import s
>>> from sympy.physics.control.lti import TransferFunction
>>> from sympy.physics.control.control_plots import step_response_plot
>>> tf1 = TransferFunction(8*s**2 + 18*s + 32, s**3 + 6*s**2 + 14*s + 24, s)
>>> step_response_plot(tf1) # doctest: +SKIP
See Also
========
impulse_response_plot, ramp_response_plot
References
==========
.. [1] https://www.mathworks.com/help/control/ref/lti.step.html
"""
x, y = step_response_numerical_data(system, prec=prec,
lower_limit=lower_limit, upper_limit=upper_limit, **kwargs)
plt.plot(x, | |
import math
from collections import namedtuple
import colorhash
import dask
import dask.dataframe
import numpy
import pandas
from matplotlib import pyplot
from scipy.stats import kstest, lognorm, multivariate_normal
from sklearn.decomposition import PCA
from sklearn.impute import SimpleImputer
class AbnormalTaskPerfAnalyzer:
    """
    Most of this analysis is cribbed from the jivita_worker_perf.ipynb notebook - this code is now
    newer, so that notebook should be converted over to manually test that the analysis here works
    as-expected. (Notebooks have access to this code too.)

    Note that this class *must be serializable* after __init__ - it may get passed to a local or
    remote process for execution. This means we can't pass local dask data frames as inputs, for
    example, as they rely on local state (distributed dataframes we probably can).
    """
    def __init__(
        self,
        task_window,
        history_dt_interval,
        test_dt_interval,
        *,
        worker_group_cols=None,
        task_group_cols=None,
        start_dt_col=None,
        elapsed_time_col=None,
        outlier_pct=0.10,
        max_model_err_pct=0.25,
        min_event_time_resolution=1.0,
        max_exvar_in_metrics_pct=0.90,
        logpdf_bucket_size=0.01,
        random_seed=1,
        with_figures=False,
    ):
        """Capture the analysis configuration; no computation happens here.

        history_dt_interval/test_dt_interval are normalized via to_dt —
        presumably that accepts strings as well as datetimes; TODO confirm.
        """
        self.task_window = task_window
        self.history_dt_interval = to_dt(history_dt_interval)
        self.test_dt_interval = to_dt(test_dt_interval)
        self.worker_group_cols = worker_group_cols
        self.task_group_cols = task_group_cols
        self.start_dt_col = start_dt_col
        self.elapsed_time_col = elapsed_time_col
        self.outlier_pct = outlier_pct
        self.min_event_time_resolution = min_event_time_resolution
        self.max_model_err_pct = max_model_err_pct
        self.max_exvar_in_metrics_pct = max_exvar_in_metrics_pct
        self.logpdf_bucket_size = logpdf_bucket_size
        self.random_seed = random_seed
        self.with_figures = with_figures

    def _to_task_ddf(self, task_window):
        """Coerce *task_window* into a dask DataFrame.

        Accepts a pandas DataFrame (wrapped into one partition), an
        existing dask DataFrame (returned as-is), or a project window
        object exposing clone_lazy()/to_dask_frame() — TODO confirm that
        interface.
        """
        if isinstance(task_window, pandas.DataFrame):
            return dask.dataframe.from_pandas(task_window, npartitions=1)
        if isinstance(task_window, dask.dataframe.DataFrame):
            return task_window
        # Restrict the lazy window to the earliest datetime either interval needs.
        task_window = task_window.clone_lazy(
            start_dt=min(self.history_dt_interval[0], self.test_dt_interval[0])
        )
        return task_window.to_dask_frame()

    def analyze(self):
        """
        The core of the analysis, step-by-step
        """
        # If we needed to do anything distributed because of a huge number of tasks, we could
        # do that here as a ddf. Right now we aren't.
        task_ddf = self._to_task_ddf(self.task_window)
        task_df = pandas.DataFrame(task_ddf.compute())
        # Seed NumPy's global RNG so the bootstrap/statistical steps are reproducible.
        numpy.random.seed(self.random_seed)
        self.historical_task_df = tasks_in_dt_interval(
            task_df, self.start_dt_col, self.history_dt_interval
        )
        self.test_task_df = tasks_in_dt_interval(task_df, self.start_dt_col, self.test_dt_interval)
        # Build statistical task-time models for the different groups of historical tasks
        self.task_group_models = build_task_group_models(
            self.historical_task_df,
            self.task_group_cols,
            self.elapsed_time_col,
            outlier_pct=self.outlier_pct,
            figs=False,
        )
        # Compute surprise of test data based on the model
        self.full_surprise_df = build_worker_group_surprise_df(
            self.test_task_df,
            self.worker_group_cols,
            self.elapsed_time_col,
            self.task_group_models,
            max_model_err_pct=self.max_model_err_pct,
            min_event_time_resolution=self.min_event_time_resolution,
        )
        # For now, reduce dimensions to only mean surprisal, but save the full surprisal for sorting
        # degenerate cases of 1, 2, 3 workers later.
        self.surprise_df = self.full_surprise_df.loc[
            :, filter(lambda c: len(c[0]) > 0 and c[-1] == "mean", self.full_surprise_df.columns)
        ]
        # Project the surprise features into a lower-dimensional PCA space and fit
        # a multivariate-normal "normality" model over it.
        self.metrics_df = build_pca_df(self.surprise_df, n_components=self.max_exvar_in_metrics_pct)
        self.normal_model = build_mv_normality_model(self.metrics_df, outlier_pct=self.outlier_pct)
        logpdf = self.normal_model.logpdf(self.metrics_df.values)
        # Bucket log-densities so near-equal groups tie and are then ordered
        # by total surprise as a secondary key.
        logpdf_bucket = numpy.floor(logpdf / self.logpdf_bucket_size) * self.logpdf_bucket_size
        dist_from_mean = numpy.linalg.norm(self.metrics_df.values - self.normal_model.mean, axis=1)
        total_surprise = self.full_surprise_df.loc[:, ""]["sum"]
        # ... and sort our groups by how weird they look in the lower-d space
        self.normal_df = pandas.DataFrame(
            {
                "dist_from_mean": dist_from_mean,
                "logpdf_bucket": logpdf_bucket,
                "total_surprise": total_surprise,
            },
            index=self.surprise_df.index,
        )
        self.normal_df.sort_values(
            ["logpdf_bucket", "total_surprise"], ascending=[False, True], inplace=True
        )
        # Determine the (surprise) features which contributed most to each group's abnormality
        self.abnormal_surprise_df = build_feature_abnormality_df(
            self.surprise_df, self.metrics_df, self.normal_model
        )
        self.abnormal_surprise_df = self.abnormal_surprise_df.reindex(self.normal_df.index)
        self.result_df = pandas.concat(
            [self.normal_df, self.abnormal_surprise_df, self.metrics_df], axis=1
        )
        # Plot some figures if asked
        if self.with_figures:
            # Figures for the 3 most and 3 least abnormal groups.
            self.figures = build_abnormal_task_model_figures(
                self.test_task_df,
                self.elapsed_time_col,
                self.task_group_models,
                pandas.concat(
                    [self.abnormal_surprise_df.iloc[:3, :], self.abnormal_surprise_df.iloc[-3:, :]]
                ),
            )
            # NOTE(review): attaching an ad-hoc attribute to a DataFrame is fragile —
            # pandas does not preserve it across most operations; confirm consumers.
            self.result_df.figures = self.figures
        return self.result_df
def build_task_group_models(
    task_df, task_group_cols, elapsed_time_col, *, outlier_pct=0.0, figs=None
):
    """Fit one elapsed-time model per task group.

    Groups *task_df* by *task_group_cols*, fits a task-time model to each
    group's elapsed times, tags the model with its group definition
    (``model.group_def``) and returns the list of fitted models.
    """
    group_models = []
    for group_key, group_frame in task_df.groupby(task_group_cols):
        group_key = to_tuple(group_key)
        # Work on an independent frame with a clean 0..n-1 index.
        group_frame = group_frame.copy()
        group_frame.reset_index(drop=True, inplace=True)
        fig_name = str(group_key) if figs else None
        group_model = build_task_time_model(
            group_frame, elapsed_time_col, outlier_pct=outlier_pct, fig=fig_name
        )
        group_model.group_def = tuple(zip(task_group_cols, group_key))
        group_models.append(group_model)
    return group_models
def bootstrap_estimates(samples, estimator, num_tests=50):
    """Bootstrap the sampling distribution of *estimator*.

    Draws *num_tests* resamples of *samples* (with replacement, each the
    same size as the original) and applies *estimator* to each resample.
    Returns the list of estimates. Uses numpy's global RNG.
    """
    sample_size = len(samples)
    return [
        estimator(numpy.random.choice(samples, sample_size))
        for _ in range(num_tests)
    ]
def build_task_time_model(
    task_df, elapsed_time_col, *, outlier_pct=0.0, fig=None, fig_max_sample_ksd_median=0.2
):
    """Fit a log-normal model to the elapsed times of one group of tasks.

    Parameters:
        task_df - DataFrame of tasks for a single group.
        elapsed_time_col - name of the elapsed-time column (values must be
            positive, since they are passed through numpy.log — TODO confirm
            callers guarantee this).
        outlier_pct - total fraction trimmed symmetrically (outlier_pct/2 from
            each tail) before fitting; only applied when len(task_df) > 5.
        fig - falsy to skip plotting; a string is used as the figure title.
        fig_max_sample_ksd_median - only plot models whose bootstrapped median
            KS distance is at most this (i.e. reasonably good fits).

    Returns a frozen scipy ``lognorm`` distribution decorated with extra
    attributes: nonoutliers, outliers, kstest, sample_ksd_mean/median/std,
    expected_err_pct, and (optionally) fig.
    """
    # Nulls are always treated as outliers.
    outlier_test = task_df[elapsed_time_col].isnull()
    if len(task_df) > 5:
        # Trim outlier_pct/2 from each tail of the elapsed-time distribution.
        outlier_min, outlier_max = numpy.percentile(
            task_df[elapsed_time_col], [(outlier_pct / 2.0) * 100, 100 - (outlier_pct / 2.0) * 100]
        )
        outlier_test = (
            outlier_test
            | (task_df[elapsed_time_col] < outlier_min)
            | (task_df[elapsed_time_col] > outlier_max)
        )
    nonoutliers = task_df.loc[~outlier_test]
    outliers = task_df.loc[outlier_test]
    # model = lognorm(*lognorm.fit(nonoutliers[elapsed_time_col]))
    # Method-of-moments fit in log space: s is the shape (std of log-times),
    # scale is exp(mean of log-times).
    model = lognorm(
        s=numpy.std(numpy.log(nonoutliers[elapsed_time_col])),
        scale=numpy.exp(numpy.mean(numpy.log(nonoutliers[elapsed_time_col]))),
    )
    # Stash the data splits and fit diagnostics on the model object itself.
    model.nonoutliers = nonoutliers
    model.outliers = outliers
    model.kstest = kstest(nonoutliers[elapsed_time_col], model.cdf)
    # Here we want to run some tests on how close a model is compared to the data. There are a number of statistical tests, Chi^2 for
    # discrete and KS for continuous are pretty standard options. The metric of the KS test is the maximum distance between the cdfs of
    # the sample and model distributions.
    #
    # The output of this test also gives us a p-value, like Chi^2, which is a probability that the distributions are exactly the same.
    # For our purposes we pretty much know this is an estimate so that particular p-value isn't helpful as a "goodness of fit"
    # indicator. For example, we get low p-values for very closely fit distributions of thousands of points (max cdf diff is < 0.05)
    # because we have so much data that "we're sure" that even that close approximation isn't exactly right. This is true, but not
    # helpful in this context. Other tests with p-values would have the same issue.
    #
    # What we arguably want to threshold on is the KS metric distance (D-value) but we also need a sense of how robust that metric is -
    # data sets with small numbers of points can sometimes give us low D-values but in some sense we'd like to be less sure of that
    # number. (Tried to use high/low p-value cutoffs, this didn't seem to give consistent results but that's a TODO). As a more-direct
    # heuristic measure of how robust the D-value is, we instead bootstrap the sampled data to get a measure of how good the fit "usually" is.
    ksds = bootstrap_estimates(nonoutliers[elapsed_time_col], lambda s: kstest(s, model.cdf)[0])
    model.sample_ksd_mean = numpy.mean(ksds)
    model.sample_ksd_median = numpy.median(ksds)
    model.sample_ksd_std = numpy.std(ksds)
    # One pessimistic number summarizing expected model error (mean + 1 std).
    model.expected_err_pct = model.sample_ksd_mean + model.sample_ksd_std
    if numpy.isnan(model.expected_err_pct):
        # NaN happens when the bootstrap had nothing usable; treat as maximally
        # uncertain so downstream thresholds skip this model.
        model.expected_err_pct = 1.0
    if fig and model.sample_ksd_median <= fig_max_sample_ksd_median:
        title = None
        if isinstance(fig, str):
            title = fig
        # Histogram of the data with the fitted pdf overlaid on a twin axis.
        fig = pyplot.figure()
        axs = fig.add_subplot(1, 1, 1)
        axs_pdf = axs.twinx()
        axs.hist(nonoutliers[elapsed_time_col], alpha=0.5)
        pdf_range = model.interval(0.99)
        pdf_x = numpy.linspace(pdf_range[0], pdf_range[1], 100)
        axs_pdf.scatter(pdf_x, model.pdf(pdf_x), alpha=0.5)
        axs.set_title(
            (title + " " if title else "")
            + f"n: {len(model.nonoutliers)} ks: {model.kstest} {model.sample_ksd_median}"
        )
        axs.set_ylabel("# of Tasks")
        axs_pdf.set_ylabel("Modeled Probability")
        axs.set_xlabel("Task Time")
        axs_pdf.set_ylim(0, axs_pdf.get_ylim()[1])
        fig.set_size_inches(5, 5)
        model.fig = fig
    return model
#
# Compute surprise of a measure based on a probability model of that measure. The model need not
# be anything in particular but it does need to be continuous.
#
def build_worker_group_surprise_df(
    task_df, worker_group_cols, elapsed_time_col, task_group_models, **kwargs
):
    """Aggregate per-task surprise by worker group.

    Computes per-task surprise against each task-group model, then returns
    mean/sum/count aggregates per worker group — both per model column and for
    the per-task total (under the empty-tuple column label).
    """
    per_task = build_surprise_df(task_df, elapsed_time_col, task_group_models, **kwargs)

    def _grouped_stats(frame):
        # Attach the worker-group key columns, then aggregate within groups.
        joined = pandas.concat([task_df[worker_group_cols], frame], axis=1)
        return joined.groupby(worker_group_cols).agg(["mean", "sum", "count"])

    totals = pandas.DataFrame(per_task.sum(axis=1), columns=[("",)])
    return pandas.concat([_grouped_stats(per_task), _grouped_stats(totals)], axis=1)
def build_surprise_df(
task_df,
elapsed_time_col,
task_group_models,
*,
max_model_err_pct=1.0,
min_event_time_resolution=1.0,
):
surprise_series = {}
for model in task_group_models:
if model.expected_err_pct > max_model_err_pct:
continue
# A measure of surprise when using a continuous distribution has a scaling problem - if we take our samples as
# one or more delta functions compared with a distribution the integral formula gives us either infinities or
# negative information - there's some discussion here:
# https://stats.stackexchange.com/questions/211175/kullback-leibler-divergence-for-two-samples
# for example.
#
# The workaround is to choose a bucket size - we do this by using the expected %pct error (uncertainty) in the model.
# Using uncertainty has the nice feature that tight models give more information and loose models give much less, so
# the threshold above reduces unhelpful computation but naturally removes models with relatively low information.
#
sample_interval = (
min(model.nonoutliers[elapsed_time_col]),
max(model.nonoutliers[elapsed_time_col]),
)
assert not numpy.isnan(sample_interval[0])
assert not numpy.isnan(sample_interval[1])
assert sample_interval[1] != sample_interval[0]
sample_interval_pct = model.cdf(sample_interval[1]) - model.cdf(sample_interval[0])
assert not numpy.isnan(sample_interval_pct)
bucket_size = max(
(sample_interval[1] - sample_interval[0])
* (model.expected_err_pct / sample_interval_pct),
min_event_time_resolution,
)
def compute_surprise_bits(row):
for group_col, group_val in model.group_def:
if row.loc[group_col] != group_val:
return numpy.NaN
task_time = row.loc[elapsed_time_col]
start_time, end_time = task_time - (bucket_size / 2.0), task_time + (bucket_size / 2.0)
if start_time < 0:
start_time, end_time = 0, bucket_size
# Use log version of cdf to compute log probability in a (potentially) more numerically
# stable way.
u, v = model.logcdf(end_time), model.logcdf(start_time)
log_prob = (u + numpy.log1p(-numpy.exp(v - u))) / numpy.log(2)
assert not numpy.isnan(log_prob)
return -1.0 * log_prob
# task_time_prob = model.cdf(task_time + (bucket_size / 2.0)) - model.cdf(
# task_time - (bucket_size / 2.0)
# )
# return -1.0 * numpy.log2(task_time_prob)
surprise_series[(model.group_def,)] | |
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictCustomizedMultipleThirdOrderPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[105.28333074423442],
[72.81181980293967],
[56.899154811293464],
[36.45941710222553],
[46.042387049575304]]
"""
def predictCustomizedMultipleThirdOrderPolynomialRegression(self, coefficients):
numberOfRows = len(self.x_samplesList)
# We obtain the predicted data of the desired independent given values
predictedData = []
x1 = 0
x2 = 1
for row in range(0, numberOfRows):
temporalRow = []
actualIc = coefficients[0][0] + coefficients[1][0]*self.x_samplesList[row][x1] + coefficients[2][0]*self.x_samplesList[row][x1]**2 + coefficients[3][0]*self.x_samplesList[row][x1]**3 + coefficients[4][0]*self.x_samplesList[row][x2] + coefficients[5][0]*self.x_samplesList[row][x2]**2 + coefficients[6][0]*self.x_samplesList[row][x2]**3 + coefficients[7][0]*self.x_samplesList[row][x1]*self.x_samplesList[row][x2] + coefficients[8][0]*(self.x_samplesList[row][x1]**2)*self.x_samplesList[row][x2] + coefficients[9][0]*self.x_samplesList[row][x1]*(self.x_samplesList[row][x2]**2)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
# We return the predicted data
return predictedData
"""
Classification("x independent variable datapoints to model", "y dependent variable datapoints to model")
The Classification library gives several methods to be able to get the best
fitting classification model to predict a determined classification problem.
"""
class Classification:
def __init__(self, x_samplesList, y_samplesList):
self.y_samplesList = y_samplesList
self.x_samplesList = x_samplesList
    def set_xSamplesList(self, x_samplesList):
        """Replace the stored independent-variable (x) samples, e.g. to predict new points."""
        self.x_samplesList = x_samplesList
    def set_ySamplesList(self, y_samplesList):
        """Replace the stored dependent-variable (y) samples."""
        self.y_samplesList = y_samplesList
"""
getSupportVectorMachine(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Linear Support Vector Machine model to
be able to predict a classification problem of any number of independent
variables (x).
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getSupportVectorMachine(evtfbmip = True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[1.5736095873424212], [-0.26050769870994606], [-0.25468164794007475]]
accuracyFromTraining =
88.88888888888889
predictedData = [
[1],
[1],
[-1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
coefficientDistribution =
'Coefficients distribution is as follows: b1*x1 + b2*x2 + ... + bn*xn >= -bo (As a note, remember that true equation representation is: w.x>=c)'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getSupportVectorMachine(self, evtfbmip=True):
getOptimizedRegression = evtfbmip
numberOfRows = len(self.y_samplesList)
matrix_x = self.x_samplesList
matrix_y = self.y_samplesList
for row in range(0, numberOfRows):
if ((self.y_samplesList[row][0]!=1) and (self.y_samplesList[row][0]!=-1)):
raise Exception('ERROR: One of the dependent (y) data points does not have exactly a 1 or a -1 as value. Note that in this library, the Support Vector Machine method needs to process your data to have either +1 or -1 as values.')
# We apply a Multiple Linear Regression to get the coefficient values
# for our Linear Support Vector Machine Model
from . import MortrackML_Library as mSL
import math
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getMultipleLinearRegression(evtfbmip = getOptimizedRegression)
svcCoefficients = modelingResults[0]
svcPredictedData = modelingResults[2]
# ---------------------------------- #
    # ----- b0 Coefficient Tuning ------ #
# ---------------------------------- #
# Through the best fitting Multiple Linear Regression, we make a
# new search to try to find a better fitting b0 coefficient value
# that best fits the conditional of the equation that we actually
# want to solve (w.x>=-b0)
import numpy as np
rangeOfPredictedData = max(svcPredictedData)[0] - min(svcPredictedData)[0]
# linspace(start, stop, num=50)
bStepValues = np.linspace(svcCoefficients[0][0]-rangeOfPredictedData, svcCoefficients[0][0]+rangeOfPredictedData, num=100)
numberOfCoefficients = len(svcCoefficients)
best_b_value = 0
bestPredictedData = 0
bestPredictionAccuracy = 0
# We first get the b value that first pops and that has the highest
# accuracy
for currentStepValue in range(0, len(bStepValues)):
current_b_value = bStepValues[currentStepValue]
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
for column in range(0, numberOfCoefficients-1):
wx = wx + (matrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
if (predictionAcurracy > bestPredictionAccuracy):
best_b_value = current_b_value
bestPredictedData = predictedData
bestPredictionAccuracy = predictionAcurracy
# Now that we now what value of b0 gives the best accuracy, we look
# forward to find the range of the b0 values that gives such best
# accuracy
best_b_value_1 = best_b_value
best_b_value_2 = 0
isBest_b_value = False
for currentStepValue in range(0, len(bStepValues)):
current_b_value = bStepValues[currentStepValue]
if (current_b_value == best_b_value_1):
isBest_b_value = True
if (isBest_b_value == True):
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
for column in range(0, numberOfCoefficients-1):
wx = wx + (matrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
if (predictionAcurracy == bestPredictionAccuracy):
best_b_value_2 = current_b_value
# We find best fitting b0 coefficient value through exponential
# method
b0_sign = 1
if ((best_b_value_1+best_b_value_2)<0):
b0_sign = -1
best_b_value = (math.log(abs(best_b_value_1)) + math.log(abs(best_b_value_2)))/2
best_b_value = b0_sign*math.exp(best_b_value)
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
for column in range(0, numberOfCoefficients-1):
wx = wx + (matrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
        # We verify whether the exponential method was the best choice to pick
        # the best fitting b0 coefficient. If this isn't true, we then try
        # again, but with the mean value of the b0 coefficient range that we
        # obtained earlier.
if ((best_b_value<min([best_b_value_1, best_b_value_2])) or (best_b_value>max([best_b_value_1, best_b_value_2])) or (predictionAcurracy<bestPredictionAccuracy)):
best_b_value = (best_b_value_1+best_b_value_2)/2
# We get the predicted data with the trained Kernel Support Vector
# Classification (K-SVC) model
predictedData = []
for row in range(0, numberOfRows):
temporalRow = []
wx = 0
for column in range(0, numberOfCoefficients-1):
wx = wx + (matrix_x[row][column])*svcCoefficients[column+1][0]
c = -current_b_value # c=ln(y=0)-b0
if (wx >= c):
temporalRow.append(1) # Its a positive sample
else:
temporalRow.append(-1) # Its a negative sample
predictedData.append(temporalRow)
predictionAcurracy = 0
n2 = 0
n1 = 0
for row in range(0, numberOfRows):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (n1 == n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfRows*100
# If neither the exponential nor the mean methods work to get the
# best fitting b0 coefficient value, we then just pick the initial
# best fitting b0 value that we identified in this | |
# -*- coding: utf-8 -*-
"""
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1td73szujFTuIVA5lCbJcA95urAspAXvN
# Neural Style Transfer
"""
from tensorflow.python.keras import models
import IPython.display
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras.preprocessing import image as kp_image
import tensorflow.contrib.eager as tfe
import tensorflow as tf
import functools
import time
from PIL import Image
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
style_path = './starry_night.jpg'
# content_path = the path to your own photo
content_path = './golden_gate.jpg'
"""### Import and configure modules"""
mpl.rcParams['figure.figsize'] = (10, 10)
mpl.rcParams['axes.grid'] = False
"""We’ll begin by enabling [eager execution](https://www.tensorflow.org/guide/eager). Eager execution allows us to work through this technique in the clearest and most readable way. """
tf.enable_eager_execution()
print("Eager execution: {}".format(tf.executing_eagerly()))
"""
```
content_path =你的照片
style_path = '畫家的照片'
```"""
# Set up some global values here
# NOTE(review): these re-assignments override the './'-prefixed paths set
# above, so the earlier values are dead — confirm which pair is intended.
content_path = 'golden_gate.jpg'
style_path = 'starry_night.jpg'
# style_path = 'Good1.jpg'
"""## Visualize the input"""
def load_img(path_to_img):
    """Load an image, scale its longest side to max_dim pixels (preserving
    aspect ratio), and add a leading batch axis.

    Returns a numpy array of shape (1, height, width, channels).
    """
    max_dim = 512
    img = Image.open(path_to_img)
    longest_side = max(img.size)
    scale = max_dim / longest_side
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
    # long-standing alias for the same resampling filter.
    img = img.resize(
        (round(img.size[0] * scale), round(img.size[1] * scale)), Image.LANCZOS)
    img = kp_image.img_to_array(img)
    # We need to broadcast the image array such that it has a batch dimension
    img = np.expand_dims(img, axis=0)
    return img
def imshow(img, title=None):
    """Display a batched image array (1, H, W, C), with an optional title."""
    # Remove the batch dimension
    out = np.squeeze(img, axis=0)
    # Normalize for display
    out = out.astype('uint8')
    if title is not None:
        plt.title(title)
    # The original called plt.imshow(out) both before and after setting the
    # title; one draw is sufficient and yields the same final axes state.
    plt.imshow(out)
"""These are input content and style images. We hope to "create" an image with the content of our content image, but with the style of the style image. """
'''plt.figure(figsize=(10,10))
content = load_img(content_path).astype('uint8')
style = load_img(style_path).astype('uint8')
plt.subplot(1, 2, 1)
imshow(content, 'Content Image')
plt.subplot(1, 2, 2)
imshow(style, 'Style Image')
plt.show()
'''
"""## Prepare the data
Let's create methods that will allow us to load and preprocess our images easily. We perform the same preprocessing process as are expected according to the VGG training process. VGG networks are trained on image with each channel normalized by `mean = [103.939, 116.779, 123.68]`and with channels BGR.
"""
def load_and_process_img(path_to_img):
    """Load an image and apply VGG19 preprocessing (BGR channel order and
    per-channel mean subtraction), ready to feed to the network."""
    return tf.keras.applications.vgg19.preprocess_input(load_img(path_to_img))
"""In order to view the outputs of our optimization, we are required to perform the inverse preprocessing step. Furthermore, since our optimized image may take its values anywhere between $- \infty$ and $\infty$, we must clip to maintain our values from within the 0-255 range. """
def deprocess_img(processed_img):
x = processed_img.copy()
if len(x.shape) == 4:
x = np.squeeze(x, 0)
assert len(x.shape) == 3, ("Input to deprocess image must be an image of "
"dimension [1, height, width, channel] or [height, width, channel]")
if len(x.shape) != 3:
raise ValueError("Invalid input to deprocessing image")
# perform the inverse of the preprocessiing step
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
"""### Define content and style representations
I
# Why intermediate layers?
"""
# Content layer from which we will pull our feature maps
content_layers = ['block5_conv2']
# Style layers we are interested in (one early conv per VGG19 block)
style_layers = ['block1_conv1',
                'block2_conv1',
                'block3_conv1',
                'block4_conv1',
                'block5_conv1'
                ]
# Counts are used elsewhere to slice the model's combined output list into
# its style and content halves.
num_content_layers = len(content_layers)
num_style_layers = len(style_layers)
"""## Build the Model
In this case, we load [VGG19](https://keras.io/applications/#vgg19), and feed in our input tensor to the model. This will allow us to extract the feature maps (and subsequently the content and style representations) of the content, style, and generated images.
We use VGG19, as suggested in the paper. In addition, since VGG19 is a relatively simple model (compared with ResNet, Inception, etc) the feature maps actually work better for style transfer.
In order to access the intermediate layers corresponding to our style and content feature maps, we get the corresponding outputs and using the Keras [**Functional API**](https://keras.io/getting-started/functional-api-guide/), we define our model with the desired output activations.
With the Functional API defining a model simply involves defining the input and output:
`model = Model(inputs, outputs)`
"""
def get_model():
    """Build a feature-extraction model on top of pretrained VGG19.

    Loads VGG19 (imagenet weights, no classifier head), freezes it, and
    returns a Keras model mapping an image input to the activations of the
    style layers followed by the content layers.
    """
    vgg = tf.keras.applications.vgg19.VGG19(
        include_top=False, weights='imagenet')
    vgg.trainable = False
    # Style outputs first, then content outputs — callers slice by
    # num_style_layers, so this ordering matters.
    tapped_layers = style_layers + content_layers
    model_outputs = [vgg.get_layer(name).output for name in tapped_layers]
    return models.Model(vgg.input, model_outputs)
"""## Define and create our loss functions (content and style distances)
# Content Loss
"""
def get_content_loss(base_content, target):
    """Mean squared difference between generated and target content features."""
    difference = base_content - target
    return tf.reduce_mean(tf.square(difference))
"""## Style Loss
# Computing style loss
Again, we implement our loss as a distance metric .
"""
def gram_matrix(input_tensor):
    """Channel-correlation (Gram) matrix, averaged over spatial positions."""
    num_channels = int(input_tensor.shape[-1])
    # Flatten every spatial position into a row: (positions, channels).
    flattened = tf.reshape(input_tensor, [-1, num_channels])
    num_positions = tf.shape(flattened)[0]
    correlations = tf.matmul(flattened, flattened, transpose_a=True)
    return correlations / tf.cast(num_positions, tf.float32)
def get_style_loss(base_style, gram_target):
    """Mean squared distance between the Gram matrix of `base_style`
    (an activation map of dimension h, w, c) and the precomputed target
    Gram matrix.
    """
    gram_style = gram_matrix(base_style)
    # The original unpacked height/width/channels for a per-layer scaling
    # factor (/ (4. * (channels ** 2) * (width * height) ** 2)) that was
    # commented out, leaving the locals unused — removed here.
    return tf.reduce_mean(tf.square(gram_style - gram_target))
"""## Apply style transfer to our images
# Run Gradient Descent
helper function that will load our content and style image, feed them forward through our network, which will then output the content and style feature representations from our model.
"""
def get_feature_representations(model, content_path, style_path):
    """Extract style and content feature representations.

    Loads and preprocesses both images, runs each through `model`, and splits
    the outputs: the first num_style_layers entries of the style image's
    outputs become the style features, the remaining entries of the content
    image's outputs become the content features. Batch dim is stripped.

    Returns (style_features, content_features).
    """
    content_image = load_and_process_img(content_path)
    style_image = load_and_process_img(style_path)
    style_outputs = model(style_image)
    content_outputs = model(content_image)
    # [0] drops the batch dimension from each layer's activation.
    style_features = [layer_out[0] for layer_out in style_outputs[:num_style_layers]]
    content_features = [layer_out[0] for layer_out in content_outputs[num_style_layers:]]
    return style_features, content_features
"""### Computing the loss and gradients
Here we use [**tf.GradientTape**](https://www.tensorflow.org/programmers_guide/eager#computing_gradients) to compute the gradient. It allows us to take advantage of the automatic differentiation available by tracing operations for computing the gradient later. It records the operations during the forward pass and then is able to compute the gradient of our loss function with respect to our input image for the backwards pass.
"""
def compute_loss(model, loss_weights, init_image, gram_style_features, content_features):
    """Compute the combined style-transfer loss for the current image.

    Arguments:
        model: gives access to the intermediate layer outputs.
        loss_weights: (style_weight, content_weight) pair.
        init_image: the image being optimized.
        gram_style_features: precomputed target Gram matrices, one per style layer.
        content_features: precomputed target content-layer activations.

    Returns (total_loss, weighted_style_score, weighted_content_score).
    """
    style_weight, content_weight = loss_weights
    # In eager mode the model is directly callable on the image.
    model_outputs = model(init_image)
    style_activations = model_outputs[:num_style_layers]
    content_activations = model_outputs[num_style_layers:]
    # Each layer within a loss family contributes equally.
    per_style_layer_weight = 1.0 / float(num_style_layers)
    style_score = 0
    for target_gram, generated in zip(gram_style_features, style_activations):
        style_score += per_style_layer_weight * \
            get_style_loss(generated[0], target_gram)
    per_content_layer_weight = 1.0 / float(num_content_layers)
    content_score = 0
    for target_feat, generated in zip(content_features, content_activations):
        content_score += per_content_layer_weight * \
            get_content_loss(generated[0], target_feat)
    style_score *= style_weight
    content_score *= content_weight
    total_loss = style_score + content_score
    return total_loss, style_score, content_score
"""Then | |
<filename>lib_lanmt_model.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from nmtlab.models.transformer import Transformer
from nmtlab.modules.transformer_modules import TransformerEmbedding
from nmtlab.modules.transformer_modules import PositionalEmbedding
from nmtlab.modules.transformer_modules import LabelSmoothingKLDivLoss
from nmtlab.utils import OPTS
from nmtlab.utils import TensorMap
from nmtlab.utils import smoothed_bleu
from lib_lanmt_modules import TransformerEncoder
from lib_lanmt_modules import TransformerCrossEncoder
from lib_lanmt_modules import LengthConverter
from lib_vae import VAEBottleneck
class LANMTModel(Transformer):
def __init__(self,
prior_layers=3, decoder_layers=3,
q_layers=6,
latent_dim=8,
KL_budget=1., KL_weight=1.,
budget_annealing=True,
max_train_steps=100000,
**kwargs):
"""Create Latent-variable non-autoregressive NMT model.
Args:
prior_layers - number of layers in prior p(z|x)
decoder_layers - number of layers in decoder p(y|z)
q_layers - number of layers in approximator q(z|x,y)
latent_dim - dimension of latent variables
KL_budget - budget of KL divergence
KL_weight - weight of the KL term,
budget_annealing - whether anneal the KL budget
max_train_steps - max training iterations
"""
self.prior_layers = prior_layers
self.decoder_layers = decoder_layers
self.q_layers = q_layers
self.latent_dim = latent_dim
self.KL_budget = KL_budget
self.KL_weight = KL_weight
self.budget_annealing = budget_annealing
self.max_train_steps = max_train_steps
if OPTS.finetune:
self.training_criteria = "BLEU"
else:
self.training_criteria = "loss"
super(LANMTModel, self).__init__(**kwargs)
    def prepare(self):
        """Define all sub-modules of the model.

        NOTE: the order of these assignments fixes the nn.Module registration
        order, which determines parameter iteration order in
        initialize_parameters() — do not reorder casually.
        """
        # Embedding layers
        self.x_embed_layer = TransformerEmbedding(self._src_vocab_size, self.embed_size)
        self.y_embed_layer = TransformerEmbedding(self._tgt_vocab_size, self.embed_size)
        self.pos_embed_layer = PositionalEmbedding(self.hidden_size)
        # Length Transformation (500 = assumed max sequence length — TODO confirm)
        self.length_converter = LengthConverter()
        self.length_embed_layer = nn.Embedding(500, self.hidden_size)
        # Prior p(z|x); the estimator outputs latent_dim * 2 values to pack
        # both mean and scale parameters of the latent Gaussian.
        self.prior_encoder = TransformerEncoder(self.x_embed_layer, self.hidden_size, self.prior_layers)
        self.prior_prob_estimator = nn.Linear(self.hidden_size, self.latent_dim * 2)
        # Approximator q(z|x,y)
        self.q_encoder_y = TransformerEncoder(self.y_embed_layer, self.hidden_size, self.q_layers)
        self.q_encoder_xy = TransformerCrossEncoder(None, self.hidden_size, self.q_layers)
        # Decoder p(y|x,z)
        self.decoder = TransformerCrossEncoder(None, self.hidden_size, self.decoder_layers, skip_connect=True)
        # Bottleneck mapping hidden states to the latent space and back
        self.bottleneck = VAEBottleneck(self.hidden_size, z_size=self.latent_dim)
        self.latent2vector_nn = nn.Linear(self.latent_dim, self.hidden_size)
        # Length prediction: 100 classes, matching the delta clamp(0, 99) in
        # compute_length_predictor_loss.
        self.length_predictor = nn.Linear(self.hidden_size, 100)
        # Word probability estimator
        self.expander_nn = nn.Linear(self.hidden_size, self._tgt_vocab_size)
        self.label_smooth = LabelSmoothingKLDivLoss(0.1, self._tgt_vocab_size, 0)
        self.set_stepwise_training(False)
def initialize_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
# if self._fp16:
# self.half()
def compute_Q(self, x, y):
"""Compute the approximated posterior q(z|x,y) and sample from it.
"""
x_mask = self.to_float(torch.ne(x, 0))
y_mask = self.to_float(torch.ne(y, 0))
# Compute p(z|y,x) and sample z
q_states = self.compute_Q_states(self.x_embed_layer(x), x_mask, y, y_mask)
sampled_latent, q_prob = self.sample_from_Q(q_states, sampling=False)
return sampled_latent, q_prob
def compute_Q_states(self, x_states, x_mask, y, y_mask):
"""Compute the states for estimating q(z|x,y).
"""
y_states = self.q_encoder_y(y, y_mask)
if y.size(0) > x_states.size(0) and x_states.size(0) == 1:
x_states = x_states.expand(y.size(0), -1, -1)
x_mask = x_mask.expand(y.size(0), -1)
states = self.q_encoder_xy(x_states, x_mask, y_states, y_mask)
return states
def sample_from_Q(self, q_states, sampling=True):
"""Estimate q(z|x,y) and sample a latent variable from it.
"""
sampled_z, q_prob = self.bottleneck(q_states, sampling=sampling)
full_vector = self.latent2vector_nn(sampled_z)
return full_vector, q_prob
def compute_length_predictor_loss(self, xz_states, z, z_mask, y_mask):
    """Get the loss for the length predictor.

    Classifies the target/source length difference (offset by +50 into
    100 classes) from the mean-pooled latent + source states.
    Returns a dict with "len_loss" and "len_acc".
    """
    # Target length minus one (drop one token from the count).
    y_lens = y_mask.sum(1) - 1
    # Length difference shifted by +50 to index the 100-way classifier;
    # clamped to the valid class range [0, 99].
    delta = (y_lens - z_mask.sum(1) + 50.).long().clamp(0, 99)
    # Mean-pool (z + source states) over the valid (unmasked) positions.
    mean_z = ((z + xz_states) * z_mask[:, :, None]).sum(1) / z_mask.sum(1)[:, None]
    logits = self.length_predictor(mean_z)
    length_loss = F.cross_entropy(logits, delta, reduction="mean")
    length_acc = self.to_float(logits.argmax(-1) == delta).mean()
    length_scores = {
        "len_loss": length_loss,
        "len_acc": length_acc
    }
    return length_scores
def compute_vae_KL(self, prior_prob, q_prob):
    """KL( q(z|x,y) || p(z|x) ) for diagonal Gaussians, summed over latent dims.

    Each input packs means in the first ``latent_dim`` channels and
    pre-softplus scales in the rest; scales pass through softplus here.
    """
    d = self.latent_dim
    q_mean, q_scale = q_prob[:, :, :d], F.softplus(q_prob[:, :, d:])
    p_mean, p_scale = prior_prob[:, :, :d], F.softplus(prior_prob[:, :, d:])
    # log(sigma_p / sigma_q) term, with small epsilons for stability.
    log_ratio = torch.log(p_scale / (q_scale + 1e-8) + 1e-8)
    # Quadratic term: (sigma_q^2 + (mu_q - mu_p)^2) / (2 sigma_p^2).
    quad = (torch.pow(q_scale, 2) + torch.pow(q_mean - p_mean, 2)) / (2 * torch.pow(p_scale, 2))
    return (log_ratio + quad - 0.5).sum(-1)
def convert_length(self, z, z_mask, target_lens):
    """Adjust the number of latent variables.

    Resamples the latent sequence ``z`` to ``target_lens`` positions via
    the length converter, then mixes in positional and length embeddings.
    """
    # 1/sqrt(2) scaling keeps the combined embedding magnitude roughly stable.
    rc = 1. / math.sqrt(2)
    converted_vectors, _ = self.length_converter(z, target_lens, z_mask)
    pos_embed = self.pos_embed_layer(converted_vectors)
    len_embed = self.length_embed_layer(target_lens.long())
    # Broadcast the per-sample length embedding across all positions.
    converted_vectors = rc * converted_vectors + 0.5 * pos_embed + 0.5 * len_embed[:, None, :]
    return converted_vectors
def convert_length_with_delta(self, z, z_mask, delta):
    """Adjust the number of latent variables with a predicted delta.

    Returns (length-converted latents, target-side mask, target lengths).
    """
    z = z.clone()
    z_lens = z_mask.sum(1).long()
    y_lens = z_lens + delta
    converted_vectors = self.convert_length(z, z_mask, y_lens)
    # Create target-side mask. Fix: allocate the range on the same device
    # as the inputs instead of probing torch.cuda.is_available() — the old
    # check moved `arange` to GPU even when the tensors lived on CPU,
    # which made the comparison below fail with a device mismatch.
    arange = torch.arange(y_lens.max().long(), device=y_lens.device)
    y_mask = self.to_float(arange[None, :].repeat(z.size(0), 1) < y_lens[:, None])
    return converted_vectors, y_mask, y_lens
def deterministic_sample_from_prob(self, z_prob):
    """Take the Gaussian means (first ``latent_dim`` channels) and map
    them back to full hidden-size vectors (no sampling noise)."""
    means = z_prob[:, :, :self.latent_dim]
    return self.latent2vector_nn(means)
def predict_length(self, prior_states, z, z_mask):
    """Predict the target-length offset (delta in [-50, 49]) from the
    mean-pooled latent + source states."""
    denom = z_mask.sum(1)[:, None]
    pooled = ((z + prior_states) * z_mask[:, :, None]).sum(1) / denom
    logits = self.length_predictor(pooled)
    # Classifier index 50 corresponds to delta == 0.
    return logits.argmax(-1) - 50
def compute_final_loss(self, q_prob, prior_prob, x_mask, score_map):
    """Combine KL divergence (with a free-bits budget), optional length
    loss and the already-computed cross-entropy into the final loss.

    Returns (updated score_map, remaining loss still to be backproped).
    """
    kl = self.compute_vae_KL(prior_prob, q_prob)
    # Apply budgets for KL divergence: KL = max(KL, budget)
    budget_upperbound = self.KL_budget
    if self.budget_annealing:
        step = OPTS.trainer.global_step()
        half_maxsteps = float(self.max_train_steps / 2)
        if step > half_maxsteps:
            # Second half of training: linearly anneal the budget to 0.
            rate = (float(step) - half_maxsteps) / half_maxsteps
            min_budget = 0.
            budget = min_budget + (budget_upperbound - min_budget) * (1. - rate)
        else:
            budget = budget_upperbound
    else:
        budget = self.KL_budget
    score_map["KL_budget"] = torch.tensor(budget)
    # Floor the per-position KL at the budget (free-bits style).
    max_mask = self.to_float((kl - budget) > 0.)
    kl = kl * max_mask + (1. - max_mask) * budget
    # Normalize by batch size.
    kl_loss = (kl * x_mask / x_mask.shape[0]).sum()
    # Report KL divergence
    score_map["kl"] = kl_loss
    # Also report the average KL for each token
    score_map["tok_kl"] = (kl * x_mask / x_mask.sum()).sum()
    # Report cross-entropy loss
    score_map["nll"] = score_map["loss"]
    # Cross-entropy loss is *already* backproped when computing softmaxes in shards
    # So only need to compute the remaining losses and then backprop them
    remain_loss = score_map["kl"].clone() * self.KL_weight
    if "len_loss" in score_map:
        remain_loss += score_map["len_loss"]
    # Report the combined loss
    score_map["loss"] = remain_loss + score_map["nll"]
    return score_map, remain_loss
def forward(self, x, y, sampling=False, return_code=False):
    """Model training.

    Computes the prior p(z|x) and posterior q(z|x,y), length-converts
    the sampled latents to the target length, decodes, and assembles
    all training losses into a score map.
    """
    score_map = {}
    x_mask = self.to_float(torch.ne(x, 0))
    y_mask = self.to_float(torch.ne(y, 0))
    # ----------- Compute prior and approximated posterior -------------#
    # Compute p(z|x)
    prior_states = self.prior_encoder(x, x_mask)
    prior_prob = self.prior_prob_estimator(prior_states)
    # Compute q(z|x,y) and sample z
    q_states = self.compute_Q_states(self.x_embed_layer(x), x_mask, y, y_mask)
    # Sample latent variables from q(z|x,y); one latent per source position.
    z_mask = x_mask
    sampled_z, q_prob = self.sample_from_Q(q_states)
    # ----------------- Convert the length of latents ------------------#
    # Compute length prediction loss
    length_scores = self.compute_length_predictor_loss(prior_states, sampled_z, z_mask, y_mask)
    score_map.update(length_scores)
    # Padding z to fit target states
    z_with_y_length = self.convert_length(sampled_z, z_mask, y_mask.sum(-1))
    # -------------------------- Decoder -------------------------------#
    decoder_states = self.decoder(z_with_y_length, y_mask, prior_states, x_mask)
    # -------------------------- Compute losses ------------------------#
    decoder_outputs = TensorMap({"final_states": decoder_states})
    denom = x.shape[0]
    if self._shard_size is not None and self._shard_size > 0:
        loss_scores, decoder_tensors, decoder_grads = self.compute_shard_loss(
            decoder_outputs, y, y_mask, denominator=denom, ignore_first_token=False, backward=False
        )
        # Re-normalize word accuracy from per-sample to per-token.
        loss_scores["word_acc"] *= float(y_mask.shape[0]) / self.to_float(y_mask.sum())
        score_map.update(loss_scores)
    else:
        raise SystemError("Shard size must be setted or the memory is not enough for this model.")
    score_map, remain_loss = self.compute_final_loss(q_prob, prior_prob, z_mask, score_map)
    # Report smoothed BLEU during validation
    if not torch.is_grad_enabled() and self.training_criteria == "BLEU":
        logits = self.expander_nn(decoder_outputs["final_states"])
        predictions = logits.argmax(-1)
        score_map["BLEU"] = - self.get_BLEU(predictions, y)
    # -------------------------- Backprop gradient ---------------------#
    # The shard cross-entropy gradients are combined with the remaining
    # (KL + length) loss in a single backward pass.
    if self._shard_size is not None and self._shard_size > 0 and decoder_tensors is not None:
        decoder_tensors.append(remain_loss)
        decoder_grads.append(None)
        torch.autograd.backward(decoder_tensors, decoder_grads)
    # NOTE(review): interactive debugger on non-finite loss — presumably a
    # leftover development aid; consider raising instead of pdb in production.
    if torch.isnan(score_map["loss"]) or torch.isinf(score_map["loss"]):
        import pdb;pdb.set_trace()
    return score_map
def translate(self, x, latent=None, prior_states=None, refine_step=0):
    """Inference: decode a translation for x.

    Optionally reuses a caller-provided latent and/or precomputed prior
    states (used by iterative refinement). Returns (predictions, latent,
    prior states) — or (None, latent, prior argmax) when the predicted
    target length collapses to zero.
    """
    x_mask = self.to_float(torch.ne(x, 0))
    # Compute p(z|x)
    if prior_states is None:
        prior_states = self.prior_encoder(x, x_mask)
    # Sample latent variables from prior if it's not given
    if latent is None:
        prior_prob = self.prior_prob_estimator(prior_states)
        if not OPTS.Tlatent_search:
            latent = self.deterministic_sample_from_prob(prior_prob)
        else:
            # Draw multiple noisy candidates for latent search.
            latent = self.bottleneck.sample_any_dist(prior_prob, samples=OPTS.Tcandidate_num, noise_level=0.5)
            latent = self.latent2vector_nn(latent)
    # Predict length
    length_delta = self.predict_length(prior_states, latent, x_mask)
    # Adjust the number of latent
    converted_z, y_mask, y_lens = self.convert_length_with_delta(latent, x_mask, length_delta + 1)
    # NOTE(review): if `latent` was passed in by the caller, `prior_prob`
    # is never assigned, so this early return would raise NameError —
    # confirm callers never hit the zero-length case with a given latent.
    if converted_z.size(1) == 0:
        return None, latent, prior_prob.argmax(-1)
    # Run decoder to predict the target words
    decoder_states = self.decoder(converted_z, y_mask, prior_states, x_mask)
    logits = self.expander_nn(decoder_states)
    # Get the target predictions
    if OPTS.Tlatent_search and not OPTS.Tteacher_rescore:
        # Latent search without teacher rescoring is dangerous
        # because the generative model framework can't efficiently and
        # correctly score hypotheses.
        if refine_step == OPTS.Trefine_steps:
            # In the final step, pick the best hypothesis by summed logprob.
            logprobs, preds = torch.log_softmax(logits, 2).max(2)
            logprobs = (logprobs * y_mask).sum(1)  # x batch x 1
            preds = preds * y_mask.long()
            # after deterministic refinement
            pred = preds[logprobs.argmax()].unsqueeze(0)
        else:
            # Just return all candidates
            pred = logits.argmax(-1)
            pred = pred * y_mask.long()
    else:
        pred = logits.argmax(-1)
    return pred, latent, prior_states
def get_BLEU(self, | |
in seq]),
IntVector([x.tm_sec for x in seq])]
return POSIXct._sexp_from_seq(seq, lambda elt: time.tzname[0], f)
@staticmethod
def sexp_from_datetime(seq):
    """ return a POSIXct vector from a sequence of
    datetime.datetime elements. """
    def make_fields(seq):
        # One IntVector per calendar field, in the order R expects.
        field_names = ('year', 'month', 'day', 'hour', 'minute', 'second')
        return [IntVector([getattr(x, name) for x in seq])
                for name in field_names]
    return POSIXct._sexp_from_seq(seq, attrgetter('tzinfo'), make_fields)
class Array(Vector):
    """ An R array.

    Wraps an R object for which ``is.array()`` is TRUE and exposes its
    dimensions and dimnames via R's base functions.
    """
    # R-side helpers bound once at class-definition time.
    _dimnames_get = baseenv_ri['dimnames']
    _dimnames_set = baseenv_ri['dimnames<-']
    _dim_get = baseenv_ri['dim']
    _dim_set = baseenv_ri['dim<-']
    _isarray = baseenv_ri['is.array']
    def __init__(self, obj):
        """Wrap *obj*; raise TypeError unless R's is.array() reports TRUE."""
        super(Array, self).__init__(obj)
        if not self._isarray(self)[0]:
            raise(TypeError("The object must be representing an R array"))
    def __dim_get(self):
        # Delegate to R's dim() and convert to an rpy2 high-level object.
        res = self._dim_get(self)
        res = conversion.ri2ro(res)
        return res
    def __dim_set(self, value):
        # NOTE(review): the computed result is never stored back on self;
        # setting dim is deliberately unfinished and always raises.
        value = conversion.py2ro(value)
        res = self._dim_set(self, value)
        #FIXME: not properly done
        raise(Exception("Not yet implemented"))
    dim = property(__dim_get, __dim_set,
                   "Get or set the dimension of the array.")
    def __dimnames_get(self):
        """ Return a list of name vectors
        (like the R function 'dimnames' does)."""
        res = self._dimnames_get(self)
        res = conversion.ri2ro(res)
        return res
    def __dimnames_set(self, value):
        """ Set list of name vectors
        (like the R function 'dimnames' does)."""
        value = conversion.ri2ro(value)
        res = self._dimnames_set(self, value)
        self.__sexp__ = res.__sexp__
    names = property(__dimnames_get, __dimnames_set, None,
                     "names associated with the dimension.")
    # dimnames is an alias of names.
    dimnames = names
class Matrix(Array):
    """ An R matrix.

    Thin wrappers over R's base matrix operations (transpose, products,
    decompositions) plus row/column name accessors.
    """
    _transpose = baseenv_ri['t']
    _rownames = baseenv_ri['rownames']
    _colnames = baseenv_ri['colnames']
    _dot = baseenv_ri['%*%']
    _crossprod = baseenv_ri['crossprod']
    _tcrossprod = baseenv_ri['tcrossprod']
    _svd = baseenv_ri['svd']
    _eigen = baseenv_ri['eigen']
    def __nrow_get(self):
        """ Number of rows.
        :rtype: integer """
        return self.dim[0]
    nrow = property(__nrow_get, None, None, "Number of rows")
    def __ncol_get(self):
        """ Number of columns.
        :rtype: integer """
        return self.dim[1]
    ncol = property(__ncol_get, None, None, "Number of columns")
    def __rownames_get(self):
        """ Row names
        :rtype: SexpVector
        """
        res = self._rownames(self)
        return conversion.ri2ro(res)
    def __rownames_set(self, rn):
        if isinstance(rn, StrSexpVector):
            if len(rn) != self.nrow:
                raise ValueError('Invalid length.')
            # No dimnames yet: build a fresh 2-element list and attach it.
            if self.dimnames is NULL:
                dn = ListVector.from_length(2)
                dn[0] = rn
                self.do_slot_assign('dimnames', dn)
            else:
                dn = self.dimnames
                dn[0] = rn
        else:
            raise ValueError('The rownames attribute can only be an R string vector.')
    rownames = property(__rownames_get, __rownames_set, None, "Row names")
    def __colnames_get(self):
        """ Column names
        :rtype: SexpVector
        """
        res = self._colnames(self)
        return conversion.ri2ro(res)
    def __colnames_set(self, cn):
        if isinstance(cn, StrSexpVector):
            if len(cn) != self.ncol:
                raise ValueError('Invalid length.')
            # No dimnames yet: build a fresh 2-element list and attach it.
            if self.dimnames is NULL:
                dn = ListVector.from_length(2)
                dn[1] = cn
                self.do_slot_assign('dimnames', dn)
            else:
                dn = self.dimnames
                dn[1] = cn
        else:
            raise ValueError('The colnames attribute can only be an R string vector.')
    colnames = property(__colnames_get, __colnames_set, None, "Column names")
    def transpose(self):
        """ transpose the matrix """
        res = self._transpose(self)
        return conversion.ri2ro(res)
    def crossprod(self, m):
        """ crossproduct X'.Y"""
        # NOTE(review): `m` goes through conversion.ri2ro here while
        # tcrossprod passes its argument untouched — confirm which is intended.
        res = self._crossprod(self, conversion.ri2ro(m))
        return conversion.ri2ro(res)
    def tcrossprod(self, m):
        """ crossproduct X.Y'"""
        res = self._tcrossprod(self, m)
        return conversion.ri2ro(res)
    def svd(self, nu = None, nv = None, linpack = False):
        """ SVD decomposition.
        If nu is None, it is given the default value min(tuple(self.dim)).
        If nv is None, it is given the default value min(tuple(self.dim)).
        """
        if nu is None:
            nu = min(tuple(self.dim))
        if nv is None:
            nv = min(tuple(self.dim))
        # Bug fix: forward the caller's `linpack` flag instead of the
        # hard-coded False that silently ignored the parameter.
        res = self._svd(self, nu = nu, nv = nv, LINPACK = linpack)
        return conversion.ri2ro(res)
    def dot(self, m):
        """ Matrix multiplication """
        res = self._dot(self, m)
        return conversion.ri2ro(res)
    def eigen(self):
        """ Eigen values """
        res = self._eigen(self)
        return conversion.ri2ro(res)
class DataFrame(ListVector):
    """ R 'data.frame'.
    """
    _dataframe_name = rinterface.StrSexpVector(('data.frame',))
    # R-side helpers used by the methods below.
    _read_csv = utils_ri['read.csv']
    _write_table = utils_ri['write.table']
    _cbind = rinterface.baseenv['cbind.data.frame']
    _rbind = rinterface.baseenv['rbind.data.frame']
    _is_list = rinterface.baseenv['is.list']
    # Jinja2 template used by _repr_html_ for notebook display.
    _html_template = jinja2.Template(
    """
    <span>R/rpy2 DataFrame ({{ nrows }} x {{ ncolumns }})</span>
    <table>
      <thead>
        <tr>
        {% for name in column_names %}
          <th>{{ name }}</th>
        {% endfor %}
        </tr>
      </thead>
      <tbody>
      {% for row_i in rows %}
      <tr>
      {% for col_i in columns %}
      <td>
        {{ elements[col_i][row_i] }}
      </td>
      {% endfor %}
      </tr>
      {% endfor %}
      </tbody>
    </table>
    """)
    def __init__(self, obj, stringsasfactor=False):
        """ Create a new data frame.

        :param obj: object inheriting from rpy2.rinterface.SexpVector,
        or inheriting from TaggedList
        or a mapping name -> value
        :param stringsasfactor: Boolean indicating whether vectors
        of strings should be turned to factors. Note
        that factors will not be turned to string vectors.

        NOTE(review): the parameter is spelled ``stringsasfactor`` (no
        trailing s) — kept as-is since callers may pass it by keyword.
        """
        if isinstance(obj, rinterface.SexpVector):
            if obj.typeof != rinterface.VECSXP:
                raise ValueError("obj should of typeof VECSXP"+\
                                 " (and we get %s)" % rinterface.str_typeint(obj.typeof))
            if self._is_list(obj)[0] or \
               globalenv_ri.get('inherits')(obj, self._dataframe_name)[0]:
                #FIXME: is it really a good idea to pass R lists
                # to the constructor ?
                super(DataFrame, self).__init__(obj)
                return
            else:
                raise ValueError(
                    "When passing R objects to build a DataFrame," +\
                    " the R object must be a list or inherit from" +\
                    " the R class 'data.frame'")
        elif isinstance(obj, rlc.TaggedList):
            kv = [(k, conversion.py2ri(v)) for k,v in obj.items()]
        else:
            try:
                kv = [(str(k), conversion.py2ri(obj[k])) for k in obj]
            except TypeError:
                raise ValueError("obj can be either "+
                                 "an instance of an iter-able class" +
                                 "(such a Python dict, rpy2.rlike.container OrdDict" +
                                 " or an instance of rpy2.rinterface.SexpVector" +
                                 " of type VECSXP")
        # Check if there is a conflicting column name
        # NOTE(review): on conflict the stringsAsFactors argument is simply
        # not passed to data.frame(), so R's default applies — confirm intended.
        if 'stringsAsFactors' in (k for k,v in kv):
            warnings.warn('The column name "stringsAsFactors" is '
                          'conflicting with named parameter '
                          'in underlying R function "data.frame()".')
        else:
            kv.append(('stringsAsFactors', stringsasfactor))
        # Call R's data frame constructor
        kv = tuple(kv)
        df = baseenv_ri.get("data.frame").rcall(kv, globalenv_ri)
        super(DataFrame, self).__init__(df)
    def _repr_html_(self, max_items=7):
        """Render an HTML table for notebook display, eliding the middle
        columns with '...' when there are more than ``max_items``."""
        names = list()
        if len(self) <= max_items:
            names.extend(self.names)
        else:
            # Show the first and last half_items column names around '...'.
            half_items = max_items // 2
            for i in range(0, half_items):
                try:
                    name = self.names[i]
                except TypeError:
                    name = '[no name]'
                names.append(name)
            names.append('...')
            for i in range(-half_items, 0):
                try:
                    name = self.names[i]
                except TypeError:
                    name = '[no name]'
                names.append(name)
        elements = list()
        # NOTE(review): the '...' filler uses len(elements[-1]) — if the very
        # first column lacks _repr_html_ this raises IndexError; confirm
        # _iter_repr always yields a formattable column first.
        for e in self._iter_repr(max_items=max_items):
            if hasattr(e, '_repr_html_'):
                elements.append(tuple(e._iter_formatted()))
            else:
                elements.append(['...',] * len(elements[-1]))
        d = {'column_names': names,
             'rows': tuple(range(len(elements))),
             'columns': tuple(range(len(names))),
             'nrows': self.nrow,
             'ncolumns': self.ncol,
             'elements': elements}
        html = self._html_template.render(d)
        return html
    def _get_nrow(self):
        """ Number of rows.
        :rtype: integer """
        return baseenv_ri["nrow"](self)[0]
    nrow = property(_get_nrow, None, None)
    def _get_ncol(self):
        """ Number of columns.
        :rtype: integer """
        return baseenv_ri["ncol"](self)[0]
    ncol = property(_get_ncol, None, None)
    def _get_rownames(self):
        # Delegate to R's rownames() and convert the result.
        res = baseenv_ri["rownames"](self)
        return conversion.ri2ro(res)
    def _set_rownames(self, rownames):
        # R's replacement function returns a new SEXP; rebind it to self.
        res = baseenv_ri["rownames<-"](self, conversion.py2ri(rownames))
        self.__sexp__ = res.__sexp__
    rownames = property(_get_rownames, _set_rownames, None,
                        "Row names")
    def _get_colnames(self):
        # Delegate to R's colnames() and convert the result.
        res = baseenv_ri["colnames"](self)
        return conversion.ri2ro(res)
    def _set_colnames(self, colnames):
        # R's replacement function returns a new SEXP; rebind it to self.
        res = baseenv_ri["colnames<-"](self, conversion.py2ri(colnames))
        self.__sexp__ = res.__sexp__
    colnames = property(_get_colnames, _set_colnames, None)
def __getitem__(self, i):
# Make sure this is not a List returned
# 3rd-party conversions could return objects
# that no longer inherit from rpy2's R objects.
# We need to use the low-level __getitem__
# to bypass the conversion mechanism.
# R's data.frames have no representation at the C-API level
# (they are lists)
tmp = rinterface.ListSexpVector.__getitem__(self, i)
if tmp.typeof == rinterface.VECSXP:
return DataFrame(tmp)
else:
return conversion.ri2ro(tmp)
def cbind(self, *args, **kwargs):
""" bind objects as supplementary columns """
new_args = [self, ] + [conversion.ri2ro(x) for x in args]
new_kwargs = dict([(k, conversion.ri2ro(v)) for k,v in kwargs.items()])
res = self._cbind(*new_args, **new_kwargs)
return conversion.ri2ro(res)
def rbind(self, *args, **kwargs):
""" bind objects as supplementary rows """
new_args = [conversion.ri2ro(x) for x in args]
new_kwargs = dict([(k, conversion.ri2ro(v)) for k,v in kwargs.items()])
res = self._rbind(self, *new_args, **new_kwargs)
return conversion.ri2ro(res)
def head(self, *args, **kwargs):
""" Call the R generic 'head()'. """
res = utils_ri['head'](self, *args, **kwargs)
return conversion.ri2ro(res)
@staticmethod
def from_csvfile(path, header = True, sep = ",",
quote = "\"", dec = ".",
row_names = rinterface.MissingArg,
col_names = rinterface.MissingArg,
fill = True, comment_char = "",
na_strings = [],
as_is = False):
""" Create an instance from data in a .csv file.
path : string with a path
header : boolean (heading line with column names or not)
sep : separator character
quote : quote character
row_names : column name, or column index for column names (warning: indexing starts at one in R)
fill : boolean (fill the lines when less entries than columns)
comment_char : comment character
na_strings : a list of strings which are | |
<filename>source/ftp_client.py
# imports
import argparse
import os
import sys
from socket import *
# classes
class Client:
    def __init__(self):
        """
        goal: define class properties
        type: (self) -> ()
        """
        # input variables
        self.userinput = ""
        self.tokens = []
        # socket variables
        self.ftp_socket = None
        self.host = ""
        self.port = "21"
        # login variables
        self.username = ""
        self.password = ""
        # filesystem variables
        self.client_cfg = "./ftp_client.cfg"
        self.test_file = "./tests/testfile.txt"
        # dataport variables
        self.data_socket = None
        self.data_address = ""
        self.dataport_min = 60020
        self.dataport_max = 61000
        self.data_port = self.dataport_min
        self.next_dataport = 1
        self.dataport_backlog = 1
    def start(self):
        """
        goal: define client startup
        type: (self) -> ()
        """
        self.eventloop()
    def eventloop(self):
        """
        goal: define client eventloop (prompt, tokenize, dispatch)
        type: (self) -> ()
        """
        while True:
            self.userinput = menu("ftp>")
            self.tokens = parser(self.userinput)
            self.dispatch()
    def configure(self):
        """
        goal: configure client from the config file
        type: (self) -> ()
        """
        try:
            for line in open(self.client_cfg):
                tokens = parser(line)
                command = tokens[0]
                arglist = tokens[1:]
                if command.startswith("# "):
                    pass
                elif command == "host":
                    self.host = "".join(arglist)
                elif command == "port":
                    self.port = "".join(arglist)
                elif command == "data_port_max":
                    # Bug fix: store as int — these bounds feed arithmetic in
                    # dataport(); the old string values crashed the first
                    # data connection.
                    self.dataport_max = int("".join(arglist))
                elif command == "data_port_min":
                    # Bug fix: store as int (see data_port_max above).
                    self.dataport_min = int("".join(arglist))
                elif command == "default_ftp_port":
                    self.port = "".join(arglist)
                elif command == "default_mode":
                    print("default mode = {}".format("".join(arglist)))
                elif command == "default_debug_mode":
                    print("default debug mode = {}".format("".join(arglist)))
                elif command == "default_verbose_mode":
                    print("default verbose mode = {}".format("".join(arglist)))
                elif command == "default_test_file":
                    self.test_file = "".join(arglist)
                elif command == "default_log_file":
                    print("default log file = {}".format("".join(arglist)))
        except Exception as e:
            print("ftp: configuration error: {}".format(e))
    def arguments(self):
        """
        goal: manage command line arguments
        type: (self) -> ()
        """
        arg_parser = argparse.ArgumentParser()
        arg_parser.add_argument("-H", "--hostname", help="enter hostname")
        arg_parser.add_argument("-u", "--username", help="enter username")
        arg_parser.add_argument("-w", "--password", help="enter password")
        arg_parser.add_argument("-fp", "--ftp_port", help="enter port")
        arg_parser.add_argument("-d", "--dataport", help="enter dataport range")
        arg_parser.add_argument("-c", "--config", help="enter configuration file")
        arg_parser.add_argument("-t", "--test", help="enter test file")
        arg_parser.add_argument("-L", "--log", help="enter log file")
        arg_parser.add_argument("-D", "--debug", help="toogle debug mode", choices=["on", "off"])
        arg_parser.add_argument("-P", "--passive", help="passive mode", action="store_true")
        arg_parser.add_argument("-A", "--active", help="active mode", action="store_true")
        arg_parser.add_argument("-V", "--verbose", help="verbose mode", action="store_true")
        arg_parser.add_argument("-T", "--test_default", help="run default test", action="store_true")
        arg_parser.add_argument("--all", help="all output to log file, still display", action="store_true")
        arg_parser.add_argument("--lall", help="log all output to this file")
        arg_parser.add_argument("--only", help="only log all output", action="store_true")
        arg_parser.add_argument("--version", help="display version", action="store_true")
        arg_parser.add_argument("--info", help="display client info", action="store_true")
        args = arg_parser.parse_args()
        if args.hostname:
            self.host = args.hostname
        if args.username:
            self.username = args.username
        if args.password:
            self.password = args.password
        if args.ftp_port:
            self.port = args.ftp_port
        if args.dataport:
            # Bug fix: socket.bind() needs a numeric port, argparse gives str.
            self.data_port = int(args.dataport)
        if args.config:
            self.client_cfg = args.config
        if args.test:
            print("test = {}".format(args.test))
        if args.log:
            print("log = {}".format(args.log))
        if args.debug:
            print("debug = {}".format(args.debug))
        if args.passive:
            print("passive = {}".format(args.passive))
        if args.active:
            print("active = {}".format(args.active))
        if args.verbose:
            print("verbose = {}".format(args.verbose))
        if args.test_default:
            self.test_me()
            sys.exit()
        if args.all:
            print("all = {}".format(args.all))
        if args.lall:
            print("lall = {}".format(args.lall))
        if args.only:
            print("only = {}".format(args.only))
        if args.version:
            print("version: 0.1")
            sys.exit()
        if args.info:
            print("name: <NAME>")
            print("id: 2702278")
            sys.exit()
    def dispatch(self):
        """
        goal: execute valid commands
        type: (self) -> ()
        """
        try:
            command = self.tokens[0].lower()
            arglist = self.tokens[1:]
            if command in ("exit", "bye", "quit"):
                if self.ftp_socket:
                    ftp_logout(self.ftp_socket)
                    self.logout()
                sys.exit()
            elif command in ("pwd",):
                if not arglist:
                    ftp_pwd(self.ftp_socket)
            elif command in ("noop",):
                if not arglist:
                    ftp_noop(self.ftp_socket)
            elif command in ("logout", "close"):
                if not arglist:
                    print("Logged out", self.username)
                    ftp_logout(self.ftp_socket)
                    self.logout()
            elif command in ("type",):
                if not arglist:
                    ftp_type(self.ftp_socket)
            elif command in ("list", "dir", "ls"):
                if not arglist:
                    self.data_socket = self.dataport()
                    if self.data_socket:
                        ftp_port(self.ftp_socket, self.data_address, self.data_port)
                        ftp_list(self.ftp_socket, self.data_socket)
                        self.data_socket = None
            elif command in ("cwd", "cd"):
                if len(arglist) == 1:
                    path = arglist[0]
                    ftp_cwd(self.ftp_socket, path)
            elif command in ("cdup",):
                if not arglist:
                    ftp_cdup(self.ftp_socket)
            elif command in ("mkd", "mkdir"):
                if len(arglist) == 1:
                    path = arglist[0]
                    ftp_mkd(self.ftp_socket, path)
            elif command in ("dele", "delete"):
                if len(arglist) == 1:
                    path = arglist[0]
                    ftp_dele(self.ftp_socket, path)
            elif command in ("rmd", "rmdir"):
                if len(arglist) == 1:
                    path = arglist[0]
                    ftp_rmd(self.ftp_socket, path)
            elif command in ("rn", "rename"):
                if len(arglist) == 2:
                    path = arglist[0]
                    new_path = arglist[1]
                    ftp_rn(self.ftp_socket, path, new_path)
            elif command in ("retr", "get"):
                if len(arglist) == 1:
                    # create data socket
                    path = arglist[0]
                    self.data_socket = self.dataport()
                    # retrieve file
                    if self.data_socket:
                        ftp_port(self.ftp_socket, self.data_address, self.data_port)
                        ftp_retr(self.ftp_socket, self.data_socket, path)
                        self.data_socket = None
            elif command in ("stor", "put", "send"):
                if len(arglist) == 1:
                    # create data socket
                    path = arglist[0]
                    self.data_socket = self.dataport()
                    # send file
                    if self.data_socket and os.path.exists(path) and os.path.isfile(path):
                        ftp_port(self.ftp_socket, self.data_address, self.data_port)
                        ftp_stor(self.ftp_socket, self.data_socket, path)
                        self.data_socket = None
            elif command in ("appe", "append"):
                if len(arglist) == 1:
                    # create data socket
                    path = arglist[0]
                    self.data_socket = self.dataport()
                    # send file
                    if self.data_socket and os.path.exists(path) and os.path.isfile(path):
                        ftp_port(self.ftp_socket, self.data_address, self.data_port)
                        ftp_appe(self.ftp_socket, self.data_socket, path)
                        self.data_socket = None
            elif command in ("open", "ftp"):
                if len(arglist) == 2 and arglist[1].isnumeric():
                    # attempt connection
                    host = arglist[0]
                    port = arglist[1]
                    self.ftp_socket = ftp_open(host, int(port))
                    # login to server
                    if self.ftp_socket:
                        # get server reply
                        print("Connected to {}".format(host))
                        reply = get_message(self.ftp_socket)
                        code, message = parse_reply(reply)
                        # login tree
                        if code != "230":
                            self.login()
                        else:
                            print("User logged in, proceed")
            # debugging
            elif command == "try":
                # fast way to try (host, address) from config, for debugging
                if len(arglist) == 0:
                    # atttempt connection
                    self.ftp_socket = ftp_open(self.host, int(self.port))
                    reply = get_message(self.ftp_socket)
                    code, message = parse_reply(reply)
                    # not logged in
                    if code == "530":
                        self.login()
                    else:
                        print("already logged in")
            else:
                print("Invalid command")
        except Exception as e:
            print("Error:", e)
    def login(self):
        """
        goal: define login protocol
        type: (self) -> ()
        help: Client.login <-> User.authenticate
        """
        self.username = menu("username:")
        self.password = menu("password:")
        send_message(self.ftp_socket, self.username)
        send_message(self.ftp_socket, self.password)
        reply = get_message(self.ftp_socket)
        code, message = parse_reply(reply)
        if code == "230":
            print("Logged into {}".format(self.host))
        else:
            print("Failed to log into {}".format(self.host))
            self.logout()
    def logout(self):
        """
        goal: define logout protocol
        type: (self) -> ()
        """
        self.username = ""
        self.password = ""
        self.ftp_socket.close()
        self.ftp_socket = None
    def dataport(self):
        """
        goal: create dataport
        type: (self) -> socket | none
        """
        try:
            self.data_address = gethostbyname("")
            # Rotate through the configured port range.
            self.next_dataport += 1
            self.data_port = (self.dataport_min + self.next_dataport) % self.dataport_max
            # create dataport
            data_socket = socket(AF_INET, SOCK_STREAM)
            data_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
            data_socket.bind((self.data_address, self.data_port))
            data_socket.listen(self.dataport_backlog)
            return data_socket
        except Exception as e:
            print("Dataport error:", e)
            return None
    def test_me(self):
        """
        goal: run test file
        type: (self) -> ()
        """
        if os.path.exists(self.test_file) and os.path.isfile(self.test_file):
            for line in open(self.test_file):
                tokens = parser(line)
                command = tokens[0]
                if command.startswith("# ") or not command:
                    pass
                else:
                    self.tokens = tokens
                    self.dispatch()
                    pause = input("(press enter to continue): ")
# interface functions
def menu(prompt):
    """
    goal: prompt the user and return their stripped input
    type: (string) -> string
    """
    return input("{} ".format(prompt)).strip()
def parser(userinput):
    """
    goal: split user input into whitespace-delimited tokens
    type: (string) -> [string]
    """
    # split() with no arguments already discards leading/trailing whitespace.
    return userinput.split()
# message functions
def send_message(ftp_socket, message):
"""
goal: send a message
type: (socket, string) -> ()
"""
if ftp_socket:
message = "\0" if not message else message
ftp_socket.send(message.encode())
def get_message(ftp_socket):
    """
    goal: receive and decode up to 1024 bytes (None socket -> None)
    type: (socket) -> string
    """
    if not ftp_socket:
        return None
    return ftp_socket.recv(1024).decode()
def parse_reply(reply):
    """
    goal: parse an ftp server reply into (code, message)
    type: (string) -> (string, string)

    Robustness fix: an empty/whitespace-only reply (e.g. from a closed
    connection) used to raise IndexError on tokens[0]; it now returns
    ("", ""). Tokenization is inlined (equivalent to parser()) so the
    function is self-contained.
    """
    tokens = reply.split()
    if not tokens:
        return "", ""
    return tokens[0], " ".join(tokens[1:])
# ftp commands
def ftp_pwd(ftp_socket):
    """
    goal: ask the server for the working directory and print it
    type: (socket) -> ()
    """
    if not ftp_socket:
        return
    send_message(ftp_socket, "pwd")
    code, _ = parse_reply(get_message(ftp_socket))
    # On success the directory arrives in a second message.
    if code == "200":
        print(get_message(ftp_socket))
def ftp_noop(ftp_socket):
    """
    goal: send NOOP; on a 200 reply read and print the follow-up message
    type: (socket) -> ()
    """
    if not ftp_socket:
        return
    send_message(ftp_socket, "noop")
    code, _ = parse_reply(get_message(ftp_socket))
    if code == "200":
        _, message = parse_reply(get_message(ftp_socket))
        print(message)
def ftp_logout(ftp_socket):
    """
    goal: send the logout command and consume the server's reply
    type: (socket) -> ()
    """
    if not ftp_socket:
        return
    send_message(ftp_socket, "logout")
    get_message(ftp_socket)
def ftp_type(ftp_socket):
    """
    goal: query the transfer representation type and print it
    type: (socket) -> ()
    """
    if not ftp_socket:
        return
    send_message(ftp_socket, "type")
    code, _ = parse_reply(get_message(ftp_socket))
    # On success the type string arrives in a second message.
    if code == "200":
        print("type =", get_message(ftp_socket))
def ftp_port(ftp_socket, address, port):
    """
    goal: tell the server which (address, port) to use for the data channel
    type: (socket, string, int) -> ()
    """
    if not ftp_socket:
        return
    send_message(ftp_socket, "port {} {}".format(address, port))
    # Consume (and discard) the server's acknowledgement.
    get_message(ftp_socket)
def ftp_list(ftp_socket, data_socket):
"""
goal: list directory contents
type: (socket, socket) -> ()
"""
if ftp_socket and data_socket:
send_message(ftp_socket, "list")
reply = get_message(ftp_socket)
code, message = parse_reply(reply)
if code == | |
    def test_only_required_fields(self):
        """Schema accepts a supplemental description stripped to required fields."""
        del self.supplemental_description['assertionReferenceId']
        del self.supplemental_description['assertionReferenceIdLabel']
        del self.supplemental_description['system']
        errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
        self.assertEqual(len(errors), 0)
    def test_all_required_fields_missing(self):
        """Removing every required field yields one oneOf-mismatch error whose
        causes list the missing fields for both candidate schemas."""
        del self.supplemental_description['assertionId']
        del self.supplemental_description['informationType']
        del self.supplemental_description['dataObject']
        del self.supplemental_description['securityTag']
        errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
        self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
        self.assertEqual(len(errors[0].causes), 2)
        self.assertEqual(errors[0].causes[0].message, 'required properties [assertionId, dataSize, informationType, securityTag, sha256DataHash] are missing')
        self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
        self.assertEqual(len(errors[0].causes[0].causes), 0)
        self.assertEqual(errors[0].causes[1].message, 'required properties [assertionId, dataObject, informationType, securityTag] are missing')
        self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
        self.assertEqual(len(errors[0].causes[1].causes), 0)
    def test_additional_field(self):
        """One unknown property yields one oneOf-mismatch error with three
        causes (additional-properties violations plus missing alternatives)."""
        self.supplemental_description['foo'] = 'a'
        errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
        self.assertEqual(len(errors), 1)
        self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
        self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
        self.assertEqual(len(errors[0].causes), 3)
        self.assertEqual(errors[0].causes[0].message, 'additional properties [dataObject, foo] are not allowed')
        self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
        self.assertEqual(len(errors[0].causes[0].causes), 0)
        self.assertEqual(errors[0].causes[1].message, 'additional property foo is not allowed')
        self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
        self.assertEqual(len(errors[0].causes[1].causes), 0)
        self.assertEqual(errors[0].causes[2].message, 'required properties [dataSize, sha256DataHash] are missing')
        self.assertEqual(errors[0].causes[2].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
        self.assertEqual(len(errors[0].causes[2].causes), 0)
def test_additional_fields(self):
    """Several unknown properties must make the item match no schema."""
    self.supplemental_description['foo'] = 'a'
    self.supplemental_description['bar'] = 'a'
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional properties [bar, dataObject, foo] are not allowed', sd_loc),
        ('additional properties [bar, foo] are not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_id_missing(self):
    """assertionId is mandatory; deleting it fails every candidate schema."""
    del self.supplemental_description['assertionId']
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [assertionId, dataSize, sha256DataHash] are missing', sd_loc),
        ('required property assertionId is missing', sd_loc),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_id_null(self):
    """A null assertionId must be rejected as a type violation."""
    self.supplemental_description['assertionId'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ('property assertionId with null value should be of type string',
         sd_loc + '/assertionId'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_id_too_short(self):
    """An empty assertionId violates the minimum-length constraint."""
    self.supplemental_description['assertionId'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ("property value '' for assertionId property is too short, minimum length 1",
         sd_loc + '/assertionId'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_id_too_long(self):
    """A 257-character assertionId violates the maximum-length constraint."""
    self.supplemental_description['assertionId'] = 'a' * 257
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ("property value '{}' for assertionId property is too long, maximum length 256".format('a' * 257),
         sd_loc + '/assertionId'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_reference_id_missing(self):
    """assertionReferenceId is optional; removing it validates cleanly."""
    del self.supplemental_description['assertionReferenceId']
    self.assertEqual(
        len(TiesSchemaValidator().all_errors(json.dumps(self.ties))), 0)
def test_assertion_reference_id_null(self):
    """A null assertionReferenceId must be rejected as a type violation."""
    self.supplemental_description['assertionReferenceId'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ('property assertionReferenceId with null value should be of type string',
         sd_loc + '/assertionReferenceId'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_reference_id_too_short(self):
    """An empty assertionReferenceId violates the minimum-length rule."""
    self.supplemental_description['assertionReferenceId'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ("property value '' for assertionReferenceId property is too short, minimum length 1",
         sd_loc + '/assertionReferenceId'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_reference_id_label_missing(self):
    """assertionReferenceIdLabel is optional; removing it validates cleanly."""
    del self.supplemental_description['assertionReferenceIdLabel']
    self.assertEqual(
        len(TiesSchemaValidator().all_errors(json.dumps(self.ties))), 0)
def test_assertion_reference_id_label_null(self):
    """A null assertionReferenceIdLabel must be rejected as a type violation."""
    self.supplemental_description['assertionReferenceIdLabel'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ('property assertionReferenceIdLabel with null value should be of type string',
         sd_loc + '/assertionReferenceIdLabel'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_assertion_reference_id_label_too_short(self):
    """An empty assertionReferenceIdLabel violates the minimum-length rule."""
    self.supplemental_description['assertionReferenceIdLabel'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ("property value '' for assertionReferenceIdLabel property is too short, minimum length 1",
         sd_loc + '/assertionReferenceIdLabel'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_system_missing(self):
    """system is optional; removing it validates cleanly."""
    del self.supplemental_description['system']
    self.assertEqual(
        len(TiesSchemaValidator().all_errors(json.dumps(self.ties))), 0)
def test_system_null(self):
    """A null system must be rejected as a type violation."""
    self.supplemental_description['system'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ('property system with null value should be of type string',
         sd_loc + '/system'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_system_too_short(self):
    """An empty system violates the minimum-length constraint."""
    self.supplemental_description['system'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ("property value '' for system property is too short, minimum length 1",
         sd_loc + '/system'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_information_type_missing(self):
    """informationType is mandatory; deleting it fails every schema."""
    del self.supplemental_description['informationType']
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, informationType, sha256DataHash] are missing', sd_loc),
        ('required property informationType is missing', sd_loc),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_information_type_null(self):
    """A null informationType must be rejected as a type violation."""
    self.supplemental_description['informationType'] = None
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ('property informationType with null value should be of type string',
         sd_loc + '/informationType'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_information_type_too_short(self):
    """An empty informationType violates the minimum-length constraint."""
    self.supplemental_description['informationType'] = ''
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('additional property dataObject is not allowed', sd_loc),
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ("property value '' for informationType property is too short, minimum length 1",
         sd_loc + '/informationType'),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_data_object_missing(self):
    """Removing dataObject still fails both candidate schemas."""
    del self.supplemental_description['dataObject']
    errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
    self.assertEqual(len(errors), 1)
    sd_loc = '/objectItems[0]/objectAssertions/supplementalDescriptions[0]'
    self.assertEqual(
        errors[0].message,
        'content for array property at index 0 in supplementalDescriptions '
        'does not match any of the possible schema definitions')
    self.assertEqual(errors[0].location, sd_loc)
    expected_causes = [
        ('required properties [dataSize, sha256DataHash] are missing', sd_loc),
        ('required property dataObject is missing', sd_loc),
    ]
    self.assertEqual(len(errors[0].causes), len(expected_causes))
    for cause, (message, location) in zip(errors[0].causes, expected_causes):
        self.assertEqual(cause.message, message)
        self.assertEqual(cause.location, location)
        self.assertEqual(len(cause.causes), 0)
def test_data_object_null(self):
self.supplemental_description['dataObject'] = None
errors = TiesSchemaValidator().all_errors(json.dumps(self.ties))
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].message, 'content for array property at index 0 in supplementalDescriptions does not match any of the possible schema definitions')
self.assertEqual(errors[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes), 3)
self.assertEqual(errors[0].causes[0].message, 'additional property dataObject is not allowed')
self.assertEqual(errors[0].causes[0].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[0].causes), 0)
self.assertEqual(errors[0].causes[1].message, 'required properties [dataSize, sha256DataHash] are missing')
self.assertEqual(errors[0].causes[1].location, '/objectItems[0]/objectAssertions/supplementalDescriptions[0]')
self.assertEqual(len(errors[0].causes[1].causes), 0)
self.assertEqual(errors[0].causes[2].message, 'property dataObject with null value should be of type object')
self.assertEqual(errors[0].causes[2].location, | |
= -np.ones(dim)
self.adecuaN = 0.0
nota = 1
else:
nota = -1
return nota
def ExtraeTags(self, raiz, llave):
    """Collect the text of consecutively numbered child tags.

    Under the child element named *llave* ("NombresSalidas" or
    "NombresEntradas") this reads the tags A0, A1, ... (respectively
    N0, N1, ...) in order, stopping at the first missing index.

    raiz:  parsed XML root (xml.etree.ElementTree.Element).
    llave: section name; also selects the per-item tag prefix.
    Returns the list of tag texts (empty if no numbered tag exists).
    """
    prefijo = {"NombresSalidas": "A", "NombresEntradas": "N"}[llave]
    vec = []
    # The section element never changes, so look it up once instead of
    # on every loop iteration.
    seccion = raiz.find(llave)
    i = 0
    while True:
        n = seccion.find(prefijo + str(i))
        # `is None` instead of `== None`: identity is the documented way
        # to test for a missing element (PEP 8 E711); `==` invokes rich
        # comparison on Element objects.
        if n is None:
            break
        vec.append(n.text)
        i += 1
    return vec
def AbrirTXT(self, archivo):
    """Load a pattern set from a plain-text file.

    Expected layout:
      line 0:  'Patrones: <problem title>'
      line 1:  'Salidas: <class names, comma separated>'
      line 2:  'Entradas: <input names, comma separated>'
      line 3+: comma-separated numeric rows; last column = class label.

    Returns 1 on success, -1 when the header line is missing, -2 when
    the numeric matrix is malformed or too small, -3 when some class
    label in 0..max is absent. On failure self.patrones is reset to a
    3x2 zero matrix.
    """
    # 'with' guarantees the handle is closed even if reading raises
    # (the original open/readlines/close leaked it on error); also
    # avoid shadowing the builtin name `file`.
    with open(archivo, "r") as fh:
        data = fh.readlines()
    if "Patrones: " in data[0]:
        # Parse the numeric block that starts at the fourth line.
        tx = StringIO("".join(data[3:]))
        self.patrones = np.loadtxt(tx, delimiter=",")
        if self.patrones.ndim != 2:
            self.patrones = np.zeros((3, 2))
            nota = -2
        elif self.patrones.shape[1] < 2 or self.patrones.shape[0] < 3:
            self.patrones = np.zeros((3, 2))
            nota = -2
        elif not self.ClasesPatronadas():
            self.patrones = np.zeros((3, 2))
            nota = -3
        else:
            # Matrix is valid: read labels and metadata.
            dim = self.patrones.shape[1] - 1
            cla = int(self.patrones[:, dim].max() + 1)
            # First line carries the problem title.
            self.titulo = data[0].replace("\n", "").replace("Patrones: ", "")
            # Second line: names of the outputs/classes.
            apo = data[1].replace("Salidas:", "").replace(" ", "").replace("\n", "").split(",")
            # Fall back to empty names when the count does not match.
            self.apodos = apo if len(apo) == cla else [""] * cla
            # Third line: names of the inputs.
            apo = data[2].replace("Entradas:", "").replace(" ", "").replace("\n", "").split(",")
            self.entradas = apo if len(apo) == dim else [""] * dim
            # Default normalisation bookkeeping ([-1, 1], no scaling yet).
            self.adecuaH = np.ones(dim)
            self.adecuaL = -np.ones(dim)
            self.adecuaN = 0.0
            nota = 1
    else:
        nota = -1
    return nota
def ClasesPatronadas(self):
    """Return True when every class label 0..max appears at least once
    in the pattern matrix (labels live in the last column)."""
    etiquetas = self.patrones[:, self.patrones.shape[1] - 1]
    total = int(etiquetas.max() + 1)
    for clase in range(total):
        if clase not in etiquetas:
            return False
    return True
def ClasesEntrenadas(self):
    """Return True when the training slice (first numPEVT[0] rows)
    contains every class present in the full pattern set."""
    dim = self.patrones.shape[1] - 1
    total = int(self.patrones[:, dim].max() + 1)
    entrenamiento = self.patrones[:self.numPEVT[0], dim]
    for clase in range(total):
        if clase not in entrenamiento:
            return False
    return True
def AdecuarPatrones(self, normal, general, zscore):
    """Normalise the input columns of self.patrones in place.

    zscore == 1 -> z-score standardisation (adecuaN is set to -1.0 and
    adecuaH/adecuaL store the mean/std used); otherwise min-max scaling
    into [-normal, normal] (adecuaN stores `normal`, adecuaH/adecuaL the
    max/min). `general` selects per-column statistics; when falsy a
    single global statistic is broadcast to every column.
    """
    dim = self.patrones.shape[1] - 1
    entradas = self.patrones[:, :dim]
    if zscore == 1:
        self.adecuaN = -1.0
        if general:
            promedio = np.mean(entradas, axis=0)
            desviEst = np.std(entradas, axis=0)
        else:
            # One global mean/std replicated across all columns.
            promedio = np.zeros(dim) + np.mean(entradas)
            desviEst = np.zeros(dim) + np.std(entradas)
        self.adecuaH = promedio
        self.adecuaL = desviEst
        self.patrones[:, :dim] = (entradas - promedio) / desviEst
    else:
        self.adecuaN = normal
        bajo, alto = -normal, normal
        if general:
            m = np.min(entradas, axis=0)
            M = np.max(entradas, axis=0)
        else:
            # One global min/max replicated across all columns.
            m = np.zeros(dim) + np.min(entradas)
            M = np.zeros(dim) + np.max(entradas)
        self.adecuaH = M
        self.adecuaL = m
        # Affine map from [m, M] onto [bajo, alto].
        self.patrones[:, :dim] = ((entradas - m) / (M - m)) * (alto - bajo) + bajo
class Red():
def __init__(self):
    """Create an empty dendritic morphological network (DMNN)."""
    # Synaptic weights of the whole net, flattened as (high, low) pairs.
    self.pesW = np.array([0.0])
    # Number of dendrites assigned to each output neuron.
    self.numK = np.array([0])
    # Activity mask: one boolean flag per dendrite.
    self.actK = np.array([True])
    # Error of the current solution and of the best solution found.
    self.error = 0.0
    self.errorB = 0.0
    # Hard ceiling for synaptic weight magnitudes.
    self.lim = 1000.0
def EjecutarRed(self, entradas):
    """Forward pass: return the softmax-like class activations for one
    input vector, using min/max dendritic (hyper-box) responses."""
    # Tile the input until there is one value per (high, low) weight pair.
    X = entradas.copy()
    while X.size < self.pesW.size / 2:
        X = np.hstack((X, entradas))
    pares = self.pesW.copy().reshape(-1, 2)
    # Per-coordinate response: signed distance to the nearer box face.
    porEje = np.minimum(pares[:, 0] - X, X - pares[:, 1])
    # Dendrite response = worst coordinate of its box.
    Smk = porEje.reshape(-1, entradas.size).min(axis=1)
    # Disabled dendrites are forced to a huge negative response.
    Smk = np.where(self.actK, Smk, -1000000.0)
    # Neuron activation = best dendrite of each class.
    Zm = np.zeros(self.numK.size)
    inicio = 0
    for m, k in enumerate(self.numK):
        Zm[m] = Smk[inicio:inicio + k].max()
        inicio += k
    # Softmax with a capped denominator to avoid overflow-driven blowups.
    Zm = np.exp(Zm)
    return Zm / min(Zm.sum(), 1000000.0)
def errorCM(self, patrones):
    """Store in self.error the misclassification rate over *patrones*
    (rows are inputs; the last column is the expected class label)."""
    self.error = 0.0
    dim = patrones.shape[1] - 1
    for fila in patrones:
        if np.argmax(self.EjecutarRed(fila[:dim])) != fila[dim]:
            self.error += 1.0
    self.error /= patrones.shape[0]
def errorRL(self, patrones):
    """Store in self.error the mean scaled negative log10-likelihood of
    the true class, clipping probabilities into [1e-6, 1]."""
    self.error = 0.0
    dim = patrones.shape[1] - 1
    for fila in patrones:
        prob = np.clip(self.EjecutarRed(fila[:dim])[int(fila[dim])],
                       0.000001, 1.0)
        # -log10(1e-6)/6 == 1, so each term lies in [0, 1].
        self.error += -np.log10(prob) / 6.0
    self.error /= patrones.shape[0]
def KmediasItera(self, patrones, clusters, dimension):
    """Initialise the net via K-means; when clusters == 0, grow the
    cluster count until the error improvement drops below 1%."""
    if clusters != 0:
        # Caller fixed the number of clusters: single run.
        self.Kmedias(patrones, clusters, dimension)
        return
    # Try 1, 2, 3, ... clusters, tracking the last two error values.
    errorV = np.ones(2)
    cc = 0
    seguir = True
    while seguir:
        seguir = False
        cc += 1
        self.Kmedias(patrones, cc, dimension)
        self.errorCM(patrones)
        # Keep growing while either of the last two steps improved > 1%.
        if max(errorV[0] - self.error, errorV[1] - errorV[0]) > 0.01:
            seguir = True
        errorV[1] = errorV[0]
        errorV[0] = self.error
def Kmedias(self, patrones, clusters, dimension):
    """Initialise the DMNN by running per-class K-means and turning each
    centroid into a hyper-box.

    patrones:  (P, dim+1) array; the last column holds integer class labels.
    clusters:  requested boxes for the largest class; smaller classes get a
               proportional share, never fewer than one box.
    dimension: relative box size factor (quarter of each input range).

    NOTE(review): initial centroids are drawn with np.random.randint, so
    results are stochastic unless the NumPy seed is fixed by the caller.
    Sets self.numK, self.actK and self.pesW as a side effect.
    """
    dim = patrones.shape[1] - 1
    # Half-width of every box along each axis, scaled by `dimension`.
    mrg = (patrones[:, :dim].max(axis=0) - patrones[:, :dim].min(axis=0)) * dimension * 0.25
    # Pattern -> centroid assignment (-1 means "not in the current class").
    Ap = np.ones(patrones.shape[0], dtype=int) * -1
    # Centroid storage, reused for every class.
    Cen = np.zeros((clusters, dim))
    # Dendrite (box) count per class: proportional to class frequency.
    self.numK = np.zeros(int(patrones[:, dim].max() + 1), dtype=int)
    for m in range(self.numK.size):
        self.numK[m] = np.where(patrones[:, dim] == m, 1, 0).sum()
    mayor = self.numK.max()
    for m in range(self.numK.size):
        self.numK[m] = max(1, min(self.numK[m], int(np.ceil((self.numK[m] / mayor) * clusters))))
    # Every dendrite starts active.
    self.actK = np.ones(self.numK.sum()) > 0
    self.pesW = np.array([])
    for m in range(self.numK.size):
        # Pick distinct random seed patterns of class m as initial centroids.
        enlist = [-1]
        for c in range(self.numK[m]):
            uno = np.random.randint(patrones.shape[0])
            while patrones[uno, dim] != m or uno in enlist:
                uno = np.random.randint(patrones.shape[0])
            enlist.append(uno)
            Cen[c, :] = patrones[uno, :dim]
        # Standard K-means loop: assign, then recompute, until stable.
        cambio = True
        while cambio:
            cambio = False
            # Assign each class-m pattern to its nearest centroid.
            for p in range(patrones.shape[0]):
                if patrones[p, dim] == m:
                    dist = np.sqrt(np.power(Cen[:self.numK[m], :] - patrones[p, :dim], 2.0).sum(axis=1))
                    Ap[p] = np.argmin(dist)
                else:
                    Ap[p] = -1
            # Move each centroid to the mean of its assigned patterns.
            viejo = Cen[:self.numK[m], :].copy()
            Cen[:self.numK[m], :] *= 0.0
            for c in range(self.numK[m]):
                n = 0.0
                for p in range(patrones.shape[0]):
                    if Ap[p] == c:
                        Cen[c, :] += patrones[p, :dim]
                        n += 1.0
                # Tiny divisor stands in for "empty cluster" to avoid /0.
                Cen[c, :] /= (n if n != 0 else 0.000001)
            if (False in (viejo == Cen[:self.numK[m], :])):
                cambio = True
        # Wrap each centroid in a box of half-width mrg and append the
        # (high, low) pairs to the flat weight vector.
        vH = (Cen[:self.numK[m], :] + mrg).ravel()
        vL = (Cen[:self.numK[m], :] - mrg).ravel()
        self.pesW = np.concatenate((self.pesW, np.dstack((vH, vL)).ravel()))
def DyC(self, patrones, margen, unir):
dim = patrones.shape[1] - 1
# calcular margen
mrg = (patrones[:, :dim].max(axis=0) - patrones[:, :dim].min(axis=0)) * margen * 0.05
# crear la primera caja que abarca todos los patrones
pertenece = np.array([-1])
vH = patrones[:, :dim].max(axis=0)
vL = patrones[:, :dim].min(axis=0)
cajasH = np.atleast_2d(vH + (vH - vL) * 0.05).copy()
cajasL = np.atleast_2d(vL - (vH - vL) * 0.05).copy()
# hacer ciclo de division de cajas
while -1 in pertenece:
for c in range(pertenece.size):
if pertenece[c] == -1:
# verifica que solo un tipo de patron este en la caja
res = -2
for p in range(patrones.shape[0]):
den = True
if True in np.hstack((patrones[p, :dim] > cajasH[c, :], patrones[p, :dim] < cajasL[c, :])):
den = False
if den:
if res == -2:
res = patrones[p, dim]
elif res != patrones[p, dim]:
res = -1
break
# decide que hacer segun la respuesta anterior
if res != -1:
# se asocia la caja a la clase y no se le afecta mas
pertenece[c] = res
else:
# desactiva la caja y crea un nuevo sistema
pertenece[c] = -2
nuevasH = np.atleast_2d(cajasH[c, :]).copy()
nuevasL = np.atleast_2d(cajasL[c, :]).copy()
# iterativamente se duplican y dividen
for i in range(dim):
dl = abs(nuevasH[0, i] - nuevasL[0, i]) / 2.0
dl = min(dl + mrg[i], dl * 1.9)
for h in range(pow(2, i)):
nuevasH = np.concatenate((nuevasH, np.atleast_2d(nuevasH[h, :]).copy()), axis=0)
nuevasL = np.concatenate((nuevasL, np.atleast_2d(nuevasL[h, :]).copy()), axis=0)
nuevasH[h, i] -= dl
nuevasL[-1, i] += dl
# se agregan las nuevas cajas a las originales
pertenece = np.concatenate((pertenece, np.ones(nuevasH.shape[0], dtype=int) * -1))
cajasH = np.concatenate((cajasH, nuevasH), axis=0)
cajasL = np.concatenate((cajasL, nuevasL), axis=0)
# hacer ciclo de union de hipercajas
if unir:
cambio = True
while cambio:
cambio = False
for c in range(pertenece.size):
if pertenece[c] != -2:
for cc in range(c, pertenece.size):
if pertenece[c] == pertenece[cc] and c != cc:
# forma la caja mas grande posible entre ambas
nuevasH = np.where(cajasH[c, :] | |
The protocol currently only
defines one value for this, "X{C{HMAC-SHA1}}".
@type assoc_type: str
@ivar session: An object that knows how to handle association
requests of a certain type.
@see: U{OpenID Specs, Mode: associate
<http://openid.net/specs.bml#mode-associate>}
"""
mode = "associate"
assoc_type = 'HMAC-SHA1'
# Maps the openid.session_type query value to the session implementation
# that handles it; the None key covers requests that omit the argument
# (plain-text secret exchange).
session_classes = {
    None: PlainTextServerSession,
    'DH-SHA1': DiffieHellmanServerSession,
}
def __init__(self, session):
    """Construct me.

    The session is assigned directly as a class attribute. See my
    L{class documentation<AssociateRequest>} for its description.

    @param session: a session object (one of L{session_classes}) that
        knows how to answer with the association secret, encrypted if
        the session type requires it.
    """
    super(AssociateRequest, self).__init__()
    self.session = session
def fromQuery(klass, query):
    """Construct me from a web query.

    @param query: The query parameters as a dictionary with each
        key mapping to one value.
    @type query: dict

    @raises ProtocolError: when the session type is unknown or the
        session parameters cannot be parsed.

    @returntype: L{AssociateRequest}
    """
    session_type = query.get(OPENID_PREFIX + 'session_type')
    try:
        session_class = klass.session_classes[session_type]
    except KeyError:
        raise ProtocolError(query,
                            "Unknown session type %r" % (session_type,))
    try:
        session = session_class.fromQuery(query)
    # `except ... as` (Python 2.6+) replaces the legacy comma syntax,
    # and `.args[0]` replaces indexing the exception object; both the
    # old forms are rejected by Python 3.
    except ValueError as why:
        raise ProtocolError(query, 'Error parsing %s session: %s' %
                            (session_class.session_type, why.args[0]))
    return klass(session)

fromQuery = classmethod(fromQuery)
def answer(self, assoc):
    """Respond to this request with an X{association}.

    @param assoc: The association to send back.
    @type assoc: L{openid.association.Association}

    @returns: A response with the association information, encrypted
        to the consumer's X{public key} if appropriate.
    @returntype: L{OpenIDResponse}
    """
    response = OpenIDResponse(self)
    response.fields.update({
        'expires_in': '%d' % (assoc.getExpiresIn(),),
        # Use the class attribute rather than repeating the literal so
        # the advertised type cannot drift from the one we support.
        'assoc_type': self.assoc_type,
        'assoc_handle': assoc.handle,
    })
    # The session encrypts (or passes through) the shared secret.
    response.fields.update(self.session.answer(assoc.secret))
    # Plain-text sessions omit session_type for OpenID 1.x compatibility.
    if self.session.session_type != 'plaintext':
        response.fields['session_type'] = self.session.session_type
    return response
class CheckIDRequest(OpenIDRequest):
    """A request to confirm the identity of a user.

    This class handles requests for openid modes X{C{checkid_immediate}}
    and X{C{checkid_setup}}.

    @cvar mode: "X{C{checkid_immediate}}" or "X{C{checkid_setup}}"
    @type mode: str

    @ivar immediate: Is this an immediate-mode request?
    @type immediate: bool

    @ivar identity: The identity URL being checked.
    @type identity: str

    @ivar trust_root: "Are you Frank?" asks the checkid request.  "Who wants
        to know?"  C{trust_root}, that's who.  This URL identifies the party
        making the request, and the user will use that to make her decision
        about what answer she trusts them to have.
    @type trust_root: str

    @ivar return_to: The URL to send the user agent back to to reply to this
        request.
    @type return_to: str

    @ivar assoc_handle: Provided in smart mode requests, a handle for a
        previously established association.  C{None} for dumb mode requests.
    @type assoc_handle: str
    """

    def __init__(self, identity, return_to, trust_root=None, immediate=False,
                 assoc_handle=None):
        """Construct me.

        These parameters are assigned directly as class attributes, see
        my L{class documentation<CheckIDRequest>} for their descriptions.

        @raises MalformedReturnURL: When the C{return_to} URL is not a URL.
        @raises UntrustedReturnURL: When C{return_to} is outside C{trust_root}.
        """
        self.assoc_handle = assoc_handle
        self.identity = identity
        self.return_to = return_to
        # A missing trust_root defaults to the return_to URL itself.
        self.trust_root = trust_root or return_to
        if immediate:
            self.immediate = True
            self.mode = "checkid_immediate"
        else:
            self.immediate = False
            self.mode = "checkid_setup"

        if not TrustRoot.parse(self.return_to):
            raise MalformedReturnURL(None, self.return_to)

        if not self.trustRootValid():
            raise UntrustedReturnURL(None, self.return_to, self.trust_root)

    def fromQuery(klass, query):
        """Construct me from a web query.

        @raises ProtocolError: When not all required parameters are present
            in the query.

        @raises MalformedReturnURL: When the C{return_to} URL is not a URL.

        @raises UntrustedReturnURL: When the C{return_to} URL is outside
            the C{trust_root}.

        @param query: The query parameters as a dictionary with each
            key mapping to one value.
        @type query: dict

        @returntype: L{CheckIDRequest}
        """
        self = klass.__new__(klass)
        mode = query[OPENID_PREFIX + 'mode']
        if mode == "checkid_immediate":
            self.immediate = True
            self.mode = "checkid_immediate"
        else:
            self.immediate = False
            self.mode = "checkid_setup"

        required = [
            'identity',
            'return_to',
            ]

        for field in required:
            value = query.get(OPENID_PREFIX + field)
            if not value:
                raise ProtocolError(
                    query,
                    text="Missing required field %s from %r"
                    % (field, query))
            setattr(self, field, value)

        # There's a case for making self.trust_root be a TrustRoot
        # here.  But if TrustRoot isn't currently part of the "public" API,
        # I'm not sure it's worth doing.
        self.trust_root = query.get(OPENID_PREFIX + 'trust_root', self.return_to)
        self.assoc_handle = query.get(OPENID_PREFIX + 'assoc_handle')

        # Using TrustRoot.parse here is a bit misleading, as we're not
        # parsing return_to as a trust root at all.  However, valid URLs
        # are valid trust roots, so we can use this to get an idea if it
        # is a valid URL.  Not all trust roots are valid return_to URLs,
        # however (particularly ones with wildcards), so this is still a
        # little sketchy.
        if not TrustRoot.parse(self.return_to):
            raise MalformedReturnURL(query, self.return_to)

        # I first thought that checking to see if the return_to is within
        # the trust_root is premature here, a logic-not-decoding thing.  But
        # it was argued that this is really part of data validation.  A
        # request with an invalid trust_root/return_to is broken regardless of
        # application, right?
        if not self.trustRootValid():
            raise UntrustedReturnURL(query, self.return_to, self.trust_root)

        return self

    fromQuery = classmethod(fromQuery)

    def trustRootValid(self):
        """Is my return_to under my trust_root?

        @returntype: bool
        """
        if not self.trust_root:
            return True
        tr = TrustRoot.parse(self.trust_root)
        if tr is None:
            raise MalformedTrustRoot(None, self.trust_root)
        return tr.validateURL(self.return_to)

    def answer(self, allow, server_url=None):
        """Respond to this request.

        @param allow: Allow this user to claim this identity, and allow the
            consumer to have this information?
        @type allow: bool

        @param server_url: When an immediate mode request does not
            succeed, it gets back a URL where the request may be
            carried out in a not-so-immediate fashion.  Pass my URL
            in here (the fully qualified address of this server's
            endpoint, i.e. C{http://example.com/server}), and I
            will use it as a base for the URL for a new request.

            Optional for requests where C{CheckIDRequest.immediate} is C{False}
            or C{allow} is C{True}.
        @type server_url: str

        @returntype: L{OpenIDResponse}
        """
        # Immediate-mode denials still answer 'id_res', but carry a
        # user_setup_url instead of the identity fields.
        if allow or self.immediate:
            mode = 'id_res'
        else:
            mode = 'cancel'

        response = OpenIDResponse(self)

        if allow:
            response.addFields(None, {
                'mode': mode,
                'identity': self.identity,
                'return_to': self.return_to,
                })
        else:
            response.addField(None, 'mode', mode, False)
            if self.immediate:
                if not server_url:
                    raise ValueError("setup_url is required for allow=False "
                                     "in immediate mode.")
                # Make a new request just like me, but with immediate=False.
                setup_request = self.__class__(
                    self.identity, self.return_to, self.trust_root,
                    immediate=False, assoc_handle=self.assoc_handle)
                setup_url = setup_request.encodeToURL(server_url)
                response.addField(None, 'user_setup_url', setup_url, False)

        return response

    def encodeToURL(self, server_url):
        """Encode this request as a URL to GET.

        @param server_url: The URL of the OpenID server to make this request of.
        @type server_url: str

        @returntype: str
        """
        # Imported from the alternate reality where these classes are used
        # in both the client and server code, so Requests are Encodable too.
        # That's right, code imported from alternate realities all for the
        # love of you, id_res/user_setup_url.
        q = {'mode': self.mode,
             'identity': self.identity,
             'return_to': self.return_to}
        if self.trust_root:
            q['trust_root'] = self.trust_root
        if self.assoc_handle:
            q['assoc_handle'] = self.assoc_handle

        # dict.items() exists on both Python 2 and 3; the original
        # iteritems() is Python-2-only and behaves identically here.
        q = dict([(OPENID_PREFIX + k, v) for k, v in q.items()])

        return oidutil.appendArgs(server_url, q)

    def getCancelURL(self):
        """Get the URL to cancel this request.

        Useful for creating a "Cancel" button on a web form so that operation
        can be carried out directly without another trip through the server.

        (Except you probably want to make another trip through the server so
        that it knows that the user did make a decision.  Or you could simulate
        this method by doing C{.answer(False).encodeToURL()})

        @returntype: str
        @returns: The return_to URL with openid.mode = cancel.
        """
        if self.immediate:
            raise ValueError("Cancel is not an appropriate response to "
                             "immediate mode requests.")
        return oidutil.appendArgs(self.return_to, {OPENID_PREFIX + 'mode':
                                                   'cancel'})

    def __str__(self):
        return '<%s id:%r im:%s tr:%r ah:%r>' % (self.__class__.__name__,
                                                 self.identity,
                                                 self.immediate,
                                                 self.trust_root,
                                                 self.assoc_handle)
class OpenIDResponse(object):
"""I am a response to an OpenID request.
@ivar request: The request I respond to.
@type request: L{OpenIDRequest}
@ivar fields: My parameters as a dictionary with each key mapping to
one value. Keys are parameter names with no leading "C{openid.}".
e.g. "C{identity}" and "C{mac_key}", never "C{openid.identity}".
@type fields: dict
@ivar signed: The names of the fields which should be signed.
@type signed: list of str
"""
# Implementer's note: In a more symmetric client/server
# implementation, there would be more types of OpenIDResponse
# object and they would have validated attributes according to the
# type of response. But as it is, Response objects in a server are
# basically write-only, their only job is to go out over the wire,
# so this is just a loose wrapper around OpenIDResponse.fields.
def __init__(self, request):
    """Make a response to an L{OpenIDRequest}.

    @param request: the request I answer; kept as C{self.request}.
        C{self.fields} (parameter dict) and C{self.signed} (names of
        fields to sign) start empty and are filled in by the request's
        C{answer} methods.
    @type request: L{OpenIDRequest}
    """
    self.request = request
    self.fields = {}
    self.signed = []
| |
0.3696 * self.C_wp) # + 2.38 * (self.A_BT / self.C_b)
self.S_APP = 0.05 * self.S_T # Wet area of appendages
self.S_B = self.L * self.B # Area of flat bottom
self.D_s = 0.7 * self.T # Diameter of the screw
def calculate_frictional_resistance(self, V_0, h):
"""1) Frictional resistance
- 1st resistance component defined by Holtrop and Mennen (1982)
- A modification to the original friction line is applied, based on literature of Zeng (2018), to account for shallow water effects """
self.R_e = V_0 * self.L / self.nu # Reynolds number
self.D = h - self.T # distance from bottom ship to the bottom of the fairway
# Friction coefficient in deep water
self.Cf_0 = 0.075 / ((np.log10(self.R_e) - 2) ** 2)
# Friction coefficient proposed, taking into account shallow water effects
self.Cf_proposed = (0.08169 / ((np.log10(self.R_e) - 1.717) ** 2)) * (
1 + (0.003998 / (np.log10(self.R_e) - 4.393)) * (self.D / self.L) ** (-1.083))
# 'a' is the coefficient needed to calculate the Katsui friction coefficient
self.a = 0.042612 * np.log10(self.R_e) + 0.56725
self.Cf_katsui = 0.0066577 / ((np.log10(self.R_e) - 4.3762) ** self.a)
# The average velocity underneath the ship, taking into account the shallow water effect
if h / self.T <= 4:
self.V_B = 0.4277 * V_0 * np.exp((h / self.T) ** (-0.07625))
else:
self.V_B = V_0
# cf_proposed cannot be applied directly, since a vessel also has non-horizontal wet surfaces that have to be taken
# into account. Therefore, the following formula for the final friction coefficient 'C_f' is defined:
self.C_f = self.Cf_0 + (self.Cf_proposed - self.Cf_katsui) * (self.S_B / self.S_T) * (self.V_B / V_0) ** 2
# The total frictional resistance R_f [kN]:
self.R_f = (self.C_f * 0.5 * self.rho * (V_0 ** 2) * self.S_T) / 1000
def calculate_viscous_resistance(self):
"""2) Viscous resistance
- 2nd resistance component defined by Holtrop and Mennen (1982)
- Form factor (1 + k1) has to be multiplied by the frictional resistance R_f, to account for the effect of viscosity"""
# c_14 accounts for the specific shape of the afterbody
self.c_14 = 1 + 0.0011 * self.c_stern
# the form factor (1+k1) describes the viscous resistance
self.one_k1 = 0.93 + 0.487 * self.c_14 * ((self.B / self.L) ** 1.068) * ((self.T / self.L) ** 0.461) * (
(self.L / self.L_R) ** 0.122) * (((self.L ** 3) / self.delta) ** 0.365) * (
(1 - self.C_p) ** (-0.604))
def calculate_appendage_resistance(self, V_0):
    """3) Appendage resistance.

    Third resistance component defined by Holtrop and Mennen (1982):
    appendages (rudder, shafts, skeg) add frictional resistance over
    their wetted area S_APP.  Sets R_APP [kN].
    """
    # Dynamic pressure times appendage area, scaled by the appendage
    # form factor (1 + k2) and the friction coefficient, in kN.
    dynamic_pressure = 0.5 * self.rho * (V_0 ** 2)
    self.R_APP = (dynamic_pressure * self.S_APP * self.one_k2 * self.C_f) / 1000
def karpov(self, V_0, h):
    """Intermediate calculation: Karpov

    - The Karpov method computes a velocity correction that accounts for limited water depth (corrected velocity V2)
    - V2 has to be implemented in the wave resistance and the residual resistance terms

    V_0 is the vessel speed and h the water depth (presumably m/s and m --
    TODO confirm units against callers).  Sets self.F_nh (depth-related
    Froude number), self.alpha_xx (correction factor selected from a family
    of polynomial fits, one per h/T band) and self.V_2 = V_0 / alpha_xx.

    NOTE(review): the polynomial coefficients below are sixth-power Excel
    curve fits; they are transcribed as-is and should not be "simplified".
    """
    # The Froude number used in the Karpov method is the depth related froude number F_nh
    # The different alpha** curves are determined with a sixth power polynomial approximation in Excel
    # A distinction is made between different ranges of Froude numbers, because this resulted in a better approximation of the curve
    self.F_nh = V_0 / np.sqrt(self.g * h)
    # Low-speed regime: alpha depends mainly on the under-keel clearance band h/T.
    if self.F_nh <= 0.4:
        if 0 <= h / self.T < 1.75:
            # NOTE(review): the -4e-12 cubic coefficient is numerically
            # negligible -- looks like an Excel-export artifact; confirm
            # against the original Karpov curve data.
            self.alpha_xx = (-4 * 10 ** (
                -12)) * self.F_nh ** 3 - 0.2143 * self.F_nh ** 2 - 0.0643 * self.F_nh + 0.9997
        if 1.75 <= h / self.T < 2.25:
            self.alpha_xx = -0.8333 * self.F_nh ** 3 + 0.25 * self.F_nh ** 2 - 0.0167 * self.F_nh + 1
        if 2.25 <= h / self.T < 2.75:
            self.alpha_xx = -1.25 * self.F_nh ** 4 + 0.5833 * self.F_nh ** 3 - 0.0375 * self.F_nh ** 2 - 0.0108 * self.F_nh + 1
        if h / self.T >= 2.75:
            self.alpha_xx = 1
    # High-speed regime: one sixth-order fit per h/T band.
    if self.F_nh > 0.4:
        if 0 <= h / self.T < 1.75:
            self.alpha_xx = -0.9274 * self.F_nh ** 6 + 9.5953 * self.F_nh ** 5 - 37.197 * self.F_nh ** 4 + 69.666 * self.F_nh ** 3 - 65.391 * self.F_nh ** 2 + 28.025 * self.F_nh - 3.4143
        if 1.75 <= h / self.T < 2.25:
            self.alpha_xx = 2.2152 * self.F_nh ** 6 - 11.852 * self.F_nh ** 5 + 21.499 * self.F_nh ** 4 - 12.174 * self.F_nh ** 3 - 4.7873 * self.F_nh ** 2 + 5.8662 * self.F_nh - 0.2652
        if 2.25 <= h / self.T < 2.75:
            self.alpha_xx = 1.2205 * self.F_nh ** 6 - 5.4999 * self.F_nh ** 5 + 5.7966 * self.F_nh ** 4 + 6.6491 * self.F_nh ** 3 - 16.123 * self.F_nh ** 2 + 9.2016 * self.F_nh - 0.6342
        if 2.75 <= h / self.T < 3.25:
            self.alpha_xx = -0.4085 * self.F_nh ** 6 + 4.534 * self.F_nh ** 5 - 18.443 * self.F_nh ** 4 + 35.744 * self.F_nh ** 3 - 34.381 * self.F_nh ** 2 + 15.042 * self.F_nh - 1.3807
        if 3.25 <= h / self.T < 3.75:
            self.alpha_xx = 0.4078 * self.F_nh ** 6 - 0.919 * self.F_nh ** 5 - 3.8292 * self.F_nh ** 4 + 15.738 * self.F_nh ** 3 - 19.766 * self.F_nh ** 2 + 9.7466 * self.F_nh - 0.6409
        if 3.75 <= h / self.T < 4.5:
            self.alpha_xx = 0.3067 * self.F_nh ** 6 - 0.3404 * self.F_nh ** 5 - 5.0511 * self.F_nh ** 4 + 16.892 * self.F_nh ** 3 - 20.265 * self.F_nh ** 2 + 9.9002 * self.F_nh - 0.6712
        if 4.5 <= h / self.T < 5.5:
            self.alpha_xx = 0.3212 * self.F_nh ** 6 - 0.3559 * self.F_nh ** 5 - 5.1056 * self.F_nh ** 4 + 16.926 * self.F_nh ** 3 - 20.253 * self.F_nh ** 2 + 10.013 * self.F_nh - 0.7196
        if 5.5 <= h / self.T < 6.5:
            self.alpha_xx = 0.9252 * self.F_nh ** 6 - 4.2574 * self.F_nh ** 5 + 5.0363 * self.F_nh ** 4 + 3.3282 * self.F_nh ** 3 - 10.367 * self.F_nh ** 2 + 6.3993 * self.F_nh - 0.2074
        if 6.5 <= h / self.T < 7.5:
            self.alpha_xx = 0.8442 * self.F_nh ** 6 - 4.0261 * self.F_nh ** 5 + 5.313 * self.F_nh ** 4 + 1.6442 * self.F_nh ** 3 - 8.1848 * self.F_nh ** 2 + 5.3209 * self.F_nh - 0.0267
        if 7.5 <= h / self.T < 8.5:
            self.alpha_xx = 0.1211 * self.F_nh ** 6 + 0.628 * self.F_nh ** 5 - 6.5106 * self.F_nh ** 4 + 16.7 * self.F_nh ** 3 - 18.267 * self.F_nh ** 2 + 8.7077 * self.F_nh - 0.4745
        # Deepest bands: no correction below F_nh = 0.6.
        if 8.5 <= h / self.T < 9.5:
            if self.F_nh < 0.6:
                self.alpha_xx = 1
            if self.F_nh >= 0.6:
                self.alpha_xx = -6.4069 * self.F_nh ** 6 + 47.308 * self.F_nh ** 5 - 141.93 * self.F_nh ** 4 + 220.23 * self.F_nh ** 3 - 185.05 * self.F_nh ** 2 + 79.25 * self.F_nh - 12.484
        if h / self.T >= 9.5:
            if self.F_nh < 0.6:
                self.alpha_xx = 1
            if self.F_nh >= 0.6:
                self.alpha_xx = -6.0727 * self.F_nh ** 6 + 44.97 * self.F_nh ** 5 - 135.21 * self.F_nh ** 4 + 210.13 * self.F_nh ** 3 - 176.72 * self.F_nh ** 2 + 75.728 * self.F_nh - 11.893
    # Corrected velocity used by the wave and residual resistance terms.
    self.V_2 = V_0 / self.alpha_xx
def calculate_wave_resistance(self, V_0, h):
"""4) Wave resistance
- 4th resistance component defined by Holtrop and Mennen (1982)
- When the speed or the vessel size increases, the wave making resistance increases
- In shallow water, the wave resistance shows an asymptotical behaviour by reaching the critical speed"""
self.karpov(V_0, h)
self.F_n = self.V_2 / np.sqrt(self.g * self.L) # | |
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.monitor.v20180724 import monitor_client as monitor_client_v20180724
from tencentcloud.monitor.v20180724 import models as models_v20180724
from tccli.services.monitor import v20180724
from tccli.services.monitor.v20180724 import help as v20180724_help
def doDescribeProductEventList(argv, arglist):
    """CLI handler: call the monitor DescribeProductEventList API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeProductEventList", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "Module": argv.get("--Module"),
        "ProductName": Utils.try_to_json(argv, "--ProductName"),
        "EventName": Utils.try_to_json(argv, "--EventName"),
        "InstanceId": Utils.try_to_json(argv, "--InstanceId"),
        "Dimensions": Utils.try_to_json(argv, "--Dimensions"),
        "RegionList": Utils.try_to_json(argv, "--RegionList"),
        "Type": Utils.try_to_json(argv, "--Type"),
        "Status": Utils.try_to_json(argv, "--Status"),
        "Project": Utils.try_to_json(argv, "--Project"),
        "IsAlarmConfig": Utils.try_to_json(argv, "--IsAlarmConfig"),
        "TimeOrder": argv.get("--TimeOrder"),
        "StartTime": Utils.try_to_json(argv, "--StartTime"),
        "EndTime": Utils.try_to_json(argv, "--EndTime"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].DescribeProductEventListRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.DescribeProductEventList(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doDescribeAccidentEventList(argv, arglist):
    """CLI handler: call the monitor DescribeAccidentEventList API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeAccidentEventList", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "Module": argv.get("--Module"),
        "StartTime": Utils.try_to_json(argv, "--StartTime"),
        "EndTime": Utils.try_to_json(argv, "--EndTime"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "UpdateTimeOrder": argv.get("--UpdateTimeOrder"),
        "OccurTimeOrder": argv.get("--OccurTimeOrder"),
        "AccidentType": Utils.try_to_json(argv, "--AccidentType"),
        "AccidentEvent": Utils.try_to_json(argv, "--AccidentEvent"),
        "AccidentStatus": Utils.try_to_json(argv, "--AccidentStatus"),
        "AccidentRegion": Utils.try_to_json(argv, "--AccidentRegion"),
        "AffectResource": argv.get("--AffectResource"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].DescribeAccidentEventListRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.DescribeAccidentEventList(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doUnBindingPolicyObject(argv, arglist):
    """CLI handler: call the monitor UnBindingPolicyObject API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("UnBindingPolicyObject", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "Module": argv.get("--Module"),
        "GroupId": Utils.try_to_json(argv, "--GroupId"),
        "UniqueId": Utils.try_to_json(argv, "--UniqueId"),
        "InstanceGroupId": Utils.try_to_json(argv, "--InstanceGroupId"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].UnBindingPolicyObjectRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.UnBindingPolicyObject(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doBindingPolicyObject(argv, arglist):
    """CLI handler: call the monitor BindingPolicyObject API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("BindingPolicyObject", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "GroupId": Utils.try_to_json(argv, "--GroupId"),
        "Module": argv.get("--Module"),
        "InstanceGroupId": Utils.try_to_json(argv, "--InstanceGroupId"),
        "Dimensions": Utils.try_to_json(argv, "--Dimensions"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].BindingPolicyObjectRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.BindingPolicyObject(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doModifyAlarmReceivers(argv, arglist):
    """CLI handler: call the monitor ModifyAlarmReceivers API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyAlarmReceivers", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "GroupId": Utils.try_to_json(argv, "--GroupId"),
        "Module": argv.get("--Module"),
        "ReceiverInfos": Utils.try_to_json(argv, "--ReceiverInfos"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].ModifyAlarmReceiversRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.ModifyAlarmReceivers(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doDescribeBindingPolicyObjectList(argv, arglist):
    """CLI handler: call the monitor DescribeBindingPolicyObjectList API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeBindingPolicyObjectList", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "Module": argv.get("--Module"),
        "GroupId": Utils.try_to_json(argv, "--GroupId"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Dimensions": Utils.try_to_json(argv, "--Dimensions"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].DescribeBindingPolicyObjectListRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.DescribeBindingPolicyObjectList(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doSendCustomAlarmMsg(argv, arglist):
    """CLI handler: call the monitor SendCustomAlarmMsg API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("SendCustomAlarmMsg", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "Module": argv.get("--Module"),
        "PolicyId": argv.get("--PolicyId"),
        "Msg": argv.get("--Msg"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].SendCustomAlarmMsgRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.SendCustomAlarmMsg(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doDeletePolicyGroup(argv, arglist):
    """CLI handler: call the monitor DeletePolicyGroup API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeletePolicyGroup", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "Module": argv.get("--Module"),
        "GroupId": Utils.try_to_json(argv, "--GroupId"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].DeletePolicyGroupRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.DeletePolicyGroup(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doDescribeBaseMetrics(argv, arglist):
    """CLI handler: call the monitor DescribeBaseMetrics API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeBaseMetrics", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "Namespace": argv.get("--Namespace"),
        "MetricName": argv.get("--MetricName"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].DescribeBaseMetricsRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.DescribeBaseMetrics(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doDescribePolicyGroupInfo(argv, arglist):
    """CLI handler: call the monitor DescribePolicyGroupInfo API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribePolicyGroupInfo", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "Module": argv.get("--Module"),
        "GroupId": Utils.try_to_json(argv, "--GroupId"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].DescribePolicyGroupInfoRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.DescribePolicyGroupInfo(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doDescribePolicyGroupList(argv, arglist):
    """CLI handler: call the monitor DescribePolicyGroupList API and print the reply."""
    global_args = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribePolicyGroupList", global_args[OptionsDefine.Version])
        return
    # Map CLI flags onto the request payload.
    request_fields = {
        "Module": argv.get("--Module"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Like": argv.get("--Like"),
        "InstanceGroupId": Utils.try_to_json(argv, "--InstanceGroupId"),
        "UpdateTimeOrder": argv.get("--UpdateTimeOrder"),
        "ProjectIds": Utils.try_to_json(argv, "--ProjectIds"),
        "ViewNames": Utils.try_to_json(argv, "--ViewNames"),
        "FilterUnuseReceiver": Utils.try_to_json(argv, "--FilterUnuseReceiver"),
        "Receivers": Utils.try_to_json(argv, "--Receivers"),
        "ReceiverUserList": Utils.try_to_json(argv, "--ReceiverUserList"),
        "Dimensions": argv.get("--Dimensions"),
        "ConditionTempGroupId": argv.get("--ConditionTempGroupId"),
        "ReceiverType": argv.get("--ReceiverType"),
    }
    creds = credential.Credential(global_args[OptionsDefine.SecretId],
                                  global_args[OptionsDefine.SecretKey])
    timeout = global_args[OptionsDefine.Timeout]
    http_prof = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=global_args[OptionsDefine.Endpoint]
    )
    cli_profile = ClientProfile(httpProfile=http_prof, signMethod="HmacSHA256")
    client_mod = CLIENT_MAP[global_args[OptionsDefine.Version]]
    api_client = client_mod.MonitorClient(creds, global_args[OptionsDefine.Region], cli_profile)
    api_client._sdkVersion += "_CLI_" + __version__
    req_model = MODELS_MAP[global_args[OptionsDefine.Version]].DescribePolicyGroupListRequest()
    req_model.from_json_string(json.dumps(request_fields))
    raw = api_client.DescribePolicyGroupList(req_model).to_json_string()
    try:
        parsed = json.loads(raw)
    except TypeError:
        # python3.3: to_json_string may hand back bytes instead of str
        parsed = json.loads(raw.decode('utf-8'))
    FormatOutput.output("action", parsed, global_args[OptionsDefine.Output], global_args[OptionsDefine.Filter])
def doDescribeBasicAlarmList(argv, arglist):
    """CLI action handler for the Monitor DescribeBasicAlarmList API.

    Builds the request payload from command-line flags, calls the API and
    prints the formatted response; shows help instead when requested.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeBasicAlarmList", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from the --XXX flags (JSON flags decoded).
    param = {
        "Module": argv.get("--Module"),
        "StartTime": Utils.try_to_json(argv, "--StartTime"),
        "EndTime": Utils.try_to_json(argv, "--EndTime"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "OccurTimeOrder": argv.get("--OccurTimeOrder"),
        "ProjectIds": Utils.try_to_json(argv, "--ProjectIds"),
        "ViewNames": Utils.try_to_json(argv, "--ViewNames"),
        "AlarmStatus": Utils.try_to_json(argv, "--AlarmStatus"),
        "ObjLike": argv.get("--ObjLike"),
        "InstanceGroupIds": Utils.try_to_json(argv, "--InstanceGroupIds"),
        "MetricNames": Utils.try_to_json(argv, "--MetricNames"),
    }
    version = g_param[OptionsDefine.Version]
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    client = CLIENT_MAP[version].MonitorClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    model = MODELS_MAP[version].DescribeBasicAlarmListRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeBasicAlarmList(model)
    result = rsp.to_json_string()
    jsonobj = None
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGetMonitorData(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("GetMonitorData", g_param[OptionsDefine.Version])
return
param = {
"Namespace": argv.get("--Namespace"),
"MetricName": | |
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by <NAME>, augmented by <NAME> and <NAME>
__about__ = """Heap queues
[explanation by <NAME>]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
an usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedule other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by a merging passes for these runs, which merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
# Public API exported by `from heapq import *`; the _max variants stay private.
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
           'nlargest', 'nsmallest', 'heappushpop']
def heappush(heap, item):
    """Push item onto heap, maintaining the heap invariant."""
    pos = len(heap)          # index the new item will occupy
    heap.append(item)
    _siftdown(heap, 0, pos)  # bubble the new leaf up toward the root
def heappop(heap):
    """Pop the smallest item off the heap, maintaining the heap invariant."""
    # list.pop() raises the appropriate IndexError if the heap is empty
    last = heap.pop()
    if not heap:
        # The heap held exactly one element; nothing to restore.
        return last
    smallest = heap[0]
    heap[0] = last       # move the last leaf to the root...
    _siftup(heap, 0)     # ...and let it sink back to its place
    return smallest
def heapreplace(heap, item):
    """Pop and return the current smallest value, and add the new item.
    This is more efficient than heappop() followed by heappush(), and can be
    more appropriate when using a fixed-size heap. Note that the value
    returned may be larger than item! That constrains reasonable uses of
    this routine unless written as part of a conditional replacement:
        if item > heap[0]:
            item = heapreplace(heap, item)
    """
    returnitem = heap[0]    # raises appropriate IndexError if heap is empty
    # Put the new item at the root and sink it to its proper position.
    heap[0] = item
    _siftup(heap, 0)
    return returnitem
def heappushpop(heap, item):
    """Fast version of a heappush followed by a heappop."""
    if not heap:
        # Pushing onto an empty heap and popping returns the item unchanged.
        return item
    if heap[0] < item:
        # The current root is smaller: it is the pop result; the pushed item
        # takes its slot at the root and sinks to its proper position.
        item, heap[0] = heap[0], item
        _siftup(heap, 0)
    return item
def heapify(x):
    """Transform list into a heap, in-place, in O(len(x)) time."""
    # Sift down every internal node, starting from the last parent
    # (index len(x)//2 - 1) and walking back to the root.  Leaves need
    # no work, which is what makes the bottom-up build linear overall.
    for parent in range(len(x) // 2 - 1, -1, -1):
        _siftup(x, parent)
def _heappop_max(heap):
"""Maxheap version of a heappop."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup_max(heap, 0)
return returnitem
return lastelt
def _heapreplace_max(heap, item):
    """Maxheap version of a heappop followed by a heappush."""
    returnitem = heap[0]    # raises appropriate IndexError if heap is empty
    # Put the new item at the root and sink it to restore the maxheap.
    heap[0] = item
    _siftup_max(heap, 0)
    return returnitem
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
# 'heap' is a heap at | |
service_name: Optional[str] = None):
"""
Configure the Admission Controller
:param bool enabled: Enable the admission controller to be able to inject APM/Dogstatsd config and standard tags (env, service, version) automatically into your pods
:param bool mutate_unlabelled: MutateUnlabelled enables injecting config without having the pod label 'admission.datadoghq.com/enabled="true"'
:param str service_name: ServiceName corresponds to the webhook service name
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if mutate_unlabelled is not None:
pulumi.set(__self__, "mutate_unlabelled", mutate_unlabelled)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[bool]:
        """
        Enable the admission controller to be able to inject APM/Dogstatsd config and standard tags (env, service, version) automatically into your pods
        """
        # Value was stored via pulumi.set in __init__; pulumi.get reads it back.
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter(name="mutateUnlabelled")
    def mutate_unlabelled(self) -> Optional[bool]:
        """
        MutateUnlabelled enables injecting config without having the pod label 'admission.datadoghq.com/enabled="true"'
        """
        # Wire name is camelCase "mutateUnlabelled"; the getter maps it here.
        return pulumi.get(self, "mutate_unlabelled")
    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> Optional[str]:
        """
        ServiceName corresponds to the webhook service name
        """
        return pulumi.get(self, "service_name")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names;
        # unknown names pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigConfd(dict):
    """
    Confd Provide additional cluster check configurations. Each key will become a file in /conf.d see https://docs.datadoghq.com/agent/autodiscovery/ for more details.
    """
    def __init__(__self__, *,
                 config_map_name: Optional[str] = None):
        """
        Confd Provide additional cluster check configurations. Each key will become a file in /conf.d see https://docs.datadoghq.com/agent/autodiscovery/ for more details.
        :param str config_map_name: ConfigMapName name of a ConfigMap used to mount a directory
        """
        # Only store keys that were explicitly provided; absent keys stay unset.
        if config_map_name is not None:
            pulumi.set(__self__, "config_map_name", config_map_name)
    @property
    @pulumi.getter(name="configMapName")
    def config_map_name(self) -> Optional[str]:
        """
        ConfigMapName name of a ConfigMap used to mount a directory
        """
        return pulumi.get(self, "config_map_name")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigEnv(dict):
    """
    EnvVar represents an environment variable present in a Container.
    """
    def __init__(__self__, *,
                 name: str,
                 value: Optional[str] = None,
                 value_from: Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFrom'] = None):
        """
        EnvVar represents an environment variable present in a Container.
        :param str name: Name of the environment variable. Must be a C_IDENTIFIER.
        :param str value: Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
        :param 'DatadogAgentSpecClusterAgentConfigEnvValueFromArgs' value_from: Source for the environment variable's value. Cannot be used if value is not empty.
        """
        # "name" is required; the optional fields are stored only when provided.
        pulumi.set(__self__, "name", name)
        if value is not None:
            pulumi.set(__self__, "value", value)
        if value_from is not None:
            pulumi.set(__self__, "value_from", value_from)
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the environment variable. Must be a C_IDENTIFIER.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
        """
        return pulumi.get(self, "value")
    @property
    @pulumi.getter(name="valueFrom")
    def value_from(self) -> Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFrom']:
        """
        Source for the environment variable's value. Cannot be used if value is not empty.
        """
        return pulumi.get(self, "value_from")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigEnvValueFrom(dict):
    """
    Source for the environment variable's value. Cannot be used if value is not empty.
    """
    def __init__(__self__, *,
                 config_map_key_ref: Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromConfigMapKeyRef'] = None,
                 field_ref: Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromFieldRef'] = None,
                 resource_field_ref: Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromResourceFieldRef'] = None,
                 secret_key_ref: Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromSecretKeyRef'] = None):
        """
        Source for the environment variable's value. Cannot be used if value is not empty.
        :param 'DatadogAgentSpecClusterAgentConfigEnvValueFromConfigMapKeyRefArgs' config_map_key_ref: Selects a key of a ConfigMap.
        :param 'DatadogAgentSpecClusterAgentConfigEnvValueFromFieldRefArgs' field_ref: Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
        :param 'DatadogAgentSpecClusterAgentConfigEnvValueFromResourceFieldRefArgs' resource_field_ref: Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
        :param 'DatadogAgentSpecClusterAgentConfigEnvValueFromSecretKeyRefArgs' secret_key_ref: Selects a key of a secret in the pod's namespace
        """
        # All four sources are optional; each is stored only when provided.
        if config_map_key_ref is not None:
            pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
        if field_ref is not None:
            pulumi.set(__self__, "field_ref", field_ref)
        if resource_field_ref is not None:
            pulumi.set(__self__, "resource_field_ref", resource_field_ref)
        if secret_key_ref is not None:
            pulumi.set(__self__, "secret_key_ref", secret_key_ref)
    @property
    @pulumi.getter(name="configMapKeyRef")
    def config_map_key_ref(self) -> Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromConfigMapKeyRef']:
        """
        Selects a key of a ConfigMap.
        """
        return pulumi.get(self, "config_map_key_ref")
    @property
    @pulumi.getter(name="fieldRef")
    def field_ref(self) -> Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromFieldRef']:
        """
        Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
        """
        return pulumi.get(self, "field_ref")
    @property
    @pulumi.getter(name="resourceFieldRef")
    def resource_field_ref(self) -> Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromResourceFieldRef']:
        """
        Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
        """
        return pulumi.get(self, "resource_field_ref")
    @property
    @pulumi.getter(name="secretKeyRef")
    def secret_key_ref(self) -> Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromSecretKeyRef']:
        """
        Selects a key of a secret in the pod's namespace
        """
        return pulumi.get(self, "secret_key_ref")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigEnvValueFromConfigMapKeyRef(dict):
    """
    Selects a key of a ConfigMap.
    """
    def __init__(__self__, *,
                 key: str,
                 name: Optional[str] = None,
                 optional: Optional[bool] = None):
        """
        Selects a key of a ConfigMap.
        :param str key: The key to select.
        :param str name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        :param bool optional: Specify whether the ConfigMap or its key must be defined
        """
        # "key" is required; the other fields are stored only when provided.
        pulumi.set(__self__, "key", key)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if optional is not None:
            pulumi.set(__self__, "optional", optional)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key to select.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def optional(self) -> Optional[bool]:
        """
        Specify whether the ConfigMap or its key must be defined
        """
        return pulumi.get(self, "optional")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigEnvValueFromFieldRef(dict):
    """
    Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
    """
    def __init__(__self__, *,
                 field_path: str,
                 api_version: Optional[str] = None):
        """
        Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
        :param str field_path: Path of the field to select in the specified API version.
        :param str api_version: Version of the schema the FieldPath is written in terms of, defaults to "v1".
        """
        # "field_path" is required; "api_version" is stored only when provided.
        pulumi.set(__self__, "field_path", field_path)
        if api_version is not None:
            pulumi.set(__self__, "api_version", api_version)
    @property
    @pulumi.getter(name="fieldPath")
    def field_path(self) -> str:
        """
        Path of the field to select in the specified API version.
        """
        return pulumi.get(self, "field_path")
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        """
        Version of the schema the FieldPath is written in terms of, defaults to "v1".
        """
        return pulumi.get(self, "api_version")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigEnvValueFromResourceFieldRef(dict):
    """
    Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
    """
    def __init__(__self__, *,
                 resource: str,
                 container_name: Optional[str] = None,
                 divisor: Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromResourceFieldRefDivisor'] = None):
        """
        Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
        :param str resource: Required: resource to select
        :param str container_name: Container name: required for volumes, optional for env vars
        :param 'DatadogAgentSpecClusterAgentConfigEnvValueFromResourceFieldRefDivisorArgs' divisor: Specifies the output format of the exposed resources, defaults to "1"
        """
        # "resource" is required; the other fields are stored only when provided.
        pulumi.set(__self__, "resource", resource)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if divisor is not None:
            pulumi.set(__self__, "divisor", divisor)
    @property
    @pulumi.getter
    def resource(self) -> str:
        """
        Required: resource to select
        """
        return pulumi.get(self, "resource")
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """
        Container name: required for volumes, optional for env vars
        """
        return pulumi.get(self, "container_name")
    @property
    @pulumi.getter
    def divisor(self) -> Optional['outputs.DatadogAgentSpecClusterAgentConfigEnvValueFromResourceFieldRefDivisor']:
        """
        Specifies the output format of the exposed resources, defaults to "1"
        """
        return pulumi.get(self, "divisor")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigEnvValueFromResourceFieldRefDivisor(dict):
    # Opaque quantity type: the schema declares no properties for the divisor,
    # so this output type carries no fields of its own.
    def __init__(__self__):
        pass
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterAgentConfigEnvValueFromSecretKeyRef(dict):
"""
Selects a key of a secret | |
"""Represents an execution of a program.
It is first created with the program parameters: executable, parameters of
execution, command-line; the function execute then allows multiple
executions of the program in different folders and with different
arguments."""
def __init__(self, executablePath, executionParams, cmd, evaluationContext, language=''):
# Check time and memory limits
if executionParams['timeLimitMs'] > CFG_MAX_TIMELIMIT:
raise Exception("Time limit (%d) for command %s too high." % (executionParams['timeLimitMs'], cmdLine))
if executionParams['memoryLimitKb'] > CFG_MAX_MEMORYLIMIT:
raise Exception("Memory limit (%d) for command %s too high." % (executionParams['memoryLimitKb'], cmdLine))
self.executablePath = executablePath
self.executionParams = executionParams
self.cmd = cmd
self.evaluationContext = evaluationContext
self.language = language
# Transformation of time and memory limits for the language
self.realMemoryLimitKb = CFG_TRANSFORM_MEM.get(language, CFG_TRANSFORM_MEM_DEFAULT)(executionParams['memoryLimitKb'])
(timeTransform, self.timeUntransform) = CFG_TRANSFORM_TIME.get(language, CFG_TRANSFORM_TIME_DEFAULT)
self.realTimeLimit = timeTransform(executionParams['timeLimitMs'])
# Copy values from arguments
self.baseReport = {'timeLimitMs': self.executionParams['timeLimitMs'],
'memoryLimitKb': self.executionParams['memoryLimitKb'],
'wasCached': False,
'realMemoryLimitKb': self.realMemoryLimitKb,
'realTimeLimitMs': self.realTimeLimit}
if 'continueOnError' in self.executionParams:
self.baseReport['continueOnError'] = self.executionParams['continueOnError']
logging.debug("New Execution initialized for executable `%s`, cmd `%s`" % (executablePath, cmd))
def _prepareExecute(self, workingDir, stdinFile=None, stdoutFile=None, stderrFile=None):
"""Prepares the execution in workingDir, checks stdinFile and
stdoutFile paths."""
# Copy executable to workingDir
if self.executablePath:
deployPath = os.path.join(workingDir, os.path.basename(self.executablePath))
if os.path.islink(deployPath):
try:
os.unlink(deployPath)
except:
pass
try:
symlink(self.executablePath, deployPath)
except:
# The executable was probably already imported
pass
# Check input file path
if not stdinFile:
pass
elif not os.path.isfile(stdinFile):
raise Exception("Input file `%s` not found while preparing to execute command `%s`." % (stdinFile, cmdLine))
elif not isInRestrict(stdinFile):
raise Exception("Using `%s` as input file not allowed." % stdinFile)
self.stdinFile = stdinFile
# Make stdoutFile path
if stdoutFile == None:
self.stdoutFile = os.path.join(workingDir, 'stdout')
elif not isInRestrict(stdoutFile):
raise Exception("Writing to file `%s` not allowed." % stdoutFile)
else:
self.stdoutFile = stdoutFile
# Make stderrFile path
if stderrFile == None:
self.stderrFile = os.path.join(workingDir, 'stderr')
elif not isInRestrict(stderrFile):
raise Exception("Writing to file `%s` not allowed." % stderrFile)
else:
self.stderrFile = stderrFile
# Add files from executionParams/addFiles
for fileDescr in self.executionParams.get('addFiles', []):
getFile(fileDescr, workingDir, errorFatal=False)
# Set up the environment variables
self.env = os.environ.copy()
self.env['TASKGRADER_LOCALE'] = self.evaluationContext['options']['locale']
def _doExecute(self, workingDir, args=None):
"""Executes the command in workingDir with args."""
cmdLine = self.cmd + ((' ' + args) if args else '')
# Open stdin file
stdinHandle = (open(self.stdinFile, 'rb') if self.stdinFile else None)
proc = subprocess.Popen(shlex.split(cmdLine), stdin=stdinHandle, stdout=open(self.stdoutFile, 'w'),
stderr=open(self.stderrFile, 'w'), cwd=workingDir, env=self.env)
# We allow a wall time of 3 times the timeLimit
waitWithTimeout(proc, (1+int(self.executionParams['timeLimitMs']/1000))*CFG_WALLTIME_FACTOR)
# Make execution report
report = {}
report.update(self.baseReport)
report.update({
'commandLine': cmdLine,
'timeTakenMs': -1, # We don't know
'realTimeTakenMs': -1, # We don't know
'wasKilled': False,
'exitCode': proc.returncode,
'exitSig': -1 # We don't know
})
report['stdout'] = capture(self.stdoutFile, name='stdout',
truncateSize=self.executionParams.get('stdoutTruncateKb', -1) * 1024)
report['stderr'] = capture(self.stderrFile, name='stderr',
truncateSize=self.executionParams.get('stderrTruncateKb', -1) * 1024)
filesReports = []
for f in globOfGlobs(workingDir, self.executionParams.get('getFiles', [])):
filesReports.append(capture(f, name=os.path.basename(f)))
report['files'] = filesReports
return report
    def execute(self, workingDir, args=None, stdinFile=None, stdoutFile=None, stderrFile=None):
        """Execute the program in workingDir, with command-line arguments args,
        and standard input and output redirected from stdinFile and to
        stdoutFile.

        Returns the execution report dict produced by _doExecute."""
        logging.info("Executing executable `%s`, cmd `%s`, args `%s` in dir `%s`" % (self.executablePath, self.cmd, args, workingDir))
        self.workingDir = workingDir
        # Prepare files and environment first, then run the command
        self._prepareExecute(workingDir, stdinFile, stdoutFile, stderrFile)
        return self._doExecute(workingDir, args)
class IsolatedExecution(Execution):
    """Represents an execution encapsulated in isolate (a sandbox).

    Falls back to a plain Execution when the isolate or box-rights binaries
    are missing or not setuid."""
    def _doExecute(self, workingDir, args=None):
        """Run the command inside an isolate sandbox and return the report."""
        # Check isolate is present (must exist and be setuid)
        if not os.path.isfile(CFG_ISOLATEBIN) or (os.stat(CFG_ISOLATEBIN).st_mode & stat.S_ISUID) == 0:
            logging.warning("Isolate is not properly configured, falling back to normal execution. Check documentation for more information.")
            return Execution._doExecute(self, workingDir, args=args)
        if not os.path.isfile(CFG_RIGHTSBIN) or (os.stat(CFG_RIGHTSBIN).st_mode & stat.S_ISUID) == 0:
            logging.warning("Box-rights for isolate is not properly configured, falling back to normal execution. Check documentation for more information.")
            return Execution._doExecute(self, workingDir, args=args)
        cmdLine = self.cmd + ((' ' + args) if args else '')
        report = {}
        report.update(self.baseReport)
        # Box ID is required if multiple isolate instances are running concurrently
        boxId = (os.getpid() % 100)
        isolateCommonOpts = ['--box-id=%d' % boxId]
        if CFG_CONTROLGROUPS:
            isolateCommonOpts.append('--cg')
        # Initialize isolate box
        initProc = subprocess.Popen([CFG_ISOLATEBIN, '--init'] + isolateCommonOpts, stdout=subprocess.PIPE, cwd=workingDir)
        (isolateDir, isolateErr) = communicateWithTimeout(initProc, 10)
        if initProc.returncode != 0:
            raise Exception("Error while initializing isolate box (#%d)." % initProc.returncode)
        # isolateDir is the path of the sandbox, as given by isolate
        isolateDir = os.path.join(isolateDir.strip(), 'box/')
        # Build isolate command line
        isolatedCmdLine = CFG_ISOLATEBIN
        isolatedCmdLine += ' --processes'
        isolatedCmdLine += ' --env=HOME --env=PATH --env=LANG --env=LC_ALL --env=TASKGRADER_LOCALE'
        isolatedCmdLine += ' --meta=%s' % os.path.join(workingDir, 'isolate.meta')
        # Add access to some folders
        for folder in CFG_ISOLATE_AVAILABLE:
            isolatedCmdLine += ' --dir="%s":maybe' % folder
        # Use an unique box ID
        isolatedCmdLine += ' --box-id=%d' % boxId
        if self.executionParams['timeLimitMs'] > 0:
            isolatedCmdLine += ' --time=' + str(self.realTimeLimit / 1000.)
            isolatedCmdLine += ' --wall-time=' + str(CFG_WALLTIME_FACTOR * self.realTimeLimit / 1000.)
        if self.executionParams['memoryLimitKb'] > 0:
            # cg-mem needs control groups; --mem is the per-process fallback
            if CFG_CONTROLGROUPS:
                isolatedCmdLine += ' --cg-mem=' + str(self.realMemoryLimitKb)
            else:
                isolatedCmdLine += ' --mem=' + str(self.realMemoryLimitKb)
        if self.stdinFile:
            filecopy(self.stdinFile, os.path.join(workingDir, 'isolated.stdin'), fromlocal=True)
            isolatedCmdLine += ' --stdin=isolated.stdin'
        if CFG_CONTROLGROUPS:
            isolatedCmdLine += ' --cg --cg-timing'
        isolatedCmdLine += ' --stdout=isolated.stdout --stderr=isolated.stderr'
        isolatedCmdLine += ' --run -- ' + cmdLine
        # Clean old isolate files
        for f in ['isolated.stdout', 'isolated.stderr', 'isolated.meta']:
            try:
                os.unlink(os.path.join(workingDir, f))
            except OSError:
                # File may simply not exist yet; ignore
                pass
        # Copy files from working directory to sandbox
        dircopy(workingDir, isolateDir)
        # Create meta file with right owner/permissions
        open(os.path.join(workingDir, 'isolate.meta'), 'w').close()
        logging.debug("Executing isolate: `%s`" % isolatedCmdLine)
        # Execute the isolated program
        proc = subprocess.Popen(shlex.split(isolatedCmdLine), cwd=workingDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.env)
        (procOut, procErr) = communicateWithTimeout(proc, int(10 + 3 * self.realTimeLimit / 1000.))
        # Get metadata from isolate execution ("key:value" lines)
        isolateMeta = {}
        try:
            with open(os.path.join(workingDir, 'isolate.meta'), 'r') as metaFile:
                for l in metaFile.readlines():
                    [name, val] = l.split(':', 1)
                    isolateMeta[name] = val.strip()
        except Exception:
            # Missing/garbled meta file: keep whatever was parsed so far
            pass
        if proc.returncode > 1:
            # Isolate error (0 and 1 refer to the program inside the sandbox)
            # Try to cleanup sandbox
            cleanProc = subprocess.Popen([CFG_ISOLATEBIN, '--cleanup', '--box-id=%d' % boxId], cwd=workingDir)
            waitWithTimeout(cleanProc, 10)
            raise Exception("""Internal isolate error, please check installation: #%d %s
while trying to execute `%s` in folder `%s`.
stdout: %s
stderr: %s""" % (proc.returncode, isolateMeta.get('status', ''),
                cmdLine, workingDir,
                procOut, procErr))
        # Set file rights so that we can access the files
        rightsProc = subprocess.Popen([CFG_RIGHTSBIN])
        waitWithTimeout(rightsProc, 30)
        # Copy back the files from sandbox
        dircopy(isolateDir, workingDir, overwrite=False)
        filecopy(os.path.join(isolateDir, 'isolated.stdout'), self.stdoutFile)
        filecopy(os.path.join(isolateDir, 'isolated.stderr'), self.stderrFile)
        # Generate execution report.
        # Bug fix: dict.has_key was removed in Python 3; the `in` operator
        # behaves identically and works on Python 2 as well.
        if 'time' in isolateMeta:
            report['realTimeTakenMs'] = int(float(isolateMeta['time'])*1000)
            report['timeTakenMs'] = int(self.timeUntransform(report['realTimeTakenMs']))
        else:
            report['realTimeTakenMs'] = -1
            report['timeTakenMs'] = -1
        # Memory used: cg-mem is only available when control groups are
        # enabled, and max-rss can be slightly inaccurate
        if 'cg-mem' in isolateMeta:
            report['memoryUsedKb'] = int(isolateMeta['cg-mem'])
        elif 'max-rss' in isolateMeta:
            report['memoryUsedKb'] = int(isolateMeta['max-rss'])
        else:
            report['memoryUsedKb'] = -1
        report['commandLine'] = cmdLine
        report['wasKilled'] = 'killed' in isolateMeta
        report['exitCode'] = int(isolateMeta.get('exitcode', proc.returncode))
        if isolateMeta.get('status', '') == 'TO':
            # Timed-out, custom value
            report['exitSig'] = 137
        elif isolateMeta.get('status', '') == 'SG':
            report['exitSig'] = int(isolateMeta.get('exitsig', 0))
        else:
            report['exitSig'] = 0
        report['stdout'] = capture(os.path.join(workingDir, 'isolated.stdout'), name='stdout',
                truncateSize=self.executionParams.get('stdoutTruncateKb', -1) * 1024)
        report['stderr'] = capture(os.path.join(workingDir, 'isolated.stderr'), name='stderr',
                truncateSize=self.executionParams.get('stderrTruncateKb', -1) * 1024)
        # Cleanup sandbox
        cleanProc = subprocess.Popen([CFG_ISOLATEBIN, '--cleanup'] + isolateCommonOpts, cwd=workingDir)
        waitWithTimeout(cleanProc, 10)
        return report
def which(name):
    """Searches for a program in PATH.

    :param name: executable name, or a path (absolute or relative) to one.
    :return: the full path of the executable, or False when not found.
    """
    def is_exe(path):
        # Executable means: a regular file with the execute bit set for us.
        return os.path.isfile(path) and os.access(path, os.X_OK)
    fpath, _ = os.path.split(name)
    # A name that already contains a directory part is checked directly first.
    if fpath and is_exe(name):
        return name
    for spath in os.environ['PATH'].split(os.pathsep):
        # PATH entries may be quoted; strip the quotes before joining.
        # (The original computed this join twice, once for the return value
        # and once for the is_exe check; compute it once.)
        fullpath = os.path.join(spath.strip('"'), name)
        if is_exe(fullpath):
            return fullpath
    return False
class Language():
    """Represents a language, gives functions for aspects specific to each
    language."""
    # Language identifier used in error messages; subclasses override this.
    lang = 'default'
    # The dependencies will be searched for, if one is not found, the Language
    # will raise an exception in __init__.
    # The full path of each dependency will be stored in self.deppaths; note
    # that this full path is then used by code, so always add additional
    # dependencies at the end of the list.
    dependencies = []
    # Can programs in this language be isolated?
    isolationPossible = True
def __init__(self):
"""Class initialization: check the required dependencies are present."""
self.deppaths = []
for f in self.dependencies:
deppath = which(f)
if not deppath:
raise UnsupportedLanguage("Cannot use language '%s', dependency `%s` missing." % (self.lang, f))
self.deppaths.append(deppath)
def _getPossiblePaths(self, baseDir, filename):
"""Returns the possible paths for a dependency filename, for a build
based in baseDir. Used by getSource."""
return [
# We search for [language]-[name] in | |
"""
Script to collect metrics (total desired, actual and pending) from a DC
"""
from __future__ import print_function
import json
import operator
import sys
import time
from collections import defaultdict, namedtuple
from datetime import timedelta
from functools import partial
import attr
from effect import ComposedDispatcher, Effect, Func
from effect.do import do
from silverberg.cluster import RoundRobinCassandraCluster
from toolz.curried import filter, get_in
from toolz.dicttoolz import keyfilter, merge
from toolz.itertoolz import groupby
from toolz.recipes import countby
from twisted.application.internet import TimerService
from twisted.application.service import Service
from twisted.internet import defer, task
from twisted.internet.endpoints import clientFromString
from twisted.python import usage
from txeffect import exc_info_to_failure, perform
from otter.auth import generate_authenticator
from otter.cloud_client import TenantScope, service_request
from otter.constants import ServiceType, get_service_configs
from otter.convergence.composition import tenant_is_enabled
from otter.convergence.gathering import get_all_scaling_group_servers
from otter.convergence.model import NovaServer, group_id_from_metadata
from otter.convergence.planning import Destiny, get_destiny
from otter.effect_dispatcher import get_legacy_dispatcher
from otter.log import log as otter_log
from otter.models.cass import CassScalingGroupCollection
from otter.models.intents import GetAllValidGroups, get_model_dispatcher
from otter.util.fp import partition_bool
# Metrics for one scaling group: owning tenant, group id, desired capacity,
# actual (active) server count and pending (neither active nor ignored).
GroupMetrics = namedtuple('GroupMetrics',
                          'tenant_id group_id desired actual pending')
def get_tenant_metrics(tenant_id, scaling_groups, grouped_servers,
                       _print=False):
    """
    Produce per-group metrics for all the groups of a tenant.

    :param list scaling_groups: Tenant's scaling groups as dicts from CASS
    :param dict grouped_servers: Servers from Nova grouped by scaling group ID
    :param bool _print: print progress to stdout while processing?
    :return: generator of :obj:`GroupMetrics`
    """
    if _print:
        print('processing tenant {} with groups {} and servers {}'.format(
            tenant_id, len(scaling_groups), len(grouped_servers)))
    cass_groups = {g['groupId']: g for g in scaling_groups}
    # Consider every group known to either Cassandra or Nova.
    for gid in set(cass_groups) | set(grouped_servers):
        raw_servers = grouped_servers.get(gid, [])
        if gid in cass_groups:
            group = cass_groups[gid]
            # Groups marked broken/disabled are not reported on.
            if group.get("status") in ("ERROR", "DISABLED"):
                continue
        else:
            # Group known only through Nova server metadata: no desired info.
            group = {'groupId': group_id_from_metadata(
                         raw_servers[0]['metadata']),
                     'desired': 0}
        nova_servers = [NovaServer.from_server_details_json(s)
                        for s in raw_servers]
        destiny_counts = defaultdict(int)
        destiny_counts.update(countby(get_destiny, nova_servers))
        active = (destiny_counts[Destiny.CONSIDER_AVAILABLE] +
                  destiny_counts[Destiny.AVOID_REPLACING])
        ignored = (destiny_counts[Destiny.DELETE] +
                   destiny_counts[Destiny.CLEANUP] +
                   destiny_counts[Destiny.IGNORE])
        yield GroupMetrics(tenant_id, group['groupId'], group['desired'],
                           active, len(nova_servers) - ignored - active)
def get_all_metrics_effects(tenanted_groups, log, _print=False):
    """
    Build one effect per tenant that gathers that tenant's server data and
    produces its group metrics.

    :param dict tenanted_groups: Scaling groups keyed by tenantId
    :param bool _print: Should the function print while processing?
    :return: ``list`` of :obj:`Effect` of (``list`` of :obj:`GroupMetrics`)
        or None (when gathering failed; the failure is logged)
    """
    effs = []
    for tenant_id, groups in tenanted_groups.iteritems():
        servers_eff = Effect(
            TenantScope(get_all_scaling_group_servers(), tenant_id))
        metrics_eff = servers_eff.on(
            partial(get_tenant_metrics, tenant_id, groups, _print=_print))
        # Materialize the metrics generator; on error log and yield None.
        metrics_eff = metrics_eff.on(list).on(
            error=lambda exc_info: log.err(exc_info_to_failure(exc_info)))
        effs.append(metrics_eff)
    return effs
def _perform_limited_effects(dispatcher, effects, limit):
    """
    Perform the effects in parallel with at most `limit` in flight at once.
    It'd be nice if effect.parallel had a "limit" parameter.
    """
    # TODO: Use cooperator instead
    semaphore = defer.DeferredSemaphore(limit)
    return defer.gatherResults(
        [semaphore.run(perform, dispatcher, eff) for eff in effects])
def get_all_metrics(dispatcher, tenanted_groups, log, _print=False,
                    get_all_metrics_effects=get_all_metrics_effects):
    """
    Gather server data and produce metrics for all groups across all tenants
    in a region.

    :param dispatcher: An Effect dispatcher.
    :param dict tenanted_groups: Scaling Groups grouped on tenantid
    :param bool _print: Should the function print while processing?
    :param get_all_metrics_effects: effect factory, overridable for testing
    :return: ``list`` of `GroupMetrics` as `Deferred`
    """
    effs = get_all_metrics_effects(tenanted_groups, log, _print=_print)
    # Process at most 10 tenants concurrently.
    d = _perform_limited_effects(dispatcher, effs, 10)
    # `filter` here is the curried toolz version: called with one argument it
    # returns a callback that drops the None results of failed tenants.
    d.addCallback(filter(lambda x: x is not None))
    # Flatten the per-tenant lists into one list of GroupMetrics.
    return d.addCallback(lambda x: reduce(operator.add, x, []))
@attr.s
class Metric(object):
    """Mutable accumulator of desired/actual/pending server counts,
    filled in per tenant and globally by :func:`calc_total`."""
    desired = attr.ib(default=0)
    actual = attr.ib(default=0)
    pending = attr.ib(default=0)
def calc_total(group_metrics):
    """
    Calculate the total metric across all groups as well as one per tenant.

    :param group_metrics: List of :obj:`GroupMetrics`
    :return: (``dict``, :obj:`Metric`) where the dict maps tenant-id to its
        `Metric` and the second element is the total across all tenants
    """
    per_tenant = defaultdict(Metric)
    overall = Metric()
    for gm in group_metrics:
        # Each group's numbers feed both the global and the tenant totals.
        for acc in (overall, per_tenant[gm.tenant_id]):
            acc.desired += gm.desired
            acc.actual += gm.actual
            acc.pending += gm.pending
    return per_tenant, overall
@do
def add_to_cloud_metrics(ttl, region, group_metrics, num_tenants, config,
                         log=None, _print=False):
    """
    Add total number of desired, actual and pending servers of a region
    to Cloud metrics.

    :param int ttl: TTL in seconds attached to every ingested data point
    :param str region: which region's metric is collected
    :param group_metrics: List of :obj:`GroupMetrics`
    :param int num_tenants: total number of tenants
    :param dict config: Config json dict containing convergence tenants info
    :param log: Optional logger
    :param bool _print: Should it print activity on stdout? Useful when running
        as a script
    :return: `Effect` with None
    """
    epoch = yield Effect(Func(time.time))
    # Fields common to every data point; collectionTime is in milliseconds.
    metric_part = {'collectionTime': int(epoch * 1000),
                   'ttlInSeconds': ttl}
    tenanted_metrics, total = calc_total(group_metrics)
    if log is not None:
        log.msg(
            'total desired: {td}, total_actual: {ta}, total pending: {tp}',
            td=total.desired, ta=total.actual, tp=total.pending)
    if _print:
        print('total desired: {}, total actual: {}, total pending: {}'.format(
            total.desired, total.actual, total.pending))
    # Region-wide totals first, then per-tenant metrics in tenant-id order.
    metrics = [('desired', total.desired), ('actual', total.actual),
               ('pending', total.pending), ('tenants', num_tenants),
               ('groups', len(group_metrics))]
    for tenant_id, metric in sorted(tenanted_metrics.items()):
        metrics.append(("{}.desired".format(tenant_id), metric.desired))
        metrics.append(("{}.actual".format(tenant_id), metric.actual))
        metrics.append(("{}.pending".format(tenant_id), metric.pending))
    # convergence tenants desired and actual
    conv_tenants = keyfilter(
        partial(tenant_is_enabled,
                get_config_value=lambda k: get_in([k], config)),
        tenanted_metrics)
    conv_desired = sum(m.desired for m in conv_tenants.itervalues())
    conv_actual = sum(m.actual for m in conv_tenants.itervalues())
    metrics.extend(
        [("conv_desired", conv_desired), ("conv_actual", conv_actual),
         ("conv_divergence", conv_desired - conv_actual)])
    # Metric names are namespaced by region, e.g. "<region>.desired".
    data = [merge(metric_part,
                  {'metricValue': value,
                   'metricName': '{}.{}'.format(region, metric)})
            for metric, value in metrics]
    yield service_request(ServiceType.CLOUD_METRICS_INGEST,
                          'POST', 'ingest', data=data, log=log)
def connect_cass_servers(reactor, config):
    """
    Build and return a round-robin Cassandra cluster connection from the
    seed hosts and keyspace in `config`.
    """
    endpoints = [clientFromString(reactor, str(host))
                 for host in config['seed_hosts']]
    return RoundRobinCassandraCluster(
        endpoints, config['keyspace'], disconnect_on_cancel=True)
def get_dispatcher(reactor, authenticator, log, service_configs, store):
    """Compose the legacy effect dispatcher with the model dispatcher."""
    legacy = get_legacy_dispatcher(reactor, authenticator, log,
                                   service_configs)
    model = get_model_dispatcher(log, store)
    return ComposedDispatcher([legacy, model])
@defer.inlineCallbacks
def collect_metrics(reactor, config, log, client=None, authenticator=None,
                    _print=False):
    """
    Start collecting the metrics

    :param reactor: Twisted reactor
    :param dict config: Configuration got from file containing all info
        needed to collect metrics
    :param :class:`silverberg.client.CQLClient` client:
        Optional cassandra client. A new client will be created
        if this is not given and disconnected before returing
    :param :class:`otter.auth.IAuthenticator` authenticator:
        Optional authenticator. A new authenticator will be created
        if this is not given
    :param bool _print: Should debug messages be printed to stdout?
    :return: :class:`Deferred` fired with ``list`` of `GroupMetrics`
    """
    _client = client or connect_cass_servers(reactor, config['cassandra'])
    authenticator = authenticator or generate_authenticator(reactor,
                                                            config['identity'])
    store = CassScalingGroupCollection(_client, reactor, 1000)
    dispatcher = get_dispatcher(reactor, authenticator, log,
                                get_service_configs(config), store)
    # calculate metrics on launch_server and non-paused groups
    groups = yield perform(dispatcher, Effect(GetAllValidGroups()))
    groups = [
        g for g in groups
        if json.loads(g["launch_config"]).get("type") == "launch_server" and
        (not g.get("paused", False))]
    tenanted_groups = groupby(lambda g: g["tenantId"], groups)
    group_metrics = yield get_all_metrics(
        dispatcher, tenanted_groups, log, _print=_print)
    # Add to cloud metrics (only when a "metrics" section is configured)
    metr_conf = config.get("metrics", None)
    if metr_conf is not None:
        eff = add_to_cloud_metrics(
            metr_conf['ttl'], config['region'], group_metrics,
            len(tenanted_groups), config, log, _print)
        # The ingest request runs under the metrics service tenant.
        eff = Effect(TenantScope(eff, metr_conf['tenant_id']))
        yield perform(dispatcher, eff)
        log.msg('added to cloud metrics')
        if _print:
            print('added to cloud metrics')
    if _print:
        # Most-divergent groups first, for human inspection.
        group_metrics.sort(key=lambda g: abs(g.desired - g.actual),
                           reverse=True)
        print('groups sorted as per divergence')
        print('\n'.join(map(str, group_metrics)))
    # Disconnect only if we created the client
    if not client:
        yield _client.disconnect()
    defer.returnValue(group_metrics)
class Options(usage.Options):
    """
    Options for otter-metrics service
    """
    # Single option: path to the JSON configuration file.
    optParameters = [["config", "c", "config.json",
                      "path to JSON configuration file"]]
    def postOptions(self):
        """
        Parse config file and merge its contents into these options.
        """
        # `self.open` can be pre-set by tests; fall back to the builtin.
        self.open = getattr(self, 'open', None) or open  # For testing
        self.update(json.load(self.open(self['config'])))
def metrics_set(metrics):
    """Return the set of (tenant_id, group_id) pairs of the given metrics."""
    return {(gm.tenant_id, gm.group_id) for gm in metrics}
def unchanged_divergent_groups(clock, current, timeout, group_metrics):
    """
    Return list of GroupMetrics that have been divergent and unchanged for
    timeout seconds

    :param IReactorTime clock: Twisted time used to track
    :param dict current: Currently tracked divergent groups, mapping
        (tenant_id, group_id) -> (first_seen_seconds, metrics_hash)
    :param float timeout: Timeout in seconds
    :param list group_metrics: List of group metrics

    :return: (updated current, List of (group, divergent_time) tuples)
    """
    converged, diverged = partition_bool(
        lambda gm: gm.actual + gm.pending == gm.desired, group_metrics)
    # stop tracking all converged and deleted groups
    deleted = set(current.keys()) - metrics_set(group_metrics)
    updated = current.copy()
    for g in metrics_set(converged) | deleted:
        updated.pop(g, None)
    # Start tracking divergent groups depending on whether they've changed
    now = clock.seconds()
    to_log, new = [], {}
    for gm in diverged:
        pair = (gm.tenant_id, gm.group_id)
        if pair in updated:
            last_time, values = updated[pair]
            # Metrics changed while divergent: drop the entry; the group will
            # be re-tracked with a fresh timestamp on the next collection.
            if values != hash((gm.desired, gm.actual, gm.pending)):
                del updated[pair]
                continue
            time_diff = now - last_time
            # The `% timeout <= 60` check assumes collections happen roughly
            # every minute, so each interval boundary is hit at most once.
            if time_diff > timeout and time_diff % timeout <= 60:
                # log on intervals of timeout. For example, if timeout is 1 hr
                # then log every hour it remains diverged
                to_log.append((gm, time_diff))
        else:
            new[pair] = now, hash((gm.desired, gm.actual, gm.pending))
    return merge(updated, new), to_log
class MetricsService(Service, object):
"""
Service collects metrics on continuous basis
"""
def __init__(self, reactor, config, log, clock=None, collect=None):
"""
Initialize the service by connecting to Cassandra and setting up
authenticator
:param reactor: Twisted reactor for connection purposes
:param dict config: All the config necessary to run the service.
Comes from config file
:param IReactorTime clock: Optional reactor | |
<gh_stars>1-10
import gc
import logging
import os
import pickle
import shutil

import networkx as nx
import numpy as np
from dca.schemes import (
DCALoggers,
DelaunayGraphVisualizer,
REData,
ExperimentDirs,
DelaunayGraphParams,
HDBSCANParams,
GeomCAParams,
QueryData,
)
from dca.delaunay_graph import DelaunayGraph
from typing import Optional, List
import pandas as pd
import dca.delaunay_graph_utils as graph_utils
import dca.visualization as visualization
import logging.config
from dca.loggers import logger_time, get_parameters
import json
# -------------------------------------------------------------------------- #
# Logging settings
# -------------------------------------------------------------------------- #
logger = logging.getLogger("DCA_info_logger")
result_logger = logging.getLogger("DCA_result_logger")
time_logger = logging.getLogger("DCA_time_logger")
# -------------------------------------------------------------------------- #
# -------------------------------------------------------------------------- #
class DCA:
def __init__(
self,
dirs: ExperimentDirs,
Delaunay_graph_params: DelaunayGraphParams,
clustering_params: HDBSCANParams,
GeomCA_params: GeomCAParams,
loggers: DCALoggers,
random_seed: int = 1111,
):
np.random.seed(random_seed)
# Paths
self.root = dirs.experiment_dir
self.precomputed_dir = dirs.precomputed_dir
self.DCA_dir = dirs.DCA_dir
self.visualization_dir = dirs.visualization_dir
self.results_dir = dirs.results_dir
self.logs_dir = dirs.logs_dir
# Initialize the loggers
self.loggers = loggers
logging.config.dictConfig(loggers.loggers)
# Parameters
self.GeomCA_params = GeomCA_params
self.graph_params = Delaunay_graph_params
self.clustering_params = clustering_params
self.save_parameters(
[dirs, Delaunay_graph_params, clustering_params, GeomCA_params],
random_seed,
loggers.version,
)
# Visualisation
self.visualize_Delaunay_graph = False
# -------------------------------------------------------------------------- #
# Prepocess data and save parameters
# -------------------------------------------------------------------------- #
def preprocess_data(self, data: REData):
"""
Prepares the input array for Delaunay approximation.
:param data: R and E data parameters.
:return: DelaunayGraphVisualizer object.
"""
input_array_filepath = os.path.join(self.root, data.input_array_filepath)
if not os.path.isfile(input_array_filepath):
input_array = np.concatenate([data.R, data.E]).astype(np.float32)
np.save(input_array_filepath, input_array)
if data.visualize:
G_visualizer = DelaunayGraphVisualizer(
os.path.join(self.root, data.input_array_filepath),
data.num_R,
data.num_E,
)
logger.debug("DelaunayGraphVisualizer initialized")
return G_visualizer
def preprocess_query_data(self, query_data: QueryData):
"""
Prepares the input array of query points for query point Delaunay approximation.
:param query_data: query data parameters.
"""
query_input_array_filepath = os.path.join(
self.root, query_data.query_input_array_filepath
)
if not os.path.isfile(query_input_array_filepath):
input_array = query_data.Q.astype(np.float32)
np.save(query_input_array_filepath, input_array)
def save_parameters(
self,
params_list: List,
random_seed: int = 1111,
version: int = 0,
):
"""
Saves input parameters.
:param params_list: list of input parameters.
:param random_seed:
:param version: experiment version index.
"""
params_dict = {"random_seed": random_seed}
for params in params_list:
dict = get_parameters(params)
params_dict = {**params_dict, **dict}
with open(
os.path.join(self.logs_dir, f"version{version}_input.json"),
"w",
) as f:
json.dump(params_dict, f, indent=4)
# -------------------------------------------------------------------------- #
# DCA: R and E
# -------------------------------------------------------------------------- #
    @logger_time
    def fit(self, data: REData):
        """
        DCA
        Runs DCA algorithm on the given sets of representations R and E:
        phase 1 approximates the Delaunay graph, phase 2 distils it into
        connected components, phase 3 analyses those components.

        :param data: R and E data parameters.
        :return: DCA local and global evaluation scores, as produced by
            :meth:`save_DCA_logs`.
        """
        print("Starting to run DCA...")
        # Preprocess input data (returns a visualizer only when enabled)
        G_visualizer = self.preprocess_data(data)
        logger.debug("Input data saved")
        # Get Delaunay graph
        G = self.get_Delaunay_graph(data, G_visualizer)
        logger.debug("DelaunayGraph initialized")
        print("- Delaunay graph approximated.")
        # The raw R/E arrays are no longer needed; free them before the
        # memory-heavy component analysis.
        n_points = data.num_R + data.num_E
        del data
        gc.collect()
        # Get Delaunay connected components
        (
            component_vertex_idx_mapping,
            first_non_trivial_component,
        ) = self.get_Delaunay_connected_components(n_points, G.graph, G_visualizer)
        logger.debug("Delaunay connected components obtained")
        G.set_first_trivial_component_idx(first_non_trivial_component.item())
        logger.debug(
            "Delaunay first non trivial component set to: %s",
            first_non_trivial_component,
        )
        print("- Distilled Delaunay graph built.")
        # Analyse Delaunay connected components
        self.analyse_Delaunay_connected_components(
            component_vertex_idx_mapping, G, G_visualizer
        )
        logger.debug("Delaunay connected components analysed")
        print("- Distilled Delaunay graph analysed.")
        # Save results
        output = self.save_DCA_logs(G)
        logger.debug("- DCA results saved.")
        # Plot results
        visualization._plot_RE_components_consistency(
            G,
            self.visualization_dir,
            min_comp_size=2,
            annotate_largest=True,
            display_smaller=False,
        )
        visualization._plot_RE_components_quality(
            G,
            self.visualization_dir,
            min_comp_size=2,
            annotate_largest=True,
            display_smaller=False,
        )
        logger.debug("DCA results visualized")
        print("- DCA executed, results saved to: {0}.".format(self.DCA_dir))
        return output
    @logger_time
    def get_Delaunay_graph(
        self, data: REData, visualizer: Optional[DelaunayGraphVisualizer] = None
    ):
        """
        Phase 1
        Approximates and filters the Delaunay graph on the given sets of
        representations R and E.

        :param data: R and E data parameters.
        :param visualizer: optional DelaunayGraphVisualizer object.
        :return: approximated and filtered DelaunayGraph.
        """
        # Build Delaunay edges if it does not exists
        graph_utils._approximate_Delaunay_edges(
            self.root, data.input_array_filepath, self.graph_params
        )
        logger.debug(
            "Delaunay graph {0} created with parameter nrays={1}.".format(
                os.path.join(self.root, self.graph_params.unfiltered_edges_filepath),
                self.graph_params.T,
            )
        )
        # Filter Delaunay edges if specified
        if self.graph_params.sphere_coverage == 1.0:
            # Full sphere coverage: use every approximated edge. Only the
            # first two columns (the vertex indices) are kept.
            Delaunay_edges = np.load(
                os.path.join(self.root, self.graph_params.unfiltered_edges_filepath)
            )[:, :2]
            Delaunay_edges_len = np.load(
                os.path.join(self.root, self.graph_params.unfiltered_edges_len_filepath)
            )
            logger.debug("Unfiltered Delaunay edges of shape: %s", Delaunay_edges.shape)
        else:
            # Partial coverage: write filtered edge/length files, then load them.
            logger.debug(
                "Chosen sphere coverage: %s", self.graph_params.sphere_coverage
            )
            unfiltered_Delaunay_edges_shape = graph_utils._filter_Delaunay_edges(
                os.path.join(self.root, self.graph_params.unfiltered_edges_filepath),
                os.path.join(
                    self.root, self.graph_params.unfiltered_edges_len_filepath
                ),
                self.graph_params,
                os.path.join(self.root, self.graph_params.filtered_edges_filepath),
                os.path.join(self.root, self.graph_params.filtered_edges_len_filepath),
                data.num_R + data.num_E,
            )
            Delaunay_edges = np.load(
                os.path.join(self.root, self.graph_params.filtered_edges_filepath)
            )
            Delaunay_edges_len = np.load(
                os.path.join(self.root, self.graph_params.filtered_edges_len_filepath)
            )
            logger.debug(
                "Unfiltered Delaunay graph shape: %s", unfiltered_Delaunay_edges_shape
            )
            logger.debug("Filtered Delaunay graph shape: %s", Delaunay_edges.shape)
        logger.debug("Delaunay edges extracted")
        # Init DelaunayGraph
        G = DelaunayGraph(data.num_R, data.num_E)
        G.init_Delaunay_graph(Delaunay_edges, Delaunay_edges_len)
        if visualizer is not None:
            visualization._plot_Delaunay_graph(
                visualizer,
                edges=Delaunay_edges,
                filename="approximated_Delaunay_graph",
                root=self.visualization_dir,
            )
            logger.debug("Delaunay edges visualized")
        return G
    @logger_time
    def get_Delaunay_connected_components(
        self,
        n_points: int,
        graph: nx.Graph,
        visualizer: Optional[DelaunayGraphVisualizer] = None,
    ):
        """
        Phase 2
        Distils the approximated Delaunay graph into connected components.

        :param n_points: total number of points in R and E.
        :param graph: approximated Delaunay graph.
        :param visualizer: optional DelaunayGraphVisualizer object.
        :return: dict with keys representing component indices and arrays of
                 corresponding vertices contained in each component as values;
                 index of the first non trivial component.
        """
        # Perform HDBSCAN clustering
        input_array_labels = graph_utils._distil_Delaunay_connected_components(
            self.root, self.clustering_params, graph
        )
        logger.debug("HDBSCAN executed")
        logger.debug(
            "Number of significant connected components: %s",
            len(np.unique(input_array_labels)),
        )
        if visualizer is not None:
            xlim, ylim = visualization._plot_Delaunay_graph(
                visualizer,
                graph.edges,
                filename="Delaunay_components",
                root=self.visualization_dir,
                labels=input_array_labels,
            )
            logger.debug("Delaunay connected components visualized")
            # Remember the axis limits so later per-component plots share them.
            visualizer.xlim = xlim
            visualizer.ylim = ylim
            logger.debug(f"DelaunayGraphVisualizer updated xlim={xlim} and ylim={ylim}")
        # Extract components sorted by their vertex size
        (
            component_vertex_idx_mapping,
            first_non_trivial_component,
        ) = graph_utils._sort_Delaunay_connected_components(
            self.root,
            input_array_labels,
            self.clustering_params,
            n_points,
        )
        logger.debug("Delaunay connected components extracted")
        gc.collect()
        return (component_vertex_idx_mapping, first_non_trivial_component)
@logger_time
def analyse_Delaunay_connected_components(
self,
component_vertex_idx_mapping: dict,
G: DelaunayGraph,
visualizer: Optional[DelaunayGraphVisualizer] = None,
discard_component_graph: Optional[bool] = True,
):
"""
Phase 3
Analyses the connected components of the distilled Delunay graph.
:param component_vertex_idx_mapping: dictionary of vertex indices contained in each component.
:param G: distilled Delaunay graph.
:param visualizer: DelaunayGraphVisualizer object.
:param discard_component_graph: whether to discard the component nx.Graph object (storage heavy).
"""
for comp_idx, comp_vertices in component_vertex_idx_mapping.items():
subgraph_RE_comp = G.graph.subgraph(comp_vertices)
if nx.is_empty(subgraph_RE_comp):
subgraph_RE_comp = nx.Graph()
subgraph_RE_comp.add_nodes_from(comp_vertices)
if visualizer is not None and comp_idx < G.first_trivial_component_idx:
visualization._plot_Delaunay_graph(
visualizer,
edges=subgraph_RE_comp.edges,
filename=f"component_{comp_idx}_Delaunay",
root=self.visualization_dir,
vertices=np.array(comp_vertices),
keep_range=True,
)
logger.debug(f"Delaunay connected component {comp_idx} visualized")
subgraph_R_comp, subgraph_E_comp = graph_utils._extract_RE_subgraphs(
subgraph_RE_comp, G.num_R, G.num_E
)
(
comp_R_idxs,
comp_E_idxs,
comp_consistency,
comp_quality,
num_comp_RE_edges,
num_total_comp_edges,
) = graph_utils._evaluate_Delaunay_component(
subgraph_RE_comp, subgraph_R_comp, subgraph_E_comp, G.num_R
)
logger.debug(f"Delaunay connected component {comp_idx} analyzed")
G.update_local_stats(
comp_R_idxs,
comp_E_idxs,
comp_consistency,
comp_quality,
num_comp_RE_edges,
num_total_comp_edges,
self.GeomCA_params.comp_consistency_threshold,
self.GeomCA_params.comp_quality_threshold,
None if not discard_component_graph else subgraph_RE_comp,
)
logger.debug(f"DelaunayGraph updated local stats with component {comp_idx}")
if visualizer is not None:
visualization._plot_isolated_components(
G, visualizer, self.visualization_dir
)
logger.debug("Isolated Delaunay connected components")
visualization._plot_Delaunay_graph(
visualizer,
edges=G.distil_edges(),
filename="distilled_Delaunay_graph",
root=self.visualization_dir,
)
logger.debug("distilled Delaunay edges visualized")
G.update_global_stats()
logger.debug(f"DelaunayGraph updated global stats")
def save_DCA_logs(self, G: DelaunayGraph):
"""
Saves DCA scores to files.
:param G: distilled Delaunay graph with local and global evaluation scores.
"""
path = os.path.join(self.results_dir, "network_stats.pkl")
with open(path, "wb") as f:
pickle.dump(G.network_stats, f)
logger.debug(f"DelaunayGraph network_stats saved")
path = os.path.join(self.results_dir, "components_stats.pkl")
with open(path, "wb") as f:
pickle.dump(G.comp_stats, f)
logger.debug(f"DelaunayGraph components_stats saved")
output = G.save_stats()
with open(os.path.join(self.DCA_dir, "output.json"), "w") as fp:
json.dump(output, fp, indent=4)
def cleanup(self, remove_visualizations: bool = True, remove_logs: bool = True):
"""
Removes the DCA files in the experiment folder. Default removes all files except for the output scores.
:param remove_visualizations: whether to remove the visualizations.
:param remove_logs: whether to remove the logging files.
"""
# Remove precomputed folder
os.system(f"rm -r {self.precomputed_dir}")
# Remove DCA dir
os.system((f"rm -r {self.results_dir}"))
# Remove logs
if remove_logs:
os.system(f"rm -r {self.logs_dir}")
else: # Remove all non-log files, eg npy from qDCA
for file in os.listdir(str(self.logs_dir)):
if not file.endswith(".logs"):
os.system(f"rm {file}")
# Remove logs
if remove_visualizations:
os.system(f"rm -r {self.visualization_dir}")
print("- Cleanup completed.")
# -------------------------------------------------------------------------- #
# qDCA: query point processing
# -------------------------------------------------------------------------- #
@logger_time
def process_query_points(
self,
init_data: REData,
query_data: QueryData,
assign_to_comp: bool = False,
consider_several_assignments: bool = False,
assign_to_R: bool = False,
assign_to_RE: bool = False,
return_len: bool = False,
):
"""
query point Delaunay Component Analysis (q-DCA).
:param init_data: R and E data parameters.
:param query_data: query data parameters.
:param assign_to_comp: whether to assign query points to fundamental components.
:param consider_several_assignments: whether to consider fliexible assignment.
:param assign_to_R: whether to assign query points to R points only.
:param assign_to_RE: whether to assign query points to R and E points.
:param return_len: whether to return the length of the shortest edges.
:return: dataframe of query point indices and the associated assignments.
"""
self.loggers.qdca_flag = True
G = DelaunayGraph(init_data.num_R, init_data.num_E)
G.load_existing(self.results_dir)
logger.debug("Loaded existing DelaunayGraph")
self.preprocess_query_data(query_data)
if assign_to_comp:
(
query_points_comp_labels,
considered_comp_idx_list,
) = self.assign_query_points_to_components(
init_data,
query_data,
G,
consider_several_assignments=consider_several_assignments,
)
logger.debug("Query points assigned to connected components")
print(
| |
#!/usr/bin/env python
from os.path import abspath, isfile
from optparse import OptionGroup
from pydpiper.pipeline import Pipeline
from pydpiper.file_handling import createBaseName, createLogFile, removeBaseAndExtension
from pydpiper.application import AbstractApplication
from atoms_and_modules.registration_file_handling import RegistrationPipeFH
import atoms_and_modules.registration_functions as rf
import atoms_and_modules.minc_parameters as mp
from atoms_and_modules.minc_atoms import blur, mincresample, mincANTS, mincAverage, minctracc
from atoms_and_modules.stats_tools import addStatsOptions, CalcStats
import sys
import logging
logger = logging.getLogger(__name__)
def addNlinRegOptionGroup(parser):
    """Attach the non-linear registration option group to the given parser."""
    nlin_group = OptionGroup(parser, "Nonlinear registration options",
                             "Options for performing a non-linear registration")
    nlin_group.add_option("--target-avg", dest="target_avg",
                          type="string", default=None,
                          help="Starting target for non-linear alignment. (Often in lsq12 space)")
    nlin_group.add_option("--target-mask", dest="target_mask",
                          type="string", default=None,
                          help="Optional mask for target.")
    nlin_group.add_option("--registration-method", dest="reg_method",
                          default="mincANTS", type="string",
                          help="Specify whether to use minctracc or mincANTS for non-linear registrations. "
                          "Default is mincANTS (and minctracc when running MAGeT.py).")
    nlin_group.add_option("--nlin-protocol", dest="nlin_protocol",
                          type="string", default=None,
                          help="Can optionally specify a registration protocol that is different from defaults. "
                          "Parameters must be specified as in either or the following examples: \n"
                          "applications_testing/test_data/minctracc_example_nlin_protocol.csv \n"
                          "applications_testing/test_data/mincANTS_example_nlin_protocol.csv \n"
                          "Default is None.")
    parser.add_option_group(nlin_group)
def finalGenerationFileNames(inputFH):
    """Return (transform, resampled) output names for the final nlin
    generation; the defaults are deliberately not used here.

    The naming of the final resampled files/transforms is the same regardless
    of registration protocol (minctracc vs mincANTS) or number of generations.
    """
    base = removeBaseAndExtension(inputFH.basename)
    registerOutput = createBaseName(inputFH.setOutputDirectory("transforms"),
                                    base + "-final-nlin.xfm")
    resampleOutput = createBaseName(inputFH.setOutputDirectory("resampled"),
                                    base + "-resampled-final-nlin.mnc")
    return (registerOutput, resampleOutput)
class NonlinearRegistration(AbstractApplication):
    """
    Iteratively registers one or more input files non-linearly towards a
    single target. Two non-linear registration programs are supported:
    minctracc (mni_autoreg, McGill) and mincANTS (Advanced Normalization
    Tools, U Penn). Statistics between the final average and each input may
    optionally be calculated at the end.

    Source:
        One or more files with or without a mask.
    Target:
        Single input file with or without a mask.

    The application is intended to be run on inputs and a target that have
    already undergone an affine (LSQ12) registration. It will also run on
    data that is not in LSQ12 space, although the alignment is then likely
    to be less successful.
    """
    def setup_options(self):
        """Register the option groups this application understands."""
        rf.addGenRegOptionGroup(self.parser)
        addNlinRegOptionGroup(self.parser)
        addStatsOptions(self.parser)
        self.parser.set_usage("%prog [options] input files")
    def setup_appName(self):
        return "Nonlinear-registration"
    def run(self):
        opts = self.options
        # Output directories for the non-linear stage.
        dirs = rf.setupDirectories(self.outputDir, opts.pipeline_name,
                                   module="NLIN")
        # File handlers for the inputs supplied on the command line.
        inputFiles = rf.initializeInputFiles(self.args, dirs.processedDir,
                                             maskDir=opts.mask_dir)
        # An initial average is built only when no starting target was given.
        nlinObj = initializeAndRunNLIN(self.outputDir,
                                       inputFiles,
                                       dirs.nlinDir,
                                       avgPrefix=opts.pipeline_name,
                                       createAvg=not opts.target_avg,
                                       targetAvg=opts.target_avg,
                                       targetMask=opts.target_mask,
                                       nlin_protocol=opts.nlin_protocol,
                                       reg_method=opts.reg_method)
        self.pipeline.addPipeline(nlinObj.p)
        self.nlinAverages = nlinObj.nlinAverages
        if opts.calc_stats:
            # Statistics go from the final nlin average to every input file.
            finalNlin = self.nlinAverages[-1]
            for inputFH in inputFiles:
                stats = CalcStats(inputFH, finalNlin, opts.stats_kernels)
                self.pipeline.addPipeline(stats.p)
class initializeAndRunNLIN(object):
"""Class to setup target average (if needed),
instantiate correct version of NLIN class,
and run NLIN registration."""
def __init__(self,
targetOutputDir, #Output directory for files related to initial target (often _lsq12)
inputFiles,
nlinDir,
avgPrefix, #Prefix for nlin-1.mnc, ... nlin-k.mnc
createAvg=True, #True=call mincAvg, False=targetAvg already exists
targetAvg=None, #Optional path to initial target - passing name does not guarantee existence
targetMask=None, #Optional path to mask for initial target
nlin_protocol=None,
reg_method=None):
self.p = Pipeline()
self.targetOutputDir = targetOutputDir
self.inputFiles = inputFiles
self.nlinDir = nlinDir
self.avgPrefix = avgPrefix
self.createAvg = createAvg
self.targetAvg = targetAvg
self.targetMask = targetMask
self.nlin_protocol = nlin_protocol
self.reg_method = reg_method
# setup initialTarget (if needed) and initialize non-linear module
self.setupTarget()
self.initNlinModule()
#iterate through non-linear registration and setup averages
self.nlinModule.iterate()
self.p.addPipeline(self.nlinModule.p)
self.nlinAverages = self.nlinModule.nlinAverages
self.nlinParams = self.nlinModule.nlinParams
def setupTarget(self):
if self.targetAvg:
if isinstance(self.targetAvg, str):
self.initialTarget = RegistrationPipeFH(self.targetAvg,
mask=self.targetMask,
basedir=self.targetOutputDir)
self.outputAvg = self.targetAvg
elif isinstance(self.targetAvg, RegistrationPipeFH):
self.initialTarget = self.targetAvg
self.outputAvg = self.targetAvg.getLastBasevol()
if not self.initialTarget.getMask():
if self.targetMask:
self.initialTarget.setMask(self.targetMask)
else:
print "You have passed a target average that is neither a string nor a file handler: " + str(self.targetAvg)
print "Exiting..."
else:
self.targetAvg = abspath(self.targetOutputDir) + "/" + "initial-target.mnc"
self.initialTarget = RegistrationPipeFH(self.targetAvg,
mask=self.targetMask,
basedir=self.targetOutputDir)
self.outputAvg = self.targetAvg
if self.createAvg:
avg = mincAverage(self.inputFiles,
self.initialTarget,
output=self.outputAvg,
defaultDir=self.targetOutputDir)
self.p.addStage(avg)
def initNlinModule(self):
if self.reg_method=="mincANTS":
self.nlinModule = NLINANTS(self.inputFiles, self.initialTarget, self.nlinDir, self.avgPrefix, self.nlin_protocol)
elif self.reg_method=="minctracc":
self.nlinModule = NLINminctracc(self.inputFiles, self.initialTarget, self.nlinDir, self.avgPrefix, self.nlin_protocol)
else:
logger.error("Incorrect registration method specified: " + self.reg_method)
sys.exit()
class NLINBase(object):
    """
    Parent class for any iterative non-linear registration.

    Each generation blurs the current target and every input, registers each
    input to the target, resamples, and averages the results; the average
    becomes the next generation's target.

    Subclasses must set self.generations and extend the following methods:
        addBlurStage()
        regAndResample()
    """
    def __init__(self, inputArray, targetFH, nlinOutputDir, avgPrefix, nlin_protocol):
        self.p = Pipeline()
        """Initial inputs should be an array of fileHandlers with lastBasevol in lsq12 space"""
        self.inputs = inputArray
        """Initial target should be the file handler for the lsq12 average"""
        self.target = targetFH
        """Output directory should be _nlin """
        self.nlinDir = nlinOutputDir
        """Prefix to pre-pend to averages at each generation"""
        self.avgPrefix = avgPrefix
        """Empty array that we will fill with averages as we create them"""
        self.nlinAverages = []
        """Create the blurring resolution from the file resolution"""
        if nlin_protocol==None:
            self.fileRes = rf.returnFinestResolution(self.inputs[0])
        else:
            # NOTE(review): when a protocol file is given, the resolution is
            # presumably read from the protocol by the subclass -- confirm.
            self.fileRes = None
        # Create new nlin group for each input prior to registration
        for i in range(len(self.inputs)):
            self.inputs[i].newGroup(groupName="nlin")
    # NOTE(review): the two stubs below are called with extra arguments in
    # iterate() (e.g. addBlurStage(self.target, i)); subclasses override them
    # with the matching (FH, i) signatures, so the base stubs' arity differs.
    def addBlurStage(self):
        """
        Add blurs to pipeline. Because blurs are handled differently by
        parameter arrays in minctracc and mincANTS subclasses, they are added
        to the pipeline via function call.
        """
        pass
    def regAndResample(self):
        """Registration and resampling calls"""
        pass
    def iterate(self):
        # One pass per generation: blur target and inputs, register each
        # input, then average the resampled outputs into the next target.
        for i in range(self.generations):
            # Average for this generation, e.g. "<avgPrefix>-nlin-1.mnc".
            outputName = "nlin-%g.mnc" % (i+1)
            if self.avgPrefix:
                outputName = str(self.avgPrefix) + "-" + outputName
            nlinOutput = abspath(self.nlinDir) + "/" + outputName
            nlinFH = RegistrationPipeFH(nlinOutput, mask=self.target.getMask(), basedir=self.nlinDir)
            self.addBlurStage(self.target, i)
            filesToAvg = []
            for inputFH in self.inputs:
                self.addBlurStage(inputFH, i)
                # Appends this input's resampled volume to filesToAvg.
                self.regAndResample(inputFH, i, filesToAvg, nlinFH)
            """Because we don't reset lastBasevol on each inputFH, call mincAverage with files only.
               We create fileHandler first though, so we have log directory.
               This solution seems a bit hackish--may want to modify?
               Additionally, we are currently using the full RegistrationPipeFH class, but ultimately
               we'll want to create a third class that is somewhere between a full and base class.
            """
            logBase = removeBaseAndExtension(nlinOutput)
            avgLog = createLogFile(nlinFH.logDir, logBase)
            avg = mincAverage(filesToAvg, nlinOutput, logFile=avgLog)
            self.p.addStage(avg)
            """Reset target for next iteration and add to array"""
            self.target = nlinFH
            self.nlinAverages.append(nlinFH)
            """Create a final nlin group to add to the inputFH.
               lastBasevol = by default, will grab the lastBasevol used in these calculations (e.g. lsq12)
               setLastXfm between final nlin average and inputFH will be set for stats calculations.
            """
            if i == (self.generations -1):
                for inputFH in self.inputs:
                    """NOTE: The last xfm being set below is NOT the result of a registration between
                       inputFH and nlinFH, but rather is the output transform from the previous generation's
                       average."""
                    # NOTE(review): with self.generations == 1 this indexes
                    # nlinAverages[-1], i.e. the average appended just above
                    # rather than a *previous* generation's average -- looks
                    # like generations >= 2 is assumed here; confirm.
                    finalXfm = inputFH.getLastXfm(self.nlinAverages[self.generations-2])
                    inputFH.newGroup(groupName="final")
                    inputFH.setLastXfm(nlinFH, finalXfm)
class NLINANTS(NLINBase):
"""
This class does an iterative non-linear registration using the mincANTS
registration protocol. The default number of generations is three.
"""
def __init__(self, inputArray, targetFH, nlinOutputDir, avgPrefix, nlin_protocol=None):
NLINBase.__init__(self, inputArray, targetFH, nlinOutputDir, avgPrefix, nlin_protocol)
"""Setup parameters, either as defaults, or read from a .csv"""
self.nlinParams = mp.setMincANTSParams(self.fileRes, reg_protocol=nlin_protocol)
self.blurs = self.nlinParams.blurs
self.gradient = self.nlinParams.gradient
self.similarityMetric = self.nlinParams.similarityMetric
self.weight = self.nlinParams.weight
self.radiusHisto = self.nlinParams.radiusHisto
self.transformationModel = self.nlinParams.transformationModel
self.regularization = self.nlinParams.regularization
self.iterations = self.nlinParams.iterations
self.useMask = self.nlinParams.useMask
self.generations = self.nlinParams.generations
def addBlurStage(self, FH, i):
for j in self.blurs[i]:
if j != -1:
tblur = blur(FH, j, gradient=True)
self.p.addStage(tblur)
def regAndResample(self, inputFH, i, filesToAvg, nlinFH):
"""For last generation, override default output names.
Note that the defaultDir specified in the mincANTS call
is ignored in this instance. """
if i == (self.generations -1):
registerOutput, resampleOutput = finalGenerationFileNames(inputFH)
else:
registerOutput = None
resampleOutput = | |
<gh_stars>10-100
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.4
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from nucleus_api.configuration import Configuration
class Model(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'benchmark_id': 'str',
'cash_sec': 'str',
'category': 'str',
'client_id': 'str',
'create_date': 'datetime',
'currency_code': 'str',
'default_drift_factor': 'float',
'description': 'str',
'downside': 'bool',
'drift_rebal': 'bool',
'id': 'str',
'is_active': 'bool',
'metadata': 'dict(str, str)',
'name': 'str',
'node_map': 'list[AllocationNodeMap]',
'period_rebal': 'bool',
'rebalance_period': 'int',
'safe_sec': 'str',
'sec_rotation': 'bool',
'secondary_id': 'str',
'tax_efficiency_id': 'int',
'update_date': 'datetime'
}
attribute_map = {
'benchmark_id': 'benchmark_id',
'cash_sec': 'cash_sec',
'category': 'category',
'client_id': 'client_id',
'create_date': 'create_date',
'currency_code': 'currency_code',
'default_drift_factor': 'default_drift_factor',
'description': 'description',
'downside': 'downside',
'drift_rebal': 'drift_rebal',
'id': 'id',
'is_active': 'is_active',
'metadata': 'metadata',
'name': 'name',
'node_map': 'node_map',
'period_rebal': 'period_rebal',
'rebalance_period': 'rebalance_period',
'safe_sec': 'safe_sec',
'sec_rotation': 'sec_rotation',
'secondary_id': 'secondary_id',
'tax_efficiency_id': 'tax_efficiency_id',
'update_date': 'update_date'
}
def __init__(self, benchmark_id=None, cash_sec=None, category=None, client_id=None, create_date=None, currency_code=None, default_drift_factor=None, description=None, downside=None, drift_rebal=None, id=None, is_active=None, metadata=None, name=None, node_map=None, period_rebal=None, rebalance_period=None, safe_sec=None, sec_rotation=None, secondary_id=None, tax_efficiency_id=None, update_date=None, _configuration=None): # noqa: E501
"""Model - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._benchmark_id = None
self._cash_sec = None
self._category = None
self._client_id = None
self._create_date = None
self._currency_code = None
self._default_drift_factor = None
self._description = None
self._downside = None
self._drift_rebal = None
self._id = None
self._is_active = None
self._metadata = None
self._name = None
self._node_map = None
self._period_rebal = None
self._rebalance_period = None
self._safe_sec = None
self._sec_rotation = None
self._secondary_id = None
self._tax_efficiency_id = None
self._update_date = None
self.discriminator = None
if benchmark_id is not None:
self.benchmark_id = benchmark_id
if cash_sec is not None:
self.cash_sec = cash_sec
if category is not None:
self.category = category
if client_id is not None:
self.client_id = client_id
if create_date is not None:
self.create_date = create_date
if currency_code is not None:
self.currency_code = currency_code
if default_drift_factor is not None:
self.default_drift_factor = default_drift_factor
if description is not None:
self.description = description
if downside is not None:
self.downside = downside
if drift_rebal is not None:
self.drift_rebal = drift_rebal
if id is not None:
self.id = id
if is_active is not None:
self.is_active = is_active
if metadata is not None:
self.metadata = metadata
self.name = name
if node_map is not None:
self.node_map = node_map
if period_rebal is not None:
self.period_rebal = period_rebal
if rebalance_period is not None:
self.rebalance_period = rebalance_period
if safe_sec is not None:
self.safe_sec = safe_sec
if sec_rotation is not None:
self.sec_rotation = sec_rotation
if secondary_id is not None:
self.secondary_id = secondary_id
if tax_efficiency_id is not None:
self.tax_efficiency_id = tax_efficiency_id
if update_date is not None:
self.update_date = update_date
@property
def benchmark_id(self):
"""Gets the benchmark_id of this Model. # noqa: E501
benchmarkId # noqa: E501
:return: The benchmark_id of this Model. # noqa: E501
:rtype: str
"""
return self._benchmark_id
@benchmark_id.setter
def benchmark_id(self, benchmark_id):
"""Sets the benchmark_id of this Model.
benchmarkId # noqa: E501
:param benchmark_id: The benchmark_id of this Model. # noqa: E501
:type: str
"""
self._benchmark_id = benchmark_id
@property
def cash_sec(self):
"""Gets the cash_sec of this Model. # noqa: E501
cashSec # noqa: E501
:return: The cash_sec of this Model. # noqa: E501
:rtype: str
"""
return self._cash_sec
@cash_sec.setter
def cash_sec(self, cash_sec):
"""Sets the cash_sec of this Model.
cashSec # noqa: E501
:param cash_sec: The cash_sec of this Model. # noqa: E501
:type: str
"""
self._cash_sec = cash_sec
@property
def category(self):
"""Gets the category of this Model. # noqa: E501
category # noqa: E501
:return: The category of this Model. # noqa: E501
:rtype: str
"""
return self._category
@category.setter
def category(self, category):
"""Sets the category of this Model.
category # noqa: E501
:param category: The category of this Model. # noqa: E501
:type: str
"""
self._category = category
@property
def client_id(self):
"""Gets the client_id of this Model. # noqa: E501
clientId # noqa: E501
:return: The client_id of this Model. # noqa: E501
:rtype: str
"""
return self._client_id
@client_id.setter
def client_id(self, client_id):
"""Sets the client_id of this Model.
clientId # noqa: E501
:param client_id: The client_id of this Model. # noqa: E501
:type: str
"""
self._client_id = client_id
@property
def create_date(self):
"""Gets the create_date of this Model. # noqa: E501
:return: The create_date of this Model. # noqa: E501
:rtype: datetime
"""
return self._create_date
@create_date.setter
def create_date(self, create_date):
"""Sets the create_date of this Model.
:param create_date: The create_date of this Model. # noqa: E501
:type: datetime
"""
self._create_date = create_date
@property
def currency_code(self):
"""Gets the currency_code of this Model. # noqa: E501
currency_code # noqa: E501
:return: The currency_code of this Model. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this Model.
currency_code # noqa: E501
:param currency_code: The currency_code of this Model. # noqa: E501
:type: str
"""
self._currency_code = currency_code
@property
def default_drift_factor(self):
"""Gets the default_drift_factor of this Model. # noqa: E501
defaultDriftFactor # noqa: E501
:return: The default_drift_factor of this Model. # noqa: E501
:rtype: float
"""
return self._default_drift_factor
@default_drift_factor.setter
def default_drift_factor(self, default_drift_factor):
"""Sets the default_drift_factor of this Model.
defaultDriftFactor # noqa: E501
:param default_drift_factor: The default_drift_factor of this Model. # noqa: E501
:type: float
"""
self._default_drift_factor = default_drift_factor
@property
def description(self):
"""Gets the description of this Model. # noqa: E501
description # noqa: E501
:return: The description of this Model. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Model.
description # noqa: E501
:param description: The description of this Model. # noqa: E501
:type: str
"""
self._description = description
@property
def downside(self):
"""Gets the downside of this Model. # noqa: E501
downside # noqa: E501
:return: The downside of this Model. # noqa: E501
:rtype: bool
"""
return self._downside
@downside.setter
def downside(self, downside):
"""Sets the downside of this Model.
downside # noqa: E501
:param downside: The downside of this Model. # noqa: E501
:type: bool
"""
self._downside = downside
@property
def drift_rebal(self):
"""Gets the drift_rebal of this Model. # noqa: E501
driftRebal # noqa: E501
:return: The drift_rebal of this Model. # noqa: E501
:rtype: bool
"""
return self._drift_rebal
@drift_rebal.setter
def drift_rebal(self, drift_rebal):
"""Sets the drift_rebal of this Model.
driftRebal # noqa: E501
:param drift_rebal: The drift_rebal of this Model. # noqa: E501
:type: bool
"""
self._drift_rebal = drift_rebal
@property
def id(self):
"""Gets the id of this Model. # noqa: E501
:return: The id of this Model. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Model.
:param id: The id of this Model. # noqa: E501
:type: str
"""
self._id = id
@property
def is_active(self):
"""Gets the is_active of this Model. # noqa: E501
isActive # noqa: E501
:return: The is_active of this Model. # noqa: E501
:rtype: bool
"""
return self._is_active
@is_active.setter
def is_active(self, is_active):
"""Sets the is_active of this Model.
isActive # noqa: E501
:param is_active: The is_active of this Model. # noqa: E501
:type: bool
"""
self._is_active = is_active
@property
def metadata(self):
"""Gets the metadata of this Model. # noqa: E501
:return: The metadata of this Model. # noqa: E501
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this Model.
:param metadata: The metadata of this Model. # noqa: E501
:type: dict(str, str)
"""
self._metadata = metadata
@property
def name(self):
"""Gets the name of this Model. # noqa: E501
name # noqa: E501
:return: The name of this Model. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Model.
name # noqa: E501
:param name: The name of this Model. # noqa: E501
:type: | |
import numpy as np
import math
import pandas as pd
from datetime import date, datetime, timedelta
import yfinance as yf
# from db import connect_to_db
def round_down(x):
    """Round *x* down to the nearest (smaller) multiple of 100."""
    hundreds = math.floor(x / 100.0)
    return int(hundreds) * 100
def rsi(data, column, window=2):
    """Compute the Wilder-smoothed Relative Strength Index of ``data[column]``.

    Parameters
    ----------
    data : pd.DataFrame
        Price history; must contain *column*. Not modified (a copy is used).
    column : str
        Name of the price column to compute the RSI on.
    window : int, default 2
        Smoothing window (the IFR2 strategy uses 2).

    Returns
    -------
    pd.Series
        RSI values aligned with ``data`` minus its first row (dropped by
        ``diff``); the first ``window - 1`` entries are NaN.
    """
    data = data.copy()
    # Daily variation; the first row has no previous value, so drop it.
    data["Variation"] = data[column].diff()
    # .copy() so the column assignments below don't hit a slice view
    # (avoids SettingWithCopyWarning).
    data = data.iloc[1:].copy()
    data["Gain"] = np.where(data["Variation"] > 0, data["Variation"], 0)
    data["Loss"] = np.where(data["Variation"] < 0, data["Variation"], 0)
    abs_loss = data["Loss"].abs()  # hoisted out of the loop below
    # Simple moving averages seed the classic (Wilder) smoothed averages.
    simple_avg_gain = data["Gain"].rolling(window).mean()
    simple_avg_loss = abs_loss.rolling(window).mean()
    classic_avg_gain = simple_avg_gain.copy()
    classic_avg_loss = simple_avg_loss.copy()
    # BUGFIX: use positional (.iloc) indexing. The original label-based
    # ``series[i]`` was off-by-one on integer indexes (labels start at 1
    # after the row drop above) and relied on a deprecated positional
    # fallback for datetime indexes.
    for i in range(window, len(classic_avg_gain)):
        classic_avg_gain.iloc[i] = (
            classic_avg_gain.iloc[i - 1] * (window - 1) + data["Gain"].iloc[i]
        ) / window
        classic_avg_loss.iloc[i] = (
            classic_avg_loss.iloc[i - 1] * (window - 1) + abs_loss.iloc[i]
        ) / window
    # RSI = 100 - 100 / (1 + RS); a zero average loss yields RS=inf, RSI=100.
    RS = classic_avg_gain / classic_avg_loss
    RSI = 100 - (100 / (1 + RS))
    return RSI
def strategy_points(data, rsi_parameter=None):
    """Annotate *data* (in place) with IFR2 entry/exit columns.

    Adds "Target" (higher of the two previous highs), "Buy Price" (close,
    where IFR2 <= *rsi_parameter*, default 30) and "Sell Price" (filled when
    the high breaks the target; the open is used if the day gapped above it).
    NaN marks rows without a signal. Returns the same DataFrame.
    """
    if rsi_parameter is None:
        rsi_parameter = 30
    # Exit target: the higher of yesterday's and the day before's highs.
    previous_high = data["High"].shift(1)
    older_high = data["High"].shift(2)
    data["Target"] = pd.concat([previous_high, older_high], axis=1).max(axis=1)
    # Entry: close of any day whose IFR2 is at or below the threshold.
    data["Buy Price"] = data["Close"].where(data["IFR2"] <= rsi_parameter)
    # Exit: when the high trades through the target; if the day opened above
    # the target (gap), the fill is the open instead.
    hit_target = data["High"] > data["Target"]
    gapped_open = data["Open"] > data["Target"]
    data["Sell Price"] = np.where(
        hit_target,
        np.where(gapped_open, data["Open"], data["Target"]),
        np.nan,
    )
    return data
def backtest_algorithm(data, initial_capital=10000):
    """Simulate the IFR2 strategy over *data*.

    Parameters
    ----------
    data : pd.DataFrame
        Must contain the "Buy Price" and "Sell Price" columns produced by
        ``strategy_points`` (NaN where there is no signal).
    initial_capital : number, default 10000
        Capital used to size every position (fixed sizing, no compounding).

    Returns
    -------
    (all_profits, total_capital) : tuple of lists
        Profit per closed operation and the running capital after each
        operation. Both are seeded (with 0 and *initial_capital*) so they
        always have the same length.
    """
    total_capital = [initial_capital]
    all_profits = [0]  # seeded with 0 so both lists stay the same size
    ongoing = False
    shares, entry = 0, 0
    buy_prices = data["Buy Price"]
    sell_prices = data["Sell Price"]
    # .iloc throughout: label-based ``series[i]`` misbehaves on non-default
    # integer indexes and is deprecated for datetime indexes.
    for i in range(len(data)):
        if ongoing:
            sell_price = sell_prices.iloc[i]
            if not np.isnan(sell_price):
                # Close the position and book the profit.
                profit = shares * (sell_price - entry)
                all_profits.append(profit)
                total_capital.append(total_capital[-1] + profit)
                ongoing = False
        else:
            buy_price = buy_prices.iloc[i]
            if not np.isnan(buy_price):
                entry = buy_price
                # Largest round lot (multiple of 100 shares) affordable with
                # the *initial* capital -- deliberately not compounded.
                shares = round_down(initial_capital / entry)
                ongoing = True
    return all_profits, total_capital
def get_drawdown(data, column="Adj Close"):
    """Add running-peak columns to *data* (in place) and return the maximum
    drawdown of ``data[column]`` as a percentage of the peak."""
    peak = data[column].cummax()
    data["Max"] = peak
    data["Delta"] = peak - data[column]
    data["Drawdown"] = 100 * (data["Delta"] / peak)
    return data["Drawdown"].max()
def get_beta(asset, benchmark):
    """Return (beta, correlation, asset-return std, benchmark-return std).

    Beta is corr * (sigma_asset / sigma_bench) over single-period percentage
    returns; the leading NaN return from pct_change is dropped.
    """
    returns_asset = asset.pct_change()[1:]
    returns_bench = benchmark.pct_change()[1:]
    corr = returns_asset.corr(returns_bench)
    std_asset = returns_asset.std()
    std_bench = returns_bench.std()
    return corr * (std_asset / std_bench), corr, std_asset, std_bench
def strategy_test(all_profits, total_capital):
    """Summarize a backtest into aggregate statistics.

    Parameters
    ----------
    all_profits : list
        Profit of each closed operation (first element is the 0 seed).
    total_capital : list
        Capital after each operation; ``total_capital[0]`` is the initial
        capital.

    Returns
    -------
    dict
        Operation counts, win/loss percentages (rounded to whole percent),
        total and percentage profit, and maximum capital drawdown.
    """
    gains = sum(x > 0 for x in all_profits)
    losses = sum(x < 0 for x in all_profits)
    num_operations = gains + losses
    if num_operations == 0:
        # No closed operation: report a flat summary instead of raising
        # ZeroDivisionError below.
        return {
            "num_operations": 0,
            "gains": 0,
            "pct_gains": 0.0,
            "losses": 0,
            "pct_losses": 0.0,
            "total_profit": 0,
            "pct_profit": 0.0,
            "drawdown": 0.0,
        }
    pct_gains = 100 * (gains / num_operations)
    pct_losses = 100 - pct_gains
    total_profit = sum(all_profits)
    # The first entry in total_capital is the initial capital.
    pct_profit = (total_profit / total_capital[0]) * 100
    # Compute drawdown on the running-capital curve.
    capital_df = pd.DataFrame(data=total_capital, columns=["total_capital"])
    drawdown = get_drawdown(data=capital_df, column="total_capital")
    return {
        "num_operations": int(num_operations),
        "gains": int(gains),
        # BUGFIX: builtin round() works for plain Python floats too; the
        # original ``pct_gains.round()`` only exists on numpy scalars.
        "pct_gains": round(pct_gains, 0),
        "losses": int(losses),
        "pct_losses": round(pct_losses, 0),
        "total_profit": total_profit,
        "pct_profit": pct_profit,
        "drawdown": drawdown,
    }
def get_tickers(portfolio_name):
    """Return the Yahoo-Finance symbols tracked for *portfolio_name*.

    NOTE: the symbols used to be loaded from the ``asset`` /
    ``asset_portfolio`` / ``portfolio`` tables (see the commented-out query
    in version control); they are hard-coded for now, so *portfolio_name*
    is currently ignored.
    """
    return [
        "ABEV3.SA", "ASAI3.SA", "AZUL4.SA", "BTOW3.SA", "B3SA3.SA",
        "BIDI11.SA", "BBSE3.SA", "BRML3.SA", "BBDC3.SA", "BBDC4.SA",
        "BRAP4.SA", "BBAS3.SA", "BRKM5.SA", "BRFS3.SA", "BPAC11.SA",
        "CRFB3.SA", "CCRO3.SA", "CMIG4.SA", "HGTX3.SA", "CIEL3.SA",
        "COGN3.SA", "CPLE6.SA", "CSAN3.SA", "CPFE3.SA", "CVCB3.SA",
        "CYRE3.SA", "ECOR3.SA", "ELET3.SA", "ELET6.SA", "EMBR3.SA",
        "ENBR3.SA", "ENGI11.SA", "ENEV3.SA", "EGIE3.SA", "EQTL3.SA",
        "EZTC3.SA", "FLRY3.SA", "GGBR4.SA", "GOAU4.SA", "GOLL4.SA",
        "NTCO3.SA", "HAPV3.SA", "HYPE3.SA", "IGTA3.SA", "GNDI3.SA",
        "IRBR3.SA", "ITSA4.SA", "ITUB4.SA", "JBSS3.SA", "JHSF3.SA",
        "KLBN11.SA", "RENT3.SA", "LCAM3.SA", "LWSA3.SA", "LAME4.SA",
        "LREN3.SA", "MGLU3.SA", "MRFG3.SA", "BEEF3.SA", "MRVE3.SA",
        "MULT3.SA", "PCAR3.SA", "PETR3.SA", "PETR4.SA", "BRDT3.SA",
        "PRIO3.SA", "QUAL3.SA", "RADL3.SA", "RAIL3.SA", "SBSP3.SA",
        "SANB11.SA", "CSNA3.SA", "SULA11.SA", "SUZB3.SA", "TAEE11.SA",
        "VIVT3.SA", "TIMS3.SA", "TOTS3.SA", "UGPA3.SA", "USIM5.SA",
        "VALE3.SA", "VVAR3.SA", "WEGE3.SA", "YDUQ3.SA",
    ]
def below_bands(data, k=2, n=20):
    """Boolean Series: True where *data* is under its lower Bollinger band
    (*n*-period mean minus *k* rolling standard deviations)."""
    rolling = data.rolling(n)
    lower_band = rolling.mean() - k * rolling.std()
    return data < lower_band
def above_bands(data, k=2, n=20):
    """Boolean Series: True where *data* is over its upper Bollinger band
    (*n*-period mean plus *k* rolling standard deviations)."""
    rolling = data.rolling(n)
    upper_band = rolling.mean() + k * rolling.std()
    return data > upper_band
def bb(data, k=2, n=20):
    """Add Bollinger Band columns ("Middle Band", "Upper Band", "Lower Band")
    to *data* in place, computed from "Adj Close", and return *data*."""
    rolling_close = data["Adj Close"].rolling(n)
    sigma = rolling_close.std()
    data["Middle Band"] = rolling_close.mean()
    data["Upper Band"] = data["Middle Band"] + sigma * k
    data["Lower Band"] = data["Middle Band"] - sigma * k
    return data
def position_relative_to_bands(asset_name, data, k=2, n=20):
    """Describe (in Portuguese) where the last value of *data* sits relative
    to its Bollinger bands (*n* periods, *k* standard deviations).

    Returns one of three fixed messages: below, above, or inside the bands.
    """
    # BUGFIX: use .iloc[-1]. ``series[-1]`` is a label lookup that raises
    # KeyError on a default RangeIndex and relies on a deprecated positional
    # fallback on datetime indexes.
    if below_bands(data, k, n).iloc[-1]:
        return f"{asset_name} está abaixo das Bandas de Bollinger"
    elif above_bands(data, k, n).iloc[-1]:
        return f"{asset_name} está acima das Bandas de Bollinger"
    else:
        return f"{asset_name} está dentro das Bandas de Bollinger"
def stochastic(df, k_window=8, mma_window=3):
    """Return a copy of *df* with fast and slow stochastic oscillator columns.

    %K = 100 * (close - lowest low) / (highest high - lowest low) over
    *k_window* periods; %D is its *mma_window*-period SMA.  The slow
    oscillator re-smooths once more: Slow %K = %D, Slow %D = SMA(Slow %K).
    """
    df = df.copy()
    highest_high = df["High"].rolling(k_window).max()
    lowest_low = df["Low"].rolling(k_window).min()
    price_range = highest_high - lowest_low
    df["%K"] = ((df["Adj Close"] - lowest_low) / price_range) * 100
    df["%D"] = df["%K"].rolling(mma_window).mean()
    df["Slow %K"] = df["%D"]
    df["Slow %D"] = df["Slow %K"].rolling(mma_window).mean()
    return df
def get_data(tickers, columns, start, end):
    """Download price history for *tickers* from Yahoo Finance (network I/O)
    between *start* and *end*, keeping only *columns*."""
    return yf.download(tickers, start=start, end=end).copy()[columns]
def get_interval(start_delta, end_delta=1):
    """Return (start, end) as ISO "YYYY-MM-DD" strings: *start_delta* days
    before now through *end_delta* days after today (the extra day makes
    end-exclusive date-range queries cover today)."""
    fmt = "%Y-%m-%d"
    start = (datetime.today() - timedelta(days=start_delta)).strftime(fmt)
    end = (date.today() + timedelta(days=end_delta)).strftime(fmt)
    return start, end
def get_rsi_info(df, tickers):
    """Build a per-ticker dict of IFR2 signal data from a multi-ticker frame.

    Parameters
    ----------
    df : pd.DataFrame
        Multi-ticker download with (field, ticker) MultiIndex columns, as
        returned by ``yf.download``. Column labels are flattened in place.
        Assumes a DatetimeIndex -- TODO confirm (the ``index >= start_date``
        string comparison below relies on it).
    tickers : iterable of str
        Ticker symbols (".SA" suffix is stripped in the result keys).

    Returns
    -------
    dict
        ticker -> {"rsi", "target", "price", "upside", "mm50_is_up",
        "variation"}.
    """
    # Flatten the (field, ticker) MultiIndex columns into "Field TICKER".
    df.columns = [" ".join(col).strip() for col in df.columns.values]
    all_rsi = {}
    for ticker in tickers:
        ticker_df = df[["Open " + ticker, "High " + ticker, "Adj Close " + ticker]].rename(
            columns={
                "Open " + ticker: "Open",
                "High " + ticker: "High",
                "Adj Close " + ticker: "Adj Close",
            }
        )
        ticker_df.dropna(inplace=True)
        # BUGFIX: .iloc[-1]/.iloc[-2] throughout -- plain ``series[-1]`` is a
        # deprecated label/positional lookup.
        rsi_value = int(round(rsi(ticker_df, "Adj Close", 2).iloc[-1]))
        max_today = ticker_df["High"].iloc[-1]
        max_1_day_ago = ticker_df["High"].iloc[-2]
        # The operation is entered at today's close, so tomorrow's exit target
        # is the max of today's and yesterday's highs (the "last two days"
        # as seen from tomorrow).
        target = max(max_today, max_1_day_ago)
        price = ticker_df["Adj Close"].iloc[-1]
        upside = ((target - price) / price) * 100
        # Variation over roughly the last 100 calendar days.
        interval = 100
        start_date = (datetime.today() - timedelta(days=interval)).strftime("%Y-%m-%d")
        first_date = ticker_df.index[ticker_df.index >= start_date][0]
        index = ticker_df.index.get_loc(first_date)
        initial_price = ticker_df.iloc[index]["Adj Close"]
        variation = ((price - initial_price) / initial_price) * 100
        # Trend filter: is the 50-period simple moving average rising?
        mm50 = ticker_df["Adj Close"].rolling(50).mean()
        mm50_is_up = 1 if mm50.iloc[-1] > mm50.iloc[-2] else 0
        all_rsi[ticker.replace(".SA", "")] = {
            "rsi": rsi_value,
            # builtin round() works for numpy scalars and plain floats alike
            # (the previous ``x.round(2)`` was numpy-only).
            "target": round(target, 2),
            "price": round(price, 2),
            "upside": round(upside, 2),
            "mm50_is_up": mm50_is_up,
            "variation": round(variation, 2),
        }
    return all_rsi
def get_stochastic_info(df, tickers):
    """Build a per-ticker dict of slow-stochastic signal data from a
    multi-ticker frame.

    Parameters
    ----------
    df : pd.DataFrame
        Multi-ticker download with (field, ticker) MultiIndex columns, as
        returned by ``yf.download``. Column labels are flattened in place.
        Assumes a DatetimeIndex -- TODO confirm (the ``index >= start_date``
        string comparison below relies on it).
    tickers : iterable of str
        Ticker symbols (".SA" suffix is stripped in the result keys).

    Returns
    -------
    dict
        ticker -> {"k", "d", "price", "variation", "k_is_up",
        "k_crossed_above", "k_crossed_below", "mme80_is_up"}.
    """
    # Flatten the (field, ticker) MultiIndex columns into "Field TICKER".
    df.columns = [" ".join(col).strip() for col in df.columns.values]
    all_stochastic = {}
    for ticker in tickers:
        ticker_df = df[["High " + ticker, "Low " + ticker, "Adj Close " + ticker]].rename(
            columns={
                "High " + ticker: "High",
                "Low " + ticker: "Low",
                "Adj Close " + ticker: "Adj Close",
            }
        )
        ticker_df.dropna(inplace=True)
        ticker_df = stochastic(ticker_df)
        # BUGFIX: .iloc[-1]/.iloc[-2] throughout -- plain ``series[-1]`` is a
        # deprecated label/positional lookup.
        price = ticker_df["Adj Close"].iloc[-1]
        # Variation over roughly the last 100 calendar days.
        interval = 100
        start_date = (datetime.today() - timedelta(days=interval)).strftime("%Y-%m-%d")
        first_date = ticker_df.index[ticker_df.index >= start_date][0]
        index = ticker_df.index.get_loc(first_date)
        initial_price = ticker_df.iloc[index]["Adj Close"]
        variation = ((price - initial_price) / initial_price) * 100
        # Slow %K direction.
        k_today = ticker_df["Slow %K"].iloc[-1]
        k_prev = ticker_df["Slow %K"].iloc[-2]
        k_is_up = 1 if k_today > k_prev else 0
        # Did slow %K cross above or below slow %D between yesterday and today?
        d_today = ticker_df["Slow %D"].iloc[-1]
        d_prev = ticker_df["Slow %D"].iloc[-2]
        k_crossed_above = 1 if (k_prev < d_prev) and (k_today > d_today) else 0
        k_crossed_below = 1 if (k_prev > d_prev) and (k_today < d_today) else 0
        # Trend filter: is the 80-period exponential moving average rising?
        mme80 = ticker_df["Adj Close"].ewm(span=80).mean()
        mme80_is_up = 1 if mme80.iloc[-1] > mme80.iloc[-2] else 0
        all_stochastic[ticker.replace(".SA", "")] = {
            "k": int(round(k_today)),
            "d": int(round(d_today)),
            # builtin round() works for numpy scalars and plain floats alike
            # (the previous ``x.round(2)`` was numpy-only).
            "price": round(price, 2),
            "variation": round(variation, 2),
            "k_is_up": k_is_up,
            "k_crossed_above": k_crossed_above,
            "k_crossed_below": k_crossed_below,
            "mme80_is_up": mme80_is_up,
        }
    return all_stochastic
def get_beta_info(df, tickers, ibov):
df.columns = [" ".join(col).strip() for col in df.columns.values]
all_beta = {}
for ticker in tickers:
new_df = df["Adj Close " + ticker]
new_df.dropna(inplace=True)
beta, corr, std_asset, std_bench = get_beta(new_df, ibov)
all_beta[ticker.replace(".SA", "")] = {
"beta": round(beta, 2),
"corr": round(corr, 2),
"std_asset": round(std_asset, 4),
"std_bench": | |
-1,
}
},
'camera_settings': {
'azimuth': 180,
'distance': 0.3,
'elevation': -50,
'lookat': np.array([0.02, 0.004, 0.09]),
},
}
# Camera / rendering configuration shared by the SlideBeads vision tasks:
# 32x32 renders delivered alongside the non-pixel observations.
SLIDE_BEADS_VISION_KWARGS = {
    'pixel_wrapper_kwargs': {
        'pixels_only': False,  # keep state observations next to 'pixels'
        'normalize': False,  # raw pixel values, no normalization
        'render_kwargs': {
            'width': 32,
            'height': 32,
            'camera_id': -1,  # presumably the free camera, positioned below — TODO confirm
        },
    },
    'camera_settings': {
        'azimuth': 90,
        'distance': 0.37,
        'elevation': -45,
        'lookat': (0, 0.046, -0.016),
    },
}
ENVIRONMENT_PARAMS_PER_UNIVERSE_DOMAIN_TASK_VISION = {
'gym': {
'Point2D': {
'Maze-v0': {
'action_scale': 0.5,
'images_are_rgb': True,
# === Use environment's count-based reward ===
'reward_type': 'none',
'use_count_reward': False,
'n_bins': tune.grid_search([50]), # Number of bins to discretize the space with
# === EASY ===
# 'wall_shape': 'easy-maze',
# 'init_pos_range': ((-2.5, -2.5), (-2.5, -2.5)),
# 'target_pos_range': ((2.5, -2.5), (2.5, -2.5)),
# === MEDIUM ===
'wall_shape': 'medium-maze',
'init_pos_range': ((-3, -3), (-3, -3)),
'target_pos_range': ((3, 3), (3, 3)),
# === HARD ===
# 'wall_shape': 'hard-maze',
# 'init_pos_range': ((-3, -3), (-3, -3)),
# 'target_pos_range': ((-0.5, 1.25), (-0.5, 1.25)),
# === HORIZONTAL (3 walls) ===
# 'wall_shape': 'horizontal-maze',
# 'init_pos_range': ((-3, -3), (-3, -3)),
# 'target_pos_range': ((-3, 3), (-3, 3)),
'render_onscreen': False,
'observation_keys': ('pixels', 'state_observation'),
'convert_obs_to_image': True,
'show_goal': False,
'ball_pixel_radius': 1,
'pixel_wrapper_kwargs': {
'render_kwargs': {
'mode': 'rgb_array',
'width': 48,
'height': 48,
'invert_colors': True,
},
'pixels_only': False,
'normalize': False,
},
},
},
'StateSawyer': {
'PickAndPlace3DEnv-v0': {
'observation_keys': ('pixels', 'state', ),
'pixel_wrapper_kwargs': {
'render_kwargs': {
'mode': 'rgb_array',
'width': 28,
'height': 28,
},
'pixels_only': False,
'normalize': False,
},
},
},
'Image48Sawyer': {
'PickAndPlace3DEnv-v0': {
'observation_keys': ('pixels',),
'pixel_wrapper_kwargs': {
'render_kwargs': {
'mode': 'rgb_array',
'width': 48,
'height': 48,
},
'pixels_only': False,
'normalize': False,
},
},
},
'DClaw': {
# === FIXED SCREW RANDOM RESET EVAL TASK BELOW ===
'TurnFixed-v0': {
**FIXED_SCREW_VISION_KWARGS,
# 'init_pos_range': (-np.pi, np.pi), # Random reset between -pi, pi
# Reset to every 45 degrees between -pi and pi
'init_pos_range': list(np.arange(-np.pi, np.pi, np.pi / 4)),
# === GOAL = -90 DEGREES ===
# Single goal + RND reset controller
'target_pos_range': [-np.pi / 2, -np.pi / 2],
# 2 goal + no RND reset controller
# 'target_pos_range': [-np.pi / 2, np.pi / 2],
# 1 goal + no RND reset controller
# 'target_pos_range': [-np.pi / 2],
'observation_keys': (
'pixels',
'<KEY>',
'last_action',
# == BELOW JUST FOR LOGGING ==
'object_angle_cos',
'object_angle_sin',
),
},
# === FIXED SCREW RESET FREE TASK BELOW ===
'TurnResetFree-v0': {
**FIXED_SCREW_VISION_KWARGS,
'reset_fingers': True,
'init_pos_range': (0, 0),
# Single goal + RND reset controller
'target_pos_range': [-np.pi / 2, -np.pi / 2],
# 2 goal + no RND reset controller
# 'target_pos_range': [-np.pi / 2, np.pi / 2],
# 1 goal + no RND reset controller
# 'target_pos_range': [-np.pi / 2],
'observation_keys': (
'claw_qpos',
'pixels',
'last_action',
# === BELOW JUST FOR LOGGING ===
'object_angle_cos',
'object_angle_sin',
),
},
'TurnFreeValve3Fixed-v0': {
'observation_keys': (
'claw_qpos',
'last_action',
'object_xy_position',
'object_z_orientation_cos',
'object_z_orientation_sin',
),
'init_qpos_range': ((0, 0, 0, 0, 0, 0), ) * 2,
'target_qpos_range': ((0, 0, 0, 0, 0, np.pi), ) * 2,
},
# Random evaluation environment for free screw
# 'TurnFreeValve3Fixed-v0': {
# **FREE_SCREW_VISION_KWARGS,
# # Random init evaluations
# # 'init_qpos_range': (
# # (-0.08, -0.08, 0, 0, 0, -np.pi),
# # (0.08, 0.08, 0, 0, 0, np.pi)
# # ),
# # Evaluations from fixed set of inits
# 'init_qpos_range': [
# (0, 0, 0, 0, 0, 0),
# (0, 0, 0, 0, 0, -np.pi),
# (0, 0, 0, 0, 0, -np.pi / 2),
# (0, 0, 0, 0, 0, np.pi / 2),
# (-0.05, 0.075, 0, 0, 0, -np.pi),
# (-0.075, 0.05, 0, 0, 0, -np.pi / 2),
# (-0.05, 0.05, 0, 0, 0, -3 * np.pi / 4),
# (-0.07, 0.07, 0, 0, 0, np.pi / 4),
# (0, 0.075, 0, 0, 0, -np.pi),
# (0.05, 0.075, 0, 0, 0, -np.pi),
# (0.075, 0.05, 0, 0, 0, np.pi / 2),
# (0.05, 0.05, 0, 0, 0, 3 * np.pi / 4),
# (0.07, 0.07, 0, 0, 0, -np.pi / 4),
# (-0.05, -0.075, 0, 0, 0, 0),
# (-0.075, -0.05, 0, 0, 0, -np.pi / 2),
# (-0.05, -0.05, 0, 0, 0, -np.pi / 4),
# (-0.07, -0.07, 0, 0, 0, 3 * np.pi / 4),
# (0, -0.075, 0, 0, 0, 0),
# (0.05, -0.075, 0, 0, 0, 0),
# (0.075, -0.05, 0, 0, 0, np.pi / 2),
# (0.05, -0.05, 0, 0, 0, np.pi / 4),
# (0.07, -0.07, 0, 0, 0, -3 * np.pi / 4),
# (-0.075, 0, 0, 0, 0, -np.pi / 2),
# (0.075, 0, 0, 0, 0, np.pi / 2),
# ],
# 'cycle_inits': True,
# # 1 goal for RND reset controller
# # 'target_qpos_range': [
# # (0, 0, 0, 0, 0, -np.pi / 2),
# # (0, 0, 0, 0, 0, -np.pi / 2),
# # ],
# # 2 goal, no RND reset controller
# # 'target_qpos_range': [
# # (0, 0, 0, 0, 0, -np.pi / 2),
# # (0, 0, 0, 0, 0, np.pi / 2),
# # ],
# # 2 goals
# 'target_qpos_range': [
# # (top left, center)
# # (-0.05, -0.05, 0, 0, 0, -np.pi / 2),
# # (0, 0, 0, 0, 0, np.pi / 2),
# # bottom right, top right
# (0.075, 0.075, 0, 0, 0, -np.pi),
# (-0.075, 0.075, 0, 0, 0, -np.pi)
# ],
# 'observation_keys': (
# 'pixels',
# 'claw_qpos',
# 'last_action',
# # === BELOW IS JUST FOR LOGGING ===
# 'object_xy_position',
# 'object_z_orientation_cos',
# 'object_z_orientation_sin',
# ),
# },
'TurnFreeValve3ResetFree-v0': {
**FREE_SCREW_VISION_KWARGS,
'init_qpos_range': [(0, 0, 0, 0, 0, 0)],
# Below needs to be 2 for a MultiVICEGAN run, since the goals switch
# Single goal + RND reset controller
# 'target_qpos_range': [
# (0, 0, 0, 0, 0, -np.pi / 2),
# (0, 0, 0, 0, 0, -np.pi / 2), # Second goal is arbitrary
# ],
# 2 goal, no RND reset controller
# 'target_qpos_range': [
# (0, 0, 0, 0, 0, -np.pi / 2),
# (0, 0, 0, 0, 0, np.pi / 2),
# ],
# 2 goals
'target_qpos_range': [
# (top left, center)
# (-0.05, -0.05, 0, 0, 0, -np.pi / 2),
# (0, 0, 0, 0, 0, np.pi / 2),
# bottom right, top right
(0.075, 0.075, 0, 0, 0, -np.pi),
(-0.075, 0.075, 0, 0, 0, -np.pi)
],
'swap_goal_upon_completion': False,
'observation_keys': (
'pixels',
'claw_qpos',
'last_action',
# === BELOW IS JUST FOR LOGGING ===
'object_xy_position',
'object_z_orientation_cos',
'object_z_orientation_sin',
),
},
# === FREE SCREW HARDWARE ===
'TurnFreeValve3Hardware-v0': {
'pixel_wrapper_kwargs': {
'pixels_only': False,
'normalize': False,
'render_kwargs': {
'width': 32,
'height': 32,
'camera_id': -1,
'box_warp': True,
}
},
'observation_keys': (
'<KEY>',
'pixels',
'last_action',
),
'device_path': '/dev/ttyUSB0',
'camera_config': {
'topic': '/kinect2_001161563647/qhd/image_color',
'image_shape': (256, 256, 3),
}
},
'TurnFreeValve3ResetFreeSwapGoal-v0': {
**FREE_SCREW_VISION_KWARGS,
'reset_fingers': True,
'reset_frequency': 0,
'goals': [
(0, 0, 0, 0, 0, np.pi / 2),
(0, 0, 0, 0, 0, -np.pi / 2),
],
'observation_keys': (
'<KEY>',
'last_action',
'target_xy_position',
'target_z_orientation_cos',
'target_z_orientation_sin',
'goal_index',
'pixels',
# === BELOW IS JUST FOR LOGGING ===
'object_xy_position',
'object_orientation_cos',
'object_orientation_sin',
),
},
'TurnFreeValve3ResetFreeSwapGoalEval-v0': {
**FREE_SCREW_VISION_KWARGS,
'goals': [
(0, 0, 0, 0, 0, np.pi / 2),
(0, 0, 0, 0, 0, -np.pi / 2),
],
'observation_keys': (
'<KEY>',
'last_action',
'target_xy_position',
'target_z_orientation_cos',
'target_z_orientation_sin',
'goal_index',
'pixels',
# === BELOW IS JUST FOR LOGGING ===
'object_xy_position',
'object_orientation_cos',
'object_orientation_sin',
),
},
'LiftDDFixed-v0': {
'reward_keys_and_weights': {
'object_to_target_z_position_distance_reward': 1,
'object_to_target_xy_position_distance_reward': 0,
'object_to_target_orientation_distance_reward': 0, #tune.sample_from([1, 5]), #5,
},
'target_qpos_range': [(0, 0, 0.05, 0, 0, 0)],
'pixel_wrapper_kwargs': {
'observation_key': 'pixels',
'pixels_only': False,
'render_kwargs': {
'width': 32,
'height': 32,
},
},
'observation_keys': (
'claw_qpos',
'object_position',
'object_quaternion',
'last_action',
'target_position',
'target_quaternion',
'pixels',
),
'camera_settings': {
'azimuth': 180,
'distance': 0.26,
'elevation': -40,
'lookat': (0, 0, 0.06),
}
},
'LiftDDResetFree-v0': {
'reward_keys_and_weights': {
'object_to_target_z_position_distance_reward': 1,
'object_to_target_xy_position_distance_reward': 0,
'object_to_target_orientation_distance_reward': 0, #tune.sample_from([1, 5]), #5,
},
# 'target_qpos_range': (
# (-0.1, -0.1, 0.0, 0, 0, 0),
# (0.1, 0.1, 0.0, 0, 0, 0), # bgreen side up
# ),
'target_qpos_range': [(0, 0, 0.05, 0, 0, 0)],
'pixel_wrapper_kwargs': {
'observation_key': 'pixels',
'pixels_only': False,
'render_kwargs': {
'width': 32,
'height': 32,
},
},
'observation_keys': (
'claw_qpos',
'object_position',
'object_quaternion',
'last_action',
| |
from copy import deepcopy
from unittest import TestCase
from django.test import TransactionTestCase
from django.core.files.storage import default_storage
from django.test import override_settings
from botocore.exceptions import NoCredentialsError
import datetime
from dateutil.parser import parse
from djconnectwise import models
from djconnectwise.utils import get_hash
from djconnectwise.sync import InvalidObjectException
from . import fixtures
from . import fixture_utils
from . import mocks
from .. import sync
from ..sync import log_sync_job
class AssertSyncMixin:
    """Provides an assertion that a SyncJob record was logged."""

    def assert_sync_job(self):
        # The tracker model subclasses the synced model, so its first base
        # class name is the entity name recorded on the sync job.
        entity_name = self.model_class.__bases__[0].__name__
        jobs = models.SyncJob.objects.filter(entity_name=entity_name)
        assert jobs.exists()
class SynchronizerTestMixin(AssertSyncMixin):
    """Shared test cases for synchronizer classes.

    Subclasses set ``synchronizer_class``, ``model_class`` and ``fixture``
    and implement ``call_api`` / ``_assert_fields``.
    """

    synchronizer_class = None
    model_class = None
    fixture = None

    def call_api(self, return_data):
        """Patch the API to return ``return_data``; subclass hook."""
        raise NotImplementedError

    def _assert_fields(self, instance, json_data):
        """Compare one model instance against its fixture JSON; subclass hook."""
        raise NotImplementedError

    def _sync(self, return_data):
        """Run one sync against the mocked API; return the patch objects."""
        mock_call, get_patch = self.call_api(return_data)
        self.synchronizer = self.synchronizer_class()
        self.synchronizer.sync()
        return mock_call, get_patch

    def _sync_with_results(self, return_data):
        """Sync and return the result counts of the final pass.

        NOTE(review): sync() intentionally runs twice here; the returned
        counts come from the second pass, by which point records already
        match the mocked data.
        """
        mock_call, get_patch = self.call_api(return_data)
        self.synchronizer = self.synchronizer_class()
        self.synchronizer.sync()
        return self.synchronizer.sync()

    def test_sync(self):
        self._sync(self.fixture)
        by_id = {record['id']: record for record in self.fixture}
        for instance in self.model_class.objects.all():
            self._assert_fields(instance, by_id[instance.id])
        self.assert_sync_job()

    def test_sync_update(self):
        self._sync(self.fixture)
        record = self.fixture[0]
        before = self.model_class.objects.get(id=record['id'])
        updated_json = deepcopy(record)
        updated_json['name'] = 'Some New Name'
        self._sync([updated_json])
        after = self.model_class.objects.get(id=record['id'])
        self.assertNotEqual(before.name, 'Some New Name')
        self._assert_fields(after, updated_json)

    def test_sync_skips(self):
        self._sync(self.fixture)
        updated_json = deepcopy(self.fixture[0])
        updated_json['name'] = 'Some New Name'
        # Sync twice: the first pass applies the update, the checked pass
        # must then skip the unchanged record.
        self._sync([updated_json])
        _, updated_count, skipped_count, _ = \
            self._sync_with_results([updated_json])
        self.assertEqual(skipped_count, 1)
        self.assertEqual(updated_count, 0)
class TestBatchConditionMixin(TestCase):
    """Tests batch sizing against the module-level URL length limits."""

    def test_get_optimal_size(self):
        synchronizer = sync.BatchConditionMixin()
        # Under the default limits all five ids fit in one batch.
        size = synchronizer.get_optimal_size([31, 35, 43, 52, 58])
        self.assertEqual(size, 5)
        # Shrink the module-level limits to force smaller batches. Save and
        # restore them in a finally block so the override cannot leak into
        # other tests (previously the globals stayed clobbered for the
        # remainder of the test run).
        orig_max = sync.MAX_URL_LENGTH
        orig_min = sync.MIN_URL_LENGTH
        try:
            sync.MAX_URL_LENGTH = 310
            sync.MIN_URL_LENGTH = 305
            size = synchronizer.get_optimal_size(
                [1, 2, 3, 43434, 54562, 54568, 65643]
            )
            self.assertEqual(size, 3)
            size = synchronizer.get_optimal_size(
                [442434, 53462, 552468, 63443]
            )
            self.assertEqual(size, 1)
            # Single-element and empty inputs are edge cases.
            size = synchronizer.get_optimal_size([1])
            self.assertEqual(size, 1)
            size = synchronizer.get_optimal_size([])
            self.assertIsNone(size)
        finally:
            sync.MAX_URL_LENGTH = orig_max
            sync.MIN_URL_LENGTH = orig_min
class TestTerritorySynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of system territories."""

    synchronizer_class = sync.TerritorySynchronizer
    model_class = models.TerritoryTracker
    fixture = fixtures.API_SYSTEM_TERRITORY_LIST

    def setUp(self):
        super().setUp()
        fixture_utils.init_territories()

    def call_api(self, return_data):
        return mocks.system_api_get_territories_call(return_data)

    def test_sync_update(self):
        # Same flow as the mixin's update test, with a territory-flavored name.
        self._sync(self.fixture)
        record = self.fixture[0]
        before = self.model_class.objects.get(id=record['id'])
        updated = deepcopy(record)
        updated['name'] = 'A Different Territory'
        self._sync([updated])
        after = self.model_class.objects.get(id=record['id'])
        self.assertNotEqual(before.name, 'A Different Territory')
        self._assert_fields(after, updated)

    def _assert_fields(self, instance, json_data):
        for attr in ('id', 'name'):
            self.assertEqual(getattr(instance, attr), json_data[attr])
class TestCompanySynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of companies."""

    synchronizer_class = sync.CompanySynchronizer
    model_class = models.CompanyTracker
    fixture = fixtures.API_COMPANY_LIST

    def setUp(self):
        # Companies reference territories, statuses and types, so those
        # must be synced first.
        fixture_utils.init_territories()
        mocks.company_api_get_company_statuses_call(
            fixtures.API_COMPANY_STATUS_LIST)
        sync.CompanyStatusSynchronizer().sync()
        fixture_utils.init_company_types()

    def call_api(self, return_data):
        return mocks.company_api_get_call(return_data)

    def _assert_fields(self, company, api_company):
        self.assertEqual(company.name, api_company['name'])
        self.assertEqual(company.identifier, api_company['identifier'])
        self.assertEqual(company.phone_number, api_company['phoneNumber'])
        self.assertEqual(company.fax_number, api_company['faxNumber'])
        self.assertEqual(company.address_line1, api_company['addressLine1'])
        # Fixed: address_line2 was previously compared against
        # 'addressLine1', so a wrong second address line went undetected.
        self.assertEqual(company.address_line2, api_company['addressLine2'])
        self.assertEqual(company.city, api_company['city'])
        self.assertEqual(company.state_identifier, api_company['state'])
        self.assertEqual(company.zip, api_company['zip'])
        self.assertEqual(company.status.id, api_company['status']['id'])
        self.assertEqual(
            company.company_types.first().id, api_company['typeIds'][0])
class TestCompanyStatusSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of company statuses."""

    synchronizer_class = sync.CompanyStatusSynchronizer
    model_class = models.CompanyStatusTracker
    fixture = fixtures.API_COMPANY_STATUS_LIST

    def call_api(self, return_data):
        return mocks.company_api_get_company_statuses_call(return_data)

    def _assert_fields(self, instance, json_data):
        # Model attribute -> fixture key, checked pairwise.
        field_map = (
            ('name', 'name'),
            ('default_flag', 'defaultFlag'),
            ('inactive_flag', 'inactiveFlag'),
            ('notify_flag', 'notifyFlag'),
            # 'dissalow' is misspelled on the model attribute itself.
            ('dissalow_saving_flag', 'disallowSavingFlag'),
            ('notification_message', 'notificationMessage'),
            ('custom_note_flag', 'customNoteFlag'),
            ('cancel_open_tracks_flag', 'cancelOpenTracksFlag'),
        )
        for attr, key in field_map:
            self.assertEqual(getattr(instance, attr), json_data[key])
class TestTimeEntrySynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of time entries."""

    synchronizer_class = sync.TimeEntrySynchronizer
    model_class = models.TimeEntryTracker
    fixture = fixtures.API_TIME_ENTRY_LIST

    def setUp(self):
        super().setUp()
        # Time entries reference tickets, which reference board statuses.
        fixture_utils.init_board_statuses()
        fixture_utils.init_tickets()
        fixture_utils.init_time_entries()

    def call_api(self, return_data):
        return mocks.time_api_get_time_entries_call(return_data)

    def test_sync_update(self):
        self._sync(self.fixture)
        record = self.fixture[0]
        before = self.model_class.objects.get(id=record['id'])
        new_start = '2003-10-06T14:48:18Z'
        updated = deepcopy(record)
        updated['timeStart'] = new_start
        self._sync([updated])
        after = self.model_class.objects.get(id=record['id'])
        self.assertNotEqual(before.time_start, new_start)
        self._assert_fields(after, updated)

    def test_sync_skips(self):
        self._sync(self.fixture)
        updated = deepcopy(self.fixture[0])
        updated['timeStart'] = '2003-10-06T14:48:18Z'
        # Sync twice: the first pass applies the change, the checked pass
        # must then skip the unchanged record.
        self._sync([updated])
        _, updated_count, skipped_count, _ = \
            self._sync_with_results([updated])
        self.assertEqual(skipped_count, 1)
        self.assertEqual(updated_count, 0)

    def _assert_fields(self, instance, json_data):
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.charge_to_id.id, json_data['chargeToId'])
        self.assertEqual(instance.charge_to_type, json_data['chargeToType'])
        # Timestamps arrive as ISO strings; parse before comparing.
        self.assertEqual(instance.time_start, parse(json_data['timeStart']))
        self.assertEqual(instance.time_end, parse(json_data['timeEnd']))
        self.assertEqual(instance.actual_hours, json_data['actualHours'])
        self.assertEqual(instance.billable_option, json_data['billableOption'])
        self.assertEqual(instance.notes, json_data['notes'])
class TestCompanyTypeSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of company types."""

    synchronizer_class = sync.CompanyTypeSynchronizer
    model_class = models.CompanyTypeTracker
    fixture = fixtures.API_COMPANY_TYPES_LIST

    def call_api(self, return_data):
        return mocks.company_api_get_company_types_call(return_data)

    def _assert_fields(self, instance, json_data):
        for attr, key in (('id', 'id'),
                          ('name', 'name'),
                          ('vendor_flag', 'vendorFlag')):
            self.assertEqual(getattr(instance, attr), json_data[key])
class TestScheduleEntriesSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of schedule entries."""

    synchronizer_class = sync.ScheduleEntriesSynchronizer
    model_class = models.ScheduleEntryTracker
    fixture = fixtures.API_SCHEDULE_ENTRIES

    def setUp(self):
        super().setUp()
        # Schedule entries reference many other record types; initialize
        # everything in dependency order.
        initializers = (
            fixture_utils.init_boards,
            fixture_utils.init_territories,
            fixture_utils.init_companies,
            fixture_utils.init_project_statuses,
            fixture_utils.init_projects,
            fixture_utils.init_locations,
            fixture_utils.init_priorities,
            fixture_utils.init_members,
            fixture_utils.init_opportunity_stages,
            fixture_utils.init_opportunity_statuses,
            fixture_utils.init_opportunity_types,
            fixture_utils.init_opportunities,
            fixture_utils.init_teams,
            fixture_utils.init_board_statuses,
            fixture_utils.init_schedule_statuses,
            fixture_utils.init_schedule_types,
            fixture_utils.init_tickets,
            fixture_utils.init_activities,
        )
        for initialize in initializers:
            initialize()

    def call_api(self, return_data):
        return mocks.schedule_api_get_schedule_entries_call(return_data)

    def test_sync_update(self):
        self._sync(self.fixture)
        record = self.fixture[0]
        before = self.model_class.objects.get(id=record['id'])
        updated = deepcopy(record)
        updated['name'] = '<NAME>'
        self._sync([updated])
        after = self.model_class.objects.get(id=record['id'])
        self.assertNotEqual(before.name, '<NAME>')
        self._assert_fields(after, updated)

    def test_schedule_object_assignment(self):
        self._sync(self.fixture)
        # Each synced entry must pick up the schedule type identifier of
        # its fixture record.
        for fixture_index, identifier in ((0, "S"), (1, "C")):
            entry_id = self.fixture[fixture_index]['id']
            entry = self.model_class.objects.get(id=entry_id)
            self.assertEqual(entry.schedule_type.identifier, identifier)

    def _assert_fields(self, instance, json_data):
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.name, json_data['name'])
        self.assertEqual(instance.done_flag, json_data['doneFlag'])
        self.assertEqual(instance.date_start, parse(json_data['dateStart']))
        self.assertEqual(instance.date_end, parse(json_data['dateEnd']))
        # Verify whichever referenced objects are present; both kinds share
        # the fixture's objectId.
        if instance.activity_object is not None:
            self.assertEqual(instance.activity_object.id,
                             json_data['objectId'])
        if instance.ticket_object is not None:
            self.assertEqual(instance.ticket_object.id, json_data['objectId'])
        self.assertEqual(instance.where.id, json_data['where']['id'])
        self.assertEqual(instance.member.id, json_data['member']['id'])
        self.assertEqual(instance.status.id, json_data['status']['id'])
        self.assertEqual(instance.schedule_type.id, json_data['type']['id'])
class TestScheduleTypeSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of schedule types."""

    synchronizer_class = sync.ScheduleTypeSynchronizer
    model_class = models.ScheduleTypeTracker
    fixture = fixtures.API_SCHEDULE_TYPE_LIST

    def call_api(self, return_data):
        return mocks.schedule_api_get_schedule_types_call(return_data)

    def _assert_fields(self, instance, json_data):
        for attr in ('id', 'name'):
            self.assertEqual(getattr(instance, attr), json_data[attr])
class TestScheduleStatusSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of schedule statuses."""

    synchronizer_class = sync.ScheduleStatusSynchronizer
    model_class = models.ScheduleStatusTracker
    fixture = fixtures.API_SCHEDULE_STATUS_LIST

    def call_api(self, return_data):
        return mocks.schedule_api_get_schedule_statuses_call(return_data)

    def _assert_fields(self, instance, json_data):
        for attr in ('id', 'name'):
            self.assertEqual(getattr(instance, attr), json_data[attr])
class TestProjectStatusSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of project statuses."""

    synchronizer_class = sync.ProjectStatusSynchronizer
    model_class = models.ProjectStatusTracker
    fixture = fixtures.API_PROJECT_STATUSES

    def call_api(self, return_data):
        return mocks.projects_api_get_project_statuses_call(return_data)

    def _assert_fields(self, instance, json_data):
        for attr, key in (('id', 'id'),
                          ('name', 'name'),
                          ('default_flag', 'defaultFlag'),
                          ('inactive_flag', 'inactiveFlag'),
                          ('closed_flag', 'closedFlag')):
            self.assertEqual(getattr(instance, attr), json_data[key])
class TestProjectTypeSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of project types."""

    synchronizer_class = sync.ProjectTypeSynchronizer
    model_class = models.ProjectTypeTracker
    fixture = fixtures.API_PROJECT_TYPES

    def call_api(self, return_data):
        return mocks.projects_api_get_project_types_call(return_data)

    def _assert_fields(self, instance, json_data):
        for attr, key in (('id', 'id'),
                          ('name', 'name'),
                          ('default_flag', 'defaultFlag'),
                          ('inactive_flag', 'inactiveFlag')):
            self.assertEqual(getattr(instance, attr), json_data[key])
class TestProjectPhaseSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of project phases."""

    synchronizer_class = sync.ProjectPhaseSynchronizer
    model_class = models.ProjectPhaseTracker
    fixture = fixtures.API_PROJECT_PHASE_LIST

    def setUp(self):
        super().setUp()
        # Phases belong to projects, so those are initialized first.
        fixture_utils.init_projects()
        fixture_utils.init_project_phases()

    def call_api(self, return_data):
        return mocks.projects_api_get_project_phases_call(return_data)

    def _assert_fields(self, instance, json_data):
        # Plain fields compare directly against the fixture values.
        plain_fields = (
            ('id', 'id'),
            ('description', 'description'),
            ('bill_time', 'billTime'),
            ('notes', 'notes'),
            ('scheduled_hours', 'scheduledHours'),
            ('actual_hours', 'actualHours'),
            ('budget_hours', 'budgetHours'),
            ('project_id', 'projectId'),
        )
        for attr, key in plain_fields:
            self.assertEqual(getattr(instance, attr), json_data[key])
        # Date fields arrive as ISO datetimes; only the date part is stored.
        date_fields = (
            ('scheduled_start', 'scheduledStart'),
            ('scheduled_end', 'scheduledEnd'),
            ('actual_start', 'actualStart'),
            ('actual_end', 'actualEnd'),
        )
        for attr, key in date_fields:
            self.assertEqual(getattr(instance, attr),
                             parse(json_data[key]).date())

    def test_sync_update(self):
        self._sync(self.fixture)
        record = self.fixture[0]
        before = self.model_class.objects.get(id=record['id'])
        updated = deepcopy(record)
        updated['description'] = 'Some New Description'
        self._sync([updated])
        after = self.model_class.objects.get(id=record['id'])
        self.assertNotEqual(before.description, 'Some New Description')
        self._assert_fields(after, updated)

    def test_sync_skips(self):
        self._sync(self.fixture)
        updated = deepcopy(self.fixture[0])
        updated['description'] = 'Some New Description'
        # Sync twice: the first pass applies the update, the checked pass
        # must then skip the unchanged record.
        self._sync([updated])
        _, updated_count, skipped_count, _ = \
            self._sync_with_results([updated])
        self.assertEqual(skipped_count, 1)
        self.assertEqual(updated_count, 0)
class TestProjectSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of projects."""

    synchronizer_class = sync.ProjectSynchronizer
    model_class = models.ProjectTracker
    fixture = fixtures.API_PROJECT_LIST

    def setUp(self):
        super().setUp()
        # Projects reference statuses and manager members.
        fixture_utils.init_project_statuses()
        fixture_utils.init_members()

    def call_api(self, return_data):
        return mocks.project_api_get_projects_call(return_data)

    def _assert_fields(self, instance, json_data):
        self.assertEqual(instance.id, json_data['id'])
        self.assertEqual(instance.name, json_data['name'])
        self.assertEqual(instance.manager_id, json_data['manager']['id'])
        # Hour totals are stored as decimals; compare as floats.
        for attr, key in (('actual_hours', 'actualHours'),
                          ('budget_hours', 'budgetHours'),
                          ('scheduled_hours', 'scheduledHours')):
            self.assertAlmostEqual(float(getattr(instance, attr)),
                                   json_data[key])
        # Date fields arrive as ISO datetimes; only the date part is stored.
        for attr, key in (('actual_start', 'actualStart'),
                          ('actual_end', 'actualEnd'),
                          ('estimated_start', 'estimatedStart'),
                          ('estimated_end', 'estimatedEnd')):
            self.assertEqual(getattr(instance, attr),
                             parse(json_data[key]).date())
class TestTeamSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of service teams."""

    synchronizer_class = sync.TeamSynchronizer
    model_class = models.TeamTracker
    fixture = fixtures.API_SERVICE_TEAM_LIST

    def call_api(self, return_data):
        return mocks.service_api_get_teams_call(return_data)

    def setUp(self):
        # Teams belong to boards.
        fixture_utils.init_boards()

    def _assert_fields(self, team, team_json):
        ids = {member.id for member in team.members.all()}
        self.assertEqual(team.id, team_json['id'])
        self.assertEqual(team.name, team_json['name'])
        self.assertEqual(team.board.id, team_json['boardId'])
        # Fixed: the strict-subset check (<) would spuriously fail whenever
        # every fixture member happened to be synced; the synced members
        # only need to be contained in the fixture's member list.
        self.assertTrue(ids <= set(team_json['members']))
class TestPrioritySynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of ticket priorities."""

    synchronizer_class = sync.PrioritySynchronizer
    model_class = models.TicketPriorityTracker
    fixture = fixtures.API_SERVICE_PRIORITY_LIST

    def setUp(self):
        self.synchronizer = sync.PrioritySynchronizer()
        # When the API omits a color, one of the default palette values
        # (or the overall default) is used instead.
        self.valid_prio_colors = \
            list(models.TicketPriority.DEFAULT_COLORS.values()) + \
            [models.TicketPriority.DEFAULT_COLOR]

    def call_api(self, return_data):
        return mocks.service_api_get_priorities_call(return_data)

    def _assert_fields(self, priority, api_priority):
        assert priority.name == api_priority['name']
        assert priority.id == api_priority['id']
        if 'color' in api_priority:
            assert priority.color == api_priority['color']
        else:
            assert priority.color in self.valid_prio_colors
        if 'sortOrder' in api_priority:
            assert priority.sort == api_priority['sortOrder']
        else:
            assert priority.sort is None
class TestLocationSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of service locations."""

    synchronizer_class = sync.LocationSynchronizer
    model_class = models.LocationTracker
    fixture = fixtures.API_SERVICE_LOCATION_LIST

    def setUp(self):
        self.synchronizer = sync.LocationSynchronizer()

    def call_api(self, return_data):
        return mocks.service_api_get_locations_call(return_data)

    def _assert_fields(self, location, api_location):
        for attr in ('name', 'id', 'where'):
            self.assertEqual(getattr(location, attr), api_location[attr])
class TestBoardSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of ConnectWise boards."""

    synchronizer_class = sync.BoardSynchronizer
    model_class = models.ConnectWiseBoardTracker
    fixture = fixtures.API_BOARD_LIST

    def setUp(self):
        super().setUp()
        # Boards reference work roles and work types.
        fixture_utils.init_work_roles()
        fixture_utils.init_work_types()
        fixture_utils.init_boards()

    def call_api(self, return_data):
        return mocks.service_api_get_boards_call(return_data)

    def _assert_fields(self, instance, json_data):
        self.assertEqual(instance.name, json_data['name'])
        self.assertEqual(instance.inactive, json_data['inactiveFlag'])
        # Related work role / work type records are matched by name.
        self.assertEqual(instance.work_role.name,
                         json_data['workRole']['name'])
        self.assertEqual(instance.work_type.name,
                         json_data['workType']['name'])
class TestBoardStatusSynchronizer(TestCase, SynchronizerTestMixin):
    """Tests syncing of board statuses."""

    synchronizer_class = sync.BoardStatusSynchronizer
    model_class = models.BoardStatusTracker
    fixture = fixtures.API_BOARD_STATUS_LIST

    def setUp(self):
        # Statuses belong to boards.
        fixture_utils.init_boards()

    def call_api(self, return_data):
        return mocks.service_api_get_statuses_call(return_data)

    def _assert_fields(self, instance, json_data):
        field_map = (
            ('name', 'name'),
            ('sort_order', 'sortOrder'),
            ('display_on_board', 'displayOnBoard'),
            ('inactive', 'inactive'),
            ('closed_status', 'closedStatus'),
        )
        for attr, key in field_map:
            self.assertEqual(getattr(instance, attr), json_data[key])
class TestServiceNoteSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ServiceNoteSynchronizer
model_class = models.ServiceNoteTracker
fixture = fixtures.API_SERVICE_NOTE_LIST
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.ticket.id, json_data['ticketId'])
self.assertEqual(instance.text, json_data['text'])
self.assertEqual(instance.detail_description_flag,
json_data['detailDescriptionFlag'])
self.assertEqual(instance.internal_analysis_flag,
json_data['internalAnalysisFlag'])
self.assertEqual(instance.resolution_flag, json_data['resolutionFlag'])
self.assertEqual(instance.member.identifier,
json_data['member']['identifier'])
self.assertEqual(instance.date_created,
parse(json_data['dateCreated']))
self.assertEqual(instance.created_by, json_data['createdBy'])
self.assertEqual(instance.internal_flag, json_data['internalFlag'])
self.assertEqual(instance.external_flag, json_data['externalFlag'])
def setUp(self):
super().setUp()
fixture_utils.init_service_notes()
fixture_utils.init_tickets()
def call_api(self, return_data):
return mocks.service_api_get_notes_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
flag = False
new_json = deepcopy(self.fixture[0])
new_json['detailDescriptionFlag'] = flag
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.detail_description_flag,
flag)
self._assert_fields(changed, new_json)
def test_sync_skips(self):
self._sync(self.fixture)
new_json = deepcopy(self.fixture[0])
new_json['detailDescriptionFlag'] = False
new_json_list = [new_json]
# Sync it twice to be sure that the data | |
== 'pyneb':
#
# #Calculate density using pyneb
# den = self.density_pyneb(Ion, Temp)
#
# #Check for non physical output #WARNING: For pyneb this is too general
# den = self.check_issues(magnitude = den, parameter_type = 'density')
#
# return den
#
# def temperature_determination(self, Ion, dens = None, methodology = None, Temp = None, RecCorr = None):
#
# #Direct methods
# if methodology == 'Epm2014':
#
# if Ion == 'O3':
#
# emLine = ["O3_4959A","O3_5007A","O3_4363A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# temp = self.empiric_formulae(data_dict, methodology, Ion, 'TOIII')
#
# if Ion == 'O2':
# data_dict = {'Temp': Temp}
# temp = self.empiric_formulae(data_dict, methodology, Ion, 'TOII_approx_TOIII')
#
# if Ion == 'S3':
#
# emLine = ["S3_9069A","S3_9531A","S3_6312A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# temp = self.empiric_formulae(data_dict, methodology, Ion, 'TSIII')
#
# if Ion == 'N2':
#
# emLine = ["N2_6548A","N2_6584A", "N2_5755A", 'H1_4861A']
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# if RecCorr != None:
# if data_dict["N2_5755A"] != None:
# data_dict["N2_5755A"] = data_dict["N2_5755A"] - RecCorr * data_dict['H1_4861A']
# temp = self.empiric_formulae(data_dict, methodology, Ion, 'TNII')
#
# if methodology == 'Haegele2008':
#
# if Ion == 'S2':
#
# emLine = ["S2_6716A","S2_6731A", "S2_4069A", 'S2_4076A']
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# temp = self.empiric_formulae(data_dict, methodology, Ion, 'TSII')
#
# if Ion == 'O2':
#
# emLine = ['O2_3726A',"O2_3729A","O2_7319A",'O2_7330A']
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# temp = self.empiric_formulae(data_dict, methodology, Ion, 'TOII')
#
#
# #Using pyneb
# if methodology == 'pyneb':
#
# temp = self.temperature_pyneb(Ion, dens)
#
# return temp
#
# def argon_IonAbundance(self, Temp, Den, Ion, methodology = 'Haegele2008'):
#
# if methodology == 'Haegele2008':
#
# if Ion == 'Ar3':
#
# emLine = ["Ar3_7136A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'] = Temp
# self.Properties_dict['ArIII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'ArIII_HII')
# print 'The argon thing III', self.Properties_dict['ArIII_HII']
# return
#
# if Ion == 'Ar4':
#
# emLine = ["Ar4_4740A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'] = Temp
# self.Properties_dict['ArIV_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'ArIV_HII')
# print 'The argon thing IV', self.Properties_dict['ArIV_HII']
# return
#
# def oxygen_IonAbundance(self, Temp, Den, Ion, methodology = 'Epm2014', RecCorr = None):
#
# if methodology == 'Epm2014':
#
# if Ion == 'O3':
#
# emLine = ["O3_4959A","O3_5007A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'] = Temp
# self.Properties_dict['OIII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'OIII_HII')
#
# return
#
# if Ion == 'O2':
# #This assumes both lines are blended
# emLine = ["O2_3726A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta, Mode='Integrated')
# data_dict['Temp'], data_dict['Den'] = Temp, Den
# self.Properties_dict['OII_HII_3279A'] = self.empiric_formulae(data_dict, methodology, Ion, 'OII_HII')
#
# if methodology == 'Fabian2006':
#
# if Ion == 'O2':
#
# emLine = ["O2_7319A","O2_7330A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'], data_dict['Den'] = Temp, Den
#
# if RecCorr == None:
# self.Properties_dict['OII_HII_7319A'] = self.empiric_formulae(data_dict, methodology, Ion, 'OII_HII_7319A')
# else:
# data_dict["O2_7319A"] = data_dict["O2_7319A"] - RecCorr
# self.Properties_dict['OII_HII_7319A'] = self.empiric_formulae(data_dict, methodology, Ion, 'OII_HII_7319A')
#
# return
#
# def nitrogen_IonAbundance(self, Temp, Den, Ion, methodology = 'Epm2014'):
#
# if methodology == 'Epm2014':
#
# if Ion == 'N2':
#
# emLine = ["N2_6548A","N2_6584A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'] = Temp
# self.Properties_dict['NII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'NII_HII')
#
# return
#
# def sulfur_IonAbundance(self, Temp, Den, Ion, methodology = 'Haegele2008'):
#
# if methodology == 'Haegele2008':
#
# if Ion == 'S3':
#
# emLine = ["S3_9069A","S3_9531A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'] = Temp
# self.Properties_dict['SIII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'SIII_HII')
#
# return
#
# if Ion == 'S2':
#
# emLine = ["S2_6716A","S2_6731A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'], data_dict['Den'] = Temp, Den
# self.Properties_dict['SII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'SII_HII')
#
# if methodology == 'Vital2015':
# if Ion == 'S3':
# emLine = ["S3_9069A","S3_9531A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'] = Temp
# self.Properties_dict['SIII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'SIII_HII')
#
# return
#
# if Ion == 'S2':
# methodology = 'Haegele2008'
# emLine = ["S2_6716A","S2_6731A","H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'], data_dict['Den'] = Temp, Den
# self.Properties_dict['SII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'SII_HII')
#
#
# return
#
# def helium_IonAbundance(self, Temp, Den, Ion, methodology = 'Fabian2006'):
#
# if methodology == 'Fabian2006':
#
# if Ion == 'He1':
# emLine = ["He1_4472A", "He1_5876A", "He1_6678A", "H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'], data_dict['Den'] = Temp, Den
# self.Properties_dict['HeII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'HeII_HII')
#
# return
#
# if Ion == 'He2':
# emLine = ["He2_4686A", "H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'] = Temp
# self.Properties_dict['HeIII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'HeIII_HII')
#
# return
#
# if methodology == 'Vital2015':
#
# if Ion == 'He1':
# emLine = ["He1_4472A", "He1_5876A", "He1_6678A", "H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'], data_dict['Den'] = Temp, Den
# self.Properties_dict['HeII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'HeII_HII')
#
# if Ion == 'He2':
# emLine = ["He2_4686A", "H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'] = Temp
# self.Properties_dict['HeIII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'HeIII_HII')
#
# if methodology == 'PyNeb':
#
# if Ion == 'He1':
# emLine = ["He1_4472A", "He1_5876A", "He1_6678A", "H1_4861A"]
# data_dict = self.getLinesFlux_dered(Lines_List = emLine, cHbeta = self.cHbeta)
# data_dict['Temp'], data_dict['Den'] = Temp, Den
# self.Properties_dict['HeII_HII'] = self.empiric_formulae(data_dict, methodology, Ion, 'HeII_HII')
#
# return
#
# if Ion == 'He2':
#
# return
#
# def empiric_formulae(self, data_dict, methodology, Ion, Parameter):
#
# Magnitude = None
# if data_dict != None:
# if (None not in data_dict.values()):
# if methodology == 'Epm2014':
# if Parameter == 'nSII':
#
# self.Properties_dict['R_SII'] = data_dict['S2_6716A'] / data_dict['S2_6731A']
# t4 = data_dict['Temp'] / 10000.0
# a_0 = 16.054 - 7.79 / t4 - 11.32 * t4
# a_1 = -22.66 + 11.08 / t4 + 16.02 * t4
# b_0 = -21.61 + 11.89 / t4 + 14.59 * t4
# b_1 = 9.17 - 5.09 / t4 - 6.18 * t4
# Magnitude = 1000.0 * (self.Properties_dict['R_SII']*a_0 + a_1) / (self.Properties_dict['R_SII']*b_0 + b_1)
# #self.Properties_dict['nSII'] = 1000.0 * (self.Properties_dict['R_SII']*a_0 + a_1) / (self.Properties_dict['R_SII']*b_0 + b_1)
#
# elif Parameter == 'TOIII':
# self.Properties_dict['R_OIII'] = (data_dict['O3_4959A'] + data_dict['O3_5007A']) / data_dict['O3_4363A']
# self.Properties_dict['TOIII'] = (0.7840 - 0.0001357 * self.Properties_dict['R_OIII'] + (48.44 / self.Properties_dict['R_OIII'])) * 10000
# Magnitude = self.Properties_dict['TOIII']
#
# elif Parameter == 'TOII_approx_TOIII':
# t4 = data_dict['Temp'] / 10000
# self.Properties_dict['TOII_approx_TOIII'] = (1.397 /( (1/t4) + 0.385) ) * 10000
# Magnitude = self.Properties_dict['TOII_approx_TOIII']
#
# elif Parameter == 'TSIII_approx_TOIII':
# #From equation TOIII = 1.0807 * TSIII - 0.0846
# t4 = data_dict['Temp'] / 10000
# self.Properties_dict['TSIII_approx_TOIII'] = ((t4 + 0.0846) / 1.0807)*10000
#
# elif Parameter == 'TOIII_approx_TSIII':
# t4 = data_dict['Temp'] / 10000
# self.Properties_dict['TOIII_approx_TSIII'] = (1.0807 * t4 - 0.0846)*10000
# Magnitude = self.Properties_dict['TOIII_approx_TSIII']
#
# elif Parameter == 'TSIII':
# self.Properties_dict['R_SIII'] = (data_dict['S3_9069A'] + data_dict['S3_9531A']) / data_dict['S3_6312A']
# self.Properties_dict['TSIII'] = (0.5147 + 0.0003187 * self.Properties_dict['R_SIII'] + (23.6404 / self.Properties_dict['R_SIII'])) * 10000
# Magnitude = self.Properties_dict['TSIII']
#
# elif Parameter == 'TNII':
# self.Properties_dict['R_NII'] = (data_dict['N2_6548A'] + data_dict['N2_6584A']) / data_dict['N2_5755A']
# self.Properties_dict['TNII'] = (0.6153 - 0.0001529 * self.Properties_dict['R_NII'] + (35.3641 / self.Properties_dict['R_NII'])) * 10000
# Magnitude = self.Properties_dict['TNII']
#
# elif Parameter == 'TNII_approx_TOIII':
# t4 = data_dict['Temp'] / 10000
# self.Properties_dict['TNII_approx_TOIII'] = (1.452 /( (1/t4) + 0.479))*10000
# Magnitude = self.Properties_dict['TNII_approx_TOIII']
#
# elif Parameter == 'OIII_HII':
# t4 = data_dict['Temp'] / 10000
# logOIII_logHII = -12 + umath.log10((data_dict['O3_4959A'] + data_dict['O3_5007A']) / data_dict['H1_4861A']) + 6.1868 + 1.2491 / t4 - 0.5816 * umath.log10(t4)
# Magnitude = umath.pow(10, logOIII_logHII)
#
# elif Parameter == 'OII_HII_3279A':
# t4 = data_dict['Temp'] / 10000
# ne = data_dict['Den']
# logOII_logHII = -12 + umath.log10((data_dict['O2_3726A']) / data_dict['H1_4861A']) + 5.887 + 1.641 / t4 - 0.543 * umath.log10(t4) + 0.000114 * ne
# Magnitude = umath.pow(10, logOII_logHII)
#
# elif Parameter == 'NII_HII':
# t4 = data_dict['Temp'] / 10000
# logNII_logHII = -12 + umath.log10((data_dict['N2_6548A'] + data_dict['N2_6584A']) / data_dict['H1_4861A']) + 6.291 + 0.90221 / t4 - 0.5511 * umath.log10(t4)
# Magnitude = umath.pow(10, logNII_logHII)
#
# elif methodology == 'Angeles2015':
#
# if Parameter == 'SIV_HII':
#
# ArIII_HII = data_dict['ArIII_HII']
# ArIV_HII = data_dict['ArIV_HII']
# SIII_HII = data_dict['SIII_HII']
#
# if (ArIII_HII != None) and (ArIV_HII != None) and (ArIV_HII != 0.0): #Somehow ArIV should be saved as None
# logAr2Ar3 = umath.log10(ArIII_HII/ArIV_HII)
#
# else:
# logAr2Ar3 = ufloat(0.0, 0.0)
#
# logSIV = umath.log10(SIII_HII) - (logAr2Ar3 - self.n_SIV_correction) / self.m_SIV_correction
#
#
# self.Properties_dict['SIV_HII'] = umath.pow(10, logSIV)
# Magnitude = self.Properties_dict['SIV_HII']
#
# # Old formulation
# # ArIII_HII = data_dict['ArIII_HII']
# # ArIV_HII = data_dict['ArIV_HII']
# # SIII_HII = data_dict['SIII_HII']
# #
# # | |
dtype=np.float32)
# wangpeng modify 20200601
# snow_ref_bt11um[ref_lat > 40] = ref_bt11um[ref_lat > 40] + 5.
snow_ref_bt11um[ref_lat_abs >= 40] = ref_bt11um[ref_lat_abs >= 40] + 5.
idx_ = np.logical_and(ref_lat_abs >= 20, ref_lat_abs < 40)
snow_ref_bt11um[idx_] = ref_bt11um[idx_] + 18. - ref_dem[idx_] / 800.
idx_1 = np.logical_and.reduce(
(idx_land, judge, rr_46 > 3.1, tbb_31 > snow_ref_bt11um, tbb_31 <= 278))
idx_ = np.logical_and(idx_1, ref_lat_abs >= 20)
i_mark[idx_] = 200
i_step[idx_] = 51
i_tag[idx_] = 2
# judge[idx_] = False
del idx_
# wangpeng modfiy 20200629
# idx_ = np.logical_and(idx_1, ~(ref_lat_abs > 20))
idx_ = np.logical_and(idx_1, ref_lat_abs < 20)
i_mark[idx_] = 50
i_step[idx_] = 52
i_tag[idx_] = 2
judge[idx_] = False
del idx_1
# !!!! TESTING For SNOW ON LAND
# !!!! SNOW-4
if no_none((dr_16, ref_06, tbb_31, rr_46, dt_02, ref_02, ref_bt11um)):
idx_ = np.logical_and.reduce((idx_land, judge, dr_16 > 10, ref_06 < 19.5, tbb_31 < 276.15,
rr_46 > 1.5, 2.45 < dt_02, dt_02 < 15, ref_02 > 26,
tbb_31 > ref_bt11um + 5.0))
i_mark[idx_] = 200
i_step[idx_] = 53
i_tag[idx_] = 2
del idx_
# !!!! TESTING For SNOW ON LAND
# !!!! SNOW-5
if no_none((ndsi_6, ref_bt11um, tbb_31)):
idx_ = np.logical_and.reduce(
(idx_land, judge, ndsi_6 > 0.52, tbb_31 > ref_bt11um + 2, tbb_31 < 278))
i_mark[idx_] = 200
i_step[idx_] = 54
i_tag[idx_] = 2
del idx_
if no_none((ndsi_6, tbb_31, ndvis, ref_02)):
idx_ = np.logical_and.reduce((idx_land, judge, ndsi_6 > 0.12, ndsi_6 < 0.52, tbb_31 > ref_bt11um,
tbb_31 < 276.15, ndvis > 0.16, ref_02 > 26))
i_mark[idx_] = 200
i_step[idx_] = 55
i_tag[idx_] = 2
del idx_
# !!!! TESTING For SNOW ON LAND
# !!!! Eliminate_Snow-1
# !!!------------------------------------------------------------------------!!!
# !!!! IceCloud_Overlay_WaterCloud_LUT CLOUD-TEST For The REHANDLED DOT
# !!!------------------------------------------------------------------------!!!
if no_none((ref_dem, ref_06, tbb_31, ref_26)):
# wangpeng modify 20200629 add judge
idx_ = np.logical_and.reduce((judge, i_mark == 200, ref_dem <= 3000, ndsi_6 >= 0.38, ref_06 >= 10, ref_06 <= 25,
ref_26 >= 0.01, ref_26 <= 55, tbb_31 >= 235, tbb_31 <= 275))
ice_cloud_sums = np.zeros(data_shape, dtype=np.int8)
idx_1 = np.logical_and(idx_, np.isfinite(ref_06))
y_r138_x_r164_first = y_r138_x_r164[0, 0]
# print("sss", y_r138_x_r164.shape)
# print("4444", ref_26[row, col] * 100, y_r138_x_r164[
# (np.round(ref_06[row, col] * 10) - y_r138_x_r164_first).astype(np.int32), 1])
idx_2 = ref_26[idx_1] * 100 > y_r138_x_r164[
(np.round(ref_06[idx_1] * 10) - y_r138_x_r164_first).astype(np.int32), 1]
ary_2 = np.zeros(len(idx_2), dtype=np.int8)
ary_2[idx_2] = 1
ice_cloud_sums[idx_1] += ary_2
y_t11_m_t12_x_r164_first = y_t11_m_t12_x_r164[0, 0]
idx_2 = dt_12[idx_1] * 100. > y_t11_m_t12_x_r164[
(np.round(ref_06[idx_1] * 10) - y_t11_m_t12_x_r164_first).astype(np.int32), 1]
ary_2 = np.zeros(len(idx_2), dtype=np.int8)
ary_2[idx_2] = 1
ice_cloud_sums[idx_1] += ary_2
# ice_cloud_sums[idx_1][idx_2] += 1
idx_1 = np.logical_and(idx_, np.isfinite(tbb_31))
y_r164_x_t11_first = y_r164_x_t11[0, 0]
idx_2 = ref_06[idx_1] * 100 > y_r164_x_t11[
(np.round(tbb_31[idx_1] * 10) - y_r164_x_t11_first).astype(np.int32), 1]
ary_2 = np.zeros(len(idx_2), dtype=np.int8)
ary_2[idx_2] = 1
ice_cloud_sums[idx_1] += ary_2
# ice_cloud_sums[idx_1][idx_2] += 1
# print("5555", ref_06[row, col] * 100, y_r164_x_t11[
# (np.round(tbb_31[row, col] * 10) - y_r164_x_t11_first).astype(np.int32), 1])
idx_1 = np.logical_and(idx_, np.isfinite(ref_26))
y_r164_x_r138_first = y_r164_x_r138[0, 0]
idx_2 = ref_06[idx_1] * 100 > y_r164_x_r138[
(np.round(ref_26[idx_1] * 10) - y_r164_x_r138_first).astype(np.int32), 1]
ary_2 = np.zeros(len(idx_2), dtype=np.int8)
ary_2[idx_2] = 1
ice_cloud_sums[idx_1] += ary_2
# ice_cloud_sums[idx_1][idx_2] += 1
# print("6666", ref_06[row, col] * 100, y_r164_x_r138[
# (np.round(ref_26[row, col] * 10) - y_r164_x_r138_first).astype(np.int32), 1])
#
# print("7777", dt_12[row, col] * 100, y_t11_m_t12_x_r164[
# (np.round(ref_06[row, col] * 10) - y_t11_m_t12_x_r164_first).astype(np.int32), 1])
idx_ = ice_cloud_sums >= 2
print(ice_cloud_sums[row, col])
print(np.unique(ice_cloud_sums))
i_mark[idx_] = 50
i_step[idx_] = 56
i_tag[idx_] = 2
judge[idx_] = False
# !!!! TESTING For SNOW ON LAND
# !!!! Eliminate_Snow-2
# !!!------------------------------------------------------------------------!!!
# !!!! Monthly_SnowPackLine_LUT CLOUD-TEST For The REHANDLED DOT
# !!!------------------------------------------------------------------------!!!
if no_none((ref_lon, ref_lat)):
_condition = np.logical_or(i_mark == 200, i_mark == 100)
i_nor_s = np.zeros(data_shape, dtype=np.int8)
i_nor_s[ref_lat > 0] = 1
_condition2 = np.abs(r_mon_snow_line[np.round((ref_lon + 180) * 10).astype(np.int16),
int(j_month - 1), i_nor_s]) > abs(ref_lat)
idx_ = np.logical_and.reduce(
(idx_land, judge, _condition, _condition2))
i_mark[idx_] = 50
i_step[idx_] = 57
i_tag[idx_] = 2
judge[idx_] = False
del _condition2, idx_
idx_ = np.logical_and.reduce((idx_land, judge, i_mark == 200))
judge[idx_] = False
del _condition, idx_
# !!!! TESTING For CLOUD
if no_none((ref_26, tbb_31, ref_bt11um, ref_01, ref_06)):
# wangpeng modify 20200629
# idx_ = np.logical_and.reduce((ref_06 > 29, ref_01 > 24,
# np.logical_or(ref_26 > 13.5,
# np.logical_and(ref_26 > 7.5, tbb_31 < ref_bt11um + 8))))
idx_ = np.logical_and.reduce((judge, ref_06 > 29, ref_01 > 24,
np.logical_or(ref_26 > 13.5,
np.logical_and(ref_26 > 7.5, tbb_31 < ref_bt11um + 8))))
i_mark[idx_] = 50
i_step[idx_] = 58
i_tag[idx_] = 2
# judge[idx_] = False
del idx_
# !!!! Mending TEST For Clear Land # 原代码也注释了这段代码
# if no_none((ndvis, tbb_31, dr_16, ndsi_6)):
# idx_ = np.logical_and(ndvis > 0.11, tbb_31 < 280)
# idx_ = np.logical_or.reduce((idx_, dr_16 < 0, ndsi_6 < -0.15))
# idx_ = np.logical_and.reduce((idx_land, judge, idx_))
# i_mark[idx_] = 25
# i_step[idx_] = 59
# i_tag[idx_] = 2
# judge[idx_] = False
if no_none((ndvis, tbb_31)):
idx_ = np.logical_and.reduce(
(idx_land, judge, ndvis > 0.11, tbb_31 < 280))
# wangpeng modify 20200628
# i_mark[idx_] = 50
i_mark[idx_] = 25
i_step[idx_] = 60
i_tag[idx_] = 2
judge[idx_] = False
if no_none((dr_16,)):
idx_ = np.logical_and.reduce((idx_land, judge, dr_16 < 0))
i_mark[idx_] = 25
i_step[idx_] = 61
i_tag[idx_] = 2
judge[idx_] = False
if no_none((ndsi_6,)):
idx_ = np.logical_and.reduce((idx_land, judge, ndsi_6 < -0.15))
i_mark[idx_] = 25
i_step[idx_] = 62
i_tag[idx_] = 2
judge[idx_] = False
# !!!! Mending TEST For Clear Land and Cloud by Hai-T11
if no_none((tbb_31, rr_46)):
idx_ = np.logical_and(tbb_31 > 280, rr_46 < 1.35)
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 25
i_step[idx_] = 66
i_tag[idx_] = 2
judge[idx_] = False
if no_none((tbb_31, ref_dem, ref_01, ref_06, tbb_20, rr_46)):
# wangpeng modify 20200629
# idx_ = np.logical_and.reduce((tbb_31 < 280, ref_dem >= 3000, ref_01 >= 40, ref_06 < 20,
# tbb_20 < 295, rr_46 > 1.3))
idx_ = np.logical_and.reduce((tbb_31 <= 280, ref_dem >= 3300, ref_01 >= 40, ref_06 < 20,
tbb_20 <= 295, rr_46 > 1.3))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 200
i_step[idx_] = 67
i_tag[idx_] = 2
judge[idx_] = False
if no_none((tbb_31, rr_46, ref_02)):
idx_ = np.logical_and.reduce((tbb_31 < 280, rr_46 < 1.4, ref_02 < 28))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 25
i_step[idx_] = 68
i_tag[idx_] = 2
judge[idx_] = False
if no_none((tbb_31, rr_46, ref_02)):
idx_ = np.logical_and.reduce((tbb_31 < 280, rr_46 < 1.4, ref_02 >= 28))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 50
i_step[idx_] = 69
i_tag[idx_] = 2
judge[idx_] = False
# !!!! UNKNOWN TYPE
idx_ = np.logical_and.reduce((idx_land, judge))
i_mark[idx_] = 1
i_step[idx_] = 99
i_tag[idx_] = 2
judge[idx_] = False
# !!!------------------------------------------------------------------------!!!
# !!!! Monthly_SnowPackLine_LUT CLOUD-TEST For The REHANDLED DOT
# !!!------------------------------------------------------------------------!!!
# # !!!! Take Snow-on-Ice Pixel above Water-body as ICE
idx_ = np.logical_and.reduce((idx_available, lsm == 0, i_mark == 200))
i_mark[idx_] = 100
# i_step[idx_] = 71
# del idx_
# idx_1 = np.logical_and.reduce((idx_available, lsm == 0, i_mark == 1))
# if ref_02 is not None:
# idx_ = np.logical_and(idx_1, ref_02 < 18)
# i_mark[idx_] = 39
# i_step[idx_] = 72
# idx_ = np.logical_and(idx_1, ref_02 > 19)
# i_mark[idx_] = 50
# i_step[idx_] = 73
# if ref_26 is not None:
# idx_ = np.logical_and(idx_1, ref_26 > 1.5)
# i_mark[idx_] = 50
# i_step[idx_] = 74
#
# idx_1 = np.logical_and.reduce((idx_available, lsm == 1, i_mark == 1))
# judge = np.full(data_shape, True, dtype=np.bool)
#
# if no_none((ndsi_6, tbb_31, dt_01)):
# idx_ = np.logical_and.reduce(
# (idx_1, ndsi_6 > 0.27, tbb_31 < 273.15, dt_01 > 2.45, dt_01 < 14.10))
# i_mark[idx_] = 200
# i_step[idx_] = 75
# i_tag[idx_] = 2
# judge[idx_] = False
#
# if ref_02 is not None:
# idx_ = np.logical_and.reduce((idx_1, judge, ref_02 > 9.1, ref_02 < 26))
# i_mark[idx_] = 25
# i_step[idx_] = 76
#
# idx_ = np.logical_and.reduce((idx_1, judge, ref_02 > 1.1, ref_02 < 8))
# i_mark[idx_] = 25
# i_step[idx_] = 77
#
# idx_ = np.logical_and.reduce((idx_1, judge, ref_02 > 46))
# i_mark[idx_] = 50
# i_step[idx_] = 78
#
# if ref_26 is not None:
# idx_ = np.logical_and.reduce((idx_1, judge, ref_26 > 10))
# i_mark[idx_] = 50
# i_step[idx_] = 79
# del idx_, idx_1, judge
# i_available[judge == 0] = 0
# !!!==========================================================================!!!
# !
# ! SE by Tree-Decision Algorithm after CM
# !
# !!!--------------------------------------------------------------------------!!!
# !!!! Value = 0 : Invalid
# !!!! Value = 1 : Coastlines
# !!!! Value = 2 : Uncertain
# !!!! Value = 3 : Cloud
# !!!! Value = 4 : Poss Land Clear
# !!!! Value = 5 : Land Clear
# !!!! Value = | |
a blueprint for a given node
Parameters
----------
bp_id
(str) - ID of AOS blueprint
node_id
(str) - ID of node within AOS blueprint for which to retrieve
rendered configuration
config_type
(str) - type of configuration to retrieve. Options are
"deployed" (default), "staging", "operation"
Returns
-------
(dict) - dictionary containing the rendered config as a key value
"""
return self.rest.json_resp_get(
f"/api/blueprints/{bp_id}/nodes/{node_id}/"
f"config-rendering?type={config_type}"
)
# Interface maps
def assign_interface_maps_raw(self, bp_id: str, assignments: dict):
    """
    PATCH a raw interface-map assignment document onto a blueprint.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    assignments
        (dict) mapping of blueprint system node IDs to global interface
        map names, wrapped under an "assignments" key, e.g.
        {
            "assignments": {'bp-node-id': 'Cumulus_VX__AOS-7x10-Spine',
                            'bp-node-id': 'Arista_vEOS__AOS-7x10-Leaf'}
    Returns
    -------
    """
    self.rest.patch(
        uri=f"/api/blueprints/{bp_id}/interface-map-assignments",
        data=assignments,
    )
def assign_interface_map_by_name(
    self, bp_id: str, node_names: list, im_name: str
):
    """
    Assign one interface map to every listed blueprint system node,
    matching nodes by their label.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    node_names
        (list) Blueprint system node names; each must match a node label
        exactly, e.g. ['spine1', 'spine2']
    im_name
        (str) interface map name to assign to the matched nodes
    Returns
    -------
    (dict) the assignment payload that was applied
    """
    bp_nodes = self.get_bp_system_nodes(bp_id)
    # Map each matched node's ID to the interface map name, preserving
    # the name-then-node iteration order of the original lookup.
    data = {
        "assignments": {
            node["id"]: im_name
            for name in node_names
            for node in bp_nodes.values()
            if node["label"] == name
        }
    }
    self.assign_interface_maps_raw(bp_id=bp_id, assignments=data)
    return data
# Connectivity Templates
def get_connectivity_templates_all(self, bp_id: str) -> dict:
    """Return the full obj-policy export (all connectivity templates)
    for the given blueprint."""
    return self.rest.json_resp_get(
        f"/api/blueprints/{bp_id}/obj-policy-export"
    )
def get_connectivity_template(self, bp_id: str, ct_id: str) -> dict:
    """Return the exported definition of one connectivity template.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    ct_id
        (str) - ID of the connectivity template (policy)
    Returns
    -------
    (dict) - exported policy document
    """
    r_path = f"/api/blueprints/{bp_id}/obj-policy-export/{ct_id}"
    return self.rest.json_resp_get(r_path)
def find_connectivity_template_by_name(self, bp_id: str, ct_name: str) -> dict:
    """Return the first 'batch' connectivity template whose label
    contains ct_name, or an empty dict when none matches."""
    policies = self.get_connectivity_templates_all(bp_id)["policies"]
    matches = (
        policy for policy in policies
        if ct_name in policy["label"]
        and policy["policy_type_name"] == "batch"
    )
    return next(matches, {})
def create_connectivity_template_from_json(
    self, bp_id: str, data: dict
) -> Optional[response]:
    """Create connectivity templates by importing a preformatted
    obj-policy JSON document into the blueprint.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    data
        (dict) - preformatted policy import payload
    Returns
    -------
    (response) - result of the PUT request
    """
    ct_path = f"/api/blueprints/{bp_id}/obj-policy-import"
    return self.rest.put(ct_path, data=data)
def update_connectivity_template(
    self, bp_id: str, data: dict
) -> Optional[response]:
    """Apply a batch update to connectivity templates in the blueprint.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    data
        (dict) - obj-policy batch-apply payload
    Returns
    -------
    (response) - result of the PATCH request
    """
    ct_path = f"/api/blueprints/{bp_id}/obj-policy-batch-apply"
    return self.rest.patch(ct_path, data=data)
def delete_connectivity_template(
    self, bp_id: str, ct_id: str
) -> Optional[response]:
    """Recursively delete a connectivity template (policy) from a blueprint.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    ct_id
        (str) - ID of the connectivity template (policy) to delete
    Returns
    -------
    (response) - result of the DELETE request
    """
    r_path = f"/api/blueprints/{bp_id}/policies/{ct_id}"
    params = {"delete_recursive": True}
    # Bug fix: the response was previously discarded, so the function
    # always returned None despite the Optional[response] annotation.
    return self.rest.delete(r_path, params=params)
def get_endpoint_policy(self, bp_id: str, policy_id: str) -> dict:
    """Return a single endpoint policy from the blueprint by its ID.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    policy_id
        (str) - ID of the endpoint policy
    Returns
    -------
    (dict) - endpoint policy
    """
    p_path = f"/api/blueprints/{bp_id}/endpoint-policies/{policy_id}"
    return self.rest.json_resp_get(p_path)
def get_endpoint_policies(self, bp_id: str, ptype: str = "staging") -> Dict:
    """
    Fetch the endpoint policies defined in a blueprint.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    ptype
        (str) - (optional) type query parameter, defaults to "staging"
    Returns
    -------
    (dict) - endpoint policies
    """
    ep_path = (
        f"/api/blueprints/{bp_id}/experience/web/"
        f"endpoint-policies?type={ptype}"
    )
    return self.rest.json_resp_get(ep_path)
def get_endpoint_policy_app_points(
    self, bp_id: str, policy_id: str = None
) -> dict:
    """Return application points for endpoint policies in a blueprint.

    policy_id is passed as the 'policy' query parameter; when None, the
    endpoint presumably returns application points for all policies —
    TODO confirm against the API.
    """
    p_path = f"/api/blueprints/{bp_id}/obj-policy-application-points"
    params = {"policy": policy_id}
    return self.rest.json_resp_get(p_path, params=params)
def get_routing_policies(self, bp_id: str, bp_type="staging") -> Dict:
    """
    Fetch the routing policies defined in a blueprint.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    bp_type
        (str) - (optional) type query parameter, defaults to "staging"
    Returns
    -------
    (dict) - routing policies
    """
    rp_path = f"/api/blueprints/{bp_id}/routing-policies?type={bp_type}"
    return self.rest.json_resp_get(rp_path)
# External Routers
def get_external_routers_all(self, bp_id: str):
    """
    Return the list of external routers imported into a blueprint.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    Returns
    -------
    (list) external router records from the response's "items" field
    """
    resp = self.rest.json_resp_get(
        f"/api/blueprints/{bp_id}/external-routers"
    )
    return resp["items"]
def get_external_router(self, bp_id: str, bp_rtr_id: str):
    """
    Return one external router node from a blueprint by its blueprint
    node ID.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    bp_rtr_id
        (str) Blueprint node ID of external router
    Returns
    -------
    (dict) - external router record
    """
    r_path = f"/api/blueprints/{bp_id}/external-routers/{bp_rtr_id}"
    return self.rest.json_resp_get(r_path)
def find_external_router_by_name(self, bp_id: str, rtr_name: str):
    """
    Return every external router in the blueprint whose display_name
    equals rtr_name.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    rtr_name
        (str) display name (label) of the external router
    Returns
    -------
    (list) matching external router records (possibly empty)
    """
    matches = []
    for rtr in self.get_external_routers_all(bp_id):
        if rtr["display_name"] == rtr_name:
            matches.append(rtr)
    return matches
def get_external_router_links(self, bp_id: str):
    """
    Return all links available in a blueprint for external-router
    fabric connectivity.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    Returns
    -------
    (list) link records from the response's "links" field
    """
    resp = self.rest.json_resp_get(
        f"/api/blueprints/{bp_id}/external-router-links"
    )
    return resp["links"]
def apply_external_router(
    self,
    bp_id: str,
    ext_rtr_id: str = None,
    ext_rtr_name: str = None,
    connectivity_type: str = "l3",
    links: list = None,
):
    """
    Import an external router into a blueprint and configure the
    fabric-side links used to peer with it.

    One of ext_rtr_id or ext_rtr_name is required; a name is resolved to
    an ID first, raising AosAPIResourceNotFound when no router matches.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    ext_rtr_id
        (str) Optional - Blueprint node ID of external router
    ext_rtr_name
        (str) Optional - Name of external router
    connectivity_type
        (str) connectivity type for fabric connections to the external
        router: 'l3', 'l2' or 'bond'
    links
        (list) Optional - links to use; defaults to every available
        external-router link in the blueprint
    Returns
    -------
    blueprint node ID of the imported external router
    """
    if ext_rtr_name:
        candidates = AosExternalRouter(self.rest).find_by_name(
            rtr_name=ext_rtr_name
        )
        if not candidates:
            raise AosAPIResourceNotFound(
                f"Unable to find external router with name {ext_rtr_name}"
            )
        ext_rtr_id = candidates[0].id

    bp_rtr_id = self.rest.json_resp_post(
        f"/api/blueprints/{bp_id}/external-routers",
        data={"router_id": ext_rtr_id},
    )["id"]

    # Default to every available external-router link in the blueprint.
    if not links:
        links = list(self.get_external_router_links(bp_id))

    self.rest.put(
        f"/api/blueprints/{bp_id}/external-router-links/{bp_rtr_id}",
        data={"connectivity_type": connectivity_type, "links": list(links)},
    )
    return bp_rtr_id
def delete_external_router(self, bp_id: str, bp_rtr_id: str):
    """Remove an external router (by blueprint node ID) from a blueprint."""
    r_path = f"/api/blueprints/{bp_id}/external-routers/{bp_rtr_id}"
    self.rest.delete(r_path)
def create_ext_generic_systems(
    self,
    bp_id: str,
    hostname: str,
    asn: str = None,
    loopback_ip: str = None,
    tags: list = None,
):
    """
    Create an external-generic blueprint node (used for external router
    configuration templates), then patch its ASN and loopback address.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    hostname
        (str) Name assigned to the node and device as hostname
    asn
        (str) ASN number assigned to the external router for BGP peering
        with the AOS managed fabric
    loopback_ip
        (str) IPv4 address assigned to the external router for BGP
        loopback peering, e.g. "10.10.11.11/32"
    tags
        (list) Blueprint tags associated with the node.
    Returns
    -------
    dict
    """
    created = self.rest.json_resp_post(
        f"/api/blueprints/{bp_id}/external-generic-systems",
        data={"hostname": hostname, "label": hostname, "tags": tags},
    )
    node_id = created["id"]
    # Patch ASN (domain) and loopback onto the newly created system node.
    self.rest.patch(
        f"/api/blueprints/{bp_id}/systems/{node_id}/domain",
        data={"domain_id": asn},
    )
    self.rest.patch(
        f"/api/blueprints/{bp_id}/systems/{node_id}/loopback/0",
        data={"ipv4_addr": loopback_ip},
    )
    return self.get_bp_node_by_id(bp_id, node_id)
def delete_external_generic_system(
    self, bp_id: str, node_id: str
) -> Optional[response]:
    """Delete an external-generic system node from a blueprint.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    node_id
        (str) blueprint node ID of the external-generic system
    Returns
    -------
    (response) - result of the DELETE request
    """
    r_path = f"/api/blueprints/{bp_id}/external-generic-systems/{node_id}"
    # Bug fix: the response was previously discarded, so the function
    # always returned None despite the Optional[response] annotation.
    return self.rest.delete(r_path)
# IBA probes and dashboards
def get_all_probes(self, bp_id: str):
    """
    Return all IBA probes for a given blueprint.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    Returns
    -------
    (dict) - decoded JSON response; probes live under the top-level
    "items" key (see get_probe, which iterates probes["items"])
    """
    p_path = f"/api/blueprints/{bp_id}/probes"
    return self.rest.json_resp_get(p_path)
def get_predefined_probes(self, bp_id: str):
    """
    Return all IBA predefined probes for a given blueprint.

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    Returns
    -------
    (dict) - decoded JSON response
    """
    p_path = f"/api/blueprints/{bp_id}/iba/predefined-probes"
    return self.rest.json_resp_get(p_path)
def get_probe(self, bp_id: str, probe_id: str = None, probe_name: str = None):
    """
    Look up a single IBA probe in a blueprint by name or by ID.

    probe_name takes precedence over probe_id. Raises AosAPIError when
    the requested probe does not exist. Returns None when the blueprint
    reports no probes at all, or when neither selector is supplied
    (preserved behavior of the original implementation).

    Parameters
    ----------
    bp_id
        (str) ID of blueprint
    probe_id
        (str) ID of IBA probe
    probe_name
        (str) name (label) of IBA probe
    Returns
    -------
    (dict) probe record
    """
    probes = self.get_all_probes(bp_id=bp_id)
    if not probes:
        return None
    if probe_name:
        for probe in probes["items"]:
            if probe["label"] == probe_name:
                return probe
        raise AosAPIError(f"IBA Probe {probe_name} not found")
    if probe_id:
        for probe in probes["items"]:
            if probe["id"] == probe_id:
                return probe
        raise AosAPIError(f"IBA Probe {probe_id} not found")
# Security Zones
def get_all_security_zones(self, bp_id: str) -> List[SecurityZone]:
    """
    Return every security zone (VRF) in a blueprint as SecurityZone
    objects.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS Blueprint
    Returns
    -------
    [SecurityZone]
    """
    items = self.rest.json_resp_get(
        f"/api/blueprints/{bp_id}/security-zones"
    )["items"]
    return [SecurityZone.from_json(item) for item in items.values()]
def get_security_zone(self, bp_id, sz_id) -> SecurityZone:
    """
    Return one security zone (VRF) from a blueprint by its ID.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    sz_id
        (str) - ID of security-zone
    Returns
    -------
    SecurityZone
    """
    sz_json = self.rest.json_resp_get(
        f"/api/blueprints/{bp_id}/security-zones/{sz_id}"
    )
    return SecurityZone.from_json(sz_json)
def find_sz_by_name(self, bp_id: str, name: str) -> Optional[SecurityZone]:
    """
    Return the security zone (VRF) whose vrf_name equals name, or None
    when no zone in the blueprint matches.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    name
        (str) - vrf_name of the security-zone to find
    Returns
    -------
    SecurityZone or None
    """
    return next(
        (sz for sz in self.get_all_security_zones(bp_id)
         if sz.vrf_name == name),
        None,
    )
def create_security_zone_from_json(
    self, bp_id: str, payload: dict, params: dict = None
):
    """
    Create a security-zone in the given blueprint using a
    preformatted json object.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    payload
        (dict) - json object for payload
    params
        (dict) - supported endpoint parameters. Most common is
        {'async': 'full'} which returns created task ID for tracking
    Returns
    -------
    (obj) - security-zone ID
    """
    sz_path = f"/api/blueprints/{bp_id}/security-zones"
    return self.rest.json_resp_post(uri=sz_path, data=payload, params=params)
def apply_security_zone_dhcp(self, bp_id: str, sz_id: str, dhcp_servers: dict):
    """
    Set the DHCP servers on a security zone, then return the refreshed
    security zone.

    Parameters
    ----------
    bp_id
        (str) - ID of AOS blueprint
    sz_id
        (str) - ID of security-zone
    dhcp_servers
        (dict) - DHCP server payload PUT to the security zone
    Returns
    -------
    SecurityZone
    """
    self.rest.put(
        uri=f"/api/blueprints/{bp_id}/security-zones/{sz_id}/dhcp-servers",
        data=dhcp_servers,
    )
    return self.get_security_zone(bp_id, sz_id)
def create_security_zone(
self,
bp_id: str,
name: str,
routing_policy: dict = None,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.