input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
considered as first day of
pandemic in given voivodeship, based on percentage of counties in which
at least one person died.
+ day number since 04-03-2020, which is considered as first day of
pandemic in given voivodeship, based on percentage of counties in which
at least one person died.
+ day number since 04-03-2020, which is considered as first day of
pandemic in given voivodeship, based on percentage of counties in which
at least one person fell ill.
+ death tolls correlated to both dates.
"""
@classmethod
def __show_and_save(cls, fig, plot_type, plot_name, save, show,
file_format='pdf'):
"""Function that shows and saves figures.
Parameters:
:param fig: figure to be shown/saved,
:type fig: matplotlib figure,
:param plot_type: general description of a plot type, each unique type will have its own folder,
:type plot_type: str,
:param plot_name: detailed name of plot, it will serve as filename,
:type plot_name: str,
:param save: save plot?
:type save: bool,
:param show: show plot?
:type show: bool,
"""
# set fig as current figure
plt.figure(fig)
if save:
save_dir = f'{Directories.ABM_DIR}/RESULTS/plots/{plot_type}/'
Path(save_dir).mkdir(parents=True, exist_ok=True)
plt.savefig(save_dir + plot_name + '.' + file_format.lower())
if show:
plt.show()
plt.close(fig)
@classmethod
def _get_plot_name_from_title(cls, title: str):
"""
Creates plot filename from its title.
Parameters
----------
title : str
plot title as it is seen by plt.show()
Returns
-------
plot_name : str
plot_name which can be used as plot filename
"""
plot_name = title
plot_name = plot_name.replace('\n', ' ')
plot_name = plot_name.replace(' ', ' ')
if plot_name[-1:] == ' ':
plot_name = plot_name[:-1]
return plot_name
@staticmethod
def _annotate_day_0_on_X_axis_as_date(
ax: matplotlib.axes,
date0: datetime.datetime) -> None:
ax.set_xticks(ax.get_xticks().tolist()[1:-1])
ax.set_xticklabels(
[f'{x:.0f}' if x != 0 else f'{date0: %Y-%m-%d}' for x in
ax.get_xticks().tolist()])
@classmethod
def show_real_death_toll_shifted_by_hand(cls,
starting_day=10,
day_in_which_colors_are_set=60,
last_day=100,
directory_to_data=None,
shift_simulated=False,
save=False,
show=True): # sourcery no-metrics
"""
Makes many death toll plots. On each plot there is death toll for group of similar
voivodeships. Plots are shifted along X axis in such a way, that pandemic begins
in starting_day.
Similarity was defined by hand, by looking at death tolls of all voivodeships
shifted such plots started with chosen value of death toll and looking for what
initial value of death toll plot smoothly increases.
If directory_to_data is given than shifted simulated data are also plotted.
"""
warnings.warn(
"Please use `VisRealDTShiftedByHand.plot()` instead of this.",
DeprecationWarning)
# make dict: dict[starting_deaths] = list(voivodeship1, voivodeship2, ...) *****************************
voivodeship_starting_deaths = RealData.get_starting_deaths_by_hand()
unique_death_shifts = sorted(
list(set(voivodeship_starting_deaths.values())))
death_shifts = {
death_shift: [
voivodeship
for voivodeship, val in voivodeship_starting_deaths.items()
if val == death_shift
]
for death_shift in unique_death_shifts
}
# ****************************************************************************************************
# for each pair (minimum_deaths, [voivodeship1, voivodeship2, ..]
for minimum_deaths, voivodeships in death_shifts.items():
shifted_real_death_toll = \
RealData.get_shifted_real_death_toll_to_common_start_by_num_of_deaths(
starting_day=starting_day,
minimum_deaths=minimum_deaths)
true_start_day = RealData.get_date_of_first_n_death(
n=minimum_deaths)
# df_indices_order[voivodeship] = num of voivodeship sorted shifted_real_death_toll
# by death toll in day = 60. Used to determine colors of lines in the plot.
df_indices_order = sort_df_indices_by_col(
df=shifted_real_death_toll, column=day_in_which_colors_are_set)
death_toll_final_order = {}
i = 0
for voivodeship in df_indices_order:
if voivodeship in voivodeships:
death_toll_final_order[voivodeship] = i
i += 1
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax.set_title(
f"Death toll for ,,similar'' voivodeships, shifted in such a way, that in day {starting_day} "
f"death toll is not less than {minimum_deaths}.\n"
f"Mapping: (voivodeship, line color) was performed in day {day_in_which_colors_are_set} "
f"with respect to death toll in that day.")
ax.set_xlabel(
f't, days since first {minimum_deaths} people died in given voivodeship')
ax.set_ylabel(
f'Death toll (since first {minimum_deaths} people died in given voivodeship)')
num_of_lines = len(voivodeships)
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, num_of_lines)]
for voivodeship, index in death_toll_final_order.items():
x = shifted_real_death_toll.columns # x = days of pandemic = [0, 1, ...]
y = shifted_real_death_toll.loc[voivodeship]
left = voivodeship
right = f"day {starting_day} = {true_start_day[voivodeship]}"
label = '{:<20} {:>22}'.format(left, right)
ax.plot(x[:last_day], y[:last_day],
label=label,
color=colors[index],
linewidth=3)
if directory_to_data:
fnames = all_fnames_from_dir(directory=directory_to_data)
num_of_lines = len(fnames)
cmap = plt.get_cmap('vir idis')
colors = [cmap(i) for i in np.linspace(0, 1, num_of_lines)]
for i, fname in enumerate(fnames):
beta = float(
variable_params_from_fname(fname=fname)['beta'])
mortality = float(
variable_params_from_fname(fname=fname)['mortality'])
visibility = float(
variable_params_from_fname(fname=fname)['visibility'])
df = pd.read_csv(fname)
common_day = 0
if shift_simulated:
y = np.array(df['Dead people'])
try:
common_day = np.where(y >= minimum_deaths)[0][0]
except IndexError:
common_day = 0
y = y[common_day - starting_day:]
x = list(range(len(y)))
else:
x = df['Day']
y = df['Dead people']
beta_info = r'$\beta$=' + f'{beta}'
mortality_info = f'mortality={mortality * 100:.1f}%'
visibility_info = f'visibility={visibility * 100:.0f}%'
day_info = f"day {starting_day}=0"
if shift_simulated:
day_info = f"day {starting_day}={common_day}"
label = '{:<10} {:>15} {:>15} {:>10}'.format(beta_info,
mortality_info,
visibility_info,
day_info)
ax.plot(x[:last_day], y[:last_day], label=label,
color=colors[i],
linewidth=1, linestyle='dashed')
ax.legend(prop={'family': 'monospace'}, loc='upper left')
plt.tight_layout()
if save:
plot_type = 'Real death toll for similar voivodeships, shifted by hand'
save_dir = 'results/plots/' + plot_type + '/'
Path(save_dir).mkdir(parents=True, exist_ok=True)
name = f'minimum_deaths={minimum_deaths}, ' \
f'starting_day={starting_day}, ' \
f'day_in_which_colors_are_set={day_in_which_colors_are_set} ' \
f'last_day={last_day}'
plt.savefig(save_dir + name + '.pdf')
if show:
plt.show()
plt.close(fig)
@classmethod
def plot_pandemic_starting_days_by_touched_counties(cls,
percent_of_death_counties: int,
percent_of_infected_counties: int,
normalize_by_population: bool,
save=False,
show=True):
"""
Plots first day of pandemic for all voivodeships since data were collected.
ALso plots death toll in that day.
"""
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
# PLot first (main) plot on main ax (starting day) **********************************************************
# make title and labels
main_info = ("Day that is consider as starting day of pandemic "
"for given voivodeship and death toll in that day.\n"
" Day 0 is 04-03-2020.")
ax.set_title(main_info)
ax.set_xlabel('Voivodeship')
ax.set_ylabel(
'Day number since 04-03-2020 which is considered to be the beginning of a pandemic')
# get starting day of pandemic by percent of touched counties
starting_days_deaths = RealData.starting_days(
by='deaths',
percent_of_touched_counties=percent_of_death_counties,
ignore_healthy_counties=False)
starting_days_infections = RealData.starting_days(
by='infections',
percent_of_touched_counties=percent_of_infected_counties,
ignore_healthy_counties=False)
# get voivodeships and used them to synchronize plots
voivodeships_synchro = starting_days_infections.keys()
color_infections = 'lime'
color_deaths = 'deepskyblue'
# plot starting_days_infections
l1 = ax.plot(voivodeships_synchro,
[starting_days_infections[voivodeship] for voivodeship in
voivodeships_synchro],
color=color_infections, linestyle='-.', marker='o',
mec='black',
label=(
f'Starting day by {percent_of_infected_counties}% of counties with '
f'at least one infected case.'))
# plot starting_days_deaths
l2 = ax.plot(voivodeships_synchro,
[starting_days_deaths[voivodeship] for voivodeship in
voivodeships_synchro],
color=color_deaths, linestyle='-.', marker='o',
mec='black',
label=(
f'Starting day by {percent_of_death_counties}% of counties with '
f'at least one death case.'))
# set y_lim to 0
ax.set_ylim([-5, None])
# rotate label of outer x-axis
for tick in ax.get_xticklabels():
tick.set_rotation(45)
# **************************************************************************************************
# Plot second plot on the other y-axis (death toll in starting day) ********************************
# get starting death toll by starting day by percent of touched counties
starting_death_toll_deaths = RealData.get_starting_death_toll_for_voivodeships_by_days(
voivodeships_days=starting_days_deaths)
starting_death_toll_infections = RealData.get_starting_death_toll_for_voivodeships_by_days(
voivodeships_days=starting_days_infections)
# second y axis
ax2 = ax.twinx()
lab_death_toll_deaths = 'Death toll in starting day (by deaths).'
lab_death_toll_infections = 'Death toll in starting day (by infections).'
# plot death toll on the second y-axis (normalized or not)
if normalize_by_population:
y_label2 = '(Death toll / population) ' r'$\cdot 10^5$'
population = RealData.get_real_general_data()['population']
# preserve order of voivodeship on X axis
p3 = ax2.scatter(voivodeships_synchro,
[starting_death_toll_infections[voivodeship] /
population[voivodeship] * (10 ** 5)
for voivodeship in voivodeships_synchro],
color=color_infections,
marker='s',
edgecolors='black',
label=lab_death_toll_infections)
p4 = ax2.scatter(voivodeships_synchro,
[starting_death_toll_deaths[voivodeship] /
population[voivodeship] * (10 ** 5)
for voivodeship in voivodeships_synchro],
color=color_deaths,
marker='s',
edgecolors='black',
label=lab_death_toll_deaths)
else:
y_label2 = 'Death toll (in given day)'
p3 = ax2.scatter(voivodeships_synchro,
[starting_death_toll_infections[voivodeship] for
voivodeship in voivodeships_synchro],
color=color_infections,
marker='s',
edgecolors='black',
label=lab_death_toll_infections)
p4 = ax2.scatter(voivodeships_synchro,
[starting_death_toll_deaths[voivodeship] for
voivodeship in voivodeships_synchro],
color=color_deaths,
marker='s',
edgecolors='black',
label=lab_death_toll_deaths)
# set y2 axis label
ax2.set_ylabel(y_label2)
# ***********************************************************************************************************
# add legend (both y-axis have common legend)
plots = [l1, l2, p3, p4]
# for some reason scatter plot is a standard object, but line plot is a list containing lines
plots = [p if type(p) != list else p[0] for p in plots]
labs = [p.get_label() for p in plots]
ax.legend(plots, labs)
plt.tight_layout()
cls.__show_and_save(fig=fig,
plot_type='Starting days for voivodeships based on touched district',
plot_name=(
f'percent_of_death_counties={percent_of_death_counties}, '
f'percent_of_infected_counties={percent_of_infected_counties}, '
f'normalize_by_population={normalize_by_population}'),
save=save,
show=show)
@classmethod
def plot_last_day_finding_process(
cls,
voivodeships=('all',),
start_days_by=StartingDayBy.INFECTIONS,
percent_of_touched_counties=80,
last_date='2020-07-01',
death_toll_smooth_out_win_size=21,
death_toll_smooth_out_polyorder=3,
derivative_half_win_size=3,
show=True,
save=False,
): # sourcery no-metrics
"""
Plots crucial steps in finding last day of pandemic in voivodeships.
Plots:
- death tool
- death tool smoothed
- slope of smoothed up death toll
"""
# get voivodeships
| |
#!/usr/bin/env python3
"""
Smoketest.py: Regression testing utility for Graphyne. Multiprocessing wrapper for Smoketest, allowing multiple simultaneous tests against different persistence types.
"""
from tkinter.test.runtktests import this_dir_path
from graphyne.DatabaseDrivers.DriverTermplate import linkTypes
__author__ = '<NAME>'
__copyright__ = 'Copyright 2016, <NAME>'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Production'
from xml.dom import minidom
from time import ctime
from os.path import expanduser
import copy
import os
import codecs
import time
import decimal
import queue
import sys
import argparse
#from os.path import expanduser
import graphyne.Graph as Graph
import graphyne.Fileutils as Fileutils
import graphyne.Exceptions as Exceptions
responseQueue = queue.Queue()
entityList = []
api = None
global testImplicit
testImplicit = True
#Globals
#graphDir = expanduser("~")
#graphDir = os.getcwd()
graphDir = os.path.dirname(os.path.abspath(__file__))
testDirPath = os.path.join("Config", "Test")
configDirPath = os.path.join("utils", "Config")
resultFile = None
moduleName = 'Smoketest'
logType = Graph.logTypes.CONTENT
logLevel = Graph.logLevel
class DBError(ValueError):
    """Error raised for database problems during smoke testing.

    NOTE(review): no raise-site is visible in this chunk; presumably raised
    when a persistence backend fails -- confirm against the full module.
    """
    pass
def testMetaMemeProperty():
    """Verify metameme properties against 'MetaMeme_Properties.atest'.

    Each whitespace-separated line of the test file contains: a metameme
    path, up to six property-name/property-type column pairs (the filler
    token 'XXX' marks unused slots) and, in column 13, the expected result.
    Every named property must exist on the metameme and have the expected
    type for the testcase to pass.

    Returns a list of result rows:
    [testcase number, metameme path, actual result, expected result, errata].
    """
    method = moduleName + '.' + 'testMetaMemeProperty'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, "MetaMeme_Properties.atest")
    # BUGFIX: the original called `readLoc.close` without parentheses and
    # never closed the file; a with-block guarantees closing.
    with codecs.open(testFileName, "r", "utf-8") as readLoc:
        allLines = readLoc.readlines()
    for n, eachReadLine in enumerate(allLines, start=1):
        stringArray = str.split(eachReadLine)
        testArgumentMap = {stringArray[1] : stringArray[2]}
        Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
        # columns after 2 can be repeated in pairs: 3/4, 5/6, ... 11/12 may
        # also contain argument/value pairs
        for argCol in (3, 5, 7, 9, 11):
            try:
                testArgumentMap[str(stringArray[argCol])] = str(stringArray[argCol + 1])
            except IndexError:
                break
        # 'XXX' is the filler token for unused columns
        testArgumentMap.pop('XXX', None)
        allTrue = True
        errata = []
        try:
            mmToTest = Graph.templateRepository.templates[stringArray[0]]
            props = mmToTest.properties
            Graph.logQ.put( [logType , logLevel.DEBUG , method , "testing metameme %s, props = %s" %(mmToTest.path.fullTemplatePath, props)])
            for testKey, testType in testArgumentMap.items():
                Graph.logQ.put( [logType , logLevel.DEBUG , method , "testKey = %s, testType = %s" %(testKey, testType)])
                #ToDo: Fix Me. We should not be using temp properties anymore
                try:
                    prop = mmToTest.getProperty(testKey)
                    Graph.logQ.put( [logType , logLevel.DEBUG , method , "prop = %s" %(prop)])
                    splitName = testKey.rpartition('.')
                    if (prop is not None) and (prop.name.find(splitName[2]) < 0):
                        Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s and test property %s don't match" %(prop.name, testKey)])
                        allTrue = False
                    else:
                        # NOTE(review): when prop is None this log line raises
                        # AttributeError, handled by the except below as a
                        # failure -- flow preserved from the original.
                        Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s and test property %s match" %(prop.name, testKey)])
                    if prop is not None:
                        if prop.propertyType != testType:
                            Graph.logQ.put( [logType , logLevel.WARNING , method , "property %s type %s and testType %s do not match" %(prop.name, prop.propertyType, testType)])
                            allTrue = False
                        else:
                            Graph.logQ.put( [logType , logLevel.DEBUG , method , "property %s type %s and testType %s match" %(prop.name, prop.propertyType, testType)])
                    else:
                        Graph.logQ.put( [logType , logLevel.WARNING , method , "property %s is invalid" %(testKey)])
                except Exception as e:
                    Graph.logQ.put( [logType , logLevel.ERROR , method , "Error pulling testkey %s from %s's properties. Traceback = %s" %(testKey, mmToTest.path.fullTemplatePath, e)])
                    allTrue = False
                if allTrue == False:
                    Graph.logQ.put( [logType , logLevel.DEBUG , method , "testkey %s has no match" %(testKey)])
        except Exception as e:
            errorMsg = ('Error! Traceback = %s' % (e) )
            errata.append(errorMsg)
        results = [n, str(stringArray[0]), str(allTrue), stringArray[13], copy.deepcopy(errata)]
        resultSet.append(results)
        Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    return resultSet
def testMetaMemeSingleton():
    """Verify metameme singleton flags against 'MetaMeme_Singleton.atest'.

    Each line holds a metameme path and 'TRUE'/'FALSE' -- whether that
    metameme is expected to be a singleton.

    Returns a list of result rows:
    [testcase number, metameme path, actual result, expected result, errata].
    """
    method = moduleName + '.' + 'testMetaMemeSingleton'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, "MetaMeme_Singleton.atest")
    # BUGFIX: use a with-block; the original's `readLoc.close` (missing call
    # parentheses) leaked the file handle.
    with codecs.open(testFileName, "r", "utf-8") as readLoc:
        allLines = readLoc.readlines()
    for n, eachReadLine in enumerate(allLines, start=1):
        errata = []
        stringArray = str.split(eachReadLine)
        Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
        expectedTestResult = stringArray[1] == 'TRUE'
        Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is expected to be a singleton == %s' %(stringArray[0], expectedTestResult)])
        testResult = False
        try:
            mmToTest = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
            if mmToTest.isSingleton:
                Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is a singleton' %(stringArray[0])])
                testResult = True
            else:
                Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is not a singleton' %(stringArray[0])])
        except Exception as e:
            errata.append('Error! Traceback = %s' % (e))
        results = [n, str(stringArray[0]), str(testResult), stringArray[1], errata]
        resultSet.append(results)
        Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    return resultSet
def testMetaMemeSwitch():
    """Verify metameme switch flags against 'MetaMeme_Switch.atest'.

    Each line holds a metameme path and 'TRUE'/'FALSE' -- whether that
    metameme is expected to be a switch.

    Returns a list of result rows:
    [testcase number, metameme path, actual result, expected result, errata].
    """
    method = moduleName + '.' + 'testMetaMemeSwitch'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, "MetaMeme_Switch.atest")
    # BUGFIX: with-block closes the file; the original's `readLoc.close`
    # (missing parentheses) never did.
    with codecs.open(testFileName, "r", "utf-8") as readLoc:
        allLines = readLoc.readlines()
    for n, eachReadLine in enumerate(allLines, start=1):
        errata = []
        stringArray = str.split(eachReadLine)
        Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
        expectedTestResult = stringArray[1] == 'TRUE'
        # BUGFIX: the log message said 'singleton' (copy-paste from the
        # singleton test); this function tests the switch flag.
        Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is expected to be a switch == %s' %(stringArray[0], expectedTestResult)])
        testResult = False
        try:
            mmToTest = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
            if mmToTest.isSwitch:
                Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is a switch' %(stringArray[0])])
                testResult = True
            else:
                Graph.logQ.put( [logType , logLevel.DEBUG , method , 'Metameme %s is not a switch' %(stringArray[0])])
        except Exception as e:
            errata.append('Error! Traceback = %s' % (e))
        results = [n, str(stringArray[0]), str(testResult), stringArray[1], errata]
        resultSet.append(results)
        Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    return resultSet
def testMetaMemeEnhancements():
    """Verify metameme 'enhances' relations against 'MetaMeme_Enhances.atest'.

    Columns 1 and 2 optionally name metamemes that the tested metameme is
    expected to enhance ('XXX' marks an unused column); column 3 holds the
    expected result. A testcase with no enhancement columns yields False.

    Returns a list of result rows:
    [testcase number, metameme path, actual result, expected result, errata].
    """
    method = moduleName + '.' + 'testMetaMemeEnhancements'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, "MetaMeme_Enhances.atest")
    # BUGFIX: with-block closes the file; the original's `readLoc.close`
    # (missing parentheses) never did.
    with codecs.open(testFileName, "r", "utf-8") as readLoc:
        allLines = readLoc.readlines()
    for n, eachReadLine in enumerate(allLines, start=1):
        errata = []
        stringArray = str.split(eachReadLine)
        # columns 1 & 2 may contain data; 'XXX' is the filler token
        testArgumentList = [col for col in stringArray[1:3] if col != 'XXX']
        Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, metameme %s" %(n, stringArray[0])])
        allTrue = False
        try:
            mmToTest = Graph.templateRepository.resolveTemplateAbsolutely(stringArray[0])
            Graph.logQ.put( [logType , logLevel.DEBUG , method , "testing metameme %s, enhancements = %s" %(mmToTest.path.fullTemplatePath, mmToTest.enhances)])
            # If the testcase lists no enhancements the result stays False;
            # otherwise start from True and falsify on any missing relation.
            # (The original expressed this with a redundant extra loop.)
            if testArgumentList:
                allTrue = True
            for testArgument in testArgumentList:
                amIextended = Graph.templateRepository.resolveTemplate(mmToTest.path, testArgument)
                Graph.logQ.put( [logType , logLevel.DEBUG , method , "checking to see if %s, enhances %s" %(mmToTest.path.fullTemplatePath, amIextended.path.fullTemplatePath)])
                #iterate over the enhancement list and see if we have a match
                testResult = False
                for enhancement in mmToTest.enhances:
                    Graph.logQ.put( [logType , logLevel.DEBUG , method , "testing enhancement %s against %s" %(enhancement, amIextended.path.fullTemplatePath)])
                    try:
                        enhancedMetaMeme = Graph.templateRepository.resolveTemplate(mmToTest.path, enhancement)
                        if enhancedMetaMeme.path.fullTemplatePath == amIextended.path.fullTemplatePath:
                            testResult = True
                            Graph.logQ.put( [logType , logLevel.DEBUG , method , "enhancement %s == %s" %(enhancement, amIextended.path.fullTemplatePath)])
                        else:
                            Graph.logQ.put( [logType , logLevel.DEBUG , method , "enhancement %s != %s" %(enhancement, amIextended.path.fullTemplatePath)])
                    except Exception:
                        Graph.logQ.put( [logType , logLevel.DEBUG , method , "tested metameme %s extends metameme %s, but is not in the repository." %(enhancement, mmToTest.path.fullTemplatePath)])
                if testResult == False:
                    allTrue = False
                if allTrue == False:
                    Graph.logQ.put( [logType , logLevel.DEBUG , method , "tested metameme %s does not have sought tested enhancement %s" %(mmToTest.path.fullTemplatePath, amIextended.path.fullTemplatePath)])
        except Exception as e:
            errorMsg = ('Error! Traceback = %s' % (e) )
            errata.append(errorMsg)
        results = [n, str(stringArray[0]), str(allTrue), stringArray[3], errata]
        resultSet.append(results)
        Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    return resultSet
def testMemeValidity():
method = moduleName + | |
"""Carry out off-policy analysis using previously generated obs. data."""
import argparse
import pickle
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
# config
THRESHOLDS = np.arange(300, 850, 5)
AXISFONTSIZE = 20
LEGENDSIZE = 20
TITLESIZE = 20
TICKSIZE = 15
def plot_estimation_errors(args):
    """Plot subfigures needed for figure 5.

    NOTE(review): relies on module-level data prepared in the __main__
    block (valid_data, train_data, utility_outcome_models,
    delta_outcome_models, treatment_models) -- must be called after those
    are defined; confirm if this module is ever imported elsewhere.

    :param args: parsed CLI arguments; only ``args.results_dir`` is used.
    :returns: tuple ``(inds, results)`` where ``inds`` maps group name to a
        boolean index array over valid_data, and
        ``results[do_rewards][(outcome_type, nm)]`` maps each threshold to a
        list of per-training-set reward-estimate tuples.
    """
    # group membership masks over the validation data
    all_inds = np.ones(shape=valid_data.shape[0]).astype(bool)
    inds_0 = valid_data[:, 0] < 0.5
    inds_1 = valid_data[:, 0] > 0.5
    # compute estimation errors (dead `scores` pre-assignment removed; it was
    # always overwritten inside the group loop before use)
    results = {}
    for do_rewards in [True, False]:
        results[do_rewards] = {}
        for outcome_type in ['utilities', 'score_changes']:
            print(outcome_type)
            if outcome_type == 'utilities':
                outcome_models = utility_outcome_models
            else:
                outcome_models = delta_outcome_models
            for inds, nm in [(all_inds, 'all'), (inds_0, '0'), (inds_1, '1')]:
                print(nm)
                scores = valid_data[inds, 1]
                res = {}
                # loop over thresholds; each threshold defines a policy
                # "treat iff score > thr"
                for thr in THRESHOLDS:
                    thr_results = []
                    target_treatments = (scores > thr).astype(float)
                    for i in range(len(train_data)):
                        (true_mean_reward, regression_mean_reward,
                         iw_mean_reward, dr_mean_reward) = evaluate_policy(
                            valid_data, target_treatments, inds,
                            clf_outcome=outcome_models[i],
                            clf_treatment=treatment_models[i],
                            outcome_type=outcome_type, rewards=do_rewards)
                        thr_results.append(
                            (true_mean_reward, regression_mean_reward,
                             iw_mean_reward, dr_mean_reward)
                        )
                    res[thr] = thr_results
                if do_rewards:
                    plt.clf()
                    # absolute estimation error, mean/std over training sets
                    reg_means = [np.mean([abs(res[thr][i][1] - res[thr][i][0])
                                          for i in range(len(train_data))])
                                 for thr in THRESHOLDS]
                    reg_stds = [np.std([abs(res[thr][i][1] - res[thr][i][0])
                                        for i in range(len(train_data))])
                                for thr in THRESHOLDS]
                    dr_means = [np.mean([abs(res[thr][i][3] - res[thr][i][0])
                                         for i in range(len(train_data))])
                                for thr in THRESHOLDS]
                    dr_stds = [np.std([abs(res[thr][i][3] - res[thr][i][0])
                                       for i in range(len(train_data))])
                               for thr in THRESHOLDS]
                    regcol = 'b'
                    drcol = 'r'
                    plt.fill_between(
                        THRESHOLDS,
                        [reg_means[i] - reg_stds[i] for i in range(len(reg_means))],
                        [reg_means[i] + reg_stds[i] for i in range(len(reg_means))],
                        color=regcol, alpha=0.5
                    )
                    plt.plot(THRESHOLDS, reg_means, label='Regression', c=regcol)
                    plt.fill_between(
                        THRESHOLDS,
                        [dr_means[i] - dr_stds[i] for i in range(len(dr_means))],
                        [dr_means[i] + dr_stds[i] for i in range(len(dr_means))],
                        color=drcol, alpha=0.5
                    )
                    plt.plot(THRESHOLDS, dr_means, label='Doubly Robust', c=drcol)
                    plt.xlabel('Threshold', fontsize=AXISFONTSIZE)
                    # BUGFIX: raw strings -- '\p' and '\D' are invalid escape
                    # sequences (SyntaxWarning on modern Python)
                    plt.ylabel('{} Estimation Error'.format(
                        r'$E_{\pi}[u]$' if outcome_type == 'utilities'
                        else r'$E_{\pi}[\Delta]$'), fontsize=AXISFONTSIZE
                    )
                    plt.title('Group A={}'.format(nm) if nm in ('0', '1') else nm, fontsize=TITLESIZE)
                    plt.xticks(fontsize=TICKSIZE)
                    plt.yticks(fontsize=TICKSIZE)
                    plt.legend(prop={'size': LEGENDSIZE})
                    plt.tight_layout()
                    plt.savefig(
                        os.path.join(args.results_dir,
                                     'threshold_policy_evaluation_{}_group_{}.pdf'
                                     .format(outcome_type, nm)))
                results[do_rewards][(outcome_type, nm)] = res
    inds = {'all': all_inds,
            '0': inds_0,
            '1': inds_1,
            }  # indices denoting positive and negative treatments
    return inds, results
def test_eqopp_thresholds(lmbda, best_thr, test_data):
    """Evaluate chosen group thresholds on the test set.

    Computes the true equality-of-opportunity gap between the two groups,
    the true utility, and prints the combined objective; returns the
    (utility, eqopp distance) pair.
    """
    group_0 = test_data[:, 0] < 0.5
    group_1 = test_data[:, 0] > 0.5
    eqopp_0 = get_true_eqopp_value(test_data, group_0, best_thr[0])
    eqopp_1 = get_true_eqopp_value(test_data, group_1, best_thr[1])
    print('P(Y | T): {:.5f}, {:.5f}'.format(
        eqopp_0, eqopp_1))
    eqopp_gap = abs(eqopp_0 - eqopp_1)
    utility = get_true_utility(test_data, best_thr[0], best_thr[1])
    objective = utility - eqopp_gap * lmbda
    print('True obj value at chosen thresholds: {:.3f}'.format(
        objective))
    print('True utility: {:.3f}, True eqopp: {:.5f}'.format(
        utility, eqopp_gap))
    return (utility, eqopp_gap)
def plot_policy_objective(args, train_data, valid_data, test_data, results,
                          inds):
    """Plot figure 6: policy objective vs fairness weight lambda.

    For each trained model pair, sweeps lambda in [0, 1), picks thresholds
    from the regression and doubly-robust caches, evaluates them on the
    test set and plots mean +/- std of the resulting objective.

    :param args: parsed CLI arguments; only ``args.results_dir`` is used.
    :param train_data: list of training arrays (one model per entry).
    :param valid_data: validation array used for threshold selection.
    :param test_data: held-out array for true evaluation.
    :param results: estimation results from ``plot_estimation_errors``.
    :param inds: group index dict from ``plot_estimation_errors``.
    """
    lmbdas = np.arange(0, 1, 0.1)
    res_eqopp = {}
    for clf_i in range(len(train_data)):
        print(clf_i)
        res_eqopp[clf_i] = {}
        thr_cache_res = get_eqopp_threshold_cache(
            1, 'regression', clf_i, valid_data, results, inds)
        thr_cache_dr = get_eqopp_threshold_cache(
            3, 'doubly robust', clf_i, valid_data, results, inds)
        for lmbda in lmbdas:
            print('regression estimator')
            best_thr_reg = get_eqopp_thresholds_from_cache(lmbda, thr_cache_res)
            u_reg, eq_reg = test_eqopp_thresholds(
                lmbda, best_thr_reg, test_data)
            print('doubly robust estimator')
            best_thr_dr = get_eqopp_thresholds_from_cache(lmbda, thr_cache_dr)
            u_dr, eq_dr = test_eqopp_thresholds(lmbda, best_thr_dr, test_data)
            res_eqopp[clf_i][lmbda] = {
                'reg': (u_reg, eq_reg), 'dr': (u_dr, eq_dr)
            }
    lw = 3
    plt.clf()
    regcol = 'b'
    drcol = 'r'
    # objective = utility - lambda * eqopp gap; mean/std over models
    reg_means = np.array([np.mean([res_eqopp[clf_i][lmbda]['reg'][0]
                                   - lmbda * res_eqopp[clf_i][lmbda]['reg'][1]
                                   for clf_i in range(len(train_data))])
                          for lmbda in lmbdas])
    dr_means = np.array([np.mean([res_eqopp[clf_i][lmbda]['dr'][0]
                                  - lmbda * res_eqopp[clf_i][lmbda]['dr'][1]
                                  for clf_i in range(len(train_data))])
                         for lmbda in lmbdas])
    reg_stds = np.array([np.std([res_eqopp[clf_i][lmbda]['reg'][0]
                                 - lmbda * res_eqopp[clf_i][lmbda]['reg'][1]
                                 for clf_i in range(len(train_data))])
                        for lmbda in lmbdas])
    dr_stds = np.array([np.std([res_eqopp[clf_i][lmbda]['dr'][0]
                                - lmbda * res_eqopp[clf_i][lmbda]['dr'][1]
                                for clf_i in range(len(train_data))])
                       for lmbda in lmbdas])
    plt.fill_between(lmbdas,
                     [reg_means[i] - reg_stds[i] for i in range(len(reg_means))],
                     [reg_means[i] + reg_stds[i] for i in range(len(reg_means))],
                     color=regcol, alpha=0.3)
    plt.plot(lmbdas, reg_means,
             label='Regression', lw=lw)
    # CONSISTENCY FIX: band indexed by dr_means (was reg_means; same length,
    # but the intent is clearer and robust to future changes)
    plt.fill_between(lmbdas,
                     [dr_means[i] - dr_stds[i] for i in range(len(dr_means))],
                     [dr_means[i] + dr_stds[i] for i in range(len(dr_means))],
                     color=drcol, alpha=0.3)
    plt.plot(lmbdas, dr_means,
             label='Doubly Robust', lw=lw)
    # BUGFIX: raw strings -- '\l' and '\m' are invalid escape sequences
    plt.xlabel(r'$\lambda$', fontsize=AXISFONTSIZE)
    plt.ylabel(r'$\mathcal{V}_{\pi}$', fontsize=AXISFONTSIZE)
    plt.xticks(fontsize=TICKSIZE)
    plt.yticks(fontsize=TICKSIZE)
    plt.legend(prop={'size': LEGENDSIZE})
    plt.tight_layout()
    plt.savefig(os.path.join(args.results_dir, 'policy_objective.pdf'))
    return
if __name__ == '__main__':
    # CLI: seed, where the observational data lives, where plots go
    parser = argparse.ArgumentParser(description='Off-policy analysis.')
    parser.add_argument('--seed', type=int, default=10,
                        help='Random seed for the analysis.')
    parser.add_argument('--obs_data_dir', type=str,
                        default='/tmp/causal-dyna-fair/observational_data',
                        help='Directory containing observational data.')
    parser.add_argument('--results_dir', type=str,
                        default='/tmp/causal-dyna-fair/off_policy_eval',
                        help='Directory where plots should be saved.')
    args = parser.parse_args()
    if not os.path.exists(args.results_dir):
        os.makedirs(args.results_dir)
    np.random.seed(args.seed)
    # Load, process, and split data
    NSEEDS = 13 # number of observational datasets previously generated
    data_filenames = [
        os.path.join(args.obs_data_dir, 'data.seed{:d}.p'.format(seed))
        for seed in range(NSEEDS)
    ]
    data_files = []
    # NOTE(review): pickle.load on locally generated result files --
    # trusted input assumed; do not point obs_data_dir at untrusted data.
    for data_filename in data_filenames:
        with open(data_filename, 'rb') as f:
            data = pickle.load(f)
        data_files.append(data)
    # The keys of the pickle dict are
    # * 'source_df' - pd.DataFrame from max profit policy
    # with missing values removed
    # * 'source_df_all' - pd.DataFrame from max profit policy
    # with _all_ values included
    #
    # For each pd.DataFrame, the columns are
    # 'group_0': 1 if race == 'White' else 0
    # 'credit_score_0': initial credit score
    # 'Score Change': difference in final and initial credit score
    # 'Profit': bank profit for this individual
    # 'Loan Approved': whether loan was offered
    # 'Loan Repaid': whether loan was repaid
def process_dataframe(df, flip):
cols = df['source_df_all'].columns
indices = {cols[i]: i for i in range(len(cols))}
d = df['source_df_all'].values
noise = np.random.binomial(1, 1 - flip, size=d.shape[0])
loan_i = indices['Loan Approved']
noisy_treatments = noise * d[:, loan_i] + (1 - noise) * (
1 - d[:, loan_i]
)
dn = np.copy(d)
dn[:, loan_i] = noisy_treatments
return dn
flip_prob = 0.1 # need this to satisfy overlap assumption
n_train_data = 11 # 11 dsets used to train
valid_data_ix = 11 # 12th used for validation data
test_data_ix = 12 # 13th is test set
train_data = [process_dataframe(d, flip_prob)
for d in data_files[:n_train_data]]
valid_data = process_dataframe(data_files[valid_data_ix], flip_prob)
test_data = process_dataframe(data_files[test_data_ix], flip_prob)
    def train_outcome_model(d, outcome_type):
        """Fit a logistic outcome model on treated rows (features = cols 0-1,
        outcome = col 5).

        NOTE(review): `outcome_type` is validated but otherwise unused --
        both 'utilities' and 'score_changes' fit on column 5; confirm this
        is intended.
        """
        assert outcome_type in ('utilities', 'score_changes')
        received_treatment = d[:, 4].astype(bool)
        outcomes = d[received_treatment, 5]
        clf_outcome = LogisticRegression(solver='liblinear').fit(
            d[received_treatment, :2], outcomes
        )
        return clf_outcome
def train_treatment_model(d):
clf_treatment = LogisticRegression(solver='liblinear').fit(
d[:, :2], d[:, 4]
)
return clf_treatment
utility_outcome_models = [train_outcome_model(tr, 'utilities')
for tr in train_data]
delta_outcome_models = [train_outcome_model(tr, 'score_changes')
for tr in train_data]
treatment_models = [train_treatment_model(tr) for tr in train_data]
def evaluate_policy(d, target_treatments, inds,
                    clf_outcome, clf_treatment, outcome_type='utilities',
                    rewards=True):
    """Score a candidate lending policy on logged data, four ways.

    Computes the mean reward of ``target_treatments`` over the rows of ``d``
    selected by ``inds`` using (1) the ground-truth outcomes, (2) a direct
    regression (outcome-model) estimate, (3) inverse propensity weighting,
    and (4) the doubly robust combination of (2) and (3).

    :param d: data matrix; cols 0-1 = user features, col 4 = observed
        treatment, col 5 = observed outcome
    :param target_treatments: 0/1 array of the policy's treatment decisions
    :param inds: row selector into ``d``
    :param clf_outcome: outcome model exposing predict_proba over features
    :param clf_treatment: propensity model exposing predict_proba
    :param outcome_type: 'utilities' (+1/-4 reward scaling) or
        'score_changes' (+75/-150 reward scaling)
    :param rewards: if False, skip reward scaling and use the raw outcome
        indicators / probabilities
    :return: tuple (true, regression, IPW, doubly-robust) mean rewards
    """
    assert outcome_type in ('utilities', 'score_changes')
    observed_treatments = d[inds, 4]
    features = d[inds, :2]
    observed_outcomes = d[inds, 5]

    # Reward scaling differs by outcome type; the formula is otherwise shared.
    pos_w, neg_w = (1, 4) if outcome_type == 'utilities' else (75, 150)
    if rewards:
        true_utilities = pos_w * observed_outcomes - neg_w * (1 - observed_outcomes)
    else:
        true_utilities = observed_outcomes

    # (1) Ground-truth value of the policy.
    true_mean_reward = np.mean(target_treatments * true_utilities)

    # (2) Direct-method estimate from the outcome model.
    outcome_probs = clf_outcome.predict_proba(features)
    if rewards:
        predicted_utilities = pos_w * outcome_probs[:, 1] - neg_w * outcome_probs[:, 0]
    else:
        predicted_utilities = outcome_probs[:, 1]
    regression_reward = target_treatments * predicted_utilities
    regression_mean_reward = np.mean(regression_reward)

    # (3) Inverse propensity weighting: keep only rows where the logged
    # treatment agrees with the policy, reweighted by 1 / P(agreement).
    agreement = np.equal(target_treatments.astype(int),
                         observed_treatments.astype(int))
    propensity = clf_treatment.predict_proba(features)[:, 1]
    agreement_prob = propensity * target_treatments + (
        1 - propensity
    ) * (1 - target_treatments)
    observed_reward = true_utilities * observed_treatments
    weighted_values = (agreement * observed_reward) / agreement_prob
    iw_mean_reward = np.mean(weighted_values)

    # (4) Doubly robust: IPW term corrected by the regression baseline.
    dr_values = weighted_values - (
        (agreement - agreement_prob) / agreement_prob
    ) * regression_reward
    dr_mean_reward = np.mean(dr_values)

    return (true_mean_reward,
            regression_mean_reward,
            iw_mean_reward,
            dr_mean_reward)
# Subgroup row-masks and baseline estimation-error results; helper is
# defined elsewhere in this file -- presumably also renders the error plots.
inds, results = plot_estimation_errors(args)
def estimate_p_y_1(data, inds, outcome_type, estimator_index, clf_i):
    """Monte-Carlo estimate of P(Y = 1) under a random 90%-treatment policy.

    Draws ``n_rounds`` random policies that treat each individual with
    probability ``treat_prob``, evaluates each with the chosen estimator
    (index into evaluate_policy's return tuple) and the ``clf_i``-th trained
    model pair, and rescales by 1/treat_prob to correct for only treating a
    fraction of the population.  Returns the average over draws.
    """
    models = (utility_outcome_models if outcome_type == 'utilities'
              else delta_outcome_models)
    treat_prob = 0.9
    n_rounds = 20
    draws = []
    for _ in range(n_rounds):
        random_policy = np.random.binomial(1, treat_prob, size=inds.sum())
        results = evaluate_policy(
            data, random_policy, inds,
            clf_outcome=models[clf_i],
            clf_treatment=treatment_models[clf_i],
            outcome_type=outcome_type, rewards=False
        )
        draws.append(results[estimator_index] * (1 / treat_prob))
    return np.mean(draws)
# For every subgroup and each estimator (0 = ground truth, 1 = regression,
# 3 = doubly robust), estimate P(Y = 1) on the validation data once per
# trained model replicate.
p_y_1_vals = {}
for nm_, inds_ in inds.items():
    nm_ = str(nm_)
    for est_index in [0, 1, 3]:
        for outcome_type in ['utilities', 'score_changes']:
            print(nm_, est_index, outcome_type)
            p_y_1_vals[(outcome_type, nm_, est_index)] = []
            for clf_i in range(len(train_data)):
                # np.mean is a no-op on the scalar returned by estimate_p_y_1
                mn = np.mean(estimate_p_y_1(
                    valid_data, inds_, outcome_type, est_index, clf_i))
                p_y_1_vals[(outcome_type, nm_, est_index)].append(mn)
def get_eqopp_value(data, inds, thr, group_name, estimator_index,
clf_i, results):
# want P(T = 1 | Y_1 = 1) = P(Y = 1 | T = 1) * P(T = 1) / P(Y = 1)
scores = | |
file_out = './molgears/public/img/bitmap/thumb%s.bmp' %row.gid
img.thumbnail(size, Image.ANTIALIAS)
img.save(file_out)
sheet.insert_bitmap(file_out , i,j, 5, 5)
j+=1
if 'smiles' in options:
sheet.write(i,j, str(row.mol.structure))
j+=1
if 'inchi' in options:
sheet.write(i,j, str(row.mol.inchi))
j+=1
if 'lso' in options:
sheet.write(i,j, row.lso)
j+=1
if 'num_atoms' in options:
sheet.write(i,j,str(row.mol.num_hvy_atoms)+'/'+str(row.mol.num_atoms))
j+=1
if 'mw' in options:
sheet.write(i,j, str(row.mol.mw))
j+=1
if 'logp' in options:
sheet.write(i,j, str(row.mol.logp))
j+=1
if 'hba' in options:
sheet.write(i,j, str(row.mol.hba))
j+=1
if 'hbd' in options:
sheet.write(i,j, str(row.mol.hbd))
j+=1
if 'tpsa' in options:
sheet.write(i,j, str(row.mol.tpsa))
j+=1
if 'form' in options:
sheet.write(i,j, row.form)
j+=1
if 'state' in options:
sheet.write(i,j, str(row.state))
j+=1
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s\n' % (p.value, p.type)
sheet.write(i,j, pur)
j+=1
if 'create_date' in options:
sheet.write(i,j, str(row.create_date))
j+=1
if 'owner' in options:
sheet.write(i,j, row.owner)
j+=1
if 'principal' in options:
sheet.write(i,j, row.principal)
j+=1
if 'priority' in options:
sheet.write(i,j, row.priority)
j+=1
if 'etap' in options:
sheet.write(i,j, str(next(obj.etap for obj in row.effort if obj.id==row.effort_default)) + '/' + str(next(obj.etap_max for obj in row.effort if obj.id==row.effort_default)))
j+=1
if 'status' in options:
sheet.write(i,j, row.status.name)
j+=1
if 'tags' in options:
tagsy=u''
for tag in row.mol.tags:
tagsy += tag.name + u', '
sheet.write(i,j,tagsy)
j+=1
if 'notes' in options:
sheet.write(i,j, row.notes)
j+=1
i += 1
wbk.save(filepath)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'csv' or 'txt':
filename = userid + '_selected.' + kw['file_type']
filepath = os.path.join('./molgears/files/download/', filename)
from molgears.widgets.unicodeCSV import UnicodeWriter
import csv
if kw['file_type'] == u'csv':
delimiter = ';'
else:
delimiter = ' '
with open(filepath, 'wb') as csvfile:
spamwriter = UnicodeWriter(csvfile, delimiter=delimiter,
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in scompounds:
line =[]
if 'smiles' in options:
line.append(str(row.mol.structure))
if 'name' in options:
line.append(row.mol.name)
if 'nr' in options:
line.append(unicode(scompounds.index(row)+1))
if 'gid' in options:
line.append(unicode(row.gid))
if 'id' in options:
line.append(unicode(row.id))
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + u', '
line.append(names)
if 'inchi' in options:
line.append(row.mol.inchi)
if 'lso' in options:
line.append(row.lso)
if 'num_atoms' in options:
line.append(unicode(row.mol.num_hvy_atoms)+'/'+unicode(row.mol.num_atoms))
if 'mw' in options:
line.append(unicode(row.mol.mw))
if 'logp' in options:
line.append(unicode(row.mol.logp))
if 'hba' in options:
line.append(unicode(row.mol.hba))
if 'hbd' in options:
line.append(unicode(row.mol.hbd))
if 'tpsa' in options:
line.append(unicode(row.mol.tpsa))
if 'form' in options:
line.append(row.form)
if 'state' in options:
line.append(unicode(row.state))
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s\n' % (p.value, p.type)
line.append(pur)
if 'create_date' in options:
line.append(unicode(row.create_date))
if 'owner' in options:
line.append(row.owner)
if 'principal' in options:
line.append(row.principal)
if 'priority' in options:
line.append(unicode(row.priority))
if 'etap' in options:
line.append(unicode(next(obj.etap for obj in row.effort if obj.id==row.effort_default)) + u'/' + unicode(next(obj.etap_max for obj in row.effort if obj.id==row.effort_default)))
if 'status' in options:
line.append(row.status.name)
if 'tags' in options:
tagsy= ''
for tag in row.mol.tags:
tagsy += tag.name + ', '
line.append(tagsy)
if 'notes' in options:
line.append(row.notes)
spamwriter.writerow(line)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
if selection and not search_clicked:
argv =''
for arg in selection:
argv += '/' + arg
if kw['akcja'] == u'edit':
if len(selection) == 1:
redirect('/%s/synthesis/edit%s' % (pname, argv))
else:
redirect('/%s/synthesis/multiedit/get_all%s' % (pname, argv))
elif kw['akcja'] == u'etap':
if len(selection) == 1:
redirect('/%s/synthesis/etap%s' % (pname, argv))
else:
redirect('/%s/synthesis/multietap/get_all%s' % (pname, argv))
elif kw['akcja'] == u'addreag':
if len(selection) == 1:
redirect('/%s/synthesis/addreag%s' % (pname, argv))
else:
redirect('/%s/synthesis/multiaddreag/get_all%s' % (pname, argv))
elif kw['akcja'] == u'recive':
if len(selection) == 1:
redirect('/%s/synthesis/accept%s' % (pname, argv))
else:
flash(l_(u'Recive Compounds one by one'), 'error')
redirect('/%s/synthesis/get_all' % pname)
else:
redirect('/%s/synthesis/%s%s' % (pname, kw['akcja'], argv))
user = DBSession.query(User).filter_by(user_name=userid).first()
items = user.items_per_page
page_url = paginate.PageURL_WebOb(request)
currentPage = paginate.Page(scompound, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage, user=user, status=status, tmpl=tmpl, page='synthesis', alltags=alltags, allstatus=allstatus, similarity=similarity, pname=pname, ulists=ulists)
@expose('molgears.templates.users.synthesis.get_all')
def get_all(self, page=1, *args, **kw):
pname = request.environ['PATH_INFO'].split('/')[1]
userid = request.identity['repoze.who.userid']
status = DBSession.query(SStatus).get(3)
scompound = DBSession.query(SCompound).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname))
dsc = True
order = 'id'
selection = None
ulist=None
tmpl = ''
alltags =[tag for tag in DBSession.query(Tags).order_by('name').all() ]
allstatus = [stat for stat in DBSession.query( SStatus ).all()]
similarity = None
kierownik = has_permission('kierownik')
user = DBSession.query(User).filter_by(user_name=userid).first()
ulists = [l for l in user.lists if l.table == 'SCompounds']
try:
if kw['search'] != u'':
search_clicked = kw['search']
else:
search_clicked = None
except Exception:
search_clicked = None
if kw:
if kw.has_key('mylist'):
try:
ulist_id = int(kw['mylist'])
ulist = DBSession.query(UserLists).get(ulist_id)
except Exception:
flash(l_(u'List error'), 'error')
redirect(request.headers['Referer'])
if (ulist in user.lists) or (user in ulist.permitusers):
if ulist.elements:
import pickle
elements = [int(el) for el in pickle.loads(ulist.elements)]
if ulist.table == 'SCompounds':
scompound = DBSession.query(SCompound).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(SCompound.id.in_(elements))
else:
flash(l_(u'Table error'), 'error')
redirect(request.headers['Referer'])
else:
flash(l_(u'Permission denied'), 'error')
redirect(request.headers['Referer'])
for k, v in kw.iteritems():
if str(k) == 'desc' and str(v) != '1':
dsc = None
elif str(k) == 'order_by':
if v == 'gid':
order = SCompound.gid
elif v == 'create_date':
order = SCompound.create_date
elif v == 'etap_diff':
order = SCompound.etap_diff
elif v == 'status':
order = SCompound.status_id
elif v == 'priority':
order = SCompound.priority
elif v == 'status_id':
for sc in DBSession.query(SCompound).all():
if sc.stat2_date:
sc.diff_date = (datetime.now()-sc.stat2_date).days/7
else:
sc.diff_date = 0
order = (SCompound.status_id, SCompound.diff_date)
else:
order = v
if str(k) != 'select' and str(k) != 'remove' and str(v) != u'':
if str(k) == 'statusy':
if isinstance(kw['text_status'], (list, tuple)):
for stat in kw['text_status']:
tmpl += 'statusy' + '=' + str(stat) + '&'
else:
tmpl += 'statusy' + '=' + str(kw['text_status']) + '&'
else:
tmpl += str(k) + '=' + str(v) + '&'
else:
try:
if isinstance(kw['select'], basestring):
selection = [kw['select']]
else:
selection = [id for id in kw['select']]
except Exception:
selection = None
#SERCH OPTIONS START:
if search_clicked:
try:
smiles = str(kw['smiles'])
method = str(kw['method'])
except Exception:
smiles = None
method = None
pass
if smiles:
if checksmi(smiles):
from razi.functions import functions
from razi.expression import TxtMoleculeElement
if method == 'similarity':
from razi.postgresql_rdkit import tanimoto_threshold
threshold = float(user.threshold)/100.0
DBSession.execute(tanimoto_threshold.set(threshold))
limit = user.limit_sim
query_bfp = functions.morgan_b(TxtMoleculeElement(smiles), 2)
constraint = Compound.morgan.tanimoto_similar(query_bfp)
tanimoto_sml = Compound.morgan.tanimoto_similarity(query_bfp).label('tanimoto')
search = DBSession.query(SCompound, tanimoto_sml).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(constraint).order_by(desc(tanimoto_sml)).limit(limit).all()
scompound = ()
similarity = ()
for row in search:
scompound += (row[0], )
similarity += (row[1], )
items = user.items_per_page
page_url = paginate.PageURL_WebOb(request)
currentPage = paginate.Page(scompound, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage, user=user, status=status, tmpl=tmpl, kierownik = kierownik, page='synthesis', alltags=alltags, allstatus=allstatus, similarity=similarity, pname=pname, ulists=ulists, ulist=ulist)
elif method == 'substructure':
constraint = Compound.structure.contains(smiles)
scompound = DBSession.query(SCompound).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(constraint)
elif method == 'identity':
scompound = DBSession.query(SCompound).join(SCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(Compound.structure.equals(smiles))
else:
flash(l_(u'SMILES error'), 'warning')
redirect(request.headers['Referer'])
if kw.has_key('text_GID') and kw['text_GID'] !=u'':
try:
gid = int(kw['text_GID'])
scompound = scompound.filter_by(gid = gid )
except Exception as msg:
flash(l_(u'GID should be a number: %s' % msg), 'error')
redirect(request.headers['Referer'])
if kw.has_key('text_ID') and kw['text_ID'] !=u'':
try:
id = int(kw['text_ID'])
scompound = scompound.filter(SCompound.id == id)
except Exception as msg:
flash(l_(u'ID should be a number: %s' % msg), 'error')
redirect(request.headers['Referer'])
if kw.has_key('text_name') and kw['text_name'] !=u'':
scompound = scompound.filter(Compound.names.any(Names.name.like(kw['text_name'].strip().replace('*', '%'))))
if kw.has_key('text_lso') and kw['text_lso'] !=u'':
scompound = scompound.filter(SCompound.lso.like(kw['text_lso'].replace('*', '%')))
if kw.has_key('text_owner') and kw['text_owner'] !=u'':
scompound = scompound.filter(SCompound.owner.like(kw['text_owner'].replace('*', '%')))
if kw.has_key('text_principal') and kw['text_principal'] !=u'':
scompound = scompound.filter(SCompound.principal.like(kw['text_principal'].replace('*', '%')))
if kw.has_key('text_notes') and kw['text_notes'] !=u'':
scompound = scompound.filter(SCompound.notes.like(kw['text_notes'].replace('*', '%')))
if kw.has_key('text_priority') and kw['text_priority'] !=u'':
try:
id = int(kw['text_priority'])
scompound = scompound.filter(SCompound.priority == id)
except Exception as msg:
flash(l_(u'Priority should be a number from 0 to 5'), 'error')
redirect(request.headers['Referer'])
if kw.has_key('date_from') and kw['date_from'] !=u'':
date_from = datetime.strptime(str(kw['date_from']), '%Y-%m-%d')
scompound = scompound.filter(SCompound.create_date > date_from)
else:
date_from = None
if kw.has_key('date_to') and kw['date_to'] !=u'':
date_to = datetime.strptime(str(kw['date_to']), '%Y-%m-%d')
if date_from:
if date_to>date_from:
scompound = scompound.filter(SCompound.create_date < date_to)
else:
flash(l_(u'The End date must be later than the initial'), 'error')
redirect(request.headers['Referer'])
else:
scompound = scompound.filter(SCompound.create_date < date_to)
try:
tags = kw['text_tags']
except Exception:
tags = None
pass
if tags:
if isinstance(tags, basestring):
tagi = eval(tags)
if type(tagi) != type([]):
tagi = [int(tags)]
else:
tagi = [int(tid) for tid in tags]
scompound = scompound.filter(Compound.tags.any(Tags.id.in_(tagi)))
try:
statusy = kw['text_status']
if isinstance(statusy, basestring):
statusy = [int(statusy)]
else:
statusy = [int(sid) for sid in statusy]
statusy.sort()
except Exception as msg:
statusy = None
pass
if statusy:
scompound = scompound.filter(SCompound.status_id.in_(statusy))
lower_num = statusy[0]
if not lower_num <5:
lower_num = 1;
if kw.has_key('date_stat_from') and kw['date_stat_from'] !=u'':
date_stat_from = datetime.strptime(str(kw['date_stat_from']), '%Y-%m-%d')
| |
__author__ = 'ad'
import six
import json
from abc import ABCMeta
try:
from collections import OrderedDict
except ImportError:
# For python 2.6 additional package ordereddict should be installed
from ordereddict import OrderedDict
try:
from lxml.etree import fromstring as parse_xml_string
from lxml.etree import _Element as XMLElement
except ImportError:
from xml.etree.ElementTree import fromstring as parse_xml_string
from xml.etree.ElementTree import Element as XMLElement
@six.add_metaclass(ABCMeta)
class BaseField(object):
    """Common machinery shared by every field type.

    A field knows whether it is required, the name it is reported under in
    error messages, and an optional default substituted when no value is
    present.
    """

    def __init__(self, required=False, field_name=None, default=None):
        super(BaseField, self).__init__()
        self.required = required
        self.field_name = field_name
        self.default = default

    def check_default_value(self, value):
        """Return the configured default when ``value`` is None, else ``value``."""
        if value is not None or self.default is None:
            return value
        return self.default

    def validate(self, value):
        """ Validate ``value``
        :raise ValueError: in case of validation errors
        """
        if self.required and value is None:
            raise ValueError(
                "Missed value for the required field: {0}".format(
                    self.field_name))

    def from_python(self, value):
        """Serialize ``value``; the base implementation is a pass-through."""
        return value

    def to_python(self, value):
        """Apply the default, validate, and return the python representation.

        Subclasses that need real conversion override this and call
        super().to_python at the end.
        """
        value = self.check_default_value(value)
        self.validate(value)
        return value
class Null(BaseField):
    """ Class represent JSON null """

    def validate(self, value):
        """Accept only None (after the base required-check)."""
        super(Null, self).validate(value)
        if value is None:
            return
        raise ValueError('Expected None, got {0}'.format(value))
class Choice(BaseField):
    """ Field with a set of choices.

    Choices may be of any type; a value is accepted only when it is a
    member of the provided choices collection (or None).
    """

    def __init__(self, choices=None, **kwargs):
        super(Choice, self).__init__(**kwargs)
        self._choices = choices if choices else set()

    def validate(self, value):
        """ Check ``value`` is present in ``self._choices``.
        """
        super(Choice, self).validate(value)
        if value is None:
            return
        if value not in self._choices:
            raise ValueError(
                "Got an unexpected value in the field `{0}`: {1}. "
                "Value should be one of: {2}.".format(
                    self.field_name, value, self._choices))
class String(BaseField):
    """
    Class represent JSON string type

    >>> some_field = String(max_len=1000)
    >>> some_field.to_python("Some thing") == "Some thing"
    """

    def __init__(self, max_len=None, **kwargs):
        """
        Constructor

        :param max_len: Restrict maximum length of the field
        :type max_len: int
        """
        super(String, self).__init__(**kwargs)
        self._max_len = max_len

    def validate(self, value):
        """
        Check ``value`` is a string no longer than ``max_len``.

        :param value: value to validate
        :raise ValueError: if ``value`` is not a string or exceeds max_len
        """
        super(String, self).validate(value)
        if value is None:
            return
        if not isinstance(value, six.string_types):
            raise ValueError(
                "{0!r} expected to be string but got {1}".format(
                    value, type(value).__name__))
        if self._max_len is not None:
            value_len = len(value)
            if value_len > self._max_len:
                # Fixed message: it previously read "Expected max length
                # more than {1}", inverting the constraint; now aligned
                # with List's wording.
                raise ValueError(
                    "Length of field exceeds maximum allowed: {0}. "
                    "Expected value of length not more than {1}".format(
                        value_len, self._max_len))
class Bool(BaseField):
    """
    Class represent JSON bool type

    >>> some_field = Bool()
    >>> some_field.to_python(True) == True
    """

    def validate(self, value):
        """
        Check ``value`` is a bool (or None).

        :param value: value to validate
        :type value: bool
        :raise ValueError: if ``value`` has any other type
        """
        super(Bool, self).validate(value)
        if value is None:
            return
        if not isinstance(value, bool):
            raise ValueError("{0!r} expected to be bool".format(value))
class Int(BaseField):
    """
    Class represent JSON integer type

    >>> some_field = Int()
    >>> some_field.to_python(1) == 1
    """

    def validate(self, value):
        """
        Check ``value`` is an integer (or None).

        :param value: value to validate
        :type value: int or long
        :raise ValueError: if ``value`` has any other type
        """
        super(Int, self).validate(value)
        if value is None:
            return
        if not isinstance(value, six.integer_types):
            raise ValueError("{0!r} expected to be integer".format(value))
class Float(BaseField):
    """
    Class represent JSON float type

    >>> some_field = Float()
    >>> some_field.to_python(1.0) == 1.0
    """

    def validate(self, value):
        """
        Validate value to match rules

        :param value: value to validate
        :type value: float
        :return: None
        :raise ValueError: if ``value`` is not a float
        """
        super(Float, self).validate(value)
        if (value is not None) and not isinstance(value, float):
            # Fixed message (and class docstring): both previously said
            # "integer" -- a copy-paste from Int.
            raise ValueError(
                "{0!r} expected to be float but got {1}".format(
                    value, type(value).__name__))
class List(BaseField):
    """
    Class represent JSON list type

    >>> list_field = List(String(max_len=100))
    >>> list_field.to_python(["Some string"]) == ["Some string"]
    >>> list_field.to_python([2])
    Traceback (most recent call last):
    ...
    ValueError: '2' expected to be string
    """

    def __init__(self, element_type, min_len=None, max_len=None, **kwargs):
        """
        Constructor for List field type

        :param element_type: list element type
        :type element_type: instance of BaseField
        :param min_len: minimum allowed number of elements, if any
        :type min_len: int
        :param max_len: maximum allowed number of elements, if any
        :type max_len: int
        """
        super(List, self).__init__(**kwargs)
        if not isinstance(element_type, BaseField):
            raise ValueError(
                "Invalid type of 'element_type': expected to be instance of "
                "subclass of BaseField but got {0!r}".format(element_type))
        self._min_len = min_len
        self._max_len = max_len
        self._element_type = element_type

    def validate(self, value):
        """
        Validate value to match rules

        :param value: value to validate
        :type value: list
        :return: None
        :raise ValueError: if ``value`` is not a list or its length is
            outside the configured bounds
        """
        super(List, self).validate(value)
        if value is None:
            return
        if not isinstance(value, list):
            raise ValueError("{0!r} expected to be list".format(value))
        value_len = len(value)
        if (self._max_len is not None) and (value_len > self._max_len):
            raise ValueError(
                "Length of field exceeds maximum allowed: {0}. "
                "Expected value of length not more than {1}".format(
                    value_len, self._max_len))
        if (self._min_len is not None) and (value_len < self._min_len):
            # Bug fix: this message previously interpolated self._max_len
            # instead of self._min_len (copy-paste from the branch above).
            raise ValueError(
                "Length of field is less than minimum allowed: {0}. "
                "Expected value of length not less than {1}".format(
                    value_len, self._min_len))

    def to_python(self, value):
        """
        Convert value to python representation

        :param value: a list to process
        :type value: list
        :return: list
        :rtype: list
        """
        value = self.check_default_value(value)
        if value is not None:
            # Convert each element through the configured element field.
            value = [self._element_type.to_python(element) for element in value]
        return super(List, self).to_python(value)
class Map(BaseField):
    """
    Class represent JSON object type

    >>> some_field = Map(String, List(String))
    >>> some_field.to_python({"f1": ["val"]}) == {"f1": ["val"]}
    >>> some_field.to_python({2: ["val"]})
    Traceback (most recent call last):
    ...
    ValueError: '2' expected to be string
    """

    def __init__(self, key_type, value_type, **kwargs):
        """
        Constructor for Map field type

        :param key_type: field type applied to every key
        :param value_type: field type applied to every value
        """
        super(Map, self).__init__(**kwargs)
        if not isinstance(key_type, BaseField):
            raise ValueError(
                "Invalid type of 'key_type': expected to be instance of "
                "subclass of BaseField but it is {0!r}".format(key_type))
        if not isinstance(value_type, BaseField):
            raise ValueError(
                "Invalid type of 'value_type': expected to be instance "
                "of subclass of BaseField but it is {0!r}".format(value_type))
        self._value_type = value_type
        self._key_type = key_type

    def _convert_pair(self, key, val):
        """Run one (key, value) pair through the configured field types."""
        return self._key_type.to_python(key), self._value_type.to_python(val)

    def validate(self, value):
        """
        Validate value to match rules

        :param value: value to validate
        :type value: dict
        :return: None
        """
        super(Map, self).validate(value)
        if value is None:
            return
        if not isinstance(value, dict):
            raise ValueError("{0!r} expected to be dict".format(value))
        # Converting every pair raises on the first invalid key or value.
        for key, val in value.items():
            self._convert_pair(key, val)

    def to_python(self, value):
        """
        Convert ``value`` (a dict, or a list of dicts to be merged) into an
        OrderedDict with converted keys and values.

        :param value: value to convert
        :type value: list or dict
        :return: OrderedDict or None
        """
        from pyraml.parser import ParseContext
        value = self.check_default_value(value)
        if value is not None:
            # At this point we could get a list of dicts or a dict.
            if isinstance(value, ParseContext):
                value = value.data
            if isinstance(value, list):
                merged = OrderedDict()
                for item in value:
                    if not isinstance(item, (dict, OrderedDict)):
                        raise ValueError("{0!r} expected to be dict or list of "
                                         "dict".format(value))
                    for key, val in item.items():
                        new_key, new_val = self._convert_pair(key, val)
                        merged[new_key] = new_val
                value = merged
            else:
                value = OrderedDict(
                    self._convert_pair(key, val)
                    for key, val in value.items())
        return super(Map, self).to_python(value)
class Reference(BaseField):
    """
    Class represent reference to another model

    >>> from model import Model
    >>> class RamlDocumentation(Model):
    >>>     content = String()
    >>>     title = String()
    >>> some_field = List(Reference(RamlDocumentation))
    >>> doc = RamlDocumentation(content="Test content", title="Title")
    >>> some_field.to_python([doc]) == [doc]
    >>> some_field.to_python([2])
    Traceback (most recent call last):
    ...
    ValueError: '2' expected to be RamlDocumentation
    """

    def __init__(self, ref_class, **kwargs):
        """
        Constructor for Reference

        :param ref_class: model class to reference to, either the class
            itself or its dotted import path
        :type ref_class: class of pyraml.model.Model or str
        :param kwargs: additional attributes for BaseField constructor
        :type kwargs: dict
        """
        super(Reference, self).__init__(**kwargs)
        self.ref_class = ref_class

    def _lazy_import(self):
        """Resolve ``self.ref_class`` when it was given as a dotted path.

        E.g. "pyraml.entities.RamlTrait" is imported once and the resolved
        class object replaces the string.
        """
        if not isinstance(self.ref_class, six.string_types):
            return
        module_path, _, class_name = self.ref_class.rpartition('.')
        module = __import__(module_path, fromlist=[class_name])
        self.ref_class = getattr(module, class_name)

    def validate(self, value):
        """
        Check ``value`` is an instance of the referenced model (or None).

        :param value: value to validate
        :type value: pyraml.model.Model
        :return: None
        """
        self._lazy_import()
        super(Reference, self).validate(value)
        if value is not None and not isinstance(value, self.ref_class):
            raise ValueError("{0!r} expected to be {1} or dict".format(
                value, self.ref_class))

    def to_python(self, value):
        """
        Convert ``value`` into an instance of the referenced model.

        :param value: a model instance, a JSON dict, or None
        :type value: pyraml.model.Model or dict
        :return: instance of ``self.ref_class``
        """
        self._lazy_import()
        value = self.check_default_value(value)
        # Models exposing 'notNull' get an explicit marker instead of None.
        if hasattr(self.ref_class, 'notNull') and value is None:
            value = {'notNull': True}
        if isinstance(value, self.ref_class):
            # Already an instance of ref_class: nothing to convert.
            converted = value
        elif isinstance(value, dict):
            # JSON object: build a `ref_class` from it.
            converted = self.ref_class.from_json(value)
        elif value is None:
            # Nothing supplied: instantiate an empty `ref_class`.
            converted = self.ref_class()
        else:
            raise ValueError("{0!r} expected to be {1} or dict".format(
                value, self.ref_class))
        return super(Reference, self).to_python(converted)
class Or(BaseField):
"""
| |
(redis_client.config_get("client-output-buffer-limit")
["client-output-buffer-limit"])
cur_config_list = cur_config.split()
assert len(cur_config_list) == 12
cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"]
redis_client.config_set("client-output-buffer-limit",
" ".join(cur_config_list))
# Put a time stamp in Redis to indicate when it was started.
redis_client.set("redis_start_time", time.time())
# Record the log files in Redis.
record_log_files_in_redis(address(node_ip_address, port), node_ip_address,
[stdout_file, stderr_file])
return port, p
def start_log_monitor(redis_address, node_ip_address, stdout_file=None,
                      stderr_file=None, cleanup=True):
    """Start a log monitor process.

    Args:
        redis_address (str): The address of the Redis instance.
        node_ip_address (str): The IP address of the node that this log
            monitor is running on.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.
    """
    # Bug fix: the default was previously `cleanup=cleanup`, which captures
    # whatever module-level name `cleanup` is bound to at definition time
    # (presumably the services.cleanup() function mentioned above -- always
    # truthy) and is inconsistent with the sibling start_* functions, which
    # all default to True.
    log_monitor_filepath = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "log_monitor.py")
    p = subprocess.Popen(["python", log_monitor_filepath,
                          "--redis-address", redis_address,
                          "--node-ip-address", node_ip_address],
                         stdout=stdout_file, stderr=stderr_file)
    if cleanup:
        all_processes[PROCESS_TYPE_LOG_MONITOR].append(p)
    record_log_files_in_redis(redis_address, node_ip_address,
                              [stdout_file, stderr_file])
def start_global_scheduler(redis_address, node_ip_address, stdout_file=None,
                           stderr_file=None, cleanup=True):
    """Start a global scheduler process.

    Args:
        redis_address (str): The address of the Redis instance.
        node_ip_address: The IP address of the node that this scheduler will
            run on.
        stdout_file: A file handle opened for writing to redirect stdout to.
            If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to.
            If no redirection should happen, then this should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.
    """
    scheduler_process = global_scheduler.start_global_scheduler(
        redis_address, node_ip_address,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
    if cleanup:
        all_processes[PROCESS_TYPE_GLOBAL_SCHEDULER].append(scheduler_process)
    # Remember where this process logs so tooling can locate its output.
    record_log_files_in_redis(redis_address, node_ip_address,
                              [stdout_file, stderr_file])
def start_webui(redis_address, node_ip_address, backend_stdout_file=None,
                backend_stderr_file=None, polymer_stdout_file=None,
                polymer_stderr_file=None, cleanup=True):
    """Attempt to start the Ray web UI.

    Args:
        redis_address (str): The address of the Redis server.
        node_ip_address: The IP address of the node that this process will
            run on.
        backend_stdout_file: A file handle opened for writing to redirect the
            backend stdout to. If no redirection should happen, then this
            should be None.
        backend_stderr_file: A file handle opened for writing to redirect the
            backend stderr to. If no redirection should happen, then this
            should be None.
        polymer_stdout_file: A file handle opened for writing to redirect the
            polymer stdout to. If no redirection should happen, then this
            should be None.
        polymer_stderr_file: A file handle opened for writing to redirect the
            polymer stderr to. If no redirection should happen, then this
            should be None.
        cleanup (bool): True if using Ray in local mode. If cleanup is True,
            then this process will be killed by services.cleanup() when the
            Python process that imported services exits.

    Return:
        True if the web UI was successfully started, otherwise false.
    """
    # Paths are relative to this file's location within the Ray package tree.
    webui_backend_filepath = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "../../webui/backend/ray_ui.py")
    webui_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                   "../../webui/")
    if sys.version_info >= (3, 0):
        python_executable = "python"
    else:
        # If the user is using Python 2, it is still possible to run the
        # webserver separately with Python 3, so try to find a Python 3
        # executable.
        try:
            python_executable = subprocess.check_output(
                ["which", "python3"]).decode("ascii").strip()
        except Exception as e:
            print("Not starting the web UI because the web UI requires "
                  "Python 3.")
            return False
    backend_process = subprocess.Popen([python_executable,
                                        webui_backend_filepath,
                                        "--redis-address", redis_address],
                                       stdout=backend_stdout_file,
                                       stderr=backend_stderr_file)
    # Give the backend a moment to fail fast on startup errors before polling.
    time.sleep(0.1)
    if backend_process.poll() is not None:
        # Failed to start the web UI.
        print("The web UI failed to start.")
        return False
    # Try to start polymer. If this fails, it may that port 8080 is already in
    # use. It'd be nice to test for this, but doing so by calling "bind" may
    # start using the port and prevent polymer from using it.
    try:
        polymer_process = subprocess.Popen(["polymer", "serve", "--port",
                                            "8080"],
                                           cwd=webui_directory,
                                           stdout=polymer_stdout_file,
                                           stderr=polymer_stderr_file)
    except Exception as e:
        print("Failed to start polymer.")
        # Kill the backend since it won't work without polymer.
        try:
            backend_process.kill()
        except Exception as e:
            pass
        return False
    # Unfortunately this block of code is unlikely to catch any problems
    # because when polymer throws an error on startup, it is typically after
    # several seconds.
    time.sleep(0.1)
    if polymer_process.poll() is not None:
        # Failed to start polymer.
        print("Failed to serve the web UI with polymer.")
        # Kill the backend since it won't work without polymer.
        try:
            backend_process.kill()
        except Exception as e:
            pass
        return False
    if cleanup:
        # Register both children so services.cleanup() can terminate them.
        all_processes[PROCESS_TYPE_WEB_UI].append(backend_process)
        all_processes[PROCESS_TYPE_WEB_UI].append(polymer_process)
    record_log_files_in_redis(redis_address, node_ip_address,
                              [backend_stdout_file, backend_stderr_file,
                               polymer_stdout_file, polymer_stderr_file])
    return True
def start_local_scheduler(redis_address,
                          node_ip_address,
                          plasma_store_name,
                          plasma_manager_name,
                          worker_path,
                          plasma_address=None,
                          stdout_file=None,
                          stderr_file=None,
                          cleanup=True,
                          num_cpus=None,
                          num_gpus=None,
                          num_workers=0):
  """Start a local scheduler process.

  Args:
    redis_address (str): The address of the Redis instance.
    node_ip_address (str): The IP address of the node that this local scheduler
      is running on.
    plasma_store_name (str): The name of the plasma store socket to connect to.
    plasma_manager_name (str): The name of the plasma manager socket to connect
      to.
    worker_path (str): The path of the script to use when the local scheduler
      starts up new workers.
    plasma_address: Presumably the externally visible address of the plasma
      manager, forwarded verbatim to the scheduler — confirm against
      ray.local_scheduler.start_local_scheduler.
    stdout_file: A file handle opened for writing to redirect stdout to. If no
      redirection should happen, then this should be None.
    stderr_file: A file handle opened for writing to redirect stderr to. If no
      redirection should happen, then this should be None.
    cleanup (bool): True if using Ray in local mode. If cleanup is true, then
      this process will be killed by services.cleanup() when the Python process
      that imported services exits.
    num_cpus: The number of CPUs the local scheduler should be configured with.
    num_gpus: The number of GPUs the local scheduler should be configured with.
    num_workers (int): The number of workers that the local scheduler should
      start.

  Return:
    The name of the local scheduler socket.
  """
  if num_cpus is None:
    # By default, use the number of hardware execution threads for the number
    # of cores.
    num_cpus = psutil.cpu_count()
  if num_gpus is None:
    # By default, assume this node has no GPUs.
    num_gpus = 0
  print("Starting local scheduler with {} CPUs and {} GPUs.".format(num_cpus,
                                                                    num_gpus))
  local_scheduler_name, p = ray.local_scheduler.start_local_scheduler(
      plasma_store_name,
      plasma_manager_name,
      worker_path=worker_path,
      node_ip_address=node_ip_address,
      redis_address=redis_address,
      plasma_address=plasma_address,
      use_profiler=RUN_LOCAL_SCHEDULER_PROFILER,
      stdout_file=stdout_file,
      stderr_file=stderr_file,
      static_resource_list=[num_cpus, num_gpus],
      num_workers=num_workers)
  if cleanup:
    # Register the process so services.cleanup() can terminate it on exit.
    all_processes[PROCESS_TYPE_LOCAL_SCHEDULER].append(p)
  record_log_files_in_redis(redis_address, node_ip_address,
                            [stdout_file, stderr_file])
  return local_scheduler_name
def start_objstore(node_ip_address, redis_address, object_manager_port=None,
store_stdout_file=None, store_stderr_file=None,
manager_stdout_file=None, manager_stderr_file=None,
cleanup=True, objstore_memory=None):
"""This method starts an object store process.
Args:
node_ip_address (str): The IP address of the node running the object store.
redis_address (str): The address of the Redis instance to connect to.
object_manager_port (int): The port to use for the object manager. If this
is not provided, one will be generated randomly.
store_stdout_file: A file handle opened for writing to redirect stdout to.
If no redirection should happen, then this should be None.
store_stderr_file: A file handle opened for writing to redirect stderr to.
If no redirection should happen, then this should be None.
manager_stdout_file: A file handle opened for writing to redirect stdout
to. If no redirection should happen, then this should be None.
manager_stderr_file: A file handle opened for writing to redirect stderr
to. If no redirection should happen, then this should be None.
cleanup (bool): True if using Ray in local mode. If cleanup is true, then
this process will be killed by serices.cleanup() when the Python process
that imported services exits.
objstore_memory: The amount of memory (in bytes) to start the object store
with.
Return:
A tuple of the Plasma store socket name, the Plasma manager socket name,
and the plasma manager port.
"""
if objstore_memory is None:
# Compute a fraction of the system memory for the Plasma store to use.
system_memory = psutil.virtual_memory().total
if sys.platform == "linux" or sys.platform == "linux2":
# On linux we use /dev/shm, its size is half the size of the physical
# memory. To not overflow it, we set the plasma memory limit to 0.4 times
# the size of the physical memory.
objstore_memory = int(system_memory * 0.4)
# Compare the requested memory size to the memory available in | |
"""Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import COCOroot, COCODetection
import torch.utils.data as data
from models.refinedet import build_refinedet
# from models.s2rn import build_s2rn
from layers import Detect_RefineDet
from utils.nms_wrapper import nms
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
def str2bool(v):
    """Interpret common truthy strings ("yes", "true", "t", "1") as True."""
    truthy_values = ("yes", "true", "t", "1")
    return v.lower() in truthy_values
# Command-line interface for the evaluation script.
parser = argparse.ArgumentParser(
    description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--trained_model',
                    default='weights/ssd300_mAP_77.43_v2.pth', type=str,
                    help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
                    help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
                    help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
                    help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use cuda to train model')
# parser.add_argument('--voc_root', default=VOC_ROOT,
#                     help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
                    help='Cleanup and remove results files following eval')
parser.add_argument('--input_size', default='512', choices=['320', '512'],
                    type=str, help='RefineDet320 or RefineDet512')
# Fix: type=bool is an argparse footgun (bool('False') == True); str2bool
# parses truthy strings correctly and keeps the same default.
parser.add_argument('--retest', default=False, type=str2bool,
                    help='test cache results')
parser.add_argument('--show_image', action="store_true", default=False, help='show detection results')
parser.add_argument('--vis_thres', default=0.5, type=float, help='visualization_threshold')
parser.add_argument('--prefix', default='weights/lr_5e4', type=str, help='File path to save results')
args = parser.parse_args()

# Create the results directory up front. os.makedirs(..., exist_ok=True)
# replaces check-then-mkdir: it avoids the TOCTOU race and supports nested
# --save_folder paths, which bare os.mkdir would reject.
os.makedirs(args.save_folder, exist_ok=True)
def check_keys(model, pretrained_state_dict):
    """Report overlap between a checkpoint and the model's state dict.

    Prints how many model keys are missing from the checkpoint, how many
    checkpoint keys the model does not use, and how many are shared.
    Raises AssertionError when no keys are shared; returns True otherwise.
    """
    checkpoint_keys = set(pretrained_state_dict.keys())
    model_keys = set(model.state_dict().keys())
    shared_keys = model_keys & checkpoint_keys
    print('Missing keys:{}'.format(len(model_keys - checkpoint_keys)))
    print('Unused checkpoint keys:{}'.format(len(checkpoint_keys - model_keys)))
    print('Used keys:{}'.format(len(shared_keys)))
    assert len(shared_keys) > 0, 'load NONE from pretrained checkpoint'
    return True
def remove_prefix(state_dict, prefix):
    ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
    print('remove prefix \'{}\''.format(prefix))
    stripped = {}
    for key, value in state_dict.items():
        if key.startswith(prefix):
            key = key.split(prefix, 1)[-1]
        stripped[key] = value
    return stripped
def load_model(model, pretrained_path, load_to_cpu):
    """Load checkpoint weights from ``pretrained_path`` into ``model``.

    Handles both plain state dicts and checkpoints nested under a
    'state_dict' key, and strips a leading 'module.' (DataParallel) prefix
    before loading (non-strict).
    """
    print('Loading pretrained model from {}'.format(pretrained_path))
    if load_to_cpu:
        map_location = lambda storage, loc: storage
    else:
        device = torch.cuda.current_device()
        map_location = lambda storage, loc: storage.cuda(device)
    checkpoint = torch.load(pretrained_path, map_location=map_location)
    if "state_dict" in checkpoint.keys():
        checkpoint = checkpoint['state_dict']
    checkpoint = remove_prefix(checkpoint, 'module.')
    check_keys(model, checkpoint)
    model.load_state_dict(checkpoint, strict=False)
    return model
class Timer(object):
    """A simple timer accumulating total, last, and average wall-clock time."""

    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        # time.time is used instead of time.clock because time.clock
        # does not normalize for multithreading
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing; return the running average (default) or the last diff."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
class BaseTransform(object):
    """Test-time transform applied to a cv2 image before feeding the network:
    resize -> mean subtraction -> channel swap -> tensorize.

    Arguments:
        resize (int): input dimension to SSD
        rgb_means ((int,int,int)): average RGB of the dataset
            (104,117,123)
        swap ((int,int,int)): final order of channels
    Returns:
        transform (transform) : callable transform to be applied to test/val
            data
    """

    def __init__(self, resize, rgb_means, swap=(2, 0, 1)):
        self.means = rgb_means
        self.resize = resize
        self.swap = swap

    # assume input is cv2 img for now
    def __call__(self, img):
        # Bilinear interpolation (same choice as the original list's first entry).
        resized = cv2.resize(
            np.float32(img), (self.resize, self.resize),
            interpolation=cv2.INTER_LINEAR).astype(np.float32)
        resized -= self.means
        resized = resized.transpose(self.swap)
        return torch.from_numpy(resized)
def test_net(save_folder, net, device, num_classes, dataset, transform, top_k, max_per_image=300, confidence_threshold=0.005, nms_threshold=0.4, AP_stats=None):
    """Run the detector over ``dataset``, apply per-class NMS, and evaluate.

    Side effects: writes visualisations to ``save_folder/gt_img`` when
    ``args.show_image`` is set, and appends the six metrics returned by
    ``dataset.evaluate_detections`` to ``AP_stats`` (so when provided it
    must be a dict of lists with the 'ap*' keys used below).

    NOTE(review): depends on module-level ``args`` (retest / show_image /
    cuda / vis_thres) rather than on parameters.
    """
    num_images = len(dataset)
    # all_boxes[cls][image] = N x 5 detections [x1, y1, x2, y2, score]
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(num_classes)]

    # timers
    _t = {'im_detect': Timer(), 'misc': Timer()}

    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    det_file = os.path.join(save_folder, 'detections.pkl')

    if args.retest:
        # Re-evaluate previously cached detections instead of re-running the net.
        f = open(det_file,'rb')
        all_boxes = pickle.load(f)
        print('Evaluating detections')
        dataset.evaluate_detections(all_boxes, save_folder)
        return

    for i in range(num_images):
        img, target = dataset.pull_image(i)
        # scale maps network-space boxes back to original-image pixels
        scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
        x = transform(img).unsqueeze(0)
        x = x.to(device)
        scale = scale.to(device)

        if args.show_image:
            h, w, _ = img.shape
            xr = net.size / w
            yr = net.size / h
            img_gt = img.astype(np.uint8)
            img_gt = cv2.resize(img_gt, (net.size, net.size),interpolation=cv2.INTER_LINEAR)
            # Draw ground-truth boxes (green) in resized-image coordinates.
            for b in target:
                b[0] *= xr
                b[2] *= xr
                b[1] *= yr
                b[3] *= yr
                b = list(map(int, b))
                cv2.rectangle(img_gt, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 2)
                # cx = b[2]
                # cy = b[1]
                # text = "ship"
                # cv2.putText(img_gt, text, (cx, cy), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0))
            _t['im_detect'].tic()
            # boxes, scores = net(x, i, img_gt)
            boxes, scores = net(x)
        else:
            _t['im_detect'].tic()
            boxes, scores = net(x)
        # Drop the batch dimension (batch size is always 1 here).
        boxes = boxes[0]
        scores=scores[0]

        # scale each detection back up to the image
        boxes *= scale
        boxes = boxes.cpu().numpy()
        scores = scores.cpu().numpy()

        # Class 0 is background; threshold + NMS each foreground class.
        for j in range(1, num_classes):
            inds = np.where(scores[:, j] > confidence_threshold)[0]
            if len(inds) == 0:
                all_boxes[j][i] = np.empty([0, 5], dtype=np.float32)
                continue
            c_bboxes = boxes[inds]
            c_scores = scores[inds, j]
            # keep top-K before NMS
            order = c_scores.argsort()[::-1][:top_k]
            c_bboxes = c_bboxes[order]
            c_scores = c_scores[order]
            c_dets = np.hstack((c_bboxes, c_scores[:, np.newaxis])).astype(
                np.float32, copy=False)
            keep = nms(c_dets, nms_threshold, force_cpu=(not args.cuda))
            c_dets = c_dets[keep, :]
            c_dets = c_dets[:max_per_image, :]
            all_boxes[j][i] = c_dets
        # NOTE(review): toc() here times forward pass + NMS together, so the
        # FPS printed below is end-to-end, not pure inference.
        _t['im_detect'].toc()
        # print('im_detect: {:d}/{:d} forward_nms_time{:.4f}s'.format(i + 1, num_images, _t['im_detect'].average_time))

        if args.show_image:
            # Draw kept detections (red) above the visualisation threshold.
            # NOTE(review): the in-place *= below mutates rows of
            # all_boxes[1][i] — confirm this is intended for display only.
            boxes = all_boxes[1][i][:]
            for b in boxes:
                b[0] *= xr
                b[2] *= xr
                b[1] *= yr
                b[3] *= yr
                if b[4] < args.vis_thres:
                    continue
                b = list(map(int, b))
                cv2.rectangle(img_gt, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
                cx = b[2]
                cy = b[1] + 12
                # text = "{:.2f}".format(b[4])
                # cv2.putText(img_gt, text, (cx, cy), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255))
            # cv2.imshow('res', img_gt)
            # cv2.waitKey(0)
            save_gt_dir = os.path.join(save_folder, 'gt_img')
            if not os.path.exists(save_gt_dir):
                os.mkdir(save_gt_dir)
            cv2.imwrite(save_gt_dir + f'/{i}.png',img_gt, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])

    # with open(det_file, 'wb') as f:
    #     pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('\nFPS: {} {} \n'.format(1 / (_t['im_detect'].average_time), 1 / _t['im_detect'].average_time))
    print('Evaluating detections')
    stats = dataset.evaluate_detections(all_boxes, save_folder)
    # assumes evaluate_detections returns COCO-style
    # [AP, AP50, AP75, AP_small, AP_medium, AP_large] — TODO confirm
    AP_stats['ap'].append(stats[0])
    AP_stats['ap50'].append(stats[1])
    AP_stats['ap75'].append(stats[2])
    AP_stats['ap_small'].append(stats[3])
    AP_stats['ap_medium'].append(stats[4])
    AP_stats['ap_large'].append(stats[5])
if __name__ == '__main__':
    # --- device / default tensor type selection ---------------------------
    if torch.cuda.is_available():
        if args.cuda:
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        if not args.cuda:
            print("WARNING: It looks like you have a CUDA device, but aren't " +
                  "using CUDA.\nRun with --cuda for optimal training speed.")
            torch.set_default_tensor_type('torch.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')

    # --- experiment toggles (edit by hand) --------------------------------
    # args.trained_model = 'weights/lr_1e3/RefineDet512_COCO_final.pth'
    # args.trained_model = 'weights/lr_5e4/RefineDet512_COCO_final.pth'
    # args.cuda = False
    # args.retest = True
    # args.show_image = True
    # NOTE(review): the CLI values of --vis_thres and --prefix are clobbered
    # by the hard-coded assignments below; the last uncommented assignment
    # to ``prefix`` ('weights/2e3') wins.
    args.vis_thres = 0.3
    prefix = args.prefix
    # prefix = 'weights/lr_5e4'
    # prefix = 'weights/lr_1e3'
    prefix = 'weights/lr_2e3'
    prefix = 'weights/2e3'
    # prefix = 'weights/lr_3e3'
    # prefix = 'weights/lr_4e3'
    # prefix = 'weights/srn_1e3'
    # prefix = 'weights/srn_2e3'
    # prefix = 'weights/srn_3e3'
    # prefix = 'weights/srn_4e3'
    # prefix = 'weights/srnv2_4e3'
    save_folder = os.path.join(args.save_folder, prefix.split('/')[-1])

    # --- detection hyper-parameters ---------------------------------------
    nms_threshold = 0.49
    confidence_threshold = 0.01
    objectness_thre = 0.01
    num_classes = 2
    top_k = 1000
    keep_top_k = 500
    torch.set_grad_enabled(False)

    # load data
    rgb_means = (98.13131, 98.13131, 98.13131)
    dataset = COCODetection(COCOroot, [('sarship', 'test')], None)
    # dataset = COCODetection(COCOroot, [('sarship', 'test_inshore')], None)
    # dataset = COCODetection(COCOroot, [('sarship', 'test_offshore')], None)

    # load net
    detect = Detect_RefineDet(num_classes, int(args.input_size), 0, top_k, confidence_threshold, nms_threshold, objectness_thre, keep_top_k)
    net = build_refinedet('test', int(args.input_size), num_classes, detector=detect)
    # net = build_s2rn('test', int(args.input_size), num_classes, detector=detect)
    load_to_cpu = not args.cuda
    cudnn.benchmark = True
    device = torch.device('cuda' if args.cuda else 'cpu')

    ap_stats = {"ap": [], "ap50": [], "ap75": [], "ap_small": [], "ap_medium": [], "ap_large": [], "epoch": []}
    # NOTE(review): the second assignment overrides the first — epochs
    # 200, 205, ..., 295 plus the final checkpoint are evaluated.
    start_epoch = 10; step = 10
    start_epoch = 200; step = 5
    ToBeTested = []
    ToBeTested = [prefix + f'/RefineDet512_COCO_epoches_{epoch}.pth' for epoch in range(start_epoch, 300, step)]
    ToBeTested.append(prefix + '/RefineDet512_COCO_final.pth')
    # ToBeTested.append(prefix + '/RefineDet512_COCO_epoches_250.pth')
    # ToBeTested *= 5
    for index, model_path in enumerate(ToBeTested):
        args.trained_model = model_path
        net = load_model(net, args.trained_model, load_to_cpu)
        net.eval()
        print('Finished loading model!')
        # print(net)
        net = net.to(device)
        # evaluation
        # NOTE(review): for the appended 'final' checkpoint this epoch value
        # is only a label extrapolated from the sequence, not its true epoch.
        ap_stats['epoch'].append(start_epoch + index * step)
        print("evaluating epoch: {}".format(ap_stats['epoch'][-1]))
        test_net(save_folder, net, device, num_classes, dataset,
                 BaseTransform(net.size, rgb_means, (2, 0, 1)), top_k,
                 keep_top_k, confidence_threshold=confidence_threshold, nms_threshold=nms_threshold, AP_stats=ap_stats)

    print(ap_stats)
    res_file = os.path.join(save_folder, 'ap_stats.json')
    # Report the checkpoints with the best AP50 and the best AP.
    max_idx = np.argmax(np.asarray(ap_stats['ap50']))
    print('Best ap50: {:.4f} at epoch {}'.format(ap_stats['ap50'][max_idx], ap_stats['epoch'][max_idx]))
    print('ap: {:.4f}, ap50: {:.4f}, ap75: {:.4f}, ap_s: {:.4f}, ap_m: {:.4f}, ap_l: {:.4f}'.\
        format(ap_stats['ap'][max_idx], ap_stats['ap50'][max_idx], ap_stats['ap75'][max_idx], ap_stats['ap_small'][max_idx], ap_stats['ap_medium'][max_idx], ap_stats['ap_large'][max_idx]))
    max_idx = np.argmax(np.asarray(ap_stats['ap']))
    print('Best ap : {:.4f} at epoch {}'.format(ap_stats['ap'][max_idx], ap_stats['epoch'][max_idx]))
    print('ap: {:.4f}, ap50: {:.4f}, ap75: {:.4f}, ap_s: {:.4f}, ap_m: {:.4f}, ap_l: {:.4f}'.\
        format(ap_stats['ap'][max_idx], ap_stats['ap50'][max_idx], ap_stats['ap75'][max_idx], ap_stats['ap_small'][max_idx], ap_stats['ap_medium'][max_idx], ap_stats['ap_large'][max_idx]))

    import json
    print('Writing ap stats json to {}'.format(res_file))
    with open(res_file, 'w') as fid:
        json.dump(ap_stats, fid)
    with open(res_file) as f:
        ap_stats = json.load(f)

    from plot_curve import plot_map, plot_loss
    # NOTE(review): the second fig_name assignment overrides the first.
    fig_name = 'ap.png'
    fig_name = 'ap_last10.png'
    metrics = ['ap', 'ap75', 'ap50', 'ap_small', 'ap_medium', 'ap_large']
    legend = ['ap', 'ap75', 'ap50', 'ap_small', 'ap_medium', 'ap_large']
    # plot_map(save_folder, ap_stats, metrics, legend, fig_name)
    # txt_log = prefix + '/log.txt'
    # plot_loss(save_folder, txt_log)
"""
refinedet
lr_2e3
Best ap50: 0.9802 at epoch 240
ap: 0.6022, ap50: 0.9802, ap75: 0.6750, | |
"""Defines procedures for training, and evaluation automatic camfi annotation models,
and for using them for making automatic annotations (inference). Depends on camfi.util,
camfi.datamodel.autoannotation, camfi.datamodel.geometry, camfi.datamode.via, as well
as ._torchutils and ._models."""
from datetime import datetime
import itertools
from math import pi
from pathlib import Path
from typing import Any, Callable, Optional, Union
from sys import stderr
import numpy as np
from pydantic import (
BaseModel,
DirectoryPath,
NonNegativeInt,
NonNegativeFloat,
PositiveFloat,
PositiveInt,
ValidationError,
validator,
)
from scipy import sparse
import torch
from torch.utils.data import DataLoader
from torchvision.models.detection.mask_rcnn import MaskRCNN
from tqdm import tqdm, trange
from camfi.datamodel.autoannotation import CamfiDataset, Prediction
from camfi.datamodel.geometry import (
BoundingBox,
CircleShapeAttributes,
PolylineShapeAttributes,
)
from camfi.datamodel.via import (
ViaFileAttributes,
ViaMetadata,
ViaProject,
ViaRegion,
ViaRegionAttributes,
)
from camfi.models import model_urls
from camfi.util import (
endpoint_truncate,
smallest_enclosing_circle,
weighted_intersection_over_minimum,
Field,
)
from ._torchutils import collate_fn, get_model_instance_segmentation, train_one_epoch
def load_annotation_model(model_path_or_url: Union[Path, str]) -> MaskRCNN:
    """Loads a camfi annotation model. Accepts any model key provided in
    camfi.models, a Path object, or a URL str.

    Parameters
    ----------
    model_path_or_url : Union[Path, str]
        Path to .pth file specifying model parameters, model name defined in
        camfi.models.model_urls, or url to model to download from the internet.

    Returns
    -------
    model : MaskRCNN
        Instance segmentation model used for automatic annotation.
    """
    print(f"Loading model: {model_path_or_url}", file=stderr)
    model = get_model_instance_segmentation(2, pretrained=False)
    if isinstance(model_path_or_url, Path):
        state_dict = torch.load(model_path_or_url)
    else:
        # Model-name keys resolve through model_urls; anything else is
        # treated as a URL directly.
        url = model_urls.get(model_path_or_url, model_path_or_url)
        state_dict = torch.hub.load_state_dict_from_url(url)
    model.load_state_dict(state_dict)
    return model
def copy_annotation_model(model: MaskRCNN) -> MaskRCNN:
    """Copies a camfi annotation model.

    Parameters
    ----------
    model : MaskRCNN
        Model to copy.

    Returns
    -------
    model_copy : MaskRCNN
        A fresh model instance initialised with the weights of ``model``.
    """
    duplicate = get_model_instance_segmentation(2, pretrained=False)
    duplicate.load_state_dict(model.state_dict())
    return duplicate
def train_model(
    dataset: CamfiDataset,
    load_pretrained_model: Optional[Union[Path, str]] = None,
    device: Union[str, torch.device] = "cpu",
    batch_size: int = 5,
    num_workers: int = 2,
    num_epochs: int = 10,
    outdir: DirectoryPath = Path(),
    model_name: Optional[str] = None,
    save_intermediate: bool = False,
) -> Path:
    """Trains a camfi instance segmentation annotation model on specified dataset,
    saving the trained model to outdir.

    Parameters
    ----------
    dataset : CamfiDataset
        Dataset on which to train the model.
    load_pretrained_model : Optional[Union[Path, str]]
        Path or url to model parameters file. If set, will load the pretrained
        parameters. By default, will start with a model pre-trained on the Microsoft
        COCO dataset.
    device : Union[str, torch.device]
        E.g. "cpu" or "cuda". Training is typically much faster on a GPU. Use "cuda"
        for Nvidia GPUs.
    batch_size : int
        Number of images to load at once.
    num_workers : int
        Number of worker processes for data loader to spawn.
    num_epochs : int
        Number of epochs to train. Must be at least 1.
    outdir : DirectoryPath
        Path to directory where to save model(s).
    model_name : Optional[str]
        Identifier to include in model save file. By default the current date in
        YYYYmmdd format.
    save_intermediate : bool
        If True, model is saved after each epoch, not just after all epochs are
        complete. This is recommended, especially if training on a service which
        could terminate unpredictably (e.g. Google Colab).

    Returns
    -------
    model_path : Path
        Path to saved model.

    Raises
    ------
    ValueError
        If num_epochs < 1 (no training would occur and no model would be saved).
    """
    # Guard: with num_epochs < 1 the training loop never runs and the
    # original code raised UnboundLocalError on save_path at the return.
    if num_epochs < 1:
        raise ValueError(f"num_epochs must be >= 1, got {num_epochs}")

    # Parameter setting
    device = torch.device(device)
    if model_name is None:
        model_name = datetime.now().strftime("%Y%m%d")

    # Initialise data_loader
    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        collate_fn=collate_fn,
    )

    # Initialise model
    if load_pretrained_model is not None:
        model = load_annotation_model(load_pretrained_model)
    else:
        model = get_model_instance_segmentation(2)
    model.to(device)

    # Initialise optimiser and lr_scheduler
    params = [p for p in model.parameters() if p.requires_grad]
    optimiser = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimiser, step_size=3, gamma=0.1)

    # Train the model
    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimiser, data_loader, device, epoch, print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        if save_intermediate or epoch == num_epochs - 1:
            save_path = outdir / f"{model_name}_{epoch}_model.pth"
            torch.save(model.state_dict(), save_path)

    print(f"Training complete. Model saved at {save_path}")
    return save_path
class Annotator(BaseModel):
    """Provides methods for automatically annotating images of flying insects using a
    pre-trained instance segmentation model.

    Parameters
    ----------
    dataset : CamfiDataset
        Dataset to annotate.
    model : Union[str, Path, MaskRCNN]
        Either a path to state dict file which defines the segmentation model, or a url
        pointing to a model to download, or one of the model names defined in
        camfi.models.model_urls.
        Alternatively, a MaskRCNN instance can be given directly.
    device : Union[str, torch.device]
        Specifies device to run inference on. E.g. set to "cuda" to use an Nvidia GPU.
    backup_device : Optional[Union[str, torch.device]]
        Specifies device to run inference on when a runtime error occurs while using
        device. Probably only makes sense to set this to "cpu" if device="cuda". This
        option enables the annotator to leverage a GPU with limited memory capacity
        without crashing if a difficult image is encountered.
    backup_model: Optional[MaskRCNN]
        Defines the backup model. Will be automatically generated if backup_device is
        set. Should not be set manually.
    split_angle : PositiveFloat
        Approximate maximum angle between polyline segments in degrees. Note that this
        will immediately be converted to radians upon instantiation of Annotator.
    poly_order : PositiveInt
        Order of polynomial used for fitting motion blur paths.
    endpoint_method : Callable[[np.ndarray, ...], tuple[NonNegativeInt, NonNegativeInt]]
        Method to find endpoints of motion blurs. The first argument to this method
        should be a cropped mask np.ndarray.
    endpoint_extra_args : list[Any]
        Extra arguments to pass to endpoint_method.
    score_thresh : float
        Score threshold between 0.0 and 1.0 for automatic annotations to be kept.
    overlap_thresh : float
        Minimum proportion of overlap (weighted intersection over minimum) between two
        instance segmentation masks to infer that one of the masks should be discarded.
    edge_thresh : NonNegativeInt
        Minimum distance an annotation has to be from the edge of the image before it is
        converted from a polyline annotation to a circle annotation.
    """

    dataset: CamfiDataset
    # The default "release" is a model-name string; the "model" validator
    # coerces it (or a path/url) into a loaded MaskRCNN instance.
    model: MaskRCNN = "release"
    device: Union[str, torch.device] = "cpu"
    backup_device: Optional[Union[str, torch.device]] = None
    # Populated automatically from backup_device by a validator; never set manually.
    backup_model: Optional[MaskRCNN] = None
    # Supplied in degrees; a validator converts the stored value to radians.
    split_angle: PositiveFloat = 15.0
    poly_order: PositiveInt = 2
    endpoint_method: Callable[
        ..., tuple[NonNegativeInt, NonNegativeInt]
    ] = endpoint_truncate
    endpoint_extra_args: list[Any] = [10]
    score_thresh: float = 0.4
    overlap_thresh: float = 0.4
    edge_thresh: NonNegativeInt = 20
    # Counts how many times inference fell back to the backup model.
    backup_model_used: int = 0

    class Config:
        # MaskRCNN and torch.device are not pydantic-native types.
        arbitrary_types_allowed = True
@validator("model", pre=True, always=True)
def get_model(cls, v):
if isinstance(v, MaskRCNN):
return v
else:
return load_annotation_model(v)
@validator("device", always=True)
def put_model_on_device_and_set_to_eval(cls, v, values):
print(f"Putting model on device: {v}", file=stderr)
v = torch.device(v)
values["model"].to(v)
values["model"].eval()
return v
@validator("backup_model", pre=True, always=True)
def copy_model_to_backup_device(cls, v, values):
assert v is None, "Should not set 'backup_model'. It will be set automatically"
if "backup_device" in values and values["backup_device"] is not None:
v = copy_annotation_model(values["model"])
v.to(values["backup_device"])
v.eval()
return v
@validator("split_angle", always=True)
def convert_split_angle_to_radians(cls, v):
return v * pi / 180.0
    def get_prediction(self, img_idx: NonNegativeInt) -> Prediction:
        """Run prediction on a single image. First tries to use the model on
        self.device, and falls back to the model on self.backup_device if a
        RuntimeError is caught (if set).

        Parameters
        ----------
        img_idx: int
            Index of image in via project.

        Returns
        -------
        prediction: Prediction
            Output of model prediction. An empty Prediction is returned when
            the image cannot be loaded.
        """
        try:
            img, _ = self.dataset[img_idx]
        except (OSError, RuntimeError) as e:
            # Unreadable/corrupt image: report and skip rather than abort the run.
            print(
                f"Error loading {self.dataset.metadata(img_idx).filename}. {e!r}. Skipping.",
                file=stderr,
            )
            return Prediction.empty()
        with torch.no_grad():
            try:
                prediction = self.model([img.to(self.device)])[0]
            except RuntimeError:
                # Presumably an out-of-memory error on the primary (GPU)
                # device — TODO confirm. Retry on the backup device when one
                # is configured; otherwise propagate.
                if self.backup_model:
                    prediction = self.backup_model([img.to(self.backup_device)])[0]
                    self.backup_model_used += 1
                else:
                    raise
        # Release the image tensor promptly (it may live on the GPU).
        del img
        return Prediction.from_tensor_dict(prediction)
def filter_annotations(self, prediction: Prediction) -> Prediction:
"""Applies self.score_thresh and self.overlap_thresh to filter out poor quality
annotations.
Parameters
----------
prediction : Prediction
Output of model prediction.
Returns
-------
filtered_prediction : Prediction
Filtered prediction.
"""
# Remove predictions with below-threshold score
prediction = prediction.filter_by_score(self.score_thresh)
n_predictions = len(prediction)
if n_predictions == 0:
return prediction
# Calculate mask overlaps for all pairs of predicted instances
mask_overlaps = np.zeros((n_predictions, n_predictions), dtype="f4")
for i, j in itertools.combinations(range(n_predictions), 2):
if prediction.boxes[i].overlaps(prediction.boxes[j]):
mask_overlaps[i, j] = weighted_intersection_over_minimum(
prediction.masks[i], prediction.masks[j]
)
mask_overlaps[j, i] = mask_overlaps[i, j]
# Remove worst overlapping instances until there are no above-threshold overlaps
keep = set(range(n_predictions))
overlap_mask = mask_overlaps.max(axis=1) >= self.overlap_thresh
while np.any(overlap_mask):
# Figure out which overlapping annotation has the worst score
overlap_annotations = np.where(overlap_mask)[0]
to_discard = overlap_annotations[
np.argmin(np.array(prediction.scores)[overlap_annotations])
]
# Remove the annotation
keep.remove(to_discard)
mask_overlaps[to_discard, :] = 0.0
mask_overlaps[:, to_discard] = 0.0
overlap_mask = mask_overlaps.max(axis=1) >= self.overlap_thresh
return prediction.get_subset_from_index(list(keep))
def fit_poly(
self,
box: BoundingBox,
mask: torch.Tensor,
) -> Union[PolylineShapeAttributes, CircleShapeAttributes, None]:
"""Uses polynomial regression to fit a polyline annotation to the provided
segmentation mask.
Parameters
----------
box : BoundingBox
Fully | |
# ibis/omniscidb/operations.py
import warnings
from datetime import date, datetime
from io import StringIO
import ibis
import ibis.common.exceptions as com
import ibis.common.geospatial as geo
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.rules as rlz
import ibis.expr.types as ir
import ibis.util as util
from ibis.impala import compiler as impala_compiler
from ibis.omniscidb.identifiers import quote_identifier
# Mapping from ibis datatype names (lowercased) to OmniSciDB SQL type names,
# used by _type_to_sql_string for all non-decimal types.
_sql_type_names = {
    'boolean': 'boolean',
    'date': 'date',
    'decimal': 'decimal',
    'double': 'double',
    'float32': 'float',
    'float64': 'double',
    'int8': 'smallint',
    'int16': 'smallint',
    'int32': 'int',
    'int64': 'bigint',
    'linestring': 'linestring',
    'multipolygon': 'multipolygon',
    'point': 'point',
    'polygon': 'polygon',
    'string': 'text',
    'time': 'time',
    'timestamp': 'timestamp',
}
def _is_floating(*args):
    """Return True if any argument is a floating-point column expression."""
    # any() with a generator replaces the manual loop-and-return idiom.
    return any(isinstance(arg, ir.FloatingColumn) for arg in args)
def _type_to_sql_string(tval):
    """Render an ibis datatype as an OmniSciDB SQL type string."""
    if isinstance(tval, dt.Decimal):
        # Decimals carry precision/scale and cannot use the name table.
        return 'decimal({}, {})'.format(tval.precision, tval.scale)
    return _sql_type_names[tval.name.lower()]
def _cast(translator, expr):
    """Translate a CAST expression to OmniSciDB SQL.

    Geospatial casts are restricted: GEOGRAPHY -> GEOMETRY is rejected
    because the backend cannot perform that conversion.
    """
    # Imported locally to avoid a circular import with the client module —
    # presumably; TODO confirm.
    from ibis.omniscidb.client import OmniSciDBDataType

    op = expr.op()
    arg, target = op.args
    arg_ = translator.translate(arg)

    if isinstance(arg, ir.GeoSpatialValue):
        # NOTE: CastToGeography expects geometry with SRID=4326
        type_ = target.geotype.upper()
        if type_ == 'GEOMETRY':
            raise com.UnsupportedOperationError(
                'OmnisciDB/OmniSciDB doesn\'t support yet convert '
                + 'from GEOGRAPHY to GEOMETRY.'
            )
    else:
        # Non-geospatial targets map through the OmniSciDB type system.
        type_ = str(OmniSciDBDataType.from_ibis(target, nullable=False))

    return 'CAST({0!s} AS {1!s})'.format(arg_, type_)
def _all(expr):
    """Rewrite ``all`` as: no element of the 0/1-encoded column is zero."""
    arg = expr.op().args[0]
    if isinstance(arg, ir.BooleanValue):
        # Encode booleans as 0/1 so they can be summed.
        arg = arg.ifelse(1, 0)
    return (1 - arg).sum() == 0
def _any(expr):
    """Rewrite ``any`` as: at least one element of the 0/1-encoded column is set.

    Bug fix: the previous comparison ``arg.sum() >= 0`` is vacuously true for
    a 0/1-encoded column, so ``any`` always evaluated to True. The correct
    test — the logical complement of ``_not_any``'s ``sum() == 0`` — is
    ``sum() >= 1``.
    """
    op = expr.op()
    arg = op.args[0]
    if isinstance(arg, ir.BooleanValue):
        # Encode booleans as 0/1 so they can be summed.
        arg = arg.ifelse(1, 0)
    return arg.sum() >= 1
def _not_any(expr):
    """Rewrite ``not any`` as: every element of the 0/1-encoded column is zero."""
    arg = expr.op().args[0]
    if isinstance(arg, ir.BooleanValue):
        # Encode booleans as 0/1 so they can be summed.
        arg = arg.ifelse(1, 0)
    return arg.sum() == 0
def _not_all(expr):
    """Rewrite ``not all`` as: some element of the 0/1-encoded column is zero."""
    arg = expr.op().args[0]
    if isinstance(arg, ir.BooleanValue):
        # Encode booleans as 0/1 so they can be summed.
        arg = arg.ifelse(1, 0)
    return (1 - arg).sum() != 0
def _parenthesize(translator, expr):
    """Translate ``expr``, wrapping infix/unary operations in parentheses."""
    translated = translator.translate(expr)
    op_klass = type(expr.op())
    # function calls don't need parens
    needs_parens = (op_klass in _binary_infix_ops) or (op_klass in _unary_ops)
    return '({0!s})'.format(translated) if needs_parens else translated
def fixed_arity(func_name, arity):
    """Build a translator for a SQL function taking exactly ``arity`` arguments."""
    def formatter(translator, expr):
        operands = expr.op().args
        if len(operands) != arity:
            msg = 'Incorrect number of args {0} instead of {1}'
            raise com.UnsupportedOperationError(msg.format(len(operands), arity))
        return _call(translator, func_name, *operands)

    formatter.__name__ = func_name
    return formatter
def unary(func_name):
    """Return a translator for a one-argument SQL function ``func_name``."""
    return fixed_arity(func_name, 1)
def _reduction_format(
    translator,
    func_name,
    sql_func_name=None,
    sql_signature='{}({})',
    arg=None,
    args=None,
    where=None,
):
    """Render a reduction call, applying an optional filter predicate."""
    name = sql_func_name or func_name
    if where is not None:
        # A filtered reduction only aggregates rows where the predicate holds;
        # everything else becomes NULL and is ignored by the aggregate.
        arg = where.ifelse(arg, ibis.NA)
    operands = ', '.join(map(translator.translate, [arg] + list(args)))
    return sql_signature.format(name, operands)
def _reduction(func_name, sql_func_name=None, sql_signature='{}({})'):
    """Build a translator for a (possibly filtered) reduction expression."""
    def formatter(translator, expr):
        op = expr.op()
        # HACK: support trailing arguments
        where = op.where
        operands = []
        for operand in op.args:
            # The filter predicate is handled separately by _reduction_format
            # (identity comparison, matching the original behaviour).
            if operand is where:
                continue
            if operand.type().equals(dt.boolean):
                # Booleans are aggregated as 0/1 integers.
                operand = operand.ifelse(1, 0)
            operands.append(operand)
        return _reduction_format(
            translator,
            func_name,
            sql_func_name,
            sql_signature,
            operands[0],
            operands[1:],
            where,
        )

    formatter.__name__ = func_name
    return formatter
def _variance_like(func):
    """Build a translator mapping how='sample'/'pop' to ``FUNC_SAMP``/``FUNC_POP``."""
    variants = {how: '{}_{}'.format(func, suffix)
                for how, suffix in (('sample', 'SAMP'), ('pop', 'POP'))}

    def formatter(translator, expr):
        arg, how, where = expr.op().args
        return _reduction_format(
            translator, variants[how].upper(), None, '{}({})', arg, [], where
        )

    formatter.__name__ = func
    return formatter
def unary_prefix_op(prefix_op):
    """Build a formatter rendering ``PREFIX arg`` (e.g. ``NOT x``)."""
    sql_keyword = prefix_op.upper()

    def formatter(translator, expr):
        operand = _parenthesize(translator, expr.op().args[0])
        return '{0!s} {1!s}'.format(sql_keyword, operand)

    formatter.__name__ = prefix_op
    return formatter
def binary_infix_op(infix_sym):
    """Build a formatter rendering ``left <sym> right``, parenthesized as needed."""
    def formatter(translator, expr):
        lhs, rhs = expr.op().args[0], expr.op().args[1]
        return '{0!s} {1!s} {2!s}'.format(
            _parenthesize(translator, lhs),
            infix_sym,
            _parenthesize(translator, rhs),
        )

    return formatter
def _call(translator, func, *args):
args_ = ', '.join(map(translator.translate, args))
return '{0!s}({1!s})'.format(func, args_)
def _extract_field(sql_attr):
def extract_field_formatter(translator, expr):
op = expr.op()
arg = translator.translate(op.args[0])
return 'EXTRACT({} FROM {})'.format(sql_attr, arg)
return extract_field_formatter
# STATS
def _corr(translator, expr):
# pull out the arguments to the expression
args = expr.op().args
x, y, how, where = args
# compile the argument
compiled_x = translator.translate(x)
compiled_y = translator.translate(y)
return 'CORR({}, {})'.format(compiled_x, compiled_y)
def _cov(translator, expr):
# pull out the arguments to the expression
args = expr.op().args
x, y, how, where = args
# compile the argument
compiled_x = translator.translate(x)
compiled_y = translator.translate(y)
return 'COVAR_{}({}, {})'.format(how[:4].upper(), compiled_x, compiled_y)
# STRING
def _length(func_name='length', sql_func_name='CHAR_LENGTH'):
def __lenght(translator, expr):
# pull out the arguments to the expression
arg = expr.op().args[0]
# compile the argument
compiled_arg = translator.translate(arg)
return '{}({})'.format(sql_func_name, compiled_arg)
__lenght.__name__ = func_name
return __lenght
def _contains(translator, expr):
    """Render substring containment via LIKE, mapping the result to 1/-1."""
    arg, pattern = expr.op().args[:2]
    # Strip the quotes from the translated literal and wrap in wildcards.
    raw_pattern = translator.translate(pattern)[1:-1]
    like_expr = arg.like('%{}%'.format(raw_pattern)).ifelse(1, -1)
    return _parenthesize(translator, like_expr)
# GENERIC
def _value_list(translator, expr):
op = expr.op()
values_ = map(translator.translate, op.values)
return '({0})'.format(', '.join(values_))
def _interval_format(translator, expr):
dtype = expr.type()
if dtype.unit in {'ms', 'us', 'ns'}:
raise com.UnsupportedOperationError(
"OmniSciDB doesn't support subsecond interval resolutions"
)
return '{1}, (sign){0}'.format(expr.op().value, dtype.resolution.upper())
def _interval_from_integer(translator, expr):
op = expr.op()
arg, unit = op.args
dtype = expr.type()
if dtype.unit in {'ms', 'us', 'ns'}:
raise com.UnsupportedOperationError(
"OmniSciDB doesn't support subsecond interval resolutions"
)
arg_ = translator.translate(arg)
return '{}, (sign){}'.format(dtype.resolution.upper(), arg_)
def _timestamp_op(func, op_sign='+'):
    """Build a formatter for timestamp arithmetic through *func*.

    The right operand's ``(sign)`` placeholder is replaced with
    *op_sign*; a date-typed left operand is first cast to timestamp.
    """
    def _formatter(translator, expr):
        left, right = expr.op().args
        left_ = translator.translate(left)
        right_ = translator.translate(right)
        if isinstance(left, ir.DateValue):
            left_ = 'CAST({} as timestamp)'.format(left_)
        return '{}({}, {})'.format(
            func, right_.replace('(sign)', op_sign), left_
        )

    return _formatter
def _set_literal_format(translator, expr):
    """Render a set literal by translating every element as a literal."""
    value_type = expr.type().value_type
    rendered = [
        translator.translate(ir.literal(element, type=value_type))
        for element in expr.op().value
    ]
    return '({})'.format(', '.join(rendered))
def _cross_join(translator, expr):
    """Render a cross join as an inner join on the constant TRUE."""
    left, right = expr.op().args[:2]
    return translator.translate(left.join(right, ibis.literal(True)))
def literal(translator, expr):
    """Render an ibis literal expression as an OmniSciDB SQL literal.

    Dispatches on the expression's ibis value type and raises
    ``NotImplementedError`` for unsupported types.
    """
    op = expr.op()
    value = op.value

    # geo spatial data type
    if isinstance(expr, ir.GeoSpatialScalar):
        return geo.translate_literal(expr)
    # primitive data type
    elif isinstance(expr, ir.BooleanValue):
        # Booleans are rendered as integer 1/0.
        return '1' if value else '0'
    elif isinstance(expr, ir.StringValue):
        # Escape embedded single quotes before quoting.
        return "'{0!s}'".format(value.replace("'", "\\'"))
    elif isinstance(expr, ir.NumericValue):
        return repr(value)
    elif isinstance(expr, ir.SetScalar):
        return _set_literal_format(translator, expr)
    elif isinstance(expr, ir.IntervalValue):
        return _interval_format(translator, expr)
    elif isinstance(expr, ir.TimestampValue):
        if isinstance(value, datetime):
            # Sub-second precision is silently dropped (with a warning).
            if value.microsecond != 0:
                msg = 'Unsupported subsecond accuracy {}'
                warnings.warn(msg.format(value))
            value = value.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(value, str):
            # check if the datetime format is a valid format (
            # '%Y-%m-%d %H:%M:%S' or '%Y-%m-%d'). if format is '%Y-%m-%d' it
            # is converted to '%Y-%m-%d 00:00:00'
            msg = (
                "Literal datetime string should use '%Y-%m-%d %H:%M:%S' "
                "format. When '%Y-%m-%d' format is used, datetime will be "
                "converted automatically to '%Y-%m-%d 00:00:00'"
            )
            try:
                dt_value = datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
            except ValueError:
                # Date-only strings are accepted but padded to midnight.
                try:
                    dt_value = datetime.strptime(value, '%Y-%m-%d')
                    warnings.warn(msg)
                except ValueError:
                    # Neither accepted format matched.
                    raise Exception(msg)
            value = dt_value.strftime('%Y-%m-%d %H:%M:%S')
        return "'{0!s}'".format(value)
    elif isinstance(expr, ir.DateValue):
        if isinstance(value, date):
            value = value.strftime('%Y-%m-%d')
        return "toDate('{0!s}')".format(value)
    # array data type
    elif isinstance(expr, ir.ArrayValue):
        return str(list(value))
    else:
        raise NotImplementedError(type(expr))
def _where(translator, expr):
# pull out the arguments to the expression
args = expr.op().args
condition, expr1, expr2 = args
expr = condition.ifelse(expr1, expr2)
return translator.translate(expr)
def raise_unsupported_expr_error(expr):
    """Raise for expressions the OmniSciDB backend cannot compile."""
    template = "OmniSciDB backend doesn't support {} operation!"
    raise com.UnsupportedOperationError(template.format(type(expr.op())))
def raise_unsupported_op_error(translator, expr, *args):
    """Formatter-signature variant of ``raise_unsupported_expr_error``."""
    template = "OmniSciDB backend doesn't support {} operation!"
    raise com.UnsupportedOperationError(template.format(type(expr.op())))
# translator
def _name_expr(formatted_expr, quoted_name):
    """Render an aliased SELECT item: ``<expr> AS <quoted name>``."""
    return '{} AS {}'.format(formatted_expr, quote_identifier(quoted_name))
class CaseFormatter:
    """Renders an ibis CASE expression as a SQL ``CASE ... END`` string.

    Supports both the "simple" form (with a base expression) and the
    "searched" form (condition per branch). Multi-branch cases are laid
    out across multiple indented lines.
    """

    def __init__(self, translator, base, cases, results, default):
        self.translator = translator
        self.base = base
        self.cases = cases
        self.results = results
        self.default = default

        # HACK
        self.indent = 2
        # Single-branch cases are rendered on one line.
        self.multiline = len(cases) > 1
        self.buf = StringIO()

    def _trans(self, expr):
        # Shorthand: translate a sub-expression with the owning translator.
        return self.translator.translate(expr)

    def get_result(self):
        """
        Render and return the full ``CASE ... END`` SQL string.

        :return: the formatted CASE expression.
        """
        self.buf.seek(0)

        self.buf.write('CASE')
        if self.base is not None:
            # Simple CASE: the base expression follows the keyword.
            base_str = self._trans(self.base)
            self.buf.write(' {0}'.format(base_str))

        for case, result in zip(self.cases, self.results):
            self._next_case()
            case_str = self._trans(case)
            result_str = self._trans(result)
            self.buf.write('WHEN {0} THEN {1}'.format(case_str, result_str))

        if self.default is not None:
            self._next_case()
            default_str = self._trans(self.default)
            self.buf.write('ELSE {0}'.format(default_str))

        if self.multiline:
            self.buf.write('\nEND')
        else:
            self.buf.write(' END')

        return self.buf.getvalue()

    def _next_case(self):
        # Emit the separator before each WHEN/ELSE clause: a newline plus
        # indent in multiline mode, otherwise a single space.
        if self.multiline:
            self.buf.write('\n{0}'.format(' ' * self.indent))
        else:
            self.buf.write(' ')
def _table_array_view(translator, expr):
    """Render a table expression as an indented, parenthesized subquery."""
    ctx = translator.context
    query = ctx.get_compiled_expr(expr.op().table)
    return '(\n{0}\n)'.format(util.indent(query, ctx.indent))
def _timestamp_truncate(translator, expr):
    """Render timestamp truncation as ``DATE_TRUNC(UNIT, arg)``."""
    arg, unit = expr.op().args
    # Map the ibis unit code onto the SQL resolution keyword.
    resolution = dt.Interval(unit=unit).resolution.upper()
    return 'DATE_TRUNC({0!s}, {1!s})'.format(
        resolution, translator.translate(arg)
    )
def _table_column(translator, expr):
    """Render a column reference, qualifying or subquerying as needed."""
    op = expr.op()
    field_name = op.name
    quoted_name = quote_identifier(field_name, force=True)
    ctx = translator.context

    # A column whose table is foreign to the current SELECT context must
    # be rendered as a scalar subquery instead of a bare reference.
    if translator.permit_subquery and ctx.is_foreign_expr(op.table):
        proj_expr = op.table.projection([field_name]).to_array()
        return _table_array_view(translator, proj_expr)

    if ctx.need_aliases():
        alias = ctx.get_ref(op.table)
        if alias is not None:
            return '{}.{}'.format(alias, quoted_name)

    return quoted_name
# AGGREGATION
# approx_nunique maps onto OmniSciDB's approximate distinct count; the
# trailing literal 100 is the approximation parameter of the SQL function.
approx_count_distinct = _reduction(
    'approx_nunique',
    sql_func_name='approx_count_distinct',
    sql_signature='{}({}, 100)',
)
# NOTE(review): count_distinct renders plain COUNT, not COUNT(DISTINCT ...) —
# verify this is intentional for this backend.
count_distinct = _reduction('count')
count = _reduction('count')
def _arbitrary(translator, expr):
arg, how, where = expr.op().args
if how not in (None, 'last'):
raise com.UnsupportedOperationError(
'{!r} value not supported for arbitrary in OmniSciDB'.format(how)
)
if where is not None:
arg = where.ifelse(arg, ibis.NA)
return 'SAMPLE({})'.format(translator.translate(arg))
# MATH
class NumericTruncate(ops.NumericBinaryOp):
    """Truncates x to y decimal places"""

    # Result keeps the shape of the left operand, with float dtype.
    output_type = rlz.shape_like('left', ops.dt.float)
# GEOMETRIC
class Conv_4326_900913_X(ops.UnaryOp):
    """
    Converts WGS-84 longitude to WGS-84 Web Mercator x coordinate.

    Doc fix: in the EPSG:4326 -> EPSG:900913 projection the x coordinate
    is derived from longitude (not latitude, as previously stated).
    """

    output_type = rlz.shape_like('arg', ops.dt.float)
class Conv_4326_900913_Y(ops.UnaryOp):
"""
Converts WGS-84 longitude to WGS-84 Web Mercator y coordinate.
"""
| |
logging.info(
"No optimizer config provided, therefore no optimizer was created"
)
return
else:
# Preserve the configuration
if not isinstance(optim_config, DictConfig):
optim_config = OmegaConf.create(optim_config)
# See if internal config has `optim` namespace before preservation
if self._cfg is not None and hasattr(self._cfg, "optim"):
if self._cfg.optim is None:
self._cfg.optim = copy.deepcopy(optim_config)
else:
with open_dict(self._cfg.optim):
self._cfg.optim = copy.deepcopy(optim_config)
# Setup optimizer and scheduler
if optim_config is not None and isinstance(optim_config, DictConfig):
optim_config = OmegaConf.to_container(optim_config, resolve=True)
if self._trainer is None:
logging.warning(
f"Trainer wasn't specified in model constructor. Make sure that you really wanted it."
)
if "sched" in optim_config and self._trainer is not None:
if not isinstance(self._trainer.accumulate_grad_batches, int):
raise ValueError(
"We do not currently support gradient acculumation that is not an integer."
)
if self._trainer.max_steps is None:
# Store information needed to calculate max_steps
optim_config["sched"]["t_max_epochs"] = self._trainer.max_epochs
optim_config["sched"][
"t_accumulate_grad_batches"
] = self._trainer.accumulate_grad_batches
optim_config["sched"][
"t_limit_train_batches"
] = self._trainer.limit_train_batches
if self._trainer.distributed_backend is None:
optim_config["sched"]["t_num_workers"] = self._trainer.num_gpus or 1
elif self._trainer.distributed_backend == "ddp_cpu":
optim_config["sched"]["t_num_workers"] = (
self._trainer.num_processes * self._trainer.num_nodes
)
elif self._trainer.distributed_backend == "ddp":
optim_config["sched"]["t_num_workers"] = (
self._trainer.num_gpus * self._trainer.num_nodes
)
else:
logging.warning(
f"The lightning trainer received accelerator: {self._trainer.distributed_backend}. We "
"recommend to use 'ddp' instead."
)
optim_config["sched"]["t_num_workers"] = (
self._trainer.num_gpus * self._trainer.num_nodes
)
else:
optim_config["sched"]["max_steps"] = self._trainer.max_steps
# Force into DictConfig from nested structure
optim_config = OmegaConf.create(optim_config)
# Get back nested dict so we its mutable
optim_config = OmegaConf.to_container(optim_config, resolve=True)
# Extract scheduler config if inside optimizer config
if "sched" in optim_config:
scheduler_config = optim_config.pop("sched")
else:
scheduler_config = None
# Check if caller provided optimizer name, default to Adam otherwise
optimizer_cls = optim_config.get("_target_", None)
if optimizer_cls is None:
# Try to get optimizer name for dynamic resolution, defaulting to Adam
optimizer_name = optim_config.get("name", "adam")
else:
if inspect.isclass(optimizer_cls):
optimizer_name = optimizer_cls.__name__.lower()
else:
# resolve the class name (lowercase) from the class path if not provided
optimizer_name = optimizer_cls.split(".")[-1].lower()
# We are guarenteed to have lr since it is required by the argparser
# But maybe user forgot to pass it to this function
lr = optim_config.get("lr", None)
# Check if caller has optimizer kwargs, default to empty dictionary
if "args" in optim_config:
optimizer_args = optim_config.pop("args")
optimizer_args = optim.parse_optimizer_args(optimizer_name, optimizer_args)
else:
optimizer_args = copy.deepcopy(optim_config)
# Remove extra parameters from optimizer_args nest
# Assume all other parameters are to be passed into optimizer constructor
optimizer_args.pop("name", None)
optimizer_args.pop("cls", None)
optimizer_args.pop("lr", None)
# Adaptive schedulers don't need `lr`
if lr is not None:
optimizer_args["lr"] = lr
# Actually instantiate the optimizer
if optimizer_cls is not None:
if inspect.isclass(optimizer_cls):
optimizer = optimizer_cls(self.parameters(), **optimizer_args)
logging.info("Optimizer config = %s", str(optimizer))
self._optimizer = optimizer
else:
# Attempt class path resolution
try:
optimizer_cls = OmegaConf.create({"_target_": optimizer_cls})
if lr is not None:
optimizer_config = {"lr": lr}
else:
optimizer_config = {}
optimizer_config.update(optimizer_args)
optimizer_instance = hydra.utils.instantiate(
optimizer_cls, self.parameters(), **optimizer_config
) # type: DictConfig
logging.info("Optimizer config = %s", str(optimizer_instance))
self._optimizer = optimizer_instance
except Exception as e:
logging.error(
"Could not instantiate class path - {} with kwargs {}".format(
optimizer_cls, str(optimizer_config)
)
)
raise e
else:
optimizer = optim.get_optimizer(optimizer_name)
optimizer = optimizer(self.parameters(), **optimizer_args)
logging.info("Optimizer config = %s", str(optimizer))
self._optimizer = optimizer
# Try to instantiate scheduler for optimizer
self._scheduler = prepare_lr_scheduler(
optimizer=self._optimizer,
scheduler_config=scheduler_config,
train_dataloader=self._train_dl,
)
# Return the optimizer with/without scheduler
# This return allows multiple optimizers or schedulers to be created
return self._optimizer, self._scheduler
def configure_optimizers(self):
self.setup_optimization()
if self._scheduler is None:
return self._optimizer
else:
return [self._optimizer], [self._scheduler]
def train_dataloader(self):
if self._train_dl is not None:
return self._train_dl
def val_dataloader(self):
if self._validation_dl is not None:
return self._validation_dl
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
    def validation_epoch_end(
        self,
        outputs: Union[
            List[Dict[str, torch.Tensor]], List[List[Dict[str, torch.Tensor]]]
        ],
    ) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
        """
        Default DataLoader for Validation set which automatically supports multiple data loaders
        via `multi_validation_epoch_end`.
        If multi dataset support is not required, override this method entirely in base class.
        In such a case, there is no need to implement `multi_validation_epoch_end` either.
        .. note::
            If more than one data loader exists, and they all provide `val_loss`,
            only the `val_loss` of the first data loader will be used by default.
            This default can be changed by passing the special key `val_dl_idx: int`
            inside the `validation_ds` config.
        Args:
            outputs: Single or nested list of tensor outputs from one or more data loaders.
        Returns:
            A dictionary containing the union of all items from individual data_loaders,
            along with merged logs from all data loaders.
        """
        # Case where we dont provide data loaders
        # NOTE(review): a None `outputs` passes this guard and would raise on
        # `outputs[0]` below — presumably never None in practice; confirm.
        if outputs is not None and len(outputs) == 0:
            return {}
        # Case where we provide exactly 1 data loader
        if type(outputs[0]) == dict:
            output_dict = self.multi_validation_epoch_end(outputs, dataloader_idx=0)
            if output_dict is not None and "log" in output_dict:
                self.log_dict(output_dict.pop("log"), on_epoch=True)
            return output_dict
        else:  # Case where we provide more than 1 data loader
            output_dict = {"log": {}}
            # The output is a list of list of dicts, outer list corresponds to dataloader idx
            for dataloader_idx, val_outputs in enumerate(outputs):
                # Get prefix and dispatch call to multi epoch end
                dataloader_prefix = self.get_validation_dataloader_prefix(
                    dataloader_idx
                )
                dataloader_logs = self.multi_validation_epoch_end(
                    val_outputs, dataloader_idx=dataloader_idx
                )
                # If result was not provided, generate empty dict
                dataloader_logs = dataloader_logs or {}
                # Perform `val_loss` resolution first (if provided outside logs)
                # Only the dataloader selected by `_val_dl_idx` contributes the
                # top-level `val_loss`.
                if "val_loss" in dataloader_logs:
                    if (
                        "val_loss" not in output_dict
                        and dataloader_idx == self._val_dl_idx
                    ):
                        output_dict["val_loss"] = dataloader_logs["val_loss"]
                # For every item in the result dictionary
                for k, v in dataloader_logs.items():
                    # If the key is `log`
                    if k == "log":
                        # Parse every element of the log, and attach the prefix name of the data loader
                        log_dict = {}
                        for k_log, v_log in v.items():
                            # If we are logging the metric, but dont provide it at result level,
                            # store it twice - once in log and once in result level.
                            # Also mark log with prefix name to avoid log level clash with other data loaders
                            if (
                                k_log not in output_dict["log"]
                                and dataloader_idx == self._val_dl_idx
                            ):
                                new_k_log = k_log
                                # Also insert duplicate key with prefix for ease of comparison / avoid name clash
                                log_dict[dataloader_prefix + k_log] = v_log
                            else:
                                # Simply prepend prefix to key and save
                                new_k_log = dataloader_prefix + k_log
                            # Store log value
                            log_dict[new_k_log] = v_log
                        # Update log storage of individual data loader
                        output_logs = output_dict["log"]
                        output_logs.update(log_dict)
                        # Update global log storage
                        output_dict["log"] = output_logs
                    else:
                        # If any values are stored outside 'log', simply prefix name and store
                        new_k = dataloader_prefix + k
                        output_dict[new_k] = v
            if "log" in output_dict:
                self.log_dict(output_dict.pop("log"), on_epoch=True)
            # return everything else
            return output_dict
def test_epoch_end(
self,
outputs: Union[
List[Dict[str, torch.Tensor]], List[List[Dict[str, torch.Tensor]]]
],
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
"""
Default DataLoader for Test set which automatically supports multiple data loaders
via `multi_test_epoch_end`.
If multi dataset support is not required, override this method entirely in base class.
In such a case, there is no need to implement `multi_test_epoch_end` either.
.. note::
If more than one data loader exists, and they all provide `test_loss`,
only the `test_loss` of the first data loader will be used by default.
This default can be changed by passing the special key `test_dl_idx: int`
inside the `test_ds` config.
Args:
outputs: Single or nested list of tensor outputs from one or more data loaders.
Returns:
A dictionary containing the union of all items from individual data_loaders,
along with merged logs from all data loaders.
"""
# Case where we dont provide data loaders
if outputs is not None and len(outputs) == 0:
return {}
# Case where we provide exactly 1 data loader
if type(outputs[0]) == dict:
output_dict = self.multi_test_epoch_end(outputs, dataloader_idx=0)
if output_dict is not None and "log" in output_dict:
self.log_dict(output_dict.pop("log"), on_epoch=True)
return output_dict
else: # Case where we provide more than 1 data loader
output_dict = {"log": {}}
# The output is a list of list of dicts, outer list corresponds to dataloader idx
for dataloader_idx, test_outputs in enumerate(outputs):
# Get prefix and dispatch call to multi | |
<filename>matrix-python-project/cover_generator/typesetting/model/four.py
import sys, os, time, json, random
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from cover_generator.typesetting.more import More
from cover_generator.typesetting.mark import Mark
from cover_generator.typesetting.build import Build
from utils.snow_id import SnowId
sys.path.append(os.getcwd())
class Four(object):
    def __init__(self, folder_key):
        """Load the four-image layout styles and map model ids to builders.

        :param folder_key: key of the working folder; composites are
            written through a Build helper targeting ``<folder_key>_temp``.
        """
        # Populated later by build(): source image metadata and the chosen
        # layout/match description.
        self.image_list = None
        self.rank_model = None
        self.tb = None
        with open("cover_generator/typesetting/style.json", 'r') as f0:
            style_config = json.load(f0)
        # Unit geometry for the nine four-image layouts (ids 41-49).
        self.model = style_config["four"]
        # The second digit of model_id (1-9) selects the concrete builder.
        self.func_map = {
            1: self.quadruple_vertical_build,
            2: self.quadruple_horizontal_build,
            3: self.chairs_build,
            4: self.chairs_spin_build,
            5: self.h2v2_build,
            6: self.h2v2_spin_build,
            7: self.windows_build,
            8: self.windows_vertical_build,
            9: self.windows_horizontal_build,
        }
        self._build = Build(folder_key, folder_key + "_temp")
def quadruple_vertical(self, image_list):
return More(image_list, self.model[0]["unit_detail"], "41").main()
def quadruple_horizontal(self, image_list):
return More(image_list, self.model[1]["unit_detail"], "42").main()
def chairs(self, image_list):
return More(image_list, self.model[2]["unit_detail"], "43").main()
def chairs_spin(self, image_list):
return More(image_list, self.model[3]["unit_detail"], "44").main()
def h2v2(self, image_list):
return More(image_list, self.model[4]["unit_detail"], "45").main()
def h2v2_spin(self, image_list):
return More(image_list, self.model[5]["unit_detail"], "46").main()
def windows(self, image_list):
return More(image_list, self.model[6]["unit_detail"], "47").main()
def windows_vertical(self, image_list):
return More(image_list, self.model[7]["unit_detail"], "48").main()
def windows_horizontal(self, image_list):
return More(image_list, self.model[8]["unit_detail"], "49").main()
def build(self, image_list, model):
self.tb = Image.open("cover_generator/background.jpg")
self.image_list = image_list
self.rank_model = model
self.func_map[int(model["model_id"][1])]()
def quadruple_vertical_build(self):
# 贴第一张图
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[0]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# 贴第二张图
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[0]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# 贴第三张图
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[0]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# 贴第四张图
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[0]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# 随机对相同宽高的图片进行shuffle
pic_list = [pic_1, pic_2, pic_3]
random.shuffle(pic_list)
# 结构也需要shuffle
kind = random.randint(0, 1)
# 保存
if kind == 0:
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (0, 480))
self.tb.paste(pic_list[2], (0, 960))
self.tb.paste(pic_4, (540, 0))
else:
self.tb.paste(pic_list[0], (540, 0))
self.tb.paste(pic_list[1], (540, 480))
self.tb.paste(pic_list[2], (540, 960))
self.tb.paste(pic_4, (0, 0))
self._build.save(self.tb)
def quadruple_horizontal_build(self):
# 贴第一张图
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[1]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# 贴第二张图
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[1]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# 贴第三张图
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[1]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# 贴第四张图
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[1]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# 随机对相同宽高的图片进行shuffle
pic_list = [pic_1, pic_2, pic_3]
random.shuffle(pic_list)
# 结构也需要shuffle
kind = random.randint(0, 1)
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 1080,
# "height": 720
# }
# 保存
if kind == 0:
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (360, 0))
self.tb.paste(pic_list[2], (720, 0))
self.tb.paste(pic_4, (0, 720))
else:
self.tb.paste(pic_list[0], (0, 720))
self.tb.paste(pic_list[1], (360, 720))
self.tb.paste(pic_list[2], (720, 720))
self.tb.paste(pic_4, (0, 0))
self._build.save(self.tb)
def chairs_build(self):
# 贴第一张图
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[2]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# 贴第二张图
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[2]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# 贴第三张图
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[2]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# 贴第四张图
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[2]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# 随机对相同宽高的图片进行shuffle
pic_list = [pic_2, pic_3]
random.shuffle(pic_list)
# 结构也需要shuffle
kind = random.randint(0, 3)
# {
# "width": 720,
# "height": 720
# },
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 360,
# "height": 720
# },
# {
# "width": 360,
# "height": 1440
# }
# 保存
if kind == 0:
self.tb.paste(pic_1, (0, 0))
self.tb.paste(pic_list[1], (0, 720))
self.tb.paste(pic_list[0], (360, 720))
self.tb.paste(pic_4, (720, 0))
elif kind == 1:
self.tb.paste(pic_1, (360, 0))
self.tb.paste(pic_list[1], (360, 720))
self.tb.paste(pic_list[0], (720, 720))
self.tb.paste(pic_4, (0, 0))
elif kind == 2:
self.tb.paste(pic_1, (0, 720))
self.tb.paste(pic_list[1], (0, 0))
self.tb.paste(pic_list[0], (360, 0))
self.tb.paste(pic_4, (720, 0))
else:
self.tb.paste(pic_1, (360, 720))
self.tb.paste(pic_list[1], (360, 0))
self.tb.paste(pic_list[0], (720, 0))
self.tb.paste(pic_4, (0, 0))
self._build.save(self.tb)
def chairs_spin_build(self):
# 贴第一张图
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[3]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# 贴第二张图
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[3]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# 贴第三张图
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[3]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# 贴第四张图
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[3]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# 随机对相同宽高的图片进行shuffle
pic_list = [pic_3, pic_4]
random.shuffle(pic_list)
# 结构也需要shuffle
kind = random.randint(0, 3)
# 保存
# {
# "width": 1080,
# "height": 480
# },
# {
# "width": 540,
# "height": 960
# },
# {
# "width": 540,
# "height": 480
# },
# {
# "width": 540,
# "height": 480
# }
if kind == 0:
self.tb.paste(pic_1, (0, 0))
self.tb.paste(pic_2, (0, 480))
self.tb.paste(pic_list[1], (540, 480))
self.tb.paste(pic_list[0], (540, 960))
elif kind == 1:
self.tb.paste(pic_1, (0, 0))
self.tb.paste(pic_2, (540, 480))
self.tb.paste(pic_list[1], (0, 480))
self.tb.paste(pic_list[0], (0, 960))
elif kind == 2:
self.tb.paste(pic_1, (0, 960))
self.tb.paste(pic_2, (0, 0))
self.tb.paste(pic_list[1], (540, 0))
self.tb.paste(pic_list[0], (540, 480))
else:
self.tb.paste(pic_1, (0, 960))
self.tb.paste(pic_2, (540, 0))
self.tb.paste(pic_list[1], (0, 480))
self.tb.paste(pic_list[0], (0, 0))
self._build.save(self.tb)
def h2v2_build(self):
# 贴第一张图
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[4]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# 贴第二张图
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[4]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# 贴第三张图
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[4]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# 贴第四张图
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[4]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# 随机对相同宽高的图片进行shuffle
pic_list_1 = [pic_1, pic_2]
random.shuffle(pic_list_1)
pic_list_2 = [pic_3, pic_4]
random.shuffle(pic_list_2)
# 结构也需要shuffle,此处三种结构
kind = random.randint(0, 2)
# 保存
if kind == 0:
self.tb.paste(pic_list_1[0], (0, 0))
self.tb.paste(pic_list_1[1], (0, 720))
self.tb.paste(pic_list_2[0], (360, 0))
self.tb.paste(pic_list_2[1], (720, 0))
elif kind == 1:
self.tb.paste(pic_list_1[0], (720, 0))
self.tb.paste(pic_list_1[1], (720, 720))
self.tb.paste(pic_list_2[0], (0, 0))
self.tb.paste(pic_list_2[1], (360, 0))
else:
self.tb.paste(pic_list_1[0], (360, 0))
self.tb.paste(pic_list_1[1], (360, 720))
self.tb.paste(pic_list_2[0], (0, 0))
self.tb.paste(pic_list_2[1], (720, 0))
self._build.save(self.tb)
def h2v2_spin_build(self):
# 贴第一张图
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[5]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# 贴第二张图
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[5]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# 贴第三张图
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[5]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# 贴第四张图
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[5]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# 随机对相同宽高的图片进行shuffle
pic_list_1 = [pic_1, pic_2]
random.shuffle(pic_list_1)
pic_list_2 = [pic_3, pic_4]
random.shuffle(pic_list_2)
# 结构也需要shuffle,此处三种结构
kind = random.randint(0, 2)
# 保存
# {
# "width": 1080,
# "height": 480
# },
# {
# "width": 1080,
# "height": 480
# },
# {
# "width": 540,
# "height": 480
# },
# {
# "width": 540,
# "height": 480
# }
if kind == 0:
self.tb.paste(pic_list_1[0], (0, 0))
self.tb.paste(pic_list_1[1], (0, 480))
self.tb.paste(pic_list_2[0], (0, 960))
self.tb.paste(pic_list_2[1], (540, 960))
elif kind == 1:
self.tb.paste(pic_list_1[0], (0, 480))
self.tb.paste(pic_list_1[1], (0, 960))
self.tb.paste(pic_list_2[0], (0, 0))
self.tb.paste(pic_list_2[1], (540, 0))
else:
self.tb.paste(pic_list_1[0], (0, 0))
self.tb.paste(pic_list_1[1], (0, 960))
self.tb.paste(pic_list_2[0], (0, 480))
self.tb.paste(pic_list_2[1], (540, 480))
self._build.save(self.tb)
def windows_build(self):
# 贴第一张图
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[6]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
# 贴第二张图
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[6]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
# 贴第三张图
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[6]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
# 贴第四张图
image = self.image_list[self.rank_model["model_match"][3][1]]
model = self.model[6]["unit_detail"][3]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_4 = self._build.build_up(image["filename"], rate, area)
# 随机对相同宽高的图片进行shuffle
pic_list = [pic_1, pic_2, pic_3, pic_4]
random.shuffle(pic_list)
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (540, 0))
| |
event
void printDependentParameters();
// Print couplings that are changed event by event
void printDependentCouplings();
private:
static Parameters_sm* instance;
};
#endif // Pythia8_parameters_sm_H
"""% misc.get_pkg_info()
goal_file_cc = \
"""//==========================================================================
// This file has been automatically generated for Pythia 8 by
# MadGraph5_aMC@NLO v. %(version)s, %(date)s
# By the MadGraph5_aMC@NLO Development Team
# Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include <iostream>
#include "Parameters_sm.h"
#include "Pythia8/PythiaStdlib.h"
using namespace Pythia8;
// Initialize static instance
Parameters_sm* Parameters_sm::instance = 0;
// Function to get static instance - only one instance per program
Parameters_sm* Parameters_sm::getInstance(){
if (instance == 0)
instance = new Parameters_sm();
return instance;
}
void Parameters_sm::setIndependentParameters(ParticleData*& pd, Couplings*& csm, SusyLesHouches*& slhaPtr){
mdl_WTau=pd->mWidth(15);
mdl_WH=pd->mWidth(25);
mdl_WT=pd->mWidth(6);
mdl_WW=pd->mWidth(24);
mdl_WZ=pd->mWidth(23);
mdl_MTA=pd->m0(15);
mdl_MM=pd->m0(13);
mdl_Me=pd->m0(11);
mdl_MH=pd->m0(25);
mdl_MB=pd->m0(5);
mdl_MT=pd->m0(6);
mdl_MC=pd->m0(4);
mdl_MZ=pd->m0(23);
mdl_ymtau=pd->mRun(15, pd->m0(24));
mdl_ymm=pd->mRun(13, pd->m0(24));
mdl_yme=pd->mRun(11, pd->m0(24));
mdl_ymt=pd->mRun(6, pd->m0(24));
mdl_ymb=pd->mRun(5, pd->m0(24));
mdl_ymc=pd->mRun(4, pd->m0(24));
if(!slhaPtr->getEntry<double>("wolfenstein", 4, mdl_etaWS)){
cout << "Warning, setting mdl_etaWS to 3.410000e-01" << endl;
mdl_etaWS = 3.410000e-01;}
if(!slhaPtr->getEntry<double>("wolfenstein", 3, mdl_rhoWS)){
cout << "Warning, setting mdl_rhoWS to 1.320000e-01" << endl;
mdl_rhoWS = 1.320000e-01;}
if(!slhaPtr->getEntry<double>("wolfenstein", 2, mdl_AWS)){
cout << "Warning, setting mdl_AWS to 8.080000e-01" << endl;
mdl_AWS = 8.080000e-01;}
if(!slhaPtr->getEntry<double>("wolfenstein", 1, mdl_lamWS)){
cout << "Warning, setting mdl_lamWS to 2.253000e-01" << endl;
mdl_lamWS = 2.253000e-01;}
mdl_Gf = M_PI*csm->alphaEM(((pd->m0(23))*(pd->m0(23))))*((pd->m0(23))*(pd->m0(23)))/(sqrt(2.)*((pd->m0(24))*(pd->m0(24)))*(((pd->m0(23))*(pd->m0(23)))-((pd->m0(24))*(pd->m0(24)))));
aEWM1 = 1./csm->alphaEM(((pd->m0(23))*(pd->m0(23))));
ZERO = 0.;
mdl_lamWS__exp__2 = ((mdl_lamWS)*(mdl_lamWS));
mdl_CKM1x1 = 1.-mdl_lamWS__exp__2/2.;
mdl_CKM1x2 = mdl_lamWS;
mdl_complexi = std::complex<double>(0.,1.);
mdl_lamWS__exp__3 = ((mdl_lamWS)*(mdl_lamWS)*(mdl_lamWS));
mdl_CKM1x3 = mdl_AWS*mdl_lamWS__exp__3*(-(mdl_etaWS*mdl_complexi)+mdl_rhoWS);
mdl_CKM2x1 = -mdl_lamWS;
mdl_CKM2x2 = 1.-mdl_lamWS__exp__2/2.;
mdl_CKM2x3 = mdl_AWS*mdl_lamWS__exp__2;
mdl_CKM3x1 = mdl_AWS*mdl_lamWS__exp__3*(1.-mdl_etaWS*mdl_complexi-mdl_rhoWS);
mdl_CKM3x2 = -(mdl_AWS*mdl_lamWS__exp__2);
mdl_CKM3x3 = 1.;
mdl_MZ__exp__2 = ((mdl_MZ)*(mdl_MZ));
mdl_MZ__exp__4 = ((mdl_MZ)*(mdl_MZ)*(mdl_MZ)*(mdl_MZ));
mdl_sqrt__2 = sqrt(2.);
mdl_MH__exp__2 = ((mdl_MH)*(mdl_MH));
mdl_conjg__CKM1x3 = conj(mdl_CKM1x3);
mdl_conjg__CKM2x3 = conj(mdl_CKM2x3);
mdl_conjg__CKM3x3 = conj(mdl_CKM3x3);
mdl_conjg__CKM2x1 = conj(mdl_CKM2x1);
mdl_conjg__CKM3x1 = conj(mdl_CKM3x1);
mdl_conjg__CKM2x2 = conj(mdl_CKM2x2);
mdl_conjg__CKM3x2 = conj(mdl_CKM3x2);
mdl_conjg__CKM1x1 = conj(mdl_CKM1x1);
mdl_conjg__CKM1x2 = conj(mdl_CKM1x2);
mdl_aEW = 1./aEWM1;
mdl_MW = sqrt(mdl_MZ__exp__2/2.+sqrt(mdl_MZ__exp__4/4.-(mdl_aEW*M_PI*mdl_MZ__exp__2)/(mdl_Gf*mdl_sqrt__2)));
mdl_sqrt__aEW = sqrt(mdl_aEW);
mdl_ee = 2.*mdl_sqrt__aEW*sqrt(M_PI);
mdl_MW__exp__2 = ((mdl_MW)*(mdl_MW));
mdl_sw2 = 1.-mdl_MW__exp__2/mdl_MZ__exp__2;
mdl_cw = sqrt(1.-mdl_sw2);
mdl_sqrt__sw2 = sqrt(mdl_sw2);
mdl_sw = mdl_sqrt__sw2;
mdl_g1 = mdl_ee/mdl_cw;
mdl_gw = mdl_ee/mdl_sw;
mdl_vev = (2.*mdl_MW*mdl_sw)/mdl_ee;
mdl_vev__exp__2 = ((mdl_vev)*(mdl_vev));
mdl_lam = mdl_MH__exp__2/(2.*mdl_vev__exp__2);
mdl_yb = (mdl_ymb*mdl_sqrt__2)/mdl_vev;
mdl_yc = (mdl_ymc*mdl_sqrt__2)/mdl_vev;
mdl_ye = (mdl_yme*mdl_sqrt__2)/mdl_vev;
mdl_ym = (mdl_ymm*mdl_sqrt__2)/mdl_vev;
mdl_yt = (mdl_ymt*mdl_sqrt__2)/mdl_vev;
mdl_ytau = (mdl_ymtau*mdl_sqrt__2)/mdl_vev;
mdl_muH = sqrt(mdl_lam*mdl_vev__exp__2);
mdl_I1x31 = mdl_yb*mdl_conjg__CKM1x3;
mdl_I1x32 = mdl_yb*mdl_conjg__CKM2x3;
mdl_I1x33 = mdl_yb*mdl_conjg__CKM3x3;
mdl_I2x12 = mdl_yc*mdl_conjg__CKM2x1;
mdl_I2x13 = mdl_yt*mdl_conjg__CKM3x1;
mdl_I2x22 = mdl_yc*mdl_conjg__CKM2x2;
mdl_I2x23 = mdl_yt*mdl_conjg__CKM3x2;
mdl_I2x32 = mdl_yc*mdl_conjg__CKM2x3;
mdl_I2x33 = mdl_yt*mdl_conjg__CKM3x3;
mdl_I3x21 = mdl_CKM2x1*mdl_yc;
mdl_I3x22 = mdl_CKM2x2*mdl_yc;
mdl_I3x23 = mdl_CKM2x3*mdl_yc;
mdl_I3x31 = mdl_CKM3x1*mdl_yt;
mdl_I3x32 = mdl_CKM3x2*mdl_yt;
mdl_I3x33 = mdl_CKM3x3*mdl_yt;
mdl_I4x13 = mdl_CKM1x3*mdl_yb;
mdl_I4x23 = mdl_CKM2x3*mdl_yb;
mdl_I4x33 = mdl_CKM3x3*mdl_yb;
mdl_ee__exp__2 = ((mdl_ee)*(mdl_ee));
mdl_sw__exp__2 = ((mdl_sw)*(mdl_sw));
mdl_cw__exp__2 = ((mdl_cw)*(mdl_cw));
}
void Parameters_sm::setIndependentCouplings(){
GC_1 = -(mdl_ee*mdl_complexi)/3.;
GC_2 = (2.*mdl_ee*mdl_complexi)/3.;
GC_3 = -(mdl_ee*mdl_complexi);
GC_4 = mdl_ee*mdl_complexi;
GC_5 = mdl_ee__exp__2*mdl_complexi;
GC_6 = 2.*mdl_ee__exp__2*mdl_complexi;
GC_7 = -mdl_ee__exp__2/(2.*mdl_cw);
GC_8 = (mdl_ee__exp__2*mdl_complexi)/(2.*mdl_cw);
GC_9 = mdl_ee__exp__2/(2.*mdl_cw);
GC_13 = mdl_I1x31;
GC_14 = mdl_I1x32;
GC_15 = mdl_I1x33;
GC_16 = -mdl_I2x12;
GC_17 = -mdl_I2x13;
GC_18 = -mdl_I2x22;
GC_19 = -mdl_I2x23;
GC_20 = -mdl_I2x32;
GC_21 = -mdl_I2x33;
GC_22 = mdl_I3x21;
GC_23 = mdl_I3x22;
GC_24 = mdl_I3x23;
GC_25 = mdl_I3x31;
GC_26 = mdl_I3x32;
GC_27 = mdl_I3x33;
GC_28 = -mdl_I4x13;
GC_29 = -mdl_I4x23;
GC_30 = -mdl_I4x33;
GC_31 = -2.*mdl_complexi*mdl_lam;
GC_32 = -4.*mdl_complexi*mdl_lam;
GC_33 = -6.*mdl_complexi*mdl_lam;
GC_34 = (mdl_ee__exp__2*mdl_complexi)/(2.*mdl_sw__exp__2);
GC_35 = -((mdl_ee__exp__2*mdl_complexi)/mdl_sw__exp__2);
GC_36 = (mdl_cw__exp__2*mdl_ee__exp__2*mdl_complexi)/mdl_sw__exp__2;
GC_37 = -mdl_ee/(2.*mdl_sw);
GC_38 = -(mdl_ee*mdl_complexi)/(2.*mdl_sw);
GC_39 = (mdl_ee*mdl_complexi)/(2.*mdl_sw);
GC_40 = (mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_41 = (mdl_CKM1x1*mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_42 = (mdl_CKM1x2*mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_43 = (mdl_CKM1x3*mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_44 = (mdl_CKM2x1*mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_45 = (mdl_CKM2x2*mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_46 = (mdl_CKM2x3*mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_47 = (mdl_CKM3x1*mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_48 = (mdl_CKM3x2*mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_49 = (mdl_CKM3x3*mdl_ee*mdl_complexi)/(mdl_sw*mdl_sqrt__2);
GC_50 = -(mdl_cw*mdl_ee*mdl_complexi)/(2.*mdl_sw);
GC_51 = (mdl_cw*mdl_ee*mdl_complexi)/(2.*mdl_sw);
GC_52 = -((mdl_cw*mdl_ee*mdl_complexi)/mdl_sw);
GC_53 = (mdl_cw*mdl_ee*mdl_complexi)/mdl_sw;
GC_54 = -mdl_ee__exp__2/(2.*mdl_sw);
GC_55 = -(mdl_ee__exp__2*mdl_complexi)/(2.*mdl_sw);
GC_56 = mdl_ee__exp__2/(2.*mdl_sw);
GC_57 = (-2.*mdl_cw*mdl_ee__exp__2*mdl_complexi)/mdl_sw;
GC_58 = -(mdl_ee*mdl_complexi*mdl_sw)/(6.*mdl_cw);
GC_59 = (mdl_ee*mdl_complexi*mdl_sw)/(2.*mdl_cw);
GC_60 = -(mdl_cw*mdl_ee)/(2.*mdl_sw)-(mdl_ee*mdl_sw)/(2.*mdl_cw);
GC_61 = -(mdl_cw*mdl_ee*mdl_complexi)/(2.*mdl_sw)+(mdl_ee*mdl_complexi*mdl_sw)/(2.*mdl_cw);
GC_62 = (mdl_cw*mdl_ee*mdl_complexi)/(2.*mdl_sw)+(mdl_ee*mdl_complexi*mdl_sw)/(2.*mdl_cw);
GC_63 = (mdl_cw*mdl_ee__exp__2*mdl_complexi)/mdl_sw-(mdl_ee__exp__2*mdl_complexi*mdl_sw)/mdl_cw;
GC_64 = -(mdl_ee__exp__2*mdl_complexi)+(mdl_cw__exp__2*mdl_ee__exp__2*mdl_complexi)/(2.*mdl_sw__exp__2)+(mdl_ee__exp__2*mdl_complexi*mdl_sw__exp__2)/(2.*mdl_cw__exp__2);
GC_65 = mdl_ee__exp__2*mdl_complexi+(mdl_cw__exp__2*mdl_ee__exp__2*mdl_complexi)/(2.*mdl_sw__exp__2)+(mdl_ee__exp__2*mdl_complexi*mdl_sw__exp__2)/(2.*mdl_cw__exp__2);
GC_66 = -(mdl_ee__exp__2*mdl_vev)/(2.*mdl_cw);
GC_67 = (mdl_ee__exp__2*mdl_vev)/(2.*mdl_cw);
GC_68 = -2.*mdl_complexi*mdl_lam*mdl_vev;
GC_69 = -6.*mdl_complexi*mdl_lam*mdl_vev;
GC_70 = -(mdl_ee__exp__2*mdl_vev)/(4.*mdl_sw__exp__2);
GC_71 = -(mdl_ee__exp__2*mdl_complexi*mdl_vev)/(4.*mdl_sw__exp__2);
GC_72 = (mdl_ee__exp__2*mdl_complexi*mdl_vev)/(2.*mdl_sw__exp__2);
GC_73 = (mdl_ee__exp__2*mdl_vev)/(4.*mdl_sw__exp__2);
GC_74 = -(mdl_ee__exp__2*mdl_vev)/(2.*mdl_sw);
GC_75 = (mdl_ee__exp__2*mdl_vev)/(2.*mdl_sw);
GC_76 = -(mdl_ee__exp__2*mdl_vev)/(4.*mdl_cw)-(mdl_cw*mdl_ee__exp__2*mdl_vev)/(4.*mdl_sw__exp__2);
GC_77 = (mdl_ee__exp__2*mdl_vev)/(4.*mdl_cw)-(mdl_cw*mdl_ee__exp__2*mdl_vev)/(4.*mdl_sw__exp__2);
GC_78 = -(mdl_ee__exp__2*mdl_vev)/(4.*mdl_cw)+(mdl_cw*mdl_ee__exp__2*mdl_vev)/(4.*mdl_sw__exp__2);
GC_79 = (mdl_ee__exp__2*mdl_vev)/(4.*mdl_cw)+(mdl_cw*mdl_ee__exp__2*mdl_vev)/(4.*mdl_sw__exp__2);
GC_80 = -(mdl_ee__exp__2*mdl_complexi*mdl_vev)/2.-(mdl_cw__exp__2*mdl_ee__exp__2*mdl_complexi*mdl_vev)/(4.*mdl_sw__exp__2)-(mdl_ee__exp__2*mdl_complexi*mdl_sw__exp__2*mdl_vev)/(4.*mdl_cw__exp__2);
GC_81 = mdl_ee__exp__2*mdl_complexi*mdl_vev+(mdl_cw__exp__2*mdl_ee__exp__2*mdl_complexi*mdl_vev)/(2.*mdl_sw__exp__2)+(mdl_ee__exp__2*mdl_complexi*mdl_sw__exp__2*mdl_vev)/(2.*mdl_cw__exp__2);
GC_82 = -(mdl_yb/mdl_sqrt__2);
GC_83 = -((mdl_complexi*mdl_yb)/mdl_sqrt__2);
GC_84 = -((mdl_complexi*mdl_yc)/mdl_sqrt__2);
GC_85 = mdl_yc/mdl_sqrt__2;
GC_86 = -mdl_ye;
GC_87 = mdl_ye;
GC_88 = -(mdl_ye/mdl_sqrt__2);
GC_89 = -((mdl_complexi*mdl_ye)/mdl_sqrt__2);
GC_90 = -mdl_ym;
GC_91 = mdl_ym;
GC_92 = -(mdl_ym/mdl_sqrt__2);
GC_93 = -((mdl_complexi*mdl_ym)/mdl_sqrt__2);
GC_94 = -((mdl_complexi*mdl_yt)/mdl_sqrt__2);
GC_95 = mdl_yt/mdl_sqrt__2;
GC_96 = -mdl_ytau;
GC_97 = mdl_ytau;
GC_98 = -(mdl_ytau/mdl_sqrt__2);
GC_99 = -((mdl_complexi*mdl_ytau)/mdl_sqrt__2);
GC_100 = (mdl_ee*mdl_complexi*mdl_conjg__CKM1x1)/(mdl_sw*mdl_sqrt__2);
GC_101 = (mdl_ee*mdl_complexi*mdl_conjg__CKM1x2)/(mdl_sw*mdl_sqrt__2);
GC_102 = (mdl_ee*mdl_complexi*mdl_conjg__CKM1x3)/(mdl_sw*mdl_sqrt__2);
GC_103 = (mdl_ee*mdl_complexi*mdl_conjg__CKM2x1)/(mdl_sw*mdl_sqrt__2);
GC_104 = (mdl_ee*mdl_complexi*mdl_conjg__CKM2x2)/(mdl_sw*mdl_sqrt__2);
GC_105 = (mdl_ee*mdl_complexi*mdl_conjg__CKM2x3)/(mdl_sw*mdl_sqrt__2);
GC_106 = (mdl_ee*mdl_complexi*mdl_conjg__CKM3x1)/(mdl_sw*mdl_sqrt__2);
GC_107 = (mdl_ee*mdl_complexi*mdl_conjg__CKM3x2)/(mdl_sw*mdl_sqrt__2);
GC_108 = (mdl_ee*mdl_complexi*mdl_conjg__CKM3x3)/(mdl_sw*mdl_sqrt__2);
}
void Parameters_sm::setDependentParameters(ParticleData*& pd, Couplings*& csm, SusyLesHouches*& slhaPtr, double alpS){
aS = alpS;
mdl_sqrt__aS = sqrt(aS);
G = 2.*mdl_sqrt__aS*sqrt(M_PI);
mdl_G__exp__2 = ((G)*(G));
}
void Parameters_sm::setDependentCouplings(){
GC_12 = mdl_complexi*mdl_G__exp__2;
GC_11 = mdl_complexi*G;
GC_10 = -G;
}
// Routines for printing out parameters
void Parameters_sm::printIndependentParameters(){
cout << "sm model parameters independent of event kinematics:" << endl;
cout << setw(20) << "mdl_WTau " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_WTau << endl;
cout << setw(20) << "mdl_WH " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_WH << endl;
cout << setw(20) << "mdl_WT " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_WT << endl;
cout << setw(20) << "mdl_WW " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_WW << endl;
cout << setw(20) << "mdl_WZ " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_WZ << endl;
cout << setw(20) << "mdl_MTA " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MTA << endl;
cout << setw(20) << "mdl_MM " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MM << endl;
cout << setw(20) << "mdl_Me " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_Me << endl;
cout << setw(20) << "mdl_MH " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MH << endl;
cout << setw(20) << "mdl_MB " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MB << endl;
cout << setw(20) << "mdl_MT " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MT << endl;
cout << setw(20) << "mdl_MC " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MC << endl;
cout << setw(20) << "mdl_MZ " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MZ << endl;
cout << setw(20) << "mdl_ymtau " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ymtau << endl;
cout << setw(20) << "mdl_ymm " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ymm << endl;
cout << setw(20) << "mdl_yme " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_yme << endl;
cout << setw(20) << "mdl_ymt " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ymt << endl;
cout << setw(20) << "mdl_ymb " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ymb << endl;
cout << setw(20) << "mdl_ymc " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_ymc << endl;
cout << setw(20) << "mdl_etaWS " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_etaWS << endl;
cout << setw(20) << "mdl_rhoWS " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_rhoWS << endl;
cout << setw(20) << "mdl_AWS " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_AWS << endl;
cout << setw(20) << "mdl_lamWS " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_lamWS << endl;
cout << setw(20) << "mdl_Gf " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_Gf << endl;
cout << setw(20) << "aEWM1 " << "= " << setiosflags(ios::scientific) << setw(10) << aEWM1 << endl;
cout << setw(20) << "ZERO " << "= " << setiosflags(ios::scientific) << setw(10) << ZERO << endl;
cout << setw(20) << "mdl_lamWS__exp__2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_lamWS__exp__2 << endl;
cout << setw(20) << "mdl_CKM1x1 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_CKM1x1 << endl;
cout << setw(20) << "mdl_CKM1x2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_CKM1x2 << endl;
cout << setw(20) << "mdl_complexi " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_complexi << endl;
cout << setw(20) << "mdl_lamWS__exp__3 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_lamWS__exp__3 << endl;
cout << setw(20) << "mdl_CKM1x3 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_CKM1x3 << endl;
cout << setw(20) << "mdl_CKM2x1 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_CKM2x1 << endl;
cout << setw(20) << "mdl_CKM2x2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_CKM2x2 << endl;
cout << setw(20) << "mdl_CKM2x3 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_CKM2x3 << endl;
cout << setw(20) << "mdl_CKM3x1 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_CKM3x1 << endl;
cout << setw(20) << "mdl_CKM3x2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_CKM3x2 << endl;
cout << setw(20) << "mdl_CKM3x3 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_CKM3x3 << endl;
cout << setw(20) << "mdl_MZ__exp__2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MZ__exp__2 << endl;
cout << setw(20) << "mdl_MZ__exp__4 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MZ__exp__4 << endl;
cout << setw(20) << "mdl_sqrt__2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_sqrt__2 << endl;
cout << setw(20) << "mdl_MH__exp__2 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_MH__exp__2 << endl;
cout << setw(20) << "mdl_conjg__CKM1x3 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_conjg__CKM1x3 << endl;
cout << setw(20) << "mdl_conjg__CKM2x3 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_conjg__CKM2x3 << endl;
cout << setw(20) << "mdl_conjg__CKM3x3 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_conjg__CKM3x3 << endl;
cout << setw(20) << "mdl_conjg__CKM2x1 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_conjg__CKM2x1 << endl;
cout << setw(20) << "mdl_conjg__CKM3x1 " << "= " << setiosflags(ios::scientific) << setw(10) << mdl_conjg__CKM3x1 << endl;
cout << setw(20) << "mdl_conjg__CKM2x2 | |
noqa: E501
return self.api_client.call_api(
'/process-definition/key/{key}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProcessDefinitionDto', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_process_definition_diagram(self, id, **kwargs): # noqa: E501
"""Get Diagram # noqa: E501
Retrieves the diagram of a process definition. If the process definition's deployment contains an image resource with the same file name as the process definition, the deployed image will be returned by the Get Diagram endpoint. Example: `someProcess.bpmn` and `someProcess.png`. Supported file extentions for the image are: `svg`, `png`, `jpg`, and `gif`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_process_definition_diagram(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the process definition. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_process_definition_diagram_with_http_info(id, **kwargs) # noqa: E501
def get_process_definition_diagram_with_http_info(self, id, **kwargs): # noqa: E501
"""Get Diagram # noqa: E501
Retrieves the diagram of a process definition. If the process definition's deployment contains an image resource with the same file name as the process definition, the deployed image will be returned by the Get Diagram endpoint. Example: `someProcess.bpmn` and `someProcess.png`. Supported file extentions for the image are: `svg`, `png`, `jpg`, and `gif`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_process_definition_diagram_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: The id of the process definition. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(file, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_process_definition_diagram" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_process_definition_diagram`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/octet-stream', '*/*', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/process-definition/{id}/diagram', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_process_definition_diagram_by_key(self, key, **kwargs): # noqa: E501
"""Get Diagram # noqa: E501
Retrieves the diagram for the latest version of the process definition which belongs to no tenant. If the process definition's deployment contains an image resource with the same file name as the process definition, the deployed image will be returned by the Get Diagram endpoint. Example: `someProcess.bpmn` and `someProcess.png`. Supported file extentions for the image are: `svg`, `png`, `jpg`, and `gif`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_process_definition_diagram_by_key(key, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str key: The key of the process definition. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_process_definition_diagram_by_key_with_http_info(key, **kwargs) # noqa: E501
def get_process_definition_diagram_by_key_with_http_info(self, key, **kwargs): # noqa: E501
"""Get Diagram # noqa: E501
Retrieves the diagram for the latest version of the process definition which belongs to no tenant. If the process definition's deployment contains an image resource with the same file name as the process definition, the deployed image will be returned by the Get Diagram endpoint. Example: `someProcess.bpmn` and `someProcess.png`. Supported file extentions for the image are: `svg`, `png`, `jpg`, and `gif`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_process_definition_diagram_by_key_with_http_info(key, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str key: The key of the process definition. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(file, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'key'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_process_definition_diagram_by_key" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'key' is set
if self.api_client.client_side_validation and ('key' not in local_var_params or # noqa: E501
local_var_params['key'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `key` when calling `get_process_definition_diagram_by_key`") # noqa: E501
collection_formats = {}
path_params = {}
if 'key' in local_var_params:
path_params['key'] = local_var_params['key'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/octet-stream', '*/*', 'application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/process-definition/key/{key}/diagram', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_process_definition_diagram_by_key_and_tenant_id(self, key, tenant_id, **kwargs): # noqa: E501
"""Get Diagram # noqa: E501
Retrieves the diagram for the latest version of the process definition for tenant. If the process definition's deployment contains an image resource with the same file name as the process definition, the deployed image will be returned by the Get Diagram endpoint. Example: `someProcess.bpmn` and `someProcess.png`. Supported file extentions for the image are: `svg`, `png`, `jpg`, and `gif`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_process_definition_diagram_by_key_and_tenant_id(key, tenant_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str key: The key of the process definition. (required)
:param str tenant_id: The id of the tenant the process definition belongs to. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_process_definition_diagram_by_key_and_tenant_id_with_http_info(key, tenant_id, **kwargs) # noqa: E501
def get_process_definition_diagram_by_key_and_tenant_id_with_http_info(self, | |
<reponame>ValentinoUberti/mcimporter
# coding=latin-1
from fpdf import FPDF, HTMLMixin
from dns.resolver import NoMetaqueries
#from twisted.words.protocols.oscar import CAP_CHAT
import os
import datetime
from money import *
from datetime import timedelta
from calendar import monthrange
class MyFPDF(FPDF, HTMLMixin):
    """FPDF document with fpdf's HTMLMixin, so HTML can be rendered into the PDF."""
    pass
class FATTURA():
pdf = None
html = ""
rows = []
def __init__(self,tipo_fattura,date,numero_interno_fattura,anteprima=False):
self.pdf = MyFPDF("P", "mm", "A4")
self.date=date
self.numero_interno_ddt = numero_interno_fattura
self.tipo_fattura=tipo_fattura
self.valuta="Valuta : EURO"
self.numero_fattura="Numero fattura {0}".format(numero_interno_fattura)
self.data_fattura="del {0}".format(date)
self.pdf.set_auto_page_break(False)
self.rows_per_page = 26
self.rows=[]
self.f2_list=[]
self.anteprima = anteprima
def add_row(self,codice,descrizione,riferimento_ordine,um,quantita,prezzo,sconti,importo,ci):
record = []
record.append(codice)
record.append(descrizione)
record.append(riferimento_ordine)
record.append(um)
record.append(quantita)
record.append(prezzo)
record.append(sconti)
record.append(str(importo))
record.append(ci)
self.rows.append(record)
def print_footer(self):
self.pdf.set_xy(2,242)
self.pdf.set_font_size(8)
f=False
self.pdf.cell(34, 0, self.totale_merce,0,0,"",f)
self.pdf.cell(30, 0, self.sconto,0,0,"",f)
self.pdf.cell(30, 0, self.netto_merce,0,0,"",f)
self.pdf.cell(24, 0, self.spese_varie,0,0,"",f)
self.pdf.cell(30, 0, self.spese_trasporto,0,0,"",f)
self.pdf.cell(30, 0, self.totale_imponibile,0,0,"",f)
self.pdf.cell(26, 0, self.totale_imposta,0,0,"",f)
print "SCRITTO1"
pass
def print_total(self):
self.pdf.set_font_size(12)
self.pdf.set_xy(180,260)
self.pdf.cell(34, 0, self.totale_documento,0,0,"")
print "SCRITTO2"
    def print_footer_2(self):
        """Print the summary rows from self.f2_list and the payment due date(s).

        Payment handling (NOTE(review): inferred from the code, confirm):
        - self.pagamento without "/": a single due date with the full amount.
        - self.pagamento containing "/" (day terms such as "30/60"): the total
          is split into two instalments and two dates are printed, the first
          computed backwards from self.scadenza.
        """
        self.pdf.set_font_size(8)
        # Summary rows start at y=250, advancing 5 mm per row.
        y = 250
        for row in self.f2_list:
            self.pdf.set_xy(2,y+5)
            self.pdf.set_font_size(8)
            print row
            f=False
            self.pdf.cell(34, 0, row[0],0,0,"",f)
            self.pdf.cell(30, 0, row[1],0,0,"",f)
            self.pdf.cell(24, 0, row[2],0,0,"",f)
            self.pdf.cell(30, 0, row[3],0,0,"",f)
            self.pdf.cell(30, 0, row[4],0,0,"",f)
            self.pdf.cell(30, 0, row[5],0,0,"",f)
            y +=5
        if not "/" in self.pagamento:
            # Single instalment: one due date followed by the full amount.
            self.pdf.set_xy(90,283)
            self.pdf.set_font_size(10)
            self.pdf.cell(30, 0, self.scadenza + " " + self.totale_documento,0,0,"")
            print "SCRITTO3"
        else:
            # Two instalments: split the total 50/50 and print two dates.
            self.pdf.set_xy(60,283)
            self.pdf.set_font_size(10)
            s=self.pagamento
            # totale_documento is an it_IT-formatted string like "1.234,56":
            # drop the thousands separators and rebuild a float from it.
            res = self.totale_documento.split(',')
            print res
            full_price = float('.'.join([res[0].replace('.', ''), res[1]]))
            # First half is rounded to cents; the second half absorbs the
            # rounding remainder so the two sum exactly to the total.
            first_half = round(full_price / 2,2)
            second_half= full_price - first_half
            print first_half,second_half
            importo1 = Money(str(first_half),"EUR")
            importo1 = importo1.format("it_IT").encode('ascii', 'ignore').decode('ascii')
            importo2 = Money(str(second_half),"EUR")
            importo2 = importo2.format("it_IT").encode('ascii', 'ignore').decode('ascii')
            # Day gap between the two payment terms, e.g. "30/60" -> 30 days.
            # NOTE(review): the slicing assumes up-to-3-digit terms around "/"
            # - verify against the real pagamento formats.
            st = int(s[s.index("/")+1:s.index("/")+4]) - int(s[s.index("/")-3:s.index("/")])
            second_date = datetime.datetime.strptime(self.scadenza,"%d/%m/%Y").date()
            # The first due date falls (gap + 1) days before the final one.
            first_date = second_date - datetime.timedelta(days = int(st) +1)
            first_date = first_date.strftime("%d/%m/%Y")
            second_date = second_date.strftime("%d/%m/%Y")
            self.pdf.cell(50, 0, first_date + " " + importo1,0,0,"")
            self.pdf.cell(30, 0, second_date + " " + importo2,0,0,"")
def insert_rows(self):
page_number = (len(self.rows) / self.rows_per_page) +1
row_index = 0
f = False
self.pdf.set_fill_color(220, 220, 220)
for page in range(0,page_number):
if row_index < len(self.rows):
#print row_index,len(self.rows)-1
self.add_header()
print "Add header"
print "righe totali {0} riga corrente {1}".format(len(self.rows),row_index)
if (page < (page_number -1)):
print "MINORE : current page {0} page number {1}".format(page,page_number -1)
print "riga corrente {0} righe massime {1}",format(row_index,str(self.rows_per_page))
if not row_index + self.rows_per_page == len(self.rows):
self.pdf.set_xy(164, 281)
self.pdf.set_font('', '')
self.pdf.set_font_size(18)
self.pdf.write(1, "SEGUE >>>")
self.pdf.set_xy(2,100)
else:
"""
Print footer data
"""
print "oCIO : current page {0} page number {1}".format(page,page_number)
#print "SONO QUIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII"
self.pdf.set_font('', '')
self.print_footer()
self.print_footer_2()
self.print_total()
print "fINITO INTERNO"
pass
else:
"""
Print footer data
"""
#print "SONO QUIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII"
self.pdf.set_font('', '')
self.print_footer()
self.print_footer_2()
self.print_total()
print "FINITOOOOOOOOOOO"
pass
try:
self.pdf.set_xy(2,100)
self.pdf.set_font_size(8)
for line_number in range(row_index,row_index + self.rows_per_page):
if line_number % 2 == 1:
self.pdf.rect(2, self.pdf.get_y()-2, 24, 4, style = 'F')
self.pdf.rect(28, self.pdf.get_y()-2, 74, 4, style = 'F')
self.pdf.rect(106, self.pdf.get_y()-2, 20, 4, style = 'F')
self.pdf.rect(130, self.pdf.get_y()-2, 6, 4, style = 'F')
self.pdf.rect(140, self.pdf.get_y()-2, 8, 4, style = 'F')
self.pdf.rect(151, self.pdf.get_y()-2, 15, 4, style = 'F')
self.pdf.rect(168, self.pdf.get_y()-2, 10, 4, style = 'F')
self.pdf.rect(181, self.pdf.get_y()-2, 16, 4, style = 'F')
self.pdf.rect(201, self.pdf.get_y()-2, 7, 4, style = 'F')
#print "rect"
else:
f = False
#print "LINE NuMBER :",line_number
self.pdf.set_x(2)
#print self.rows[line_number]
self.pdf.cell(26, 0, self.rows[line_number][0],0,0,"",f)
self.pdf.cell(80, 0, self.rows[line_number][1],fill=f)
self.pdf.cell(22, 0, self.rows[line_number][2],fill=f)
self.pdf.cell(12, 0, self.rows[line_number][3],fill=f)
self.pdf.cell(8, 0, self.rows[line_number][4],fill=f)
self.pdf.cell(14, 0, self.rows[line_number][5],align='R',fill=f)
self.pdf.cell(14, 0, self.rows[line_number][6],fill=f)
self.pdf.cell(20, 0, self.rows[line_number][7],align='R',fill=f)
self.pdf.cell(10, 0, self.rows[line_number][8],align='R',fill=f)
self.pdf.set_y(self.pdf.get_y()+5)
row_index+=1
except Exception,e:
print e
pass
pass
    def add_header(self):
        """Start a new invoice page and draw its static layout.

        Adds a page, places the company logo (or a "preview" watermark image
        when ``self.anteprima`` is set), then draws — at absolute millimetre
        coordinates — the title strip, the customer/detail boxes, the
        line-item table frame with its column headers, the footer grid with
        totals/VAT-summary labels, the legal conditions of sale, and finally
        fills in the customer data.  Triple-quoted string statements below are
        deliberately disabled code blocks kept for reference (no-ops).
        """
        self.pdf.add_page()
        # Logo for real documents, "anteprima" watermark for previews.
        if not self.anteprima:
            all_link = os.getcwd()+"/applications/gestionale/static/images/logo.png"
        else:
            all_link = os.getcwd()+"/applications/gestionale/static/images/anteprima.png"
        self.pdf.image(all_link, x=1, y=2, w=209)
        self.pdf.set_font('Times','', 12)
        # Title strip: document type, currency, invoice number/date, page no.
        self.pdf.rect(1,50.5,208,5)
        self.pdf.set_xy(10, 52.5)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(8)
        self.pdf.write(1, self.tipo_fattura)
        self.pdf.set_font('', '')
        self.pdf.set_x(self.pdf.get_string_width(self.tipo_fattura)+25)
        self.pdf.write(1, self.valuta)
        self.pdf.set_x(115)
        self.pdf.set_font('', 'B')
        self.pdf.write(1, self.numero_fattura)
        self.pdf.set_font('', '')
        self.pdf.set_x(150)
        self.pdf.write(1, self.data_fattura)
        self.pdf.set_x(180)
        self.pdf.write(1, "Pag: {0}".format(self.pdf.page_no()))
        """
        Intestazione
        """
        # Header boxes: customer (left) and delivery/detail (right).
        self.pdf.rect(1,58,102,30) # customer box
        self.pdf.rect(105,58,104,30) # delivery/detail box
        """
        Righe
        """
        # Line-item table frame and its header band.
        self.pdf.rect(1, 90, 208.1, 141) # outer frame of the rows area
        self.pdf.rect(1, 90, 208, 6)
        self.pdf.rect(1, 90, 208, 6)
        """
        self.pdf.set_xy(8,92)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.write(1, "Trasporto a mezzo")
        self.pdf.set_xy(57,92)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.write(1, "Causale del trasporto")
        self.pdf.set_xy(106,92)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.write(1, "Inizio trasporto (data/ora)")
        self.pdf.set_xy(158.5,92)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.write(1, "Firma del conducente")
        self.pdf.set_xy(8,107)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.write(1, "Vettore")
        self.pdf.set_xy(57,107)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.write(1, "Residenza o domicilio")
        self.pdf.set_xy(106,107)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.write(1, "Data e ora del ritiro")
        self.pdf.set_xy(158.5,107)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.write(1, "Firma del conducente")
        self.pdf.set_font('', '')
        self.pdf.set_font_size(8)
        self.pdf.set_xy(8,99)
        self.pdf.write(1, self.trasporto)
        self.pdf.set_xy(57,99)
        self.pdf.write(1, self.causale)
        self.pdf.set_xy(106,99)
        self.pdf.write(1, self.inizio_trasporto)
        self.pdf.set_xy(8,114)
        self.pdf.write(1, self.vettore)
        self.pdf.set_xy(57,114)
        self.pdf.write(1, self.residenza)
        self.pdf.set_xy(106,114)
        self.pdf.write(1, self.data_ritiro)
        """
        # Column headers with vertical separators running down the rows area.
        self.pdf.set_xy(8,93)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(10)
        self.pdf.write(1, "Codice")
        self.pdf.line(27, 90, 27, 231)
        self.pdf.set_xy(50,93)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(10)
        self.pdf.write(1, "Descrizione")
        self.pdf.line(103, 90, 103, 231)
        self.pdf.set_xy(104,93)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(10)
        self.pdf.write(1, "Rif Vs. ordine")
        self.pdf.line(128, 90, 128, 231)
        self.pdf.set_xy(129,93)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(10)
        self.pdf.write(0, "U.M")
        self.pdf.line(138, 90, 138, 231)
        self.pdf.set_xy(140,93)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(10)
        self.pdf.cell(10,0, "Q.ta")
        self.pdf.line(150, 90, 150, 231)
        self.pdf.set_xy(154,93)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(10)
        self.pdf.cell(10,0, "Prezzo")
        self.pdf.line(167, 90, 167, 231)
        self.pdf.set_xy(168,93)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(10)
        self.pdf.cell(10,0, "Sconti")
        self.pdf.line(180, 90, 180, 231)
        self.pdf.set_xy(184,93)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(10)
        self.pdf.cell(10,0, "Importo")
        self.pdf.line(200, 90, 200, 231)
        self.pdf.set_xy(201,93)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(10)
        self.pdf.cell(10,0, "C.I.")
        """
        Footter
        """
        # Footer grid: totals band, VAT summary table, due-date strip.
        self.pdf.rect(1, 234, 208, 52)
        self.pdf.rect(1, 286, 208, 4)
        self.pdf.set_xy(2,237)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(7)
        self.pdf.write(1, "Totale merce")
        self.pdf.set_xy(36,237)
        self.pdf.write(1, "Sconto %")
        self.pdf.set_xy(66,237)
        self.pdf.write(1, "Netto merce")
        self.pdf.set_xy(96,237)
        self.pdf.write(1, "Spese varie")
        self.pdf.set_xy(120,237)
        self.pdf.write(1, "Spese trasporto")
        self.pdf.set_xy(150,237)
        self.pdf.write(1, "Totale imponibile")
        self.pdf.set_xy(180,237)
        self.pdf.write(1, "Totale imposta")
        self.pdf.line(1, 246, 209, 246)
        self.pdf.line(1, 270, 209, 270)
        self.pdf.line(1, 276, 209, 276)
        self.pdf.set_xy(30,273)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(8)
        self.pdf.write(1, "CONTRIBUTO AMBIENTALE CONAI ASSOLTO")
        self.pdf.set_xy(160,273)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(8)
        self.pdf.write(1, "MADE IN ITALY")
        self.pdf.set_xy(90,277)
        self.pdf.set_font('', '')
        self.pdf.set_font_size(6)
        self.pdf.write(1, "Scadenza rate e relativo importo")
        # VAT-summary column labels with their vertical separators.
        self.pdf.set_xy(2,250)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(7)
        self.pdf.write(1, "Codice Iva")
        self.pdf.line(18, 246, 18, 270)
        self.pdf.set_xy(36,250)
        self.pdf.write(1, "Spese accessorie")
        self.pdf.line(60, 246, 60, 270)
        self.pdf.set_xy(66,250)
        self.pdf.write(1, "Imponibile")
        self.pdf.line(86, 246, 86, 270)
        self.pdf.set_xy(96,250)
        self.pdf.write(1, "Iva")
        self.pdf.line(110, 246, 110, 270)
        self.pdf.set_xy(120,250)
        self.pdf.write(1, "Imposta")
        self.pdf.line(140, 246, 140, 270)
        self.pdf.set_xy(150,250)
        self.pdf.write(1, "Note")
        self.pdf.line(170, 246, 170, 270)
        self.pdf.set_xy(180,250)
        self.pdf.write(1, "Tot. documento")
        # Legal conditions of sale printed in fine print at the page bottom.
        dicitura = """CONDIZIONI DI VENDITA : la merca viaggia ad esclusivo rischio e pericolo del compratore, anche se venduta porto franco. \nNon si accettano reclami trascorsi 8 giorni dal ricevimento della merce."""
        dicitura2 = """Per qualsiasi controversia e' competente il Foro di emissione. In caso di ritardato pagamento decorrono gli interessi commerciali d'uso. Le eventuali spese di bolli per l'emmisioni di R.B. sono a carico del compratore."""
        self.pdf.set_xy(1,292)
        self.pdf.set_font('', '')
        self.pdf.set_font_size(6)
        self.pdf.cell(0,0, dicitura,0,0)
        self.pdf.set_xy(1,293)
        self.pdf.cell(0,3, dicitura2)
        # Box titles for the two header boxes drawn above.
        self.pdf.set_xy(43,60)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.write(1, "Cliente")
        self.pdf.set_xy(139,60)
        self.pdf.write(1, "Dettaglio")
        """
        Cliente
        """
        # Customer data (left box) and account data (right box).
        self.pdf.set_xy(2,65)
        self.pdf.set_font('', '')
        self.pdf.set_font_size(8)
        self.pdf.cell(50,5,self.nome,0,2)
        self.pdf.cell(50,5,self.indirizzo,0,2)
        self.pdf.cell(50,5,self.cap + " "+self.citta + " "+self.provincia,0,2)
        self.pdf.cell(50,5,"Partita IVA : "+self.pi,0,2)
        self.pdf.set_xy(106,65)
        self.pdf.cell(50,4,"Codice cliente : " + self.codice_cliente,0,2)
        self.pdf.cell(50,4,"Banca : " +self.banca,0,2)
        self.pdf.cell(50,4,"Iban : "+self.iban,0,2)
        self.pdf.cell(50,4,"Pagamento : "+self.pagamento,0,2)
        print "SELF IBAN : ",self.iban
        """
        self.pdf.set_xy(22,267+15)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.cell(0,0, "Annotazioni")
        self.pdf.set_xy(26+69,267+15)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.cell(0,0, "Peso Kg")
        self.pdf.set_xy(18+69*2,267+15)
        self.pdf.set_font('', 'B')
        self.pdf.set_font_size(12)
        self.pdf.cell(0,0, "Firma destinatario")
        self.pdf.set_xy(2,267+8)
        self.pdf.set_font('', '')
        self.pdf.set_font_size(8)
        self.pdf.write(0, self.aspetto_esteriore)
        self.pdf.set_xy(2,267+20)
        self.pdf.set_font('', '')
        self.pdf.set_font_size(8)
        self.pdf.write(0, self.annotazioni)
        self.pdf.set_xy(2+69+30,267+8)
        self.pdf.set_font('', '')
        self.pdf.set_font_size(8)
        self.pdf.write(0, self.numero_colli)
        self.pdf.set_xy(2+69*2+23,267+8)
        self.pdf.set_font('', '')
        self.pdf.set_font_size(8)
        self.pdf.write(0, self.porto)
        self.pdf.set_xy(2+69+30,267+20)
        self.pdf.set_font('', '')
        self.pdf.set_font_size(8)
        self.pdf.write(0, self.peso)
        """
def intestazione(self,nome,citta,indirizzo,cap,provincia,nazione="",cf="",pi=""):
self.nome = nome
self.citta = citta
self.indirizzo = indirizzo
| |
<filename>tacotron2/model.py<gh_stars>1-10
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import numpy as np
import math
import os
import torch
from torch import nn
from torch.nn import functional as F
import sys
from os.path import abspath, dirname
# enabling modules discovery from global entrypoint
sys.path.append(abspath(dirname(__file__)+'/../'))
from common.layers import ConvNorm, LinearNorm
from common.utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
    """Turn the cumulative attention-weight history into location features.

    A 1-d convolution over the weight history followed by a bias-free linear
    projection into the attention dimension.
    """

    def __init__(self, attention_n_filters, attention_kernel_size, attention_dim):
        super(LocationLayer, self).__init__()
        pad = int((attention_kernel_size - 1) / 2)
        self.location_conv = ConvNorm(1, attention_n_filters,
                                      kernel_size=attention_kernel_size,
                                      padding=pad, stride=1, dilation=1)
        self.location_dense = LinearNorm(attention_n_filters, attention_dim,
                                         bias=False, w_init_gain='tanh')

    def forward(self, attention_weights_cum):
        # [B, 1, T] -> [B, n_filters, T] -> [B, T, n_filters]
        features = self.location_conv(attention_weights_cum).transpose(1, 2)
        # [B, T, n_filters] -> [B, T, attention_dim]
        return self.location_dense(features)
class Attention(nn.Module):
    """Location-sensitive attention over the encoder memory.

    Scores combine the projected decoder query, the projected memory and
    location features computed from the cumulative attention weights.
    """

    def __init__(self, query_dim, memory_dim, attention_dim,
                 attention_location_n_filters, attention_location_kernel_size):
        super(Attention, self).__init__()
        self.query_layer = LinearNorm(query_dim, attention_dim, w_init_gain='tanh')
        self.memory_layer = LinearNorm(memory_dim, attention_dim, w_init_gain='tanh')
        self.v = LinearNorm(attention_dim, 1)
        self.location_layer = LocationLayer(attention_location_n_filters,
                                            attention_location_kernel_size,
                                            attention_dim)
        self.score_mask_value = -float("inf")

    def get_alignment_energies(self, query, memory, attention_weights_cum):
        """Return unnormalised alignment scores.

        PARAMS
        ------
        query: decoder output (B, decoder_dim)
        memory: encoder outputs (B, T_in, embed_dim)
        attention_weights_cum: cumulative attention weights (B, 1, max_time)

        RETURNS
        -------
        alignment (batch, max_time)
        """
        processed_memory = self.memory_layer(memory)            # [B, T_in, attn_dim]
        processed_query = self.query_layer(query.unsqueeze(1))  # [B, 1, attn_dim]
        location_features = self.location_layer(attention_weights_cum)  # [B, T_in, attn_dim]
        scores = self.v(torch.tanh(processed_query + location_features + processed_memory))
        return scores.squeeze(-1)

    def forward(self, query, memory, attention_weights_cum, mask=None):
        """Return (attention_context, attention_weights).

        PARAMS
        ------
        query: attention rnn last output [B, decoder_dim]
        memory: encoder outputs [B, T_in, embed_dim]
        attention_weights_cum: cumulative attention weights
        mask: binary mask for padded data (True entries are masked out)
        """
        energies = self.get_alignment_energies(query, memory, attention_weights_cum)
        if mask is not None:
            energies.masked_fill_(mask, self.score_mask_value)
        weights = F.softmax(energies, dim=1)                           # [B, T_in]
        context = torch.bmm(weights.unsqueeze(1), memory).squeeze(1)   # [B, embed_dim]
        return context, weights
class Prenet(nn.Module):
    """Stack of Linear+ReLU layers with always-on dropout (p=0.5).

    In training mode dropout is applied after every layer.  In inference
    mode a single Bernoulli mask is sampled from the first batch element
    and shared across the whole batch after the final layer.
    """

    def __init__(self, in_dim, sizes):
        super(Prenet, self).__init__()
        dims_in = [in_dim] + sizes[:-1]
        self.layers = nn.ModuleList(
            [LinearNorm(d_in, d_out) for (d_in, d_out) in zip(dims_in, sizes)])

    def forward(self, x, inference=False):
        if inference:
            for layer in self.layers:
                x = F.relu(layer(x), inplace=True)
            # Sample one keep-mask from the first element, broadcast to batch.
            first = x[0].unsqueeze(0)
            keep = torch.bernoulli(first.new(first.size()).fill_(0.5))
            keep = keep.expand(x.size(0), x.size(1))
            x = x * keep * 2  # scale by 1/p = 2 to keep expectations
        else:
            for layer in self.layers:
                x = F.dropout(F.relu(layer(x), inplace=True), p=0.5, training=True)
        return x
class Postnet(nn.Module):
    """Postnet
    - Five 1-d convolution with 512 channels and kernel size 5
    - tanh activation after every convolution except the last (linear) one
    """

    def __init__(self, n_mel_channels, postnet_embedding_dim,
                 postnet_kernel_size, postnet_n_convolutions):
        super(Postnet, self).__init__()
        pad = int((postnet_kernel_size - 1) / 2)

        def _conv_block(c_in, c_out, gain):
            # ConvNorm + BatchNorm pair shared by every layer of the stack.
            return nn.Sequential(
                ConvNorm(c_in, c_out, kernel_size=postnet_kernel_size,
                         stride=1, padding=pad, dilation=1, w_init_gain=gain),
                nn.BatchNorm1d(c_out))

        blocks = [_conv_block(n_mel_channels, postnet_embedding_dim, 'tanh')]
        for _ in range(1, postnet_n_convolutions - 1):
            blocks.append(
                _conv_block(postnet_embedding_dim, postnet_embedding_dim, 'tanh'))
        blocks.append(_conv_block(postnet_embedding_dim, n_mel_channels, 'linear'))
        self.convolutions = nn.ModuleList(blocks)

    def forward(self, x):
        for conv in self.convolutions[:-1]:
            x = torch.tanh(conv(x))
        return self.convolutions[-1](x)
class Encoder(nn.Module):
    """Encoder module:
    - Three 1-d convolution banks
    - Bidirectional LSTM
    """

    def __init__(self, encoder_n_convolutions, encoder_embedding_dim, encoder_kernel_size):
        super(Encoder, self).__init__()
        pad = int((encoder_kernel_size - 1) / 2)
        self.convolutions = nn.ModuleList([
            nn.Sequential(
                ConvNorm(encoder_embedding_dim, encoder_embedding_dim,
                         kernel_size=encoder_kernel_size, stride=1,
                         padding=pad, dilation=1, w_init_gain='relu'),
                nn.BatchNorm1d(encoder_embedding_dim))
            for _ in range(encoder_n_convolutions)])
        # Bidirectional, so each direction carries half the embedding dim.
        self.encoder_lstm = nn.LSTM(encoder_embedding_dim,
                                    int(encoder_embedding_dim / 2), 1,
                                    batch_first=True, bidirectional=True)

    def forward(self, x, text_lengths):
        for conv in self.convolutions:
            x = F.relu(conv(x), inplace=True)
        # [B, encoder_dim, T_in] -> [B, T_in, encoder_dim]
        x = x.transpose(1, 2)
        # pack_padded_sequence wants lengths on the host; tensors aren't
        # reversible, hence the numpy conversion.
        lengths = text_lengths.cpu().numpy()
        packed = nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
        # [B, T_in, encoder_dim]
        outputs, _ = self.encoder_lstm(packed)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
        return outputs
class Decoder(nn.Module):
    def __init__(self, n_mel_channels, n_frames_per_step,
                 encoder_embedding_dim, attention_dim,
                 attention_location_n_filters,
                 attention_location_kernel_size,
                 prenet_dim, decoder_rnn_dim,
                 max_decoder_steps, gate_threshold,
                 decoder_n_lstms, p_decoder_dropout):
        """Autoregressive mel-spectrogram decoder.

        Builds the prenet, a two-cell LSTM stack, location-sensitive
        attention, the mel projection and the stop-gate layer.  Submodule
        construction order is preserved so parameter initialization stays
        reproducible.

        n_mel_channels: mel bins per frame
        n_frames_per_step: frames emitted per decoder step
        max_decoder_steps / gate_threshold: inference stopping criteria
        decoder_n_lstms: stored but the two cells below are hard-wired
        p_decoder_dropout: dropout applied to LSTM outputs in decode()
        """
        super(Decoder, self).__init__()
        self.n_mel_channels = n_mel_channels
        self.n_frames_per_step = n_frames_per_step
        self.encoder_embedding_dim = encoder_embedding_dim
        self.decoder_rnn_dim = decoder_rnn_dim
        self.prenet_dim = prenet_dim
        self.max_decoder_steps = max_decoder_steps
        self.gate_threshold = gate_threshold
        self.decoder_n_lstms = decoder_n_lstms
        self.p_decoder_dropout = p_decoder_dropout
        self.prenet = Prenet(n_mel_channels, [prenet_dim, prenet_dim])
        # Both LSTM cells also see the attention context, hence the + encoder dim.
        self.lstm0 = nn.LSTMCell(prenet_dim + encoder_embedding_dim, decoder_rnn_dim)
        self.lstm1 = nn.LSTMCell(decoder_rnn_dim + encoder_embedding_dim, decoder_rnn_dim)
        self.attention_layer = Attention(decoder_rnn_dim, encoder_embedding_dim,
                                         attention_dim, attention_location_n_filters,
                                         attention_location_kernel_size)
        self.linear_projection = LinearNorm(decoder_rnn_dim + encoder_embedding_dim, n_mel_channels * n_frames_per_step)
        self.gate_layer = LinearNorm(decoder_rnn_dim + encoder_embedding_dim, n_frames_per_step, w_init_gain='sigmoid')
def initialize_decoder_states(self, memory, mask=None):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
PARAMS
------
memory: Encoder outputs
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
self.h0 = torch.zeros(B, self.decoder_rnn_dim).cuda()
self.c0 = torch.zeros(B, self.decoder_rnn_dim).cuda()
self.h1 = torch.zeros(B, self.decoder_rnn_dim).cuda()
self.c1 = torch.zeros(B, self.decoder_rnn_dim).cuda()
self.attention_weights = memory.new(B, MAX_TIME).zero_()
self.attention_weights_cum = memory.new(B, MAX_TIME).zero_()
self.attention_context = memory.new(B, self.encoder_embedding_dim).zero_()
self.memory = memory
self.mask = mask
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments, mel_lengths=None):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
gate_outpust: gate output energies
alignments:
"""
# (T_out, B, T_in) -> (B, T_out, T_in)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out, B, n_frames_per_step) -> (B, T_out, n_frames_per_step)
gate_outputs = torch.stack(gate_outputs).transpose(0, 1)
# (B, T_out, n_frames_per_step) -> (B, T_out)
gate_outputs = gate_outputs.contiguous().view(gate_outputs.size(0), -1)
# (T_out, B, n_mel_channels * n_frames_per_step) -> (B, T_out, n_mel_channels * n_frames_per_step)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
mel_outputs = mel_outputs.view(mel_outputs.size(0), -1, self.n_mel_channels)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
# mel lengths scale to the target length
if mel_lengths is not None:
mel_lengths *= self.n_frames_per_step
return mel_outputs, gate_outputs, alignments, mel_lengths
    def decode(self, prenet_output):
        """ Decoder step using stored states, attention and memory.

        Relies on the attributes set by initialize_decoder_states (h0/c0,
        h1/c1, attention_weights_cum, attention_context, memory, mask) and
        mutates them in place; statement order matters.

        PARAMS
        ------
        prenet_output: previous mel output, passed through the prenet
        RETURNS
        -------
        mel_output: [B, n_mel_channels * n_frames_per_step]
        gate_output: gate output energies [B, n_frames_per_step]
        attention_weights: [B, MAX_TIME]
        """
        # First LSTM cell: prenet output concatenated with attention context.
        x = torch.cat((prenet_output, self.attention_context), dim=-1)
        self.h0, self.c0 = self.lstm0(x, (self.h0, self.c0))
        x = F.dropout(self.h0, self.p_decoder_dropout, self.training)
        # Second LSTM cell also sees the (previous) attention context.
        x = torch.cat((x, self.attention_context), dim=-1)
        self.h1, self.c1 = self.lstm1(x, (self.h1, self.c1))
        self.query = F.dropout(self.h1, self.p_decoder_dropout, self.training)
        attention_weights_cumulative = self.attention_weights_cum.unsqueeze(1)
        self.attention_context, self.attention_weights = self.attention_layer(
            self.query, self.memory, attention_weights_cumulative, self.mask)
        # [B, MAX_TIME]
        # Avoid '+=' as in-place operation in case of gradient computation
        self.attention_weights_cum = self.attention_weights_cum + self.attention_weights
        x = torch.cat((self.query, self.attention_context), dim=-1)
        # [B, n_mel_channels * n_frames_per_step]
        mel_output = self.linear_projection(x)
        # [B, n_frames_per_step]
        gate_output = self.gate_layer(x)
        return mel_output, gate_output, self.attention_weights
def forward(self, memory, targets, memory_lengths, gta=False):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
targets: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
go_frame = memory.new(memory.size(0), self.n_mel_channels).zero_().unsqueeze(0)
# (B, n_mel_channels, T_out) -> (T_out, B, n_mel_channels)
targets = targets.permute(2, 0, 1)
decoder_inputs = torch.cat((go_frame, targets), dim=0)
prenet_outputs = self.prenet(decoder_inputs, inference=gta)
mask =~ get_mask_from_lengths(memory_lengths) if memory.size(0) > 1 else None
self.initialize_decoder_states(memory, mask)
mel_outputs, gate_outputs, alignments = [], [], []
# size - 1 for ignoring EOS synbol
while len(mel_outputs) < decoder_inputs.size(0) - 1:
prenet_output = prenet_outputs[len(mel_outputs)]
mel_output, gate_output, attention_weights = self.decode(prenet_output)
mel_outputs += [mel_output]
gate_outputs += [gate_output]
alignments += [attention_weights]
return self.parse_decoder_outputs(mel_outputs, gate_outputs, alignments)
def infer(self, memory, memory_lengths):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: | |
emailAddress)
emailZendesk = emailAddress
connectionRequired = False
except:
connectionRequired = True
#messageDetail.ReplyToChat("User is not connected with me")
# messageDetail.ReplyToChat("User is not connected with me. Shall I send a Connection Request? Y/N")
#
# #askQuestion(messageDetail)
#
# autoConnection = ""
# def ask(messageDetail):
#
# messageDetail.ReplyToChat("Y or N?")
# time.sleep(10)
#
# autoConnection = messageDetail.Command.MessageText
# print(autoConnection)
#
# # while 1:
# # print("waiting")
# # autoConnection = messageDetail.Command.MessageText
# # print(autoConnection)
# # if autoConnection == "Y" or autoConnection == "N":
# # break
#
# ask(messageDetail)
#
# autoConnection = messageDetail.Command.MessageText
# print(autoConnection)
#
# if autoConnection == "Y":
# messageDetail.ReplyToChat("You said Yes")
# if autoConnection == "N":
# messageDetail.ReplyToChat("You said No")
#if connectionRequired:
data_lenght = len(dataComp)
if data_lenght > 450:
try:
#print("inside > 450")
# query = "type:user " + firstName + " " + lastName + "email:" + emailAddress
query = "type:user " + emailAddress
except:
query = "type:user " + firstName + " " + lastName
#print(query)
elif data_lenght < 450:
try:
#print("inside < 450")
# query = "type:user " + firstName + " " + lastName + "email:" + emailAddress + "organization:" + company
#query = "type:user " + emailAddress + " organization:" + company
query = "type:user " + emailAddress
except:
#query = "type:user " + firstName + " " + lastName + " organization:" + company
query = "type:user " + firstName + " " + lastName
#print(query)
else:
return messageDetail.ReplyToChat("No user information available")
botlog.LogSymphonyInfo(query)
results = zendesk.search(query=query)
#print(results)
if str(results).startswith("{'results': [], 'facets': None, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat("This user does not exist on Zendesk, the name is misspelled or does not belong to this organisation. Please use this format: /createRequest @mention| subject| description")
elif str(results).startswith("{'results': [], 'facets': {'type': {'entry': 0, 'ticket': 0, 'organization': 0, 'user': 0, 'article': 0, 'group': 0}}, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat("This organisation/company does not exist in Zendesk or name is misspelled. Please use this format: /createRequest @mention| subject| description")
else:
data = json.dumps(results, indent=2)
d = json.loads(data)
#print(str(d))
for index in range(len(d["results"])):
name = d["results"][index]["name"]
email = str(d["results"][index]["email"])
#print("EmailAddress from Zendesk: " + email)
#############################
organization_id = str(d["results"][index]["organization_id"])
zendeskUser_id = str(d["results"][index]["id"])
#print(str(zendeskUser_id))
try:
# Convert the Zendesk ID to company name
conn.request("GET", "/api/v2/users/" + str(zendeskUser_id) + "/organizations.json", headers=headers)
res = conn.getresponse()
companyID = res.read()
compNameRaw = str(companyID.decode("utf-8"))
data_dict = json.loads(str(compNameRaw))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d_org = json.loads(data)
try:
org_Name = str(d_org["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("<", "<").replace("\"", """).replace("&","&").replace("'", "'").replace(">", ">")
organization = str(org_name_temp)
#print(str(organization))
except:
organization = "Company not yet created"
except:
organization = company
#############################
emailZendesk = email
else:
botlog.LogSymphonyInfo("Email Address: " + emailZendesk)
# except:
# return messageDetail.ReplyToChat("You did not enter a Requester or the requester's email address is not valid. Please use this format: /createRequest @mention, subject, description")
try:
requestSubject = str(detail[1]).replace("\ufffd", "")
#botlog.LogSymphonyInfo("Request Subject: " + requestSubject)
except:
return messageDetail.ReplyToChat("You did not enter a Subject. Please use this format: /ZDRequest @mention| subject| description")
try:
# import sys
# print(sys.stdout.encoding)
requestComment = str(detail[2]).replace("\u200b", "").replace("\n", "\\n").replace("\ufffd", "")
# requestComment = requestCommentTemp.encode('utf-8')
#botlog.LogSymphonyInfo("Request Description: " + requestComment)
#botlog.LogSymphonyInfo("**********")
except:
return messageDetail.ReplyToChat("You did not enter a description/comment. Please use this format: /ZDRequest @mention| subject| description")
conn = http.client.HTTPSConnection(_configDef['zdesk_config']['zdesk_api'])
payload = "{\n\"request\": \n{\n\"subject\": \"" + str(organization) + ":" + str(requestSubject) + "\", \n \"priority\": \"normal\",\n\"type\": \"incident\",\n\"comment\": \n{\n\"body\": \"" + str(requestComment) + "\"\n}\n}\n}"
# payload = \
# {
# 'request': {
# # 'requester': {
# # 'name': '',
# # 'email': '',
# # },
# #{
# 'subject': str(requestSubject),
# 'comment':
# {
# 'body': str(requestComment)
# },
# 'priority': "normal",
# 'type': "incident",
# 'ticket_field_entries':
# {
# 'ticket_field_id': _configDef['zdesk_config']['zdesk_sev_field'],
# 'value': 'severity_3'
# },
# }
# }
#print(type(payload))
base64Encoded = base64.b64encode(bytes((emailZendesk + "/token:" + _configDef['zdesk_config']['zdesk_password']), 'utf-8'))
base64Enc = (base64Encoded.decode("utf-8"))
#print(str(base64Enc))
base = ("Basic " + base64Enc)
#print(str(base))
headers = {
'email_address': emailZendesk +"/token",
'password': (_configDef['zdesk_config']['zdesk_password']),
'authorization': base,
'cache-control': "no-cache",
'content-type': "application/json"
}
#print(str(headers))
conn.request("POST", "/api/v2/requests.json", payload.encode('utf-8'), headers)
res = conn.getresponse()
data = res.read()
tempdata = (data.decode("utf-8"))
result = str(tempdata)
ticketidSplit = result.split(":")
ticketURLid = ticketidSplit[4][:-9]
link = _configDef['zdesk_config']['zdesk_link'] + ticketURLid
linkCreated = "<a href =\"" + _configDef['zdesk_config']['zdesk_link'] + ticketURLid + "\">" + link + "</a>"
messageDetail.ReplyToChatV2("New Zendesk Request created: " + linkCreated)
## Will show the Ticket details in a table format.
headers_ticket = {
'email_address': emailZendesk +"/token",
'password': (_configDef['zdesk_config']['zdesk_password']),
'authorization': base
}
try:
conn.request("GET", "/api/v2/tickets/" + ticketURLid + ".json", headers=headers_ticket)
res = conn.getresponse()
data = res.read()
request_raw = data.decode("utf-8")
# data_raw = json.dumps(request_raw, indent=2)
# data_dict = ast.literal_eval(data_raw)
# d = json.loads(data_dict)
data_dict = json.loads(str(request_raw))
data = json.dumps(data_dict, indent=2)
d = json.loads(data)
requestid = str(d["ticket"]["id"])
requeststatus = str(d["ticket"]["status"])
requestpriority = str(d["ticket"]["priority"])
requestseverity = str(d["ticket"]["tags"])
if (len(d["ticket"]["tags"])) == 0:
noTag = True
else:
noTag = False
notSet = True
if noTag:
sev = "Not set"
notSet = False
for index_tags in range(len(d["ticket"]["tags"])):
tags = str((d["ticket"]["tags"][index_tags]))
if tags.startswith("severity_1"):
sev = "Severity 1"
notSet = False
elif tags.startswith("severity_2"):
sev = "Severity 2"
notSet = False
elif tags.startswith("severity_3"):
sev = "Severity 3"
notSet = False
elif tags.startswith("severity_4"):
sev = "Severity 4"
notSet = False
if notSet:
sev = "Not Set"
notSet = False
requestseverity = sev
requestsubject_temps = str(d["ticket"]["subject"])
requestsubject = str(requestsubject_temps).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">")
requestdescription_temps = str(d["ticket"]["description"])
requestdescription = str(requestdescription_temps).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">").replace("\n\n \n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n", "<br/><br/>").replace("\n\n \n\n", "<br/><br/>").replace("\n\n", "<br/><br/>").replace("\n", "<br/>")
requestorganization_id = str(d["ticket"]["organization_id"])
requestrequester_id = str(d["ticket"]["requester_id"])
requestcreated_at = str(d["ticket"]["created_at"]).replace("T", " ").replace("Z", "")
requestupdated_at = str(d["ticket"]["updated_at"]).replace("T", " ").replace("Z", "")
requestassignee_id = str(d["ticket"]["assignee_id"])
except:
conn.request("GET", "/api/v2/requests/" + ticketURLid + ".json", headers=headers_ticket)
res = conn.getresponse()
data = res.read()
request_raw = data.decode("utf-8")
# data_raw = json.dumps(request_raw, indent=2)
# data_dict = ast.literal_eval(data_raw)
# d = json.loads(data_dict)
data_dict = json.loads(str(request_raw))
data = json.dumps(data_dict, indent=2)
d = json.loads(data)
requestid = str(d["request"]["id"])
requeststatus = str(d["request"]["status"])
requestpriority = str(d["request"]["priority"])
#requestseverity = str(d["request"]["severity"])
requestsubject_temps = str(d["request"]["subject"])
requestsubject = requestsubject_temps.replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">")
requestdescription_temps = str(d["request"]["description"])
requestdescription = str(requestdescription_temps).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">").replace("\n\n \n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n", "<br/><br/>").replace("\n\n \n\n", "<br/><br/>").replace("\n\n", "<br/><br/>").replace("\n", "<br/>")
requestorganization_id = str(d["request"]["organization_id"])
requestrequester_id = str(d["request"]["requester_id"])
requestcreated_at = str(d["request"]["created_at"]).replace("T", " ").replace("Z", "")
requestupdated_at = str(d["request"]["updated_at"]).replace("T", " ").replace("Z", "")
requestassignee_id = str(d["request"]["assignee_id"])
request_id = str(requestid)
request_status = str(requeststatus)
request_priority = str(requestpriority)
#request_severity = str(requestseverity)
request_severity = ("Not set")
request_subject = str(requestsubject)
request_desc = str(requestdescription)
desc = str(request_desc)
request_org = str(requestorganization_id)
request_requestor = str(requestrequester_id)
request_created = str(requestcreated_at)
request_updated = str(requestupdated_at)
# To get the name of the requester given the requesterID
headers_users = {
'email_address': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': (_configDef['zdesk_config']['zdesk_password']),
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'content-type': "application/json"
}
conn.request("GET", "/api/v2/users/" + request_requestor, headers=headers_users)
res = conn.getresponse()
userRequesterId = res.read()
tempUserRequester = str(userRequesterId.decode('utf-8'))
# data = json.dumps(tempUserRequester, indent=2)
# data_dict = ast.literal_eval(data)
data_dict = json.loads(str(tempUserRequester))
data = json.dumps(data_dict, indent=2)
d = json.loads(data)
req_name = str(d["user"]["name"])
requesterName = req_name
try:
request_assignee = str(requestassignee_id)
# To get the name of the assignee given the assigneeID
conn.request("GET", "/api/v2/users/" + request_assignee, headers=headers)
res = conn.getresponse()
userAssigneeId = res.read()
tempUserAssignee = str(userAssigneeId.decode('utf-8'))
# data = json.dumps(tempUserAssignee, indent=2)
# data_dict = ast.literal_eval(data)
data_dict = json.loads(str(tempUserAssignee))
data = json.dumps(data_dict, indent=2)
d = json.loads(str(data))
assign_name = str(d["user"]["name"])
assigneeName = assign_name
except:
assigneeName = "Not assigned"
assignee_flag = True
requesterTicket = (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "/requester/requested_tickets"
assigneeTicket = (_configDef['zdesk_config']['zdesk_url']) + "/agent/users/" + request_assignee + "/assigned_tickets"
OrgTicket = (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "/organization/tickets"
# Convert the Zendesk ID to company name
headers_org = {
'email_address': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': (_configDef['zdesk_config']['zdesk_password']),
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'content-type': "application/json"
}
conn.request("GET", "/api/v2/users/" + requestrequester_id + "/organizations.json", headers=headers_org)
res = conn.getresponse()
companyID = res.read()
compNameRaw = str(companyID.decode("utf-8"))
data_dict = json.loads(str(compNameRaw))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d = json.loads(data)
try:
org_Name = str(d["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">")
orgName = str(org_name_temp)
#print(orgName)
except:
orgName = "Company not yet created"
# table_body = ""
# | |
<reponame>larrywang0128/video_analyzer<gh_stars>0
# In[]:
##################################################
## Set up environment and load facial recognition model
##################################################
import os
import dlib
import cv2
import numpy as np
from scipy.spatial import distance as dist
# import plotly
import matplotlib
matplotlib.use('agg')  # the 'agg' backend avoids tkinter, which is not supported by AWS
import matplotlib.pyplot as plt
import time
# give path to the trained shape predictor model: shape_predictor_68_face_landmarks.dat
##### Download model from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
##### Note that the license for the iBUG 300-W dataset excludes commercial use.
##### So you should contact Imperial College London to find out if it's OK for you to use this model file in a commercial product.
from django.conf import settings
predictor_path = settings.BASE_DIR + "/MLmodels/shape_predictor_68_face_landmarks.dat"
# load face detector from dlib - identifies all faces on an image
detector = dlib.get_frontal_face_detector()
# load shape predictor - locates the positions of the 68 facial trackers
##### see tracker index here: https://www.pyimagesearch.com/2017/04/10/detect-eyes-nose-lips-jaw-dlib-opencv-python/
predictor = dlib.shape_predictor(predictor_path)
# In[]:
##################################################
## Track the position of face and its details in a video
##################################################
'''
Function face_68_tracker read a video file and return the coordinates of face positions for each frame.
Input:
- video_path: full path of video for analysis.
- verbose: a boolean with default value as False. If True, video with face trackers will be displayed.
- allow_interupt: a boolean with default value as False. If True, video analysis can be interrupted by pressing "q".
- save_video: a boolean with default value as False. If True, then a copy of video with facial marks will be created.
- save_path: Specify the full path (including filename and extension) to store the marked video. By default, the same directory of source video is used.
Suffix '_marked' is added to the source video name. Video format is .mp4
Output:
- summary: a dictionary that contains meta data of the video:
* total_frame: number of frames in total
* processed_frame: number of frames that have been processed
* fps: frame per second
* width: width of the frame
* height: height of the frame
* interupt: whether the processing has been interupted
- face_tracker: a dictionary that contains face details:
* start_times: a list of the start time for each frame
* head_positions: a list of dlib.rectangle object with (left, top, right, bottom) coordinate of the face for each frame
* tracker_shapes: a list of dlib.full_object_detection object with 68 tracker positions of the face for each frame
* tracker_coords: Each value in the list corresponds to a frame. The value is a list of tuples that covers the x-y coordinates of all trackers
- errors: a list of errors generated during analysis
'''
def face_68_tracker(video_path, verbose=True, allow_interupt=False, save_video=False, save_path=None):
    """
    Read a video file and record the face position and the 68 dlib facial
    landmarks for every frame.

    Parameters:
        video_path: full path of the video to analyse.
        verbose: if True, print progress and display each marked frame.
        allow_interupt: if True, analysis can be interrupted by pressing 'q'.
        save_video: if True, write a copy of the video with facial marks.
        save_path: full path (including filename and extension) for the marked
            video; by default the source directory is used with a '_marked'
            suffix and .mp4 format.

    Returns:
        summary: dict of video metadata (total_frame, processed_frame, fps,
            width, height, interupt).
        face_tracker: dict with per-frame lists 'start_times',
            'head_positions', 'tracker_shapes' and 'tracker_coords'. Entries
            are None for frames in which no face was detected.
        errors: list of error messages generated during analysis.

    Raises:
        Exception: if more than one face is detected in a frame.
    """
    t_start = time.time()
    if verbose:
        print("")
        print("Start processing video: %s" % video_path)
        print("...")
    # initialize outputs
    summary = {}
    face_tracker = {'start_times': [], 'head_positions': [], 'tracker_shapes': [], 'tracker_coords': []}
    errors = []
    interupt = False
    # capture the video for analysis
    cap = cv2.VideoCapture(video_path)
    if cap.isOpened():
        # store the meta data of the video
        total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = float(cap.get(cv2.CAP_PROP_FPS))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    else:
        # return error message
        errors.append("Analysis failed. Video file cannot be accessed: %s" % video_path)
        return summary, face_tracker, errors
    # create a videoWriter if a marked copy was requested
    if save_video:
        if save_path is None:
            # derive save_path from the source video: <name>_marked.mp4
            source_path, source_name = os.path.split(video_path)
            video_name, _ = os.path.splitext(source_name)
            save_path = source_path + '/' + video_name + "_marked.mp4"
        else:
            # create the directory for save_path (multi-level safe)
            save_dir = os.path.dirname(save_path)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
        # MP4V for mp4; XVID for avi
        # fourcc = cv2.VideoWriter_fourcc(*'MP4V')
        # 0x00000021 is a numeric fourcc workaround for .mp4 output on some
        # OpenCV builds -- presumably equivalent to the MP4V variant above.
        out = cv2.VideoWriter(save_path, 0x00000021, fps, (width, height))
    # initialize frame counter
    f_count = 0
    while cap.isOpened():
        # Per-frame results. NOTE: 'points' must be reset every iteration --
        # the previous code left it unset, which raised NameError when the
        # first frame had no face and silently reused stale coordinates for
        # later face-less frames.
        pos = None
        shape = None
        points = None
        check, source_frame = cap.read()  # frame is in BGR
        if not check:
            break
        # make a copy reference of source_frame
        frame = source_frame
        # calculate frame start_time
        start_time = f_count / fps
        f_count += 1
        # detect face area in the frame; the second parameter is the number
        # of upsampling passes (enlarging the frame detects more faces)
        dets = detector(frame, 0)
        if len(dets) == 1:
            # detect facial trackers
            pos = dets[0]
            shape = predictor(frame, pos)
            # convert tracker coordinates into (x, y) tuples
            points = [(shape.part(i).x, shape.part(i).y) for i in range(68)]
        elif len(dets) > 1:
            # throw an error message when more than one face is detected
            raise Exception("More than one face is dected in video.")
        # store face trackers (None entries for face-less frames)
        face_tracker['start_times'].append(start_time)
        face_tracker['head_positions'].append(pos)
        face_tracker['tracker_shapes'].append(shape)
        face_tracker['tracker_coords'].append(points)
        # display and save frame based on parameters
        if verbose or save_video:
            # draw facial position on frame; color is BGR
            frame = draw_dets(frame, pos, color=(255, 255, 0), pt=2)
            # draw facial tracker on frame
            frame = draw_shape(frame, shape, color=(255, 0, 0))
            if verbose:
                cv2.imshow(video_path, frame)
            if save_video:
                out.write(frame)
        # if interruption is allowed, then press 'q' to exit
        if allow_interupt:
            if cv2.waitKey(1) & 0xFF == ord('q'):
                interupt = True
                break
    # release everything once the job is finished
    cap.release()
    if save_video:
        out.release()
    cv2.destroyAllWindows()
    # store output
    summary['total_frame'] = total_frame
    summary['processed_frame'] = f_count
    summary['fps'] = fps
    summary['width'] = width
    summary['height'] = height
    summary['interupt'] = interupt
    # print processing time
    t_end = time.time()
    if verbose:
        print("Process Completed")
        print("Total frames: %d; Length: %.2fs" % (total_frame, total_frame / fps))
        print("Processing time: %.2fs" % (t_end - t_start))
    return summary, face_tracker, errors
# In[]:
##################################################
## Use OpenCV to draw facial position and details on frame image
##################################################
'''
Function draw_dets draws a rectangle around the face in the frame image
Input:
- img: target frame image
- dets: dlib.rectangle object for the target frame
- color: BGR color of the rectangle
- pt: thickness of line
Output:
- out_img: post processed frame image
'''
def draw_dets(img, dets, color=(0,0,0), pt=1):  # color is in BGR order
    """
    Draw a rectangle around the detected face on a frame image.

    Parameters:
        img: target frame image.
        dets: dlib.rectangle for the target frame, or None.
        color: BGR color of the rectangle.
        pt: thickness of line.

    Returns:
        out_img: the frame with the rectangle drawn; the original image is
        returned unchanged when dets is None.
    """
    # nothing to draw without a detection (use identity check, not ==)
    if dets is None:
        return img
    # identify top-left and bottom-right corner of rectangle
    left_top = (dets.left(), dets.top())
    right_bottom = (dets.right(), dets.bottom())
    # add rectangle to image
    return cv2.rectangle(img, left_top, right_bottom, color, pt)
'''
Function draw_shape draws the outline of face details on frame
Input:
- img: target frame image
- shape: dlib.full_object_detection object for the target frame
- color: BGR color of the rectangle
- pt: thickness of line
Output:
- out_img: post processed frame
'''
def draw_shape(img, shape, color=(0,0,0), pt=1):
    """
    Draw the outline of the 68 facial landmarks on a frame image.

    Parameters:
        img: target frame image.
        shape: dlib.full_object_detection for the target frame, or None.
        color: BGR color of the lines.
        pt: thickness of line.

    Returns:
        out_img: the frame with the landmark outline drawn; the original
        image is returned unchanged when shape is None.
    """
    # nothing to draw without landmarks (use identity check, not ==)
    if shape is None:
        return img
    out_img = img
    # landmarks at these indices begin a new facial region (jaw, brows,
    # nose, eyes, lips), so they are not connected to the previous landmark
    region_starts = (0, 17, 22, 27, 36, 42, 48, 60)
    points = []
    for i in range(68):
        # convert shape.part to tuple coordinates
        points.append((shape.part(i).x, shape.part(i).y))
        # connect trackers in sequence within each region
        if i not in region_starts:
            out_img = cv2.line(out_img, points[i - 1], points[i], color, pt)
    # connect additional trackers that are not consecutive (close nose base,
    # both eyes, outer lip and inner lip contours)
    out_img = cv2.line(out_img, points[35], points[30], color, pt)
    out_img = cv2.line(out_img, points[41], points[36], color, pt)
    out_img = cv2.line(out_img, points[47], points[42], color, pt)
    out_img = cv2.line(out_img, points[59], points[48], color, pt)
    out_img = cv2.line(out_img, points[67], points[60], color, pt)
    return out_img
# In[]:
##################################################
## Calculate eye_aspect_ratio for each frame, and accumulative counts of blink
##################################################
'''
Function eye_ratio_calc calculates eye_aspect_ratio given the coordinates of facial details
Input:
- tracker_coord: a list of x-y coordinates for the 68 facial trackers
- method: a string that specifies the method to calculate eye aspect ratio. "both" returns the average ratio of both eyes. "left" or "right"
returns the ratio of a single eye. Default value is "both".
Output:
- eye_ratio: the calculated eye aspect ratio
'''
def eye_ratio_calc(tracker_coord, method='both'):
if method not in ['both', 'left','right']:
raise ValueError("Invalid calculation method: %s" % method)
# eye aspect ratio for right eye
right_eye_height = (dist.euclidean(tracker_coord[37], tracker_coord[41]) + dist.euclidean(tracker_coord[38], tracker_coord[40])) * 0.5
right_eye_width = dist.euclidean(tracker_coord[36], tracker_coord[39])
right_eye_ratio = right_eye_height / right_eye_width
if method == 'right':
eye_ratio = right_eye_ratio
return eye_ratio
# eye aspect ratio for left eye
left_eye_height = (dist.euclidean(tracker_coord[43], | |
%d, geounits: %s, levels: %s',
geolevel, geounit_ids, levels)
for level in levels:
# if this geolevel is the requested geolevel
if geolevel == level.id:
searching = True
guFilter = Q(id__in=geounit_ids)
# Get the area defined by the union of the geounits
selection = safe_union(Geounit.objects.filter(guFilter))
selection = enforce_multi(selection, collapse=True)
# Begin crafting the query to get the id and geom
q_ids = Q(id__in=geounit_ids)
# create a boundary if one doesn't exist
if not boundary:
boundary = empty_geom(selection.srid)
if inside:
# Searching inside the boundary
if level != base_geolevel:
# Search by geometry
q_geom = Q(geom__within=boundary)
else:
# Search by centroid
q_geom = Q(center__intersects=boundary)
else:
# Searching outside the boundary
if level != base_geolevel:
# Search by geometry
q_geom = Q(geom__relate=(boundary, 'F********'))
else:
# Search by centroid
q_geom = Q(geom__relate=(boundary, 'F********'))
results = Geounit.objects.filter(q_ids, q_geom)
logger.debug('Found %d geounits in boundary at level %s',
results.count(), level)
units += list(results)
# if we're at the base level, and haven't collected any
# geometries, return the units here
if level == base_geolevel:
return units
# only query geolevels below (smaller in size, after the
# primary search geolevel) the geolevel parameter
elif searching:
# union the selected geometries
if len(units) == 0:
union = None
else:
# this always rebuilds the current extent of all the
# selected geounits
geoms = safe_union(
GeometryCollection(
map(lambda unit: unit.geom, units),
srid=units[0].geom.srid))
union = enforce_multi(geoms, collapse=True)
# set or merge this onto the existing selection
if union is None:
intersects = selection
else:
intersects = selection.difference(union)
if inside:
# the remainder geometry is the intersection of the
# district and the difference of the selected geounits
# and the current extent
try:
remainder = boundary.intersection(intersects)
except GEOSException, ex:
logger.info(
"Caught GEOSException while intersecting 'boundary' with 'intersects'."
)
logger.debug('Reason:', ex)
remainder = empty_geom(boundary.srid)
else:
# the remainder geometry is the geounit selection
# differenced with the boundary (leaving the
# selection that lies outside the boundary)
# differenced with the intersection (the selection
# outside the boundary and outside the accumulated
# geometry)
try:
remainder = selection.difference(boundary)
try:
remainder = remainder.intersection(intersects)
except GEOSException, ex:
logger.info(
"Caught GEOSException while intersecting 'remainder' with 'intersects'."
)
logger.debug('Reason:', ex)
remainder = empty_geom(boundary.srid)
except GEOSException, ex:
logger.info(
"Caught GEOSException while differencing 'selection' with 'boundary'."
)
logger.debug('Reason:', ex)
remainder = empty_geom(boundary.srid)
remainder = enforce_multi(remainder)
# Check if the remainder is empty -- it may have been
# converted, or errored out above, in which case we just
# have to move on.
if not remainder.empty:
if level == base_geolevel:
# Query by center
q_geom = Q(center__intersects=remainder)
else:
# Query by geom
q_geom = Q(geom__within=remainder)
units += list(level.geounit_set.filter(q_geom))
# Send back the collected Geounits
return units
    def __unicode__(self):
        """
        Represent the Geounit as a unicode string. This is the Geounit's
        name.
        """
        # Python 2 unicode-conversion hook (used by Django when displaying
        # the model instance).
        return self.name
    def aggregate(self, parent, subject=None, spatial=True):
        """
        Aggregate this geounit to the composite boundary of the geounits
        in "parent" geolevel. Compute numerical aggregates on the subject,
        if specified.

        Parameters:
            parent -- The 'parent' geolevel, which contains the smaller
                geographic units that comprise this geounit.
            subject -- The subject to aggregate and compute. If omitted,
                all subjects are computed.
            spatial -- Compute the geometric aggregates as well as
                numeric aggregates.

        Returns:
            (geo, num) -- the number of geometry updates performed and the
            number of Characteristic rows created or updated.
        """
        geo = 0
        num = 0
        # Component units share this unit's tree_code as a prefix and live
        # in the parent geolevel.
        parentunits = Geounit.objects.filter(
            tree_code__startswith=self.tree_code, geolevel__in=[parent])
        # Mark this geounit as the child of every component unit.
        parentunits.update(child=self)
        unioned = [x.geom.unary_union for x in parentunits]
        if any([x.geom_type == 'MultiPolygon' for x in unioned]):
            # Mixed geometry types: union the single polygons first, then
            # fold in each MultiPolygon one at a time.
            multis = [x for x in unioned if x.geom_type == 'MultiPolygon']
            singles = [x for x in unioned if x.geom_type != 'MultiPolygon']
            newgeo = multis[0].union(MultiPolygon(singles).unary_union)
            for other in multis[1:]:
                newgeo = newgeo.union(other)
        else:
            newgeo = MultiPolygon(unioned).unary_union
        # reform the parent units as a list of IDs
        parentunits = list(parentunits.values_list('id', flat=True))
        if newgeo is None:
            return (
                geo,
                num,
            )
        if spatial:
            difference = newgeo.difference(self.geom).area
            if difference != 0:
                # if there is any difference in the area, then assume that
                # this aggregate is an inaccurate aggregate of its parents'
                # aggregate geometry
                # all geolevels of this geounit should have the same tolerance
                tolerance = self.geolevel.all()[0].tolerance
                newsimple = newgeo.simplify(
                    preserve_topology=True, tolerance=tolerance)
                # enforce_multi is defined in redistricting.models
                self.geom = enforce_multi(newgeo)
                self.simple = enforce_multi(newsimple)
                self.save()
                geo += 1
        if subject is None:
            # No subject provided? Do all of them
            subject_qs = Subject.objects.all()
        else:
            if isinstance(subject, Subject):
                # Subject parameter is a Subject object, wrap it in a list
                subject_qs = [subject]
            elif isinstance(subject, str):
                # Subject parameter is a Subject name, filter by name
                subject_qs = Subject.objects.filter(name=subject)
            else:
                # Subject parameter is an ID, filter by ID
                subject_qs = Subject.objects.filter(id=subject)
        # aggregate data values
        for subject_item in subject_qs:
            qset = Characteristic.objects.filter(
                geounit__in=parentunits, subject=subject_item)
            aggdata = qset.aggregate(Sum('number'))['number__sum']
            # default percentage when no denominator is available
            percentage = '0000.00000000'
            if aggdata and subject_item.percentage_denominator:
                dset = Characteristic.objects.filter(
                    geounit__in=parentunits,
                    subject=subject_item.percentage_denominator)
                denominator_data = dset.aggregate(Sum('number'))['number__sum']
                if denominator_data > 0:
                    percentage = aggdata / denominator_data
            if aggdata is None:
                aggdata = "0.0"
            mychar = self.characteristic_set.filter(subject=subject_item)
            if mychar.count() < 1:
                # No existing Characteristic for this subject: create one.
                mychar = Characteristic(
                    geounit=self,
                    subject=subject_item,
                    number=aggdata,
                    percentage=percentage)
                mychar.save()
                num += 1
            else:
                # Update the existing Characteristic only when the value
                # actually changed.
                mychar = mychar[0]
                if aggdata != mychar.number:
                    mychar.number = aggdata
                    mychar.percentage = percentage
                    mychar.save()
                    num += 1
        return (
            geo,
            num,
        )
class Characteristic(models.Model):
    """
    A data value for a Geounit's Subject.

    A Characteristic is the numerical data value measured for a Geounit for
    a specific Subject. For example, this could be 1,200 for the Total
    Population of Ada County. Each (subject, geounit) pair is unique.
    """

    # The subject that this value relates to
    subject = models.ForeignKey(Subject)
    # The Geounit that this value relates to
    geounit = models.ForeignKey(Geounit)
    # The value as a raw decimal number
    number = models.DecimalField(max_digits=12, decimal_places=4)
    # The value as a percentage of the value for this geounit of the subject given as
    # the percentage_denominator (if any)
    percentage = models.DecimalField(
        max_digits=12, decimal_places=8, null=True, blank=True)

    class Meta:
        # A geounit may have at most one value per subject.
        unique_together = ("subject", "geounit")

    def __unicode__(self):
        """
        Represent the Characteristic as a unicode string. The
        Characteristic string is in the form of "Subject for Geounit:
        Number"
        """
        return u'%s for %s: %s' % (self.subject, self.geounit, self.number)
# Enumerated type used for determining a plan's state of processing.
# Each member is a (value, display label) pair.
ProcessingState = ChoicesEnum(
    UNKNOWN=(-1, 'Unknown'),
    READY=(0, 'Ready'),
    CREATING=(1, 'Creating'),
    REAGGREGATING=(2, 'Reaggregating'),
    NEEDS_REAGG=(3, 'Needs reaggregation'),
)
class Plan(models.Model):
"""
A collection of Districts for an area of coverage, like a state.
A Plan is created by a user to represent multiple Districts. A Plan
may be a template (created by admins, copyable by all users), or shared
(created by users, copyable by all users). In addition, Plans are
versioned; the Plan version is the most recent version of all Districts
that are a part of this Plan.
"""
# The name of this plan
name = models.CharField(max_length=200)
# A description of the plan
description = models.CharField(max_length=500, db_index=True, blank=True)
# Is this plan a template?
is_template = models.BooleanField(default=False)
# Is this plan shared?
is_shared = models.BooleanField(default=False)
# The processing state of this plan (see ProcessingState Enum)
processing_state = models.IntegerField(
choices=ProcessingState.choices(), default=ProcessingState.UNKNOWN)
# Is this plan considered a valid plan based on validation criteria?
is_valid = models.BooleanField(default=False)
# The most recent version of the districts in this plan.
version = models.PositiveIntegerField(default=0)
# The oldest available stored version of this plan.
min_version = models.PositiveIntegerField(default=0)
# The time when this Plan was created.
created = models.DateTimeField(auto_now_add=True)
# The time when this Plan was edited.
edited = models.DateTimeField(auto_now=True)
# The owner of this Plan
owner = models.ForeignKey(User)
# The legislative body that this plan is for
legislative_body = models.ForeignKey(LegislativeBody)
# A flag to indicate that upon post_save, when a plan is created,
# it should create an Unassigned district. There are times when
# this behaviour should be skipped (when copying plans, for example)
create_unassigned = True
    def __unicode__(self):
        """
        Represent the Plan as a unicode string. This is the Plan's name.
        """
        # Python 2 unicode-conversion hook (used by Django when displaying
        # the model instance).
        return self.name
    class Meta:
        """
        Define a unique constraint on 3 fields of this model: a user may
        not own two plans with the same name for the same legislative body.
        """

        unique_together = (
            'name',
            'owner',
            'legislative_body',
        )
def is_community(self):
| |
rotational axis and object's rotational axis
obj_trans: object's rave_body transformation
robot_trans: robot gripper's rave_body transformation
axises: rotational axises of the object
arm_joints: list of robot joints
"""
local_dir = np.array([0.0, 0.0, 1.0])
obj_dir = np.dot(obj_trans[:3, :3], local_dir)
world_dir = robot_trans[:3, :3].dot(local_dir)
obj_dir = obj_dir / np.linalg.norm(obj_dir)
world_dir = world_dir / np.linalg.norm(world_dir)
rot_val = np.array([[np.abs(np.dot(obj_dir, world_dir)) - 1]])
# computing robot's jacobian
arm_jac = np.array(
[
np.dot(obj_dir, np.cross(joint.GetAxis(), world_dir))
for joint in arm_joints
]
).T.copy()
arm_jac = arm_jac.reshape((1, len(arm_joints)))
base_jac = np.array(np.dot(obj_dir, np.cross([0, 0, 1], world_dir)))
base_jac = np.array([[0, 0, base_jac]])
# computing object's jacobian
obj_jac = np.array(
[np.dot(world_dir, np.cross(axis, obj_dir)) for axis in axises]
)
obj_jac = np.r_[[0, 0, 0], obj_jac].reshape((1, 6))
# Create final 1x26 jacobian matrix
rot_jac = np.hstack(
(base_jac, np.zeros((1, 1)), arm_jac, np.zeros((1, 9)), obj_jac)
)
return (rot_val, rot_jac)
class PR2InGripperPosRight(PR2InGripperRight):
    # InGripper, Robot, Can
    def __init__(self, name, params, expected_param_types, env=None, debug=False):
        """Right-arm positional in-gripper predicate."""
        # constraint weights used by the solver
        self.coeff = const.IN_GRIPPER_COEFF
        self.opt_coeff = const.INGRIPPER_OPT_COEFF
        # value / gradient handles evaluated on the flattened state vector
        self.eval_f = lambda state: self.pos_check(state)[0]
        self.eval_grad = lambda state: self.pos_check(state)[1]
        super(PR2InGripperPosRight, self).__init__(
            name, params, expected_param_types, env, debug
        )

    def pos_error(self, obj_trans, robot_trans, axises, arm_joints):
        """
        Displacement between the centre of the gripper and the centre of
        the object, with its jacobian.

        obj_trans: object's rave_body transformation (4x4).
        robot_trans: robot gripper's rave_body transformation (4x4).
        axises: rotational axes of the object.
        arm_joints: list of robot arm joints.

        Returns (value, jacobian): a (3, 1) error vector and a (3, 26)
        jacobian whose columns follow the hstack below (torso, zero block,
        right arm, zero block, base, object pose/rotation).
        """
        grasp_pt = np.array([0, 0, 0])
        gripper_xyz = robot_trans[:3, 3]
        object_xyz = obj_trans[:3, 3]
        pos_val = (gripper_xyz.flatten() - object_xyz.flatten()).reshape((3, 1))
        # one column per arm joint: axis x (p_gripper - p_anchor)
        arm_cols = [
            np.cross(jnt.GetAxis(), gripper_xyz.flatten() - jnt.GetAnchor())
            for jnt in arm_joints
        ]
        arm_jac = np.array(arm_cols).T.copy()
        # base columns: identity for x/y translation, rotation-about-z for theta
        base_jac = np.eye(3)
        base_jac[:, 2] = np.cross(np.array([0, 0, 1]), gripper_xyz - self.x[17:20])
        # torso (back height) column
        torso_jac = np.array([[0], [0], [1]])
        # object columns: translation block followed by rotation block
        rot_cols = [
            np.cross(axis, object_xyz - grasp_pt - obj_trans[:3, 3].flatten())
            for axis in axises
        ]
        obj_jac = -1 * np.array(rot_cols).T
        obj_jac = np.c_[-np.eye(3), obj_jac]
        # assemble the final 3x26 jacobian
        pos_jac = np.hstack(
            (torso_jac, np.zeros((3, 8)), arm_jac, np.zeros((3, 1)), base_jac, obj_jac)
        )
        return (pos_val, pos_jac)
class PR2InGripperRotRight(PR2InGripperRight):
    # InGripper, Robot, Can
    def __init__(self, name, params, expected_param_types, env=None, debug=False):
        """Right-arm rotational in-gripper predicate."""
        # constraint weights used by the solver
        self.coeff = const.IN_GRIPPER_COEFF
        self.opt_coeff = const.INGRIPPER_OPT_COEFF
        # value / gradient handles evaluated on the flattened state vector
        self.eval_f = lambda state: self.rot_check(state)[0]
        self.eval_grad = lambda state: self.rot_check(state)[1]
        super(PR2InGripperRotRight, self).__init__(
            name, params, expected_param_types, env, debug
        )
class PR2InGripperPosLeft(PR2InGripperLeft):
    # InGripper, Robot, Can
    def __init__(self, name, params, expected_param_types, env=None, debug=False):
        """Left-arm positional in-gripper predicate."""
        # constraint weights used by the solver
        self.coeff = const.IN_GRIPPER_COEFF
        self.opt_coeff = const.INGRIPPER_OPT_COEFF
        # value / gradient handles evaluated on the flattened state vector
        self.eval_f = lambda state: self.pos_check(state)[0]
        self.eval_grad = lambda state: self.pos_check(state)[1]
        super(PR2InGripperPosLeft, self).__init__(
            name, params, expected_param_types, env, debug
        )

    def pos_error(self, obj_trans, robot_trans, axises, arm_joints):
        """
        Displacement between the centre of the gripper and the centre of
        the object, with its jacobian.

        obj_trans: object's rave_body transformation (4x4).
        robot_trans: robot gripper's rave_body transformation (4x4).
        axises: rotational axes of the object.
        arm_joints: list of robot arm joints.

        Returns (value, jacobian): a (3, 1) error vector and a (3, 26)
        jacobian; unlike the right-arm variant, the left-arm columns come
        directly after the torso column.
        """
        grasp_pt = np.array([0, 0, 0])
        gripper_xyz = robot_trans[:3, 3]
        object_xyz = obj_trans[:3, 3]
        pos_val = (gripper_xyz.flatten() - object_xyz.flatten()).reshape((3, 1))
        # one column per arm joint: axis x (p_gripper - p_anchor)
        arm_cols = [
            np.cross(jnt.GetAxis(), gripper_xyz.flatten() - jnt.GetAnchor())
            for jnt in arm_joints
        ]
        arm_jac = np.array(arm_cols).T.copy()
        # base columns: identity for x/y translation, rotation-about-z for theta
        base_jac = np.eye(3)
        base_jac[:, 2] = np.cross(np.array([0, 0, 1]), gripper_xyz - self.x[17:20])
        # torso (back height) column
        torso_jac = np.array([[0], [0], [1]])
        # object columns: translation block followed by rotation block
        rot_cols = [
            np.cross(axis, object_xyz - grasp_pt - obj_trans[:3, 3].flatten())
            for axis in axises
        ]
        obj_jac = -1 * np.array(rot_cols).T
        obj_jac = np.c_[-np.eye(3), obj_jac]
        # assemble the final 3x26 jacobian
        pos_jac = np.hstack((torso_jac, arm_jac, np.zeros((3, 9)), base_jac, obj_jac))
        return (pos_val, pos_jac)
class PR2InGripperRotLeft(PR2InGripperLeft):
    # InGripper, Robot, Can
    def __init__(self, name, params, expected_param_types, env=None, debug=False):
        """Left-arm rotational in-gripper predicate."""
        # constraint weights used by the solver
        self.coeff = const.IN_GRIPPER_COEFF
        self.opt_coeff = const.INGRIPPER_OPT_COEFF
        # value / gradient handles evaluated on the flattened state vector
        self.eval_f = lambda state: self.rot_check(state)[0]
        self.eval_grad = lambda state: self.rot_check(state)[1]
        super(PR2InGripperRotLeft, self).__init__(
            name, params, expected_param_types, env, debug
        )
class PR2BothEndsInGripper(PR2InGripper):
    # PR2BothEndsInGripper Robot, Can
    def __init__(self, name, params, expected_param_types, env=None, debug=False):
        # Residual stacks one 3-d position error per gripper (left + right).
        self.eval_dim = 6
        super(PR2BothEndsInGripper, self).__init__(
            name, params, expected_param_types, env, debug
        )

    # @profile
    def both_arm_pos_check_f(self, x):
        """
        Positional residual for holding the object at both ends.

        Stacks the left-gripper error on top of the right-gripper error,
        each measured against a point 0.1 beyond the corresponding end of
        the object along its local z axis.
        """
        robot_body = self.robot.openrave_body
        body = robot_body.env_body
        # NOTE(review): self.arm is mutated here; robot_obj_kinematics
        # presumably dispatches on it -- confirm before reordering.
        self.arm = "left"
        obj_trans, robot_trans, axises, arm_joints = self.robot_obj_kinematics(x)
        l_ee_trans, l_arm_inds = self.get_robot_info(robot_body, "left")
        l_arm_joints = [body.GetJointFromDOFIndex(ind) for ind in l_arm_inds]
        r_ee_trans, r_arm_inds = self.get_robot_info(robot_body, "right")
        r_arm_joints = [body.GetJointFromDOFIndex(ind) for ind in r_arm_inds]
        # left gripper targets the +z end, right gripper the -z end
        rel_pt = np.array([0, 0, self.obj.geom.height / 2.0 + 0.1])
        l_pos_val = self.rel_pos_error_f(obj_trans, l_ee_trans, rel_pt)
        rel_pt = np.array([0, 0, -self.obj.geom.height / 2.0 - 0.1])
        r_pos_val = self.rel_pos_error_f(obj_trans, r_ee_trans, rel_pt)
        return np.vstack([l_pos_val, r_pos_val])

    # @profile
    def both_arm_pos_check_jac(self, x):
        """
        Jacobian of both_arm_pos_check_f, stacked left over right.

        The right-arm block is computed first, then the left; the object
        pose and self.arm are re-applied before each computation because
        robot_obj_kinematics appears to be arm-dependent.
        """
        robot_body = self.robot.openrave_body
        body = robot_body.env_body
        self.set_robot_poses(x, robot_body)
        l_ee_trans, l_arm_inds = self.get_robot_info(robot_body, "left")
        r_ee_trans, r_arm_inds = self.get_robot_info(robot_body, "right")
        self.arm = "right"
        obj_body = self.obj.openrave_body
        obj_body.set_pose(x[-6:-3], x[-3:])
        obj_trans, robot_trans, axises, arm_joints = self.robot_obj_kinematics(x)
        rel_pt = np.array([0, 0, -self.obj.geom.height / 2.0 - 0.1])
        r_obj_pos_jac = self.rel_pos_error_jac(
            obj_trans, r_ee_trans, axises, arm_joints, rel_pt
        )
        self.arm = "left"
        obj_body = self.obj.openrave_body
        obj_body.set_pose(x[-6:-3], x[-3:])
        obj_trans, robot_trans, axises, arm_joints = self.robot_obj_kinematics(x)
        rel_pt = np.array([0, 0, self.obj.geom.height / 2.0 + 0.1])
        l_obj_pos_jac = self.rel_pos_error_jac(
            obj_trans, l_ee_trans, axises, arm_joints, rel_pt
        )
        return np.vstack([l_obj_pos_jac, r_obj_pos_jac])

    def stacked_f(self, x):
        # scale the stacked residual by the constraint coefficient
        return np.vstack([self.coeff * self.both_arm_pos_check_f(x)])

    def stacked_grad(self, x):
        # scale the stacked jacobian by the constraint coefficient
        return np.vstack([self.coeff * self.both_arm_pos_check_jac(x)])
class PR2CloseGrippers(robot_predicates.InContacts):
    def __init__(self, name, params, expected_param_types, env=None, debug=False):
        """Predicate over both PR2 gripper joints (see InContacts)."""
        # gripper joint limit constants consumed by the base predicate
        self.GRIPPER_CLOSE = const.GRIPPER_CLOSE_VALUE
        self.GRIPPER_OPEN = const.GRIPPER_OPEN_VALUE
        # index entries 1 and 3 of the robot's attribute map
        # (presumably lGripper and rGripper -- confirm against ATTRMAP)
        robot = params[0]
        attr_map = ATTRMAP[robot._type]
        self.attr_inds = OrderedDict([(robot, [attr_map[1], attr_map[3]])])
        super(PR2CloseGrippers, self).__init__(
            name, params, expected_param_types, env, debug
        )
class PR2EEReachable(robot_predicates.EEReachable):
# EEUnreachable Robot, StartPose, EEPose
def __init__(
    self,
    name,
    params,
    expected_param_types,
    env=None,
    debug=False,
    steps=const.EEREACHABLE_STEPS,
):
    """Set up attribute indices for the robot and EE pose parameters.

    Per the predicate signature (Robot, StartPose, EEPose), params[0] is
    the robot and params[2] the end-effector pose.
    """
    robot, ee_pose = params[0], params[2]
    self.attr_inds = OrderedDict(
        [
            (robot, list(ATTRMAP[robot._type])),
            (ee_pose, list(ATTRMAP[ee_pose._type])),
        ]
    )
    # Width of the state slice consumed per timestep by stacked_f/grad.
    self.attr_dim = 26
    super(PR2EEReachable, self).__init__(
        name, params, expected_param_types, env, debug, steps
    )
def resample(self, negated, t, plan):
    """Delegate resampling to the shared EE-reachable resampler.

    :param negated: whether the predicate is negated.
    :param t: timestep at which to resample.
    :param plan: the plan currently being refined.
    :returns: whatever ``ee_reachable_resample`` returns for this predicate.
    """
    return ee_reachable_resample(self, negated, t, plan)
def set_robot_poses(self, x, robot_body):
    """Write the PR2 configuration contained in *x* onto *robot_body*.

    Layout of x: [0] torso (back) height, [1:8] left arm, [8] left
    gripper, [9:16] right arm, [16] right gripper, [17:20] base pose.
    """
    robot_body.set_pose(x[17:20])
    robot_body.set_dof(
        {
            "backHeight": x[0],
            "lArmPose": x[1:8],
            "lGripper": x[8],
            "rArmPose": x[9:16],
            "rGripper": x[16],
        }
    )
def get_robot_info(self, robot_body):
    """Return the right-gripper tool transform and right-arm DOF indices."""
    env_body = robot_body.env_body
    tool_frame = env_body.GetLink("r_gripper_tool_frame")
    arm_indices = env_body.GetManipulator("rightarm").GetArmIndices()
    return tool_frame.GetTransform(), arm_indices
def get_rel_pt(self, rel_step):
    """Relative grasp offset for timestep *rel_step*.

    Non-positive steps approach along the x axis; positive steps retreat
    along the z axis, each scaled by the step number.
    """
    if rel_step > 0:
        return rel_step * np.array([0, 0, const.RETREAT_DIST])
    return rel_step * np.array([const.APPROACH_DIST, 0, 0])
def stacked_f(self, x):
    """Evaluate the EE-pose constraint at every active timestep and stack
    the per-step values vertically."""
    start, end = self.active_range
    values = []
    offset = 0
    for step in range(start, end + 1):
        rel_pt = self.get_rel_pt(step)
        state_slice = x[offset : offset + self.attr_dim]
        values.append(self.ee_pose_check_rel_obj(state_slice, rel_pt)[0])
        offset += self.attr_dim
    return np.vstack(tuple(values))
def stacked_grad(self, x):
    """Block-diagonal Jacobian of the EE-pose constraint over all active
    timesteps.

    Each timestep contributes a 3-row block placed on the diagonal of a
    ``(3*T, attr_dim*T)`` matrix, T = 2*steps + 1.

    Fix: removed the ``f_grad`` accumulator that the original created
    but never used.
    """
    start, end = self.active_range
    n_steps = 2 * self._steps + 1  # total number of active timesteps
    dim_f = 3                      # rows contributed per timestep
    grad = np.zeros((dim_f * n_steps, self.attr_dim * n_steps))
    row, col = 0, 0
    for step in range(start, end + 1):
        rel_pt = self.get_rel_pt(step)
        grad[row : row + dim_f, col : col + self.attr_dim] = self.ee_pose_check_rel_obj(
            x[col : col + self.attr_dim], rel_pt
        )[1]
        row += dim_f
        col += self.attr_dim
    return grad
def pos_error_rel_to_obj(self, obj_trans, robot_trans, axises, arm_joints, rel_pt):
"""
This function calculates the value and the jacobian of the displacement between center of gripper and a point relative to the object
obj_trans: object's rave_body transformation
robot_trans: robot gripper's rave_body transformation
axises: rotational axises of the object
arm_joints: list of robot joints
"""
gp = rel_pt
robot_pos = robot_trans[:3, 3]
obj_pos = np.dot(obj_trans, np.r_[gp, 1])[:3]
dist_val = (robot_pos.flatten() - obj_pos.flatten()).reshape((3, 1))
# Calculate the joint jacobian
arm_jac = np.array(
[
np.cross(joint.GetAxis(), robot_pos.flatten() - joint.GetAnchor())
for joint in arm_joints
]
).T.copy()
# Calculate jacobian for the robot base
base_jac = np.eye(3)
base_jac[:, 2] = np.cross(np.array([0, 0, 1]), | |
<filename>src/RanorexLibrary.py
#####################################################################
### File created by <NAME>, 2018 ###
#####################################################################
from distutils.util import strtobool
from robot.api import logger
import time
class RanorexLibrary(object):
""" The RanorexLibrary main object.
It is imported into a Robot test suite file in the "Library" section:
*** Settings ***
Documentation This is the file where the RanorexLibrary will be imported.
Library RanorexLibrary path\\to\\Ranorex
The RanorexLibrary takes one argument:
The path to Ranorex has to be given so the RanorexLibrary knows where to import the Ranorex .dll files from. Normally this path looks something like this: C:\\Program Files (x86)\\Ranorex 8.3Beta. Please make sure to use double back slashes (because of Robot-reasons).
"""
__version__ = '0.1'
ROBOT_LIBRARY_SCOPE = 'TEST_SUITE'
ROBOT_LIBRARY_DOC_FORMAT = 'reST'
_logLevel = "INFO"
def __init__(self, pathToRanorex = "C:\\Program Files (x86)\\Ranorex\\Studio\\Bin"):
    """Load the Ranorex .NET assemblies and initialize the Ranorex core.

    :param pathToRanorex: directory containing the Ranorex .dll files.
    """
    import setupRanorexLibrary
    setupRanorexLibrary.importDlls(pathToRanorex)
    # The Ranorex and System (.NET) namespaces are only importable after
    # the DLLs are loaded above, so import them here and publish them as
    # module globals for all other keywords to use.
    global Ranorex
    import Ranorex
    global System
    import System
    Ranorex.Core.Resolver.AssemblyLoader.Initialize()
    Ranorex.TestingBootstrapper.SetupCore()
    # Default interaction timings (milliseconds) and global speed factor.
    Ranorex.Mouse.DefaultMoveTime = 300
    Ranorex.Keyboard.DefaultKeyPressTime = 100
    Ranorex.Delay.SpeedFactor = 1
def _log(self, msg):
    """Write *msg* to the Robot Framework log at the configured level."""
    level = self._logLevel
    logger.write(msg, level, html=False)
def run_application(self, appname, arguments = "", workingDirectory = "", maximized = "False"):
    """ Runs an Application.

    This is the suggested way to run an application using Ranorex functionality.

    :param appname: path to the executable file of the application to be started.
    :param arguments: command line arguments passed to the started application.
    :param workingDirectory: working directory Ranorex gives the application.
    :param maximized: whether to open the application maximized. Any
        capitalisation of "true" is accepted; everything else means False.

    Example:
    | `Run Application` | calc.exe |
    | `Run Application` | C:\\Program Files\\Internet Explorer\\iexplore.exe | | | True |
    | `Run Application` | yourApp.exe | /help | C:\\path\\to\\yourWorkingDirectory | False |
    """
    self._log("Starting application " + appname + ".")
    # Fix: the original only recognised the exact string "True"; accept
    # any capitalisation (backward compatible — "True" still maps to True).
    maxim = str(maximized).strip().lower() == "true"
    Ranorex.Host.Local.RunApplication(appname, arguments, workingDirectory, maxim)
def close_application(self, ranorexpath, gracePeriod = "0"):
    """ Closes the application that contains the UI element *ranorexpath*.

    :param ranorexpath: RanoreXPath of an element inside the application
        whose parent process should be closed.
    :param gracePeriod: milliseconds to wait before the application is
        killed if it has not closed by itself; 0 disables killing.
    :returns: True if the application closed within the grace period.

    Example:
    | `Close Application` | /winapp[@packagename='...'] | |
    | `Close Application` | /winapp[@packagename='...']//button[...] | 300 |
    """
    self._log("Closing application with element " + ranorexpath + " within " + gracePeriod + "ms.")
    grace_ms = int(gracePeriod)
    return Ranorex.Host.Current.CloseApplication(ranorexpath, grace_ms)
def click(self, ranorexpath, location = "Center", mousebutton = "Left", duration = "Ranorex.Mouse.DefaultMoveTime", count = "1"):
    """ Performs a mouse click on a UI element.

    :param ranorexpath: RanoreXPath of the element to click.
    :param location: click location on the element (Center, CenterLeft,
        CenterRight, LowerCenter, LowerRight, LowerLeft, UpperCenter,
        UpperLeft, UpperRight).
    :param mousebutton: mouse button name per the .NET
        System.Windows.Forms.MouseButtons enumeration.
    :param duration: duration of the mouse move in ms (default 300 ms).
    :param count: number of clicks to perform.
    """
    self._log("Clicking on element " + ranorexpath + " at location " + location + " " + count + " time(s) with " + mousebutton + " mouse button, taking " + duration + " ms.")
    # Empty strings (Robot's "use the default" convention) fall back to
    # the documented defaults before the click is dispatched.
    mousebutton = mousebutton or "Left"
    location = location or "Center"
    count = count or "1"
    duration = duration or "Ranorex.Mouse.DefaultMoveTime"
    self._click(ranorexpath, location, mousebutton, duration, count)
def _normalizeMouseButton(self, mousebutton):
    """Map a plain button name to its fully qualified .NET enum path."""
    return "System.Windows.Forms.MouseButtons.{}".format(mousebutton)
def _normalizeLocation(self, location):
    """Map a plain location name to its fully qualified Ranorex path."""
    return "Ranorex.Location.{}".format(location)
def _click(self, ranorexpath, location, mousebutton, duration, count):
    """Resolve the string arguments to Ranorex/.NET objects and click.

    Fix: replaces the original ``exec`` on interpolated strings (fragile
    and an injection hazard) with attribute lookups. *mousebutton* and
    *location* are plain names (e.g. "Left", "Center"); *duration* is a
    millisecond count or the literal "Ranorex.Mouse.DefaultMoveTime".
    """
    button_obj = getattr(System.Windows.Forms.MouseButtons, mousebutton)
    location_obj = getattr(Ranorex.Location, location)
    if duration == "Ranorex.Mouse.DefaultMoveTime":
        duration_ms = Ranorex.Mouse.DefaultMoveTime
    else:
        duration_ms = int(duration)
    Ranorex.Unknown(ranorexpath).Click(button_obj, location_obj, int(count), duration_ms)
def right_click(self, ranorexpath, location = "Center", duration = "Ranorex.Mouse.DefaultMoveTime", count = "1"):
    """ Performs a right click on a UI element.

    Equivalent to `Click` with "Right" as the mouse button.

    :param ranorexpath: RanoreXPath of the element to click.
    :param location: click location on the element (Center, CenterLeft,
        CenterRight, LowerCenter, LowerRight, LowerLeft, UpperCenter,
        UpperLeft, UpperRight).
    :param duration: duration of the mouse move in ms (default 300 ms).
    :param count: number of clicks to perform.
    """
    self._log("Right clicking on element " + ranorexpath + " at location " + location + " " + count + " time(s) , taking " + duration + " ms.")
    # Empty strings fall back to the documented defaults.
    location = location or "Center"
    count = count or "1"
    duration = duration or "Ranorex.Mouse.DefaultMoveTime"
    self._click(ranorexpath, location, "Right", duration, count)
def double_click(self, ranorexpath, location = "Center", mousebuttons = "Left", duration = "Ranorex.Mouse.DefaultMoveTime"):
    """ Performs a double click on a UI element.

    :param ranorexpath: RanoreXPath of the element to click.
    :param location: click location on the element (Center, CenterLeft,
        CenterRight, LowerCenter, LowerRight, LowerLeft, UpperCenter,
        UpperLeft, UpperRight).
    :param mousebuttons: mouse button name per the .NET
        System.Windows.Forms.MouseButtons enumeration.
    :param duration: duration of the mouse move in ms (default 300 ms).
    """
    self._log("Double clicking on element " + ranorexpath + " at location " + location + " with " + mousebuttons + " mouse button, taking " + duration + " ms.")
    if location == "":
        location = "Center"
    if mousebuttons == "":
        mousebuttons = "Left"
    if duration == "":
        duration = "Ranorex.Mouse.DefaultMoveTime"
    # Fix: resolve the .NET/Ranorex objects via getattr instead of the
    # original exec on interpolated strings (injection hazard).
    button_obj = getattr(System.Windows.Forms.MouseButtons, mousebuttons)
    location_obj = getattr(Ranorex.Location, location)
    if duration == "Ranorex.Mouse.DefaultMoveTime":
        duration_ms = Ranorex.Mouse.DefaultMoveTime
    else:
        duration_ms = int(duration)
    Ranorex.Unknown(ranorexpath).DoubleClick(button_obj, location_obj, duration_ms)
def _moveMouseToElement(self, ranorexpath, location, duration):
    """Move the mouse onto the element identified by *ranorexpath*.

    Callers pass *location* already normalized to the dotted form
    "Ranorex.Location.<Name>" and *duration* as a millisecond string or
    "Ranorex.Mouse.DefaultMoveTime". Fix: resolve both via attribute
    lookup instead of the original ``exec`` on interpolated strings.
    """
    location_obj = getattr(Ranorex.Location, location.rsplit(".", 1)[-1])
    if duration == "Ranorex.Mouse.DefaultMoveTime":
        duration_ms = Ranorex.Mouse.DefaultMoveTime
    else:
        duration_ms = int(duration)
    Ranorex.Unknown(ranorexpath).MoveTo(location_obj, duration_ms)
def mouse_down(self, ranorexpath, location = "Center", button = "Left", duration = "Ranorex.Mouse.DefaultMoveTime"):
""" Performs a Mouse Down action on a UI element.
:param ranorexpath: This is the RanoreXPath of the element that gets clicked.
:param location: The location where the element should be clicked. Possible values: Center, CenterLeft, CenterRight, LowerCenter, LowerRight, LowerLeft, UpperCenter, UpperLeft, UpperRight
:param mousebutton: Which mouse button should be clicked. Possible values are listed on the corresponding .NET framework page: https://msdn.microsoft.com/de-de/library/system.windows.forms.mousebuttons(v=vs.110).aspx
:param duration: The duration of the mouse click in ms. Defaults value is 300ms.
Example:
| `Mouse Down` | /win<EMAIL>[@<EMAIL>='<EMAIL>']//button[@automationid='num1Button'] | | | |
| `Mouse Down` | /winapp[@<EMAIL>='<EMAIL>']//button[@automationid='num1Button'] | LowerCenter | Right | 250 |
"""
self._log("Mouse down on element " + ranorexpath + " at location " + location + " with " + button + " mouse button, taking " + duration + " ms.")
if location == "":
location = "Center"
if button == "":
button = "Left"
if duration == "":
duration = "Ranorex.Mouse.DefaultMoveTime"
location = self._normalizeLocation(location)
button = self._normalizeMouseButton(button)
self._moveMouseToElement(ranorexpath, location, duration)
exec("Ranorex.Mouse.ButtonDown(" + button | |
Raises
======
ValueError
This error is raised when the coefficient matrix, non-homogeneous term
or the antiderivative, if passed, are not a matrix or
do not have correct dimensions
NonSquareMatrixError
When the coefficient matrix or its antiderivative, if passed is not a
square matrix
NotImplementedError
If the coefficient matrix does not have a commutative antiderivative
See Also
========
linear_ode_to_matrix: Coefficient matrix computation function
canonical_odes: System of ODEs representation change
linodesolve_type: Getting information about systems of ODEs to pass in this solver
"""
if not isinstance(A, MatrixBase):
raise ValueError(filldedent('''\
The coefficients of the system of ODEs should be of type Matrix
'''))
if not A.is_square:
raise NonSquareMatrixError(filldedent('''\
The coefficient matrix must be a square
'''))
if b is not None:
if not isinstance(b, MatrixBase):
raise ValueError(filldedent('''\
The non-homogeneous terms of the system of ODEs should be of type Matrix
'''))
if A.rows != b.rows:
raise ValueError(filldedent('''\
The system of ODEs should have the same number of non-homogeneous terms and the number of
equations
'''))
if B is not None:
if not isinstance(B, MatrixBase):
raise ValueError(filldedent('''\
The antiderivative of coefficients of the system of ODEs should be of type Matrix
'''))
if not B.is_square:
raise NonSquareMatrixError(filldedent('''\
The antiderivative of the coefficient matrix must be a square
'''))
if A.rows != B.rows:
raise ValueError(filldedent('''\
The coefficient matrix and its antiderivative should have same dimensions
'''))
if not any(type == "type{}".format(i) for i in range(1, 7)) and not type == "auto":
raise ValueError(filldedent('''\
The input type should be a valid one
'''))
n = A.rows
# constants = numbered_symbols(prefix='C', cls=Dummy, start=const_idx+1)
Cvect = Matrix(list(Dummy() for _ in range(n)))
if any(type == typ for typ in ["type2", "type4", "type6"]) and b is None:
b = zeros(n, 1)
is_transformed = tau is not None
passed_type = type
if type == "auto":
system_info = linodesolve_type(A, t, b=b)
type = system_info["type_of_equation"]
B = system_info["antiderivative"]
if type in ("type5", "type6"):
is_transformed = True
if passed_type != "auto":
if tau is None:
system_info = _first_order_type5_6_subs(A, t, b=b)
if not system_info:
raise ValueError(filldedent('''
The system passed isn't {}.
'''.format(type)))
tau = system_info['tau']
t = system_info['t_']
A = system_info['A']
b = system_info['b']
if type in ("type1", "type2", "type5", "type6"):
P, J = matrix_exp_jordan_form(A, t)
P = simplify(P)
if type in ("type1", "type5"):
sol_vector = P * (J * Cvect)
else:
Jinv = J.subs(t, -t)
sol_vector = P * J * ((Jinv * P.inv() * b).applyfunc(lambda x: Integral(x, t)) + Cvect)
else:
if B is None:
B, _ = _is_commutative_anti_derivative(A, t)
if type == "type3":
sol_vector = B.exp() * Cvect
else:
sol_vector = B.exp() * (((-B).exp() * b).applyfunc(lambda x: Integral(x, t)) + Cvect)
if is_transformed:
sol_vector = sol_vector.subs(t, tau)
gens = sol_vector.atoms(exp)
if type != "type1":
sol_vector = [expand_mul(s) for s in sol_vector]
sol_vector = [collect(s, ordered(gens), exact=True) for s in sol_vector]
if doit:
sol_vector = [s.doit() for s in sol_vector]
return sol_vector
def _matrix_is_constant(M, t):
    """Return True when no entry of the matrix *M* depends on *t*."""
    for entry in M:
        if entry.as_independent(t, as_Add=True)[1] != 0:
            return False
    return True
def canonical_odes(eqs, funcs, t):
    r"""
    Function that solves for highest order derivatives in a system

    Explanation
    ===========

    This function inputs a system of ODEs and based on the system,
    the dependent variables and their highest order, returns the system
    in the following form:

    .. math::
        X'(t) = A(t) X(t) + b(t)

    Here, $X(t)$ is the vector of dependent variables of lower order, $A(t)$ is
    the coefficient matrix, $b(t)$ is the non-homogeneous term and $X'(t)$ is the
    vector of dependent variables in their respective highest order. We use the term
    canonical form to imply the system of ODEs which is of the above form.

    If the system passed has a non-linear term with multiple solutions, then a list of
    systems is returned in its canonical form.

    Parameters
    ==========

    eqs : List
        List of the ODEs
    funcs : List
        List of dependent variables
    t : Symbol
        Independent variable

    Examples
    ========

    >>> from sympy import symbols, Function, Eq, Derivative
    >>> from sympy.solvers.ode.systems import canonical_odes
    >>> f, g = symbols("f g", cls=Function)
    >>> x, y = symbols("x y")
    >>> funcs = [f(x), g(x)]
    >>> eqs = [Eq(f(x).diff(x) - 7*f(x), 12*g(x)), Eq(g(x).diff(x) + g(x), 20*f(x))]

    >>> canonical_eqs = canonical_odes(eqs, funcs, x)
    >>> canonical_eqs
    [[Eq(Derivative(f(x), x), 7*f(x) + 12*g(x)), Eq(Derivative(g(x), x), 20*f(x) - g(x))]]

    >>> system = [Eq(Derivative(f(x), x)**2 - 2*Derivative(f(x), x) + 1, 4), Eq(-y*f(x) + Derivative(g(x), x), 0)]

    >>> canonical_system = canonical_odes(system, funcs, x)
    >>> canonical_system
    [[Eq(Derivative(f(x), x), -1), Eq(Derivative(g(x), x), y*f(x))], [Eq(Derivative(f(x), x), 3), Eq(Derivative(g(x), x), y*f(x))]]

    Returns
    =======

    List

    """
    from sympy.solvers.solvers import solve

    order = _get_func_order(eqs, funcs)
    # solve every equation for the highest-order derivative of each function
    highest_derivs = [func.diff(t, order[func]) for func in funcs]
    canon_eqs = solve(eqs, *highest_derivs, dict=True)

    # one canonical system per solution branch of the (possibly non-linear) solve
    return [
        [Eq(deriv, sol[deriv]) for deriv in highest_derivs]
        for sol in canon_eqs
    ]
def _is_commutative_anti_derivative(A, t):
    r"""
    Helper function for determining if the Matrix passed is commutative with its antiderivative

    Explanation
    ===========

    This function checks if the Matrix $A$ passed is commutative with its antiderivative with respect
    to the independent variable $t$.

    .. math::
        B(t) = \int A(t) dt

    The function outputs two values, first one being the antiderivative $B(t)$, second one being a
    boolean value, if True, then the matrix $A(t)$ passed is commutative with $B(t)$, else the matrix
    passed isn't commutative with $B(t)$.

    Parameters
    ==========

    A : Matrix
        The matrix which has to be checked
    t : Symbol
        Independent variable

    Examples
    ========

    >>> from sympy import symbols, Matrix
    >>> from sympy.solvers.ode.systems import _is_commutative_anti_derivative
    >>> t = symbols("t")
    >>> A = Matrix([[1, t], [-t, 1]])

    >>> B, is_commuting = _is_commutative_anti_derivative(A, t)
    >>> is_commuting
    True

    Returns
    =======

    Matrix, Boolean

    """
    B = integrate(A, t)
    commutator = (B*A - A*B).applyfunc(expand).applyfunc(factor_terms)
    # is_zero_matrix may be True, False or None (undecidable);
    # treat None as "not commuting"
    return B, bool(commutator.is_zero_matrix)
def _factor_matrix(A, t):
    """Try to factor *A* as ``factor * M`` with ``M`` constant in *t*.

    Returns the tuple ``(factor, M)`` when some t-dependent scalar factor
    can be divided out to leave a constant matrix, otherwise None.
    """
    # find the t-dependent part of the first entry that has one
    factor = None
    for entry in A:
        dep_part = entry.as_independent(t)[1]
        if dep_part.has(t):
            factor = dep_part
            break

    if factor is None:
        return None

    A_factored = (A/factor).applyfunc(ratsimp)
    if not _matrix_is_constant(A_factored, t):
        return None
    return (factor, A_factored)
def _is_second_order_type2(A, t):
    """Test whether A(t) matches the "type2" second-order pattern.

    The pattern requires ``A = M / P(t)`` with ``M`` a constant matrix
    and ``P(t)`` the square of a quadratic polynomial in *t*.

    Returns the pair ``(is_type2, term)``; on success ``term`` is the
    quadratic square root of ``P(t)``; on failure its value is a
    by-product of the checks and should not be used by callers.
    """
    # split A into a t-dependent scalar factor and a constant matrix
    term = _factor_matrix(A, t)
    is_type2 = False

    if term is not None:
        # the matrix was divided by the factor, so P(t) is its reciprocal
        term = 1/term[0]
        is_type2 = term.is_polynomial()

    if is_type2:
        poly = Poly(term.expand(), t)
        monoms = poly.monoms()

        # leading degree must be 2 or 4 for P to be the square of a
        # (possibly degenerate) quadratic
        if monoms[0][0] in (2, 4):
            cs = _get_poly_coeffs(poly, 4)
            a, b, c, d, e = cs

            # candidate square root a1*t**2 + b1*t + c1, then verify the
            # cross-term coefficients of its square against P
            a1 = powdenest(sqrt(a), force=True)
            c1 = powdenest(sqrt(e), force=True)
            b1 = powdenest(sqrt(c - 2*a1*c1), force=True)
            is_type2 = (b == 2*a1*b1) and (d == 2*b1*c1)
            term = a1*t**2 + b1*t + c1

        else:
            is_type2 = False

    return is_type2, term
def _get_poly_coeffs(poly, order):
cs = [0 for _ in range(order+1)]
for c, m in zip(poly.coeffs(), poly.monoms()):
cs[-1-m[0]] = c
return cs
def _match_second_order_type(A1, A0, t, b=None):
    r"""
    Works only for second order system in its canonical form.

    Type 0: Constant coefficient matrix, can be simply solved by
            introducing dummy variables.
    Type 1: When the substitution: $U = t*X' - X$ works for reducing
            the second order system to first order system.
    Type 2: When the system is of the form: $poly * X'' = A*X$ where
            $poly$ is square of a quadratic polynomial with respect to
            *t* and $A$ is a constant coefficient matrix.

    Returns a dict with at least "type_of_equation"; type2 adds the
    transformed matrix, scaling function g(t), substitution tau and the
    new independent-variable symbol.
    """
    match = {"type_of_equation": "type0"}
    n = A1.shape[0]

    if _matrix_is_constant(A1, t) and _matrix_is_constant(A0, t):
        return match

    if (A1 + A0*t).applyfunc(expand_mul).is_zero_matrix:
        match.update({"type_of_equation": "type1", "A1": A1})

    elif A1.is_zero_matrix and (b is None or b.is_zero_matrix):
        is_type2, term = _is_second_order_type2(A0, t)
        if is_type2:
            # Fix: renamed the unpacked coefficients (was ``a, b, c``,
            # which shadowed the parameter ``b``).
            c2, c1, c0 = _get_poly_coeffs(Poly(term, t), 2)
            A = (A0*(term**2).expand()).applyfunc(ratsimp) + (c1**2/4 - c2*c0)*eye(n, n)
            tau = integrate(1/term, t)
            t_ = Symbol("{}_".format(t))
            match.update({"type_of_equation": "type2", "A0": A,
                          "g(t)": sqrt(term), "tau": tau, "is_transformed": True,
                          "t_": t_})

    return match
def _second_order_subs_type1(A, b, funcs, t):
r"""
For a linear, second order system of ODEs, a particular substitution.
A system of the below form can be reduced to a linear first order system of
ODEs:
.. math::
X'' = A(t) * (t*X' - X) + b(t)
By substituting:
.. math:: | |
= data_sex[field_format][index] * gain
fluxerr_aper = data_sex[field_format_err][index] * gain
flux_diff = (flux_opt - flux_aper) / flux_aper
xlabel = 'S/N (AUTO)'
ylabel = '(E_FLUX_OPT - {}) / {}'.format(field_format, field_format)
plot_scatter (s2n_auto, flux_diff, limits, class_star,
xlabel=xlabel, ylabel=ylabel,
filename='{}_fluxopt_vs_fluxaper_{}xFWHM.pdf'.format(base, aper),
title='rainbow color coding follows CLASS_STAR: '
'from purple (star) to red (galaxy)')
flux_diff = (flux_auto - flux_aper) / flux_aper
ylabel = '(E_FLUX_AUTO - {}) / {}'.format(field_format, field_format)
plot_scatter (s2n_auto, flux_diff, limits, class_star,
xlabel=xlabel, ylabel=ylabel,
filename='{}_fluxauto_vs_fluxaper_{}xFWHM.pdf'.format(base, aper),
title='rainbow color coding follows CLASS_STAR: '
'from purple (star) to red (galaxy)')
if mypsffit:
flux_diff = (flux_mypsf - flux_aper) / flux_aper
ylabel = '(E_FLUX_MYPSF - {}) / {}'.format(field_format, field_format)
plot_scatter (s2n_auto, flux_diff, limits, class_star,
xlabel=xlabel, ylabel=ylabel,
filename='{}_fluxmypsf_vs_fluxaper_{}xFWHM.pdf'.format(base, aper),
title='rainbow color coding follows CLASS_STAR: '
'from purple (star) to red (galaxy)')
# compare with flux_psf if psffit catalog available
sexcat_ldac_psffit = '{}_cat_ldac_psffit.fits'.format(base)
if os.path.isfile(sexcat_ldac_psffit):
# read SExtractor psffit fits table
data_sex = read_hdulist (sexcat_ldac_psffit)
flux_sexpsf = data_sex['E_FLUX_PSF'][index] * gain
fluxerr_sexpsf = data_sex['E_FLUXERR_PSF'][index] * gain
s2n_sexpsf = data_sex['E_FLUX_PSF'][index] / data_sex['E_FLUXERR_PSF'][index]
flux_diff = (flux_sexpsf - flux_opt) / flux_opt
plot_scatter (s2n_auto, flux_diff, limits, class_star,
xlabel='S/N (AUTO)',
ylabel='(E_FLUX_SEXPSF - E_FLUX_OPT) / E_FLUX_OPT',
filename='{}_fluxsexpsf_vs_fluxopt.pdf'.format(base),
title='rainbow color coding follows CLASS_STAR: '
'from purple (star) to red (galaxy)')
if mypsffit:
# and compare 'my' psf with SExtractor psf
flux_diff = (flux_sexpsf - flux_mypsf) / flux_mypsf
plot_scatter (s2n_auto, flux_diff, limits, class_star,
xlabel='S/N (AUTO)',
ylabel='(E_FLUX_SEXPSF - E_FLUX_MYPSF) / E_FLUX_MYPSF',
filename='{}_fluxsexpsf_vs_fluxmypsf.pdf'.format(base),
title='rainbow color coding follows CLASS_STAR: '
'from purple (star) to red (galaxy)')
# and compare auto with SExtractor psf
flux_diff = (flux_sexpsf - flux_auto) / flux_auto
plot_scatter (s2n_auto, flux_diff, limits, class_star,
xlabel='S/N (AUTO)', ylabel='(E_FLUX_SEXPSF - E_FLUX_AUTO) / E_FLUX_AUTO',
filename='{}_fluxsexpsf_vs_fluxauto.pdf'.format(base),
title='rainbow color coding follows CLASS_STAR: '
'from purple (star) to red (galaxy)')
if get_par(set_zogy.timing,tel):
log_timing_memory (t0=t, label='prep_optimal_subtraction', log=log)
if get_par(set_zogy.low_RAM,tel):
# in low_RAM mode, save fftdata, fftdata_bkg_std and
# fftdata_mask to numpy files, and let the variable names
# point to the filenames from which they will be read in again
# later on
fftdata = save_npy_fits (
fftdata, filename='{}_fftdata.npy'.format(base))
fftdata_bkg_std = save_npy_fits (
fftdata_bkg_std, filename='{}_fftdata_bkg_std.npy'.format(base))
fftdata_mask = save_npy_fits (
fftdata_mask, filename='{}_fftdata_mask.npy'.format(base))
#if get_par(set_zogy.verbose,tel):
# log.info('fftdata.dtype {}'.format(fftdata.dtype))
# log.info('psf.dtype {}'.format(psf.dtype))
# log.info('psf_orig.dtype {}'.format(psf_orig.dtype))
# log.info('fftdata_bkg.dtype {}'.format(fftdata_bkg.dtype))
# log.info('fftdata_bkg_std.dtype {}'.format(fftdata_bkg_std.dtype))
# save fftdata
#return fftdata, psf, psf_orig, fftdata_bkg, fftdata_bkg_std, fftdata_mask
return fftdata, psf, psf_orig, fftdata_bkg_std, fftdata_mask
################################################################################
def save_npy_fits (data, filename=None, header=None):
    """Persist *data* to disk and return the filename used.

    The file format follows the filename extension: a '.fits'-like
    extension writes a FITS file (with the optional *header*), anything
    else is saved as a numpy '.npy' file. When *filename* is None a
    temporary '.npy' file is created and its name returned.
    """
    if filename is None:
        # no name supplied: create a persistent temporary numpy file
        tmpfile = tempfile.NamedTemporaryFile(delete=False, suffix='.npy')
        filename = tmpfile.name

    extension = filename.split('.')[-1]
    if 'fits' in extension:
        fits.writeto (filename, data, header, overwrite=True)
    else:
        np.save(filename, data)

    return filename
################################################################################
def create_modify_mask (data, satlevel, data_mask=None, sat_frac=0.8):
    """Flag saturated and saturated-adjacent pixels in *data*.

    Adds the flags to an existing bitmask, or creates a new mask when
    [data_mask] is not provided. Nothing is added if the input mask
    already contains saturated pixels.

    :param data: 2D image array.
    :param satlevel: detector saturation level; pixels at or above
        ``sat_frac * satlevel`` are flagged as saturated.
    :param data_mask: optional existing uint8 bitmask, updated in place.
    :param sat_frac: fraction of *satlevel* used as the threshold
        (generalized from the previously hard-coded 0.8).
    :returns: the (possibly newly created) mask array.
    """
    if data_mask is None:
        data_mask = np.zeros(data.shape, dtype='uint8')

    value = get_par(set_zogy.mask_value['saturated'],tel)
    mask_sat_check = (data_mask & value == value)

    # if no saturated pixels already present, add them
    if np.sum(mask_sat_check) == 0:
        mask_sat = (data >= sat_frac*satlevel)
        # reuse the already-fetched 'saturated' mask value
        data_mask[mask_sat] += value

        # pixels 8-connected to saturated pixels, excluding the
        # saturated pixels themselves
        mask_sat_adj = ndimage.binary_dilation(mask_sat,structure=
                                               np.ones((3,3)).astype('bool'))
        mask_sat_adj[mask_sat] = False
        data_mask[mask_sat_adj] += get_par(set_zogy.mask_value
                                           ['saturated-connected'],tel)

    return data_mask
################################################################################
def collect_zps (ra_sex, dec_sex, airmass_sex, xcoords_sex, ycoords_sex,
                 flux_opt, fluxerr_opt, ra_cal, dec_cal, mag_cal, magerr_cal,
                 exptime, filt, log=None):
    """Match detected sources to photometric calibration stars and
    collect one zeropoint per matched star.

    Fix: removed dead code from the original — the ``np.zeros(nrows)-1``
    pre-allocations that were immediately overwritten, the unused
    ``magerr_sex_inst``/``pogson`` computation and the unused ``nmatch``
    counter. (Parameters ``fluxerr_opt`` and ``magerr_cal`` are kept for
    interface compatibility even though they are not used here.)

    :param ra_sex, dec_sex: sky coordinates of the detected sources [deg].
    :param airmass_sex: per-source airmass.
    :param xcoords_sex, ycoords_sex: pixel coordinates of the sources.
    :param flux_opt: optimal fluxes of the sources.
    :param ra_cal, dec_cal, mag_cal: calibration-star coordinates and mags.
    :param exptime: exposure time used for the instrumental magnitudes.
    :param filt: filter name, used to look up the extinction coefficient.
    :param log: optional logger.
    :returns: (x, y, zp) arrays restricted to the matched stars.
    """
    if log is not None:
        if get_par(set_zogy.timing,tel): t = time.time()
        log.info('executing collect_zps ...')

    # maximum distance in degrees between sources to match
    dist_max = 3./3600

    # record zeropoints in array with same size as number of
    # calibration stars in the FOV
    ncal = np.shape(ra_cal)[0]
    zp_array = np.zeros(ncal)
    x_array = np.zeros(ncal)
    y_array = np.zeros(ncal)

    # instrumental magnitudes of the detected sources
    mag_sex_inst = -2.5*np.log10(flux_opt/exptime)

    # sort calibration catalog arrays in brightness
    index_sort = np.argsort(mag_cal)
    ra_cal_sort = ra_cal[index_sort]
    dec_cal_sort = dec_cal[index_sort]
    mag_cal_sort = mag_cal[index_sort]

    # loop calibration stars and find a match in SExtractor sources
    for i in range(ncal):
        index_match = find_stars (ra_sex, dec_sex, ra_cal_sort[i], dec_cal_sort[i],
                                  dist_max, search='circle')
        if len(index_match) > 0:
            # take closest object if more than a single match
            index_match = index_match[0]
            # calculate its zeropoint; need to calculate airmass for
            # each star, as around A=2, difference in airmass across
            # the FOV is 0.1, i.e. a 5% change
            zp_array[i] = (mag_cal_sort[i] - mag_sex_inst[index_match] +
                           airmass_sex[index_match] *
                           get_par(set_zogy.ext_coeff,tel)[filt])
            x_array[i] = xcoords_sex[index_match]
            y_array[i] = ycoords_sex[index_match]

    mask_nonzero = (zp_array != 0)
    if log is not None:
        log.info ('number of matches in collect_zps: {}'
                  .format(np.sum(mask_nonzero)))
        if get_par(set_zogy.timing,tel):
            log_timing_memory (t0=t, label='collect_zps', log=log)

    return x_array[mask_nonzero], y_array[mask_nonzero], zp_array[mask_nonzero]
################################################################################
def calc_zp (x_array, y_array, zp_array, filt, imtype, data_shape=None,
             zp_type='single', boxsize=None, log=None):
    """Determine the photometric zeropoint(s) from per-star zeropoints.

    :param x_array, y_array: pixel coordinates of the matched stars.
    :param zp_array: zeropoint derived from each matched star.
    :param filt: filter name (used for plot labelling).
    :param imtype: 'new' selects base_new, anything else base_ref (module
        globals) as the output filename base.
    :param data_shape: image shape; required for zp_type='background'.
    :param zp_type: 'single' (one clipped median), 'channels' (per CCD
        channel) or 'background' (per box of size ``boxsize``).
    :param boxsize: box size in pixels for zp_type='background'.
    :param log: optional logger.
    :returns: (zp_median, zp_std, nmatch) — scalars for 'single', arrays
        for the other modes.
    """
    if log is not None:
        if get_par(set_zogy.timing,tel): t = time.time()
        log.info('executing calc_zp ...')

    # output filename base depends on new vs reference image
    if imtype=='new':
        base = base_new
    else:
        base = base_ref

    if zp_type == 'single':
        # determine median zeropoint, requiring at least 5 non-zero values
        # in zp_array; at most the brightest nmax stars are used
        nmax = get_par(set_zogy.phot_ncal_max,tel)
        if np.sum(zp_array != 0) >= 5:
            __, zp_median, zp_std = sigma_clipped_stats (
                zp_array[0:nmax].astype('float64'))

            # make histogram plot if needed
            if get_par(set_zogy.make_plots,tel):
                clipped_stats (zp_array[0:nmax], clip_zeros=True,
                               make_hist=get_par(set_zogy.make_plots,tel),
                               name_hist='{}_zp_hist.pdf'.format(base),
                               hist_xlabel='{} zeropoint (mag)'.format(filt),
                               log=log)

            nmatch = len(zp_array[0:nmax])

        else:
            if log is not None:
                log.warning ('could not determine median and/or std for lack of '
                             'calibration stars (<5); returning zeros')
            zp_std, zp_median = 0, 0
            nmatch = 0

    elif zp_type == 'channels':
        # determine zeropoints of the 16 channels of the
        # MeerLICHT/BlackGEM CCD
        zp_median, zp_std, nmatch = zps_medarray (x_array, y_array, zp_array,
                                                  1320, 5280, (2,8), nval_min=5)

    elif zp_type == 'background':
        # determine zeropoints on the scale of the input boxsize
        ysize, xsize = data_shape
        if log is not None:
            if ysize % boxsize != 0 or xsize % boxsize !=0:
                log.warning ('input boxsize in function calc_zp does not fit '
                             'integer times in image')

        nysubs = int(ysize / boxsize)
        nxsubs = int(xsize / boxsize)
        zp_median, zp_std, nmatch = zps_medarray (x_array, y_array, zp_array,
                                                  boxsize, boxsize,
                                                  (nysubs, nxsubs), nval_min=3)

    if log is not None:
        if get_par(set_zogy.verbose,tel):
            log.info('zp_median: {}'.format(zp_median))
            log.info('zp_std: {}'.format(zp_std))
            log.info('nmatch: {}'.format(nmatch))
        if get_par(set_zogy.timing,tel):
            log_timing_memory (t0=t, label='calc_zp', log=log)

    return zp_median, zp_std, nmatch
################################################################################
def zps_medarray (xcoords, ycoords, zps, dx, dy, array_shape, nval_min):
"""Function that returns three arrays with shape [array_shape] with
the clipped median, standard deviation and number of zeropoint
values [zps] over rectangles with size [dx] x [dy] and shape
[array_shape]. This is used to calculate the median zeropoints
over the MeerLICHT/BlackGEM channels, or to create a "mini" image
with the median zeropoints over some box size, similar to the mini
background and standard deviation images.
"""
# initialize output arrays
zps_median = np.zeros(array_shape).ravel().astype('float32')
zps_std = np.zeros(array_shape).ravel().astype('float32')
zps_nstars = np.zeros(array_shape).ravel().astype(int)
# determine the indices of the origins of the subimages
nsubs = zps_median.size
ny, nx = array_shape
index_x = np.tile(np.arange(nx) * dx, ny)
index_y = np.repeat(np.arange(ny) * dy, nx)
# initialize the integer [sub_array] with same size as
# coordinates/zeropoint arrays
ncoords = np.size(xcoords)
sub_array = np.zeros(ncoords).astype(int)
# determine to which subimage the coordinates belong
x = (xcoords-0.5).astype(int)
y = (ycoords-0.5).astype(int)
for nsub in range(nsubs):
mask_sub = ((x >= index_x[nsub]) & (x < index_x[nsub]+dx) &
(y >= index_y[nsub]) & (y < index_y[nsub]+dy))
sub_array[mask_sub] = nsub
# record number of stars in subimage
zps_nstars[nsub] = np.sum(mask_sub)
# loop subimages and determine median zeropoint
for nsub in range(nsubs):
# mask that identifies zeropoints in current subimage
mask = (sub_array==nsub)
# only determine median when sufficient number of values
# [nval_min] are available; otherwise leave it at zero
if np.sum(mask) >= nval_min:
mean, median, std = sigma_clipped_stats (zps[mask])
zps_median[nsub] = median
| |
# Source: Ehsan-aghapour/AI-Sheduling-Reprodution — examples/gemm_tuner/GemmTuner.py (1000+ stars)
# Copyright (c) 2019-2020 ARM Limited.
#
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#!/usr/bin/python3
import argparse
import csv
import json
import logging
import math
import os
from collections import Counter, defaultdict, deque, namedtuple
from enum import Enum
from pathlib import Path
from typing import Deque, Dict, Generator, List, NamedTuple, Set, Tuple, Union
################################################################################
# Types
################################################################################
# Gemm strategy
Strategy = Enum("Strategy", ["Native", "ReshapedOnlyRHS", "Reshaped"])
# Gemm parameter
class GEMMParam(NamedTuple):
    """Identifies one GEMM workload: matrix shape, batch size and data type."""

    M: int  # Number of lhs matrix rows
    N: int  # Number of rhs matrix columns
    K: int  # Number of lhs matrix columns/rhs matrix rows
    B: int  # Batch size
    data_type: str  # Data type

    @classmethod
    def parse_from_strs(cls, *M_N_K_B, data_type):
        """Build a GEMMParam from string-valued fields (e.g. CSV cells)."""
        dims = [int(value) for value in M_N_K_B]
        return cls(*dims, str(data_type))

    def __str__(self):
        """Render the parameter as a comma-separated field list."""
        return ",".join(str(field) for field in self)
# Gemm configuration for strategy Native
class NativeGEMMConfig(NamedTuple):
    """Tuning configuration for the Native GEMM strategy."""

    m0: int  # Number of rows processed by the matrix multiplication
    n0: int  # Number of columns processed by the matrix multiplication
    k0: int  # Number of partial accumulations performed by the matrix multiplication

    @classmethod
    def parse_from_strs(cls, *args):
        """Build the config from string-valued fields."""
        return cls(*(int(arg) for arg in args))

    def __str__(self):
        """Render the config as a comma-separated field list."""
        return ",".join(str(field) for field in self)
# Gemm configuration for strategy Reshaped Only RHS
class ReshapedOnlyRHSGEMMConfig(NamedTuple):
    """Tuning configuration for the Reshaped-Only-RHS GEMM strategy."""

    m0: int  # Number of rows processed by the matrix multiplication
    n0: int  # Number of columns processed by the matrix multiplication
    k0: int  # Number of partial accumulations performed by the matrix multiplication
    # Number of horizontal blocks of size (k0xn0) stored on the same output row
    h0: int
    # Interleave rhs matrix (1) / Do not interleave rhs matrix (0)
    interleave_rhs: bool
    # Transpose rhs matrix but not lhs matrix (1) / Do not transpose rhs matrix but do transpose lhs matrix (0)
    transpose_rhs: bool
    # Export rhs matrix to cl_image (1) / Do not export rhs matrix to cl_image (0)
    export_to_cl_image_rhs: bool

    @classmethod
    def parse_from_strs(cls, *args):
        """Build the config from string fields; the last three are 0/1 flags."""
        *mnkh, interleave, transpose, cl_image = (int(arg) for arg in args)
        return cls(*mnkh, interleave == 1, transpose == 1, cl_image == 1)

    def __str__(self):
        """Render the config as a comma-separated field list."""
        return ",".join(str(field) for field in self)
# Gemm configuration for strategy Reshaped
class ReshapedGEMMConfig(NamedTuple):
    """Tuning configuration for the Reshaped GEMM strategy."""

    m0: int  # Number of rows processed by the matrix multiplication
    n0: int  # Number of columns processed by the matrix multiplication
    k0: int  # Number of partial accumulations performed by the matrix multiplication
    # Number of vertical blocks of size (m0xk0) stored on the same output row
    v0: int
    # Number of horizontal blocks of size (k0xn0) stored on the same output row
    h0: int
    # Interleave lhs matrix (1) / Do not interleave lhs matrix (0)
    interleave_lhs: bool
    # Interleave rhs matrix (1) / Do not interleave rhs matrix (0)
    interleave_rhs: bool
    # Transpose rhs matrix but not lhs matrix (1) / Do not transpose rhs matrix but do transpose lhs matrix (0)
    transpose_rhs: bool
    # Export rhs matrix to cl_image (1) / Do not export rhs matrix to cl_image (0)
    export_to_cl_image_rhs: bool

    @classmethod
    def parse_from_strs(cls, *args):
        """Build the config from string fields; the last four are 0/1 flags."""
        *mnkvh, int_lhs, int_rhs, transpose, cl_image = (int(arg) for arg in args)
        return cls(*mnkvh, int_lhs == 1, int_rhs == 1,
                   transpose == 1, cl_image == 1)

    def __str__(self):
        """Render the config as a comma-separated field list."""
        return ",".join(str(field) for field in self)
# Measurement we take from the benchmark result.
class Measurement(NamedTuple):
    """Timing measurement taken from one benchmark result.

    Both fields are OpenCL timer readings in milliseconds.
    """

    opencl_timer_ms_reshape: float
    opencl_timer_ms_kernel: float

    def get_total_ms(self):
        """Total time in ms (reshape + kernel)."""
        return self.opencl_timer_ms_reshape + self.opencl_timer_ms_kernel

    def is_close_to(self, other, tol):
        """True if the two total times differ by less than tol ms."""
        return math.fabs(self.get_total_ms() - other.get_total_ms()) < tol

    def is_better_than(self, other, tol):
        """True if self's total time beats other's by at least tol ms.

        Bug fix: is_close_to() requires a tolerance argument; it was
        previously called without one, raising TypeError on every call.
        """
        return self.get_total_ms() < other.get_total_ms() and not self.is_close_to(
            other, tol
        )

    # Element-wise arithmetic, used e.g. for aggregating measurements.
    def __add__(self, other):
        return Measurement(
            self.opencl_timer_ms_reshape + other.opencl_timer_ms_reshape,
            self.opencl_timer_ms_kernel + other.opencl_timer_ms_kernel,
        )

    def __sub__(self, other):
        return Measurement(
            self.opencl_timer_ms_reshape - other.opencl_timer_ms_reshape,
            self.opencl_timer_ms_kernel - other.opencl_timer_ms_kernel,
        )

    def __mul__(self, other):
        return Measurement(
            self.opencl_timer_ms_reshape * other.opencl_timer_ms_reshape,
            self.opencl_timer_ms_kernel * other.opencl_timer_ms_kernel,
        )

    def __floordiv__(self, other):
        return Measurement(
            self.opencl_timer_ms_reshape // other.opencl_timer_ms_reshape,
            self.opencl_timer_ms_kernel // other.opencl_timer_ms_kernel,
        )

    def __truediv__(self, other):
        return Measurement(
            self.opencl_timer_ms_reshape / other.opencl_timer_ms_reshape,
            self.opencl_timer_ms_kernel / other.opencl_timer_ms_kernel,
        )

    def __pow__(self, power):
        return Measurement(
            self.opencl_timer_ms_reshape ** power, self.opencl_timer_ms_kernel ** power
        )

    def __str__(self):
        return ",".join(map(str, self))
# GEMMConfig Type
GEMMConfigT = Union[NativeGEMMConfig,
ReshapedOnlyRHSGEMMConfig, ReshapedGEMMConfig]
# Representation of the benchmark result from a single experiment
class BenchmarkResult(NamedTuple):
    """One experiment: a GEMM workload run with one strategy and config."""
    gemm_param: GEMMParam      # the GEMM workload that was benchmarked
    strategy: Strategy         # kernel strategy used for this run
    gemm_config: GEMMConfigT   # tuning configuration used for this run
    measurement: Measurement   # timings observed for this run
class GEMMBenchmarkResultRecorder:
""" A recorder that records and organises GEMM Benchmark results, and produces various reports on the record.
"""
SummaryLevel = Enum("SummaryLevel", ["Short", "Detailed"])
    def __init__(self, tol=0.01):
        """ Initializer

        :param tol: tolerance in ms; measurements whose total times differ
            by less than this are treated as equally good (see
            Measurement.is_close_to).
        """
        # All results, in insertion order.
        self._benchmark_result_record: List[BenchmarkResult] = []
        # Strategies recorded
        self._strategies = set()
        self._tol = tol
def add(self, benchmark_result: BenchmarkResult):
""" Add a benchmark result to the record.
"""
gemm_param, strategy, gemm_config, measurement = benchmark_result
# Update strategies encoutnered
self._strategies.add(strategy)
self._benchmark_result_record.append(benchmark_result)
    def get_record(self) -> Generator[BenchmarkResult, None, None]:
        """ Return an iterator that iterates over the record.

        Yields every recorded BenchmarkResult in insertion order.
        """
        yield from self._benchmark_result_record
def get_best_gemm_configs(self):
""" Get the best GEMMConfig set per GEMMParam per Strategy
"""
best_gc_sets: Dict[
Tuple[GEMMParam, Strategy], List[Tuple[GEMMConfig, Measurement]]
] = defaultdict(list)
for gemm_param, strategy, gemm_config, measurement in self.get_record():
best_gc_set = best_gc_sets.setdefault((gemm_param, strategy), [])
best_gc_set.append((gemm_config, measurement))
# Sort the best config set (list)
best_gc_set = sorted(
best_gc_set, key=lambda gc_and_m: gc_and_m[1].get_total_ms()
)
# Filter out configs that are beyond tolerance to the best GEMMConfig's measurement
best_gc, best_m = best_gc_set[0]
best_gc_set_new = [
(gemm_config, measurement)
for gemm_config, measurement in best_gc_set[1:]
if measurement.is_close_to(best_m, self._tol)
]
# Add back the best config
best_gc_set_new.insert(0, (best_gc, best_m))
best_gc_sets[(gemm_param, strategy)] = best_gc_set_new
return best_gc_sets
def get_best_gemm_configs_as_sequence(self):
""" Get the best GEMMConfig set per GEMMParam per Strategy, and flatten the result into a sequence
of BenchmarkResults
"""
for (
(gemm_param, strategy),
best_gc_sets,
) in self.get_best_gemm_configs().items():
for best_gemm_config, best_measurement in best_gc_sets:
yield BenchmarkResult(
gemm_param, strategy, best_gemm_config, best_measurement
)
def get_config_distributions(self):
""" Return GEMMConfigDistribution for each strategy
"""
gemm_config_distributions: Dict[Strategy, GEMMConfigDistribution] = defaultdict(
GEMMConfigDistribution
)
for benchmark_result in self.get_best_gemm_configs_as_sequence():
_, strategy, _, _ = benchmark_result
gemm_config_distributions[strategy].add(benchmark_result)
return gemm_config_distributions
def get_best_gemm_strategies(self):
""" Get the best Stratey per GEMMParam
"""
all_results: Dict[GEMMParam, List[Tuple[Strategy, Measurement]]] = defaultdict(
list
)
best_strategies: Dict[GEMMParam, Strategy] = {}
for gemm_param, strategy, gemm_config, measurement in self.get_record():
all_results[gemm_param].append((strategy, measurement))
for gemm_param, results_set in all_results.items():
# Sort the best results set (list)
results_set = sorted(
results_set, key=lambda s_and_m: s_and_m[1].get_total_ms()
)
# Select best Strategy
best_s, best_m = results_set[0]
best_strategies[gemm_param] = best_s
return best_strategies
    def save_to_jsons(self, out_dir, only_best_config=True):
        """ Save records to an output directory of JSON files.
        The directory is organized such that each strategy gets its own JSON file.
        The directory also includes a JSON file to define the best strategy per GEMM Param.

        :param out_dir: directory to write into; created if missing
        :param only_best_config: if True, dump only the best config sets
            per (GEMMParam, Strategy); otherwise dump every recorded result
        """
        if not os.path.exists(out_dir):
            logging.info(
                "Output directory {} does not exist. Creating...".format(
                    out_dir)
            )
            os.mkdir(out_dir)
        # Best strategy per GEMMParam, keyed by the param's string form.
        # NOTE(review): check_out_path and dump_json are helpers defined
        # elsewhere in this module.
        out_json_path = os.path.join(out_dir, "gemm_type_selection.json")
        if check_out_path(out_json_path):
            results = self.get_best_gemm_strategies()
            results = {str(key): value.name for key, value in results.items()}
            dump_json(out_json_path, results)
        # One JSON file per strategy with its configs and raw timings.
        for strategy in self._strategies:
            out_json_path = os.path.join(
                out_dir, ("gemm_config_" + strategy.name.lower() + ".json")
            )
            if check_out_path(out_json_path):
                record = (
                    self.get_best_gemm_configs_as_sequence()
                    if only_best_config
                    else self.get_record()
                )
                results = defaultdict(list)
                for res in record:
                    if res.strategy == strategy:
                        results[str(res.gemm_param)].append(
                            {
                                "GEMMConfig": str(res.gemm_config),
                                "OpenCL_Timer_ms_reshape": str(
                                    res.measurement.opencl_timer_ms_reshape
                                ),
                                "OpenCL_Timer_ms_kernel": str(
                                    res.measurement.opencl_timer_ms_kernel
                                ),
                            }
                        )
                dump_json(out_json_path, results)
def summary(self, sum_level=SummaryLevel.Short):
""" Return the summary string of the record
"""
num_raw_records = sum(1 for _ in self.get_record())
gemm_params_per_strategy = defaultdict(list)
for gemm_param, strategy in self.get_best_gemm_configs().keys():
gemm_params_per_strategy[strategy].append(gemm_param)
global_summary = f"""
=== {self.__class__.__name__} Summary ===
[Global]
Strategies recorded: {", ".join(map(lambda s: s.name, self._strategies))}
Total number of | |
# Source: silvergasp/pigweed
#!/usr/bin/env python3
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
r"""Decodes and detokenizes strings from binary or Base64 input.
The main class provided by this module is the Detokenize class. To use it,
construct it with the path to an ELF or CSV database, a tokens.Database,
or a file object for an ELF file or CSV. Then, call the detokenize method with
encoded messages, one at a time. The detokenize method returns a
DetokenizedString object with the result.
For example,
from pw_tokenizer import detokenize
detok = detokenize.Detokenizer('path/to/my/image.elf')
print(detok.detokenize(b'\x12\x34\x56\x78\x03hi!'))
This module also provides a command line interface for decoding and detokenizing
messages from a file or stdin.
"""
import argparse
import base64
import binascii
from datetime import datetime
import io
import logging
import os
from pathlib import Path
import re
import string
import struct
import sys
import time
from typing import (BinaryIO, Callable, Dict, List, Iterable, Iterator, Match,
NamedTuple, Optional, Pattern, Tuple, Union)
try:
from pw_tokenizer import database, decode, encode, tokens
except ImportError:
# Append this path to the module search path to allow running this module
# without installing the pw_tokenizer package.
sys.path.append(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
from pw_tokenizer import database, decode, encode, tokens
ENCODED_TOKEN = struct.Struct('<I')
_LOG = logging.getLogger('pw_tokenizer')
class DetokenizedString:
    """A detokenized string, with all results if there are collisions."""
    def __init__(self,
                 token: Optional[int],
                 format_string_entries: Iterable[tuple],
                 encoded_message: bytes,
                 show_errors: bool = False):
        """Decodes the message against every candidate format string.

        Args:
            token: the decoded token value, or None if it could not be read
            format_string_entries: (entry, FormatString) pairs sharing this
                token; more than one pair only occurs on a hash collision
            encoded_message: full encoded message, token bytes included
            show_errors: if True, __str__ embeds an error description
                instead of falling back to prefixed Base64
        """
        self.token = token
        self.encoded_message = encoded_message
        self._show_errors = show_errors
        self.successes: List[decode.FormattedString] = []
        self.failures: List[decode.FormattedString] = []
        decode_attempts: List[Tuple[Tuple, decode.FormattedString]] = []
        for entry, fmt in format_string_entries:
            # Arguments start immediately after the encoded token bytes.
            result = fmt.format(encoded_message[ENCODED_TOKEN.size:],
                                show_errors)
            # Sort competing entries so the most likely matches appear first.
            # Decoded strings are prioritized by whether they
            #
            # 1. decoded all bytes for all arguments without errors,
            # 2. decoded all data,
            # 3. have the fewest decoding errors,
            # 4. decoded the most arguments successfully, or
            # 5. have the most recent removal date, if they were removed.
            #
            # This must match the collision resolution logic in detokenize.cc.
            score: Tuple = (
                all(arg.ok() for arg in result.args) and not result.remaining,
                not result.remaining,  # decoded all data
                -sum(not arg.ok() for arg in result.args),  # fewest errors
                len(result.args),  # decoded the most arguments
                entry.date_removed or datetime.max)  # most recently present
            decode_attempts.append((score, result))
        # Sort the attempts by the score so the most likely results are first.
        decode_attempts.sort(key=lambda value: value[0], reverse=True)
        # Split out the successful decodes from the failures.
        # score[0] is True only for fully clean decodes (criterion 1).
        for score, result in decode_attempts:
            if score[0]:
                self.successes.append(result)
            else:
                self.failures.append(result)
    def ok(self) -> bool:
        """True if exactly one string decoded the arguments successfully."""
        return len(self.successes) == 1
    def matches(self) -> List[decode.FormattedString]:
        """Returns the strings that matched the token, best matches first."""
        return self.successes + self.failures
    def best_result(self) -> Optional[decode.FormattedString]:
        """Returns the string and args for the most likely decoded string."""
        # matches() is already sorted best-first; take its first element.
        for string_and_args in self.matches():
            return string_and_args
        return None
    def error_message(self) -> str:
        """If detokenization failed, returns a descriptive message."""
        if self.ok():
            return ''
        if not self.matches():
            if self.token is None:
                return 'missing token'
            return 'unknown token {:08x}'.format(self.token)
        if len(self.matches()) == 1:
            return 'decoding failed for {!r}'.format(self.matches()[0].value)
        return '{} matches'.format(len(self.matches()))
    def __str__(self) -> str:
        """Returns the string for the most likely result."""
        result = self.best_result()
        if result:
            return result[0]
        if self._show_errors:
            return '<[ERROR: {}|{!r}]>'.format(self.error_message(),
                                               self.encoded_message)
        # Display the string as prefixed Base64 if it cannot be decoded.
        return encode.prefixed_base64(self.encoded_message)
    def __repr__(self) -> str:
        if self.ok():
            message = repr(str(self))
        else:
            message = 'ERROR: {}|{!r}'.format(self.error_message(),
                                              self.encoded_message)
        return '{}({})'.format(type(self).__name__, message)
class _TokenizedFormatString(NamedTuple):
    """Pairs a token database entry with its parsed format string."""
    entry: tokens.TokenizedStringEntry  # raw database entry for the token
    format: decode.FormatString         # parsed, reusable format string
class Detokenizer:
    """Main detokenization class; detokenizes strings and caches results."""
    def __init__(self, *token_database_or_elf, show_errors: bool = False):
        """Decodes and detokenizes binary messages.

        Args:
          *token_database_or_elf: a path or file object for an ELF or CSV
              database, a tokens.Database, or an elf_reader.Elf
          show_errors: if True, an error message is used in place of the %
              conversion specifier when an argument fails to decode
        """
        self.database = database.load_token_database(*token_database_or_elf)
        self.show_errors = show_errors
        # Cache parsed FormatStrings so repeated tokens are cheap.
        self._cache: Dict[int, List[_TokenizedFormatString]] = {}
    def lookup(self, token: int) -> List[_TokenizedFormatString]:
        """Returns (TokenizedStringEntry, FormatString) list for matches."""
        cached = self._cache.get(token)
        if cached is None:
            cached = [
                _TokenizedFormatString(entry, decode.FormatString(str(entry)))
                for entry in self.database.token_to_entries[token]
            ]
            self._cache[token] = cached
        return cached
    def detokenize(self, encoded_message: bytes) -> DetokenizedString:
        """Decodes and detokenizes a message as a DetokenizedString."""
        # Too short to even contain a token: report it as tokenless.
        if len(encoded_message) < ENCODED_TOKEN.size:
            return DetokenizedString(None, (), encoded_message,
                                     self.show_errors)
        (token,) = ENCODED_TOKEN.unpack_from(encoded_message)
        return DetokenizedString(token, self.lookup(token), encoded_message,
                                 self.show_errors)
class AutoUpdatingDetokenizer:
    """Loads and updates a detokenizer from database paths."""
    class _DatabasePath:
        """Tracks the modified time of a path or file object."""
        def __init__(self, path):
            # Accept a path-like or an open file object (use its .name).
            self.path = path if isinstance(path, (str, Path)) else path.name
            self._modified_time: Optional[float] = self._last_modified_time()
        def updated(self) -> bool:
            """True if the path has been updated since the last call."""
            modified_time = self._last_modified_time()
            if modified_time is None or modified_time == self._modified_time:
                return False
            self._modified_time = modified_time
            return True
        def _last_modified_time(self) -> Optional[float]:
            # None indicates the file does not (currently) exist.
            try:
                return os.path.getmtime(self.path)
            except FileNotFoundError:
                return None
        def load(self) -> tokens.Database:
            """Loads the database; falls back to an empty one if missing."""
            try:
                return database.load_token_database(self.path)
            except FileNotFoundError:
                return database.load_token_database()
    def __init__(self,
                 *paths_or_files,
                 min_poll_period_s: float = 1.0) -> None:
        """Wraps a Detokenizer that reloads when any source file changes.

        Args:
            *paths_or_files: database paths or file objects to watch
            min_poll_period_s: minimum seconds between mtime checks
        """
        self.paths = tuple(self._DatabasePath(path) for path in paths_or_files)
        self.min_poll_period_s = min_poll_period_s
        self._last_checked_time: float = time.time()
        self._detokenizer = Detokenizer(*(path.load() for path in self.paths))
    def detokenize(self, data: bytes) -> DetokenizedString:
        """Updates the token database if it has changed, then detokenizes."""
        # Rate-limit the filesystem polling to min_poll_period_s.
        if time.time() - self._last_checked_time >= self.min_poll_period_s:
            self._last_checked_time = time.time()
            if any(path.updated() for path in self.paths):
                _LOG.info('Changes detected; reloading token database')
                self._detokenizer = Detokenizer(*(path.load()
                                                  for path in self.paths))
        return self._detokenizer.detokenize(data)
_Detokenizer = Union[Detokenizer, AutoUpdatingDetokenizer]
class PrefixedMessageDecoder:
    """Parses messages that start with a prefix character from a byte stream."""
    def __init__(self, prefix: Union[str, bytes], chars: Union[str, bytes]):
        """Parses prefixed messages.

        Args:
            prefix: one character that signifies the start of a message
            chars: characters allowed in a message

        Raises:
            ValueError: if prefix is not exactly one character or is itself
                a valid message character (which would be ambiguous)
        """
        self._prefix = prefix.encode() if isinstance(prefix, str) else prefix
        if isinstance(chars, str):
            chars = chars.encode()
        # Store the valid message bytes as a set of binary strings.
        # (1-byte slices, so membership tests work against fd.read(1).)
        self._message_bytes = frozenset(chars[i:i + 1]
                                        for i in range(len(chars)))
        if len(self._prefix) != 1 or self._prefix in self._message_bytes:
            raise ValueError(
                'Invalid prefix {!r}: the prefix must be a single '
                'character that is not a valid message character.'.format(
                    prefix))
        # Running buffer of every byte read from the stream.
        self.data = bytearray()
    def _read_next(self, fd: BinaryIO) -> Tuple[bytes, int]:
        """Returns the next character and its index."""
        char = fd.read(1)
        index = len(self.data)
        self.data += char
        return char, index
    def read_messages(self,
                      binary_fd: BinaryIO) -> Iterator[Tuple[bool, bytes]]:
        """Parses prefixed messages; yields (is_message, contents) chunks."""
        # message_start is None outside a message, otherwise the index in
        # self.data where the current message (including prefix) begins.
        message_start = None
        while True:
            # This reads the file character-by-character. Non-message characters
            # are yielded right away; message characters are grouped.
            char, index = self._read_next(binary_fd)
            # If in a message, keep reading until the message completes.
            if message_start is not None:
                if char in self._message_bytes:
                    continue
                yield True, self.data[message_start:index]
                message_start = None
            # Handle a non-message character.
            if not char:
                # EOF: fd.read(1) returned b''.
                return
            if char == self._prefix:
                message_start = index
            else:
                yield False, char
    def transform(self, binary_fd: BinaryIO,
                  transform: Callable[[bytes], bytes]) -> Iterator[bytes]:
        """Yields the file with a transformation applied to the messages."""
        for is_message, chunk in self.read_messages(binary_fd):
            yield transform(chunk) if is_message else chunk
def _detokenize_prefixed_base64(
        detokenizer: _Detokenizer, prefix: bytes,
        recursion: int) -> Callable[[Match[bytes]], bytes]:
    """Returns a function that decodes prefixed Base64 with the detokenizer."""
    def decode_and_detokenize(match: Match[bytes]) -> bytes:
        """Decodes prefixed base64 with the provided detokenizer."""
        original = match.group(0)
        # Strip the prefix character before Base64-decoding; invalid
        # Base64 leaves the text untouched.
        try:
            binary = base64.b64decode(original[1:], validate=True)
        except binascii.Error:
            return original
        detokenized_string = detokenizer.detokenize(binary)
        if not detokenized_string.matches():
            return original
        result = str(detokenized_string).encode()
        # Recursively detokenize nested Base64 messages, up to the limit.
        if recursion > 0 and original != result:
            result = detokenize_base64(detokenizer, result, prefix,
                                       recursion - 1)
        return result
    return decode_and_detokenize
BASE64_PREFIX = encode.BASE64_PREFIX.encode()
DEFAULT_RECURSION = 9
def _base64_message_regex(prefix: bytes) -> Pattern[bytes]:
"""Returns a regular expression for prefixed base64 tokenized strings."""
return re.compile(
# Base64 tokenized strings start with the prefix character ($)
re.escape(prefix) + (
# | |
multi reg",
"SBM" : "subtract multi reg",
"SES" : "sign extend single",
"SEW" : "sign extend word",
"SF" : "set flags",
"SL" : "shift left",
"SLI" : "shift left immediate",
"SLIM" : "shift left immediate multi reg",
"SLM" : "shift left multi reg",
"SMP" : "set memory protection",
"SR" : "shift right",
"SRI" : "shift right immediate",
"SRIM" : "shift right immediate multi reg",
"SRM" : "shift right multi reg",
"STS" : "store single",
# "STSD" : "store single and decrement",
# "STSI" : "store single and increment",
"STT" : "store tri",
# "STTD" : "store tri and decrement",
# "STTI" : "store tri and increment",
"STW" : "store word",
# "STWD" : "store word and decrement",
# "STWI" : "store word and increment",
"WT" : "wait",
"XR" : "xor",
"XRI" : "xor immediate",
"XRM" : "xor multi reg",
"ZES" : "zero extend single",
"ZEW" : "zero extend word"
}
def notify_get_autocmt(self):
"""
Get instruction comment. 'cmd' describes the instruction in question
@return: None or the comment string
"""
mnem = self.instruc[self.cmd.itype]['name']
if mnem in self.auto_comments:
return self.auto_comments[mnem]
return None
    def notify_create_switch_xrefs(self, jumpea, swi):
        """Create xrefs for a custom jump table
        @param jumpea: address of the jump insn
        @param swi: switch information
        @return: None
        """
        # Intentionally a no-op: no custom jump-table xref handling here.
        pass
    def notify_calc_step_over(self, ip):
        """
        Calculate the address of the instruction which will be
        executed after "step over". The kernel will put a breakpoint there.
        If the step over is equal to step into or we can not calculate
        the address, return BADADDR.
        args:
            ip - instruction address
        returns: target or BADADDR
        """
        # Not implemented: BADADDR makes step-over behave like step-into.
        return idaapi.BADADDR
    def notify_may_be_func(self, state):
        """
        can a function start here?
        the instruction is in 'cmd'
        arg: state -- autoanalysis phase
            state == 0: creating functions
                  == 1: creating chunks
        returns: probability 0..100
        """
        # 0%: never volunteer a function start from this module.
        return 0
def notify_str2reg(self, regname):
"""
Convert a register name to a register number
args: regname
Returns: register number or -1 if not avail
The register number is the register index in the regNames array
Most processor modules do not need to implement this callback
It is useful only if ph.regNames[reg] does not provide
the correct register names
"""
if regname in self.regNames:
return self.regNames.index(regname)
return -1
    def notify_is_sane_insn(self, no_crefs):
        """
        is the instruction sane for the current file type?
        args: no_crefs
          1: the instruction has no code refs to it.
             ida just tries to convert unexplored bytes
             to an instruction (but there is no other
             reason to convert them into an instruction)
          0: the instruction is created because
             of some coderef, user request or another
             weighty reason.
        The instruction is in 'cmd'
        returns: 1-ok, <=0-no, the instruction isn't
        likely to appear in the program
        """
        # NOTE(review): always returns 0 ("not sane") regardless of
        # no_crefs — confirm this is the intended behavior.
        return 0
    def notify_func_bounds(self, code, func_ea, max_func_end_ea):
        """
        find_func_bounds() finished its work
        The module may fine tune the function bounds
        args:
            possible code - one of FIND_FUNC_XXX (check find_func_bounds)
            func_ea - func start ea
            max_func_end_ea (from the kernel's point of view)
        returns: possible_return_code
        """
        # Accept the kernel's proposed bounds without adjustment.
        return FIND_FUNC_OK
#if we leave these uncommented we won't get function headers
# def asm_func_header(self, func_ea):
# """generate function header lines"""
# pass
# def asm_func_footer(self, func_ea):
# """generate function footer lines"""
# pass
    def asm_get_type_name(self, flag, ea_or_id):
        """
        Get name of type of item at ea or id.
        (i.e. one of: byte,word,dword,near,far,etc...)
        Returns "" when no type name is determined.
        """
        if isCode(flag):
            # NOTE(review): pfn is fetched but never used and this branch
            # falls through to return "" — looks unfinished; confirm intent.
            pfn = get_func(ea_or_id)
            # return get func name
        elif isWord(flag):
            return "word"
        return ""
    def notify_init(self, idp_file):
        """Processor module initialization hook; idp_file names the IDP file."""
        # init returns non-zero on success
        return 1
    def notify_outlabel(self, ea, colored_name):
        """
        The kernel is going to generate an instruction label line
        or a function header
        args:
            ea - instruction address
            colored_name -
        If returns value <=0, then the kernel should not generate the label
        """
        # Always allow the kernel to generate the label.
        return 1
    def notify_rename(self, ea, new_name):
        """
        The kernel is going to rename a byte
        args:
            ea -
            new_name -
        If returns value <=0, then the kernel should not rename it
        """
        # Always allow the rename.
        return 1
    def notify_may_show_sreg(self, ea):
        """
        The kernel wants to display the segment registers
        in the messages window.
        args:
            ea
        if this function returns 0
        then the kernel will not show
        the segment registers.
        (assuming that the module have done it)
        """
        # Let the kernel display the segment registers itself.
        return 1
    def notify_coagulate(self, start_ea):
        """
        Try to define some unexplored bytes
        This notification will be called if the
        kernel tried all possibilities and could
        not find anything more useful than to
        convert to array of bytes.
        The module can help the kernel and convert
        the bytes into something more useful.
        args:
            start_ea -
        returns: number of converted bytes
        """
        # 0 bytes converted: let the kernel fall back to byte arrays.
        return 0
    def notify_closebase(self):
        """
        The database will be closed now
        """
        # Nothing to clean up when the database closes.
        pass
    def notify_load_idasgn(self, short_sig_name):
        """
        FLIRT signature have been loaded for normal processing
        (not for recognition of startup sequences)
        args:
            short_sig_name
        """
        # No special handling for loaded FLIRT signatures.
        pass
    def notify_auto_empty(self):
        """
        Info: all analysis queues are empty.
        This callback is called once when the
        initial analysis is finished. If the queue is
        not empty upon the return from this callback,
        it will be called later again
        """
        # No action needed when the analysis queues drain.
        pass
    def notify_is_call_insn(self, ea):
        """
        Is the instruction a "call"?
        args
            ea - instruction address
        returns: 1-unknown, 0-no, 2-yes
        """
        # 1 = unknown: the mnemonic heuristic below is disabled, so the
        # kernel decides on its own.
        #m = GetMnem(ea)
        #return m is not None and m.startswith("C") and not m.startswith("CM")
        return 1
    def notify_is_ret_insn(self, ea, strict):
        """
        Is the instruction a "return"?
        ea - instruction address
        strict - 1: report only ret instructions
                 0: include instructions like "leave"
                    which begins the function epilog
        returns: 1-unknown, 0-no, 2-yes
        """
        # 1 = unknown: the mnemonic check below is disabled.
        #return GetMnem(ea) == "RE"
        return 1
    def notify_kernel_config_loaded(self):
        """
        This callback is called when ida.cfg is parsed
        """
        # No ida.cfg-dependent settings for this module.
        pass
    def notify_is_alloca_probe(self, ea):
        """
        Does the function at 'ea' behave as __alloca_probe?
        args:
            ea
        returns: 2-yes, 1-false
        """
        # 1 = no: no functions behave like __alloca_probe here.
        return 1
    def notify_out_src_file_lnnum(self, filename, lnnum):
        """
        Callback: generate analog of
        #line "file.c" 123
        directive.
        args:
            file - source file (may be NULL)
            lnnum - line number
        returns: 2-directive has been generated
        """
        # Returns 1 (not 2), i.e. no #line directive is generated here.
        return 1
    def notify_is_insn_table_jump(self):
        """
        Callback: determine if instruction is a table jump or call
        If CF_JUMP bit can not describe all kinds of table
        jumps, please define this callback.
        It will be called for insns with CF_JUMP bit set.
        input: cmd structure contains the current instruction
        returns: 1-yes, 0-no
        """
        # 0 = not a table jump.
        return 0
    def notify_auto_empty_finally(self):
        """
        Info: all analysis queues are empty definitively

        Rebuilds the string list with custom options, then re-creates every
        discovered string that is not inside code as a Unicode string item
        with a repeatable comment containing its text.
        """
        # Configure the string list: minimum length 7, the string types
        # selected by bitmask 9 (presumably C + Unicode — verify against
        # the IDA SDK), and ignore item heads while scanning.
        ss = strwinsetup_t()
        ss.minlen = 7
        ss.strtypes = 9
        ss.ignore_heads = 1
        ss.ea1 = 0
        SetLongPrm(INF_STRTYPE, ASCSTR_UNICODE)
        set_strlist_options(ss)
        refresh_strlist(0, BADADDR)
        si = string_info_t()
        for i in range(get_strlist_qty()):
            if get_strlist_item(i, si):
                if not isCode(GetFlags(si.ea)):
                    # Undefine the existing item and re-create it as a
                    # Unicode string, then attach its text as a comment.
                    ea = get_start(si.ea)
                    s = make_str(ea)
                    hd = ItemHead(si.ea)
                    do_unknown(hd, 0)
                    make_ascii_string(ea, len(s) + 1, ASCSTR_UNICODE)
                    MakeRptCmt(ea, "\"%s\"" % s)
    def notify_is_indirect_jump(self):
        """
        Callback: determine if instruction is an indrect jump
        If CF_JUMP bit can not describe all jump types
        jumps, please define this callback.
        input: cmd structure contains the current instruction
        returns: 1-use CF_JUMP, 2-no, 3-yes
        """
        # 1 = rely on the CF_JUMP instruction flag.
        return 1
def notify_determined_main(self, main_ea):
"""
The main() function has been determined
"""
pass
def notify_validate_flirt_func(self, ea, funcname):
"""
flirt has recognized a library function
this callback can be used by a plugin or proc module
to intercept it and validate such a function
args:
start_ea
funcname
returns: -1-do not create a function,
1-function is validated
the idp module is allowed to modify 'cmd'
"""
return 1
def notify_set_proc_options(self, options):
"""
called if the user specified an option string in the command line:
-p<processor name>:<options>
can be used for e.g. setting a processor subtype
also called if option string is passed to set_processor_type()
and IDC's SetProcessorType()
args:
options
returns: <0 - bad option string
"""
return 1
def notify_newseg(self, start_ea, segm_name, segm_class):
"""
A new segment is about to be created
args:
start_ea
segm_name
segm_class
return 1-ok, 0-segment should not be created
"""
return 1
def notify_auto_queue_empty(self, type):
"""
One analysis queue is empty.
args:
atype_t type
This callback can be called many times, so
only the autoMark() functions can be used from it
(other functions may work but it is not tested)
"""
return | |
<gh_stars>0
import mysql.connector
from image_scraping import *
from auth import AUTH
import json
class Connector:
    """Thin wrapper around a ``mysql.connector`` connection.

    Holds a single connection (``self.db``) and at most one active
    cursor (``self.cur``); callers create and close the cursor
    explicitly around each group of statements.
    """
    def __init__(self):
        # AUTH is a dict of connection parameters (host, user, password, ...).
        self.db = mysql.connector.connect(**AUTH)
        self.cur = None
    def create_cursor(self):
        """Create a dictionary cursor in ``self.cur`` to operate the database."""
        self.cur = self.db.cursor(dictionary=True)
    def close_cursor(self):
        """Close the current cursor and reset ``self.cur`` to None.

        :raises RuntimeError: if no cursor is currently open.
        """
        if self.cur is None:
            # A bare ``raise`` outside an except block (as in the original)
            # is itself a RuntimeError; raise one with a useful message.
            raise RuntimeError("Cursor does not exist.")
        self.cur.close()
        self.cur = None
    def execute(self, code, param=None):
        """Execute ``code`` with the given parameters on the open cursor.

        :param code: SQL statement, possibly containing %s placeholders
        :param param: tuple of values for the placeholders, or None
        :raises RuntimeError: if no cursor is currently open
        """
        if self.cur is None:
            raise RuntimeError("Cursor does not exist.")
        # Only forward ``param`` when it was given; the original's two
        # branches were identical, defeating the None check.
        if param is None:
            self.cur.execute(code)
        else:
            self.cur.execute(code, param)
    def commit(self):
        """Commit pending changes to the database."""
        self.db.commit()
class UserDataBase(Connector):
    """User-related queries on top of :class:`Connector`."""
    def add_new_user(self, username, password, email=None, phone=None):
        """Insert a new row into the ``User`` table.

        :param username: username of the new user
        :param password: (hashed) password of the new user
        :param email: email address of the user, or None
        :param phone: phone number of the user, or None
        """
        self.create_cursor()
        # New users start with empty liked/watched JSON documents.
        liked = json.dumps({})
        watched = json.dumps({})
        # Build the column list dynamically so every combination of
        # email/phone (both, either, neither) works.  The original left
        # ``code`` undefined when both or neither were given, and one
        # variant used an invalid ``%S`` placeholder.
        columns = ["username", "password"]
        values = [username, password]
        if email is not None:
            columns.append("email")
            values.append(email)
        if phone is not None:
            columns.append("phone")
            values.append(phone)
        columns += ["liked", "watched"]
        values += [liked, watched]
        placeholders = ", ".join(["%s"] * len(values))
        code = "INSERT INTO User({}) VALUES ({})".format(
            ", ".join(columns), placeholders)
        self.execute(code, tuple(values))
        self.commit()
        self.close_cursor()
    def check_user_registration_params(self, username='', email='', phone=''):
        """Check whether username, email and phone are still unused.

        :param username: users username
        :param email: users email
        :param phone: users phone
        :return: (True, 'working') when all are free, otherwise
                 (False, <name of the first taken field>)
        """
        # NOTE: the original defined inner helpers named ``username``/
        # ``email``/``phone`` which shadowed the parameters, so the SQL
        # parameter sent to the server was the function object instead of
        # the value.  A single generic helper avoids the shadowing.
        def is_free(code, value):
            """Return True when no row matches ``value`` for this query."""
            self.create_cursor()
            self.execute(code, (value,))
            taken = any(True for _ in self.cur)
            self.close_cursor()
            return not taken
        if not is_free("SELECT idUser FROM User WHERE username = %s", username):
            return False, 'username'
        if not is_free("SELECT idUser FROM User WHERE email = %s", email):
            return False, 'Email'
        if not is_free("SELECT idUser FROM User WHERE phoneNumber = %s", phone):
            return False, 'phone'
        return True, 'working'
    def delete_existing_user(self, id):
        """Delete the existing user with the given id.

        :param id: idUser
        """
        self.create_cursor()
        self.execute("DELETE FROM User WHERE idUser = %s", (id,))
        self.commit()
        self.close_cursor()
    def get_user_by_username(self, username):
        """Look up a user by username.

        :return: tuple (True/False if user exists,
                 {'idUser': , 'username': , 'password': , 'email': ,
                  'phone': , 'liked': dict, 'watched': dict})
        """
        data = {}
        self.create_cursor()
        self.execute("SELECT * FROM user WHERE username = %s", (username,))
        # username is unique, so at most one row is expected.
        for user in self.cur:
            data = {'idUser': user['idUser'],
                    'username': username,
                    'password': user['password'],
                    'email': user['email'],
                    'phone': user['phoneNumber'],
                    'liked': json.loads(user['liked']),
                    'watched': json.loads(user['watched'])}
        self.close_cursor()
        return (data != {}), data
    def get_user_by_id(self, id):
        """Look up a user by id.

        :return: tuple (True/False if user exists,
                 {'idUser': , 'username': , 'password': , 'email': ,
                  'phone': , 'liked': dict, 'watched': dict})
        """
        self.create_cursor()
        self.execute("SELECT * FROM user WHERE idUser = %s", (id,))
        # idUser is the primary key, so at most one row is expected.
        for user in self.cur:
            data = {'idUser': user['idUser'],
                    'username': user['username'],
                    'password': user['password'],
                    'email': user['email'],
                    'phone': user['phoneNumber'],
                    'liked': json.loads(user['liked']),
                    'watched': json.loads(user['watched'])}
            self.close_cursor()
            return True, data
        # The original leaked the cursor when no row matched.
        self.close_cursor()
        return False, {}
    def save_watched_to_user(self, username, watched):
        """Save the watched JSON document for a user.

        :param username: username of the user
        :param watched: dict of watched movies
        :return: True/False if successful
        """
        self.create_cursor()
        watched_json = json.dumps(watched)
        try:
            self.execute("UPDATE user SET watched = %s WHERE username = %s",
                         (watched_json, username))
            self.commit()
            self.close_cursor()
            return True
        except Exception:
            # Best-effort: report failure instead of propagating DB errors.
            return False
    def save_liked_to_user(self, username, liked):
        """Save the liked JSON document for a user.

        :param username: username of the user
        :param liked: dict of liked movies
        :return: True/False if successful
        """
        self.create_cursor()
        liked_json = json.dumps(liked)
        try:
            self.execute("UPDATE user SET liked = %s WHERE username = %s",
                         (liked_json, username))
            self.commit()
            self.close_cursor()
            return True
        except Exception:
            # Best-effort: report failure instead of propagating DB errors.
            return False
    def save_opinion_of_movie(self, username, idMovie, opinion, rate):
        """Insert or update a user's opinion and rating for a movie.

        :param username: users username
        :param idMovie: id of the movie
        :param opinion: free-text opinion
        :param rate: numeric rating (``ocena`` column)
        :return: True/False if successful
        """
        try:
            # An existing rating for this movie decides INSERT vs UPDATE.
            _, rating_check = self.get_all_opinions_of_user(username)
            ver, user = self.get_user_by_username(username)
            self.create_cursor()
            if idMovie not in rating_check:
                code = ("INSERT INTO opinion(idUser, idMovie, opinion, ocena) "
                        "VALUES (%s, %s, %s, %s)")
                param = (user['idUser'], idMovie, opinion, rate)
            else:
                code = ("UPDATE opinion SET opinion = %s, ocena = %s "
                        "WHERE idUser = %s AND idMovie = %s")
                param = (opinion, rate, user['idUser'], idMovie)
            self.execute(code, param)
            self.commit()
            self.close_cursor()
        except Exception:
            # Best-effort: report failure instead of propagating DB errors.
            return False
        return True
    def get_all_opinions_of_user(self, username):
        """Return the opinions and ratings a user has saved.

        :param username: users username
        :return: (opinions, ratings) where each maps idMovie -> value,
                 e.g. ({'tt123456': 'I very liked this movie'},
                       {'tt123456': 8})
        """
        opinions = {}
        ratings = {}
        self.create_cursor()
        code = ("SELECT opinion.idMovie, opinion.opinion, opinion.ocena "
                "FROM opinion JOIN user ON opinion.idUser = user.idUser "
                "WHERE user.username = %s")
        self.execute(code, (username,))
        for row in self.cur:
            opinions[row['idMovie']] = row['opinion']
            ratings[row['idMovie']] = row['ocena']
        self.close_cursor()
        return opinions, ratings
class MovieDatabase(Connector):
def search_by_keyword(self, keyword):
"""Function gets a keyword that was typed in the search box, returns all the results.
Search by keyword on main page
:param keyword: string
:return: int(number_of_matches), sorted(list[dict("movieId": , "title": , "year": , ...)]),
"""
# Lists for saving data
movies_data = []
writers_and_directors_data = []
# Add % for keyword search
keyword = '%' + keyword + '%'
# Create cursor
self.create_cursor()
# SQL code
code = "SELECT * FROM movie WHERE | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Manage Rapyuta IO Resources
Specify credentials either in a pillar file or
in the minion's config file:
.. code-block:: yaml
rapyutaio.project_id: project-oidjfiasuhgw4hgfw4thw0hg
rapyutaio.auth_token: <PASSWORD>
It's also possible to specify ``project_id``, and ``auth_token`` via a profile,
either passed in as a dict, or as a string to pull from pillars or minion
config:
.. code-block:: yaml
myprofile:
project_id: project-oidjfiasuhgw4hgfw4thw0hg
auth_token: <PASSWORD>
.. code-block:: yaml
Ensure IO package exists:
rapyutaio.package_present:
- name: grafana
- source: /path/to/local/file
- region: us-east-1
- project_id: project-oidjfiasuhgw4hgfw4thw0hg
- auth_token: <PASSWORD>
- profile: myprofile
"""
import logging
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__virtual_name__ = "rapyutaio"
def __virtual__():
    """
    Load this state module only when the rapyutaio execution module is
    available.
    """
    if "rapyutaio.get_packages" in __salt__:
        return __virtual_name__
    return (False, "rapyutaio module could not be loaded")
# -----------------------------------------------------------------------------
#
# Packages
#
# -----------------------------------------------------------------------------
def package_present(name,
                    source=None,
                    template=None,
                    defaults=None,
                    context=None,
                    contents=None,
                    show_changes=True,
                    saltenv="base"):
    """
    Ensure that a package exists in the project catalog with matching definition.
    name
        Name of the package
    source
        Source file to upload to the catalog. This file should be hosted
        on the Salt Master server (``salt://``).
    template
        If this setting is supplied, the named templating engine will be used to
        render the source file. The following templates are supported:
        - :mod:`cheetah<salt.renderers.cheetah>`
        - :mod:`genshi<salt.renderers.genshi>`
        - :mod:`jinja<salt.renderers.jinja>`
        - :mod:`mako<salt.renderers.mako>`
        - :mod:`py<salt.renderers.py>`
        - :mod:`wempy<salt.renderers.wempy>`
    defaults
        Default context passed to the template.
    context
        Overrides default context variables passed to the template.
    contents
        Specify the contents of the manifest as YAML. Can be used in combination with
        ``source`` to override parts of the configuration.
    show_changes
        Output a unified diff of the old manifest and new manifest. If ``False``
        return a boolean if any changes were made.
    """
    ret = {
        "name": name,
        "result": False,
        "changes": {},
        "comment": ""
    }
    new_manifest = {}
    #
    # Get the content of the new manifest
    #
    if source is not None:
        source_contents = __salt__['cp.get_file_str'](source, saltenv=saltenv)
        if source_contents is False:
            ret['comment'] = "Source file not found: {}".format(source)
            return ret
        if template is not None:
            source_contents = __salt__["file.apply_template_on_contents"](
                source_contents, template, context, defaults, saltenv
            )
        # Parse as YAML first, falling back to JSON.
        try:
            new_manifest = __utils__['yaml.load'](source_contents)
        except Exception:
            try:
                new_manifest = __utils__['json.loads'](source_contents)
            except Exception:
                ret['comment'] = "Manifest source must be a JSON or YAML file"
                return ret
    if contents is not None:
        # ``contents`` overrides values parsed from ``source``.
        new_manifest = __salt__['rapyutaio._deep_merge'](new_manifest, contents)
    if new_manifest == {}:
        ret['comment'] = "package_present requires either 'source' or 'contents'"
        return ret
    #
    # Allow setting the name via the state
    #
    if 'name' not in new_manifest:
        new_manifest['name'] = name
    man_name = new_manifest['name']
    man_version = new_manifest['packageVersion']
    #
    # Fetch the existing/old manifest if it exists
    #
    try:
        old_package = __salt__['rapyutaio.get_package'](name=man_name,
                                                        version=man_version)
    except CommandExecutionError as e:
        ret['comment'] = e
        return ret
    if old_package:
        old_package_uid = old_package['packageInfo']['guid']
        old_manifest = __salt__['rapyutaio.get_manifest'](guid=old_package_uid)
    else:
        old_manifest = {}
    if old_manifest:
        # Is the new manifest different to the old
        ret['changes'] = __utils__['data.recursive_diff'](old_manifest, new_manifest)
        if not ret['changes']:
            # The manifest is already in the correct state so return immediately
            ret['result'] = True
            ret['comment'] = "Package '{} {}' is in the correct state".format(man_name, man_version)
            return ret
    #
    # Test
    #
    if __opts__['test']:
        # Always return a None result for dry-runs
        ret['result'] = None
        if ret['changes']:
            ret['comment'] = "Package '{} {}' would be updated".format(man_name, man_version)
        else:
            ret['comment'] = "New package '{} {}' would be created".format(man_name, man_version)
            ret['changes'] = {
                'new': new_manifest,
                'old': old_manifest
            }
        if not show_changes:
            ret['changes'] = "<show_changes=False>"
        return ret
    # TODO: Create a "clean" manifest from the remote/existing manifest that only contains keys
    # that we know are required or will be used and compare only those
    #
    # Delete the existing manifest if it exists and is different to the new manifest
    #
    # BUGFIX: ``old_manifest`` is ``{}`` (never None) when no old package
    # exists, so the original ``is not None`` test always entered this
    # branch and returned "is in the correct state" without ever creating
    # a brand-new package.  Use truthiness instead.
    if old_manifest:
        if not ret['changes']:
            ret['comment'] = "Package '{} {}' is in the correct state".format(man_name, man_version)
            ret['result'] = True
            return ret
        # First check that the package is not in use
        pkg_deployments = __salt__['rapyutaio.get_deployments'](package_uid=old_package_uid)
        if pkg_deployments != []:
            ret['comment'] = "Package '{} {}' is in use and can't be updated.".format(man_name, man_version)
            return ret
        try:
            __salt__['rapyutaio.delete_package'](guid=old_package_uid)
        except CommandExecutionError as e:
            ret['comment'] = e
            return ret
    #
    # Attempt to upload the new manifest
    #
    response = __salt__['rapyutaio.create_package'](manifest=new_manifest)
    ret['result'] = True
    if old_manifest:
        # Replacing existing manifest
        ret['comment'] = "Package '{} {}' was updated".format(man_name, man_version)
    else:
        # Creating new manifest
        ret['changes'] = response
        ret['comment'] = "New package '{} {}' created".format(man_name, man_version)
    return ret
def package_absent(name, version):
    """
    Removes the version of a package if it exists.
    name
        Name of the package
    version
        Version of the package
    """
    ret = {
        "name": name,
        "result": False,
        "comment": "",
        "changes": {},
    }
    try:
        package = __salt__['rapyutaio.get_package'](name=name, version=version)
    except CommandExecutionError as e:
        ret['comment'] = e
        return ret
    # Nothing to do when the package is already gone.
    if not package:
        ret['result'] = True
        ret['comment'] = f"Package '{name} {version}' is not present"
        return ret
    #
    # test=True
    #
    if __opts__['test']:
        # Dry-runs always report a None result.
        ret['result'] = None
        ret['comment'] = f"Package '{name} {version}' would be deleted"
        return ret
    try:
        __salt__['rapyutaio.delete_package'](name=name, version=version)
    except CommandExecutionError as e:
        ret['comment'] = e
        return ret
    ret['result'] = True
    ret['changes'] = {'old': package, 'new': None}
    ret['comment'] = f"Package {name} {version} deleted"
    return ret
# -----------------------------------------------------------------------------
#
# Networks
#
# -----------------------------------------------------------------------------
def network_present(name,
                    runtime,
                    ros_distro,
                    device=None,
                    interface=None,
                    restart_policy=None):
    """
    Ensure a ROS routed network exists with matching definition.
    name
        Name of the network
    ros_distro
        ROS distribution to use for the network, Kinetic or Melodic, based
        on the version of the components it will be binding to.
    runtime
        Either ``cloud`` or ``device``
    device
        Name of the device to use for a device routed network.
    interface
        Network interface to bind to with a device routed network
    restart_policy
        Restart policy for the device routed network. One of these values:
        - no
        - always
        - on-failure
    """
    ret = {
        "name": name,
        "result": False,
        "comment": "",
        "changes": {},
    }
    old_network = __salt__['rapyutaio.get_network'](name=name)
    # BUGFIX: ``parameters`` must exist for both runtimes; the original
    # only defined it for ``device`` and then referenced it when creating
    # the network, raising NameError for the cloud runtime.
    parameters = {}
    if runtime == "device":
        # NOTE(review): the original called ``rapyuta.device``; every other
        # call in this module uses the ``rapyutaio`` module name — confirm.
        device_obj = __salt__['rapyutaio.device'](name=device)
        if device_obj is None:
            ret['comment'] = f"Device {device} not found"
            return ret
        parameters = {
            "NETWORK_INTERFACE": interface,
            "device_id": device_obj['uuid'],
            "restart_policy": restart_policy,
        }
    # Include the device parameters in the desired state so the diff
    # against the existing network reflects them (the original always
    # compared against an empty dict).
    new_network = {
        "name": name,
        "runtime": runtime,
        "rosDistro": ros_distro,
        "parameters": parameters,
    }
    if old_network:
        log.debug(old_network)
        ret['changes'] = __utils__['data.recursive_diff'](
            {
                "name": old_network['name'],
                "runtime": old_network['runtime'],
                "rosDistro": old_network['rosDistro'],
                "parameters": old_network.get('parameters', {}),
            },
            new_network
        )
        if ret['changes']:
            ret['result'] = False
            ret['comment'] = "Network '{0}' exists but is different.".format(name)
        else:
            ret['result'] = True
            ret['comment'] = "Network '{0}' is in the correct state.".format(name)
        return ret
    if __opts__['test']:
        # Always return a None result for dry-runs
        ret['result'] = None
        ret['comment'] = "Network '{0}' would be created.".format(name)
        ret['changes']['old'] = {}
        ret['changes']['new'] = new_network
        return ret
    response = __salt__['rapyutaio.create_network'](name=name,
                                                    runtime=runtime,
                                                    ros_distro=ros_distro,
                                                    parameters=parameters)
    ret['result'] = True
    ret['comment'] = "New network {0} created".format(name)
    ret['changes'] = response
    return ret
def network_absent(name):
    """Ensure that no ROS routed network with the given name exists."""
    ret = {
        "name": name,
        "result": False,
        "comment": "",
        "changes": {},
    }
    existing = __salt__['rapyutaio.get_network'](name=name)
    # Nothing to do when the network is already gone.
    if not existing:
        ret['result'] = True
        ret['comment'] = "Network {0} is not present".format(name)
        return ret
    ret['changes'] = {
        'old': existing,
        'new': None
    }
    #
    # test=True
    #
    if __opts__['test']:
        # Dry-runs always report a None result.
        ret['result'] = None
        ret['comment'] = "Network {0} would be deleted".format(name)
        return ret
    __salt__['rapyutaio.delete_network'](guid=existing['guid'])
    ret['result'] = True
    ret['comment'] = "Network {0} deleted".format(name)
    return ret
# -----------------------------------------------------------------------------
#
# Volumes
#
# -----------------------------------------------------------------------------
def volume_present():
    """Ensure a volume exists. Placeholder -- not implemented yet."""
    pass
def volume_attached():
    """Ensure a volume is attached. Placeholder -- not implemented yet."""
    pass
def volume_absent():
    """Ensure a volume does not exist. Placeholder -- not implemented yet."""
    pass
# -----------------------------------------------------------------------------
#
# Deployments
#
# -----------------------------------------------------------------------------
def deployment_present(name,
package_name,
package_version,
parameters={},
dependencies=[]):
ret = {
"name": name,
"result": False,
"comment": "",
"changes": {},
}
log.info(f"deployment_present: {name}")
existing_deployment = __salt__['rapyutaio.get_deployment'](name=name)
log.info(f"existing_deployment: {existing_deployment}")
if existing_deployment is not None:
pkg_id = existing_deployment['packageId']
existing_dpl_pkg = __salt__['rapyutaio.get_package'](name=package_name,
version=package_version)
log.fatal(existing_dpl_pkg)
if pkg_id | |
<gh_stars>0
import copy
import warnings
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import torch as th
from gym import spaces
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutBufferSamples, Schedule
from stable_baselines3.common.utils import explained_variance
from torch import nn
from torch.distributions import kl_divergence
from torch.nn import functional as F
from sb3_contrib.common.utils import conjugate_gradient_solver, flat_grad
class TRPO(OnPolicyAlgorithm):
"""
Trust Region Policy Optimization (TRPO)
Paper: https://arxiv.org/abs/1502.05477
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
and Stable Baselines (TRPO from https://github.com/hill-a/stable-baselines)
Introduction to TRPO: https://spinningup.openai.com/en/latest/algorithms/trpo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate for the value function, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)
NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
See https://github.com/pytorch/pytorch/issues/29372
:param batch_size: Minibatch size for the value function
:param gamma: Discount factor
:param cg_max_steps: maximum number of steps in the Conjugate Gradient algorithm
for computing the Hessian vector product
:param cg_damping: damping in the Hessian vector product computation
:param line_search_shrinking_factor: step-size reduction factor for the line-search
(i.e., ``theta_new = theta + alpha^i * step``)
:param line_search_max_iter: maximum number of iteration
for the backtracking line-search
:param n_critic_updates: number of critic updates per policy update
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param normalize_advantage: Whether to normalize or not the advantage
:param target_kl: Target Kullback-Leibler divergence between updates.
Should be small for stability. Values like 0.01, 0.05.
:param sub_sampling_factor: Sub-sample the batch to make computation faster
see p40-42 of <NAME> thesis http://joschu.net/docs/thesis.pdf
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
    def __init__(
        self,
        policy: Union[str, Type[ActorCriticPolicy]],
        env: Union[GymEnv, str],
        learning_rate: Union[float, Schedule] = 1e-3,
        n_steps: int = 2048,
        batch_size: int = 128,
        gamma: float = 0.99,
        cg_max_steps: int = 15,
        cg_damping: float = 0.1,
        line_search_shrinking_factor: float = 0.8,
        line_search_max_iter: int = 10,
        n_critic_updates: int = 10,
        gae_lambda: float = 0.95,
        use_sde: bool = False,
        sde_sample_freq: int = -1,
        normalize_advantage: bool = True,
        target_kl: float = 0.01,
        sub_sampling_factor: int = 1,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
    ):
        # See the class docstring for the meaning of each parameter.
        # Model setup is deferred (_init_setup_model=False) so the local
        # sanity checks below run before the network is built.
        super(TRPO, self).__init__(
            policy,
            env,
            learning_rate=learning_rate,
            n_steps=n_steps,
            gamma=gamma,
            gae_lambda=gae_lambda,
            ent_coef=0.0,  # entropy bonus is not used by TRPO
            vf_coef=0.0,  # value function is optimized separately
            max_grad_norm=0.0,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
            policy_base=ActorCriticPolicy,
            tensorboard_log=tensorboard_log,
            policy_kwargs=policy_kwargs,
            verbose=verbose,
            device=device,
            create_eval_env=create_eval_env,
            seed=seed,
            _init_setup_model=False,
            supported_action_spaces=(
                spaces.Box,
                spaces.Discrete,
                spaces.MultiDiscrete,
                spaces.MultiBinary,
            ),
        )
        self.normalize_advantage = normalize_advantage
        # Sanity check, otherwise it will lead to noisy gradient and NaN
        # because of the advantage normalization
        if self.env is not None:
            # Check that `n_steps * n_envs > 1` to avoid NaN
            # when doing advantage normalization
            buffer_size = self.env.num_envs * self.n_steps
            if normalize_advantage:
                assert buffer_size > 1, (
                    "`n_steps * n_envs` must be greater than 1. "
                    f"Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}"
                )
            # Check that the rollout buffer size is a multiple of the mini-batch size
            untruncated_batches = buffer_size // batch_size
            if buffer_size % batch_size > 0:
                warnings.warn(
                    f"You have specified a mini-batch size of {batch_size},"
                    f" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`,"
                    f" after every {untruncated_batches} untruncated mini-batches,"
                    f" there will be a truncated mini-batch of size {buffer_size % batch_size}\n"
                    f"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\n"
                    f"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})"
                )
        self.batch_size = batch_size
        # Conjugate gradients parameters
        self.cg_max_steps = cg_max_steps
        self.cg_damping = cg_damping
        # Backtracking line search parameters
        self.line_search_shrinking_factor = line_search_shrinking_factor
        self.line_search_max_iter = line_search_max_iter
        self.target_kl = target_kl
        self.n_critic_updates = n_critic_updates
        self.sub_sampling_factor = sub_sampling_factor
        if _init_setup_model:
            self._setup_model()
    def _compute_actor_grad(
        self, kl_div: th.Tensor, policy_objective: th.Tensor
    ) -> Tuple[List[nn.Parameter], th.Tensor, th.Tensor, List[Tuple[int, ...]]]:
        """
        Compute actor gradients for kl div and surrogate objectives.
        :param kl_div: The KL divergence objective
        :param policy_objective: The surrogate objective ("classic" policy gradient)
        :return: List of actor params, gradients and gradients shape.
        """
        # This is necessary because not all the parameters in the policy have gradients w.r.t. the KL divergence
        # The policy objective is also called surrogate objective
        policy_objective_gradients = []
        # Contains the gradients of the KL divergence
        grad_kl = []
        # Contains the shape of the gradients of the KL divergence w.r.t each parameter
        # This way the flattened gradient can be reshaped back into the original shapes and applied to
        # the parameters
        grad_shape = []
        # Contains the parameters which have non-zeros KL divergence gradients
        # The list is used during the line-search to apply the step to each parameters
        actor_params = []
        for name, param in self.policy.named_parameters():
            # Skip parameters related to value function based on name
            # this work for built-in policies only (not custom ones)
            if "value" in name:
                continue
            # For each parameter we compute the gradient of the KL divergence w.r.t to that parameter
            # create_graph=True keeps the graph so Hessian-vector products
            # can later be taken through this gradient; allow_unused=True
            # yields None for parameters unrelated to the KL divergence.
            kl_param_grad, *_ = th.autograd.grad(
                kl_div,
                param,
                create_graph=True,
                retain_graph=True,
                allow_unused=True,
                only_inputs=True,
            )
            # If the gradient is not zero (not None), we store the parameter in the actor_params list
            # and add the gradient and its shape to grad_kl and grad_shape respectively
            if kl_param_grad is not None:
                # If the parameter impacts the KL divergence (i.e. the policy)
                # we compute the gradient of the policy objective w.r.t to the parameter
                # this avoids computing the gradient if it's not going to be used in the conjugate gradient step
                policy_objective_grad, *_ = th.autograd.grad(policy_objective, param, retain_graph=True, only_inputs=True)
                grad_shape.append(kl_param_grad.shape)
                grad_kl.append(kl_param_grad.view(-1))
                policy_objective_gradients.append(policy_objective_grad.view(-1))
                actor_params.append(param)
        # Gradients are concatenated before the conjugate gradient step
        # (flattened per-parameter gradients joined into one 1-D tensor).
        policy_objective_gradients = th.cat(policy_objective_gradients)
        grad_kl = th.cat(grad_kl)
        return actor_params, policy_objective_gradients, grad_kl, grad_shape
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
policy_objective_values = []
kl_divergences = []
line_search_results = []
value_losses = []
# This will only loop once (get all data in one go)
for rollout_data in self.rollout_buffer.get(batch_size=None):
# Optional: sub-sample data for faster computation
if self.sub_sampling_factor > 1:
rollout_data = RolloutBufferSamples(
rollout_data.observations[:: self.sub_sampling_factor],
rollout_data.actions[:: self.sub_sampling_factor],
None, # old values, not used here
rollout_data.old_log_prob[:: self.sub_sampling_factor],
rollout_data.advantages[:: self.sub_sampling_factor],
None, # returns, not used here
)
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
# Re-sample the noise matrix because the log_std has changed
if self.use_sde:
# batch_size is only used for the value function
self.policy.reset_noise(actions.shape[0])
with th.no_grad():
# Note: is copy enough, no need for deepcopy?
# If using gSDE and deepcopy, we need to use `old_distribution.distribution`
# directly to avoid PyTorch errors.
old_distribution = copy.copy(self.policy.get_distribution(rollout_data.observations))
distribution = self.policy.get_distribution(rollout_data.observations)
log_prob = distribution.log_prob(actions)
advantages = rollout_data.advantages
if self.normalize_advantage:
| |
local namespace.
for func_name in func_names:
setattr(base, func_name, func)
func.__globals__[func_name] = _thunk
return _thunk
return inner
class DirectOutputThingMixin:
    """This is the interface for OutputThings that should be directly
    scheduled by the scheduler (e.g. through schedule_recurring(),
    schedule_periodic(), or schedule_periodic_on_separate_thread).
    """
    def _observe(self):
        """Get an event and call the appropriate dispatch function.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        # Bug fix: the original `raise NotImplemented` raises a TypeError at
        # runtime (NotImplemented is a non-exception singleton used by rich
        # comparisons); NotImplementedError is the intended exception.
        raise NotImplementedError
class EventLoopOutputThingMixin:
    """OutputThing that gets messages from an event loop, either the same
    loop as the scheduler or a separate one.
    """
    def _observe_event_loop(self):
        """Call the event OutputThing's event loop. When
        an event occurs, the appropriate _dispatch method should
        be called.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        # Bug fix: `raise NotImplemented` raises a TypeError (NotImplemented
        # is not an exception class); NotImplementedError is intended.
        raise NotImplementedError

    def _stop_loop(self):
        """When this method is called, the OutputThing should exit the
        event loop as soon as possible.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError
class IterableAsOutputThing(OutputThing, DirectOutputThingMixin):
    """Adapt an arbitrary iterator to the OutputThing interface so it can
    be driven by the scheduler's schedule_recurring() and
    schedule_periodic() methods.
    """
    def __init__(self, iterable, name=None):
        super().__init__()
        self.iterable = iterable
        self.name = name

    def _observe(self):
        try:
            item = next(self.iterable)
        except StopIteration:
            # Normal end of the sequence: release resources, then tell
            # downstream connections we are done.
            self._close()
            self._dispatch_completed()
        except FatalError:
            # Fatal errors propagate up after cleanup; they are not sent
            # downstream as ordinary error events.
            self._close()
            raise
        except Exception as exc:
            # Non-fatal iterator failure: log the traceback, clean up, and
            # forward the error downstream so other sensors keep running.
            tb.print_exc()
            self._close()
            self._dispatch_error(exc)
        else:
            self._dispatch_next(item)

    def _close(self):
        """Hook invoked when iteration stops (end of sequence or error).
        Subclasses may override to release resources such as open
        files/connections; the default does nothing.
        """
        pass

    def __str__(self):
        return self.name if getattr(self, 'name', None) else super().__str__()
def from_iterable(i):
    """Wrap the iterator *i* in an IterableAsOutputThing so it can be
    scheduled as an event source."""
    return IterableAsOutputThing(i)
def from_list(l):
    """Wrap the list (or other iterable) *l* in an IterableAsOutputThing,
    creating the iterator for the caller."""
    return IterableAsOutputThing(iter(l))
# XXX Move this out of base.py
class FunctionIteratorAsOutputThing(OutputThing, DirectOutputThingMixin):
    """Produce an OutputThing sequence from a state-driven loop. Example::

        res = GenerateOutputThing(0,
                                  lambda x: x < 10,
                                  lambda x: x + 1,
                                  lambda x: x)

    initial_state: Initial state.
    condition: Condition to terminate generation (upon returning False).
    iterate: Iteration step function.
    result_selector: Selector function for results produced in the sequence.

    Returns the generated sequence.
    """
    def __init__(self, initial_state, condition, iterate, result_selector):
        super().__init__()
        self.value = initial_state
        self.condition = condition
        self.iterate = iterate
        self.result_selector = result_selector
        self.first = True

    def _observe(self):
        try:
            if self.first:
                # First call: emit the selector applied to the initial
                # state without iterating.
                self.first = False
                if not self.condition(self.value):
                    self._dispatch_completed()
                    return
            else:
                # Later calls: the condition is tested against the value
                # from the previous step, *before* iterating.
                if not self.condition(self.value):
                    self._dispatch_completed()
                    return
                self.value = self.iterate(self.value)
            self._dispatch_next(self.result_selector(self.value))
        except Exception as e:
            self._dispatch_error(e)
def from_func(init, cond, iter, selector):
    """Convenience constructor for FunctionIteratorAsOutputThing.

    Note: the parameter name `iter` shadows the builtin within this
    function; it is kept for backward compatibility with keyword callers.
    """
    return FunctionIteratorAsOutputThing(init, cond, iter, selector)
# Define a default sensor event as a tuple of sensor id, timestamp, and value.
SensorEvent = namedtuple('SensorEvent', ['sensor_id', 'ts', 'val'])

def make_sensor_event(sensor, sample):
    """Build a SensorEvent for *sample* read from *sensor*, stamped with
    the current wall-clock time."""
    return SensorEvent(sensor.sensor_id, time.time(), sample)
class SensorAsOutputThing(OutputThing, DirectOutputThingMixin):
    """OutputThing that samples a sensor on each observe call, wraps the
    sample in an event, and dispatches it forward. A sensor is any object
    with a sensor_id property and a sample() method; a sensor signals end
    of stream by raising StopIteration.

    Events default to SensorEvent instances; pass a different
    make_event_fn to change that.
    """
    def __init__(self, sensor, make_event_fn=make_sensor_event):
        super().__init__()
        self.sensor = sensor
        self.make_event_fn = make_event_fn

    def _observe(self):
        try:
            sample = self.sensor.sample()
            self._dispatch_next(self.make_event_fn(self.sensor, sample))
        except FatalError:
            # Fatal errors are re-raised, never dispatched downstream.
            raise
        except StopIteration:
            self._dispatch_completed()
        except Exception as e:
            self._dispatch_error(e)

    def __repr__(self):
        return 'SensorAsOutputThing(%s)' % repr(self.sensor)
class BlockingInputThing:
    """This implements an InputThing which may potentially block when sending
    an event outside the system. The InputThing is run on a separate thread.
    We create proxy methods for each port that can be called directly - these
    methods just queue up the call to run in the worker thread.

    The actual implementation of the InputThing goes in the _on_next,
    _on_completed, and _on_error methods. Note that we don't dispatch to
    separate methods for each port. This is because the port is likely to end
    up as just a message field rather than as a separate destination in the
    lower layers.
    """
    def __init__(self, scheduler, ports=None):
        """Create the per-port proxy methods, the work queue, and the
        background thread, and register with the scheduler.

        :param scheduler: the Scheduler driving this InputThing.
        :param ports: list of port names; defaults to ['default'].
        """
        if ports is None:
            self.ports = ['default',]
        else:
            self.ports = ports
        self.num_closed_ports = 0
        # Create the queue before the proxies so a proxy can never run
        # against a partially initialized instance.
        self.__queue__ = queue.Queue()
        # Create local proxy methods for each port.
        # Bug fix: `port=port` binds the loop variable at lambda definition
        # time. The original closures were late-bound, so *every* proxy
        # enqueued the last port in self.ports regardless of which port's
        # proxy was called.
        for port in self.ports:
            setattr(self, _on_next_name(port),
                    lambda x, port=port: self.__queue__.put(
                        (self._on_next, False, [port, x]),))
            setattr(self, _on_completed_name(port),
                    lambda port=port: self.__queue__.put(
                        (self._on_completed, True, [port]),))
            setattr(self, _on_error_name(port),
                    lambda e, port=port: self.__queue__.put(
                        (self._on_error, True, [port, e]),))
        self.scheduler = scheduler
        self.thread = _ThreadForBlockingInputThing(self, scheduler)
        self.scheduler.active_schedules[self] = self.request_stop
        def start():
            self.thread.start()
        # Start the worker thread from the event loop.
        self.scheduler.event_loop.call_soon(start)

    def request_stop(self):
        """This can be called to stop the thread before it is automatically
        stopped when all ports are closed. The _close() method will be
        called and the InputThing cannot be restarted later.
        """
        if self.thread is None:
            return  # no thread to stop
        self.__queue__.put(None)  # special stop token

    def _wait_and_dispatch(self):
        """Called by the main loop of the blocking thread to block for a
        request and then dispatch it. Returns True if it processed a normal
        request and False if it got a stop message or no more events are
        possible.
        """
        action = self.__queue__.get()
        if action is not None:
            (method, closing_port, args) = action
            method(*args)
            if closing_port:
                self.num_closed_ports += 1
                if self.num_closed_ports == len(self.ports):
                    # No more ports can receive events; treat this as a stop.
                    print("Stopping blocking InputThing %s" % self)
                    return False
            return True  # more work possible
        else:
            return False  # stop requested

    def _on_next(self, port, x):
        """Process the on_next event. Called in blocking thread."""
        pass

    def _on_completed(self, port):
        """Process the on_completed event. Called in blocking thread."""
        pass

    def _on_error(self, port, e):
        """Process the on_error event. Called in blocking thread."""
        pass

    def _close(self):
        """This is called when all ports have been closed. This can be used
        to close any connections, etc.
        """
        pass
class _ThreadForBlockingInputThing(threading.Thread):
    """Background thread for an InputThing that passes events to the
    external world and might block.

    The thread drains the InputThing's queue until a stop token arrives or
    all ports are closed; scheduler bookkeeping is always performed back on
    the event-loop thread via call_soon_threadsafe.
    """
    def __init__(self, input_thing, scheduler):
        # input_thing: the BlockingInputThing whose queue we service.
        # scheduler: used to notify the event loop on exit/error.
        self.input_thing = input_thing
        self.scheduler= scheduler
        self.stop_requested = False
        super().__init__()
    def run(self):
        try:
            more = True
            # Block on the queue and dispatch until a stop is signalled.
            while more:
                more = self.input_thing._wait_and_dispatch()
        except Exception as e:
            msg = "_wait_and_dispatch for %s exited with error: %s" % \
                  (self.input_thing, e)
            logger.exception(msg)
            # Clean up in this thread first, then hand the fatal error to
            # the scheduler's event loop.
            self.input_thing._close()
            self.input_thing.thread = None # disassociate this thread
            def die(): # need to stop the scheduler in the main loop
                del self.scheduler.active_schedules[self.input_thing]
                raise ScheduleError(msg) from e
            self.scheduler.event_loop.call_soon_threadsafe(die)
        else:
            # Normal shutdown: close, disassociate, and deregister from the
            # scheduler on the event-loop thread.
            self.input_thing._close()
            self.input_thing.thread = None # disassociate this thread
            def done():
                self.scheduler._remove_from_active_schedules(self.input_thing)
            self.scheduler.event_loop.call_soon_threadsafe(done)
class _ThreadForBlockingOutputThing(threading.Thread):
    """Background thread for OutputThings that might block.

    Calls the OutputThing's _observe() roughly every `interval` seconds
    until stopped; dispatches are marshalled back onto the scheduler's
    event loop.
    """
    def __init__(self, output_thing, interval, scheduler):
        # interval: target seconds between successive _observe() calls.
        self.output_thing = output_thing
        self.interval = interval
        self.scheduler = scheduler
        self.stop_requested = False
        super().__init__()
    def _stop_loop(self):
        # Cooperative shutdown flag; checked at the top of each iteration
        # and again before sleeping.
        self.stop_requested = True
    def run(self):
        def enqueue_fn(fn, *args):
            # Dispatch functions must run on the event-loop thread.
            self.scheduler.event_loop.call_soon_threadsafe(fn, *args)
        self.output_thing._schedule(enqueue_fn=enqueue_fn)
        try:
            while True:
                if self.stop_requested:
                    break
                start = time.time()
                self.output_thing._observe()
                # NOTE(review): this breaks out of the loop when the
                # OutputThing *has* connections, which looks inverted
                # (one would expect to stop when no connections remain).
                # Confirm intent before changing.
                if self.output_thing._has_connections():
                    break
                # Sleep only for the remainder of the interval, accounting
                # for time spent inside _observe().
                time_left = self.interval - (time.time() - start)
                if time_left > 0 and (not self.stop_requested):
                    time.sleep(time_left)
        except Exception as e:
            msg = "_observe for %s exited with error" % self.output_thing
            logger.exception(msg)
            def die(): # need to stop the scheduler in the main loop
                del self.scheduler.active_schedules[self.output_thing]
                raise ScheduleError(msg) from e
            self.scheduler.event_loop.call_soon_threadsafe(die)
        else:
            def done():
                self.scheduler._remove_from_active_schedules(self.output_thing)
            self.scheduler.event_loop.call_soon_threadsafe(done)
class ScheduleError(FatalError):
    """Fatal error raised on the event loop when a scheduled background
    thread (input or output) exits with an unexpected exception."""
    pass
class Scheduler:
"""Wrap an asyncio event loop and provide methods for various kinds of
periodic scheduling.
"""
def __init__(self, event_loop):
self.event_loop = event_loop
self.active_schedules = {} # mapping from task to schedule handle
self.pending_futures = {}
self.next_future_id = 1
# Set the following to an exception if we are exiting the loop due to
# an exception. We will then raise a SchedulerError when the event loop
# exits.
self.fatal_error = None
# we set the exception handler to stop all active schedules and
# break out of the event loop if we get an unexpected error.
def exception_handler(loop, context):
assert loop==self.event_loop
self.fatal_error = context['exception']
self.stop()
self.event_loop.set_exception_handler(exception_handler)
def _remove_from_active_schedules(self, output_thing):
"""Remove the specified OutputThing from the active_schedules map.
If there are no more active schedules, we will request exiting of
the event loop. This method must be run from the | |
<gh_stars>0
# -*- coding: utf-8 -*-
'''
Tools for Web Flayer
'''
# Python
import os
import re
import sys
import time
import random
import pprint
import urllib
# 3rd party
import requests
from termcolor import colored
import psycopg2
from psycopg2.extras import Json
from bs4 import BeautifulSoup
# Internal
import flayer.event
class Output(object):
    '''
    Used for outputting data.

    Each severity method prints its message in a configurable color.
    Messages are suppressed while running as a daemon unless force is True.
    '''
    def __init__(self, opts):
        '''
        Initialize with the shared opts dict (reads 'daemon' and the
        per-severity *_color overrides).
        '''
        self.opts = opts

    def _emit(self, msg, force, color_opt, default_color, attrs=None):
        '''
        Shared gate + colored print used by all severity levels.

        The message is shown when not running as a daemon, or when force
        is exactly True (preserves the original `force is True` check).
        '''
        if not self.opts['daemon'] or force is True:
            print(colored(msg, self.opts.get(color_opt, default_color),
                          attrs=attrs))

    def action(self, msg, force=False):
        '''
        Something is currently happening
        '''
        self._emit(msg, force, 'action_color', 'green')

    def info(self, msg, force=False):
        '''
        Informational only
        '''
        self._emit(msg, force, 'info_color', 'cyan')

    def warn(self, msg, force=False):
        '''
        Something is possibly wrong, but not enough to stop running
        '''
        self._emit(msg, force, 'warn_color', 'yellow')

    def error(self, msg, force=False):
        '''
        Something is wrong enough to halt execution
        '''
        self._emit(msg, force, 'error_color', 'red', attrs=['bold'])
def process_url(url_uuid, url, content, parsers):
    '''
    Process a URL
    '''
    # Walk the parser modules in order; the first '.func_map' entry whose
    # lookup returns a handler wins.
    handler = None
    for mod_name in parsers:
        if not mod_name.endswith('.func_map'):
            continue
        handler = parsers[mod_name](url)
        if handler is not None:
            break
    handler(url_uuid, url, content)
def get_url(
        url,
        parent=None,
        referer=None,
        dbclient=None,
        client=requests,
        opts=None,
        context=None,
):
    '''
    Download a URL (if necessary) and store it.

    url: the URL to fetch.
    parent: UUID of the referring URL; recorded in the referers table.
    referer: value for the HTTP referer header, if any.
    dbclient: psycopg2 connection used for the urls/content/referers tables.
    client: HTTP client module/object exposing request() (default requests).
    opts: configuration dict (method, headers, save_path, force, wait, ...).
    context: shared mutable dict passed through to _save_path()/status().

    Returns a (url_uuid, content) tuple; returns (0, content) when caching
    is bypassed and (0, '') on download errors.
    '''
    out = Output(opts)
    headers = opts['headers'].copy()
    data = opts.get('data', None)
    if referer:
        headers['referer'] = referer
    if flayer.db.check_domain_wait(dbclient, url) is False:
        # We need to put this URL back into the queue.
        # NOTE(review): after re-queueing, execution falls through and the
        # URL is still processed below - confirm this is intended.
        queue_urls([url], dbclient, opts)
        flayer.db.pattern_wait(dbclient, url)
        flayer.db.set_domain_wait(dbclient, opts, url)
    wait = 0
    if opts.get('no_db_cache') is True:
        # Skip all the DB stuff and just download the URL
        req = client.request(
            opts['method'],
            url,
            headers=headers,
            data=data,
            verify=bool(opts.get('verify', True)),
        )
        req.raise_for_status()
        if opts.get('include_headers') is True:
            out.info(pprint.pformat(dict(req.headers)))
        content = req.text
        if opts['random_wait'] is True:
            # NOTE(review): random.randrange(1, wait) raises ValueError when
            # opts['wait'] <= 1 - confirm wait is always > 1.
            wait = int(opts.get('wait', 10))
            time.sleep(random.randrange(1, wait))
        if url not in opts['warned']:
            opts['warned'].append(url)
        return 0, content
    cur = dbclient.cursor()
    exists = False
    # Check for URL in DB
    cur.execute('''
        SELECT uuid, url, last_retrieved
        FROM urls
        WHERE url = %s
    ''', [url])
    if cur.rowcount < 1:
        # URL has never been retrieved; insert it and get the new UUID
        cur.execute('''
            INSERT INTO urls
            (url) VALUES (%s)
            RETURNING uuid
        ''', [url])
        dbclient.commit()
        url_uuid = cur.fetchone()[0]
        out.action('{} has not been retrieved before, new UUID is {}'.format(url, url_uuid))
    else:
        # URL has been retrieved, get its UUID
        url_uuid = cur.fetchone()[0]
        out.warn('{} exists, UUID is {}'.format(url, url_uuid))
        exists = True
        if url not in opts['warned']:
            opts['warned'].append(url)
    # Save referer relationships
    if parent:
        try:
            cur.execute('''
                INSERT INTO referers
                (url_uuid, referer_uuid)
                VALUES
                (%s, %s)
            ''', [url_uuid, parent])
            dbclient.commit()
        except psycopg2.IntegrityError:
            # This relationship already exists
            dbclient.rollback()
    if opts['force_directories'] and not opts['save_path']:
        # force_directories implies saving to disk; default to cwd
        opts['save_path'] = '.'
    # Check for previously stored content for this URL
    cur.execute('''
        SELECT data, uuid
        FROM content
        WHERE url_uuid = %s
        ORDER BY retrieved
        LIMIT 1
    ''', [url_uuid])
    if cur.rowcount < 1:
        # No cached content: download (streaming to disk when save_path set)
        try:
            if opts['save_path']:
                req = client.request(
                    opts['method'],
                    url,
                    headers=headers,
                    data=data,
                    verify=bool(opts.get('verify', True)),
                    stream=True,
                )
                content, req_headers = _save_path(url, url_uuid, req, wait, opts, context, dbclient)
            else:
                req = client.request(
                    opts['method'],
                    url,
                    headers=headers,
                    data=data,
                    verify=bool(opts.get('verify', True)),
                )
                content = req.text
                req_headers = req.headers
        except requests.exceptions.ConnectionError as exc:
            out.error('Error downloading {}:'.format(url))
            out.error(exc)
            return 0, ''
        except requests.exceptions.InvalidSchema as exc:
            out.error('Error downloading {}:'.format(url))
            out.error(exc)
            return 0, ''
        if url not in opts['warned']:
            opts['warned'].append(url)
        if opts.get('include_headers') is True:
            out.info(pprint.pformat(dict(req_headers)))
        if content:
            # NUL bytes are stripped because they are not valid in
            # PostgreSQL text/jsonb values.
            cur.execute('''
                INSERT INTO content
                (url_uuid, data) VALUES (%s, %s)
                ''',
                [
                    url_uuid,
                    Json({
                        'content': content.replace('\x00', ''),
                        'status': req.status_code,
                    })
                ]
            )
            dbclient.commit()
    else:
        if opts['force'] is True:
            # Cached content exists but force is set: re-download and
            # overwrite the existing content row.
            row_id = cur.fetchone()[1]
            if opts['save_path']:
                req = client.request(
                    opts['method'],
                    url,
                    headers=headers,
                    data=data,
                    verify=bool(opts.get('verify', True)),
                    stream=True,
                )
                content, req_headers = _save_path(url, url_uuid, req, wait, opts, context, dbclient)
            else:
                req = client.request(
                    opts['method'],
                    url,
                    headers=headers,
                    data=data,
                    verify=bool(opts.get('verify', True)),
                )
                content = req.text
                req_headers = req.headers
            if url not in opts['warned']:
                opts['warned'].append(url)
            if opts.get('include_headers') is True:
                out.info(pprint.pformat(dict(req_headers)))
            if content:
                cur.execute('''
                    UPDATE content
                    SET url_uuid = %s, data = %s
                    WHERE uuid = %s
                    ''',
                    [
                        url_uuid,
                        Json({'content': content}),
                        row_id
                    ]
                )
                dbclient.commit()
        else:
            # Serve the cached copy
            content = cur.fetchone()[0]['content']
    flayer.db.pattern_wait(dbclient, url)
    flayer.db.set_domain_wait(dbclient, opts, url)
    if exists is False:
        if opts['random_wait'] is True:
            # Politeness delay after fetching a previously-unseen URL.
            # NOTE(review): raises ValueError when opts['wait'] <= 1.
            wait = int(opts.get('wait', 10))
            time.sleep(random.randrange(1, wait))
    return url_uuid, content
def _save_path(url, url_uuid, req, wait, opts, context, dbclient):
    '''
    Save the URL to a path
    '''
    parts = urllib.parse.urlparse(url)
    if opts['force_directories']:
        # Mirror the URL structure: <save_path>/<netloc>/<path>
        rel_path = parts[2].lstrip('/')
        file_name = os.path.join(opts['save_path'], parts[1], rel_path)
    else:
        # Flat layout: just the final path component under save_path
        file_name = os.path.join(opts['save_path'], parts[2].split('/')[-1])
    return status(req, url, url_uuid, file_name, wait, opts, context, dbclient)
def status(
        req,
        media_url,
        url_uuid,
        file_name,
        wait=0,
        opts=None,
        context=None,
        dbclient=None,
):
    '''
    Show status of the download.

    Streams req's body to file_name in 4KB chunks, printing a progress
    line (unless running as a daemon) and mirroring progress into
    context['dl_data'] for other consumers. Records the active download
    in the active_dl table for its duration and fires start/complete
    events.

    Returns (content, req_headers): content is the accumulated text for
    text/* responses (None when empty or skipped), req_headers the
    response headers. Returns (None, {}) when the file exists and
    overwrite is disabled.
    '''
    out = Output(opts)
    if opts is None:
        opts = {}
    if context is None:
        context = {}
    # _rename is defined elsewhere in this module; presumably applies
    # user-configured renaming rules to the target path.
    file_name = _rename(media_url, file_name, opts)
    cache_dir = '/'.join(file_name.split('/')[:-1])
    try:
        os.makedirs(cache_dir, mode=0o0755, exist_ok=True)
    except PermissionError as exc:
        out.error('Cannot create directory {}: {}'.format(cache_dir, exc))
    # Detect text responses from the Content-Type header; text bodies are
    # also accumulated in `content` for the caller.
    is_text = False
    req_headers = req.headers
    for header in list(req_headers):
        if header.lower().startswith('content-type'):
            if req_headers[header].startswith('text'):
                is_text = True
    content = ''
    cur = dbclient.cursor()
    agent_id = opts.get('id', 'unknown')
    # Mark this download as active for the duration of the transfer.
    cur.execute(
        'INSERT INTO active_dl (url_uuid, started_by) VALUES (%s, %s)',
        [url_uuid, agent_id]
    )
    cur.execute('SELECT url FROM urls WHERE uuid = %s', [url_uuid])
    root_url = cur.fetchone()[0]
    flayer.db.pattern_wait(dbclient, media_url)
    flayer.db.set_domain_wait(dbclient, opts, media_url)
    out.action('Downloading: {}'.format(media_url))
    if os.path.exists(file_name):
        if opts['overwrite']:
            out.warn('... {} exists, overwriting'.format(file_name))
        else:
            # Skip the download entirely when the file already exists.
            # NOTE(review): this early return leaves the active_dl row in
            # place (the DELETE below never runs) - confirm intended.
            out.warn('... {} exists, skipping'.format(file_name))
            return None, {}
    if not opts['daemon']:
        sys.stdout.write(colored('...Saving to: ', 'green'))
        out.info(file_name)
    buffer_size = 4096
    total = int(req.headers.get('Content-Length', 0))
    count = 0
    try:
        # point = bytes per 1% of the transfer
        # NOTE(review): int(total / 100) cannot raise ZeroDivisionError;
        # this handler appears to be dead code.
        point = int(total / 100)
        #increment = int(total / buffer_size)
    except ZeroDivisionError:
        out.error('Divide by zero error, status not available')
        point = 0
        #increment = 0
    start_time = time.time()
    last_time = time.time()
    delay_blocks = 0
    delay_count = 0
    # Progress snapshot shared with other parts of the program via context.
    context['dl_data'] = {
        'url': root_url,
        'media_url': media_url,
        'url_uuid': url_uuid,
        'bytes_total': '',
        'bytes_elapsed': '',
        'time_total': '',
        'time_left': '',
        'kbsec': 0,
    }
    flayer.event.fire('flayer/{}/download'.format(opts['id']), {root_url: 'started'}, opts)
    try:
        with open(file_name, 'wb') as fhp:
            #old_time = time.time()
            try:
                for block in req.iter_content(buffer_size):
                    if opts.get('hard_stop'):
                        # Hard stop: abandon the transfer and re-queue the URL
                        queue_urls([media_url], dbclient, opts)
                        break
                    if opts.get('abort'):
                        break
                    if is_text is True:
                        content += str(block)
                    fhp.write(block)
                    # NOTE: count advances by the full buffer_size per block,
                    # so it can overestimate bytes for short/final blocks.
                    count += buffer_size
                    delay_blocks += buffer_size
                    delay_count += 1
                    #old_time = time.time()
                    # Refresh the progress display at most once per second.
                    time_delay = time.time() - last_time
                    if time_delay >= float(1):
                        last_time = time.time()
                        try:
                            blocks_left = int((total - count) / buffer_size)
                        except ZeroDivisionError:
                            blocks_left = 0
                        # KiB transferred since the last refresh (~KiB/s).
                        kbsec = (buffer_size / 1024) * delay_count
                        try:
                            seconds_left = ((blocks_left * buffer_size) / 1024) / kbsec
                        except ZeroDivisionError:
                            seconds_left = 0
                        minutes_left = int(seconds_left / 60)
                        minsecs_left = seconds_left % 60
                        time_left = '%d:%02d' % (minutes_left, minsecs_left)
                        seconds_elapsed = time.time() - start_time
                        seconds_total = seconds_elapsed + seconds_left
                        minutes_total = int(seconds_total / 60)
                        minsecs_total = int(seconds_total % 60)
                        time_total = '%d:%02d' % (minutes_total, minsecs_total)
                        try:
                            percent = int(count / point)
                        except ZeroDivisionError:
                            percent = 0
                        context['dl_data']['bytes_total'] = total  # pylint: disable=bad-whitespace
                        context['dl_data']['bytes_elapsed'] = count  # pylint: disable=bad-whitespace
                        context['dl_data']['time_total'] = time_total  # pylint: disable=bad-whitespace
                        context['dl_data']['time_left'] = time_left  # pylint: disable=bad-whitespace
                        context['dl_data']['kbsec'] = kbsec  # pylint: disable=bad-whitespace
                        if not opts['daemon']:
                            # \x1b[2K\r clears the current line and returns
                            # the cursor so the progress line updates in place
                            sys.stdout.write('\x1b[2K\r')
                            sys.stdout.write(
                                colored('Total size is {} '.format(sizeof_fmt(total)), 'green'))
                            sys.stdout.write(colored('({} bytes), '.format(total), 'green'))
                            sys.stdout.write(colored('{}%, '.format(str(percent)), 'cyan'))
                            sys.stdout.write(colored(kbsec, 'cyan'))
                            sys.stdout.write(colored(' KiB/s, ', 'cyan'))
                            sys.stdout.write(colored('{}/{} left'.format(time_left, time_total), 'cyan'))
                            sys.stdout.flush()
                        delay_blocks = 0
                        delay_count = 0
            except OSError as exc:
                out.error('OS Error: {}'.format(exc))
                out.error('Media URL: {}'.format(media_url))
            except ProtocolError as exc:
                # NOTE(review): ProtocolError is not imported in this
                # module's visible imports (presumably
                # urllib3.exceptions.ProtocolError); if this handler is ever
                # reached it would raise NameError instead - confirm.
                out.error('Protocol Error: {}'.format(exc))
                out.error('Media URL: {}'.format(media_url))
            except Exception as exc:
                out.error('Exception: {}'.format(exc))
                out.error('Media URL: {}'.format(media_url))
    except OSError as exc:
        out.error('There was an error opening {}: {}'.format(file_name, exc))
    del context['dl_data']
    # Remove partial files on stop/abort, and text files when HTML saving
    # is disabled.
    if opts.get('hard_stop') or opts.get('abort'):
        os.remove(file_name)
    if is_text is True and opts.get('save_html', True) is False:
        os.remove(file_name)
    if not content:
        content = None
    cur.execute('DELETE FROM active_dl WHERE url_uuid = %s', [url_uuid])
    flayer.event.fire('flayer/{}/download'.format(opts['id']), {root_url: 'complete'}, opts)
    flayer.db.pattern_wait(dbclient, media_url)
    flayer.db.set_domain_wait(dbclient, opts, media_url)
    if not opts['daemon']:
        print()
    time.sleep(wait)
    return content, req_headers
def sizeof_fmt(num, suffix='B'):
    '''
    Show human-readable sizes
    '''
    # Divide down through binary unit prefixes until the value fits;
    # anything beyond Zi is reported in Yi.
    value = num
    for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s " % (value, unit, suffix)
        value /= 1024.0
    return "%.1f%s%s " % (value, 'Yi', suffix)
def dbsave_media(cur, media_url, url_uuid, file_name, dbclient):
'''
Save a media item into the database, once it's been downloaded
cur: Database cursor
media_url: The URL of the image/video that was downloaded
url_uuid: The UUID of the parent of the media_url
file_name: The place where the media_url was downloaded to
'''
try:
cur.execute('''
INSERT INTO urls (url) | |
126 CA ALA C 8 134.694 148.314 139.861 1.00109.62 C
ATOM 127 C ALA C 8 134.235 147.958 138.451 1.00104.90 C
ATOM 128 O ALA C 8 135.061 147.658 137.588 1.00100.70 O
ATOM 129 CB ALA C 8 133.968 147.451 140.881 1.00105.20 C
ATOM 130 N LYS C 9 132.918 148.001 138.237 1.00110.41 N
ATOM 131 CA LYS C 9 132.253 147.544 137.008 1.00105.03 C
ATOM 132 C LYS C 9 132.904 146.294 136.405 1.00102.80 C
ATOM 133 O LYS C 9 133.005 146.153 135.186 1.00 99.81 O
ATOM 134 CB LYS C 9 132.189 148.673 135.963 1.00112.95 C
ATOM 135 CG LYS C 9 133.525 149.191 135.441 1.00112.95 C
ATOM 136 CD LYS C 9 133.328 150.209 134.328 1.00112.95 C
ATOM 137 CE LYS C 9 132.560 151.424 134.819 1.00112.95 C
ATOM 138 NZ LYS C 9 132.363 152.431 133.739 1.00112.95 N
ATOM 139 N LYS C 10 133.322 145.383 137.277 1.00110.13 N
ATOM 140 CA LYS C 10 134.037 144.180 136.866 1.00102.77 C
ATOM 141 C LYS C 10 133.111 143.155 136.226 1.00105.18 C
ATOM 142 O LYS C 10 131.911 143.125 136.504 1.00108.74 O
ATOM 143 CB LYS C 10 134.751 143.556 138.067 1.00111.37 C
ATOM 144 CG LYS C 10 133.823 143.193 139.216 1.00118.43 C
ATOM 145 CD LYS C 10 134.592 142.612 140.391 1.00115.34 C
ATOM 146 CE LYS C 10 133.659 142.253 141.535 1.00125.20 C
ATOM 147 NZ LYS C 10 132.907 143.438 142.032 1.00133.50 N
TER
ATOM 148 N GLU D 3 149.071 188.264 151.543 1.00111.15 N
ATOM 149 CA GLU D 3 147.633 188.359 151.765 1.00111.15 C
ATOM 150 C GLU D 3 146.945 189.074 150.607 1.00111.15 C
ATOM 151 O GLU D 3 147.575 189.848 149.885 1.00111.15 O
ATOM 152 CB GLU D 3 147.340 189.084 153.079 1.00 85.33 C
ATOM 153 N LYS D 4 145.651 188.806 150.445 1.00121.08 N
ATOM 154 CA LYS D 4 144.847 189.386 149.369 1.00121.08 C
ATOM 155 C LYS D 4 145.471 189.125 148.001 1.00121.08 C
ATOM 156 O LYS D 4 146.041 188.058 147.767 1.00121.08 O
ATOM 157 CB LYS D 4 144.659 190.890 149.587 1.00 80.08 C
ATOM 158 N ARG D 5 145.357 190.109 147.110 1.00127.58 N
ATOM 159 CA ARG D 5 145.900 190.021 145.755 1.00127.58 C
ATOM 160 C ARG D 5 145.409 188.771 145.027 1.00127.58 C
ATOM 161 O ARG D 5 146.157 188.143 144.278 1.00127.58 O
ATOM 162 CB ARG D 5 147.431 190.042 145.788 1.00 92.50 C
ATOM 163 N LEU D 6 144.148 188.421 145.256 1.00133.20 N
ATOM 164 CA LEU D 6 143.564 187.219 144.673 1.00124.80 C
ATOM 165 C LEU D 6 143.376 187.355 143.166 1.00116.65 C
ATOM 166 O LEU D 6 142.718 188.282 142.693 1.00115.20 O
ATOM 167 CB LEU D 6 142.224 186.904 145.341 1.00123.11 C
ATOM 168 N SER D 7 143.963 186.427 142.417 1.00122.44 N
ATOM 169 CA SER D 7 143.809 186.402 140.968 1.00117.76 C
ATOM 170 C SER D 7 142.553 185.629 140.584 1.00113.02 C
ATOM 171 O SER D 7 142.520 184.401 140.668 1.00 99.09 O
ATOM 172 CB SER D 7 145.040 185.778 140.305 1.00114.82 C
ATOM 173 OG SER D 7 145.223 184.439 140.728 1.00100.01 O
ATOM 174 N ALA D 8 141.521 186.352 140.164 1.00110.90 N
ATOM 175 CA ALA D 8 140.239 185.731 139.856 1.00103.90 C
ATOM 176 C ALA D 8 139.759 186.055 138.445 1.00 99.18 C
ATOM 177 O ALA D 8 139.729 185.175 137.584 1.00 94.98 O
ATOM 178 CB ALA D 8 139.193 186.157 140.876 1.00100.30 C
ATOM 179 N LYS D 9 139.393 187.321 138.229 1.00101.28 N
ATOM 180 CA LYS D 9 138.753 187.810 136.999 1.00 95.90 C
ATOM 181 C LYS D 9 137.768 186.804 136.395 1.00 93.67 C
ATOM 182 O LYS D 9 137.670 186.658 135.176 1.00 90.68 O
ATOM 183 CB LYS D 9 139.808 188.219 135.955 1.00108.30 C
ATOM 184 CG LYS D 9 140.718 187.109 135.437 1.00108.30 C
ATOM 185 CD LYS D 9 141.626 187.610 134.325 1.00108.30 C
ATOM 186 CE LYS D 9 142.544 188.716 134.815 1.00108.30 C
ATOM 187 NZ LYS D 9 143.443 189.213 133.736 1.00108.30 N
ATOM 188 N LYS D 10 137.023 186.128 137.267 1.00 96.18 N
ATOM 189 CA LYS D 10 136.102 185.076 136.857 1.00 88.82 C
ATOM 190 C LYS D 10 134.841 185.639 136.213 1.00 91.23 C
ATOM 191 O LYS D 10 134.442 186.771 136.486 1.00 94.79 O
ATOM 192 CB LYS D 10 135.729 184.206 138.058 1.00 88.01 C
ATOM 193 CG LYS D 10 135.099 184.979 139.206 1.00 95.07 C
ATOM 194 CD LYS D 10 134.786 184.070 140.383 1.00 91.98 C
ATOM 195 CE LYS D 10 134.160 184.848 141.529 1.00101.84 C
ATOM 196 NZ LYS D 10 135.058 185.930 142.023 1.00110.14 N
TER
ATOM 197 N GLU E 3 182.665 184.397 151.568 1.00101.17 N
ATOM 198 CA GLU E 3 182.312 185.794 151.790 1.00101.17 C
ATOM 199 C GLU E 3 182.779 186.669 150.633 1.00101.17 C
ATOM 200 O GLU E 3 183.711 186.309 149.911 1.00101.17 O
ATOM 201 CB GLU E 3 182.910 186.296 153.105 1.00 79.37 C
ATOM 202 N LYS E 4 182.124 187.817 150.471 1.00110.10 N
ATOM 203 CA LYS E 4 182.429 188.761 149.395 1.00110.10 C
ATOM 204 C LYS E 4 182.375 188.087 148.027 1.00110.10 C
ATOM 205 O LYS E 4 181.536 187.216 147.793 1.00110.10 O
ATOM 206 CB LYS E 4 183.802 189.404 149.613 1.00 77.29 C
ATOM 207 N ARG E 5 183.275 188.499 147.136 1.00120.36 N
ATOM 208 CA ARG E 5 183.360 187.955 145.781 1.00120.36 C
ATOM 209 C ARG E 5 182.020 188.037 145.053 1.00120.36 C
ATOM 210 O ARG E 5 181.654 187.131 144.304 1.00120.36 O
ATOM 211 CB ARG E 5 183.852 186.505 145.814 1.00 88.94 C
ATOM 212 N LEU E 6 181.298 189.128 145.281 1.00129.21 N
ATOM 213 CA LEU E 6 179.976 189.314 144.698 1.00120.81 C
ATOM 214 C LEU E 6 180.049 189.534 143.190 1.00112.66 C
ATOM 215 O LEU E 6 180.727 190.446 142.718 1.00111.21 O
ATOM 216 CB LEU E 6 179.263 190.492 145.364 1.00125.07 C
ATOM 217 N SER E 7 179.346 188.691 142.442 1.00122.90 N
ATOM 218 CA SER E 7 179.276 188.827 140.992 1.00118.22 C
ATOM 219 C SER E 7 178.153 189.782 140.606 1.00113.48 C
ATOM 220 O SER E 7 176.975 189.433 140.684 1.00 99.55 O
ATOM 221 CB SER E 7 179.064 187.463 140.330 1.00118.74 C
ATOM 222 OG SER E 7 177.846 186.876 140.753 1.00103.93 O
ATOM 223 N ALA E 8 178.522 190.989 140.191 1.00119.02 N
ATOM 224 CA ALA E 8 177.536 192.016 139.881 1.00112.02 C
ATOM 225 C ALA E 8 177.696 192.572 138.470 1.00107.30 C
ATOM 226 O ALA E 8 176.851 192.329 137.609 1.00103.10 O
ATOM 227 CB ALA E 8 177.617 193.143 140.901 1.00107.03 C
ATOM 228 N LYS E 9 178.787 193.311 138.255 1.00112.72 N
ATOM 229 CA LYS E 9 179.055 194.071 137.025 1.00107.34 C
ATOM 230 C LYS E 9 177.794 194.698 136.420 1.00105.11 C
ATOM 231 O LYS E 9 177.627 194.746 135.201 1.00102.12 O
ATOM 232 CB LYS E 9 179.771 193.193 135.981 1.00119.28 C
ATOM 233 CG LYS E 9 178.996 191.986 135.462 1.00119.28 C
ATOM 234 CD LYS E 9 179.753 191.277 134.350 1.00119.28 C
ATOM 235 CE LYS E 9 181.089 190.746 134.841 1.00119.28 C
ATOM 236 NZ LYS E 9 181.840 190.046 133.762 1.00119.28 N
ATOM 237 N LYS E 10 176.921 195.194 137.291 1.00111.63 N
ATOM 238 CA LYS E 10 175.636 195.745 136.880 1.00104.27 C
ATOM 239 C LYS E 10 175.782 197.120 136.237 1.00106.68 C
ATOM 240 O LYS E 10 176.736 197.848 136.513 1.00110.24 O
ATOM 241 CB LYS E 10 174.691 195.831 138.080 1.00108.19 C
ATOM 242 CG LYS E 10 175.230 196.669 139.229 1.00115.25 C
ATOM 243 CD LYS E 10 174.268 196.685 140.406 1.00112.16 C
ATOM 244 CE LYS E 10 174.812 197.522 141.551 1.00122.02 C
ATOM 245 NZ LYS E 10 176.119 197.005 142.046 1.00130.32 N
TER
HETATM 246 MG MG A 401 195.263 159.228 130.323 1.00 89.18 MG
HETATM 247 MG MG A 402 190.491 155.655 128.902 1.00 58.96 MG
TER
HETATM 248 MG MG B 401 169.312 131.516 130.255 1.00 89.18 MG
HETATM 249 MG MG B 402 164.437 134.951 128.844 1.00 58.96 MG
TER
HETATM 250 MG MG C 401 134.961 147.586 130.281 1.00 89.18 MG
HETATM 251 MG MG C | |
hour_dict[key] = dat0
elif j == 1:
hour_dict[key] = dat1
elif j == 2:
hour_dict[key] = dat2
elif j == 3:
hour_dict[key] = dat3
elif j == 4:
hour_dict[key] = dat4
elif j == 5:
hour_dict[key] = dat5
elif j == 6:
hour_dict[key] = dat6
elif j == 7:
hour_dict[key] = dat7
j += 1
mos_dict[apid] = hour_dict #marry the hour_dict to the proper key in mos_dict
##########################
# Start of executed code #
##########################
while True:
logger.info('Start of metar-display-v4.py executed code main loop')
#Time calculations, dependent on 'hour_to_display' offset. this determines how far in the future the TAF data should be.
#This time is recalculated everytime the FAA data gets updated
zulu = datetime.utcnow() + timedelta(hours=hour_to_display) #Get current time plus Offset
current_zulu = zulu.strftime('%Y-%m-%dT%H:%M:%SZ') #Format time to match whats reported in TAF
current_hr_zulu = zulu.strftime('%H') #Zulu time formated for just the hour, to compare to MOS data
logger.debug('datetime - ' + str(datetime.utcnow()))
logger.debug('zulu - ' + str(zulu))
logger.debug('hour_to_display - ' + str(hour_to_display))
logger.debug('current_zulu - ' + str(current_zulu))
#Get current date and time
now = datetime.now()
dt_string = now.strftime("%I:%M%p") #12:00PM format
#Dictionary definitions. Need to reset whenever new weather is received
stationiddict = {} #hold the airport identifiers
windsdict = {} #holds the wind speeds by identifier
wnddirdict = {} #holds the wind direction by identifier
wxstringdict = {} #holds the weather conditions by identifier
wndgustdict = {} #hold wind gust by identifier - Mez
#read airports file - read each time weather is updated in case a change to "airports" file was made while script was running.
try:
with open("/NeoSectional/airports") as f:
airports = f.readlines()
except IOError as error:
logger.error('Airports file could not be loaded.')
logger.error(error)
break
airports = [x.strip() for x in airports]
logger.info("Airports File Loaded")
#read hmdata file and display the top 10 airports on the OLEDs
try:
with open("/NeoSectional/hmdata") as f:
hmdata = f.readlines()
except IOError as error:
logger.error('Heat Map file could not be loaded.')
logger.error(error)
break
hmdata = [x.strip() for x in hmdata]
logger.info("Heat Map File Loaded")
for line in hmdata:
hmap, numland = line.split()
hmdata_dict[hmap] = int(numland)
hmdata_sorted = sorted(hmdata_dict.items(), key=lambda x:x[1], reverse=True)
hmdata_sorted.insert(0, 'Top AP\nLandings')
print(hmdata_sorted)
#depending on what data is to be displayed, either use an URL for METARs and TAFs or read file from drive (pass).
if metar_taf_mos == 1: #Check to see if the script should display TAF data (0) or METAR data (1)
#Define URL to get weather METARS. If no METAR reported withing the last 2.5 hours, Airport LED will be white (nowx).
url = "https://www.aviationweather.gov/adds/dataserver_current/httpparam?dataSource=metars&requestType=retrieve&format=xml&mostRecentForEachStation=constraint&hoursBeforeNow="+str(metar_age)+"&stationString="
logger.info("METAR Data Loading")
elif metar_taf_mos == 0: #TAF data
#Define URL to get weather URL for TAF. If no TAF reported for an airport, the Airport LED will be white (nowx).
url = "https://www.aviationweather.gov/adds/dataserver_current/httpparam?dataSource=tafs&requestType=retrieve&format=xml&mostRecentForEachStation=constraint&hoursBeforeNow="+str(metar_age)+"&stationString="
logger.info("TAF Data Loading")
elif metar_taf_mos == 2: #MOS data. This is not accessible in the same way as METARs and TAF's.
pass #This elif is not strictly needed and is only here for clarity
logger.info("MOS Data Loading")
elif metar_taf_mos == 3: #Heat Map data.
pass #This elif is not strictly needed and is only here for clarity
logger.info("Heat Map Data Loading")
#Build URL to submit to FAA with the proper airports from the airports file
if metar_taf_mos != 2 and metar_taf_mos != 3:
for airportcode in airports:
if airportcode == "NULL" or airportcode == "LGND":
continue
url = url + airportcode + ","
url = url[:-1] #strip trailing comma from string
logger.debug(url)
while True: #check internet availability and retry if necessary. Power outage, map may boot quicker than router.
try:
content = urllib.request.urlopen(url)
logger.info('Internet Available')
logger.info(url)
break
except:
logger.warning('FAA Data is Not Available')
logger.info(url)
time.sleep(delay_time)
pass
root = ET.fromstring(content.read()) #Process XML data returned from FAA
#MOS decode routine
#MOS data is downloaded daily from; https://www.weather.gov/mdl/mos_gfsmos_mav to the local drive by crontab scheduling.
#Then this routine reads through the entire file looking for those airports that are in the airports file. If airport is
#found, the data needed to display the weather for the next 24 hours is captured into mos_dict, which is nested with
#hour_dict, which holds the airport's MOS data by 3 hour chunks. See; https://www.weather.gov/mdl/mos_gfsmos_mavcard for
#a breakdown of what the MOS data looks like and what each line represents.
if metar_taf_mos == 2:
#Read current MOS text file
try:
file = open(mos_filepath, 'r')
lines = file.readlines()
except IOError as error:
logger.error('MOS data file could not be loaded.')
logger.error(error)
break
for line in lines: #read the MOS data file line by line0
line = str(line)
#Ignore blank lines of MOS airport
if line.startswith(' '):
ap_flag = 0
continue
#Check for and grab date of MOS
if 'DT /' in line:
unused, dt_cat, month, unused, unused, day, unused = line.split(" ",6)
continue
#Check for and grab the Airport ID of the current MOS
if 'MOS' in line:
unused, apid, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, updt1, updt2, v13 = line.split(" ", 14)
mos_updt_time = updt1 + ' ' + updt2 #Grab the MOS report's update timestamp
dt_string = mos_updt_time
#If this Airport ID is in the airports file then grab all the info needed from this MOS
if apid in airports:
ap_flag = 1
cat_counter = 0 #used to determine if a category is being reported in MOS or not. If not, need to inject i$
dat0, dat1, dat2, dat3, dat4, dat5, dat6, dat7 = ([] for i in range(8)) #Clear lists
continue
#If we just found an airport that is in our airports file, then grab the appropriate weather data from it's MOS
if ap_flag:
xtra, cat, value = line.split(" ",2) #capture the category the line read represents
#Check if the needed categories are being read and if so, grab its data
if cat in categories:
cat_counter += 1 #used to check if a category is not in mos report for airport
if cat == 'HR': #hour designation
temp = (re.findall(r'\s?(\s*\S+)', value.rstrip())) #grab all the hours from line read
for j in range(8):
tmp = temp[j].strip()
hour_dict[tmp] = '' #create hour dictionary based on mos data
keys = list(hour_dict.keys()) #Get the hours which are the keys in this dict, so they can be prope$
else:
#Checking for missing lines of data and x out if necessary.
if (cat_counter == 5 and cat != 'P06')\
or (cat_counter == 6 and cat != 'T06')\
or (cat_counter == 7 and cat != 'POZ')\
or (cat_counter == 8 and cat != 'POS')\
or (cat_counter == 9 and cat != 'TYP'):
#calculate the number of consecutive missing cats and inject 9's into those positions
a = categories.index(last_cat)+1
b = categories.index(cat)+1
c = b - a - 1
logger.debug(apid,last_cat,cat,a,b,c)
for j in range(c):
temp = ['9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9']
set_data()
cat_counter += 1
#Now write the orignal cat data read from the line in the mos file
cat_counter += 1
hour_dict = collections.OrderedDict() #clear out hour_dict for next airport
last_cat = cat
temp = (re.findall(r'\s?(\s*\S+)', value.rstrip())) #add the actual line of data read
set_data()
hour_dict = collections.OrderedDict() #clear out hour_dict for next airport
else:
#continue to decode the next category data that was read.
last_cat = cat #store what the last read cat was.
temp = (re.findall(r'\s?(\s*\S+)', value.rstrip()))
set_data()
hour_dict = collections.OrderedDict() #clear out hour_dict for next airport
#Now grab the data needed to display on map. Key: [airport][hr][j] - using nested dictionaries
# airport = from airport file, 4 character ID. hr = 1 of 8 three-hour periods of time, 00 03 06 09 12 15 18 21
# j = index to weather categories, in this order; 'CLD','WDR','WSP','P06', 'T06', 'POZ', 'POS', 'TYP','CIG','VIS','OBV'.
# See; https://www.weather.gov/mdl/mos_gfsmos_mavcard for description of available data.
for airport in airports:
if airport in mos_dict:
logger.debug('\n' + airport)
logger.debug(categories)
mos_time = int(current_hr_zulu) + | |
'''
The MIT License (MIT)
(c) <NAME> 2014 (<EMAIL>)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import glob
import sys
import os
import os.path
import meanie3D.app.utils
import meanie3D.app.external
from subprocess import call
# make sure external commands are available
meanie3D.app.external.locateCommands(['convert', 'composite','python'])
# Ask the system python for its module search path.
# NOTE(review): the captured paths_string is never used below; the
# hard-coded /usr/lib and /usr/local/lib prefixes are searched instead --
# confirm this is intended.
ret_code, paths_string = meanie3D.app.external.execute_command('python','-c "import sys; print sys.path"', True)
if ret_code == 0:
    print "Attempting to locate python module netCDF4"
    # Look for netCDF4 under the usual site-packages layout first ...
    result = meanie3D.app.utils.find_in_paths(["/usr/lib","/usr/local/lib"],"netCDF4","site-packages")
    if not result:
        # ... then fall back to the Debian/Ubuntu dist-packages layout.
        result = meanie3D.app.utils.find_in_paths(["/usr/lib","/usr/local/lib"],"netCDF4","dist-packages")
    if not result:
        print "Failed to locate python module netCDF4"
        exit(-1)
    else:
        print "Found netCDF4 at %s" % result
        # Make the located package importable by this interpreter.
        sys.path.append(os.path.split(result)[0]);
        print "Python path after adding system search directories:"
        print(sys.path)
else:
    print "Failed to obtain system's python path"
    exit(-1)
import netCDF4
import visit
# ---------------------------------------------------------
#
# Global configuration handling
#
# ---------------------------------------------------------
# Global variable used to make sure the global visit configuration
# is only run once, to avoid adding multiple color tables etc.
did_global_config_execute = False
def run_global_visit_configuration(configuration):
    '''
    Applies the one-off global visit setup: user color tables, save-window
    attributes and message suppression. Subsequent calls are no-ops.
    :param configuration:
    :return:
    '''
    global did_global_config_execute
    # Guard clause: this must only ever run once per session.
    if did_global_config_execute:
        return
    # Install any user-defined color tables.
    color_table_path = 'postprocessing.visit.colorTables'
    if getValueForKeyPath(configuration, color_table_path):
        createColorTables(configuration, color_table_path)
    # Configure how windows are saved, if requested.
    save_attrs_config = getValueForKeyPath(configuration, 'postprocessing.visit.saveWindowAttributes')
    if save_attrs_config:
        save_attrs = visit.GetSaveWindowAttributes()
        updateVisitObjectFromDictionary(save_attrs, save_attrs_config)
        visit.SetSaveWindowAttributes(save_attrs)
    # Keep visit quiet during batch processing.
    visit.SuppressMessages(2)
    visit.SuppressQueryOutputOn()
    did_global_config_execute = True
# ---------------------------------------------------------
#
# Plotting functions, helpers
#
# ---------------------------------------------------------
def addPseudocolorPlot(databaseFile, configuration, time_index=-1):
    '''
    Adds a pseudocolor plot (optionally thresholded) for the configured variable.
    :param databaseFile: database file to open
    :param configuration: dict with keys 'variable', 'PseudocolorAttributes'
        and optionally 'ThresholdAttributes' (see the visit objects of the
        same names)
    :param time_index: time slider state to select; ignored when negative
    :return:
    '''
    var_name = getValueForKeyPath(configuration, 'variable')
    pseudo_config = getValueForKeyPath(configuration, 'PseudocolorAttributes')
    if not (var_name and pseudo_config):
        return
    visit.OpenDatabase(databaseFile)
    visit.AddPlot("Pseudocolor", var_name)
    # Position the time slider if a concrete time step was requested.
    if time_index >= 0:
        visit.SetTimeSliderState(time_index)
    pseudo_attrs = visit.PseudocolorAttributes()
    updateVisitObjectFromDictionary(pseudo_attrs, pseudo_config)
    visit.SetPlotOptions(pseudo_attrs)
    # Apply an optional threshold operator on top of the plot.
    threshold_config = getValueForKeyPath(configuration, "ThresholdAttributes")
    if threshold_config:
        visit.AddOperator("Threshold")
        threshold_attrs = visit.ThresholdAttributes()
        updateVisitObjectFromDictionary(threshold_attrs, threshold_config)
        visit.SetOperatorOptions(threshold_attrs)
def addContourPlot(databaseFile, configuration):
    '''
    Adds a contour plot (optionally thresholded) for the configured variable.
    :param databaseFile: database file to open
    :param configuration: dict with keys 'variable', 'ContourAttributes' and
        optionally 'ThresholdAttributes' (see the visit objects of the same
        names)
    :return:
    '''
    var_name = getValueForKeyPath(configuration, 'variable')
    contour_config = getValueForKeyPath(configuration, 'ContourAttributes')
    if not (var_name and contour_config):
        return
    visit.OpenDatabase(databaseFile)
    visit.AddPlot("Contour", var_name)
    contour_attrs = visit.ContourAttributes()
    updateVisitObjectFromDictionary(contour_attrs, contour_config)
    visit.SetPlotOptions(contour_attrs)
    # Apply an optional threshold operator on top of the plot.
    threshold_config = getValueForKeyPath(configuration, "ThresholdAttributes")
    if threshold_config:
        visit.AddOperator("Threshold")
        threshold_attrs = visit.ThresholdAttributes()
        updateVisitObjectFromDictionary(threshold_attrs, threshold_config)
        visit.SetOperatorOptions(threshold_attrs)
def addVectorPlot(databaseFile, configuration):
    '''
    Adds a vector plot for the configured variable.
    :param databaseFile: database file to open
    :param configuration: dict with keys 'variable' and 'VectorAttributes'
        (see visit.VectorAttributes())
    :return:
    '''
    var_name = getValueForKeyPath(configuration, 'variable')
    vector_config = getValueForKeyPath(configuration, 'VectorAttributes')
    if not (var_name and vector_config):
        return
    visit.OpenDatabase(databaseFile)
    visit.AddPlot("Vector", var_name)
    vector_attrs = visit.VectorAttributes()
    updateVisitObjectFromDictionary(vector_attrs, vector_config)
    visit.SetPlotOptions(vector_attrs)
def addLabelPlot(file, configuration):
    '''
    Adds a label plot for the configured variable.
    :param file: database file to open
    :param configuration: dict with keys 'variable' and 'LabelAttributes'
        (see visit.LabelAttributes())
    :return:
    '''
    var_name = getValueForKeyPath(configuration, 'variable')
    label_config = getValueForKeyPath(configuration, 'LabelAttributes')
    if not (var_name and label_config):
        return
    visit.OpenDatabase(file)
    visit.AddPlot("Label", var_name)
    label_attrs = visit.LabelAttributes()
    updateVisitObjectFromDictionary(label_attrs, label_config)
    # The label plot was appended last; make it active before styling it.
    visit.SetActivePlots(visit.GetNumPlots() - 1)
    visit.SetPlotOptions(label_attrs)
def addTextAnnotation(x, y, message):
    '''
    Add a 2D text annotation of height 2%. Re-uses the existing "Text2D1"
    annotation object when present, otherwise creates a new one.
    :param x: horizontal position
    :param y: vertical position
    :param message: text to display
    :return:
    '''
    try:
        annotation = visit.GetAnnotationObject("Text2D1")
    except visit.VisItException:
        # No existing annotation object; create a fresh one.
        annotation = visit.CreateAnnotationObject("Text2D")
    if annotation:
        annotation.text = message
        annotation.position = (x, y)
        annotation.height = 0.02
def setViewFromDict(viewConfig):
    '''
    Sets the view up with the given perspective object from the configuration.
    The presence of 2D-only keys decides whether a 2D or 3D view is assumed.
    :param viewConfig: you can use all keys found in GetView2D() or GetView3D()
    :return:
    '''
    if not viewConfig:
        return
    looks_like_2d = ('windowCoords' in viewConfig) or ('viewportCoords' in viewConfig)
    if looks_like_2d:
        view = visit.GetView2D()
        updateVisitObjectFromDictionary(view, viewConfig)
        visit.SetView2D(view)
    else:
        view = visit.GetView3D()
        updateVisitObjectFromDictionary(view, viewConfig)
        visit.SetView3D(view)
def setView(configuration, path):
    '''
    Looks up the view configuration at the given key path and applies it.
    :param configuration: you can use all keys found in GetView2D() or GetView3D()
    :param path: key path into the configuration
    :return:
    '''
    setViewFromDict(meanie3D.app.utils.getValueForKeyPath(configuration, path))
def addPseudocolorPlots(databaseFile, configuration, path, time_index = -1):
    '''
    Adds one pseudocolor plot per entry found at the given configuration
    key path.
    :param databaseFile: database file to open
    :param configuration:
    :param path: key path resolving to a list of plot configurations
    :param time_index: forwarded to addPseudocolorPlot
    :return:
    '''
    plot_configs = getValueForKeyPath(configuration, path)
    if not plot_configs:
        return
    visit.OpenDatabase(databaseFile)
    for plot_config in plot_configs:
        addPseudocolorPlot(databaseFile, plot_config, time_index)
def setAnnotations(configuration, path):
    '''
    Set annotation attributes from the configuration found at the given
    key path.
    :param configuration: see visit.GetAnnotationAttributes()
    :param path: key path into the configuration
    :return:
    '''
    annotation_config = meanie3D.app.utils.getValueForKeyPath(configuration, path)
    if annotation_config:
        annotation_attrs = visit.GetAnnotationAttributes()
        updateVisitObjectFromDictionary(annotation_attrs, annotation_config)
        visit.SetAnnotationAttributes(annotation_attrs)
def createColorTables(configuration, path):
    '''
    Registers the color tables described at the given configuration key path
    under the "meanie3D" category.
    :param configuration:
    :param path: key path resolving to a list of dicts with keys
        'name', 'colors' and 'positions'
    :return:
    '''
    color_tables = getValueForKeyPath(configuration, path)
    if not color_tables:
        return
    for table in color_tables:
        control_points = visit.ColorControlPointList()
        control_points.categoryName = "meanie3D"
        # One control point per configured position/color pair.
        for index, position in enumerate(table['positions']):
            point = visit.ColorControlPoint()
            point.colors = tuple(table['colors'][index])
            point.position = position
            control_points.AddControlPoints(point)
        visit.AddColorTable(table['name'], control_points)
def close_pattern(pattern):
    '''
    Closes the databases of all files matching the given pattern.
    :param pattern: a glob pattern (e.g. "*.vtk")
    :return:
    '''
    # Fix: the previous local variable was named 'list', shadowing the
    # builtin; iterate the glob result directly instead.
    for matched_file in glob.glob(pattern):
        visit.CloseDatabase(matched_file)
def plotMapdata(configuration, path):
    '''
    Plots mapdata according to the configuration given. Note
    that $ variables in the map file path will be replaced with the value
    found in the environment.
    :param configuration:
    :param path: key path to the map configuration
    :return:
    '''
    map_config = getValueForKeyPath(configuration, path)
    if not map_config:
        return
    # Resolve environment variables in the configured map file path.
    map_file = os.path.expandvars(map_config['mapDataFile'])
    if os.path.exists(map_file):
        addPseudocolorPlots(map_file, configuration, path + ".plots")
    else:
        # Single-argument parenthesized print behaves identically on
        # Python 2 and 3.
        print("ERROR:could not find map file at " + map_file)
# ---------------------------------------------------------
#
# File utilities
#
# ---------------------------------------------------------
def path(filename):
    '''
    Extracts the path part from the given filename.
    :param filename:
    :return: the directory component (empty string for bare filenames)
    '''
    head, _tail = os.path.split(filename)
    return head
def naked_name(filename):
    '''
    Extracts the filename WITHOUT extension from the given filename.
    :param filename:
    :return: filename (including any directory part) minus its last extension
    '''
    base, _extension = os.path.splitext(filename)
    return base
# ---------------------------------------------------------
#
# Handling .png exports / images
#
# ---------------------------------------------------------
def saveImagesForViews(views, basename):
    '''
    Iterates over the given view objects, sets each in turn and
    saves an image. With a single view the image files are named
    'basename_<number>.png'; with several views they are named
    'p<viewNum>_basename_<number>.png'.
    :param views:
    :param basename:
    :return:
    '''
    if not views:
        # No view configured: save the current window as-is.
        saveImage(basename, 1)
        return
    multiple_views = len(views) > 1
    for view_index, view in enumerate(views):
        setViewFromDict(view)
        if multiple_views:
            image_name = "p%d_%s" % (view_index, basename)
        else:
            image_name = basename
        saveImage(image_name, 1)
def saveImage(basename, progressive):
    '''
    Saves the current window to an image file in the current working
    directory.
    :param basename: filename prefix; an underscore is appended (visit
        numbers the frames)
    :param progressive: progressive-scan flag passed through to visit
    :return:
    '''
    window_attrs = visit.GetSaveWindowAttributes()
    window_attrs.progressive = progressive
    window_attrs.fileName = basename + "_"
    # Write into the current working directory explicitly.
    window_attrs.outputToCurrentDirectory = 0
    window_attrs.outputDirectory = os.getcwd()
    visit.SetSaveWindowAttributes(window_attrs)
    visit.SaveWindow()
def createMovieForViews(views, basename, format):
    '''
    Creates one movie in the given format per view from the images
    previously saved under the given basename. With several views the
    filenames start with 'p<viewNum>_basename_', otherwise 'basename_'.
    :param views:
    :param basename:
    :param format:
    :return:
    '''
    if not views:
        create_movie(basename + "_", basename + os.path.extsep + format)
        return
    multiple_views = len(views) > 1
    for view_index in range(len(views)):
        if multiple_views:
            movie_name = "p%d_%s" % (view_index, basename)
        else:
            movie_name = basename
        create_movie(movie_name + "_", movie_name + os.path.extsep + format)
def createMoviesForViews(views, basename, formats):
    '''
    Creates one movie per requested format for the given views.
    :param views:
    :param basename:
    :param formats: iterable of format extensions ("gif", "m4v" etc.)
    :return:
    '''
    if formats:
        for fmt in formats:
            createMovieForViews(views, basename, fmt)
def delete_images(views, basename, image_count):
'''
Checks if the image file(s) with the given number exists and deletes them.
:param views:
:param basename:
:param image_count:
:return:
'''
number_postfix = str(image_count).rjust(4, '0') + ".png";
result = False
if len(views) > 1:
for i in range(0, len(views)):
fn = "p" + str(i) + "_" + basename + "_" + number_postfix
if (os.path.exists(fn)):
os.remove(fn)
else:
fn = | |
# fairseq/data/audio/word_aligned_audio_dataset.py
from fairseq.data import FairseqDataset
# from .. import FairseqDataset
import logging
import numpy as np
import os
import pyarrow
import re
import torch
import random
from collections import defaultdict
from tqdm import tqdm
logger = logging.getLogger(__name__)
def verify_size(reps):
    """Return *reps* as a 2-D (timesteps, hidden_size) tensor.

    A 3-D input is assumed to carry a leading batch dimension of size 1,
    which is squeezed away; a 2-D input is returned unchanged; anything
    else raises ValueError.
    """
    ndim = reps.dim()
    if ndim == 3:
        # we assume (batch, timesteps, hidden_size); squeeze away batch
        squeezed = reps.squeeze(0)
        assert squeezed.dim() == 2
        return squeezed
    if ndim == 2:
        # already in the expected shape
        return reps
    raise ValueError("speech representations have an incorrect number of dimensions")
def get_timesteps_from_filename(filename):
    """Extract the sequence length encoded as 'len<digits>' in *filename*.

    Exactly one such marker must be present; the digits are returned as a
    string.
    """
    length_markers = re.findall(r'len(\d+)', filename)
    assert len(length_markers) == 1
    return length_markers[0]
def random_sampling(a_list, num_samples):
    """Return *num_samples* randomly chosen elements of *a_list*.

    When at least as many elements as the list holds are requested, the
    original list object itself is returned untouched.
    """
    if num_samples < len(a_list):
        return random.sample(a_list, num_samples)
    return a_list
def zeropad_to_len(t, targ_len):
    """Zero-pad 1-D or 2-D tensor *t* along dim 0 up to *targ_len* steps.

    Returns a tuple of (padded tensor, number of padding steps added).
    Raises ValueError for tensors of any other rank.
    """
    pad_amount = targ_len - t.size(0)
    if t.dim() == 1:
        padding = t.new_zeros(pad_amount)
    elif t.dim() == 2:
        # preserve the feature dimension when padding
        padding = t.new_zeros(pad_amount, t.size(1))
    else:
        raise ValueError
    return torch.cat([t, padding]), pad_amount
class WordAlignedAudioDataset(FairseqDataset):
"""
A dataset that maps between word-tokens in a corpus to their speech representations.
Speech aligned at the word-level can be represented as any two dimensional matrix of shape (timesteps, dimensions).
E.g.:
- mel-spectrograms (timesteps, number of mel bins)
- wav2vec2.0 representations (timesteps, hidden size of wav2vec2.0 representations)
The dataset is structured as a key-value mapping:
- key: An index from 0 to the total number of tokens in a corpus.
Subsequently the index uniquely identifies any token in the corpus
- value: A dictionary associated with that token. That contains:
- path to a token's speech representations
- the token's graphemes
This dataset also contains a mapping between a word in its graphemic form and its examples in the corpus.
This is used to speed up the retrieval of positive and negative examples for triplet loss/contrastive training.
The data_path is the path to the speech corpus rearranged and cut at the word-level,
it should have the following structure (please refer to fairseq/examples/lexicon_learner/wordalign_speechreps.py):
- data_path
- word1
- word1_LJ010-0292_001.pt
- word1_LJ010-0292_002.pt
- ...
- word2
- word2_LJ001-0012_001.pt
- word2_LJ002-0024_001.pt
- ...
- ...
- word1, word2, ... subfolders refer to a particular wordtype in the corpus.
- .pt files contain speech representations that map to a particular example of a wordtype.
It is named as:
<wordtype>_<utt id>_occ<numbered occurrence in the utterance>_len<num of timesteps in sequence>.pt
Training set:
Validation set:
Seen:
Unseen:
"""
    def __init__(
            self,
            data_path,
            split,
            save_dir,
            cache_all_data=True,  # warning doing so with large datasets could lead to OOM!
            max_train_wordtypes=None,  # leave as None to use as many wordtypes as possible for training
            max_train_examples_per_wordtype=None,  # leave as None to use all examples for each wordtype
            min_train_examples_per_wordtype=2,
            valid_seen_wordtypes=100,  # how many wordtypes seen during training to include in validation
            valid_unseen_wordtypes=100,  # how many wordtypes to leave out of training and include in validation
            valid_examples_per_wordtype=25,  # for valid-seen and valid-unseen
            randomise_wordtypes=True,
            random_seed=1337,
            wordtypes_to_ignore=('SIL', '<unk>'),
            debug_only_include_words_beginning_with=None,
            padding_index_offset=1,
    ):
        """
        Scan ``data_path`` and build the example index for the requested split.

        :param data_path: root folder with one subfolder per wordtype, each
            containing one .pt file per spoken example (the filename encodes
            the example's length as 'len<digits>').
        :param split: one of 'train', 'valid-seen', 'valid-unseen'
            ('test' is not implemented).
        :param save_dir: where validation wordtype lists are written as csv.
        :param cache_all_data: preload every example tensor into memory.
        :param max_train_wordtypes: cap on training wordtypes (None = all).
        :param max_train_examples_per_wordtype: cap on train examples per
            wordtype (None = all remaining after validation reservation).
        :param min_train_examples_per_wordtype: minimum training examples a
            wordtype needs to be included at all (must be >= 2).
        :param valid_seen_wordtypes: number of training wordtypes also
            evaluated in the valid-seen split.
        :param valid_unseen_wordtypes: number of wordtypes held out of
            training entirely for the valid-unseen split.
        :param valid_examples_per_wordtype: examples per wordtype reserved
            for validation (must be >= 2).
        :param randomise_wordtypes: shuffle wordtype order (seeded) instead
            of keeping it alphabetical.
        :param random_seed: seed for the wordtype shuffle.
        :param wordtypes_to_ignore: wordtype folder names to drop.
        :param debug_only_include_words_beginning_with: debug-only prefix
            filter on wordtypes.
        :param padding_index_offset: added to every loaded code so that
            index 0 can serve as padding.
        """
        super().__init__()
        logger.info(f"Creating dataset...")
        # valid-seen is by definition a subset of the training dataset
        if max_train_wordtypes is not None and max_train_wordtypes < valid_seen_wordtypes:
            raise ValueError(f"max_train_wordtypes ({max_train_wordtypes}) < valid_seen_wordtypes ({valid_seen_wordtypes})")
        # need at least 2 examples for training and 2 for validation (2+2=4)
        # so that we can pull at least 1 positive example during training and validation for a wordtype
        assert min_train_examples_per_wordtype >= 2
        assert valid_examples_per_wordtype >= 2
        min_examples_per_wordtype = min_train_examples_per_wordtype + valid_examples_per_wordtype
        # if max_train_examples_per_wordtype is not None:
        #     assert max_train_examples_per_wordtype >= min_examples_per_wordtype, f"At least {min_examples_per_wordtype} examples needed to draw a positive example for a given anchor during either training or validation. (max_train_examples_per_wordtype={max_train_examples_per_wordtype})"
        # check data split
        if split == "test":
            raise NotImplementedError
        if split not in ["train", "valid-seen", "valid-unseen"]:
            raise ValueError(f"'{split}' not a correct dataset split.")
        ################################################################################################################
        ### Open main data folder and load word-aligned speech reps for all words in the vocab
        self.examples = []  # each example is a filepath to some reps, or the representations themselves
        self.sizes = []  # per-example timestep counts, parsed from filenames
        all_indices = []
        self.cache_all_data = cache_all_data
        self.padding_index_offset = padding_index_offset
        # create a mapping between a wordtype and a list of positive examples of that wordtype
        # this data structure is used to quickly find positive and negative examples for a particular word token
        self.wordtype2indices = defaultdict(set)
        # load all subfolders (each of which correspond to a unique wordtype)
        all_subfolders = sorted(os.listdir(data_path))
        # (for debugging) optionally only include wordtypes that start with a certain letter to speed up debugging
        if debug_only_include_words_beginning_with is not None:
            all_subfolders = [w for w in all_subfolders if w.startswith(debug_only_include_words_beginning_with)]
        # optionally randomise the order so its not alphabetical
        if randomise_wordtypes:
            random.seed(random_seed)
            random.shuffle(all_subfolders)
        # skip wordtypes we wish to ignore
        for w in wordtypes_to_ignore:
            if w in all_subfolders:
                all_subfolders.remove(w)
        # skip any wordtypes from consideration if they do not have enough examples
        skipped_wordtypes = []
        logger.info(f"Skipping wordtypes that do not have enough examples...")
        for wordtype in tqdm(all_subfolders, unit='wordtype'):
            all_wordtoken_files = os.listdir(os.path.join(data_path, wordtype))
            if len(all_wordtoken_files) < min_examples_per_wordtype:
                skipped_wordtypes.append(wordtype)
        for w in skipped_wordtypes:
            all_subfolders.remove(w)
        logger.info(f"Did not include {len(skipped_wordtypes)} wordtypes because they have fewer than {min_examples_per_wordtype} examples.")
        # calculate start and end wordtype indices depending on the dataset split/split subset
        if split == "train":
            start_wordtype_idx = 0
            if max_train_wordtypes is None:
                end_wordtype_idx = len(all_subfolders) - valid_unseen_wordtypes
            else:
                if len(all_subfolders) >= max_train_wordtypes + valid_unseen_wordtypes:
                    end_wordtype_idx = max_train_wordtypes
                else:
                    # not enough wordtypes for both caps; keep the unseen holdout intact
                    end_wordtype_idx = len(all_subfolders) - valid_unseen_wordtypes
        elif split == "valid-seen":
            start_wordtype_idx = 0
            end_wordtype_idx = valid_seen_wordtypes
        elif split == "valid-unseen":
            # the last valid_unseen_wordtypes folders are never used for training
            start_wordtype_idx = -valid_unseen_wordtypes
            end_wordtype_idx = None
        else:
            raise ValueError(f"'{split}' not a correct dataset split or dataset split subset.")
        wordtype_to_incl_idx = 0
        for wordtype in tqdm(all_subfolders[start_wordtype_idx:end_wordtype_idx], unit='wordtype'):
            all_wordtoken_files = os.listdir(os.path.join(data_path, wordtype))
            all_wordtoken_files = sorted(all_wordtoken_files)  # ensure consistent ordering
            # calculate start and end wordtoken indices depending on the dataset split/split subset
            if split in ["train"]:
                start_wordtoken_idx = 0
                if max_train_examples_per_wordtype is None:
                    end_wordtoken_idx = len(all_wordtoken_files) - valid_examples_per_wordtype
                else:
                    if len(all_wordtoken_files) >= max_train_examples_per_wordtype + valid_examples_per_wordtype:
                        end_wordtoken_idx = max_train_examples_per_wordtype
                    else:
                        end_wordtoken_idx = len(all_wordtoken_files) - valid_examples_per_wordtype
            elif split in ["valid-seen", "valid-unseen"]:
                # validation always uses the last valid_examples_per_wordtype files
                start_wordtoken_idx = -valid_examples_per_wordtype
                end_wordtoken_idx = None
            else:
                raise ValueError(f"'{split}' not a correct dataset split or dataset split subset.")
            for wordtoken_file in all_wordtoken_files[start_wordtoken_idx:end_wordtoken_idx]:
                filepath = os.path.join(data_path, wordtype, wordtoken_file)
                # assign data associated with this word token / index
                self.sizes.append(int(get_timesteps_from_filename(wordtoken_file)))
                self.examples.append(filepath)
                self.wordtype2indices[wordtype].add(wordtype_to_incl_idx)
                all_indices.append(wordtype_to_incl_idx)
                wordtype_to_incl_idx += 1
        # cache all data in order to avoid accessing disk (locally or network) during training
        # (more feasible when loading smaller data formats such as hubert codes vs full wav2vec2.0 vectors)
        if self.cache_all_data:
            self.cached_data = []
            logger.info(f"Caching {len(self.examples)} examples.")
            for fp in tqdm(self.examples):
                # NOTE(review): .int() assumes discrete codes, not continuous reps
                self.cached_data.append(torch.load(fp).int() + self.padding_index_offset)
        # Sanity checks
        assert all_indices == list(range(len(self.examples)))
        assert_msg = f"len(self.sizes)=={len(self.sizes)}, len(self.fnames)=={len(self.examples)}, sum(len(v) for v in " \
                     f"self.wordtype2indices.values())=={sum(len(v) for v in self.wordtype2indices.values())}, idx + 1=={wordtype_to_incl_idx} "
        assert len(self.sizes) == len(self.examples) == sum(
            len(v) for v in self.wordtype2indices.values()) == wordtype_to_incl_idx, assert_msg
        # Assign object params
        self.sizes = np.array(self.sizes, dtype=np.int64)
        self.all_indices = set(all_indices)
        # self.examples = pyarrow.array(self.examples)  # uncomment to increase performance using pyarrow
        # Print/save important information and stats about this dataset
        logger.info(f"Finished creating word-aligned speech representations {split} dataset containing {len(self.wordtype2indices)} wordtypes "
                    f"and {len(self.examples)} word tokens in total.")
        if split in ["valid-seen", "valid-unseen"]:
            logger.info(f"{split} wordtypes are: {' '.join(self.wordtype2indices.keys())}")
            self.save_wordtypes_to_disk(os.path.join(save_dir, f'{split}_{len(self.wordtype2indices.keys())}_wordtypes.csv'))
def __getitem__(self, anchor_index):
positive_index = list(self.get_positive_indices(anchor_index, num_examples=1))[0]
negative_index = list(self.get_negative_indices(anchor_index, num_examples=1))[0]
# load inputs
if self.cache_all_data:
anchor_in = self.cached_data[anchor_index]
positive_in = self.cached_data[positive_index]
negative_in = self.cached_data[negative_index]
else:
anchor_in = torch.load(self.examples[anchor_index]).int() + self.padding_index_offset # TODO warning this will not work with continuous reps!
positive_in = torch.load(self.examples[positive_index]).int() + self.padding_index_offset
negative_in = torch.load(self.examples[negative_index]).int() + self.padding_index_offset
# create tensors for indicating where we want to output targets
# e.g. 1 in timesteps where there is a grapheme and 0 where we do not have a grapheme
# (padding will be performed later by collater)
anchor_tgt = torch.ones(self.get_tgt_len(anchor_index, units="graphemes"), dtype=torch.int)
positive_tgt = torch.ones(self.get_tgt_len(positive_index, units="graphemes"), dtype=torch.int)
negative_tgt = torch.ones(self.get_tgt_len(negative_index, units="graphemes"), dtype=torch.int)
# print("debug", self.index2wordtype(anchor_index), anchor_tgt)
return {
"anchor_index": anchor_index,
"positive_index": positive_index,
"negative_index": negative_index,
"anchor_in": anchor_in, # e.g. speech reps of the anchor word
"positive_in": positive_in,
"negative_in": negative_in,
"anchor_tgt": anchor_tgt,
"positive_tgt": positive_tgt,
"negative_tgt": negative_tgt,
}
def get_tgt_len(self, wordtoken_index, units="graphemes"):
"""
return the length of some metadata related to the wordtype associated | |
'''
test_modified_modules_backend.py
Contains test cases related to the backend functions of modified modules.
'''
from nose.tools import assert_equal, assert_not_equal, assert_true, assert_false
from components import model
from components import helper
from components.handlers.modified_modules import Modified
from components.handlers.module_edit import EditModuleInfo
from components.handlers.module_restore import RestoreModule
class TestCode(object):
'''
This class runs the test cases related to modified modules.
'''
def __init__(self):
self.current_ay = model.get_current_ay()
self.next_ay = self.get_next_ay(self.current_ay)
self.next_next_ay = self.get_next_ay(self.next_ay)
self.modified_modules_handler = Modified()
self.module_edit_handler = EditModuleInfo()
self.module_restore_handler = RestoreModule()
def get_next_ay(self, ay):
'''
Return the AY that comes after the given AY
'''
ay = ay.split(' ')[1].split('/')
return 'AY ' + str(int(ay[0])+1) + '/' + str(int(ay[1])+1)
    def setUp(self):
        '''
        Add dummy modules and mountings into database

        Naming scheme (inferred from the descriptions below):
        BB1xxx exercise quota modification, BB2xxx exercise mounting
        changes, BB3xxx exercise restoration.  The value 999 in a
        tentative mounting marks a deliberately modified quota; None
        marks an unspecified quota.
        '''
        # Dummy modules
        model.add_module('BB1001', 'Dummy Module 1',
                         "This module's quota is NOT modified", 1, 'Active')
        model.add_module('BB1002', 'Dummy Module 2',
                         "This module's quota for sem 1 is modified", 2, 'Active')
        model.add_module('BB1003', 'Dummy Module 3',
                         "This module's quota for sem 2 is modified", 3, 'Active')
        model.add_module('BB1004', 'Dummy Module 4',
                         "This module's quota for sem 1 has become specified", 4, 'Active')
        model.add_module('BB1005', 'Dummy Module 5',
                         "This module's quota for sem 2 has become unspecified", 5, 'Active')
        model.add_module('BB1006', 'Dummy Module 6',
                         "This module's quota for both sem 1 & 2 have been modified",
                         6, 'Active')
        model.add_module('BB2001', 'Dummy Module 1',
                         "This module is unmounted from sem 1", 1, 'Active')
        model.add_module('BB2002', 'Dummy Module 2',
                         "This module is remounted in sem 2", 2, 'Active')
        model.add_module('BB2003', 'Dummy Module 3',
                         "This module is changed from mounted in sem 1 to sem 2", 3, 'Active')
        model.add_module('BB2004', 'Dummy Module 4',
                         "This module's mounting is modified but quota is not modified",
                         4, 'Active')
        model.add_module('BB3001', 'Dummy Module 1',
                         "This module's quota is modified and will be restored", 1, 'Active')
        model.add_module('BB3002', 'Dummy Module 2',
                         "This module's quota has been specified and will be restored" +\
                         "to unspecified", 2, 'Active')
        model.add_module('BB3003', 'Dummy Module 3',
                         "This module's has been mounted and will be restored to unmounted",
                         3, 'Active')
        model.add_module('BB3004', 'Dummy Module 4',
                         "This module's has been unmounted and will be restored to mounted",
                         4, 'Active')
        model.add_module('BB3005', 'Dummy Module 5',
                         "This module's name will be restored", 5, 'Active')
        model.add_module('BB3006', 'Dummy Module 6',
                         "This module's name, description and MC will be restored", 6,
                         'Active')
        # Dummy fixed mountings: the committed state for the current AY.
        model.add_fixed_mounting('BB1001', self.current_ay+' Sem 1', 10)
        model.add_fixed_mounting('BB1001', self.current_ay+' Sem 2', 10)
        model.add_fixed_mounting('BB1002', self.current_ay+' Sem 1', 20)
        model.add_fixed_mounting('BB1002', self.current_ay+' Sem 2', 20)
        model.add_fixed_mounting('BB1003', self.current_ay+' Sem 1', 30)
        model.add_fixed_mounting('BB1003', self.current_ay+' Sem 2', 30)
        model.add_fixed_mounting('BB1004', self.current_ay+' Sem 1', None)
        model.add_fixed_mounting('BB1004', self.current_ay+' Sem 2', None)
        model.add_fixed_mounting('BB1005', self.current_ay+' Sem 1', 50)
        model.add_fixed_mounting('BB1005', self.current_ay+' Sem 2', 50)
        model.add_fixed_mounting('BB1006', self.current_ay+' Sem 1', 60)
        model.add_fixed_mounting('BB1006', self.current_ay+' Sem 2', 60)
        model.add_fixed_mounting('BB2001', self.current_ay+' Sem 1', 10)
        model.add_fixed_mounting('BB2001', self.current_ay+' Sem 2', 10)
        model.add_fixed_mounting('BB2002', self.current_ay+' Sem 1', 20)
        model.add_fixed_mounting('BB2003', self.current_ay+' Sem 1', 30)
        model.add_fixed_mounting('BB2004', self.current_ay+' Sem 1', None)
        model.add_fixed_mounting('BB3001', self.current_ay+' Sem 1', 10)
        model.add_fixed_mounting('BB3002', self.current_ay+' Sem 1', None)
        model.add_fixed_mounting('BB3004', self.current_ay+' Sem 2', 40)
        # Dummy tentative mountings: the (possibly modified) state for the
        # next AY; differences vs the fixed mountings above are what the
        # test cases below expect to detect.
        model.add_tenta_mounting('BB1001', self.next_ay+' Sem 1', 10)
        model.add_tenta_mounting('BB1001', self.next_ay+' Sem 2', 10)
        model.add_tenta_mounting('BB1002', self.next_ay+' Sem 1', 999)
        model.add_tenta_mounting('BB1002', self.next_ay+' Sem 2', 20)
        model.add_tenta_mounting('BB1003', self.next_ay+' Sem 1', 30)
        model.add_tenta_mounting('BB1003', self.next_ay+' Sem 2', 999)
        model.add_tenta_mounting('BB1004', self.next_ay+' Sem 1', 999)
        model.add_tenta_mounting('BB1004', self.next_ay+' Sem 2', None)
        model.add_tenta_mounting('BB1005', self.next_ay+' Sem 1', 50)
        model.add_tenta_mounting('BB1005', self.next_ay+' Sem 2', None)
        model.add_tenta_mounting('BB1006', self.next_ay+' Sem 1', 999)
        model.add_tenta_mounting('BB1006', self.next_ay+' Sem 2', None)
        model.add_tenta_mounting('BB2001', self.next_ay+' Sem 2', 10)
        model.add_tenta_mounting('BB2002', self.next_ay+' Sem 1', 20)
        model.add_tenta_mounting('BB2002', self.next_ay+' Sem 2', 20)
        model.add_tenta_mounting('BB2003', self.next_ay+' Sem 2', 30)
        model.add_tenta_mounting('BB2004', self.next_ay+' Sem 1', None)
        model.add_tenta_mounting('BB2004', self.next_ay+' Sem 2', None)
        model.add_tenta_mounting('BB3001', self.next_ay+' Sem 1', 999)
        model.add_tenta_mounting('BB3002', self.next_ay+' Sem 1', 999)
        model.add_tenta_mounting('BB3003', self.next_ay+' Sem 2', 999)
        # Dummy module backup: pre-modification info for the restore tests.
        model.store_original_module_info('BB3005', 'Original Module Name',
                                         "This module's name will be restored", 5)
        model.store_original_module_info('BB3006', 'Original Module Name',
                                         "Original Module Description", 0)
def tearDown(self):
'''
Clean up the database after all test cases are ran
'''
model.NUMBER_OF_AY_SEMS_IN_SYSTEM = 2
model.delete_fixed_mounting('BB1001', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB1001', self.current_ay+' Sem 2')
model.delete_fixed_mounting('BB1002', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB1002', self.current_ay+' Sem 2')
model.delete_fixed_mounting('BB1003', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB1003', self.current_ay+' Sem 2')
model.delete_fixed_mounting('BB1004', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB1004', self.current_ay+' Sem 2')
model.delete_fixed_mounting('BB1005', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB1005', self.current_ay+' Sem 2')
model.delete_fixed_mounting('BB1006', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB1006', self.current_ay+' Sem 2')
model.delete_fixed_mounting('BB2001', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB2001', self.current_ay+' Sem 2')
model.delete_fixed_mounting('BB2002', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB2003', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB2004', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB3001', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB3002', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB3004', self.current_ay+' Sem 2')
model.delete_tenta_mounting('BB1001', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB1001', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB1002', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB1002', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB1003', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB1003', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB1004', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB1004', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB1005', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB1005', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB1006', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB1006', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB2001', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB2002', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB2002', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB2003', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB2004', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB2004', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB3001', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB3002', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB3003', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB3004', self.next_ay+' Sem 2')
model.delete_module('BB1001')
model.delete_module('BB1002')
model.delete_module('BB1003')
model.delete_module('BB1004')
model.delete_module('BB1005')
model.delete_module('BB1006')
model.delete_module('BB2001')
model.delete_module('BB2002')
model.delete_module('BB2003')
model.delete_module('BB2004')
model.delete_module('BB3001')
model.delete_module('BB3002')
model.delete_module('BB3003')
model.delete_module('BB3004')
model.delete_module('BB3005')
model.delete_module('BB3006')
def test_quota_not_modified(self):
'''
Test that a module whose quota is NOT modified
will not appear in the list of modified modules
'''
test_module_code = 'BB1001'
modified_modules = self.modified_modules_handler.get_modules_with_modified_quota()
is_in_modified_modules = False
for module in modified_modules:
code = module[0]
if code == test_module_code:
is_in_modified_modules = True
break
assert_false(is_in_modified_modules)
def test_quota_modified_sem_1(self):
'''
Test that a module whose quota for sem 1 is modified
will appear in the list of modified modules
'''
test_module_code = 'BB1002'
test_current_aysem = self.current_ay+" Sem 1"
test_current_quota = 20
test_target_aysem = self.next_ay+" Sem 1"
test_modified_quota = 999
modified_modules = self.modified_modules_handler.get_modules_with_modified_quota()
is_in_modified_modules = False
current_aysem = None
current_quota = -1
target_aysem = None
modified_quota = -1
for module in modified_modules:
code = module[0]
if code == test_module_code:
is_in_modified_modules = True
current_aysem = module[2]
target_aysem = module[3]
current_quota = module[4]
modified_quota = module[5]
break
assert_true(is_in_modified_modules)
assert_equal(test_current_aysem, current_aysem)
assert_equal(test_current_quota, current_quota)
assert_equal(test_target_aysem, target_aysem)
assert_equal(test_modified_quota, modified_quota)
def test_quota_modified_sem_2(self):
'''
Test that a module whose quota for sem 2 is modified
will appear in the list of modified modules
'''
test_module_code = 'BB1003'
test_current_aysem = self.current_ay+" Sem 2"
test_current_quota = 30
test_target_aysem = self.next_ay+" Sem 2"
test_modified_quota = 999
modified_modules = self.modified_modules_handler.get_modules_with_modified_quota()
is_in_modified_modules = False
current_aysem = None
current_quota = -1
target_aysem = None
modified_quota = -1
for module in modified_modules:
code = module[0]
if code == test_module_code:
is_in_modified_modules = True
current_aysem = module[2]
target_aysem = module[3]
current_quota = module[4]
modified_quota = module[5]
break
assert_true(is_in_modified_modules)
assert_equal(test_current_aysem, current_aysem)
assert_equal(test_current_quota, current_quota)
assert_equal(test_target_aysem, target_aysem)
assert_equal(test_modified_quota, modified_quota)
def test_quota_specified(self):
'''
Test that a module whose quota for a sem became specified
will appear in the list of modified modules
'''
test_module_code = 'BB1004'
test_current_aysem = self.current_ay+" Sem 1"
test_current_quota = None
test_target_aysem = self.next_ay+" Sem 1"
test_modified_quota = 999
modified_modules = self.modified_modules_handler.get_modules_with_modified_quota()
is_in_modified_modules = False
current_aysem = None
current_quota = -1
target_aysem = None
modified_quota = -1
for module in modified_modules:
code = module[0]
if code == test_module_code:
is_in_modified_modules = True
current_aysem = module[2]
target_aysem = module[3]
current_quota = module[4]
modified_quota = module[5]
break
assert_true(is_in_modified_modules)
assert_equal(test_current_aysem, current_aysem)
assert_equal(test_current_quota, current_quota)
assert_equal(test_target_aysem, target_aysem)
assert_equal(test_modified_quota, modified_quota)
def test_quota_unspecified(self):
'''
Test that a module whose quota for a sem became UNspecified
will appear in the list of modified modules
'''
test_module_code = 'BB1005'
test_current_aysem = self.current_ay+" Sem 2"
test_current_quota = 50
test_target_aysem = self.next_ay+" Sem 2"
test_modified_quota = None
modified_modules = self.modified_modules_handler.get_modules_with_modified_quota()
is_in_modified_modules = False
current_aysem = None
current_quota = -1
target_aysem = None
modified_quota = -1
for module in modified_modules:
code = module[0]
if code == test_module_code:
is_in_modified_modules = True
current_aysem = module[2]
target_aysem = module[3]
current_quota = module[4]
modified_quota = module[5]
break
assert_true(is_in_modified_modules)
assert_equal(test_current_aysem, current_aysem)
assert_equal(test_current_quota, current_quota)
assert_equal(test_target_aysem, target_aysem)
assert_equal(test_modified_quota, modified_quota)
def test_quota_modified_both_sems(self):
'''
Test that a module whose quota for both sems have been modified
will appear in the list of modified modules as two entries
'''
# Test if entry for sem 1 exists
test_module_code = 'BB1006'
test_current_aysem = self.current_ay+" Sem 1"
test_current_quota = 60
test_target_aysem = self.next_ay+" Sem 1"
test_modified_quota = 999
modified_modules = self.modified_modules_handler.get_modules_with_modified_quota()
is_in_modified_modules = False
current_aysem | |
Primary trigger half-press
'Joy_2': {'Type': 'Digital', 'x': 2044, 'y': 424, 'width': 642, 'height': 108}, # Fire button
'Joy_3': {'Type': 'Digital', 'x': 2124, 'y': 234, 'width': 642, 'height': 108}, # S1 button
'Joy_4': {'Type': 'Digital', 'x': 3064, 'y': 496, 'width': 752}, # S2 button
'Joy_5': {'Type': 'Digital', 'x': 3064, 'y': 584, 'width': 752}, # S3 button
'Joy_6': {'Type': 'Digital', 'x': 2044, 'y': 764, 'width': 642, 'height': 108}, # S4 button
'Joy_7': {'Type': 'Digital', 'x': 2044, 'y': 1018, 'width': 642, 'height': 108}, # S5 button
'Joy_8': {'Type': 'Digital', 'x': 2954, 'y': 386, 'width': 832}, # Ministick button
'Joy_VAxis': {'Type': 'Analogue', 'x': 2954, 'y': 330, 'width': 832}, # Ministick X
'Joy_UAxis': {'Type': 'Analogue', 'x': 2954, 'y': 274, 'width': 832}, # Ministick Y
'Joy_9': {'Type': 'Digital', 'x': 2034, 'y': 660, 'width': 652}, # Primary trigger full-press
'Joy_POV1Up': {'Type': 'Digital', 'x': 3124, 'y': 694, 'width': 692}, # POV hat up
'Joy_POV1Right': {'Type': 'Digital', 'x': 3124, 'y': 750, 'width': 692}, # POV hat right
'Joy_POV1Down': {'Type': 'Digital', 'x': 3124, 'y': 806, 'width': 692}, # POV hat down
'Joy_POV1Left': {'Type': 'Digital', 'x': 3124, 'y': 862, 'width': 692}, # POV hat left
'Joy_XAxis': {'Type': 'Analogue', 'x': 3144, 'y': 1060, 'width': 672}, # Stick X axis
'Joy_YAxis': {'Type': 'Analogue', 'x': 3144, 'y': 1004, 'width': 672}, # Stick Y axis
'Joy_RXAxis': {'Type': 'Analogue', 'x': 3084, 'y': 1824, 'width': 592}, # Trim 2
'Joy_RYAxis': {'Type': 'Analogue', 'x': 3224, 'y': 1744, 'width': 592}, # Trim 3
'Joy_RZAxis': {'Type': 'Analogue', 'x': 2934, 'y': 1904, 'width': 592}, # Trim 1
},
'LogitechG940Throttle': {
},
'LogitechG940Pedals': {
# Although the individual pedals are analogue, they often have digital binds due to their nature so we pretend they are digital
'Joy_XAxis': {'Type': 'Digital', 'x': 2264, 'y': 588, 'width': 1332, 'height': 162}, # Right pedal (unconfirmed)
'Joy_YAxis': {'Type': 'Digital', 'x': 164, 'y': 588, 'width': 1332, 'height': 162}, # Left pedal (unconfirmed)
'Joy_RZAxis': {'Type': 'Analogue', 'x': 1208, 'y': 331, 'width': 1332, 'height': 162}, # Rudder
},
'T-Rudder': {
# Although the individual pedals are analogue, they often have digital binds due to their nature so we pretend they are digital
'Joy_XAxis': {'Type': 'Digital', 'x': 164, 'y': 588, 'width': 1332, 'height': 162}, # Left pedal
'Joy_YAxis': {'Type': 'Digital', 'x': 2264, 'y': 588, 'width': 1332, 'height': 162}, # Right pedal
'Joy_ZAxis': {'Type': 'Analogue', 'x': 1208, 'y': 331, 'width': 1332, 'height': 162}, # Rudder
},
'SaitekProFlightRudderPedals': {
# Although the individual pedals are analogue, they often have digital binds due to their nature so we pretend they are digital
'Joy_XAxis': {'Type': 'Digital', 'x': 164, 'y': 588, 'width': 1332, 'height': 162}, # Left pedal
'Joy_YAxis': {'Type': 'Digital', 'x': 2264, 'y': 588, 'width': 1332, 'height': 162}, # Right pedal
'Joy_RZAxis': {'Type': 'Analogue', 'x': 1208, 'y': 331, 'width': 1332, 'height': 162}, # Rudder
},
'06A30765': { # Copy of SaitekProFlightRudderPedals (although actually Cessna pedals)
# Although the individual pedals are analogue, they often have digital binds due to their nature so we pretend they are digital
'displayName': 'C<NAME>als',
'Joy_XAxis': {'Type': 'Digital', 'x': 164, 'y': 588, 'width': 1332, 'height': 162}, # Left pedal
'Joy_YAxis': {'Type': 'Digital', 'x': 2264, 'y': 588, 'width': 1332, 'height': 162}, # Right pedal
'Joy_RZAxis': {'Type': 'Analogue', 'x': 1208, 'y': 331, 'width': 1332, 'height': 162}, # Rudder
},
'SaitekProFlightCombatRudderPedals': {
# Although the individual pedals are analogue, they often have digital binds due to their nature so we pretend they are digital
'Joy_XAxis': {'Type': 'Digital', 'x': 164, 'y': 588, 'width': 1332, 'height': 162}, # Left pedal (unconfirmed)
'Joy_YAxis': {'Type': 'Digital', 'x': 2264, 'y': 588, 'width': 1332, 'height': 162}, # Right pedal (unconfirmed)
'Joy_RZAxis': {'Type': 'Analogue', 'x': 1208, 'y': 331, 'width': 1332, 'height': 162}, # Rudder
},
'SlawFlightControlRudder': {
# Although the individual pedals are analogue, they often have digital binds due to their nature so we pretend they are digital
'Joy_XAxis': {'Type': 'Digital', 'x': 164, 'y': 588, 'width': 1332, 'height': 162}, # Left pedal (unconfirmed)
'Joy_YAxis': {'Type': 'Digital', 'x': 2264, 'y': 588, 'width': 1332, 'height': 162}, # Right pedal (unconfirmed)
'Joy_RZAxis': {'Type': 'Analogue', 'x': 1208, 'y': 331, 'width': 1332, 'height': 162}, # Rudder
},
'16D00A38': { # MFG Crosswind
# Although the individual pedals are analogue, they often have digital binds due to their nature so we pretend they are digital
'displayName': 'MFG Crosswind',
'Joy_XAxis': {'Type': 'Digital', 'x': 164, 'y': 588, 'width': 1332, 'height': 162}, # Left pedal (unconfirmed)
'Joy_YAxis': {'Type': 'Digital', 'x': 2264, 'y': 588, 'width': 1332, 'height': 162}, # Right pedal (unconfirmed)
'Joy_RZAxis': {'Type': 'Analogue', 'x': 1208, 'y': 331, 'width': 1332, 'height': 162}, # Rudder
},
'85640203': { # MFG Crosswind (alternate address)
# Although the individual pedals are analogue, they often have digital binds due to their nature so we pretend they are digital
'displayName': 'MFG Crosswind',
'Joy_XAxis': {'Type': 'Digital', 'x': 164, 'y': 588, 'width': 1332, 'height': 162}, # Left pedal (unconfirmed)
'Joy_YAxis': {'Type': 'Digital', 'x': 2264, 'y': 588, 'width': 1332, 'height': 162}, # Right pedal (unconfirmed)
'Joy_RZAxis': {'Type': 'Analogue', 'x': 1208, 'y': 331, 'width': 1332, 'height': 162}, # Rudder
},
'046DC29A': { # Logitech GT Wheel
'displayName': 'Logitech GT Wheel',
'Joy_1': {'Type': 'Digital', 'x': 2624, 'y': 1089, 'width': 1192, 'height': 54}, # Cross button
'Joy_2': {'Type': 'Digital', 'x': 2624, 'y': 1184, 'width': 1192, 'height': 54}, # Square button
'Joy_3': {'Type': 'Digital', 'x': 2624, 'y': 994, 'width': 1192, 'height': 54}, # Circle button
'Joy_4': {'Type': 'Digital', 'x': 2624, 'y': 900, 'width': 1192, 'height': 54}, # Triangle button
'Joy_5': {'Type': 'Digital', 'x': 2404, 'y': 594, 'width': 992, 'height': 108}, # Right back paddle
'Joy_6': {'Type': 'Digital', 'x': 24, 'y': 594, 'width': 992, 'height': 108}, # Left back paddle
'Joy_7': {'Type': 'Digital', 'x': 2404, 'y': 484, 'width': 992, 'height': 54}, # Right shoulder button
'Joy_8': {'Type': 'Digital', 'x': 24, 'y': 484, 'width': 992, 'height': 54}, # Left shoulder button
'Joy_9': {'Type': 'Digital', 'x': 484, 'y': 1720, 'width': 1192, 'height': 54}, # Select button
'Joy_10': {'Type': 'Digital', 'x': 1744, 'y': 1720, 'width': 1192, 'height': 54}, # Start button
'Joy_11': {'Type': 'Digital', 'x': 2404, 'y': 354, 'width': 992, 'height': 54}, # R3 button
'Joy_12': {'Type': 'Digital', 'x': 24, 'y': 354, 'width': 992, 'height': 54}, # L3 button
'Joy_13': {'Type': 'Digital', 'x': 2684, 'y': 808, 'width': 1132, 'height': 54}, # Gear stick towards
'Joy_14': {'Type': 'Digital', 'x': 2684, 'y': 752, 'width': 1132, 'height': 54}, # Gear stick away
'Joy_15': {'Type': 'Digital', 'x': 2684, 'y': 1494, 'width': 1132, 'height': 108}, # Dial button
'Joy_16': {'Type': 'Digital', 'x': 84, 'y': 1554, 'width': 1132, 'height': 54}, # Plus button
'Joy_17': {'Type': 'Digital', 'x': 2684, 'y': 1274, 'width': 1132, 'height': 108}, # Dial clockwise
'Joy_18': {'Type': 'Digital', 'x': 2684, 'y': 1384, 'width': 1132, 'height': 108}, # Dial anticlockwise
'Joy_19': {'Type': 'Digital', 'x': 84, 'y': 1610, 'width': 1132, 'height': 54}, # Minus button
'Joy_20': {'Type': 'Digital', 'x': 1544, 'y': 554, 'width': 332, 'height': 108}, # Horn
'Joy_21': {'Type': 'Digital', 'x': 1114, 'y': 1824, 'width': 1192, 'height': 54}, # Central button
'Joy_POV1Up': {'Type': 'Digital', 'x': 84, 'y': 870, 'width': 932, 'height': 108}, # PoV up
'Joy_POV1Right': {'Type': 'Digital', 'x': 84, 'y': 980, 'width': 932, 'height': 108}, # PoV right
'Joy_POV1Down': {'Type': 'Digital', 'x': 84, 'y': 1090, 'width': 932, 'height': 108}, # PoV down
'Joy_POV1Left': {'Type': 'Digital', 'x': 84, 'y': 1200, 'width': 932, 'height': 108}, # PoV left
'Joy_XAxis': {'Type': 'Analogue', 'x': 1284, 'y': 304, 'width': 932, 'height': 54}, # Steering wheel
},
'044FB351': { # Cougar MFD 1
'displayName': 'Cougar MFD 1',
'Joy_1': {'Type': 'Digital', 'x': 134, 'y': 604, 'width': 592, 'height': 100},
'Joy_2': {'Type': 'Digital', 'x': 239, 'y': 464, 'width': 592, 'height': 100},
'Joy_3': {'Type': 'Digital', 'x': 644, 'y': 320, 'width': 592, 'height': 100},
'Joy_4': {'Type': 'Digital', 'x': 1054, 'y': 464, 'width': 592, 'height': 100},
'Joy_5': {'Type': 'Digital', 'x': 1161, 'y': 604, 'width': 592, 'height': 100},
'Joy_6': {'Type': 'Digital', 'x': 1424, 'y': 932, 'width': 452, 'height': 100},
'Joy_7': {'Type': 'Digital', 'x': 1424, 'y': 1040, 'width': 452, 'height': 100},
'Joy_8': | |
self.nodes[1].sendalerttoaddress(addr0, amount, '', '', False, False, 1, i)
def test_alert_tx_change_is_by_default_sent_back_to_the_sender(self):
addr0 = self.nodes[0].getnewaddress()
alert_addr1 = self.nodes[1].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
# mine some coins to alert_addr1
self.nodes[1].generatetoaddress(200, alert_addr1['address'])
# create atx
amount = 10
atxid = self.nodes[1].sendalerttoaddress(addr0, amount)
atx = self.nodes[1].getrawtransaction(atxid, True)
fee = self.COINBASE_AMOUNT - self.sum_vouts_value(atx)
change = self.COINBASE_AMOUNT - amount - fee
change_vout = atx['vout'][self.find_vout_n(atx, change)]
# assert
assert len(change_vout['scriptPubKey']['addresses']) == 1
assert alert_addr1['address'] == change_vout['scriptPubKey']['addresses'][0]
def test_instant_tx_change_is_by_default_sent_back_to_the_sender(self):
addr0 = self.nodes[0].getnewaddress()
instant_addr1 = self.nodes[1].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
# mine some coins to instant_addr1
self.nodes[1].generatetoaddress(200, instant_addr1['address'])
# create itx
amount = 10
atxid = self.nodes[1].sendinstanttoaddress(addr0, amount, [self.alert_instant_privkey])
atx = self.nodes[1].getrawtransaction(atxid, True)
fee = self.COINBASE_AMOUNT - self.sum_vouts_value(atx)
change = self.COINBASE_AMOUNT - amount - fee
change_vout = atx['vout'][self.find_vout_n(atx, change)]
# assert
assert len(change_vout['scriptPubKey']['addresses']) == 1
assert instant_addr1['address'] == change_vout['scriptPubKey']['addresses'][0]
def test_sendalerttoaddress_selects_coins_on_instant_addresses_only(self):
instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
addr0 = self.nodes[0].getnewaddress()
other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
self.nodes[0].generatetoaddress(200, instant_addr0['address'])
self.nodes[0].generatetoaddress(200, addr0)
coins_to_use = self.nodes[0].listunspent()
coins_to_use = [c for c in coins_to_use if c['address'] == instant_addr0['address']]
assert len(coins_to_use) == 200
atxid = self.nodes[0].sendalerttoaddress(other_addr, self.COINBASE_AMOUNT * 200, '', '', True)
atx = self.nodes[0].getrawtransaction(atxid, True)
self.nodes[0].generatetoaddress(1, other_addr)
# assert
self.sync_all()
assert len(atx['vin']) == 200
assert {v['txid']: v['vout'] for v in atx['vin']} == {c['txid']: c['vout'] for c in coins_to_use}
assert atxid in self.nodes[0].getbestblock()['atx']
def test_sendalerttoaddress_selects_coins_on_instant_and_alert_addresses_only(self):
alert_addr0 = self.nodes[0].getnewvaultalertaddress(self.alert_instant_pubkey)
instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
addr0 = self.nodes[0].getnewaddress()
other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
self.nodes[0].generatetoaddress(100, instant_addr0['address'])
self.nodes[0].generatetoaddress(100, alert_addr0['address'])
self.nodes[0].generatetoaddress(200, addr0)
coins_to_use = self.nodes[0].listunspent()
coins_to_use = [c for c in coins_to_use if c['address'] == alert_addr0['address']]
assert len(coins_to_use) == 100
atxid = self.nodes[0].sendalerttoaddress(other_addr, self.COINBASE_AMOUNT * 100, '', '', True)
atx = self.nodes[0].getrawtransaction(atxid, True)
self.nodes[0].generatetoaddress(1, other_addr)
# assert
self.sync_all()
assert len(atx['vin']) == 100
assert {v['txid']: v['vout'] for v in atx['vin']} == {c['txid']: c['vout'] for c in coins_to_use}
assert atxid in self.nodes[0].getbestblock()['atx']
def test_sendinstanttoaddress_selects_coins_on_instant_addresses_only(self):
alert_addr0 = self.nodes[0].getnewvaultalertaddress(self.alert_recovery_pubkey)
instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
addr0 = self.nodes[0].getnewaddress()
other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
self.nodes[0].generatetoaddress(100, alert_addr0['address'])
self.nodes[0].generatetoaddress(100, instant_addr0['address'])
self.nodes[0].generatetoaddress(200, addr0)
coins_to_use = self.nodes[0].listunspent()
coins_to_use = [c for c in coins_to_use if c['address'] == instant_addr0['address']]
assert len(coins_to_use) == 100
txid = self.nodes[0].sendinstanttoaddress(other_addr, self.COINBASE_AMOUNT * 100, [self.alert_instant_privkey], '', '', True)
tx = self.nodes[0].getrawtransaction(txid, True)
self.nodes[0].generatetoaddress(1, other_addr)
# assert
self.sync_all()
assert len(tx['vin']) == 100
assert {v['txid']: v['vout'] for v in tx['vin']} == {c['txid']: c['vout'] for c in coins_to_use}
assert txid in self.nodes[0].getbestblock()['tx']
@introduce_and_reset_blockchain
def test_sendinstanttoaddress_selects_coins_on_instant_addresses_only_with_label(self):
label = "rainy day"
alert_addr0 = self.nodes[0].getnewvaultalertaddress(self.alert_recovery_pubkey, label)
instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
addr0 = self.nodes[0].getnewaddress()
other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
self.nodes[0].generatetoaddress(100, alert_addr0['address'])
self.nodes[0].generatetoaddress(100, instant_addr0['address'])
self.nodes[0].generatetoaddress(200, addr0)
coins_to_use = self.nodes[0].listunspent()
coins_to_use = [c for c in coins_to_use if c['address'] == instant_addr0['address']]
txid = self.nodes[0].sendinstanttoaddress(other_addr, self.COINBASE_AMOUNT * 100, [self.alert_instant_privkey], '', '', True)
tx = self.nodes[0].getrawtransaction(txid, True)
self.nodes[0].generatetoaddress(1, other_addr)
transactions = self.nodes[0].listtransactions(label)
# assert
self.sync_all()
assert len(tx['vin']) == 100
assert {v['txid']: v['vout'] for v in tx['vin']} == {c['txid']: c['vout'] for c in coins_to_use}
assert txid in self.nodes[0].getbestblock()['tx']
assert transactions != None
for transaction in transactions:
assert transaction['label'] == label
    @introduce_and_reset_blockchain
    def test_sendinstanttoaddress_selects_coins_on_instant_addresses_only_differnet_addr_type(self):
        """Run the instant-spend flow for each supported vault alert address type.

        NOTE(review): `cnt` starts at 1 and is decremented on each
        JSONRPCException, and the final assert requires cnt == 0 -- i.e.
        exactly one of the four runs (presumably the 'invalid' type) is
        expected to raise. TODO confirm that is the intent; two failures
        would drive cnt negative and also fail the assert.
        """
        # address *types* passed to getnewvaultalertaddress; the last one is bogus
        addresss = ['legacy', 'p2sh-segwit', 'bech32', 'invalid']
        cnt = 1
        for address in addresss:
            try:
                alert_addr0 = self.nodes[0].getnewvaultalertaddress(self.alert_recovery_pubkey, '', address)
                instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
                addr0 = self.nodes[0].getnewaddress()
                other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
                self.nodes[0].generatetoaddress(100, alert_addr0['address'])
                self.nodes[0].generatetoaddress(100, instant_addr0['address'])
                self.nodes[0].generatetoaddress(200, addr0)
                coins_to_use = self.nodes[0].listunspent()
                coins_to_use = [c for c in coins_to_use if c['address'] == instant_addr0['address']]
                txid = self.nodes[0].sendinstanttoaddress(other_addr, self.COINBASE_AMOUNT * 100, [self.alert_instant_privkey], '', '', True)
                tx = self.nodes[0].getrawtransaction(txid, True)
            except JSONRPCException as e:
                # expected only for the invalid address type
                cnt -= 1
        assert cnt == 0
def test_sendinstanttoaddress_with_multiple_instant_addresses(self):
instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
instant_addr01 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
addr0 = self.nodes[0].getnewaddress()
other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
self.nodes[0].generatetoaddress(100, instant_addr01['address'])
self.nodes[0].generatetoaddress(100, instant_addr0['address'])
self.nodes[0].generatetoaddress(200, addr0)
coins_to_use = self.nodes[0].listunspent()
coins_to_use = [c for c in coins_to_use if c['address'] in [instant_addr0['address'], instant_addr01['address']]]
assert len(coins_to_use) == 200
txid = self.nodes[0].sendinstanttoaddress(other_addr, self.COINBASE_AMOUNT * 200, [self.alert_instant_privkey], '', '', True)
tx = self.nodes[0].getrawtransaction(txid, True)
self.nodes[0].generatetoaddress(1, other_addr)
# assert
self.sync_all()
assert len(tx['vin']) == 200
assert {v['txid']: v['vout'] for v in tx['vin']} == {c['txid']: c['vout'] for c in coins_to_use}
assert txid in self.nodes[0].getbestblock()['tx']
def test_sendinstanttoaddress_fails_when_no_coins_available_on_instant_addresses(self):
alert_addr0 = self.nodes[0].getnewvaultalertaddress(self.alert_instant_pubkey)
addr0 = self.nodes[0].getnewaddress()
other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
self.nodes[0].importprivkey(self.alert_instant_privkey)
self.nodes[0].generatetoaddress(200, alert_addr0['address']) # coins are available on alert address ...
self.nodes[0].generatetoaddress(200, addr0) # ... and regular address ...
error = None
try:
self.nodes[0].sendinstanttoaddress(other_addr, 10) # ... so this call should fail
except Exception as e:
error = e.error
# assert
self.sync_all()
assert error['code'] == -4
assert 'Insufficient funds' in error['message']
def test_sendtoaddress_fails_when_no_coins_available_on_regular_addresses(self):
instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
self.nodes[0].generatetoaddress(200, instant_addr0['address']) # coins are available only on instant address ...
error = None
try:
self.nodes[0].sendtoaddress(other_addr, 10) # ... so this call should fail
except Exception as e:
error = e.error
# assert
self.sync_all()
assert error['code'] == -4
assert 'Insufficient funds' in error['message']
def test_sendtoaddress_selects_coins_on_regular_addresses_only(self):
instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
addr0 = self.nodes[0].getnewaddress()
other_addr = '2N34KyQQj97pAivV59wfTkzksYuPdR2jLfi'
self.nodes[0].generatetoaddress(200, addr0)
self.nodes[0].generatetoaddress(200, instant_addr0['address'])
coins_to_use = self.nodes[0].listunspent()
coins_to_use = [c for c in coins_to_use if c['address'] == addr0]
assert len(coins_to_use) == 200
txid = self.nodes[0].sendtoaddress(other_addr, self.COINBASE_AMOUNT * 200, '', '', True)
tx = self.nodes[0].getrawtransaction(txid, True)
self.nodes[0].generatetoaddress(1, other_addr)
# assert
self.sync_all()
assert len(tx['vin']) == 200
assert {v['txid']: v['vout'] for v in tx['vin']} == {c['txid']: c['vout'] for c in coins_to_use}
assert txid in self.nodes[0].getbestblock()['tx']
    def test_recovery_tx_is_incomplete_and_rejected_when_missing_recovery_key(self):
        """Recovery tx signed with only the instant key stays incomplete.

        Broadcasting the partially signed tx must be rejected with -26
        (non-mandatory-script-verify-flag).
        """
        instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
        info = self.nodes[0].getaddressinfo(instant_addr0['address'])
        addr0 = self.nodes[0].getnewaddress()
        addr1 = self.nodes[1].getnewaddress()
        self.nodes[0].generatetoaddress(200, instant_addr0['address'])
        # send atx and mine block with this atx
        atxid = self.nodes[0].sendalerttoaddress(addr1, 10)
        self.nodes[0].generatetoaddress(1, instant_addr0['address'])
        # recover atx
        recoverytx = self.nodes[0].createrecoverytransaction(atxid, [{addr0: 174.99}])
        # only the instant key is provided; the recovery key is missing
        recoverytx = self.nodes[0].signrecoverytransaction(recoverytx, [self.alert_instant_privkey], info['hex'], instant_addr0['redeemScript'])
        assert not recoverytx['complete']
        error = None
        try:
            self.nodes[0].sendrawtransaction(recoverytx['hex'])
        except Exception as e:
            error = e.error
        # assert
        self.sync_all()
        assert error['code'] == -26
        assert 'non-mandatory-script-verify-flag' in error['message']
    def test_recovery_tx_is_rejected_when_missing_both_instant_and_recovery_keys(self):
        """Signing a recovery tx with no keys at all must raise -5.

        signrecoverytransaction is expected to fail because the produced
        transaction is not of type vaultrecovery; balances are also checked.
        NOTE(review): the 17664/17500 balance values assume the fixed regtest
        coinbase schedule used by this test suite — confirm against setup.
        """
        instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
        info = self.nodes[0].getaddressinfo(instant_addr0['address'])
        addr0 = self.nodes[0].getnewaddress()
        addr1 = self.nodes[1].getnewaddress()
        self.nodes[0].generatetoaddress(200, instant_addr0['address'])
        # send atx and mine block with this atx
        atxid = self.nodes[0].sendalerttoaddress(addr1, 10)
        self.nodes[0].generatetoaddress(1, instant_addr0['address'])
        # recover atx
        recoverytx = self.nodes[0].createrecoverytransaction(atxid, [{addr0: 174.99}])
        error = None
        try:
            # empty key list: neither the instant nor the recovery key is given
            self.nodes[0].signrecoverytransaction(recoverytx, [], info['hex'], instant_addr0['redeemScript'])
        except Exception as e:
            error = e.error
        # assert
        self.sync_all()
        assert int(self.nodes[0].getalertbalance('*', 0)) == 17664
        assert int(self.nodes[0].getalertbalance('*', 1)) == 17500
        assert error['code'] == -5
        assert 'Produced invalid transaction type, type vaultrecovery was expected' in error['message']
    def test_recovery_tx_is_incomplete_and_rejected_when_missing_instant_key(self):
        """Recovery tx signed with only the recovery key stays incomplete.

        Broadcasting the partially signed tx must be rejected with -26
        (non-mandatory-script-verify-flag).
        """
        instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
        info = self.nodes[0].getaddressinfo(instant_addr0['address'])
        addr0 = self.nodes[0].getnewaddress()
        addr1 = self.nodes[1].getnewaddress()
        self.nodes[0].generatetoaddress(200, instant_addr0['address'])
        # send atx and mine block with this atx
        atxid = self.nodes[0].sendalerttoaddress(addr1, 10)
        self.nodes[0].generatetoaddress(1, instant_addr0['address'])
        # recover atx
        recoverytx = self.nodes[0].createrecoverytransaction(atxid, [{addr0: 174.99}])
        # only the recovery key is provided; the instant key is missing
        recoverytx = self.nodes[0].signrecoverytransaction(recoverytx, [self.alert_recovery_privkey], info['hex'], instant_addr0['redeemScript'])
        assert not recoverytx['complete']
        error = None
        try:
            self.nodes[0].sendrawtransaction(recoverytx['hex'])
        except Exception as e:
            error = e.error
        # assert
        self.sync_all()
        assert error['code'] == -26
        assert 'non-mandatory-script-verify-flag' in error['message']
    def test_recovery_tx_when_all_keys_imported(self):
        """Recovery tx signs successfully when both keys are in the wallet.

        Both the recovery and instant private keys are imported first, so
        signrecoverytransaction needs no explicit key list.
        """
        instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
        info = self.nodes[0].getaddressinfo(instant_addr0['address'])
        addr0 = self.nodes[0].getnewaddress()
        addr1 = self.nodes[1].getnewaddress()
        self.nodes[0].generatetoaddress(200, instant_addr0['address'])
        # send atx and mine block with this atx
        atxid = self.nodes[0].sendalerttoaddress(addr1, 10)
        self.nodes[0].generatetoaddress(1, instant_addr0['address'])
        # import keys into wallet
        self.nodes[0].importprivkey(self.alert_recovery_privkey)
        self.nodes[0].importprivkey(self.alert_instant_privkey)
        # recover atx
        recoverytx = self.nodes[0].createrecoverytransaction(atxid, [{addr0: 174.99}])
        recoverytx = self.nodes[0].signrecoverytransaction(recoverytx, [], info['hex'], instant_addr0['redeemScript'])
        # assert
        self.sync_all()
        assert recoverytx is not None
        assert recoverytx != ''
    def test_recovery_tx_is_rejected_when_only_recovery_key_imported(self):
        """Signing must stay incomplete when only the recovery key is in the wallet.

        The returned result carries a zero-signature error for the input.
        """
        instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
        addr0 = self.nodes[0].getnewaddress()
        addr1 = self.nodes[1].getnewaddress()
        self.nodes[0].generatetoaddress(200, instant_addr0['address'])
        # send atx and mine block with this atx
        atxid = self.nodes[0].sendalerttoaddress(addr1, 10)
        self.nodes[0].generatetoaddress(1, instant_addr0['address'])
        # import keys into wallet
        self.nodes[0].importprivkey(self.alert_recovery_privkey)
        # recover atx
        recoverytx = self.nodes[0].createrecoverytransaction(atxid, [{addr0: 174.99}])
        recoverytx = self.nodes[0].signrecoverytransaction(recoverytx, [], instant_addr0['redeemScript'])
        self.sync_all()
        assert not recoverytx['complete']
        assert 'Unable to sign input, zero signature (possibly missing key)' in recoverytx['errors'][0]['error']
    def test_recovery_tx_is_rejected_when_only_instant_key_imported(self):
        """Signing must stay incomplete when only the instant key is in the wallet.

        The returned result carries a zero-signature error for the input.
        """
        instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
        addr0 = self.nodes[0].getnewaddress()
        addr1 = self.nodes[1].getnewaddress()
        self.nodes[0].generatetoaddress(200, instant_addr0['address'])
        # send atx and mine block with this atx
        atxid = self.nodes[0].sendalerttoaddress(addr1, 10)
        self.nodes[0].generatetoaddress(1, instant_addr0['address'])
        # import keys into wallet
        self.nodes[0].importprivkey(self.alert_instant_privkey)
        # recover atx
        recoverytx = self.nodes[0].createrecoverytransaction(atxid, [{addr0: 174.99}])
        recoverytx = self.nodes[0].signrecoverytransaction(recoverytx, [], instant_addr0['redeemScript'])
        self.sync_all()
        assert not recoverytx['complete']
        assert 'Unable to sign input, zero signature (possibly missing key)' in recoverytx['errors'][0]['error']
    def test_recovery_tx_when_instant_key_imported_and_recovery_key_given(self):
        """Signing succeeds when instant key is imported and recovery key is passed.

        Mixed key sourcing: one key from the wallet, one given explicitly.
        """
        instant_addr0 = self.nodes[0].getnewvaultinstantaddress(self.alert_instant_pubkey, self.alert_recovery_pubkey)
        addr0 = self.nodes[0].getnewaddress()
        addr1 = self.nodes[1].getnewaddress()
        self.nodes[0].generatetoaddress(200, instant_addr0['address'])
        # send atx and mine block with this atx
        atxid = self.nodes[0].sendalerttoaddress(addr1, 10)
        self.nodes[0].generatetoaddress(1, instant_addr0['address'])
        # import keys into wallet
        self.nodes[0].importprivkey(self.alert_instant_privkey)
        # recover atx
        recoverytx = self.nodes[0].createrecoverytransaction(atxid, [{addr0: 174.99}])
        recoverytx = self.nodes[0].signrecoverytransaction(recoverytx, [self.alert_recovery_privkey], instant_addr0['redeemScript'])
        # assert
        self.sync_all()
        assert recoverytx is not None
        assert recoverytx != ''
| |
nocomment(l,"--")
# propagate line number
codeonly.append((i," ".join(l)))
weird = {} # stores all errors
for i,line in codeonly:
for token in tokenizer.findall(line):
# it can be all in caps (constants)
if token.upper() == token:
continue
# hex definition, give up
if "0x" in token:
continue
# exceptions...
if mixed.match(token.lower()):
continue
if caps.findall(token):
weird.setdefault("Found Capitals in token: %s" % repr(token),[]).append(i)
# reconsitute errors, with multiple line number if error has occured multiple time
for s,nums in weird.items():
err = "%s: %s" % (",".join(map(str,nums)),s)
errors.append(err)
return errors
def validate_procfunc_defs(content):
    """Validate procedure/function definition lines.

    content: list of (line_number, line) tuples.
    Returns a list of error strings.  Every definition must carry an
    explicit (possibly empty) parameter list and must not have whitespace
    before the opening parenthesis.
    """
    errors = []
    # raw strings so the \s escape is not interpreted by Python itself
    func_proc = re.compile(r"^(procedure|function)")
    no_spaces = re.compile(r".*\s+\(.*is")
    for num, line in content:
        # only definition lines are checked
        if not func_proc.match(line):
            continue
        # don't even check both (), not needed
        if "(" not in line:
            errors.append("%d: %s missing (). Calls must also be explicit" % (num, repr(line)))
        if no_spaces.match(line):
            errors.append("%d: found a space before parenthesis: %s" % (num, repr(line)))
    return errors
def validate_code(content):
    """Run every code-level validation pass over content and merge their errors."""
    collected = []
    collected.extend(validate_lower_case(content))
    collected.extend(validate_procfunc_defs(content))
    # further validation passes can be appended here
    return collected
def validate(filename):
    """Validate one jal file: filename checks, header checks and code checks.

    Returns a (errors, warnings) pair of string lists.
    """
    errors = []
    warnings = []
    errs, warns = validate_filename(filename)
    errors.extend(errs)
    warnings.extend(warns)
    # also extract line number (enumerate from 0, count from 1);
    # close the handle explicitly instead of leaking it
    fh = open(filename, "r")
    try:
        content = [(i + 1, l) for i, l in enumerate(fh.readlines())]
    finally:
        fh.close()
    errors.extend(validate_header(content))
    # remaining content has no more header
    errs = validate_code(content)
    errors.extend(errs)
    return errors, warnings
def report(filename,errors,warnings):
    """Print a per-file validation report to stderr.

    Returns True when at least one error or warning was reported,
    and implicitly None otherwise (callers rely on truthiness only).
    """
    print >> sys.stderr, "File: %s" % filename
    print >> sys.stderr, "%d errors found" % len(errors)
    for err in errors:
        print >> sys.stderr, "\tERROR: %s:%s" % (os.path.basename(filename),err)
    print >> sys.stderr
    print >> sys.stderr, "%d warnings found" % len(warnings)
    for warn in warnings:
        print >> sys.stderr, "\twarning: %s:%s" % (os.path.basename(filename),warn)
    if errors or warnings:
        return True
def do_validate(args):
    """Validate every jal file named in args; exit 1 if any file failed.

    No jallib options are supported (yet) for this action; args holds the
    jal files (or a single glob pattern) to validate.
    """
    failed = False
    # expand a single glob pattern when the glob module is available
    if has_glob and len(args) == 1:
        args = glob.glob(args[0])
    for filename in args:
        errors, warnings = validate(filename)
        if report(filename, errors, warnings):
            failed = True
    sys.exit(1 if failed else 0)
#--------#
# SAMPLE #
#--------#
def parse_tags(content, *args):
    """Group content lines by "@jallib <tag> <value>" markers.

    Only tags listed in *args open a new group; every subsequent line
    (the marker line included) is appended to the active group.
    Returns {tag: {value: [lines]}}.
    """
    restags = {}
    active_tag = active_value = None
    for line in content:
        if "@jallib" in line:
            fields = line.split()
            # only keep "interesting" jallib invokations: exactly 3 tokens
            if len(fields) == 3:
                _, tag, value = fields
                if tag in args:
                    active_tag, active_value = tag, value
                    restags.setdefault(active_tag, {})[active_value] = []
        if active_tag:
            restags[active_tag][active_value].append(line)
    return restags
def parse_sections(content):
    """Shortcut: group content lines by "@jallib section <name>" markers."""
    return parse_tags(content, "section")
def merge_board_testfile(boardcontent,testcontent):
    """Merge a board file into a test file by expanding "@jallib use" markers.

    boardcontent : list of lines from the board file (defines named sections)
    testcontent  : list of lines from the test file
    Each marker line ("--" or ";" comment followed by "@jallib use <name>")
    is replaced by the matching board section.  Returns the merged text.
    """
    board = parse_sections(boardcontent)
    # replace sections in testcontent
    testcontent = os.linesep.join(testcontent)
    # bug fix: the previous pattern "((--)+)|(;+)\s*@jallib use (.*)" made the
    # alternation bind to the whole "--" branch, so "--" markers never captured
    # a section name and were silently skipped; group the comment chars instead
    toreplace = [m for m in re.finditer(r"((--)+|;+)\s*@jallib use (.*)",testcontent,re.MULTILINE) if m.groups()[-1]]
    newcontent = ""
    start = 0
    for m in toreplace:
        sectionname = m.groups()[-1].strip()
        # when eating line sep char (to keep layout),
        # remember some OS needs 2 chars !
        newcontent += testcontent[start:m.start() - len(os.linesep)]
        if board['section'].get(sectionname):
            new = os.linesep.join(board['section'][sectionname])
            start = m.end() + 1 # next char
            newcontent += new
    newcontent += testcontent[start:]
    return newcontent
def normalize_linefeed(content):
    """Return content with every line ending normalized to a single "\\n".

    Bug fix: the previous implementation passed re.MULTILINE as re.split()'s
    positional *maxsplit* argument (silently limiting the number of splits),
    and its character class "[\\n|\\r]" also split on literal '|' characters.
    """
    # order matters: collapse DOS endings first, then bare carriage returns
    content = content.replace("\r\n", "\n")
    content = content.replace("\r", "\n")
    return content
def generate_one_sample(boardfile,testfile,outfile,deleteiffailed=True):
    """Generate one sample by merging a board file into a test file, then compile it.

    boardfile : jal board definition providing "@jallib section" blocks
    testfile  : jal test skeleton containing "@jallib use" markers
    outfile   : path of the generated sample file
    deleteiffailed : remove the generated file (and compiler products) when
                     compilation fails
    Raises Exception when the generated sample does not compile.
    """
    # try to find which linefeed is used
    board = file(boardfile).read().splitlines()
    test = file(testfile).read().splitlines()
    # keep test's headers, but enrich them with info about how files were merged
    # headers need index (enumerate() on content)
    # extract_header will change content in place, ie. will remove
    # header from test content.
    test = [t for t in enumerate(test)]
    header = extract_header(test)
    header = os.linesep.join([h for i,h in header])
    # append provenance lines so the sample records its board/test origin
    header += os.linesep.join([
            os.linesep + "--",
            "-- This file has been generated by jallib.py from:",
            "-- * board: %s" % os.path.basename(boardfile),
            "-- * test : %s" % os.path.basename(testfile),
            "--",
            os.linesep])
    # back to content without index
    test = [l for i,l in test]
    merged = merge_board_testfile(board,test)
    # wb: write binary format, no ASCII/chars interpretation
    fout = file(outfile,"wb")
    fout.write(header)
    fout.write(merged)
    fout.close()
    # compile it !
    status = do_compile([outfile],exitonerror=False,clean=True)
    if status == 0:
        print "Succesfully generated sample '%s' from board '%s' and test '%s'" % (outfile,boardfile,testfile)
    elif deleteiffailed:
        # delete the file !
        clean_compiler_products(outfile)
        os.unlink(outfile)
        raise Exception("Can't compile sample '%s' generated from '%s' and test '%s'" % (outfile,boardfile,testfile))
def find_board_files(boarddir):
    """Return full paths of board definition files (board_*.jal) in boarddir."""
    names = os.listdir(boarddir)
    return [os.path.join(boarddir, name)
            for name in names
            if not name.startswith(".") and name.startswith("board_") and name.endswith(".jal")]
def find_test_files(testdir):
    """Return full paths of test_*.jal files under testdir (except the "board" dir)."""
    testfiles = []
    # testdir contains the "board" dir, which must be excluded
    subdirs = [entry for entry in os.listdir(testdir)
               if entry != "board" and not entry.startswith(".")]
    for subdir in subdirs:
        full_subdir = os.path.join(testdir, subdir)
        found = get_jal_filenames(full_subdir, predicate=lambda _, x: x.startswith("test_"))
        testfiles.extend(os.path.join(full_subdir, name) for name in found.values())
    return testfiles
def preferred_board(board):
    """True when the board file carries the "@jallib preferred" tag.

    Comment context is not inspected; the tag is assumed not to occur in
    ordinary comments.
    """
    return "@jallib preferred" in file(board).read()
def is_test_autoable(test):
    """True unless the test file opts out of sample generation via "@jallib skip-auto"."""
    content = file(test).read()
    return "@jallib skip-auto" not in content
def generate_samples_for_board(path_to_sample,board,outdir=None):
    """Generate one sample per auto-able test file for the given board.

    path_to_sample : root path used to locate test/ and sample/ directories
    board          : board file used for every generated sample
    outdir         : optional target directory for the generated samples
    Failures for one board/test combination are reported and skipped.
    """
    if outdir:
        assert os.path.isdir(outdir), "%s must be a directory when auto-generate samples (this is where samples will be stored)" % outdir
    testpath = get_full_test_path(path_to_sample)
    # NOTE(review): samplepath is computed but never used in this function
    samplepath = get_full_sample_path(path_to_sample)
    fulltestfiles = find_test_files(testpath)
    picname = os.path.basename(board).split("_")[1] # naming convention
    # in automated mode, only board files with "@jallib preferred" are kept.
    # this is because there can be multiple boards for a given PIC, but only
    # ony being used to auto-generate samples
    for test in fulltestfiles:
        if not is_test_autoable(test):
            print >> sys.stderr, "Skip test '%s' because tagged 'skip-auto'" % test
            continue
        samplename = picname + "_" + os.path.basename(test)[5:] # remove "test_", naming convention
        fullsamplepath = outdir and os.path.join(outdir,samplename) or get_full_sample_path(path_to_sample,samplename)
        try:
            generate_one_sample(board,test,fullsamplepath)
        except Exception,e:
            # best effort: report the failing combination and move on
            print >> sys.stderr,"Invalid board/test combination: %s" % e
            import traceback
            print >> sys.stderr, traceback.format_exc()
            continue
def generate_samples_for_test(path_to_sample,test,outdir=None):
    """Generate one sample per 'preferred' board for the given test file.

    path_to_sample : root path used to locate board/ and sample/ directories
    test           : test file used for every generated sample
    outdir         : optional target directory for the generated samples
    Failures for one board/test combination are reported and skipped.
    """
    if outdir:
        assert os.path.isdir(outdir), "%s must be a directory when auto-generate samples (this is where samples will be stored)" % outdir
    # NOTE(review): samplepath is computed but never used in this function
    samplepath = get_full_sample_path(path_to_sample)
    boardpath = get_full_board_path(path_to_sample)
    fullboardfiles = find_board_files(boardpath)
    if not is_test_autoable(test):
        print >> sys.stderr, "Skip test '%s' because tagged 'skip-auto'" % test
        return
    # in automated mode, only board files with "@jallib preferred" are kept.
    # this is because there can be multiple boards for a given PIC, but only
    # ony being used to auto-generate samples
    for board in fullboardfiles:
        if not preferred_board(board):
            print >> sys.stderr,"board %s is not 'preferred', skip it" % board
            continue
        picname = os.path.basename(board).split("_")[1] # naming convention
        samplename = picname + "_" + os.path.basename(test)[5:] # remove "test_", naming convention
        fullsamplepath = outdir and os.path.join(outdir,samplename) or get_full_sample_path(path_to_sample,samplename)
        try:
            generate_one_sample(board,test,fullsamplepath)
        except Exception,e:
            # best effort: report the failing combination and move on
            print >> sys.stderr,"Invalid board/test combination: %s" % e
            import traceback
            print >> sys.stderr, traceback.format_exc()
            continue
def generate_all_samples(path_to_sample,outdir=None):
    """Generate samples for every 'preferred' board found under path_to_sample."""
    board_dir = get_full_board_path(path_to_sample)
    for board_file in find_board_files(board_dir):
        # only 'preferred' boards take part in automatic generation
        if not preferred_board(board_file):
            print >> sys.stderr,"board %s is not 'preferred', skip it" % board_file
            continue
        generate_samples_for_board(path_to_sample,board_file,outdir)
def do_sample(args=[]):
try:
opts, args = getopt.getopt(args, ACTIONS['sample']['options'])
except getopt.error,e:
print >> sys.stderr, "Wrong option or missing argument: %s" % e.opt
sys.exit(255)
boardfile = None
testfile = None
outfile = None
automatic = None
path_to_sample = None
for o,v in opts:
if o == '-b':
boardfile = v
elif o == '-t':
testfile = v
elif o == '-o':
outfile = v
elif o == '-a':
automatic = True
path_to_sample = v
if automatic and path_to_sample and boardfile:
generate_samples_for_board(path_to_sample,boardfile,outdir=outfile)
elif automatic and testfile:
generate_samples_for_test(path_to_sample,testfile)
generate_samples_for_test(path_to_sample,testfile)
elif automatic and path_to_sample:
generate_all_samples(path_to_sample,outdir=outfile)
elif boardfile and testfile and outfile:
# don't delete sample if compilation fails: user knows what he's doing
generate_one_sample(boardfile,testfile,outfile,deleteiffailed=False)
else:
print >> sys.stderr, "Provide a board, a test file and an output file"
sys.exit(255)
#---------------#
# REINDENT FUNC #
#---------------#
# Keyword tables driving reindent_file.  NOTE(review): semantics inferred from
# names and partial use below — confirm against the full reindent logic.
POSTINC = ["assembler","block","case","while","for","forever","then","function","procedure"]  # open an indented block after this line
INLINEKW = ["assembler","block","case","function","procedure","if"]  # may appear inline with their body
PROTO = ["procedure","function"]  # definition keywords
PREDEC = ["end"]  # close a block: dedent before this line
PREINCPOST = ["else","elsif"]  # dedent this line, re-indent the following ones
def reindent_file(filename,withchar,howmany):
'''
Default jallib standard is 3-spaces indentation
'''
indentchars = howmany * withchar
data = file(filename).read()
lines = re.split("\n|\r\n",data)
# End the file with a linefeed
if re.match("[\S]+", lines[-1]):
lines.append(os.linesep)
level = 0
content = []
for l in lines:
# This exception is known as Joep's exception :)
if l.startswith(";"):
content.append(l)
continue
# check if comments
try:
code,comchars,comment = re.match("(.*?)(-{2}|;)(.*)",l).groups()
except AttributeError,e:
# no comments, normalize
code = l
comchars = comment = ""
# remove strings between " and ', to focus only on jal keywords
onlyjalkw = re.sub("[\"|'].*[\"|']","",code)
fields = map(lambda x: x.lower(),onlyjalkw.strip().split())
do_postinc = do_preincpost = do_predec = False
if set(fields).intersection(set(POSTINC)):
do_postinc = True
if set(fields).intersection(set(PREINCPOST)):
do_preincpost = True
if set(fields).intersection(set(PREDEC)):
do_predec = True
# search for inline code
reg = "|".join(INLINEKW+ ["loop"])
| |
in range(len(tmpMatrix2)):
result.append(tmpMatrix2[i])
for i in range(len(occurenceMatrix)):
print(occurenceMatrix[i])
print("")
return result
elif (operator == 'under') and (score >= 0):
for i in range(len(tmpMatrix)):
if (float(tmpMatrix[i][s]) < score) and (float(tmpMatrix[i][s]) != -1):
_scoreFilter1(i,tmpMatrix,opFilter,occurenceMatrix,tmpMatrix2)
for i in range(len(tmpMatrix2)):
result.append(tmpMatrix2[i])
for i in range(len(occurenceMatrix)):
print(occurenceMatrix[i])
print("")
return result
elif (operator == 'range') and (score2 != None) and (score2 >= 0):
for i in range(len(tmpMatrix)):
if (float(tmpMatrix[i][s]) >= score) and (float(tmpMatrix[i][s]) <= score2):
_scoreFilter1(i,tmpMatrix,opFilter,occurenceMatrix,tmpMatrix2)
for i in range(len(tmpMatrix2)):
result.append(tmpMatrix2[i])
for i in range(len(occurenceMatrix)):
print(occurenceMatrix[i])
print("")
return result
elif (score != -1) and (score < 0):
print('errore,inserire parametri corretti')
return None
elif (operator) == 'over' and (score == -1):
print('errore,inserire parametri corretti')
return None
elif (operator == 'range') and (score2 == None or score2 < 0):
print('errore,inserire parametri corretti')
return None
elif (operator == 'under') and (score == -1):
print('errore,inserire parametri corretti')
return None
else:
print('errore,operazioni duplicate inserite')
return None
def _scoreFilter2(i,tmpMatrix,currentOperation,previousOperations,countFirstOperation):
    """Inspect row i of tmpMatrix for currentOperation and collect its context.

    Appends the operation immediately preceding currentOperation (if any) to
    previousOperations (mutated in place) and builds a result row
    [name, probeID, camera, position-or-'First operation', 'score:<s>'].
    Returns (atLeastOneOp, tmpResult, countFirstOperation).
    """
    p = 0 #probeID index
    c = 1 #camera index
    s = 2 #score index
    op = 3 #operations index
    atLeastOneOp = False
    found = False
    lastOp = None
    # NOTE(review): duplicate assignment of atLeastOneOp; tmpOp is never used
    atLeastOneOp = False
    countOperations = 0
    tmp = []
    tmpOp = []
    tmpResult = []
    for j in range(len(tmpMatrix[i][op])):
        if tmpMatrix[i][op][j]['name'].lower() != currentOperation:
            # count operations applied before the one we are looking for
            countOperations = countOperations + 1
            lastOp = tmpMatrix[i][op][j]['name']
        else:
            found = True
            if lastOp != None:
                previousOperations.append(lastOp)
            break
    if found == True:
        # j still indexes the matched operation (loop variable reused after break)
        tmp.append(tmpMatrix[i][op][j]['name'])
        tmp.append(tmpMatrix[i][p])
        tmp.append(tmpMatrix[i][c])
        if countOperations > 0:
            tmp.append(countOperations)
        else:
            # the filter operation was the very first one applied to this probe
            tmp.append('First operation')
            countFirstOperation = countFirstOperation + 1
        tmp.append('score:' + '' + tmpMatrix[i][s])
        tmpResult.append(tmp)
        atLeastOneOp = True
    return atLeastOneOp,tmpResult,countFirstOperation
def countOccurrence(previousOperations):
    """Count how many times each operation occurs in previousOperations.

    Returns a list of [operation, count] pairs ordered by first appearance —
    the same output as the previous O(n^2) scan, computed in a single pass.
    """
    from collections import Counter
    # Counter is an insertion-ordered dict, so first-seen order is preserved
    counts = Counter(previousOperations)
    return [[operation, count] for operation, count in counts.items()]
def scoreFilter2(finalScorePath, opHistoryPath, score, opFilter, operator='null', score2=None):
    """For each filter operation, report which operations preceded it on probes
    whose score matches the requested condition.

    finalScorePath : csv file of probe scores (header row is dropped)
    opHistoryPath  : json file with the per-probe operation history
    score, score2  : threshold values interpreted according to *operator*
    opFilter       : list of operation names (lower-cased before matching)
    operator       : 'equal' | 'over' | 'under' | 'range' | 'null'
    Returns (result, finalResult), or (None, None) on invalid parameters.
    NOTE(review): the 'null' branch prints an error but still falls through to
    the final return, yielding ([], []) — confirm that is intended.
    """
    s = 2  # score column index in each csv row
    atLeastOneOp = False
    scoreMatrix = CSVreader(finalScorePath)
    scoreMatrix.remove(scoreMatrix[0]) # drop the header row
    # probeHistory.json
    with open(opHistoryPath) as f:
        opHistory = json.load(f)
    opFilter = [x.lower() for x in opFilter]
    duplicateOP = check(opFilter)
    if duplicateOP == False:
        # join score rows with their operation history
        tmpMatrix,maxlenght = AddHistory(scoreMatrix,opHistory)
        countOperations = 0
        previousOperations = []
        finalResult = []
        result = []
        occurrence = []
        countFirstOperation = 0
        if operator == 'null':
            print('errore,inserire operazioni')
        elif (operator == 'equal'):
            # one pass over all probes per requested filter operation
            for z in range(len(opFilter)):
                currentOperation = opFilter[z]
                for i in range(len(tmpMatrix)):
                    if float(tmpMatrix[i][s]) == score:
                        atLeastOneOp,tmpResult,countFirstOperation = _scoreFilter2(i,tmpMatrix,currentOperation,previousOperations,countFirstOperation)
                        if atLeastOneOp == True:
                            result.append(tmpResult)
                occurrence = countOccurrence(previousOperations)
                finalResult.append(currentOperation +' '+ 'N Times First Operations: ' + str(countFirstOperation))
                finalResult.append(occurrence)
                # reset accumulators before the next filter operation
                previousOperations = []
                occurrence = []
                countFirstOperation = 0
        elif (operator == 'over') and (score >= 0):
            for z in range(len(opFilter)):
                currentOperation = opFilter[z]
                for i in range(len(tmpMatrix)):
                    if float(tmpMatrix[i][s]) >= score:
                        atLeastOneOp,tmpResult,countFirstOperation = _scoreFilter2(i,tmpMatrix,currentOperation,previousOperations,countFirstOperation)
                        if atLeastOneOp == True:
                            result.append(tmpResult)
                occurrence = countOccurrence(previousOperations)
                finalResult.append(currentOperation +' '+ 'N Times First Operations: ' + str(countFirstOperation))
                finalResult.append(occurrence)
                previousOperations = []
                occurrence = []
                countFirstOperation = 0
        elif (operator == 'under') and (score >= 0):
            for z in range(len(opFilter)):
                currentOperation = opFilter[z]
                for i in range(len(tmpMatrix)):
                    # -1 marks opt-out probes and is excluded from 'under'
                    if float(tmpMatrix[i][s]) < score and float(tmpMatrix[i][s]) != -1:
                        atLeastOneOp,tmpResult,countFirstOperation = _scoreFilter2(i,tmpMatrix,currentOperation,previousOperations,countFirstOperation)
                        if atLeastOneOp == True:
                            result.append(tmpResult)
                occurrence = countOccurrence(previousOperations)
                finalResult.append(currentOperation +' '+ 'N Times First Operations: ' + str(countFirstOperation))
                finalResult.append(occurrence)
                previousOperations = []
                occurrence = []
                countFirstOperation = 0
        elif (operator == 'range') and (score >= 0) and (score2 != None) and (score2 >= 0):
            for z in range(len(opFilter)):
                currentOperation = opFilter[z]
                for i in range(len(tmpMatrix)):
                    # NOTE(review): upper bound is exclusive here, unlike scoreFilter1's 'range'
                    if float(tmpMatrix[i][s]) >= score and float(tmpMatrix[i][s]) < score2:
                        atLeastOneOp,tmpResult,countFirstOperation = _scoreFilter2(i,tmpMatrix,currentOperation,previousOperations,countFirstOperation)
                        if atLeastOneOp == True:
                            result.append(tmpResult)
                occurrence = countOccurrence(previousOperations)
                finalResult.append(currentOperation +' '+ 'N Times First Operations: ' + str(countFirstOperation))
                finalResult.append(occurrence)
                previousOperations = []
                occurrence = []
                countFirstOperation = 0
        # parameter validation: reject inconsistent operator/score combinations
        elif (score != -1) and (score < 0):
            print('errore,inserire parametri corretti')
            return None, None
        elif (operator == 'range') and score < 0:
            print('errore,inserire parametri corretti')
            return None, None
        elif (operator == 'range') and (score2 == None or score2 < 0):
            print('errore,inserire parametri corretti')
            return None, None
        elif (operator == 'under') and (score == -1):
            print('errore,inserire parametri corretti')
            return None, None
        elif (operator == 'over') and (score == -1):
            print('errore,inserire parametri corretti')
            return None, None
        # sort each occurrence table; plain strings in finalResult raise
        # IndexError inside sortScoreMatrix and are left as-is
        for i in range(len(finalResult)):
            try:
                finalResult[i] = sortScoreMatrix(finalResult[i])
            except IndexError:
                continue
        return result,finalResult
    else:
        print('errore,operazioni duplicate inserite!')
        return None, None
def allOperations(finalScorePath, opHistoryPath):
    """Print, sorted, every operation applied to any probe listed in the score file."""
    # finalScore.csv, header row dropped
    score_rows = CSVreader(finalScorePath)
    score_rows.remove(score_rows[0])
    # per-probe operation history
    with open(opHistoryPath) as f:
        op_history = json.load(f)
    seen = []
    for row in score_rows:
        for probe in op_history['probesFileID']:
            if row[0] != probe['probeID']:
                continue
            for operation in probe['operations']:
                name = operation['name']
                if name not in seen:
                    seen.append(name)
    seen.sort()
    printInColumn(seen)
def countcamera(V):
    """Print each camera's percentage share of the occurrences in V.

    Each camera is counted once, in order of first appearance; the table is
    then sorted via sortScoreMatrix and printed in columns.
    """
    cameraOccurrence = []
    seen = []
    for camera in V:
        # skip cameras already counted
        if camera in seen:
            continue
        seen.append(camera)
        share = (V.count(camera) / len(V)) * 100
        cameraOccurrence.append([camera, share])
    cameraOccurrence = sortScoreMatrix(cameraOccurrence)
    printInColumn(cameraOccurrence)
def cameraControl(finalScorePath,opHistoryPath,valoresoglia,valoreoptout):
    """Bucket probe scores per camera: opt-out, below threshold, at/above threshold.

    valoresoglia : threshold score separating the low/high buckets
    valoreoptout : score value that marks an opt-out probe
    Prints the camera distribution of each bucket via countcamera().
    """
    score = CSVreader(finalScorePath)
    score.remove(score[0])  # drop header row
    with open(opHistoryPath) as f:
        history = json.load(f)
    # join history and score rows on probeID -> [probeID, camera, score]
    joined = []
    for probe in history['probesFileID']:
        for row in score:
            if probe['probeID'] == row[0]:
                joined.append([row[0], row[1], row[3]])
    optout, below, above = [], [], []
    for probe_id, camera, raw_score in joined:
        value = float(raw_score)
        if value == valoreoptout:
            optout.append(camera)
        elif value < valoresoglia and value != valoreoptout:
            below.append(camera)
        elif value >= valoresoglia:
            above.append(camera)
    print("\n score =",valoreoptout)
    countcamera(optout)
    print("\n score <",valoresoglia)
    countcamera(below)
    print("\n score >",valoresoglia)
    countcamera(above)
    print("")
def get_parser():
    """Build the command-line parser with one subcommand per analysis.

    Bug fix: the camera-analysis help text was a copy-paste of the
    manipulation-analysis one; it now describes the camera analysis.
    """
    parser = argparse.ArgumentParser(description="", formatter_class=RawTextHelpFormatter)
    subparser = parser.add_subparsers(dest='subcommand')
    # subcommand create-json-probe-history
    history_parser = subparser.add_parser('create-json-probe-history', help='Create a json file containing the probe history')
    history_parser.add_argument("-p", "--probejournaljoin", required=True, help="insert probejournalname path")
    history_parser.add_argument("-j", "--journalmask", required=True, help="insert journalmask path")
    history_parser.add_argument("-o", "--operations", required=True, help="insert json file contains all operations")
    # subcommand division for optout, matched and no_matched
    division_parse = subparser.add_parser('division', help='Mostra la seguente tabella:'
                                                           '\n|ID_probe|Camera|Score|Manip/NonManip|Formato|'
                                                           '\n   aaa     xx    46.15       Y         .avi\n')
    division_parse.add_argument("-sp", "--scorepath", required=True, help="path of score csv")
    division_parse.add_argument("-hp", "--probehistory", required=True, help="path of probe history")
    division_parse.add_argument("-cp", "--camerapath", required=True, help="path of camera reference")
    division_parse.add_argument("-vs", "--valoresoglia", type=int, required=True, help="valore di soglia")
    division_parse.add_argument("-vo", "--valoreoptout", type=int, required=True, help="valore di optout")
    # subcommand manipulation-analysis
    manipulation_parser = subparser.add_parser('manipulation-analysis', help='List all possible manipulations')
    manipulation_parser.add_argument("-sp", "--scorepath", required=True, help="path of score csv")
    manipulation_parser.add_argument("-hp", "--probehistory", required=True, help="path of probe history")
    manipulation_parser.add_argument("-cp", "--camerapath", required=True, help="path of camera reference")
    manipulation_parser.add_argument("-vs", "--valoresoglia", type=int, required=True, help="valore di soglia")
    manipulation_parser.add_argument("-vo", "--valoreoptout", type=int, required=True, help="valore di optout")
    # subcommand camera-analysis
    camera_parser = subparser.add_parser('camera-analysis', help='Camera distribution analysis per score bucket')
    camera_parser.add_argument("-sp", "--scorepath", required=True, help="path of score csv")
    camera_parser.add_argument("-hp", "--probehistory", required=True, help="path of probe history")
    camera_parser.add_argument("-vs", "--valoresoglia", type=int, required=True, help="valore di soglia")
    camera_parser.add_argument("-vo", "--valoreoptout", type=int, required=True, help="valore di optout")
    # subcommand filter1
    filter1_parser = subparser.add_parser('filter1', help='Mostra la seguente tabella'
                                                          ' \n|ID_probe|Camera|Filtro|n° di quando è stato applicato|n° di operaziono subite|'
                                                          ' \n   aaa     xxx    Blur          2                            5              ')
    filter1_parser.add_argument("-sp", "--scorepath", required=True, help="path of score csv")
    filter1_parser.add_argument("-hp", "--probehistory", required=True, help="path of probe history")
    filter1_parser.add_argument("-s", "--score", required=True, type=int, help="insert score")
    filter1_parser.add_argument("-op", "--operator", required=True, help="insert operator like over,under,equal,range")
    filter1_parser.add_argument("-s2", "--score2", required=False, type=int, help="insert score2")
    filter1_parser.add_argument("-o", "--operation", required=True, nargs='+', help="insert operation filter")
    # subcommand filter2
    filter2_parser = subparser.add_parser('filter2', help='Mostra la seguente tabella'
                                                          ' \n|ID_probe|Filtro|Operazioni precedenti in base al numero passato|'
                                                          ' \n   aaa     Blur         OutputAVI,AddNoise   2      ')
    filter2_parser.add_argument("-sp", "--scorepath", required=True, help="path of score csv")
    filter2_parser.add_argument("-hp", "--probehistory", required=True, help="path of probe history")
    filter2_parser.add_argument("-s", "--score", required=True, type=int, help="insert score")
    filter2_parser.add_argument("-op", "--operator", required=True, help="insert operator like over,under,equal,range")
    filter2_parser.add_argument("-s2", "--score2", required=False, type=int, help="insert score2")
    filter2_parser.add_argument("-o", "--operation", required=True, nargs='+', help="insert operation filter")
    # subcommand alloperations
    all_operations_parser = subparser.add_parser('alloperations', help='Mostra tutte le operazioni che sono state fatte in tutte le probe')
    all_operations_parser.add_argument("-sp", "--scorepath", required=True, help="path of score csv")
    all_operations_parser.add_argument("-hp", "--probehistory", required=True, help="path of probe history")
    return parser
def main():
parser = get_parser()
args = parser.parse_args()
if args.subcommand == 'create-json-probe-history':
createJsonProbeHistory(probePath=args.probejournaljoin, journalPath=args.journalmask, operationsPath=args.operations)
elif args.subcommand == 'division':
division(finalScorePath=args.scorepath, opHistoryPath=args.probehistory, cameraPath=args.camerapath, valore_soglia=args.valoresoglia, valore_optout=args.valoreoptout)
elif args.subcommand == 'manipulation-analysis':
manipulation_analysis(finalScorePath=args.scorepath, opHistoryPath=args.probehistory, cameraPath=args.camerapath, valore_soglia=args.valoresoglia, valore_optout=args.valoreoptout)
elif args.subcommand == 'camera-analysis':
cameraControl(finalScorePath=args.scorepath, opHistoryPath=args.probehistory, valoresoglia=args.valoresoglia, valoreoptout=args.valoreoptout)
elif args.subcommand == 'filter1':
result = scoreFilter1(finalScorePath=args.scorepath, opHistoryPath=args.probehistory, score=args.score, opFilter=args.operation, operator=args.operator, score2 = args.score2)
printInColumn(result)
elif args.subcommand == | |
'inputs-parameters': {'parameter': []},
'outputs-parameters': {'parameter': []},
'agents': {'agent': []},
}
return prop
def updateTestFileProperties(self, itemId, properties):
    """
    Update the properties of a specific test suite.

    Every entry whose id matches is updated (no early exit, mirroring the
    other update* helpers).

    @param itemId: identifier of the test file entry to update; coerced
                   with str() before comparison because ids are stored
                   as strings
    @type itemId: int or str

    @param properties: new properties dictionary for the entry
    @type properties: dict
    """
    # NOTE(review): unlike the other update* helpers this coerces itemId
    # with str() before comparing — confirm callers rely on that.
    testSuites = self.testplan['testplan']['testfile']
    for testSuite in testSuites:
        if testSuite['id'] == str(itemId):
            testSuite['properties'] = properties
def updateTestFileDescr(self, itemId, descr):
    """
    Set the description of every test file entry matching *itemId*.

    @param itemId: identifier of the test file entry to update
    @type itemId: str

    @param descr: new description text; stored as unicode
    @type descr: str
    """
    for entry in self.testplan['testplan']['testfile']:
        if entry['id'] == itemId:
            entry['description'] = unicode(descr)
def updateTestFileAlias(self, itemId, alias):
    """
    Update the alias of a specific test suite.

    @param itemId: identifier of the test file entry to update
    @type itemId: str

    @param alias: new alias; stored as unicode
    @type alias: str
    """
    testSuites = self.testplan['testplan']['testfile']
    for i in xrange(len(testSuites)):
        if testSuites[i]['id'] == itemId:
            testSuites[i]['alias'] = unicode(alias)
def updateTestFileEnable(self, itemId, enableStatus):
    """
    Update the enable flag of a specific test suite.

    @param itemId: identifier of the test file entry to update
    @type itemId: str

    @param enableStatus: new enable state (2=enabled / 0=disabled);
                         stored as a string
    @type enableStatus: integer
    """
    for testSuite in self.testplan['testplan']['testfile']:
        if testSuite['id'] == itemId:
            testSuite['enable'] = str(enableStatus)
def updateTestFileColor(self, itemId, colorValue):
    """
    Update the color of a specific test suite.

    @param itemId: identifier of the test file entry to update
    @type itemId: str

    @param colorValue: new color value; stored as a string
    @type colorValue: str
    """
    for testSuite in self.testplan['testplan']['testfile']:
        if testSuite['id'] == itemId:
            testSuite['color'] = str(colorValue)
def insertBeforeTestFile(self, fileName, type, itemId, parentId, currentId, properties,
                         descr='', enabled=2, extension='tsx', color="", alias="",
                         parentCondition=IF_OK, control=""):
    """
    Insert a test file entry just before the entry identified by currentId.

    @param fileName: file name of the test to insert
    @type fileName: str

    @param type: entry type
    @type type: str

    @param itemId: id assigned to the new entry
    @type itemId: str

    @param parentId: id of the parent entry
    @type parentId: str

    @param currentId: id of the existing entry to insert before; if no
                      entry matches, nothing is inserted
    @type currentId: str

    @param properties: properties dict for the new entry; updated in
                       place with the file/enable/type/... keys before
                       insertion
    @type properties: dict

    @param descr: description of the new entry
    @type descr: str

    @param enabled: 2=enabled / 0=disabled
    @type enabled: integer
    """
    properties.update({'file': fileName, 'enable': str(enabled),
                       'extension': extension, 'type': type, 'id': itemId,
                       'parent': parentId, 'description': descr, 'color': color,
                       'alias': alias, 'parent-condition': parentCondition, 'control': control})
    testfiles = self.testplan['testplan']['testfile']
    # insert before the first entry whose id matches currentId
    for i, entry in enumerate(testfiles):
        if entry['id'] == currentId:
            testfiles.insert(i, properties)
            break
def insertAfterTestFile(self, fileName, type, itemId, parentId, currentId, properties,
                        descr='', enabled=2, extension='tsx', color="", alias="",
                        parentCondition=IF_OK, control=""):
    """
    Insert a test file entry just after the entry identified by currentId.

    @param fileName: file name of the test to insert
    @type fileName: str

    @param type: entry type
    @type type: str

    @param itemId: id assigned to the new entry
    @type itemId: str

    @param parentId: id of the parent entry
    @type parentId: str

    @param currentId: id of the existing entry to insert after; if no
                      entry matches, nothing is inserted
    @type currentId: str

    @param properties: properties dict for the new entry; updated in
                       place with the file/enable/type/... keys before
                       insertion
    @type properties: dict

    @param descr: description of the new entry
    @type descr: str

    @param enabled: 2=enabled / 0=disabled
    @type enabled: integer
    """
    properties.update({'file': fileName, 'enable': str(enabled), 'extension': extension,
                       'type': type, 'id': itemId, 'parent': parentId, 'description': descr,
                       'color': color, 'alias': alias,
                       'parent-condition': parentCondition, 'control': control})
    testfiles = self.testplan['testplan']['testfile']
    # insert right after the first entry whose id matches currentId
    for i, entry in enumerate(testfiles):
        if entry['id'] == currentId:
            testfiles.insert(i + 1, properties)
            break
def addTestFile(self, fileName, type, itemId, parentId, properties, descr='',
                enabled=2, extension='tsx', color="", alias="", parentCondition=IF_OK,
                control=""):
    """
    Append a test file entry at the end of the test plan.

    @param fileName: file name of the test to add
    @type fileName: str

    @param type: entry type
    @type type: str

    @param itemId: id assigned to the new entry
    @type itemId: str

    @param parentId: id of the parent entry
    @type parentId: str

    @param properties: properties dict for the new entry; updated in
                       place before insertion
    @type properties: dict

    @param descr: description of the new entry
    @type descr: str

    @param enabled: 2=enabled / 0=disabled
    @type enabled: integer
    """
    properties.update({'file': fileName, 'enable': str(enabled), 'extension': extension,
                       'type': type, 'id': itemId, 'parent': parentId, 'description': descr,
                       'color': color, 'alias': alias, 'parent-condition': parentCondition,
                       'control': control})
    root = self.testplan['testplan']
    if 'testfile' in root:
        # normalise a single dict entry into a one-element list first
        if not isinstance(root['testfile'], list):
            root['testfile'] = [root['testfile']]
        root['testfile'].append(properties)
    else:
        root['testfile'] = [properties]
def toXml(self):
    """
    Return the XML representation of the test plan.

    Builds a document containing the serialized properties and testplan
    dictionaries plus the <testdevelopment> timestamp, all wrapped in a
    <file> root element.

    @return: the XML string, or None if serialization failed
    """
    # to avoid bad errors, remove orphan test
    self.fixOrphan()
    try:
        # normalise the dict layout (single item vs list) before encoding
        self.fixPyXML(data=self.testplan['testplan'], key='testfile')
        self.fixPyXML(
            data=self.properties['properties']['inputs-parameters'],
            key='parameter')
        self.fixPyXML(
            data=self.properties['properties']['outputs-parameters'],
            key='parameter')
        self.fixPyXML(
            data=self.properties['properties']['probes'],
            key='probe')
        self.fixPyXML(
            data=self.properties['properties']['agents'],
            key='agent')
        # BEGIN NEW in 2.0.0
        self.fixPyXML(
            data=self.properties['properties']['descriptions'],
            key='description')
        # END NEW in 2.0.0
        # apply the same normalisation to every test file entry
        for ts in self.testplan['testplan']['testfile']:
            if sys.version_info < (3,):  # python3 support
                # issue Issue 258: ensure description is unicode on python 2
                if isinstance(ts['description'], str):
                    ts['description'] = ts['description'].decode('utf8')
                # issue Issue 258:
            self.fixPyXML(
                data=ts['properties']['descriptions'],
                key='description')
            self.fixPyXML(
                data=ts['properties']['inputs-parameters'],
                key='parameter')
            self.fixPyXML(
                data=ts['properties']['outputs-parameters'],
                key='parameter')
            self.fixPyXML(data=ts['properties']['probes'], key='probe')
            self.fixPyXML(data=ts['properties']['agents'], key='agent')
        # assemble the final document line by line
        xmlDataList = ['<?xml version="1.0" encoding="utf-8" ?>']
        xmlDataList.append('<file>')
        if sys.version_info > (3,):  # python3 support
            xmlDataList.append(
                bytes2str(
                    self.codecD2X.parseDict(
                        dico=self.properties)))
            xmlDataList.append(
                bytes2str(
                    self.codecD2X.parseDict(
                        dico=self.testplan)))
        else:
            xmlDataList.append(
                self.codecD2X.parseDict(
                    dico=self.properties))
            xmlDataList.append(self.codecD2X.parseDict(dico=self.testplan))
        xmlDataList.append(
            '<testdevelopment>%s</testdevelopment>' %
            unicode(
                self.testdev))
        xmlDataList.append('</file>')
        ret = '\n'.join(xmlDataList)
        # remove all invalid xml data
        ret = removeInvalidXML(ret)
    except Exception as e:
        self.error("TestPlan > To Xml %s" % str(e))
        ret = None
    return ret
def fixParameterstoUTF8(self, val):
    """
    Decode the value/description/name fields of each parameter from
    UTF-8 bytes to text, in place. (Encoding fix, not pretty.)

    @param val: list of parameter dictionaries
    @type val: list
    """
    for param in val:
        for field in ('value', 'description', 'name'):
            param[field] = param[field].decode("utf-8")
def fixDescriptionstoUTF8(self, val):
    """
    Decode the key/value fields of each description entry from UTF-8
    bytes to text, in place. Dict-valued entries are left untouched.

    @param val: list of description dictionaries
    @type val: list
    """
    for descr in val:
        descr['key'] = descr['key'].decode("utf-8")
        if not isinstance(descr['value'], dict):
            descr['value'] = descr['value'].decode("utf-8")
def onLoad(self, decompressedData):
"""
Called on data model loading
"""
# reset properties
self.properties = {}
self.testplan = {}
decodedStatus = False
# decode content
try:
# Extract xml from the file data
ret = self.codecX2D.parseXml(xml=decompressedData)
except Exception as e:
self.error("TestPlan > Parse Xml %s" % str(e))
else:
try:
testplan = ret['file']['testplan']
properties = ret['file']['properties']
if sys.version_info > (3,): # python3 support
if isinstance(testplan, bytes):
testplan = ''
# BEGIN NEW in 5.2.0
if 'testsuite' in testplan:
testplan['testfile'] = testplan['testsuite']
testplan.pop('testsuite')
if '@testsuite' in testplan:
testplan['@testfile'] = testplan['@testsuite']
testplan.pop('@testsuite')
# END NEW
# BEGIN NEW in 5.1.0 :
if 'testdevelopment' not in ret['file']:
self.testdev = time.time()
else:
if sys.version_info > (3,): # python3 support
self.testdev = ret['file']['testdevelopment']
else:
self.testdev = ret['file']['testdevelopment'].decode(
"utf-8")
# END NEW in 5.1.0
except Exception as e:
self.error(
"TestPlan > extract properties, testplan %s" %
str(e))
else:
try:
# BEGIN NEW in 2.0.0: description and can be missing model
# file
tpl_def = {'description': [{'key': 'author', 'value': ''}, {'key': 'date', 'value': ''},
{'key': 'summary', 'value': ''}, {'key': 'prerequisites', 'value': ''}]}
if 'descriptions' not in properties:
properties['descriptions'] = tpl_def
if not len(testplan):
testplan = {'testfile': [], '@testfile': []}
if isinstance(testplan['testfile'], dict):
testplan['testfile'] = [testplan['testfile']]
for i in xrange(len(testplan['testfile'])):
if 'descriptions' not in testplan['testfile'][i]['properties']:
testplan['testfile'][i]['properties']['descriptions'] = tpl_def
# END NEW in 2.0.0
# NEW in 13.0.0
if 'descriptions' in properties:
foundPrerequis = False
foundRequirement = False
foundComments = False
foundLibraries = False
foundAdapters = False
foundState = False
creationDate = None
foundTestname = False
for kv in properties['descriptions']['description']:
if kv['key'] == 'prerequisites':
foundPrerequis = True
if kv['key'] == 'requirement':
foundRequirement = True
if kv['key'] == 'comments':
foundComments = True
if kv['key'] == 'libraries':
foundLibraries = True
if kv['key'] == 'adapters':
foundAdapters = True
if kv['key'] == 'date':
creationDate = kv['value']
if kv['key'] == 'state':
foundState = True
if kv['key'] == 'name':
foundTestname = True
if not foundPrerequis:
properties['descriptions']['description'].append(
{'key': 'prerequisites', 'value': ''})
if not foundRequirement:
properties['descriptions']['description'].append(
{'key': 'requirement', 'value': 'REQ_01'})
if not foundComments:
properties['descriptions']['description'].append(
{'key': 'comments', 'value': {'comments': {'comment': []}}})
if not foundLibraries:
properties['descriptions']['description'].append(
{'key': 'libraries', 'value': self.defLibrary})
if not foundAdapters:
properties['descriptions']['description'].append(
{'key': 'adapters', 'value': self.defAdapter})
if not foundState:
properties['descriptions']['description'].append(
{'key': 'state', 'value': 'Writing'})
if creationDate is not None:
properties['descriptions']['description'].insert(
1, {'key': 'creation date', 'value': creationDate})
if not foundTestname:
properties['descriptions']['description'].append(
{'key': 'name', 'value': self.testName})
# END NEW in 13.0.0
# to keep the compatibility
if 'outputs-parameters' not in properties:
properties['outputs-parameters'] = {
'parameter': copy.deepcopy(DEFAULT_OUTPUTS)}
if 'inputs-parameters' not in properties:
properties['inputs-parameters'] = properties['parameters']
properties.pop('parameters')
if 'agents' not in properties:
properties['agents'] = {
'agent': copy.deepcopy(DEFAULT_AGENTS)}
# bug fix in 10.1
if properties['agents'] == '' or properties['agents'] == b'': # python3 support
properties['agents'] = {'agent': [], '@agent': []}
# BEGIN NEW in 9.0.0 :
if isinstance(properties['agents']['agent'], dict):
properties['agents']['agent'] = [
properties['agents']['agent']]
for agt in properties['agents']['agent']:
if 'type' not in agt:
agt.update({'type': ''})
# END NEW in 9.0.0
except Exception as e:
self.error(
"TestPlan > fix backward compatibility: %s" %
str(e))
else:
try:
if isinstance(testplan, str) or isinstance(
testplan, bytes): # python3 support
testplan = {'testfile': [], '@testfile': []}
if isinstance(properties['probes'], str) or isinstance(
properties['probes'], bytes): # python3 support
properties['probes'] = {'probe': [], '@probe': []}
if isinstance(properties['inputs-parameters'], str) or isinstance(
properties['inputs-parameters'], bytes): # python3 support
properties['inputs-parameters'] = {
'parameter': [], '@parameter': []}
if isinstance(properties['outputs-parameters'], str) or isinstance(
properties['outputs-parameters'], bytes): # python3 support
properties['outputs-parameters'] = {
'parameter': [], '@parameter': []}
if isinstance(properties['agents'], str) or isinstance(
properties['agents'], bytes): # python3 support
properties['agents'] = {'agent': [], '@agent': []}
self.fixXML(data=properties['probes'], key='probe')
if '@probe' in properties['probes']:
self.fixXML(
data=properties['probes'], key='@probe')
self.fixXML(data=properties['agents'], key='agent')
if '@agent' in properties['agents']:
self.fixXML(
data=properties['agents'], key='@agent')
self.fixXML(
data=properties['inputs-parameters'],
key='parameter')
if '@parameter' in properties['inputs-parameters']:
self.fixXML(
data=properties['inputs-parameters'],
key='@parameter')
self.fixXML(
data=properties['outputs-parameters'],
key='parameter')
if '@parameter' in | |
# Repository: mcraig-ibme/fsl_sub
#!/usr/bin/env python
import copy
import io
import getpass
import os
import socket
import sys
import tempfile
import unittest
import fsl_sub
from ruamel.yaml import YAML
from unittest import skipIf
from unittest.mock import patch
from unittest.mock import MagicMock
from fsl_sub.exceptions import BadSubmission
YAML_CONF = '''---
method: sge
modulecmd: False
thread_control:
- OMP_NUM_THREADS
preserve_modules: True
export_vars: []
method_opts:
sge:
queues: True
large_job_split_pe: shmem
copy_environment: True
affinity_type: linear
affinity_control: threads
script_conf: True
mail_support: True
mail_modes:
b:
- b
e:
- e
a:
- a
f:
- a
- e
- b
n:
- n
mail_mode: a
map_ram: True
thread_ram_divide: True
notify_ram_usage: True
ram_resources:
- m_mem_free
- h_vmem
job_priorities: True
min_priority: -1023
max_priority: 0
array_holds: True
array_limit: True
architecture: False
job_resources: True
projects: True
coproc_opts:
cuda:
resource: gpu
classes: True
class_resource: gputype
class_types:
K:
resource: k80
doc: Kepler. ECC, double- or single-precision workloads
capability: 2
P:
resource: p100
doc: >
Pascal. ECC, double-, single- and half-precision
workloads
capability: 3
default_class: K
include_more_capable: True
uses_modules: True
uses_pe: False
module_parent: cuda
no_binding: True
queues:
gpu.q:
time: 18000
max_size: 250
slot_size: 64
max_slots: 20
copros:
cuda:
max_quantity: 4
classes:
- K
- P
- V
map_ram: true
parallel_envs:
- shmem
priority: 1
group: 0
default: true
a.qa,a.qb,a.qc:
time: 1440
max_size: 160
slot_size: 4
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 3
group: 1
default: true
a.qa,a.qc:
time: 1440
max_size: 240
slot_size: 16
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 2
group: 1
default: true
a.qc:
time: 1440
max_size: 368
slot_size: 16
max_slots: 24
map_ram: true
parallel_envs:
- shmem
priority: 1
group: 1
default: true
b.qa,b.qb,b.qc:
time: 10080
max_size: 160
slot_size: 4
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 3
group: 2
b.qa,b.qc:
time: 10080
max_size: 240
slot_size: 16
max_slots: 16
map_ram: true
parallel_envs:
- shmem
priority: 2
group: 2
b.qc:
time: 10080
max_size: 368
slot_size: 16
max_slots: 24
map_ram: true
parallel_envs:
- shmem
priority: 1
group: 2
t.q:
time: 10080
max_size: 368
slot_size: 16
max_slots: 24
map_ram: true
parallel_envs:
- specialpe
priority: 1
group: 2
default_queues:
- a.qa,a,qb,a.qc
- a.qa,a.qc
- a.qc
'''
# default notification address used in the expected submit kwargs:
# <current user>@<current host>
USER_EMAIL = "{username}@{hostname}".format(
    username=getpass.getuser(),
    hostname=socket.gethostname()
)
# keep a handle on the unpatched config reader before tests patch it
real_read_config = fsl_sub.config.read_config
class FakePlugin(object):
    """Minimal stand-in for an fsl_sub submission plugin used in tests."""

    def submit(self):
        """No-op plugin submit stub."""

    def qtest(self):
        """No-op plugin qtest stub."""

    def queue_exists(self):
        """No-op plugin queue_exists stub."""

    def plugin_version(self):
        """No-op plugin version stub."""

    def already_queued(self):
        """Report that nothing is currently queued."""
        return False
def ShellConfig():
    """Return the real configuration with the method forced to 'shell'."""
    cfg = real_read_config()
    cfg['method'] = 'shell'
    return cfg
@patch('fsl_sub.plugins.fsl_sub_plugin_shell.os.getpid', return_value=111)
@patch('fsl_sub.config.read_config', side_effect=ShellConfig)
@patch('fsl_sub.getq_and_slots', autospec=True, return_value=('vs.q', 2))
class ShellPluginSubmitTests(unittest.TestCase):
    """End-to-end tests of the command line with the 'shell' plugin.

    Bug fix: the KeyError branches used ``self.assertFail`` which is not
    a unittest.TestCase method (it raised AttributeError instead of
    reporting the intended failure); replaced with ``self.fail``.
    """

    def setUp(self):
        # run each test in a private temporary working directory
        self.tempd = tempfile.TemporaryDirectory()
        self.here = os.getcwd()
        os.chdir(self.tempd.name)
        self.addCleanup(self.restore_dir)

    def restore_dir(self):
        os.chdir(self.here)
        self.tempd.cleanup()

    def test_basic_functionality(self, mock_gqas, mock_rc, mock_gp):
        with io.StringIO() as text_trap:
            sys.stdout = text_trap
            fsl_sub.cmdline.main(['-q', 'vs.q', 'echo', 'hello'])
            sys.stdout = sys.__stdout__
            # the mocked pid (111) is printed as the job id
            self.assertEqual(text_trap.getvalue(), '111\n')
            with open(os.path.join(self.tempd.name, 'echo.o111'), mode='r') as ofile:
                output = ofile.read()
            self.assertEqual(output.strip(), 'hello')

    # doesn't work with Python 3.6!
    @skipIf(sys.version_info.major <= 3 and sys.version_info.minor < 8, 'Requires python 3.8+')
    def test_set_fslsub_parallel(self, mock_gqas, mock_rc, mock_gp):
        with patch.dict('fsl_sub.os.environ', {}, clear=True) as mock_env:
            with patch('fsl_sub.cmdline.submit', return_value=111) as mock_submit:
                with io.StringIO() as text_trap:
                    sys.stdout = text_trap
                    fsl_sub.cmdline.main(['-n', '-q', 'vs.q', '-t', 'mytasks'])
                    sys.stdout = sys.__stdout__
                    self.assertEqual(text_trap.getvalue(), '111\n')
                try:
                    self.assertEqual('0', mock_env['FSLSUB_PARALLEL'])
                except KeyError:
                    self.fail("FSLSUB_PARALLEL not set")
                mock_submit.assert_called()

    @skipIf(sys.version_info.major <= 3 and sys.version_info.minor < 8, 'Requires python 3.8+')
    @patch('fsl_sub.parallel.process_pe_def', autospec=True, return_value=('openmp', 2))
    def test_set_fslsub_parallel2(self, mock_ppd, mock_gqas, mock_rc, mock_gp):
        with patch.dict('fsl_sub.os.environ', {}, clear=True) as mock_env:
            with patch('fsl_sub.cmdline.submit', return_value=111) as mock_submit:
                with io.StringIO() as text_trap:
                    sys.stdout = text_trap
                    fsl_sub.cmdline.main(['-n', '-q', 'vs.q', '-s', 'openmp,2', '-t', 'mytasks', ])
                    sys.stdout = sys.__stdout__
                    self.assertEqual(text_trap.getvalue(), '111\n')
                try:
                    self.assertEqual('0', mock_env['FSLSUB_PARALLEL'])
                except KeyError:
                    self.fail("FSLSUB_PARALLEL not set")
                mock_submit.assert_called()

    @skipIf(sys.version_info.major <= 3 and sys.version_info.minor < 8, 'Requires python 3.8+')
    @patch('fsl_sub.parallel.process_pe_def', autospec=True, return_value=('openmp', 2))
    def test_set_fslsub_parallel3(self, mock_ppd, mock_gqas, mock_rc, mock_gp):
        # a pre-existing FSLSUB_PARALLEL value must be preserved
        with patch.dict('fsl_sub.os.environ', {'FSLSUB_PARALLEL': '4'}, clear=True) as mock_env:
            with patch('fsl_sub.cmdline.submit', return_value=111) as mock_submit:
                with io.StringIO() as text_trap:
                    sys.stdout = text_trap
                    fsl_sub.cmdline.main(['-n', '-q', 'vs.q', '-s', 'openmp,2', '-t', 'mytasks', ])
                    sys.stdout = sys.__stdout__
                    self.assertEqual(text_trap.getvalue(), '111\n')
                try:
                    self.assertEqual('4', mock_env['FSLSUB_PARALLEL'])
                except KeyError:
                    self.fail("FSLSUB_PARALLEL not set")
                mock_submit.assert_called()
@patch.dict(
'fsl_sub.os.environ', {}, clear=True
)
@patch(
'fsl_sub.shell_modules.read_config',
autospec=True,
return_value=YAML(typ='safe').load(YAML_CONF))
@patch(
'fsl_sub.read_config',
autospec=True,
return_value=YAML(typ='safe').load(YAML_CONF))
@patch(
'fsl_sub.config.read_config',
autospec=True,
return_value=YAML(typ='safe').load(YAML_CONF))
@patch(
'fsl_sub.load_plugins',
autospec=True
)
@patch(
'fsl_sub.check_command',
autospec=True,
return_value=True
)
@patch(
'fsl_sub.projects.project_list',
autospec=True,
return_value=['a', 'b', ])
class SubmitTests(unittest.TestCase):
def setUp(self):
    """Parse a fresh copy of the YAML config and build the default kwargs
    expected to be forwarded to the plugin submit() call."""
    self.base_config = YAML(typ='safe').load(YAML_CONF)
    # kwargs fsl_sub.submit() should pass to the plugin when called with
    # no overriding options; individual tests deep-copy and tweak this
    self.base_args = {
        'architecture': None,
        'array_hold': None,
        'array_limit': None,
        'array_specifier': None,
        'array_task': False,
        'coprocessor': None,
        'coprocessor_toolkit': None,
        'coprocessor_class': None,
        'coprocessor_class_strict': False,
        'coprocessor_multi': '1',
        'export_vars': [
            'OMP_NUM_THREADS=1',
            'FSLSUB_PARALLEL=1',
        ],
        'job_name': 'mycommand',
        'parallel_env': None,
        'queue': 'a.qa,a.qb,a.qc',
        'threads': 1,
        'jobhold': None,
        'jobram': None,
        'jobtime': None,
        'keep_jobscript': False,
        'logdir': None,
        'mail_on': 'a',
        'mailto': USER_EMAIL,
        'priority': None,
        'ramsplit': True,
        'requeueable': True,
        'resources': None,
        'usescript': False,
        'project': None
    }
def test_unknown_queue(
        self, mock_prjl, mock_checkcmd, mock_loadplugins,
        mock_confrc, mock_rc, mock_smrc):
    """A queue absent from the configuration is still forwarded verbatim."""
    sge = FakePlugin()
    sge.submit = MagicMock(name='submit')
    sge.qtest = MagicMock(name='qtest')
    sge.qtest.return_value = '/usr/bin/qconf'
    sge.queue_exists = MagicMock(
        name='queue_exists')
    sge.queue_exists.return_value = True
    sge.BadSubmission = BadSubmission
    mock_loadplugins.return_value = {'fsl_sub_plugin_sge': sge, }
    fsl_sub.submit(['mycommand', ], queue='unconfigured.q')
    expected = copy.deepcopy(self.base_args)
    expected['queue'] = 'unconfigured.q'
    sge.submit.assert_called_with(
        ['mycommand', ],
        **expected)
def test_mem_env(
        self, mock_prjl, mock_checkcmd, mock_loadplugins,
        mock_confrc, mock_rc, mock_smrc):
    """FSLSUB_MEMORY_REQUIRED environment variable feeds the jobram value
    (and hence queue selection) when no explicit jobram is given."""
    plugins = {}
    plugins['fsl_sub_plugin_sge'] = FakePlugin()
    plugins['fsl_sub_plugin_sge'].submit = MagicMock(name='submit')
    plugins['fsl_sub_plugin_sge'].qtest = MagicMock(name='qtest')
    plugins['fsl_sub_plugin_sge'].qtest.return_value = '/usr/bin/qconf'
    plugins['fsl_sub_plugin_sge'].queue_exists = MagicMock(
        name='queue_exists')
    plugins['fsl_sub_plugin_sge'].queue_exists.return_value = True
    plugins['fsl_sub_plugin_sge'].BadSubmission = BadSubmission
    mock_loadplugins.return_value = plugins
    with self.subTest('env not set - no memory specified'):
        fsl_sub.submit(['mycommand', ], jobram=None)
        plugins['fsl_sub_plugin_sge'].submit.assert_called_with(
            ['mycommand', ],
            **self.base_args
        )
    plugins['fsl_sub_plugin_sge'].submit.reset_mock()
    with self.subTest('env set - no memory specified'):
        # 8G in the env becomes jobram=8 and selects the larger queue
        with patch.dict(
                'fsl_sub.os.environ',
                {'FSLSUB_MEMORY_REQUIRED': '8G', },
                clear=True):
            test_args = copy.deepcopy(self.base_args)
            test_args['queue'] = 'a.qa,a.qc'
            test_args['jobram'] = 8
            fsl_sub.submit(['mycommand', ], jobram=None)
            plugins['fsl_sub_plugin_sge'].submit.assert_called_with(
                ['mycommand', ],
                **test_args
            )
    plugins['fsl_sub_plugin_sge'].submit.reset_mock()
    with self.subTest('env set no units - no memory specified'):
        # a bare number is treated the same as the default unit form
        with patch.dict(
                'fsl_sub.os.environ',
                {'FSLSUB_MEMORY_REQUIRED': '8', },
                clear=True):
            test_args = copy.deepcopy(self.base_args)
            test_args['queue'] = 'a.qa,a.qc'
            test_args['jobram'] = 8
            fsl_sub.submit(['mycommand', ], jobram=None)
            plugins['fsl_sub_plugin_sge'].submit.assert_called_with(
                ['mycommand', ],
                **test_args
            )
    plugins['fsl_sub_plugin_sge'].submit.reset_mock()
    with self.subTest('env set small - no memory specified'):
        # sub-gigabyte request rounds up to 1 and keeps the default queue
        with patch.dict(
                'fsl_sub.os.environ',
                {'FSLSUB_MEMORY_REQUIRED': '32M', },
                clear=True):
            test_args = copy.deepcopy(self.base_args)
            test_args['queue'] = 'a.qa,a.qb,a.qc'
            test_args['jobram'] = 1
            fsl_sub.submit(['mycommand', ], jobram=None)
            plugins['fsl_sub_plugin_sge'].submit.assert_called_with(
                ['mycommand', ],
                **test_args
            )
    plugins['fsl_sub_plugin_sge'].submit.reset_mock()
def test_projects_env(
        self, mock_prjl, mock_checkcmd, mock_loadplugins,
        mock_confrc, mock_rc, mock_smrc):
    """Submitting without a project forwards the unchanged defaults."""
    sge = FakePlugin()
    sge.submit = MagicMock(name='submit')
    sge.qtest = MagicMock(name='qtest')
    sge.qtest.return_value = '/usr/bin/qconf'
    sge.queue_exists = MagicMock(
        name='queue_exists')
    sge.queue_exists.return_value = True
    sge.BadSubmission = BadSubmission
    mock_loadplugins.return_value = {'fsl_sub_plugin_sge': sge, }
    sge.submit.reset_mock()
    with self.subTest('env not set - no memory specified'):
        fsl_sub.submit(['mycommand', ], project=None)
        sge.submit.assert_called_with(
            ['mycommand', ],
            **self.base_args
        )
    sge.submit.reset_mock()
def test_stringcommand(
        self, mock_prjl, mock_checkcmd, mock_loadplugins,
        mock_confrc, mock_rc, mock_smrc):
    """A whitespace-separated command string is split into an argv list."""
    sge = FakePlugin()
    sge.submit = MagicMock(name='submit')
    sge.qtest = MagicMock(name='qtest')
    sge.qtest.return_value = '/usr/bin/qconf'
    sge.queue_exists = MagicMock(
        name='queue_exists')
    sge.queue_exists.return_value = True
    sge.BadSubmission = BadSubmission
    mock_loadplugins.return_value = {'fsl_sub_plugin_sge': sge, }
    with self.subTest('env not set - no memory specified'):
        fsl_sub.submit('mycommand arg1 arg2')
        sge.submit.assert_called_with(
            ['mycommand', 'arg1', 'arg2', ],
            **self.base_args
        )
    sge.submit.reset_mock()
def test_listcommand(
        self, mock_prjl, mock_checkcmd, mock_loadplugins,
        mock_confrc, mock_rc, mock_smrc):
    """An argv list is forwarded to the plugin unchanged."""
    sge = FakePlugin()
    sge.submit = MagicMock(name='submit')
    sge.qtest = MagicMock(name='qtest')
    sge.qtest.return_value = '/usr/bin/qconf'
    sge.queue_exists = MagicMock(
        name='queue_exists')
    sge.queue_exists.return_value = True
    sge.BadSubmission = BadSubmission
    mock_loadplugins.return_value = {'fsl_sub_plugin_sge': sge, }
    with self.subTest('env not set - no memory specified'):
        fsl_sub.submit(['mycommand', 'arg1', 'arg2', ])
        sge.submit.assert_called_with(
            ['mycommand', 'arg1', 'arg2', ],
            **self.base_args
        )
    sge.submit.reset_mock()
def test_usespe(
        self, mock_prjl, mock_checkcmd, mock_loadplugins,
        mock_confrc, mock_rc, mock_smrc):
    """Behaviour of coprocessor submission when 'uses_pe' is configured:
    GPU count maps to PE threads, and misconfigurations raise
    BadSubmission with specific messages."""
    test_conf = copy.deepcopy(self.base_config)
    test_conf['coproc_opts']['cuda']['uses_pe'] = 'shmem'
    mock_confrc.return_value = test_conf
    mock_rc.return_value = test_conf
    # expected kwargs: two GPUs -> two threads in the shmem PE on gpu.q
    test_args = copy.deepcopy(self.base_args)
    test_args['coprocessor'] = 'cuda'
    test_args['coprocessor_multi'] = '2'
    test_args['threads'] = 2
    test_args['parallel_env'] = 'shmem'
    test_args['queue'] = 'gpu.q'
    test_args['export_vars'] = [
        'OMP_NUM_THREADS=2',
        'FSLSUB_PARALLEL=2',
    ]
    plugins = {}
    plugins['fsl_sub_plugin_sge'] = FakePlugin()
    plugins['fsl_sub_plugin_sge'].submit = MagicMock(name='submit')
    plugins['fsl_sub_plugin_sge'].qtest = MagicMock(name='qtest')
    plugins['fsl_sub_plugin_sge'].qtest.return_value = '/usr/bin/qconf'
    plugins['fsl_sub_plugin_sge'].queue_exists = MagicMock(
        name='queue_exists')
    plugins['fsl_sub_plugin_sge'].queue_exists.return_value = True
    plugins['fsl_sub_plugin_sge'].BadSubmission = BadSubmission
    mock_loadplugins.return_value = plugins
    with self.subTest('MultiGPU with uses_pe'):
        fsl_sub.submit(
            ['mycommand', 'arg1', 'arg2', ],
            coprocessor='cuda',
            coprocessor_multi='2')
        plugins['fsl_sub_plugin_sge'].submit.assert_called_with(
            ['mycommand', 'arg1', 'arg2', ],
            **test_args
        )
    plugins['fsl_sub_plugin_sge'].submit.reset_mock()
    with self.subTest('MultiGPU with uses_pe - missing PE'):
        # PE name not configured on the gpu.q queue -> error
        test_conf['coproc_opts']['cuda']['uses_pe'] = 'openmp'
        mock_confrc.return_value = test_conf
        mock_rc.return_value = test_conf
        with self.assertRaises(BadSubmission) as eo:
            fsl_sub.submit(
                ['mycommand', 'arg1', 'arg2', ],
                coprocessor='cuda',
                coprocessor_multi='2')
        self.assertEqual(
            str(eo.exception),
            "uses_pe set but selected queue gpu.q does not have PE openmp configured")
    with self.subTest('MultiGPU with uses_pe - too many slots'):
        # requesting more GPUs than the queue has slots -> error
        test_conf['coproc_opts']['cuda']['uses_pe'] = 'shmem'
        test_conf['queues']['gpu.q']['max_slots'] = 2
        mock_confrc.return_value = test_conf
        mock_rc.return_value = test_conf
        with self.assertRaises(BadSubmission) as eo:
            fsl_sub.submit(
                ['mycommand', 'arg1', 'arg2', ],
                coprocessor='cuda',
                coprocessor_multi='4')
        self.assertEqual(
            str(eo.exception),
            "More GPUs than queue slots have been requested")
    with self.subTest('MultiGPU with uses_pe - complex multigpu'):
        # complex (comma-separated) GPU spec is incompatible with uses_pe
        test_conf['coproc_opts']['cuda']['uses_pe'] = 'shmem'
        mock_confrc.return_value = test_conf
        mock_rc.return_value = test_conf
        with self.assertRaises(BadSubmission) as eo:
            fsl_sub.submit(
                ['mycommand', 'arg1', 'arg2', ],
                coprocessor='cuda',
                coprocessor_multi='1,2')
        self.assertEqual(
            str(eo.exception),
            "Specified coprocessor_multi argument is a complex value but cluster configured with 'uses_pe'"
            " which requires a simple integer")
def test_fsl_sub_config(
self, mock_prjl, mock_checkcmd, mock_loadplugins,
mock_confrc, mock_rc, mock_smrc):
plugins = {}
plugins['fsl_sub_plugin_sge'] = FakePlugin()
plugins['fsl_sub_plugin_sge'].submit = MagicMock(name='submit')
plugins['fsl_sub_plugin_sge'].qtest = MagicMock(name='qtest')
| |
# -*- coding: utf-8 -*-
"""
mdfstudio utility functions and classes
Edit history
Author : yda
Date : 2020-11-12
Package name changed - asammdf to mdfstudio
Functions
---------
* get_text_v3 - Apply UHC encoding
* ChannelsDB.add - Do not add channel of same entry
"""
from functools import lru_cache
import logging
from pathlib import Path
from random import randint
import re
import string
from struct import Struct
import subprocess
import sys
from tempfile import TemporaryDirectory
import xml.etree.ElementTree as ET
# character-encoding detection: prefer cchardet (fast C implementation),
# then chardet, finally a simple trial-decode fallback.
# NOTE(review): the bare excepts also hide failures other than a missing
# package — consider narrowing to `except ImportError`.
try:
    from cchardet import detect
except:
    try:
        from chardet import detect
    except:
        def detect(text):
            # try a fixed list of common encodings; the for/else leaves
            # encoding as None when none of them can decode the input
            for encoding in ("utf-8", "latin-1", "cp1250", "cp1252"):
                try:
                    text.decode(encoding)
                    break
                except:
                    continue
            else:
                encoding = None
            return {"encoding": encoding}
import canmatrix.formats
import numpy as np
from numpy import arange, interp, where
from pandas import Series
from . import v2_v3_constants as v3c
from . import v4_constants as v4c
# pre-compiled little-endian struct (un)packers used by the MDF block parsers
UINT8_u = Struct("<B").unpack
UINT16_u = Struct("<H").unpack
UINT32_p = Struct("<I").pack
UINT32_u = Struct("<I").unpack
UINT64_u = Struct("<Q").unpack
UINT8_uf = Struct("<B").unpack_from
UINT16_uf = Struct("<H").unpack_from
UINT32_uf = Struct("<I").unpack_from
UINT64_uf = Struct("<Q").unpack_from
FLOAT64_u = Struct("<d").unpack
FLOAT64_uf = Struct("<d").unpack_from
TWO_UINT64_u = Struct("<2Q").unpack
TWO_UINT64_uf = Struct("<2Q").unpack_from
# common MDF v4 block header: 4-byte id, 4 reserved bytes, uint64 length
BLK_COMMON_uf = Struct("<4s4xQ").unpack_from
BLK_COMMON_u = Struct("<4s4xQ8x").unpack
# matches xmlns="..." attributes so they can be stripped from XML fragments
_xmlns_pattern = re.compile(' xmlns="[^"]*"')
logger = logging.getLogger("mdfstudio")
__all__ = [
    "CHANNEL_COUNT",
    "CONVERT",
    "MERGE",
    "ChannelsDB",
    "UniqueDB",
    "MdfException",
    "get_fmt_v3",
    "get_fmt_v4",
    "get_text_v4",
    "fmt_to_datatype_v3",
    "fmt_to_datatype_v4",
    "matlab_compatible",
    "extract_cncomment_xml",
    "validate_version_argument",
    "MDF2_VERSIONS",
    "MDF3_VERSIONS",
    "MDF4_VERSIONS",
    "SUPPORTED_VERSIONS",
]
# CONVERT/MERGE buffer sizes (bytes) interpolated over channel counts in
# steps of 1000 up to 20000
CHANNEL_COUNT = (1000, 2000, 10000, 20000)
_channel_count = arange(0, 20000, 1000, dtype="<u4")
CONVERT = (10 * 2 ** 20, 20 * 2 ** 20, 30 * 2 ** 20, 40 * 2 ** 20)
CONVERT = interp(_channel_count, CHANNEL_COUNT, CONVERT).astype("<u4")
MERGE = (10 * 2 ** 20, 20 * 2 ** 20, 35 * 2 ** 20, 60 * 2 ** 20)
MERGE = interp(_channel_count, CHANNEL_COUNT, MERGE).astype("<u4")
CHANNEL_COUNT = _channel_count
MDF2_VERSIONS = ("2.00", "2.10", "2.14")
MDF3_VERSIONS = ("3.00", "3.10", "3.20", "3.30")
MDF4_VERSIONS = ("4.00", "4.10", "4.11", "4.20")
SUPPORTED_VERSIONS = MDF2_VERSIONS + MDF3_VERSIONS + MDF4_VERSIONS
# characters permitted in Matlab variable names
ALLOWED_MATLAB_CHARS = set(string.ascii_letters + string.digits + "_")
class MdfException(Exception):
    """Exception raised for MDF-specific errors."""
def extract_cncomment_xml(comment):
    """extract *TX* tag or otherwise the *common_properties* from a xml comment

    Parameters
    ----------
    comment : str
        xml string comment

    Returns
    -------
    comment : str
        extracted string (the input is returned unchanged when it is not
        parseable XML)
    """
    comment = comment.replace(' xmlns="http://www.asam.net/mdf/v4"', "")
    try:
        comment = ET.fromstring(comment)
        match = comment.find(".//TX")
        if match is None:
            common_properties = comment.find(".//common_properties")
            if common_properties is not None:
                fields = [
                    f'{e.get("name")}: {e.text}'
                    for e in common_properties
                ]
                # bug fix: join the collected "name: value" fields, not the
                # characters of the last field
                comment = "\n".join(fields)
            else:
                comment = ""
        else:
            comment = match.text or ""
    except ET.ParseError:
        pass
    return comment
def matlab_compatible(name):
    """ make a channel name compatible with Matlab variable naming

    Parameters
    ----------
    name : str
        channel name

    Returns
    -------
    compatible_name : str
        channel name compatible with Matlab
    """
    allowed = set(string.ascii_letters + string.digits + "_")
    compatible_name = "".join(ch if ch in allowed else "_" for ch in name)
    # robustness fix: an empty name used to raise IndexError on the [0]
    # check; empty or non-letter-leading names get the "M_" prefix since
    # Matlab identifiers must start with a letter
    if not compatible_name or compatible_name[0] not in string.ascii_letters:
        compatible_name = "M_" + compatible_name
    # max variable name is 63 and 3 chars are reserved
    # for get_unique_name in case of multiple channel name occurence
    return compatible_name[:60]
def get_text_v3(address, stream, mapped=False, decode=True):
    """ faster way to extract strings from mdf versions 2 and 3 TextBlock

    Parameters
    ----------
    address : int
        TextBlock address (0 means "no text")
    stream : handle
        file IO handle, or a bytes-like/mmap object when `mapped` is True
    mapped : bool
        index the stream by offset instead of seek/read
    decode : bool
        decode the payload to str; otherwise return raw bytes

    Returns
    -------
    text : str | bytes
        unicode string ('' / b'' for a nil address or a non-TX block)
    """
    if address == 0:
        return "" if decode else b""

    if mapped:
        # block layout: 2-byte id "TX", 2-byte total block size, then payload
        block_id = stream[address : address + 2]
        if block_id != b"TX":
            return "" if decode else b""
        (size,) = UINT16_uf(stream, address + 2)
        text_bytes = stream[address + 4 : address + size].strip(b" \r\t\n\0")
    else:
        stream.seek(address)
        block_id = stream.read(2)
        if block_id != b"TX":
            return "" if decode else b""
        # subtract the 4 header bytes already consumed from the block size
        size = UINT16_u(stream.read(2))[0] - 4
        text_bytes = stream.read(size).strip(b" \r\t\n\0")

    if not decode:
        return text_bytes

    try:
        # latin-1 can decode any byte sequence, so it is the cheap default;
        # only re-decode when the detector reports UHC (Korean) content
        encoding = detect(text_bytes)["encoding"]
        text = text_bytes.decode("latin-1")
        if encoding == 'UHC':
            text = text_bytes.decode(encoding, "ignore")
    except UnicodeDecodeError:
        try:
            encoding = detect(text_bytes)["encoding"]
            text = text_bytes.decode(encoding, "ignore")
        except Exception:
            # narrowed from a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit; detect() may return None encoding
            text = "<!text_decode_error>"
    return text
def get_text_v4(address, stream, mapped=False, decode=True):
    """ faster way to extract strings from mdf version 4 TextBlock

    Parameters
    ----------
    address : int
        TextBlock address (0 means "no text")
    stream : handle
        file IO handle, or a bytes-like/mmap object when `mapped` is True
    mapped : bool
        index the stream by offset instead of seek/read
    decode : bool
        decode the payload to str; otherwise return raw bytes

    Returns
    -------
    text : str | bytes
        unicode string ('' / b'' for a nil address or a non ##TX/##MD block)
    """
    if address == 0:
        return "" if decode else b""

    if mapped:
        # 24-byte common block header (id + size) precedes the text payload
        block_id, size = BLK_COMMON_uf(stream, address)
        if block_id not in (b"##TX", b"##MD"):
            return "" if decode else b""
        text_bytes = stream[address + 24 : address + size].strip(b" \r\t\n\0")
    else:
        stream.seek(address)
        block_id, size = BLK_COMMON_u(stream.read(24))
        if block_id not in (b"##TX", b"##MD"):
            return "" if decode else b""
        text_bytes = stream.read(size - 24).strip(b" \r\t\n\0")

    if not decode:
        return text_bytes

    try:
        # MDF v4 text blocks are nominally UTF-8; fall back to detection
        text = text_bytes.decode("utf-8")
    except UnicodeDecodeError:
        try:
            encoding = detect(text_bytes)["encoding"]
            text = text_bytes.decode(encoding, "ignore")
        except Exception:
            # narrowed from a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit; detect() may return None encoding
            text = "<!text_decode_error>"
    return text
def sanitize_xml(text):
    """Strip xmlns namespace declarations (matched by the module-level
    _xmlns_pattern) so ElementTree lookups can use plain tag names."""
    return re.sub(_xmlns_pattern, "", text)
def extract_display_name(comment):
    """Return the ``names/display`` text from a channel comment XML string,
    falling back to ``names/name``, or '' when neither is present or the
    comment cannot be parsed.
    """
    try:
        # parse once and reuse the tree for both lookups (the previous code
        # re-parsed the comment for the fallback lookup)
        root = ET.fromstring(sanitize_xml(comment))
        element = root.find(".//names/display")
        if element is None:
            element = root.find(".//names/name")
        display_name = element.text or "" if element is not None else ""
    except Exception:
        # narrowed from a bare except; malformed or None comments end up here
        display_name = ""
    return display_name
@lru_cache(maxsize=1024)
def get_fmt_v3(data_type, size, byte_order=v3c.BYTE_ORDER_INTEL):
    """convert mdf versions 2 and 3 channel data type to numpy dtype format
    string

    Parameters
    ----------
    data_type : int
        mdf channel data type
    size : int
        data bit size
    byte_order : int
        default byte order of the file; used only for data types that do not
        encode their own endianness

    Returns
    -------
    fmt : str
        numpy compatible data type format string
    """
    if data_type in (v3c.DATA_TYPE_STRING, v3c.DATA_TYPE_BYTEARRAY):
        # string/bytearray sizes arrive in bits; numpy wants bytes
        size = size // 8
        if data_type == v3c.DATA_TYPE_STRING:
            fmt = f"S{size}"
        else:
            fmt = f"({size},)u1"
    else:
        if size > 64 and data_type in (
            v3c.DATA_TYPE_UNSIGNED_INTEL,
            v3c.DATA_TYPE_UNSIGNED,
            v3c.DATA_TYPE_UNSIGNED_MOTOROLA,
        ):
            # unsigned integers wider than 64 bits have no numpy integer
            # dtype; expose them as a raw byte array instead
            fmt = f"({size // 8},)u1"
        else:
            # round the bit size up to the next standard container (bytes)
            if size <= 8:
                size = 1
            elif size <= 16:
                size = 2
            elif size <= 32:
                size = 4
            elif size <= 64:
                size = 8
            else:
                size = size // 8
            # "_INTEL"/"_MOTOROLA" types carry explicit endianness; the plain
            # variants fall back to the file-level byte_order.
            # NOTE(review): an unrecognized data_type leaves fmt unbound and
            # raises NameError at the return below — confirm callers prevalidate
            if data_type == v3c.DATA_TYPE_UNSIGNED_INTEL:
                fmt = f"<u{size}"
            elif data_type == v3c.DATA_TYPE_UNSIGNED:
                if byte_order == v3c.BYTE_ORDER_INTEL:
                    fmt = f"<u{size}"
                else:
                    fmt = f">u{size}"
            elif data_type == v3c.DATA_TYPE_UNSIGNED_MOTOROLA:
                fmt = f">u{size}"
            elif data_type == v3c.DATA_TYPE_SIGNED_INTEL:
                fmt = f"<i{size}"
            elif data_type == v3c.DATA_TYPE_SIGNED:
                if byte_order == v3c.BYTE_ORDER_INTEL:
                    fmt = f"<i{size}"
                else:
                    fmt = f">i{size}"
            elif data_type == v3c.DATA_TYPE_SIGNED_MOTOROLA:
                fmt = f">i{size}"
            elif data_type in (v3c.DATA_TYPE_FLOAT_INTEL, v3c.DATA_TYPE_DOUBLE_INTEL):
                fmt = f"<f{size}"
            elif data_type in (
                v3c.DATA_TYPE_FLOAT_MOTOROLA,
                v3c.DATA_TYPE_DOUBLE_MOTOROLA,
            ):
                fmt = f">f{size}"
            elif data_type in (v3c.DATA_TYPE_FLOAT, v3c.DATA_TYPE_DOUBLE):
                if byte_order == v3c.BYTE_ORDER_INTEL:
                    fmt = f"<f{size}"
                else:
                    fmt = f">f{size}"
    return fmt
@lru_cache(maxsize=1024)
def get_fmt_v4(data_type, size, channel_type=v4c.CHANNEL_TYPE_VALUE):
    """convert mdf version 4 channel data type to numpy dtype format string

    Parameters
    ----------
    data_type : int
        mdf channel data type
    size : int
        data bit size
    channel_type: int
        mdf channel type; non-value channels (e.g. VLSD) store an offset
        instead of the payload itself

    Returns
    -------
    fmt : str
        numpy compatible data type format string
    """
    if data_type in v4c.NON_SCALAR_TYPES:
        # byte-array/string sizes arrive in bits; numpy wants bytes
        size = size // 8
        if data_type in (
            v4c.DATA_TYPE_BYTEARRAY,
            v4c.DATA_TYPE_MIME_STREAM,
            v4c.DATA_TYPE_MIME_SAMPLE,
        ):
            if channel_type == v4c.CHANNEL_TYPE_VALUE:
                fmt = f"({size},)u1"
            else:
                # non-value channels hold an unsigned offset, not the payload
                fmt = f"<u{size}"
        elif data_type in v4c.STRING_TYPES:
            if channel_type == v4c.CHANNEL_TYPE_VALUE:
                fmt = f"S{size}"
            else:
                fmt = f"<u{size}"
        elif data_type == v4c.DATA_TYPE_CANOPEN_DATE:
            # opaque 7-byte CANopen date structure
            fmt = "V7"
        elif data_type == v4c.DATA_TYPE_CANOPEN_TIME:
            # opaque 6-byte CANopen time structure
            fmt = "V6"
    elif channel_type in v4c.VIRTUAL_TYPES:
        # virtual channels are always 64 bit wide
        if data_type == v4c.DATA_TYPE_UNSIGNED_INTEL:
            fmt = "<u8"
        elif data_type == v4c.DATA_TYPE_UNSIGNED_MOTOROLA:
            fmt = ">u8"
        elif data_type == v4c.DATA_TYPE_SIGNED_INTEL:
            fmt = "<i8"
        elif data_type == v4c.DATA_TYPE_SIGNED_MOTOROLA:
            fmt = ">i8"
        elif data_type == v4c.DATA_TYPE_REAL_INTEL:
            fmt = "<f8"
        elif data_type == v4c.DATA_TYPE_REAL_MOTOROLA:
            fmt = ">f8"
        elif data_type == v4c.DATA_TYPE_COMPLEX_INTEL:
            fmt = "<c8"
        elif data_type == v4c.DATA_TYPE_COMPLEX_MOTOROLA:
            fmt = ">c8"
    else:
        if size > 64 and data_type in (
            v4c.DATA_TYPE_UNSIGNED_INTEL,
            v4c.DATA_TYPE_UNSIGNED,
        ):
            # unsigned integers wider than 64 bits have no numpy integer
            # dtype; expose them as a raw byte array instead
            fmt = f"({size // 8},)u1"
        else:
            # round the bit size up to the next standard container (bytes)
            if size <= 8:
                size = 1
            elif size <= 16:
                size = 2
            elif size <= 32:
                size = 4
            elif size <= 64:
                size = 8
            else:
                size = size // 8
            # NOTE(review): an unrecognized data_type leaves fmt unbound and
            # raises NameError at the return below — confirm callers prevalidate
            if data_type == v4c.DATA_TYPE_UNSIGNED_INTEL:
                fmt = f"<u{size}"
            elif data_type == v4c.DATA_TYPE_UNSIGNED_MOTOROLA:
                fmt = f">u{size}"
            elif data_type == v4c.DATA_TYPE_SIGNED_INTEL:
                fmt = f"<i{size}"
            elif data_type == v4c.DATA_TYPE_SIGNED_MOTOROLA:
                fmt = f">i{size}"
            elif data_type == v4c.DATA_TYPE_REAL_INTEL:
                if size == 1:
                    # sub-byte floats map to numpy half precision; "<f1" is
                    # not a valid numpy dtype
                    fmt = "<f2"
                else:
                    fmt = f"<f{size}"
            elif data_type == v4c.DATA_TYPE_REAL_MOTOROLA:
                # NOTE(review): no size==1 special case on the Motorola
                # branch, unlike the Intel branch above — confirm intended
                fmt = f">f{size}"
            elif data_type == v4c.DATA_TYPE_COMPLEX_INTEL:
                fmt = f"<c{size}"
            elif data_type == v4c.DATA_TYPE_COMPLEX_MOTOROLA:
                fmt = f">c{size}"
    return fmt
@lru_cache(maxsize=1024)
def fmt_to_datatype_v3(fmt, shape, array=False):
"""convert numpy dtype format string to mdf versions 2 and 3
channel data type and size
Parameters
----------
fmt : numpy.dtype
numpy data type
shape : tuple
numpy array shape
array : bool
disambiguate between bytearray and channel array
Returns
-------
data_type, size : | |
normalization_axes = x.axes.sample_axes() - x.axes.recurrent_axis()
self.x = x - max(x, reduction_axes=normalization_axes)
self.exps = exp(self.x)
self.Z = sum(self.exps, reduction_axes=normalization_axes)
self.value_tensor = self.exps / self.Z
self.value_tensor.deriv_handler = self
def generate_adjoints(self, adjoints, delta):
"""
TODO.
Arguments:
adjoints: TODO
delta: TODO
op: TODO
Returns:
TODO
"""
z = delta * self.value_tensor
zs = sum(z)
self.x.generate_add_delta(adjoints, (z - zs * self.value_tensor))
def softmax(x, normalization_axes=None, **kwargs):
    """Return the softmax of x, normalized over normalization_axes
    (when None, the op normalizes over the sample axes minus the
    recurrent axis)."""
    op = SoftmaxOp(x, normalization_axes, **kwargs)
    return op.value_tensor
class ReductionOp(TensorOp):
    """
    Base class for ops that reduce a tensor over a set of axes
    (Max, Min, Sum, Prod, Argmax, Argmin below are built on it).

    Arguments:
        x: The tensor to be reduced.
        reduction_axes: Axes to reduce over; derived from out_axes when None.
        out_axes: Axes of the result; derived from reduction_axes when None.
        dtype: Output dtype.
    """
    def __init__(self, x, reduction_axes=None, out_axes=None, dtype=None, **kwargs):
        reduction_axes, out_axes = compute_reduction_axes(x, reduction_axes, out_axes)
        self.reduction_axes = reduction_axes
        # NOTE(review): kwargs are stored but not forwarded to
        # TensorOp.__init__ — confirm this is intentional
        self.kwargs = kwargs
        super(ReductionOp, self).__init__(
            args=(x,),
            axes=out_axes,
            dtype=dtype
        )
    def copy_with_new_args(self, args):
        # recreate the same reduction for new arguments
        return type(self)(*args, reduction_axes=self.reduction_axes)
def compute_reduction_axes(x, reduction_axes, out_axes):
    """
    Resolve the (reduction_axes, out_axes) pair for a reduction over x.

    Whichever of the two is missing is derived from the other; when both are
    missing, every sample axis except the recurrent axis is reduced. The two
    sets must be disjoint, must jointly cover x.axes, and out_axes must keep
    the ordering of x.axes.
    """
    if reduction_axes is None and out_axes is None:
        reduction_axes = x.axes.sample_axes() - x.axes.recurrent_axis()
        out_axes = x.axes - reduction_axes
    elif reduction_axes is None:
        out_axes = make_axes(out_axes)
        reduction_axes = x.axes - out_axes
    elif out_axes is None:
        reduction_axes = make_axes(reduction_axes)
        out_axes = x.axes - reduction_axes
    else:
        out_axes = make_axes(out_axes)
        reduction_axes = make_axes(reduction_axes)

    # the two axis sets must be disjoint
    overlap = reduction_axes & out_axes
    if not overlap == make_axes(()):
        raise ValueError("reduction_axes {} and out_axes {} must not overlap"
                         .format(reduction_axes, out_axes))

    # together they must account for every axis of x
    if not (reduction_axes | out_axes).is_equal_set(x.axes):
        raise ValueError(("union of reduction_axes {} and out_axes {} must "
                          "be x.axes {}")
                         .format(reduction_axes, out_axes, x.axes))

    # out_axes must appear in the same relative order as in x.axes
    positions = [x.axes.index(axis) for axis in out_axes]
    if positions != sorted(positions):
        raise ValueError("out_axes {} must has same order as x.axes {}"
                         .format(out_axes, x.axes))

    return reduction_axes, out_axes
def create_reduction_op(name,
                        func_name=None,
                        generate_adjoints=None):
    """
    Build a ReductionOp subclass plus a factory function for it.

    Arguments:
        name: name of the generated class.
        func_name: __name__ assigned to the factory function.
        generate_adjoints: optional gradient method installed on the class.

    Returns:
        (RedClass, func): the subclass and its factory.
    """
    attrs = {}
    if generate_adjoints is not None:
        attrs['generate_adjoints'] = generate_adjoints
    RedClass = type(name, (ReductionOp,), attrs)

    def func(*args, **kwargs):
        # When out_axes is supplied in an order different from x's axes,
        # reduce with the axes sorted to x's order first, then transpose the
        # result into the requested order.
        out_axes = kwargs.get('out_axes')
        if out_axes is not None:
            x = args[0]
            positions = [x.axes.index(axis) for axis in out_axes]
            ordered = sorted(positions)
            if ordered != positions:
                kwargs['out_axes'] = [x.axes[i] for i in ordered]
                reduced = RedClass(*args, **kwargs)
                return axes_with_order(reduced, out_axes)
        return RedClass(*args, **kwargs)

    func.__name__ = func_name
    return RedClass, func
def max_adjoints(self, adjoints, delta, x):
    """Gradient of max: only the entries equal to the maximum receive delta."""
    mask = equal(x, self)
    x.generate_add_delta(adjoints, mask * delta)


Max, max = create_reduction_op('Max', 'max', max_adjoints)
def min_adjoints(self, adjoints, delta, x):
    """Gradient of min: only the entries equal to the minimum receive delta."""
    mask = equal(x, self)
    x.generate_add_delta(adjoints, mask * delta)


Min, min = create_reduction_op('Min', 'min', min_adjoints)
def sum_adjoints(self, adjoints, delta, x):
    """Gradient of sum: broadcast delta back over the reduced axes."""
    grad = broadcast(delta, x.axes)
    x.generate_add_delta(adjoints, grad)


Sum, sum = create_reduction_op('Sum', 'sum', sum_adjoints)
def prod_adjoints(self, adjoints, delta, x):
    """
    Gradient of prod with zero-aware handling:
    - no zeros along the reduction: grad_i = prod(x) / x_i
    - exactly one zero: only that entry gets the product of the others
    - two or more zeros: all gradients are zero (neither mask matches)
    """
    # axes
    axes = x.axes
    reduction_axes = self.reduction_axes
    # x_equal_zero
    x_equal_zero = equal(x, 0)
    # count 0's occurrence by reduction axes
    x_zero_count = sum(x_equal_zero, reduction_axes=reduction_axes)
    # create mask for zero count 0 and 1
    mask_zero = broadcast(equal(x_zero_count, 0), axes=axes)
    mask_one = broadcast(equal(x_zero_count, 1), axes=axes)
    # replace all 0 to 1 so the partial products below stay finite
    x_replaced = equal(x, 0.) * 1. + (1. - equal(x, 0.)) * x
    # do product of x_replace and gradient
    x_replaced_prod = prod(x_replaced, reduction_axes=reduction_axes)
    x_replaced_grad = x_replaced_prod / x_replaced
    # combine the no-zero and single-zero cases via their masks
    x_grad = mask_zero * x_replaced_grad + mask_one * x_equal_zero * x_replaced_grad
    x.generate_add_delta(
        adjoints,
        broadcast(delta, x.axes) * x_grad
    )
Prod, prod = create_reduction_op('Prod', 'prod', prod_adjoints)
Argmax, _ = create_reduction_op('Argmax', 'argmax')


def argmax(x, dtype=None, **kwargs):
    """Index of the maximum along the reduction axes, with an integer dtype."""
    int_dtype = default_int_dtype(dtype)
    return Argmax(x, dtype=int_dtype, **kwargs)
Argmin, _ = create_reduction_op('Argmin', 'argmin')


def argmin(x, dtype=None, **kwargs):
    """Index of the minimum along the reduction axes, with an integer dtype."""
    int_dtype = default_int_dtype(dtype)
    return Argmin(x, dtype=int_dtype, **kwargs)
def variance(x, out_axes=None, reduction_axes=None):
    """Variance of x over the given axes, computed as E[(x - E[x])^2]."""
    deviation = x - mean(x, out_axes=out_axes, reduction_axes=reduction_axes)
    return mean(square(deviation), out_axes=out_axes, reduction_axes=reduction_axes)
class TensorSizeOp(TensorOp):
    """
    A scalar returning the total size of a tensor.
    Arguments:
        x: The tensor whose axes we are measuring.
        reduction_axes: if supplied, return the size
        of these axes instead.
        out_axes: if supplied while reduction_axes is not, the size of
        x.axes - out_axes is measured.
        kwargs: options, including name
    """
    def __init__(self, x, reduction_axes=None, out_axes=None, **kwargs):
        if reduction_axes is None and out_axes is None:
            # default: measure all sample axes
            reduction_axes = x.axes.sample_axes()
        elif reduction_axes is None:
            reduction_axes = x.axes - out_axes
        self.reduction_axes = reduction_axes
        # NOTE(review): kwargs (e.g. name) are accepted but not forwarded to
        # TensorOp.__init__ — confirm this is intentional
        super(TensorSizeOp, self).__init__(args=(x,), axes=())
    def copy_with_new_args(self, args):
        # rebuild with the same reduction axes for the new argument
        return type(self)(args[0], self.reduction_axes)
def tensor_size(x, reduction_axes=None, out_axes=None):
    """
    A scalar returning the total size of a tensor in elements.

    Arguments:
        x: The tensor whose axes we are measuring.
        reduction_axes: if supplied, return the size of these axes instead.
        out_axes: if supplied while reduction_axes is not, the size of
            x.axes - out_axes is measured.
    """
    return TensorSizeOp(x, reduction_axes=reduction_axes, out_axes=out_axes)
def batch_size(x):
    """
    Args:
        x: A Tensor

    Returns:
        The size of the batch axis in x.
    """
    batch_axes = x.axes.batch_axes()
    return tensor_size(x, reduction_axes=batch_axes)
def pad(x, paddings, axes=None):
    """
    Pads a tensor with zeroes along each of its dimensions.
    TODO: clean up slice / unslice used here

    Arguments:
        x: the tensor to be padded
        paddings: one entry per axis of x; each entry is either an int
            (symmetric padding) or a (before, after) pair
        axes: the axes to give the padded tensor; fresh axes of the correct
            lengths are created when omitted

    Returns:
        TensorOp: symbolic expression for the padded tensor
    """
    if len(paddings) != len(x.axes):
        raise ValueError((
            "pad's paddings has length {pad} which needs to be the same "
            "as the number of axes in x ({x})"
        ).format(
            pad=len(paddings),
            x=len(x.axes),
        ))

    def as_pair(p):
        # normalize an int to a symmetric (before, after) pair
        return (p, p) if isinstance(p, int) else p

    def as_slice(pair):
        # a zero pad on either side maps to an open slice bound
        start = pair[0] if pair[0] != 0 else None
        stop = -pair[1] if pair[1] != 0 else None
        return slice(start, stop, 1)

    pairs = tuple(as_pair(p) for p in paddings)
    if axes is None:
        axes = make_axes(
            axis if pair == (0, 0)
            else make_axis(length=axis.length + pair[0] + pair[1], name=axis.name)
            for axis, pair in zip(x.axes, pairs)
        )
    slices = tuple(as_slice(pair) for pair in pairs)
    return _unslice(x, slices, axes)
class OneHotOp(TensorOp):
    """
    Converts a tensor containing class indices to a onehot representation.
    For example, if x is a one-dimensional tensor with value [0, 1], and the
    number of classes is 2, we convert x to a onehot representation by replacing
    0 and 1 with vectors: 0 -> [1, 0] and 1 -> [0, 1].
    We add the added dimension in the leftmost place.
    Arguments:
        x: The tensor to convert to a onehot form.
        axis: The axis along which to construct the onehot form. It should not be
        in x and should have length equal to the number of classes.
    """
    def __init__(self, x, axis, **kwargs):
        self.axis = axis
        # the onehot axis is prepended: result axes are (axis,) + x.axes
        super(OneHotOp, self).__init__(
            args=(x,),
            axes=make_axes((axis,)) + x.axes,
            **kwargs
        )
    def copy_with_new_args(self, args):
        # rebuild the same onehot op for new arguments
        return type(self)(*args, axis=self.axis)
    def as_two_dim(self):
        """
        Constructs a subgraph that is equivalent to this op and can be evaluated
        by a transformer that only handles two dimensions.
        Returns:
            A subgraph equivalent to this op.
        """
        x, = self.args
        if len(x.axes) > 1:
            # collapse x's axes into one, do the 2-D onehot, then restore
            # the original axes on the result
            x = flatten(x)
            out = OneHotTwoDimOp(x, self.axis)
            out = unflatten(
                out,
                [out.axes[0]] + list(out.axes[1].axes)
            )
            return out
        else:
            return OneHotTwoDimOp(x, self.axis)
def one_hot(x, axis):
    """
    Create a onehot representation of x along the given axis.

    Args:
        x: The tensor of class indices.
        axis: The hot axis; its length is the number of classes.

    Returns:
        OneHotOp: The op.
    """
    return OneHotOp(x, axis)
class OneHotTwoDimOp(OneHotOp):
    """
    Handles conversion from one-dimensional vector of class labels
    to a two-dimensional onehot representation.
    Arguments:
        x: The tensor to convert to a onehot form.
        axis: The axis along which to construct the onehot form. It should not be
        in x and should have length equal to the number of classes.
    """
    def __init__(self, x, axis, **kwargs):
        # the 2-D lowering only handles a single input axis;
        # OneHotOp.as_two_dim flattens multi-axis inputs before reaching here
        assert len(x.axes) == 1
        super(OneHotTwoDimOp, self).__init__(x, axis, **kwargs)
class SigmoidOp(ValueOp):
    """
    Sigmoid expressed through existing ops (reciprocal/exp), with a custom
    derivative handler attached to the resulting value tensor.

    Arguments:
        x: The tensor argument.
        kwargs: Other construction arguments.

    Parameters:
        x: The tensor argument.
    """
    def __init__(self, x, **kwargs):
        super(SigmoidOp, self).__init__(**kwargs)
        self.x = x
        # sigmoid(x) = 1 / (1 + exp(-x))
        self.value_tensor = reciprocal(exp(-x) + 1)
        self.value_tensor.deriv_handler = self

    def generate_adjoints(self, adjoints, delta):
        """dy/dx = y * (1 - y), where y = sigmoid(x)."""
        y = self.value_tensor
        self.x.generate_add_delta(adjoints, delta * y * (1.0 - y))
class SigmoidAtomicOp(UnaryElementWiseOp):
    """
    Sigmoid implemented as a single element-wise primitive op, with the
    standard sigmoid gradient wired in.

    Arguments:
        x: The tensor argument.
        kwargs: Other construction arguments.

    Parameters:
        x: The tensor argument.
    """
    def __init__(self, x, **kwargs):
        super(SigmoidAtomicOp, self).__init__(x, **kwargs)
        self.x = x
        self.deriv_handler = self

    def generate_adjoints(self, adjoints, delta):
        """dy/dx = y * (1 - y); here self is the sigmoid output y."""
        self.x.generate_add_delta(adjoints, delta * self * (1.0 - self))
def sigmoid(x):
    """
    Computes the sigmoid of x.

    Args:
        x: The tensor argument.

    Returns:
        The sigmoid computation.
    """
    op = SigmoidOp(x)
    return op.value_tensor
def sigmoidAtomic(x):
"""
Computes the sigmoid of x.
Args:
| |
# redpandas/redpd_plot/mesh.py
"""
Plot TFR
"""
import datetime as dt
from typing import List, Union
import matplotlib.pyplot as plt
from matplotlib.colorbar import Colorbar
from matplotlib.figure import Figure
import matplotlib.ticker as mticker
import numpy as np
import pandas as pd
from libquantum.plot_templates import plot_time_frequency_reps as pnl
import redpandas.redpd_scales as rpd_scales
from redpandas.redpd_plot.parameters import FigureParameters as FigParam
def sci_format(x, lim):
    """
    Tick formatter: render x in scientific notation with base 10.
    The second argument is unused but presumably required by matplotlib's
    tick-formatter callback signature — confirm at the call site.
    Adapted from https://www.py4u.net/discuss/140199
    """
    mantissa, exponent = '{:.0e}'.format(x).split('e')
    return r'${} \times 10^{{{}}}$'.format(mantissa, int(exponent))
def find_wiggle_num_tfr(df: pd.DataFrame,
                        mesh_tfr_label: Union[str, List[str]]) -> int:
    """
    Find number of signals to plot

    :param df: input pandas data frame
    :param mesh_tfr_label: string for the mesh tfr column name in df. List of strings for multiple columns
    :return: int, number of signals
    """
    # BUG FIX: accept a single column name as well as a list; iterating a
    # bare string would otherwise loop over its characters
    if isinstance(mesh_tfr_label, str):
        mesh_tfr_label = [mesh_tfr_label]

    wiggle_num_list = []  # wiggles contributed per station/sensor
    for mesh_tfr_label_individual in mesh_tfr_label:
        for n in df.index:
            # check column exists and cell is not empty (NaN cells are floats)
            if mesh_tfr_label_individual in df.columns and type(df[mesh_tfr_label_individual][n]) != float:
                if df[mesh_tfr_label_individual][n].ndim == 2:  # aka audio
                    wiggle_num_list.append(1)  # a single tfr panel
                else:
                    # barometer is single channel; other 3D sensors
                    # (gyroscope/accelerometer/magnetometer) contribute 3
                    if mesh_tfr_label_individual.find("pressure") == 0 or mesh_tfr_label_individual.find("bar") == 0:
                        wiggle_num_list.append(1)
                    else:
                        wiggle_num_list.append(3)
            else:
                continue
    return sum(wiggle_num_list)  # total number of signals displayed
def find_ylabel_tfr(df: pd.DataFrame,
                    mesh_tfr_label: Union[str, List[str]],
                    sig_id_label: Union[str, List[str]]) -> List:
    """
    Find ylabels

    :param df: input pandas data frame
    :param mesh_tfr_label: string for the mesh tfr column name in df. List of strings for multiple columns
    :param sig_id_label: string for column name with station ids in df. Alternatively, you can also provide a
        list of strings with custom labels, for example: ["Audio", "Acc X", "Acc Y", "Acc Z"]. The number of custom
        labels provided needs to match the number of stations and signals in df.
    :return: list of strings with y labels
    """
    # BUG FIX: accept a single column name as well as a list; iterating a
    # bare string would otherwise loop over its characters
    if isinstance(mesh_tfr_label, str):
        mesh_tfr_label = [mesh_tfr_label]

    # custom list of labels provided by the caller: return as-is
    if sig_id_label != "index" and not (type(sig_id_label) == str and sig_id_label in df.columns):
        return sig_id_label

    wiggle_yticklabel = []  # one label per wiggle
    for mesh_tfr_label_individual in mesh_tfr_label:
        for n in df.index:
            # check column exists and cell is not empty (NaN cells are floats)
            if mesh_tfr_label_individual in df.columns and type(df[mesh_tfr_label_individual][n]) != float:
                # station label: either the df index itself or a column value
                label = df.index[n] if sig_id_label == "index" else df[sig_id_label][n]
                if df[mesh_tfr_label_individual][n].ndim == 2:  # aka audio
                    wiggle_yticklabel.append(label)
                else:
                    # one label per sensor dimension (e.g. X/Y/Z)
                    for _ in df[mesh_tfr_label_individual][n]:
                        wiggle_yticklabel.append(label)
            else:
                continue
    return wiggle_yticklabel
def find_x_max_min_lim(df: pd.DataFrame,
                       wiggle_num: int,
                       mesh_tfr_label: Union[str, List[str]],
                       mesh_time_label: Union[str, List[str]]):
    """
    Find max/min x-axis limit

    :param df: input pandas data frame
    :param wiggle_num: int, number of signals that will be plotted; must match
        the number of wiggles found below, since np.empty slots left unfilled
        contain garbage that would corrupt the min/max
    :param mesh_tfr_label: string for the mesh tfr column name in df. List of strings for multiple columns
    :param mesh_time_label: string for the mesh time column name in df. List of strings for multiple columns
    :return: values for x max and x min
    """
    # BUG FIX: accept single column names as well as lists; iterating a bare
    # string would otherwise loop over its characters
    if isinstance(mesh_tfr_label, str):
        mesh_tfr_label = [mesh_tfr_label]
    if isinstance(mesh_time_label, str):
        mesh_time_label = [mesh_time_label]

    x_lim_min = np.empty(wiggle_num)
    x_lim_max = np.empty(wiggle_num)
    index_wiggle_num_total = 0  # position of the current wiggle
    for mesh_n in range(len(mesh_tfr_label)):
        # hoisted out of the row loop: labels are constant per column
        mesh_tfr_label_individual = mesh_tfr_label[mesh_n]
        mesh_time_label_individual = mesh_time_label[mesh_n]
        for index_element in df.index:
            # check column exists and cell is not empty (NaN cells are floats)
            if mesh_tfr_label_individual in df.columns and type(df[mesh_tfr_label_individual][index_element]) != float:
                if df[mesh_tfr_label_individual][index_element].ndim == 2:  # aka audio
                    x_lim_min[index_wiggle_num_total] = np.min(df[mesh_time_label_individual][index_element])
                    x_lim_max[index_wiggle_num_total] = np.max(df[mesh_time_label_individual][index_element])
                    index_wiggle_num_total += 1
                else:
                    # one wiggle per sensor dimension (e.g. X/Y/Z)
                    for index_dimension, _ in enumerate(df[mesh_tfr_label_individual][index_element]):
                        x_lim_min[index_wiggle_num_total] = np.min(df[mesh_time_label_individual][index_element][index_dimension])
                        x_lim_max[index_wiggle_num_total] = np.max(df[mesh_time_label_individual][index_element][index_dimension])
                        index_wiggle_num_total += 1
            else:
                continue
    # global min/max x limits across all wiggles
    return np.max(x_lim_max), np.min(x_lim_min)
def find_tfr_max_min_lim(df: pd.DataFrame,
                         wiggle_num: int,
                         mesh_tfr_label: Union[str, List[str]]):
    """
    Find max/min limits TFR bits

    :param df: input pandas data frame
    :param wiggle_num: int, number of signals that will be plotted; must match
        the number of wiggles found below, since np.empty slots left unfilled
        contain garbage that would corrupt the max
    :param mesh_tfr_label: string for the mesh tfr column name in df. List of strings for multiple columns
    :return: values for tfr max and tfr min
    """
    # BUG FIX: accept a single column name as well as a list; iterating a
    # bare string would otherwise loop over its characters
    if isinstance(mesh_tfr_label, str):
        mesh_tfr_label = [mesh_tfr_label]

    tfr_min = np.empty(wiggle_num)
    tfr_max = np.empty(wiggle_num)
    index_wiggle_num_total = 0  # position of the current wiggle
    for index_element in df.index:
        for mesh_tfr_label_individual in mesh_tfr_label:
            # check column exists and cell is not empty (NaN cells are floats)
            if mesh_tfr_label_individual in df.columns and type(df[mesh_tfr_label_individual][index_element]) != float:
                if df[mesh_tfr_label_individual][index_element].ndim == 2:  # aka audio
                    tfr_min[index_wiggle_num_total] = np.min(df[mesh_tfr_label_individual][index_element])
                    tfr_max[index_wiggle_num_total] = np.max(df[mesh_tfr_label_individual][index_element])
                    index_wiggle_num_total += 1
                else:
                    # one wiggle per sensor dimension (e.g. X/Y/Z)
                    for index_dimension, _ in enumerate(df[mesh_tfr_label_individual][index_element]):
                        tfr_min[index_wiggle_num_total] = np.min(df[mesh_tfr_label_individual][index_element][index_dimension])
                        tfr_max[index_wiggle_num_total] = np.max(df[mesh_tfr_label_individual][index_element][index_dimension])
                        index_wiggle_num_total += 1
            else:
                continue
    # Clip the color scale just below the global peak: 3 units of headroom at
    # the top and an 18-unit span below it (presumably chosen for visual
    # contrast — confirm against the plotting defaults)
    tfr_max_total = np.max(tfr_max) - 3
    tfr_min_total = tfr_max_total - 18
    return tfr_max_total, tfr_min_total
def find_mesh_color_and_scaling(wiggle_num: int,
                                mesh_color_scaling: Union[List[str], str] = 'auto',
                                mesh_color_range: Union[List[float], float] = 15.0):
    """
    Expand scalar color-scaling settings into per-wiggle lists so callers with
    a single sensor type do not have to repeat the same value.

    :param wiggle_num: number of wiggles in df
    :param mesh_color_scaling: optional, colorbar scaling, "auto" or "range". Default is 'auto'. The parameter
        common_colorbar needs to be set to False to apply mesh_color_scaling
    :param mesh_color_range: optional, range of colorbar. Default is 15. The parameter common_colorbar needs
        to be set to False and mesh_color_scaling to "range" to apply mesh_color_range
    :return: per-wiggle lists (scaling, range), or the inputs unchanged when
        they are already lists
    """
    scalar_range = isinstance(mesh_color_range, (float, int))
    scalar_scaling = isinstance(mesh_color_scaling, str)
    if scalar_range and scalar_scaling:
        return [mesh_color_scaling] * wiggle_num, [mesh_color_range] * wiggle_num
    return mesh_color_scaling, mesh_color_range
def plot_mesh_pandas(df: pd.DataFrame,
mesh_time_label: Union[str, List[str]],
mesh_frequency_label: Union[str, List[str]],
mesh_tfr_label: Union[str, List[str]],
sig_id_label: Union[str, List[str]],
t0_sig_epoch_s: float = None,
fig_title_show: bool = True,
fig_title: str = "STFT",
frequency_scaling: str = "log",
frequency_hz_ymin: float = rpd_scales.Slice.FU,
frequency_hz_ymax: float = rpd_scales.Slice.F0,
common_colorbar: bool = True,
ytick_values_show: bool = False,
mesh_color_scaling: Union[List[str], str] = 'auto',
mesh_color_range: Union[List[float], float] = 15.0,
show_figure: bool = True) -> Figure:
"""
Plots spectrogram for all signals in df
:param df: input pandas data frame. REQUIRED
:param mesh_time_label: string for the mesh time column name in df. List of strings for multiple columns. REQUIRED
:param mesh_frequency_label: string for the mesh frequency column name in df. List of strings for multiple columns. REQUIRED
:param mesh_tfr_label: string for the mesh tfr column name in df. List of strings for multiple columns. REQUIRED
:param sig_id_label: string for column name with station ids in df. REQUIRED. Alternatively, you can also provide a
list of strings with custom labels, for example: ["Audio", "Acc X", "Acc Y", "Acc Z"]. The number of custom labels
provided needs to match the number of stations and signals in df.
:param t0_sig_epoch_s: optional float, epoch time in seconds of first timestamp. Default is None
:param fig_title_show: optional bool, include a title in the figure if True. Default is True
:param fig_title: optional string, figure title label. Default is "STFT"
:param frequency_scaling: optional string, determine frequency scaling "log" or "lin". Default is "log"
:param frequency_hz_ymin: optional float, y axis min lim
:param frequency_hz_ymax: optional float, y axis max lim
:param common_colorbar: optional bool, display a colorbar for all mesh panels if True. Default is True
:param ytick_values_show: optional bool, display ytick values. Default is False
:param mesh_color_scaling: optional, colorbar scaling, "auto" or "range". Default is 'auto'. The parameter | |
self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'spatial_kernel_initializer': initializers.serialize(self.spatial_kernel_initializer),
'temporal_kernel_initializer': initializers.serialize(self.temporal_kernel_initializer),
'temporal_frequencies_initializer': initializers.serialize(self.temporal_frequencies_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'spatial_kernel_regularizer': regularizers.serialize(self.spatial_kernel_regularizer),
'temporal_kernel_regularizer': regularizers.serialize(self.temporal_kernel_regularizer),
'temporal_frequencies_regularizer': regularizers.serialize(self.temporal_frequencies_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'spatial_kernel_constraint': constraints.serialize(self.spatial_kernel_constraint),
'temporal_kernel_constraint': constraints.serialize(self.temporal_kernel_constraint),
'temporal_frequencies_constraint': constraints.serialize(self.temporal_frequencies_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Convolution2DEnergy_TemporalBasis, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Variant of the layer above with separate temporal frequencies per channel.
class Convolution2DEnergy_TemporalBasis2(Layer):
"""Convolution operator for filtering windows of time varying
two-dimensional inputs, such as a series of movie frames, with
learned filters inspired by simple-cell and complex-cell V1 neurons.
Filters are learned in a factorized representation, consisting of
orthogonal 2D filters, a set of vectors that control filter amplitude
over time, and a set of scalars that control the trade-off of the
orthogonal 2D filters over time. This representation can create a large
number of 3D spatio-temporal filters from a small number of parameters,
often with less than 1% of the parameters of a naive 3D convolutional
model. When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(12, 3, 64, 64)` for 12 64x64 RGB pictures.
# Examples
```python
# apply a 5x5 convolution with 8 simple-cell filters and
16 complex-cell filters with 4 amplitude profiles and 7
temporal frequencies on a 256x256 image:
model = Sequential()
model.add(Convolution2D(8, 16, 5, 5, 4, 7,
padding='same',
input_shape=(3, 256, 256)))
# now model.output_shape == (None, 4*7, 8+16, 256, 256)
```
# Arguments
filters_simple: Number of simple-cell filters to use.
filters_complex: Number of complex-cell filters to use.
spatial_kernel_size: Tuple containing number of rows and columns in the convolution kernel.
filters_temporal: Number of temporal amplitude filters
temporal_frequencies: Number of temporal frequencies (odd number)
temporal_frequencies_initial_max: Maximum temporal frequency, temporal frequencies initialized
as (-tf_max..., 0, ..., tf_max)
spatial_kernel_initializer: name of initialization function for the spatial kernel weights
(see [initializers](../initializers.md))
temporal_kernel_initializer: name of initialization function for the temporal kernel weights
(see [initializers](../initializers.md))
activation: name of activation function to use
(see [activations](../activations.md)),
or alternatively, elementwise Theano function.
If you don't specify anything, no activation is applied
(ie. "linear" activation: a(x) = x).
weights: list of numpy arrays to set as initial weights.
padding: 'valid', 'same' or 'full'
('full' requires the Theano backend).
strides: tuple of length 2. Factor by which to strides output.
Also called strides elsewhere.
spatial_kernel_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the spatial kernel weights matrix.
temporal_kernel_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L2 or Laplacian regularization), applied to the temporal amplitude
weights matrix.
bias_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
applied to the network output.
spatial_kernel_constraint: instance of the [constraints](../constraints.md) module
(eg. maxnorm, nonneg), applied to the main weights matrix.
temporal_kernel_constraint: instance of the [constraints](../constraints.md) module
(eg. maxnorm, nonneg), applied to the main weights matrix.
bias_constraint: instance of the [constraints](../constraints.md) module,
applied to the bias.
data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension
(the depth) is at index 1, in 'channels_last' mode is it at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
use_bias: whether to include a bias
(i.e. make the layer affine rather than linear).
# Input shape
5D tensor with shape:
`(samples, time_steps, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, time_steps, rows, cols, channels)` if data_format='channels_last'.
# Output shape
5D tensor with shape:
`(samples, nb_temporal_filter, nb_filter, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, nb_temporal_filter, new_rows, new_cols, nb_filter)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
    def __init__(self, filters_simple,
                 filters_complex,
                 filters_temporal,
                 spatial_kernel_size,
                 temporal_frequencies,
                 spatial_kernel_initializer='glorot_uniform',
                 temporal_kernel_initializer='glorot_uniform',
                 temporal_frequencies_initializer=step_init,
                 temporal_frequencies_initial_max=2,
                 temporal_frequencies_scaling=10,
                 bias_initializer='zeros',
                 activation='relu',
                 padding='valid',
                 strides=(1, 1),
                 dilation_rate=(1, 1),
                 data_format=K.image_data_format(),
                 spatial_kernel_regularizer=None,
                 temporal_kernel_regularizer=None,
                 temporal_frequencies_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 spatial_kernel_constraint=None,
                 temporal_kernel_constraint=None,
                 temporal_frequencies_constraint=None,
                 bias_constraint=None,
                 use_bias=True, **kwargs):
        """Store the layer hyper-parameters and resolve string identifiers.

        Initializers, activations, regularizers and constraints passed as
        strings (e.g. 'glorot_uniform', 'relu') are resolved to callables
        through the corresponding Keras ``get`` helpers.  Weight creation
        itself is deferred to ``build``.

        NOTE(review): the ``data_format=K.image_data_format()`` default is
        evaluated once at class-definition time, so later changes to the
        Keras config are not reflected in the default -- confirm intended.
        """
        self.filters_simple = filters_simple
        self.filters_complex = filters_complex
        self.filters_temporal = filters_temporal
        self.spatial_kernel_size = spatial_kernel_size
        self.temporal_frequencies = temporal_frequencies
        # stored as float32 so they combine cleanly with backend tensors
        self.temporal_frequencies_initial_max = np.float32(temporal_frequencies_initial_max)
        self.temporal_frequencies_scaling = np.float32(temporal_frequencies_scaling)
        self.spatial_kernel_initializer = initializers.get(spatial_kernel_initializer)
        self.temporal_kernel_initializer = initializers.get(temporal_kernel_initializer)
        self.temporal_frequencies_initializer = initializers.get(temporal_frequencies_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.activation = activations.get(activation)
        assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
        self.padding = padding
        self.strides = strides
        self.dilation_rate = dilation_rate
        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_first, channels_last}'
        self.data_format = data_format
        self.spatial_kernel_regularizer = regularizers.get(spatial_kernel_regularizer)
        self.temporal_kernel_regularizer = regularizers.get(temporal_kernel_regularizer)
        self.temporal_frequencies_regularizer = regularizers.get(temporal_frequencies_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        # NOTE(review): the ``spatial_kernel_constraint`` argument is
        # ignored here -- the constraint is hard-coded to
        # UnitNormOrthogonal over the complex+simple filter stack.
        # Possibly required by the energy model; confirm intentional.
        self.spatial_kernel_constraint = constraints.UnitNormOrthogonal(self.filters_complex + self.filters_simple)
        self.temporal_kernel_constraint = constraints.get(temporal_kernel_constraint)
        self.temporal_frequencies_constraint = constraints.get(temporal_frequencies_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.use_bias = use_bias
        # inputs are 5D (samples, time_steps, ...); see the class docstring
        self.input_spec = [InputSpec(ndim=5)]
        super(Convolution2DEnergy_TemporalBasis2, self).__init__(**kwargs)
    def build(self, input_shape):
        """Create the layer weights from the 5D input shape.

        Expects ``(samples, time_steps, ..., channels)``; the position of
        the channel axis depends on ``data_format``.
        """
        assert len(input_shape) == 5
        if self.data_format == 'channels_first':
            # (samples, time_steps, channels, rows, cols)
            channel_axis = 2
        else:
            # (samples, time_steps, rows, cols, channels)
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        # number of time steps; frames share one spatial kernel and are
        # recombined along this axis in call()
        self.delays = input_shape[1]
        input_dim = input_shape[channel_axis]
        # quadrature pairs: 2 x (complex + simple) spatial filters
        spatial_kernel_shape = self.spatial_kernel_size + (input_dim, 2*self.filters_complex + 2*self.filters_simple)
        self.spatial_kernel = self.add_weight(spatial_kernel_shape,
                                              initializer=self.spatial_kernel_initializer,
                                              name='spatial_kernel',
                                              regularizer=self.spatial_kernel_regularizer,
                                              constraint=self.spatial_kernel_constraint)
        self.temporal_kernel = self.add_weight((self.delays, self.filters_temporal),
                                               initializer=self.temporal_kernel_initializer,
                                               name='temporal_kernel',
                                               regularizer=self.temporal_kernel_regularizer,
                                               constraint=self.temporal_kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight((self.filters_complex + self.filters_simple,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # NOTE(review): the second element of this shape tuple is a float
        # (initial_max / scaling) rather than an integer dimension, and the
        # configured ``temporal_frequencies_initializer`` is bypassed in
        # favour of ``step_init2`` -- confirm both are intentional.
        self.temporal_freqs = self.add_weight((self.temporal_frequencies, self.temporal_frequencies_initial_max/self.temporal_frequencies_scaling, self.filters_complex + self.filters_simple),
                                              initializer=step_init2,
                                              name='temporal_frequencies',
                                              regularizer=self.temporal_frequencies_regularizer,
                                              constraint=self.temporal_frequencies_constraint)
        # 2*pi*t for t evenly spaced over [0, 1] along the delay axis
        self.delays_pi = K.variable(2 * np.pi * np.arange(0, 1 + 1. / (self.delays - 1), 1. / (self.delays - 1))[:self.delays], name='delays_pi')
        # Set input spec.
        self.input_spec = InputSpec(ndim=5,
                                    axes={channel_axis: input_dim})
        self.built = True
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
conv_dim2 = input_shape[3]
conv_dim3 = input_shape[4]
elif self.data_format == 'channels_last':
conv_dim2 = input_shape[2]
conv_dim3 = input_shape[3]
else:
raise Exception('Invalid data_format: ' + self.data_format)
conv_dim2 = conv_utils.conv_output_length(conv_dim2,
self.spatial_kernel_size[0],
padding=self.padding,
stride=self.strides[0],
dilation=self.dilation_rate[0])
conv_dim3 = conv_utils.conv_output_length(conv_dim3,
self.spatial_kernel_size[1],
padding=self.padding,
stride=self.strides[1],
dilation=self.dilation_rate[1])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters_temporal*self.temporal_frequencies, (self.filters_complex + self.filters_simple), conv_dim2, conv_dim3)
elif self.data_format == 'channels_last':
return (input_shape[0], self.filters_temporal*self.temporal_frequencies, conv_dim2, conv_dim3, (self.filters_complex + self.filters_simple))
else:
raise Exception('Invalid data_format: ' + self.data_format)
def call(self, inputs):
xshape = K.shape(inputs)
inputs = K.reshape(inputs, (-1, xshape[2], xshape[3], xshape[4]))
sin_step = K.reshape(K.sin(self.delays_pi[:, None, None, None]*self.temporal_freqs[None, :, :, None]*self.temporal_frequencies_scaling)*self.temporal_kernel[:, None, None, :],
(-1, self.filters_complex + self.filters_simple, self.filters_temporal*self.temporal_frequencies))
cos_step = K.reshape(K.cos(self.delays_pi[:, None, None, None]*self.temporal_freqs[None, :, :, None]*self.temporal_frequencies_scaling)*self.temporal_kernel[:, None, None, :],
(-1, self.filters_complex + self.filters_simple, self.filters_temporal*self.temporal_frequencies))
# delays x stack x tf*ampl
w0t = K.concatenate((cos_step, -sin_step), axis=0)
w1t = K.concatenate((sin_step, cos_step), axis=0)
conv_out1 = K.conv2d(
inputs,
self.spatial_kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
conv_out1_shape = K.shape(conv_out1)
conv_out1 = K.reshape(conv_out1, (-1, self.delays, conv_out1_shape[1], conv_out1_shape[2], conv_out1_shape[3]))
if self.data_format == 'channels_first':
# samps x delays x stack x X x Y
conv_out1 = K.permute_dimensions(conv_out1, (0, 3, 4, 1, 2))
# samps x X x Y x delays x stack
elif self.data_format == 'channels_last':
# samps x delays x X x Y x stack
conv_out1 = K.permute_dimensions(conv_out1, (0, 2, 3, 1, 4))
# samps x X x Y x delays x stack
else:
raise Exception('Invalid data_format: ' + self.data_format)
# split out complex and simple filter pairs
conv_out12 = K.concatenate([conv_out1[:, :, :, :, :self.filters_complex], conv_out1[:, :, :, :, self.filters_complex+self.filters_simple:2*self.filters_complex+self.filters_simple]], axis=3)
conv_out34 = K.concatenate([conv_out1[:, :, :, :, self.filters_complex:self.filters_complex + self.filters_simple], conv_out1[:, :, :, :, 2*self.filters_complex + self.filters_simple:]], axis=3)
# apply temporal trade-off to get temporal filter outputs and compute complex and simple outputs
conv_out = K.sqrt(K.square(K.sum(conv_out12[..., None] * w0t[None, None, None, :, :self.filters_complex, :], axis=3)) + K.square(K.sum(conv_out12[..., None] * w1t[None, None, None, :, :self.filters_complex, :], axis=3)) + K.epsilon())
conv_outlin = K.sum(conv_out34[..., None] * w0t[None, None, None, :, self.filters_complex:, :], axis=3)
# samps x X x Y x stack x temporal_filters*temporal_frequencies
output = K.concatenate([conv_out, conv_outlin], axis=3)
if self.data_format == 'channels_first':
output = K.permute_dimensions(output, (0, 4, 3, 1, 2))
# samps x temporal_filters*temporal_frequencies x stack x X x Y
elif self.data_format == 'channels_last':
output = K.permute_dimensions(output, (0, 4, 1, 2, 3))
# samps x temporal_filters*temporal_frequencies x X x Y x stack
if self.bias:
if self.data_format == 'channels_first':
output += K.reshape(self.bias, (1, 1, self.filters_complex + self.filters_simple, 1, 1))
elif self.data_format == 'channels_last':
output += K.reshape(self.bias, (1, 1, 1, 1, self.filters_complex + self.filters_simple))
output = self.activation(output)
return output
def get_config(self):
config = {'filters_simple': self.filters_simple,
'filters_complex': self.filters_complex,
'filters_temporal': self.filters_temporal,
'spatial_kernel_size': self.spatial_kernel_size,
'temporal_frequencies': self.temporal_frequencies,
'temporal_frequencies_initial_max': self.temporal_frequencies_initial_max,
'temporal_frequencies_scaling': self.temporal_frequencies_scaling,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'spatial_kernel_initializer': initializers.serialize(self.spatial_kernel_initializer),
'temporal_kernel_initializer': initializers.serialize(self.temporal_kernel_initializer),
| |
CoefsPow, bird):
""" Perform the linear correlation function matrix multiplications """
bird.C11 = np.real(np.einsum('ns,ln->ls', CoefsPow, self.Mcf11))
def makeCct(self, CoefsPow, bird):
""" Perform the counterterm correlation function matrix multiplications """
bird.Cct = self.co.s**-2 * np.real(np.einsum('ns,ln->ls', CoefsPow, self.Mcfct))
def makeC22l(self, CoefsPow, bird):
""" Perform the 22-loop correlation function matrix multiplications """
bird.C22l = np.real(np.einsum('ns,ms,blnm->lbs', CoefsPow, CoefsPow, self.Mcf22, optimize=self.optipathC22l))
def makeC13l(self, CoefsPow, bird):
""" Perform the 13-loop correlation function matrix multiplications """
bird.C13l = np.real(np.einsum('ns,ms,blnm->lbs', CoefsPow, CoefsPow, self.Mcf13, optimize=self.optipathC13l))
def Coef(self, bird, window=None):
""" Perform the FFTLog (i.e. calculate the coefficients of the FFTLog) of the input linear power spectrum in the given a Bird().
Parameters
----------
bird : class
an object of type Bird()
"""
return self.fft.Coef(bird.kin, bird.Pin, window=window)
def Ps(self, bird, window=None):
""" Compute the loop power spectrum given a Bird(). Perform the FFTLog and the matrix multiplications.
Parameters
----------
bird : class
an object of type Bird()
"""
coef = self.Coef(bird, window=.2)
coefkPow = self.CoefkPow(coef)
self.makeP22(coefkPow, bird)
self.makeP13(coefkPow, bird)
def Cf(self, bird, window=None):
""" Compute the loop correlation function given a Bird(). Perform the FFTLog and the matrix multiplications.
Parameters
----------
bird : class
an object of type Bird()
"""
coef = self.Coef(bird, window=.2)
coefsPow = self.CoefsPow(coef)
self.makeC11(coefsPow, bird)
self.makeCct(coefsPow, bird)
self.makeC22l(coefsPow, bird)
self.makeC13l(coefsPow, bird)
if bird.with_nlo_bias: self.makeCnlo(coefsPow, bird)
def PsCf(self, bird, window=None):
""" Compute the loop power spectrum and correlation function given a Bird(). Perform the FFTLog and the matrix multiplications.
Parameters
----------
bird : class
an object of type Bird()
"""
coef = self.Coef(bird, window=.2)
coefkPow = self.CoefkPow(coef)
self.makeP22(coefkPow, bird)
self.makeP13(coefkPow, bird)
coefsPow = self.CoefsPow(coef)
self.makeC11(coefsPow, bird)
self.makeCct(coefsPow, bird)
self.makeC22l(coefsPow, bird)
self.makeC13l(coefsPow, bird)
if bird.with_nlo_bias: self.makeCnlo(coefsPow, bird)
# def setMang11(self):
# """ Compute the linear 'angular' correlation function matrices. Called at the instantiation of the class if the matrices are not loaded. """
# self.Mang11 = np.empty(shape=(self.co.Ng, self.Afft.Pow.shape[0]), dtype='complex')
# for l in range(self.co.Ng):
# for u, n1 in enumerate(-0.5 * self.Afft.Pow):
# self.Mang11[l, u] = (2*pi)**.5 * MPC(2 * l - 0.5, n1)
# def setMang(self):
# """ Compute the power spectrum to 'angular' correlation function spherical Bessel transform matrices. Called at the instantiation of the class if the matrices are not loaded. """
# self.Mang = np.empty(shape=(self.co.Ng, self.Afft.Pow.shape[0], self.Afft.Pow.shape[0]), dtype='complex')
# for l in range(self.co.Ng):
# for u, n1 in enumerate(-0.5 * self.Afft.Pow):
# for v, n2 in enumerate(-0.5 * self.Afft.Pow):
# self.Mang[l, u, v] = (2*pi)**.5 * MPC(2 * l - 0.5, n1 + n2 - 1.5)
# def setMang22(self):
# """ Compute the 22-loop 'angular' correlation function matrices. Called at the instantiation of the class if the matrices are not loaded. """
# self.Mang22 = np.einsum('lnm,bnm->blnm', self.Mang, self.M22)
# def setMang13(self):
# """ Compute the 13-loop 'angular' correlation function matrices. Called at the instantiation of the class if the matrices are not loaded. """
# self.Mang13 = np.einsum('lnm,bn->blnm', self.Mang, self.M13)
# def setMangct(self):
# """ Compute the counterterm 'angular' correlation function matrices. Called at the instantiation of the class if the matrices are not loaded. """
# self.Mangct = np.empty(shape=(self.co.Ng, self.Afft.Pow.shape[0]), dtype='complex')
# for l in range(self.co.Ng):
# for u, n1 in enumerate(-0.5 * self.Afft.Pow - 1.):
# self.Mangct[l, u] = (2*pi)**.5 * MPC(2 * l - 0.5, n1)
# def setrPow(self):
# """ Compute the r's to the powers of the FFTLog to evaluate the loop 'angular' correlation function. Called at the instantiation of the class. """
# self.rPow = self.r**0.5 * exp(np.einsum('n,s->ns', -self.Afft.Pow - 3., log(self.co.r)))
# def CoefrPow(self, Coef):
# """ Multiply the coefficients with the r's to the powers of the FFTLog to evaluate the 'angular' correlation function. """
# return np.einsum('n,ns->ns', Coef, self.rPow)
# def AngCoef(self, bird, window=None):
# return self.fft.Coef(bird.kin, bird.kin**-0.5 * bird.Pin, window=window)
# def makeA11(self, CoefrPow, bird):
# """ Perform the linear 'angular' correlation function matrix multiplications """
# bird.A11 = np.real(np.einsum('ns,ln->ls', CoefrPow, self.Mang11))
# def makeAct(self, CoefrPow, bird):
# """ Perform the counterterm 'angular' correlation function matrix multiplications """
# bird.Act = self.co.r**-2 * np.real(np.einsum('ns,ln->ls', CoefrPow, self.Mangct))
# def makeA22l(self, CoefrPow, bird):
# """ Perform the 22-loop 'angular' correlation function matrix multiplications """
# bird.A22l = np.real(np.einsum('ns,ms,blnm->lbs', CoefrPow, CoefrPow, self.Mang22, optimize=self.optipathA22l))
# def makeA13l(self, CoefrPow, bird):
# """ Perform the 13-loop 'angular' correlation function matrix multiplications """
# bird.A13l = np.real(np.einsum('ns,ms,blnm->lbs', CoefrPow, CoefrPow, self.Mang13, optimize=self.optipathA13l))
# def Ang(self, bird, window=None):
# coef = self.AngCoef(bird, window=.2)
# coefrPow = self.CoefrPow(coef)
# self.makeA11(coefrPow, bird)
# self.makeAct(coefrPow, bird)
# self.makeA22l(coefrPow, bird)
# self.makeA13l(coefrPow, bird)
def M13a(n1):
    """ Common part of the 13-loop matrices """
    numerator = np.tan(n1 * pi)
    denominator = 14. * pi * n1 * (n1 - 1.) * (n1 - 2.) * (n1 - 3.)
    return numerator / denominator
def M22a(n1, n2):
    """ Common part of the 22-loop matrices """
    numerator = gamma(1.5 - n1) * gamma(1.5 - n2) * gamma(n1 + n2 - 1.5)
    denominator = 8. * pi**1.5 * gamma(n1) * gamma(n2) * gamma(3. - n1 - n2)
    return numerator / denominator
# specific part of the 13-loop matrices
# Maps the 13-loop term index b (0-9) to its rational coefficient as a
# function of the FFTLog power n1.  Presumably combined with the common
# factor M13a(n1) elsewhere to assemble the full 13-loop matrices -- the
# combination is not visible in this section; confirm against the caller.
M13b = {
    0: lambda n1: 1.125,
    1: lambda n1: -(1 / (1 + n1)),
    2: lambda n1: 2.25,
    3: lambda n1: (3 * (-1 + 3 * n1)) / (4. * (1 + n1)),
    4: lambda n1: -(1 / (1 + n1)),
    5: lambda n1: -9 / (4 + 4 * n1),
    6: lambda n1: (9 + 18 * n1) / (4 + 4 * n1),
    7: lambda n1: (3 * (-5 + 3 * n1)) / (8. * (1 + n1)),
    8: lambda n1: -9 / (4 + 4 * n1),
    9: lambda n1: (9 * n1) / (4. + 4 * n1),
}
# specific part of the 22-loop matrices
M22b = {
0: lambda n1, n2: (6 + n1**4 * (4 - 24 * n2) - 7 * n2 + 8 * n1**5 * n2 - 13 * n2**2 + 4 * n2**3 + 4 * n2**4 + n1**2 * (-13 + 38 * n2 + 12 * n2**2 - 8 * n2**3) + 2 * n1**3 * (2 - 5 * n2 - 4 * n2**2 + 8 * n2**3) + n1 * (-7 - 6 * n2 + 38 * n2**2 - 10 * n2**3 - 24 * n2**4 + 8 * n2**5)) / (4. * n1 * (1 + n1) * (-1 + 2 * n1) * n2 * (1 + n2) * (-1 + 2 * n2)),
1: lambda n1, n2: (-18 + n1**2 * (1 - 11 * n2) - 12 * n2 + n2**2 + 10 * n2**3 + 2 * n1**3 * (5 + 7 * n2) + n1 * (-12 - 38 * n2 - 11 * n2**2 + 14 * n2**3)) / (7. * n1 * (1 + n1) * n2 * (1 + n2)),
2: lambda n1, n2: (-3 * n1 + 2 * n1**2 + n2 * (-3 + 2 * n2)) / (n1 * n2),
3: lambda n1, n2: (-4 * (-24 + n2 + 10 * n2**2) + 2 * n1 * (-2 + 51 * n2 + 21 * n2**2) + n1**2 * (-40 + 42 * n2 + 98 * n2**2)) / (49. * n1 * (1 + n1) * n2 * (1 + n2)),
4: lambda n1, n2: (4 * (3 - 2 * n2 + n1 * (-2 + 7 * n2))) / (7. * n1 * n2),
5: lambda n1, n2: 2,
6: lambda n1, n2: ((-3 + 2 * n1 + 2 * n2) * (-2 + 3 * n2 + 4 * n1**4 * n2 + 3 * n2**2 - 2 * n2**3 + n1**3 * (-2 - 2 * n2 + 4 * n2**2) + n1**2 * (3 - 10 * n2 - 4 * n2**2 + 4 * n2**3) + n1 * (3 + 2 * n2 - 10 * n2**2 - 2 * n2**3 | |
policyType, name):
"""
The MonitorPolicy is initialized with simply a policy type and a name.
There are two policy types: 'fabric' and 'access'. The 'fabric'
monitoring policies can be applied to certain MonitorTarget types and
'access' monitoring policies can be applied to other MonitorTarget
types. Initially however, both policies can have l1PhysIf as targets.
        A name must be specified because it is used to build the distinguishing
name (dn) along with the policyType in the APIC. The dn for "fabric"
policies will be /uni/fabric/monfabric-[name] and for "access" policies
it will be /uni/infra/moninfra-[name] in the APIC.
:param policyType: String specifying whether this is a fabric or\
access policy
:param name: String specifying a name for the policy.
"""
policyTypeEnum = ['fabric', 'access']
if policyType not in policyTypeEnum:
raise ValueError('Policy Type must be one of:', policyTypeEnum)
self.name = name
self.policyType = policyType
self.descr = ''
self.collection_policy = {}
self.monitor_target = {}
# assume that it has not been written to APIC. This is cleared if the
# policy is just loaded from APIC or the policy is written to the APIC.
self.modified = True
@classmethod
def get(cls, session):
"""
get() will get all of the monitor policies from the APIC and return
them as a list. It will get both fabric and access (infra) policies
including default policies.
:param session: the instance of Session used for APIC communication
:returns: List of MonitorPolicy objects
"""
result = []
aciObjects = cls._getClass(session, 'monInfraPol')
for data in aciObjects:
name = str(data['monInfraPol']['attributes']['name'])
policyObject = MonitorPolicy('access', name)
policyObject.set_description(data['monInfraPol']['attributes']['descr'])
cls._getPolicy(policyObject, session,
data['monInfraPol']['attributes']['dn'])
result.append(policyObject)
aciObjects = cls._getClass(session, 'monFabricPol')
for data in aciObjects:
name = str(data['monFabricPol']['attributes']['name'])
policyObject = MonitorPolicy('fabric', name)
policyObject.set_description(data['monFabricPol']['attributes']['descr'])
cls._getPolicy(policyObject, session,
data['monFabricPol']['attributes']['dn'])
result.append(policyObject)
return result
@staticmethod
def _getClass(session, aciClass):
"""
Get the class from the APIC
:param session: Session object instance
:param aciClass: string containing classname
:return: JSON dictionary containing class instances
"""
prefix = '/api/node/class/'
suffix = '.json?query-target=self'
class_query_url = prefix + aciClass + suffix
ret = session.get(class_query_url)
data = ret.json()['imdata']
return data
@classmethod
def _getPolicy(cls, policyObject, session, dn):
"""
Get the policy
:param policyObject: policyObject
:param session: Session class instance
:param dn: string containing the distinguished name
:return: None
"""
children = cls._getChildren(session, dn)
for child in children:
if child[0] == 'statsHierColl':
granularity = str(child[1]['attributes']['granularity'])
adminState = str(child[1]['attributes']['adminState'])
retention = str(child[1]['attributes']['histRet'])
collPolicy = CollectionPolicy(policyObject, granularity,
retention, adminState)
collPolicy.set_name(child[1]['attributes']['name'])
collPolicy.set_description(child[1]['attributes']['descr'])
if child[0] in ['monFabricTarget', 'monInfraTarget']:
scope = str(child[1]['attributes']['scope'])
# initially only l1PhysIf is supported as a target
if scope == 'l1PhysIf':
target = MonitorTarget(policyObject, scope)
target.set_name(str(child[1]['attributes']['name']))
target.set_description(str(child[1]['attributes']['descr']))
dn = child[1]['attributes']['dn']
targetChildren = cls._getChildren(session, dn)
for targetChild in targetChildren:
if targetChild[0] == 'statsReportable':
scope = str(targetChild[1]['attributes']['scope'])
scope = MonitorStats.statsDictionary[scope]
statFamily = MonitorStats(target, scope)
child_attr = targetChild[1]['attributes']
statFamily.set_name(str(child_attr['name']))
statFamily.set_description(str(child_attr['name']))
dn = targetChild[1]['attributes']['dn']
statChildren = cls._getChildren(session, dn)
for statChild in statChildren:
if statChild[0] == 'statsColl':
child_stats = statChild[1]['attributes']
granularity = str(child_stats['granularity'])
adminState = str(child_stats['adminState'])
retention = str(child_stats['histRet'])
collPolicy = CollectionPolicy(statFamily,
granularity,
retention,
adminState)
collPolicy.set_name(child_stats['name'])
collPolicy.set_description(child_stats['descr'])
if targetChild[0] == 'statsHierColl':
child_attr = targetChild[1]['attributes']
granularity = str(child_attr['granularity'])
adminState = str(child_attr['adminState'])
retention = str(child_attr['histRet'])
collPolicy = CollectionPolicy(target,
granularity,
retention,
adminState)
collPolicy.set_name(child_attr['name'])
collPolicy.set_description(child_attr['descr'])
@classmethod
def _getChildren(cls, session, dn):
"""
Get the children
:param session: Session instance object
:param dn: string containing the distinguished name
:return: json dictionary containing the children objects
"""
result = []
mo_query_url = '/api/mo/' + dn + '.json?query-target=children'
ret = session.get(mo_query_url)
mo_data = ret.json()['imdata']
for node in mo_data:
for key in node:
result.append((key, node[key]))
return result
def __str__(self):
"""
Return print string.
"""
return self.policyType + ':' + self.name
    def flat(self, target='l1PhysIf'):
        """
        This method will return a data structure that is a flattened version
        of the monitor policy. The flattened version is one that walks through
        the hierarchy of the policy and determines the administrative state and
        retention policy for each granularity of each statistics family.
        This is done for the target specified, i.e. 'l1PhysIf'

        For example, if 'foo' is a MonitorPolicy object, then
        flatPol = foo.flat('l1PhysIf') will return a dictionary that looks like
        the following:

        adminState = flatPol['counter_family']['granularity'].adminState
        retention = flatPol['counter_family']['granularity'].retention

        The dictionary will have all of the counter families for all of the
        granularities and the value returned is the administrative state and
        retention value that is the final result of resolving the policy
        hierarchy.

        :param target: APIC target object. This will default to 'l1PhysIf'
        :returns: Dictionary of statistic administrative state and retentions
                  indexed by counter family and granularity.
        """
        class Policy(object):
            """
            Policy class: record holding the resolved adminState and
            retention for one (statistics family, granularity) pair.
            """
            def __init__(self):
                # defaults used when no level of the hierarchy specifies
                # a value
                self.adminState = 'disabled'
                self.retention = 'none'
        result = {}
        # initialize data structure
        for statFamily in MonitorStats.statsFamilyEnum:
            result[statFamily] = {}
            for granularity in CollectionPolicy.granularityEnum:
                result[statFamily][granularity] = Policy()
        # walk through the policy hierarchy and over-ride each
        # policy with the more specific one
        # level 1: policy-wide collection policies apply to every family
        for granularity in self.collection_policy:
            retention = self.collection_policy[granularity].retention
            adminState = self.collection_policy[granularity].adminState
            for statFamily in MonitorStats.statsFamilyEnum:
                result[statFamily][granularity].adminState = adminState
                result[statFamily][granularity].retention = retention
        # now go through monitor targets
        # level 2: target-level policies override unless marked 'inherited'
        targetPolicy = self.monitor_target[target]
        for granularity in targetPolicy.collection_policy:
            retention = targetPolicy.collection_policy[granularity].retention
            adminState = targetPolicy.collection_policy[granularity].adminState
            for statFamily in MonitorStats.statsFamilyEnum:
                if adminState != 'inherited':
                    result[statFamily][granularity].adminState = adminState
                if retention != 'inherited':
                    result[statFamily][granularity].retention = retention
        # level 3: per-statistics-family policies are the most specific
        target_stats = targetPolicy.monitor_stats
        for statFamily in target_stats:
            collection_pol = target_stats[statFamily].collection_policy
            for granularity in collection_pol:
                retention = collection_pol[granularity].retention
                adminState = collection_pol[granularity].adminState
                if adminState != 'inherited':
                    result[statFamily][granularity].adminState = adminState
                if retention != 'inherited':
                    result[statFamily][granularity].retention = retention
        # if the lesser granularity is disabled, then the larger granularity
        # is as well: once a disabled granularity is seen, every later
        # entry of granularityEnum is forced to 'disabled' (this relies on
        # granularityEnum being ordered from lesser to larger granularity)
        for statFamily in MonitorStats.statsFamilyEnum:
            disable_found = False
            for granularity in CollectionPolicy.granularityEnum:
                if result[statFamily][granularity].adminState == 'disabled':
                    disable_found = True
                if disable_found:
                    result[statFamily][granularity].adminState = 'disabled'
        return result
class MonitorTarget(BaseMonitorClass):
    """
    This class is a child of a MonitorPolicy object. It is used to specify a
    scope for applying a monitoring policy. An example scope would be the
    Interface class, meaning that the monitoring policies specified here will
    apply to all Interface class objects (l1PhysIf in the APIC) that use the
    parent MonitoringPolicy as their monitoring policy.

    Children of the MonitorTarget will be CollectionPolicy objects that define
    the collection policy for the specified target plus optional MonitorStats
    objects that allow finer grained control over specific families of
    statistics such as ingress packets, ingrPkts.

    The CollectionPolicy children are contained in a dictionary called
    "collection_policy" that is indexed by the granularity of the
    CollectionPolicy, e.g. '5min', '15min', etc.

    The MonitorStats children are contained in a dictionary called
    "monitor_stats" that is indexed by the name of the statistics family,
    e.g. 'ingrBytes', 'ingrPkts', etc.
    """
    def __init__(self, parent, target):
        """
        The MonitorTarget object is initialized with a parent of type
        MonitorPolicy, and a target string. Initially, this toolkit only
        supports a target of type 'l1PhysIf'. The 'l1PhysIf' target is a
        layer 1 physical interface or "port". The MonitorTarget will narrow
        the scope of the policy specified by the children of the
        MonitorTarget to be only the target class.

        :param parent: Parent object that this monitor target is a child of.
                       It must be of type MonitorPolicy
        :param target: String specifying the target class for the Monitor
                       policy.
        """
        targetEnum = ['l1PhysIf']
        # isinstance (rather than an exact type comparison) also accepts
        # subclasses of MonitorPolicy
        if not isinstance(parent, MonitorPolicy):
            raise TypeError(('Parent of MonitorTarget must be one of type'
                             ' MonitorPolicy'))
        if target not in targetEnum:
            raise ValueError('target must be one of:', targetEnum)
        self._parent = parent
        self.scope = target
        self.descr = ''
        self.name = ''
        # register this target with the parent policy
        self._parent.add_target(self)
        self.collection_policy = {}
        self.monitor_stats = {}
        # assume that it has not been written to APIC.
        # This is cleared if the policy is just loaded from APIC
        # or the policy is written to the APIC.
        self.modified = True

    def __str__(self):
        return self.scope
class MonitorStats(BaseMonitorClass):
"""
This class is a child of a MonitorTarget object. It is used to specify
a scope for applying a monitoring policy that is more fine grained than
the MonitorTarget. Specifically, the MonitorStats object specifies a
statistics family such as "ingress packets" or "egress bytes".
"""
statsDictionary = {'eqptEgrBytes': 'egrBytes',
'eqptEgrPkts': 'egrPkts',
'eqptEgrTotal': 'egrTotal',
'eqptEgrDropPkts': 'egrDropPkts',
'eqptIngrBytes': 'ingrBytes',
'eqptIngrPkts': 'ingrPkts',
'eqptIngrTotal': 'ingrTotal',
| |
# <gh_stars>1-10
import matplotlib
# select the non-interactive PostScript backend before any pylab import
matplotlib.use('PS')
# global figure cosmetics: LaTeX text rendering plus font/tick sizing
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.2)
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['legend.handlelength'] = 5
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
import pylab as P
import os
import numpy as N
from matplotlib import cm
#Sami's repository
import db.sqlite as sq
import astronomy.hess_plot as h
import astronomy.datamanipulation as dm
def plot_tmerge(query1, query2,
                xlabel, ylabel,
                output, out_folder,
                pmin=0.05, pmax=1.0,
                xbin1=15, ybin1=15,
                xbin2=15, ybin2=15,
                y1ticks=[0, .4, .8, 1.2, 1.6, 2.0, 2.5, 3],
                y2ticks=[.4, .8, 1.2, 1.6, 2.0, 2.5],
                xmin1=7.9, xmax1=11.7,
                xmin2=7.9, xmax2=11.7,
                ymin=0.0, ymax=3.0,
                scatters=False, mean=False):
    """Plot two-panel merger-time (tmerge) hess diagrams and print stats.

    Runs query1 (all galaxies) and query2 (S_250 > 5 mJy sample) against
    the SQLite database named by the module-level globals ``path`` and
    ``db`` (NOTE(review): both must be defined elsewhere in this script
    before calling -- confirm), prints merger-time summary statistics,
    and saves the figure to out_folder + output.

    Each query must return two columns: x and y (merger time); negative
    y values are treated as "never merged".

    NOTE(review): ``y1ticks``/``y2ticks`` are mutable default arguments
    (shared between calls) and ``scatters``/``mean`` are unused here.
    """
    #get data, all galaxies
    data = sq.get_data_sqliteSMNfunctions(path, db, query1)
    xd1 = data[:, 0]
    yd1 = data[:, 1]
    #get data, S_250 > 5 mJy
    data = sq.get_data_sqliteSMNfunctions(path, db, query2)
    xd2 = data[:, 0]
    yd2 = data[:, 1]
    #the fraction of no mergers? (negative tmerge encodes "no merger")
    nm1 = len(yd1[yd1 < 0.0]) / float(len(yd1)) * 100.
    nm2 = len(yd2[yd2 < 0.0]) / float(len(yd2)) * 100.
    #print out some statistics
    print len(yd2)
    print 'Mean tmerge of all galaxies', N.mean(yd1[yd1 > 0.0])
    print 'Mean tmerge of SPIRE detected galaxies', N.mean(yd2[yd2 > 0.0])
    print
    print 'Max tmerge of all galaxies', N.max(yd1[yd1 > 0.0])
    print 'Max tmerge of SPIRE detected galaxies', N.max(yd2[yd2 > 0.0])
    print
    print 'Fraction of all galaxies that have experienced a merger', 100. - nm1
    print 'Fraction of SPIRE that have experienced a merger', 100. - nm2
    #calculate 2D probability fields
    sd1, sdmin1, sdmax1 = h.hess_plot(xd1, yd1, N.ones(len(xd1)),
                                      xmin1, xmax1, xbin1,
                                      ymin, ymax, ybin1,
                                      pmax=pmax, pmin=pmin)
    sd2, sdmin2, sdmax2 = h.hess_plot(xd2, yd2, N.ones(len(xd2)),
                                      xmin2, xmax2, xbin2,
                                      ymin, ymax, ybin2,
                                      pmax=pmax, pmin=pmin)
    #make the figure
    fig = P.figure()
    fig.subplots_adjust(wspace=0.0, hspace=0.01, left=0.08, bottom=0.07,
                        right=0.97, top=0.93)
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    # NOTE(review): interpolation=None selects matplotlib's *default*
    # interpolation; the string 'none' disables it -- confirm which is meant
    ims = ax1.imshow(sd1, vmin=sdmin1, vmax=sdmax1,
                     origin='lower', cmap=cm.gray,
                     interpolation=None,
                     extent=[xmin1, xmax1, ymin, ymax],
                     aspect='auto', alpha=1)
    ims = ax2.imshow(sd2, vmin=sdmin2, vmax=sdmax2,
                     origin='lower', cmap=cm.gray,
                     interpolation=None,
                     extent=[xmin2, xmax2, ymin, ymax],
                     aspect='auto', alpha=1)
    # overlay the individual SPIRE-detected galaxies on the right panel
    ax2.scatter(xd2, yd2, s=7, marker='o',
                color='blue')
    #percentiles
    xbin_midd1, y50d1, y16d1, y84d1 = dm.percentile_bins(xd1,
                                                         yd1,
                                                         xmin1,
                                                         xmax1,
                                                         nxbins=xbin1)
    md1 = (y50d1 >= 0) & (y16d1 >= 0) & (y84d1 >= 0)
    xbin_midd2, y50d2, y16d2, y84d2 = dm.percentile_bins(xd2,
                                                         yd2,
                                                         xmin2,
                                                         xmax2,
                                                         nxbins=xbin2)
    # NOTE(review): md1 requires all three percentiles valid (&) while md2
    # accepts any valid percentile (|) -- confirm the asymmetry is intended
    md2 = (y50d2 >= 0) | (y16d2 >= 0) | (y84d2 >= 0)
    # median (solid) and 16/84 percentile (dashed) curves
    ax1.plot(xbin_midd1[md1], y50d1[md1], 'r-')
    ax1.plot(xbin_midd1[md1], y16d1[md1], 'r--')
    ax1.plot(xbin_midd1[md1], y84d1[md1], 'r--')
    ax2.plot(xbin_midd2[md2], y50d2[md2], 'r-')
    ax2.plot(xbin_midd2[md2], y16d2[md2], 'r--')
    ax2.plot(xbin_midd2[md2], y84d2[md2], 'r--')
    #add text
    P.text(0.5, 0.93, 'All galaxies\n$2 \leq z < 4$',
           horizontalalignment='center',
           verticalalignment='center',
           transform=ax1.transAxes)
    P.text(0.5, 0.96, '$S_{250} > 5$ mJy',
           horizontalalignment='center',
           verticalalignment='center',
           transform=ax2.transAxes)
    #labels
    ax1.set_xlabel(xlabel)
    ax2.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    #limits
    ax1.set_ylim(ymin, ymax)
    ax2.set_ylim(ymin, ymax)
    ax1.set_xlim(xmin1, xmax1)
    ax2.set_xlim(xmin2, xmax2)
    #yticks (right panel shares the y axis; first xtick dropped to avoid
    #overlap with the left panel)
    ax2.set_yticks([])
    ax2.set_xticks(ax2.get_xticks()[1:])
    ax1.set_yticks(y1ticks)
    ax2.set_yticks(y2ticks)
    #make grid
    #ax1.grid()
    #ax2.grid()
    P.savefig(out_folder + output)
def plot_tmerge_bluest(query1, query2,
                       xlabel, ylabel,
                       output, out_folder,
                       pmin=0.05, pmax=1.0,
                       xbin1=15, ybin1=15,
                       xbin2=15, ybin2=15,
                       y1ticks=[0, .4, .8, 1.2, 1.6, 2.0],
                       y2ticks=[0.02, 0.04, 0.06, 0.08],
                       xmin1=7.9, xmax1=11.7,
                       xmin2=7.9, xmax2=11.7,
                       ymin1=0.0, ymax1=2.0,
                       ymin2=0.0, ymax2=0.1,
                       scatters=False, mean=False):
    """Two-panel Hess diagram of merger time: all vs. SPIRE-detected blue
    galaxies.

    Pulls (x, y) pairs with ``query1`` (left panel, all galaxies) and
    ``query2`` (right panel, S_250 > 5 mJy and blue in F775W-F850lp) from
    the module-level database (globals ``path`` and ``db``), prints merger
    statistics, draws the 2D probability fields with median and 16th/84th
    percentile curves, and saves the figure to ``out_folder + output``.

    NOTE(review): ``out_folder`` shadows the module-level global of the
    same name.  ``scatters`` and ``mean`` are accepted but never used.
    The mutable list defaults (y1ticks/y2ticks) are shared across calls
    but never mutated here, so they are harmless.
    """
    #get data, all galaxies
    data = sq.get_data_sqliteSMNfunctions(path, db, query1)
    xd1 = data[:, 0]
    yd1 = data[:, 1]
    #get data, S_250 > 5 mJy
    data = sq.get_data_sqliteSMNfunctions(path, db, query2)
    xd2 = data[:, 0]
    yd2 = data[:, 1]
    # Negative tmerge encodes "never merged"; express as a percentage.
    nm1 = len(yd1[yd1 < 0.0]) / float(len(yd1)) * 100.
    nm2 = len(yd2[yd2 < 0.0]) / float(len(yd2)) * 100.
    #print out some statistics
    print len(yd2)
    print 'Mean tmerge of all galaxies', N.mean(yd1[yd1 > 0.0])
    print 'Mean tmerge of SPIRE detected galaxies', N.mean(yd2[yd2 > 0.0])
    print
    print 'Max tmerge of all galaxies', N.max(yd1[yd1 > 0.0])
    print 'Max tmerge of SPIRE detected galaxies', N.max(yd2[yd2 > 0.0])
    print
    print 'Fraction of all galaxies that have experienced a merger', 100. - nm1
    print 'Fraction of SPIRE that have experienced a merger', 100. - nm2
    #calculate 2D probability fields
    sd1, sdmin1, sdmax1 = h.hess_plot(xd1, yd1, N.ones(len(xd1)),
                                      xmin1, xmax1, xbin1,
                                      ymin1, ymax1, ybin1,
                                      pmax=pmax, pmin=pmin)
    sd2, sdmin2, sdmax2 = h.hess_plot(xd2, yd2, N.ones(len(xd2)),
                                      xmin2, xmax2, xbin2,
                                      ymin2, ymax2, ybin2,
                                      pmax=pmax, pmin=pmin)
    #make the figure
    fig = P.figure()
    fig.subplots_adjust(wspace=0.15, hspace=0.01, left=0.08, bottom=0.07,
                        right=0.97, top=0.93)
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    # NOTE(review): interpolation=None means "use the matplotlib default",
    # not "no interpolation" (that would be interpolation='none') -- confirm
    # which was intended.
    ims = ax1.imshow(sd1, vmin=sdmin1, vmax=sdmax1,
                     origin='lower', cmap=cm.gray,
                     interpolation=None,
                     extent=[xmin1, xmax1, ymin1, ymax1],
                     aspect='auto', alpha=1)
    ims = ax2.imshow(sd2, vmin=sdmin2, vmax=sdmax2,
                     origin='lower', cmap=cm.gray,
                     interpolation=None,
                     extent=[xmin2, xmax2, ymin2, ymax2],
                     aspect='auto', alpha=1)
    # Overplot the individual SPIRE-detected points on the right panel.
    ax2.scatter(xd2, yd2, s=7, marker='o',
                color='blue')
    #percentiles
    xbin_midd1, y50d1, y16d1, y84d1 = dm.percentile_bins(xd1,
                                                         yd1,
                                                         xmin1,
                                                         xmax1,
                                                         nxbins=xbin1)
    # Left panel keeps bins where all three percentiles are defined (&);
    # the right panel keeps bins where any is defined (|).
    # NOTE(review): the &/| asymmetry looks deliberate but is unexplained.
    md1 = (y50d1 >= -10) & (y16d1 >= -10) & (y84d1 >= -10)
    # NOTE(review): the right-panel percentile curves use a hard-coded
    # x range (-0.45, 0.4) and 8 bins instead of xmin2/xmax2/xbin2.
    xbin_midd2, y50d2, y16d2, y84d2 = dm.percentile_bins(xd2,
                                                         yd2,
                                                         -0.45,
                                                         0.4,
                                                         nxbins=8)
    #                                                    nxbins = xbin2)
    md2 = (y50d2 >= -10) | (y16d2 >= -10) | (y84d2 >= -10)
    ax1.plot(xbin_midd1[md1], y50d1[md1], 'r-')
    ax1.plot(xbin_midd1[md1], y16d1[md1], 'r--')
    ax1.plot(xbin_midd1[md1], y84d1[md1], 'r--')
    ax2.plot(xbin_midd2[md2], y50d2[md2], 'r-')
    ax2.plot(xbin_midd2[md2], y16d2[md2], 'r--')
    ax2.plot(xbin_midd2[md2], y84d2[md2], 'r--')
    #add text
    P.text(0.5, 0.93, 'All galaxies\n$2 \leq z < 4$',
           horizontalalignment='center',
           verticalalignment='center',
           transform=ax1.transAxes)
    P.text(0.5, 0.93, '$S_{250} > 5$ mJy\n$F775W - F850lp < 0.2$',
           horizontalalignment='center',
           verticalalignment='center',
           transform=ax2.transAxes)
    #labels
    ax1.set_xlabel(xlabel)
    ax2.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)
    #limits
    ax1.set_ylim(ymin1, ymax1)
    ax2.set_ylim(ymin2, ymax2)
    ax1.set_xlim(xmin1, xmax1)
    ax2.set_xlim(xmin2, xmax2)
    # Drop the first x tick of the right panel so it does not collide with
    # the left panel's last tick label.
    ax2.set_xticks(ax2.get_xticks()[1:])
    ax1.set_yticks(y1ticks)
    ax2.set_yticks(y2ticks)
    #make grid
    ax1.grid()
    ax2.grid()
    P.savefig(out_folder + output)
if __name__ == '__main__':
#find the home directory, because the output is to dropbox
#and my user name is not always the same, this hack is required.
hm = os.getenv('HOME')
#constants
#path = hm + '/Dropbox/Research/Herschel/runs/reds_zero_dust_evolve/'
path = hm + '/Research/Herschel/runs/big_volume/'
out_folder = hm + '/Dropbox/Research/Herschel/plots/mergers/big/'
db = 'sams.db'
print 'Begin plotting'
print 'Input DB: ', path + db
print 'Output folder: ', out_folder
# query1 = '''select galprop.mstar, galprop.tmerge
# from FIR, galprop where
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id and
# FIR.spire250_obs > 1e-18 and
# FIR.spire250_obs < 1e6
# '''
# query2 = '''select galprop.mstar, galprop.tmerge
# from FIR, galprop where
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id and
# FIR.spire250_obs > 5e-3 and
# FIR.spire250_obs < 1e6
# '''
# plot_tmerge(query1, query2, r'$\log(M_{\star}/M_{\odot})$',
# '$T_{\mathrm{merge}}$ \quad [Gyr]', 'TmergeStellarMass.ps',
# out_folder, xmin1 = 8.2, xmax1 = 11.8,
# xmin2 = 10.1, xmax2 = 11.65,
# pmin = 0.05,
# xbin1 = 12, ybin1 = 10,
# xbin2 = 10, ybin2 = 10)
#
###############################
query1 = '''select galprop.mcold - galprop.mstar, galprop.tmerge
from FIR, galprop where
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.spire250_obs > 1e-18 and
FIR.spire250_obs < 1e6 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id
'''
query2 = '''select galprop.mcold - galprop.mstar, galprop.tmerge
from FIR, galprop where
FIR.spire250_obs > 5e-3 and
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.spire250_obs < 1e6 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id
'''
xlab = r'$\log_{10} \left( \frac{M_{\mathrm{coldgas}}}{M_{\star}} \right )$'
plot_tmerge(query1, query2, xlab,
'$T_{\mathrm{merge}} \quad [\mathrm{Gyr}]$', 'TmergeMassFraction.ps',
out_folder,
xmin1=-6, xmax1=1.9,
xmin2=-0.9, xmax2=.8,
pmin=0.05,
xbin1=11, ybin1=10,
xbin2=9, ybin2=9)
###############################
# query1 = '''select galprop.mstardot, galprop.tmerge
# from FIR, galprop where
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id and
# FIR.spire250_obs > 1e-18 and
# FIR.spire250_obs < 1e6
# '''
# query2 = '''select galprop.mstardot, galprop.tmerge
# from FIR, galprop where
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id and
# FIR.spire250_obs > 5e-3 and
# FIR.spire250_obs < 1e6
# '''
#
# xlab = r'$\dot{M}_{\star}$ [$M_{\odot}$ Gyr$^{-1}$]'
# plot_tmerge(query1, query2, xlab,
# '$T_{\mathrm{merge}}$ \quad [Gyr]', 'TmergeSFR.ps',
# out_folder,
# xmin1 = 0., xmax1 = 1000.,
# xmin2 = 0., xmax2 = 1000.,
# pmin = 0.05,
# xbin1 = 10, ybin1 = 10,
# xbin2 = 9, ybin2 = 9)
#
###############################
# query1 = '''select SSFR(galprop.mstardot, galprop.mstar), galprop.tmerge
# from FIR, galprop where
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id and
# FIR.spire250_obs > 1e-18 and
# FIR.spire250_obs < 1e6
# '''
# query2 = '''select SSFR(galprop.mstardot, galprop.mstar), galprop.tmerge
# from FIR, galprop where
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id and
# FIR.spire250_obs > 5e-3 and
# FIR.spire250_obs < 1e6
# '''
#
# xlab = r'$\frac{\dot{M}_{\star}}{M_{\star}}$ [Gyr$^{-1}$]'
# plot_tmerge(query1, query2, xlab,
# '$T_{\mathrm{merge}}$ \quad [Gyr]', 'TmergeSSFR.ps',
# out_folder,
# xmin1 = -10., xmax1 = -7,
# xmin2 = -10, xmax2 = -7.5,
# pmin = 0.05,
# xbin1 = 10, ybin1 = 10,
# xbin2 = 9, ybin2 = 9)
#
############
# print '\nMoving to Major mergers...\n'
# query1 = '''select galprop.mstar, galprop.tmajmerge
# from FIR, galprop where
# FIR.z | |
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2012 <NAME>, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import *
import re
import types
# Precompiled validators: '#rrggbb' colors and 3-component vectors such as
# '(1.0 2 3.5)'.  NOTE(review): neither pattern is referenced by the
# converters visible here -- presumably used elsewhere in the module.
pattern_color = re.compile(r'#[0-9a-fA-F]{6}')
pattern_vector3D = re.compile(
    r'\([ ]*-?([0-9]+(\.[0-9]*)?|\.[0-9]+)([ ]+-?([0-9]+(\.[0-9]*)?|\.[0-9]+)){2}[ ]*\)')
def make_NCName(arg):
    """Escape characters that are illegal in an NCName.

    Colons and spaces are replaced by '_<hex ordinal>_' so the result can
    serve as an NCName fragment.
    """
    return arg.replace(':', '_%x_' % ord(':')).replace(' ', '_%x_' % ord(' '))
def cnv_anyURI(attribute, arg, element):
    """An xmlschema anyURI value; stored verbatim, no syntax check."""
    return str(arg)
def cnv_boolean(attribute, arg, element):
    """XML Schema Part 2: Datatypes Second Edition.

    A boolean attribute accepts the literals {true, false, 1, 0} (plus the
    lenient yes/no), normalised to 'true'/'false'.  Raises ValueError for
    anything else.
    """
    literal = str(arg).lower()
    if literal in ("0", "false", "no"):
        return "false"
    if literal in ("1", "true", "yes"):
        return "true"
    raise ValueError("'%s' not allowed as Boolean value for %s" % (
        str(arg), attribute))
# Potentially accept color values
def cnv_color(attribute, arg, element):
    """ A RGB color in conformance with §5.9.11 of [XSL], that is a RGB color in notation “#rrggbb”, where
    rr, gg and bb are 8-bit hexadecimal digits.
    """
    # NOTE(review): pattern_color exists but is not applied here; any value
    # is accepted verbatim.
    return str(arg)
def cnv_configtype(attribute, arg, element):
    """Validate a config:type value against the allowed type names."""
    value = str(arg)
    if value not in ("boolean", "short", "int", "long",
                     "double", "string", "datetime", "base64Binary"):
        raise ValueError("'%s' not allowed" % value)
    return value
def cnv_data_source_has_labels(attribute, arg, element):
    """Validate chart:data-source-has-labels (none/row/column/both)."""
    value = str(arg)
    if value not in ("none", "row", "column", "both"):
        raise ValueError("'%s' not allowed" % value)
    return value
# Understand different date formats
def cnv_date(attribute, arg, element):
    """ A dateOrDateTime value is either an [xmlschema-2] date value or an [xmlschema-2] dateTime
    value.
    """
    # No validation is performed; the value is stored as text.
    return str(arg)
def cnv_dateTime(attribute, arg, element):
    """ A dateOrDateTime value is either an [xmlschema-2] date value or an [xmlschema-2] dateTime
    value.
    """
    return str(arg)
def cnv_double(attribute, arg, element):
    """A double value; stored verbatim, no numeric validation."""
    return str(arg)
def cnv_duration(attribute, arg, element):
    """An xmlschema duration value; stored verbatim."""
    return str(arg)
def cnv_family(attribute, arg, element):
    """ A style family """
    value = str(arg)
    allowed = (
        "text", "paragraph", "section", "ruby", "table", "table-column",
        "table-row", "table-cell", "graphic", "presentation",
        "drawing-page", "chart")
    if value not in allowed:
        raise ValueError("'%s' not allowed" % value)
    return value
def __save_prefix(attribute, arg, element):
    """Return arg (as str) after resolving its namespace prefix, if any.

    Splits off the part before the first ':'; when there is no prefix the
    value is returned immediately.  Unknown prefixes are tolerated.
    """
    prefix = arg.split(':', 1)[0]
    if prefix == arg:
        return str(arg)
    namespace = element.get_knownns(prefix)
    if namespace is None:
        #raise ValueError, "'%s' is an unknown prefix" % str(prefix)
        return str(arg)
    # NOTE(review): the return value is unused, but get_nsprefix() may
    # register the namespace on the element as a side effect -- confirm
    # before removing this call.
    p = element.get_nsprefix(namespace)
    return str(arg)
def cnv_formula(attribute, arg, element):
    """ A string containing a formula. Formulas do not have a predefined syntax, but the string should
    begin with a namespace prefix, followed by a “:” (COLON, U+003A) separator, followed by the text
    of the formula. The namespace bound to the prefix determines the syntax and semantics of the
    formula.
    """
    # Delegate so the formula's namespace prefix gets resolved/registered.
    return __save_prefix(attribute, arg, element)
def cnv_ID(attribute, arg, element):
    """An XML ID value; stored verbatim as its string form."""
    return str(arg)
def cnv_IDREF(attribute, arg, element):
    """A reference to an XML ID; stored verbatim as its string form."""
    return str(arg)
def cnv_integer(attribute, arg, element):
    """An integer value; no validation, only string conversion."""
    return str(arg)
def cnv_legend_position(attribute, arg, element):
    """Validate a chart:legend-position value."""
    value = str(arg)
    allowed = ("start", "end", "top", "bottom",
               "top-start", "bottom-start", "top-end", "bottom-end")
    if value not in allowed:
        raise ValueError("'%s' not allowed" % value)
    return value
# Signed magnitude followed by one of the XSL measure units.
pattern_length = re.compile(
    r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)((cm)|(mm)|(in)|(pt)|(pc)|(px))')
def cnv_length(attribute, arg, element):
    """A (positive or negative) physical length, magnitude plus unit, per
    the Units of Measure defined in §5.9.13 of [XSL].

    Raises ValueError when the value does not start with a valid length.
    """
    if pattern_length.match(arg) is None:
        raise ValueError("'%s' is not a valid length" % arg)
    return arg
def cnv_lengthorpercent(attribute, arg, element):
    """Accept either a valid length (cnv_length) or a percentage
    (cnv_percent); raise ValueError when the value is neither.

    Rewritten: the previous implementation tracked a `failed` flag that
    was always True on the raising path, and ended with an unreachable
    `return arg`.
    """
    try:
        return cnv_length(attribute, arg, element)
    except BaseException:
        pass  # fall through and try the percent form
    try:
        return cnv_percent(attribute, arg, element)
    except BaseException:
        raise ValueError("'%s' is not a valid length or percent" % arg)
def cnv_metavaluetype(attribute, arg, element):
    """Validate a meta:value-type (float/date/time/boolean/string)."""
    value = str(arg)
    if value not in ("float", "date", "time", "boolean", "string"):
        raise ValueError("'%s' not allowed" % value)
    return value
def cnv_major_minor(attribute, arg, element):
    """Validate a chart grid class: must be 'major' or 'minor'.

    Bug fix: the function previously returned None even for valid input;
    it now returns the validated value like every other converter in this
    module.
    """
    if arg not in ('major', 'minor'):
        raise ValueError("'%s' is not either 'minor' or 'major'" % arg)
    return arg
# 'prefix:localname', e.g. 'of:=SUM(...)' formulas use this shape.
pattern_namespacedToken = re.compile(r'[0-9a-zA-Z_]+:[0-9a-zA-Z._\-]+')
def cnv_namespacedToken(attribute, arg, element):
    """Validate a 'prefix:localname' token and resolve its prefix."""
    global pattern_namespacedToken
    if not pattern_namespacedToken.match(arg):
        raise ValueError("'%s' is not a valid namespaced token" % arg)
    return __save_prefix(attribute, arg, element)
def cnv_NCName(attribute, arg, element):
    """ NCName is defined in http://www.w3.org/TR/REC-xml-names/#NT-NCName
    Essentially an XML name minus ':'

    Strings are escaped via make_NCName; any other object is assumed to be
    a style and its style:name attribute is used instead.
    """
    # Exact type check (not isinstance) preserved from the original: str
    # subclasses take the getAttrNS branch.
    if type(arg) is str:
        return make_NCName(arg)
    return arg.getAttrNS(STYLENS, 'name')
# Accepts either a style instance (preferred) or a plain string naming the
# style; a string must already have been converted to an NCName.  The
# string form is mainly used when rebuilding a structure from XML.
def cnv_StyleNameRef(attribute, arg, element):
    try:
        name = arg.getAttrNS(STYLENS, 'name')
    except BaseException:
        name = arg
    return name
# Accepts either a draw object (preferred) or a plain string naming it; a
# string must already have been converted to an NCName.  The string form
# is mainly used when rebuilding a structure from XML.
def cnv_DrawNameRef(attribute, arg, element):
    try:
        name = arg.getAttrNS(DRAWNS, 'name')
    except BaseException:
        name = arg
    return name
# Must accept list of Style objects
def cnv_NCNames(attribute, arg, element):
    """Join a sequence of NCNames into one space-separated string."""
    return ' '.join(arg)
def cnv_nonNegativeInteger(attribute, arg, element):
    """A nonNegativeInteger; no range check, only string conversion."""
    return str(arg)
pattern_percent = re.compile(r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)%')
def cnv_percent(attribute, arg, element):
    """Validate a percentage value such as '10%' or '-12.5%'.

    Bug fix: the error message previously claimed the value was "not a
    valid length" (copy-paste from cnv_length); it now names the actual
    expectation.
    """
    global pattern_percent
    if not pattern_percent.match(arg):
        raise ValueError("'%s' is not a valid percentage" % arg)
    return arg
# The strict ODF pattern allows only integer coordinate pairs (a relaxed
# float-accepting variant would use [0-9.] in place of [0-9]).
pattern_points = re.compile(r'-?[0-9]+,-?[0-9]+([ ]+-?[0-9]+,-?[0-9]+)*')
def cnv_points(attribute, arg, element):
    """Validate or serialise a points value.

    A string argument must match "x1,y1 x2,y2 ..." (integer pairs); any
    other argument is treated as an iterable of numeric 2-tuples and is
    serialised to that string form.
    """
    if type(arg) in (str,):
        if not pattern_points.match(arg):
            raise ValueError(
                "x,y are separated by a comma and the points are separated by white spaces")
        return arg
    try:
        return ' '.join(["%d,%d" % pair for pair in arg])
    except BaseException:
        raise ValueError(
            "Points must be string or [(0,0),(1,1)] - not %s" %
            arg)
def cnv_positiveInteger(attribute, arg, element):
    """A positiveInteger; no range check, only string conversion."""
    return str(arg)
def cnv_string(attribute, arg, element):
    """An arbitrary string value; stored as its string form."""
    return str(arg)
def cnv_textnoteclass(attribute, arg, element):
    """Validate a text:note-class value (footnote or endnote)."""
    value = str(arg)
    if value not in ("footnote", "endnote"):
        raise ValueError("'%s' not allowed" % value)
    return value
# Understand different time formats
def cnv_time(attribute, arg, element):
    """An xmlschema time value; stored verbatim."""
    return str(arg)
def cnv_token(attribute, arg, element):
    """An XML token; stored verbatim."""
    return str(arg)
# Exactly four whitespace-separated (optionally signed) integers.
pattern_viewbox = re.compile(r'-?[0-9]+([ ]+-?[0-9]+){3}$')
def cnv_viewbox(attribute, arg, element):
    """Validate an svg:viewBox value: four integers separated by spaces."""
    if pattern_viewbox.match(arg) is None:
        raise ValueError(
            "viewBox must be four integers separated by whitespaces")
    return arg
def cnv_xlinkshow(attribute, arg, element):
    """Validate an xlink:show value (new/replace/embed)."""
    value = str(arg)
    if value not in ("new", "replace", "embed"):
        raise ValueError("'%s' not allowed" % value)
    return value
attrconverters = {
((ANIMNS, 'audio-level'), None): cnv_double,
((ANIMNS, 'color-interpolation'), None): cnv_string,
((ANIMNS, 'color-interpolation-direction'), None): cnv_string,
((ANIMNS, 'command'), None): cnv_string,
((ANIMNS, 'formula'), None): cnv_string,
((ANIMNS, 'id'), None): cnv_ID,
((ANIMNS, 'iterate-interval'), None): cnv_duration,
((ANIMNS, 'iterate-type'), None): cnv_string,
((ANIMNS, 'name'), None): cnv_string,
((ANIMNS, 'sub-item'), None): cnv_string,
((ANIMNS, 'value'), None): cnv_string,
# ((DBNS,u'type'), None): cnv_namespacedToken,
((CHARTNS, 'attached-axis'), None): cnv_string,
((CHARTNS, 'class'), (CHARTNS, 'grid')): cnv_major_minor,
((CHARTNS, 'class'), None): cnv_namespacedToken,
((CHARTNS, 'column-mapping'), None): cnv_string,
((CHARTNS, 'connect-bars'), None): cnv_boolean,
((CHARTNS, 'data-label-number'), None): cnv_string,
((CHARTNS, 'data-label-symbol'), None): cnv_boolean,
((CHARTNS, 'data-label-text'), None): cnv_boolean,
((CHARTNS, 'data-source-has-labels'), None): cnv_data_source_has_labels,
((CHARTNS, 'deep'), None): cnv_boolean,
((CHARTNS, 'dimension'), None): cnv_string,
((CHARTNS, 'display-label'), None): cnv_boolean,
((CHARTNS, 'error-category'), None): cnv_string,
((CHARTNS, 'error-lower-indicator'), None): cnv_boolean,
((CHARTNS, 'error-lower-limit'), None): cnv_string,
((CHARTNS, 'error-margin'), None): cnv_string,
((CHARTNS, 'error-percentage'), None): cnv_string,
((CHARTNS, 'error-upper-indicator'), None): cnv_boolean,
((CHARTNS, 'error-upper-limit'), None): cnv_string,
((CHARTNS, 'gap-width'), None): cnv_string,
((CHARTNS, 'interpolation'), None): cnv_string,
((CHARTNS, 'interval-major'), None): cnv_string,
((CHARTNS, 'interval-minor-divisor'), None): cnv_string,
((CHARTNS, 'japanese-candle-stick'), None): cnv_boolean,
((CHARTNS, 'label-arrangement'), None): cnv_string,
((CHARTNS, 'label-cell-address'), None): cnv_string,
((CHARTNS, 'legend-align'), None): cnv_string,
((CHARTNS, 'legend-position'), None): cnv_legend_position,
((CHARTNS, 'lines'), None): cnv_boolean,
((CHARTNS, 'link-data-style-to-source'), None): cnv_boolean,
((CHARTNS, 'logarithmic'), None): cnv_boolean,
((CHARTNS, 'maximum'), None): cnv_string,
((CHARTNS, 'mean-value'), None): cnv_boolean,
((CHARTNS, 'minimum'), None): cnv_string,
((CHARTNS, 'name'), None): cnv_string,
((CHARTNS, 'origin'), None): cnv_string,
((CHARTNS, 'overlap'), None): cnv_string,
((CHARTNS, 'percentage'), None): cnv_boolean,
((CHARTNS, 'pie-offset'), None): cnv_string,
((CHARTNS, 'regression-type'), None): cnv_string,
((CHARTNS, 'repeated'), None): cnv_nonNegativeInteger,
((CHARTNS, 'row-mapping'), None): cnv_string,
((CHARTNS, 'scale-text'), None): cnv_boolean,
((CHARTNS, 'series-source'), None): cnv_string,
((CHARTNS, 'solid-type'), None): cnv_string,
((CHARTNS, 'spline-order'), None): cnv_string,
((CHARTNS, 'spline-resolution'), None): cnv_string,
((CHARTNS, 'stacked'), None): cnv_boolean,
((CHARTNS, 'style-name'), None): cnv_StyleNameRef,
((CHARTNS, 'symbol-height'), None): cnv_string,
((CHARTNS, 'symbol-name'), None): cnv_string,
((CHARTNS, 'symbol-type'), None): cnv_string,
((CHARTNS, 'symbol-width'), None): cnv_string,
((CHARTNS, 'text-overlap'), None): cnv_boolean,
((CHARTNS, 'three-dimensional'), None): cnv_boolean,
((CHARTNS, 'tick-marks-major-inner'), None): cnv_boolean,
| |
<gh_stars>0
import ply.yacc as yacc
from soda.helpers import open_file, flatten, str_to_int_or_float
from logging import getLogger
from soda.distributed_environment.behavior import Behavior, ActionNode, IfNode, EndIfNode, ElseNode
logger = getLogger(__name__)
class AlgorithmParser(object):
    # NOTE: with PLY/yacc the docstring of each p_* method IS the grammar
    # production -- the docstrings below are behaviour, not documentation.
    def p_algorithm(self, p):
        ''' algorithm : first_section second_section '''
    # The first section declares the terminating states of the algorithm.
    def p_first_section(self, p):
        ''' first_section : first_section_line
                          | first_section_line first_section '''
    def p_first_section_line(self, p):
        ''' first_section_line : TERM '=' term_list ';' '''
    def p_term_list(self, p):
        ''' term_list : term_term
                      | term_list ',' term_term '''
    def p_term_term(self, p):
        ''' term_term : IDENTIFIER '''
        # Record the identifier both as a terminating and as a known state.
        self.algorithm.term_states.append(p[1])
        self.all_states.append(p[1])
    # The second section of the algorithm consists of states.
    def p_second_section(self, p):
        # Every token in a rule has a numeric position, starting at 0; the
        # value at a position is accessed through the variable p.
        # p[0]              p[1]
        ''' second_section : states '''
    # Grammar rule in BNF form.  Each state defines its behaviors; the
    # IDENTIFIER token at position 1 is the state name.
    def p_states(self, p):
        ''' states : IDENTIFIER seen_state states_behaviors
                   | IDENTIFIER seen_state states_behaviors states '''
    def p_seen_state(self, p):
        ''' seen_state : '''
        # Code placed after the rule definition runs when the parser
        # reduces the rule.  p[-1] is the IDENTIFIER preceding this empty
        # marker rule.
        self.state = p[-1]
        self.all_states.append(self.state)
    # This recursive rule lets a single state carry several behaviors.
    def p_states_behaviors(self, p):
        ''' states_behaviors : behavior add_behaviors
                             | behavior states_behaviors '''
    def p_add_behaviors(self, p):
        ''' add_behaviors : '''
        # Store the collected behaviors of the current state into the
        # algorithm structure handed to the parser, then reset the buffer.
        self.algorithm.states_behaviors[self.state] = self.state_behaviors
        self.state_behaviors = {}
    # A behavior is an initiation event plus a body delimited by the
    # begin/end tokens; the body is a sequence of statements.
    def p_behavior(self, p):
        ''' behavior : initiation_event begin statements end '''
        # Two behaviors of one state must not share an initiation event.
        if p[1] in self.state_behaviors:
            logger.info("State with same initiation action already defined! -> {}".format(p[1]))
            exit()
        self.state_behaviors[p[1]] = self.behavior
        self.behavior = Behavior()
        self.jump_ids = 0
    # Assigning to p[0] propagates the value to the rule reduced next
    # (here the tuple/name is consumed by p_behavior as its p[1]).
    def p_initiation_event(self, p):
        ''' initiation_event : IMPULSE
                             | READ '(' read_arguments ')' '''
        p[0] = (p[1], self.read_arguments) if p[1] == 'READ' else p[1]
        self.read_arguments = ()
    def p_statements(self, p):
        ''' statements : statement
                       | statement statements '''
    # A behavior body may contain an action, a condition or an assignment.
    def p_statement(self, p):
        ''' statement : action
                      | if_statement
                      | assignment '''
    def p_if_statement(self, p):
        ''' if_statement : if condition if_seen then statements endif endif_seen
                         | if condition if_seen then statements else else_seen statements endif endif_seen '''
    def p_condition(self, p):
        ''' condition : condition '=' '=' condition
                      | condition '>' condition
                      | condition '<' condition
                      | condition '<' '=' condition
                      | condition '>' '=' condition
                      | condition '!' '=' condition
                      | condition and condition
                      | condition or condition
                      | '(' condition ')'
                      | number_expr
                      | IDENTIFIER
                      | LEN '(' IDENTIFIER ')'
                      | not IDENTIFIER '''
        # Pad the word operators with spaces so the eventual ''.join() of
        # the token list still yields valid Python ('a and b', not 'aandb').
        try:
            if p[1] == 'not':
                p[1] = ' not '
            elif p[2] == 'and':
                p[2] = ' and '
            elif p[2] == 'or':
                p[2] = ' or '
        except:
            # Single-token alternatives have no p[2]; the lookup failing is
            # expected and harmless.
            pass
        p[0] = p[:]
        # Collect the raw token list; p_if_seen flattens and joins it.
        self.condition.append(list(filter(lambda x: x is not None, p[1:])))
    def p_if_seen(self, p):
        ''' if_seen : '''
        # Flatten the most recently collected condition tokens into one
        # source string and emit an IfNode for it.
        c = flatten(self.condition[-1])
        c = list(filter(lambda x: x is not None, c))
        self.condition = ''.join(c)
        self.behavior.insert(IfNode(self.condition))
        self.condition = []
    def p_endif_seen(self, p):
        ''' endif_seen : '''
        # Jump ids pair up the If/Else/EndIf nodes at interpretation time.
        self.behavior.insert(EndIfNode(self.jump_ids))
        self.jump_ids += 1
    def p_else_seen(self, p):
        ''' else_seen : '''
        self.behavior.insert(ElseNode(self.jump_ids))
        self.jump_ids += 1
    # The actions an entity may perform.  Each reduction appends an
    # ActionNode to the behavior's linked list.
    def p_action(self, p):
        ''' action : SEND '(' send_arguments ')'
                   | BECOME '(' become_arguments ')'
                   | LOG '(' log_arguments ')'
                   | EXEC '(' exec_arguments ')'
                   | ADD '(' add_arguments ')'
                   | REMOVE '(' remove_arguments ')'
                   | POP '(' pop_arguments ')' '''
        if p[1] == 'BECOME':
            self.behavior.insert(ActionNode(p[1], p[3]))
        elif p[1] == 'SEND':
            # Message parts accumulated in self.send_arguments; p[3] is the
            # recipient identifier.
            self.behavior.insert(ActionNode(p[1], ('(' + ', '.join(self.send_arguments) + ')', p[3], )))
            self.send_arguments = ()
        elif p[1] == 'LOG':
            # Build 'str(a)+str(b)+...' so mixed types concatenate at runtime.
            self.behavior.insert(ActionNode(p[1], ('+'.join(['str(' + str(a) + ')' for a in self.log_arguments]), )))
            self.log_arguments = ()
        elif p[1] == 'EXEC':
            self.behavior.insert(ActionNode(p[1], p[3]))
        elif p[1] == 'ADD':
            self.behavior.insert(ActionNode(p[1], p[3]))
        elif p[1] == 'REMOVE':
            self.behavior.insert(ActionNode(p[1], p[3]))
        elif p[1] == 'POP':
            self.behavior.insert(ActionNode(p[1], p[3]))
    def p_pop_arguments(self, p):
        '''pop_arguments : IDENTIFIER ',' IDENTIFIER '''
        # The pop target must not be one of the reserved identifiers.
        if p[3] in self.special_identifiers:
            logger.info("Special identifier used as first argument! -> {}".format(p[1]))
            exit()
        p[0] = (p[1], p[3])
    def p_remove_arguments(self, p):
        ''' remove_arguments : IDENTIFIER ',' array_remove_value '''
        if p[1] in self.special_identifiers:
            logger.info("Special identifier used as first argument! -> {}".format(p[1]))
            exit()
        p[0] = (p[1], p[3])
    def p_array_remove_value(self, p):
        ''' array_remove_value : IDENTIFIER
                               | number_expr
                               | string_expr '''
        p[0] = p[1]
    def p_add_arguments(self, p):
        ''' add_arguments : IDENTIFIER ',' array_add_value '''
        if p[1] in self.special_identifiers:
            logger.info("Special identifier used as first argument! -> {}".format(p[1]))
            exit()
        p[0] = (p[1], p[3])
    def p_array_add_value(self, p):
        ''' array_add_value : number_expr
                            | string_expr
                            | IDENTIFIER '''
        p[0] = p[1]
    def p_exec_arguments(self, p):
        ''' exec_arguments : STRING ',' output_type ',' exec_output exec_input '''
        # (command, output type, output identifier or None, input identifier
        # or None)
        p[0] = (p[1], p[3], p[5] if p[5] is not None else None, p[6] if p[6] is not None else None)
    def p_output_type(self, p):
        ''' output_type : int
                        | string
                        | float '''
        p[0] = p[1]
    def p_exec_input(self, p):
        ''' exec_input : ',' IDENTIFIER
                       | NONE '''
        # With the NONE alternative there is no p[2]; fall back to p[1].
        try:
            p[0] = p[2]
        except:
            p[0] = p[1]
    def p_exec_output(self, p):
        ''' exec_output : IDENTIFIER '''
        p[0] = p[1]
    def p_read_arguments(self, p):
        ''' read_arguments : read_arg
                           | read_arg ',' read_arguments '''
    def p_read_arg(self, p):
        ''' read_arg : IDENTIFIER identifier_seen_read
                     | STRING string_seen_read
                     | number_expr number_seen_read '''
        # p[2] is a marker produced by the *_seen_read rules below telling
        # us what kind of token p[1] was.
        if p[2] == 'IDENTIFIER':
            self.read_arguments += ((p[1], p[2]),)
        elif p[2] == 'STRING':
            self.read_arguments += (p[1],)
        elif p[2] == 'number_expr':
            self.read_arguments += (str_to_int_or_float(p[1]),)
    def p_number_seen_read(self, p):
        ''' number_seen_read : '''
        p[0] = 'number_expr'
    def p_string_seen_read(self, p):
        ''' string_seen_read : '''
        p[0] = 'STRING'
    def p_identifier_seen_read(self, p):
        ''' identifier_seen_read : '''
        p[0] = 'IDENTIFIER'
    def p_send_arguments(self, p):
        ''' send_arguments : '(' message ')' ',' IDENTIFIER '''
        # Message parts accumulate in self.send_arguments; this rule's
        # value is only the recipient identifier.
        p[0] = p[5]
    def p_message(self, p):
        ''' message : message_part
                    | message_part ',' message '''
    def p_message_part(self, p):
        ''' message_part : STRING string_seen_read
                         | IDENTIFIER identifier_seen_read
                         | number_expr number_seen_read '''
        if p[2] == 'IDENTIFIER':
            self.send_arguments += (p[1],)
        elif p[2] == 'STRING':
            # Re-quote so the generated source keeps the string literal.
            self.send_arguments += ("'" + p[1] + "'",)
        elif p[2] == 'number_expr':
            self.send_arguments += (p[1],)
    def p_become_arguments(self, p):
        ''' become_arguments : IDENTIFIER '''
        p[0] = (p[1],)
        # Remember BECOME targets so they can be checked against the
        # declared states later.
        self.used_states.append(p[1])
    def p_log_arguments(self, p):
        ''' log_arguments : log_arg
                          | log_arg ',' log_arguments '''
    def p_log_arg(self, p):
        ''' log_arg : STRING
                    | IDENTIFIER
                    | NUMBER'''
        self.log_arguments += (p[1], )
    def p_NONE(self, p):
        ''' NONE : '''
        p[0] = None
    def p_assignemnt(self, p):
        # NOTE(review): the method name is misspelled ('assignemnt');
        # harmless to PLY, which only inspects the docstring rule.
        ''' assignment : IDENTIFIER '=' expression '''
        if p[1] in self.special_identifiers:
            logger.info("Special identifier used on left side of assignment! -> {}".format(p[1]))
            exit()
        # self.expression was produced by one of the *_seen_expression /
        # arithmetic_seen marker rules below.
        self.behavior.insert(ActionNode('ASSIGN', (p[1] + ' = ' + self.expression,)))
        self.expression = None
        self.arithmetic_expr = []
    def p_expression(self, p):
        ''' expression : arithmetic_expr arithmetic_seen
                       | string_expr string_seen_expression
                       | array_expr array_seen_expression
                       | boolean_expr boolean_seen_expression '''
    def p_boolean_expr(self, p):
        ''' boolean_expr : True
                         | False '''
        p[0] = p[1]
    def p_boolean_seen_expression(self, p):
        ''' boolean_seen_expression : '''
        # p[-1] is the value of the expression alternative just reduced.
        self.expression = p[-1]
    def p_array_seen_expression(self, p):
        ''' array_seen_expression : '''
        self.expression = p[-1]
    def p_array_expr(self, p):
        ''' array_expr : '[' ']' '''
        p[0] = '[]'
    def p_arithmetic_seen(self, p):
        '''arithmetic_seen : '''
        # Flatten the collected arithmetic tokens into one source string.
        ae = flatten(self.arithmetic_expr[-1])
        ae = list(filter(lambda x: x is not None, ae))
        self.expression = ''.join(ae)
    def p_string_seen_expression(self, p):
        '''string_seen_expression : '''
        self.expression = p[-1]
def p_arithmetic_expr(self, p):
''' arithmetic_expr : arithmetic_expr '+' arithmetic_expr
| arithmetic_expr '-' arithmetic_expr
| arithmetic_expr '*' arithmetic_expr
| arithmetic_expr '/' arithmetic_expr
| '(' arithmetic_expr ')'
| | |
<reponame>sweh/sw.allotmentclub.backend<gh_stars>1-10
# coding:utf8
from __future__ import unicode_literals
from .. import Member, BookingKind, User, Allotment
from ..direct_debit import DirectDebit
from ..log import user_data_log, log_with_user
from ..base import parse_date
from io import StringIO, BytesIO
from pyramid.decorator import reify
from pyramid.response import FileIter
import csv
import datetime
import decimal
import json
import pyramid.interfaces
import risclog.sqlalchemy.interfaces
import sqlalchemy
import sqlalchemy.sql.expression
import sw.allotmentclub
import sw.allotmentclub.base
import sw.allotmentclub.version
import transaction
import xlsxwriter
import openpyxl
import zope.component
import zope.interface
# Binary size-unit suffixes used by format_size (1024-based steps).
UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
# Maps MIME content types to the short display labels used by
# format_mimetype; unknown types fall back to the raw MIME string.
CONTENTTYPEENDING = {
    'image/jpg': 'JPG',
    'image/jpeg': 'JPG',
    'image/pjpeg': 'JPG',
    'image/gif': 'GIF',
    'image/tif': 'TIF',
    'image/png': 'PNG',
    'application/pdf': 'PDF',
    'application/x-unknown-application-pdf': 'PDF',
    'application/msword': 'Word',
    'application/vnd.ms-excel': 'Excel',
    'application/vnd.ms-powerpoint': 'Powerpoint',
    'application/x-zip-compressed': 'ZIP',
    'application/zip': 'ZIP',
    'audio/mpeg': 'MP3',
    'audio/mp3': 'MP3',
    'audio/mpg': 'MP3',
    'video/mpeg': 'MPEG',
    'video/x-ms-wmv': 'WMV',
    'video/mpg': 'MPG',
    'default': ''}
def route_url(route_name, request):
    """Return the configured URL path for the named pyramid route."""
    mapper = request.registry.getUtility(pyramid.interfaces.IRoutesMapper)
    path = mapper.get_route(route_name).path
    if route_name == 'booking_list':
        # Short route: we currently have only 1 backing account
        path = path.replace('{id}', '1')
    return path
def get_view_for_route(route_name, request):
    """Look up the registered secured view callable for a route.

    Raises TypeError when no view is configured for ``route_name``.
    """
    request_iface = request.registry.queryUtility(
        pyramid.interfaces.IRouteRequest,
        name=route_name,
        default=pyramid.interfaces.IRequest)
    view = request.registry.adapters.lookup(
        (pyramid.interfaces.IViewClassifier, request_iface,
         zope.interface.providedBy(request.context)),
        pyramid.interfaces.ISecuredView, default=None)
    if view is None:
        raise TypeError('The requested route `{}` is not configured.'.format(
            route_name))
    return view
def route_permitted(route_name, request):
    """Check whether the user is allowed to use a route.

    Code inspired by pyramid.security.view_execution_permitted
    """
    # Temporarily pretend the target route matched so the view's permission
    # check evaluates against it, then restore the real matched route.
    current_route = request.matched_route.name
    request.matched_route.name = route_name
    view = get_view_for_route(route_name, request)
    result = view.__permitted__(request.context, request)
    request.matched_route.name = current_route
    return result
def format_size(value, request):
    """Render a byte count as a human readable string, e.g. '2.00 KB'.

    Falsy values (None, 0) yield None.  The value is divided by 1024 until
    it fits the next suffix from the module-level UNITS table.
    """
    if not value:
        return
    size = float(value)
    exponent = 0
    while size > 1024:
        size /= 1024.0
        exponent += 1
    return '%.2f %s' % (size, UNITS[exponent])
def format_mimetype(value, request):
    """Map a MIME type to its short display label (e.g. 'image/png' ->
    'PNG'); unknown types are returned unchanged."""
    return CONTENTTYPEENDING.get(value, value)
def to_string(col, parentheses=False):
    """Cast a SQLAlchemy column expression to SQL string type.

    With parentheses=True the cast expression is wrapped in literal
    '(' ... ')' via SQL string concatenation.
    """
    col_string = sqlalchemy.func.cast(col, sqlalchemy.String)
    if parentheses:
        return to_string('(').concat(col_string).concat(')')
    else:
        return col_string
def string_agg(col, delimiter='/'):
    """SQL string_agg over ``col`` (ascending order), values joined by
    ``delimiter``."""
    return sqlalchemy.func.string_agg(
        to_string(col), to_string(delimiter),
        order_by=col.asc())
def date(value, request=None):
    """Format a date as German-style 'DD.MM.YYYY'; None becomes ''."""
    if value is None:
        return ''
    return '{:%d.%m.%Y}'.format(value)
def iso_to_german_date(value, request=None):
    """Convert an ISO date string or date object to 'DD.MM.YYYY'.

    None yields ''; strings that parse_date cannot handle are returned
    unchanged.
    """
    if value is None:
        return ''
    if isinstance(value, datetime.date):
        return date(value)
    try:
        return date(parse_date(value))
    except ValueError:
        return value
def date_time(value, request=None):
    """Format a datetime as 'DD.MM.YYYY HH:MM'; None becomes ''."""
    return '' if value is None else '{:%d.%m.%Y %H:%M}'.format(value)
def datetime_now():
    """Return the current local datetime."""
    return datetime.datetime.now()
def value_to_int(value):
    """Parse a German formatted decimal string into integer 1/10000 units.

    '1.234,56' -> 12345600.  Thousands separators ('.') are stripped and
    the decimal comma becomes a decimal point.

    Uses Decimal instead of float so the scaling is exact: with float
    arithmetic '8,2' evaluated to int(81999.999...) == 81999.
    """
    normalized = value.replace('.', '').replace(',', '.')
    return int(decimal.Decimal(normalized) * 10000)
def format_date(value, request=None):
    """Format a date/datetime as German ``DD.MM.YYYY``."""
    return '{:%d.%m.%Y}'.format(value)
def format_eur(value, request=None, full=False):
    """Delegate Euro formatting to the shared base helper."""
    return sw.allotmentclub.base.format_eur(value, full)
def format_eur_with_color(value, request=None, full=False):
    """Format a Euro amount, coloring positive green and negative red."""
    amount = 0 if value is None else value
    formatted = format_eur(amount, request, full)
    if amount > 0:
        css_class = 'txt-color-green'
    elif amount < 0:
        css_class = 'txt-color-red'
    else:
        # Zero stays uncolored.
        return formatted
    return '<span class="%s">%s</span>' % (css_class, formatted)
def format_kwh(x, request=None):
    """Delegate kWh formatting to the shared base helper."""
    return sw.allotmentclub.base.format_kwh(x)
def moneyfmt(value, places=2, curr='', sep=',', dp='.', pos='', neg='-',
             trailneg='', request=None):
    """Convert Decimal to a money formatted string.
    see: http://docs.python.org/2/library/decimal.html#recipes
    Adapted to display currency symbol after the numbers.
    places:  required number of places after the decimal point
    curr:    optional currency symbol after the sign (may be blank)
    sep:     optional grouping separator (comma, period, space, or blank)
    dp:      decimal point indicator (comma or period)
             only specify as blank when places is zero
    pos:     optional sign for positive numbers: '+', space or blank
    neg:     optional sign for negative numbers: '-', '(', space or blank
    trailneg:optional trailing minus indicator: '-', ')', space or blank
    >>> from decimal import Decimal
    >>> d = Decimal('-1234567.8901')
    >>> moneyfmt(d, curr=' EUR')
    '-1,234,567.89 EUR'
    >>> moneyfmt(d, places=0, sep='.', dp='', neg='', trailneg='-')
    '1.234.568-'
    >>> moneyfmt(d, curr=' EUR', neg='(', trailneg=')')
    '(1,234,567.89 EUR)'
    >>> moneyfmt(Decimal(123456789), sep=' ')
    '123 456 789.00'
    >>> moneyfmt(Decimal('-0.02'), neg='<', trailneg='>')
    '<0.02>'
    """
    q = decimal.Decimal(10) ** -places      # 2 places --> '0.01'
    sign, digits, exp = value.quantize(q).as_tuple()
    # Fix for Python 3: map() returns an iterator without pop(); the
    # algorithm pops digits from the end while building the string
    # back-to-front, so it needs a real list.
    digits = list(map(str, digits))
    result = []
    # `next_digit` (not `next`) to avoid shadowing the builtin.
    build, next_digit = result.append, digits.pop
    if sign:
        build(trailneg)
    build(curr)
    for i in range(places):
        build(next_digit() if digits else '0')
    if places:
        build(dp)
    if not digits:
        build('0')
    i = 0
    while digits:
        build(next_digit())
        i += 1
        if i == 3 and digits:
            i = 0
            build(sep)
    build(neg if sign else pos)
    return ''.join(reversed(result))
def percent(*args, **kw):
    """Formatter for percent values (appends ``' %'`` to the number)."""
    return number(*args, formatting_options={'curr': ' %'}, **kw)
def get_selected_year():
    """Return the year selected via the ``for_year`` request parameter.

    Falls back to the current year when there is no active request or no
    parameter was supplied.
    """
    default = datetime.datetime.now().year
    request = pyramid.threadlocal.get_current_request()
    if not request:
        return default
    return int(request.GET.get('for_year', default))
def number(*args, **kw):
    """Formatter for ordinary numbers.

    Usable two ways: ``number(value)`` formats immediately; called with
    configuration keywords only, it returns a formatter callable instead.
    """
    unit = kw.pop('unit', 1)
    options = dict(dp=',', sep='.')
    options.update(kw.pop('formatting_options', {}))

    def render(value, request=None):
        if value is None:
            return ''
        value /= unit
        if not isinstance(value, decimal.Decimal):
            value = decimal.Decimal(str(value))
        return moneyfmt(value, **options)

    kw.pop('request', None)
    if len(args) == 1 and not kw:
        # Direct usage without arguments in a table declaration.
        return render(args[0])
    # Factory usage: no positional args, remaining keywords are options.
    assert not args
    options.update(kw)
    return render
def boolean(value, request=None):
    """Render truthy values as a check mark, falsy ones as empty string."""
    if value:
        return '✓'
    return ''
class View(object):
    """Base class for JSON views.

    Calling the instance runs :meth:`update` and returns ``self.result``;
    dict results are tagged with the application version.
    """

    result = None

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self):
        self.result = {}
        self.update()
        if isinstance(self.result, dict):
            self.result['version'] = sw.allotmentclub.version.__version__
        return self.result

    def update(self):
        """Hook for subclasses to populate ``self.result``."""
        pass

    def form_submit(self):
        """Truthy when the request carries form data beyond ``for_year``."""
        return (
            self.request.params and
            list(self.request.params.keys()) != ['for_year'])

    @property
    def active_members(self):
        # Members joined to their allotments via the member_id FK.
        return (
            Member.query()
            .join(Allotment, Allotment.member_id == Member.id)
        )
class AddEditBase(View):
    """Shared form-description logic for add and edit views."""

    def get_form_attributes(self):
        """Copy ``self.form_attributes``, attach autocomplete data and mark
        the value currently stored on the context as selected."""
        attributes = [item.copy() for item in self.form_attributes]
        for item in attributes:
            funcname = 'autocomplete_{}'.format(item['name'])
            if hasattr(self, funcname):
                item['autocomplete'] = getattr(self, funcname)()
            if 'values' in item:
                current = getattr(self.context, item['name'], None)
                marked = []
                for value in item['values']:
                    value = value.copy()
                    if current == value['id']:
                        value['selected'] = 'selected'
                    marked.append(value)
                item['values'] = marked
        return attributes

    def update_params(self, params):
        """Hook: subclasses may massage submitted params in place."""
        return
class AddView(AddEditBase):
    """Create a new model instance from submitted form data."""

    model = NotImplementedError
    context_id = None
    created = None

    def update(self):
        if not self.form_submit():
            # First request: describe the form so the client can render it.
            self.result = {
                'status': 'success',
                'data': {
                    'form': self.get_form_attributes(),
                    'form_data': getattr(self, 'form_data', {}),
                    'load_options': getattr(self, 'load_options', {}),
                    'title': self.title,
                },
            }
            return
        params = dict(self.request.params)
        if self.context_id:
            params[self.context_id] = self.context.id
        self.update_params(params)
        self.created = self.model.create(**params)
        self.result = {'status': 'success'}
class EditView(AddEditBase):
    """Apply submitted form values to the context object."""

    def update(self):
        if self.form_submit():
            params = dict(self.request.params)
            self.update_params(params)
            for key, value in params.items():
                # Falsy submissions clear the attribute.
                setattr(self.context, key, value or None)
            self.result = {'status': 'success'}
        else:
            attributes = self.get_form_attributes()
            for item in attributes:
                item['value'] = getattr(self.context, item['name'], None)
            self.result = {
                'status': 'success',
                'data': {'form': attributes, 'title': self.title},
            }
class EditJSFormView(AddEditBase):
    """Edit view driven by a JS form.

    Serves the form description, accepts single key/value saves via JSON,
    and handles file uploads via :meth:`handle_upload` (subclass hook).
    """

    @property
    def members(self):
        """Members of the current user's organization that have not left."""
        return (
            Member.query()
            .filter(
                Member.organization_id == self.request.user.organization_id)
            .filter(Member.leaving_year.is_(None)))

    @property
    def member_source(self):
        """Autocomplete source: one token/title entry per member."""
        return [
            {
                'token': member.id,
                'title': '{}, {} ({})'.format(
                    member.lastname,
                    member.firstname,
                    '/'.join(str(a.number)
                             for a in member.allotments) or 'n/a')
            }
            for member in self.members.order_by(Member.lastname).all()]

    @property
    def allotments(self):
        """Allotments belonging to members of the user's organization."""
        return (
            Allotment.query()
            .join(Member, Allotment.member_id == Member.id)
            .filter(
                Member.organization_id == self.request.user.organization_id)
        )

    @property
    def allotment_source(self):
        """Autocomplete source: allotment number with owner name."""
        return [
            {
                'token': allotment.id,
                'title': '{} ({}, {})'.format(
                    str(allotment.number),
                    allotment.member.lastname,
                    allotment.member.firstname
                )
            }
            for allotment in self.allotments.order_by(Allotment.number)]

    @property
    def users(self):
        """Unlocked users of the organization.

        NOTE(review): user id 42 is excluded here with no explanation --
        presumably a system account; confirm before changing.
        """
        return (
            User.query()
            .filter(User.id != 42)
            .filter(User.organization_id == self.request.user.organization_id)
            .filter(sqlalchemy.or_(
                User.is_locked.is_(None), User.is_locked == '')))

    @property
    def user_source(self):
        """Autocomplete source: one token/title entry per user."""
        return [
            {
                'token': user.id,
                'title': '{}, {} ({})'.format(
                    user.nachname,
                    user.vorname,
                    user.username)
            }
            for user in self.users.order_by(User.nachname).all()]

    @property
    def booking_kind_source(self):
        return [{'token': k.id, 'title': k.title}
                for k in BookingKind.query().all()]

    @property
    def accounting_year_source(self):
        """Selectable accounting years from 2014 up to the current year.

        From August on, the *next* year is already selectable.
        """
        result = []
        year = 2014
        now = datetime.datetime.now()
        current = now.year
        if now.month >= 8:
            current += 1
        while year <= current:
            result.append({'token': year, 'title': year})
            year += 1
        return result

    def resource_data_item_title(self, item):
        return item.filename

    def resource_data_item(self, item, route_name):
        """Describe an uploaded resource for the client."""
        return {
            'status': 'success',
            'resource': self.get_route(item, route_name),
            'id': item.id,
            'data': {'title': self.resource_data_item_title(item)}}

    @property
    def route_name(self):
        return self.request.matched_route.name

    def get_route(self, item, route):
        """Build the /api URL of ``item`` for the given route."""
        return '/api/' + route_url(
            route, self.request).replace('{id}', str(item.id))

    def get_result(self):
        """Initial payload: load options/data plus the save URL."""
        save_url = self.get_route(self.context, self.route_name)
        result = {'status': 'success'}
        result.update({'data': {
            'load_options': self.load_options,
            'load_data': self.load_data,
            'url': save_url,
            'title': self.title}})
        return result

    def save(self, key, value):
        """Set one attribute on the context; returns an error message on
        validation failure, None on success."""
        try:
            type_ = self.context.__table__.columns[key].type
        except Exception:
            type_ = None
        if (
            isinstance(type_, sqlalchemy.DateTime) or
            isinstance(type_, sqlalchemy.Date)
        ):
            # Date(-time) columns arrive as strings from the client.
            value = parse_date(value)
        try:
            setattr(self.context, key, value)
        except sw.allotmentclub.model.ValidationError as e:
            return str(e)

    def update(self):
        """Dispatch: file upload, single-field JSON save, or initial load."""
        try:
            json = self.request.json
        except ValueError:
            json = None
        file_ = self.request.params.get('file')
        if self.form_submit() and file_ is None:
            # BUG FIX: assign to the local `result` -- the old code set
            # self.result here and then crashed with UnboundLocalError on
            # the unconditional `self.result = result` below.
            result = {
                'status': 'error',
                'msg': 'Ein unerwarteter Fehler ist aufgetreten'
            }
        elif file_ is not None:
            created, route_name = self.handle_upload(file_)
            transaction.savepoint()
            result = self.resource_data_item(created, route_name)
            log_with_user(user_data_log.info, self.request.user,
                          'Datei in %s %s hochgeladen.',
                          self.title.split(' ')[0], self.context.id)
        elif json and list(json.keys())[0] != 'attachments':
            error = self.save(
                list(json.keys())[0],
                list(json.values())[0])
            if error:
                result = {'status': 'error', 'msg': error}
            else:
                log_with_user(
                    user_data_log.info, self.request.user, '%s %s bearbeitet.',
                    self.title.split(' ')[0], self.context.id)
                result = {'status': 'success'}
        else:
            if not self.context.id:
                # Make sure the freshly created context gets an id.
                transaction.savepoint()
            result = self.get_result()
        self.result = result

    def handle_upload(self, file_):
        raise NotImplementedError('Handle in subclass.')
class DeleteView(View):
model = NotImplementedError
deleted = None
mark_deleted = False
def update(self):
if self.mark_deleted:
self.context.deleted = | |
"""
Settings is for running experiments with different parameters. Supports
stuff like auto grid search and logging (yes, logging!).
TODO:
- [ ] sanity check passed experiments to be of type 'list'. If passing a
single setting that happens to be iterable it will happily iterate
through, e.g., all characters of a string.
author: mbforbes
"""
# IMPORTS
# -----------------------------------------------------------------------------
# builtins
import code # code.interact(local=dict(globals(), **locals()))
from itertools import product
import logging
# 3rd party
import numpy as np
from tabulate import tabulate
# TOP-LEVEL FUNCTIONS
# -----------------------------------------------------------------------------
def cell_massage(val):
    """
    Preprocessing values to ensure that they can fit well in the cell of a
    printed table.

    Bools become the strings 'True'/'False' and numpy arrays are joined
    into a comma separated string; everything else passes through.
    """
    # tabulate handles bools inconsistently; normalize to text.
    if type(val) is bool:
        return 'True' if val else 'False'
    # tabulate cannot render numpy arrays as cell entries at all.
    if type(val) is np.ndarray:
        return ', '.join(str(row) for row in val)
    return val
# CLASSES
# -----------------------------------------------------------------------------
class Settings(object):
    """
    Class for trying all (exponentially many) combinations of all parameter
    settings. Must call next() before each trial run.
    New features:
    - [x] np.ndarray aligned printing
    - [x] Print settings that aren't changing at the top. If they're default,
    note them as so.
    Each iteration, note only the thing that is changing.
    Integrate with results. Output in a table format with the stuff that
    is changing.
    Example:
    Settings that aren't changing:
    foo: 0.5 (default)
    barbar: 0.7 (default)
    baz: 0.9
    (.. experiments run here ...)
    la -> | 0.5 | 0.7 | 0.9
    ------+-------+-------+------
    | 98% | 30% | 40%
    2D for 2 varied. TODO: For > 2, multiple tables?
    TODO: Use pandas for this?
    """
    # Class vars as constants for keys
    # Used with iterators to tell when to stop.
    # Unique sentinel object: lets next(iterator, NothingLeft) distinguish
    # exhaustion from legitimate falsy setting values.
    NothingLeft = object()
    Eval = 'eval'
    GloveVerbSimThresh = 'glove-verb-sim-thresh'
    GloveNounSimThresh = 'glove-noun-sim-thresh'
    Attrs = 'attrs'
    VerbSimPot = 'verb-sim-pot'
    NounEqPot = 'noun-eq-pot'
    NounSimPot = 'noun-sim-pot'
    NounSimRevPot = 'noun-sim-rev-pot'
    MaxNounsPerFrame = 'max-nouns-per-frame'
    FilterAbstract = 'filter-abstract'
    GTBiggerPot = 'gt-bigger-pot'
    GTSmallerPot = 'gt-smaller-pot'
    GTEqPot = 'gt-eq-pot'
    AgreementNeeded = 'agreement-needed'
    SelPrefMethod = 'sel-pref-method'
    SelPrefFreqCutoff = 'sel-pref-freq-cutoff'
    SelPrefPMICutoff = 'sel-pref-pmi-cutoff'
    SelPrefPot = 'sel-pref-pot'
    NormalizeLBP = 'normalize-lbp'
    LBPMaxIters = 'lbp-max-iters'
    IncludeVerbSimFactors = 'include-verb-sim-factors'
    IncludeNounSimFactors = 'include-noun-sim-factors'
    IncludeSelPrefFactors = 'include-sel-pref-factors'
    IncludeInfWithinverbSimframeFactors = 'include-inf-withinverb-simframe-factors'
    WithinverbSimframePot = 'withinverb-simframe-pot'
    IncludeXgraph = 'include-xgraph'
    XgraphTuples = 'xgraph-tuples'
    XgraphPot = 'xgraph-pot'
    MaxSeeds = 'max-seeds'
    RawNounsFilename = 'raw-nouns-filename'
    EvalNounsFilename = 'eval-nouns-filename'
    Lemmatize = 'lemmatize'
    SelPrefMinFreqForPMI = 'sel-pref-min-freq-for-pmi'
    IncludeNgramDBNouns = 'include-ngramdb-nouns'
    IncludeGoldNounpairs = 'include-gold-nounpairs'
    GoldNounpairAgreementNeeded = 'gold-nounpair-agreement-needed'
    GoldNounpairGreaterPot = 'gold-nounpair-greater-pot'
    GoldNounpairLesserPot = 'gold-nounpair-lesser-pot'
    GoldNounpairEqPot = 'gold-nounpair-eq-pot'
    AddRemainderAsNonseeds = 'add-remainder-as-nonseeds'
    FrameSeedMethod = 'frame-seed-method'
    NounpairSeedMethod = 'nounpair-seed-method'
    SelPrefPotMethod = 'selpref-pot-method'
    SelPrefEmbFilename = 'selpref-emb-filename'
    ObjpairSplit = 'objpair-split'
    FrameSplit = 'frame-split'
    # Class vars in all caps as constants for vals
    EVAL_DEV = 'dev'
    EVAL_TEST = 'test'
    SEL_PREF_FREQ = 'freq'
    SEL_PREF_PMI = 'pmi'
    POTENTIAL_METHOD_HARDCODED = 'hardcoded'
    POTENTIAL_METHOD_TRAINED = 'trained'
    POTENTIAL_METHOD_BOTH = 'both'
    # digging into more detail here for selpref
    SEL_PREF_HARDCODED = 'hardcoded'
    SEL_PREF_MLE = 'mle'
    SEL_PREF_EMB = 'emb'
    # unary potentials
    # Each is a distribution over the three labels (bigger, smaller, eq).
    POT_UNARY_MEDIUM_BIGGER = np.array([0.7, 0.2, 0.1])
    POT_UNARY_MEDIUM_SMALLER = np.array([0.2, 0.7, 0.1])
    POT_UNARY_MEDIUM_EQ = np.array([0.15, 0.15, 0.7])
    POT_UNARY_STRONG_BIGGER = np.array([0.9, 0.07, 0.03])
    POT_UNARY_STRONG_SMALLER = np.array([0.07, 0.9, 0.03])
    POT_UNARY_STRONG_EQ = np.array([0.05, 0.05, 0.9])
    # binary potentials
    # 3x3 matrices over label pairs; REV variants swap the first two rows.
    POT_BINARY_MEDIUM_SIM = np.array([
        [0.7, 0.2, 0.1],
        [0.2, 0.7, 0.1],
        [0.15, 0.15, 0.7],
    ])
    POT_BINARY_MEDIUM_REV = np.array([
        [0.2, 0.7, 0.1],
        [0.7, 0.2, 0.1],
        [0.15, 0.15, 0.7],
    ])
    POT_BINARY_STRONG_SIM = np.array([
        [0.9, 0.07, 0.03],
        [0.07, 0.9, 0.03],
        [0.05, 0.05, 0.9],
    ])
    POT_BINARY_STRONG_REV = np.array([
        [0.07, 0.9, 0.03],
        [0.9, 0.07, 0.03],
        [0.05, 0.05, 0.9],
    ])
    @staticmethod
    def _get_default_map():
        """Return a fresh dict mapping every settings key to its default.

        A new dict is built on each call so callers can mutate it freely.
        """
        return {
            Settings.Eval: Settings.EVAL_DEV,
            Settings.Attrs: ['size', 'weight', 'verb-speed', 'hardness', 'rigidness'],
            Settings.MaxSeeds: -1,  # -1 means no limit
            Settings.GloveVerbSimThresh: 0.5,
            Settings.GloveNounSimThresh: 0.45,
            Settings.VerbSimPot: Settings.POT_BINARY_MEDIUM_SIM,
            Settings.NounEqPot: Settings.POT_UNARY_MEDIUM_EQ,
            Settings.NounSimPot: Settings.POT_BINARY_MEDIUM_SIM,
            Settings.NounSimRevPot: Settings.POT_BINARY_MEDIUM_REV,
            Settings.MaxNounsPerFrame: 1,
            Settings.FilterAbstract: True,
            Settings.GTBiggerPot: Settings.POT_UNARY_MEDIUM_BIGGER,
            Settings.GTSmallerPot: Settings.POT_UNARY_MEDIUM_SMALLER,
            Settings.GTEqPot: Settings.POT_UNARY_MEDIUM_EQ,
            Settings.AgreementNeeded: 2,
            Settings.SelPrefFreqCutoff: 1000,
            Settings.SelPrefMinFreqForPMI: 1,
            Settings.SelPrefPMICutoff: 4.0,
            Settings.SelPrefMethod: Settings.SEL_PREF_PMI,
            Settings.SelPrefPot: Settings.POT_BINARY_MEDIUM_SIM,
            Settings.NormalizeLBP: True,
            Settings.LBPMaxIters: 20,
            Settings.IncludeSelPrefFactors: True,
            Settings.IncludeXgraph: True,
            Settings.IncludeVerbSimFactors: True,
            Settings.IncludeNounSimFactors: True,
            Settings.IncludeInfWithinverbSimframeFactors: True,
            Settings.WithinverbSimframePot: Settings.POT_BINARY_MEDIUM_SIM,
            Settings.XgraphTuples: [
                ('size', 'weight'),
                ('size', 'hardness'),
                ('weight', 'hardness'),
            ],
            Settings.XgraphPot: Settings.POT_BINARY_MEDIUM_SIM,
            Settings.RawNounsFilename: '',
            Settings.EvalNounsFilename: '',
            Settings.Lemmatize: True,
            Settings.IncludeNgramDBNouns: False,
            Settings.IncludeGoldNounpairs: True,
            Settings.GoldNounpairAgreementNeeded: 2,
            Settings.GoldNounpairGreaterPot: Settings.POT_UNARY_MEDIUM_BIGGER,
            Settings.GoldNounpairLesserPot: Settings.POT_UNARY_MEDIUM_SMALLER,
            Settings.GoldNounpairEqPot: Settings.POT_UNARY_MEDIUM_EQ,
            Settings.AddRemainderAsNonseeds: True,
            Settings.FrameSeedMethod: Settings.POTENTIAL_METHOD_BOTH,
            Settings.NounpairSeedMethod: Settings.POTENTIAL_METHOD_BOTH,
            Settings.SelPrefPotMethod: Settings.SEL_PREF_HARDCODED,
            Settings.SelPrefEmbFilename: '',
            Settings.ObjpairSplit: 20,
            Settings.FrameSplit: 5,
        }
def __init__(self, logger=None):
"""
Sets dict with default settings.
Settings to do:
- [x] constants above
- [x] number of nounsp
- [x] Potentials (bigger, smaller, eq)
- [x] Agreement needed (x/3)
- [x] Verb sim fac pots
- [x] Noun sim fac pots
- [x] Sel pref pots
- [x] Sel pref cutoff
- [x] whether to normalize in lbp
- [x] max n iterations to run lbp for
- [x] which factors to add
- [x] whether to filter abstract nouns
- [x] check out data.py settings
- [x] check rest of this file
"""
# Some admin
if logger is None:
logger = logging.getLogger(__name__)
self.logger = logger
# Default values
self._params = Settings._get_default_map()
self.param_keys = []
self.param_iterator = None
def get(self, key):
return self._params[key]
def _setup_trial(self, trial_keys):
"""
Tracks which configs vary (are "trial" keys).
Args:
trial_keys ([str])
"""
self.default_keys = set(self._get_default_map().keys()) - set(trial_keys)
self.trial_keys = trial_keys
self.trial_num = 0
self.trial_log = {}
self.trial_results = {}
self.trial_results_all_keys = []
def trial_sequence(self, params):
"""
Sets up a trial to try the specified ranges of parameter values in
sequence (holding all other parameters to their defaults and varying
only one at a time).
Args:
params ({Settings.KEY: [list of values to try]})
"""
self._setup_trial(params.keys())
# This implementation is kind of gross because it's bolted onto how the
# trial_product was designed. We really want to iterate over both keys
# and values and just set what we want. But I'm too lazy to learn about
# how iterators work in python. So we just use all the keys.
dm = self._get_default_map()
keys = dm.keys()
vals = [dm[k] for k in keys]
trials = []
for k, v in params.iteritems():
kidx = keys.index(k)
for val in v:
trial = vals[:]
trial[kidx] = val
trials += [tuple(trial)]
self.param_keys = keys
self.param_iterator = iter(trials)
def trial_product(self, params):
"""
Sets up a trial to try the product (all exponentially many
combinations) of the specified ranges of parameter values.
Args: params ({Settings.KEY: [list of values to try]})
"""
self._setup_trial(params.keys())
param_keys = []
param_vals = []
for k,v in params.iteritems():
param_keys += [k]
param_vals += [v]
# self.current_indices = [-1 for _ in range(len(param_keys))]
self.param_keys = param_keys
self.param_iterator = product(*param_vals)
def next(self):
"""
Move on to the next parameter setting combination.
Returns:
bool Whether there's anything left
"""
next_params = next(self.param_iterator, Settings.NothingLeft)
if next_params is Settings.NothingLeft:
return False
assert len(next_params) == len(self.param_keys)
self.trial_num += 1
self.trial_log[self.trial_num] = {}
self.trial_results[self.trial_num] = {}
for i, k in enumerate(self.param_keys):
self._params[k] = next_params[i]
self.trial_log[self.trial_num][k] = self._params[k]
return True
def add_result(self, key, val):
"""
Adds result in form of key: val *to currently running trial*.
Args:
key (any hashable)
val (any)
"""
if key not in self.trial_results_all_keys:
self.trial_results_all_keys.append(key)
self.trial_results[self.trial_num][key] = val
def log_results(self):
"""
Logs results. Call after trials have finished.
First logs the config that didn't change.
Then logs a table of the experiments run and any results that were
added.
"""
self.logger.info('Static config (defaults):')
full_dm = self._get_default_map()
pure_dm = {k: cell_massage(v) for k,v in full_dm.iteritems() if k in self.default_keys}
list_pure_dm = [list(item) for item in pure_dm.iteritems()]
for line in tabulate(list_pure_dm, tablefmt="fancy_grid").split('\n'):
self.logger.info(line)
self.logger.info('Trial configs:')
rows = []
for i in sorted(self.trial_log.keys()):
row = {}
# settings
for tk in self.trial_keys:
row[tk] = cell_massage(self.trial_log[i][tk])
# ... then results
for rk in self.trial_results_all_keys:
val = '---'
if rk in self.trial_results[i]:
val = cell_massage(self.trial_results[i][rk])
row[rk] = val
rows.append(row)
# TODO: use ordereddict and set key order so table headers go settings
# and then results.
# headers = self.trial_keys + self.trial_results_all_keys
for line in tabulate(rows, headers="keys", tablefmt="fancy_grid").split('\n'):
self.logger.info(line)
def debug_log_config(self):
"""
Dumps full config to debug log.
"""
self.logger.debug('Settings:')
for k,v in self._params.iteritems():
| |
in str(e):
print ("[E] contains duplicates .... trying again now...")
remainingItems, rewarn=self.table_BatchSync(table, items, keys)
else:
#self.table_BatchSync(table, items, keys)
raise ValueError("[E] Some other error... table_BatchSync :%s"%(e))
items=items+remainingItems
warn=warn+rewarn
return items, warn
    #### BELOW WORKS as EXPECTED updates each item one at a time SLOWWWWW!!!
    def table_BatchUpdate(self, table, items):
        """Update each item individually via update_item (one call per item).

        Each entry of ``items`` carries:
          'item'   -> the attribute dict to write,
          'key'    -> list of key attribute names inside 'item',
          'column' -> per-attribute type map consumed by convertTypes.
        On return every item dict is annotated with 'status'
        ('success'/'fail') and optionally 'warn' (key names that had to be
        stripped from the update expression).

        NOTE(review): uses string.lowercase, which exists only in Python 2
        (Python 3: string.ascii_lowercase); this also caps an item at 26
        non-key attributes.  Returns (items, warn) where warn is never
        appended to here -- presumably kept for interface symmetry with
        table_BatchSync; confirm.
        """
        warn = []
        for i in items:
            # printer( '>>>>>># >>>>>>>>>> 001BB')
            # printer( i)
            # printer( '>>>>>># >>>>>>>>>> 002AA')
            #raise ValueError('[E] updating table_BatchUpdate...{0}'.format(i))
            types = i['column']
            acount = {'count': -1}
            plusCount = {'count': -1}
            keyIN= {x:i['item'][x] for x in i['key']}
            it = i['item']
            item=it
            keysfound=[]
            copied=False
            # Key attributes must not appear in the UpdateExpression, so
            # copy-on-write the item and strip them out.
            for k in keyIN:
                printer( k)
                if k in item:
                    if copied is False:
                        item = copy.deepcopy(it)
                        copied=True
                    del item[k]
                    keysfound.append(k)
            #totkeys=len(i['item'])
            totkeys=len(item)
            # Placeholder pairs ['#a', ':a'], ['#b', ':b'], ... feeding
            # ExpressionAttributeNames / ExpressionAttributeValues.
            fake = list( [ '#%s'%s , ':%s'%s ] for s in string.lowercase[:totkeys] )
            fakeDic= dict( d for d in fake)
            acount['count'] =-1
            # derv: name placeholder -> (attribute name, raw value) tuple.
            derv = dict( [ (fake[self.incrementCount(acount)][0] ) , v ] for v in item.items() )
            xnames= dict([k, v[0] ] for k, v in derv.items())
            acount['count'] =-1
            xpress = "SET " + ", ".join("%s = %s " % (v[0], v[1]) for v in fake)
            xvalues = dict([fakeDic[k], convertTypes(v[1],types[ v[0] ])] for k, v in derv.items())
            response = table.update_item(
                Key=keyIN,
                UpdateExpression= xpress,
                ExpressionAttributeNames = xnames,
                ExpressionAttributeValues= xvalues
                )
            unprocessed = response.get('UnprocessedItems', None)
            if not unprocessed:
                it['status']='success'
            else:
                it['status']='fail'
            if len(keysfound) >0:
                it['warn']=keysfound
            printer( response)
        return items, warn
# find similar records
def tableConditionQuery(self, tresource, sortKeys, record, keyTypes):
results = None
totkeys = len(sortKeys)
# print '*(*(*(*(*()))))>>>>002'
# print tresource
# print sortKeys
# print record
# print keyTypes
# print '*(*(*(*(*()))))>>>>001'
for skey in sortKeys:
strict_key=convertDynamoType(keyTypes[skey],record[skey])
if strict_key is not None:
record[skey]=strict_key
if totkeys == 1:
results = tresource.query(KeyConditionExpression=Key(sortKeys[0]).eq(record[sortKeys[0]]))
elif totkeys == 2:
results = tresource.query(KeyConditionExpression=Key(sortKeys[0]).eq(record[sortKeys[0]]) \
& Key(sortKeys[1]).eq(record[sortKeys[1]]))
elif totkeys == 3:
results = tresource.query(KeyConditionExpression=Key(sortKeys[0]).eq(record[sortKeys[0]]) \
& Key(sortKeys[1]).eq(record[sortKeys[1]]) \
& Key(sortKeys[2]).eq(record[sortKeys[2]]))
elif totkeys == 4:
results = tresource.query(KeyConditionExpression=Key(sortKeys[0]).eq(record[sortKeys[0]]) \
& Key(sortKeys[1]).eq(record[sortKeys[1]]) \
& Key(sortKeys[2]).eq(record[sortKeys[2]]) \
& Key(sortKeys[3]).eq(record[sortKeys[3]]))
# results = tresource.query(KeyConditionExpression)
# KeyConditionExpression = Key('year').eq(1992) & Key('title').between('A', 'L')
return results
def tablesAbsent(self, tables_review):
client = self.__get_client__()
missing = []
rawItems = client.list_tables()['TableNames']
for name in rawItems:
if name in tables_review:
continue
printColor(['[E] DynamoDB Table [%s] MISSING...' % (name)])
missing.append( name)
return missing
def tablesDrop(self,tables):
client = self.__get_client__()
for t in tables:
response=client.delete_table(TableName=t)
printer ('[W] ....DELETING TABLE %s'%t)
client.get_waiter('table_not_exists').wait(TableName=t)
printer( response )
def dynamoTableUpdate(client,name,xkema,facts,attr,config,ngsi=None, nlsi=None):
    """Bring an existing DynamoDB table in line with the desired definition.

    Updates provisioned throughput and stages Create/Update/Delete actions
    for global secondary indexes described in ``ngsi`` (compared against
    the current state in ``facts['gsi']``).  Returns True when any
    update_table call was issued.

    NOTE(review): changing an LSI requires dropping and rebuilding the
    table, hence the commented-out nlsi handling below.
    NOTE(review): ``e.message`` in the except handler is Python-2-only;
    on Python 3 it would raise AttributeError.
    """
    gsi=[]
    lsi=[]
    gsi_main={}
    changed = False
    if ngsi is not None:
        gsi_main.update(ngsi)
#    printer( facts)
#    printer( '()(*()(*()(*()*()*() ----> 001 a')
    #if nlsi is not None:
    #    gsi_main.update(nlsi)
    # gsi_main is always a dict here, so this branch always runs; it only
    # stages the index changes -- the API calls happen further down.
    if gsi_main is not None:
        gsiUpdates = []
        gsiStates = []
        for g,e in gsi_main.items():
            if 'GSI' in e['Type']:
                # Without an action key we cannot know what to do; bail out.
                if not ACTION_KEY in e:
                    printer("[W] NO action keys found to change TABLE Definitions.... skiping...")
                    return changed
                action=e[ACTION_KEY]
                gsiValue={}
                if g in facts['gsi']:
                    if action.lower()!='delete':
                        # Index exists: only throughput can be updated.
                        action='Update'
                        gsiValue=copy.deepcopy(e['raw'])
                        del gsiValue['KeySchema']
                        del gsiValue['Projection']
#                        printer('....***__~~~~000001')
                        if facts['gsi'][g]['raw']['ProvisionedThroughput'] == gsiValue['ProvisionedThroughput']:
                            continue #amounts are same so skip!!
#                        printer('....***__~~~~000002')
                        gsiUpdates.append( { action.title(): gsiValue } )
                        continue
                    else:
                        action='Delete'
                        gsiValue={ 'IndexName': g }
                elif action.lower()=='delete':
                    continue # key not found so skip
                else:
                    action = 'Create'
                    gsiValue = e['raw']
                gsiStates.append( { action.title(): gsiValue } )
                #gsi.append(e['raw'])
            else: ##requires table deletion... much more entailed... saving records then rebuild/import....YuK!
                lsi.append(e['raw'])
    # DynamoDB requires capacity units of at least 1.
    readUnits = int(config['totalReads']) if int(config['totalReads'])> 0 else 1
    writeUnits = int(config['totalWrites']) if int(config['totalWrites'])> 0 else 1
    try:
        response = client.update_table(TableName=name,
                                       AttributeDefinitions=attr,
                                       ProvisionedThroughput={'ReadCapacityUnits': readUnits,
                                                              'WriteCapacityUnits': writeUnits})
        dynamoTableWait(client,name,response)
        changed = True
    except botocore.exceptions.ClientError as e:
        # AWS rejects a throughput update with unchanged values; treat
        # that specific rejection as a no-op.
        if 'equals the current value' in e.message:
            printer( '[W] found equivalent %s'%(e))
        else:
            raise ValueError('[E] updating table...%s',e.message)
#    printer( '()(*()(*()(*()*()*() ----> 001')
#    printer( attr)
#    printer( '()(*()(*()(*()*()*() ----> 002')
    # Throughput-only index updates can be issued in a single batch ...
    if len(gsiUpdates) >0:
        response = client.update_table(TableName=name,
                                       AttributeDefinitions=attr,
                                       GlobalSecondaryIndexUpdates=gsiUpdates)
        #[{'Update':{'IndexName':blabla, 'ProvisionedThroughput}},{'Create'}]
        changed = True
        dynamoTableWait(client, name, response)
    # ... while Create/Delete actions are issued one at a time, waiting
    # for the table to settle between calls.
    if len(gsiStates)>0:
        for g in gsiStates:
            response = client.update_table(TableName=name,
                                           AttributeDefinitions=attr,
                                           GlobalSecondaryIndexUpdates=[g])
            # [{'Update':{'IndexName':blabla, 'ProvisionedThroughput}},{'Create'}]
            changed = True
            dynamoTableWait(client, name, response)
    # response = client.update_table(TableName=name, KeySchema=xkema,  # Partition keys
    #                               AttributeDefinitions=attr,
    #                               ProvisionedThroughput={'ReadCapacityUnits': readUnits,
    #                                                      'WriteCapacityUnits': writeUnits},
    #                               LocalSecondaryIndexes=lsi,
    #                               GlobalSecondaryIndexes=gsi)
    #
    # response = client.update_table(TableName=name, KeySchema=xkema,  # Partition keys
    #                               AttributeDefinitions=attr,
    #                               ProvisionedThroughput={'ReadCapacityUnits': readUnits,
    #                                                      'WriteCapacityUnits': writeUnits},
    #                               LocalSecondaryIndexes=lsi,
    #                               GlobalSecondaryIndexes=gsi)
    return changed
def dynamoTableWait(client, tablename, response=None):
    """Poll describe_table until the table leaves any transitional state.

    Any status containing 'ING' (CREATING, UPDATING, DELETING) counts as
    transitional; polls every 2 seconds.

    Rewritten iteratively: the old tail-recursive version added a stack
    frame per poll and could exhaust the recursion limit on long waits.
    """
    if response is None:
        response = client.describe_table(TableName=tablename)
    printer(response)
    while 'Table' in response and 'ING' in response['Table']['TableStatus']:
        time.sleep(2)
        response = client.describe_table(TableName=tablename)
        printer(response)
def dynamoTableCreate(resource, name, xkema, attr, config, ngsi=None, nlsi=None, create=True):
    """Create a DynamoDB table with optional GSIs and at most one LSI.

    ``ngsi``/``nlsi`` entries are split into GSI vs LSI by their 'Type'
    field; the LSI (first one only) must share the table's primary HASH
    key or a ValueError is raised.  Returns the created table resource.

    NOTE(review): the ``create`` parameter is accepted but never used --
    possibly a leftover; confirm with callers.
    """
    gsi=[]
    lsi=[]
    objs = []
    # DynamoDB requires capacity units of at least 1.
    readUnits = int(config['totalReads']) if int(config['totalReads'])> 0 else 1
    writeUnits = int(config['totalWrites']) if int(config['totalWrites'])> 0 else 1
    gsi_main={}
    if ngsi is not None:
        gsi_main.update(ngsi)
    if nlsi is not None:
        gsi_main.update(nlsi)
    if gsi_main is not None:
        for g,e in gsi_main.items():
            if 'GSI' in e['Type']:
                gsi.append(e['raw'])
            else:
                lsi.append(e['raw'])
    lsiValid=False
    if len(lsi)>0:
        # Only the first LSI is used; LSIs share table throughput, so any
        # ProvisionedThroughput entry is dropped.
        newlsi = copy.copy(lsi[0])
        if 'ProvisionedThroughput' in newlsi:
            del newlsi['ProvisionedThroughput']
        finalLSI=[newlsi]
        lsiValid=lsi_inPrimary(xkema, finalLSI)
        if not lsiValid:
            raise ValueError('[E] LSI given does NOT match PRIMARY HASH', finalLSI, xkema)
    # Four create_table variants depending on which index kinds exist.
    # finalLSI is only bound when len(lsi) > 0, which is exactly when
    # lsiValid can be True -- do not reorder these branches.
    if len(gsi)>0 and lsiValid:
        printer("A")
        table=resource.create_table(TableName = name,KeySchema = xkema, # Partition keys
                AttributeDefinitions = attr,
                ProvisionedThroughput = { 'ReadCapacityUnits':readUnits , 'WriteCapacityUnits': writeUnits},
                LocalSecondaryIndexes=finalLSI,
                GlobalSecondaryIndexes=gsi )
    elif len(gsi)>0:
        printer("B")
        table=resource.create_table(TableName = name, KeySchema = xkema, # Partition keys
                AttributeDefinitions = attr,
                ProvisionedThroughput = { 'ReadCapacityUnits':readUnits , 'WriteCapacityUnits': writeUnits},
                GlobalSecondaryIndexes=gsi )
    elif lsiValid:
        printer("C")
        table=resource.create_table(TableName = name, KeySchema = xkema, # Partition keys
                AttributeDefinitions = attr,
                ProvisionedThroughput = { 'ReadCapacityUnits':readUnits , 'WriteCapacityUnits': writeUnits},
                LocalSecondaryIndexes=finalLSI )
    else:
        printer("D")
        #s=100/0
        table=resource.create_table(TableName = name, KeySchema = xkema, # Partition keys
                AttributeDefinitions = attr,
                ProvisionedThroughput = { 'ReadCapacityUnits':readUnits , 'WriteCapacityUnits': writeUnits}
                )
    return table
def lsi_inPrimary(xkema, lsi):
    """Check that an LSI hash key matches the table's primary HASH key.

    Returns True as soon as one KeySchema entry of type HASH in any LSI
    names the same attribute as a HASH entry of the primary key schema
    ``xkema``; False otherwise.

    Cleanup: removed leftover debug print statements, and tightened the
    old substring test (which scanned *every* field of the entry, so an
    attribute *name* containing 'HASH' could false-positive) to look only
    at the KeyType field.
    """
    primary_hash_attrs = set(
        entry['AttributeName'] for entry in xkema
        if 'HASH' in entry['KeyType'])
    for index in lsi:
        for element in index['KeySchema']:
            if ('HASH' in element['KeyType']
                    and element['AttributeName'] in primary_hash_attrs):
                return True
    return False
def get_tableInfo( table_defined):
    """Summarize a describe_table response into rows and a config dict.

    Returns (objs, pyObj): ``objs`` is a list of display rows (table
    summary, then an index header plus LSI/GSI detail rows when present);
    ``pyObj`` bundles the raw index data and table configuration.
    """
    printer( ' ==>get_tableInfo .....')
    table = table_defined['Table']
    printer( table)
    stat = table['TableStatus']
    name = table['TableName']
    # Owner/audit are fixed placeholders here; no per-table data exists.
    owner = "AWS"
    audit = "NONE"
    # pkey = table['KeySchema'][0]['AttributeName']
    objs = []
    attributes = table['AttributeDefinitions']
    # Dotted 'name:type' summaries of the attribute and key schemas.
    clmns = '.'.join('%s:%s' % (aa['AttributeName'], aa['AttributeType']) for aa in attributes)
    keys = table['KeySchema']
    pkey = '.'.join('%s:%s' % (t['AttributeName'], t['KeyType']) for t in keys)
    # print ' ==== keys @@##>> ',table['KeySchema']
    indexes = 0
    reads = 0
    writes = 0
    gsi = None
    # pyldetail/pygdetail double as presence flags: ldetail/gdetail are
    # only bound together with them in the branches below.
    pygdetail = pyldetail = None
    if 'LocalSecondaryIndexes' in table:
        lsi = table['LocalSecondaryIndexes']
        ldetail, pyldetail = globalSecondaryDetail(lsi, True)
    if 'GlobalSecondaryIndexes' in table:
        gsi = table['GlobalSecondaryIndexes']
        gdetail, pygdetail = globalSecondaryDetail(gsi)
        indexes = len(gsi)
        # reads, writes = getDynamoRU(item['GlobalSecondaryIndexes'])
    reads = table['ProvisionedThroughput']['ReadCapacityUnits']
    writes = table['ProvisionedThroughput']['WriteCapacityUnits']
    objs.append([name, audit, owner, stat, pkey, indexes, reads, writes, clmns])
    # Append the index header and detail rows only when any index exists.
    if pygdetail is not None or pyldetail is not None:
        objh = gsiHeader()
        objs = objs + objh
    if pyldetail is not None:
        objs = objs + ldetail
    if pygdetail is not None:
        objs = objs + gdetail
    printer( ' bbb 22 %s'%pygdetail)
    pyObj = {'rows': [], 'gsi': pygdetail,'lsi':pyldetail,
             'config': {'audit': audit, 'owner': owner, 'status': stat, 'Partition Key': keys, 'indexes': indexes,
                        'totalReads': reads, 'totalWrites': writes, 'Columns': attributes}}
    #objs.append([name, projected, ptype, pkey, stat, indexType, indexes, reads, writes])
    #pyObj[name] = {'Status': stat, 'Type': indexType,
    #               'indexes': indexes, 'raw': rawGsi}
    return (objs, pyObj)
def gsiHeader():
    """Return the header row (as a one-row list) for GSI/LSI detail tables."""
    GSINAME = 'Name[GSI]'
    header = [
        GSINAME, 'Projected', 'ProjectionType', 'KeySchema', 'Status',
        'Type', 'indexes', 'totalReads', 'totalWrites',
    ]
    return [header]
def globalSecondaryDetail( aGsi, isLSI=False):
# GHEAD = ['DATA_GSI']
pyObj = {}
objs = []
# objs.append(GHEAD)
printer( ' 2-====> GSI ')
indexType='GSI'
for gi in aGsi:
name = gi['IndexName']
#print ' 3-->gsi indexname::',name
ptype = gi['Projection']['ProjectionType']
indexes = 0
projected = None
pAttributes=None
if ptype == 'INCLUDE': ##LOOP THROUGH INDEXES TO INCLUDE
pAttributes = gi['Projection']['NonKeyAttributes']
indexes = len(pAttributes)
projected = '.'.join('%s' % (t) for t in pAttributes)
stat = gi.get('IndexStatus', None)
#print ' 4-->gsi indexes::',stat
keys = gi['KeySchema']
pkey = '.'.join('%s:%s' % (t['AttributeName'], t['KeyType']) for t in keys)
indexes = len(keys) + indexes
rawGsi = {'KeySchema': keys, 'IndexName': name,
'Projection': {'ProjectionType': ptype, 'NonKeyAttributes': pAttributes}}
if pAttributes is None:
del rawGsi['Projection']['NonKeyAttributes']
printer( ' [W]-- gsi removing NonKeyAttributes %s'% name)
if not isLSI:
_writes = {'ProvisionedThroughput':{'ReadCapacityUnits':gi['ProvisionedThroughput']['ReadCapacityUnits'],'WriteCapacityUnits':gi['ProvisionedThroughput']['WriteCapacityUnits']}}
#del _writes['ProvisionedThroughput']['NumberOfDecreasesToday']
rawGsi.update(_writes)
writes = gi['ProvisionedThroughput']['WriteCapacityUnits']
reads = gi['ProvisionedThroughput']['ReadCapacityUnits']
else:
indexType='LSI'
writes=reads=None
#print ' 6--===>gsi reads',reads
| |
Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) AddSugarSpiderBot www.idealobserver.com',
'Mozilla/5.0 (compatible; AnyApexBot/1.0; +http://www.anyapex.com/bot.html)',
'Mozilla/4.0 (compatible; Arachmo)', 'Mozilla/4.0 (compatible; B-l-i-t-z-B-O-T)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
'Mozilla/5.0 (compatible; BecomeBot/2.3; MSIE 6.0 compatible; +http://www.become.com/site_owners.html)',
'BillyBobBot/1.0 (+http://www.billybobbot.com/crawler/)',
'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)',
'Sqworm/2.9.85-BETA (beta_release; 20011115-775; i686-pc-linux-gnu)',
'Mozilla/5.0 (compatible; YandexImages/3.0; +http://yandex.com/bots)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/5.0 (compatible; YodaoBot/1.0; http://www.yodao.com/help/webmaster/spider/; )',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (<EMAIL>; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 Dead Link Checker (<EMAIL>; http://www.WISEnutbot.com)',
'Mozilla/4.0 compatible ZyBorg/1.0 (<EMAIL>; http://www.WISEnutbot.com)',
'Mozilla/5.0 (compatible; U; ABrowse 0.6; Syllable) AppleWebKit/420+ (KHTML, like Gecko)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser 1.98.744; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; Acoo Browser; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Acoo Browser; GTB6; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; InfoPath.1; .NET CLR 3.5.30729; .NET CLR 3.0.30618)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Acoo Browser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/419 (KHTML, like Gecko, Safari/419.3) Cheshire/1.0.ALPHA',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.215 Safari/534.10 ChromePlus/1.5.1.1',
'Links (2.7; Linux 3.7.9-2-ARCH x86_64; GNU C 4.7.1; text)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (PLAYSTATION 3; 3.55)', 'Mozilla/5.0 (PLAYSTATION 3; 2.00)',
'Mozilla/5.0 (PLAYSTATION 3; 1.00)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:24.0) Gecko/20100101 Thunderbird/24.4.0',
'Mozilla/5.0 | |
<gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# https://en.wikipedia.org/wiki/Exact_cover
# https://en.wikipedia.org/wiki/Sudoku_solving_algorithms
# https://en.wikipedia.org/wiki/Knuth%27s_Algorithm_X
import numpy as np
from collections import Counter
import csv, sys
from dlist import DList
# Sudoku geometry constants.
BASE = 3                  # block edge length; the grid is BASE**2 x BASE**2
N_SYMBOLS = BASE ** 2     # symbols per row/column/block (9 for classic sudoku)
N_BLOCKS = N_SYMBOLS      # number of BASE x BASE blocks in the grid
SYMBOL_EMPTY = 'x'        # placeholder for an empty cell in grid files
SYMBOLS = '123456789'     # symbol alphabet, indexed by zero-based symbol number
SYMBOL_LEN = 1            # printed width of one symbol


# PEP 8 (E731): named functions instead of lambdas assigned to names —
# same call signatures and results, but with real names in tracebacks.
def symbol(x):
    """Map a binary-matrix row index to its symbol number (0..N_SYMBOLS-1)."""
    return x % N_SYMBOLS


def grid_row(x):
    """Map a binary-matrix row index to the sudoku grid row it encodes."""
    return x // (N_SYMBOLS ** 2)


def grid_column(x):
    """Map a binary-matrix row index to the sudoku grid column it encodes."""
    return (x // N_SYMBOLS) % N_SYMBOLS


def running_cell(row, col):
    """Convert a grid (row, col) pair to a running cell number (0..N_SYMBOLS**2-1)."""
    return row * N_SYMBOLS + col


# Column offsets of the four exact-cover constraint groups inside one row of
# the binary matrix. Note: the cell-occupancy group starts at offset zero.
ROW_OFFSET = N_SYMBOLS ** 2
COLUMN_OFFSET = 2 * (N_SYMBOLS ** 2)
BLOCK_OFFSET = 3 * (N_SYMBOLS ** 2)

BY_COLUMNS = 0

algo_x_rows = []  # collects binary-matrix row numbers that form the solution
# ----------------------------------------------------------------------
# convert grid x,y to the corresponding block
def to_block(x, y):
    """Return the index of the BASE x BASE block that contains grid cell (x, y)."""
    return (x // BASE) + BASE * (y // BASE)
# ----------------------------------------------------------------------
# convert grid block to the corresponding x- and y span
def to_xy(block):
    """Return ([column indices], [row indices]) spanned by the given block."""
    x_start = BASE * (block % BASE)
    y_start = BASE * (block // BASE)
    return list(range(x_start, x_start + BASE)), list(range(y_start, y_start + BASE))
# ----------------------------------------------------------------------
# convert string input to an integer
def parse_input(inp):
    """Parse *inp* as an integer; on failure print a notice and return -1."""
    try:
        return int(inp)
    except ValueError:
        print(f'{inp} not an accepted number. Must be an integer.')
        return -1
# ----------------------------------------------------------------------
# give the symbol, the row and the column of the sudoku grid for
# a binary matrix row 'y'
def symbol_mapping(y):
    """Return (symbol char, grid row, grid column) encoded by binary-matrix row *y*."""
    return SYMBOLS[y % N_SYMBOLS], grid_row(y), grid_column(y)
# ----------------------------------------------------------------------
def init_binary_matrix():
    """Build the full exact-cover binary matrix with every symbol allowed.

    Each of the N_SYMBOLS**3 rows encodes one (cell, symbol) candidate; the
    4 * N_SYMBOLS**2 columns encode the four sudoku constraints: cell
    occupancy, row uniqueness, column uniqueness and block uniqueness.
    """
    print('Initializing the binary matrix...')
    mat = np.zeros((N_SYMBOLS ** 3, 4 * (N_SYMBOLS ** 2)), dtype=int)
    # mark all four constraint columns for every candidate row
    for bm_row in range(mat.shape[0]):
        n = symbol(bm_row)
        r = grid_row(bm_row)
        c = grid_column(bm_row)
        mat[bm_row][running_cell(r, c)] = 1
        mat[bm_row][ROW_OFFSET + r * N_SYMBOLS + n] = 1
        mat[bm_row][COLUMN_OFFSET + c * N_SYMBOLS + n] = 1
        mat[bm_row][BLOCK_OFFSET + to_block(c, r) * N_SYMBOLS + n] = 1
    print('Done')
    return mat
# ----------------------------------------------------------------------
# convert a given sudoku to a binary matrix
def to_binary_matrix(grid):
    """Convert a (partially filled) sudoku grid into the exact-cover binary
    matrix used by Algorithm X.

    Starts from the full candidate matrix (init_binary_matrix) and, for each
    pre-filled cell, zeroes every candidate row that conflicts with the given
    symbol in the same cell, row, column or block, then re-enables the single
    candidate row representing the given symbol itself.

    Parameters
    ----------
    grid : list[list[str]]
        N_SYMBOLS x N_SYMBOLS grid of symbol characters; SYMBOL_EMPTY marks
        an unfilled cell.

    Returns
    -------
    numpy.ndarray
        The pruned binary matrix.
    """
    # find the row in the binary matrix for a given (x,y) in the sudoku grid
    xy_to_mrow = lambda x, y: y * (N_SYMBOLS ** 2) + x * N_SYMBOLS
    bm = init_binary_matrix()
    print('Converting sudoku to a binary matrix...')
    cols = len(grid[0])
    for y in range(len(grid)):
        for x in range(cols):
            s = grid[y][x]
            if s != SYMBOL_EMPTY:
                # zero-based symbol number of the pre-filled cell
                n = parse_input(s) - 1
                assert 0 <= n <= (N_SYMBOLS - 1), 'Conversion error'
                m_row = xy_to_mrow(x, y)
                cell = running_cell(y, x)
                b = to_block(x, y)
                # remove the symbol from the other constraints so that all four
                # conditions are met
                for i in range(N_SYMBOLS):  # symbol's cell position
                    tgt_row = m_row + i
                    bm[tgt_row][cell] = 0
                    bm[tgt_row][ROW_OFFSET + y * N_SYMBOLS + i] = 0
                    bm[tgt_row][COLUMN_OFFSET + x * N_SYMBOLS + i] = 0
                    bm[tgt_row][BLOCK_OFFSET + b * N_SYMBOLS + i] = 0
                # clear symbol's row
                for row_x in range(N_SYMBOLS):
                    row = xy_to_mrow(row_x, y)
                    tgt_row = row + n
                    row_cell = running_cell(y, row_x)
                    bm[tgt_row][row_cell] = 0
                    bm[tgt_row][ROW_OFFSET + y * N_SYMBOLS + n] = 0
                    # NOTE: `row + n` is the same index as `tgt_row`
                    bm[row + n][COLUMN_OFFSET + row_x * N_SYMBOLS + n] = 0
                    row_b = to_block(row_x, y)
                    bm[tgt_row][BLOCK_OFFSET + row_b * N_SYMBOLS + n] = 0
                # clear symbol's column
                for col_y in range(N_SYMBOLS):
                    row = xy_to_mrow(x, col_y)
                    tgt_row = row + n
                    col_cell = running_cell(col_y, x)
                    bm[tgt_row][col_cell] = 0
                    bm[tgt_row][ROW_OFFSET + col_y * N_SYMBOLS + n] = 0
                    bm[tgt_row][COLUMN_OFFSET + x * N_SYMBOLS + n] = 0
                    col_b = to_block(x, col_y)
                    bm[tgt_row][BLOCK_OFFSET + col_b * N_SYMBOLS + n] = 0
                # clear symbol's block
                xx, yy = to_xy(b)
                for by in yy:
                    for bx in xx:
                        row = xy_to_mrow(bx, by)
                        tgt_row = row + n
                        cell_b = running_cell(by, bx)
                        bm[tgt_row][cell_b] = 0
                        bm[tgt_row][ROW_OFFSET + by * N_SYMBOLS + n] = 0
                        bm[tgt_row][COLUMN_OFFSET + bx * N_SYMBOLS + n] = 0
                        bm[tgt_row][BLOCK_OFFSET + b * N_SYMBOLS + n] = 0
                # symbol is used: re-enable exactly this candidate row
                bm[m_row + n][cell] = 1
                bm[m_row + n][ROW_OFFSET + y * N_SYMBOLS + n] = 1
                bm[m_row + n][COLUMN_OFFSET + x * N_SYMBOLS + n] = 1
                bm[m_row + n][BLOCK_OFFSET + b * N_SYMBOLS + n] = 1
    print('Done')
    # (commented-out diagnostic counters, kept for reference)
    # b = np.count_nonzero(bm, axis = 1)
    # c1 = np.argmax(b)
    # c0 = np.argmin(b)
    # print('Sum min:', b[c0], 'Sum max:', b[c1])
    # counter = Counter()
    # for i in b:
    #     counter[i] += 1
    # print(counter.most_common())
    # b = np.count_nonzero(bm, axis = 0)
    # c1 = np.argmax(b)
    # c0 = np.argmin(b)
    # print('Sum min:', b[c0], 'Sum max:', b[c1])
    # counter = Counter()
    # for i in b:
    #     counter[i] += 1
    # print(counter.most_common())
    return bm
# ----------------------------------------------------------------------
# Print contents for debugging purposes
def diagnose_binary_matrix(mat):
    """Print, for every grid cell, which symbols are still allowed under each
    of the four constraint groups (cell / row / column / block).

    Debugging aid only; does not modify *mat*. One output line is emitted per
    grid cell, with the four constraint groups separated by tabs and each
    group's allowed symbols joined by a distinct separator character.
    """
    print('Diagnosing the binary matrix...')
    cell_occ = [0] * N_SYMBOLS  # cell occupancy
    row_c = [0] * N_SYMBOLS     # row-uniqueness flags per symbol
    col_c = [0] * N_SYMBOLS     # column-uniqueness flags per symbol
    block_c = [0] * N_SYMBOLS   # block-uniqueness flags per symbol
    for i in range(mat.shape[0]):  # rows in bin.matrix
        n = symbol(i)
        r = grid_row(i)
        col = grid_column(i)
        cell = running_cell(r, col)
        b = to_block(col, r)
        cell_occ[n] = mat[i][cell]
        row_c[n] = mat[i][ROW_OFFSET + r * N_SYMBOLS + n]
        col_c[n] = mat[i][COLUMN_OFFSET + col * N_SYMBOLS + n]
        block_c[n] = mat[i][BLOCK_OFFSET + b * N_SYMBOLS + n]
        # have we processed one grid cell? (N_SYMBOLS consecutive rows
        # describe the candidates of a single cell)
        if symbol(i + 1) == 0:
            print(f'({r}, {col}) [{cell}]:', end = '')
            for m in range(N_SYMBOLS):
                if cell_occ[m]:
                    print(SYMBOLS[m], end = '-')
            print('\t', end = '')
            for m in range(N_SYMBOLS):
                if row_c[m]:
                    print(SYMBOLS[m], end = '_')
            print('\t', end = '')
            for m in range(N_SYMBOLS):
                if col_c[m]:
                    print(SYMBOLS[m], end = '/')
            print('\t', end = '')
            for m in range(N_SYMBOLS):
                if block_c[m]:
                    print(SYMBOLS[m], end = '.')
            print()
    print('Done')
# ======================================================================
class Sudoku():
def __init__(self):
self.grid = [[SYMBOL_EMPTY] * N_SYMBOLS for i in range(N_SYMBOLS)]
# ----------------------------------------------------------------------
def new(self):
self.grid = [[SYMBOL_EMPTY] * N_SYMBOLS for i in range(N_SYMBOLS)]
print('New, empty sudoku created.')
# ----------------------------------------------------------------------
    def print(self):
        """Pretty-print the grid to stdout with Unicode box-drawing characters.

        Thick rules separate the BASE x BASE blocks, thin rules separate the
        remaining cells, and symbols present in SYMBOLS are rendered in ANSI
        bold. Output only; the grid is not modified.
        """
        # box-drawing glyphs used to assemble the frame
        left_up_corner = chr(0x250f)
        right_up_corner = chr(0x2513)
        left_down_corner = chr(0x2517)
        right_down_corner = chr(0x251b)
        hor_line_thick = chr(0x2501)
        ver_line_thick = chr(0x2503)
        ver_line_thin = chr(0x2502)
        hor_line_thin = chr(0x2500)
        left_join_1 = chr(0x2520)
        left_join_2 = chr(0x2523)
        right_join_1 = chr(0x2528)
        right_join_2 = chr(0x252b)
        up_join_1 = chr(0x252f)  # top row
        up_join_2 = chr(0x2533)  # top row
        down_join_1 = chr(0x2537)
        down_join_2 = chr(0x253b)
        mid_thin_join_1 = chr(0x253c)
        mid_thin_join_2 = chr(0x2542)
        mid_thick_join_1 = chr(0x253f)
        mid_thick_join_2 = chr(0x254b)
        # initial values to build up longer lines
        top_line = left_up_corner
        bottom_line = left_down_corner
        mid_line_thin = left_join_1
        mid_line_thick = left_join_2
        # grow each horizontal rule one column at a time; the joint glyph
        # depends on whether we are at the right edge, a block boundary,
        # or an interior cell boundary
        for i in range(N_SYMBOLS):
            top_line += hor_line_thick
            bottom_line += hor_line_thick
            mid_line_thin += hor_line_thin
            mid_line_thick += hor_line_thick
            if i == (N_SYMBOLS - 1):
                top_line += right_up_corner
                bottom_line += right_down_corner
                mid_line_thin += right_join_1
                mid_line_thick += right_join_2
            elif i % BASE == (BASE - 1):
                top_line += up_join_2
                bottom_line += down_join_2
                mid_line_thin += mid_thin_join_2
                mid_line_thick += mid_thick_join_2
            else:
                top_line += up_join_1
                bottom_line += down_join_1
                mid_line_thin += mid_thin_join_1
                mid_line_thick += mid_thick_join_1
        row_count = 0
        print(top_line)
        for row in self.grid:
            print(ver_line_thick, end = '')
            col_count = 0
            for s in row:
                if s in SYMBOLS:
                    # known symbols are printed in ANSI bold
                    print('\33[1m'+s+'\033[0m', end = '')
                else:
                    print(s, end = '')
                col_count += 1
                if col_count % BASE == 0:
                    print(ver_line_thick, end = '')
                else:
                    print(ver_line_thin, end = '')
            row_count += 1
            print()
            # choose the separator after this row: thick at block
            # boundaries, thin otherwise, bottom frame after the last row
            if row_count < N_SYMBOLS:
                if row_count % BASE == 0:
                    print(mid_line_thick)
                else:
                    print(mid_line_thin)
            else:
                print(bottom_line)
# ----------------------------------------------------------------------
def load(self, name=None):
print('Reading the sudoku from a file.')
if name is None:
name = input('Filename? ')
ret = []
with open(name, 'r', encoding = 'UTF-8') as csvfile:
filereader = csv.reader(csvfile, delimiter = ',')
for row in filereader:
r = [i for i in row]
ret.append(r)
self.grid | |
<gh_stars>0
#!/usr/bin/python3
# Filename: uplink_latency_analyzer.py
"""
uplink_latency_analyzer.py
An analyzer to monitor uplink packet waiting and processing latency
"""
__all__ = ["FirstByteAnalyzer"]
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from numpy.core.defchararray import decode
from mobile_insight.analyzer.analyzer import *
import time
import dis
import json
from datetime import datetime
# import threading
class FirstByteAnalyzer(Analyzer):
    def __init__(self):
        """Register the message callback and initialize all latency-tracking
        state: the frame clock, PHY/MAC counters, and the flag-based state
        machine that segments first-byte / three-way / tx / pending latencies.
        """
        Analyzer.__init__(self)
        self.add_source_callback(self.__msg_callback)

        # Timers (frame clock; -1 means not yet synchronized to a log sample)
        self.fn = -1
        self.sfn = -1

        # PHY stats
        self.cum_err_block = {0: 0, 1: 0}  # {0:xx, 1:xx} 0 denotes uplink and 1 denotes downlink
        self.cum_block = {0: 0, 1: 0}  # {0:xx, 1:xx} 0 denotes uplink and 1 denotes downlink

        # MAC buffer
        self.last_buffer = 0
        self.packet_queue = []

        # Stats
        self.all_packets = []
        self.tx_packets = []
        self.tmp_dict = {}

        # First byte latency (Goodsol)
        self.first_byte_latency = []
        self.three_way_latency = []
        self.tx_latency = []
        self.pend_latency = []
        self.blank_latency = []
        # [start, end] scratch pairs for the interval currently being measured
        self.temp_fbl = [0, 0]
        self.temp_twl = [0, 0]
        self.temp_tl = [0, 0]
        self.temp_pend = [0, 0]
        self.temp_blank = [0, 0]
        # state-machine flags: which latency interval is currently in progress
        self.fb_progress = False
        self.tw_progress = True
        self.tx_progress = False
        self.pend_progress = False
        self.blank_progress = False
        self.total_tb = 0
        self.sent_bytes = 0
        self.get_grant = False

        ########### added ################
        self.init_timestamp = None

        # Record per-second downlink bandwidth
        self.lte_dl_bw = 0  # Downlink bandwidth (from PDSCH)
        self.lte_ul_bw = 0  # Uplink bandwidth (from PUSCH DCI grants)
        self.lte_ul_grant_utilized = 0  # Uplink grant utilization (in bits)
        self.prev_timestamp_dl = None  # Track timestamp to calculate avg DL bandwidth
        self.prev_timestamp_ul = None  # Track timestamp to calculate avg UL bandwidth
        self.avg_window = 1.0  # Average link BW time window (in seconds)

        # Statistics for PDSCH modulation
        self.mcs_qpsk_count = 0
        self.mcs_16qam_count = 0
        self.mcs_64qam_count = 0

        # Record last observed CQI (for DL bandwidth prediction)
        self.cur_cqi0 = 0
        self.cur_cqi1 = 0
        self.cur_tbs = None

        # Flag to show if it is the first sr event
        self.init_flag = False

        # Resource slot used by SR
        self.rb_slot1 = None
        self.rb_slot2 = None

        # Scheduled SR subframenumber
        self.sr_sfn = None
def set_source(self, source):
"""
Set the trace source. Enable the cellular signaling messages
:param source: the trace source (collector).
"""
Analyzer.set_source(self, source)
source.enable_log("LTE_PHY_PUSCH_Tx_Report")
source.enable_log("LTE_MAC_UL_Buffer_Status_Internal")
source.enable_log("LTE_PHY_PUCCH_Tx_Report")
source.enable_log("LTE_MAC_UL_Transport_Block")
# get difference between two time value
def __f_time_diff(self, t1, t2):
if t1 > t2:
t_diff = t2 + 10240 - t1
else:
t_diff = t2 - t1 + 1
return t_diff
# get absolute time
def __f_time(self):
return self.fn * 10 + self.sfn
def __cmp_queues(self, type, data):
if type == 1:
for pkt in self.all_packets:
if pkt[-2] == data[0]:
# print the stats
self.all_packets.remove(pkt)
return
self.tx_packets.append(data)
if type == 2:
for pkt in self.tx_packets:
if pkt[0] == data[-2]:
# print the stats
self.tx_packets.remove(pkt)
return
self.all_packets.append(data)
    def __print_buffer(self):
        """Stub for dumping the MAC uplink buffer state (not implemented)."""
        pass
    def __msg_callback(self, msg):
        """Dispatch an incoming cellular log message.

        Handles three message families:
        - LTE_PHY_PUSCH_Tx_Report: counts (re)transmitted blocks and attaches
          a retransmission latency to pending entries in tmp_dict.
        - LTE_MAC_UL_Buffer_Status_Internal: tracks the MAC uplink buffer to
          infer per-packet waiting/transmission latencies and drives the
          first-byte / three-way / tx latency state machine.
        - LTE_PHY_PUCCH_Tx_Report / LTE_MAC_UL_Transport_Block: delegated to
          the dedicated callbacks.

        NOTE(review): this is a research-style state machine; the flag
        transitions below were reconstructed from the visible control flow —
        confirm edge cases against recorded traces before relying on them.
        """
        if msg.type_id == "LTE_PHY_PUSCH_Tx_Report":
            log_item = msg.data.decode()
            #print('PUSCH_Tx_Report callback: ', log_item,"\n")
            if 'Records' in log_item:
                for record in log_item['Records']:
                    self.total_tb += record['PUSCH TB Size']
                    #print('TB Size: ', record['Current SFN SF'], record['PUSCH TB Size'], self.total_tb)
                    retx_time = record['Current SFN SF']
                    if retx_time < 0:
                        retx_time += 1024
                    if record['Re-tx Index'] == 'First':
                        self.cum_block[0] += 1
                    else:
                        # print(record['Re-tx Index'])
                        self.cum_err_block[0] += 1
                        # a retransmission adds a fixed 8-subframe penalty
                        if retx_time in self.tmp_dict :
                            self.tmp_dict[retx_time]['Retx Latency'] = 8
                        else:
                            self.tmp_dict[retx_time] = {'Retx Latency': 8}
                    # flush tmp_dict entries older than this record (the
                    # second disjunct handles frame-number wrap-around)
                    for t in list(self.tmp_dict):
                        # print t, retx_time
                        # print self.tmp_dict
                        if (t < retx_time or (t > 1000 and retx_time < 20)):
                            if 'Retx Latency' not in self.tmp_dict[t]:
                                self.tmp_dict[t]['Retx Latency'] = 0
                            if len(self.tmp_dict[t]) == 3:
                                #print ('Waiting Latency:', self.tmp_dict[t]['Waiting Latency'], 'Tx Latency:', self.tmp_dict[t]['Tx Latency'], 'Retx Latency:', self.tmp_dict[t]['Retx Latency'])
                                self.all_packets.append(self.tmp_dict[t])
                                del(self.tmp_dict[t])
            # self.__cmp_queues(1, (record['Current SFN SF'], record['Re-tx Index']))
        if msg.type_id == "LTE_MAC_UL_Buffer_Status_Internal":
            for packet in msg.data.decode()['Subpackets']:
                for sample in packet['Samples']:
                    SFN = sample['Sub FN']
                    FN = sample['Sys FN']
                    self.update_time(SFN, FN)
                    if (sample['LCIDs'] == []):
                        # print "error here!!"
                        continue
                    # print SFN, FN, self.sfn, self.fn
                    data = sample['LCIDs'][-1]
                    # print sample
                    total_b = data['Total Bytes']
                    new_c = data['New Compressed Bytes']
                    retx_b = data['Retx bytes']
                    ctrl_b = data['Ctrl bytes']
                    # if (total_b > new_c) and ctrl_b == 0:
                    if total_b > self.last_buffer:
                        # buffer grew: a new packet arrived
                        # size, remaining buffer, incoming time, first byte time
                        self.packet_queue.append([total_b - self.last_buffer, total_b - self.last_buffer, self.__f_time(), -1])
                    elif total_b < self.last_buffer:
                        # buffer shrank: drain outgoing bytes against queued packets
                        outgoing_bufer = self.last_buffer - total_b
                        while 1:
                            if self.packet_queue == []:
                                break
                            packet = self.packet_queue[0]
                            #print('MAC UL Packet 0: ', packet[0], "1: ", packet[1], "2: ", packet[2], "3: ", packet[3], "\n")
                            if packet[3] == -1:
                                # first byte of this packet leaves the buffer now
                                packet[3] = self.__f_time()
                            if packet[1] > outgoing_bufer:
                                # only partially drained; stop here
                                packet[1] -= outgoing_bufer
                                break
                            else:
                                # packet fully drained: record its latencies
                                # size, waiting latency, transmission latency
                                # print self.packet_queue, self.all_packets, outgoing_bufer
                                t_now = self.__f_time()
                                if (t_now not in self.tmp_dict):
                                    self.tmp_dict[t_now] = {}
                                self.tmp_dict[t_now]['Waiting Latency'] = self.__f_time_diff(packet[2], packet[3])
                                self.tmp_dict[t_now]['Tx Latency'] = self.__f_time_diff(packet[3], self.__f_time())
                                #print ([self.__f_time(), packet[0], self.__f_time_diff(packet[2], packet[3]), self.__f_time_diff(packet[3], self.__f_time())])
                                print('Current time: ', self.__f_time(), ' Packet size: ', packet[0],
                                      ' Remaining buffer: ', packet[1], ' Incoming time: ',packet[2],
                                      ' Packet tx time: ', packet[3])
                                self.temp_pend[0] = t_now
                                if self.blank_progress == True:
                                    self.blank_progress = False
                                    #self.blank_latency.append([t_now, packet[2]])
                                '''
                                if self.last_buffer > 0 and self.fb_progress == False and self.tx_progress == False:
                                    print('Grant: ', t_now, self.fb_progress, self.tx_progress)
                                    self.tx_progress = True
                                    self.temp_tl[0] = packet[3]
                                    self.temp_tl[1] = t_now
                                '''
                                if packet[0] > 300 and self.fb_progress == True and packet[3] >= self.temp_fbl[0]:
                                    self.temp_fbl[1] = packet[3]
                                    self.temp_tl[0] = packet[3]
                                    self.temp_tl[1] = t_now
                                    self.tx_progress = True
                                    #print('SR progress is False')
                                    if self.tw_progress == True:
                                        # first large packet since start: three-way handshake
                                        self.tw_progress = False
                                        self.fb_progress = False
                                        self.pend_progress = True
                                        self.three_way_latency.append([self.temp_fbl[0], self.temp_fbl[1]])
                                        print('Three way: ',self.temp_fbl)
                                elif self.fb_progress == True:
                                    self.fb_progress = False
                                    self.first_byte_latency.append([self.temp_fbl[0], self.temp_fbl[1]])
                                    print('First byte: ',self.temp_fbl)
                                elif self.tx_progress == True:
                                    if self.temp_tl[1] == packet[3]:
                                        self.temp_tl[1] = t_now
                                    else:
                                        self.tx_latency.append([self.temp_tl[0], self.temp_tl[1]])
                                        print('Tx: ',self.temp_tl)#, [packet[3], t_now])
                                        self.fb_progress = False
                                        self.pend_progress = True
                                        self.blank_progress = True
                                        self.tx_progress = False
                                outgoing_bufer -= packet[1]
                                del self.packet_queue[0]
                        #self.__cmp_queues(2, (packet[0], self.__f_time_diff(packet[2], packet[3]), self.__f_time_diff(packet[2], t_now), t_now, self.last_buffer - new_c) )
                    self.last_buffer = total_b
        if msg.type_id == "LTE_PHY_PUCCH_Tx_Report":
            self.callback_pucch(msg)
        if msg.type_id == "LTE_MAC_UL_Transport_Block":
            self.callback_ul_transport(msg)
    def update_time(self, SFN, FN):
        """Tick the analyzer's frame clock by one subframe (wrapping the
        subframe number at 10 and the system frame number at 1024), then
        resynchronize to the log-reported (FN, SFN) whenever the reported
        subframe number is valid (< 10).
        """
        if self.sfn >= 0:
            self.sfn += 1
            if self.sfn == 10:
                self.sfn = 0
                self.fn += 1
                if self.fn == 1024:
                    self.fn = 0
        if SFN < 10:
            self.sfn = SFN
            self.fn = FN
###############added#####################
    def callback_pucch(self, msg):
        """
        Dump PUCCH scheduling request (SR) information and drive the
        first-byte / pending latency bookkeeping on SR events.

        :param msg: raw LTE_PHY_PUCCH_Tx_Report packet
        :return:
        """
        log_item = msg.data.decode()
        records = log_item['Records']
        timestamp = str(log_item['timestamp'])

        for record in records:
            pucch_tx_power = record['PUCCH Tx Power (dBm)']
            bcast_dict = {}
            bcast_dict['tx power'] = pucch_tx_power
            bcast_dict['timestamp'] = timestamp
            self.broadcast_info("PUCCH_TX_POWER", bcast_dict)
            self.log_debug("PUCCH_TX_POWER: " + str(bcast_dict))
            uciformat = record['Format']
            if uciformat == 'Format 1':
                # Format 1 carries a plain SR: remember its resource slots so
                # later Format 1A/1B records can be recognized as SR repeats
                self.init_flag = True
                self.rb_slot1 = record['Start RB Slot 0']
                self.rb_slot2 = record['Start RB Slot 1']
                self.sr_sfn = record['Current SFN SF'] % 10  # subframenumber
                sr_dict = {}
                sr_dict['timestamp'] = timestamp
                sr_dict['fn and subfn'] = record['Current SFN SF']
                self.broadcast_info("SR_EVENT", sr_dict)
                self.log_info("SR_EVENT: " + str(sr_dict))
                print('SR1 time: ', sr_dict['fn and subfn'], self.fb_progress, self.pend_progress, self.tx_progress)
                if self.fb_progress == False:
                    # SR starts a new first-byte latency measurement
                    self.temp_fbl[0] = sr_dict['fn and subfn']
                    self.fb_progress = True
                    #print('SR progress is True')
                if self.pend_progress == True:
                    # SR closes the pending interval started at the last drain
                    self.temp_pend[1] = sr_dict['fn and subfn']
                    self.pend_latency.append([self.temp_pend[0], self.temp_pend[1]])
                    print('Pend: ', self.temp_pend)
                    self.pend_progress = False
            elif uciformat == 'Format 1B' or uciformat == 'Format 1A':
                # TODO: reset init_flag for new logs
                if self.init_flag:
                    # same RB slots and subframe as the remembered SR ⇒ treat
                    # this ACK/NACK-multiplexed record as an SR event too
                    if int(record['Start RB Slot 1']) == self.rb_slot2 and int(record['Start RB Slot 0']) == self.rb_slot1 \
                            and record['Current SFN SF'] % 10 == self.sr_sfn:
                        sr_dict = {}
                        sr_dict['timestamp'] = timestamp
                        sr_dict['fn and subfn'] = record['Current SFN SF']
                        self.broadcast_info("SR_EVENT", sr_dict)
                        self.log_info("SR_EVENT: " + str(sr_dict))
                        #print('SR2 time: ', sr_dict['fn and subfn'], self.fb_progress, self.pend_progress)
                        if self.fb_progress ==False:
                            self.temp_fbl[0] = sr_dict['fn and subfn']
                            self.fb_progress = True
                        if self.pend_progress == True:
                            self.temp_pend[1] = sr_dict['fn and subfn']
                            self.pend_latency.append([self.temp_pend[0], self.temp_pend[1]])
                            print('Pend: ', self.temp_pend)
                            self.pend_progress = False
            elif uciformat == "Format 3":
                # TODO: Deal with SR event in format 3
                pass
def callback_ul_transport(self, msg):
    """Decode an uplink-transport log packet and echo it to stdout."""
    decoded_item = msg.data.decode()
    print('UL transport: ', decoded_item)
def calc_latency(self):
total_fbl = 0
total_pend = 0
total_tx = 0
total_blank = 0
new_list = []
for v in self.tx_latency:
if v not in new_list:
new_list.append(v)
self.tx_latency = new_list
for i in self.tx_latency:
total_tx += self.__f_time_diff(i[0],i[1])
if self.tx_latency[-1][0] > self.tx_latency[-1][1]:
last_tx | |
"cfmFlowMetricsIntOctets": {},
"cfmFlowMetricsIntPktRate": {},
"cfmFlowMetricsIntPkts": {},
"cfmFlowMetricsIntTime": {},
"cfmFlowMetricsIntTransportAvailability": {},
"cfmFlowMetricsIntTransportAvailabilityPrecision": {},
"cfmFlowMetricsIntTransportAvailabilityScale": {},
"cfmFlowMetricsIntValid": {},
"cfmFlowMetricsIntervalTime": {},
"cfmFlowMetricsIntervals": {},
"cfmFlowMetricsInvalidIntervals": {},
"cfmFlowMetricsMaxIntervals": {},
"cfmFlowMetricsOctets": {},
"cfmFlowMetricsPktRate": {},
"cfmFlowMetricsPkts": {},
"cfmFlowMetricsTableChanged": {},
"cfmFlowMetricsTransportAvailability": {},
"cfmFlowMetricsTransportAvailabilityPrecision": {},
"cfmFlowMetricsTransportAvailabilityScale": {},
"cfmFlowMonitorAlarmCriticalCount": {},
"cfmFlowMonitorAlarmInfoCount": {},
"cfmFlowMonitorAlarmMajorCount": {},
"cfmFlowMonitorAlarmMinorCount": {},
"cfmFlowMonitorAlarmSeverity": {},
"cfmFlowMonitorAlarmWarningCount": {},
"cfmFlowMonitorAlarms": {},
"cfmFlowMonitorCaps": {},
"cfmFlowMonitorConditions": {},
"cfmFlowMonitorConditionsProfile": {},
"cfmFlowMonitorDescr": {},
"cfmFlowMonitorFlowCount": {},
"cfmFlowMonitorTableChanged": {},
"cfmFlowNext": {},
"cfmFlowOperStatus": {},
"cfmFlowRtpNext": {},
"cfmFlowRtpPayloadType": {},
"cfmFlowRtpSsrc": {},
"cfmFlowRtpTableChanged": {},
"cfmFlowRtpVersion": {},
"cfmFlowTableChanged": {},
"cfmFlowTcpNext": {},
"cfmFlowTcpPortDst": {},
"cfmFlowTcpPortSrc": {},
"cfmFlowTcpTableChanged": {},
"cfmFlowUdpNext": {},
"cfmFlowUdpPortDst": {},
"cfmFlowUdpPortSrc": {},
"cfmFlowUdpTableChanged": {},
"cfmFlows": {"14": {}},
"cfmFlows.13.1.1": {},
"cfmFlows.13.1.2": {},
"cfmFlows.13.1.3": {},
"cfmFlows.13.1.4": {},
"cfmFlows.13.1.5": {},
"cfmFlows.13.1.6": {},
"cfmFlows.13.1.7": {},
"cfmFlows.13.1.8": {},
"cfmIpCbrMetricsCfgBitRate": {},
"cfmIpCbrMetricsCfgMediaPktSize": {},
"cfmIpCbrMetricsCfgRate": {},
"cfmIpCbrMetricsCfgRateType": {},
"cfmIpCbrMetricsEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
},
"cfmIpCbrMetricsIntDf": {},
"cfmIpCbrMetricsIntDfPrecision": {},
"cfmIpCbrMetricsIntDfScale": {},
"cfmIpCbrMetricsIntEntry": {
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
},
"cfmIpCbrMetricsIntLostPkts": {},
"cfmIpCbrMetricsIntMr": {},
"cfmIpCbrMetricsIntMrUnits": {},
"cfmIpCbrMetricsIntMrv": {},
"cfmIpCbrMetricsIntMrvPrecision": {},
"cfmIpCbrMetricsIntMrvScale": {},
"cfmIpCbrMetricsIntValid": {},
"cfmIpCbrMetricsIntVbMax": {},
"cfmIpCbrMetricsIntVbMin": {},
"cfmIpCbrMetricsLostPkts": {},
"cfmIpCbrMetricsMrv": {},
"cfmIpCbrMetricsMrvPrecision": {},
"cfmIpCbrMetricsMrvScale": {},
"cfmIpCbrMetricsTableChanged": {},
"cfmIpCbrMetricsValid": {},
"cfmMdiMetricsCfgBitRate": {},
"cfmMdiMetricsCfgMediaPktSize": {},
"cfmMdiMetricsCfgRate": {},
"cfmMdiMetricsCfgRateType": {},
"cfmMdiMetricsEntry": {"10": {}},
"cfmMdiMetricsIntDf": {},
"cfmMdiMetricsIntDfPrecision": {},
"cfmMdiMetricsIntDfScale": {},
"cfmMdiMetricsIntEntry": {"13": {}},
"cfmMdiMetricsIntLostPkts": {},
"cfmMdiMetricsIntMlr": {},
"cfmMdiMetricsIntMlrPrecision": {},
"cfmMdiMetricsIntMlrScale": {},
"cfmMdiMetricsIntMr": {},
"cfmMdiMetricsIntMrUnits": {},
"cfmMdiMetricsIntValid": {},
"cfmMdiMetricsIntVbMax": {},
"cfmMdiMetricsIntVbMin": {},
"cfmMdiMetricsLostPkts": {},
"cfmMdiMetricsMlr": {},
"cfmMdiMetricsMlrPrecision": {},
"cfmMdiMetricsMlrScale": {},
"cfmMdiMetricsTableChanged": {},
"cfmMdiMetricsValid": {},
"cfmMetadataFlowAllAttrPen": {},
"cfmMetadataFlowAllAttrValue": {},
"cfmMetadataFlowAttrType": {},
"cfmMetadataFlowAttrValue": {},
"cfmMetadataFlowDestAddr": {},
"cfmMetadataFlowDestAddrType": {},
"cfmMetadataFlowDestPort": {},
"cfmMetadataFlowProtocolType": {},
"cfmMetadataFlowSSRC": {},
"cfmMetadataFlowSrcAddr": {},
"cfmMetadataFlowSrcAddrType": {},
"cfmMetadataFlowSrcPort": {},
"cfmNotifyEnable": {},
"cfmRtpMetricsAvgLD": {},
"cfmRtpMetricsAvgLDPrecision": {},
"cfmRtpMetricsAvgLDScale": {},
"cfmRtpMetricsAvgLossDistance": {},
"cfmRtpMetricsEntry": {
"18": {},
"19": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"26": {},
"27": {},
"28": {},
"29": {},
"30": {},
"31": {},
},
"cfmRtpMetricsExpectedPkts": {},
"cfmRtpMetricsFrac": {},
"cfmRtpMetricsFracPrecision": {},
"cfmRtpMetricsFracScale": {},
"cfmRtpMetricsIntAvgLD": {},
"cfmRtpMetricsIntAvgLDPrecision": {},
"cfmRtpMetricsIntAvgLDScale": {},
"cfmRtpMetricsIntAvgLossDistance": {},
"cfmRtpMetricsIntEntry": {
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"26": {},
"27": {},
"28": {},
"29": {},
"30": {},
"31": {},
"32": {},
"33": {},
"34": {},
},
"cfmRtpMetricsIntExpectedPkts": {},
"cfmRtpMetricsIntFrac": {},
"cfmRtpMetricsIntFracPrecision": {},
"cfmRtpMetricsIntFracScale": {},
"cfmRtpMetricsIntJitter": {},
"cfmRtpMetricsIntJitterPrecision": {},
"cfmRtpMetricsIntJitterScale": {},
"cfmRtpMetricsIntLIs": {},
"cfmRtpMetricsIntLostPkts": {},
"cfmRtpMetricsIntMaxJitter": {},
"cfmRtpMetricsIntMaxJitterPrecision": {},
"cfmRtpMetricsIntMaxJitterScale": {},
"cfmRtpMetricsIntTransit": {},
"cfmRtpMetricsIntTransitPrecision": {},
"cfmRtpMetricsIntTransitScale": {},
"cfmRtpMetricsIntValid": {},
"cfmRtpMetricsJitter": {},
"cfmRtpMetricsJitterPrecision": {},
"cfmRtpMetricsJitterScale": {},
"cfmRtpMetricsLIs": {},
"cfmRtpMetricsLostPkts": {},
"cfmRtpMetricsMaxJitter": {},
"cfmRtpMetricsMaxJitterPrecision": {},
"cfmRtpMetricsMaxJitterScale": {},
"cfmRtpMetricsTableChanged": {},
"cfmRtpMetricsValid": {},
"cfrCircuitEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"cfrConnectionEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cfrElmiEntry": {"1": {}, "2": {}, "3": {}},
"cfrElmiNeighborEntry": {"1": {}, "2": {}, "3": {}, "4": {}, "5": {}, "6": {}},
"cfrElmiObjs": {"1": {}},
"cfrExtCircuitEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"23": {},
"24": {},
"25": {},
"26": {},
"27": {},
"28": {},
"29": {},
"3": {},
"30": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cfrFragEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cfrLmiEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"13": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cfrMapEntry": {
"1": {},
"10": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cfrSvcEntry": {
"1": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
},
"chassis": {
"1": {},
"10": {},
"12": {},
"14": {},
"15": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cieIfDot1dBaseMappingEntry": {"1": {}},
"cieIfDot1qCustomEtherTypeEntry": {"1": {}, "2": {}},
"cieIfInterfaceEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cieIfNameMappingEntry": {"2": {}},
"cieIfPacketStatsEntry": {
"1": {},
"10": {},
"11": {},
"12": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cieIfUtilEntry": {"1": {}, "2": {}, "3": {}, "4": {}},
"ciiAreaAddrEntry": {"1": {}},
"ciiCircEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"8": {},
"9": {},
},
"ciiCircLevelEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ciiCircuitCounterEntry": {
"10": {},
"2": {},
"3": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ciiIPRAEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ciiISAdjAreaAddrEntry": {"2": {}},
"ciiISAdjEntry": {
"10": {},
"11": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ciiISAdjIPAddrEntry": {"2": {}, "3": {}},
"ciiISAdjProtSuppEntry": {"1": {}},
"ciiLSPSummaryEntry": {"3": {}, "4": {}, "5": {}, "6": {}, "7": {}, "8": {}},
"ciiLSPTLVEntry": {"2": {}, "3": {}, "4": {}, "5": {}, "6": {}},
"ciiManAreaAddrEntry": {"2": {}},
"ciiPacketCounterEntry": {
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ciiRAEntry": {
"11": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
},
"ciiRedistributeAddrEntry": {"4": {}},
"ciiRouterEntry": {"3": {}, "4": {}},
"ciiSummAddrEntry": {"4": {}, "5": {}, "6": {}},
"ciiSysLevelEntry": {
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ciiSysObject": {
"1": {},
"10": {},
"11": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"8": {},
"9": {},
},
"ciiSysProtSuppEntry": {"2": {}},
"ciiSystemCounterEntry": {
"10": {},
"12": {},
"13": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"cipMacEntry": {"3": {}, "4": {}},
"cipMacFreeEntry": {"2": {}},
"cipMacXEntry": {"1": {}, "2": {}},
"cipPrecedenceEntry": {"3": {}, "4": {}},
"cipPrecedenceXEntry": {"1": {}, "2": {}},
"cipUrpfComputeInterval": {},
"cipUrpfDropNotifyHoldDownTime": {},
"cipUrpfDropRate": {},
"cipUrpfDropRateWindow": {},
"cipUrpfDrops": {},
"cipUrpfIfCheckStrict": {},
"cipUrpfIfDiscontinuityTime": {},
"cipUrpfIfDropRate": {},
"cipUrpfIfDropRateNotifyEnable": {},
"cipUrpfIfDrops": {},
"cipUrpfIfNotifyDrHoldDownReset": {},
"cipUrpfIfNotifyDropRateThreshold": {},
"cipUrpfIfSuppressedDrops": {},
"cipUrpfIfVrfName": {},
"cipUrpfIfWhichRouteTableID": {},
"cipUrpfVrfIfDiscontinuityTime": {},
"cipUrpfVrfIfDrops": {},
"cipUrpfVrfName": {},
"cipslaAutoGroupDescription": {},
"cipslaAutoGroupDestEndPointName": {},
"cipslaAutoGroupOperTemplateName": {},
"cipslaAutoGroupOperType": {},
"cipslaAutoGroupQoSEnable": {},
"cipslaAutoGroupRowStatus": {},
"cipslaAutoGroupSchedAgeout": {},
"cipslaAutoGroupSchedInterval": {},
"cipslaAutoGroupSchedLife": {},
"cipslaAutoGroupSchedMaxInterval": {},
"cipslaAutoGroupSchedMinInterval": {},
"cipslaAutoGroupSchedPeriod": {},
"cipslaAutoGroupSchedRowStatus": {},
"cipslaAutoGroupSchedStartTime": {},
"cipslaAutoGroupSchedStorageType": {},
"cipslaAutoGroupSchedulerId": {},
"cipslaAutoGroupStorageType": {},
"cipslaAutoGroupType": {},
"cipslaBaseEndPointDescription": {},
"cipslaBaseEndPointRowStatus": {},
"cipslaBaseEndPointStorageType": {},
"cipslaIPEndPointADDestIPAgeout": {},
"cipslaIPEndPointADDestPort": {},
"cipslaIPEndPointADMeasureRetry": {},
"cipslaIPEndPointADRowStatus": {},
"cipslaIPEndPointADStorageType": {},
"cipslaIPEndPointRowStatus": {},
"cipslaIPEndPointStorageType": {},
"cipslaPercentileJitterAvg": {},
"cipslaPercentileJitterDS": {},
"cipslaPercentileJitterSD": {},
"cipslaPercentileLatestAvg": {},
"cipslaPercentileLatestMax": {},
"cipslaPercentileLatestMin": {},
"cipslaPercentileLatestNum": {},
"cipslaPercentileLatestSum": {},
"cipslaPercentileLatestSum2": {},
"cipslaPercentileOWDS": {},
"cipslaPercentileOWSD": {},
"cipslaPercentileRTT": {},
"cipslaReactActionType": {},
"cipslaReactRowStatus": {},
"cipslaReactStorageType": {},
"cipslaReactThresholdCountX": {},
"cipslaReactThresholdCountY": {},
"cipslaReactThresholdFalling": {},
"cipslaReactThresholdRising": {},
"cipslaReactThresholdType": {},
"cipslaReactVar": {},
"ciscoAtmIfPVCs": {},
"ciscoBfdObjects.1.1": {},
"ciscoBfdObjects.1.3": {},
"ciscoBfdObjects.1.4": {},
"ciscoBfdSessDiag": {},
"ciscoBfdSessEntry": {
"10": {},
"11": {},
"12": {},
"13": {},
"14": {},
"15": {},
"16": {},
"17": {},
"18": {},
"19": {},
"2": {},
"20": {},
"21": {},
"22": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"9": {},
},
"ciscoBfdSessMapEntry": {"1": {}},
"ciscoBfdSessPerfEntry": {
"1": {},
"2": {},
"3": {},
"4": {},
"5": {},
"6": {},
"7": {},
"8": {},
"9": {},
},
"ciscoBulkFileMIB.1.1.1": {},
"ciscoBulkFileMIB.1.1.2": {},
"ciscoBulkFileMIB.1.1.3": {},
"ciscoBulkFileMIB.1.1.4": {},
"ciscoBulkFileMIB.1.1.5": {},
"ciscoBulkFileMIB.1.1.6": {},
"ciscoBulkFileMIB.1.1.7": | |
"""
Script and Functions to assmeble gates for the DMFT Loop
"""
from CQS.util.PauliOps import I
from CQS.util.verification import Nident
import qiskit
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
from collections import OrderedDict
from openfermion.ops import FermionOperator, QubitOperator
from openfermion.utils import hermitian_conjugated, commutator
from qiskit import QuantumCircuit, Aer
#import pytket as pytk
import numpy as np
from CQS.util.IO import tuplesToMatrix
from CQS.util.verification import Nident
from scipy import optimize
from numpy import kron
def CQS2OFterms(opList, coList):
    """Convert CQS integer-tuple Pauli strings and coefficients into an
    OrderedDict keyed by OpenFermion-style ((qubit, 'X'|'Y'|'Z'), ...) terms.

    A code of 0 (identity) on a qubit is simply omitted from the key.
    """
    axis_labels = ('I', 'X', 'Y', 'Z')
    terms = OrderedDict()
    for pauli_tuple, coefficient in zip(opList, coList):
        key = tuple(
            (qubit, axis_labels[code])
            for qubit, code in enumerate(pauli_tuple)
            if code
        )
        terms[key] = coefficient
    return terms
FO = FermionOperator
def OpenFermionToCQS(H, systemSize):
    """
    Converts the Operators to a list of (PauliStrings)
    Args:
        H(obj): The OpenFermion Operator
        systemSize (int): The number of qubits in the system
    returns (tuple): (coefficient, pauliString)
    """
    pauli_code = {'X': 1, 'Y': 2, 'Z': 3}
    coList = []
    opList = []
    # Each OpenFermion term is ((qubit, 'X'|'Y'|'Z'), ...); expand it into a
    # dense length-systemSize tuple of integer codes, 0 meaning identity.
    for term, coefficient in H.terms.items():
        coList.append(coefficient)
        action_on = dict(term)
        opList.append(tuple(
            pauli_code[action_on[qubit]] if qubit in action_on else 0
            for qubit in range(systemSize)
        ))
    return (coList, opList)
QO = QubitOperator
# define quantum circuit gates with OpenFermion sparse string format,
# then the full circuit unitary can be simplified with OpenFermion sparse string
# algorithm without using numerical matrices
class OFgate:
    """Elementary circuit gates (S, H, CNOT, CZ) written as OpenFermion
    QubitOperator sums, so a circuit unitary can be simplified symbolically
    instead of via dense numerical matrices.

    NOTE(review): ``QO(((q,'Z')))`` is the same expression as ``QO((q,'Z'))``
    -- a single (index, action) pair rather than a tuple of pairs; this relies
    on the installed OpenFermion version accepting that shorthand. Confirm.
    """
    def __init__(self, nqubit=1):
        # Stored for reference only; the gate builders below take explicit
        # qubit indices and do not read self.nq.
        self.nq = nqubit
    def s(self, q):
        # S gate on qubit q: ((1+i)/2) I + ((1-i)/2) Z == diag(1, i)
        return 0.5*(QO(((q,'Z'))) + QO(())) - 0.5j*(QO(((q,'Z'))) - QO(()))
    def h(self, q):
        # Hadamard on qubit q: (X + Z) / sqrt(2)
        return (QO(((q,'X'))) + QO(((q,'Z'))))/np.sqrt(2)
    def cx(self, c,t):
        # CNOT: |0><0|_c + |1><1|_c X_t, using |0><0| = (I+Z)/2
        return 0.5*(QO(((c,'Z'))) + QO(())) - 0.5*(QO(((c,'Z'))) - QO(()))*QO(((t,'X')))
    def cz(self, c,t):
        # CZ: |0><0|_c + |1><1|_c Z_t
        return 0.5*(QO(((c,'Z'))) + QO(())) - 0.5*(QO(((c,'Z'))) - QO(()))*QO(((t,'Z')))
def getGroundStateCircuitFunc(HamiltonianObj):
    """
    Computes the ground state of the system via simulated VQE and prepares the circuit
    Temporary: Prepares the state using faked VQE

    The 8-parameter Ry+CNOT ansatz below is optimized classically against the
    exact Hamiltonian matrix; the returned closure appends the optimized
    ansatz to a caller-supplied 4-qubit QuantumCircuit.
    """
    #Gets the exact matrix of the Hamiltonian
    Hamiltonian = 0
    for (co, p) in zip(HamiltonianObj.HCoefs, HamiltonianObj.HTuples):
        #p = p[::-1]
        Hamiltonian = Hamiltonian + tuplesToMatrix(co, p)
    #Defines the function to minimize over: the energy expectation <psi|H|psi>
    def Psi(theta):
        # theta entries may arrive complex from the optimizer; only the real
        # part is used as the rotation angle.
        vqeCirc = QuantumCircuit(4,4)
        vqeCirc.ry(theta[0].real,0)
        vqeCirc.ry(theta[1].real,1)
        vqeCirc.ry(theta[2].real,2)
        vqeCirc.ry(theta[3].real,3)
        vqeCirc.cnot(2,3)
        vqeCirc.ry(theta[4].real,2)
        vqeCirc.ry(theta[5].real,3)
        vqeCirc.cnot(0,2)
        vqeCirc.ry(theta[6].real,0)
        vqeCirc.ry(theta[7].real,2)
        vqeCirc.cnot(0,1)
        # Exact statevector simulation -- no sampling noise in the objective.
        backend = Aer.get_backend('statevector_simulator')
        result = qiskit.execute(vqeCirc, backend=backend).result()
        psi0 = result.get_statevector(vqeCirc)
        return (np.conjugate(psi0.T) @ Hamiltonian @ psi0).real
    #Minimizes over the VQE function
    minObj = optimize.minimize(Psi, [1, 1, 1, 1, 1, 1, 1, 1], options={'disp':True},tol=1e-8)
    params = minObj.x
    #Builds the circuit that we actually wanted, with the optimal parameters
    #baked in via closure.
    def GroundStateCircuit(circuit):
        circuit.ry(params[0].real,0)
        circuit.ry(params[1].real,1)
        circuit.ry(params[2].real,2)
        circuit.ry(params[3].real,3)
        circuit.cnot(2,3)
        circuit.ry(params[4].real,2)
        circuit.ry(params[5].real,3)
        circuit.cnot(0,2)
        circuit.ry(params[6].real,0)
        circuit.ry(params[7].real,2)
        circuit.cnot(0,1)
        return circuit
    return GroundStateCircuit
def getGroundStateShortCircuitFunc(HamiltonianObj):
    """
    Prepare the ground state using VQE type circuit

    Single-parameter ansatz: |1111> reference state followed by a fixed
    Clifford block and one Rx(theta); theta is optimized classically against
    the exact Hamiltonian matrix.  Unlike getGroundStateCircuitFunc, this
    returns a (circuit_builder, scipy_result) pair.
    """
    #Gets the exact matrix of the Hamiltonian
    Hamiltonian = 0 #np.zeros(2**4,dtype=complex)
    for (co, p) in zip(HamiltonianObj.HCoefs, HamiltonianObj.HTuples):
        # NOTE(review): the Pauli tuple is reversed here (qubit-endianness
        # convention), whereas getGroundStateCircuitFunc does not reverse --
        # confirm which convention is intended.
        reverseP = p[::-1]
        Hamiltonian = Hamiltonian + tuplesToMatrix(co, reverseP)
    #Defines the function to minimize over: the energy <psi(theta)|H|psi(theta)>
    def Psi(theta):
        vqeCirc = QuantumCircuit(4,4)
        # put into initial reference state
        for ii in range(4):
            vqeCirc.x(ii)
        vqeCirc.rx(theta,2) # do X(θ)
        vqeCirc.cx(2,1)
        vqeCirc.h(1)
        vqeCirc.rx(np.pi/2,2) # do X(π/2)
        vqeCirc.cx(1,0)
        vqeCirc.cx(2,3)
        vqeCirc.s(3)
        # Exact statevector simulation of the ansatz energy.
        backend = Aer.get_backend('statevector_simulator')
        result = qiskit.execute(vqeCirc, backend=backend).result()
        psi0 = result.get_statevector(vqeCirc)
        return (np.conjugate(psi0.T) @ Hamiltonian @ psi0).real
    #Minimizes over the VQE function (scalar problem -> Brent's method)
    minObj = optimize.minimize_scalar(Psi,tol=1e-8,
                    method='brent', #bracket=(-np.pi, np.pi)
                    #method='bounded',bounds=(-np.pi, np.pi),options={'xatol': 1e-08}
                    )
    param = minObj.x
    #Builds the circuit that we actually wanted, with the optimal angle baked
    #in via closure.
    def GroundStateCircuit(circuit):
        for ii in range(4):
            circuit.x(ii)
        circuit.rx(param,2) # do X(θ)
        circuit.cx(2,1)
        circuit.h(1)
        circuit.rx(np.pi/2,2) # do X(π/2)
        circuit.cx(1,0)
        circuit.cx(2,3)
        circuit.s(3)
        return circuit
    return GroundStateCircuit, minObj
def PauliExpGate(qc, theta, pauliString):
    """Append the circuit for exp(-i * theta * P) to qc, where P is the Pauli
    string encoded as a tuple of integer codes (0=I, 1=X, 2=Y, 3=Z), one per
    qubit, indexed left to right.

    Standard construction: rotate each non-identity qubit into the Z basis,
    run a CNOT ladder to the last supported qubit, apply Rz(-2*theta), then
    undo the ladder and the basis changes.
    """
    angle = theta.real
    support = []
    # Basis change in, remembering every qubit the string acts on.
    for qubit, code in enumerate(pauliString):
        if code == 0:
            continue
        if code == 1:
            # X -> Z via Hadamard
            qc.h(qubit)
        elif code == 2:
            # Y -> Z via Rx(pi/2)
            qc.rx(np.pi/2, qubit)
        support.append(qubit)
    # Entangling ladder down to the last supported qubit.
    for ctrl, tgt in zip(support, support[1:]):
        qc.cnot(ctrl, tgt)
    # The rotation carries the full angle (factor 2 from the Rz convention).
    qc.rz(-2*angle, support[-1])
    # Undo the ladder in reverse order.
    for ctrl, tgt in reversed(list(zip(support, support[1:]))):
        qc.cnot(ctrl, tgt)
    # Basis change back out.
    for qubit, code in enumerate(pauliString):
        if code == 1:
            qc.h(qubit)
        elif code == 2:
            qc.rx(-np.pi/2, qubit)
    return qc
def circKHK(circuit, kCoefs, hCoefs, k, h, stabilize=None, barriers=False):
    """
    Creates a Time Evolution Circuit in KHK' form: the K-dagger block, the
    (time-scaled) H block, then the K block, appended term by term as Pauli
    exponentials.

    :param circuit: QuantumCircuit to append to
    :param kCoefs: coefficients of the k Pauli terms
    :param hCoefs: coefficients of the h Pauli terms (already scaled by t)
    :param k: k Pauli strings as CQS integer tuples
    :param h: h Pauli strings as CQS integer tuples
    :param stabilize: None builds the plain KHK circuit; 'k' conjugates the
        k blocks with a Clifford stabilizer circuit (see kStabilizer);
        'kh' is not implemented yet
    :param barriers: insert barriers between exponentials (plain path only)
    """
    if stabilize is None:
        #First, loop over the terms in k (using the adjoint k first)
        #Short K so that CNOTs on Pairs of Terms cancel
        for (term, co) in zip(k[::-1], kCoefs[::-1]):
            if barriers:
                circuit.barrier(range(4))
            circuit = (PauliExpGate(circuit, -1*co, term))
        for (term, co) in zip(h, hCoefs):
            if barriers:
                circuit.barrier(range(4))
            circuit = (PauliExpGate(circuit, -1*co, term))
        for (term, co) in zip(k, kCoefs):
            if barriers:
                circuit.barrier(range(4))
            circuit = (PauliExpGate(circuit, co, term))
    # BUG FIX: the original used "stabilize is 'k'", which compares object
    # identity rather than string equality and is not guaranteed to match.
    if stabilize == 'k':
        k, kCoefs = kStabilizer(k, kCoefs)
        #Moves (3,3,0,3) to the second postiion
        index = k.index((3,3,0,3))
        k.insert(0,k[index])
        k.pop(index+1)
        kCoefs.insert(0,kCoefs[index])
        kCoefs.pop(index+1)
        ## Moves (3,0,0,3) to the front
        index = k.index((3,0,0,3))
        k.insert(0,k[index])
        k.pop(index+1)
        kCoefs.insert(0,kCoefs[index])
        kCoefs.pop(index+1)
        # Stabilizer conjugation around the K-dagger block.
        circuit.cx(0,1)
        circuit.sdg(0)
        circuit.cx(2,3)
        circuit.sdg(2)
        circuit.h(0)
        circuit.cx(0,2)
        circuit.h(0)
        for (term, co) in zip(k[::-1], kCoefs[::-1]):
            circuit = (PauliExpGate(circuit, -1*co, term))
        circuit.h(0)
        circuit.cx(0,2)
        circuit.h(0)
        circuit.s(2)
        circuit.cx(2,3)
        circuit.s(0)
        circuit.cx(0,1)
        for (term, co) in zip(h, hCoefs):
            circuit = (PauliExpGate(circuit, -1*co, term))
        # Stabilizer conjugation around the K block.
        circuit.cx(0,1)
        circuit.sdg(0)
        circuit.cx(2,3)
        circuit.sdg(2)
        circuit.h(0)
        circuit.cx(0,2)
        circuit.h(0)
        for (term, co) in zip(k, kCoefs):
            circuit = (PauliExpGate(circuit, co, term))
        circuit.h(0)
        circuit.cx(0,2)
        circuit.h(0)
        circuit.s(2)
        circuit.cx(2,3)
        circuit.s(0)
        circuit.cx(0,1)
    elif stabilize == 'kh':
        # TODO: not implemented
        pass
    return circuit
def GBAt(circuit, ParametersObj, time, ctrlgates=('cx', 'cx'), evolution='KHK', stabilize=None, barriers=False):
    """
    Creates the circuit for <BA(t)> = G
    Re<X(t)X>: ctrlgates = ['cx','cx']
    Re<Y(t)Y>: ctrlgates = ['cy','cy']
    Re<X(t)Y>: ctrlgates = ['cx','cy']
    Re<Y(t)X>: ctrlgates = ['cy','cx']
         ┌───┐                      ┌───┐┌─┐
    q_0: ┤ H ├──■──────────────■───┤ H ├┤M├
         └───┘┌─┴──┐┌───────┐┌─┴──┐└───┘└╥┘
    q_1: ─────┤ B  ├┤0      ├┤ A  ├──────╫─
              └────┘│       │└────┘      ║
    q_2: ───────────┤1      ├────────────╫─
                    │  U(t) │            ║
    q_3: ───────────┤2      ├────────────╫─
                    │       │            ║
    q_4: ───────────┤3      ├────────────╫─
                    └───────┘            ║
    c_0: ════════════════════════════════╩═

    :param circuit: 5-qubit circuit; qubit 0 is the interferometry ancilla
    :param ctrlgates: pair of controlled-gate method names (A applied second,
        B first).  Default changed from a mutable list to a tuple -- mutable
        default arguments are shared across calls.
    :param evolution: 'KHK' builds the Cartan KHK circuit; any other value is
        forwarded to Trotter as the number of Trotter steps
    """
    sub_qr = QuantumRegister(4)
    sub_qc = QuantumCircuit(sub_qr)
    # Hadamard-test style interference on the ancilla.
    circuit.h(0)
    getattr(circuit, ctrlgates[1])(0, 1)
    if evolution == 'KHK':
        sub_qc = circKHK(sub_qc, ParametersObj.kCoefs, np.multiply(ParametersObj.hCoefs, time), ParametersObj.cartan.k, ParametersObj.cartan.h, stabilize=stabilize, barriers=barriers)
    else:
        sub_qc = Trotter(sub_qc, ParametersObj.hamiltonian, time, evolution)
    # Hoisted: both branches appended the evolution block identically.
    circuit.append(sub_qc, range(1, 5))
    getattr(circuit, ctrlgates[0])(0, 1)
    circuit.h(0)
    return circuit
def Trotter(circuit, HamiltonianObj, time, steps):
    """
    Prepares a basic Trotterized circuit from the Hamiltonian object: appends
    exp(-i * co * (time/steps) * term) for every Hamiltonian term, repeated
    `steps` times.

    :param circuit: QuantumCircuit to append to
    :param HamiltonianObj: object providing HCoefs and HTuples
    :param time: total evolution time
    :param steps: number of Trotter steps
    """
    # Loop-invariant: hoisted out of the loop (was recomputed every step).
    timeStep = time / steps
    for _ in range(steps):
        for (co, term) in zip(HamiltonianObj.HCoefs, HamiltonianObj.HTuples):
            circuit = PauliExpGate(circuit, co * timeStep, term)
    return circuit
def kStabilizer(k, kCoefs):
    """Conjugate every k element by the fixed Clifford stabilizer circuit and
    return the transformed (k, coefficients) pair in CQS tuple form.

    Steps:
    1. Rebuild the elements by building the stabilizer circuit and
       transforming the existing k elements (Q = u^dagger K u).
    2. Transform the coefficients to the new basis by multiplying by the
       coefficients of the transformed k elements.
    3. Return the new k to be compiled normally.

    (A stray dead ``pass`` statement before the body was removed.)
    """
    Kterms = CQS2OFterms(k, kCoefs)
    Klist = np.array([QO(op) for op in Kterms.keys()])
    g = OFgate()
    # The stabilizer unitary, assembled from symbolic OpenFermion gates.
    u = []
    u.append(g.h(0)*g.cx(0,2)*g.h(0))
    u.append(g.cx(2,3)*g.s(2))
    u.append(g.cx(0,1)*g.s(0))
    uk = u[2]*u[1]*u[0]
    # Conjugate each k term: Q = u^dagger K u
    Qlist = np.array(hermitian_conjugated(uk)) * Klist * np.array(uk)
    knew = []
    kCoefsNew = []
    for (OFterm, kco) in zip(Qlist, kCoefs):
        OFterm.compress()
        coNew, tupNew = OpenFermionToCQS(OFterm, 4)
        # Each conjugated Clifford image is a single Pauli (up to a phase
        # that folds into the coefficient).
        knew.append(tupNew[0])
        kCoefsNew.append(np.multiply(coNew[0], kco))
    return knew, kCoefsNew
def GBAt_manual(circuit, ParametersObj, time, ctrlgates = ['cx','cx']):
sub_qr = QuantumRegister(4)
sub_qc = QuantumCircuit(sub_qr)
kCo = ParametersObj.kCoefs
k = ParametersObj.cartan.k
hCo = ParametersObj.hCoefs
h = ParametersObj.cartan.h
#Interference on Ancilla
circuit.h(0)
getattr(circuit,ctrlgates[1])(0,1)
#Neighboring K elements
#Ordered as [(1, 2, 3, 0), SWAP 2-3 (1, 2, 0, 3), (2, 1, 0, 3), SWAP | |
<filename>Lighthouse_problem.py
#!/usr/bin/env python
# coding: utf-8
# [1]
import numpy as np;import matplotlib.pyplot as plt
from IPython.display import Image
from IPython.html.widgets import interact
# [2]
Image('Lighthouse_schematic.jpg',width=500)
# The following is a classic estimation problem called the "lighthouse problem". The figure shows a set of receivers distributed at coordinates $x_k$ along the shore and a lighthouse located at some position $(\alpha,\beta)$ offshore. The idea is that the coastline is equipped with a continuous strip of photodetectors. The lighthouse flashes the shore $N$ times at some random angle $\theta_k$. The strip of photodetectors registers the $k^{th}$ flash position $x_k$, but the angle $\theta_k$ of the flash is unknown. Furthermore, the lighthouse beam is laser-like so there is no smearing along the strip of photodetectors. In other words, the lighthouse is actually more of a disco-ball in a dark nightclub.
#
# The problem is how to estimate $ \alpha $ given we already have $\beta$.
#
# From basic trigonometry, we have the following:
#
# $$ \beta \tan(\theta_k) = x_k - \alpha $$
#
# The density function for the angle is assumed to be the following:
#
# $$ f_{\alpha}(\theta_k) = \frac{1}{\pi} $$
#
# This means that the density of the angle is uniformly distributed between $ \pm \pi/2 $. Now, what we really want is the density function for $x_k$ which will tell us the probability that the $k^{th}$ flash will be recorded at position $ x_k $. After a transformation of variables, we obtain the following:
#
# $$ f_{\alpha}(x_k) = \frac{\beta}{\pi(\beta ^2 +(x_k-\alpha)^2)} $$
#
# which we plot below for some reasonable factors
#
# [3]
xi = np.linspace(-10,10,150)
alpha = 1
# Cauchy (Lorentzian) density of the flash position x, for beta = 1.
# Fixed: `pi` and the bare pyplot names (plot/xlabel/...) were undefined
# outside a %pylab notebook session.
f = lambda x: 1/(np.pi*(1+(x-alpha)**2))
plt.plot(xi,f(xi))
plt.xlabel('$x$',fontsize=24)
plt.ylabel(r'$f_{\alpha}(x)$',fontsize=24)
plt.vlines(alpha,0,.35,linestyles='--',lw=4.)
plt.grid()
# As shown in figure, the peak for this distribution is when $ x_k=\alpha $. Because there is no coherent processing, the recording of a signal at one position does not influence what we can infer about the position of another measurement. Thus, we can justify assuming independence between the individual $x_k$ measurements. The encouraging thing about this distribution is that it is centered at the variable we are trying to estimate, namely $\alpha$.
#
# The temptation here is to just average out the $x_k$ measurements because of this tendency of the distribution around $\alpha$. Later, we will see why this is a bad idea.
# ## Using Maximum Likelihood Estimation
# Given $N$ measurements, we form the likelihood as the following:
#
# $$ L(\alpha) = \prod_{k=1}^N f_{\alpha}(x_k)$$
#
# And this is the function we want to maximize for our maximum likelihood estimator. Let's process the logarithm of this to make the product easy to work with as this does not influence the position of the maximum $ \alpha $. Then,
#
# $$\mathcal{L}(\alpha)=\sum_{k=1}^N \ln f_{\alpha}(x_k) = \texttt{constant}- \sum_{k=1}^N \ln (\beta ^2 +(x_k-\alpha)^2) $$
#
# Taking the first derivative gives us the equation would have to solve in order to compute the estimator for $ \alpha $,
#
# $$ \frac{d \mathcal{L}}{d \alpha} = 2 \sum_{k=1}^N \frac{x_k-\alpha}{\beta^2+(x_k-\alpha)^2}=0$$
#
# Unfortunately, there is no easy way to solve for the optimal $ \alpha $ for this equation. However, we are not defenseless at this point because Python has all the tools we need to overcome this. Let's start by getting a quick look at the histogram of the $x_k$ measurements.
# [4]
beta =alpha = 1
theta_samples=((2*np.np.random.random.np.random.rand(250)-1)*pi/2)
x_samples = alpha+beta*np.tan(theta_samples)
hist(x_samples);
# The histogram above shows that although many of the measurements are in one bin, even for relatively few samples, we still observe measurements that are very far displaced from the main group. This is because we initially assumed that the $\theta_k$ angle could be anywhere between $[-\pi/2,\pi/2]$, which is basically expressing our ignorance of the length of the coastline.
#
# With that in mind, let's turn to the maximum likelihood estimation. We will need some tools from `sympy`.
# [5]
import sympy as S
a=S.symbols('alpha',real=True)
x = S.symbols('x',real=True)
# form derivative of the log-likelihood
# (beta is fixed at 1 here, so each term is (x_k - a) / (1 + (x_k - a)^2))
dL=sum((xk-a)/(1+(xk-a)**2) for xk in x_samples)
S.plot(dL,(a,-15,15),xlabel=r'$\alpha$',ylabel=r'$dL(\alpha)/d\alpha$')
# The above figure shows that the zero-crossing of the derivative of the log-likelihood crosses where the $\alpha$ is to be estimated. Thus, our next task is to solve for the zero-crossing, which will then reveal the maximum likelihood estimate of $\alpha$ given the set of measurements $\lbrace x_k \rbrace$.
#
#
# There are tools in `scipy.optimize` that can help us compute the zero-crossing as demonstrated in the cell below.
# [6]
from scipy import optimize
from scipy.optimize import fmin_l_bfgs_b
# convert sympy function to numpy version with lambdify; minimizing dL**2
# turns the zero-crossing of dL into a minimum.
alpha_x = fmin_l_bfgs_b(S.lambdify(a,(dL)**2),0,bounds=[(-3,3)],approx_grad=True)
# Fixed: Python-2 `print alpha_x` statement (SyntaxError on Python 3).
print(alpha_x)
# ## Comparing The Maximum likelihood Estimator to Just Plain Averaging
# Whew. That was a lot of work to compute the maximum likelihood estimation. Earlier we observed that the density function is peaked around the $\alpha$ we are trying to estimate. Why not just take the average of the $\lbrace x_k \rbrace$ and use that to estimate $\alpha$?
#
# Let's try computing the average in the cell below.
# [7]
# Fixed: Python-2 print statements and the mangled `x_samples.np.mean()`
# (ndarray has no `.np` attribute; a bad find/replace produced it).
print('alpha using average =', x_samples.mean())
print('maximum likelihood estimate = ', alpha_x[0])
# If you run this notebook a few times, you will see that the estimate using the average has enormous variance. This is a consequence of the fact that we can have very large absolute values for $\lbrace x_k \rbrace$ corresponding to values of $\theta_k$ near the edges of the $[-\pi/2,\pi/2]$ interval.
# [8]
def run_trials(n=100, alpha=1, beta=1):
    """Generate *n* independent synthetic lighthouse trials of 250 flash
    positions each.

    :param n: number of trials.  Fixed: the original ignored this parameter
        and hard-coded ``range(100)``.
    :param alpha: lighthouse position along the shore (default matches the
        module-level value set in cell [4])
    :param beta: lighthouse distance from the shore
    :return: ndarray of shape (n, 250)
    """
    trials = []
    for _ in range(n):
        # Fixed: mangled `np.np.random.random.np.random.rand` and bare `pi`.
        theta_samples = (2*np.random.rand(250) - 1)*np.pi/2
        x_samples = alpha + beta*np.tan(theta_samples)
        trials.append(x_samples)
    return np.array(trials)
# [9]
o = run_trials()
# The following figure shows the histogram of the measurements. As shown, there are many measurements away from the central part. This is the cause of our widely varying average. What if we just trimmed away the excess outliers? Would that leave us with an easier to implement procedure for estimating $\alpha$?
# [10]
# Fixed: bare `hist` was undefined outside a %pylab session.
plt.hist(o[np.where(abs(o) < 200)])
# The following graph shows what happens when we include only a relative neighborhood around zero in our calculation of the average value. Note that the figure shows a wide spread of average values depending upon how big a neighborhood around zero we decide to keep. This is an indication that the average is not a good estimator for our problem because it is very sensitive to outliers.
# [11]
# Average of only those samples within +/- i of zero, for growing i: the
# estimate never settles, showing the mean's sensitivity to outliers.
# Fixed: bare pyplot names and the mangled `.np.mean()` call.
plt.plot(range(100,10000,100),
         [o[np.where(abs(o) < i)].mean() for i in range(100,10000,100)], '-o')
plt.xlabel('width of neighborhood around zero')
plt.ylabel('value of average estimate')
# For some perspective, we can wrap our maximum likelihood estimator code in one function and then examine the variance of the estimator using our set of synthetic trials data. Note that this takes a long time to run!
# [12]
def ML_estimator(x_samples):
    """Maximum-likelihood estimate of alpha from one trial's measurements.

    Minimizes the squared score (derivative of the Cauchy log-likelihood)
    over the sympy symbol `a`, bracketed to [-3, 3].
    """
    score = sum((xk - a)/(1 + (xk - a)**2) for xk in x_samples)
    opt = fmin_l_bfgs_b(S.lambdify(a, score**2), 0,
                        bounds=[(-3, 3)], approx_grad=True)
    return opt[0]
# [13]
# run maximum likelihood estimator on synthetic data we generated earlier
# Beware this may take a long time!
# (fixed garbled `np.np.hstack` -> `np.hstack` throughout this cell)
v = np.hstack([ML_estimator(o[i,:]) for i in range(o.shape[0])])
# [14]
vmed = np.hstack([np.median(o[i,:]) for i in range(o.shape[0])])
vavg = np.hstack([np.mean(o[i,:]) for i in range(o.shape[0])])
fig,ax = plt.subplots()
ax.plot(v,'-o',label='ML')
ax.plot(vmed,'gs-',label='median')
# fixed garbled legend label 'np.mean' back to 'mean'
ax.plot(vavg,'r^-',label='mean')
ax.axis(ymax=2,ymin=0)
ax.legend(loc=(1,0))
ax.set_xlabel('Trial Index')
ax.set_ylabel('Value of Estimator')
# The above chart shows that the mean-based estimator jumps all over the place while the maximum likelihood (ML) and median-based estimators are less volatile. The next chart explores the relationship between the ML and median-based estimators and checks whether one is biased compared to the other. The figure below shows that (1) there is a small numerical difference between the two estimators (2) neither is systematically different from the other (otherwise, the diagonal would not split them so evenly).
# [15]
# Scatter the ML estimates against the median estimates; points hugging the
# diagonal mean the two estimators agree and neither is biased vs. the other.
fig,ax = plt.subplots()
ii= np.argsort(v)  # sort by ML estimate so the scatter reads left-to-right
ax.plot(v[ii],vmed[ii],'o',alpha=.3)
axs = ax.axis()  # remember the data-driven axis limits
ax.plot(np.linspace(0,2,10),np.linspace(0,2,10))  # reference diagonal y = x
ax.axis(axs)  # restore limits clobbered by the diagonal line
ax.set_aspect(1)
ax.set_xlabel('ML estimate')
ax.set_ylabel('Median estimate')
# [16]
# Overlaid histograms of the two estimators' sampling distributions.
fig,ax = plt.subplots()
ax.hist(v,10,alpha=.3,label='ML')
ax.hist(vmed,10,alpha=.3,label='median')
ax.legend(loc=(1,0))
# ## Maximum A-Posteriori (MAP) Estimation
# Let's use a uniform distribution for the prior of $\alpha$ around some bracketed interval
#
# $$ f(\alpha) = \frac{1}{\alpha_{max}-\alpha_{min}} \quad where \quad \alpha_{max} \le \alpha \le \alpha_{min}$$
#
# and zero otherwise. We can compute this sample-by-sample to see how this works using this prior.
# [17]
# NOTE(review): `alpha` previously held the numeric true value; here it is
# rebound to the sympy symbol `a` for symbolic work — confirm later cells no
# longer need the numeric value.
alpha = a
alphamx,alphamn=3,-3  # bracket [alpha_min, alpha_max] for the uniform prior
g = f(x_samples[0])  # likelihood of the first sample as a function of alpha
xi = np.linspace(alphamn,alphamx,100)
mxval = S.lambdify(a,g)(xi).max()  # peak height, used to place the sample marker
plot(xi,S.lambdify(a,g)(xi),x_samples[0],mxval*1.1,'o')
# [21]
xi = np.linspace(alphamn,alphamx,100)
# Un-normalized uniform prior on alpha over |alpha| < 3, zero elsewhere.
palpha=S.Piecewise((1,abs(x)<3),(0,True))
def slide_figure(n=0):
    """Plot the posterior after incorporating the first ``n`` samples.

    For n=0 the flat prior alone is drawn; otherwise the product of the
    first n sample likelihoods is plotted, with previously seen samples in
    gray and the newest sample highlighted.  Intended as an ``interact``
    slider callback.
    """
    fig,ax=plt.subplots()
    # local prior (shadows the module-level palpha of the same shape)
    palpha=S.Piecewise((1,abs(x)<3),(0,True))
    if n==0:
        ax.plot(xi,[S.lambdify(x,palpha)(i) for i in xi])
        ax.plot(x_samples[0],1.1,'o',ms=10.)
    else:
        # joint likelihood of the first n samples as a function of alpha
        g = S.prod(map(f,x_samples[:n]))
        mxval = S.lambdify(a,g)(xi).max()*1.1
        ax.plot(xi,S.lambdify(a,g)(xi))
        # previously seen samples in gray, the current sample emphasized
        ax.plot(x_samples[:n],mxval*ones(n),'o',color='gray',alpha=0.3)
        ax.plot(x_samples[n],mxval,'o',ms=10.)
        ax.axis(ymax=mxval*1.1)
    ax.set_xlabel(r'$\alpha$',fontsize=18)
    ax.axis(xmin=-17,xmax=17)
# [22]
# Interactive slider: step through how each successive sample reshapes the posterior.
interact(slide_figure, n=(0,15,1));
# ## Does the order matter?
# [23]
fig,axs = plt.subplots(2,6,sharex=True)
fig.set_size_inches((10,3))
for n,ax in enumerate(axs.flatten()):
if n==0:
ax.plot(xi,[S.lambdify(x,palpha)(i) for i in xi])
ax.plot(x_samples[0],1.1,'o',ms=10.)
else:
g = S.prod(map(f,x_samples[:n]))
mxval | |
\
"OrCAD Files (*.top *.bot *.smt *.smb *.sst *.ssb *.spt *.spb);;" \
"Allegro Files (*.art);;" \
"Mentor Files (*.pho *.gdo);;" \
"All Files (*.*)"
try:
filename, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Open Gerber with Follow",
directory=self.get_last_folder(), filter=_filter_)
except TypeError:
filename, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Open Gerber with Follow", filter=_filter_)
# The Qt methods above will return a QString which can cause problems later.
# So far json.dump() will fail to serialize it.
# TODO: Improve the serialization methods and remove this fix.
filename = str(filename)
follow = True
if filename == "":
self.inform.emit("[warning_notcl]Open Gerber-Follow cancelled.")
else:
self.worker_task.emit({'fcn': self.open_gerber,
'params': [filename, follow]})
def on_fileopenexcellon(self):
    """
    File menu callback for opening an Excellon file.

    :return: None
    """
    self.report_usage("on_fileopenexcellon")
    App.log.debug("on_fileopenexcellon()")

    _filter_ = "Excellon Files (*.drl *.txt *.xln *.drd *.tap *.exc);;" \
               "All Files (*.*)"
    try:
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            caption="Open Excellon", directory=self.get_last_folder(), filter=_filter_)
    except TypeError:
        # older Qt bindings do not accept the directory keyword
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Open Excellon", filter=_filter_)

    # Qt may return a QString which json.dump() cannot serialize; force str.
    filename = str(filename)

    if not filename:
        self.inform.emit("[warning_notcl]Open Excellon cancelled.")
        return
    self.worker_task.emit({'fcn': self.open_excellon,
                           'params': [filename]})
def on_fileopengcode(self):
    """
    File menu callback for opening a G-Code file.

    :return: None
    """
    self.report_usage("on_fileopengcode")
    App.log.debug("on_fileopengcode()")

    # https://bobcadsupport.com/helpdesk/index.php?/Knowledgebase/Article/View/13/5/known-g-code-file-extensions
    _filter_ = "G-Code Files (*.txt *.nc *.ncc *.tap *.gcode *.cnc *.ecs *.fnc *.dnc *.ncg *.gc *.fan *.fgc" \
               " *.din *.xpi *.hnc *.h *.i *.ncp *.min *.gcd *.rol *.mpr *.ply *.out *.eia *.plt *.sbp *.mpf);;" \
               "All Files (*.*)"
    try:
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            caption="Open G-Code", directory=self.get_last_folder(), filter=_filter_)
    except TypeError:
        # older Qt bindings do not accept the directory keyword
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Open G-Code", filter=_filter_)

    # Qt may return a QString which json.dump() cannot serialize; force str.
    filename = str(filename)

    if not filename:
        self.inform.emit("[warning_notcl]Open G-Code cancelled.")
        return
    self.worker_task.emit({'fcn': self.open_gcode,
                           'params': [filename]})
def on_file_openproject(self):
    """
    File menu callback for opening a project.

    :return: None
    """
    self.report_usage("on_file_openproject")
    App.log.debug("on_file_openproject()")

    _filter_ = "FlatCAM Project (*.FlatPrj);;All Files (*.*)"
    try:
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            caption="Open Project", directory=self.get_last_folder(), filter=_filter_)
    except TypeError:
        # older Qt bindings do not accept the directory keyword
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Open Project", filter=_filter_)

    # Qt may return a QString which json.dump() cannot serialize; force str.
    filename = str(filename)

    if not filename:
        self.inform.emit("[warning_notcl]Open Project cancelled.")
        return
    # open_project() is not thread safe, so it is called directly here
    # instead of being dispatched through worker_task.
    self.open_project(filename)
def on_file_exportsvg(self):
    """
    Callback for menu item File->Export SVG. Exports the currently
    selected object to an SVG file chosen by the user.

    :return: None
    """
    self.report_usage("on_file_exportsvg")
    App.log.debug("on_file_exportsvg()")

    obj = self.collection.get_active()
    if obj is None:
        # use the same status-bar tag as the sibling export callbacks
        self.inform.emit("[warning_notcl] No object selected.")
        msg = "Please Select a Geometry object to export"
        msgbox = QtWidgets.QMessageBox()
        msgbox.setInformativeText(msg)
        msgbox.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msgbox.setDefaultButton(QtWidgets.QMessageBox.Ok)
        msgbox.exec_()
        return

    # Check for more compatible types and add as required
    if (not isinstance(obj, FlatCAMGeometry) and not isinstance(obj, FlatCAMGerber)
            and not isinstance(obj, FlatCAMCNCjob) and not isinstance(obj, FlatCAMExcellon)):
        msg = "[error_notcl] Only Geometry, Gerber and CNCJob objects can be used."
        msgbox = QtWidgets.QMessageBox()
        msgbox.setInformativeText(msg)
        msgbox.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msgbox.setDefaultButton(QtWidgets.QMessageBox.Ok)
        msgbox.exec_()
        return

    # reuse the object fetched above instead of querying the collection again
    name = obj.options["name"]

    _filter_ = "SVG File (*.svg);;All Files (*.*)"  # renamed: avoid shadowing builtin filter()
    try:
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(
            caption="Export SVG", directory=self.get_last_save_folder(), filter=_filter_)
    except TypeError:
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(caption="Export SVG", filter=_filter_)

    filename = str(filename)

    if filename == "":
        self.inform.emit("[warning_notcl]Export SVG cancelled.")
        return
    self.export_svg(name, filename)
    self.file_saved.emit("SVG", filename)
def on_file_exportpng(self):
    """
    Callback for menu item File->Export PNG. Saves a screenshot of the
    canvas to a PNG file chosen by the user.

    :return: None
    """
    self.report_usage("on_file_exportpng")
    App.log.debug("on_file_exportpng()")

    image = _screenshot()
    data = np.asarray(image)
    # Bug fix: the original guard was `not data.ndim == 3 and ...`, which by
    # operator precedence rejected valid HxWx3/4 data and silently accepted
    # invalid shapes.  Reject anything that is not a 3-D RGB(A) array.
    if not (data.ndim == 3 and data.shape[-1] in (3, 4)):
        # also fixed the doubled "[[warning_notcl]]" tag
        self.inform.emit('[warning_notcl] Data must be a 3D array with last dimension 3 or 4')
        return

    _filter_ = "PNG File (*.png);;All Files (*.*)"
    try:
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(
            caption="Export PNG Image",
            directory=self.get_last_save_folder(), filter=_filter_)
    except TypeError:
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(caption="Export PNG Image", filter=_filter_)

    filename = str(filename)

    if filename == "":
        self.inform.emit("Export PNG cancelled.")
        return
    write_png(filename, data)
    self.file_saved.emit("png", filename)
def on_file_exportexcellon(self, altium_format=None):
    """
    Callback for menu item File->Export Excellon.
    (docstring fixed: previously said "Export SVG")

    :param altium_format: when not None, export using the Altium variant
    :return: None
    """
    self.report_usage("on_file_exportexcellon")
    App.log.debug("on_file_exportexcellon()")

    obj = self.collection.get_active()
    if obj is None:
        self.inform.emit("[warning_notcl] No object selected.")
        msg = "Please Select an Excellon object to export"
        msgbox = QtWidgets.QMessageBox()
        msgbox.setInformativeText(msg)
        msgbox.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msgbox.setDefaultButton(QtWidgets.QMessageBox.Ok)
        msgbox.exec_()
        return

    # Check for more compatible types and add as required
    if not isinstance(obj, FlatCAMExcellon):
        msg = "[warning_notcl] Only Excellon objects can be used."
        msgbox = QtWidgets.QMessageBox()
        msgbox.setInformativeText(msg)
        msgbox.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msgbox.setDefaultButton(QtWidgets.QMessageBox.Ok)
        msgbox.exec_()
        return

    # reuse the object fetched above instead of querying the collection again
    name = obj.options["name"]

    _filter_ = "Excellon File (*.drl);;Excellon File (*.txt);;All Files (*.*)"
    try:
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(
            caption="Export Excellon", directory=self.get_last_save_folder(), filter=_filter_)
    except TypeError:
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(caption="Export Excellon", filter=_filter_)

    filename = str(filename)

    if filename == "":
        self.inform.emit("[warning_notcl]Export Excellon cancelled.")
        return
    # Idiom fix: `altium_format == None` -> `is None`; the two branches
    # differed only in the keyword argument, so the emit is shared.
    if altium_format is None:
        self.export_excellon(name, filename)
    else:
        self.export_excellon(name, filename, altium_format=True)
    self.file_saved.emit("Excellon", filename)
def on_file_exportdxf(self):
    """
    Callback for menu item File->Export DXF.

    :return: None
    """
    self.report_usage("on_file_exportdxf")
    App.log.debug("on_file_exportdxf()")

    obj = self.collection.get_active()
    if obj is None:
        # Bug fix: message read "W[warning_notcl]..." (stray leading 'W'),
        # which broke the status-bar tag parsing.
        self.inform.emit("[warning_notcl] No object selected.")
        msg = "Please Select a Geometry object to export"
        msgbox = QtWidgets.QMessageBox()
        msgbox.setInformativeText(msg)
        msgbox.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msgbox.setDefaultButton(QtWidgets.QMessageBox.Ok)
        msgbox.exec_()
        return

    # Check for more compatible types and add as required
    if not isinstance(obj, FlatCAMGeometry):
        msg = "[error_notcl] Only Geometry objects can be used."
        msgbox = QtWidgets.QMessageBox()
        msgbox.setInformativeText(msg)
        msgbox.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msgbox.setDefaultButton(QtWidgets.QMessageBox.Ok)
        msgbox.exec_()
        return

    # reuse the object fetched above instead of querying the collection again
    name = obj.options["name"]

    _filter_ = "DXF File (*.DXF);;All Files (*.*)"
    try:
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(
            caption="Export DXF", directory=self.get_last_save_folder(), filter=_filter_)
    except TypeError:
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(caption="Export DXF", filter=_filter_)

    filename = str(filename)

    if filename == "":
        self.inform.emit("[warning_notcl]Export DXF cancelled.")
        return
    self.export_dxf(name, filename)
    self.file_saved.emit("DXF", filename)
def on_file_importsvg(self, type_of_obj):
    """
    Callback for menu item File->Import SVG.

    :param type_of_obj: import the SVG as "geometry" or as "gerber";
                        anything else falls back to "geometry"
    :type type_of_obj: str
    :return: None
    """
    self.report_usage("on_file_importsvg")
    App.log.debug("on_file_importsvg()")

    _filter_ = "SVG File (*.svg);;All Files (*.*)"  # renamed: avoid shadowing builtin filter()
    try:
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            caption="Import SVG", directory=self.get_last_folder(), filter=_filter_)
    except TypeError:
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Import SVG", filter=_filter_)
    filename = str(filename)

    # Bug fix: the original used `is not` to compare strings, which tests
    # object identity rather than equality and is unreliable for str values.
    if type_of_obj not in ("geometry", "gerber"):
        type_of_obj = "geometry"

    if filename == "":
        self.inform.emit("[warning_notcl]Open cancelled.")
    else:
        self.worker_task.emit({'fcn': self.import_svg,
                               'params': [filename, type_of_obj]})
def on_file_importdxf(self, type_of_obj):
    """
    Callback for menu item File->Import DXF.

    :param type_of_obj: import the DXF as "geometry" or as "gerber";
                        anything else falls back to "geometry"
    :type type_of_obj: str
    :return: None
    """
    self.report_usage("on_file_importdxf")
    App.log.debug("on_file_importdxf()")

    _filter_ = "DXF File (*.DXF);;All Files (*.*)"  # renamed: avoid shadowing builtin filter()
    try:
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            caption="Import DXF", directory=self.get_last_folder(), filter=_filter_)
    except TypeError:
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Import DXF", filter=_filter_)
    filename = str(filename)

    # Bug fix: the original used `is not` to compare strings, which tests
    # object identity rather than equality and is unreliable for str values.
    if type_of_obj not in ("geometry", "gerber"):
        type_of_obj = "geometry"

    if filename == "":
        self.inform.emit("[warning_notcl]Open cancelled.")
    else:
        self.worker_task.emit({'fcn': self.import_dxf,
                               'params': [filename, type_of_obj]})
def on_filerunscript(self):
    """
    File menu callback for loading and running a TCL script.

    :return: None
    """
    self.report_usage("on_filerunscript")
    App.log.debug("on_file_runscript()")

    _filter_ = "TCL script (*.TCL);;TCL script (*.TXT);;All Files (*.*)"
    try:
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            caption="Open TCL script", directory=self.get_last_folder(), filter=_filter_)
    except TypeError:
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Open TCL script", filter=_filter_)

    # Qt may return a QString which json.dump() cannot serialize; force str.
    filename = str(filename)

    if not filename:
        self.inform.emit("[warning_notcl]Open TCL script cancelled.")
        return
    try:
        with open(filename, "r") as tcl_script:
            script_text = tcl_script.read()
        self.shell._sysShell.exec_command(script_text)
    except Exception as ext:
        # NOTE(review): terminating the whole application on a script error
        # is drastic for a GUI callback — confirm this is intended.
        print("ERROR: ", ext)
        sys.exit(2)
def on_file_saveproject(self):
    """
    Callback for menu item File->Save Project. Saves the project to
    ``self.project_filename`` or calls ``self.on_file_saveprojectas()``
    if set to None. The project is saved by calling ``self.save_project()``.

    :return: None
    """
    self.report_usage("on_file_saveproject")

    if self.project_filename is None:
        # no target file yet: delegate to the Save As... flow
        self.on_file_saveprojectas()
        return

    self.worker_task.emit({'fcn': self.save_project,
                           'params': [self.project_filename]})
    # self.save_project(self.project_filename)
    self.file_opened.emit("project", self.project_filename)
    self.file_saved.emit("project", self.project_filename)
def on_file_saveprojectas(self, make_copy=False, thread=True):
"""
Callback for menu item File->Save Project As... Opens a file
chooser and saves the project to the given file via
``self.save_project()``.
:return: None
"""
self.report_usage("on_file_saveprojectas")
filter = "FlatCAM Project (*.FlatPrj);; All Files (*.*)"
try:
filename, _ = QtWidgets.QFileDialog.getSaveFileName(caption="Save Project As ...",
directory=self.get_last_save_folder(), filter=filter)
except TypeError:
filename, _ = QtWidgets.QFileDialog.getSaveFileName(caption="Save Project As ...", filter=filter)
filename = str(filename)
if filename == '':
self.inform.emit("[warning_notcl]Save Project cancelled.")
return
try:
f = open(filename, 'r')
f.close()
exists = True
except IOError:
exists = False
msg = "File exists. Overwrite?"
if exists:
msgbox = QtWidgets.QMessageBox()
msgbox.setInformativeText(msg)
msgbox.setStandardButtons(QtWidgets.QMessageBox.Cancel |QtWidgets.QMessageBox.Ok)
msgbox.setDefaultButton(QtWidgets.QMessageBox.Cancel)
result | |
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import random
import numpy as np
import tensorflow as tf # TF2
import matplotlib.pyplot as plt
from skimage.io import imsave
import cv2
import skimage
assert tf.__version__.startswith('2'), 'use tensorflow 2.x'
IMG_WIDTH = 384        # network input width in pixels
IMG_HEIGHT = 384       # network input height in pixels
PARALLEL_CALLS = 4     # parallelism for tf.data map() operations
BUFFER_SIZE = 400      # shuffle buffer size for the training set
BATCH_SIZE = 4         # samples per batch
EPOCHS = 50            # training epochs
SMOOTH = 1e-5          # smoothing constant (NOTE(review): unused in this chunk)
# Per-backbone names of the encoder layers whose activations feed the decoder
# skip connections, ordered shallow -> deep (the last entry is the bottleneck).
BACKBONE_LAYER_NAMES = {
    'vgg19': [
        'block2_conv2',
        'block3_conv4',
        'block4_conv4',
        'block5_conv4',
        'block5_pool'],
    'resnet50': [
        'conv1_relu',
        'conv2_block3_out',
        'conv3_block4_out',
        'conv4_block6_out',
        'conv5_block3_out'],
    'resnet50v2': [
        'conv1_conv',
        'conv2_block3_1_relu',
        'conv3_block4_1_relu',
        'conv4_block6_1_relu',
        'post_relu'],
    'resnet101': [
        'conv1_relu',
        'conv2_block3_out',
        'conv3_block4_out',
        'conv4_block6_out',
        'conv5_block3_out'],
    'mobilenetv2': [
        'block_1_expand_relu',
        'block_3_expand_relu',
        'block_6_expand_relu',
        'block_13_expand_relu',
        'block_16_project']
}
class Config():
    """Experiment configuration: acquisition dates, fields and split settings."""
    dates = ['20190703', '20190719', '20190822']  # acquisition dates (YYYYMMDD)
    fields = ['Field_A', 'Field_C']               # field identifiers in the dataset
    seed = 1                                      # RNG seed for reproducible splits
    train_size = 0.8                              # fraction of samples used for training
def decode_img(img):
    """Decode a PNG byte-string tensor into a float32 RGB image tensor.

    Args:
        img: scalar string tensor holding the compressed file contents

    Returns:
        float32 image tensor with values scaled to [0, 1]
    """
    # decode the compressed string into a 3-channel uint8 tensor,
    # then rescale to floats in [0, 1]
    decoded = tf.image.decode_png(img, channels=3)
    return tf.image.convert_image_dtype(decoded, tf.float32)
def process_path(image_path, mask_path):
    """Load an image/mask pair from disk. For use with tf.data.Dataset.map.

    Args:
        image_path: image path as a string tensor
        mask_path: mask path as a string tensor

    Returns:
        decoded image, decoded mask, image_path (kept for later bookkeeping)
    """
    raw_img = tf.io.read_file(image_path)
    raw_msk = tf.io.read_file(mask_path)
    return decode_img(raw_img), decode_img(raw_msk), image_path
def random_flip(image, mask, image_path):
    """Randomly mirror image and mask together. For tf.data.Dataset.map.

    Each of the two flips (left/right, then up/down) is applied with
    probability 0.5, identically to image and mask so they stay aligned.

    Args:
        image: image tensor [height, width, channels]
        mask: mask tensor [height, width, channels]
        image_path: passed through untouched

    Returns:
        image, mask, image_path
    """
    if tf.random.uniform(()) > 0.5:
        image, mask = tf.image.flip_left_right(image), tf.image.flip_left_right(mask)
    if tf.random.uniform(()) > 0.5:
        image, mask = tf.image.flip_up_down(image), tf.image.flip_up_down(mask)
    return image, mask, image_path
def random_crop(image, mask, image_path):
    """Take the same random IMG_HEIGHT x IMG_WIDTH crop from image and mask.

    Stacking the pair before cropping guarantees both receive identical
    crop offsets.  For use with tf.data.Dataset.map.

    Args:
        image: image tensor [height, width, channels]
        mask: mask tensor [height, width, channels]
        image_path: passed through untouched

    Returns:
        cropped image, cropped mask, image_path
    """
    pair = tf.stack([image, mask], axis=0)
    pair = tf.image.random_crop(pair, size=[2, IMG_HEIGHT, IMG_WIDTH, 3])
    return pair[0], pair[1], image_path
def random_brightness(image, mask, image_path):
    """Jitter image brightness by up to +/-0.2, clipped back to [0, 1].

    The mask is left untouched.  For use with tf.data.Dataset.map.

    Args:
        image: image tensor [height, width, channels]
        mask: mask tensor [height, width, channels]
        image_path: passed through untouched

    Returns:
        brightened image, mask, image_path
    """
    jittered = tf.image.random_brightness(image, 0.2)
    return tf.clip_by_value(jittered, 0.0, 1.0), mask, image_path
@tf.function
def central_crop(image, mask, image_path):
    """Trim a 64-pixel border off both image and mask.

    For use with tf.data.Dataset.map; image_path is passed through.

    Args:
        image: image tensor [height, width, channels]
        mask: mask tensor [height, width, channels]
        image_path: passed through untouched

    Returns:
        cropped image, cropped mask, image_path
    """
    return image[64:-64, 64:-64], mask[64:-64, 64:-64], image_path
def add_gaussian_noise(image, mask, image_path):
    """With probability 0.5, add Gaussian noise (sigma = 10/255) to the image.

    The result is clipped back to [0, 1]; the mask is left untouched.
    For use with tf.data.Dataset.map.

    Args:
        image: image tensor [height, width, channels]
        mask: mask tensor [height, width, channels]
        image_path: passed through untouched

    Returns:
        (possibly noisy) image, mask, image_path
    """
    if tf.random.uniform(()) > 0.5:
        jitter = tf.random.normal(shape=tf.shape(image), mean=0.0,
                                  stddev=(10) / (255), dtype=tf.float32)
        out = tf.clip_by_value(image + jitter, 0.0, 1.0)
    else:
        out = image
    return out, mask, image_path
def unindex(image, mask, image_path):
    """Drop the bookkeeping image_path element, keeping only (image, mask)."""
    return image, mask
def create_train_datasets(train_set_list, val_set_list, test_set_list, buffer_size, batch_size):
    """ Creates the train/validation/test tf.data pipelines.

    Args:
        train_set_list: pair (image file pattern, mask file pattern) for training
        val_set_list: pair (image file pattern, mask file pattern) for validation
        test_set_list: pair (image file pattern, mask file pattern) for testing
        buffer_size: shuffle buffer size for the training set
        batch_size: batch size applied to all three sets

    Returns:
        train dataset, val dataset, test dataset

    Notes:
        Images and masks are listed with shuffle=False and zipped, so the two
        file listings must correspond one-to-one.  Training data is augmented
        (random crop / brightness / flip / Gaussian noise); the validation set
        is only center-cropped; the test set is loaded at full size.
    """
    train_set_images = tf.data.Dataset.list_files(train_set_list[0], shuffle=False)
    train_set_masks = tf.data.Dataset.list_files(train_set_list[1], shuffle=False)
    train_set = tf.data.Dataset.zip((train_set_images, train_set_masks))
    train_set = train_set.shuffle(buffer_size)
    for func in [process_path, random_crop, random_brightness, random_flip, add_gaussian_noise]:
        train_set = train_set.map(func, num_parallel_calls=PARALLEL_CALLS)
    train_set = train_set.batch(batch_size, drop_remainder=False)
    val_set_images = tf.data.Dataset.list_files(val_set_list[0], shuffle=False)
    val_set_masks = tf.data.Dataset.list_files(val_set_list[1], shuffle=False)
    val_set = tf.data.Dataset.zip((val_set_images, val_set_masks))
    for func in [process_path, central_crop]:
        val_set = val_set.map(func, num_parallel_calls=PARALLEL_CALLS)
    val_set = val_set.batch(batch_size, drop_remainder=False)
    test_set_images = tf.data.Dataset.list_files(test_set_list[0], shuffle=False)
    test_set_masks = tf.data.Dataset.list_files(test_set_list[1], shuffle=False)
    test_set = tf.data.Dataset.zip((test_set_images, test_set_masks))
    test_set = test_set.map(process_path, num_parallel_calls=PARALLEL_CALLS)
    test_set = test_set.batch(batch_size, drop_remainder=False)
    return train_set, val_set, test_set
def simple_upblock(input_layer, filters, size, block_name, norm_type='batchnorm', apply_dropout=False):
    """ Upsamples an input.

    UpSampling2D => (Conv2D => BatchNorm => ReLU) x 2 => optional Dropout
    (docstring fixed: the code uses UpSampling2D + Conv2D, not Conv2DTranspose)

    Args:
        input_layer: input layer to apply upsampling
        filters: number of filters
        size: filter size
        block_name: name assigned to the upsampling layer
        norm_type: Normalization type; 'batchnorm'.
        apply_dropout: If True, adds the dropout layer

    Returns:
        tf.keras functional layer
    """
    x = tf.keras.layers.UpSampling2D(2, name=block_name)(input_layer)
    x = tf.keras.layers.Conv2D(filters, size, padding='same')(x)
    if norm_type.lower() == 'batchnorm':
        x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.Conv2D(filters, size, padding='same')(x)
    if norm_type.lower() == 'batchnorm':
        x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    if apply_dropout:
        x = tf.keras.layers.Dropout(0.3)(x)
    return x
def create_backbone(name='vgg19', set_trainable=True):
    """ Creates a backbone for the segmentation model.

    Args:
        name: either: 'vgg19', 'resnet50', 'resnet50v2', 'mobilenetv2', 'resnet101'
        set_trainable: either; True or False

    Returns:
        tf.keras functional model
    """
    # dispatch table replaces the if/elif chain; behavior is unchanged
    factories = {
        'vgg19': tf.keras.applications.VGG19,
        'resnet50': tf.keras.applications.ResNet50,
        'resnet50v2': tf.keras.applications.ResNet50V2,
        'mobilenetv2': tf.keras.applications.MobileNetV2,
        'resnet101': tf.keras.applications.ResNet101,
    }
    if name not in factories:
        raise ValueError('No Backbone for Name "{}" defined \nPossible Names are: {}'.format(name, list(
            BACKBONE_LAYER_NAMES.keys())))
    backbone = factories[name](input_shape=[IMG_HEIGHT, IMG_WIDTH, 3], include_top=False)
    backbone.trainable = set_trainable
    return backbone
def segmentation_model_func(output_channels, backbone_name, backbone_trainable=True):
    """ Creates a U-Net style segmentation model with the tf.keras functional api.

    Args:
        output_channels: number of output channels (classes)
        backbone_name: name of backbone; either: 'vgg19', 'resnet50', 'resnet50v2', 'mobilenetv2', 'resnet101'
        backbone_trainable: if False, the backbone (encoder) weights are frozen

    Returns:
        tf.keras functional model with a per-pixel softmax output
    """
    down_stack = create_backbone(name=backbone_name, set_trainable=backbone_trainable)
    # encoder activations used as skip connections, ordered shallow -> deep
    skips = [down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][0]).output,
             down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][1]).output,
             down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][2]).output,
             down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][3]).output,
             down_stack.get_layer(BACKBONE_LAYER_NAMES[backbone_name][4]).output]
    up_stack_filters = [64, 128, 256, 512]
    x = skips[-1]  # start decoding from the bottleneck feature map
    skips = reversed(skips[:-1])
    up_stack_filters = reversed(up_stack_filters)
    # Upsampling and establishing the skip connections
    for skip, filters in zip(skips, up_stack_filters):
        x = simple_upblock(x, filters, 3, 'up_stack' + str(filters))
        x = tf.keras.layers.Concatenate()([x, skip])
    # final upsample back to input resolution, then per-pixel softmax
    x = tf.keras.layers.UpSampling2D(2)(x)
    x = tf.keras.layers.Conv2D(32, 3, activation='relu', padding='same')(x)
    x = tf.keras.layers.Conv2D(output_channels, 1, activation='softmax', padding='same', name='final_output')(x)
    return tf.keras.Model(inputs=down_stack.layers[0].input, outputs=x)
def display(image, mask, prediction=None):
    """Plot image and mask (and, when given, prediction) side by side."""
    n_panels = 2 if prediction is None else 3
    _, axes = plt.subplots(1, n_panels, figsize=(15, 15))
    panels = [(image, 'image'), (mask, 'mask')]
    if prediction is not None:
        panels.append((prediction, 'prediction'))
    for axis, (content, title) in zip(axes, panels):
        axis.imshow(content)
        axis.set_title(title)
        axis.axis('off')
    plt.tight_layout()
def show(dataset, model=None, rows=1, threshold=0.5):
    """Visualize `rows` random samples from `dataset`.

    Without a model: draws image, mask and a mask-tinted overlay of the image.
    With a model: draws image, mask and the prediction binarized at `threshold`.
    NOTE(review): assumes batches are (images, masks[, paths]) with float
    images in [0, 1] — confirm against create_train_datasets.
    """
    for batch in dataset.shuffle(512).take(rows):
        if model is None:
            image, mask = batch[0][0], batch[1][0]
            # zero the third mask channel so the overlay tint stays readable
            tmp_mask = mask.numpy().copy()
            tmp_mask[:, :, 2] = 0
            overlay = cv2.add(image.numpy().astype(float), np.multiply(tmp_mask, 0.5).astype(float))
            overlay = np.clip(overlay, 0, 1)
            display(image, mask, overlay)
        else:
            # binarize the network output at `threshold`
            prediction = model.predict(batch[0]) > threshold
            image, mask, prediction = batch[0][0], batch[1][0], prediction[0].astype(float)
            display(image, mask, prediction)
def get_dice_score(msk, pred, skip_background=True):
    """ Dice Score Metric for Training and Validation.

    Args:
        msk: ground truth mask [batchsize, height, width, classes], type bool
        pred: prediction mask [batchsize, height, width, classes], type bool
        skip_background: if True, drop the last class channel (background)

    Returns:
        mean dice score over the batch (scalar)
    """
    if skip_background:
        # keep only the first two class channels
        msk = msk[..., 0:2]
        pred = pred[..., 0:2]
    per_sample = []
    for truth, guess in zip(msk, pred):
        overlap = np.logical_and(truth, guess)
        denom = np.sum(truth) + np.sum(guess)
        if denom == 0.0:
            # both masks empty: avoid 0/0 and yield a 0.0 dice score
            denom = np.finfo(float).eps
        per_sample.append(2. * np.sum(overlap) / denom)
    return np.mean(per_sample)
def my_dice_metric_hemp(label, pred):
    """ Wraps the numpy dice score as a tensorflow op (background skipped).

    Both tensors are binarized at 0.5 before being handed to get_dice_score,
    which by default drops the last (background) channel — hence "hemp only".

    Args:
        label: ground truth mask [batchsize, height, width, classes]
        pred: prediction mask [batchsize, height, width, classes]

    Returns:
        dice value as tensor
    """
    return tf.py_function(get_dice_score, [label > 0.5, pred > 0.5], tf.float32)
def my_dice_metric_all(label, pred):
""" Converts dice score metric to tensorflow graph, all classes
Args:
label: | |
0:
temp += "W"
if (segment.elfN_Phdr.p_flags & P_flags.PF_X) != 0:
temp += "X"
print "Flags: %s" % temp
print "Align: 0x%x" % segment.elfN_Phdr.p_align
# print which sections are in the current segment (in memory)
temp = ""
for section in segment.sectionsWithin:
temp += section.sectionName + " "
if temp != "":
print "Sections in segment: " + temp
# print which segments are within current segment (in file)
temp = ""
for segmentWithin in segment.segmentsWithin:
for i in range(len(self.segments)):
if segmentWithin == self.segments[i]:
temp += "%d, " % i
break
if temp != "":
print "Segments within segment: " + temp
# get interpreter if segment is for interpreter
# null-terminated string
if segment.elfN_Phdr.p_type == P_type.PT_INTERP:
nStart = segment.elfN_Phdr.p_offset
nEnd = nStart + segment.elfN_Phdr.p_filesz
print "Interpreter: %s" % self.data[nStart:nEnd]
print
counter += 1
# search string table entry, string table size,
# symbol table entry and symbol table entry size
stringTableOffset = None
stringTableSize = None
symbolTableOffset = None
symbolEntrySize = None
for searchEntry in self.dynamicSegmentEntries:
if searchEntry.d_tag == D_tag.DT_STRTAB:
# data contains virtual memory address
# => calculate offset in file
stringTableOffset = \
self.virtualMemoryAddrToFileOffset(searchEntry.d_un)
if searchEntry.d_tag == D_tag.DT_STRSZ:
stringTableSize = searchEntry.d_un
if searchEntry.d_tag == D_tag.DT_SYMTAB:
# data contains virtual memory address
# => calculate offset in file
symbolTableOffset = \
self.virtualMemoryAddrToFileOffset(searchEntry.d_un)
if searchEntry.d_tag == D_tag.DT_SYMENT:
symbolEntrySize = searchEntry.d_un
if (stringTableOffset is None
or stringTableSize is None
or symbolTableOffset is None
or symbolEntrySize is None):
raise ValueError("No dynamic section entry of type DT_STRTAB," \
+ " DT_STRSZ, DT_SYMTAB and/or DT_SYMENT found (malformed"\
+ " ELF executable/shared object).")
# output all dynamic segment entries
counter = 0
for entry in self.dynamicSegmentEntries:
print "Dynamic segment entry No. %d" % counter
if entry.d_tag in D_tag.reverse_lookup.keys():
print "Type: %s" % D_tag.reverse_lookup[entry.d_tag]
else:
print "Unknwon Type: 0x%x (%d)" % (entry.d_tag, entry.d_tag)
# check if entry tag equals DT_NEEDED => get library name
if entry.d_tag == D_tag.DT_NEEDED:
nStart = stringTableOffset + entry.d_un
nMaxEnd = stringTableOffset + stringTableSize
nEnd = self.data.find('\x00', nStart, nMaxEnd)
nEnd = max(nStart, nEnd)
temp = bytes(self.data[nStart:nEnd])
print "Name/Value: 0x%x (%d) (%s)" \
% (entry.d_un, entry.d_un, temp)
else:
print "Name/Value: 0x%x (%d)" % (entry.d_un, entry.d_un)
print
counter += 1
self.printRelocations(self.jumpRelocationEntries,
"Jump relocation entries")
self.printRelocations(self.relocationEntries,
"Relocation entries")
# output all dynamic symbol entries
print("Dynamic symbols (%d entries)" % len(self.dynamicSymbolEntries))
print("No."),
print("\t"),
print("Value"),
print("\t\t"),
print("Size"),
print("\t"),
print("Name"),
print
counter = 0
for entry in self.dynamicSymbolEntries:
symbol = entry.ElfN_Sym
print("%d" % counter),
print("\t"),
print("0x" + ("%x" % symbol.st_value).zfill(8)),
print("\t"),
print("0x" + ("%x" % symbol.st_size).zfill(3)),
print("\t"),
print("%s" % entry.symbolName),
print
counter += 1
# this function generates a new ELF file from the attributes of the object
# return values: (list) generated ELF file data
def generateElf(self):
# check if the file was completely parsed before
if self.fileParsed is False:
raise ValueError("Operation not possible. " \
+ "File was not completely parsed before.")
# copy binary data to new list
newfile = self.data[:]
# ------
# get position of section header table
writePosition = self.header.e_shoff
# fill list with null until writePosition is reached
if len(newfile) < writePosition:
newfile.extend(bytearray(writePosition - len(newfile)))
# write section header table back
for section in self.sections:
temp = self.sectionHeaderEntryToBytearray(section.elfN_shdr)
newfile[writePosition:writePosition+len(temp)] = temp
writePosition += len(temp)
# ------
# when defined => write string table back
if self.header.e_shstrndx != Shstrndx.SHN_UNDEF:
for section in self.sections:
# calculate the position on which the name should be written
writePosition = \
self.sections[self.header.e_shstrndx].elfN_shdr.sh_offset \
+ section.elfN_shdr.sh_name
# fill list with null until writePosition is reached
if len(newfile) < writePosition:
newfile.extend(bytearray(writePosition - len(newfile)))
# write name of all sections into string table
data = bytearray(section.sectionName) + b'\x00'
newfile[writePosition:writePosition+len(data)] = data
writePosition += len(data)
# ------
# write ELF header back
newfile[0:len(self.header.e_ident)] = self.header.e_ident
headerFields = (
# uint16_t e_type;
self.header.e_type,
# uint16_t e_machine;
self.header.e_machine,
# uint32_t e_version;
self.header.e_version,
# ElfN_Addr e_entry; (32/64 bit)
self.header.e_entry,
# ElfN_Off e_phoff; (32/64 bit)
self.header.e_phoff,
# ElfN_Off e_shoff; (32/64 bit)
self.header.e_shoff,
# uint32_t e_flags;
self.header.e_flags,
# uint16_t e_ehsize;
self.header.e_ehsize,
# uint16_t e_phentsize;
self.header.e_phentsize,
# uint16_t e_phnum;
self.header.e_phnum,
# uint16_t e_shentsize;
self.header.e_shentsize,
# uint16_t e_shnum;
self.header.e_shnum,
# uint16_t e_shstrndx;
self.header.e_shstrndx
)
if self.bits == 32:
newfile[16:52] = struct.pack('< 2H I 3I I 6H', *headerFields)
elif self.bits == 64:
newfile[16:64] = struct.pack('< 2H I 3Q I 6H', *headerFields)
# ------
# write program header table back
for i in range(len(self.segments)):
# add placeholder bytes to new file when the bytes do not already
# exist in the new file until size of header entry fits
requiredSize = self.header.e_phoff + ((i+1) * self.header.e_phentsize)
if len(newfile) < requiredSize:
newfile.extend(bytearray(requiredSize - len(newfile)))
tempOffset = self.header.e_phoff + i*self.header.e_phentsize
'''
typedef struct {
uint32_t p_type;
Elf32_Off p_offset;
Elf32_Addr p_vaddr;
Elf32_Addr p_paddr;
uint32_t p_filesz;
uint32_t p_memsz;
uint32_t p_flags; // *
uint32_t p_align;
} Elf32_Phdr;
typedef struct {
uint32_t p_type;
uint32_t p_flags; // *
Elf64_Off p_offset;
Elf64_Addr p_vaddr;
Elf64_Addr p_paddr;
uint64_t p_filesz;
uint64_t p_memsz;
uint64_t p_align;
} Elf64_Phdr;
The main difference lies in the location of p_flags within the struct.
'''
if self.bits == 32:
fmt = '< I 5I I I'
fmtSize = struct.calcsize(fmt)
assert self.header.e_phentsize == fmtSize
newfile[tempOffset:tempOffset+fmtSize] = struct.pack(fmt,
self.segments[i].elfN_Phdr.p_type,
self.segments[i].elfN_Phdr.p_offset,
self.segments[i].elfN_Phdr.p_vaddr,
self.segments[i].elfN_Phdr.p_paddr,
self.segments[i].elfN_Phdr.p_filesz,
self.segments[i].elfN_Phdr.p_memsz,
self.segments[i].elfN_Phdr.p_flags, # <- p_flags
self.segments[i].elfN_Phdr.p_align,
)
elif self.bits == 64:
fmt = '< I I 5Q Q'
fmtSize = struct.calcsize(fmt)
assert self.header.e_phentsize == fmtSize
newfile[tempOffset:tempOffset+fmtSize] = struct.pack(fmt,
self.segments[i].elfN_Phdr.p_type,
self.segments[i].elfN_Phdr.p_flags, # <- p_flags
self.segments[i].elfN_Phdr.p_offset,
self.segments[i].elfN_Phdr.p_vaddr,
self.segments[i].elfN_Phdr.p_paddr,
self.segments[i].elfN_Phdr.p_filesz,
self.segments[i].elfN_Phdr.p_memsz,
self.segments[i].elfN_Phdr.p_align,
)
del tempOffset
# ------
# find dynamic segment
dynamicSegment = None
for segment in self.segments:
if segment.elfN_Phdr.p_type == P_type.PT_DYNAMIC:
dynamicSegment = segment
break
if dynamicSegment is None:
raise ValueError("Segment of type PT_DYNAMIC was not found.")
if self.bits == 32:
structFmt = '<II'
elif self.bits == 64:
structFmt = '<QQ'
dynSegEntrySize = struct.calcsize(structFmt)
# write all dynamic segment entries back
for i in range(len(self.dynamicSegmentEntries)):
tempOffset = dynamicSegment.elfN_Phdr.p_offset + i*dynSegEntrySize
newfile[tempOffset:tempOffset+dynSegEntrySize] = struct.pack(structFmt,
# ElfN_Sword d_tag;
self.dynamicSegmentEntries[i].d_tag,
# union {
# ElfN_Word d_val;
# ElfN_Addr d_ptr;
# } d_un;
self.dynamicSegmentEntries[i].d_un,
)
del tempOffset
# overwrite rest of segment with 0x00 (default padding data)
# (NOTE: works in all test cases, but can cause md5 parsing
# check to fail!)
tmpStart = dynamicSegment.elfN_Phdr.p_offset \
+ len(self.dynamicSegmentEntries) * dynSegEntrySize
tmpEnd = dynamicSegment.elfN_Phdr.p_offset \
+ dynamicSegment.elfN_Phdr.p_filesz
if tmpStart < tmpEnd:
newfile[tmpStart:tmpEnd] = bytearray(tmpEnd - tmpStart)
# ------
# search for relocation entries in dynamic segment entries
jmpRelOffset = None
pltRelSize = None
pltRelType = None
relEntrySize = None
relOffset = None
relSize = None
relaEntrySize = None
relaOffset = None
relaSize = None
symbolTableOffset = None
symbolEntrySize = None
for dynEntry in self.dynamicSegmentEntries:
if dynEntry.d_tag == D_tag.DT_JMPREL:
if jmpRelOffset is not None:
raise ValueError("Can't handle multiple DT_JMPREL")
jmpRelOffset = self.virtualMemoryAddrToFileOffset(dynEntry.d_un)
continue
if dynEntry.d_tag == D_tag.DT_PLTRELSZ:
pltRelSize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_PLTREL:
pltRelType = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_RELENT:
if relEntrySize is not None:
raise ValueError("Can't handle multiple DT_RELENT")
relEntrySize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_RELAENT:
if relaEntrySize is not None:
raise ValueError("Can't handle multiple DT_RELAENT")
relaEntrySize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_REL:
if relOffset is not None:
raise ValueError("Can't handle multiple DT_REL")
relOffset = self.virtualMemoryAddrToFileOffset(dynEntry.d_un)
continue
if dynEntry.d_tag == D_tag.DT_RELA:
if relaOffset is not None:
raise ValueError("Can't handle multiple DT_RELA")
relaOffset = self.virtualMemoryAddrToFileOffset(dynEntry.d_un)
continue
if dynEntry.d_tag == D_tag.DT_RELSZ:
relSize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_RELASZ:
relaSize = dynEntry.d_un
continue
if dynEntry.d_tag == D_tag.DT_SYMTAB:
# get the offset in the file of the symbol table
symbolTableOffset = self.virtualMemoryAddrToFileOffset(
dynEntry.d_un)
continue
if dynEntry.d_tag == D_tag.DT_SYMENT:
symbolEntrySize = dynEntry.d_un
# write dynamic symbols back to dynamic symbol table
# (if the dynamic symbol table could be parsed)
for i in range(len(self.dynamicSymbolEntries)):
if symbolTableOffset is not None:
self._writeDynamicSymbol(newfile,
symbolTableOffset + i * symbolEntrySize,
self.dynamicSymbolEntries[i].ElfN_Sym)
# for fast lookups
dynSymSet = set(self.dynamicSymbolEntries)
# => write (jump) relocation entries back
# holds tuples: (type, offset, size, sourcelist)
relocTODO = []
# DT_JMPREL
if jmpRelOffset is not None:
relocTODO.append((pltRelType, jmpRelOffset, pltRelSize,
| |
# import images
# import logging
import csv
import re
import time
from glob import iglob
from os import access, R_OK
from os.path import join, expanduser, isdir, sep
import shutil
# maintain this order of matplotlib
# TkAgg causes Runtime errors in Thread
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
import wx
import wx.html2
import sys
from configobj import ConfigObj
from msdapp.guicontrollers import EVT_RESULT, EVT_DATA
from msdapp.guicontrollers import MSDController
from msdapp.utils import findResourceDir
from gui.gui_spt import ConfigPanel, FilesPanel, ComparePanel, WelcomePanel, ProcessPanel, dlgLogViewer
__version__='2.1.2'
########################################################################
class HomePanel(WelcomePanel):
    """
    This will be the first notebook tab: a static welcome/help page that
    summarizes the four workflow steps (Configure, Select Files,
    Run Processes, Compare Groups).
    """

    # ----------------------------------------------------------------------
    def __init__(self, parent):
        """Build the static welcome text and banner image.

        Parameters:
            parent: the wx notebook that owns this panel.
        """
        super(HomePanel, self).__init__(parent)
        # Banner image loaded from the resources folder.
        # NOTE(review): the relative 'resources' path assumes the process
        # working directory is the application root -- confirm at startup.
        img = wx.Bitmap(1, 1)
        img.LoadFile(join('resources', 'MSDPlots.bmp'), wx.BITMAP_TYPE_BMP)
        # Title line in a larger font, including the application version.
        self.m_richText1.BeginFontSize(14)
        welcome = "Welcome to the Automated Analysis App for SPT (v.%s)" % __version__
        self.m_richText1.WriteText(welcome)
        self.m_richText1.EndFontSize()
        self.m_richText1.Newline()
        # self.m_richText1.BeginLeftIndent(20)
        self.m_richText1.BeginItalic()
        self.m_richText1.WriteText("developed by <NAME>, QBI Software, The University of Queensland")
        self.m_richText1.EndItalic()
        # self.m_richText1.EndLeftIndent()
        self.m_richText1.Newline()
        self.m_richText1.WriteImage(img)
        self.m_richText1.Newline()
        # Overview paragraph.
        self.m_richText1.WriteText(
            r'''This is a multi-threaded application developed for the Meunier Group, QBI, which is designed to automate analysis of single particle tracking (SPT) data.''')
        self.m_richText1.Newline()
        # self.m_richText1.BeginNumberedBullet(1, 0.2, 0.2, wx.TEXT_ATTR_BULLET_STYLE)
        # One bold heading plus a descriptive paragraph per workflow step.
        self.m_richText1.BeginBold()
        self.m_richText1.WriteText("Configure")
        self.m_richText1.EndBold()
        self.m_richText1.Newline()
        # self.m_richText1.BeginLeftIndent(20)
        self.m_richText1.WriteText(
            'All filenames, column names, groups, threshold and binwidth options can be specifically configured and multiple configurations saved and reloaded.')
        self.m_richText1.Newline()
        self.m_richText1.BeginBold()
        self.m_richText1.WriteText("Select Files")
        self.m_richText1.EndBold()
        # self.m_richText1.BeginLeftIndent(20)
        self.m_richText1.Newline()
        self.m_richText1.WriteText(
            "Select a top level directory containing the required data files and/or use the Drag'n'Drop for individual files. Only files checked in the file list will be included in the analysis. Compiled output will be put in the output directory whereas individually processed files will be put in a subfolder in the input directory structure. It is recommended to provide a prefix (which should be a known search text) to group experiments for later comparison.")
        self.m_richText1.Newline()
        self.m_richText1.BeginBold()
        self.m_richText1.WriteText("Run Processes")
        self.m_richText1.EndBold()
        # self.m_richText1.BeginLeftIndent(20)
        self.m_richText1.Newline()
        self.m_richText1.WriteText(
            r"Each process is described with the required input files (which need to be available in the input directory structure) and the output files which it produces. These are multi-threaded processes which will run in sequence as listed and once running their progress can be monitored. A log file is produced in the user's home directory. Interactive plots can also be produced during processing.")
        # self.m_richText1.EndLeftIndent()
        self.m_richText1.Newline()
        self.m_richText1.BeginBold()
        self.m_richText1.WriteText("Compare Groups")
        self.m_richText1.EndBold()
        # self.m_richText1.BeginLeftIndent(20)
        self.m_richText1.Newline()
        self.m_richText1.WriteText(
            "Once the appropriate files have been generated in the output folder, a statistical comparison of two groups can be run and an interactive plot generated.")
        # self.m_richText1.EndLeftIndent()
        self.m_richText1.Newline()
        self.m_richText1.AddParagraph(
            "The requirements of this application have been provided by <NAME>, Meunier Lab, QBI. The modular design of this application allows for additional processes with minimal effort. The interactive plots can be saved and shared online via https://plot.ly. Any issues can be logged via the github repository. This project was initially called MSDAnalysis.")
        self.m_richText1.BeginItalic()
        self.m_richText1.AddParagraph(
            r"Copyright (2018) https://github.com/QBI-Software/AutoAnalysis_SPT")
        self.m_richText1.EndItalic()

    def loadController(self):
        """No-op: the welcome page holds no controller-backed widgets."""
        pass
########################################################################
class MSDConfig(ConfigPanel):
    """Notebook tab for viewing, editing, saving and loading the analysis
    configuration (filenames, column names, limits, groups, encoding).

    Values are persisted with ConfigObj; the default config lives in the
    hidden file ``~/.msdcfg``.
    """

    def __init__(self, parent):
        """Initialise the config form, populating it from the parent's
        controller when a configuration has already been loaded.

        Parameters:
            parent: wx parent window carrying a ``controller`` attribute.
        """
        super(MSDConfig, self).__init__(parent)
        # Encoding used when writing config files to disk.
        self.encoding = 'ISO-8859-1'
        # Path of the currently active config file.
        self.currentconfig = join(expanduser('~'), '.msdcfg')
        if parent.controller.loaded:
            self.__loadValues(parent.controller)

    def loadController(self):
        """No-op: this panel is refreshed via __loadValues instead."""
        pass

    def __loadValues(self, parent):
        """Copy every configuration value from the controller into the form.

        Parameters:
            parent: the MSDController holding loaded configuration values
                (named ``parent`` historically; it is the controller object,
                not a wx window).
        """
        print("Config loaded")
        self.m_textCtrl15.SetValue(parent.datafile)
        self.m_textCtrl16.SetValue(parent.msdfile)
        self.m_textCtrl1.SetValue(parent.histofile)
        self.m_textCtrl2.SetValue(parent.filteredfname)
        self.m_textCtrl3.SetValue(parent.filtered_msd)
        self.m_textCtrl4.SetValue(parent.msdpoints)
        self.m_textCtrl5.SetValue(parent.timeint)
        self.m_textCtrl8.SetValue(parent.diffcolumn)
        self.m_textCtrl9.SetValue(parent.logcolumn)
        self.m_textCtrl10.SetValue(parent.minlimit)
        self.m_textCtrl11.SetValue(parent.maxlimit)
        self.m_textCtrl12.SetValue(parent.binwidth)
        self.m_textCtrl13.SetValue(parent.threshold)
        self.m_textCtrl161.SetValue(parent.allstats)
        self.m_textCtrl18.SetValue(parent.msdcompare)
        self.m_tcGroup1.SetValue(parent.group1)
        self.m_tcGroup2.SetValue(parent.group2)
        self.m_tcCellid.SetValue(parent.cellid)
        self.m_txtAlllogdfilename.SetValue(parent.batchd)
        # Checkbox expects an int (0/1), the stored value may be a string.
        self.m_cbROI.SetValue(int(parent.roi))
        self.m_textCtrl162.SetValue(parent.encoding)
        msg = "Config file: %s with encoding %s" % (parent.configfile, parent.encoding)
        print(msg)
        self.m_status.SetLabel(msg)

    def OnSaveConfig(self, event, configfile=None):
        """Collect all field values into a ConfigObj, write it to disk, then
        rebuild the controller from the saved file and ask every sibling
        panel to refresh from it.

        Parameters:
            event: wx event (unused).
            configfile: optional explicit destination path; defaults to the
                currently active config file.
        """
        config = ConfigObj()
        if configfile is not None:
            config.filename = configfile
        else:
            config.filename = self.currentconfig
        config.encoding = self.encoding
        config['DATA_FILENAME'] = self.m_textCtrl15.GetValue()
        config['MSD_FILENAME'] = self.m_textCtrl16.GetValue()
        config['HISTOGRAM_FILENAME'] = self.m_textCtrl1.GetValue()
        config['FILTERED_FILENAME'] = self.m_textCtrl2.GetValue()
        config['FILTERED_MSD'] = self.m_textCtrl3.GetValue()
        config['LOG_COLUMN'] = self.m_textCtrl9.GetValue()
        config['DIFF_COLUMN'] = self.m_textCtrl8.GetValue()
        config['MSD_POINTS'] = self.m_textCtrl4.GetValue()
        config['MINLIMIT'] = self.m_textCtrl10.GetValue()
        config['MAXLIMIT'] = self.m_textCtrl11.GetValue()
        config['TIME_INTERVAL'] = self.m_textCtrl5.GetValue()
        config['BINWIDTH'] = self.m_textCtrl12.GetValue()
        config['THRESHOLD'] = self.m_textCtrl13.GetValue()
        config['ALLSTATS_FILENAME'] = self.m_textCtrl161.GetValue()
        config['AVGMSD_FILENAME'] = self.m_textCtrl18.GetValue()
        config['GROUP1'] = self.m_tcGroup1.GetValue()
        config['GROUP2'] = self.m_tcGroup2.GetValue()
        config['CELLID'] = self.m_tcCellid.GetValue()
        config['BATCHD_FILENAME'] = self.m_txtAlllogdfilename.GetValue()
        # Checkbox state stored as 0/1.
        config['GROUPBY_ROI'] = int(self.m_cbROI.GetValue())
        config['ENCODING'] = self.m_textCtrl162.GetValue()
        config.write()
        # Reload to parent
        try:
            self.Parent.controller = MSDController(config.filename)
            if self.Parent.controller.loaded:
                self.prefixes = [self.Parent.controller.group1, self.Parent.controller.group2]
                self.Parent.prefixes = self.prefixes
                # Propagate the new controller to every sibling panel.
                for fp in self.Parent.Children:
                    if isinstance(fp, wx.Panel):
                        fp.loadController()
                msg = "Config file: %s" % config.filename
                print(msg)
                self.m_status.SetLabel(msg)
        except IOError as e:
            self.Parent.Warn("Config error:" + e.args[0])

    def OnSaveNew(self, event):
        """Prompt for a new config filename, make it current, and save.

        NOTE(review): the dialog is not Destroy()ed here, unlike
        OnLoadConfig -- confirm whether this leak is intentional.
        """
        openFileDialog = wx.FileDialog(self, "Save config file", "", "", "Config files (*.cfg)|*",
                                       wx.FD_SAVE | wx.FD_CHANGE_DIR)
        openFileDialog.SetDirectory(expanduser('~'))
        if openFileDialog.ShowModal() == wx.ID_OK:
            configfile = str(openFileDialog.GetPath())
            self.currentconfig = configfile
            self.OnSaveConfig(event, configfile)

    def OnLoadConfig(self, event):
        """Prompt for an existing config file, load it into the controller,
        and repopulate the form from the loaded values."""
        print("Load From Config dialog")
        openFileDialog = wx.FileDialog(self, "Open config file", "", "", "Config files (*.cfg)|*",
                                       wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_CHANGE_DIR)
        openFileDialog.SetDirectory(expanduser('~'))
        if openFileDialog.ShowModal() == wx.ID_OK:
            configfile = str(openFileDialog.GetPath())
            try:
                config = ConfigObj(configfile, encoding='ISO-8859-1')
                self.Parent.controller.loadConfig(config)
                self.currentconfig = configfile
                #self.Parent.controller.config.filename = join(expanduser('~'), '.msdcfg')  # save over existing?
                #self.Parent.controller.config.write()
                self.__loadValues(self.Parent.controller)
                self.m_status.SetLabel("Config file: %s" % configfile)
            except IOError as e:
                self.Parent.Warn("Config error:" + e.args[0])
        openFileDialog.Destroy()
########################################################################
class MyFileDropTarget(wx.FileDropTarget):
    """Drop target that appends dropped file paths to a data-view list.

    Each dropped file becomes a checked row with an empty group column;
    the owning panel's status bar is updated with the new row count.
    """

    def __init__(self, target):
        super(MyFileDropTarget, self).__init__()
        # The wx.dataview list control that receives dropped filenames.
        self.target = target

    def OnDropFiles(self, x, y, filenames):
        """Append each dropped path as a ticked row and refresh the status bar."""
        for dropped in filenames:
            # New rows start checked, with no group assigned yet.
            self.target.AppendItem([True, '', dropped])
        owner = self.target.Parent
        owner.m_status.SetLabelText(
            'Total files loaded: %s' % owner.m_dataViewListCtrl1.GetItemCount())
        return len(filenames)
########################################################################
class FileSelectPanel(FilesPanel):
    """Notebook tab for choosing input/output directories and data files.

    Files may be added by recursive search (OnAutofind), drag-and-drop, or
    loading a previously saved CSV list. Each list row is
    (checked, group, filepath); only checked rows take part in the analysis.
    """

    def __init__(self, parent):
        super(FileSelectPanel, self).__init__(parent)
        self.col_file.SetMinWidth(200)
        self.loadController()
        # Enable drag-and-drop of files onto the list control.
        self.filedrop = MyFileDropTarget(self.m_dataViewListCtrl1)
        self.m_tcDragdrop.SetDropTarget(self.filedrop)
        # self.col_file.SetSortable(True)
        # self.col_group.SetSortable(True)

    def OnColClick(self, event):
        """Log clicks on a column header (sorting is not implemented)."""
        print("header clicked: ", event.GetColumn())
        # colidx = event.GetColumn()
        # self.m_dataViewListCtrl1.GetModel().Resort()

    def loadController(self):
        """Refresh controller-derived state: group choices and data filename."""
        self.controller = self.Parent.controller
        self.m_cbGroups.SetItems([self.Parent.prefixes[0], self.Parent.prefixes[1]])
        self.datafile = self.controller.datafile

    def OnInputdir(self, e):
        """Prompt for the top-level directory containing input files."""
        dlg = wx.DirDialog(self, "Choose a directory containing input files")
        if dlg.ShowModal() == wx.ID_OK:
            self.inputdir = str(dlg.GetPath())
            self.txtInputdir.SetValue(self.inputdir)
        dlg.Destroy()

    def OnOutputdir(self, e):
        """Prompt for the output directory and seed the Compare panel with it."""
        dlg = wx.DirDialog(self, "Choose a directory for output files")
        if dlg.ShowModal() == wx.ID_OK:
            self.outputdir = str(dlg.GetPath())
            self.txtOutputdir.SetValue(self.outputdir)
            # initialize Compare Panel with outputdir
            cpanel = self.getComparePanel()
            if cpanel is not None:
                cpanel.m_tcGp1Files.SetValue(self.outputdir)
                cpanel.m_tcGp2Files.SetValue(self.outputdir)
        dlg.Destroy()

    def OnAssignGroup(self, event):
        """
        Allow user to assign groups to selected files
        :param event:
        :return:
        """
        num_files = self.m_dataViewListCtrl1.GetItemCount()
        group = self.m_cbGroups.GetStringSelection()
        for i in range(0, num_files):
            if self.m_dataViewListCtrl1.GetToggleValue(i, 0):
                self.m_dataViewListCtrl1.SetValue(group, i, 1)
                # BUGFIX: the placeholders were previously passed to print()
                # as separate arguments, so the literal '%s' markers were
                # printed instead of the filename and group.
                print('Setting %s with group %s'
                      % (self.m_dataViewListCtrl1.GetValue(i, 2), group))

    def OnSaveList(self, event):
        """
        Save selected files to csv
        :param event:
        :return:
        """
        num_files = self.m_dataViewListCtrl1.GetItemCount()
        try:
            openFileDialog = wx.FileDialog(self, "Save file list", "", "", "CSV files (*.csv)|*",
                                           wx.FD_SAVE | wx.FD_CHANGE_DIR)
            if openFileDialog.ShowModal() == wx.ID_OK:
                savefile = str(openFileDialog.GetPath())
                with open(savefile, 'w') as csvfile:
                    swriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    # Persist (group, filepath) for every checked row.
                    for i in range(0, num_files):
                        if self.m_dataViewListCtrl1.GetToggleValue(i, 0):
                            swriter.writerow(
                                [self.m_dataViewListCtrl1.GetValue(i, 1), self.m_dataViewListCtrl1.GetValue(i, 2)])
        except Exception as e:
            self.Parent.Warn("Save list error:" + e.args[0])
        finally:
            print('Save list complete')

    def OnLoadList(self, event):
        """
        Load saved list
        :param event:
        :return:
        """
        try:
            openFileDialog = wx.FileDialog(self, "Open file list", "", "", "CSV files (*.csv)|*",
                                           wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_CHANGE_DIR)
            if openFileDialog.ShowModal() == wx.ID_OK:
                savefile = str(openFileDialog.GetPath())
                with open(savefile, 'r') as csvfile:
                    sreader = csv.reader(csvfile, delimiter=',', quotechar='"')
                    # Replace the current list contents entirely.
                    self.m_dataViewListCtrl1.DeleteAllItems()
                    for row in sreader:
                        if len(row) > 0:
                            self.m_dataViewListCtrl1.AppendItem([True, row[0], row[1]])
                msg = "Total Files loaded: %d" % self.m_dataViewListCtrl1.GetItemCount()
                self.m_status.SetLabelText(msg)
        except Exception as e:
            print(e.args[0])
            self.Parent.Warn("Load list error:" + e.args[0])
        finally:
            print("Load list complete")

    def OnAutofind(self, event):
        """
        Find all matching files in top level directory
        :param event:
        :return:
        """
        self.btnAutoFind.Disable()
        self.m_status.SetLabelText("Finding files ... please wait")
        try:
            fullsearch = self.m_cbMatchAny.GetValue()
            # 'Match any' searches for files merely ending in the data
            # filename; otherwise the name must match exactly.
            if fullsearch:
                allfiles = [y for y in iglob(join(self.inputdir, '**', '*' + self.datafile), recursive=True)]
            else:
                allfiles = [y for y in iglob(join(self.inputdir, '**', self.datafile), recursive=True)]
            searchtext = self.m_tcSearch.GetValue()
            if len(searchtext) > 0:
                filenames = [f for f in allfiles if re.search(searchtext, f, flags=re.IGNORECASE)]
            else:
                filenames = allfiles
            for fname in filenames:
                group = ''
                # group as directory name ONLY
                for pfix in self.Parent.prefixes:
                    group = ''
                    if pfix.upper() in fname.upper().split(sep):
                        group = pfix
                        break
                    elif len(searchtext) > 0 and re.search(searchtext + pfix, fname, flags=re.IGNORECASE):
                        group = pfix
                        break
                self.m_dataViewListCtrl1.AppendItem([True, group, fname])
            self.col_file.SetMinWidth(wx.LIST_AUTOSIZE)
            msg = "Total Files loaded: %d" % self.m_dataViewListCtrl1.GetItemCount()
            self.m_status.SetLabelText(msg)
        finally:
            # BUGFIX: re-enable the button even if the search raises, so the
            # panel is never left permanently disabled.
            self.btnAutoFind.Enable(True)

    def OnSelectall(self, event):
        """Toggle every row's checkbox to match the select-all control."""
        for i in range(0, self.m_dataViewListCtrl1.GetItemCount()):
            self.m_dataViewListCtrl1.SetToggleValue(event.GetSelection(), i, 0)
        print("Toggled selections to: ", event.GetSelection())

    def OnClearlist(self, event):
        """Remove every row from the file list."""
        print("Clear items in list")
        self.m_dataViewListCtrl1.DeleteAllItems()

    def getComparePanel(self):
        """
        Get access to panel
        :return: the sibling CompareRunPanel, or None when not found
        """
        panel = None
        for fp in self.Parent.Children:
            if isinstance(fp, CompareRunPanel):
                panel = fp
                break
        return panel
########################################################################
class ProcessRunPanel(ProcessPanel):
def __init__(self, parent):
super(ProcessRunPanel, self).__init__(parent)
self.loadController()
# self.controller = parent.controller
# Bind timer event
# self.Bind(wx.EVT_TIMER, self.progressfunc, self.controller.timer)
# processes = [p['caption'] for p in self.controller.processes]
# self.m_checkListProcess.AppendItems(processes)
# Set up event handler for any worker thread results
EVT_RESULT(self, self.progressfunc)
# EVT_CANCEL(self, self.stopfunc)
# Set timer handler
self.start = {}
def loadController(self):
self.controller = self.Parent.controller
processes = | |
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <<EMAIL>>
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Code based in part on ``litex`` and ``liteiclink``.
# SPDX-License-Identifier: BSD-3-Clause
""" Soft PIPE backend for the Xilinx 7 Series GTP transceivers. """
from amaranth import *
from amaranth.lib.cdc import FFSynchronizer
from .xc7 import DRPInterface, DRPArbiter, DRPFieldController
from .xc7 import GTResetDeferrer, GTPRXPMAResetWorkaround, GTOOBClockDivider
from .lfps import LFPSSquareWaveGenerator, LFPSSquareWaveDetector
from ..pipe import PIPEInterface
Open = Signal
class GTPQuadPLL(Elaboratable):
    """Xilinx 7-series GTPE2_COMMON (quad PLL) wrapper.

    Selects divider values (M, N1, N2, D) so the requested line rate can be
    derived from the given reference clock, then instantiates the common
    block with the chosen PLL active (channel 0 -> PLL0, channel 1 -> PLL1)
    and the other PLL powered down.
    """

    def __init__(self, refclk, refclk_freq, linerate, channel=0):
        """
        Parameters:
            refclk: reference clock signal, fed to GTREFCLK0.
            refclk_freq: reference clock frequency, in Hz.
            linerate: desired serial line rate, in bits/s.
            channel: which of the quad's two PLLs to use (0 or 1).
        """
        # A GTP quad has exactly two PLLs.
        assert channel in [0, 1]
        self.channel = channel
        self._refclk = refclk
        self._refclk_freq = refclk_freq
        self._linerate = linerate
        # Raises ValueError when no divider combination fits (see below).
        self.config = self.compute_config(refclk_freq, linerate)

        #
        # I/O ports
        #
        self.clk = Signal()        # PLL output clock (PLLxOUTCLK)
        self.refclk = Signal()     # reference clock output (PLLxOUTREFCLK)
        self.reset = Signal()      # PLL reset request
        self.lock = Signal()       # PLL lock indicator
        self.drp = DRPInterface()  # dynamic reconfiguration port

    def elaborate(self, platform):
        # Parameters and ports are written with PLLx/PLLy placeholders and
        # renamed below according to the selected channel.
        gtpe2_params = dict(
            # Common Block Attributes
            p_BIAS_CFG = 0x0000000000050001,
            p_COMMON_CFG = 0x00000000,

            # PLL Attributes
            p_PLL_CLKOUT_CFG = 0x00,
            p_PLLx_CFG = 0x01F03DC,
            p_PLLx_DMON_CFG = 0b0,
            p_PLLx_FBDIV = self.config["n2"],
            p_PLLx_FBDIV_45 = self.config["n1"],
            p_PLLx_INIT_CFG = 0x00001E,
            p_PLLx_LOCK_CFG = 0x1E8,
            p_PLLx_REFCLK_DIV = self.config["m"],

            # Common Block - Dynamic Reconfiguration Port
            i_DRPCLK = ClockSignal("ss"),
            i_DRPADDR = self.drp.addr,
            i_DRPDI = self.drp.di,
            o_DRPDO = self.drp.do,
            i_DRPWE = self.drp.we,
            i_DRPEN = self.drp.en,
            o_DRPRDY = self.drp.rdy,

            # Common Block - Clocking Ports
            i_GTREFCLK0 = self._refclk,
            o_PLLxOUTCLK = self.clk,
            o_PLLxOUTREFCLK = self.refclk,

            # Common Block - PLL Ports
            o_PLLxLOCK = self.lock,
            i_PLLxLOCKEN = 1,
            i_PLLxPD = 0,
            i_PLLxREFCLKSEL = 0b001,
            i_PLLxRESET = self.reset,
            # Power down the unused PLL.
            i_PLLyPD = 1,

            # QPLL Ports
            i_BGBYPASSB = 1,
            i_BGMONITORENB = 1,
            i_BGPDB = 1,
            i_BGRCALOVRD = 0b11111,
            i_RCALENB = 1,
        )
        if self.channel == 0:
            pll_x, pll_y = "PLL0", "PLL1"
        else:
            pll_x, pll_y = "PLL1", "PLL0"
        # Substitute the PLLx/PLLy placeholders with the real PLL names.
        return Instance("GTPE2_COMMON", **{
            name.replace("PLLx", pll_x).replace("PLLy", pll_y): value
            for name, value in gtpe2_params.items()
        })

    @staticmethod
    def compute_config(refclk_freq, linerate):
        """Search divider settings that produce ``linerate`` from ``refclk_freq``.

        Returns a dict with keys n1, n2, m, d, vco_freq, clkin and linerate.
        Raises ValueError when no combination keeps the VCO inside its
        1.6-3.3 GHz range while hitting the line rate exactly.
        """
        for n1 in 4, 5:
            for n2 in 1, 2, 3, 4, 5:
                for m in 1, 2:
                    vco_freq = refclk_freq*(n1*n2)/m
                    if 1.6e9 <= vco_freq <= 3.3e9:
                        for d in 1, 2, 4, 8, 16:
                            current_linerate = vco_freq*2/d
                            if current_linerate == linerate:
                                return {"n1": n1, "n2": n2, "m": m, "d": d,
                                        "vco_freq": vco_freq,
                                        "clkin": refclk_freq,
                                        "linerate": linerate}
        msg = "No config found for {:3.2f} MHz refclk / {:3.2f} Gbps linerate."
        raise ValueError(msg.format(refclk_freq/1e6, linerate/1e9))

    def __repr__(self):
        """Return an ASCII-art summary of the PLL topology and settings."""
        config = self.config
        r = """
GTPQuadPLL
==========

  overview:
  ---------

       +--------------------------------------------------+
       |                                                  |
       |   +-----+  +---------------------------+ +-----+ |
       |   |     |  | Phase Frequency Detector  | |     | |
CLKIN +----> /M  +-->       Charge Pump         +-> VCO +---> CLKOUT
       |   |     |  |       Loop Filter         | |     | |
       |   +-----+  +---------------------------+ +--+--+ |
       |              ^                              |    |
       |              |    +-------+    +-------+    |    |
       |              +----+  /N2  <----+  /N1  <----+    |
       |                   +-------+    +-------+         |
       +--------------------------------------------------+

                            +-------+
                   CLKOUT +->  2/D  +-> LINERATE
                            +-------+

  config:
  -------
    CLKIN    = {clkin}MHz
    CLKOUT   = CLKIN x (N1 x N2) / M = {clkin}MHz x ({n1} x {n2}) / {m}
             = {vco_freq}GHz
    LINERATE = CLKOUT x 2 / D = {vco_freq}GHz x 2 / {d}
             = {linerate}GHz
""".format(clkin = config["clkin"]/1e6,
           n1 = config["n1"],
           n2 = config["n2"],
           m = config["m"],
           vco_freq = config["vco_freq"]/1e9,
           d = config["d"],
           linerate = config["linerate"]/1e9)
        return r
class GTPChannel(Elaboratable):
def __init__(self, qpll, tx_pads, rx_pads, ss_clock_frequency):
self._qpll = qpll
self._tx_pads = tx_pads
self._rx_pads = rx_pads
self._ss_clock_frequency = ss_clock_frequency
# For now, always operate at 2x gearing, and using the corresponding width for
# the internal data path.
self._io_words = 2
self._data_width = self._io_words * 10
#
# I/O ports.
#
# Dynamic reconfiguration port
self.drp = DRPInterface()
# Interface clock
self.pclk = Signal()
# Reset sequencing
self.reset = Signal()
self.tx_ready = Signal()
self.rx_ready = Signal()
# Core Rx and Tx lines
self.tx_data = Signal(self._io_words * 8)
self.tx_datak = Signal(self._io_words)
self.rx_data = Signal(self._io_words * 8)
self.rx_datak = Signal(self._io_words)
# TX controls
self.tx_polarity = Signal()
self.tx_elec_idle = Signal()
self.tx_gpio_en = Signal()
self.tx_gpio = Signal()
# RX controls
self.rx_polarity = Signal()
self.rx_eq_training = Signal()
self.rx_termination = Signal()
# RX status
self.rx_valid = Signal()
self.rx_status = Signal(3)
self.rx_elec_idle = Signal()
def elaborate(self, platform):
m = Module()
# Aliases.
qpll = self._qpll
io_words = self._io_words
data_width = self._data_width
#
# Clocking.
#
# Ensure we have a valid PLL/CDR configuration.
assert qpll.config["linerate"] < 6.6e9
# From [UG482: Table 4-14]: CDR Recommended Settings for Protocols with SSC
rxcdr_cfgs = {
1: 0x0_0000_87FE_2060_2448_1010,
2: 0x0_0000_47FE_2060_2450_1010,
4: 0x0_0000_47FE_1060_2450_1010,
}
# Generate the PIPE interface clock from the transmit word clock, and use it to drive both
# the Tx and the Rx FIFOs, to bring both halves of the data bus to the same clock domain.
# The recovered Rx clock will not match the generated Tx clock; use the recovered word
# clock to drive the CTC FIFO in the transceiver, which will compensate for the difference.
txoutclk = Signal()
m.submodules += Instance("BUFG",
i_I=txoutclk,
o_O=self.pclk
)
platform.add_clock_constraint(self.pclk, 250e6)
# Transceiver uses a 25 MHz clock internally, which needs to be derived from
# the reference clock.
for clk25_div in range(1, 33):
if qpll._refclk_freq / clk25_div <= 25e6:
break
# Out of band sequence detector uses an auxiliary clock whose frequency is derived
# from the properties of the sequences.
m.submodules.oob_clkdiv = oob_clkdiv = GTOOBClockDivider(self._ss_clock_frequency)
#
# Initialization.
#
# Per [AR43482], GTP transceivers must not be reset immediately after configuration.
m.submodules.defer_rst = defer_rst = GTResetDeferrer(self._ss_clock_frequency)
m.d.comb += [
defer_rst.tx_i.eq(~qpll.lock | self.reset),
defer_rst.rx_i.eq(~qpll.lock | self.reset),
]
# Per [UG482], GTP receiver reset must follow a specific sequence.
m.submodules.rx_pma_rst = rx_pma_rst = GTPRXPMAResetWorkaround(self._ss_clock_frequency)
m.d.comb += [
rx_pma_rst.i.eq(defer_rst.rx_o)
]
tx_rst_done = Signal()
rx_rst_done = Signal()
m.d.comb += [
self.tx_ready.eq(defer_rst.done & tx_rst_done),
self.rx_ready.eq(defer_rst.done & rx_rst_done),
]
#
# Dynamic reconfiguration.
#
rx_termination = Signal()
m.submodules += FFSynchronizer(self.rx_termination, rx_termination, o_domain="ss")
m.submodules.rx_term = rx_term = DRPFieldController(
addr=0x0011, bits=slice(4, 6), reset=0b10) # RX_CM_SEL
m.d.comb += [
rx_term.value.eq(Mux(rx_termination,
0b11, # Programmable
0b10)), # Floating
]
m.submodules.drp_arbiter = drp_arbiter = DRPArbiter()
drp_arbiter.add_interface(rx_pma_rst.drp)
drp_arbiter.add_interface(rx_term.drp)
drp_arbiter.add_interface(self.drp)
#
# Core SerDes instantiation.
#
m.submodules.gtp = Instance("GTPE2_CHANNEL",
# Simulation-Only Attributes
p_SIM_RECEIVER_DETECT_PASS = "TRUE",
p_SIM_TX_EIDLE_DRIVE_LEVEL = "X",
p_SIM_RESET_SPEEDUP = "FALSE",
p_SIM_VERSION = "2.0",
# RX 8B/10B Decoder Attributes
p_RX_DISPERR_SEQ_MATCH = "FALSE",
p_DEC_MCOMMA_DETECT = "TRUE",
p_DEC_PCOMMA_DETECT = "TRUE",
p_DEC_VALID_COMMA_ONLY = "TRUE",
p_UCODEER_CLR = 0b0,
# RX Byte and Word Alignment Attributes
p_ALIGN_COMMA_DOUBLE = "FALSE",
p_ALIGN_COMMA_ENABLE = 0b1111_111111,
p_ALIGN_COMMA_WORD = 1,
p_ALIGN_MCOMMA_DET = "TRUE",
p_ALIGN_MCOMMA_VALUE = 0b0101_111100, # K28.5 RD- 10b code
p_ALIGN_PCOMMA_DET = "TRUE",
p_ALIGN_PCOMMA_VALUE = 0b1010_000011, # K28.5 RD+ 10b code
p_SHOW_REALIGN_COMMA = "TRUE",
p_RXSLIDE_AUTO_WAIT = 7,
p_RXSLIDE_MODE = "OFF",
p_RX_SIG_VALID_DLY = 10,
# RX Clock Correction Attributes
p_CBCC_DATA_SOURCE_SEL = "DECODED",
p_CLK_CORRECT_USE = "TRUE",
p_CLK_COR_KEEP_IDLE = "FALSE",
p_CLK_COR_MAX_LAT = 14,
p_CLK_COR_MIN_LAT = 11,
p_CLK_COR_PRECEDENCE = "TRUE",
p_CLK_COR_REPEAT_WAIT = 0,
p_CLK_COR_SEQ_LEN = 2,
p_CLK_COR_SEQ_1_ENABLE = 0b1111,
p_CLK_COR_SEQ_1_1 = 0b01_001_11100, # K28.1 1+8b code
p_CLK_COR_SEQ_1_2 = 0b01_001_11100, # K28.1 1+8b code
p_CLK_COR_SEQ_1_3 = 0b0000000000,
p_CLK_COR_SEQ_1_4 = 0b0000000000,
p_CLK_COR_SEQ_2_ENABLE = 0b1111,
p_CLK_COR_SEQ_2_USE = "FALSE",
p_CLK_COR_SEQ_2_1 = 0b0000000000,
p_CLK_COR_SEQ_2_2 = 0b0000000000,
p_CLK_COR_SEQ_2_3 = 0b0000000000,
p_CLK_COR_SEQ_2_4 = 0b0000000000,
# RX Channel Bonding Attributes
p_CHAN_BOND_KEEP_ALIGN = "FALSE",
p_CHAN_BOND_MAX_SKEW = 1,
p_CHAN_BOND_SEQ_LEN = 1,
p_CHAN_BOND_SEQ_1_1 = 0b0000000000,
p_CHAN_BOND_SEQ_1_2 = 0b0000000000,
p_CHAN_BOND_SEQ_1_3 = 0b0000000000,
p_CHAN_BOND_SEQ_1_4 = 0b0000000000,
p_CHAN_BOND_SEQ_1_ENABLE = 0b1111,
p_CHAN_BOND_SEQ_2_1 = 0b0000000000,
p_CHAN_BOND_SEQ_2_2 = 0b0000000000,
p_CHAN_BOND_SEQ_2_3 = 0b0000000000,
p_CHAN_BOND_SEQ_2_4 = 0b0000000000,
p_CHAN_BOND_SEQ_2_ENABLE = 0b1111,
p_CHAN_BOND_SEQ_2_USE = "FALSE",
p_FTS_DESKEW_SEQ_ENABLE = 0b1111,
p_FTS_LANE_DESKEW_CFG = 0b1111,
p_FTS_LANE_DESKEW_EN = "FALSE",
# RX Margin Analysis Attributes
p_ES_CONTROL = 0b000000,
p_ES_ERRDET_EN = "FALSE",
p_ES_EYE_SCAN_EN = "TRUE",
p_ES_HORZ_OFFSET = 0x000,
p_ES_PMA_CFG = 0b0000000000,
p_ES_PRESCALE = 0b00000,
p_ES_QUALIFIER = 0x00000000000000000000,
p_ES_QUAL_MASK = 0x00000000000000000000,
p_ES_SDATA_MASK = 0x00000000000000000000,
p_ES_VERT_OFFSET = 0b000000000,
# FPGA RX Interface Attributes
p_RX_DATA_WIDTH = data_width,
# PMA Attributes
p_OUTREFCLK_SEL_INV = 0b11,
p_PMA_RSV = 0x00000333,
p_PMA_RSV2 = 0x00002040,
p_PMA_RSV3 = 0b00,
p_PMA_RSV4 = 0b0000,
p_RX_BIAS_CFG = 0b0000111100110011,
p_DMONITOR_CFG = 0x000A00,
p_RX_CM_SEL = 0b10,
p_RX_CM_TRIM = 0b1010,
p_RX_DEBUG_CFG = 0b00000000000000,
p_RX_OS_CFG = 0b0000010000000,
p_TERM_RCAL_CFG = 0b100001000010000,
p_TERM_RCAL_OVRD = 0b000,
p_TST_RSV = 0x00000000,
p_RX_CLK25_DIV = clk25_div,
p_TX_CLK25_DIV = clk25_div,
# PCI Express Attributes
p_PCS_PCIE_EN = "FALSE",
# PCS Attributes
p_PCS_RSVD_ATTR = 0x0000_0000_0100, # OOB power up
# RX Buffer Attributes
p_RXBUF_ADDR_MODE = "FULL",
p_RXBUF_EIDLE_HI_CNT = 0b1000,
p_RXBUF_EIDLE_LO_CNT = 0b0000,
p_RXBUF_EN = "TRUE",
p_RX_BUFFER_CFG = 0b000000,
p_RXBUF_RESET_ON_CB_CHANGE = "TRUE",
p_RXBUF_RESET_ON_COMMAALIGN = | |
"""This file contains code for use with "Think Bayes",
by <NAME>, available from greenteapress.com
Copyright 2012 <NAME>
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import numpy
import random
import sys
import correlation
import thinkplot
import matplotlib.pyplot as pyplot
import thinkbayes2
# Time between tumor observations, as a fraction of a year (245 days).
INTERVAL = 245/365.0
# File formats for figures saved via thinkplot.Save.
FORMATS = ['pdf', 'eps']
# Smallest and largest tumor linear dimension (cm) used by the model.
MINSIZE = 0.2
MAXSIZE = 20
# Buckets per e-fold of linear size -- see BucketToCm / CmToBucket.
BUCKET_FACTOR = 10
def log2(x, denom=math.log(2)):
    """Return the base-2 logarithm of x.

    denom: natural log of 2, precomputed once as a default argument.
    """
    natural = math.log(x)
    return natural / denom
def SimpleModel():
    """Run the back-of-the-envelope calculations for the simple model.

    Prints intermediate quantities and the probability that the
    reciprocal doubling time exceeds the implied rate.
    """
    # days between hospital discharge and diagnosis
    elapsed_days = 3291.0
    # linear doubling time is volumetric doubling time times 3
    linear_dt = 811.0 * 3
    # number of doublings that fit in the elapsed time
    num_doublings = elapsed_days / linear_dt
    # diameter (cm) at diagnosis, and inferred diameter at discharge
    diag_diameter = 15.5
    init_diameter = diag_diameter / 2.0 ** num_doublings
    print(('interval (days)', elapsed_days))
    print(('interval (years)', elapsed_days / 365))
    print(('dt', linear_dt))
    print(('doublings', num_doublings))
    print(('d1', diag_diameter))
    print(('d0', init_diameter))
    # now work backwards: assume an initial linear measure of 0.1 cm
    init_diameter = 0.1
    diag_diameter = 15.5
    # doublings needed to grow from the initial to the diagnosis size
    num_doublings = log2(diag_diameter / init_diameter)
    # implied linear doubling time
    linear_dt = elapsed_days / num_doublings
    print(('doublings', num_doublings))
    print(('dt', linear_dt))
    # volumetric doubling time and reciprocal doubling time (per year)
    vol_dt = linear_dt / 3
    recip_dt = 365 / vol_dt
    print(('vdt', vol_dt))
    print(('rdt', recip_dt))
    cdf = MakeCdf()
    prob_below = cdf.Prob(recip_dt)
    print(('Prob{RDT > 2.4}', 1 - prob_below))
def MakeCdf():
    """Construct the CDF of RDT from the Zhang et al. data."""
    total = 53.0
    cum_freqs = [0, 2, 31, 42, 48, 51, 52, 53]
    probs = [count / total for count in cum_freqs]
    values = numpy.arange(-1.5, 6.5, 1.0)
    return thinkbayes2.Cdf(values, probs)
def PlotCdf(cdf):
    """Plot the observed and fitted distributions of RDT.

    cdf: Cdf of observed RDT
    """
    values, probs = cdf.xs, cdf.ps
    complements = [1 - p for p in probs]
    # CCDF on a log-y scale: exponential behavior shows up as a line
    thinkplot.Clf()
    thinkplot.Plot(values, complements, 'bo-')
    thinkplot.Save(root='kidney1',
                   formats=FORMATS,
                   xlabel='RDT',
                   ylabel='CCDF (log scale)',
                   yscale='log')
    # CDF of the data overlaid with the mixture model
    thinkplot.Clf()
    thinkplot.PrePlot(num=2)
    model_xs, model_ys = ModelCdf()
    thinkplot.Plot(model_xs, model_ys, label='model', linestyle='dashed')
    thinkplot.Plot(values, probs, 'gs', label='data')
    thinkplot.Save(root='kidney2',
                   formats=FORMATS,
                   xlabel='RDT (volume doublings per year)',
                   ylabel='CDF',
                   title='Distribution of RDT',
                   axis=[-2, 7, 0, 1],
                   loc=4)
def QQPlot(cdf, fit):
    """Draw a QQ plot comparing the actual and fitted distributions.

    cdf: actual Cdf of RDT
    fit: fitted model distribution
    """
    diag = [-1.5, 5.5]
    thinkplot.Clf()
    # 45-degree reference line
    thinkplot.Plot(diag, diag, 'b-')
    actual, probs = cdf.xs, cdf.ps
    fitted = [fit.Value(p) for p in probs]
    thinkplot.Plot(actual, fitted, 'gs')
    thinkplot.Save(root='kidney3',
                   formats=FORMATS,
                   xlabel='Actual',
                   ylabel='Model')
def FitCdf(cdf):
    """Fit a line to the log-CCDF and return the negated slope.

    cdf: Cdf of RDT
    """
    values, probs = cdf.xs, cdf.ps
    ccdf = [1 - p for p in probs]
    # drop the endpoints, where the CCDF is 1 or 0
    trimmed_xs = values[1:-1]
    log_ccdf = [math.log(p) for p in ccdf[1:-1]]
    _intercept, slope = correlation.LeastSquares(trimmed_xs, log_ccdf)
    return -slope
def CorrelatedGenerator(cdf, rho):
    """Yield values from cdf with serial correlation.

    Draws a serially-correlated standard Normal series, then maps each
    draw through the inverse CDF of cdf.

    cdf: distribution to choose from
    rho: target coefficient of correlation
    """
    def to_variate(z):
        """Map a standard Normal draw to a variate with the given CDF."""
        return cdf.Value(thinkbayes2.NormalCdf(z))

    # first value: unconditional standard Normal draw
    z = random.gauss(0, 1)
    yield to_variate(z)
    # subsequent values: Normal conditioned on the previous draw
    sigma = math.sqrt(1 - rho**2)
    while True:
        z = random.gauss(z * rho, sigma)
        yield to_variate(z)
def UncorrelatedGenerator(cdf, _rho=None):
    """Yield independent draws from cdf.

    _rho is ignored; it exists only so this function has the same
    signature as CorrelatedGenerator.

    cdf: distribution to choose from
    """
    while True:
        yield cdf.Random()
def RdtGenerator(cdf, rho):
    """Return an iterator of RDT values with the given correlation.

    cdf: Cdf object
    rho: coefficient of correlation; 0.0 selects independent draws
    """
    if rho == 0.0:
        return UncorrelatedGenerator(cdf)
    return CorrelatedGenerator(cdf, rho)
def GenerateRdt(pc, lam1, lam2):
    """Draw one RDT from a two-sided exponential mixture.

    With probability pc the tumor shrinks: return a negative draw with
    rate lam2.  Otherwise it grows: return a positive draw with rate lam1.
    """
    shrinking = random.random() < pc
    if shrinking:
        return -random.expovariate(lam2)
    return random.expovariate(lam1)
def GenerateSample(n, pc, lam1, lam2):
    """Draw a sample of n RDTs from the mixture model.

    n: sample size
    pc: probability of negative growth
    lam1: exponential parameter of positive growth
    lam2: exponential parameter of negative growth

    Returns: list of random variates
    """
    return [GenerateRdt(pc, lam1, lam2) for _ in range(n)]
def GenerateCdf(n=1000, pc=0.35, lam1=0.79, lam2=5.0):
    """Generate a sample of RDTs and return its empirical CDF.

    n: sample size
    pc: probability of negative growth
    lam1: exponential parameter of positive growth
    lam2: exponential parameter of negative growth

    Returns: Cdf of the generated sample
    """
    sample = GenerateSample(n, pc, lam1, lam2)
    return thinkbayes2.MakeCdfFromList(sample)
def ModelCdf(pc=0.35, lam1=0.79, lam2=5.0):
    """Evaluate the mixture-model CDF on a grid of RDT values.

    pc: probability of negative growth
    lam1: exponential parameter of positive growth
    lam2: exponential parameter of negative growth

    Returns: list of xs, list of ys
    """
    expo_cdf = thinkbayes2.EvalExponentialCdf
    # negative branch: scaled CCDF of the shrink-rate exponential
    neg_xs = numpy.arange(-2, 0, 0.1)
    neg_ys = [pc * (1 - expo_cdf(-x, lam2)) for x in neg_xs]
    # positive branch: pc plus the scaled growth-rate exponential CDF
    pos_xs = numpy.arange(0, 7, 0.1)
    pos_ys = [pc + (1 - pc) * expo_cdf(x, lam1) for x in pos_xs]
    return list(neg_xs) + list(pos_xs), neg_ys + pos_ys
def BucketToCm(y, factor=BUCKET_FACTOR):
    """Convert a bucket number back to a linear dimension.

    y: bucket number
    factor: number of buckets per e-fold of size

    Returns: linear dimension in cm
    """
    scaled = y / factor
    return math.exp(scaled)
def CmToBucket(x, factor=BUCKET_FACTOR):
    """Convert a linear dimension to the nearest bucket number.

    x: linear dimension in cm
    factor: number of buckets per e-fold of size

    Returns: bucket number
    """
    log_size = math.log(x)
    return round(factor * log_size)
def Diameter(volume, factor=3/math.pi/4, exp=1/3.0):
    """Convert a sphere's volume to its diameter.

    d = 2r = 2 * (3/(4 pi) * V) ** (1/3)
    """
    radius_cubed = factor * volume
    return 2 * radius_cubed ** exp
def Volume(diameter, factor=4*math.pi/3):
    """Convert a sphere's diameter to its volume.

    V = 4/3 pi (d/2)**3
    """
    radius = diameter / 2.0
    return factor * radius ** 3
class Cache(object):
    """Records each observation point for each simulated tumor."""

    def __init__(self):
        """Initializes the cache.

        joint: map from (age, bucket) to frequency
        sequences: map from bucket to a list of volume sequences
        initial_rdt: sequence of (V0, rdt) pairs
        """
        self.joint = thinkbayes2.Joint()
        self.sequences = {}
        self.initial_rdt = []

    def GetBuckets(self):
        """Returns an iterable of the bucket numbers in the cache."""
        # BUG FIX: dict.iterkeys() is Python 2 only; this file targets
        # Python 3 (print() calls, __future__ imports), where it raised
        # AttributeError.  dict.keys() is the Python 3 equivalent view.
        return self.sequences.keys()

    def GetSequence(self, bucket):
        """Looks up the list of sequences stored for a bucket."""
        return self.sequences[bucket]

    def ConditionalCdf(self, bucket, name=''):
        """Forms the cdf of ages conditioned on a size bucket.

        bucket: int bucket number
        name: string name for the resulting distribution
        """
        pmf = self.joint.Conditional(0, 1, bucket, name=name)
        cdf = pmf.MakeCdf()
        return cdf

    def ProbOlder(self, cm, age):
        """Computes the probability of exceeding age, given size.

        cm: size in cm
        age: age in years
        """
        bucket = CmToBucket(cm)
        cdf = self.ConditionalCdf(bucket)
        p = cdf.Prob(age)
        return 1 - p

    def GetDistAgeSize(self, size_thresh=MAXSIZE):
        """Gets the joint distribution of age and size.

        Maps (age, log10 size in cm) to a log-scaled frequency,
        dropping buckets larger than size_thresh.

        size_thresh: maximum linear size (cm) to include

        Returns: new Joint object
        """
        joint = thinkbayes2.Joint()
        for val, freq in self.joint.Items():
            age, bucket = val
            cm = BucketToCm(bucket)
            if cm > size_thresh:
                continue
            log_cm = math.log10(cm)
            # NOTE(review): weight is natural log of freq scaled by 10;
            # given log_cm uses log10, confirm log10 wasn't intended here.
            joint.Set((age, log_cm), math.log(freq) * 10)
        return joint

    def Add(self, age, seq, rdt):
        """Adds one observation point to the cache.

        age: age of the tumor in years
        seq: sequence of volumes
        rdt: RDT during the final interval
        """
        final = seq[-1]
        cm = Diameter(final)
        bucket = CmToBucket(cm)
        self.joint.Incr((age, bucket))
        self.sequences.setdefault(bucket, []).append(seq)
        # pair the volume at the start of the last interval with its RDT
        initial = seq[-2]
        self.initial_rdt.append((initial, rdt))

    def Print(self):
        """Prints the size (cm) and the number of sequences per bucket."""
        for bucket in sorted(self.GetBuckets()):
            ss = self.GetSequence(bucket)
            diameter = BucketToCm(bucket)
            print((diameter, len(ss)))

    def Correlation(self):
        """Computes the correlation between log volumes and rdts."""
        vs, rdts = zip(*self.initial_rdt)
        lvs = [math.log(v) for v in vs]
        return correlation.Corr(lvs, rdts)
class Calculator(object):
"""Encapsulates the state of the computation."""
def __init__(self):
"""Initializes the cache."""
self.cache = Cache()
def MakeSequences(self, n, rho, cdf):
"""Returns a list of sequences of volumes.
n: number of sequences to make
rho: serial correlation
cdf: Cdf of rdts
Returns: list of n sequences of volumes
"""
sequences = []
for i in range(n):
rdt_seq = | |
== 3) & (df['Year'] == 2020)].mean())
y62 = int(df['Hedva1-B'][(df['Bagrot math'] == 4) & (df['Year'] == 2020)].mean())
y63 = int(df['Hedva1-B'][(df['Bagrot math'] == 5) & (df['Year'] == 2020)].mean())
z11 = int(df['Hedva2-B'][(df['Bagrot math'] == 3) & (df['Year'] == 2015)].mean())
z12 = int(df['Hedva2-B'][(df['Bagrot math'] == 4) & (df['Year'] == 2015)].mean())
z13 = int(df['Hedva2-B'][(df['Bagrot math'] == 5) & (df['Year'] == 2015)].mean())
z21 = int(df['Hedva2-B'][(df['Bagrot math'] == 3) & (df['Year'] == 2016)].mean())
z22 = int(df['Hedva2-B'][(df['Bagrot math'] == 4) & (df['Year'] == 2016)].mean())
z23 = int(df['Hedva2-B'][(df['Bagrot math'] == 5) & (df['Year'] == 2016)].mean())
z31 = int(df['Hedva2-B'][(df['Bagrot math'] == 3) & (df['Year'] == 2017)].mean())
z32 = int(df['Hedva2-B'][(df['Bagrot math'] == 4) & (df['Year'] == 2017)].mean())
z33 = int(df['Hedva2-B'][(df['Bagrot math'] == 5) & (df['Year'] == 2017)].mean())
z41 = int(df['Hedva2-B'][(df['Bagrot math'] == 3) & (df['Year'] == 2018)].mean())
z42 = int(df['Hedva2-B'][(df['Bagrot math'] == 4) & (df['Year'] == 2018)].mean())
z43 = int(df['Hedva2-B'][(df['Bagrot math'] == 5) & (df['Year'] == 2018)].mean())
z51 = int(df['Hedva2-B'][(df['Bagrot math'] == 3) & (df['Year'] == 2019)].mean())
z52 = int(df['Hedva2-B'][(df['Bagrot math'] == 4) & (df['Year'] == 2019)].mean())
z53 = int(df['Hedva2-B'][(df['Bagrot math'] == 5) & (df['Year'] == 2019)].mean())
z61 = int(df['Hedva2-B'][(df['Bagrot math'] == 3) & (df['Year'] == 2020)].mean())
z62 = int(df['Hedva2-B'][(df['Bagrot math'] == 4) & (df['Year'] == 2020)].mean())
z63 = int(df['Hedva2-B'][(df['Bagrot math'] == 5) & (df['Year'] == 2020)].mean())
width = 0.20
labels = ['2015', '2016', '2017', '2018', '2019', '2020']
units31 = [y11, y21, y31, y41, y51, y61]
units41 = [y12, y22, y32, y42, y52, y62]
units51 = [y13, y23, y33, y43, y53, y63]
units32 = [z11, z21, z31, z41, z51, z61]
units42 = [z12, z22, z32, z42, z52, z62]
units52 = [z13, z23, z33, z43, z53, z63]
x = np.arange(len(labels))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12.5, 5))
r1 = np.arange(len(units31))
r2 = [x + width for x in r1]
r3 = [x + width for x in r2]
rects11 = ax1.bar(r1, units31, width, label='3 Unites')
rects12 = ax1.bar(r2, units41, width, label='4 Unites')
rects13 = ax1.bar(r3, units51, width, label='5 Unites')
rects21 = ax2.bar(r1, units32, width, label='3 Unites')
rects22 = ax2.bar(r2, units42, width, label='4 Unites')
rects23 = ax2.bar(r3, units52, width, label='5 Unites')
ax1.set_ylabel('Average Grades')
fig.suptitle('Average grade in Hedva versus matriculation units in math')
ax1.set_title('Hedva1')
ax2.set_title('Hedva2')
fig.tight_layout()
ax1.set_xticks(x)
ax1.set_xticklabels(labels)
ax2.set_xticks(x)
ax2.set_xticklabels(labels)
ax1.legend(bbox_to_anchor=(-0.05, 1))
plt.subplots_adjust(left=0.12, bottom=0.07, top=0.87, wspace=0.14)
plt.show()
def graph10():
    """Grouped bar charts of average Physics grades by matriculation
    (Bagrot) units in physics, one group per year, one axis per course.

    Reads the module-level DataFrame ``df``; shows the figure.
    The 36 copy-pasted mean computations of the original are replaced
    by a single helper -- the plotted values are identical.
    """
    years = [2015, 2016, 2017, 2018, 2019, 2020]
    bagrot_units = [3, 4, 5]

    def mean_grades(column, units):
        """Average grade (truncated to int) for one course column and
        one unit level, one value per year."""
        return [
            int(df[column][(df['Bagrot physics'] == units)
                           & (df['Year'] == year)].mean())
            for year in years
        ]

    # NOTE(review): ax1 is titled 'Physics 1' but plots 'Physics2-B'
    # averages (and ax2 the reverse), unlike the Hedva graph which pairs
    # titles with matching columns -- confirm which pairing is intended.
    # The original pairing is preserved here.
    ax1_series = [mean_grades('Physics2-B', u) for u in bagrot_units]
    ax2_series = [mean_grades('Physics1-B', u) for u in bagrot_units]

    width = 0.20
    labels = [str(y) for y in years]
    x = np.arange(len(labels))
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12.5, 5))
    # one bar group per unit level, offset by one bar width each
    offsets = [np.arange(len(years)) + i * width for i in range(len(bagrot_units))]
    for ax, series in ((ax1, ax1_series), (ax2, ax2_series)):
        for r, values, units in zip(offsets, series, bagrot_units):
            ax.bar(r, values, width, label='%d Unites' % units)
    ax1.set_ylabel('Average Grades')
    fig.suptitle('Grade average in Physics versus matriculation units in Physics')
    ax1.set_title('Physics 1')
    ax2.set_title('Physics 2')
    fig.tight_layout()
    for ax in (ax1, ax2):
        ax.set_xticks(x)
        ax.set_xticklabels(labels)
    ax1.legend(bbox_to_anchor=(-0.05, 1))
    plt.subplots_adjust(left=0.12, bottom=0.07, top=0.87, wspace=0.14)
    plt.show()
def graph11():
    """Heat map of the percentage of students who failed (grade below 60
    on both exam sittings) in each core course, per year.

    Reads the module-level DataFrame ``df`` and row count ``rows``;
    shows the figure.  The five copy-pasted counting loops of the
    original are replaced by one helper -- the computed values are
    identical.
    """
    years = ['2015', '2016', '2017', '2018', '2019', '2020']
    courses = ['Hedva1', 'Hedva2', 'Physics1', 'Physics2', 'Python']

    def fail_percentages(course):
        """Per-year percentage of rows failing both sittings of course,
        formatted to two decimals (as strings, like the original)."""
        percentages = []
        for year in range(2015, 2021):
            count = 0
            for i in range(rows):
                # NOTE(review): indexing a .loc scalar with a boolean
                # (df.loc[i, col][df.loc[i, 'Year'] == year]) is unusual
                # and only works if df.loc[i, col] yields a Series --
                # confirm the intended per-year filter.  Kept verbatim.
                if (df.loc[i, course + '-A'][df.loc[i, 'Year'] == year] < 60
                        and df.loc[i, course + '-B'][df.loc[i, 'Year'] == year] < 60):
                    count += 1
            percentages.append("{:.2f}".format((count / rows) * 100))
        return percentages

    data = np.array([fail_percentages(c) for c in courses])
    # np.asfarray was removed in NumPy 2.0; asarray(dtype=float) is the
    # exact equivalent conversion of the string matrix to float64.
    data = np.asarray(data, dtype=float)
    fig, ax = plt.subplots(figsize=(9, 5))
    im = ax.imshow(data, 'YlGn')
    cbar = ax.figure.colorbar(im, ax=ax)
    cbar.ax.set_ylabel('Percentage', rotation=-90, va="bottom")
    ax.set_xticks(np.arange(len(years)))
    ax.set_yticks(np.arange(len(courses)))
    ax.set_xticklabels(years)
    ax.set_yticklabels(courses)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # annotate every cell with its percentage value
    for i in range(len(courses)):
        for j in range(len(years)):
            ax.text(j, i, str(data[i, j]) + '%',
                    ha="center", va="center", color="black")
    ax.set_title("Percentage of students who failed in core courses")
    fig.tight_layout()
    plt.subplots_adjust(top=0.92)
    plt.show()
def graph12():
    """Pie chart of the number of students per year (reads global df)."""
    def absolute_count(pct, totals):
        """Convert a wedge percentage back to an absolute student count."""
        return "{:d}".format(int(pct / 100 * np.sum(totals)))

    counts = list(df['Year'].value_counts())
    # labels hard-coded in value_counts order (most frequent first)
    year_labels = ['2019', '2017', '2015', '2020', '2016', '2018']
    palette = ['teal', 'firebrick', 'darkorange', 'goldenrod', 'khaki', 'yellowgreen']
    fig, ax = plt.subplots()
    ax.pie(counts, labels=year_labels, colors=palette,
           autopct=lambda pct: absolute_count(pct, counts),
           explode=(0.2, 0, 0, 0, 0, 0), shadow=True, startangle=90)
    ax.axis('equal')
    plt.tight_layout()
    ax.set_title('Number of students each year', fontsize=20)
    plt.subplots_adjust(top=0.85)
    plt.show()
def graph13():
    """Histogram of the annual grade average (reads global df)."""
    df['Annual average'].hist()
    plt.xlabel('Average')
    plt.ylabel('Frequency')
    plt.title('Histogram')
    plt.show()
def graph14():
fig = | |
% sys.version_info[:3])
# Program-wide logger; handlers and levels are configured elsewhere.
logger = logging.getLogger(progname)
# All runtime configuration comes from the command line.
(loop_seconds, days, amount, zones_to_water, info, emulating, mysql_host, mysql_user, mysql_passwd) = parse_arguments(logger)
logger.info("Started program %s, version %s", progname, version)
# days == 0 selects fixed-amount irrigation; otherwise look back over history.
if (days == 0):
    logger.info("Irrigating %.2f mm", amount)
else:
    logger.info("Looking back: %d days", days)
logger.debug("Zones to water: %s", zones_to_water)
logger.debug("MySQL Server : %s", mysql_host)
logger.debug("MySQL User : %s", mysql_user)
logger.debug("MySQL Password: %s", mysql_passwd)
# Emulate GPIO behaviour unless we are actually running on the Raspberry Pi.
host_name = socket.gethostname()
if (emulating or "raspberrypi" not in host_name):
    logger.info("Running on %s, emulating RPi behaviour", host_name)
    emulating = True
else:
    logger.info("Running on %s, running real RPi GPIO", host_name)
    emulating = False
# Set reference to PIN numbers
# NOTE(review): the GPIO.setmode/setup calls below run even when emulating
# is True -- presumably a stub GPIO module is in use then; confirm.
GPIO.setmode(GPIO.BOARD)
# Settings for Relay board 2 (water source ball valves to LOW = Closed)
GPIO.setup(valve_barrel_PIN, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(valve_drinking_PIN, GPIO.OUT, initial=GPIO.LOW)
# Settings for Relay board 4, LOW active (solenoids for up to 4 irrigation areas)
GPIO.setup(valve_grass_PIN, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(valve_front_PIN, GPIO.OUT, initial=GPIO.HIGH)
GPIO.setup(valve_sprinkler_PIN, GPIO.OUT, initial=GPIO.HIGH)
# GPIO.setup(valve_SPARE_PIN, GPIO.OUT, initial=GPIO.HIGH)
# Settings for flow meters (inputs with pull-ups)
GPIO.setup(flow_grass_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(flow_front_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(flow_sprinkler_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Start handling termination signal with Python Exception
signal.signal(signal.SIGTERM, handle_sigterm)
# Done setting up, now starting main program
# Load evaporation history if days is specified (alternative is irrigating fixed amount)
if (days > 0):
    (tempDay, humidityDay, pressureDay, radiationDay, rainDay) = load_evaporation(logger, days, mysql_host, mysql_user, mysql_passwd)
    # Makkink reference evaporation from temperature/humidity/pressure/radiation.
    evap = makkink_evaporation.Em(logger, tempDay, humidityDay, pressureDay, radiationDay)
    # Typically the evaporation seems to be too high, so correcting with a factor
    evapSum = numpy.sum(evap) * EVAP_FACTOR
    rainSum = numpy.sum(rainDay)
    logger.info("Evaporation = %.1f mm in last %d days", evapSum, days)
    logger.debug(" (%s)", str(numpy.around(evap, 3)))
    logger.info("Rainfall = %.1f mm in last %d days", rainSum, days)
    # If more rainfall than evaporation, no irrigation is needed
    if (rainSum >= evapSum):
        print("No irrigation needed (%.1f mm more rainfall than evaporation), exiting" % (rainSum - evapSum))
        if (not emulating):
            GPIO.cleanup()
        exit(0)
# Possibly need to irrigate (depending on past irrigations), set up sources & zones
# Init zones
zones = []
zones.append(IrrigationZone(logger, zone_grass_name, valve_grass_PIN, zone_grass_area, zone_grass_shadow, flow_grass_PIN, zone_grass_min_flow))
zones.append(IrrigationZone(logger, zone_front_name, valve_front_PIN, zone_front_area, zone_front_shadow, flow_front_PIN, zone_front_min_flow))
zones.append(IrrigationZone(logger, zone_side_name, valve_sprinkler_PIN, zone_side_area, zone_side_shadow, flow_sprinkler_PIN, zone_side_min_flow ))
# Skip if no need to water
if (not info):
# Init sources, start with most durable one (will start with source 0), until empty (no flow)
sources = []
sources.append(WaterSource(logger, source_barrel_name, valve_barrel_PIN))
sources.append(WaterSource(logger, source_drinking_name, valve_drinking_PIN))
# Start irrigation
# start with first water source (most durable)
source_index = 0
source = sources[source_index]
for zone in zones:
if (zones_to_water != "all"):
skip = False
for zone_to_water in zones_to_water:
if (zone_to_water not in zone.get_name().lower()):
# Skip this zone
logger.debug("Skipping zone %s, as %s not in %s", zone.get_name(), zone.get_name().lower(), zones_to_water)
skip = True
break
if (skip): continue # next zone in zones
# Load evaporation history if days is specficied (alternative is irrigating fixed amount)
if (days > 0):
waterDay = load_irrigated(logger, zone.get_name(), days, mysql_host, mysql_user, mysql_passwd)
waterSum = numpy.sum(waterDay)
logger.info("Zone %s Watering %.1f mm in last %d days", zone.get_name(), waterSum, days)
# Now calculate shortage = evaporation - rain - watering
net_evap = evapSum * zone.get_shadow() - rainSum - waterSum
print("Zone %s Net Evaporation = %.1f mm in last %d days" % (zone.get_name(), net_evap, days))
logger.info("Zone %s Net Evaporation = %.1f mm in last %d days" % (zone.get_name(), net_evap, days))
if net_evap <= 1:
print("No need for irrigation")
logger.info("No need for irrigation")
continue # next zone in zones
else:
if (net_evap > MAX_IRRIGATION):
liters_per_m2 = MAX_IRRIGATION
else:
liters_per_m2 = net_evap
else:
liters_per_m2 = amount
# Translate to liters for this zone
liters = zone.get_area() * liters_per_m2
if (info):
print("Should irrigate zone %s with %.0f liters on the %d m2 area" % (zone.get_name(), net_evap * zone.get_area(), zone.get_area()))
continue # to next zones in zone
print("Starting irrigating zone %s with source %s" % (zone.get_name(), source.get_name()))
print("Need to put %.0f liters on the %d m2 area" % (liters, zone.get_area()))
logger.info("Starting irrigating zone %s with source %s" % (zone.get_name(), source.get_name()))
logger.info("Need to put %.0f liters on the %d m2 area" % (liters, zone.get_area()))
if (not emulating):
# Init flowmeter callback
zone.set_pulse_callback()
# Open zone valve
zone.open_valve()
# Open source valve
source.open_valve()
else:
# Init fake flowmeter callback
zone.set_emulated_pulse_callback()
# Initialize timing
start_time = datetime.now()
actual_liters = 0.0
# Wait for some flow to start, get current timestamp and first flow meter reading, while handling terminations
try:
sleep(10)
# Also allow Keyboard interrupts for command line testing
except (KeyboardInterrupt, SystemExit):
# Close the valves and exit program
logger.info("Interrupted; closing valves and exiting...")
if (not emulating):
zone.close_valve()
source.close_valve()
# Calculate liters per m2 irrigated
zone.set_irrigated_liters(actual_liters)
actual_liters_per_m2 = actual_liters / zone.get_area()
# Store irrigation in database
save_irrigated(logger, zone.get_name(), float(actual_liters_per_m2), mysql_host, mysql_user, mysql_passwd)
GPIO.cleanup()
else:
# Remove fake flowmeter thread callback
zone.clear_emulated_pulse_callback()
exit(-1)
flow_rate = zone.get_flow_rate()
logger.debug("Flow rate: %.0f liter(s) per minute", flow_rate)
actual_liters += 10 / 60 * flow_rate
# If flowrate is still zero, use 1 liter per minute to initiate
duration = liters / max(flow_rate, 1) * 60
logger.info("Stopping in about %d seconds", duration)
previous_time = start_time
previous_flow_rate = flow_rate
while duration > 0:
try:
# Monitor every 60 seconds, or remaining duration if smaller (though always more than 5 seconds to measure a flow)
sleep(min(loop_seconds, max(duration, 5)))
except (KeyboardInterrupt, SystemExit):
# Close the valves and exit program
logger.info("Interrupted; closing valves and exiting...")
if (not emulating):
zone.close_valve()
source.close_valve()
# Calculate liters per m2 irrigated
zone.set_irrigated_liters(actual_liters)
actual_liters_per_m2 = actual_liters / zone.get_area()
# Store irrigation in database
save_irrigated(logger, zone.get_name(), float(actual_liters_per_m2), mysql_host, mysql_user, mysql_passwd)
GPIO.cleanup()
else:
# Remove fake flowmeter thread callback
zone.clear_emulated_pulse_callback()
print("ERROR: Ended zone %s due to Interruption" % zone.get_name())
if (actual_liters < liters):
print("Having only watered %.1f liters of required %.1f" % (actual_liters, liters))
logger.info("Ended zone %s having watered %.1f mm (%.1f liters)" % (zone.get_name(), actual_liters_per_m2, actual_liters))
exit(-1)
# Check flow and time
current_time = datetime.now()
current_seconds = (current_time - previous_time).total_seconds()
flow_rate = zone.get_flow_rate()
logger.debug("Flow rate: %.0f liter(s) per minute, during %d seconds", flow_rate, current_seconds)
# See if source flow rate complies to requirement for zone
if (flow_rate < zone.get_flow_required() and previous_flow_rate < zone.get_flow_required()):
# Flow rate too low, switch to next source
logger.info("Switching to next source, as flow rate too low (%.1f then %.1f, where %.1f required)", previous_flow_rate, flow_rate, zone.get_flow_required())
if (not emulating):
# Close source valve, make sure it is fully closed before switching to next source
source.close_valve()
sleep(15)
if (source_index < len(sources)-1):
# Next source
source_index += 1
else:
# Last item in list, stop with error
logger.info("No more sources, closing valves and exiting...")
if (not emulating):
zone.close_valve()
# Calculate liters per m2 irrigated
zone.set_irrigated_liters(actual_liters)
actual_liters_per_m2 = actual_liters / zone.get_area()
# Store irrigation in database
save_irrigated(logger, zone.get_name(), float(actual_liters_per_m2), mysql_host, mysql_user, mysql_passwd)
GPIO.cleanup()
else:
# Remove fake flowmeter thread callback
zone.clear_emulated_pulse_callback()
print("ERROR: Ended zone %s due to No More Sources (Is there a water flow issue?)" % zone.get_name())
if (actual_liters < liters):
print("Having only watered %.1f liters of required %.1f" % (actual_liters, liters))
logger.info("Ended zone %s having watered %.1f mm (%.1f liters)" % (zone.get_name(), actual_liters_per_m2, actual_liters))
exit(-1)
# Continue with next source
source = sources[source_index]
print("Continuing irrigating zone %s with source %s" % (zone.get_name(), source.get_name()))
print("Need to put %.0f liters on the %d m2 area" % (liters-actual_liters, zone.get_area()))
logger.info("Continuing irrigating zone %s with source %s" % (zone.get_name(), source.get_name()))
logger.info("Need to put %.0f liters on the %d m2 area" % (liters-actual_liters, zone.get_area()))
if (not emulating):
# Open source valve
source.open_valve()
# Wait for some flow to start, get current timestamp and first flow meter reading
try:
sleep(10)
except (KeyboardInterrupt, SystemExit):
# Close the valves and exit program
logger.info("Interrupted; closing valves and exiting...")
if (not emulating):
zone.close_valve()
source.close_valve()
# Calculate liters per m2 irrigated
zone.set_irrigated_liters(actual_liters)
actual_liters_per_m2 = actual_liters / zone.get_area()
# Store irrigation in database
save_irrigated(logger, zone.get_name(), float(actual_liters_per_m2), mysql_host, mysql_user, mysql_passwd)
GPIO.cleanup()
else:
# Remove fake flowmeter thread callback
zone.clear_emulated_pulse_callback()
exit(-1)
flow_rate = zone.get_flow_rate()
logger.debug("Flow rate: %.0f liter(s) per minute", flow_rate)
# If flowrate is still zero, use | |
__tablename__ = 'image_type'
__table_args__ = (
{'schema': mbdata.config.schemas.get('cover_art_archive', 'cover_art_archive')}
)
mime_type = Column(String, primary_key=True, nullable=False)
suffix = Column(String, nullable=False)
class CoverArt(Base):
    """One cover-art image attached to a release (cover_art_archive schema)."""
    __tablename__ = 'cover_art'
    __table_args__ = (
        Index('cover_art_idx_release', 'release'),
        {'schema': mbdata.config.schemas.get('cover_art_archive', 'cover_art_archive')}
    )

    id = Column(BIGINT, primary_key=True, nullable=False)
    # Release this image belongs to; DB cascades deletion with the release.
    release_id = Column('release', Integer, ForeignKey(apply_schema('release.id', u'musicbrainz'), name='cover_art_fk_release', ondelete='CASCADE'), nullable=False)
    comment = Column(String, default='', server_default=sql.text("''"), nullable=False)
    # Edit that introduced this image.
    edit_id = Column('edit', Integer, ForeignKey(apply_schema('edit.id', u'musicbrainz'), name='cover_art_fk_edit'), nullable=False)
    # Display order of this image among the release's artwork.
    ordering = Column(Integer, nullable=False)
    date_uploaded = Column(DateTime(timezone=True), server_default=sql.func.now(), nullable=False)
    edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
    mime_type = Column(String, ForeignKey(apply_schema('image_type.mime_type', u'cover_art_archive'), name='cover_art_fk_mime_type'), nullable=False)

    release = relationship('Release', foreign_keys=[release_id], innerjoin=True)
    edit = relationship('Edit', foreign_keys=[edit_id], innerjoin=True)
class CoverArtType(Base):
    """Join table linking a cover-art image to one of its art types."""
    __tablename__ = 'cover_art_type'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('cover_art_archive', 'cover_art_archive')}
    )

    # Composite primary key: (image id, type id); rows cascade with the image.
    id = Column('id', BIGINT, ForeignKey(apply_schema('cover_art.id', u'cover_art_archive'), name='cover_art_type_fk_id', ondelete='CASCADE'), primary_key=True, nullable=False)
    type_id = Column('type_id', Integer, ForeignKey(apply_schema('art_type.id', u'cover_art_archive'), name='cover_art_type_fk_type_id'), primary_key=True, nullable=False)

    cover_art = relationship('CoverArt', foreign_keys=[id], innerjoin=True)
    type = relationship('ArtType', foreign_keys=[type_id], innerjoin=True)
class ReleaseGroupCoverArt(Base):
    """Selects which release's artwork represents a release group."""
    __tablename__ = 'release_group_cover_art'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('cover_art_archive', 'cover_art_archive')}
    )
    release_group_id = Column('release_group', Integer, ForeignKey(apply_schema('release_group.id', u'musicbrainz'), name='release_group_cover_art_fk_release_group'), primary_key=True, nullable=False)
    release_id = Column('release', Integer, ForeignKey(apply_schema('release.id', u'musicbrainz'), name='release_group_cover_art_fk_release'), nullable=False)
    release_group = relationship('ReleaseGroup', foreign_keys=[release_group_id], innerjoin=True)
    release = relationship('Release', foreign_keys=[release_id], innerjoin=True)
class WikidocsIndex(Base):
    """Pinned wiki revision for a documentation page (wikidocs schema)."""
    __tablename__ = 'wikidocs_index'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('wikidocs', 'wikidocs')}
    )
    page_name = Column(String, primary_key=True, nullable=False)
    revision = Column(Integer, nullable=False)
class LinkAreaAreaExample(Base):
    """Documentation example for an area-area relationship link (documentation schema)."""
    __tablename__ = 'l_area_area_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    # Shares its primary key with the musicbrainz.l_area_area row it annotates.
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_area.id', u'musicbrainz'), name='l_area_area_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaArea', foreign_keys=[id], innerjoin=True)
class LinkAreaArtistExample(Base):
    """Documentation example for an area-artist relationship link (documentation schema)."""
    __tablename__ = 'l_area_artist_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_artist.id', u'musicbrainz'), name='l_area_artist_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaArtist', foreign_keys=[id], innerjoin=True)
class LinkAreaEventExample(Base):
    """Documentation example for an area-event relationship link (documentation schema)."""
    __tablename__ = 'l_area_event_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_event.id', u'musicbrainz'), name='l_area_event_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaEvent', foreign_keys=[id], innerjoin=True)
class LinkAreaInstrumentExample(Base):
    """Documentation example for an area-instrument relationship link (documentation schema)."""
    __tablename__ = 'l_area_instrument_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_instrument.id', u'musicbrainz'), name='l_area_instrument_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaInstrument', foreign_keys=[id], innerjoin=True)
class LinkAreaLabelExample(Base):
    """Documentation example for an area-label relationship link (documentation schema)."""
    __tablename__ = 'l_area_label_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_label.id', u'musicbrainz'), name='l_area_label_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaLabel', foreign_keys=[id], innerjoin=True)
class LinkAreaPlaceExample(Base):
    """Documentation example for an area-place relationship link (documentation schema)."""
    __tablename__ = 'l_area_place_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_place.id', u'musicbrainz'), name='l_area_place_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaPlace', foreign_keys=[id], innerjoin=True)
class LinkAreaRecordingExample(Base):
    """Documentation example for an area-recording relationship link (documentation schema)."""
    __tablename__ = 'l_area_recording_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_recording.id', u'musicbrainz'), name='l_area_recording_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaRecording', foreign_keys=[id], innerjoin=True)
class LinkAreaReleaseExample(Base):
    """Documentation example for an area-release relationship link (documentation schema)."""
    __tablename__ = 'l_area_release_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_release.id', u'musicbrainz'), name='l_area_release_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaRelease', foreign_keys=[id], innerjoin=True)
class LinkAreaReleaseGroupExample(Base):
    """Documentation example for an area-release group relationship link (documentation schema)."""
    __tablename__ = 'l_area_release_group_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_release_group.id', u'musicbrainz'), name='l_area_release_group_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaReleaseGroup', foreign_keys=[id], innerjoin=True)
class LinkAreaURLExample(Base):
    """Documentation example for an area-URL relationship link (documentation schema)."""
    __tablename__ = 'l_area_url_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_url.id', u'musicbrainz'), name='l_area_url_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaURL', foreign_keys=[id], innerjoin=True)
class LinkAreaWorkExample(Base):
    """Documentation example for an area-work relationship link (documentation schema)."""
    __tablename__ = 'l_area_work_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_area_work.id', u'musicbrainz'), name='l_area_work_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkAreaWork', foreign_keys=[id], innerjoin=True)
class LinkArtistArtistExample(Base):
    """Documentation example for an artist-artist relationship link (documentation schema)."""
    __tablename__ = 'l_artist_artist_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_artist.id', u'musicbrainz'), name='l_artist_artist_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistArtist', foreign_keys=[id], innerjoin=True)
class LinkArtistEventExample(Base):
    """Documentation example for an artist-event relationship link (documentation schema)."""
    __tablename__ = 'l_artist_event_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_event.id', u'musicbrainz'), name='l_artist_event_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistEvent', foreign_keys=[id], innerjoin=True)
class LinkArtistInstrumentExample(Base):
    """Documentation example for an artist-instrument relationship link (documentation schema)."""
    __tablename__ = 'l_artist_instrument_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_instrument.id', u'musicbrainz'), name='l_artist_instrument_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistInstrument', foreign_keys=[id], innerjoin=True)
class LinkArtistLabelExample(Base):
    """Documentation example for an artist-label relationship link (documentation schema)."""
    __tablename__ = 'l_artist_label_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_label.id', u'musicbrainz'), name='l_artist_label_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistLabel', foreign_keys=[id], innerjoin=True)
class LinkArtistPlaceExample(Base):
    """Documentation example for an artist-place relationship link (documentation schema)."""
    __tablename__ = 'l_artist_place_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_place.id', u'musicbrainz'), name='l_artist_place_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistPlace', foreign_keys=[id], innerjoin=True)
class LinkArtistRecordingExample(Base):
    """Documentation example for an artist-recording relationship link (documentation schema)."""
    __tablename__ = 'l_artist_recording_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_recording.id', u'musicbrainz'), name='l_artist_recording_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistRecording', foreign_keys=[id], innerjoin=True)
class LinkArtistReleaseExample(Base):
    """Documentation example for an artist-release relationship link (documentation schema)."""
    __tablename__ = 'l_artist_release_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_release.id', u'musicbrainz'), name='l_artist_release_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistRelease', foreign_keys=[id], innerjoin=True)
class LinkArtistReleaseGroupExample(Base):
    """Documentation example for an artist-release group relationship link (documentation schema)."""
    __tablename__ = 'l_artist_release_group_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_release_group.id', u'musicbrainz'), name='l_artist_release_group_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistReleaseGroup', foreign_keys=[id], innerjoin=True)
class LinkArtistURLExample(Base):
    """Documentation example for an artist-URL relationship link (documentation schema)."""
    __tablename__ = 'l_artist_url_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_url.id', u'musicbrainz'), name='l_artist_url_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistURL', foreign_keys=[id], innerjoin=True)
class LinkArtistWorkExample(Base):
    """Documentation example for an artist-work relationship link (documentation schema)."""
    __tablename__ = 'l_artist_work_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_artist_work.id', u'musicbrainz'), name='l_artist_work_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkArtistWork', foreign_keys=[id], innerjoin=True)
class LinkEventEventExample(Base):
    """Documentation example for an event-event relationship link (documentation schema)."""
    __tablename__ = 'l_event_event_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_event.id', u'musicbrainz'), name='l_event_event_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventEvent', foreign_keys=[id], innerjoin=True)
class LinkEventInstrumentExample(Base):
    """Documentation example for an event-instrument relationship link (documentation schema)."""
    __tablename__ = 'l_event_instrument_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_instrument.id', u'musicbrainz'), name='l_event_instrument_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventInstrument', foreign_keys=[id], innerjoin=True)
class LinkEventLabelExample(Base):
    """Documentation example for an event-label relationship link (documentation schema)."""
    __tablename__ = 'l_event_label_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_label.id', u'musicbrainz'), name='l_event_label_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventLabel', foreign_keys=[id], innerjoin=True)
class LinkEventPlaceExample(Base):
    """Documentation example for an event-place relationship link (documentation schema)."""
    __tablename__ = 'l_event_place_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_place.id', u'musicbrainz'), name='l_event_place_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventPlace', foreign_keys=[id], innerjoin=True)
class LinkEventRecordingExample(Base):
    """Documentation example for an event-recording relationship link (documentation schema)."""
    __tablename__ = 'l_event_recording_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_recording.id', u'musicbrainz'), name='l_event_recording_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventRecording', foreign_keys=[id], innerjoin=True)
class LinkEventReleaseExample(Base):
    """Documentation example for an event-release relationship link (documentation schema)."""
    __tablename__ = 'l_event_release_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_release.id', u'musicbrainz'), name='l_event_release_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventRelease', foreign_keys=[id], innerjoin=True)
class LinkEventReleaseGroupExample(Base):
    """Documentation example for an event-release group relationship link (documentation schema)."""
    __tablename__ = 'l_event_release_group_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_release_group.id', u'musicbrainz'), name='l_event_release_group_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventReleaseGroup', foreign_keys=[id], innerjoin=True)
class LinkEventSeriesExample(Base):
    """Documentation example for an event-series relationship link (documentation schema)."""
    __tablename__ = 'l_event_series_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_series.id', u'musicbrainz'), name='l_event_series_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventSeries', foreign_keys=[id], innerjoin=True)
class LinkEventURLExample(Base):
    """Documentation example for an event-URL relationship link (documentation schema)."""
    __tablename__ = 'l_event_url_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_url.id', u'musicbrainz'), name='l_event_url_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventURL', foreign_keys=[id], innerjoin=True)
class LinkEventWorkExample(Base):
    """Documentation example for an event-work relationship link (documentation schema)."""
    __tablename__ = 'l_event_work_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_event_work.id', u'musicbrainz'), name='l_event_work_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkEventWork', foreign_keys=[id], innerjoin=True)
class LinkInstrumentInstrumentExample(Base):
    """Documentation example for an instrument-instrument relationship link (documentation schema)."""
    __tablename__ = 'l_instrument_instrument_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_instrument_instrument.id', u'musicbrainz'), name='l_instrument_instrument_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkInstrumentInstrument', foreign_keys=[id], innerjoin=True)
class LinkInstrumentLabelExample(Base):
    """Documentation example for an instrument-label relationship link (documentation schema)."""
    __tablename__ = 'l_instrument_label_example'
    __table_args__ = (
        {'schema': mbdata.config.schemas.get('documentation', 'documentation')}
    )
    id = Column('id', Integer, ForeignKey(apply_schema('l_instrument_label.id', u'musicbrainz'), name='l_instrument_label_example_fk_id'), primary_key=True, nullable=False)
    published = Column(Boolean, nullable=False)
    name = Column(String, nullable=False)
    link = relationship('LinkInstrumentLabel', foreign_keys=[id], innerjoin=True)
class LinkInstrumentPlaceExample(Base):
| |
8,
(0, 'O'): 8,
(0, 'P'): 8,
(0, 'Q'): 8,
(0, 'R'): 8,
(0, 'S'): 8,
(0, 'T'): 8,
(0, 'U'): 8,
(0, 'V'): 8,
(0, 'W'): 8,
(0, 'X'): 8,
(0, 'Y'): 8,
(0, 'Z'): 8,
(0, '['): 18,
(0, '\\'): 9,
(0, ']'): 34,
(0, '^'): 25,
(0, '_'): 8,
(0, 'a'): 10,
(0, 'b'): 26,
(0, 'c'): 10,
(0, 'd'): 10,
(0, 'e'): 10,
(0, 'f'): 10,
(0, 'g'): 10,
(0, 'h'): 10,
(0, 'i'): 35,
(0, 'j'): 10,
(0, 'k'): 10,
(0, 'l'): 10,
(0, 'm'): 36,
(0, 'n'): 10,
(0, 'o'): 10,
(0, 'p'): 10,
(0, 'q'): 10,
(0, 'r'): 27,
(0, 's'): 10,
(0, 't'): 10,
(0, 'u'): 10,
(0, 'v'): 10,
(0, 'w'): 10,
(0, 'x'): 11,
(0, 'y'): 10,
(0, 'z'): 10,
(0, '{'): 19,
(0, '|'): 12,
(0, '}'): 37,
(0, '~'): 28,
(4, '.'): 98,
(5, '.'): 98,
(5, '0'): 5,
(5, '1'): 5,
(5, '2'): 5,
(5, '3'): 5,
(5, '4'): 5,
(5, '5'): 5,
(5, '6'): 5,
(5, '7'): 5,
(5, '8'): 5,
(5, '9'): 5,
(6, '<'): 97,
(7, '<'): 93,
(7, '='): 92,
(7, '>'): 94,
(8, '0'): 8,
(8, '1'): 8,
(8, '2'): 8,
(8, '3'): 8,
(8, '4'): 8,
(8, '5'): 8,
(8, '6'): 8,
(8, '7'): 8,
(8, '8'): 8,
(8, '9'): 8,
(8, 'A'): 8,
(8, 'B'): 8,
(8, 'C'): 8,
(8, 'D'): 8,
(8, 'E'): 8,
(8, 'F'): 8,
(8, 'G'): 8,
(8, 'H'): 8,
(8, 'I'): 8,
(8, 'J'): 8,
(8, 'K'): 8,
(8, 'L'): 8,
(8, 'M'): 8,
(8, 'N'): 8,
(8, 'O'): 8,
(8, 'P'): 8,
(8, 'Q'): 8,
(8, 'R'): 8,
(8, 'S'): 8,
(8, 'T'): 8,
(8, 'U'): 8,
(8, 'V'): 8,
(8, 'W'): 8,
(8, 'X'): 8,
(8, 'Y'): 8,
(8, 'Z'): 8,
(8, '_'): 8,
(8, 'a'): 8,
(8, 'b'): 8,
(8, 'c'): 8,
(8, 'd'): 8,
(8, 'e'): 8,
(8, 'f'): 8,
(8, 'g'): 8,
(8, 'h'): 8,
(8, 'i'): 8,
(8, 'j'): 8,
(8, 'k'): 8,
(8, 'l'): 8,
(8, 'm'): 8,
(8, 'n'): 8,
(8, 'o'): 8,
(8, 'p'): 8,
(8, 'q'): 8,
(8, 'r'): 8,
(8, 's'): 8,
(8, 't'): 8,
(8, 'u'): 8,
(8, 'v'): 8,
(8, 'w'): 8,
(8, 'x'): 8,
(8, 'y'): 8,
(8, 'z'): 8,
(9, '+'): 88,
(9, '/'): 90,
(9, '='): 89,
(10, '0'): 10,
(10, '1'): 10,
(10, '2'): 10,
(10, '3'): 10,
(10, '4'): 10,
(10, '5'): 10,
(10, '6'): 10,
(10, '7'): 10,
(10, '8'): 10,
(10, '9'): 10,
(10, 'A'): 10,
(10, 'B'): 10,
(10, 'C'): 10,
(10, 'D'): 10,
(10, 'E'): 10,
(10, 'F'): 10,
(10, 'G'): 10,
(10, 'H'): 10,
(10, 'I'): 10,
(10, 'J'): 10,
(10, 'K'): 10,
(10, 'L'): 10,
(10, 'M'): 10,
(10, 'N'): 10,
(10, 'O'): 10,
(10, 'P'): 10,
(10, 'Q'): 10,
(10, 'R'): 10,
(10, 'S'): 10,
(10, 'T'): 10,
(10, 'U'): 10,
(10, 'V'): 10,
(10, 'W'): 10,
(10, 'X'): 10,
(10, 'Y'): 10,
(10, 'Z'): 10,
(10, '_'): 10,
(10, 'a'): 10,
(10, 'b'): 10,
(10, 'c'): 10,
(10, 'd'): 10,
(10, 'e'): 10,
(10, 'f'): 10,
(10, 'g'): 10,
(10, 'h'): 10,
(10, 'i'): 10,
(10, 'j'): 10,
(10, 'k'): 10,
(10, 'l'): 10,
(10, 'm'): 10,
(10, 'n'): 10,
(10, 'o'): 10,
(10, 'p'): 10,
(10, 'q'): 10,
(10, 'r'): 10,
(10, 's'): 10,
(10, 't'): 10,
(10, 'u'): 10,
(10, 'v'): 10,
(10, 'w'): 10,
(10, 'x'): 10,
(10, 'y'): 10,
(10, 'z'): 10,
(11, '0'): 10,
(11, '1'): 10,
(11, '2'): 10,
(11, '3'): 10,
(11, '4'): 10,
(11, '5'): 10,
(11, '6'): 10,
(11, '7'): 10,
(11, '8'): 10,
(11, '9'): 10,
(11, 'A'): 10,
(11, 'B'): 10,
(11, 'C'): 10,
(11, 'D'): 10,
(11, 'E'): 10,
(11, 'F'): 10,
(11, 'G'): 10,
(11, 'H'): 10,
(11, 'I'): 10,
(11, 'J'): 10,
(11, 'K'): 10,
(11, 'L'): 10,
(11, 'M'): 10,
(11, 'N'): 10,
(11, 'O'): 10,
(11, 'P'): 10,
(11, 'Q'): 10,
(11, 'R'): 10,
(11, 'S'): 10,
(11, 'T'): 10,
(11, 'U'): 10,
(11, 'V'): 10,
(11, 'W'): 10,
(11, 'X'): 10,
(11, 'Y'): 10,
(11, 'Z'): 10,
(11, '_'): 10,
(11, 'a'): 10,
(11, 'b'): 10,
(11, 'c'): 10,
(11, 'd'): 10,
(11, 'e'): 10,
(11, 'f'): 10,
(11, 'g'): 10,
(11, 'h'): 10,
(11, 'i'): 10,
(11, 'j'): 10,
(11, 'k'): 10,
(11, 'l'): 10,
(11, 'm'): 10,
(11, 'n'): 10,
(11, 'o'): 86,
(11, 'p'): 10,
(11, 'q'): 10,
(11, 'r'): 10,
(11, 's'): 10,
(11, 't'): 10,
(11, 'u'): 10,
(11, 'v'): 10,
(11, 'w'): 10,
(11, 'x'): 10,
(11, 'y'): 10,
(11, 'z'): 10,
(13, '\x00'): 13,
(13, '\x01'): 13,
(13, '\x02'): 13,
(13, '\x03'): 13,
(13, '\x04'): 13,
(13, '\x05'): 13,
(13, '\x06'): 13,
(13, '\x07'): 13,
(13, '\x08'): 13,
(13, '\t'): 13,
(13, '\n'): 13,
(13, '\x0b'): 13,
(13, '\x0c'): 13,
(13, '\r'): 13,
(13, '\x0e'): 13,
(13, '\x0f'): 13,
(13, '\x10'): 13,
(13, '\x11'): 13,
(13, '\x12'): 13,
(13, '\x13'): 13,
(13, '\x14'): 13,
(13, '\x15'): 13,
(13, '\x16'): 13,
(13, '\x17'): 13,
(13, '\x18'): 13,
(13, '\x19'): 13,
(13, '\x1a'): 13,
(13, '\x1b'): 13,
(13, '\x1c'): 13,
(13, '\x1d'): 13,
(13, '\x1e'): 13,
(13, '\x1f'): 13,
(13, ' '): 13,
(13, '!'): 13,
(13, '"'): 13,
(13, '#'): 13,
(13, '$'): 13,
(13, '%'): 13,
(13, '&'): 13,
(13, "'"): 29,
(13, '('): 13,
(13, ')'): 13,
(13, '*'): 13,
(13, '+'): 13,
(13, ','): 13,
(13, '-'): 13,
(13, '.'): 13,
(13, '/'): 13,
(13, '0'): 13,
(13, '1'): 13,
(13, '2'): 13,
(13, '3'): 13,
(13, '4'): 13,
(13, '5'): 13,
(13, '6'): 13,
(13, '7'): 13,
(13, '8'): 13,
(13, '9'): 13,
(13, ':'): 13,
(13, ';'): 13,
(13, '<'): 13,
(13, '='): 13,
(13, '>'): 13,
(13, '?'): 13,
(13, '@'): 13,
(13, 'A'): 13,
(13, 'B'): 13,
(13, 'C'): 13,
(13, 'D'): 13,
(13, 'E'): 13,
(13, 'F'): 13,
(13, 'G'): 13,
(13, 'H'): 13,
(13, 'I'): 13,
(13, 'J'): 13,
(13, 'K'): 13,
(13, 'L'): 13,
(13, 'M'): 13,
(13, 'N'): 13,
(13, 'O'): 13,
(13, 'P'): 13,
(13, 'Q'): 13,
(13, 'R'): 13,
(13, 'S'): 13,
(13, 'T'): 13,
(13, 'U'): 13,
(13, 'V'): 13,
(13, 'W'): 13,
(13, 'X'): 13,
(13, 'Y'): 13,
(13, 'Z'): 13,
(13, '['): 13,
(13, '\\'): 13,
(13, ']'): 13,
(13, '^'): 13,
(13, '_'): 13,
(13, '`'): 13,
(13, 'a'): 13,
(13, 'b'): 13,
(13, 'c'): 13,
(13, 'd'): 13,
(13, 'e'): 13,
(13, 'f'): 13,
(13, 'g'): 13,
(13, 'h'): 13,
(13, 'i'): 13,
(13, 'j'): 13,
(13, 'k'): 13,
(13, 'l'): 13,
(13, 'm'): 13,
(13, 'n'): 13,
(13, 'o'): 13,
(13, 'p'): 13,
(13, 'q'): 13,
(13, 'r'): 13,
(13, 's'): 13,
(13, 't'): 13,
(13, 'u'): 13,
(13, 'v'): 13,
(13, 'w'): 13,
(13, 'x'): 13,
(13, 'y'): 13,
(13, 'z'): 13,
(13, '{'): 13,
(13, '|'): 13,
(13, '}'): 13,
(13, '~'): 13,
(13, '\x7f'): 13,
(13, '\x80'): 13,
(13, '\x81'): 13,
(13, '\x82'): 13,
(13, '\x83'): 13,
(13, '\x84'): 13,
(13, '\x85'): 13,
(13, '\x86'): 13,
(13, '\x87'): 13,
(13, '\x88'): 13,
(13, '\x89'): 13,
(13, '\x8a'): 13,
(13, '\x8b'): 13,
(13, '\x8c'): 13,
(13, '\x8d'): 13,
(13, '\x8e'): 13,
(13, '\x8f'): 13,
(13, '\x90'): 13,
(13, '\x91'): 13,
(13, '\x92'): 13,
(13, '\x93'): 13,
(13, '\x94'): 13,
(13, '\x95'): 13,
(13, '\x96'): 13,
(13, '\x97'): 13,
(13, '\x98'): 13,
(13, '\x99'): 13,
(13, '\x9a'): 13,
(13, '\x9b'): 13,
(13, '\x9c'): 13,
(13, '\x9d'): 13,
(13, '\x9e'): 13,
(13, '\x9f'): 13,
(13, '\xa0'): 13,
(13, '\xa1'): 13,
(13, '\xa2'): 13,
(13, '\xa3'): 13,
(13, '\xa4'): 13,
(13, '\xa5'): 13,
(13, '\xa6'): 13,
(13, '\xa7'): 13,
(13, '\xa8'): 13,
(13, '\xa9'): 13,
(13, '\xaa'): 13,
| |
# -*- coding: utf-8 -*-
# Import dependencies
import json
from mysql.connector import IntegrityError
from app.app_modules import db
from app.helpers import get_custom_logger, ApiError
from app.mod_blackbox.controllers import get_account_public_key, generate_and_sign_jws
from app.mod_database.helpers import get_db_cursor, get_slr_ids, get_slsr_ids, get_last_slsr_id, get_slr_ids_by_service
from app.mod_database.models import SurrogateId, ServiceLinkRecord, ServiceLinkStatusRecord
logger = get_custom_logger(__name__)
def init_slr_source(account_id=None, slr_id=None, endpoint="init_slr_source()"):
    """
    Create and store an initial (empty) Service Link Record for a Source service.

    :param account_id: id of the account that owns the SLR (coerced to str)
    :param slr_id: id for the new Service Link Record (coerced to str)
    :param endpoint: caller description reported as the error source in ApiError
    :return: id of the stored Service Link Record
    :raises AttributeError: if account_id or slr_id is not provided
    :raises TypeError: if a parameter cannot be coerced to str
    :raises ApiError: 409 if the SLR id already exists, 500 on any other DB failure
    """
    # BUGFIX: log message and default endpoint previously said "init_slr_sink()"
    # (copy-paste from the Sink variant), which made error sources misleading.
    logger.info("init_slr_source()")
    if account_id is None:
        raise AttributeError("Provide account_id as parameter")
    if slr_id is None:
        raise AttributeError("Provide slr_id as parameter")
    if not isinstance(account_id, str):
        try:
            account_id = str(account_id)
        except Exception:
            raise TypeError("account_id MUST be str, not " + str(type(account_id)))
    if not isinstance(slr_id, str):
        try:
            slr_id = str(slr_id)
        except Exception:
            raise TypeError("slr_id MUST be str, not " + str(type(slr_id)))
    if not isinstance(endpoint, str):
        try:
            endpoint = str(endpoint)
        except Exception:
            raise TypeError("endpoint MUST be str, not " + str(type(endpoint)))
    logger.info("Initing SLR")
    try:
        slr_entry = ServiceLinkRecord(
            service_link_record_id=slr_id,
            account_id=account_id
        )
    except Exception as exp:
        logger.error('Could not create Service Link Record object: ' + repr(exp))
        raise ApiError(code=500, title="Failed to create Service Link Record object", detail=repr(exp), source=endpoint)
    else:
        logger.info("Service Link Record entry created")
        logger.debug(slr_entry.log_entry)
    # Get DB cursor
    try:
        cursor = get_db_cursor()
    except Exception as exp:
        logger.error('Could not get database cursor: ' + repr(exp))
        raise ApiError(code=500, title="Failed to get database cursor", detail=repr(exp), source=endpoint)
    # Store DB entry
    try:
        cursor = slr_entry.to_db(cursor=cursor)
        slr_id = slr_entry.service_link_record_id
        db.connection.commit()
    except IntegrityError as exp:
        # Duplicate primary key: report a 409 conflict instead of a generic 500.
        error_title = "Service Link ID already exists"
        error_detail = str(exp.args[1])
        logger.error(error_title + " - " + error_detail)
        db.connection.rollback()
        logger.debug('--> rollback')
        raise ApiError(code=409, title=error_title, detail=error_detail, source=endpoint)
    except Exception as exp:
        logger.error('Slr init commit failed: ' + repr(exp))
        db.connection.rollback()
        logger.debug('--> rollback')
        raise ApiError(code=500, title="Failed to store init SLR", detail=repr(exp), source=endpoint)
    else:
        logger.info('Slr initialized commited')
        logger.debug("slr_entry: " + slr_entry.log_entry)
    return slr_id
def init_slr_sink(account_id=None, slr_id=None, pop_key=None, endpoint="init_slr_sink()"):
    """
    Create and store an initial Service Link Record for a Sink service.

    Unlike the Source variant, a proof-of-possession key (pop_key) is
    required and stored with the record.

    :param account_id: id of the account that owns the SLR (coerced to str)
    :param slr_id: id for the new Service Link Record (coerced to str)
    :param pop_key: proof-of-possession key (coerced to dict)
    :param endpoint: caller description reported as the error source in ApiError
    :return: id of the stored Service Link Record
    :raises AttributeError: if a required parameter is not provided
    :raises TypeError: if a parameter cannot be coerced to its expected type
    :raises ApiError: 409 if the SLR id already exists, 500 on any other DB failure
    """
    logger.info("init_slr_sink()")
    if account_id is None:
        raise AttributeError("Provide account_id as parameter")
    if slr_id is None:
        raise AttributeError("Provide slr_id as parameter")
    if pop_key is None:
        raise AttributeError("Provide pop_key as parameter")

    def _coerce(value, target_type, param_name):
        # Best-effort coercion mirroring the per-parameter type checks.
        if isinstance(value, target_type):
            return value
        try:
            return target_type(value)
        except Exception:
            raise TypeError(param_name + " MUST be " + target_type.__name__ + ", not " + str(type(value)))

    account_id = _coerce(account_id, str, "account_id")
    slr_id = _coerce(slr_id, str, "slr_id")
    pop_key = _coerce(pop_key, dict, "pop_key")
    endpoint = _coerce(endpoint, str, "endpoint")

    logger.info("Initing SLR")
    try:
        link_record = ServiceLinkRecord(
            service_link_record_id=slr_id,
            account_id=account_id,
            pop_key=pop_key
        )
    except Exception as exc:
        logger.error('Could not create Service Link Record object: ' + repr(exc))
        raise ApiError(code=500, title="Failed to create Service Link Record object", detail=repr(exc), source=endpoint)
    logger.info("Service Link Record entry created")
    logger.debug(link_record.log_entry)

    # Acquire a database cursor.
    try:
        cursor = get_db_cursor()
    except Exception as exc:
        logger.error('Could not get database cursor: ' + repr(exc))
        raise ApiError(code=500, title="Failed to get database cursor", detail=repr(exc), source=endpoint)

    # Insert the record and commit; roll back on any failure.
    try:
        cursor = link_record.to_db(cursor=cursor)
        slr_id = link_record.service_link_record_id
        db.connection.commit()
    except IntegrityError as exc:
        # Duplicate primary key: report a 409 conflict instead of a generic 500.
        error_title = "Service Link ID already exists"
        error_detail = str(exc.args[1])
        logger.error(error_title + " - " + error_detail)
        db.connection.rollback()
        logger.debug('--> rollback')
        raise ApiError(code=409, title=error_title, detail=error_detail, source=endpoint)
    except Exception as exc:
        logger.error('Slr init commit failed: ' + repr(exc))
        db.connection.rollback()
        logger.debug('--> rollback')
        raise ApiError(code=500, title="Failed to store init SLR", detail=repr(exc), source=endpoint)
    logger.info('Slr initialized commited')
    logger.debug("slr_entry: " + link_record.log_entry)
    return slr_id
def get_slr_record(account_id=None, slr_id=None, endpoint="get_slr_record()"):
    """
    Fetch a Service Link Record from the database.

    :param account_id: id of the account that owns the SLR (coerced to str)
    :param slr_id: id of the Service Link Record to fetch (coerced to str)
    :param endpoint: caller description reported as the error source in ApiError
    :return: populated ServiceLinkRecord object
    :raises AttributeError: if account_id or slr_id is not provided
    :raises TypeError: if a parameter cannot be coerced to str
    :raises ApiError: 500 if the record object or DB cursor cannot be created
    :raises Exception: re-raises whatever from_db() raised if the lookup fails
    """
    logger.info("get_slr_record()")
    if account_id is None:
        raise AttributeError("Provide account_id as parameter")
    if slr_id is None:
        raise AttributeError("Provide slr_id as parameter")
    if not isinstance(account_id, str):
        try:
            account_id = str(account_id)
        except Exception:
            raise TypeError("account_id MUST be str, not " + str(type(account_id)))
    if not isinstance(slr_id, str):
        try:
            slr_id = str(slr_id)
        except Exception:
            raise TypeError("slr_id MUST be str, not " + str(type(slr_id)))
    if not isinstance(endpoint, str):
        try:
            endpoint = str(endpoint)
        except Exception:
            raise TypeError("endpoint MUST be str, not " + str(type(endpoint)))
    logger.info("Creating ServiceLinkRecord object")
    try:
        slr_entry = ServiceLinkRecord(
            service_link_record_id=slr_id,
            account_id=account_id
        )
    except Exception as exp:
        logger.error('Could not create Service Link Record object: ' + repr(exp))
        raise ApiError(code=500, title="Failed to create Service Link Record object", detail=repr(exp), source=endpoint)
    else:
        logger.info("Service Link Record entry created")
        logger.debug(slr_entry.log_entry)
    # Get DB cursor
    try:
        cursor = get_db_cursor()
    except Exception as exp:
        logger.error('Could not get database cursor: ' + repr(exp))
        raise ApiError(code=500, title="Failed to get database cursor", detail=repr(exp), source=endpoint)
    logger.info("Get ServiceLinkRecord from database")
    try:
        cursor = slr_entry.from_db(cursor=cursor)
    except Exception as exp:
        error_title = "Could not get ServiceLinkRecord from database"
        # BUGFIX: was str(exp.message) -- the .message attribute does not exist
        # on Python 3 exceptions, so the handler itself raised AttributeError
        # and masked the real error. str(exp) is portable.
        error_detail = str(exp)
        logger.error(error_title + " - " + error_detail)
        raise
    else:
        logger.info('Got ServiceLinkRecord from database')
        logger.debug("slr_entry: " + slr_entry.log_entry)
    return slr_entry
def sign_slr(account_id=None, slr_payload=None, endpoint="sign_slr(account_id, slr_payload, endpoint)"):
    """
    Sign a Service Link Record payload with the account owner's key.

    Fetches the account owner's public key, embeds it into the payload under
    'cr_keys', then signs the payload as a JWS via the blackbox module.

    :param account_id: id of the account whose key signs the record
    :param slr_payload: dict payload of the SLR (mutated: 'cr_keys' is added)
    :param endpoint: caller description reported as the error source in ApiError
    :return: signed SLR as a dict (parsed JWS)
    :raises AttributeError: if account_id or slr_payload is not provided
    :raises ApiError: 500 if key fetching, signing or JSON conversion fails
    """
    if account_id is None:
        raise AttributeError("Provide account_id as parameter")
    if slr_payload is None:
        raise AttributeError("Provide slr_payload as parameter")
    logger.info("Signing Service Link Record")
    # Get Account owner's public key
    try:
        account_public_key, account_kid = get_account_public_key(account_id=account_id)
        account_public_key_log_entry = account_public_key
        account_public_key = json.loads(account_public_key)
    except Exception as exp:
        logger.error("Could not get account owner's public key: " + repr(exp))
        raise ApiError(code=500, title="Failed to get account owner's public key", detail=repr(exp), source=endpoint)
    else:
        logger.info("Account owner's public key and kid fetched")
        logger.debug("account_public_key: " + account_public_key_log_entry)
    # Fill Account key to cr_keys
    try:
        slr_payload['cr_keys'] = [account_public_key]
    except Exception as exp:
        logger.error("Could not fill account owner's public key to cr_keys: " + repr(exp))
        raise ApiError(code=500, title="Failed to fill account owner's public key to cr_keys", detail=repr(exp), source=endpoint)
    else:
        logger.info("Account owner's public key added to cr_keys")
    # Sign slr (unused 'slr_signed = {}' placeholder removed)
    try:
        slr_signed_json = generate_and_sign_jws(account_id=account_id, jws_payload=json.dumps(slr_payload))
    except Exception as exp:
        logger.error('Could not create Service Link Record: ' + repr(exp))
        raise ApiError(code=500, title="Failed to create Service Link Record", detail=repr(exp), source=endpoint)
    else:
        logger.info('Service Link Record created and signed')
        logger.debug("slr_payload: " + json.dumps(slr_payload))
        logger.debug("slr_signed_json: " + slr_signed_json)
    try:
        # BUGFIX: log message said "CSR" (copy-paste); this converts the SLR.
        logger.info("Converting signed SLR from json to dict")
        slr_signed_dict = json.loads(slr_signed_json)
    except Exception as exp:
        logger.error('Could not convert signed SLR from json to dict: ' + repr(exp))
        raise ApiError(code=500, title="Failed to convert signed SLR from json to dict", detail=repr(exp), source=endpoint)
    else:
        logger.info('Converted signed SLR from json to dict')
        logger.debug('slr_signed_dict: ' + json.dumps(slr_signed_dict))
    return slr_signed_dict
def sign_ssr(account_id=None, ssr_payload=None, endpoint="sign_ssr(account_id, ssr_payload, endpoint)"):
    """
    Sign a Service Link Status Record (SSR) payload with the account owner's key.

    :param account_id: id of the Account that owns the record
    :param ssr_payload: SSR payload (dict) to sign
    :param endpoint: name of the calling endpoint, used as ApiError source
    :return: signed SSR as a dict
    :raises AttributeError: if a mandatory parameter is missing
    :raises ApiError: if signing or the json -> dict conversion fails
    """
    if account_id is None:
        raise AttributeError("Provide account_id as parameter")
    if ssr_payload is None:
        raise AttributeError("Provide ssr_payload as parameter")
    logger.info("Signing Service Link Status Record")

    # Sign ssr
    # NOTE: log/error texts previously referred to "CSR"/"SLR"/"Service
    # Link Record" -- corrected to match this function's record type.
    try:
        ssr_signed_json = generate_and_sign_jws(account_id=account_id, jws_payload=json.dumps(ssr_payload))
    except Exception as exp:
        logger.error('Could not create Service Link Status Record: ' + repr(exp))
        raise ApiError(code=500, title="Failed to create Service Link Status Record", detail=repr(exp), source=endpoint)
    else:
        logger.info('Service Link Status Record created and signed')
        logger.debug("ssr_payload: " + json.dumps(ssr_payload))
        logger.debug("ssr_signed_json: " + ssr_signed_json)

    try:
        logger.info("Converting signed SSR from json to dict")
        ssr_signed_dict = json.loads(ssr_signed_json)
    except Exception as exp:
        logger.error('Could not convert signed SSR from json to dict: ' + repr(exp))
        raise ApiError(code=500, title="Failed to convert signed SSR from json to dict", detail=repr(exp), source=endpoint)
    else:
        logger.info('Converted signed SSR from json to dict')
        logger.debug('ssr_signed_dict: ' + json.dumps(ssr_signed_dict))
        return ssr_signed_dict
def store_slr_and_ssr(slr_entry=None, ssr_entry=None, endpoint="store_slr_and_ssr(slr_entry, ssr_entry, endpoint)"):
    """
    Store a Service Link Record and its Service Link Status Record in one
    database transaction.

    :param slr_entry: ServiceLinkRecord object to update
    :param ssr_entry: ServiceLinkStatusRecord object to insert; its
        service_link_records_id is set from slr_entry before the insert
    :param endpoint: name of the calling endpoint, used as ApiError source
        (default previously misreported the function as sign_ssr)
    :return: tuple (slr_entry, ssr_entry) as committed
    :raises AttributeError: if a mandatory parameter is missing
    :raises ApiError: if the database operation fails (the transaction is
        rolled back first)
    """
    if slr_entry is None:
        raise AttributeError("Provide slr_entry as parameter")
    if ssr_entry is None:
        raise AttributeError("Provide ssr_entry as parameter")

    # Get DB cursor
    try:
        cursor = get_db_cursor()
    except Exception as exp:
        logger.error('Could not get database cursor: ' + repr(exp))
        raise ApiError(code=500, title="Failed to get database cursor", detail=repr(exp), source=endpoint)

    try:
        # SLR update
        cursor = slr_entry.update_db(cursor=cursor)
        # Link the SSR to the SLR via the SLR primary key
        ssr_entry.service_link_records_id = slr_entry.id
        # SSR Insert
        cursor = ssr_entry.to_db(cursor=cursor)
        db.connection.commit()
    except Exception as exp:
        logger.error('Slr and Ssr commit failed: ' + repr(exp))
        # Keep the database consistent: undo the partial transaction
        db.connection.rollback()
        logger.error('--> rollback')
        logger.error("slr_entry: " + slr_entry.log_entry)
        logger.error("ssr_entry: " + ssr_entry.log_entry)
        raise ApiError(code=500, title="Failed to store slr and ssr", detail=repr(exp), source=endpoint)
    else:
        logger.debug('Slr and Ssr commited')
        logger.debug("slr_entry: " + slr_entry.log_entry)
        logger.debug("ssr_entry: " + ssr_entry.log_entry)
        return slr_entry, ssr_entry
def store_ssr(ssr_entry=None, endpoint="store_ssr()"):
    """
    Store a Service Link Status Record in the database.

    :param ssr_entry: ServiceLinkStatusRecord object to insert
    :param endpoint: name of the calling endpoint, used as ApiError source
    :return: the committed ssr_entry (for consistency with
        store_slr_and_ssr)
    :raises AttributeError: if ssr_entry is missing
    :raises ApiError: if the database operation fails (rolled back first)
    """
    if ssr_entry is None:
        raise AttributeError("Provide ssr_entry as parameter")

    # Get DB cursor
    try:
        cursor = get_db_cursor()
    except Exception as exp:
        logger.error('Could not get database cursor: ' + repr(exp))
        raise ApiError(code=500, title="Failed to get database cursor", detail=repr(exp), source=endpoint)

    try:
        # SSR Insert
        cursor = ssr_entry.to_db(cursor=cursor)
        db.connection.commit()
    except Exception as exp:
        # NOTE: messages previously said "Slr and Ssr" although only the
        # Ssr is handled here.
        logger.error('Ssr commit failed: ' + repr(exp))
        db.connection.rollback()
        logger.error('--> rollback')
        logger.error("ssr_entry: " + ssr_entry.log_entry)
        raise ApiError(code=500, title="Failed to store ssr", detail=repr(exp), source=endpoint)
    else:
        logger.debug('Ssr commited')
        logger.debug("ssr_entry: " + ssr_entry.log_entry)
        return ssr_entry
| |
<filename>Betsy/Betsy/rule_engine.py
"""
Functions:
run_pipeline
run_module
"""
# _make_file_refresher
#
# _get_available_input_combinations
# _hash_module
# _make_hash_units
# _is_module_output_complete
# _format_pipeline
# _get_node_name
#
# _write_parameter_file
# _read_parameter_file
# Version of BETSY's result/cache format.  NOTE(review): presumably bumped
# when the on-disk layout changes so stale results are not reused -- confirm
# against the code that reads it.
VERSION = 7

# Marker files written into a module's output directory to track the state
# of a run (complete / reused / still running) and the saved parameters.
FINISHED_FILE = "finished.txt"
LAST_ACCESSED_FILE = "last_accessed.txt"
IN_PROGRESS_FILE = "in_progress.txt"
BETSY_PARAMETER_FILE = "BETSY_parameters.txt"

# When running a module, whether to make sure it starts running in an
# empty directory. Can turn off for debugging.
CLEAN_UP_PATH_FOR_NEW_MODULE = True
#CLEAN_UP_PATH_FOR_NEW_MODULE = False # DEBUGGING ONLY!

# When True, run_pipeline prints its scheduling decisions and dumps a
# debug network image when MAX_ITER is exceeded.
DEBUG_RUN_PIPELINE = False

# strftime/strptime format for timestamps in the files above.
TIME_FMT = "%a %b %d %H:%M:%S %Y"

# Module-level snapshot of run_pipeline's node pool; updated on every
# iteration for post-mortem debugging.
DEBUG_POOL = {}
def run_pipeline(
network, in_datas, custom_attributes, user_options, paths, user=None,
job_name='', clean_up=True, num_cores=8, verbosity=0):
# Run the pipeline that is indicated by the network. Returns a
# tuple of:
# - dictionary of node_id -> IdentifiedDataNode
# - output filename
# Returns None if not successful.
#
# Can raise an exception if there was an error in one of the
# modules, or if there is no path through the network (which
# probably indicates an inferencing error).
#
# in_datas List of IdentifiedDataNodes.
# user_attributes From --dattr. AttributeDef
# user_options From --mattr. OptionDef
# paths List of (node_ids, start_ids).
global DEBUG_POOL
import os
import getpass
import logging
import time
from genomicode import parselib
from Betsy import bie3
from Betsy import config
user = user or getpass.getuser()
output_path = config.CACHE_PATH
if not os.path.exists(output_path):
os.mkdir(output_path)
# Is this thread-safe?
LOG_FILENAME = os.path.join(output_path, 'traceback.txt')
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
# Make a list of the valid node_ids and transitions in the pipeline.
x = bie3._merge_paths(paths)
path_ids, path_transitions, x = x
# Make a list of all the nodes provided by the user.
start_nodes = [] # list of (node_id, IdentifiedDataNode).
for p in paths:
for index, id_ in enumerate(p.start_ids):
# id_ may be None if it's just not used in this pipeline.
# Ignore it.
if id_ is None:
continue
node = in_datas[index]
x = id_, node
if x not in start_nodes:
start_nodes.append(x)
# Create a stack with all the start nodes. Each member of the
# stack can be a tuple of:
# 1. (IdentifiedDataNode, node_id, None, transitions)
# 2. (ModuleNode, node_id, None, transitions)
# 3. (ModuleNode, node_id, antecedent_ids, transitions)
# Keep track of which set of antecedents to run.
# transitions is a dictionary of (node_id, next_node_id) -> 1
# indicating which transitions were taken to get to this node.
stack = []
for (node_id, node) in start_nodes:
x = node, node_id, None, {}
stack.append(x)
# Keep track of nodes that have already been generated.
# BUG: The values should technically be a list. Since the nodes
# in the network may not be atomic, it is possible that multiple
# different atomic DataNodes can be assigned to the same node_id.
# But usually, we expect just 1.
pool = {} # dict of node_id -> IdentifiedDataNode
# Keep track of the transitions.
transition_cache = {} # dict of node_id -> dict of transitions
# Cache the module node_ids that aren't ready to be run. If all
# modules on the stack are not ready, then something is wrong and
# quit. Otherwise, we would be stuck in an infinite loop.
not_ready = {}
# Track the total analysis time.
total_time = 0
#MAX_ITER = 10000
MAX_ITER = len(path_ids) * 5
it = 0
while stack:
DEBUG_POOL = pool
it += 1
if DEBUG_RUN_PIPELINE and it >= MAX_ITER:
debug_file = "broken.png"
print "Saving network: %s" % debug_file
start_ids = [x[0] for x in start_nodes]
done_ids = [x for x in pool if x not in start_ids]
all_ids = [x for x in path_ids
if x not in done_ids and x not in start_ids]
bie3.plot_network_gv(
debug_file, network, options=user_options, bold=path_ids,
bold_transitions=path_transitions, highlight_yellow=all_ids,
highlight_green=start_ids, highlight_orange=done_ids,
verbose=True)
# Make an error message to try to diagnose problem.
if it >= MAX_ITER:
# Look for all the modules, and print out why each of them
# can't be run.
msg = []
p = msg.append
for x in stack:
node, node_id = x[:2]
if not isinstance(node, bie3.ModuleNode):
continue
x = _get_available_input_combinations(
network, node_id, custom_attributes, pool,
path_transitions)
all_antecedent_ids, not_available = x
assert not all_antecedent_ids, (
"Too many iterations, but modules left to be run. "
"Increase MAX_ITER")
assert not_available
x = [x for x in not_available if x[0] in path_ids]
p("Can't run [%d] %s. Missing:" % (node_id, node.name))
for i, (input_id, reason) in enumerate(x):
n = network.nodes[input_id]
p("%d. [%d] %s" % (i+1, input_id, n.datatype.name))
for name in sorted(n.attributes):
p(" %s=%s" % (name, n.attributes[name]))
msg = "\n".join(msg)
assert it < MAX_ITER, "Too many iterations (%d)\n%s" % (it, msg)
# Make sure we're not stuck in an infinite loop.
# 1. Only modules on the stack. AND
# 2. They are all not_ready.
x = [x for x in stack if isinstance(x[0], bie3.ModuleNode)]
if len(x) == len(stack): # only modules.
# Make sure there are modules ready to be checked.
x = [x for x in x if x[1] not in not_ready]
assert x, "Inference error: No more nodes to run."
if DEBUG_RUN_PIPELINE:
print "[%d] Stack:" % it
for x in stack:
name = bie3.get_node_name(x[0])
print " %s [%d]" % (name, x[1])
#print " %s" % x[3]
node, node_id, more_info, transitions = stack.pop()
if DEBUG_RUN_PIPELINE:
print "Processing: %s [%d]." % (bie3.get_node_name(node), node_id)
if node_id not in path_ids: # ignore if not in pipeline
if DEBUG_RUN_PIPELINE:
print "Skipping. Not in path."
continue
# If this is the last node, then we're done.
if node_id == 0:
pool[node_id] = node
if DEBUG_RUN_PIPELINE:
print "Root node. Done."
break
# If this node has already been run, ignore.
if node_id in pool:
if DEBUG_RUN_PIPELINE:
print "Already run. Skip."
continue
if isinstance(node, bie3.IdentifiedDataNode):
# Add to the pool.
pool[node_id] = node
# Add the next modules into the stack, if not already there.
on_stack = [x[1] for x in stack]
add_to_stack = []
for next_id in network.transitions[node_id]:
next_node = network.nodes[next_id]
assert isinstance(next_node, bie3.ModuleNode)
if next_id in on_stack:
# This can happen if:
# GEOSignalFile -> convert_geo_to_signal
# GEOPlatformAnnotationFile ->
#
# After the first one is processed,
# convert_geo_to_signal is added onto the stack,
# but cannot be processed yet and gets reordered
# onto the bottom of the stack. After the second
# one is processed, make sure it goes back onto
# the top of the stack.
# Remove it from the stack, so we can add this
# node back onto the top.
stack = [x for x in stack if x[1] != next_id]
# Module updates the transitions based on which set of
# antecedent IDs are used.
add_to_stack.append((next_node, next_id, None, transitions))
if DEBUG_RUN_PIPELINE:
print "Adding to stack: %s [%d]." % (
bie3.get_node_name(next_node), next_id)
# Execute the modules in alphabetical order. So push them
# onto the stack in reverse alphabetical order.
schwartz = [(bie3.get_node_name(x[0]), x) for x in add_to_stack]
schwartz = list(schwartz) # to shut up pychecker (no attrib sort)
schwartz.sort()
schwartz.reverse()
add_to_stack = [x[-1] for x in schwartz]
stack.extend(add_to_stack)
elif isinstance(node, bie3.ModuleNode) and more_info is None:
# If the input data for this module doesn't exist, then
# just try it again later.
x = _get_available_input_combinations(
network, node_id, custom_attributes, pool, path_transitions)
all_antecedent_ids, not_available = x
if not all_antecedent_ids:
# No sets of inputs are ready to run. Put back to the
# bottom of the stack and try again later.
stack.insert(0, (node, node_id, None, transitions))
if DEBUG_RUN_PIPELINE:
print "Not ready to run yet. Will try again later."
for x in not_available:
input_id, reason = x
if input_id not in path_ids:
continue
n = bie3.get_node_name(network.nodes[input_id])
print " %s %s [%d]." % (reason, n, input_id)
else:
for antecedent_ids in all_antecedent_ids:
assert len(node.in_datatypes) == len(antecedent_ids)
for x in find_out_nodes(
network, path_ids, antecedent_ids, node_id, pool):
out_id, out_data_node = x
more_info = antecedent_ids, out_id, out_data_node
x = node, node_id, more_info, transitions
stack.append(x)
if DEBUG_RUN_PIPELINE:
print (
"%s ready to run. "
"Adding with antecedent IDs %s and "
"out_id %s." % (
node_id, antecedent_ids, out_id))
elif isinstance(node, bie3.ModuleNode):
| |
<filename>pcg_gazebo/simulation/properties/collision.py<gh_stars>1-10
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import collections.abc

from ...log import PCG_ROOT_LOGGER
from ...parsers.sdf import create_sdf_element
from .geometry import Geometry
from .pose import Pose
class Collision(object):
def __init__(self,
name='collision',
pose=[0, 0, 0, 0, 0, 0],
geometry_type=None,
geometry_args=None,
mu=None,
mu2=None,
friction=None,
friction2=None,
slip1=None,
slip2=None,
rolling_friction=None,
fdir1=None,
max_contacts=None,
soft_cfm=None,
soft_erp=None,
kp=None,
kd=None,
max_vel=None,
min_depth=None,
split_impulse=None,
split_impulse_penetration_threshold=None,
restitution_coefficient=None,
threshold=None,
collide_bitmask=None,
collide_without_contact=None,
collide_without_contact_bitmask=None,
category_bitmask=None,
poissons_ratio=None,
elastic_modulus=None):
self._sdf_collision = create_sdf_element(
'collision', 'link')
self._include_in_sdf = dict(
max_contacts=True,
pose=True,
friction=False,
bounce=False,
contact=False
)
self._geometry = Geometry(link_element='collision')
self._pose = Pose()
# Setting the input parameters
self.name = name
self.pose = pose
if geometry_type is not None and geometry_args is not None:
if geometry_type == 'cylinder':
self.set_cylinder_as_geometry(**geometry_args)
elif geometry_type == 'sphere':
self.set_sphere_as_geometry(**geometry_args)
elif geometry_type == 'mesh':
self.set_mesh_as_geometry(**geometry_args)
elif geometry_type == 'box':
self.set_box_as_geometry(**geometry_args)
elif geometry_type == 'plane':
self.set_plane_as_geometry(**geometry_args)
if max_contacts is not None:
self.max_contacts = max_contacts
self.set_ode_friction_params(
mu=mu,
mu2=mu2,
slip1=slip1,
slip2=slip2,
fdir1=fdir1
)
self.set_bullet_friction_params(
friction=friction,
friction2=friction2,
fdir1=fdir1,
rolling_friction=rolling_friction
)
self.set_ode_contact_params(
soft_cfm=soft_cfm,
soft_erp=soft_erp,
kp=kp,
kd=kd,
max_vel=max_vel,
min_depth=min_depth
)
self.set_bullet_contact_params(
soft_cfm, soft_erp, kp,
kd, split_impulse,
split_impulse_penetration_threshold)
self.set_bounce_params(
restitution_coefficient=restitution_coefficient,
threshold=threshold)
self.set_contact_params(
collide_bitmask=collide_bitmask,
collide_without_contact=collide_without_contact,
collide_without_contact_bitmask=collide_without_contact_bitmask,
category_bitmask=category_bitmask,
poissons_ratio=poissons_ratio,
elastic_modulus=elastic_modulus)
    def __str__(self):
        """Return the collision as pretty-printed SDF XML."""
        return self.to_sdf().to_xml_as_str(pretty_print=True)
    @property
    def sdf(self):
        """Underlying SDF ``<collision>`` element wrapped by this object."""
        return self._sdf_collision
    @property
    def name(self):
        """Name of the collision element (stored on the SDF element)."""
        return self._sdf_collision.name
    @name.setter
    def name(self, value):
        """Set the collision element's name."""
        self._sdf_collision.name = value
    @property
    def max_contacts(self):
        """Maximum number of contacts as stored on the SDF element."""
        return self._sdf_collision.max_contacts.value
@max_contacts.setter
def max_contacts(self, max_contacts):
self._sdf_collision.max_contacts = max_contacts
PCG_ROOT_LOGGER.info(
'Set max. contacts, collision={}, max_contacts={}'.format(
self.name, max_contacts))
    @property
    def pose(self):
        """Pose of the collision element (a ``Pose`` instance)."""
        return self._pose
@pose.setter
def pose(self, vec):
if isinstance(vec, Pose):
self._pose = vec
else:
assert isinstance(vec, collections.Iterable), \
'Input vector must be iterable'
assert len(vec) == 6 or len(vec) == 7, \
'Input vector must have either 6 or 7 elements'
for item in vec:
assert isinstance(item, float) or isinstance(item, int), \
'Each pose element must be either a float or an integer'
self._pose = Pose(pos=vec[0:3], rot=vec[3::])
    @property
    def geometry(self):
        """``Geometry`` instance associated with this collision."""
        return self._geometry
    def get_bounds(self):
        """Return the geometry bounds transformed by the collision pose.

        Returns the geometry's bounds dict (keys ``lower_x/y/z`` and
        ``upper_x/y/z``) rotated and translated by this collision's pose,
        or ``None`` if the geometry reports no bounds.
        """
        bounds = self._geometry.get_bounds()
        if bounds is not None:
            # Apply collision element transformations
            lower = [bounds['lower_x'], bounds['lower_y'], bounds['lower_z']]
            upper = [bounds['upper_x'], bounds['upper_y'], bounds['upper_z']]
            # NOTE(review): the lower and upper corners are rotated
            # independently without re-sorting per-axis min/max, so for a
            # non-identity rotation "lower" can exceed "upper" -- confirm
            # whether callers rely on axis-aligned bounds.
            lower = self._pose.quat.rotate(lower)
            upper = self._pose.quat.rotate(upper)
            bounds['lower_x'] = lower[0] + self.pose.x
            bounds['upper_x'] = upper[0] + self.pose.x
            bounds['lower_y'] = lower[1] + self.pose.y
            bounds['upper_y'] = upper[1] + self.pose.y
            bounds['lower_z'] = lower[2] + self.pose.z
            bounds['upper_z'] = upper[2] + self.pose.z
        return bounds
def get_center(self):
center = self._geometry.get_center()
if center is not None:
# Transform center position wrt collision's pose
center = self._pose.quat.rotate(center)
center[0] += self.pose.x
center[1] += self.pose.y
center[2] += self.pose.z
return center
    def set_geometry(self, name, params):
        """Replace the geometry.

        :param name: geometry type, must be in ``Geometry._GEO_TYPES``
        :param params: dict of keyword arguments for the ``Geometry``
            constructor
        """
        assert name in Geometry._GEO_TYPES, \
            'Invalid geometry type, options={}'.format(Geometry._GEO_TYPES)
        self._geometry = Geometry(name, **params)
def set_box_as_geometry(self, size=[1, 1, 1]):
self._geometry.set_box(size)
    def set_sphere_as_geometry(self, radius):
        """Set the collision geometry to a sphere of the given radius."""
        self._geometry.set_sphere(radius)
    def set_cylinder_as_geometry(self, length, radius):
        """Set the collision geometry to a cylinder of given length and radius."""
        self._geometry.set_cylinder(radius=radius, length=length)
def set_mesh_as_geometry(self, mesh, scale=[1, 1, 1], load_mesh=True):
self._geometry.set_mesh(mesh, scale=scale, load_mesh=load_mesh)
    def set_plane_as_geometry(self, size, normal):
        """Set the collision geometry to a plane with given size and normal."""
        self._geometry.set_plane(size=size, normal=normal)
    def enable_property(self, name):
        """Mark the named optional SDF block to be included in the output."""
        assert name in self._include_in_sdf, 'Invalid property name'
        self._include_in_sdf[name] = True
    def disable_property(self, name):
        """Mark the named optional SDF block to be excluded from the output."""
        assert name in self._include_in_sdf, 'Invalid property name'
        self._include_in_sdf[name] = False
    def using_property(self, name):
        """Return whether the named optional SDF block is enabled."""
        assert name in self._include_in_sdf, 'Invalid property name'
        return self._include_in_sdf[name]
def set_contact_params(
self,
collide_bitmask,
collide_without_contact,
collide_without_contact_bitmask,
category_bitmask,
poissons_ratio,
elastic_modulus,
ode_parameters=None,
bullet_parameters=None):
try:
if any(
x is not None for x in [
collide_bitmask,
collide_without_contact,
collide_without_contact_bitmask,
category_bitmask,
poissons_ratio,
elastic_modulus]):
if self.sdf.surface is None:
self.sdf.surface = create_sdf_element('surface')
if self.sdf.surface.contact is None:
self.sdf.surface.contact = create_sdf_element('contact')
if collide_bitmask is not None:
self.sdf.surface.contact.collide_bitmask = collide_bitmask
if collide_without_contact is not None:
self.sdf.surface.contact.collide_without_contact = \
collide_without_contact
if collide_without_contact_bitmask is not None:
self.sdf.surface.contact.collide_without_contact_bitmask = \
collide_without_contact_bitmask
if category_bitmask is not None:
self.sdf.surface.contact.category_bitmask = category_bitmask
if poissons_ratio is not None:
self.sdf.surface.contact.poissons_ratio = poissons_ratio
if elastic_modulus is not None:
self.sdf.surface.contact.elastic_modulus = elastic_modulus
if ode_parameters is not None and isinstance(ode_parameters, dict):
if not self.set_ode_contact_params(**ode_parameters):
return False
if bullet_parameters is not None and isinstance(
bullet_parameters, dict):
if not self.set_bullet_contact_params(**bullet_parameters):
return False
if any(
x is not None for x in [
collide_bitmask,
collide_without_contact,
collide_without_contact_bitmask,
category_bitmask,
poissons_ratio,
elastic_modulus,
ode_parameters,
bullet_parameters]):
self.enable_property('contact')
return True
except AssertionError as ex:
PCG_ROOT_LOGGER.error('Error setting bounce parameters, '
'message={}'.format(ex))
return False
    def get_bounce_param(self, tag):
        """Return a bounce surface parameter value, or ``None`` if unset.

        :param tag: 'restitution_coefficient' or 'threshold'
        """
        assert tag in ['restitution_coefficient', 'threshold'], \
            'Invalid bounce parameter name'
        try:
            param = getattr(self.sdf.surface.bounce, tag).value
        # Broad catch: surface or bounce may be None along the chain
        except BaseException:
            param = None
        return param
def set_bounce_params(self, restitution_coefficient=None, threshold=None):
try:
if any(
x is not None for x in [
restitution_coefficient,
threshold]):
if self.sdf.surface is None:
self.sdf.surface = create_sdf_element('surface')
if self.sdf.surface.bounce is None:
self.sdf.surface.bounce = create_sdf_element('bounce')
if restitution_coefficient is not None:
self.sdf.surface.bounce.restitution_coefficient = \
restitution_coefficient
if threshold is not None:
self.sdf.surface.bounce.threshold = threshold
if any(
x is not None for x in [
restitution_coefficient,
threshold]):
self.enable_property('bounce')
PCG_ROOT_LOGGER.info('Set bounce parameters, SDF={}'.format(
self.sdf.surface.bounce))
return True
except AssertionError as ex:
PCG_ROOT_LOGGER.error('Error setting bounce parameters, '
'message={}'.format(ex))
return False
def set_ode_friction_params(
self,
mu=None,
mu2=None,
slip1=None,
slip2=None,
fdir1=None):
try:
if any(x is not None for x in [mu, mu2, slip1, slip2, fdir1]):
if self.sdf.surface is None:
self.sdf.surface = create_sdf_element('surface')
if self.sdf.surface.friction is None:
self.sdf.surface.friction = create_sdf_element(
'friction', 'surface')
if self.sdf.surface.friction.ode is None:
ode = create_sdf_element('ode', 'collision')
self.sdf.surface.friction.ode = ode
if mu is not None:
self.sdf.surface.friction.ode.mu = mu
if mu2 is not None:
self.sdf.surface.friction.ode.mu2 = mu2
if slip1 is not None:
self.sdf.surface.friction.ode.slip1 = slip1
if slip2 is not None:
self.sdf.surface.friction.ode.slip2 = slip2
if fdir1 is not None:
self.sdf.surface.friction.ode.fdir1 = fdir1
if any(x is not None for x in [mu, mu2, slip1, slip2, fdir1]):
self.enable_property('friction')
PCG_ROOT_LOGGER.info(
'Set ODE friction parameters, SDF={}'.format(
self.sdf.surface.friction.ode))
return True
except AssertionError as ex:
PCG_ROOT_LOGGER.error('Error setting ODE friction parameters, '
'message={}'.format(ex))
return False
    def get_ode_friction_param(self, tag):
        """Return an ODE friction parameter value, or ``None`` if unset.

        :param tag: one of 'mu', 'mu2', 'slip1', 'slip2', 'fdir1'
        """
        assert tag in ['mu', 'mu2', 'slip1', 'slip2', 'fdir1'], \
            'Invalid ODE friction parameter name'
        try:
            param = getattr(self.sdf.surface.friction.ode, tag).value
        # Broad catch: surface/friction/ode may be None along the chain
        except BaseException:
            param = None
        return param
def set_ode_contact_params(
self,
soft_cfm=None,
soft_erp=None,
kp=None,
kd=None,
max_vel=None,
min_depth=None):
try:
if any(x is not None for x in [soft_cfm, soft_erp, kp, kd, max_vel,
min_depth]):
if self.sdf.surface is None:
self.sdf.surface = create_sdf_element('surface')
if self.sdf.surface.contact is None:
self.sdf.surface.contact = create_sdf_element('contact')
if self.sdf.surface.contact.ode is None:
ode = create_sdf_element('ode', 'contact')
self.sdf.surface.contact.ode = ode
if soft_cfm is not None:
self.sdf.surface.contact.ode.soft_cfm = soft_cfm
if soft_erp is not None:
self.sdf.surface.contact.ode.soft_erp = soft_erp
if kp is not None:
self.sdf.surface.contact.ode.kp = kp
if kd is not None:
self.sdf.surface.contact.ode.kd = kd
if max_vel is not None:
self.sdf.surface.contact.ode.max_vel = max_vel
if min_depth is not None:
self.sdf.surface.contact.ode.min_depth = min_depth
if any(x is not None for x in [soft_cfm, soft_erp, kp, kd, max_vel,
min_depth]):
self.enable_property('contact')
PCG_ROOT_LOGGER.info(
'Set ODE contact parameters, SDF={}'.format(
self.sdf.surface.contact.ode))
return True
except AssertionError as ex:
PCG_ROOT_LOGGER.error('Error setting ODE contact parameters, '
'message={}'.format(ex))
return False
    def get_ode_contact_param(self, tag):
        """Return an ODE contact parameter value, or ``None`` if unset.

        :param tag: one of 'soft_cfm', 'soft_erp', 'kp', 'kd', 'max_vel',
            'min_depth'
        """
        assert tag in ['soft_cfm', 'soft_erp', 'kp', 'kd', 'max_vel',
                       'min_depth'], 'Invalid ODE contact parameter name'
        try:
            param = getattr(self.sdf.surface.contact.ode, tag).value
        # Broad catch: surface/contact/ode may be None along the chain
        except BaseException:
            param = None
        return param
def set_bullet_friction_params(self, friction=None, friction2=None,
fdir1=None, rolling_friction=None):
try:
if any(
x is not None for x in [
friction,
friction2,
fdir1,
rolling_friction]):
if self.sdf.surface is None:
self.sdf.surface = create_sdf_element('surface')
if self.sdf.surface.friction is None:
self.sdf.surface.friction = create_sdf_element(
'friction', 'surface')
if self.sdf.surface.friction.bullet is None:
bullet = create_sdf_element('bullet', 'collision')
self.sdf.surface.friction.bullet = bullet
if friction is not None:
self.sdf.surface.friction.bullet.friction = friction
if friction2 is not None:
self.sdf.surface.friction.bullet.friction2 = friction2
if fdir1 is not None:
self.sdf.surface.friction.bullet.fdir1 = fdir1
if rolling_friction is not None:
self.sdf.surface.friction.bullet.rolling_friction = \
rolling_friction
if any(
x is not None for x in [
friction,
friction2,
fdir1,
rolling_friction]):
self.enable_property('friction')
PCG_ROOT_LOGGER.info(
'Set Bullet friction parameters, SDF={}'.format(
self.sdf.surface.friction.bullet))
return True
except AssertionError as ex:
PCG_ROOT_LOGGER.error('Error setting Bullet friction parameters, '
'message={}'.format(ex))
return False
    def get_bullet_friction_param(self, tag):
        """Return a Bullet friction parameter value, or ``None`` if unset.

        :param tag: one of 'friction', 'friction2', 'rolling_friction',
            'fdir1'
        """
        assert tag in ['friction', 'friction2', 'rolling_friction', 'fdir1'], \
            'Invalid Bullet friction parameter name'
        try:
            param = getattr(self.sdf.surface.friction.bullet, tag).value
        # Broad catch: surface/friction/bullet may be None along the chain
        except BaseException:
            param = None
        return param
def set_bullet_contact_params(self, | |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# tkinter example for VLC Python bindings
# Copyright (C) 2015 the VideoLAN team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
#
"""Adapted from a simple example for VLC python bindings using tkinter by
<NAME>. https://github.com/oaubert/python-vlc/blob/master/examples/tkvlc.py
"""
import argparse as ap
from classes import class_names_57
import json
import numpy as np
import os
import pathlib
import platform
from subprocess import PIPE, run
from threading import Thread, Event
import time
import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import askdirectory, askopenfilename
import vlc
# Command-line options for the clip-labeling tool.
parser = ap.ArgumentParser()
# Directory initially shown by the Open File / Open Directory dialogs.
parser.add_argument('--defaultdatasourcepath', '-d',
                    default='C:/Users/Public/fra-gctd-project/Data_Sources/'
                            'ramsey_nj')
# Number of consecutive clips that share a common label (see OnOpenFile).
parser.add_argument('--subsamplerate', '-s', type=int, default=2)
args = parser.parse_args()
class ttkTimer(Thread):
    """Background thread that invokes a callback roughly every ``tick`` seconds.

    Serves the same purpose as a wx.Timer: the thread sleeps on an Event,
    so calling stop() both terminates the loop and wakes it immediately.
    """

    def __init__(self, callback, tick):
        super().__init__()
        self.callback = callback
        self.stopFlag = Event()
        self.tick = tick
        self.iters = 0

    def run(self):
        # Event.wait returns False on timeout (keep ticking) and True
        # once stop() has set the flag (exit the loop).
        while not self.stopFlag.wait(self.tick):
            self.iters += 1
            self.callback()

    def stop(self):
        """Signal the timer thread to exit at its next wakeup."""
        self.stopFlag.set()

    def get(self):
        """Return the number of ticks fired so far."""
        return self.iters
class Player(tk.Frame):
"""The main window has to deal with events.
"""
def __init__(self, parent, title=None):
tk.Frame.__init__(self, parent)
self.parent = parent
if title == None:
title = "tk_vlc"
self.parent.title(title)
# Menu Bar
# File Menu
menubar = tk.Menu(self.parent)
self.parent.config(menu=menubar)
fileMenu = tk.Menu(menubar)
fileMenu.add_command(
label="Open File", underline=0, command=self.OnOpenFile)
fileMenu.add_command(
label="Open Directory", underline=0, command=self.OnOpenDirectory)
fileMenu.add_command(label="Exit", underline=1, command=_quit)
menubar.add_cascade(label="File", menu=fileMenu)
# The second panel holds controls
self.player = None
videolabelpanel = ttk.Frame(self.parent)
self.videolabelvar = tk.StringVar()
self.videolabel = ttk.Label(
master=videolabelpanel, textvariable=self.videolabelvar).pack(
side=tk.TOP)
videolabelpanel.pack(side=tk.TOP)
self.videopanel = ttk.Frame(self.parent)
self.canvas = tk.Canvas(master=self.videopanel).pack(
fill=tk.BOTH, expand=1)
self.videopanel.pack(fill=tk.BOTH, expand=1)
ctrlpanel = ttk.Frame(self.parent)
prev = ttk.Button(ctrlpanel, text="Prev", command=self.OnPrev)
play = ttk.Button(ctrlpanel, text="Play", command=self.OnPlay)
replay = ttk.Button(ctrlpanel, text="Replay", command=self.OnReplay)
next = ttk.Button(ctrlpanel, text="Next", command=self.OnNext)
save = ttk.Button(ctrlpanel, text="Save", command=self.OnSave)
load = ttk.Button(ctrlpanel, text="Load", command=self.OnLoad)
prev.pack(side=tk.LEFT)
play.pack(side=tk.LEFT)
replay.pack(side=tk.LEFT)
next.pack(side=tk.LEFT)
save.pack(side=tk.LEFT)
load.pack(side=tk.LEFT)
ctrlpanel.pack(side=tk.BOTTOM)
ctrlpanel2 = ttk.Frame(self.parent)
self.scale_var = tk.DoubleVar()
self.timeslider_last_val = ""
self.timeslider = tk.Scale(ctrlpanel2, variable=self.scale_var,
command=self.scale_sel,
from_=0, to=1000, orient=tk.HORIZONTAL,
length=500)
self.timeslider.pack(side=tk.BOTTOM, fill=tk.X, expand=1)
self.timeslider_last_update = time.time()
ctrlpanel2.pack(side=tk.BOTTOM, fill=tk.X)
# VLC player controls
self.Instance = vlc.Instance()
self.player = self.Instance.media_player_new()
self.directory_path = None
self.directory_child_filenames = []
self.num_clips = None
self.current_clip = None
self.class_list = class_names_57
self.default_data_source_path = args.defaultdatasourcepath
self.subsamplerate = args.subsamplerate
try:
ffmpeg_path = os.environ['FFMPEG_PATH']
except KeyError:
ffmpeg_path = '/usr/local/bin/ffmpeg'
if not os.path.exists(ffmpeg_path):
ffmpeg_path = '/usr/bin/ffmpeg'
self.input_ffmpeg_command_prefix = [ffmpeg_path, '-i']
self.input_ffmpeg_command_suffix = [
'-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
'-hide_banner', '-loglevel', '0', '-f', 'image2pipe', 'pipe:1']
try:
ffprobe_path = os.environ['FFPROBE_PATH']
except KeyError:
ffprobe_path = '/usr/local/bin/ffprobe'
if not os.path.exists(ffprobe_path):
ffprobe_path = '/usr/bin/ffprobe'
self.input_ffprobe_command_prefix = [
ffprobe_path, '-show_streams', '-print_format', 'json', '-loglevel',
'warning']
self.clip_string_len = 64 * 224 * 224 * 3
self.buffer_scale = 2
while self.buffer_scale < self.clip_string_len:
self.buffer_scale *= 2
self.labelpanel = ttk.Frame(self.parent)
# create array of checkboxes
for k, v in enumerate(self.class_list):
ttk.Checkbutton(self.labelpanel, text=v, onvalue=1, offvalue=0).grid(
column=k % 12, row=int(k / 12), sticky=tk.NW)
self.labelpanel.pack(side=tk.BOTTOM)
self.timer = ttkTimer(self.OnTimer, 1.0)
self.timer.start()
self.parent.update()
def invoke_subprocess(self, command):
completed_subprocess = run(
command, stdout=PIPE, stderr=PIPE, timeout=60)
if len(completed_subprocess.stderr) > 0:
std_err = str(completed_subprocess.stderr, encoding='utf-8')
raise Exception(std_err)
return completed_subprocess.stdout
def get_video_dimensions(self, video_file_path):
command = self.input_ffprobe_command_prefix + [video_file_path]
output = self.invoke_subprocess(command)
json_map = json.loads(str(output, encoding='utf-8'))
return int(json_map['streams'][0]['height']), \
int(json_map['streams'][0]['width']), \
int(json_map['streams'][0]['nb_frames'])
def get_video_clip(self, video_file_path):
command = self.input_ffmpeg_command_prefix + [video_file_path] \
+ self.input_ffmpeg_command_suffix
output = self.invoke_subprocess(command)
return output
def OnExit(self, evt):
"""Closes the window.
"""
self.Close()
def OnOpenFile(self):
    """Pop up a dialog window to choose a video file, then play it.

    The selected clip's index is aligned down to the first clip of its
    label-sharing group (groups are `subsamplerate` consecutive clips).
    """
    # if a file is already running, then stop it.
    self.OnStop()
    # Create a file dialog opened in the default data source directory,
    # listing all files, with the title "Choose a video clip".
    p = pathlib.Path(self.default_data_source_path)
    video_file_path = askopenfilename(
        initialdir=p, title="Choose a video clip", filetypes=(
            ("all files", "*.*"), ("mp4 files", "*.mp4"), ("avi files", "*.avi")))
    # askopenfilename returns '' when the dialog is cancelled; isfile
    # rejects that as well as non-file selections.
    if os.path.isfile(video_file_path):
        self.directory_path = os.path.dirname(video_file_path)
        video_file_name = os.path.basename(video_file_path)
        self.directory_child_filenames = sorted(os.listdir(self.directory_path))
        self.num_clips = len(self.directory_child_filenames)
        self.current_clip = self.directory_child_filenames.index(video_file_name)
        # the number of consecutive clips sharing a common label is equal to the
        # subsample rate. if the user selects a clip other than the first in the
        # label-sharing set, adjust the current clip
        self.current_clip -= self.current_clip % self.subsamplerate
        self.DisplayClip()
def OnOpenDirectory(self):
    """Pop up a dialog window to choose a clip directory, then play its
    first clip (sorted by filename)."""
    # if a file is already running, then stop it.
    self.OnStop()
    # Open the directory chooser in the default data source location.
    p = pathlib.Path(self.default_data_source_path)
    directory_path = askdirectory(
        initialdir=p, title='Choose a video clip parent directory')
    # askdirectory returns '' on cancel; isdir filters that out.
    if os.path.isdir(directory_path):
        self.directory_path = directory_path
        self.directory_child_filenames = sorted(os.listdir(self.directory_path))
        self.num_clips = len(self.directory_child_filenames)
        self.current_clip = 0
        self.DisplayClip()
def DisplayClip(self):
    """Load the current clip into the VLC player, restore its saved
    label into the checkboxes (OnLoad) and start playback (OnPlay)."""
    # Create the media object for the current clip.
    media = self.Instance.media_new(os.path.join(
        self.directory_path, self.directory_child_filenames[self.current_clip]))
    self.player.set_media(media)
    # Show the clip's filename in the UI.
    self.videolabelvar.set(self.directory_child_filenames[self.current_clip])
    # set the window id where to render VLC's video output
    if platform.system() == 'Windows':
        self.player.set_hwnd(self.GetHandle())
    else:
        self.player.set_xwindow(self.GetHandle())  # this line messes up windows
        # TODO: this should be made cross-platform
    self.OnLoad()
    self.OnPlay()
    # TODO: add UI element to indicate the presence/absence of a label
def OnLoad(self):
    """Load the saved label of the current clip into the checkboxes.

    (The original docstring was copy-pasted from OnNext.)  If no media
    is loaded yet a directory chooser is opened instead.  If no label
    file exists for the clip, or it cannot be read, the checkbox state
    is left untouched -- a missing label is the normal case for
    unlabeled clips, so this is deliberately best-effort.
    """
    if not self.player.get_media():
        self.OnOpenDirectory()
        return
    save_path = os.path.join(
        os.path.dirname(self.directory_path), 'labels')
    label_file = os.path.join(save_path, os.path.splitext(
        self.directory_child_filenames[self.current_clip])[0] + '.npy')
    try:
        label_array = np.load(label_file)
    except (OSError, ValueError):
        # No (readable) label saved yet: narrow replacement for the
        # original bare `except: pass`, which also hid real bugs.
        return
    print('loading label: {}'.format(label_array))
    checkbuttons = list(self.labelpanel.children.values())
    # Mirror the stored 0/1 flags onto the checkbutton widget states;
    # zip also guards against a length mismatch between the two.
    for checkbutton, flag in zip(checkbuttons, label_array):
        checkbutton.state(
            ('selected', '!alternate') if flag == 1
            else ('alternate', '!selected'))
def OnSave(self):
    """Save the checkbox states as the label of the current clip group,
    then advance to the next group.

    (The original docstring was copy-pasted from OnNext.)  One .npy
    file is written for each of the `subsamplerate` consecutive clips
    that share a label.  If no media is loaded yet a directory chooser
    is opened instead.
    """
    if not self.player.get_media():
        self.OnOpenDirectory()
        return
    # One 0/1 flag per checkbutton, in widget creation order.
    label_array = np.array(
        [checkbutton.instate(['selected']) for checkbutton
         in self.labelpanel.children.values()], dtype=np.uint8)
    print('saving label: {}'.format(label_array))
    save_path = os.path.join(os.path.dirname(self.directory_path), 'labels')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # Save one file for each consecutive clip sharing a common label.
    # Clamp the range to the directory size: the original iterated a
    # full group unconditionally and raised IndexError on the final,
    # possibly short, group.
    stop = min(self.current_clip + self.subsamplerate, self.num_clips)
    for clip_index in range(self.current_clip, stop):
        np.save(os.path.join(save_path, os.path.splitext(
            self.directory_child_filenames[clip_index])[0] + '.npy'),
            label_array)
    self.OnNext()
def OnNext(self):
    """Skip to the first clip of the next label group.

    If a file was specified at __init__ rather than a directory, do
    nothing.  If no media is loaded yet a directory chooser is opened.
    """
    if not self.player.get_media():
        self.OnOpenDirectory()
        return
    # Only advance when the next group actually starts inside the file
    # list.  The original `current_clip < num_clips - 1` check allowed
    # current_clip to overshoot past the end whenever subsamplerate > 1,
    # making DisplayClip raise IndexError on the last group.
    if self.current_clip + self.subsamplerate < self.num_clips:
        self.current_clip += self.subsamplerate
        self.DisplayClip()
    self.OnPlay()
def OnPrev(self):
    """Skip to the previous label group in the directory.

    (Docstring fixed: it was copy-pasted from OnNext.)  If a file was
    specified at __init__ rather than a directory, do nothing.
    """
    # check if there is a file to play, otherwise open a
    # tk.FileDialog to select a file
    if not self.player.get_media():
        self.OnOpenDirectory()
    else:
        # current_clip is kept a multiple of subsamplerate elsewhere
        # (OnOpenFile aligns it), so any positive value can safely step
        # back by one full group without going negative.
        if self.current_clip > 0:
            self.current_clip -= self.subsamplerate
            self.DisplayClip()
        self.OnPlay()
def OnPlay(self):
    """Start playback; open the directory chooser first when nothing is
    loaded yet."""
    # Guard clause: with no media there is nothing to play.
    if not self.player.get_media():
        self.OnOpenDirectory()
        return
    # VLC's play() returns -1 when the media cannot be launched.
    if self.player.play() == -1:
        self.errorDialog("Unable to play.")
def GetHandle(self):
    """Return the native window id of the video panel, used as VLC's
    render target (set_hwnd / set_xwindow)."""
    return self.videopanel.winfo_id()
# def OnPause(self, evt):
def OnPause(self):
    """Pause the player.

    NOTE(review): vlc's pause() typically toggles pause/resume --
    confirm whether this handler is expected to resume as well.
    """
    self.player.pause()
def OnReplay(self):
    """Restart the current media from the beginning.

    (Docstring fixed: it previously said "Stop the player" although the
    method stops, rewinds the slider and plays again.)
    """
    self.player.stop()
    # reset the time slider
    self.timeslider.set(0)
    self.player.play()
def OnStop(self):
"""Stop | |
# -*- coding: utf-8 -*-
# This script gathers all .i18n files and aggregates them as a pair of .h/.cpp
# file.
# In practice, it enforces a NFKD normalization. Because Epsilon does not
# properly draw upper case letters with accents, we remove them here.
# If compression is activated, texts are grouped by languages, and compressed
# with LZ4 algorithm to save memory. When another language is selected, it is
# uncompressed into a huge buffer, where the messages texts are retrieved.
# It works with Python 3 only
import argparse
import csv
import io
import re
import sys
import unicodedata
import lz4.frame
# Command-line interface of the generator.
parser = argparse.ArgumentParser(description="Process some i18n files.")
parser.add_argument('--header', help='the .h file to generate')
parser.add_argument('--implementation', help='the .cpp file to generate')
parser.add_argument('--locales', nargs='+', help='locale to actually generate')
parser.add_argument('--countries', nargs='+', help='countries to actually generate')
parser.add_argument('--codepoints', help='the code_points.h file')
parser.add_argument('--countrypreferences', help='the country_preferences.csv file')
parser.add_argument('--languagepreferences', help='the language_preferences.csv file')
parser.add_argument('--files', nargs='+', help='an i18n file')
# Fixed: a stray trailing backslash after this call joined it with the
# following `args = ...` assignment and made the module unparsable.
parser.add_argument('--compress', action='store_true', help='if the texts should be compressed')

args = parser.parse_args()

# Maximum message width in glyphs, keyed by message type (the type is
# derived from the .i18n file's base name).
message_length_limit_for_type = {
    "toolbox" : 35, # Toolbox maximal available space with small font
    "default" : 45 # Ion::Display::Width / KDFont::SmallFont->glyphSize().width()
}
def has_glyph(glyph):
    """Return True when *glyph* is drawable, i.e. it appears in the
    module-level `codepoints` list parsed from code_points.h."""
    return glyph in codepoints
def source_definition(i18n_string):
    """Convert a raw .i18n message into the UTF-8 bytes written to the .cpp.

    The text is NFKD-normalized, and combining marks that follow an
    upper-case letter are dropped (per the file header: Epsilon does not
    properly draw accented capitals).  Without --compress the result is
    wrapped in double quotes to form a C string literal; with --compress
    the escape sequences \\\\, \\n, \\" and \\x11 are collapsed into the
    actual characters so the compressed stream holds final bytes.
    Exits the process when a glyph is missing from the code points table
    or an unsupported escape is met under --compress.
    """
    s = unicodedata.normalize("NFKD", i18n_string)
    result = u""
    if not(args.compress):
        result += u"\""
    i = 0
    length = len(s)
    checkForCombining = False
    while i < length:
        copyCodePoint = True
        if checkForCombining:
            # We remove combining code points, which are between 0x300 and 0x36F
            # (for the non-extended set)
            copyCodePoint = (ord(s[i]) < 0x300) or (ord(s[i]) > 0x36F)
            checkForCombining = False
        if args.compress and s[i] == '\\' and i+1 < length:
            # When compressing, we need to use combined chars so that the
            # correct chars will be uncompressed.
            combinedChar = ''
            if s[i+1] == '\\':
                # Combine "\\" into '\'
                combinedChar = '\\'
            elif s[i+1] == 'n':
                # Combine "\n" into '\n'
                combinedChar = '\n'
            elif s[i+1] == '"':
                # Combine "\"" into '"'
                combinedChar = '"'
            elif (i+3 < length) and s[i+1] == 'x' and s[i+2] == '1' and s[i+3] == '1':
                # Combine "\x11" into '\x11'
                combinedChar = '\x11'
                # skip the extra "x1" here; the shared i+=2 below skips "\1"
                i+=2
            else :
                sys.stderr.write(" \\" + str(s[i+1]) + " is not handled with compression. Exiting !\n")
                sys.exit(-1)
            if combinedChar != '':
                result = result + combinedChar
                i+=2
                continue
        if copyCodePoint:
            # Remove the uppercase characters with combining chars
            checkForCombining = s[i].isupper()
            result = result + s[i]
            if not has_glyph(s[i]):
                sys.stderr.write(s[i] + " (" + str(hex(ord(s[i]))) + ") is not a character present in " + args.codepoints + " . Exiting !\n")
                sys.exit(-1)
        i = i+1
    if not(args.compress):
        result += u"\""
    return result.encode("utf-8")
def is_commented_line(line):
    """Return a truthy match object when *line* is a '#' comment, else None."""
    return re.match(r"^#(.*)$", line)
def split_line(line):
    """Split an i18n line of the form `name = "text"` into a
    (name, encoded_definition) tuple.

    The definition is passed through source_definition(), so the second
    element is a UTF-8 byte string.  Exits the process on a malformed
    line.
    """
    match = re.match(r"^(\w+)\s*=\s*\"(.*)\"$", line)
    if not match:
        sys.stderr.write("Error: Invalid line \"" + line + "\"\n")
        sys.exit(-1)
    return (match.group(1), source_definition(match.group(2)))
def locale_from_filename(filename):
    """Extract the locale code from a path like 'apps/toolbox.en.i18n'."""
    locale_match = re.match(r".*\.([a-z]+)\.i18n", filename)
    return locale_match.group(1)
def type_from_filename(filename):
    """Extract the message type (file base name) from a path like
    'apps/toolbox.en.i18n'."""
    type_match = re.match(r".*\/([a-z_]+)\.[a-z]+\.i18n", filename)
    return type_match.group(1)
def message_exceeds_length_limit(definition, type):
    """Return True when any line of *definition* is too wide to display.

    *definition* is the UTF-8 byte string produced by source_definition();
    the glyph limit depends on the message *type*, falling back to
    "default".  Combining characters occupy no horizontal space and are
    ignored when counting.
    """
    if not(type in message_length_limit_for_type):
        type = "default"
    length_limit = message_length_limit_for_type[type]
    # Handle multi-line messages
    if args.compress:
        iterator = re.split(r"\n", definition.decode('utf-8'))
    else:
        # Uncompressed definitions are quoted C literals: strip the
        # surrounding quotes and split on the escaped newline sequence.
        iterator = re.split(r"\\n", definition.decode('utf-8')[1:-1])
    for definition_line in iterator:
        # Ignore combining characters
        if (len([c for c in definition_line if not unicodedata.combining(c)]) > length_limit):
            return True
    return False
def check_redundancy(messages, data, locales):
    """Abort when a localized message has identical text in every locale.

    Such messages should be declared universal instead; the offending
    names are written to stderr and the process exits with -1.
    """
    duplicated = set()
    for name in messages:
        if all(data[locales[i]][name] == data[locales[i - 1]][name]
               for i in range(1, len(locales))):
            duplicated.add(name)
    if duplicated:
        sys.stderr.write("Some localized messages are redundant and can be made universal :\n\t" + "\n\t".join(sorted(duplicated)) + "\n")
        sys.exit(-1)
def parse_files(files):
    """Parse all .i18n *files* into one structure.

    Returns a dict with:
    - "messages": sorted names of localized messages,
    - "universal_messages": sorted names of locale-independent messages,
    - "data": {locale: {name: encoded definition bytes}}.

    Exits the process on message redefinition, on messages exceeding the
    per-type display limit, or (via check_redundancy) when a localized
    message is identical across all requested locales.
    """
    data = {}
    messages = set()
    universal_messages = set()
    for path in files:
        # Locale and message type are both encoded in the filename.
        locale = locale_from_filename(path)
        type = type_from_filename(path)
        if locale not in data:
            data[locale] = {}
        with io.open(path, "r", encoding='utf-8') as file:
            for line in file:
                if is_commented_line(line):
                    continue
                name,definition = split_line(line)
                if locale == "universal":
                    # A name may be universal or localized, never both.
                    if name in messages:
                        sys.stderr.write("Error: Redefinition of message \"" + name + "\" as universal\n")
                        sys.exit(-1)
                    if name in universal_messages:
                        sys.stderr.write("Error: Redefinition of universal message \"" + name + "\"\n")
                        sys.exit(-1)
                    universal_messages.add(name)
                else:
                    messages.add(name)
                if message_exceeds_length_limit(definition, type):
                    sys.stderr.write("Error: Message exceeds length limits for " + type + " : " + definition.decode('utf-8') + " (" + name + ")\n")
                    sys.exit(-1)
                data[locale][name] = definition
    check_redundancy(messages, data, args.locales)
    return {"messages": sorted(messages), "universal_messages": sorted(universal_messages), "data": data}
def parse_codepoints(file):
    """Collect the supported glyphs from a code_points.h header.

    Scans the C array whose declaration contains "CodePoints[]" and
    returns each `0x...,` entry converted to its character.
    """
    chars = []
    in_table = False
    with io.open(file, "r", encoding='utf-8') as header:
        for line in header:
            # The closing brace ends the table before the line is parsed.
            if "};" in line:
                in_table = False
            if in_table:
                start = line.find('0x')
                stop = line.find(',')
                if start != -1 and stop != -1:
                    chars.append(chr(int(line[start:stop], 16)))
            # The declaration line itself holds no values; entries start
            # on the following line.
            if "CodePoints[]" in line:
                in_table = True
    return chars
codepoints = parse_codepoints(args.codepoints)
def parse_csv_with_header(file):
    """Read a comma-separated file and return (header_row, data_rows)."""
    with io.open(file, 'r', encoding='utf-8') as csvfile:
        rows = [row for row in csv.reader(csvfile, delimiter=',')]
    return (rows[0], rows[1:])
def parse_country_preferences(file):
    """Read country_preferences.csv into {country: ["Header::Value", ...]}.

    Each record's first cell is the country code; the remaining cells are
    paired with their column headers as C++ scoped-enum expressions.
    """
    header, records = parse_csv_with_header(file)
    return {record[0]: [header[i] + "::" + record[i]
                        for i in range(1, len(record))]
            for record in records}
countryPreferences = parse_country_preferences(args.countrypreferences)
def parse_language_preferences(file):
    """Read language_preferences.csv into {language: (header, value)}.

    Only the second column is kept, paired with its column header.
    """
    header, records = parse_csv_with_header(file)
    return {record[0]: (header[1], record[1]) for record in records}
languagePreferences = parse_language_preferences(args.languagepreferences)
def print_block_from_list(target, header, data, beautify=lambda arg: arg, prefix=" ", footer="};\n\n", elementPerLine=1):
    """Write *data* to *target* as a C-style brace block.

    *header* is written first, then each element (mapped through
    *beautify*) followed by a comma, *elementPerLine* elements per line
    with *prefix* starting each line, and finally *footer*.
    """
    target.write(header)
    for index, element in enumerate(data):
        if index % elementPerLine == 0:
            target.write(prefix)
        target.write(beautify(element) + ",")
        # Newline after a full line, a plain space between elements.
        target.write("\n" if (index + 1) % elementPerLine == 0 else " ")
    target.write(footer)
def print_header(data, path, locales, countries):
    """Generate the i18n header file (i18n.h).

    Emits the Message/Language/Country enums, the name tables, the
    default-country-per-language table and the per-country preference
    table.  The last entry of *countries* is the fallback used when a
    preference table has no row for a given language or country.

    Fixes: the file handle is now managed by a `with` block (it leaked
    whenever an exception occurred before f.close()), and a stray
    C-style ';' statement terminator was removed.
    """
    with open(path, "w") as f:
        f.write("#ifndef APPS_I18N_H\n")
        f.write("#define APPS_I18N_H\n\n")
        f.write("// This file is auto-generated by i18n.py\n\n")
        f.write("#include <escher/i18n.h>\n")
        f.write("#include <apps/country_preferences.h>\n\n")
        f.write("namespace I18n {\n\n")
        f.write("constexpr static int NumberOfLanguages = %d;\n\n" % len(locales))
        f.write("constexpr static int NumberOfCountries = %d;\n\n" % len(countries))
        # Messages enumeration: universal messages first, then (when not
        # compressing) a marker separating them from localized messages.
        print_block_from_list(f,
            "enum class Message : uint16_t {\n",
            ["Default = 0"] + data["universal_messages"],
            footer="\n")
        if not(args.compress):
            f.write(" LocalizedMessageMarker,\n")
        print_block_from_list(f,
            "\n",
            data["messages"])
        # Languages enumeration
        print_block_from_list(f,
            "enum class Language : uint8_t {\n",
            locales,
            lambda arg: arg.upper())
        # Language names
        print_block_from_list(f,
            "constexpr const Message LanguageNames[NumberOfLanguages] = {\n",
            locales,
            lambda arg: arg.upper(),
            " Message::Language")
        # Countries enumeration
        print_block_from_list(f,
            "enum class Country : uint8_t {\n",
            countries,
            lambda arg: arg.upper())
        defaultCountry = countries[-1]
        # Country names
        print_block_from_list(f,
            "constexpr const Message CountryNames[NumberOfCountries] = {\n",
            countries,
            lambda arg: arg.upper(),
            " Message::Country")
        # Language preferences ('??' is the wildcard row of the csv).
        f.write("constexpr static Country DefaultCountryForLanguage[NumberOfLanguages] = {\n")
        for language in locales:
            key = language if (language in languagePreferences) else '??'
            header, country = languagePreferences[key]
            line = " " + header + "::" + (country if country in countries else defaultCountry)
            f.write(line + ",\n")
        f.write("};\n\n")
        # Country preferences
        f.write("constexpr static CountryPreferences CountryPreferencesArray[] = {\n")
        for country in countries:
            key = country if (country in countryPreferences) else defaultCountry
            line = " CountryPreferences("
            for param in countryPreferences[key]:
                line += param + ", "
            f.write(line[:-2] + "),\n")
        f.write("};\n\n")
        # Language ISO639-1 codes
        f.write("constexpr const char * LanguageISO6391Codes[NumberOfLanguages] = {\n")
        for locale in locales:
            f.write(" \"" + locale + "\",\n")
        f.write("};\n\n")
        f.write("}\n\n")
        f.write("#endif\n")
def print_implementation(data, path, locales):
f = open(path, "w")
f.write("#include \"i18n.h\"\n")
f.write("#include <apps/global_preferences.h>\n")
f.write("#include <assert.h>\n\n")
f.write("namespace I18n {\n\n")
# Write the default message
f.write("constexpr static char universalDefault[] = {0};\n")
# Write the universal messages
for message in data["universal_messages"]:
f.write("constexpr static char universal" + message + "[] = ")
f = open(path, "ab") # Re-open the file as binary to output raw UTF-8 bytes
f.write(data["data"]["universal"][message])
f = open(path, "a") # Re-open the file as text
f.write(";\n")
f.write("\n")
print_block_from_list(f,
"constexpr static const char * universalMessages[%d] = {\n universalDefault,\n" % (len(data["universal_messages"])+1),
data["universal_messages"],
prefix=" universal")
# Write the localized messages
for message in data["messages"]:
for locale in locales:
if not locale in data["data"]:
sys.stderr.write("Error: Undefined locale \"" + locale + "\"\n")
sys.exit(-1)
if not message in data["data"][locale]:
sys.stderr.write("Error: Undefined key \"" + message + "\" for locale \"" + locale + "\"\n")
sys.exit(-1)
f.write("constexpr static char " + locale + message + "[] = ")
f = open(path, "ab") # Re-open the file as binary to output raw UTF-8 bytes
f.write(data["data"][locale][message])
f = open(path, "a") # Re-open the file as text
f.write(";\n")
f.write("\n")
f.write("constexpr static const char * messages[%d][%d] = {\n" % (len(data["messages"]), len(locales)))
for message in data["messages"]:
f.write(" {")
for locale in locales:
f.write(locale + message + ", ")
f.write("},\n")
f.write("};\n\n")
# Write the translate method
code = """
const char * translate(Message m) {
assert(m != Message::LocalizedMessageMarker);
int localizedMessageOffset | |
<reponame>mariusgheorghies/python
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoCertManagerV1alpha2CertificateSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
# Maps each python attribute name to its declared OpenAPI type (used by
# the generated (de)serialization machinery).
openapi_types = {
    'common_name': 'str',
    'dns_names': 'list[str]',
    'duration': 'str',
    'email_sa_ns': 'list[str]',
    'encode_usages_in_request': 'bool',
    'ip_addresses': 'list[str]',
    'is_ca': 'bool',
    'issuer_ref': 'IoCertManagerV1CertificateSpecIssuerRef',
    'key_algorithm': 'str',
    'key_encoding': 'str',
    'key_size': 'int',
    'keystores': 'IoCertManagerV1alpha2CertificateSpecKeystores',
    'organization': 'list[str]',
    'private_key': 'IoCertManagerV1alpha2CertificateSpecPrivateKey',
    'renew_before': 'str',
    'revision_history_limit': 'int',
    'secret_name': 'str',
    'secret_template': 'IoCertManagerV1CertificateSpecSecretTemplate',
    'subject': 'IoCertManagerV1alpha2CertificateSpecSubject',
    'uri_sa_ns': 'list[str]',
    'usages': 'list[str]'
}

# Maps each python attribute name to its camelCase JSON key in the
# Kubernetes API definition.
attribute_map = {
    'common_name': 'commonName',
    'dns_names': 'dnsNames',
    'duration': 'duration',
    'email_sa_ns': 'emailSANs',
    'encode_usages_in_request': 'encodeUsagesInRequest',
    'ip_addresses': 'ipAddresses',
    'is_ca': 'isCA',
    'issuer_ref': 'issuerRef',
    'key_algorithm': 'keyAlgorithm',
    'key_encoding': 'keyEncoding',
    'key_size': 'keySize',
    'keystores': 'keystores',
    'organization': 'organization',
    'private_key': 'privateKey',
    'renew_before': 'renewBefore',
    'revision_history_limit': 'revisionHistoryLimit',
    'secret_name': 'secretName',
    'secret_template': 'secretTemplate',
    'subject': 'subject',
    'uri_sa_ns': 'uriSANs',
    'usages': 'usages'
}
def __init__(self, common_name=None, dns_names=None, duration=None, email_sa_ns=None, encode_usages_in_request=None, ip_addresses=None, is_ca=None, issuer_ref=None, key_algorithm=None, key_encoding=None, key_size=None, keystores=None, organization=None, private_key=None, renew_before=None, revision_history_limit=None, secret_name=None, secret_template=None, subject=None, uri_sa_ns=None, usages=None, local_vars_configuration=None):  # noqa: E501
    """IoCertManagerV1alpha2CertificateSpec - a model defined in OpenAPI"""  # noqa: E501
    # Configuration object; its client_side_validation flag is consulted
    # by setters (e.g. issuer_ref) before accepting values.
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration
    # Backing fields for the generated properties below.
    self._common_name = None
    self._dns_names = None
    self._duration = None
    self._email_sa_ns = None
    self._encode_usages_in_request = None
    self._ip_addresses = None
    self._is_ca = None
    self._issuer_ref = None
    self._key_algorithm = None
    self._key_encoding = None
    self._key_size = None
    self._keystores = None
    self._organization = None
    self._private_key = None
    self._renew_before = None
    self._revision_history_limit = None
    self._secret_name = None
    self._secret_template = None
    self._subject = None
    self._uri_sa_ns = None
    self._usages = None
    self.discriminator = None
    # Optional fields are only assigned when provided -- presumably so
    # serialization can distinguish unset from None (TODO confirm
    # against the generated to_dict()).
    if common_name is not None:
        self.common_name = common_name
    if dns_names is not None:
        self.dns_names = dns_names
    if duration is not None:
        self.duration = duration
    if email_sa_ns is not None:
        self.email_sa_ns = email_sa_ns
    if encode_usages_in_request is not None:
        self.encode_usages_in_request = encode_usages_in_request
    if ip_addresses is not None:
        self.ip_addresses = ip_addresses
    if is_ca is not None:
        self.is_ca = is_ca
    # issuer_ref and secret_name are assigned unconditionally: they are
    # required by the schema and their setters may validate against None.
    self.issuer_ref = issuer_ref
    if key_algorithm is not None:
        self.key_algorithm = key_algorithm
    if key_encoding is not None:
        self.key_encoding = key_encoding
    if key_size is not None:
        self.key_size = key_size
    if keystores is not None:
        self.keystores = keystores
    if organization is not None:
        self.organization = organization
    if private_key is not None:
        self.private_key = private_key
    if renew_before is not None:
        self.renew_before = renew_before
    if revision_history_limit is not None:
        self.revision_history_limit = revision_history_limit
    self.secret_name = secret_name
    if secret_template is not None:
        self.secret_template = secret_template
    if subject is not None:
        self.subject = subject
    if uri_sa_ns is not None:
        self.uri_sa_ns = uri_sa_ns
    if usages is not None:
        self.usages = usages
# --- Generated accessors: plain attribute get/set pairs carrying the
# OpenAPI field documentation.  Auto-generated; do not hand-edit.
@property
def common_name(self):
    """Gets the common_name of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501

    CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4  # noqa: E501

    :return: The common_name of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :rtype: str
    """
    return self._common_name

@common_name.setter
def common_name(self, common_name):
    """Sets the common_name of this IoCertManagerV1alpha2CertificateSpec.

    CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4  # noqa: E501

    :param common_name: The common_name of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :type: str
    """
    self._common_name = common_name

@property
def dns_names(self):
    """Gets the dns_names of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501

    DNSNames is a list of DNS subjectAltNames to be set on the Certificate.  # noqa: E501

    :return: The dns_names of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :rtype: list[str]
    """
    return self._dns_names

@dns_names.setter
def dns_names(self, dns_names):
    """Sets the dns_names of this IoCertManagerV1alpha2CertificateSpec.

    DNSNames is a list of DNS subjectAltNames to be set on the Certificate.  # noqa: E501

    :param dns_names: The dns_names of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :type: list[str]
    """
    self._dns_names = dns_names

@property
def duration(self):
    """Gets the duration of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501

    The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration  # noqa: E501

    :return: The duration of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :rtype: str
    """
    return self._duration

@duration.setter
def duration(self, duration):
    """Sets the duration of this IoCertManagerV1alpha2CertificateSpec.

    The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration  # noqa: E501

    :param duration: The duration of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :type: str
    """
    self._duration = duration

@property
def email_sa_ns(self):
    """Gets the email_sa_ns of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501

    EmailSANs is a list of email subjectAltNames to be set on the Certificate.  # noqa: E501

    :return: The email_sa_ns of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :rtype: list[str]
    """
    return self._email_sa_ns

@email_sa_ns.setter
def email_sa_ns(self, email_sa_ns):
    """Sets the email_sa_ns of this IoCertManagerV1alpha2CertificateSpec.

    EmailSANs is a list of email subjectAltNames to be set on the Certificate.  # noqa: E501

    :param email_sa_ns: The email_sa_ns of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :type: list[str]
    """
    self._email_sa_ns = email_sa_ns

@property
def encode_usages_in_request(self):
    """Gets the encode_usages_in_request of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501

    EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest  # noqa: E501

    :return: The encode_usages_in_request of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :rtype: bool
    """
    return self._encode_usages_in_request

@encode_usages_in_request.setter
def encode_usages_in_request(self, encode_usages_in_request):
    """Sets the encode_usages_in_request of this IoCertManagerV1alpha2CertificateSpec.

    EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest  # noqa: E501

    :param encode_usages_in_request: The encode_usages_in_request of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :type: bool
    """
    self._encode_usages_in_request = encode_usages_in_request

@property
def ip_addresses(self):
    """Gets the ip_addresses of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501

    IPAddresses is a list of IP address subjectAltNames to be set on the Certificate.  # noqa: E501

    :return: The ip_addresses of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :rtype: list[str]
    """
    return self._ip_addresses

@ip_addresses.setter
def ip_addresses(self, ip_addresses):
    """Sets the ip_addresses of this IoCertManagerV1alpha2CertificateSpec.

    IPAddresses is a list of IP address subjectAltNames to be set on the Certificate.  # noqa: E501

    :param ip_addresses: The ip_addresses of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :type: list[str]
    """
    self._ip_addresses = ip_addresses

@property
def is_ca(self):
    """Gets the is_ca of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501

    IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`.  # noqa: E501

    :return: The is_ca of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :rtype: bool
    """
    return self._is_ca

@is_ca.setter
def is_ca(self, is_ca):
    """Sets the is_ca of this IoCertManagerV1alpha2CertificateSpec.

    IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`.  # noqa: E501

    :param is_ca: The is_ca of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :type: bool
    """
    self._is_ca = is_ca

@property
def issuer_ref(self):
    """Gets the issuer_ref of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501

    :return: The issuer_ref of this IoCertManagerV1alpha2CertificateSpec.  # noqa: E501
    :rtype: IoCertManagerV1CertificateSpecIssuerRef
    """
    return self._issuer_ref
@issuer_ref.setter
def issuer_ref(self, issuer_ref):
"""Sets the issuer_ref of this IoCertManagerV1alpha2CertificateSpec.
:param issuer_ref: The issuer_ref of this IoCertManagerV1alpha2CertificateSpec. # noqa: E501
:type: IoCertManagerV1CertificateSpecIssuerRef
"""
if self.local_vars_configuration.client_side_validation and issuer_ref is None: # noqa: E501
raise ValueError("Invalid value for `issuer_ref`, must not be `None`") # noqa: E501
self._issuer_ref | |
<reponame>dadosgovbr/ckan<filename>ckan/tests/functional/test_authz.py
from time import time
from copy import copy
import random
import sqlalchemy as sa
import ckan.model as model
from ckan import plugins
from ckan.tests import TestController, url_for, setup_test_search_index
from ckan.lib.base import *
import ckan.lib.search as search
from ckan.lib.create_test_data import CreateTestData
import ckan.authz as authz
from ckan.lib.helpers import json, truncate
class AuthzTestBase(object):
# Interfaces exercised by default: the web UI and the REST API.
INTERFACES = ['wui', 'rest']
# Entity types checked unless a test narrows them down.
DEFAULT_ENTITY_TYPES = ['dataset', 'group']
# Maps the entity-type strings used in URLs to their model classes.
ENTITY_CLASS_MAP = {'dataset': model.Package,
                    'group': model.Group,
                    'package_relationship': model.PackageRelationship}
@classmethod
def setup_class(cls):
    """Build the search index and the class-wide test fixtures once.

    First parameter renamed from `self` to `cls`: this is a classmethod
    and receives the class object, not an instance.
    """
    setup_test_search_index()
    # _create_test_data is expected to be provided by concrete subclasses.
    cls._create_test_data()
    model.Session.remove()
@classmethod
def teardown_class(cls):
    """Drop all test data and clear the search index after the class runs.

    First parameter renamed from `self` to `cls`: this is a classmethod
    and receives the class object, not an instance.
    """
    model.Session.remove()
    model.repo.rebuild_db()
    model.Session.remove()
    search.clear()
def _test_can(self, action, users, entity_names,
              interfaces=INTERFACES,
              entity_types=DEFAULT_ENTITY_TYPES):
    """Assert that each user CAN perform *action* on the named entities
    over every interface and entity type combination."""
    self._test_expectation(action, users, entity_names,
                           interfaces=interfaces,
                           entity_types=entity_types,
                           expect_it_works=True)
def _test_cant(self, action, users, entity_names,
               interfaces=INTERFACES,
               entity_types=DEFAULT_ENTITY_TYPES):
    """Assert that each user CANNOT perform *action* on the named
    entities over every interface and entity type combination."""
    self._test_expectation(action, users, entity_names,
                           interfaces=interfaces,
                           entity_types=entity_types,
                           expect_it_works=False)
def _test_expectation(self, action, users, entity_names,
                      interfaces, entity_types,
                      expect_it_works):
    """Run *action* for every (user, entity name, interface, entity type)
    combination and raise unless the outcome matches *expect_it_works*.

    For 'create' a fresh random entity name is generated (except for
    package relationships); for 'delete'/'purge' the name is derived per
    user/interface from fixtures that must have been created beforehand.
    """
    # Accept scalars as well as sequences for users/entity names.
    if isinstance(users, model.User):
        users = [users]
    if isinstance(entity_names, basestring):
        entity_names = [entity_names]
    if action == 'create' and 'package_relationship' not in entity_types:
        # Random name so repeated runs do not collide with old entities.
        entity_names = [str(random.random()*100000000).replace('.', '-')]
    if action in ('delete', 'purge'):
        entity_names = ['filled in later']
    for user in users:
        for entity_name in entity_names:
            for interface in interfaces:
                test_func = {'rest':self._test_via_api,
                             'wui':self._test_via_wui}[interface]
                for entity_type in entity_types:
                    if action in ('delete', 'purge'):
                        if entity_type != 'package_relationship':
                            # Deletion fixtures are named after the
                            # action, user and interface; resolve them
                            # to their database id.
                            entity_name = '%s_%s_%s' % (action, user.name, interface)
                            entity_class = self.ENTITY_CLASS_MAP[entity_type]
                        else:
                            raise NotImplementedError
                        entity = entity_class.by_name(entity_name)
                        assert entity, 'Have not created %s to %s: %r' %\
                               (entity_type, action, entity_name)
                        entity_name = str(entity.id)
                    ok, diagnostics = test_func(action, user, entity_name, entity_type)
                    if ok != expect_it_works:
                        msg = 'Should be able to %s %s %r as user %r on %r interface. Diagnostics: %r' \
                              if expect_it_works else \
                              'Should NOT be able to %s %s %r as user %r on %r interface. Diagnostics: %r'
                        raise Exception(msg % (action, entity_type, entity_name, user.name, interface, truncate(repr(diagnostics), 1000)))
def _test_via_wui(self, action, user, entity_name, entity='dataset'):
    """Perform *action* through the web UI as *user*.

    Returns (is_ok, diagnostics): is_ok is True only when the response
    has a 2xx status, contains the expected marker string, and shows no
    error markers; diagnostics carries the request/response details for
    the failure message.
    """
    # Test action on WUI
    str_required_in_response = entity_name
    controller_name = 'package' if entity == 'dataset' else entity
    if action in (model.Action.EDIT, model.Action.READ):
        offset = url_for(controller=controller_name, action=action, id=unicode(entity_name))
    elif action == 'search':
        offset = '/%s/search?q=%s' % (entity, entity_name)
        str_required_in_response = '%s"' % entity_name
    elif action == 'list':
        if entity == 'group':
            offset = '/group'
        else:
            offset = '/%s/list' % entity
    elif action == 'create':
        offset = '/%s/new' % entity
        str_required_in_response = 'Add'
    elif action == 'delete':
        offset = url_for(controller=controller_name, action=model.Action.EDIT, id=unicode(entity_name))
        # this is ludicrously sensitive (we have to improve html testing!)
        # str_required_in_response = 'state'
        str_required_in_response = '<select id="state"'
    else:
        raise NotImplementedError
    res = self.app.get(offset, extra_environ={'REMOTE_USER': user.name.encode('utf8')}, expect_errors=True)
    # Collect each individual check so failures report which one broke.
    tests = {}
    tests['str_required (%s)' % str_required_in_response] = bool(str_required_in_response in res)
    r = res.body
    r = r.replace('form_errors', '')
    tests['error string'] = bool('error' not in r)
    tests['status'] = bool(res.status in (200, 201))
    tests['0 packages found'] = bool(u'<strong>0</strong> packages found' not in res)
    is_ok = False not in tests.values()
    # clear flash messages - these might make the next page request
    # look like it has an error
    self.app.reset()
    return is_ok, [offset, user.name, tests, res.status, res.body]
    def _test_via_api(self, action, user, entity_name, entity_type='dataset'):
        """Check whether `user` may perform `action` on the entity via the
        REST API.

        Builds the API request for the action, issues it with the user's
        API key, and applies heuristic checks to the response (expected
        string present, no error marker, 2xx status).
        Returns (is_ok, diagnostics_list).
        """
        # Test action on REST
        str_required_in_response = entity_name
        if action == model.Action.EDIT:
            offset = '/api/rest/%s/%s' % (entity_type, entity_name)
            postparams = '%s=1' % json.dumps({'title':u'newtitle'}, encoding='utf8')
            func = self.app.post
        elif action == model.Action.READ:
            offset = '/api/rest/%s/%s' % (entity_type, entity_name)
            postparams = None
            func = self.app.get
        elif action == 'search':
            offset = '/api/search/%s?q=%s' % (entity_type, entity_name)
            postparams = None
            func = self.app.get
        elif action == 'list':
            offset = '/api/rest/%s' % (entity_type)
            postparams = None
            func = self.app.get
        elif action == 'create':
            offset = '/api/rest/%s' % (entity_type)
            postparams = '%s=1' % json.dumps({'name': unicode(entity_name),
                                              'title': u'newtitle'},
                                             encoding='utf8')
            func = self.app.post
            str_required_in_response = u'newtitle'
        elif action == 'delete':
            offset = '/api/rest/%s/%s' % (entity_type, entity_name)
            postparams = '%s=1' % json.dumps({'name': unicode(entity_name),
                                              'state': 'deleted'},
                                             encoding='utf8')
            func = self.app.post
            str_required_in_response = '"state": "deleted"'
            assert 0, 'Deleting in the API does not currently work - See #1053'
        elif action == 'purge':
            offset = '/api/rest/%s/%s' % (entity_type, entity_name)
            func = self.app.delete
            postparams = {}
            str_required_in_response = ''
        else:
            raise NotImplementedError, action
        if entity_type == 'package_relationship':
            # Relationships have their own URL scheme; entity_name may be a
            # plain name or a (subject[, type], object) tuple.
            if action == 'edit':
                func = self.app.put
            if isinstance(entity_name, basestring):
                offset = '/api/rest/dataset/%s/relationships' % entity_name
            else:
                assert isinstance(entity_name, tuple)
                if len(entity_name) == 1:
                    offset = '/api/rest/dataset/%s/relationships' % entity_name[0]
                else:
                    if len(entity_name) == 2:
                        entity_properties = {'entity1': entity_name[0],
                                             'entity2': entity_name[1],
                                             'type': 'relationships'}
                    elif len(entity_name) == 3:
                        entity_properties = {'entity1': entity_name[0],
                                             'entity2': entity_name[2],
                                             'type': entity_name[1]}
                    else:
                        raise NotImplementedError
                    # NOTE(review): 'in' performs a SUBSTRING test here; it is
                    # True for action == 'list' but also for e.g. 'is' -- '=='
                    # was probably intended. Confirm before changing.
                    if action in 'list':
                        offset = '/api/rest/dataset/%(entity1)s/relationships/%(entity2)s' % entity_properties
                    else:
                        offset = '/api/rest/dataset/%(entity1)s/%(type)s/%(entity2)s' % entity_properties
                    str_required_in_response = '"object": "%(entity2)s", "type": "%(type)s", "subject": "%(entity1)s"' % entity_properties
        if user.name == 'visitor':
            # Visitors have no API key, so no Authorization header is sent.
            environ = {}
        else:
            environ = {'Authorization' : str(user.apikey)}
        res = func(offset, params=postparams,
                   extra_environ=environ,
                   expect_errors=True)
        tests = {}
        tests['str_required (%s)' % str_required_in_response] = bool(str_required_in_response in res)
        tests['error string'] = bool('error' not in res)
        tests['status'] = bool(res.status in (200, 201))
        tests['0 packages found'] = bool(u'0 packages found' not in res)
        is_ok = False not in tests.values()
        return is_ok, [offset, postparams, user.name, tests, res.status, res.body]
class TestUsage(TestController, AuthzTestBase):
'''Use case: role defaults (e.g. like ckan.net operates)
* reader can read only
* editor can edit most properties of a package
'''
@classmethod
def _create_test_data(cls):
# Entities (Packages/Groups) are named after what roles (permissions)
# are assigned to them:
# First letter is the role for logged in users
# Second letter is the role for visitors
# Where:
# r = Allowed to read
# w = Allowed to read/write
# x = Not allowed either
model.repo.init_db()
rev = model.repo.new_revision()
cls.roles = ('xx', 'rx', 'wx', 'rr', 'wr', 'ww', 'deleted')
tag = model.Tag("test")
model.Session.add(tag)
for mode in cls.roles:
pkg = model.Package(name=unicode(mode))
model.Session.add(pkg)
pkg.add_tag(tag)
model.Session.add(model.Group(name=unicode(mode)))
entities_to_test_deleting = []
for interface in cls.INTERFACES:
for action in ('purge', 'delete'):
for user in ('visitor', 'user', 'admin',
'mrloggedin', 'testsysadmin',
'pkggroupadmin'):
for entity_type in cls.DEFAULT_ENTITY_TYPES:
entity_class = cls.ENTITY_CLASS_MAP[entity_type]
entity_name = u'%s_%s_%s' % (action, user, interface)
model.Session.add(entity_class(name=entity_name))
entities_to_test_deleting.append((entity_name, entity_class))
model.Session.add(model.User(name=u'testsysadmin'))
model.Session.add(model.User(name=u'pkggroupadmin'))
model.Session.add(model.User(name=u'pkgeditor'))
model.Session.add(model.User(name=u'pkgreader'))
model.Session.add(model.User(name=u'mrloggedin'))
model.Session.add(model.User(name=u'pkgadminfriend'))
model.Session.add(model.User(name=u'groupadmin'))
model.Session.add(model.User(name=u'groupeditor'))
model.Session.add(model.User(name=u'groupreader'))
visitor_name = '172.16.17.32'
model.repo.commit_and_remove()
rev = model.repo.new_revision()
model.Package.by_name(u'ww').add_relationship(u'depends_on', model.Package.by_name(u'xx'))
model.Package.by_name(u'ww').add_relationship(u'links_to', model.Package.by_name(u'wr'))
model.repo.commit_and_remove()
testsysadmin = model.User.by_name(u'testsysadmin')
pkggroupadmin = model.User.by_name(u'pkggroupadmin')
pkgeditor = model.User.by_name(u'pkgeditor')
pkgreader = model.User.by_name(u'pkgreader')
groupadmin = model.User.by_name(u'groupadmin')
groupeditor = model.User.by_name(u'groupeditor')
groupreader = model.User.by_name(u'groupreader')
mrloggedin = model.User.by_name(name=u'mrloggedin')
visitor = model.User.by_name(name=model.PSEUDO_USER__VISITOR)
for mode in cls.roles:
pkg = model.Package.by_name(unicode(mode))
model.add_user_to_role(pkggroupadmin, model.Role.ADMIN, pkg)
model.add_user_to_role(pkgeditor, model.Role.EDITOR, pkg)
model.add_user_to_role(pkgreader, model.Role.READER, pkg)
group = model.Group.by_name(unicode(mode))
group.packages = model.Session.query(model.Package).all()
model.add_user_to_role(pkggroupadmin, model.Role.ADMIN, group)
model.add_user_to_role(groupadmin, model.Role.ADMIN, group)
model.add_user_to_role(groupeditor, model.Role.EDITOR, group)
model.add_user_to_role(groupreader, model.Role.READER, group)
if mode == u'deleted':
rev = model.repo.new_revision()
pkg = model.Package.by_name(unicode(mode))
pkg.state = model.State.DELETED
group = model.Package.by_name(unicode(mode))
group.state = model.State.DELETED
model.repo.commit_and_remove()
else:
if mode[0] == u'r':
model.add_user_to_role(mrloggedin, model.Role.READER, pkg)
model.add_user_to_role(mrloggedin, model.Role.READER, group)
if mode[0] == u'w':
model.add_user_to_role(mrloggedin, model.Role.EDITOR, pkg)
model.add_user_to_role(mrloggedin, model.Role.EDITOR, group)
if mode[1] == u'r':
model.add_user_to_role(visitor, model.Role.READER, pkg)
model.add_user_to_role(visitor, model.Role.READER, group)
if mode[1] == u'w':
model.add_user_to_role(visitor, model.Role.EDITOR, pkg)
model.add_user_to_role(visitor, model.Role.EDITOR, group)
model.add_user_to_role(testsysadmin, model.Role.ADMIN, model.System())
for entity_name, entity_class in entities_to_test_deleting:
entity = entity_class.by_name(entity_name)
model.add_user_to_role(visitor, model.Role.EDITOR, entity)
model.add_user_to_role(mrloggedin, model.Role.EDITOR, entity)
model.add_user_to_role(visitor, model.Role.READER, entity)
model.add_user_to_role(mrloggedin, model.Role.READER, entity)
model.add_user_to_role(pkggroupadmin, model.Role.ADMIN, entity)
model.repo.commit_and_remove()
assert model.Package.by_name(u'deleted').state == model.State.DELETED
cls.testsysadmin = model.User.by_name(u'testsysadmin')
cls.pkggroupadmin = model.User.by_name(u'pkggroupadmin')
cls.pkgadminfriend = model.User.by_name(u'pkgadminfriend')
cls.pkgeditor = model.User.by_name(u'pkgeditor')
cls.pkgreader = model.User.by_name(u'pkgreader')
cls.groupadmin = model.User.by_name(u'groupadmin')
cls.groupeditor = model.User.by_name(u'groupeditor')
cls.groupreader = model.User.by_name(u'groupreader')
cls.mrloggedin = model.User.by_name(name=u'mrloggedin')
cls.visitor = model.User.by_name(name=model.PSEUDO_USER__VISITOR)
# Tests numbered by the use case
    def test_14_visitor_reads_stopped(self):
        """A visitor cannot read entities that grant visitors no role."""
        self._test_cant('read', self.visitor, ['xx', 'rx', 'wx'])
    def test_01_visitor_reads(self):
        """A visitor can read entities that grant visitors read or write."""
        self._test_can('read', self.visitor, ['rr', 'wr', 'ww'])
    def test_12_visitor_edits_stopped(self):
        """A visitor can never edit via REST (no API key), and cannot edit
        via the WUI without a visitor write role."""
        self._test_cant('edit', self.visitor, ['xx', 'rx', 'wx', 'rr', 'wr', 'ww'], interfaces=['rest'])
        self._test_cant('edit', self.visitor, ['xx', 'rx', 'wx', 'rr', 'wr'], interfaces=['wui'])
    def test_02_visitor_edits(self):
        """A visitor may edit 'ww' via the WUI, but nothing via REST."""
        self._test_can('edit', self.visitor, ['ww'], interfaces=['wui'])
        self._test_can('edit', self.visitor, [], interfaces=['rest'])
    def test_visitor_creates(self):
        """Visitors may open the dataset-creation form in the WUI, but may
        not create groups (sysadmin only) nor create anything via REST."""
        self._test_can('create', self.visitor, [], interfaces=['wui'], entity_types=['dataset'])
        self._test_cant('create', self.visitor, [], interfaces=['wui'], entity_types=['group']) # need to be sysadmin
        self._test_cant('create', self.visitor, [], interfaces=['rest'])
    def test_15_user_reads_stopped(self):
        """A logged-in user cannot read entities that grant no roles."""
        self._test_cant('read', self.mrloggedin, ['xx'])
    def test_03_user_reads(self):
        """A logged-in user can read everything granting logged-in access."""
        self._test_can('read', self.mrloggedin, ['rx', 'wx', 'rr', 'wr', 'ww'])
    def test_13_user_edits_stopped(self):
        """A logged-in user cannot edit entities granting at most read."""
        self._test_cant('edit', self.mrloggedin, ['xx', 'rx', 'rr'])
    def test_04_user_edits(self):
        """A logged-in user can edit entities granting logged-in write."""
        self._test_can('edit', self.mrloggedin, ['wx', 'wr', 'ww'])
    def test_user_creates(self):
        """A logged-in user may create entities."""
        self._test_can('create', self.mrloggedin, [])
    def test_list(self):
        """Every user (including visitors) sees all active packages in REST
        listings."""
        # NB there is no listing of package in wui interface any more
        # NB under the new model all active packages are always visible in listings by default
        self._test_can('list', [self.testsysadmin, self.pkggroupadmin, self.mrloggedin, self.visitor], ['xx', 'rx', 'wx', 'rr', 'wr', 'ww'], interfaces=['rest'])
    def test_admin_edit_deleted(self):
        """Admins may edit deleted entities; ordinary users may not."""
        self._test_can('edit', self.pkggroupadmin, ['xx', 'rx', 'wx', 'rr', 'wr', 'ww', 'deleted'])
        self._test_cant('edit', self.mrloggedin, ['deleted'])
def test_admin_read_deleted(self):
self._test_can('read', self.pkggroupadmin, ['xx', 'rx', 'wx', | |
the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(message):
    """Write *message* to stderr, followed by a newline.

    The parameter was renamed from 'str', which shadowed the builtin.
    """
    sys.stderr.write(message)
    sys.stderr.write('\n')
def CheckedUnlink(name):
    """Delete *name*, retrying up to 30 times before reporting the error."""
    # On Windows, when run with -jN in parallel processes,
    # OS often fails to unlink the temp file. Not sure why.
    # Need to retry.
    # Idea from https://bugs.webkit.org/attachment.cgi?id=75982&action=prettypatch
    retry_count = 0
    while retry_count < 30:
        try:
            os.unlink(name)
            return
        except OSError, e:
            retry_count += 1
            # Back off a little longer on each failed attempt.
            time.sleep(retry_count * 0.1)
    # NOTE: 'e' leaks out of the except clause above -- this relies on
    # Python 2 scoping and would raise NameError on Python 3.
    PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
    """Run *args*, capturing stdout/stderr via temp files.

    Returns a CommandOutput with the exit code, timeout flag and the
    captured output and error text.
    """
    (fd_out, outname) = tempfile.mkstemp()
    (fd_err, errname) = tempfile.mkstemp()
    (process, exit_code, timed_out) = RunProcess(
        context,
        timeout,
        args = args,
        stdout = fd_out,
        stderr = fd_err,
    )
    os.close(fd_out)
    os.close(fd_err)
    # 'file' is the Python 2 builtin; the objects are reclaimed by
    # refcounting rather than closed explicitly.
    output = file(outname).read()
    errors = file(errname).read()
    CheckedUnlink(outname)
    CheckedUnlink(errname)
    return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
    """Run *args* without capturing output; return a CommandOutput.

    Unlike Execute(), stdout/stderr go straight to the console, so the
    returned CommandOutput carries empty output strings.
    """
    (process, exit_code, timed_out) = RunProcess(
        context,
        timeout,
        args = args,
    )
    # Bug fix: the computed timed_out flag used to be discarded and False
    # was always reported, hiding timeouts from callers.
    return CommandOutput(exit_code, timed_out, "", "")
def CarCdr(path):
    """Split *path* into its first element and the remaining tail.

    Returns (None, []) for an empty sequence.
    """
    if not path:
        return (None, [])
    head, tail = path[0], path[1:]
    return (head, tail)
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
# Each inner list is one set of extra VM flags; every test is run once
# per entry.
VARIANT_FLAGS = [[],
                 ['--stress-opt', '--always-opt'],
                 ['--nocrankshaft']]
class TestConfiguration(object):
    """Base class for per-suite test configurations."""

    def __init__(self, context, root):
        self.context = context
        self.root = root

    def Contains(self, path, file):
        # True if the pattern list 'path' prefix-matches the name list 'file'.
        if len(path) > len(file):
            return False
        for i in xrange(len(path)):
            if not path[i].match(file[i]):
                return False
        return True

    def GetTestStatus(self, sections, defs):
        # Subclasses override this to populate status-file sections.
        pass

    def VariantFlags(self):
        # Default: run every test under each global flag variant.
        return VARIANT_FLAGS
class TestSuite(object):
    """Base class for a named collection of tests."""

    def __init__(self, name):
        self.name = name

    def GetName(self):
        return self.name
class TestRepository(TestSuite):
    """A test suite loaded lazily from a 'testcfg' module in a directory."""

    def __init__(self, path):
        normalized_path = abspath(path)
        super(TestRepository, self).__init__(basename(normalized_path))
        self.path = normalized_path
        self.is_loaded = False
        self.config = None

    def GetConfiguration(self, context):
        """Load (at most once) and return this suite's configuration."""
        if self.is_loaded:
            return self.config
        self.is_loaded = True
        file = None
        try:
            # Import the suite's 'testcfg' module from its own directory.
            (file, pathname, description) = imp.find_module('testcfg', [ self.path ])
            module = imp.load_module('testcfg', file, pathname, description)
            self.config = module.GetConfiguration(context, self.path)
        finally:
            if file:
                file.close()
        return self.config

    def GetBuildRequirements(self, path, context):
        return self.GetConfiguration(context).GetBuildRequirements()

    def DownloadData(self, context):
        # Optional hook: only some configurations can download data.
        config = self.GetConfiguration(context)
        if 'DownloadData' in dir(config):
            config.DownloadData()

    def AddTestsToList(self, result, current_path, path, context, mode):
        # Each variant flag set contributes its own copy of the tests.
        config = self.GetConfiguration(context)
        for v in config.VariantFlags():
            tests = config.ListTests(current_path, path, mode, v)
            for t in tests: t.variant_flags = v
            result += tests

    def GetTestStatus(self, context, sections, defs):
        self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
    """A composite suite that fans operations out over child suites."""

    def __init__(self, tests):
        super(LiteralTestSuite, self).__init__('root')
        self.tests = tests

    def GetBuildRequirements(self, path, context):
        """Collect build requirements from every child matching the path."""
        head, tail = CarCdr(path)
        requirements = []
        for suite in self.tests:
            if not head or head.match(suite.GetName()):
                requirements += suite.GetBuildRequirements(tail, context)
        return requirements

    def DownloadData(self, path, context):
        """Ask every child suite matching the path to fetch its data."""
        head, tail = CarCdr(path)
        for suite in self.tests:
            if not head or head.match(suite.GetName()):
                suite.DownloadData(context)

    def ListTests(self, current_path, path, context, mode, variant_flags):
        """Gather the tests of every child suite matching the path."""
        head, tail = CarCdr(path)
        matches = []
        for suite in self.tests:
            suite_name = suite.GetName()
            if not head or head.match(suite_name):
                suite.AddTestsToList(matches, current_path + [suite_name], path, context, mode)
        return matches

    def GetTestStatus(self, context, sections, defs):
        """Merge the status information of every child suite."""
        for suite in self.tests:
            suite.GetTestStatus(context, sections, defs)
# Suffix appended to the VM binary name for each build mode.
SUFFIX = {
    'debug'   : '_g',
    'release' : '' }
# Default VM flags for each build mode.
FLAGS = {
    'debug'   : ['--nobreak-on-abort', '--enable-slow-asserts', '--debug-code', '--verify-heap'],
    'release' : ['--nobreak-on-abort']}
# Debug builds run slower, so their timeouts are scaled up.
TIMEOUT_SCALEFACTOR = {
    'debug'   : 4,
    'release' : 1 }
class Context(object):
    """Global settings for one test run: paths, VM, timeouts, flags."""

    def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
        self.workspace = workspace
        self.buildspace = buildspace
        self.verbose = verbose
        self.vm_root = vm
        self.timeout = timeout
        self.processor = processor
        self.suppress_dialogs = suppress_dialogs
        self.store_unexpected_output = store_unexpected_output

    def GetVm(self, mode):
        """Return the path of the VM binary for the given build mode."""
        executable = self.vm_root + SUFFIX[mode]
        if utils.IsWindows() and not executable.endswith('.exe'):
            executable += '.exe'
        return executable

    def GetVmCommand(self, testcase, mode):
        """Full command line: the VM binary followed by its flags."""
        return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)

    def GetVmFlags(self, testcase, mode):
        """Variant flags plus the test's custom (or mode default) flags."""
        custom = testcase.GetCustomFlags(mode)
        extra = FLAGS[mode] if custom is None else custom
        return testcase.variant_flags + extra

    def GetTimeout(self, testcase, mode):
        """Timeout in seconds, scaled for build mode and stress runs."""
        base = self.timeout * TIMEOUT_SCALEFACTOR[mode]
        if '--stress-opt' in self.GetVmFlags(testcase, mode):
            return base * 4
        return base
def RunTestCases(cases_to_run, progress, tasks):
    """Run the cases under the named progress indicator.

    Returns the indicator's result (0 on success). Exceptions raised by
    the run are printed rather than propagated.
    """
    progress = PROGRESS_INDICATORS[progress](cases_to_run)
    result = 0
    try:
        result = progress.Run(tasks)
    except Exception, e:
        print "\n", e
    return result
def BuildRequirements(context, requirements, mode, scons_flags):
    """Invoke scons to build the requirements; True iff it succeeded."""
    scons_command = ['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
    scons_command += requirements
    scons_command += scons_flags
    output = ExecuteNoCapture(scons_command, context)
    return output.exit_code == 0
# -------------------------------------------
# --- T e s t   C o n f i g u r a t i o n ---
# -------------------------------------------

# Outcome / status keywords used in the suites' .status files.
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
    """Abstract base for nodes of the status-file expression tree."""
    pass
class Constant(Expression):
    """A literal boolean value in a status-file expression."""

    def __init__(self, value):
        self.value = value

    def Evaluate(self, env, defs):
        # A constant evaluates to itself, regardless of environment.
        return self.value
class Variable(Expression):
    """A reference to a variable bound in the evaluation environment."""

    def __init__(self, name):
        self.name = name

    def GetOutcomes(self, env, defs):
        # An unbound variable contributes no outcomes.
        if self.name in env: return ListSet([env[self.name]])
        else: return Nothing()

    def Evaluate(self, env, defs):
        return env[self.name]
class Outcome(Expression):
    """A test-outcome name, possibly defined as an alias in 'defs'."""

    def __init__(self, name):
        self.name = name

    def GetOutcomes(self, env, defs):
        # Follow an alias definition if one exists; otherwise the outcome
        # is just itself.
        if self.name in defs:
            return defs[self.name].GetOutcomes(env, defs)
        else:
            return ListSet([self.name])
class Set(object):
    """Abstract base for outcome sets (ListSet, Everything, Nothing)."""
    pass
class ListSet(Set):
    """A finite outcome set backed by a plain list of elements."""

    def __init__(self, elms):
        self.elms = elms

    def __str__(self):
        return "ListSet%s" % str(self.elms)

    def Intersect(self, that):
        """Intersection; defers to 'that' unless it is also a ListSet."""
        if isinstance(that, ListSet):
            common = [ elm for elm in self.elms if elm in that.elms ]
            return ListSet(common)
        return that.Intersect(self)

    def Union(self, that):
        """Union; defers to 'that' unless it is also a ListSet."""
        if isinstance(that, ListSet):
            extras = [ elm for elm in that.elms if elm not in self.elms ]
            return ListSet(self.elms + extras)
        return that.Union(self)

    def IsEmpty(self):
        return not self.elms
class Everything(Set):
    """The universal set: identity for intersection, absorbing for union."""

    def Intersect(self, that):
        return that

    def Union(self, that):
        return self

    def IsEmpty(self):
        return False
class Nothing(Set):
    """The empty set: absorbing for intersection, identity for union."""

    def Intersect(self, that):
        return self

    def Union(self, that):
        return that

    def IsEmpty(self):
        return True
class Operation(Expression):
    """A binary operator node: '||', ',', '&&', '==', '!=' or 'if'."""

    def __init__(self, left, op, right):
        self.left = left
        self.op = op
        self.right = right

    def _OutcomeIntersection(self, env, defs):
        # Shared helper for '==', '!=' and '&&': the outcomes common to
        # both operands.
        left_outcomes = self.left.GetOutcomes(env, defs)
        right_outcomes = self.right.GetOutcomes(env, defs)
        return left_outcomes.Intersect(right_outcomes)

    def Evaluate(self, env, defs):
        """Evaluate this node to a truth value."""
        if self.op == 'if':
            # A bare 'if' clause has no truth value of its own.
            return False
        if self.op == '||' or self.op == ',':
            return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
        if self.op == '==':
            return not self._OutcomeIntersection(env, defs).IsEmpty()
        if self.op == '!=':
            return self._OutcomeIntersection(env, defs).IsEmpty()
        assert self.op == '&&'
        return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)

    def GetOutcomes(self, env, defs):
        """Compute the set of outcomes this expression may produce."""
        if self.op == '||' or self.op == ',':
            return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
        if self.op == 'if':
            # Conditional: left's outcomes apply only when the guard holds.
            if self.right.Evaluate(env, defs):
                return self.left.GetOutcomes(env, defs)
            return Nothing()
        assert self.op == '&&'
        return self._OutcomeIntersection(env, defs)
def IsAlpha(string):
    """True if *string* contains only letters, digits and underscores.

    Despite the name, this accepts identifier-like tokens, not just
    alphabetic ones. The parameter was renamed from 'str', which shadowed
    the builtin.
    """
    for char in string:
        if not (char.isalpha() or char.isdigit() or char == '_'):
            return False
    return True
class Tokenizer(object):
    """A simple string tokenizer that chops expressions into variables,
    parens and operators"""

    def __init__(self, expr):
        self.index = 0
        self.expr = expr
        self.length = len(expr)
        self.tokens = None

    def Current(self, length = 1):
        """Return the next *length* characters, or "" if fewer remain."""
        if not self.HasMore(length): return ""
        return self.expr[self.index:self.index+length]

    def HasMore(self, length = 1):
        """True if at least *length* characters remain unconsumed."""
        # Bug fix: this used to read 'self.index < self.length + (length - 1)',
        # which claimed more characters were available than there were,
        # letting Current(2) return a single trailing character.
        return self.index + length <= self.length

    def Advance(self, count = 1):
        self.index = self.index + count

    def AddToken(self, token):
        self.tokens.append(token)

    def SkipSpaces(self):
        while self.HasMore() and self.Current().isspace():
            self.Advance()

    def Tokenize(self):
        """Split the expression into a token list; None on malformed input
        (including trailing whitespace, as in the original)."""
        self.tokens = [ ]
        while self.HasMore():
            self.SkipSpaces()
            if not self.HasMore():
                return None
            if self.Current() == '(':
                self.AddToken('(')
                self.Advance()
            elif self.Current() == ')':
                self.AddToken(')')
                self.Advance()
            elif self.Current() == '$':
                self.AddToken('$')
                self.Advance()
            elif self.Current() == ',':
                self.AddToken(',')
                self.Advance()
            elif IsAlpha(self.Current()):
                buf = ""
                while self.HasMore() and IsAlpha(self.Current()):
                    buf += self.Current()
                    self.Advance()
                self.AddToken(buf)
            elif self.Current(2) == '&&':
                self.AddToken('&&')
                self.Advance(2)
            elif self.Current(2) == '||':
                self.AddToken('||')
                self.Advance(2)
            elif self.Current(2) == '==':
                self.AddToken('==')
                self.Advance(2)
            elif self.Current(2) == '!=':
                self.AddToken('!=')
                self.Advance(2)
            else:
                return None
        return self.tokens
class Scanner(object):
    """A simple scanner that can serve out tokens from a given list"""

    def __init__(self, tokens):
        self.tokens = tokens
        self.length = len(tokens)
        self.index = 0

    def HasMore(self):
        """True while unconsumed tokens remain."""
        return self.index < self.length

    def Current(self):
        """Return the token at the cursor without consuming it."""
        return self.tokens[self.index]

    def Advance(self):
        """Move the cursor past the current token."""
        self.index += 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
| |
"""
(C) Crown Copyright 2017, the Met Office.
Module to replicate data access API of the previous version of AutoAssess:
- from loaddata import load_run_ss
For information on the PP-Header attributes, see:
Unified Model Documentation Paper F03: "Input and Output File Formats"
available here: https://code.metoffice.gov.uk/doc/um/vn10.5/umdp.html.
"""
import os.path
import re
import datetime
from datetime import timedelta as td
from datetime import datetime as dd
import cf_units
import iris
import iris.coord_categorisation as coord_cat
def is_daily(cube):
    """Test whether the time coordinate contains only daily bound periods."""
    def covers_one_day(bound):
        # Bound differences are assumed to be expressed in hours -- TODO
        # confirm against the data's time units.
        return td(hours=(bound[1] - bound[0])) == td(days=1)
    return all(covers_one_day(bound) for bound in cube.coord('time').bounds)
def is_monthly(cube):
    """A month is a period of at least 28 days, up to 31 days."""
    def covers_one_month(bound):
        # Bound differences are assumed to be expressed in days -- TODO
        # confirm against the data's time units.
        span = td(days=(bound[1] - bound[0]))
        return td(days=28) <= span <= td(days=31)
    return all(covers_one_month(bound) for bound in cube.coord('time').bounds)
def is_seasonal(cube):
    """Season is 3 months, i.e. at least 89 days, and up to 92 days."""
    def covers_one_season(bound):
        # Shortest season: 28+31+30 = 89 days; longest: 31+30+31 = 92 days.
        span = td(days=(bound[1] - bound[0]))
        return td(days=28 + 31 + 30) <= span <= td(days=31 + 30 + 31)
    return all(covers_one_season(bound) for bound in cube.coord('time').bounds)
def is_yearly(cube):
    """A year is a period of exactly 365 or exactly 360 days.

    NOTE(review): the original docstring claimed 'at least 360 days, up
    to 366', but the check matches only 365 (non-leap Gregorian) or 360
    (360-day calendar) days; 366-day leap years do NOT match -- confirm
    whether that is intended.
    """
    def covers_one_year(bound):
        span = td(days=(bound[1] - bound[0]))
        return span in (td(days=365), td(days=360))
    return all(covers_one_year(bound) for bound in cube.coord('time').bounds)
def is_time_mean(cube):
    """Check whether a cube was averaged over time.

    Returns an explicit bool; the original fell through and returned
    None when no matching cell method was found.
    """
    return any(
        cell_method.method == 'mean' and 'time' in cell_method.coord_names
        for cell_method in cube.cell_methods)
def is_zonal_mean(cube):
    """Check whether a cube was averaged over longitude.

    Returns an explicit bool; the original fell through and returned
    None when no matching cell method was found.
    """
    return any(
        cell_met.method == 'mean' and 'longitude' in cell_met.coord_names
        for cell_met in cube.cell_methods)
def is_minimum_in_period(cube):
    """Check if cube contains minimum values during time period.

    Returns an explicit bool; the original fell through and returned
    None when no matching cell method was found.
    """
    return any(
        cell_met.method == 'minimum' and 'time' in cell_met.coord_names
        for cell_met in cube.cell_methods)
def is_maximum_in_period(cube):
    """Check if cube contains maximum values during time period.

    Returns an explicit bool; the original fell through and returned
    None when no matching cell method was found.
    """
    return any(
        cell_met.method == 'maximum' and 'time' in cell_met.coord_names
        for cell_met in cube.cell_methods)
def select_by_variable_name(cubes, variable_name):
    """
    Select subset from CubeList matching a CF-name or STASH code.

    :param CubeList cubes: Iris CubeList.
    :param string variable_name: CF-name or model-section-item STASH code,
        e.g. 'm01s30i404'
    :returns: CubeList with Cubes that have a matching STASH attribute.
    :rtype: CubeList
    """
    # STASH codes look like 'm01s30i404'; anything else is treated as a
    # plain iris name constraint.
    regex = '^m01s[0-9]{2}i[0-9]{3}$'
    if re.match(regex, variable_name):
        constraint = iris.AttributeConstraint(STASH=variable_name)
    else:
        constraint = iris.Constraint(variable_name)
    return cubes.extract(constraint)
# get the seasonal mean
def seasonal_mean(mycube):
    """
    Function to compute seasonal means with MEAN.

    Chunks time in 3-month periods and computes means over them;
    Returns a cube.
    """
    # NOTE(review): cube.coords() returns coordinate objects, so comparing
    # against a string is always a miss and add_season/add_season_year run
    # unconditionally -- they raise if the coordinate already exists.
    # The intended check is probably mycube.coords('clim_season').
    if 'clim_season' not in mycube.coords():
        coord_cat.add_season(mycube, 'time', name='clim_season')
    if 'season_year' not in mycube.coords():
        coord_cat.add_season_year(mycube, 'time', name='season_year')
    annual_seasonal_mean = mycube.aggregated_by(['clim_season', 'season_year'],
                                                iris.analysis.MEAN)

    def spans_three_months(time):
        """Check for three months."""
        # NOTE(review): requires exactly 90 days, which only fits a 360-day
        # calendar; real-calendar seasons span 89-92 days (cf. is_seasonal)
        # and would be dropped here -- confirm intent.
        return (time.bound[1] - time.bound[0]) == 90  # days

    three_months_bound = iris.Constraint(time=spans_three_months)
    return annual_seasonal_mean.extract(three_months_bound)
# get annual mean
def annual_mean(mycube):
    """
    Function to compute annual mean with MEAN.

    Chunks time in 365-day periods and computes means over them;
    Returns a cube.
    """
    # NOTE(review): assumes a 'year' auxiliary coordinate already exists on
    # the cube (e.g. added via iris.coord_categorisation) -- confirm callers.
    yr_mean = mycube.aggregated_by('year', iris.analysis.MEAN)

    def spans_year(time):
        """Check for 12 months."""
        # NOTE(review): only exact 365-day periods pass; leap years (366)
        # and 360-day-calendar years are excluded -- confirm intent.
        return (time.bound[1] - time.bound[0]) == 365

    t_bound = iris.Constraint(time=spans_year)
    return yr_mean.extract(t_bound)
def select_by_averaging_period(cubes, averaging_period):
    """
    Select subset from CubeList depending on averaging period.

    :param CubeList cubes: Iris CubeList.
    :param string averaging period: Must be one of 'daily', 'monthly',
        'seasonal', and 'annual'.
    :returns: CubeList with Cubes that are averaged over a certain period.
    :rtype: CubeList
    :raises: `AssertionError` if `cubes` is not a `list`.
    """
    assert isinstance(cubes, list)
    # Map each period keyword to its bound-checking predicate.
    select_period = {
        'daily': is_daily,
        'monthly': is_monthly,
        'seasonal': is_seasonal,
        'annual': is_yearly
    }
    # NOTE(review): for 'seasonal'/'annual' the predicate is applied to a
    # freshly aggregated mean of each cube (which also adds auxiliary
    # coordinates to the input cube as a side effect), not to the cube
    # itself -- confirm this is intended.
    if averaging_period == 'seasonal':
        selected_cubes = [
            cube for cube in cubes
            if select_period[averaging_period](seasonal_mean(cube))
        ]
    elif averaging_period == 'annual':
        selected_cubes = [
            cube for cube in cubes
            if select_period[averaging_period](annual_mean(cube))
        ]
    else:
        selected_cubes = [
            cube for cube in cubes if select_period[averaging_period](cube)
        ]
    return iris.cube.CubeList(selected_cubes)
def select_by_pressure_level(cubes, lblev):
    """
    Select data from CubeList on the specified pressure levels.

    :param CubeList cubes: Iris CubeList.
    :param list lblev: List of pressure levels in hPa.
    :returns: CubeList with Cubes only containing specified pressure levels.
    :rtype: CubeList.
    """
    # NOTE(review): assumes the vertical coordinate is named 'pressure'
    # and is expressed in hPa -- confirm against the input data.
    pressure_level = iris.Constraint(pressure=lblev)
    return cubes.extract(
        pressure_level)  # CubeList.extract returns always CubeList
def select_by_processing(cubes, lbproc):
    """
    Select subset from CubeList by the processing that has been applied.

    :param CubeList cubes: Iris CubeList.
    :param list lbproc: List with PP-header attributes describing processing
        steps (lbproc). Currently, only 128 ('meaned') is implemented.
    :returns: CubeList with Cubes that were processed according to lbproc
        attribute.
    :rtype: CubeList
    """
    # NOTE(review): the docstring calls lbproc a list, but the bitwise
    # operations below require an int bitmask -- confirm and fix the doc.
    assert isinstance(cubes, list)
    assert lbproc != 0
    # bits are used to indicate processing
    select_processing = {
        64: is_zonal_mean,
        128: is_time_mean,
        4096: is_minimum_in_period,
        8192: is_maximum_in_period
    }
    selected_cubes = []
    for cube in cubes:
        missing_method = False
        _lbproc = lbproc
        for key, processed in select_processing.items():
            # check processing only for set bits, using bitwise AND
            # 192 & 32 = 0
            # 192 & 64 = 64
            # 192 & 128 = 128
            if lbproc & key == key:
                if not processed(cube):
                    missing_method = True
                # if processing set bit to zero using bitwise XOR
                _lbproc = _lbproc ^ key
        # _lbproc == 0 -> processing for each bit has been tested
        if not missing_method and _lbproc == 0:
            selected_cubes.append(cube)
        # Any bit left set corresponds to a processing step we cannot test.
        if _lbproc != 0:
            raise NotImplementedError('Lbproc ' + str(lbproc) + ' is not '
                                      'implemented.')
    return iris.cube.CubeList(selected_cubes)
def select_by_initial_meaning_period(cubes, lbtim):
    """
    Select cube.

    Select a subset of the CubeList by matching part of the information
    encoded in the PP 'Time indicator' `lbtim`: the initial meaning period
    and the calendar.

    :param CubeList cubes: Iris CubeList.
    :param list lbtim: List with PP-Header attributes `lbtim`, a three digit
        number. Currently implemented: [121, 122, 621, 622]
        - First digit: Initial averaging time in hours; must be 1 or 6.
        - Second digit: Ignored.
        - Third digit: Calendar: 1 - Proleptic Gregorian Calendar
                                 2 - 360d Calendar
    :returns: CubeList of Cubes with the matching initial meaning period and
        calendar.
    :rtype: Iris CubeList
    :raises: `NotImplementedError` for not implemented values in `lbtim`.
    """
    implemented_values = [121, 122, 621, 622]
    assert isinstance(cubes, list)
    assert isinstance(lbtim, list)
    lbtims = lbtim
    unknown = [value for value in lbtims if value not in implemented_values]
    if unknown:
        msg = 'Implemented values:' + str(implemented_values) +\
            'Given:' + str(lbtims)
        raise NotImplementedError(msg)
    selected_cubes = iris.cube.CubeList()
    # Lookup tables for the first (IA) and third (IC) lbtim digits.
    interval_by_code = {1: ('1 hour', ), 6: ('6 hour', )}
    calendar_by_code = {1: 'gregorian', 2: '360_day'}
    for code in lbtims:
        digits = str(code)
        initial_interval, calendar_digit = digits[0], digits[2]
        for cube in cubes:
            # The original meaning interval (IA) must match the interval
            # recorded in the cube's first cell method.
            if interval_by_code[int(initial_interval)] != cube.cell_methods[0].intervals:
                continue
            # The calendar (IC) must match the time coordinate's calendar;
            # see cf_units.CALENDARS for possible cube calendars.
            if calendar_by_code[int(calendar_digit)] == cube.coord('time').units.calendar:
                selected_cubes.append(cube)
    return selected_cubes
def select_certain_months(cubes, lbmon):
    """
    Select data from CubeList that matches the specified months.

    :param CubeList cubes: Iris CubeList.
    :param list lbmon: List with month numbers, e.g. lbmon=[5,6,7] for May,
        June, and July.
    :returns: CubeList with Cubes containing only data for the given months.
    :rtype: CubeList
    :raises: `AssertionError` if `cubes` is not an `iris.cube.CubeList`.
    """
    # add 'month number' coordinate
    # NOTE(review): only the 'monthly' entry is ever used below; the
    # 'seasonal' and 'annual' entries are dead code -- confirm and remove.
    add_time_coord = {
        'monthly': lambda cube: coord_cat.add_month_number(
            cube, 'time', name='month_number'),
        'seasonal': lambda cube: coord_cat.add_season(cube,
                                                      'time',
                                                      name='clim_season'),
        'annual': lambda cube: coord_cat.add_season_year(cube,
                                                         'time',
                                                         name='season_year')
    }
    assert isinstance(cubes, iris.cube.CubeList)
    for cube in cubes:
        add_time_coord['monthly'](cube)
    # filter by month number
    # Passing a list to the constraint keeps any of the listed months.
    month_constraint = iris.Constraint(month_number=lbmon)
    return cubes.extract(
        month_constraint)  # CubeList.extract returns always CubeList
def get_time_offset(time_unit):
    """Return a datetime object equivalent to tunit."""
    # tunit e.g. 'day since 1950-01-01 00:00:00.0000000 UTC'
    cfunit = cf_units.Unit(time_unit, calendar=cf_units.CALENDAR_STANDARD)
    # num2date(0) yields the unit's reference date (its epoch).
    time_offset = cfunit.num2date(0)
    return time_offset
def datetime_to_int_days(date_obj, tunit):
    """Return time point converted from cube datetime cell."""
    # iris >= 2 hands time cells over as datetime-like objects that must
    # be converted to whole days since the unit's epoch; iris 1.x already
    # supplies numeric points, which are returned unchanged.
    if float(iris.__version__.split('.')[0]) >= 2.0:
        time_offset = get_time_offset(tunit)
        # Truncate to midnight so only whole days are compared.
        real_date = dd(date_obj.year, date_obj.month, date_obj.day, 0, 0, 0)
        days = (real_date - time_offset).days
    else:
        days = date_obj
    return days
def extract_time_range(cubes, start, end):
    """Extract time ranged data.

    Returns one extracted cube per input cube, keeping only time points
    between `start` and `end` (inclusive, truncated to whole days).
    """
    time_ranged_cubes = []
    iris.util.unify_time_units(cubes)
    # All cubes share a time unit after unification; use the first one's.
    time_unit = cubes[0].coord('time').units.name
    dd_start = dd(start.year, start.month, start.day, 0, 0, 0)
    t_1 = cf_units.date2num(dd_start, time_unit, cf_units.CALENDAR_STANDARD)
    dd_end = dd(end.year, end.month, end.day, 0, 0, 0)
    t_2 = cf_units.date2num(dd_end, time_unit, cf_units.CALENDAR_STANDARD)
    for cube in cubes:
        # t_1/t_2 are loop-invariant, so closing over them in the lambda
        # is safe here.
        time_constraint = iris.Constraint(
            time=lambda t: (t_1 <= datetime_to_int_days(t.point,
                                                        time_unit) <= t_2))
        cube_slice = cube.extract(time_constraint)
        # NOTE(review): extract() yields None when no points fall inside
        # the range; the None is appended as-is -- confirm callers cope.
        time_ranged_cubes.append(cube_slice)
    return time_ranged_cubes
def load_run_ss(run_object,
averaging_period,
variable_name,
lbmon=None,
lbproc=None,
lblev=None,
lbtim=None,
from_dt=None,
to_dt=None):
"""
Use - this is still used.
DEPRECATED: Do not use for new Assessment Areas. Instead, read | |
self.type = m.get('type')
if m.get('agent_name') is not None:
self.agent_name = m.get('agent_name')
return self
class ImportIotplatformMeshidResponse(TeaModel):
    """Response model for importing an IoT-platform mesh id binding."""

    # Wire fields in serialization order.
    _FIELDS = ('req_msg_id', 'result_code', 'result_msg', 'device_sn')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        device_sn: str = None,
    ):
        # Unique request id, used for call tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # The device SN submitted at binding time, echoed back for confirmation.
        self.device_sn = device_sn

    def validate(self):
        # No required fields on this response model.
        pass

    def to_map(self):
        # Serialize only the fields that are set, preserving wire order.
        result = {}
        for attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for attr in self._FIELDS:
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class ImportPurchaseorderThirdpartyRequest(TeaModel):
    """Request to import a third-party (lease) purchase order."""

    # Scalar fields serialized under their own names, in wire order.
    _SIMPLE_FIELDS = (
        'auth_token', 'product_instance_id', 'order_number',
        'consignee_name', 'consignee_id_number', 'consignee_phone',
        'consignee_address', 'supplier_name', 'supplier_id', 'lease_id',
    )

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        order_number: str = None,
        consignee_name: str = None,
        consignee_id_number: str = None,
        consignee_phone: str = None,
        consignee_address: str = None,
        supplier_name: str = None,
        supplier_id: str = None,
        lease_id: str = None,
        goods_list: List[GoodsIdAndCount] = None,
        idempot_id: str = None,
    ):
        # Authorization token under the OAuth model.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Lease order number.
        self.order_number = order_number
        # Consignee name.
        self.consignee_name = consignee_name
        # Consignee national id number.
        self.consignee_id_number = consignee_id_number
        # Consignee mobile phone number.
        self.consignee_phone = consignee_phone
        # Consignee delivery address.
        self.consignee_address = consignee_address
        # Supplier name.
        self.supplier_name = supplier_name
        # Supplier tenant id.
        self.supplier_id = supplier_id
        # Purchaser tenant id.
        self.lease_id = lease_id
        # Goods information list.
        self.goods_list = goods_list
        # Idempotency key.
        self.idempot_id = idempot_id

    def validate(self):
        # Every business field except auth_token/product_instance_id is
        # required; nested goods entries validate themselves.
        for attr in ('order_number', 'consignee_name', 'consignee_id_number',
                     'consignee_phone', 'consignee_address', 'supplier_name',
                     'supplier_id', 'lease_id', 'goods_list'):
            self.validate_required(getattr(self, attr), attr)
        if self.goods_list:
            for item in self.goods_list:
                if item:
                    item.validate()
        self.validate_required(self.idempot_id, 'idempot_id')

    def to_map(self):
        result = {}
        for attr in self._SIMPLE_FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        # goods_list is always emitted, even when unset (empty list).
        result['goods_list'] = []
        if self.goods_list is not None:
            for item in self.goods_list:
                result['goods_list'].append(item.to_map() if item else None)
        if self.idempot_id is not None:
            result['idempot_id'] = self.idempot_id
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for attr in self._SIMPLE_FIELDS:
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        # goods_list is rebuilt from scratch on every call.
        self.goods_list = []
        if m.get('goods_list') is not None:
            for item in m.get('goods_list'):
                self.goods_list.append(GoodsIdAndCount().from_map(item))
        if m.get('idempot_id') is not None:
            self.idempot_id = m.get('idempot_id')
        return self
class ImportPurchaseorderThirdpartyResponse(TeaModel):
    """Response for importing a third-party purchase order."""

    # Scalar fields serialized before the nested models, in wire order.
    _SIMPLE_FIELDS = ('req_msg_id', 'result_code', 'result_msg',
                      'purchase_order_id', 'status')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        purchase_order_id: str = None,
        status: str = None,
        fail_map: InsertPurchaseOrderFailInfo = None,
        purchase_order_info_detail: PurchaseOrderInfoDetail = None,
        idempot_id: str = None,
    ):
        # Unique request id, used for call tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # Purchase-platform purchase detail id.
        self.purchase_order_id = purchase_order_id
        # Current status:
        # READY_DELIVER(0, "awaiting shipment"),
        # DELIVERED(1, "shipped"),
        # REFUSED(2, "shipment refused"),
        # CANCELED(4, "cancelled"),
        # REFUNDED(5, "returned");
        self.status = status
        # InsertPurchaseOrderFailInfo
        self.fail_map = fail_map
        # Real order information; not returned on success.
        self.purchase_order_info_detail = purchase_order_info_detail
        # Idempotency key.
        self.idempot_id = idempot_id

    def validate(self):
        # Only the nested models carry validation rules.
        if self.fail_map:
            self.fail_map.validate()
        if self.purchase_order_info_detail:
            self.purchase_order_info_detail.validate()

    def to_map(self):
        result = {}
        for attr in self._SIMPLE_FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        if self.fail_map is not None:
            result['fail_map'] = self.fail_map.to_map()
        if self.purchase_order_info_detail is not None:
            result['purchase_order_info_detail'] = self.purchase_order_info_detail.to_map()
        if self.idempot_id is not None:
            result['idempot_id'] = self.idempot_id
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for attr in self._SIMPLE_FIELDS:
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        if m.get('fail_map') is not None:
            self.fail_map = InsertPurchaseOrderFailInfo().from_map(m['fail_map'])
        if m.get('purchase_order_info_detail') is not None:
            self.purchase_order_info_detail = PurchaseOrderInfoDetail().from_map(m['purchase_order_info_detail'])
        if m.get('idempot_id') is not None:
            self.idempot_id = m.get('idempot_id')
        return self
class AddUserRoleRequest(TeaModel):
    """Request to grant a tenant a role on the platform."""

    # Wire fields in serialization order.
    _FIELDS = ('auth_token', 'product_instance_id', 'tenant_id',
               'tenant_name', 'role', 'support_abm', 'includ_tax')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        tenant_id: str = None,
        tenant_name: str = None,
        role: str = None,
        support_abm: bool = None,
        includ_tax: bool = None,
    ):
        # Authorization token under the OAuth model.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Account identifier: Ant Financial Technology tenant id.
        self.tenant_id = tenant_id
        # Tenant name.
        self.tenant_name = tenant_name
        # Role identifier:
        # LEASER(0),
        # BUYERS(1),
        # OWNER(2),
        # ADMIN(9);
        self.role = role
        # Whether ABM remote is supported.
        self.support_abm = support_abm
        # Whether prices include tax.
        self.includ_tax = includ_tax

    def validate(self):
        # All business fields are required.
        for attr in ('tenant_id', 'tenant_name', 'role',
                     'support_abm', 'includ_tax'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        result = {}
        for attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for attr in self._FIELDS:
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class AddUserRoleResponse(TeaModel):
    """Response returned when a user role has been added."""

    # Wire fields in serialization order.
    _FIELDS = ('req_msg_id', 'result_code', 'result_msg', 'user_id')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        user_id: int = None,
    ):
        # Unique request id, used for call tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; 'OK' generally means the call succeeded.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg
        # Id of the newly created user.
        self.user_id = user_id

    def validate(self):
        # No required fields on this response model.
        pass

    def to_map(self):
        result = {}
        for attr in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[attr] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for attr in self._FIELDS:
            if m.get(attr) is not None:
                setattr(self, attr, m.get(attr))
        return self
class AddGoodsSkuRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
type: str = None,
second_type: str = None,
brand: str = None,
model: str = None,
market_price: int = None,
phone_info: PhoneInfo = None,
computer_info: ComputerInfo = None,
):
# OAuth模式下的授权token
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# GoodsTypeEnum,商品一级类目
self.type = type
# 商品二级类目
self.second_type = second_type
# 商品品牌
self.brand = brand
# 商品名称,型号
self.model = model
# 市场价
self.market_price = market_price
# 手机型号信息
self.phone_info = phone_info
# 电脑型号信息
self.computer_info = computer_info
def validate(self):
self.validate_required(self.type, 'type')
self.validate_required(self.second_type, 'second_type')
self.validate_required(self.brand, 'brand')
self.validate_required(self.model, 'model')
self.validate_required(self.market_price, 'market_price')
if self.phone_info:
self.phone_info.validate()
if self.computer_info:
self.computer_info.validate()
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.type is not None:
result['type'] = self.type
if self.second_type is not None:
result['second_type'] = self.second_type
if self.brand is not None:
result['brand'] = self.brand
if self.model is not None:
result['model'] = self.model
if self.market_price is not None:
result['market_price'] = self.market_price
if | |
"' + ap + '"\n')
except KeyError:
fr.write('\t administrator_login_password= ""\n')
pass
# tags block
try:
mtags=azr[i]["tags"]
fr.write('tags = { \n')
for key in mtags.keys():
tval=mtags[key]
fr.write('\t "' + key + '"="' + tval + '"\n')
fr.write('}\n')
except KeyError:
pass
fr.write('}\n')
fr.close() # close .tf file
if cde:
with open(rfilename) as f:
print f.read()
tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
tfim.write('echo "importing ' + str(i) + ' of ' + str(count-1) + '"' + '\n')
tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'\n'
tfim.write(tfcomm)
# end for i loop
tfrm.close()
tfim.close()
#end stub
#
# azurerm_sql_database
#
# azurerm_sql_database
def azurerm_sql_database(crf, cde, crg, headers, requests, sub, json, az2tfmess):
    """Generate Terraform files and state scripts for Azure SQL databases.

    Lists SQL servers in subscription *sub* via the Azure REST API, then for
    each server lists its databases and writes one .tf resource file per
    database, plus appends matching 'terraform state rm' / 'terraform import'
    lines to the 541- staterm/stateimp shell scripts.

    Parameters (Python 2 module; callers pass modules in explicitly):
        crf       -- resource-type filter; stub runs only if crf is a
                     substring of "azurerm_sql_database"
        cde       -- debug flag: dump raw JSON and generated files
        crg       -- optional resource-group filter (case-insensitive)
        headers   -- REST auth headers (bearer token)
        requests  -- the requests module
        sub       -- Azure subscription id
        json      -- the json module
        az2tfmess -- banner written at the top of each generated file
    """
    tfp = "azurerm_sql_database"
    tcode = "541-"
    azr = ""
    if crf in tfp:
        # REST or cli
        # print "REST SQL Servers"
        url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Sql/servers"
        params = {'api-version': '2015-05-01-preview'}
        r = requests.get(url, headers=headers, params=params)
        azr= r.json()["value"]
        # Scripts are opened in append mode: earlier stubs share them.
        tfrmf = tcode+tfp+"-staterm.sh"
        tfimf = tcode+tfp+"-stateimp.sh"
        tfrm = open(tfrmf, 'a')
        tfim = open(tfimf, 'a')
        print "# " + tfp,
        count = len(azr)
        print count
        # Outer loop: one iteration per SQL *server*.
        for i in range(0, count):
            name = azr[i]["name"]
            loc = azr[i]["location"]
            id = azr[i]["id"]
            # Segment 4 of the resource id is the resource-group name.
            rg = id.split("/")[4].replace(".", "-").lower()
            rgs = id.split("/")[4]
            if crg is not None:
                if rgs.lower() != crg.lower():
                    continue # back to for
            if cde:
                print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
            sname=name
            # azr=az sql db list --server sname -g srg -o json
            url="https://management.azure.com/" + id + "/databases"
            params = {'api-version': '2017-10-01-preview'}
            r = requests.get(url, headers=headers, params=params)
            azr2= r.json()["value"]
            if cde:
                print(json.dumps(azr2, indent=4, separators=(',', ': ')))
            icount=len(azr2)
            if icount > 0 :
                # Inner loop: one .tf file per *database* on this server.
                for j in range(0,icount):
                    name = azr2[j]["name"]
                    loc = azr2[j]["location"]
                    id = azr2[j]["id"]
                    rg = id.split("/")[4].replace(".", "-").lower()
                    rgs = id.split("/")[4]
                    if crg is not None:
                        if rgs.lower() != crg.lower():
                            continue # back to for
                    # Dots are illegal in Terraform resource names.
                    rname = name.replace(".", "-")
                    prefix = tfp+"."+rg+'__'+rname
                    # print prefix
                    rfilename = prefix+".tf"
                    fr = open(rfilename, 'w')
                    fr.write(az2tfmess)
                    fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
                    fr.write('\t name = "' + name + '"\n')
                    fr.write('\t location = "' + loc + '"\n')
                    fr.write('\t resource_group_name = "' + rgs + '"\n')
                    #fr.write('\t server_name = "' + sname + '"\n')
                    col=azr2[j]["properties"]["collation"]
                    ed=azr2[j]["properties"]["currentSku"]["tier"]
                    rso=azr2[j]["properties"]["requestedServiceObjectiveName"]
                    fr.write('\t server_name = "' + sname + '"\n')
                    # System databases don't get explicit sizing/collation.
                    if ed != "System":
                        fr.write('\t collation= "' + col + '"\n')
                        fr.write('\t edition= "' + ed + '"\n')
                        fr.write('\t requested_service_objective_name= "' + rso + '"\n')
                    try:
                        cm = azr2[j]["properties"]["createMode"]
                        fr.write('\t create_mode= "' + cm + '"\n')
                    except KeyError:
                        pass
                    # tags block
                    try:
                        mtags = azr2[j]["tags"]
                        fr.write('tags = { \n')
                        for key in mtags.keys():
                            tval = mtags[key]
                            fr.write('\t "' + key + '"="' + tval + '"\n')
                        fr.write('}\n')
                    except KeyError:
                        pass
                    fr.write('}\n')
                    fr.close()  # close .tf file
                    if cde:
                        with open(rfilename) as f:
                            print f.read()
                    tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
                    # NOTE(review): the progress echo reports the outer server
                    # index i / count, not the database index j — confirm
                    # whether that is intended.
                    tfim.write('echo "importing ' + str(i) +
                               ' of ' + str(count-1) + '"' + '\n')
                    tfcomm = 'terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'\n'
                    tfim.write(tfcomm)
        # end for i loop
        tfrm.close()
        tfim.close()
# end stub
#
# azurerm_databricks_workspace
#
# azurerm_databricks_workspace
def azurerm_databricks_workspace(crf,cde,crg,headers,requests,sub,json,az2tfmess):
    """Generate Terraform files and state scripts for Databricks workspaces.

    Lists Microsoft.Databricks/workspaces in subscription *sub* via REST,
    writes one .tf resource file per workspace, and appends matching
    'terraform state rm' / 'terraform import' lines to the 550- scripts.
    Parameters are the same convention as the other azurerm_* stubs:
    crf = type filter, cde = debug dump flag, crg = resource-group filter,
    headers/sub = REST auth and subscription, requests/json = modules,
    az2tfmess = generated-file banner.
    """
    tfp="azurerm_databricks_workspace"
    tcode="550-"
    azr=""
    if crf in tfp:
        # REST or cli
        # print "REST Managed Disk"
        url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Databricks/workspaces"
        params = {'api-version': '2018-04-01'}
        r = requests.get(url, headers=headers, params=params)
        azr= r.json()["value"]
        # Scripts are opened in append mode: earlier stubs share them.
        tfrmf=tcode+tfp+"-staterm.sh"
        tfimf=tcode+tfp+"-stateimp.sh"
        tfrm=open(tfrmf, 'a')
        tfim=open(tfimf, 'a')
        print "# " + tfp,
        count=len(azr)
        print count
        for i in range(0, count):
            name=azr[i]["name"]
            loc=azr[i]["location"]
            id=azr[i]["id"]
            # Segment 4 of the resource id is the resource-group name.
            rg=id.split("/")[4].replace(".","-").lower()
            rgs=id.split("/")[4]
            if crg is not None:
                if rgs.lower() != crg.lower():
                    continue # back to for
            if cde:
                print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
            # Dots are illegal in Terraform resource names.
            rname=name.replace(".","-")
            prefix=tfp+"."+rg+'__'+rname
            #print prefix
            rfilename=prefix+".tf"
            fr=open(rfilename, 'w')
            fr.write(az2tfmess)
            fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
            fr.write('\t name = "' + name + '"\n')
            fr.write('\t location = "'+ loc + '"\n')
            fr.write('\t resource_group_name = "'+ rgs + '"\n')
            # The provider expects lower-case sku names; normalize the two
            # capitalized values the API returns.
            sku=azr[i]["sku"]["name"]
            if sku == "Standard" : sku="standard"
            if sku == "Premium" : sku="premium"
            fr.write('\t sku = "' + sku + '"\n')
            outid=azr[i]["id"]
            #print outid
            #evalcomm=fr.write('terraform import . + '__' + " tfp rg rname outid
            # tags block
            try:
                mtags=azr[i]["tags"]
                fr.write('tags = { \n')
                for key in mtags.keys():
                    tval=mtags[key]
                    fr.write('\t "' + key + '"="' + tval + '"\n')
                fr.write('}\n')
            except KeyError:
                pass
            fr.write('}\n')
            fr.close() # close .tf file
            if cde:
                with open(rfilename) as f:
                    print f.read()
            tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
            tfim.write('echo "importing ' + str(i) + ' of ' + str(count-1) + '"' + '\n')
            tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+outid+'\n'
            tfim.write(tfcomm)
        # end for i loop
        tfrm.close()
        tfim.close()
#end stub
#
# azurerm_app_service_plan
#
# azurerm_app_service_plan
def azurerm_app_service_plan(crf,cde,crg,headers,requests,sub,json,az2tfmess):
    """Generate Terraform files and state scripts for App Service plans.

    Lists Microsoft.Web/serverfarms in subscription *sub* via REST, writes
    one .tf resource file per plan (name/location/rg/kind plus a sku block),
    and appends 'terraform state rm' / 'terraform import' lines to the 600-
    scripts. Parameter convention matches the other azurerm_* stubs:
    crf = type filter, cde = debug dump flag, crg = resource-group filter.
    """
    tfp="azurerm_app_service_plan"
    tcode="600-"
    azr=""
    if crf in tfp:
        # REST or cli
        # print "REST App Service Plan"
        url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Web/serverfarms"
        params = {'api-version': '2018-02-01'}
        r = requests.get(url, headers=headers, params=params)
        azr= r.json()["value"]
        # Scripts are opened in append mode: earlier stubs share them.
        tfrmf=tcode+tfp+"-staterm.sh"
        tfimf=tcode+tfp+"-stateimp.sh"
        tfrm=open(tfrmf, 'a')
        tfim=open(tfimf, 'a')
        print "# " + tfp,
        count=len(azr)
        print count
        for i in range(0, count):
            name=azr[i]["name"]
            loc=azr[i]["location"]
            id=azr[i]["id"]
            # Segment 4 of the resource id is the resource-group name.
            rg=id.split("/")[4].replace(".","-").lower()
            rgs=id.split("/")[4]
            if crg is not None:
                if rgs.lower() != crg.lower():
                    continue # back to for
            if cde:
                print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
            # Dots are illegal in Terraform resource names.
            rname=name.replace(".","-")
            prefix=tfp+"."+rg+'__'+rname
            #print prefix
            rfilename=prefix+".tf"
            fr=open(rfilename, 'w')
            fr.write(az2tfmess)
            fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
            fr.write('\t name = "' + name + '"\n')
            fr.write('\t location = "'+ loc + '"\n')
            fr.write('\t resource_group_name = "'+ rgs + '"\n')
            tier=azr[i]["sku"]["tier"]
            size=azr[i]["sku"]["size"]
            kind=azr[i]["kind"]
            fr.write('\t kind = "' + kind + '"\n')
            fr.write('\t sku {\n')
            fr.write('\t\t tier = "' + tier + '"\n')
            fr.write('\t\t size = "' + size + '"\n')
            fr.write('\t }\n')
            # geo location block
            # icount= geol | | len(
            # if icount > 0" :
            # for j in range(0,icount):
            # floc=azr[i]["failoverPolicies[j]["locationName"
            # fop=azr[i]["failoverPolicies[j]["failoverPriority"]
            # fr.write('\t geo_location {' + '"\n')
            # fr.write('\t location = "floc" + '"\n')
            # fr.write('\t failover_priority = "' + fop + '"\n')
            # fr.write('}\n')
            #
            #
            #
            # No tags - used internally
            fr.write('}\n')
            fr.close() # close .tf file
            if cde:
                with open(rfilename) as f:
                    print f.read()
            tfrm.write('terraform state rm '+tfp+'.'+rg+'__'+rname + '\n')
            tfim.write('echo "importing ' + str(i) + ' of ' + str(count-1) + '"' + '\n')
            tfcomm='terraform import '+tfp+'.'+rg+'__'+rname+' '+id+'\n'
            tfim.write(tfcomm)
        # end for i loop
        tfrm.close()
        tfim.close()
#end stub
#
# azurerm_app_service
#
# azurerm_app_service
def azurerm_app_service(crf,cde,crg,headers,requests,sub,json,az2tfmess):
tfp="azurerm_app_service"
tcode="610-"
azr=""
if crf in tfp:
# REST or cli
# print "REST App Service"
url="https://management.azure.com/subscriptions/" + sub + "/providers/Microsoft.Web/sites"
params = {'api-version': '2018-02-01'}
r = requests.get(url, headers=headers, params=params)
azr= r.json()["value"]
tfrmf=tcode+tfp+"-staterm.sh"
tfimf=tcode+tfp+"-stateimp.sh"
tfrm=open(tfrmf, 'a')
tfim=open(tfimf, 'a')
print "# " + tfp,
count=len(azr)
print count
for i in range(0, count):
kind=azr[i]["kind"]
if kind == "functionapp": continue
name=azr[i]["name"]
loc=azr[i]["location"]
id=azr[i]["id"]
rg=id.split("/")[4].replace(".","-").lower()
rgs=id.split("/")[4]
if crg is not None:
if rgs.lower() != crg.lower():
continue # back to for
if cde:
print(json.dumps(azr[i], indent=4, separators=(',', ': ')))
rname=name.replace(".","-")
prefix=tfp+"."+rg+'__'+rname
#print prefix
rfilename=prefix+".tf"
fr=open(rfilename, 'w')
fr.write(az2tfmess)
fr.write('resource ' + tfp + ' ' + rg + '__' + rname + ' {\n')
fr.write('\t name = "' + name + '"\n')
fr.write('\t location = "'+ loc + '"\n')
fr.write('\t resource_group_name = "'+ rgs + '"\n')
#azr=az webapp list -g rgsource -o json
prg=azr[i]["properties"]["serverFarmId"].split("/")[4]
pnam=azr[i]["properties"]["serverFarmId"].split("/")[8]
appplid=azr[i]["properties"]["serverFarmId"]
try:
httpsonly=str(azr[i]["properties"]["httpsOnly"]).lower()
fr.write('\t https_only = ' + httpsonly + '\n')
except KeyError:
pass
# case issues - so use resource id directly
# fr.write('\t app_service_plan_id = "${azurerm_app_service_plan. + '__' + .id}'"' prg pnam + '"\n')
fr.write('\t app_service_plan_id = "' + appplid + '"\n')
# geo location block
# icount= geol | | len(
# if icount > 0" :
# for j in range(0,icount):
# floc=azr[i]["failoverPolicies[j]["locationName"
# fop=azr[i]["failoverPolicies[j]["failoverPriority"]
# fr.write('\t geo_location {' + '"\n')
# fr.write('\t location = "floc" + '"\n')
# fr.write('\t failover_priority = "' + fop + '"\n')
# fr.write('}\n')
#
#
# tags block
try:
mtags=azr[i]["tags"]
fr.write('tags = { \n')
for key in mtags.keys():
tval=mtags[key]
fr.write('\t "' + key + '"="' + tval + '"\n')
| |
ToontownFriendSecret.unloadFriendSecret()
FriendsListPanel.unloadFriendsList()
messenger.send('cancelFriendInvitation')
base.removeGlitchMessage()
taskMgr.remove('avatarRequestQueueTask')
OTPClientRepository.OTPClientRepository.exitPlayingGame(self)
if hasattr(base, 'localAvatar'):
camera.reparentTo(render)
camera.setPos(0, 0, 0)
camera.setHpr(0, 0, 0)
del self.doId2do[base.localAvatar.getDoId()]
if base.localAvatar.getDelayDeleteCount() != 0:
self.notify.error('could not delete localAvatar, delayDeletes=%s' % (base.localAvatar.getDelayDeleteNames(),))
base.localAvatar.deleteOrDelay()
base.localAvatar.detectLeaks()
NametagGlobals.setToon(base.cam)
del base.localAvatar
del __builtins__['localAvatar']
loader.abortBulkLoad()
base.transitions.noTransitions()
if self._userLoggingOut:
self.detectLeaks(okTasks=[], okEvents=['destroy-ToontownLoadingScreenTitle', 'destroy-ToontownLoadingScreenTip', 'destroy-ToontownLoadingScreenWaitBar'])
return
    def enterGameOff(self):
        """FSM state handler: delegate straight to the OTP base class."""
        OTPClientRepository.OTPClientRepository.enterGameOff(self)
    def enterWaitOnEnterResponses(self, shardId, hoodId, zoneId, avId):
        """FSM state handler: clear the record of deleted sub-shard doIds
        before delegating shard-entry handling to the OTP base class."""
        self.resetDeletedSubShardDoIds()
        OTPClientRepository.OTPClientRepository.enterWaitOnEnterResponses(self, shardId, hoodId, zoneId, avId)
    def enterSkipTutorialRequest(self, hoodId, zoneId, avId):
        """FSM state handler: ask the server whether this avatar may skip
        the tutorial. The destination is stashed in handlerArgs so the
        answer callback can replay it."""
        self.handlerArgs = {'hoodId': hoodId,
         'zoneId': zoneId,
         'avId': avId}
        if not self.astronSupport:
            # Legacy (non-Astron) protocol: raw datagrams are routed through
            # handleTutorialQuestion while we wait for the answer.
            self.handler = self.handleTutorialQuestion
        self.__requestSkipTutorial(hoodId, zoneId, avId)
    def __requestSkipTutorial(self, hoodId, zoneId, avId):
        """Send the skip-tutorial request and listen (once) for the answer
        event, with a database timeout in case no answer arrives."""
        self.notify.debug('requesting skip tutorial')
        self.acceptOnce('skipTutorialAnswered', self.__handleSkipTutorialAnswered, [hoodId, zoneId, avId])
        messenger.send('requestSkipTutorial')
        self.waitForDatabaseTimeout(requestName='RequestSkipTutorial')
    def __handleSkipTutorialAnswered(self, hoodId, zoneId, avId, allOk):
        """Callback for 'skipTutorialAnswered'.

        NOTE(review): on success the destination is re-read from
        self.handlerArgs, shadowing the hoodId/zoneId/avId that acceptOnce
        bound; the failure branch uses the bound arguments directly.
        """
        if allOk:
            hoodId = self.handlerArgs['hoodId']
            zoneId = self.handlerArgs['zoneId']
            avId = self.handlerArgs['avId']
            self.gameFSM.request('playGame', [hoodId, zoneId, avId])
        else:
            # Server refused the skip; fall back to the tutorial flow.
            self.notify.warning('allOk is false on skip tutorial, forcing the tutorial.')
            self.gameFSM.request('tutorialQuestion', [hoodId, zoneId, avId])
    def exitSkipTutorialRequest(self):
        """FSM exit handler: tear down the wait state and event hook set up
        by enterSkipTutorialRequest."""
        self.cleanupWaitingForDatabase()
        self.handler = None
        self.handlerArgs = None
        self.ignore('skipTutorialAnswered')
        return
    def enterTutorialQuestion(self, hoodId, zoneId, avId):
        """FSM state handler: ask the server to start the tutorial flow.

        On the legacy (non-Astron) protocol, raw datagrams are routed
        through handleTutorialQuestion while waiting.
        """
        if not self.astronSupport:
            self.handler = self.handleTutorialQuestion
        self.__requestTutorial(hoodId, zoneId, avId)
def handleTutorialQuestion(self, msgType, di):
if msgType == CLIENT_CREATE_OBJECT_REQUIRED:
self.handleGenerateWithRequired(di)
elif msgType == CLIENT_CREATE_OBJECT_REQUIRED_OTHER:
self.handleGenerateWithRequiredOther(di)
elif msgType == CLIENT_OBJECT_UPDATE_FIELD:
self.handleUpdateField(di)
elif msgType == CLIENT_OBJECT_DISABLE_RESP:
self.handleDisable(di)
elif msgType == CLIENT_OBJECT_DELETE_RESP:
self.handleDelete(di)
elif msgType == CLIENT_GET_FRIEND_LIST_RESP:
self.handleGetFriendsList(di)
elif msgType == CLIENT_GET_FRIEND_LIST_EXTENDED_RESP:
self.handleGetFriendsListExtended(di)
elif msgType == CLIENT_FRIEND_ONLINE:
self.handleFriendOnline(di)
elif msgType == CLIENT_FRIEND_OFFLINE:
self.handleFriendOffline(di)
elif msgType == CLIENT_GET_AVATAR_DETAILS_RESP:
self.handleGetAvatarDetailsResp(di)
else:
self.handleMessageType(msgType, di)
    def __requestTutorial(self, hoodId, zoneId, avId):
        """Send the tutorial request and listen (once) for 'startTutorial',
        with a database timeout in case no answer arrives."""
        self.notify.debug('requesting tutorial')
        # avId is bound as an extra arg; the event sender supplies zoneId.
        self.acceptOnce('startTutorial', self.__handleStartTutorial, [avId])
        messenger.send('requestTutorial')
        self.waitForDatabaseTimeout(requestName='RequestTutorial')
    def __handleStartTutorial(self, avId, zoneId):
        """Callback for 'startTutorial': enter the game in the Tutorial hood.

        avId comes from the acceptOnce extra-args binding; zoneId is sent by
        the event (see __requestTutorial).
        """
        self.gameFSM.request('playGame', [Tutorial, zoneId, avId])
    def exitTutorialQuestion(self):
        """FSM exit handler: tear down the wait state, event hook, and task
        set up while asking the tutorial question."""
        self.cleanupWaitingForDatabase()
        self.handler = None
        self.handlerArgs = None
        self.ignore('startTutorial')
        taskMgr.remove('waitingForTutorial')
        return
    def enterSwitchShards(self, shardId, hoodId, zoneId, avId):
        """FSM state handler: delegate the shard switch to the OTP base
        class, filtering stale messages through handleCloseShard."""
        OTPClientRepository.OTPClientRepository.enterSwitchShards(self, shardId, hoodId, zoneId, avId)
        self.handler = self.handleCloseShard
    def exitSwitchShards(self):
        """FSM exit handler: undo enterSwitchShards (base-class exit, drop
        the interest-done hook, clear the message filter)."""
        OTPClientRepository.OTPClientRepository.exitSwitchShards(self)
        self.ignore(ToontownClientRepository.ClearInterestDoneEvent)
        self.handler = None
        return
    def enterCloseShard(self, loginState = None):
        """FSM state handler: close the current shard, filtering stale
        messages through handleCloseShard while the local avatar is removed
        from the state server."""
        OTPClientRepository.OTPClientRepository.enterCloseShard(self, loginState)
        self.handler = self.handleCloseShard
        self._removeLocalAvFromStateServer()
    # The wire protocol differs between the legacy OTP server and Astron, so
    # the matching handleCloseShard implementation is chosen at class
    # definition time from the astron-support config flag.
    if not config.GetBool('astron-support', True):
        def handleCloseShard(self, msgType, di):
            """While the shard closes, drop generate/update messages for
            objects still on the current shard; everything else falls
            through to the generic message handler."""
            if msgType == CLIENT_CREATE_OBJECT_REQUIRED:
                # Peek the parentId without consuming the real iterator.
                di2 = PyDatagramIterator(di)
                parentId = di2.getUint32()
                if self._doIdIsOnCurrentShard(parentId):
                    return
            elif msgType == CLIENT_CREATE_OBJECT_REQUIRED_OTHER:
                di2 = PyDatagramIterator(di)
                parentId = di2.getUint32()
                if self._doIdIsOnCurrentShard(parentId):
                    return
            elif msgType == CLIENT_OBJECT_UPDATE_FIELD:
                di2 = PyDatagramIterator(di)
                doId = di2.getUint32()
                if self._doIdIsOnCurrentShard(doId):
                    return
            self.handleMessageType(msgType, di)
    else:
        def handleCloseShard(self, msgType, di):
            """Astron variant of the close-shard filter: same logic, Astron
            message-type constants."""
            if msgType == CLIENT_ENTER_OBJECT_REQUIRED:
                di2 = PyDatagramIterator(di)
                parentId = di2.getUint32()
                if self._doIdIsOnCurrentShard(parentId):
                    return
            elif msgType == CLIENT_ENTER_OBJECT_REQUIRED_OTHER:
                di2 = PyDatagramIterator(di)
                parentId = di2.getUint32()
                if self._doIdIsOnCurrentShard(parentId):
                    return
            elif msgType == CLIENT_OBJECT_SET_FIELD:
                di2 = PyDatagramIterator(di)
                doId = di2.getUint32()
                if self._doIdIsOnCurrentShard(doId):
                    return
            self.handleMessageType(msgType, di)
    def _logFailedDisable(self, doId, ownerView):
        """Suppress the failed-disable log for objects we already deleted as
        part of a sub-shard cleanup; otherwise defer to the base class."""
        if doId not in self.doId2do and doId in self._deletedSubShardDoIds:
            return
        OTPClientRepository.OTPClientRepository._logFailedDisable(self, doId, ownerView)
    def exitCloseShard(self):
        """FSM exit handler: undo enterCloseShard (base-class exit, drop the
        interest-done hook, clear the message filter)."""
        OTPClientRepository.OTPClientRepository.exitCloseShard(self)
        self.ignore(ToontownClientRepository.ClearInterestDoneEvent)
        self.handler = None
        return
def isShardInterestOpen(self):
return self.old_setzone_interest_handle is not None or self.uberZoneInterest is not None
    def resetDeletedSubShardDoIds(self):
        """Forget which doIds were deleted during the last sub-shard dump."""
        self._deletedSubShardDoIds.clear()
    def dumpAllSubShardObjects(self):
        """Delete every distributed object parented to the current shard.

        Skips the local avatar and objects flagged neverDisable; records the
        deleted doIds in _deletedSubShardDoIds so later failed-disable logs
        can be suppressed. Errors out if any deleted object leaks a
        DelayDelete. Extra diagnostics are logged when not running live.
        """
        if self.KeepSubShardObjects:
            return
        isNotLive = not base.cr.isLive()
        if isNotLive:
            # localAvatar may not exist yet; log whatever we can.
            try:
                localAvatar
            except:
                self.notify.info('dumpAllSubShardObjects')
            else:
                self.notify.info('dumpAllSubShardObjects: defaultShard is %s' % localAvatar.defaultShard)
        # Manager-style classes that legitimately live outside the shard;
        # they are excluded from the not-on-defaultShard diagnostics below.
        ignoredClasses = ('MagicWordManager', 'TimeManager', 'DistributedDistrict', 'FriendManager', 'NewsManager', 'ToontownMagicWordManager', 'WelcomeValleyManager', 'DistributedTrophyMgr', 'CatalogManager', 'DistributedBankMgr', 'EstateManager', 'RaceManager', 'SafeZoneManager', 'DeleteManager', 'TutorialManager', 'ToontownDistrict', 'DistributedDeliveryManager', 'DistributedPartyManager', 'AvatarFriendsManager', 'InGameNewsMgr', 'WhitelistMgr', 'TTCodeRedemptionMgr')
        messenger.send('clientCleanup')
        # Release any pending avatar-detail queries before deleting objects.
        for avId, pad in self.__queryAvatarMap.items():
            pad.delayDelete.destroy()
        self.__queryAvatarMap = {}
        delayDeleted = []
        # Python 2: keys() returns a list, so deleting while iterating is safe.
        doIds = self.doId2do.keys()
        for doId in doIds:
            obj = self.doId2do[doId]
            if isNotLive:
                ignoredClass = obj.__class__.__name__ in ignoredClasses
                if not ignoredClass and obj.parentId != localAvatar.defaultShard:
                    self.notify.info('dumpAllSubShardObjects: %s %s parent %s is not defaultShard' % (obj.__class__.__name__, obj.doId, obj.parentId))
            if obj.parentId == localAvatar.defaultShard and obj is not localAvatar:
                if obj.neverDisable:
                    if isNotLive:
                        if not ignoredClass:
                            self.notify.warning('dumpAllSubShardObjects: neverDisable set for %s %s' % (obj.__class__.__name__, obj.doId))
                else:
                    self.deleteObject(doId)
                    self._deletedSubShardDoIds.add(doId)
                    # Track objects kept alive by outstanding DelayDeletes.
                    if obj.getDelayDeleteCount() != 0:
                        delayDeleted.append(obj)
        # Anything still delay-deleted after the sweep is a leak.
        delayDeleteLeaks = []
        for obj in delayDeleted:
            if obj.getDelayDeleteCount() != 0:
                delayDeleteLeaks.append(obj)
        if len(delayDeleteLeaks):
            s = 'dumpAllSubShardObjects:'
            for obj in delayDeleteLeaks:
                s += '\n  could not delete %s (%s), delayDeletes=%s' % (safeRepr(obj), itype(obj), obj.getDelayDeleteNames())
            self.notify.error(s)
        if isNotLive:
            self.notify.info('dumpAllSubShardObjects: doIds left: %s' % self.doId2do.keys())
    def _removeCurrentShardInterest(self, callback):
        """Close the emulated setZone interest, then the uber-zone interest,
        invoking *callback* when both are gone (immediately if no shard
        interest is open)."""
        if self.old_setzone_interest_handle is None:
            self.notify.warning('removeToontownShardInterest: no shard interest open')
            callback()
            return
        # Chain: setZone interest closes -> uber-zone interest closes -> callback.
        self.acceptOnce(ToontownClientRepository.ClearInterestDoneEvent, Functor(self._tcrRemoveUberZoneInterest, callback))
        self._removeEmulatedSetZone(ToontownClientRepository.ClearInterestDoneEvent)
        return
    def _tcrRemoveUberZoneInterest(self, callback):
        """Second step of _removeCurrentShardInterest: close the uber-zone
        interest, then finish via _tcrRemoveShardInterestDone."""
        self.acceptOnce(ToontownClientRepository.ClearInterestDoneEvent, Functor(self._tcrRemoveShardInterestDone, callback))
        self.removeInterest(self.uberZoneInterest, ToontownClientRepository.ClearInterestDoneEvent)
    def _tcrRemoveShardInterestDone(self, callback):
        """Final step of _removeCurrentShardInterest: clear the handle and
        notify the original caller."""
        self.uberZoneInterest = None
        callback()
        return
def _doIdIsOnCurrentShard(self, doId):
if doId == base.localAvatar.defaultShard:
return True
do = self.getDo(doId)
if do:
if do.parentId == base.localAvatar.defaultShard:
return True
return False
    def _wantShardListComplete(self):
        """Called when the shard (district) list has arrived: if any shard
        is ready, fetch detailed district stats before continuing login;
        otherwise go to the noShards state."""
        # NOTE(review): leftover debug print — candidate for removal.
        print self.activeDistrictMap
        if self._shardsAreReady():
            self.acceptOnce(ToontownDistrictStats.EventName(), self.shardDetailStatsComplete)
            ToontownDistrictStats.refresh()
        else:
            self.loginFSM.request('noShards')
    def shardDetailStatsComplete(self):
        """District stats arrived: continue login to the avatar list."""
        self.loginFSM.request('waitForAvatarList')
    def exitWaitForShardList(self):
        """FSM exit handler: drop the district-stats hook, then defer to the
        OTP base class."""
        self.ignore(ToontownDistrictStats.EventName())
        OTPClientRepository.OTPClientRepository.exitWaitForShardList(self)
def fillUpFriendsMap(self):
if self.isFriendsMapComplete():
return 1
if not self.friendsMapPending and not self.friendsListError:
self.notify.warning('Friends list stale; fetching new list.')
self.sendGetFriendsListRequest()
return 0
def isFriend(self, doId):
for friendId, flags in base.localAvatar.friendsList:
if friendId == doId:
self.identifyFriend(doId)
return 1
return 0
def isAvatarFriend(self, doId):
for friendId, flags in base.localAvatar.friendsList:
if friendId == doId:
self.identifyFriend(doId)
return 1
return 0
def getFriendFlags(self, doId):
for friendId, flags in base.localAvatar.friendsList:
if friendId == doId:
return flags
return 0
def isFriendOnline(self, doId):
return self.friendsOnline.has_key(doId)
    def addAvatarToFriendsList(self, avatar):
        # Cache a full avatar object as the friendsMap entry for its doId.
        self.friendsMap[avatar.doId] = avatar
    def identifyFriend(self, doId, source = None):
        # Resolve doId to a friend handle, caching the result in friendsMap.
        # Lookup order: friendsMap cache, live doId2do objects, the object
        # cache, then the player friends manager.  Returns the handle, or
        # None when the avatar cannot be found or is an unexpected type.
        if self.friendsMap.has_key(doId):
            teleportNotify.debug('friend %s in friendsMap' % doId)
            return self.friendsMap[doId]
        avatar = None
        if self.doId2do.has_key(doId):
            teleportNotify.debug('found friend %s in doId2do' % doId)
            avatar = self.doId2do[doId]
        elif self.cache.contains(doId):
            teleportNotify.debug('found friend %s in cache' % doId)
            avatar = self.cache.dict[doId]
        elif self.playerFriendsManager.getAvHandleFromId(doId):
            teleportNotify.debug('found friend %s in playerFriendsManager' % doId)
            avatar = base.cr.playerFriendsManager.getAvHandleFromId(doId)
        else:
            self.notify.warning("Don't know who friend %s is." % doId)
            return
        # Only exact DistributedToon instances (not subclasses) or pets are
        # valid friend objects; anything else is rejected with a warning.
        if not ((isinstance(avatar, DistributedToon.DistributedToon) and avatar.__class__ is DistributedToon.DistributedToon) or isinstance(avatar, DistributedPet.DistributedPet)):
            self.notify.warning('friendsNotify%s: invalid friend object %s' % (choice(source, '(%s)' % source, ''), doId))
            return
        if base.wantPets:
            if avatar.isPet():
                # Fake pets get a lightweight PetHandle; real ones are used
                # directly as their own handle.
                if avatar.bFake:
                    handle = PetHandle.PetHandle(avatar)
                else:
                    handle = avatar
            else:
                handle = FriendHandle.FriendHandle(doId, avatar.getName(), avatar.style, avatar.getPetId())
        else:
            handle = FriendHandle.FriendHandle(doId, avatar.getName(), avatar.style, '')
        teleportNotify.debug('adding %s to friendsMap' % doId)
        self.friendsMap[doId] = handle
        return handle
    def identifyPlayer(self, pId):
        # Account-level (player) friends are resolved by the friends manager.
        return base.cr.playerFriendsManager.getFriendInfo(pId)
def identifyAvatar(self, doId):
if self.doId2do.has_key(doId):
return self.doId2do[doId]
else:
return self.identifyFriend(doId)
def isFriendsMapComplete(self):
for friendId, flags in base.localAvatar.friendsList:
if self.identifyFriend(friendId) == None:
return 0
if base.wantPets and base.localAvatar.hasPet():
print str(self.friendsMap)
print str(self.friendsMap.has_key(base.localAvatar.getPetId()))
if self.friendsMap.has_key(base.localAvatar.getPetId()) == None:
return 0
return 1
    def removeFriend(self, avatarId):
        # Break friendship with avatarId: notify the other toon, tell the
        # server, revoke estate access, then drop the local list entry.
        base.localAvatar.sendUpdate('friendsNotify', [base.localAvatar.doId, 1], sendToId=avatarId)
        datagram = PyDatagram()
        datagram.addUint16(CLIENT_REMOVE_FRIEND)
        datagram.addUint32(avatarId)
        self.send(datagram)
        self.estateMgr.removeFriend(base.localAvatar.doId, avatarId)
        for pair in base.localAvatar.friendsList:
            friendId = pair[0]
            if friendId == avatarId:
                base.localAvatar.friendsList.remove(pair)
                # Each avatar appears at most once; stop after the first hit.
                return
    def clearFriendState(self):
        # Reset all friend-related caches and flags to their initial state.
        self.friendsMap = {}
        self.friendsOnline = {}
        self.friendsMapPending = 0
        self.friendsListError = 0
def sendGetFriendsListRequest(self):
if self.astronSupport:
print 'sendGetFriendsListRequest TODO'
else:
self.friendsMapPending = 1
self.friendsListError = 0
datagram = PyDatagram()
datagram.addUint16(CLIENT_GET_FRIEND_LIST)
self.send(datagram)
def cleanPetsFromFriendsMap(self):
for objId, obj in self.friendsMap.items():
from toontown.pets import DistributedPet
if isinstance(obj, DistributedPet.DistributedPet):
print 'Removing %s reference from the friendsMap' % obj.getName()
del self.friendsMap[objId]
def removePetFromFriendsMap(self):
doId = base.localAvatar.getPetId()
if doId and self.friendsMap.has_key(doId):
del self.friendsMap[doId]
    def addPetToFriendsMap(self, callback = None):
        # Asynchronously fetch the local toon's pet and cache a PetHandle
        # for it in friendsMap; callback (if given) fires when done, whether
        # or not any work was needed.
        doId = base.localAvatar.getPetId()
        if not doId or self.friendsMap.has_key(doId):
            if callback:
                callback()
            return
        def petDetailsCallback(petAvatar):
            # The fetched avatar is only needed to build the handle; tear it
            # down immediately afterwards.
            handle = PetHandle.PetHandle(petAvatar)
            self.friendsMap[doId] = handle
            petAvatar.disable()
            petAvatar.delete()
            if callback:
                callback()
            if self._proactiveLeakChecks:
                petAvatar.detectLeaks()
        PetDetail.PetDetail(doId, petDetailsCallback)
    def handleGetFriendsList(self, di):
        # Server reply to CLIENT_GET_FRIEND_LIST: rebuild friendsMap from
        # the datagram, then (optionally) add the local toon's pet before
        # announcing 'friendsMapComplete'.
        error = di.getUint8()
        if error:
            self.notify.warning('Got error return from friends list.')
            self.friendsListError = 1
        else:
            count = di.getUint16()
            for i in range(0, count):
                # Wire format per friend: doId, name, DNA net-string, petId.
                doId = di.getUint32()
                name = di.getString()
                dnaString = di.getString()
                dna = ToonDNA.ToonDNA()
                dna.makeFromNetString(dnaString)
                petId = di.getUint32()
                handle = FriendHandle.FriendHandle(doId, name, dna, petId)
                self.friendsMap[doId] = handle
                # Keep the online table pointing at the fresh handle.
                if self.friendsOnline.has_key(doId):
                    self.friendsOnline[doId] = handle
                # Apply chat settings that arrived before the handle existed.
                if self.friendPendingChatSettings.has_key(doId):
                    self.notify.debug('calling setCommonAndWL %s' % str(self.friendPendingChatSettings[doId]))
                    handle.setCommonAndWhitelistChatFlags(*self.friendPendingChatSettings[doId])
            if base.wantPets and base.localAvatar.hasPet():
                # Completion is deferred until the pet handle is cached too.
                def handleAddedPet():
                    self.friendsMapPending = 0
                    messenger.send('friendsMapComplete')
                self.addPetToFriendsMap(handleAddedPet)
                return
        self.friendsMapPending = 0
        messenger.send('friendsMapComplete')
    def handleGetFriendsListExtended(self, di):
        # Parse an extended friends-list reply into FriendHandle objects and
        # broadcast them; entries with a missing name or DNA are skipped.
        avatarHandleList = []
        error = di.getUint8()
        if error:
            self.notify.warning('Got error return from friends list extended.')
        else:
            count = di.getUint16()
            for i in range(0, count):
                abort = 0
                doId = di.getUint32()
                name = di.getString()
                if name == '':
                    abort = 1
                dnaString = di.getString()
                if dnaString == '':
                    abort = 1
                else:
                    dna = ToonDNA.ToonDNA()
                    dna.makeFromNetString(dnaString)
                # petId must always be read to keep the stream aligned,
                # even when this entry is being skipped.
                petId = di.getUint32()
                if not abort:
                    handle = FriendHandle.FriendHandle(doId, name, dna, petId)
                    avatarHandleList.append(handle)
        if avatarHandleList:
            messenger.send('gotExtraFriendHandles', [avatarHandleList])
    def handleFriendOnline(self, di):
        # A friend came online.  Chat flags are optional trailing fields in
        # the datagram and default to 0 when absent.
        doId = di.getUint32()
        commonChatFlags = 0
        whitelistChatFlags = 0
        if di.getRemainingSize() > 0:
            commonChatFlags = di.getUint8()
        if di.getRemainingSize() > 0:
            whitelistChatFlags = di.getUint8()
        self.notify.debug('Friend %d now online. common=%d whitelist=%d' % (doId, commonChatFlags, whitelistChatFlags))
        if not self.friendsOnline.has_key(doId):
            self.friendsOnline[doId] = self.identifyFriend(doId)
            messenger.send('friendOnline', [doId, commonChatFlags, whitelistChatFlags])
        # identifyFriend() may have returned None; remember the chat flags
        # so they can be applied once the real handle shows up.
        if not self.friendsOnline[doId]:
            self.friendPendingChatSettings[doId] = (commonChatFlags, whitelistChatFlags)
def handleFriendOffline(self, di):
doId = di.getUint32()
self.notify.debug('Friend %d now offline.' % doId)
try:
del self.friendsOnline[doId]
messenger.send('friendOffline', [doId])
except:
pass
def getFirstBattle(self):
from toontown.battle import DistributedBattleBase
for dobj | |
# -*- coding: utf-8 -*-
"""Interface to Lightnet object proposals."""
import logging
from os.path import expanduser, join
from wbia import constants as const
import utool as ut
import numpy as np
import cv2
import random
import tqdm
import time
import os
import copy
import PIL
# Module-level plumbing: utool injection and the package logger.
(print, rrr, profile) = ut.inject2(__name__, '[densenet]')
logger = logging.getLogger('wbia')
# Parallel data loading is disabled when running inside a container.
PARALLEL = not const.CONTAINERIZED
# Side length (pixels) of the square network input.
INPUT_SIZE = 224
# Model tag -> downloadable weights archive.
# NOTE(review): some near-duplicate keys ('salanader_fire_*' vs
# 'salamander_fire_*', 'seaturtle_v4' vs 'sea_turtle_v4') look like
# typo/alias variants kept for backward compatibility -- confirm before
# pruning any of them.
ARCHIVE_URL_DICT = {
    'canonical_zebra_grevys_v1': 'https://wildbookiarepository.azureedge.net/models/classifier.canonical.zebra_grevys.v1.zip',
    'canonical_zebra_grevys_v2': 'https://wildbookiarepository.azureedge.net/models/classifier.canonical.zebra_grevys.v2.zip',
    'canonical_zebra_grevys_v3': 'https://wildbookiarepository.azureedge.net/models/classifier.canonical.zebra_grevys.v3.zip',
    'canonical_zebra_grevys_v4': 'https://wildbookiarepository.azureedge.net/models/classifier.canonical.zebra_grevys.v4.zip',
    'canonical_giraffe_reticulated_v1': 'https://wildbookiarepository.azureedge.net/models/classifier.canonical.giraffe_reticulated.v1.zip',
    'ryan_densenet_v1': 'https://wildbookiarepository.azureedge.net/models/classifier.cameratrap.ryan.densenet.v1.zip',
    'ryan_densenet_v2': 'https://wildbookiarepository.azureedge.net/models/classifier.cameratrap.ryan.densenet.v2.zip',
    'megan_argentina_v1': 'https://wildbookiarepository.azureedge.net/models/classifier.cameratrap.megan.argentina.densenet.v1.zip',
    'megan_kenya_v1': 'https://wildbookiarepository.azureedge.net/models/classifier.cameratrap.megan.kenya.densenet.v1.zip',
    'megan_kenya_v2': 'https://wildbookiarepository.azureedge.net/models/classifier.cameratrap.megan.kenya.densenet.v2.zip',
    'laterals_v0': 'https://wildbookiarepository.azureedge.net/models/classifier.cameratrap.laterals.densenet.v0.zip',
    'belly_v0': 'https://wildbookiarepository.azureedge.net/models/classifier.cameratrap.right_whale_belly.densenet.v0.zip',
    'zebra_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.zebra_grevys-zebra_plains.v1.zip',
    'zebra_mountain_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.zebra_mountain.v0.zip',
    'giraffe_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.giraffe.v1.zip',
    'jaguar_v3': 'https://wildbookiarepository.azureedge.net/models/labeler.jaguar.v3.zip',
    'lynx_v3': 'https://wildbookiarepository.azureedge.net/models/labeler.lynx.v3.zip',
    'manta_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.manta_ray_giant.v1.zip',
    'seaturtle_v3': 'https://wildbookiarepository.azureedge.net/models/labeler.seaturtle.v3.zip',
    'hendrik_dorsal_v2': 'https://wildbookiarepository.azureedge.net/models/labeler.hendrik_dorsal.v2.zip',
    'spotted_dolphin_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.dolphin_spotted.v0.zip',
    'spotted_skunk_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.skunk_spotted.v0.zip',
    'humpback_dorsal': 'https://wildbookiarepository.azureedge.net/models/labeler.whale_humpback.dorsal.v0.zip',
    'orca_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.whale_orca.v0.zip',
    'whale_sperm_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.whale_sperm.v0.zip',
    'fins_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.fins.v0.zip',
    'fins_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.fins.v1.1.zip',
    'fins_enforcement_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.fins.enforcement.v0.zip',
    'wilddog_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.wild_dog.v0.zip',
    'wilddog_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.wild_dog.v1.zip',
    'wilddog_v2': 'https://wildbookiarepository.azureedge.net/models/labeler.wild_dog.v2.zip',
    'wilddog_v3': 'https://wildbookiarepository.azureedge.net/models/labeler.wild_dog.v3.zip',
    'leopard_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.leopard.v0.zip',
    'cheetah_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.cheetah.v1.zip',
    'cheetah_v2': 'https://wildbookiarepository.azureedge.net/models/labeler.cheetah.v2.zip',
    'hyaena_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.hyaena.v0.zip',
    'wild_horse_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.wild_horse.v0.zip',
    'seadragon_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.seadragon.v0.zip',
    'seadragon_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.seadragon.v1.zip',
    'seadragon_v2': 'https://wildbookiarepository.azureedge.net/models/labeler.seadragon.v2.zip',
    'jaguar_v4': 'https://wildbookiarepository.azureedge.net/models/labeler.jaguar.v4.zip',
    'lynx_v4': 'https://wildbookiarepository.azureedge.net/models/labeler.lynx.v4.zip',
    'manta_v2': 'https://wildbookiarepository.azureedge.net/models/labeler.manta_ray_giant.v2.zip',
    'seaturtle_v4': 'https://wildbookiarepository.azureedge.net/models/labeler.seaturtle.v4.zip',
    'nassau_grouper_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.grouper_nassau.v0.zip',
    'nassau_grouper_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.grouper_nassau.v1.zip',
    'nassau_grouper_v2': 'https://wildbookiarepository.azureedge.net/models/labeler.grouper_nassau.v2.zip',
    'nassau_grouper_v3': 'https://wildbookiarepository.azureedge.net/models/labeler.grouper_nassau.v3.zip',
    'salanader_fire_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.salamander_fire.v0.zip',
    'salanader_fire_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.salamander_fire.v1.zip',
    'salamander_fire_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.salamander_fire.v0.zip',
    'salamander_fire_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.salamander_fire.v1.zip',
    'salamander_fire_v2': 'https://wildbookiarepository.azureedge.net/models/labeler.salamander_fire.v2.zip',
    'spotted_dolphin_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.dolphin_spotted.v1.zip',
    'spotted_skunk_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.skunk_spotted.v1.zip',
    'monk_seal_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.mediterranean_monk_seal.v0.zip',
    'iot_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.iot.v0.zip',
    'right_whale_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.rightwhale.v0.zip',
    'whale_shark_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.whale_shark.v0.zip',
    'flukebook_v1': 'https://wildbookiarepository.azureedge.net/models/classifier2.flukebook.v1.zip',
    'rightwhale_v5': 'https://wildbookiarepository.azureedge.net/models/labeler.rightwhale.v5.zip',
    'snow_leopard_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.snow_leopard.v0.zip',
    'grey_whale_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.whale_grey.v0.zip',
    'beluga_whale_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.whale_beluga.v0.zip',
    'beluga_whale_v1': 'https://wildbookiarepository.azureedge.net/models/labeler.whale_beluga.v1.zip',
    'sea_turtle_v4': 'https://wildbookiarepository.azureedge.net/models/labeler.sea_turtle.v4.zip',
    'seals_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.seals.v0.zip',
    'spotted_eagle_ray_v0': 'https://wildbookiarepository.azureedge.net/models/labeler.spotted_eagle_ray.v0.zip',
}
# Optional PyTorch import: training/inference below requires torch and
# torchvision, but the module must still import when they are absent
# (unless ut.SUPER_STRICT, in which case the ImportError propagates).
if not ut.get_argflag('--no-pytorch'):
    try:
        import torch
        import torch.nn as nn
        import torch.optim as optim
        import torchvision
        logger.info('PyTorch Version: %s' % (torch.__version__,))
        logger.info('Torchvision Version: %s' % (torchvision.__version__,))
    except ImportError:
        logger.info('WARNING Failed to import pytorch. PyTorch is unavailable')
        if ut.SUPER_STRICT:
            raise
try:
    import imgaug  # NOQA
    class Augmentations(object):
        """Base wrapper giving imgaug pipelines a torchvision-style __call__."""
        def __call__(self, img):
            # PIL image -> ndarray; imgaug operates on numpy arrays.
            img = np.array(img)
            return self.aug.augment_image(img)
    class TrainAugmentations(Augmentations):
        """Training-time pipeline: resize, photometric jitter, affine warps."""
        def __init__(self, blur=True, flip=False, rotate=10, shear=10, **kwargs):
            from imgaug import augmenters as iaa
            sequence = []
            # Resize to the network input size, then photometric jitter.
            # NOTE(review): iaa.Scale was renamed iaa.Resize in newer imgaug
            # releases -- this code assumes an older imgaug; confirm the pin.
            sequence += [
                iaa.Scale((INPUT_SIZE, INPUT_SIZE)),
                iaa.ContrastNormalization((0.75, 1.25)),
                iaa.AddElementwise((-10, 10), per_channel=0.5),
                iaa.AddToHueAndSaturation(value=(-20, 20), per_channel=True),
                iaa.Multiply((0.75, 1.25)),
            ]
            # Geometric distortion and partial desaturation.
            sequence += [
                iaa.PiecewiseAffine(scale=(0.0005, 0.005)),
                iaa.Affine(
                    rotate=(-rotate, rotate), shear=(-shear, shear), mode='symmetric'
                ),
                iaa.Grayscale(alpha=(0.0, 0.5)),
            ]
            if flip:
                sequence += [
                    iaa.Fliplr(0.5),
                ]
            if blur:
                sequence += [
                    iaa.Sometimes(0.01, iaa.GaussianBlur(sigma=(0, 1.0))),
                ]
            self.aug = iaa.Sequential(sequence)
    class ValidAugmentations(Augmentations):
        """Validation/test pipeline: resize only, no random distortion."""
        def __init__(self, **kwargs):
            from imgaug import augmenters as iaa
            self.aug = iaa.Sequential([iaa.Scale((INPUT_SIZE, INPUT_SIZE))])
    # Phase name -> augmentation class, consumed by _init_transforms().
    AUGMENTATION = {
        'train': TrainAugmentations,
        'val': ValidAugmentations,
        'test': ValidAugmentations,
    }
except ImportError:
    AUGMENTATION = {}
    logger.info(
        'WARNING Failed to import imgaug. '
        'install with pip install git+https://github.com/aleju/imgaug'
    )
    if ut.SUPER_STRICT:
        raise
def _init_transforms(**kwargs):
    """Build a torchvision transform pipeline for each phase in AUGMENTATION.

    Each pipeline applies the phase's imgaug augmentation, converts back to a
    PIL image, tensorizes, and normalizes with the ImageNet mean/std.
    """
    transforms = {}
    for phase in AUGMENTATION.keys():
        transforms[phase] = torchvision.transforms.Compose(
            [
                AUGMENTATION[phase](**kwargs),
                torchvision.transforms.Lambda(PIL.Image.fromarray),
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize(
                    [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
                ),
            ]
        )
    return transforms
class ImageFilePathList(torch.utils.data.Dataset):
    """Dataset over explicit image file paths, with optional parallel targets.

    Args:
        filepaths (list): image file paths.
        targets (list, optional): per-image labels, parallel to filepaths.
        transform (callable, optional): applied to each loaded image.
        target_transform (callable, optional): applied to each target.
    """
    def __init__(self, filepaths, targets=None, transform=None, target_transform=None):
        from torchvision.datasets.folder import default_loader
        # self.targets is a flag recording whether labels were supplied.
        self.targets = targets is not None
        args = (filepaths, targets) if self.targets else (filepaths,)
        self.samples = list(zip(*args))
        if self.targets:
            # BUG FIX: the class list must come from column 1 of the
            # (path, target) sample pairs; the old code passed the boolean
            # self.targets to ut.take_column, which cannot be iterated.
            self.classes = sorted(set(ut.take_column(self.samples, 1)))
            self.class_to_idx = {self.classes[i]: i for i in range(len(self.classes))}
        else:
            self.classes, self.class_to_idx = None, None
        self.loader = default_loader
        self.transform = transform
        self.target_transform = target_transform
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (sample, target) if targets were supplied, else (sample,).
        """
        sample = self.samples[index]
        if self.targets:
            path, target = sample
        else:
            path = sample[0]
            target = None
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        result = (sample, target) if self.targets else (sample,)
        return result
    def __len__(self):
        return len(self.samples)
    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of samples: {}\n'.format(self.__len__())
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(
            tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))
        )
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(
            tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))
        )
        return fmt_str
class StratifiedSampler(torch.utils.data.sampler.Sampler):
    """Sampler that, during training, caps every class at the same number of
    samples per epoch (class balancing); validation/test phases iterate the
    dataset in order.
    """
    def __init__(self, dataset, phase, multiplier=1.0):
        self.dataset = dataset
        self.phase = phase
        self.training = self.phase == 'train'
        # Labels come from column 1 of the dataset's (path, label) samples.
        self.labels = np.array(ut.take_column(dataset.samples, 1))
        self.classes = set(self.labels)
        self.indices = {
            cls: list(np.where(self.labels == cls)[0]) for cls in self.classes
        }
        self.counts = {cls: len(self.indices[cls]) for cls in self.classes}
        # Per-class cap: multiplier times the rarest class's count.
        self.min = min(self.counts.values())
        self.min = int(np.around(multiplier * self.min))
        if self.training:
            # Epoch length is the sum of per-class capped counts.
            self.total = 0
            for cls in self.indices:
                num_in_class = len(self.indices[cls])
                num_samples = min(self.min, num_in_class)
                self.total += num_samples
        else:
            self.total = len(self.labels)
        args = (
            self.phase,
            len(self.labels),
            len(self.classes),
            self.min,
            self.total,
            multiplier,
        )
        logger.info(
            'Initialized Sampler for %r (sampling %d for %d classes | min %d per class, %d total, %0.02f multiplier)'
            % args
        )
    def __iter__(self):
        if self.training:
            # Draw a fresh random, class-balanced subset every epoch.
            ret_list = []
            for cls in self.indices:
                num_in_class = len(self.indices[cls])
                num_samples = min(self.min, num_in_class)
                ret_list += random.sample(self.indices[cls], num_samples)
            random.shuffle(ret_list)
        else:
            ret_list = range(self.total)
        assert len(ret_list) == self.total
        return iter(ret_list)
    def __len__(self):
        return self.total
def finetune(model, dataloaders, criterion, optimizer, scheduler, device, num_epochs=128):
    """Fine-tune ``model`` and keep the weights with the best val accuracy.

    Args:
        model: torch.nn.Module to train (updated in place).
        dataloaders: dict phase -> DataLoader for 'train' and 'val'.
        criterion: loss function.
        optimizer: torch optimizer.
        scheduler: LR scheduler, stepped with the validation loss each epoch
            (assumes a ReduceLROnPlateau-style ``step(metric)`` signature --
            TODO confirm).
        device: torch.device to run on.
        num_epochs (int): number of epochs to run.

    Returns:
        the model, loaded with the best-validation-accuracy weights.
    """
    phases = ['train', 'val']
    start = time.time()
    best_accuracy = 0.0
    best_model_state = copy.deepcopy(model.state_dict())
    last_loss = {}
    best_loss = {}
    for epoch in range(num_epochs):
        start_batch = time.time()
        lr = optimizer.param_groups[0]['lr']
        logger.info('Epoch {}/{} (lr = {:0.06f})'.format(epoch, num_epochs - 1, lr))
        logger.info('-' * 10)
        # Each epoch has a training and validation phase
        for phase in phases:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            seen = 0
            for inputs, labels in tqdm.tqdm(dataloaders[phase], desc=phase):
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics (loss.item() is the batch mean, so re-weight by
                # batch size before averaging over all seen samples)
                seen += len(inputs)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / seen
            epoch_acc = running_corrects.double() / seen
            last_loss[phase] = epoch_loss
            if phase not in best_loss:
                best_loss[phase] = np.inf
            # '!' marks a new best loss for this phase in the log line.
            flag = epoch_loss < best_loss[phase]
            if flag:
                best_loss[phase] = epoch_loss
            logger.info(
                '{:<5} Loss: {:.4f} Acc: {:.4f} {}'.format(
                    phase, epoch_loss, epoch_acc, '!' if flag else ''
                )
            )
            # deep copy the model
            if phase == 'val' and epoch_acc > best_accuracy:
                best_accuracy = epoch_acc
                logger.info('\tFound better model!')
                best_model_state = copy.deepcopy(model.state_dict())
            if phase == 'val':
                scheduler.step(epoch_loss)
        time_elapsed_batch = time.time() - start_batch
        logger.info(
            'time: {:.0f}m {:.0f}s'.format(
                time_elapsed_batch // 60, time_elapsed_batch % 60
            )
        )
        # train/val loss ratio: a rough over/under-fitting indicator.
        ratio = last_loss['train'] / last_loss['val']
        logger.info('ratio: {:.04f}'.format(ratio))
        logger.info('\n')
    time_elapsed = time.time() - start
    logger.info(
        'Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60
        )
    )
    logger.info('Best val Acc: {:4f}'.format(best_accuracy))
    # load best model weights
    model.load_state_dict(best_model_state)
    return model
def visualize_augmentations(dataset, augmentation, tag, num_per_class=10, **kwargs):
    """Save a PNG grid to the desktop showing ``augmentation`` applied to up
    to ``num_per_class`` images per class from ``dataset``.

    Row 0 holds the unaugmented originals (bordered green/red by label);
    each subsequent row is one independent augmentation pass.
    """
    import matplotlib.pyplot as plt
    samples = dataset.samples
    flags = np.array(ut.take_column(samples, 1))
    logger.info('Dataset %r has %d samples' % (tag, len(flags)))
    # Pick a random subset of indices per class.
    indices = []
    for flag in set(flags):
        index_list = list(np.where(flags == flag)[0])
        random.shuffle(index_list)
        indices += index_list[:num_per_class]
    samples = ut.take(samples, indices)
    paths = ut.take_column(samples, 0)
    flags = ut.take_column(samples, 1)
    # cv2 reads BGR; flip the channel axis to get RGB.
    images = [np.array(cv2.imread(path)) for path in paths]
    images = [image[:, :, ::-1] for image in images]
    images_ = []
    for image, flag in zip(images, flags):
        image_ = image.copy()
        color = (0, 255, 0) if flag else (255, 0, 0)
        cv2.rectangle(image_, (1, 1), (INPUT_SIZE - 1, INPUT_SIZE - 1), color, 3)
        images_.append(image_)
    canvas = np.hstack(images_)
    canvas_list = [canvas]
    augment = augmentation(**kwargs)
    # One extra row per selected image count, each independently augmented.
    for index in range(len(indices) - 1):
        logger.info(index)
        images_ = [augment(image.copy()) for image in images]
        canvas = np.hstack(images_)
        canvas_list.append(canvas)
    canvas = np.vstack(canvas_list)
    canvas_filepath = expanduser(
        join('~', 'Desktop', 'densenet-augmentation-%s.png' % (tag,))
    )
    plt.imsave(canvas_filepath, canvas)
def train(
data_path,
output_path,
batch_size=48,
class_weights={},
multi=PARALLEL,
sample_multiplier=4.0,
allow_missing_validation_classes=False,
**kwargs,
):
# Detect if we have a GPU available
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
using_gpu = str(device) != 'cpu'
phases = ['train', 'val']
logger.info('Initializing Datasets and Dataloaders...')
# Create training and validation datasets
transforms = _init_transforms(**kwargs)
datasets = {
phase: torchvision.datasets.ImageFolder(
os.path.join(data_path, phase), transforms[phase]
)
for phase in phases
}
# Create training and validation dataloaders
dataloaders = {
phase: torch.utils.data.DataLoader(
datasets[phase],
sampler=StratifiedSampler(
datasets[phase], phase, multiplier=sample_multiplier
),
batch_size=batch_size,
num_workers=batch_size // 8,
pin_memory=using_gpu,
)
for phase in phases
}
train_classes = datasets['train'].classes
val_classes = datasets['val'].classes
if not allow_missing_validation_classes:
assert len(train_classes) == len(val_classes)
num_classes = len(train_classes)
logger.info('Initializing Model...')
# Initialize the model for this run
model = torchvision.models.densenet201(pretrained=True)
num_ftrs = model.classifier.in_features
model.classifier = nn.Linear(num_ftrs, num_classes)
# Send the model | |
+ m.x486 + m.x516 + m.x546 + m.x576 + m.x606
+ m.x636 + m.x666 + m.x696 + m.x726 == 1)
# Assignment constraints (generated model code): for each index i, the
# variables x_i, x_{i+30}, ..., x_{i+720} (stride 30, 25 terms) must sum to
# exactly 1, i.e. each item is assigned to exactly one of the 25 groups.
m.c758 = Constraint(expr= m.x7 + m.x37 + m.x67 + m.x97 + m.x127 + m.x157 + m.x187 + m.x217 + m.x247 + m.x277 + m.x307
                          + m.x337 + m.x367 + m.x397 + m.x427 + m.x457 + m.x487 + m.x517 + m.x547 + m.x577 + m.x607
                          + m.x637 + m.x667 + m.x697 + m.x727 == 1)
m.c759 = Constraint(expr= m.x8 + m.x38 + m.x68 + m.x98 + m.x128 + m.x158 + m.x188 + m.x218 + m.x248 + m.x278 + m.x308
                          + m.x338 + m.x368 + m.x398 + m.x428 + m.x458 + m.x488 + m.x518 + m.x548 + m.x578 + m.x608
                          + m.x638 + m.x668 + m.x698 + m.x728 == 1)
m.c760 = Constraint(expr= m.x9 + m.x39 + m.x69 + m.x99 + m.x129 + m.x159 + m.x189 + m.x219 + m.x249 + m.x279 + m.x309
                          + m.x339 + m.x369 + m.x399 + m.x429 + m.x459 + m.x489 + m.x519 + m.x549 + m.x579 + m.x609
                          + m.x639 + m.x669 + m.x699 + m.x729 == 1)
m.c761 = Constraint(expr= m.x10 + m.x40 + m.x70 + m.x100 + m.x130 + m.x160 + m.x190 + m.x220 + m.x250 + m.x280
                          + m.x310 + m.x340 + m.x370 + m.x400 + m.x430 + m.x460 + m.x490 + m.x520 + m.x550 + m.x580
                          + m.x610 + m.x640 + m.x670 + m.x700 + m.x730 == 1)
m.c762 = Constraint(expr= m.x11 + m.x41 + m.x71 + m.x101 + m.x131 + m.x161 + m.x191 + m.x221 + m.x251 + m.x281
                          + m.x311 + m.x341 + m.x371 + m.x401 + m.x431 + m.x461 + m.x491 + m.x521 + m.x551 + m.x581
                          + m.x611 + m.x641 + m.x671 + m.x701 + m.x731 == 1)
m.c763 = Constraint(expr= m.x12 + m.x42 + m.x72 + m.x102 + m.x132 + m.x162 + m.x192 + m.x222 + m.x252 + m.x282
                          + m.x312 + m.x342 + m.x372 + m.x402 + m.x432 + m.x462 + m.x492 + m.x522 + m.x552 + m.x582
                          + m.x612 + m.x642 + m.x672 + m.x702 + m.x732 == 1)
m.c764 = Constraint(expr= m.x13 + m.x43 + m.x73 + m.x103 + m.x133 + m.x163 + m.x193 + m.x223 + m.x253 + m.x283
                          + m.x313 + m.x343 + m.x373 + m.x403 + m.x433 + m.x463 + m.x493 + m.x523 + m.x553 + m.x583
                          + m.x613 + m.x643 + m.x673 + m.x703 + m.x733 == 1)
m.c765 = Constraint(expr= m.x14 + m.x44 + m.x74 + m.x104 + m.x134 + m.x164 + m.x194 + m.x224 + m.x254 + m.x284
                          + m.x314 + m.x344 + m.x374 + m.x404 + m.x434 + m.x464 + m.x494 + m.x524 + m.x554 + m.x584
                          + m.x614 + m.x644 + m.x674 + m.x704 + m.x734 == 1)
m.c766 = Constraint(expr= m.x15 + m.x45 + m.x75 + m.x105 + m.x135 + m.x165 + m.x195 + m.x225 + m.x255 + m.x285
                          + m.x315 + m.x345 + m.x375 + m.x405 + m.x435 + m.x465 + m.x495 + m.x525 + m.x555 + m.x585
                          + m.x615 + m.x645 + m.x675 + m.x705 + m.x735 == 1)
m.c767 = Constraint(expr= m.x16 + m.x46 + m.x76 + m.x106 + m.x136 + m.x166 + m.x196 + m.x226 + m.x256 + m.x286
                          + m.x316 + m.x346 + m.x376 + m.x406 + m.x436 + m.x466 + m.x496 + m.x526 + m.x556 + m.x586
                          + m.x616 + m.x646 + m.x676 + m.x706 + m.x736 == 1)
m.c768 = Constraint(expr= m.x17 + m.x47 + m.x77 + m.x107 + m.x137 + m.x167 + m.x197 + m.x227 + m.x257 + m.x287
                          + m.x317 + m.x347 + m.x377 + m.x407 + m.x437 + m.x467 + m.x497 + m.x527 + m.x557 + m.x587
                          + m.x617 + m.x647 + m.x677 + m.x707 + m.x737 == 1)
m.c769 = Constraint(expr= m.x18 + m.x48 + m.x78 + m.x108 + m.x138 + m.x168 + m.x198 + m.x228 + m.x258 + m.x288
                          + m.x318 + m.x348 + m.x378 + m.x408 + m.x438 + m.x468 + m.x498 + m.x528 + m.x558 + m.x588
                          + m.x618 + m.x648 + m.x678 + m.x708 + m.x738 == 1)
m.c770 = Constraint(expr= m.x19 + m.x49 + m.x79 + m.x109 + m.x139 + m.x169 + m.x199 + m.x229 + m.x259 + m.x289
                          + m.x319 + m.x349 + m.x379 + m.x409 + m.x439 + m.x469 + m.x499 + m.x529 + m.x559 + m.x589
                          + m.x619 + m.x649 + m.x679 + m.x709 + m.x739 == 1)
m.c771 = Constraint(expr= m.x20 + m.x50 + m.x80 + m.x110 + m.x140 + m.x170 + m.x200 + m.x230 + m.x260 + m.x290
                          + m.x320 + m.x350 + m.x380 + m.x410 + m.x440 + m.x470 + m.x500 + m.x530 + m.x560 + m.x590
                          + m.x620 + m.x650 + m.x680 + m.x710 + m.x740 == 1)
m.c772 = Constraint(expr= m.x21 + m.x51 + m.x81 + m.x111 + m.x141 + m.x171 + m.x201 + m.x231 + m.x261 + m.x291
                          + m.x321 + m.x351 + m.x381 + m.x411 + m.x441 + m.x471 + m.x501 + m.x531 + m.x561 + m.x591
                          + m.x621 + m.x651 + m.x681 + m.x711 + m.x741 == 1)
m.c773 = Constraint(expr= m.x22 + m.x52 + m.x82 + m.x112 + m.x142 + m.x172 + m.x202 + m.x232 + m.x262 + m.x292
                          + m.x322 + m.x352 + m.x382 + m.x412 + m.x442 + m.x472 + m.x502 + m.x532 + m.x562 + m.x592
                          + m.x622 + m.x652 + m.x682 + m.x712 + m.x742 == 1)
m.c774 = Constraint(expr= m.x23 + m.x53 + m.x83 + m.x113 + m.x143 + m.x173 + m.x203 + m.x233 + m.x263 + m.x293
                          + m.x323 + m.x353 + m.x383 + m.x413 + m.x443 + m.x473 + m.x503 + m.x533 + m.x563 + m.x593
                          + m.x623 + m.x653 + m.x683 + m.x713 + m.x743 == 1)
m.c775 = Constraint(expr= m.x24 + m.x54 + m.x84 + m.x114 + m.x144 + m.x174 + m.x204 + m.x234 + m.x264 + m.x294
                          + m.x324 + m.x354 + m.x384 + m.x414 + m.x444 + m.x474 + m.x504 + m.x534 + m.x564 + m.x594
                          + m.x624 + m.x654 + m.x684 + m.x714 + m.x744 == 1)
m.c776 = Constraint(expr= m.x25 + m.x55 + m.x85 + m.x115 + m.x145 + m.x175 + m.x205 + m.x235 + m.x265 + m.x295
                          + m.x325 + m.x355 + m.x385 + m.x415 + m.x445 + m.x475 + m.x505 + m.x535 + m.x565 + m.x595
                          + m.x625 + m.x655 + m.x685 + m.x715 + m.x745 == 1)
m.c777 = Constraint(expr= m.x26 + m.x56 + m.x86 + m.x116 + m.x146 + m.x176 + m.x206 + m.x236 + m.x266 + m.x296
                          + m.x326 + m.x356 + m.x386 + m.x416 + m.x446 + m.x476 + m.x506 + m.x536 + m.x566 + m.x596
                          + m.x626 + m.x656 + m.x686 + m.x716 + m.x746 == 1)
m.c778 = Constraint(expr= m.x27 + m.x57 + m.x87 + m.x117 + m.x147 + m.x177 + m.x207 + m.x237 + m.x267 + m.x297
                          + m.x327 + m.x357 + m.x387 + m.x417 + m.x447 + m.x477 + m.x507 + m.x537 + m.x567 + m.x597
                          + m.x627 + m.x657 + m.x687 + m.x717 + m.x747 == 1)
m.c779 = Constraint(expr= m.x28 + m.x58 + m.x88 + m.x118 + m.x148 + m.x178 + m.x208 + m.x238 + m.x268 + m.x298
                          + m.x328 + m.x358 + m.x388 + m.x418 + m.x448 + m.x478 + m.x508 + m.x538 + m.x568 + m.x598
                          + m.x628 + m.x658 + m.x688 + m.x718 + m.x748 == 1)
m.c780 = Constraint(expr= m.x29 + m.x59 + m.x89 + m.x119 + m.x149 + m.x179 + m.x209 + m.x239 + m.x269 + m.x299
                          + m.x329 + m.x359 + m.x389 + m.x419 + m.x449 + m.x479 + m.x509 + m.x539 + m.x569 + m.x599
                          + m.x629 + m.x659 + m.x689 + m.x719 + m.x749 == 1)
m.c781 = Constraint(expr= m.x30 + m.x60 + m.x90 + m.x120 + m.x150 + m.x180 + m.x210 + m.x240 + m.x270 + m.x300
+ m.x330 + m.x360 + m.x390 + m.x420 + m.x450 + m.x480 + m.x510 + m.x540 + m.x570 + m.x600
| |
"""As an open source project, we collect usage statistics to inform development priorities.
For more information, check out the docs at https://docs.dagster.io/install/telemetry/
To see the logs we send, inspect $DAGSTER_HOME/logs/ if $DAGSTER_HOME is set or ~/.dagster/logs/
See class TelemetryEntry for logged fields.
For local development:
Spin up local telemetry server and set DAGSTER_TELEMETRY_URL = 'http://localhost:3000/actions'
To test RotatingFileHandler, can set MAX_BYTES = 500
"""
import datetime
import hashlib
import json
import logging
import os
import sys
import uuid
import zlib
from collections import namedtuple
from functools import wraps
from logging.handlers import RotatingFileHandler
import click
import requests
import six
import yaml
from dagster import check
from dagster.core.definitions.executable import ExecutablePipeline
from dagster.core.definitions.reconstructable import (
ReconstructablePipeline,
ReconstructableRepository,
get_ephemeral_repository_name,
)
from dagster.core.errors import DagsterInvariantViolationError
from dagster.core.instance import DagsterInstance
# Dotfile name and keys used to persist per-instance telemetry settings.
TELEMETRY_STR = ".telemetry"
INSTANCE_ID_STR = "instance_id"
ENABLED_STR = "enabled"
# Used when $DAGSTER_HOME is not set.
DAGSTER_HOME_FALLBACK = "~/.dagster"
DAGSTER_TELEMETRY_URL = "http://telemetry.dagster.io/actions"
# Max size of a telemetry log file before RotatingFileHandler rolls over.
MAX_BYTES = 10485760  # 10 MB = 10 * 1024 * 1024 bytes
# Action names for logged events.
UPDATE_REPO_STATS = "update_repo_stats"
START_DAGIT_WEBSERVER = "start_dagit_webserver"
# Bump when the TelemetryEntry schema changes.
TELEMETRY_VERSION = "0.2"
# When adding to TELEMETRY_WHITELISTED_FUNCTIONS, please also update the literalinclude in
# docs/next/src/pages/install/index.mdx
TELEMETRY_WHITELISTED_FUNCTIONS = {
    "execute_execute_command",
    "_logged_execute_pipeline",
    "execute_launch_command",
}
def telemetry_wrapper(f):
    """
    Wrapper around functions that are logged. Will log the function_name, client_time,
    elapsed_time, and success.

    The wrapped function must be in the list of whitelisted functions, and must have a
    DagsterInstance parameter named 'instance' in the signature.

    :raises DagsterInvariantViolationError: at decoration time, when the function is not
        whitelisted or has no 'instance' parameter.
    """
    if f.__name__ not in TELEMETRY_WHITELISTED_FUNCTIONS:
        raise DagsterInvariantViolationError(
            "Attempted to log telemetry for function {name} that is not in telemetry whitelisted "
            "functions list: {whitelist}.".format(
                name=f.__name__, whitelist=TELEMETRY_WHITELISTED_FUNCTIONS
            )
        )
    var_names = f.__code__.co_varnames
    try:
        instance_index = var_names.index("instance")
    except ValueError:
        # BUG FIX: the message previously contained a literal "{name}" because
        # .format() was never called on it.
        raise DagsterInvariantViolationError(
            "Attempted to log telemetry for function {name} that does not take a DagsterInstance "
            "in a parameter called 'instance'".format(name=f.__name__)
        )

    @wraps(f)
    def wrap(*args, **kwargs):
        # Emit paired "<fn>_started" / "<fn>_ended" events around the call.
        instance = _check_telemetry_instance_param(args, kwargs, instance_index)
        start_time = datetime.datetime.now()
        log_action(instance=instance, action=f.__name__ + "_started", client_time=start_time)
        result = f(*args, **kwargs)
        end_time = datetime.datetime.now()
        log_action(
            instance=instance,
            action=f.__name__ + "_ended",
            client_time=end_time,
            elapsed_time=end_time - start_time,
            metadata={"success": getattr(result, "success", None)},
        )
        return result
    return wrap
def get_python_version():
    """Return the running interpreter version as a 'major.minor.micro' string."""
    major, minor, micro = sys.version_info[:3]
    return "{}.{}.{}".format(major, minor, micro)
class TelemetryEntry(
    namedtuple(
        "TelemetryEntry",
        "action client_time elapsed_time event_id instance_id pipeline_name_hash "
        "num_pipelines_in_repo repo_hash python_version metadata version",
    )
):
    """
    Schema for telemetry logs.
    Currently, log entries are coerced to the same schema to enable storing all entries in one DB
    table with unified schema.
    action - Name of function called i.e. `execute_pipeline_started` (see: fn telemetry_wrapper)
    client_time - Client time
    elapsed_time - Time elapsed between start of function and end of function call
    event_id - Unique id for the event
    instance_id - Unique id for dagster instance
    pipeline_name_hash - Hash of pipeline name, if any
    python_version - Python version
    repo_hash - Hash of repo name, if any
    num_pipelines_in_repo - Number of pipelines in repo, if any
    metadata - More information i.e. pipeline success (boolean)
    version - Schema version
    If $DAGSTER_HOME is set, then use $DAGSTER_HOME/logs/
    Otherwise, use ~/.dagster/logs/
    """

    def __new__(
        cls,
        action,
        client_time,
        event_id,
        instance_id,
        elapsed_time=None,
        pipeline_name_hash=None,
        num_pipelines_in_repo=None,
        repo_hash=None,
        metadata=None,
    ):
        action = check.str_param(action, "action")
        # BUG FIX: the check's parameter label was previously "action", which
        # would produce a misleading error message for a bad client_time.
        client_time = check.str_param(client_time, "client_time")
        elapsed_time = check.opt_str_param(elapsed_time, "elapsed_time", "")
        event_id = check.str_param(event_id, "event_id")
        instance_id = check.str_param(instance_id, "instance_id")
        metadata = check.opt_dict_param(metadata, "metadata")
        if action == UPDATE_REPO_STATS:
            # Repo-stats entries must carry the repo fields; all other actions
            # blank them out so every entry fits the unified schema.
            pipeline_name_hash = check.str_param(pipeline_name_hash, "pipeline_name_hash")
            num_pipelines_in_repo = check.str_param(num_pipelines_in_repo, "num_pipelines_in_repo")
            repo_hash = check.str_param(repo_hash, "repo_hash")
        else:
            pipeline_name_hash = ""
            num_pipelines_in_repo = ""
            repo_hash = ""
        return super(TelemetryEntry, cls).__new__(
            cls,
            action=action,
            client_time=client_time,
            elapsed_time=elapsed_time,
            event_id=event_id,
            instance_id=instance_id,
            pipeline_name_hash=pipeline_name_hash,
            num_pipelines_in_repo=num_pipelines_in_repo,
            repo_hash=repo_hash,
            python_version=get_python_version(),
            metadata=metadata,
            version=TELEMETRY_VERSION,
        )
def _dagster_home_if_set():
dagster_home_path = os.getenv("DAGSTER_HOME")
if not dagster_home_path:
return None
return os.path.expanduser(dagster_home_path)
def get_dir_from_dagster_home(target_dir):
    """
    Resolve (and create if missing) a subdirectory of the dagster home.
    If $DAGSTER_HOME is set, return $DAGSTER_HOME/<target_dir>/
    Otherwise, return ~/.dagster/<target_dir>/
    The 'logs' directory is used to cache logs before upload
    The '.logs_queue' directory is used to temporarily store logs during upload. This is to prevent
    dropping events or double-sending events that occur during the upload process.
    The '.telemetry' directory is used to store the instance id.
    """
    base_path = _dagster_home_if_set()
    if base_path is None:
        base_path = os.path.expanduser(DAGSTER_HOME_FALLBACK)
    resolved_path = os.path.join(base_path, target_dir)
    if not os.path.exists(resolved_path):
        os.makedirs(resolved_path)
    return resolved_path
def get_log_queue_dir():
    """
    Get the directory where we store log queue files, creating the directory if needed.
    The log queue directory is used to temporarily store logs during upload. This is to prevent
    dropping events or double-sending events that occur during the upload process.
    If $DAGSTER_HOME is set, return $DAGSTER_HOME/.logs_queue/
    Otherwise, return ~/.dagster/.logs_queue/
    """
    dagster_home_path = _dagster_home_if_set()
    if dagster_home_path is None:
        dagster_home_path = os.path.expanduser(DAGSTER_HOME_FALLBACK)
    # CONSISTENCY FIX: build the path with os.path.join (as
    # get_dir_from_dagster_home does) instead of raw string concatenation.
    # The trailing "" component preserves the trailing separator the original
    # returned, in case callers concatenate filenames onto it.
    dagster_home_logs_queue_path = os.path.join(dagster_home_path, ".logs_queue", "")
    if not os.path.exists(dagster_home_logs_queue_path):
        os.makedirs(dagster_home_logs_queue_path)
    return dagster_home_logs_queue_path
def _check_telemetry_instance_param(args, kwargs, instance_index):
    """Extract and validate the DagsterInstance from a wrapped call's arguments.

    Looks for a keyword argument named 'instance' first, then falls back to the
    positional slot at *instance_index*.
    """
    if "instance" in kwargs:
        return check.inst_param(
            kwargs["instance"],
            "instance",
            DagsterInstance,
            "'instance' parameter passed as keyword argument must be a DagsterInstance",
        )
    check.invariant(len(args) > instance_index)
    return check.inst_param(
        args[instance_index],
        "instance",
        DagsterInstance,
        "'instance' argument at position {position} must be a DagsterInstance".format(
            position=instance_index
        ),
    )
def _get_telemetry_logger():
    """Return the shared telemetry logger, attaching its rotating file handler on first use."""
    logger = logging.getLogger("dagster_telemetry_logger")
    if logger.handlers:
        return logger
    event_log_path = os.path.join(get_dir_from_dagster_home("logs"), "event.log")
    logger.setLevel(logging.INFO)
    logger.addHandler(
        RotatingFileHandler(event_log_path, maxBytes=MAX_BYTES, backupCount=10)
    )
    return logger
def write_telemetry_log_line(log_line):
    """Serialize *log_line* as JSON and append it to the telemetry event log."""
    _get_telemetry_logger().info(json.dumps(log_line))
def _get_instance_telemetry_info(instance):
    """Return a (telemetry_enabled, instance_id) pair for *instance*.

    instance_id is None when telemetry is disabled (it is never created in
    that case).
    """
    check.inst_param(instance, "instance", DagsterInstance)
    enabled = _get_instance_telemetry_enabled(instance)
    return (enabled, _get_or_set_instance_id() if enabled else None)
def _get_instance_telemetry_enabled(instance):
return instance.telemetry_enabled
def _get_or_set_instance_id():
    """Return the persisted anonymous instance id, creating and storing one if absent.

    :return: the instance id string (or a sentinel when the id file cannot be written).
    """
    instance_id = _get_telemetry_instance_id()
    # IDIOM FIX: compare to None with `is`, not `==` (PEP 8).
    if instance_id is None:
        instance_id = _set_telemetry_instance_id()
    return instance_id
# Gets the instance_id at $DAGSTER_HOME/.telemetry/id.yaml
def _get_telemetry_instance_id():
    """Read the stored telemetry instance id, returning None if missing or malformed."""
    telemetry_id_path = os.path.join(get_dir_from_dagster_home(TELEMETRY_STR), "id.yaml")
    if not os.path.exists(telemetry_id_path):
        # CONSISTENCY FIX: was a bare `return`; be explicit like the path below.
        return None
    with open(telemetry_id_path, "r") as telemetry_id_file:
        telemetry_id_yaml = yaml.safe_load(telemetry_id_file)
        # ROBUSTNESS FIX: an empty id.yaml makes safe_load return None, which
        # previously raised TypeError on the `in` test below.
        if telemetry_id_yaml and INSTANCE_ID_STR in telemetry_id_yaml and isinstance(
            telemetry_id_yaml[INSTANCE_ID_STR], six.string_types
        ):
            return telemetry_id_yaml[INSTANCE_ID_STR]
    return None
# Sets the instance_id at $DAGSTER_HOME/.telemetry/id.yaml
def _set_telemetry_instance_id():
    """Generate, persist, and return a fresh instance id, printing the onboarding notices."""
    click.secho(TELEMETRY_TEXT)
    click.secho(SLACK_PROMPT)
    id_file_path = os.path.join(get_dir_from_dagster_home(TELEMETRY_STR), "id.yaml")
    new_instance_id = str(uuid.uuid4())
    try:
        # Best-effort write; the user's file system may be read-only.
        with open(id_file_path, "w") as id_file:
            yaml.dump({INSTANCE_ID_STR: new_instance_id}, id_file, default_flow_style=False)
    except Exception:  # pylint: disable=broad-except
        return "<<unable_to_write_instance_id>>"
    return new_instance_id
def hash_name(name):
    """Return the SHA-256 hex digest of *name* (anonymizes identifiers before logging)."""
    digest = hashlib.sha256(name.encode("utf-8"))
    return digest.hexdigest()
def log_external_repo_stats(instance, source, external_repo, external_pipeline=None):
    """Write an UPDATE_REPO_STATS telemetry entry for an external repository, if enabled.

    Names are hashed before logging; only counts and hashes leave the machine.
    """
    from dagster.core.host_representation.external import (
        ExternalPipeline,
        ExternalRepository,
    )
    check.inst_param(instance, "instance", DagsterInstance)
    check.str_param(source, "source")
    check.inst_param(external_repo, "external_repo", ExternalRepository)
    check.opt_inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
    if not _get_instance_telemetry_enabled(instance):
        return
    pipeline_hash = hash_name(external_pipeline.name) if external_pipeline else ""
    entry = TelemetryEntry(
        action=UPDATE_REPO_STATS,
        client_time=str(datetime.datetime.now()),
        event_id=str(uuid.uuid4()),
        instance_id=_get_or_set_instance_id(),
        pipeline_name_hash=pipeline_hash,
        num_pipelines_in_repo=str(len(external_repo.get_all_external_pipelines())),
        repo_hash=hash_name(external_repo.name),
        metadata={"source": source},
    )
    write_telemetry_log_line(entry._asdict())
def log_repo_stats(instance, source, pipeline=None, repo=None):
    """Write an UPDATE_REPO_STATS telemetry entry for a local pipeline/repository, if enabled.

    Works from a ReconstructablePipeline, a ReconstructableRepository, or a plain
    ExecutablePipeline (treated as a one-pipeline ephemeral repo).
    """
    check.inst_param(instance, "instance", DagsterInstance)
    check.str_param(source, "source")
    check.opt_inst_param(pipeline, "pipeline", ExecutablePipeline)
    check.opt_inst_param(repo, "repo", ReconstructableRepository)
    if not _get_instance_telemetry_enabled(instance):
        return
    instance_id = _get_or_set_instance_id()
    if isinstance(pipeline, ReconstructablePipeline):
        pipeline_def = pipeline.get_definition()
        pipeline_name_hash = hash_name(pipeline_def.name)
        repository = pipeline.get_reconstructable_repository().get_definition()
        repo_hash = hash_name(repository.name)
        num_pipelines_in_repo = len(repository.pipeline_names)
    elif isinstance(repo, ReconstructableRepository):
        pipeline_name_hash = ""
        repository = repo.get_definition()
        repo_hash = hash_name(repository.name)
        num_pipelines_in_repo = len(repository.pipeline_names)
    else:
        # Ephemeral single-pipeline repository.
        pipeline_def = pipeline.get_definition()
        pipeline_name_hash = hash_name(pipeline_def.name)
        repo_hash = hash_name(get_ephemeral_repository_name(pipeline_def.name))
        num_pipelines_in_repo = 1
    write_telemetry_log_line(
        TelemetryEntry(
            action=UPDATE_REPO_STATS,
            client_time=str(datetime.datetime.now()),
            event_id=str(uuid.uuid4()),
            instance_id=instance_id,
            pipeline_name_hash=pipeline_name_hash,
            num_pipelines_in_repo=str(num_pipelines_in_repo),
            repo_hash=repo_hash,
            metadata={"source": source},
        )._asdict()
    )
def log_action(instance, action, client_time=None, elapsed_time=None, metadata=None):
    """Write a generic telemetry entry for *action*, if telemetry is enabled on *instance*.

    :param instance: the DagsterInstance whose settings gate telemetry.
    :param action: event name, e.g. "<fn>_started" / "<fn>_ended" (see telemetry_wrapper).
    :param client_time: event timestamp; defaults to datetime.datetime.now().
    :param elapsed_time: optional duration for "_ended" events.
    :param metadata: optional extra fields, e.g. {"success": bool}.
    """
    check.inst_param(instance, "instance", DagsterInstance)
    if client_time is None:
        client_time = datetime.datetime.now()
    (dagster_telemetry_enabled, instance_id) = _get_instance_telemetry_info(instance)
    if dagster_telemetry_enabled:
        # Log general statistics
        write_telemetry_log_line(
            TelemetryEntry(
                action=action,
                client_time=str(client_time),
                # NOTE(review): str(None) yields the string "None" when
                # elapsed_time is omitted — presumably tolerated downstream;
                # verify before changing.
                elapsed_time=str(elapsed_time),
                event_id=str(uuid.uuid4()),
                instance_id=instance_id,
                metadata=metadata,
            )._asdict()
        )
TELEMETRY_TEXT = """
%(telemetry)s
As an open source project, we collect usage statistics to inform development priorities. For more
information, read https://docs.dagster.io/install/telemetry.
We will not see or store solid definitions, pipeline definitions, modes, resources, context, or
any data that is processed within solids and pipelines.
To opt-out, add the following to $DAGSTER_HOME/dagster.yaml, creating that file if necessary:
telemetry:
enabled: false
""" % {
"telemetry": click.style("Telemetry:", fg="blue", bold=True)
}
SLACK_PROMPT = """
%(welcome)s
If you have any questions or would like to engage with the Dagster team, please join us on Slack
(https://bit.ly/39dvSsF).
""" % {
"welcome": click.style("Welcome to Dagster!", bold=True)
}
def upload_logs(stop_event):
    """Upload logs to telemetry server every hour, or when log directory size is > 10MB

    Intended to run in a background thread; loops until *stop_event* is set,
    and returns early once both the live log dir and the staged queue dir are
    empty. All failures are swallowed — telemetry is strictly best-effort.
    """
    try:
        # Pretend the last run was 2 hours ago so the first pass uploads immediately.
        last_run = datetime.datetime.now() - datetime.timedelta(minutes=120)
        dagster_log_dir = get_dir_from_dagster_home("logs")
        dagster_log_queue_dir = get_dir_from_dagster_home(".logs_queue")
        in_progress = False
        while not stop_event.is_set():
            # Total size (bytes) of pending event logs.
            log_size = 0
            if os.path.isdir(dagster_log_dir):
                log_size = sum(
                    os.path.getsize(os.path.join(dagster_log_dir, f))
                    for f in os.listdir(dagster_log_dir)
                    if os.path.isfile(os.path.join(dagster_log_dir, f))
                )
            # Total size (bytes) of logs already staged for upload.
            log_queue_size = 0
            if os.path.isdir(dagster_log_queue_dir):
                log_queue_size = sum(
                    os.path.getsize(os.path.join(dagster_log_queue_dir, f))
                    for f in os.listdir(dagster_log_queue_dir)
                    if os.path.isfile(os.path.join(dagster_log_queue_dir, f))
                )
            # Nothing left to send: end the uploader entirely.
            if log_size == 0 and log_queue_size == 0:
                return
            # Upload when an hour has passed or either directory hit the cap.
            if not in_progress and (
                datetime.datetime.now() - last_run > datetime.timedelta(minutes=60)
                or log_size >= MAX_BYTES
                or log_queue_size >= MAX_BYTES
            ):
                in_progress = True  # Prevent concurrent _upload_logs invocations
                last_run = datetime.datetime.now()
                dagster_log_dir = get_dir_from_dagster_home("logs")
                dagster_log_queue_dir = get_dir_from_dagster_home(".logs_queue")
                _upload_logs(dagster_log_dir, log_size, dagster_log_queue_dir)
                in_progress = False
            stop_event.wait(600)  # Sleep for 10 minutes
    except Exception:  # pylint: disable=broad-except
        # Best-effort: never let telemetry failures disturb the host process.
        pass
def _upload_logs(dagster_log_dir, log_size, dagster_log_queue_dir):
"""Send POST request to telemetry server with the contents of $DAGSTER_HOME/logs/ directory """
try:
if log_size > 0:
# Delete contents of dagster_log_queue_dir so that new logs can be copied over
| |
from Part import *
import math
from math import sqrt
from FreeCAD import Base
# Block dimension information:
# https://www.cailliau.org/en/Alphabetical/L/Lego/Dimensions/General%20Considerations/
# https://bricks.stackexchange.com/questions/288/what-are-the-dimensions-of-a-lego-brick
# Global unit and CAD helper constants used by every builder below.
mm = 1.0 # base unit is mm
epsilon = 0.01*mm # small overlap for CAD purposes
infinity = 1000*mm # very large number for CAD clipping purposes
def block(number_of_x_knobs,
          number_of_y_knobs,
          number_of_vertical_units,
          **kw):
    """Build a parametric LEGO-compatible brick as a FreeCAD solid.

    :param number_of_x_knobs: knob (stud) count along x.
    :param number_of_y_knobs: knob (stud) count along y.
    :param number_of_vertical_units: height in vertical-pitch units.
    :param kw: feature flags and dimensional overrides; see the kw.get calls
        below for every recognized name and its default (all lengths in mm).
    :return: the finished Part solid.
    """
    # Feature toggles.
    solid = kw.get('solid', False)
    has_knobs = kw.get('has_knobs', True)
    has_knob_dimples = kw.get('has_knob_dimples', False)
    has_under_tubes = kw.get('has_under_tubes', True)
    has_under_tube_cavities = kw.get('has_under_tube_cavities', True)
    has_ridges = kw.get('has_ridges', False)
    has_struts = kw.get('has_struts', False)
    has_vertical_holes = kw.get('has_vertical_holes', False)
    has_horizontal_holes = kw.get('has_horizontal_holes', False)
    has_keystone = kw.get('has_keystone', True)
    keystone_angle = kw.get('keystone_angle', 0)  # degrees
    # Base dimensions.
    units = unit = kw.get('unit', 1.6 * mm)
    horizontal_pitch = kw.get('horizontal_pitch', 5.0 * units)
    vertical_pitch = kw.get('vertical_pitch', 6.0 * units)
    horizontal_play = kw.get('horizontal_play', 0.1 * mm)
    bottom_play = kw.get('bottom_play', 0.0 * mm)
    notional_wall_thickness = kw.get('notional_wall_thickness', 1.0 * unit)
    if has_ridges:
        wall_thickness = kw.get('wall_thickness', 1.2 * mm)
    else:
        wall_thickness = kw.get('wall_thickness', 1.5 * mm)
    ridge_length = kw.get('ridge_length', 0.6 * mm)
    top_thickness = kw.get('top_thickness', 1.1 * mm)
    knob_height = kw.get('knob_height', 1.0 * unit + 0.2 * mm)
    knob_diameter = kw.get('knob_diameter', 3.0 * units + 0.1 * mm)
    knob_dimple_height = kw.get('knob_dimple_height',
                                knob_height - 0.3 * mm)
    knob_dimple_diameter = kw.get('knob_dimple_diameter',
                                  1.5 * units + 0.2 * mm)
    under_tube_outer_diameter = kw.get('under_tube_outer_diameter',
                                       (5*sqrt(2)-3)*units - 0.1 * mm)
    under_tube_inner_diameter = kw.get('under_tube_inner_diameter',
                                       3.0 * units + 0.1 * mm)
    strut_gap = kw.get('strut_gap', 2.3 * mm)
    strut_width = kw.get('strut_width', 1.0 * mm)
    strut_interval = kw.get('strut_interval', 2)
    strut_sides = kw.get('strut_sides', 2)
    horizontal_hole_interval = kw.get('horizontal_hole_interval', 1)
    horizontal_hole_offset = kw.get('horizontal_hole_offset', horizontal_pitch)
    horizontal_hole_diameter = kw.get('horizontal_hole_diameter',
                                      3.0 * units + 0.1 * mm)  # knob diameter
    horizontal_hole_counterbore_diameter = kw.get('horizontal_hole_counterbore_diameter',
                                                  3.75 * units + 0.2 * mm)
    notional_horizontal_hole_counterbore_depth = kw.get('notional_horizontal_hole_counterbore_depth',
                                                        0.9 * mm)
    horizontal_tube_wall_thickness = kw.get('horizontal_tube_wall_thickness',
                                            wall_thickness - 0.2)
    horizontal_tube_outer_diameter = kw.get('horizontal_tube_outer_diameter',
                                            horizontal_hole_diameter + 2*horizontal_tube_wall_thickness)
    horizontal_hole_vertical_offset = kw.get('horizontal_hole_vertical_offset',
                                             3.5 * units + 0.2 * mm)
    # Derived overall dimensions; "actual" sizes shrink by the fit play.
    notional_width = number_of_x_knobs * horizontal_pitch
    notional_depth = number_of_y_knobs * horizontal_pitch
    notional_height = number_of_vertical_units * vertical_pitch
    actual_width = notional_width - 2.0*horizontal_play
    actual_depth = notional_depth - 2.0*horizontal_play
    # (removed unused local `actual_height`; the z clip uses `infinity` below)
    if has_ridges:
        ridge_width = notional_wall_thickness - horizontal_play - wall_thickness
    number_of_x_under_tubes = number_of_x_knobs - 1
    number_of_y_under_tubes = number_of_y_knobs - 1
    number_of_struts = int(math.floor((number_of_y_knobs - 1)/strut_interval))
    number_of_horizontal_holes = int(math.floor((number_of_y_knobs)/horizontal_hole_interval))
    # BUG FIX: removed a leftover debug print() that dumped hole parameters to
    # stdout on every call.
    if horizontal_hole_offset > (horizontal_pitch + 0.1):
        number_of_horizontal_holes -= 1
    # Box of basic dimensions
    b = makeBox(notional_width, notional_depth, notional_height)
    # Cut away inner box leaving the edges and top
    if not solid:
        cavity_offset = wall_thickness + horizontal_play
        cavity = makeBox(
            notional_width - 2*cavity_offset,
            notional_depth - 2*cavity_offset,
            notional_height - top_thickness + epsilon)
        cavity.translate((
            cavity_offset,
            cavity_offset,
            -epsilon))
        b = b.cut(cavity)
    # Add side ridges aligned with each knob
    if has_ridges:
        # Top and bottom ridges
        y_bottom = horizontal_play + wall_thickness - epsilon
        y_top = notional_depth - horizontal_play - wall_thickness - ridge_width
        for xi in range(number_of_x_knobs):
            x = (xi + 0.5)*horizontal_pitch - (ridge_length/2.0)
            bottom_ridge = makeBox(ridge_length, ridge_width + epsilon,
                                   notional_height - top_thickness + epsilon)
            bottom_ridge.translate((x,y_bottom,0))
            b = b.fuse(bottom_ridge)
            top_ridge = makeBox(ridge_length, ridge_width + epsilon,
                                notional_height - top_thickness + epsilon)
            top_ridge.translate((x,y_top,0))
            b = b.fuse(top_ridge)
        # Left and right ridges
        x_left = horizontal_play + wall_thickness - epsilon
        x_right = notional_width - horizontal_play - wall_thickness - ridge_width
        for yi in range(number_of_y_knobs):
            y = (yi + 0.5)*horizontal_pitch - (ridge_length/2.0)
            left_ridge = makeBox(ridge_width + epsilon, ridge_length,
                                 notional_height - top_thickness + epsilon)
            left_ridge.translate((x_left,y,0))
            b = b.fuse(left_ridge)
            right_ridge = makeBox(ridge_width + epsilon, ridge_length,
                                  notional_height - top_thickness + epsilon)
            right_ridge.translate((x_right,y,0))
            b = b.fuse(right_ridge)
    # Add struts aligned with under-tube knob
    if has_struts:
        for yi in range(number_of_struts):
            y = (yi + 1) * strut_interval * horizontal_pitch
            if strut_sides == 1:
                strut_length = notional_width/2.0-2.0*epsilon
            else:
                strut_length = notional_width-2.0*epsilon
            strut = makeBox(strut_length, strut_width, notional_height - strut_gap - epsilon)
            strut.translate((epsilon, y-strut_width/2.0, strut_gap))
            b = b.fuse(strut)
    # Make knobs
    if has_knobs:
        for xi in range(number_of_x_knobs):
            x = (xi + 0.5)*horizontal_pitch
            for yi in range(number_of_y_knobs):
                y = (yi + 0.5)*horizontal_pitch
                knob = makeCylinder(knob_diameter/2.0,knob_height + epsilon)
                knob.translate((x, y, notional_height - epsilon))
                b = b.fuse(knob)
                if has_knob_dimples:
                    knob_dimple = makeCylinder(
                        knob_dimple_diameter/2.0, knob_dimple_height + epsilon)
                    knob_dimple.translate((x, y,
                                           notional_height - top_thickness - epsilon))
                    b = b.cut(knob_dimple)
    # Make under tubes
    if has_under_tubes:
        for xi in range(number_of_x_under_tubes):
            x = (xi + 1.0)*horizontal_pitch
            for yi in range(number_of_y_under_tubes):
                y = (yi + 1.0)*horizontal_pitch
                tube = makeCylinder(under_tube_outer_diameter/2.0,
                                    notional_height - top_thickness + epsilon)
                tube.translate((x, y, 0))
                if has_vertical_holes:
                    tube_cavity_height = infinity
                else:
                    tube_cavity_height = notional_height - top_thickness
                tube_cavity = makeCylinder(under_tube_inner_diameter/2.0,
                                           tube_cavity_height + epsilon)
                tube_cavity.translate((x, y, -epsilon))
                b = b.fuse(tube)
                if has_under_tube_cavities:
                    b = b.cut(tube_cavity)
    # Make horizontal tubes
    if has_horizontal_holes:
        x = epsilon
        for i, yi in enumerate(range(number_of_horizontal_holes)):
            y = horizontal_hole_offset + yi * horizontal_hole_interval * horizontal_pitch
            # Make a solid tube
            tube = makeCylinder(horizontal_tube_outer_diameter/2.0,
                                actual_width - 2*epsilon)
            # Rotate the tube 90 degress about y-axis
            tube.rotate(Base.Vector(0,0,0), Base.Vector(0,1,0), 90)
            # Move the tube to the right place
            tube.translate((x, y, horizontal_hole_vertical_offset))
            # Add the tube to the block
            b = b.fuse(tube)
            # Create the tube cavity
            tube_cavity = makeCylinder(horizontal_hole_diameter/2.0,
                                       actual_width + 2*infinity)
            tube_cavity.translate((0, 0, -infinity))
            # Rotate the tube cavity 90 degress about y-axis
            tube_cavity.rotate(Base.Vector(0,0,0), Base.Vector(0,1,0), 90)
            # Move the tube cavity to the right place
            tube_cavity.translate((x, y, horizontal_hole_vertical_offset))
            # Cut the tube cavity out of the block
            b = b.cut(tube_cavity)
            # Create the closest counterbore cavity
            counterbore_cavity = makeCylinder(horizontal_hole_counterbore_diameter/2.0,
                                              notional_horizontal_hole_counterbore_depth + infinity)
            counterbore_cavity.translate((0, 0, -infinity))
            counterbore_cavity.rotate(Base.Vector(0,0,0), Base.Vector(0,1,0), 90)
            counterbore_cavity.translate((x, y, horizontal_hole_vertical_offset))
            b = b.cut(counterbore_cavity)
            # Create the opposite counterbore cavity
            counterbore_cavity = makeCylinder(horizontal_hole_counterbore_diameter/2.0,
                                              notional_horizontal_hole_counterbore_depth)
            counterbore_cavity.translate((0, 0, -notional_horizontal_hole_counterbore_depth))
            counterbore_cavity.rotate(Base.Vector(0,0,0), Base.Vector(0,1,0), 90)
            counterbore_cavity.translate((x + horizontal_pitch, y, horizontal_hole_vertical_offset))
            b = b.cut(counterbore_cavity)
            # Keystone cutout for printing
            if has_keystone:
                w = 2.0 * mm
                h = 2.0 * mm
                keystone = makeBox(actual_width, w, h)
                keystone.translate((x, y - w/2.0, horizontal_hole_vertical_offset + horizontal_hole_counterbore_diameter/2.0 - h +0.1 ))
                keystone.rotate(Base.Vector(horizontal_pitch/2.0,horizontal_pitch/2.0,vertical_pitch-horizontal_pitch/2.0), Base.Vector(1,0,0), keystone_angle)
                b = b.cut(keystone)
    # Clip everything outside off the play boundary
    play_boundary = makeBox(
        actual_width,
        actual_depth,
        infinity-bottom_play)
    play_boundary.translate((
        horizontal_play,
        horizontal_play,
        bottom_play))
    b = b.common(play_boundary)
    return b
def myblock(number_of_x_knobs,
            number_of_y_knobs,
            number_of_vertical_units,
            loose=False,
            **kw):
    """Call block() with my preferred fit tolerances filled in as defaults.

    ``loose=True`` shrinks the knob and under-tube diameters slightly for an
    easier fit; otherwise the defaults give a snugger press fit. Explicitly
    passed keyword values always win over these defaults.
    """
    units = unit = kw.get('unit', 1.6 * mm)
    kw.setdefault('bottom_play', 0.1 * mm)
    if loose:
        kw.setdefault('knob_diameter', 3.0 * units + 0.05 * mm)
        kw.setdefault('under_tube_outer_diameter', (5*sqrt(2)-3)*units * mm)
    else:
        kw.setdefault('knob_diameter', 3.0 * units + 0.15 * mm)
        kw.setdefault('under_tube_outer_diameter', (5*sqrt(2)-3)*units + 0.1 * mm)
    return block(number_of_x_knobs,
                 number_of_y_knobs,
                 number_of_vertical_units,
                 **kw)
def vsplit(b, v):
    """Split solid *b* horizontally at height *v*; return (top_part, bottom_part)."""
    upper_half_space = makeBox(infinity*2, infinity*2, infinity)
    upper_half_space.translate((-infinity, -infinity, v))
    return b.common(upper_half_space), b.cut(upper_half_space)
def box_corner():
    """Assemble a three-layer interlocking corner piece for a brick-built box.

    Layers (bottom to top): a 2x2 base plate with a slot, an L-shaped middle
    layer of solid 1-stud blocks with horizontal holes, and a top layer with
    two slots cut for printing. Returns the fused, play-clipped solid.
    """
    hunit = 5*1.6  # horizontal pitch in mm (5 fundamental 1.6 mm units)
    vunit = 6*1.6  # vertical pitch in mm (6 fundamental 1.6 mm units)
    # middle layer
    b = myblock(1,2,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True,has_knobs=False)
    b2 = myblock(1,1,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True,has_knobs=False)
    b2.rotate(Base.Vector(0,0,0), Base.Vector(0,0,1), -90)
    b2.translate((hunit,hunit,0))
    b = b.fuse(b2)
    b3 = myblock(1,1,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True,has_knobs=False)
    b3.rotate(Base.Vector(0,0,0), Base.Vector(0,0,1), -90)
    b3.translate((hunit,hunit*2,0))
    b = b.fuse(b3)
    # top layer
    b4 = myblock(1,2,1,has_under_tubes=False,has_horizontal_holes=False,bottom_play=0.0,horizontal_play=0.0,has_knobs=False,solid=True)
    b4.translate((0,0,vunit))
    b = b.fuse(b4)
    b5 = myblock(1,1,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True)
    b5.rotate(Base.Vector(0,0,0), Base.Vector(0,0,1), -90)
    b5.translate((hunit,hunit,vunit))
    b = b.fuse(b5)
    b6 = myblock(1,1,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True)
    b6.rotate(Base.Vector(0,0,0), Base.Vector(0,0,1), -90)
    b6.translate((hunit,hunit*2,vunit))
    b = b.fuse(b6)
    # Slot #1 in the top layer
    slot = makeBox(2.5,vunit*.7,vunit*.7)
    slot.translate((1.5,0,vunit*(2-.7)))
    b = b.cut(slot)
    # Slot #2 in the top layer
    slot = makeBox(2.5,vunit*.7,vunit*.7)
    slot.rotate(Base.Vector(0,0,0), Base.Vector(0,0,1), -90)
    slot.translate((0,2*hunit-1.5,vunit*(2-.7)))
    #slot.translate((0,hunit*2-2.5-1.5,vunit*(2-.7)))
    b = b.cut(slot)
    # bottom layer; thicker top so the slot below can be cut out of it
    b7 = myblock(2,2,1,has_under_tubes=True,has_horizontal_holes=False,bottom_play=0.0,horizontal_play=0.0,solid=False,has_knobs=False,
                 top_thickness=1.1+2.5)
    b7.translate((0,0,-vunit))
    # Slot in the bottom layer
    slot = makeBox(vunit*.7,hunit*2.0,2.5)
    slot.translate((0,0,-2.5))
    b7 = b7.cut(slot)
    b = b.fuse(b7)
    # Clip everything outside off the play boundary
    play_boundary = makeBox(
        hunit*2-2*0.1,
        hunit*2-2*0.1,
        vunit*3-0.1 + vunit) # extra vunit for knobs
    play_boundary.translate((
        0.1,
        0.1,
        0.1-vunit))
    b = b.common(play_boundary)
    return b
def box_side(length):
hunit = 5*1.6
vunit = 6*1.6
# middle layer
b = myblock(1,1,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True,has_knobs=False,
horizontal_hole_offset=0.5*hunit)
b.rotate(Base.Vector(0,0,0), Base.Vector(0,0,1), -90)
b.translate((0,hunit,0))
ba = myblock(1,1,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True,has_knobs=False,
horizontal_hole_offset=0.5*hunit)
ba.rotate(Base.Vector(0,0,0), Base.Vector(0,0,1), -90)
ba.translate((0,hunit*length,0))
b = b.fuse(ba)
b2 = myblock(1,1,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True,has_knobs=False,
horizontal_hole_offset=0.5*hunit)
b2.rotate(Base.Vector(0,0,0), Base.Vector(0,0,1), -90)
b2.translate((hunit,hunit,0))
b = b.fuse(b2)
b2a = myblock(1,1,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True,has_knobs=False,
horizontal_hole_offset=0.5*hunit)
b2a.rotate(Base.Vector(0,0,0), Base.Vector(0,0,1), -90)
b2a.translate((hunit,hunit*length,0))
b = b.fuse(b2a)
b3 = myblock(1,length-2,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True,has_knobs=False,
horizontal_hole_interval=2, horizontal_hole_offset=2.0*hunit)
b3.translate((0,hunit,0))
b = b.fuse(b3)
b3a = myblock(1,length-2,1,has_under_tubes=False,has_horizontal_holes=True,bottom_play=0.0,horizontal_play=0.0,solid=True,has_knobs=False,
horizontal_hole_interval=2)
b3a.translate((hunit,hunit,0))
b = b.fuse(b3a)
# top layer
b4 = myblock(1,length,1,has_under_tubes=False,has_horizontal_holes=False,bottom_play=0.0,horizontal_play=0.0,has_knobs=False,solid=True)
b4.translate((0,0,vunit))
b = b.fuse(b4)
| |
# Repository: bic2007/py-stellar-base
# coding: utf-8
import requests
from requests.adapters import HTTPAdapter, DEFAULT_POOLSIZE
from requests.exceptions import RequestException
from requests.compat import urljoin
from time import sleep
from urllib3.exceptions import NewConnectionError
from urllib3.util import Retry
from .version import __version__
from .asset import Asset
from .exceptions import HorizonError, HorizonRequestError
import logging
logger = logging.getLogger(__name__)
try:
from sseclient import SSEClient
except ImportError:
SSEClient = None
HORIZON_LIVE = "https://horizon.stellar.org"
HORIZON_TEST = "https://horizon-testnet.stellar.org"
DEFAULT_REQUEST_TIMEOUT = 11 # two ledgers + 1 sec, let's retry faster and not wait 60 secs.
DEFAULT_NUM_RETRIES = 3
DEFAULT_BACKOFF_FACTOR = 0.5
USER_AGENT = 'py-stellar-base-{}'.format(__version__)
class Horizon(object):
    def __init__(self,
                 horizon_uri=None,
                 pool_size=DEFAULT_POOLSIZE,
                 num_retries=DEFAULT_NUM_RETRIES,
                 request_timeout=DEFAULT_REQUEST_TIMEOUT,
                 backoff_factor=DEFAULT_BACKOFF_FACTOR,
                 user_agent=USER_AGENT):
        """The :class:`Horizon` object, which represents the interface for
        making requests to a Horizon server instance.
        This class aims to be up to date with Horizon's API endpoints; however,
        you can utilize the internal session via ``self.session`` (which is a
        :class:`requests.Session` object) to make arbitrary requests to
        a Horizon instance's API.
        In general, on HTTP errors (non 2XX/3XX responses), no exception is
        raised, and the return dictionary must be checked to see if it is an
        error or a valid response. Any other errors however are raised by this
        class.
        :param str horizon_uri: The horizon base URL
        :param int request_timeout: The timeout for all requests.
        :param int pool_size: persistent connection to Horizon and connection pool
        :param int num_retries: configurable request retry functionality
        :param float backoff_factor: a backoff factor to apply between attempts after the second try
        :param str user_agent: String representing the user-agent you want, such as "py-stellar-base"
        """
        # Default to the public testnet instance when no URI is given.
        if horizon_uri is None:
            self.horizon_uri = HORIZON_TEST
        else:
            self.horizon_uri = horizon_uri
        self.pool_size = pool_size
        self.num_retries = num_retries
        self.request_timeout = request_timeout
        self.backoff_factor = backoff_factor
        # adding 504 to the tuple of statuses to retry
        self.status_forcelist = tuple(Retry.RETRY_AFTER_STATUS_CODES) + (504,)
        # configure standard session
        # configure retry handler
        retry = Retry(
            total=self.num_retries,
            backoff_factor=self.backoff_factor,
            redirect=0,
            status_forcelist=self.status_forcelist)
        # init transport adapter
        adapter = HTTPAdapter(
            pool_connections=self.pool_size,
            pool_maxsize=self.pool_size,
            max_retries=retry)
        # init session
        session = requests.Session()
        # set default headers
        session.headers.update({'User-Agent': user_agent})
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        self._session = session
        # configure SSE session (differs from our standard session)
        # NOTE(review): retries here are effectively unbounded (total=1000000)
        # so long-lived event streams keep reconnecting.
        sse_retry = Retry(
            total=1000000, redirect=0, status_forcelist=self.status_forcelist)
        sse_adapter = HTTPAdapter(
            pool_connections=self.pool_size,
            pool_maxsize=self.pool_size,
            max_retries=sse_retry)
        sse_session = requests.Session()
        sse_session.headers.update({'User-Agent': user_agent})
        sse_session.mount('http://', sse_adapter)
        sse_session.mount('https://', sse_adapter)
        self._sse_session = sse_session
def submit(self, te):
"""Submit the transaction using a pooled connection, and retry on failure.
`POST /transactions
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-create.html>`_
Uses form-encoded data to send over to Horizon.
:return: The JSON response indicating the success/failure of the
submitted transaction.
:rtype: dict
"""
params = {'tx': te}
url = urljoin(self.horizon_uri, 'transactions/')
# POST is not included in Retry's method_whitelist for a good reason.
# our custom retry mechanism follows
reply = None
retry_count = self.num_retries
while True:
try:
reply = self._session.post(
url, data=params, timeout=self.request_timeout)
return check_horizon_reply(reply.json())
except (RequestException, NewConnectionError, ValueError) as e:
if reply is not None:
msg = 'Horizon submit exception: {}, reply: [{}] {}'.format(
str(e), reply.status_code, reply.text)
else:
msg = 'Horizon submit exception: {}'.format(str(e))
logging.warning(msg)
if (reply is not None and reply.status_code not in self.status_forcelist) or retry_count <= 0:
if reply is None:
raise HorizonRequestError(e)
raise HorizonError('Invalid horizon reply: [{}] {}'.format(
reply.status_code, reply.text), reply.status_code)
retry_count -= 1
logging.warning('Submit retry attempt {}'.format(retry_count))
sleep(self.backoff_factor)
def query(self, rel_url, params=None, sse=False):
abs_url = urljoin(self.horizon_uri, rel_url)
reply = self._query(abs_url, params, sse)
return check_horizon_reply(reply) if not sse else reply
def _query(self, url, params=None, sse=False):
reply = None
if not sse:
try:
reply = self._session.get(
url, params=params, timeout=self.request_timeout)
return reply.json()
except (RequestException, NewConnectionError, ValueError) as e:
if reply is not None:
raise HorizonError('Invalid horizon reply: [{}] {}'.format(
reply.status_code, reply.text), reply.status_code)
else:
raise HorizonRequestError(e)
# SSE connection
if SSEClient is None:
raise ImportError('SSE not supported, missing `stellar-base-sseclient` module')
return SSEClient(url, retry=0, session=self._sse_session, connect_retry=-1, params=params)
def account(self, address):
"""Returns information and links relating to a single account.
`GET /accounts/{account}
<https://www.stellar.org/developers/horizon/reference/endpoints/accounts-single.html>`_
:param str address: The account ID to retrieve details about.
:return: The account details in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}'.format(account_id=address)
return self.query(endpoint)
def account_data(self, address, key):
"""This endpoint represents a single data associated with a given
account.
`GET /accounts/{account}/data/{key}
<https://www.stellar.org/developers/horizon/reference/endpoints/data-for-account.html>`_
:param str address: The account ID to look up a data item from.
:param str key: The name of the key for the data item in question.
:return: The value of the data field for the given account and data key.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/data/{data_key}'.format(
account_id=address, data_key=key)
return self.query(endpoint)
def account_effects(self, address, cursor=None, order='asc', limit=10, sse=False):
"""This endpoint represents all effects that changed a given account.
`GET /accounts/{account}/effects{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/effects-for-account.html>`_
:param str address: The account ID to look up effects for.
:param cursor: A paging token, specifying where to start returning records from.
When streaming this can be set to "now" to stream object created since your request time.
:type cursor: int, str
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool sse: Use server side events for streaming responses.
:return: The list of effects in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/effects'.format(account_id=address)
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params, sse)
def account_offers(self, address, cursor=None, order='asc', limit=10, sse=False):
"""This endpoint represents all the offers a particular account makes.
`GET /accounts/{account}/offers{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/offers-for-account.html>`_
:param str address: The account ID to retrieve offers from.
:param cursor: A paging token, specifying where to start returning records from.
When streaming this can be set to "now" to stream object created since your request time.
:type cursor: int, str
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool sse: Use server side events for streaming responses.
:return: The list of offers for an account in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/offers'.format(account_id=address)
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params, sse)
def account_operations(self, address, cursor=None, order='asc', limit=10, sse=False):
"""This endpoint represents all operations that were included in valid
transactions that affected a particular account.
`GET /accounts/{account}/operations{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/operations-for-account.html>`_
:param str address: The account ID to list operations on.
:param cursor: A paging token, specifying where to start returning records from.
When streaming this can be set to "now" to stream object created since your request time.
:type cursor: int, str
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool sse: Use server side events for streaming responses.
:return: The list of operations for an account in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/operations'.format(
account_id=address)
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params, sse)
def account_transactions(self, address, cursor=None, order='asc', limit=10, sse=False):
"""This endpoint represents all transactions that affected a given
account.
`GET /accounts/{account_id}/transactions{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/transactions-for-account.html>`_
:param str address: The account ID to list transactions from.
:param cursor: A paging token, specifying where to start returning records from.
When streaming this can be set to "now" to stream object created since your request time.
:type cursor: int, str
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool sse: Use server side events for streaming responses.
:return: The list of transactions for an account in a JSON response.
:rtype: dict
"""
endpoint = '/accounts/{account_id}/transactions'.format(
account_id=address)
params = self.__query_params(cursor=cursor, order=order, limit=limit)
return self.query(endpoint, params, sse)
def account_payments(self, address, cursor=None, order='asc', limit=10, sse=False):
"""This endpoint responds with a collection of Payment operations where
the given account was either the sender or receiver.
`GET /accounts/{id}/payments{?cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/payments-for-account.html>`_
:param str address: The account ID to list payments to/from.
:param cursor: A paging token, specifying where to start returning records from.
When streaming this can be set to "now" to stream object created since your request time.
:type cursor: int, str
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool sse: Use server side events for streaming responses.
:return: | |
self.initial_temperature
else:
for elem in self.mesh.elems.values():
idxs = self.node_map.tags_to_idxs(elem.elem_node_tag_gen())
coords = elem.node_coords()
for i in range(len(idxs)):
t0[idxs[i]] = initial(coords[i, 0], coords[i, 1])
self.lst_tmp = t0
# Just to have the correct length list. Should be skipped over anyway.
self.lst_rad = [np.zeros(len(t0), dtype=np.float64)
for a in self.fq_list]
# Setup constant matrices
self.uv_vol = et.elems_2_csc(self.mesh,
et.uv_mtrx,
self.node_map)
self.uv_vol.description = "Integral of test function * weight " \
+ "over element volumes."
self.guv_vol = et.elems_2_csc(self.mesh,
et.gu_gv_mtrx,
self.node_map)
self.guv_vol.description = "Integral of test function laplacian *" \
+ " weight function laplacian over element volumes."
self.uv_bound = et.edge_2_csc(self.mesh,
"Boundary",
et.uv_mtrx,
self.node_map)
self.uv_bound.description = "Integral of test function * weight " \
+ " function over domain boundary."
self._print_matrix_info(self.uv_vol, "UV over volume")
self._print_matrix_info(self.guv_vol, "Grad U dot Grad V over volume")
self._print_matrix_info(self.uv_bound, "UV over boundary")
tocy = python_time.clock()
print("ht3_solver:\tCompleted initialisation in " + str(tocy - ticy)
+ " s.")
@staticmethod
def _print_matrix_info(mtrx, name):
""" Print infomation about a matrix
"""
pr = lambda t: print("ht3_solver:\t" + t)
pr("MATRIX INFO:")
pr("Matrix:\t" + name)
pr("Description:\t" + str(mtrx.description))
pr("Shape:\t" + str(mtrx.shape))
    def _print_setup(self):
        """ Prints a load of settings for the solver.

        Dumps the time-stepping settings, physical model constants and the
        radiation frequency table to stdout, one "ht3_solver:" line each.
        """
        pr = lambda x: print("ht3_solver:\t" + x)
        pr("Start time is " + str(python_time.asctime()))
        pr("")
        pr("TIME SETTINGS:")
        pr("Current time:\t\t\t\t" + str(self.current_T))
        pr("Delta T:\t\t\t\t" + str(self.d_T))
        pr("Finish time:\t\t\t\t" + str(self.max_T))
        pr("")
        pr("Using predefined funtions?:\t\t" + str(self.redefined))
        pr("")
        pr("PHYSICAL MODEL: ")
        pr("Background temperature:\t\t\t" + str(self.background_temperature))
        pr("Starting temp (maybe overrided):\t" + str(self.initial_temperature))
        pr("Diffusion scale:\t\t\t" + str(self.diff_scale))
        pr("Solid refractive index:\t\t\t" + str(self.refr_idx_vol))
        pr("Background refractive index:\t\t" + str(self.refr_idx_background))
        pr("Solid density:\t\t\t\t" + str(self.density))
        pr(
            "Solid specific heat capacity:\t\t" + str(
                self.heat_capacity))
        pr("Solid thermal conductivity:\t\t" + str(self.thermal_conductivity))
        pr("Solid hemispheric emissivity:\t\t" + str(self.alpha))
        pr("SP1 setting - r1:\t\t\t" + str(self.r1))
        pr("SP1 setting - r2:\t\t\t" + str(self.r2))
        pr("Convective coefficient:\t\t\t" + str(self.convect_coeff))
        pr("")
        pr("RADIATION - FREQUENCIES:")
        pr("Frequencies defined beyond base:\t" + str(len(self.fq_list)))
        pr("-----------------------------------------------------------------")
        pr("Frequency (Hz)\t\tAbsorbtion coeff")
        pr("-----------------------------------------------------------------")
        # v0 is the base frequency; it has no absorption coefficient entry.
        pr(str(self.v0_frequency) + "\t\t" + "-")
        for i in range(0, len(self.fq_list)):
            pr(str(self.fq_list[i]) + "\t" + str(self.absorb_coeffs[i]))
        pr("-----------------------------------------------------------------")
def zero_timings(self):
""" Zero step counter and current time """
self.step = 0
self.current_T = 0.0
def make_k_matrix(self):
""" Generate ht3_solver 'stiffness' matrix
"""
K = self.uv_vol + self.Epsilon * self.guv_vol + \
(self.Epsilon / self.Beta) * self.uv_bound
return K
def matrix_spy(self, mtrx):
""" Use matplotlib to spy a matrix
"""
import matplotlib.pylab as pl
pl.spy(mtrx,precision=0.01, markersize=1)
pl.show()
def check_k_matrix_stability(self):
""" Check stability of solution.
Finds primary eigenvalue of system. Asserts if more than 1.
"""
K = self.make_k_matrix()
vals, vects = scipy_sparse_eigens(K)
principal_val = vals.max()
print("ht3_solver:\t'Stiffness' matrix principal eigenvalue was "
+ str(principal_val))
if principal_val > 1:
print("##########################################################")
print("ht3_solver:\tWARNING")
print("ht3_solver:\tPrincipal eigenvalue is more than one.")
print("ht3_solver:\tThe analysis will be unstable.")
print("ht3_solver:\tIf this is OK, just go and modify the code "
+ "or something.")
print("##########################################################")
raise(AssertionError)
def one_step(self):
""" Do a single simulation step. Returns step's solution.
Forms linear expresson to solve and solves it for solution.
"""
assert (self.uv_vol is not None)
assert (self.guv_vol is not None)
assert (self.uv_bound is not None)
assert (self.vf_vect_bound is not None)
assert (self.vF_vect_vol is not None)
# Shape checks
assert (self.vF_vect_vol.size == self.vF_vect_vol.shape[0])
assert (self.vf_vect_bound.size == self.vf_vect_bound.shape[0])
assert (self.vF_vect_vol.shape == self.vf_vect_bound.shape)
assert (self.uv_vol.shape[0] == self.uv_vol.shape[1])
assert (self.uv_vol.shape == self.guv_vol.shape)
assert (self.uv_vol.shape == self.uv_bound.shape)
assert (self.uv_vol.shape[0] == self.vF_vect_vol.shape[0])
if self.step == 0:
self.check_k_matrix_stability()
# print("Epsilon is :"+str(self.Epsilon))
# print("Beta is :"+str(self.Beta))
# Form "Stiffness" matrix:
K = self.make_k_matrix()
# Form "Force" vector:
f = self.vF_vect_vol + (self.Epsilon / self.Beta) * self.vf_vect_bound
# print("FORCE VECTOR:")
# print(f)
# print("STIFFNESS MATRIX")
# print(K)
# print("UV_VOL")
# print(self.uv_vol)
# print("EPSILON * GUV_VOL")
# print(self.Epsilon * self.guv_vol)
# print("UV_BOUND * COEFF")
# print((self.Epsilon / self.Beta) * self.uv_bound)
sol = scipy_sparse_linsolve(K, f)
# print("SOLUTION")
# print(sol)
return sol
def run(self, initial=None):
""" Run the simulation.
"""
self.initialise(initial=initial)
sol = None
while self.advance(sol):
sol = self.one_step()
B_int_function = Accl.B_int_function
# """ The B^{(k)}(T, n) function.
#
# T is temperature.\n
# n is refractive index\n
# vk & vk_minus are frequencies used as the limits of integration.
# """
def data_saving(self, sol):
""" Saves given solution as solution to CURRENT solver state
"""
# Only export data once per time-step. We do this on the conduction
# step.
if self.save_rule is not None:
save_rule_true = self.save_rule(self.step, self.d_T)
else:
save_rule_true = True
if self.cond == True:
series = "Temperature"
elif self.rad is not None:
series = ("Radiation", self.fq_list[self.rad])
else:
# before sim starts... EARLY EXIT
return
if self.cond == True and save_rule_true:
# Save data to file with step no.
# First, generate dictionaries with {nid:value}
# CASE 1: Export mesh = FEM mesh (ie, no enrichment, easier!)
if self.export_mesh is None:
data_temp = {}
for nid in self.mesh.nodes.keys():
idx = self.node_map.tag_to_idx((nid, 0))
data_temp[nid] = self.lst_tmp[idx]
data_rad = {}
for i in self.fq_list:
data_rad[i] = {}
for nid in self.mesh.nodes.keys():
idx = self.node_map.tag_to_idx((nid, 0))
for i in range(0, len(self.fq_list)):
data_rad[self.fq_list[i]][nid] = self.lst_rad[i][idx]
# End CASE 1 - see after case two for finishing export.
# CASE 2: Exporting to a different mesh to the the XFEM / FEM
# mesh.
else:
# We need to a mapping from global to local element
# coordinates. We'll do this once and then store it.
# We store it in self.export_to_elem dictionary.
if self.export_to_elem is None:
self.export_to_elem = \
self.mesh.project_points(self.export_mesh.nodes,
failure_rule='closest')
# Dictionaries to export:
data_temp = {}
data_rad = {}
# Setup frequency data:
for i in self.fq_list:
data_rad[i] = {}
for node_id, expt_data in self.export_to_elem.items():
# Unpack the value of the dictionary value for clarity:
elem = expt_data[0]
eta = expt_data[1] # local coord
# Get element / solution indexes:
val = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]
data_temp[node_id] = val
# And for all the frequencies:
for i in range(0, len(self.fq_list)):
data_rad[self.fq_list[i]][node_id] \
= elem.eval_elem(self.node_map,
self.lst_rad[i],
[eta])[0]
# END CASE 2
expt_data = {"Temperature": data_temp}
for freq, nvals in data_rad.items():
expt_data[str(freq * 10) + "THz"] = data_rad[freq]
# Send to be exported as a VTK.
if self.export_mesh is None:
self.mesh.export_to_vtk(self.save_path + str(self.step),
expt_data)
else:
self.export_mesh.export_to_vtk(self.save_path + str(self.step),
expt_data)
try:
container = self.saved_data[series]
except:
self.saved_data[series] = {}
container = self.saved_data[series]
if self.step % 10 == 0 or self.step < 10:
container[self.step] = saved_data(sol, self.step, self.current_T)
    class _reporting_statics:
        """ Really just a static variable....

        Class-level state shared by reporting(): the time of the previous
        step and the time of the last printed report.
        """
        # NOTE(review): time.clock() was removed in Python 3.8 -- confirm the
        # targeted interpreter version, or migrate to time.perf_counter().
        time = python_time.clock()
        last_report = -1000
    def reporting(self, sol):
        """ Generate printouts to show simulation progress

        Prints the previous step's duration, percent complete and an
        estimated wall-clock completion time. Only runs on conduction
        steps (self.cond == True).

        :param sol: current solution (unused here; presumably kept for a
            uniform callback signature -- confirm against callers).
        """
        if self.cond == True:
            # NOTE(review): time.clock() was removed in Python 3.8 --
            # confirm interpreter version or use time.perf_counter().
            time = python_time.clock()
            dt = time - self._reporting_statics.time
            def rp(txt):
                print("ht3_solver:\t" + txt)
            if self._reporting_statics.last_report - time < 0:
                rp("Completed step " + str(self.step - 1) + " in " \
                   + str(dt) + " s.")
                steps_rem = (self.max_T - self.current_T) / self.d_T
                completion = 1 - steps_rem / (self.step + steps_rem)
                rp(str(int(completion * 100)) + "% complete.")
                # Estimate finish time from remaining steps * last step time.
                more_steps = np.ceil((self.max_T - self.current_T) / self.d_T)
                more_time = more_steps * dt
                exp_fin = python_time.asctime(python_time.localtime(
                    python_time.time() + int(more_time)))
                rp("Expected completion is " + exp_fin)
                print("\n")
                rp("Starting step " + str(self.step) + ".")
                self._reporting_statics.last_report = time
            self._reporting_statics.time = time
def norm_reporting(self):
""" Calculate L1, L2 and Linf norms and print to file.
File is given by self.norm_path
If an expected solution is given, expected L1, L2 and abs erros will
also be computed. Expected solution is f(x, t) where x is global
coordinate and t is time.
"""
if self.norm_saving_rule is not None:
norm_rule = self.norm_saving_rule(self.step, self.d_T)
else:
norm_rule = True
if self.norm_path is not None and norm_rule:
f = open(self.norm_path, 'a', newline="")
csvf = csv.writer(f)
if self.step == 0:
out_row = ["Step", "Time (s)", "Matrix condition", "L1 u", "L2 u", "Linf u"]
if self.expected_solution is not None:
out_row.append("L1 Expected")
out_row.append("L2 Expected")
out_row.append("L1 Error")
out_row.append("L2 Error")
out_row.append("L1 Abs Error")
out_row.append("L2 Abs Error")
csvf.writerow(out_row)
condition_number = np.linalg.cond((self.uv_vol + self.Epsilon * self.guv_vol + \
(self.Epsilon / self.Beta) * self.uv_bound).todense())
out_row = [self.step, self.current_T, condition_number]
# Calculate the l2 norm or l2 error norm:
def current_u(elem, | |
values are: 'Updating', 'Deleting',
and 'Failed'."
name:
description:
- The name of the resource that is unique within a resource group. This name can be used to access the resource.
etag:
description:
- A unique read-only string that changes whenever the resource is updated.
backend_addresses:
description:
- Backend addresses
type: list
suboptions:
fqdn:
description:
- Fully qualified domain name (FQDN).
ip_address:
description:
- IP address
provisioning_state:
description:
- "Provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."
name:
description:
            - Name of the resource that is unique within a resource group. This name can be used to access the resource.
etag:
description:
- A unique read-only string that changes whenever the resource is updated.
type:
description:
- Type of the resource.
load_balancer_backend_address_pools:
description:
- The reference of LoadBalancerBackendAddressPool resource.
type: list
suboptions:
id:
description:
- Resource ID.
provisioning_state:
description:
- "Get provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."
name:
description:
- Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
etag:
description:
- A unique read-only string that changes whenever the resource is updated.
load_balancer_inbound_nat_rules:
description:
- A list of references of LoadBalancerInboundNatRules.
type: list
suboptions:
id:
description:
- Resource ID.
frontend_ip_configuration:
description:
- A reference to frontend IP addresses.
suboptions:
id:
description:
- Resource ID.
protocol:
description:
- "Possible values include: 'C(udp)', 'C(tcp)', 'C(all)'"
choices:
- 'udp'
- 'tcp'
- 'all'
frontend_port:
description:
- "The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable val
ues range from 1 to 65534."
backend_port:
description:
- The port used for the internal endpoint. Acceptable values range from 1 to 65535.
idle_timeout_in_minutes:
description:
- "The timeout for the C(tcp) idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minu
tes. This element is only used when the I(protocol) is set to C(tcp)."
enable_floating_ip:
description:
- "Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availabili
ty Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't b
e changed after you create the endpoint."
provisioning_state:
description:
- "Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."
name:
description:
- Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
etag:
description:
- A unique read-only string that changes whenever the resource is updated.
private_ip_address:
description:
- Private IP address of the IP configuration.
private_ip_allocation_method:
description:
- "Defines how a private IP address is assigned. Possible values are: 'C(static)' and 'C(dynamic)'."
choices:
- 'static'
- 'dynamic'
private_ip_address_version:
description:
- "Available from Api-Version 2016-03-30 onwards, it represents whether the specific ipconfiguration is C(ipv4) or C(ipv6). Defau
lt is taken as C(ipv4). Possible values are: 'C(ipv4)' and 'C(ipv6)'."
choices:
- 'ipv4'
- 'ipv6'
subnet:
description:
- Subnet bound to the IP configuration.
suboptions:
id:
description:
- Resource ID.
address_prefix:
description:
- The address prefix for the subnet.
network_security_group:
description:
- The reference of the NetworkSecurityGroup resource.
suboptions:
id:
description:
- Resource ID.
location:
description:
- Resource location.
security_rules:
description:
- A collection of security rules of the network security group.
type: list
default_security_rules:
description:
- The default security rules of network security group.
type: list
resource_guid:
description:
- The resource GUID property of the network security group resource.
provisioning_state:
description:
- "The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."
etag:
description:
- A unique read-only string that changes whenever the resource is updated.
route_table:
description:
- The reference of the RouteTable resource.
suboptions:
id:
description:
- Resource ID.
location:
description:
- Resource location.
routes:
description:
- Collection of routes contained within a route table.
type: list
disable_bgp_route_propagation:
description:
- Gets or sets whether to disable the I(routes) learned by BGP on that route table. True means disable.
provisioning_state:
description:
- "The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."
etag:
description:
- Gets a unique read-only string that changes whenever the resource is updated.
service_endpoints:
description:
- An array of service endpoints.
type: list
suboptions:
service:
description:
- The type of the endpoint service.
locations:
description:
- A list of locations.
type: list
provisioning_state:
description:
- The provisioning state of the resource.
resource_navigation_links:
description:
- Gets an array of references to the external resources using subnet.
type: list
suboptions:
id:
description:
- Resource ID.
linked_resource_type:
description:
- Resource type of the linked resource.
link:
description:
- Link to the external resource
name:
description:
- Name of the resource that is unique within a resource group. This name can be used to access the resource.
provisioning_state:
description:
- The provisioning state of the resource.
name:
description:
- The name of the resource that is unique within a resource group. This name can be used to access the resource.
etag:
description:
- A unique read-only string that changes whenever the resource is updated.
primary:
description:
- Gets whether this is a primary customer address on the network interface.
public_ip_address:
description:
- Public IP address bound to the IP configuration.
suboptions:
id:
description:
- Resource ID.
location:
description:
- Resource location.
sku:
description:
- The public IP address SKU.
suboptions:
name:
description:
- Name of a public IP address SKU.
choices:
- 'basic'
- 'standard'
public_ip_allocation_method:
description:
- "The public IP allocation method. Possible values are: 'C(static)' and 'C(dynamic)'."
choices:
- 'static'
- 'dynamic'
public_ip_address_version:
description:
- "The public IP address version. Possible values are: 'C(ipv4)' and 'C(ipv6)'."
choices:
- 'ipv4'
- 'ipv6'
dns_settings:
description:
- The FQDN of the DNS record associated with the public IP address.
suboptions:
domain_name_label:
description:
- "Gets or sets the Domain name label.The concatenation of the domain name label and the regionalized DNS zone ma
ke up the fully qualified domain name associated with the public IP address. If a domain name label is specifi
ed, an A DNS record is created for the public IP in the Microsoft Azure DNS system."
fqdn:
description:
- "Gets the FQDN, Fully qualified domain name of the A DNS record associated with the public IP. This is the conc
atenation of the I(domain_name_label) and the regionalized DNS zone."
reverse_fqdn:
description:
- "Gets or Sets the Reverse I(fqdn). A user-visible, fully qualified domain name that resolves to this public IP
address. If the reverseFqdn is specified, then a PTR DNS record is created pointing from the IP address in the
in-addr.arpa domain to the reverse I(fqdn). "
ip_address:
description:
- The IP address associated with the public IP address resource.
idle_timeout_in_minutes:
description:
- The idle timeout of the public IP address.
resource_guid:
description:
- The resource GUID property of the public IP resource.
provisioning_state:
description:
- "The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'."
etag:
description:
- A unique read-only string that changes whenever the resource is updated.
zones:
description:
- A list of availability zones denoting the IP allocated for the resource needs to come from.
type: list
application_security_groups:
description:
- Application security groups in which the IP configuration is included.
type: list
suboptions:
id:
description:
- Resource ID.
location:
description:
- Resource location.
provisioning_state:
description:
- "The provisioning state of the network interface IP configuration. Possible values are: 'Updating', 'Deleting', and 'Failed'."
name:
description:
- The name of the resource that is unique within a resource group. This name can be used to access the resource.
etag:
description:
- A unique read-only string that changes whenever the resource is updated.
backend_addresses:
description:
- Backend addresses
| |
n -> ... h n', h=self.H)
# Augment B
if state is not None:
# Have to "unbilinear" the state to put it into the same "type" as B
# Compute 1/dt * (I + dt/2 A) @ state
# Can do this without expanding (maybe minor speedup using conj symmetry in theory), but it's easier to read this way
s = _conj(state) if state.size(-1) == self.N else state # (B H N)
sA = (
s * _conj(w) # (B H N)
- contract('bhm, rhm, rhn -> bhn', s, _conj(Q), _conj(P))
)
s = s / dt.unsqueeze(-1) + sA / 2
s = s[..., :self.N]
B = torch.cat([s, B], dim=-3) # (s+1, H, N)
# Incorporate dt into A
w = w * dt.unsqueeze(-1) # (H N)
# Stack B and p, C and q for convenient batching
B = torch.cat([B, P], dim=-3) # (s+1+r, H, N)
C = torch.cat([C, Q], dim=-3) # (c+r, H, N)
# Incorporate B and C batch dimensions
v = B.unsqueeze(-3) * C.unsqueeze(-4) # (s+1+r, c+r, H, N)
# w = w[None, None, ...] # (1, 1, H, N)
# z = z[None, None, None, ...] # (1, 1, 1, L)
# Calculate resolvent at omega
if has_cauchy_extension and z.dtype == torch.cfloat and not self.keops:
r = cauchy_mult(v, z, w, symmetric=True)
elif has_pykeops:
r = cauchy_conj(v, z, w)
else:
r = cauchy_conj_slow(v, z, w)
r = r * dt[None, None, :, None] # (S+1+R, C+R, H, L)
# Low-rank Woodbury correction
if self.rank == 1:
k_f = r[:-1, :-1, :, :] - r[:-1, -1:, :, :] * r[-1:, :-1, :, :] / (1 + r[-1:, -1:, :, :])
elif self.rank == 2:
r00 = r[: -self.rank, : -self.rank, :, :]
r01 = r[: -self.rank, -self.rank :, :, :]
r10 = r[-self.rank :, : -self.rank, :, :]
r11 = r[-self.rank :, -self.rank :, :, :]
det = (1 + r11[:1, :1, :, :]) * (1 + r11[1:, 1:, :, :]) - r11[:1, 1:, :, :] * r11[1:, :1, :, :]
s = (
r01[:, :1, :, :] * (1 + r11[1:, 1:, :, :]) * r10[:1, :, :, :]
+ r01[:, 1:, :, :] * (1 + r11[:1, :1, :, :]) * r10[1:, :, :, :]
- r01[:, :1, :, :] * (r11[:1, 1:, :, :]) * r10[1:, :, :, :]
- r01[:, 1:, :, :] * (r11[1:, :1, :, :]) * r10[:1, :, :, :]
)
s = s / det
k_f = r00 - s
else:
r00 = r[:-self.rank, :-self.rank, :, :]
r01 = r[:-self.rank, -self.rank:, :, :]
r10 = r[-self.rank:, :-self.rank, :, :]
r11 = r[-self.rank:, -self.rank:, :, :]
r11 = rearrange(r11, "a b h n -> h n a b")
r11 = torch.linalg.inv(torch.eye(self.rank, device=r.device) + r11)
r11 = rearrange(r11, "h n a b -> a b h n")
k_f = r00 - torch.einsum("i j h n, j k h n, k l h n -> i l h n", r01, r11, r10)
# Final correction for the bilinear transform
k_f = k_f * 2 / (1 + omega)
# Move from frequency to coefficients
k = torch.fft.irfft(k_f) # (S+1, C, H, L)
# Truncate to target length
k = k[..., :L]
if state is not None:
k_state = k[:-1, :, :, :] # (S, C, H, L)
else:
k_state = None
k_B = k[-1, :, :, :] # (C H L)
return k_B, k_state
@torch.no_grad()
def double_length(self):
if self.verbose: log.info(f"S4: Doubling length from L = {self.L} to {2*self.L}")
self._setup_C(double_length=True)
@torch.no_grad()
def _check(self):
"""Check if A, B, C parameters and vanilla SSKernel construction can be recovered"""
self.setup_step()
K = krylov(self.L, self.dA, self.dB, self.dC)
diff = K - self.forward(L=self.L)
print("checking DPLR Kernel construction", torch.sum(diff ** 2))
    @torch.no_grad()
    def _setup_linear(self):
        """ Create parameters that allow fast linear stepping of state

        Precomputes the pieces of the bilinear-discretized DPLR update
        (diagonal factors D and E plus the low-rank correction R) and
        stores them in ``self.step_params`` for ``_step_state_linear``.
        """
        w = self._w()
        B = _r2c(self.B) # (H N)
        P = _r2c(self.P)
        # When no separate Q is stored, the DPLR form uses Q = conj(P).
        Q = P.conj() if self.Q is None else _r2c(self.Q)
        # Prepare Linear stepping
        dt = torch.exp(self.log_dt)
        D = (2.0 / dt.unsqueeze(-1) - w).reciprocal()  # (H, N)
        # R starts as (I + 2 Re[contract(Q, D, P)]), then is replaced by
        # R^{-1} @ (Q*D) via the linear solve below.
        R = (torch.eye(self.rank, dtype=w.dtype, device=w.device) + 2*contract('r h n, h n, s h n -> h r s', Q, D, P).real) # (H r r)
        Q_D = rearrange(Q*D, 'r h n -> h r n')
        R = torch.linalg.solve(R.to(Q_D), Q_D) # (H r N)
        R = rearrange(R, 'h r n -> r h n')
        self.step_params = {
            "D": D, # (H N)
            "R": R, # (r H N)
            "P": P, # (r H N)
            "Q": Q, # (r H N)
            "B": B, # (1 H N)
            "E": 2.0 / dt.unsqueeze(-1) + w, # (H N)
        }
    def _step_state_linear(self, u=None, state=None):
        """
        Version of the step function that has time O(N) instead of O(N^2) per step, which takes advantage of the DPLR form and bilinear discretization.

        Unfortunately, as currently implemented it's about 2x slower because it calls several sequential operations. Perhaps a fused CUDA kernel implementation would be much faster

        u: (H) input
        state: (H, N/2) state with conjugate pairs
          Optionally, the state can have last dimension N

        Returns: same shape as state
        """
        C = _r2c(self.C) # View used for dtype/device
        if u is None: # Special case used to find dA
            u = torch.zeros(self.H, dtype=C.dtype, device=C.device)
        if state is None: # Special case used to find dB
            state = torch.zeros(self.H, self.N, dtype=C.dtype, device=C.device)
        step_params = self.step_params.copy()
        if state.size(-1) == self.N: # Only store half of the conjugate pairs; should be true by default
            # There should be a slightly faster way using conjugate symmetry
            contract_fn = lambda p, x, y: contract('r h n, r h m, ... h m -> ... h n', _conj(p), _conj(x), _conj(y))[..., :self.N] # inner outer product
        else:
            assert state.size(-1) == 2*self.N
            # Full (2N) state: expand all precomputed factors to match.
            step_params = {k: _conj(v) for k, v in step_params.items()}
            # TODO worth setting up a contract_expression in default_state if we want to use this at inference time for stepping
            contract_fn = lambda p, x, y: contract('r h n, r h m, ... h m -> ... h n', p, x, y) # inner outer product
        D = step_params["D"]  # (H N)
        E = step_params["E"]  # (H N)
        R = step_params["R"]  # (r H N)
        P = step_params["P"]  # (r H N)
        Q = step_params["Q"]  # (r H N)
        B = step_params["B"]  # (1 H N)
        # Apply the precomputed bilinear-step factors from _setup_linear.
        new_state = E * state - contract_fn(P, Q, state) # (B H N)
        new_state = new_state + 2.0 * B * u.unsqueeze(-1)  # (B H N)
        new_state = D * (new_state - contract_fn(P, R, new_state))
        return new_state
def _setup_state(self):
""" Construct dA and dB for discretized state equation """
# Construct dA and dB by using the stepping
self._setup_linear()
C = _r2c(self.C) # Just returns a view that we use for finding dtype/device
state = torch.eye(2*self.N, dtype=C.dtype, device=C.device).unsqueeze(-2) # (N 1 N)
dA = self._step_state_linear(state=state)
dA = rearrange(dA, "n h m -> h m n")
# self.dA = dA # (H N N)
u = C.new_ones(self.H)
dB = self._step_state_linear(u=u)
dB = _conj(dB)
dB = rearrange(dB, '1 h n -> h n') # (H N)
return dA, dB
def _step_state(self, u, state):
""" Must be called after self.default_state() is used to construct an initial state! """
next_state = self.state_contraction(self.dA, state) + self.input_contraction(self.dB, u)
return next_state
def setup_step(self, mode='dense'):
""" Set up dA, dB, dC discretized parameters for stepping """
self.dA, self.dB = self._setup_state()
# Calculate original C
dA_L = power(self.L, self.dA)
I = torch.eye(self.dA.size(-1)).to(dA_L)
C = _conj(_r2c(self.C)) # (H C N)
dC = torch.linalg.solve(
I - dA_L.transpose(-1, -2),
C.unsqueeze(-1),
).squeeze(-1)
self.dC = dC
# Do special preprocessing for different step modes
self._step_mode = mode
if mode == 'linear':
# Linear case: special step function for the state, we need to handle output
# use conjugate symmetry by default, which affects the output projection
self.dC = | |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import numpy as np
from Utils import GaussianBlur, CircularGaussKernel
from LAF import abc2A,rectifyAffineTransformationUpIsUp, sc_y_x2LAFs,sc_y_x_and_A2LAFs
from Utils import generate_2dgrid, generate_2dgrid, generate_3dgrid
from Utils import zero_response_at_border
class ScalePyramid(nn.Module):
    """Gaussian scale pyramid with ``nLevels`` levels per octave.

    Each octave blurs the previous level by the factor needed to reach the
    next sigma on the geometric schedule ``sigmaStep = 2**(1/nLevels)``, then
    starts the next octave by 2x subsampling.  Octaves are generated until
    the image becomes too small relative to ``border``.
    """
    def __init__(self, nLevels = 3, init_sigma = 1.6, border = 5):
        super(ScalePyramid,self).__init__()
        self.nLevels = nLevels;
        self.init_sigma = init_sigma
        # Geometric sigma progression so that nLevels steps double sigma.
        self.sigmaStep = 2 ** (1. / float(self.nLevels))
        #print 'step',self.sigmaStep
        self.b = border
        # Smallest admissible octave side length given the border.
        self.minSize = 2 * self.b + 2 + 1;
        return
    def forward(self,x):
        """Build the pyramid for image batch ``x``.

        Returns
        -------
        (pyr, sigmas, pixel_dists) : three parallel lists of lists, indexed
        [octave][level]: blurred images, their absolute sigmas, and the
        pixel spacing (1, 2, 4, ...) of each octave.
        """
        pixelDistance = 1.0;
        curSigma = 0.5
        # Pre-blur the input from its assumed sigma of 0.5 up to init_sigma.
        if self.init_sigma > curSigma:
            sigma = np.sqrt(self.init_sigma**2 - curSigma**2)
            curSigma = self.init_sigma
            curr = GaussianBlur(sigma = sigma)(x)
        else:
            curr = x
        sigmas = [[curSigma]]
        pixel_dists = [[1.0]]
        pyr = [[curr]]
        j = 0
        while True:
            curr = pyr[-1][0]
            # nLevels + 1 extra levels so neighbors exist for 3D NMS.
            for i in range(1, self.nLevels + 2):
                # Incremental blur that takes curSigma to curSigma*sigmaStep.
                sigma = curSigma * np.sqrt(self.sigmaStep*self.sigmaStep - 1.0 )
                #print 'blur sigma', sigma
                curr = GaussianBlur(sigma = sigma)(curr)
                curSigma *= self.sigmaStep
                pyr[j].append(curr)
                sigmas[j].append(curSigma)
                pixel_dists[j].append(pixelDistance)
                if i == self.nLevels:
                    # kernel_size=1, stride=2 avg-pool is plain 2x subsampling.
                    nextOctaveFirstLevel = F.avg_pool2d(curr, kernel_size = 1, stride = 2, padding = 0)
            pixelDistance = pixelDistance * 2.0
            curSigma = self.init_sigma
            # Stop once the subsampled octave is too small to be useful.
            if (nextOctaveFirstLevel[0,0,:,:].size(0) <= self.minSize) or (nextOctaveFirstLevel[0,0,:,:].size(1) <= self.minSize):
                break
            pyr.append([nextOctaveFirstLevel])
            sigmas.append([curSigma])
            pixel_dists.append([pixelDistance])
            j+=1
        return pyr, sigmas, pixel_dists
class HessianResp(nn.Module):
    """Scale-normalized determinant-of-Hessian response map.

    forward(x, scale) returns |Ixx * Iyy - Ixy^2| * scale**4, where the
    derivatives are computed with fixed central-difference convolutions and
    'replicate' border padding.
    """
    def __init__(self):
        super(HessianResp, self).__init__()
        def fixed_conv(kernel_size, weights):
            # 1->1 channel convolution with frozen hand-set weights.
            conv = nn.Conv2d(1, 1, kernel_size=kernel_size, bias=False)
            conv.weight.data = torch.from_numpy(np.array(weights, dtype=np.float32))
            return conv
        # First derivatives (central differences).
        self.gx = fixed_conv((1, 3), [[[[0.5, 0, -0.5]]]])
        self.gy = fixed_conv((3, 1), [[[[0.5], [0], [-0.5]]]])
        # Second derivatives.
        self.gxx = fixed_conv((1, 3), [[[[1.0, -2.0, 1.0]]]])
        self.gyy = fixed_conv((3, 1), [[[[1.0], [-2.0], [1.0]]]])
        return
    def forward(self, x, scale):
        pad_w = (1, 1, 0, 0)
        pad_h = (0, 0, 1, 1)
        ixx = self.gxx(F.pad(x, pad_w, 'replicate'))
        iyy = self.gyy(F.pad(x, pad_h, 'replicate'))
        # Mixed derivative: d/dy of d/dx.
        ixy = self.gy(F.pad(self.gx(F.pad(x, pad_w, 'replicate')), pad_h, 'replicate'))
        # Hessian determinant, normalized for the detection scale.
        return torch.abs(ixx * iyy - ixy * ixy) * (scale ** 4)
class AffineShapeEstimator(nn.Module):
    """Estimate the local affine shape of a patch from its gradients.

    forward() builds the Gaussian-weighted second-moment matrix
    [[a, b], [b, c]] of each patch and maps it to an upright affine
    transformation via ``invSqrt`` + ``abc2A`` +
    ``rectifyAffineTransformationUpIsUp``.

    NOTE(review): ``threshold`` is stored but never used in this class, and
    the eigenvalue-ratio ``mask`` computed in forward() is not returned (the
    ``#, mask`` tail is commented out) - confirm whether that is intentional.
    """
    def __init__(self, threshold = 0.001, patch_size = 19):
        super(AffineShapeEstimator, self).__init__()
        self.threshold = threshold;
        self.PS = patch_size
        # Fixed [-1, 0, 1] derivative filters.
        self.gx = nn.Conv2d(1, 1, kernel_size=(1,3), bias = False)
        self.gx.weight.data = torch.from_numpy(np.array([[[[-1, 0, 1]]]], dtype=np.float32))
        self.gy = nn.Conv2d(1, 1, kernel_size=(3,1), bias = False)
        self.gy.weight.data = torch.from_numpy(np.array([[[[-1], [0], [1]]]], dtype=np.float32))
        # Circular Gaussian window for weighting the gradient products.
        self.gk = torch.from_numpy(CircularGaussKernel(kernlen = self.PS, sigma = (self.PS / 2) /3.0).astype(np.float32))
        self.gk = Variable(self.gk, requires_grad=False)
        return
    def invSqrt(self,a,b,c):
        """Inverse square root of the symmetric 2x2 matrix [[a, b], [b, c]].

        Returns (new_a, new_b, new_c, l1, l2): the entries of the normalized
        (unit-determinant) inverse square root and the two (sorted)
        eigenvalue-derived magnitudes l1 >= l2.

        NOTE(review): appears to diagonalize via a trigonometric (Givens
        rotation) parameterization (r = cos, t = sin); done elementwise on
        batched tensors - confirm against a reference implementation before
        modifying.
        """
        eps = 1e-12
        # mask selects entries where b != 0 (rotation actually needed).
        mask = (b != 0).float()
        r1 = mask * (c - a) / (2. * b + eps)
        t1 = torch.sign(r1) / (torch.abs(r1) + torch.sqrt(1. + r1*r1));
        r = 1.0 / torch.sqrt( 1. + t1*t1)
        t = t1*r;
        # Where b == 0, fall back to the identity rotation.
        r = r * mask + 1.0 * (1.0 - mask);
        t = t * mask;
        x = 1. / torch.sqrt( r*r*a - 2.0*r*t*b + t*t*c)
        z = 1. / torch.sqrt( t*t*a + 2.0*r*t*b + r*r*c)
        # Normalize to unit determinant.
        d = torch.sqrt( x * z)
        x = x / d
        z = z / d
        l1 = torch.max(x,z)
        l2 = torch.min(x,z)
        # Rotate back: R^T diag(x, z) R.
        new_a = r*r*x + t*t*z
        new_b = -r*t*x + t*r*z
        new_c = t*t*x + r*r *z
        return new_a, new_b, new_c, l1, l2
    def forward(self,x):
        # Keep the Gaussian window on the same device as the input.
        if x.is_cuda:
            self.gk = self.gk.cuda()
        else:
            self.gk = self.gk.cpu()
        gx = self.gx(F.pad(x, (1, 1, 0, 0), 'replicate'))
        gy = self.gy(F.pad(x, (0, 0, 1, 1), 'replicate'))
        # Gaussian-weighted second-moment matrix entries, one per patch.
        a1 = (gx * gx * self.gk.unsqueeze(0).unsqueeze(0).expand_as(gx)).view(x.size(0),-1).mean(dim=1)
        b1 = (gx * gy * self.gk.unsqueeze(0).unsqueeze(0).expand_as(gx)).view(x.size(0),-1).mean(dim=1)
        c1 = (gy * gy * self.gk.unsqueeze(0).unsqueeze(0).expand_as(gx)).view(x.size(0),-1).mean(dim=1)
        a, b, c, l1, l2 = self.invSqrt(a1,b1,c1)
        # Anisotropy check (ratio of axis lengths <= 6); currently unused.
        rat1 = l1/l2
        mask = (torch.abs(rat1) <= 6.).float().view(-1);
        return rectifyAffineTransformationUpIsUp(abc2A(a,b,c))#, mask
class OrientationDetector(nn.Module):
    """Dominant gradient orientation of a patch (SIFT-style histogram).

    Builds a 36-bin orientation histogram of Gaussian-weighted gradient
    magnitudes, smooths it circularly, and returns the angle of the peak bin
    (or the corresponding 2x2 rotation matrix).
    """
    def __init__(self,
                 mrSize = 3.0, patch_size = None):
        super(OrientationDetector, self).__init__()
        if patch_size is None:
            patch_size = 32;
        self.PS = patch_size;
        self.bin_weight_kernel_size, self.bin_weight_stride = self.get_bin_weight_kernel_size_and_stride(self.PS, 1)
        self.mrSize = mrSize;
        # Orientation histogram resolution: 36 bins of 10 degrees.
        self.num_ang_bins = 36
        # Fixed central-difference derivative filters.
        self.gx = nn.Conv2d(1, 1, kernel_size=(1,3), bias = False)
        self.gx.weight.data = torch.from_numpy(np.array([[[[0.5, 0, -0.5]]]], dtype=np.float32))
        self.gy = nn.Conv2d(1, 1, kernel_size=(3,1), bias = False)
        self.gy.weight.data = torch.from_numpy(np.array([[[[0.5], [0], [-0.5]]]], dtype=np.float32))
        # Circular [0.33, 0.34, 0.33] smoothing over neighboring angle bins.
        self.angular_smooth = nn.Conv1d(1, 1, kernel_size=3, padding = 1, bias = False)
        self.angular_smooth.weight.data = torch.from_numpy(np.array([[[0.33, 0.34, 0.33]]], dtype=np.float32))
        # Gaussian spatial weighting of gradient magnitudes.
        self.gk = 10. * torch.from_numpy(CircularGaussKernel(kernlen=self.PS).astype(np.float32))
        self.gk = Variable(self.gk, requires_grad=False)
        return
    def get_bin_weight_kernel_size_and_stride(self, patch_size, num_spatial_bins):
        """Kernel size/stride for spatial binning of a patch."""
        bin_weight_stride = int(round(2.0 * np.floor(patch_size / 2) / float(num_spatial_bins + 1)))
        bin_weight_kernel_size = int(2 * bin_weight_stride - 1);
        return bin_weight_kernel_size, bin_weight_stride
    def get_rotation_matrix(self, angle_in_radians):
        """Batch of 2x2 rotation matrices for the given angles."""
        angle_in_radians = angle_in_radians.view(-1, 1, 1);
        sin_a = torch.sin(angle_in_radians)
        cos_a = torch.cos(angle_in_radians)
        A1_x = torch.cat([cos_a, sin_a], dim = 2)
        A2_x = torch.cat([-sin_a, cos_a], dim = 2)
        transform = torch.cat([A1_x,A2_x], dim = 1)
        return transform
    def forward(self, x, return_rot_matrix = False):
        gx = self.gx(F.pad(x, (1,1,0, 0), 'replicate'))
        gy = self.gy(F.pad(x, (0,0, 1,1), 'replicate'))
        mag = torch.sqrt(gx * gx + gy * gy + 1e-10)
        if x.is_cuda:
            self.gk = self.gk.cuda()
        mag = mag * self.gk.unsqueeze(0).unsqueeze(0).expand_as(mag)
        ori = torch.atan2(gy,gx)
        # Map orientation from [-pi, pi) onto [0, num_ang_bins).
        o_big = float(self.num_ang_bins) *(ori + 1.0 * math.pi )/ (2.0 * math.pi)
        bo0_big = torch.floor(o_big)
        wo1_big = o_big - bo0_big
        bo0_big = bo0_big % self.num_ang_bins
        bo1_big = (bo0_big + 1) % self.num_ang_bins
        wo0_big = (1.0 - wo1_big) * mag
        wo1_big = wo1_big * mag
        # NOTE(review): only the bo0/wo0 half of the soft assignment is
        # accumulated below; bo1_big/wo1_big are computed but never used -
        # looks like incomplete linear interpolation between bins. Confirm.
        ang_bins = []
        for i in range(0, self.num_ang_bins):
            ang_bins.append(F.adaptive_avg_pool2d((bo0_big == i).float() * wo0_big, (1,1)))
        ang_bins = torch.cat(ang_bins,1).view(-1,1,self.num_ang_bins)
        ang_bins = self.angular_smooth(ang_bins)
        values, indices = ang_bins.view(-1,self.num_ang_bins).max(1)
        # Convert peak bin index back to an angle in radians.
        angle = -((2. * float(np.pi) * indices.float() / float(self.num_ang_bins)) - float(math.pi))
        if return_rot_matrix:
            return self.get_rotation_matrix(angle)
        return angle
class NMS2d(nn.Module):
    """Non-maximum suppression over a 2D response map.

    A pixel survives iff it equals the maximum of its
    kernel_size x kernel_size neighborhood (within eps); when ``threshold``
    is positive the pixel must additionally exceed it.
    """
    def __init__(self, kernel_size = 3, threshold = 0):
        super(NMS2d, self).__init__()
        # BUG FIX: the padding was ``kernel_size / 2``, which is a float in
        # Python 3 and makes nn.MaxPool2d raise a TypeError; use integer
        # division (consistent with NMS3d below).
        self.MP = nn.MaxPool2d(kernel_size, stride=1, return_indices=False, padding = kernel_size//2)
        self.eps = 1e-5
        self.th = threshold
        return
    def forward(self, x):
        if self.th > self.eps:
            # Keep above-threshold responses that are local maxima.
            return x * (x > self.th).float() * ((x + self.eps - self.MP(x)) > 0).float()
        else:
            # No threshold: keep any local maximum.
            return ((x - self.MP(x) + self.eps) > 0).float() * x
class NMS3d(nn.Module):
    """Non-maximum suppression over a (scale, y, x) response volume.

    A voxel survives iff it equals the maximum of its 3D neighborhood
    (within eps); when ``threshold`` is positive the voxel must also
    exceed it.
    """
    def __init__(self, kernel_size = 3, threshold = 0):
        super(NMS3d, self).__init__()
        # No padding along the scale axis: only locations with a full set
        # of scale neighbors produce output.
        self.MP = nn.MaxPool3d(kernel_size, stride=1, return_indices=False, padding = (0, kernel_size//2, kernel_size//2))
        self.eps = 1e-5
        self.th = threshold
        return
    def forward(self, x):
        is_local_max = ((x + self.eps - self.MP(x)) > 0).float()
        if self.th > self.eps:
            above_threshold = (x > self.th).float()
            return x * above_threshold * is_local_max
        else:
            return is_local_max * x
class NMS3dAndComposeA(nn.Module):
    """3D (scale-space) NMS plus sub-voxel refinement into keypoint LAFs.

    Given three consecutive scale levels, suppresses non-maxima, optionally
    keeps the top-k responses, refines (scale, y, x) by a response-weighted
    local average, and converts the result to local affine frames.
    """
    def __init__(self, w = 0, h = 0, kernel_size = 3, threshold = 0, scales = None, border = 3, mrSize = 1.0):
        super(NMS3dAndComposeA, self).__init__()
        self.eps = 1e-7
        self.ks = 3
        self.th = threshold
        self.cube_idxs = []
        self.border = border
        self.mrSize = mrSize
        self.beta = 1.0
        # All-ones 3x3x3 kernel used as the normalizer of the weighted mean.
        self.grid_ones = Variable(torch.ones(3,3,3,3), requires_grad=False)
        self.NMS3d = NMS3d(kernel_size, threshold)
        if (w > 0) and (h > 0):
            # Precompute the pixel coordinate grid when the size is known.
            self.spatial_grid = generate_2dgrid(h, w, False).view(1, h, w,2).permute(3,1, 2, 0)
            self.spatial_grid = Variable(self.spatial_grid)
        else:
            self.spatial_grid = None
        return
    def forward(self, low, cur, high, num_features = 0, octaveMap = None, scales = None):
        """Detect maxima on the middle level ``cur``.

        Returns (responses, LAFs, updated octaveMap), or (None, None, None)
        when at most one response survives NMS.
        """
        assert low.size() == cur.size() == high.size()
        #Filter responce map
        self.is_cuda = low.is_cuda;
        resp3d = torch.cat([low,cur,high], dim = 1)
        mrSize_border = int(self.mrSize);
        # NMS over the 3-level stack; keep only the middle level ([:,1:2]),
        # zero the border, and mask out already-claimed pixels if given.
        if octaveMap is not None:
            nmsed_resp = zero_response_at_border(self.NMS3d(resp3d.unsqueeze(1)).squeeze(1)[:,1:2,:,:], mrSize_border) * (1. - octaveMap.float())
        else:
            nmsed_resp = zero_response_at_border(self.NMS3d(resp3d.unsqueeze(1)).squeeze(1)[:,1:2,:,:], mrSize_border)
        num_of_nonzero_responces = (nmsed_resp > 0).float().sum().item()#data[0]
        if (num_of_nonzero_responces <= 1):
            return None,None,None
        if octaveMap is not None:
            # Mark the newly detected locations as claimed.
            octaveMap = (octaveMap.float() + nmsed_resp.float()).byte()
        nmsed_resp = nmsed_resp.view(-1)
        # Keep the top num_features responses, or all nonzero ones.
        if (num_features > 0) and (num_features < num_of_nonzero_responces):
            nmsed_resp, idxs = torch.topk(nmsed_resp, k = num_features, dim = 0);
        else:
            idxs = nmsed_resp.data.nonzero().squeeze()
            nmsed_resp = nmsed_resp[idxs]
        #Get point coordinates grid
        # 3D offset grid over (scale, y, x); explicit scales override the
        # default symmetric [-1, 0, 1]-style grid.
        if type(scales) is not list:
            self.grid = generate_3dgrid(3,self.ks,self.ks)
        else:
            self.grid = generate_3dgrid(scales,self.ks,self.ks)
        self.grid = Variable(self.grid.t().contiguous().view(3,3,3,3), requires_grad=False)
        if self.spatial_grid is None:
            self.spatial_grid = generate_2dgrid(low.size(2), low.size(3), False).view(1, low.size(2), low.size(3),2).permute(3,1, 2, 0)
            self.spatial_grid = Variable(self.spatial_grid)
        if self.is_cuda:
            self.spatial_grid = self.spatial_grid.cuda()
            self.grid_ones = self.grid_ones.cuda()
            self.grid = self.grid.cuda()
        #residual_to_patch_center
        # Response-weighted average of the offset grid = sub-voxel residual.
        sc_y_x = F.conv2d(resp3d, self.grid,
                          padding = 1) / (F.conv2d(resp3d, self.grid_ones, padding = 1) + 1e-8)
        ##maxima coords
        # NOTE(review): indexes batch element 0 only - appears to assume
        # batch size 1; confirm before using with larger batches.
        sc_y_x[0,1:,:,:] = sc_y_x[0,1:,:,:] + self.spatial_grid[:,:,:,0]
        sc_y_x = sc_y_x.view(3,-1).t()
        sc_y_x = sc_y_x[idxs,:]
        # Normalize scale by the short side and (y, x) by height/width.
        min_size = float(min((cur.size(2)), cur.size(3)))
        sc_y_x[:,0] = sc_y_x[:,0] / min_size
        sc_y_x[:,1] = sc_y_x[:,1] / float(cur.size(2))
        sc_y_x[:,2] = sc_y_x[:,2] / float(cur.size(3))
        return nmsed_resp, sc_y_x2LAFs(sc_y_x), octaveMap
class NMS3dAndComposeAAff(nn.Module):
def __init__(self, w = 0, h = 0, kernel_size = 3, threshold = 0, scales = None, border = 3, mrSize = 1.0):
super(NMS3dAndComposeAAff, self).__init__()
self.eps = 1e-7
self.ks = 3
self.th = threshold
self.cube_idxs = []
self.border = border
self.mrSize = mrSize
self.beta = 1.0
self.grid_ones = | |
# filename: pyNastran/converters/dev/calculix/nastran_to_calculix.py
"""
defines:
- CalculixConverter
"""
from collections import defaultdict
from numpy import array, zeros, cross
from numpy.linalg import norm # type: ignore
from pyNastran.bdf.bdf import BDF, LOAD # PBAR, PBARL, PBEAM, PBEAML,
from pyNastran.bdf.cards.loads.static_loads import Force, Moment
class CalculixConverter(BDF):
"""
Converts a BDF to Calculix (inp/dat/py files).
.. warning:: Totally inaccurate....
How:
* Nodes/Coordinate Systems/Elements/Properties/Materials are
directly extracted from the BDF. All objects must reference
each other properly.
* Just like Nastran, extra materials/properties are allowed.
No idea how Code_Aster handles SPOINTs or unassociated GRIDs.
* Loads must be referenced by a single LOAD card in the Case Control deck.
This is consistent with standard Nastran.
Limitations:
* All Case Control inputs must come from SUBCASE 1.
* LOAD cards must bound FORCEx/MOMENTx/PLOAD4 cards in order for loads to be written
* Only SOL 101 (Static)
Supported Cards:
* GRID, COORDx
* LOAD, FORCEx, MOMENTx, PLOAD4
* CBAR, CBEAM, CROD, CTUBE, CTETRA, CPENTA, CHEXA,CTRIA3/6, CQUAD4/8
* PBAR, PBEAM, PROD, PTUBE, PSOLID, PSHELL
* MAT1
* GRAV (incorrect writing, but really easy to make it correct given proper format)
.. todo::
* PCOMP
* SPC, SPC1, MPC
* RBE2, RBE3
"""
def __init__(self, language='english'):
self.language = 'english'
BDF.__init__(self)
self.max_nid_len = None
self.max_eid_len = None
self.max_pid_len = None
self.max_mid_len = None
def get_elements_by_pid(self, element_ids=None):
"""
builds a dictionary where the key is the property ID and the value
is a list of element IDs
"""
if element_ids is None:
element_ids = self.elements.keys()
props = defaultdict(list)
for eid in element_ids:
element = self.elements[eid]
pid = element.Pid()
props[pid].append(eid)
return props
def get_elements_by_mid(self):
"""
builds a dictionary where the key is the material ID and the value
is a list of element IDs
"""
mats = {0: []}
for mid in self.materials:
mats[mid] = []
for eid, element in self.elements.items():
try:
mid = element.Mid()
mats[mid].append(eid)
except:
mats[0].append(eid)
return mats
def get_elements_by_type(self, element_ids=None):
"""
builds a dictionary where the key is the element type and the value
is a list of element IDs
"""
if element_ids is None:
element_ids = self.elements.keys()
elems = defaultdict(list)
for eid in element_ids:
element = self.elements[eid]
element_type = element.type
elems[element_type].append(eid)
return elems
def get_properties_by_mid(self):
"""
builds a dictionary where the key is the material ID and the value
is a list of property IDs
"""
mats = {0: []}
for mid in self.materials:
mats[mid] = []
for pid, property in self.properties.items():
try:
mid = property.Mid()
mats[mid].append(pid)
except:
mats[0].append(pid)
return mats
def calculix_executive(self):
inp = ''
if self.sol == 101:
inp += 'MECA_STATIQUE % SOL 101 - linear statics\n'
inp += 'stat(MECA_STATIQUE(MODELE=model,CHAM_MATER=material,CARA_ELEM=elemcar,\n'
inp += 'ECIT=(_F(Charge=AllBoundaryConditions,),\n',
inp += ' _F(Charge=AllLoads,),\n',
inp += ' ),\n',
inp += "TITRE='My Title'\n"
return inp
def calculix_nodes(self, fdat):
"""
*NODE
1, 0.000000, 0.000000, 0.000000
2, 1.000000, 0.000000, 0.000000
3, 1.000000, 1.000000, 0.000000
"""
dat = ''
dat += '** Calculix_Nodes\n'
dat += '*NODE\n'
fdat.write(dat)
form = '%-' + str(self.max_nid_len) + 's %8s,%8s,%8s\n'
for nid, node in sorted(self.nodes.items()):
xyz = node.get_position()
dat = form % (nid, xyz[0], xyz[1], xyz[2])
fdat.write(dat)
dat = '\n\n'
dat += self.breaker()
fdat.write(dat)
    def calculix_elements(self, fdat):
        """
        writes one *ELEMENT block per (pid, element type) pair and returns
        the generated ELSET names

        Parameters
        ----------
        fdat : file
            the open .dat file to write to

        Returns
        -------
        elsets : list[str]
            element set names of the form ``pid<pid>_Elements<etype>``

        .. todo:: sort elements by Type and Material ID
        """
        dat = ''
        dat += '** Calculix_Elements\n'
        # Nastran element type -> Calculix element type
        etype_map = {
            'CBAR' : 'BR32R',
            'CBEAM' : 'BR32R',
            'CTRIA3' : 'C2D3',
            'CTRIA6' : 'C2D6', # 'S6' ???
            'CQUAD4' : 'C2D4', # 'S4' ???
            'CQUAD8' : 'C2D8',
            'CSHEAR' : 'S4',
            'CTRIAX' : 'CAX6',
            'CQUADX' : 'CAX8',
            'CTRIAX6' : 'CAX6',
            'CQUADR' : 'CAX8',
            'CTETRA' : 'C3D4',
            'CTETRA4' : 'C3D4',
            'CPYRAM' : 'C3D5',
            'CPYRAM5' : 'C3D5',
            'CPYRAM13' : 'C3D13',
            'CPENTA10' : 'C3D10',
            'CPENTA6' : 'C3D6',
            'CPENTA15' : 'C3D15',
            'CHEXA' : 'C3D8',
            'CHEXA8' : 'C3D8',
            'CHEXA20' : 'C3D20',
        }
        pid_eids = self.get_elements_by_pid(element_ids=None)
        # NOTE(review): the field width here is ``self.nelements`` (the
        # element *count*), not the width of the largest eid - looks
        # suspicious; confirm the intended column width.
        form_elements = '%-' + str(self.nelements) + 's, '
        elsets = []
        for pid, eids in sorted(pid_eids.items()):
            # group this property's elements by type; one ELSET per type
            elems = self.get_elements_by_type(eids)
            for etype, eids in sorted(elems.items()):
                calculix_type = etype_map[etype]
                elset = 'pid%i_Elements%s' % (pid, etype)
                elsets.append(elset)
                dat += '** eid,n1,n2,n3,etc... for a %s\n' % etype
                dat += '*ELEMENT, TYPE=%s, ELSET=%s\n' % (calculix_type, elset)
                for eid in eids:
                    dat += form_elements % eid
                    element = self.elements[eid]
                    for nid in element.node_ids:
                        dat += '%s,' % nid
                    # replace the trailing comma with a newline
                    dat = dat[:-1] + '\n'
        dat += self.breaker()
        #print(dat)
        fdat.write(dat)
        return elsets
def calculix_properties(self, elsets):
inp = ''
inp += '** calculix_properties\n'
for elset in elsets:
#elset = 'pid%i_Elements%i' % (pid, etype)
pid, etype = elset.lstrip('pid').split('_')
pid = int(pid)
etype = etype[8:] # element type
prop = self.properties[pid]
if prop.type == 'PSHELL':
mid = prop.mid
inp += '*SHELL SECTION,ELSET=%s,MATERIAL=MAT%i\n' % (elset, mid.mid)
inp += '%s\n' % prop.t
#def _write_calculix(self, marker='markerDummyProp',
#element_set='ELsetDummyProp'):
#msg = '*SHELL SECTION,MATERIAL=M%s_%s,ELSET=%s,OFFSET=%s\n' % (
#marker, self.mid, element_set, self.z1)
#msg += '** THICKNESS\n'
#msg += '%s\n\n' % (self.t)
#return msg
elif prop.type == 'PSOLID':
mid = prop.mid
inp += '*SOLID SECTION,ELSET=%s,MATERIAL=MAT%i\n' % (elset, mid.mid)
elif prop.type == 'PBAR':
mid = prop.mid
inp += '*BEAM SECTION,ELSET=%s,MATERIAL=MAT%i\n' % (elset, mid.mid)
elif prop.type == 'PBARL':
mid = prop.mid
#section_name = 'SQUARE'
print("what is the section_name?")
print(" ", sorted(prop.__dict__.keys()))
inp += '*BEAM SECTION,ELSET=eids_pid%i,MATERIAL=MAT%i,SECTION=%s\n' % (
prop.pid, mid.mid, section_name)
if section_name == 'SQUARE':
inp += '%s\n' % prop.dims[0]
if section_name == 'RECT':
inp += '%s, %s\n' % tuple(prop.dims[0])
else:
raise NotImplementedError(section_name)
else:
raise NotImplementedError(section_name)
inp += self.breaker()
return inp
def calculix_materials(self):
"""
might need to make this by pid instead...
steel=DEFI_MATERIAU(ELAS=_F(E=210000.,NU=0.3,RHO=8e-9),);
-----MAT1-----
*MATERIAL,NAME=EL
210000.0, .3
*DENSITY
7.8E-9
*SOLID SECTION,MATERIAL=EL,ELSET=EALL
"""
inp = '** calculix_materials\n'
for mid, material in sorted(self.materials.items()):
msg = '*MATERIAL,NAME=mid%i\n' % material.mid
if material.type == 'MAT1':
msg += '*ELASTIC\n%s, %s\n' % (material.E(), material.Nu())
msg += '*DENSITY\n%s\n' % material.get_density()
msg += '*SOLID SECTION,MATERIAL=EL,ELSET=EALL\n'
elif material.type == 'MAT4':
msg += '*ELASTIC\n%s, %s\n' % (material.E(), material.Nu())
msg += '*DENSITY\n%s\n' % material.rho()
msg += '*CONDUCTIVITY\n%s\n' % material.k
msg += '*CONVECTION\n%s\n' % material.h
msg += '*DENSITY\n%s\n' % material.get_density()
msg += '*SOLID SECTION,MATERIAL=EL,ELSET=EALL\n'
elif material.type == 'MAT2':
raise NotImplementedError(material)
#msg = '*ELASTIC,TYPE=ORTHO\n'
#temperature = 0. # default value - same properties for all values
#msg += '%s,%s,%s\n' % (self.e, self.nu, temperature)
#D = Dplate
#D1111 = D[0, 0]
#D1122 = 0.
#D2222 = D[1, 1]
#D1133 = D[0, 2]
#D2233 = D[1, 2]
#D3333 = D[2, 2]
#D1212 = D[0, 1]
#D1313 = D[0, 2]
#msg += '%s,%s,%s,%s,%s,%s,%s,%s\n\n' % (
#D1111, D1122, D2222, D1133, D2233, D3333, D1212, D1313)
#G23
#temperature = self.tref
#msg = '*ELASTIC,TYPE=ENGINEERING CONSTANTS ** MAT2,mid=%s\n' % (
#self.mid)
#msg += '** E1,E2,E3,NU12,NU13,NU23,G12,G13\n'
#msg += '** G23,TEMPERATURE\n'
#msg += '%s,%s,%s,%s,%s,%s,%s,%s\n' % (
#e1, e2, e3, nu12, nu13, nu23, g12, g13)
#msg += '%s,%s\n' % (G23, temperature)
#if self.rho > 0.:
#msg += '*DENSITY\n'
#msg += '%s\n' % (self.rho)
#if self.a > 0:
#msg += '*EXPANSION,TYPE=ISO,ZERO=%s\n' % (self.tref)
#msg += '** ALPHA,ALPHA*TREF\n'
#msg += '%s,%s\n\n' % (self.a, self.a * self.tref)
#return msg
else:
raise NotImplementedError(mid.type)
inp += self.breaker()
return inp
def calculix_loads(self):
"""writes the load cards sorted by ID"""
inp = '** calculix_loads\n'
#if self.language=='english':
#inp += '** Loads\n'
#else:
#inp += ''
isubcase = 1
param_name = 'LOAD'
#skippedLids = {}
if self.loads:
inp += '** LOADS\n'
#load_keys = self.loads.keys()
#if isubcase in self.case_control_deck:
if self.case_control_deck.has_subcase(isubcase):
loadcase_id = self.case_control_deck.get_subcase_parameter(
isubcase, param_name)[0]
#loadcase = self.loads[loadcase_id]
self._write_loads_p0(loadcase_id) # bdf_file, size=8, is_double=False
inp += self.breaker()
return inp
def _write_loads_p0(self, loadcase_id, p0=None):
if not isinstance(loadcase_id, int):
raise RuntimeError('loadcase_id must be an integer; loadcase_id=%r' % loadcase_id)
if p0 is None:
p = array([0., 0., 0.], dtype='float32')
if isinstance(p0, int):
p = self.nodes[p0].get_position()
else:
p = array(p0)
load_case = self.loads[loadcase_id]
#for (key, load_case) in self.loads.items():
#if key != loadcase_id:
#continue
scale_factors2 = []
loads2 = []
for load in load_case:
if isinstance(load, LOAD):
scale_factors, loads = load.get_reduced_loads(
resolve_load_card=False, filter_zero_scale_factors=False)
scale_factors2 += scale_factors
loads2 += loads
else:
scale_factors2.append(1.)
loads2.append(load)
nnodes = self.nnodes
#print('nnodes = %s' % nnodes)
force_moment = zeros((nnodes, 6), 'float64')
#print(force_moment.shape)
force = force_moment[:, :3]
moment = force_moment[:, 3:]
i = 0
xyz = {}
nid_to_i_map = {}
for nid, node in self.nodes.items():
nid_to_i_map[nid] = i
xyz[nid] = node.get_position()
unsupported_types = set()
for load, scale in zip(loads2, scale_factors2):
| |
# gh_stars: 1-10
#
# Create buildbot configuration based on a (almost) plain dict.
#
import random
from buildbot.buildslave import BuildSlave
from buildbot.config import BuilderConfig
from buildbot.changes.gitpoller import GitPoller
from buildbot.changes.filter import ChangeFilter
from buildbot.interfaces import IEmailLookup
from buildbot.process.buildstep import BuildStep
from buildbot.process.factory import BuildFactory
from buildbot.process.properties import Property, Interpolate
from buildbot.process.buildrequestdistributor import BasicBuildChooser
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.triggerable import Triggerable
from buildbot.schedulers.trysched import Try_Userpass
from buildbot.status import html
from buildbot.status.github import GitHubStatus
from buildbot.status.results import FAILURE, SKIPPED, SUCCESS
from buildbot.status.web import authz
from buildbot.status.web.auth import HTPasswdAuth
from buildbot.status.mail import MailNotifier as BuildbotMailNotifier
from buildbot.steps.master import MasterShellCommand
from buildbot.steps.shell import ShellCommand
from buildbot.steps.source.git import Git
from buildbot.steps.transfer import DirectoryUpload
from buildbot.steps.trigger import Trigger
from twisted.internet import defer
from zope.interface import implements
# Sentinel objects used as special marker values in the configuration dicts.
ALL = object()
DEFAULT = object()
# Step-type identifiers; these select the matching
# RunStepsFactory._add_step_<type>() method.
DIRECTORY_UPLOAD = 'directory_upload'
GITHUB_PULL_TITLE = object()
INTERESTED_USERS = object()
PARALLEL_GROUP = 'parallel_group'
SEQUENTIAL_GROUP = 'sequential_group'
MASTER_COMMAND = 'master_command'
SLAVE_COMMAND = 'slave_command'
SOURCE_COMMAND = 'source_command'
ATTACH_PNG = 'attach_png'
TRY = object()
# Timing constants in seconds - presumably for GitPoller / scheduler
# configuration; their consumers are outside this chunk.
POLL_INTERVAL = 60
STABLE_TIMER = 300
@defer.inlineCallbacks
def popNextBuild(self):
    """
    Called when a new build should be created.

    Replacement for ``BasicBuildChooser.popNextBuild`` (monkey-patched onto
    the class just below).  The visible difference from stock behavior is
    that the chosen build request is exposed on the builder as
    ``current_builder_request`` *before* a slave is picked, so slave-choosing
    hooks can inspect it.

    Returns (via deferred): a ``(slave, breq)`` pair, or ``(None, None)``
    when nothing can be started.
    """
    nextBuild = (None, None)
    while True:
        # 2. pick a build
        breq = yield self._getNextUnclaimedBuildRequest()
        if not breq:
            break
        # side channel: let nextSlave logic see which request is pending
        self.bldr.current_builder_request = breq
        # 1. pick a slave
        slave = yield self._popNextSlave()
        if not slave:
            break
        # either satisfy this build or we leave it for another day
        self._removeBuildRequest(breq)
        # 3. make sure slave+ is usable for the breq
        recycledSlaves = []
        while slave:
            canStart = yield self.canStartBuild(slave, breq)
            if canStart:
                break
            # try a different slave
            recycledSlaves.append(slave)
            slave = yield self._popNextSlave(breq)
        # recycle the slaves that we didnt use to the head of the queue
        # this helps ensure we run 'nextSlave' only once per slave choice
        if recycledSlaves:
            self._unpopSlaves(recycledSlaves)
        # 4. done? otherwise we will try another build
        if slave:
            nextBuild = (slave, breq)
            break
    defer.returnValue(nextBuild)
# Patch Buildbot
# Monkey-patch: swap in the popNextBuild defined above so builders expose
# ``current_builder_request`` while a slave is being chosen.
BasicBuildChooser.popNextBuild = popNextBuild
class UnixCommand(ShellCommand, object):
    """
    Executes a command using an Unix shell.
    """
    # a failing command fails the whole build
    haltOnFailure = True
    def __init__(self, **kwargs):
        super(UnixCommand, self).__init__(**kwargs)
        # NOTE(review): this early return makes everything below unreachable,
        # so the TEST_SHELL wrapping is effectively disabled.  Possibly an
        # intentional quick toggle - confirm before removing the return or
        # the dead code.
        return
        environment = kwargs.get('env', {})
        test_shell = environment.get('TEST_SHELL', None)
        if test_shell:
            # On Windows systems the command is invoked via sh.exe.
            self.command.insert(0, test_shell)
class AttachPNG(ShellCommand):
    """
    Attach a PNG image from the slave build folder.
    This is designed to report failures, so by default it will always
    be executed.

    The image is fetched by running ``base64`` on the slave and embedding
    the stdout into an HTML log as a data URI.
    """
    def __init__(self, name, source, alwaysRun=True):
        # name: label for the attachment; source: PNG path on the slave
        self._name = name
        self._source = source
        ShellCommand.__init__(
            self,
            command='base64 %s' % (self._source,),
            description='attaching screen',
            descriptionDone='screen attached',
            alwaysRun=alwaysRun,
        )
    def _getLogName(self, name):
        """
        Return a name appropriate for the log name.

        Replaces characters that are unsafe in log names/URLs.
        """
        result = name.replace('$', '_')
        result = result.replace('/', '_')
        result = result.replace('\\', '_')
        return result
    def createSummary(self, log):
        """Turn the captured base64 stdout into an embedded HTML image log."""
        image = log.getText().strip()
        if image.startswith('base64:'):
            # Some kind of error while getting the image.
            if 'No such file or directory' in image:
                # No image generated... all good.
                self.finished(SKIPPED)
            else:
                # There is an error.
                self.finished(FAILURE)
            return defer.succeed(None)
        # Embed the image inline so the waterfall can show it directly.
        raw_html = '''
            <html><body><img src="data:image/png;base64,%s" alt="%s" scale="0">
            </body></html>
            ''' % (image, self._source)
        self.addHTMLLog(self._getLogName(self._source), html=raw_html)
        self.finished(SUCCESS)
        # The first log is the stdio, and we don't need it as we have it in
        # HTML.
        self._step_status.logs.pop(0)
        return defer.succeed(None)
class RunStepsFactory(BuildFactory, object):
    """
    Run commands from 'steps'.

    Each entry of ``steps`` is a plain dict; its 'type' key (default
    SLAVE_COMMAND) selects the matching ``_add_step_<type>`` method below,
    and the remaining keys configure that step.
    """
    def __init__(self, project, steps, environment):
        super(RunStepsFactory, self).__init__()
        # base environment shared by all slave commands of this factory
        self._step_environment = environment
        self._project = project
        self._add_steps(steps)
    def _add_steps(self, steps):
        """
        Add all steps from `steps`.
        """
        for step in steps:
            self._add_step(step)
    def _add_step(self, step):
        """
        Add a single step.

        Dispatches on step['type'] to the matching ``_add_step_*`` method.
        """
        step_type = '_add_step_%s' % (step.get('type', SLAVE_COMMAND))
        try:
            add_step_method = getattr(self, step_type)
        except AttributeError:
            raise AssertionError('Unknown type %s for %s' % (step_type, step))
        add_step_method(step)
    def _add_step_source_command(self, step):
        """
        Add a source step (git checkout of the project's repo).
        """
        # Use 'incremental' when migrating to latest git step.
        mode = step.get('mode', 'incremental')
        branch = step.get('branch', None)
        config = step.get('config', None)
        self.addStep(Git(
            name='get code for ' + self._project.name,
            mode=mode,
            repourl=self._project.repo,
            branch=branch,
            shallow=False,
            config=config,
        ))
    def _add_step_slave_command(self, step):
        """
        Add a slave command step.

        Supported keys: command, name, optional (gated on the
        'force_<name>' property), always-run, timeout, add_environment.
        """
        # copy so later mutation of the step dict can't affect the command
        final_command = step['command'][:]
        name = step.get('name', 'Command')
        optional = step.get('optional', False)
        always_run = step.get('always-run', False)
        timeout = step.get('timeout', 45)
        force_name = 'force_' + name
        # Build environment variables from base environment plus
        # step specific environment variables.
        step_environment = self._step_environment.copy()
        add_environment = step.get('add_environment', {})
        step_environment.update(add_environment)
        done_name = name
        if optional:
            done_name = "%s (prop:force_%s)" % (name, name)
        # closure over force_name: optional steps only run when forced
        def do_step_if(step):
            if not optional:
                return True
            return step.build.getProperty(force_name)
        self.addStep(UnixCommand(
            name=name,
            command=final_command,
            doStepIf=do_step_if,
            env=step_environment,
            description=name,
            descriptionDone=done_name,
            alwaysRun=always_run,
            timeout=timeout,
        ))
    def _update_github_status(self, step, set_properties):
        """
        See if the builder should send GitHub Status.

        When step['github_send_status'] is set, adds the repo owner/name
        (parsed from the project's GitHub slug) to ``set_properties``
        in place.
        """
        send_github_status = step.get('github_send_status', False)
        if not send_github_status:
            return
        parts = self._project._github_slug.split('/', 1)
        set_properties.update({
            "github_repo_owner": parts[0],
            "github_repo_name": parts[1],
        })
    def _add_step_sequential_group(self, step):
        """
        Run all builders from group one after another.
        """
        set_properties = step.get('set_properties', {})
        copy_properties = step.get('copy_properties', [])
        self._update_github_status(step, set_properties)
        target_group = step['target']
        # NOTE(review): the loop variable below shadows the ``step``
        # parameter; harmless here since the parameter is fully read before
        # the loop, but worth renaming.
        for target in self._project.getGroupMembersBuilderNames(target_group):
            step = Trigger(
                schedulerNames=[target],
                # waitForFinish serializes the group members
                waitForFinish=True,
                updateSourceStamp=True,
                set_properties=set_properties,
                copy_properties=copy_properties,
            )
            self.addStep(step)
    def _add_step_parallel_group(self, step):
        """
        Run all builders from group in parallel.
        """
        set_properties = step.get('set_properties', {})
        copy_properties = step.get('copy_properties', [])
        self._update_github_status(step, set_properties)
        target_group = step['target']
        targets = self._project.getGroupMembersBuilderNames(target_group)
        # a single Trigger with all schedulers fires them concurrently
        self.addStep(Trigger(
            schedulerNames=targets,
            waitForFinish=True,
            updateSourceStamp=True,
            set_properties=set_properties,
            copy_properties=copy_properties,
            haltOnFailure=True,
            flunkOnFailure=True,
        ))
    def _add_step_master_command(self, step):
        """
        Add a step for a shell command executed on the buildmaster.
        """
        name = step.get('name', 'Master command')
        always_run = step.get('always-run', False)
        self.addStep(MasterShellCommand(
            name=name,
            command=step['command'],
            haltOnFailure=True,
            alwaysRun=always_run,
        ))
    def _add_step_directory_upload(self, step):
        """
        Add step for directory upload to master.

        Supported keys: source (slave path), destination (master path),
        name, optional (gated on 'force_<name>'), always-run.
        """
        name = step.get('name', 'Directory upload')
        optional = step.get('optional', False)
        always_run = step.get('always-run', False)
        force_name = 'force_' + name
        done_name = name
        if optional:
            done_name = "%s (prop:force_%s)" % (name, name)
        # closure over force_name: optional uploads only run when forced
        def do_step_if(step):
            if not optional:
                return True
            return step.build.getProperty(force_name)
        self.addStep(DirectoryUpload(
            name=done_name,
            slavesrc=step['source'],
            masterdest=step['destination'],
            haltOnFailure=True,
            doStepIf=do_step_if,
            alwaysRun=always_run,
        ))
    def _add_step_attach_png(self, step):
        """
        Attach a PNG file as embedded HTML logs (see AttachPNG).
        """
        name = step.get('name', 'Attach PNG')
        source = step.get('source', 'screenshot.png')
        always_run = step.get('always-run', True)
        self.addStep(AttachPNG(
            name=name,
            source=source,
            alwaysRun=always_run,
        ))
class ParallelFactory(BuildFactory, object):
    """
    Trigger tests in parallel in `target_names`.

    Collects the 'force_<name>' property of every optional named step so it
    propagates to the triggered builders, then fires all schedulers at once
    and waits for them to finish.
    """
    def __init__(self, target_builder_names, steps):
        super(ParallelFactory, self).__init__()
        # 'test' is always forwarded; optional named steps forward their
        # force flags as well
        copy_properties = ['test'] + [
            'force_' + step['name']
            for step in steps
            if step.get('name', None) and step.get('optional', False)
            ]
        self.addStep(Trigger(
            schedulerNames=target_builder_names,
            waitForFinish=True,
            updateSourceStamp=True,
            set_properties={},
            copy_properties=copy_properties,
            haltOnFailure=True,
            flunkOnFailure=True,
            ))
def time_delta_hr(start, end):
    """
    Return a string of human readable time delta.

    `start` and `end` are Unix timestamps; the result looks like
    '1 hours, 2 minutes, 3 seconds' (zero-valued units are omitted).
    """
    import datetime
    from dateutil.relativedelta import relativedelta
    delta = relativedelta(
        datetime.datetime.fromtimestamp(end),
        datetime.datetime.fromtimestamp(start),
    )
    parts = []
    for unit in ('years', 'months', 'days', 'hours', 'minutes', 'seconds'):
        amount = getattr(delta, unit)
        if amount > 0:
            parts.append('%d %s' % (amount, unit))
    return ', '.join(parts)
def message_formatter(mode, name, build, results, master_status):
    """
    Format the notification email body for a finished build.

    Returns a dict with 'body' (UTF-8 encoded plain text) and 'type',
    as expected by buildbot's MailNotifier messageFormatter hook.

    Reasons:
    * try - 'try' job by user
    * 3rd party - Triggerable(server-ubuntu-1004-x64)
    * scheduler
    """
    from buildbot.status.builder import Results
    text = []
    result = Results[results]
    buildbot_url = master_status.getBuildbotURL()
    reason = build.getReason()
    full_logs_url = master_status.getURLForThing(build)
    source_stamp = build.getSourceStamps()[0]
    build_duration = time_delta_hr(*build.getTimes())
    changes = build.getChanges()
    authors = build.getResponsibleUsers()
    properties = []
    for key, value in build.getProperties().properties.items():
        properties.append('%s: %s' % (key, str(value)))
    steps = []
    for step in build.getSteps():
        step_name = "%s - %s " % (step.getName(), ' '.join(step.getText()))
        step_results, dummy = step.getResults()
        try:
            step_status = Results[step_results].upper()
            step_duration = time_delta_hr(*step.getTimes())
        except Exception:
            # FIX: narrowed from a bare except; a step that never ran may
            # lack results/times.  Also fixed the 'UNKOWN' typo.
            step_status = 'UNKNOWN'
            step_duration = 'UNKNOWN'
        steps.append('')
        steps.append('Status: %s' % step_status)
        steps.append('Step name: %s' % step_name)
        steps.append('Duration: %s' % step_duration)
        for key, value in step.urls.items():
            # The space at the end is important so that the URL are
            # recognized by email clients.
            steps.append('%s: %s ' % (key, str(value)))
    text.append('Branch: %s' % source_stamp.branch)
    text.append('Build status: %s' % result.upper())
    text.append('Authors: %s' % ", ".join(authors))
    text.append('Duration: %s' % build_duration)
    text.append('Full logs: %s' % full_logs_url)
    text.append('Buildslave: %s' % build.getSlavename())
    text.append('Build Reason: %s' % reason)
    text.append('')
    text.append('Steps details')
    text.append('------------------------------------------------------')
    text.extend(steps)
    text.append('')
    text.append('Changes')
    text.append('------------------------------------------------------')
    text.append('')
    text.extend([c.asText() for c in changes])
    text.append('')
    text.append('Build properties')
    text.append('------------------------------------------------------')
    text.extend(properties)
    text.append('')
    text.append('--')
    text.append('Yours truly, <NAME>.')
    text.append(buildbot_url)
    return {
        'body': "\n".join(text).encode('utf-8'),
        'type': 'plain',
    }
class MailNotifier(BuildbotMailNotifier, object):
"""
Mail notifier used in project.
It adds support for sending notification from "try" schedulers.
mode: change, failing, passing, problem, warnings, exception, all
"""
def __init__(self,
server,
mode,
builders,
recipients,
user_to_email_mapper=None,
subject=None,
):
kwargs = {}
kwargs.update(server)
kwargs.update({
'messageFormatter': message_formatter,
'buildSetSummary': False,
'addPatch': False,
'addLogs': False,
'mode': mode,
'builders': builders,
'categories': None, # We use builders.
})
kwargs['extraRecipients'] = []
kwargs['sendToInterestedUsers'] = False
for name in recipients:
if name == INTERESTED_USERS:
kwargs['sendToInterestedUsers'] = True
else:
kwargs['extraRecipients'].append(name)
if subject:
kwargs['subject'] = subject
if user_to_email_mapper:
| |
sanitize:
try:
Chem.SanitizeMol(mol) #adding aromatic bonds...we may have a problem here
except ValueError as e:
logging.info("Skipping sanitization for molecule at pos:" + str(i+1))
if debug:
w = Chem.SDWriter('tmp_pos'+str(i+1)+'.sdf')
w.write(mol)
w.close()
# we cannot use it then...
if mol is not None:
if sample is not None and np.random.random_sample()>sample:
continue
if i>0:
df_new = pd.concat([df_new, extract_features(mol, infile, i, verbose=verbose, printHeader=True, fillNa=fillNa, useSelectionRules=useSelectionRules, skipH=skipH, addBonds=addBonds)], axis=0)
else:
df_new = extract_features(mol, infile, i, verbose=verbose, printHeader=True, fillNa=fillNa, useSelectionRules=useSelectionRules, skipH=skipH, addBonds=addBonds)
count += 1
else:
logging.info("SKIPPING molecule at pos:"+str(i+1))
logging.error("SKIPPING molecule at pos:" + str(i+1))
logging.info("Processed total of >%d< molecules" % (count))
if df_new is not None and tempsave:
logging.info("%3d Generated temp file: %s" % (i + 1, outfile))
df_new.to_csv(outfile,index=True)
if df_new is None:
logging.info("ERROR: There was a problem generating the data!")
logging.info("Bond types: \n%r"%(df_new['bond'].value_counts()))
logging.info("Total bonds: %r\n" % (df_new['bond'].value_counts().sum()))
return(df_new)
def convert_sdfiles2csv(file_list=None, base_dir='', outdat='train_dat.csv', method='UFF', skipH=False, addBonds=False, sample=0.25, verbose=False):
    """
    Allows for training use a list of filenames, for internal testing
    :param file_list: list of .sd files (None means empty list)
    :param base_dir: location of those files
    :param outdat: .csv file with feature matrix and target vectors
    :param method: force field used when converting .smi input to 3D
    :param skipH: omit hydrogens during feature extraction
    :param sample: fraction of molecules to keep (passed through)
    :param verbose: verbose mode
    """
    # FIX: replaced the mutable default argument ([]) with None to avoid the
    # shared-default pitfall.
    if file_list is None:
        file_list = []
    finalf = outdat
    for i, fname in enumerate(file_list):
        infile = base_dir + fname
        if not os.path.isfile(infile):
            logging.critical("File not found:" + infile)
            logging.critical("CWD:" + os.getcwd())
            sys.exit(1)
        outfile = 'moldat_tmp.csv'
        if infile.endswith('.smi'):
            # SMILES input is first converted to a 3D .sdf file.
            infile = convert_smiles2sdfile(smifile=infile, outdat=outfile, method=method, verbose=verbose)
            infile = infile.replace(".smi", ".sdf")
            print(infile)
        df = convert_sdf2dataframe(infile=infile, outfile=outfile, fillNa=9999.0, skipH=skipH, addBonds=addBonds, sample=sample, verbose=verbose)
        if df is None:
            continue
        outstr = 'writing'
        mode = 'w'
        header = True
        if os.path.isfile(finalf):
            # Aggregate file already exists: append without repeating header.
            mode = 'a'
            header = False
            outstr = 'appending'
        # FIX: renamed the handle so it no longer shadows the loop variable.
        with open(finalf, mode) as out_handle:
            df.to_csv(out_handle, header=header, index=True)
        print(df.head())
        logging.info("File: %3d - %s .csv file to: %s" % (i + 1, outstr, finalf))
def train_from_csv(filename, grid_search=False, useRF=False, plotClassifier=False, save_clf='clf.p', verbose=False):
    """
    Train bond data with sklearn classifier, final model gets pickled.
    :param filename: .csv file with feature matrix
    :param grid_search: Do a parameter search on grid
    :param useRF: use RandomForest instead of GradientBoosting
    :param plotClassifier: additionally render a shallow decision tree
    :param save_clf: pickle file the fitted model is written to
    :return: trained scikit-learn model
    """
    logging.info("Training data on dataset:")
    df = pd.read_csv(filename, index_col=0)
    if 'id1' in df.columns and 'id2' in df.columns:
        df.drop(['id1', 'id2'], axis=1, inplace=True)
    logging.info("Shape : %d X %d" % (df.shape[0], df.shape[1]))
    logging.info("Features: %s" % (df.columns))
    # remove similar data
    logging.info("Dropping duplicates...")
    df.drop_duplicates(inplace=True)
    logging.info("Shape : %d X %d" % (df.shape[0], df.shape[1]))
    y = df['bond']
    X = df.drop(['bond'], axis=1, inplace=False)
    if plotClassifier:
        # FIX: export_graphviz is a module-level function, not a method of
        # the fitted estimator, and it only *returns* the dot source when
        # out_file=None (with out_file set it returns None, which would crash
        # graphviz.Source below).
        from sklearn.tree import export_graphviz
        dtree = DecisionTreeClassifier(max_depth=5)
        dtree.fit(X, y)
        dot_data = export_graphviz(dtree, out_file=None)
        import graphviz
        graph = graphviz.Source(dot_data)
        graph.render("decisiontree")
    n_jobs = 1
    n_splits = 4
    if useRF:
        model = RandomForestClassifier(n_estimators=250, max_depth=None, min_samples_leaf=5, n_jobs=n_jobs,
                                       max_features=11, oob_score=False)
    else:
        #model = xgb.XGBClassifier(n_estimators=2000, learning_rate=0.01, max_depth=5, NA=0, subsample=.5,colsample_bytree=1.0, min_child_weight=5, n_jobs=4, objective='multi:softprob',num_class=5, booster='gbtree', silent=1, eval_size=0.0)
        #parameters = {'n_estimators': [2000], 'learning_rate': [0.01, 0.1, 0.001], 'max_depth': [5, 7],'subsample': [0.5]}
        model = GradientBoostingClassifier(n_estimators=1000, learning_rate=0.1, max_depth=5, verbose=1)
        parameters = {}
    if grid_search:
        #model.set_params(n_jobs=1)
        n_jobs = 4
        cv = StratifiedKFold(n_splits=n_splits)
        model = GridSearchCV(model, parameters, n_jobs=n_jobs, verbose=2, scoring='f1_micro', cv=cv, refit=True)
        model.fit(X, y)
        means = model.cv_results_['mean_test_score']
        stds = model.cv_results_['std_test_score']
        for mean, std, params in zip(means, stds, model.cv_results_['params']):
            print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
        print(model)
    else:
        logging.info("Fitting classifier: %s" % (model))
        model.fit(X, y)
    with open(save_clf, "wb") as fh:
        # FIX: use a context manager so the pickle file handle is closed.
        pickle.dump(model, fh)
    logging.info("Saving classifier as: %s" % (save_clf))
    return(model)
def train_job(filename, reset=True, eval=False, fmethod='UFF', skipH=False, iterative=False, sample=False, useRF=False, verbose=False):
    """
    Use either .sdf or .smi file to train from a new dataset or append data.
    :param filename: name of .smi of .sd file
    :param reset: removes old training data
    """
    if eval:
        train_file = 'eval_dat.csv'
        reset = True
    else:
        train_file = 'train_dat.csv'
    iter_file = ""
    use_iter = iterative and not eval
    if use_iter:
        logging.info("Iterative mode switched ON!")
        iter_file = train_file.replace("_dat", "_iter")
    if useRF and not eval:
        logging.info("INFO: Using Random Forest for training!")
    if reset:
        # Drop stale csv files from previous runs.
        for stale in (train_file, iter_file):
            if os.path.isfile(stale):
                os.remove(stale)
    if filename.endswith(('.sdf', '.sd')):
        convert_sdfiles2csv(file_list=[filename], outdat=train_file, skipH=skipH, addBonds=False, sample=sample, verbose=verbose)
        if use_iter:
            convert_sdfiles2csv(file_list=[filename], outdat=iter_file, skipH=skipH, addBonds=True, sample=sample, verbose=verbose)
    elif filename.endswith('.smi'):
        logging.info("Using forcefield for optimization: %s" % (fmethod))
        convert_sdfiles2csv(file_list=[filename], outdat=train_file, method=fmethod, skipH=skipH, addBonds=False)
        if use_iter:
            convert_sdfiles2csv(file_list=[filename], outdat=iter_file, method=fmethod, skipH=skipH, addBonds=True, verbose=verbose)
    if not os.path.isfile(train_file):
        sys.stderr.write("ERROR: Missing training data file: %s!\n" % (train_file))
        sys.exit(1)
    if eval:
        evaluate(train_file, iterative=iterative, verbose=verbose)
    else:
        train_from_csv(train_file, useRF=useRF, verbose=verbose)
        if iterative:
            train_from_csv(iter_file, useRF=useRF, save_clf="clf_iter.p", verbose=verbose)
def eval_job(filename, skipH=False, iterative=False, verbose=False):
    """
    Evaluation per! molecule
    :param filename: filename for evaluation
    :param skipH: omit hydrogen
    :param iterative: use 2nd classifier
    :param verbose: verbose mode
    :return: -
    """
    # iterate over mols of SDF
    # mol -> df -> bonds_predicted / bonds_true
    # make SDF -> extract features -> df -> bonds_predicted2
    # compare bonds_true & bonds_predicted2
    # generatePredictions with mol
    print("Evaluation run with option: noH(%r)" % (skipH))
    print("Loading classifier...")
    # FIX: context managers so the pickle file handles are closed.
    with open('clf.p', "rb") as fh:
        clf = pickle.load(fh)
    if iterative:
        with open('clf_iter.p', "rb") as fh:
            clf_iter = pickle.load(fh)
    else:
        clf_iter = None
    # NOTE(review): sanitize=iterative couples sanitization to the 2nd
    # classifier flag — confirm this is intended.
    suppl = Chem.SDMolSupplier(filename, removeHs=skipH, sanitize=iterative)
    nok = 0
    nfalse = 0
    for i, mol in enumerate(suppl):
        if mol is None: continue
        res = generate_predictions(mol, skipH=skipH, iterative=True, forceAromatics=False, maxiter=1, verbose=verbose,
                                   clf=clf, clf_iter=clf_iter, isEval=True)
        if res is None: continue
        if i % 50 == 0:
            logging.info("%d %r\n" % (i, res))
        if res:
            nok += 1
        else:
            nfalse += 1
    nall = len(suppl)
    # FIX: guard against an empty supplier to avoid ZeroDivisionError,
    # and fixed the 'Accuray' typo in the summary line.
    acc = nok / float(nall) if nall else 0.0
    logging.info("\nTOTAL: %5d OK: %5d WRONG: %5d Accuracy: %6.3f" % (nall, nok, nfalse, acc))
def evaluate(filename_test, filename_train='train_dat.csv', plotting=True, iterative=False, verbose=False):
    """
    Evaluate on dataset with known bond info, molecule accuracy is computed afterwards
    :param filename_test: name of .csv file with feature matrix and targets
    :param filename_train: training csv for extra analysis (currently disabled below)
    :param plotting: print misclassification stats and show plots
    :param iterative: 2nd-pass classifier flag (not supported in this fast path)
    :return: feature matrix X augmented with predictions and probabilities
    """
    df = pd.read_csv(filename_test, index_col=0)
    # NOTE(review): this override deliberately disables the train-data
    # analysis block below; remove it to re-enable that path.
    filename_train = None
    # shown train_data
    if filename_train is not None:
        logging.info("Analyze train data...")
        df_train = pd.read_csv(filename_train, index_col=0)
        print(df_train.shape)
        df_train['bondtype'] = df_train['bond'].astype('category')
        # Restrict to C-C bonds (atomic number 6) for the distance boxplot.
        df_train = df_train[df_train.ata == 6]
        df_train = df_train[df_train.atb == 6]
        if plotting:
            ax = sns.boxplot(x="bond", y="distab", data=df_train[['distab', 'bond']])
            ax.set(ylabel='C-C distance', xlabel='bond type')
            #ax.set(xticklabels=[])
            plt.show()
    logging.info("Evaluate data set: " + filename_test)
    logging.info("Loading classifier...")
    # FIX: context manager so the pickle file handle is closed.
    with open("clf.p", "rb") as fh:
        clf = pickle.load(fh)
    logging.info("Loading test set with %d rows from file %s\n" % (df.shape[0], filename_test))
    y = df['bond']
    X = df.drop(['bond', 'id1', 'id2'], axis=1, inplace=False)
    yprob = clf.predict_proba(X)
    ypred = clf.predict(X)
    score = accuracy_score(y, ypred)
    score2 = f1_score(y, ypred, average='weighted')
    logging.info("ACCURACY:%0.3f - F1-score: %0.3f\n" % (score, score2))
    # Attach predictions and per-class probabilities for inspection.
    X['bond_pred'] = ypred
    X['p(-)'] = yprob[:, 1]
    X['p(=)'] = yprob[:, 2]
    X['p(#)'] = yprob[:, 3]
    X['p(a)'] = yprob[:, 4]
    X['bond'] = y
    if plotting:
        print("Misclassification stats:")
        idx = (ypred != y)
        df_tmp = X[idx.values]
        print(df_tmp[['ata', 'atb', 'distab', 'bond', 'bond_pred']].head(200).sort_values(['ata']))
        plot_classification_results(y, ypred)
    mol_df_list = mol_dataframe_generator(X)
    # FIX: renamed the counter so it no longer shadows the builtin all().
    total = 0
    ok = 0
    not_ok = 0
    false_indices = []
    for name, df_sub in mol_df_list:
        total += 1
        if iterative:
            print("ERROR: Iterative - does not work in fast evaluation mode..")
            sys.exit(1)
            # ok no coordinates/no dm how to get feature matrix...????
        if np.array_equal(df_sub['bond_pred'].values, df_sub['bond'].values):
            ok += 1
        else:
            # print("FALSE: %s"%(name))
            not_ok += 1
            mask = df_sub['bond_pred'] != df_sub['bond']
            idx = np.argmax(mask)
            false_indices.append(idx)
    # FIX: guard against zero molecules; also fixed the 'Accuray' typo.
    acc = ok / float(total) if total else 0.0
    print(false_indices)
    print("\nTOTAL: %5d OK: %5d WRONG: %5d Accuracy: %6.3f" % (total, ok, not_ok, acc))
    return(X)
def evaluate_OB(filename='fullerene_ml.sdf', verbose=False):
    """
    Evaluation via Open Babel
    :param filename: sd file
    :param removeHs: use H or not (obabel reorders X-H bonds...)
    :param verbose: True for verbose
    :return: -
    """
    logging.info("Evaluating %s via OBabel"%(filename))
    #if sanitize:
    #    print("WARNING: Switched ON sanitization!")
    #else:
    #    print("WARNING: Switched OFF sanitization!")
    suppl = Chem.SDMolSupplier(filename, removeHs=False, sanitize=True)
    nok = 0
    nfalse = 0
    nall = len(suppl)
    for i, mol in enumerate(suppl):
        if mol is None: continue
        # Keep the full-H geometry for the xyz input handed to obabel.
        xyz_str = mol2xyz(mol)
        #remove H for comparison with OB
        mol = Chem.RemoveHs(mol)
        df_orig = extract_features(mol, "babel_orig", (i+1), skipH=True)
        if df_orig is None: continue
        # Reference bond assignment from the original sd file.
        bond_orig = df_orig['bond']
        #generate xyz for OB prediction without H
        # NOTE(review): StringIO.StringIO and raw_input() below are
        # Python 2 APIs — confirm the intended interpreter version.
        myfile = StringIO.StringIO(xyz_str)
        #if removeHs:
        #cmd_call = ["obabel", "-d","-ixyz", "-osdf"]
        #else:
        cmd_call = ["obabel", "-ixyz", "-osdf"]
        # Pipe the xyz text through obabel and capture the predicted molblock.
        p = subprocess.Popen(cmd_call, stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        molblock, err = p.communicate(myfile.read())
        #switch off sanitization
        #mol_pred_H = Chem.MolFromMolBlock(molblock,removeHs=False,sanitize=False)
        #always switch off H for comparison of main element bonds only
        mol_pred = Chem.MolFromMolBlock(molblock,removeHs=True,sanitize=False)
        if mol_pred is None:
            nfalse += 1
            continue
        df = extract_features(mol_pred, "obabel", 0, skipH=True)
        if df is None:
            nfalse += 1
            continue
        if len(bond_orig)!=len(df['bond'].values):
            logging.error("Original (%d) and predicted bond vector (%d) have different length!"%(len(bond_orig),len(df['bond'].values)))
        if verbose:
            # Side-by-side 2D depiction of obabel prediction vs. original.
            mol_pred_noH = Chem.RemoveHs(mol_pred)
            Chem.Compute2DCoords(mol_pred_noH)
            Chem.Compute2DCoords(mol)
            img = Draw.MolsToGridImage([mol_pred_noH, mol], molsPerRow=2, subImgSize=(400, 400),
                                       legends=['ob' + str(i + 1), 'orig' + str(i + 1)])
            img.show()
        if np.array_equal(bond_orig.values, df['bond'].values):
            nok+=1
        else:
            if verbose:
                mol_pred_noH = Chem.RemoveHs(mol_pred)
                Chem.Compute2DCoords(mol_pred_noH)
                Chem.Compute2DCoords(mol)
                img = Draw.MolsToGridImage([mol_pred_noH, mol], molsPerRow=2, subImgSize=(400, 400),
                                           legends=['ob' + str(i + 1), 'orig' + str(i + 1)])
                img.show()
                # Manual visual check: the user decides whether the
                # mismatch is a real error ('n'/'f') or acceptable.
                res = raw_input()
                if 'n' in res.lower() or "f" in res.lower():
                    nfalse += 1
                    print("FALSE: %d/%d" % (nfalse,len(suppl)))
                    # img.save('images/' + cname + '_' + str(i) + '.png')
                else:
                    nok += 1
| |
<gh_stars>1-10
#!/usr/bin/env python3
"""An interactive viewer for todo-txt."""
import os
import re
import sys
import time
import argparse
import subprocess
import curses
from contextlib import contextmanager, suppress
"""Maps a priority to a color. First entry is priority A, second B, and so on.
If there are more priorities than colors, the last entry will be used for the
remainder.
"""
COLORS = [
'#FF7733',
'#F5D761',
'#A4F54C',
'#78C1F3',
'#837CC5',
'#CCCCCC'
]
COLOR_STATUSBAR = '#CCCCCC'
COLOR_STATUSBAR_ACTIVE = '#F5D761'
"""For terminals that don't support definining custom colors (which is most of
them), these pre-defined colors will be used instead."""
COLORS_FALLBACK = [
curses.COLOR_RED,
curses.COLOR_YELLOW,
curses.COLOR_GREEN,
curses.COLOR_CYAN,
curses.COLOR_BLUE,
curses.COLOR_MAGENTA,
curses.COLOR_WHITE
]
COLOR_STATUSBAR_FALLBACK = curses.COLOR_WHITE
COLOR_STATUSBAR_ACTIVE_FALLBACK = curses.COLOR_YELLOW
RE_PRIORITY = r'\(([A-Z])\)'
RE_DATE = r'\d{4}-\d{2}-\d{2}'
KEY_ESC = 27
KEY_BACKSPACE = 127
def get_priority(item):
    """Returns the priority of an item as a letter, or None if unset."""
    match = re.search(RE_PRIORITY, item[1])
    if match:
        return match.group(1)
    return None
def get_priority_as_number(item, maximum=sys.maxsize):
    """Returns the priority of an item as a number (A is 0, B is 1, ...).

    Items without a priority map to *maximum*; the result is capped there.
    """
    letter = get_priority(item)
    return maximum if letter is None else min(maximum, ord(letter) - ord('A'))
def get_bumped_priority(item, delta):
    """Offsets and returns an item's priority by delta (positive -> higher),
    clamped to the range A..Z."""
    priority = get_priority(item)
    if priority is None:
        # TODO: if the item has no priority yet, it should be assigned the
        # lowest *used* priority
        return None
    bumped = ord(priority) - delta
    return chr(min(ord('Z'), max(ord('A'), bumped)))
def hex_to_rgb(col):
    """Convert a '#rrggbb' hex string into a curses RGB tuple with each
    channel scaled to the 0..1000 range."""
    digits = col.lstrip('#')
    scale = 1000 / 255
    return tuple(round(int(digits[i:i + 2], 16) * scale) for i in (0, 2, 4))
def dim(rgb, mul=0.6):
    """Scale each channel of *rgb* by *mul*, clamping at 1000.
    A multiplier > 1 produces a lighter color instead."""
    return tuple(min(1000, round(channel * mul)) for channel in rgb)
def lighten(rgb, mul=1.5):
    """Alias for dim() with a multiplier above 1, producing a lighter color."""
    return dim(rgb, mul)
class Dialog:
    """A popup dialog that lets us interact with todo items."""

    def __init__(self, screen, item):
        self.parent = screen
        self.item = item
        self.dialog = None
        self._alive = True

    def run(self):
        """Shows the dialog and enters a rendering loop."""
        self._init()
        while self._alive:
            self._render()
            self._handle_input()

    def close(self):
        """Closes the dialog."""
        self._alive = False

    def _init(self):
        # Full-width window, 5 rows tall, anchored at the top of the screen.
        _, width = self.parent.getmaxyx()
        self.dialog = curses.newwin(5, width, 0, 0)

    def _handle_input(self):
        # Any key press dismisses the dialog.
        self.dialog.getch()
        self.close()

    def _render(self):
        win = self.dialog
        win.erase()
        win.attron(curses.color_pair(0))
        win.addstr(1, 2, '{:} {:}'.format(*self.item))
        win.box()
        win.refresh()
class TodoListViewer:
"""A viewer that lets us browse and filter todo items."""
@property
def has_selection(self):
"""Returns True if a todo item is selected, False otherwise."""
return self._items and self._selected_line >= 0
@property
def selected_item(self):
"""Returns the currently selected item, which is a tuple in the form of:
(item_id, line), item_id being the line number in the todo.txt and line
being the text of that line.
"""
return self._items[self._selected_line] if self._items else None
@property
def selected_id(self):
"""Returns the line number of the currently selected item."""
item = self.selected_item
return item[0] if item else None
@property
def todo_path(self):
"""Returns the absolute path to the user's todo.txt."""
return os.path.abspath(os.path.join(self._root, 'todo.txt'))
@property
def num_rows(self):
"""Number of terminal lines available."""
return self.screen.getmaxyx()[0] - 1
@property
def num_columns(self):
"""Number of terminal characters available horizontally."""
return self.screen.getmaxyx()[1]
@property
def is_watching(self):
"""Returns True if the viewer will respond to changes in todo.txt."""
return self._observer is not None and self._watch
# pylint: disable=W0622
def __init__(self, root, filter=None, simple_colors=False, mouse=False,
watch=True):
self.screen = None
self._root = root
self._scroll_offset = 0
self._selected_line = 0
self._alive = True
self._items = []
self._all_items = []
self._filter = filter or ''
self._filtering = False
self._simple_colors = simple_colors
self._num_colors = 0
self._num_reserved_colors = 0
self._num_color_variants = 0
self._mouse = mouse
self._watch = watch
self._observer = None
    def __del__(self):
        # Stop the watchdog observer thread when the viewer is destroyed.
        if self._observer:
            self._observer.stop()
def run(self, *_):
"""Shows the viewer and enters a rendering loop."""
try:
self._init()
while self._alive:
self._move_selection_into_view()
self._render()
if self._filtering:
self._handle_filter_input()
else:
self._handle_input()
except KeyboardInterrupt:
pass
    def close(self):
        """Closes the viewer."""
        # run() checks this flag once per loop iteration.
        self._alive = False
    def refresh(self):
        """Reads the todo items from filesystem and refreshes the view."""
        # Keep the same item selected across the reload.
        with self.retain_selection():
            self._read_todo_file()
        # Brief flash as visual feedback that a reload happened.
        curses.flash()
        self._render()
def select_item_id(self, item_id):
"""Selects the item with a specific id."""
for item_index, item in enumerate(self._items):
if item[0] == item_id:
self._selected_line = item_index
break
    @contextmanager
    def retain_selection(self):
        """On entering the context, saves the currently selected item and
        re-selects it when exiting the context."""
        selected = self.selected_id
        yield
        # NOTE(review): no try/finally — if the body raises, the selection
        # is not restored.
        self.select_item_id(selected)
    @contextmanager
    def disable_watch(self):
        """Don't watch for filesystem changes while in this context."""
        prev = self._watch
        self._watch = False
        yield
        # NOTE(review): no try/finally — an exception in the body leaves
        # watching disabled.
        self._watch = prev
def _run_subprocess(self, command, retain_selection=True):
with self.disable_watch():
curses.endwin()
if retain_selection:
with self.retain_selection():
subprocess.run([str(x) for x in command])
self._init()
else:
subprocess.run([str(x) for x in command])
self._init()
    def _init(self):
        """Read the todo items and (re)initialize the curses screen."""
        self._read_todo_file()
        self.screen = curses.initscr()
        self.screen.keypad(1)  # translate escape sequences into KEY_* codes
        curses.curs_set(0)  # hide the hardware cursor
        if self._mouse:
            curses.mousemask(1)
        self._init_colors()
        if self._watch:
            self._init_watch()
    def _init_colors(self):
        """Set up curses color pairs, using custom RGB colors when supported.

        Pairs 1 and 2 are reserved for the status bar.  Item colors start at
        index 3; with custom colors each priority occupies three consecutive
        palette slots (normal, dim, light).
        """
        curses.start_color()
        curses.use_default_colors()
        if not curses.can_change_color():
            # Terminal palette is fixed: force the fallback color scheme.
            self._simple_colors = True
        if not self._simple_colors:
            # Set reserved colors
            self._define_color(1, hex_to_rgb(COLOR_STATUSBAR))
            self._define_color(2, hex_to_rgb(COLOR_STATUSBAR_ACTIVE))
            self._num_reserved_colors = 3
            # Set item colors: three variants per priority color.
            self._num_color_variants = 3
            for color_index, color in enumerate(COLORS):
                color_index = color_index * self._num_color_variants + self._num_reserved_colors
                self._define_color(color_index, hex_to_rgb(color))
                self._define_color(color_index + 1, dim(hex_to_rgb(color)))
                self._define_color(color_index + 2, lighten(hex_to_rgb(color)))
            self._num_colors = len(COLORS)
        else:
            # Set reserved colors
            curses.init_pair(1, -1, COLOR_STATUSBAR_FALLBACK)
            curses.init_pair(2, -1, COLOR_STATUSBAR_ACTIVE_FALLBACK)
            self._num_reserved_colors = 3
            # Set item colors: one variant each (attributes emulate dim/bold).
            self._num_color_variants = 1
            for color_index, color in enumerate(COLORS_FALLBACK):
                color_index += self._num_reserved_colors
                curses.init_pair(color_index, color, -1)
            self._num_colors = len(COLORS_FALLBACK)
    def _init_watch(self):
        """Start a watchdog observer that reloads the view on todo.txt edits."""
        if self._observer:
            # Already watching; never start a second observer thread.
            return
        # Imported lazily so the viewer runs without watchdog installed
        # unless watching is actually enabled.
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
        viewer = self
        class _Watch(FileSystemEventHandler):
            def on_modified(self, event):
                if viewer.is_watching and event.src_path.endswith('todo.txt'):
                    # It appears todotxt adds the priority in a second I/O,
                    # so if we're responding too quickly there's a chance we
                    # will miss it entirely.
                    time.sleep(.1)
                    viewer.refresh()
        self._observer = Observer()
        self._observer.schedule(_Watch(), self._root)
        self._observer.start()
    def _define_color(self, color_index, rgb):
        """Define terminal color *color_index* as *rgb* (0..1000 channels)
        and pair it with the default background."""
        assert color_index > 0  # Don't overwrite background color
        curses.init_color(color_index, *rgb)
        curses.init_pair(color_index, color_index, -1)
def _get_item_color_index(self, item):
priority = get_priority_as_number(item, maximum=self._num_colors - 1)
return priority * self._num_color_variants + self._num_reserved_colors
def _get_item_color_variants(self, item):
color_index = self._get_item_color_index(item)
pair = curses.color_pair
if self._simple_colors:
return (
pair(color_index),
pair(color_index) | curses.A_DIM,
pair(color_index) | curses.A_BOLD)
else:
return (
pair(color_index),
pair(color_index if self._simple_colors else color_index + 1),
pair(color_index if self._simple_colors else color_index + 2)
)
def _read_todo_file(self):
self._items.clear()
with open(self.todo_path, 'r') as todofile:
lines = todofile.readlines()
items = [(index + 1, line) for index, line in enumerate(lines)]
self._all_items = sorted(items, key=get_priority_as_number)
self._items = self._all_items
self._apply_filter()
def _apply_filter(self):
if not self._filter:
self._items = self._all_items
else:
self._items = []
for item in self._all_items:
if self._filter.lower() in item[1].lower():
self._items.append(item)
self._selected_line = 0
def _handle_filter_input(self):
key = self.screen.getch()
if key in (ord('\n'), curses.KEY_UP, curses.KEY_DOWN, KEY_ESC):
self._filtering = False
elif key == KEY_BACKSPACE:
self._filter = self._filter[:len(
self._filter) - 1] if self._filter else ''
else:
self._filter += chr(key)
self._apply_filter()
def _handle_input(self):
key = self.screen.getch()
# j/k: up/down
if key in (ord('k'), curses.KEY_UP):
self._selected_line -= 1
elif key in (ord('j'), curses.KEY_DOWN):
self._selected_line += 1
# HOME/END: scroll to top/bottom
elif key == curses.KEY_HOME:
self._selected_line = 0
elif key == curses.KEY_END:
self._selected_line = len(self._items) - 1
# q/ESC: cancel filter or quit
elif key in (ord('q'), KEY_ESC):
if self._filter:
with self.retain_selection():
self._filter = ''
self._apply_filter()
else:
self.close()
# r: refresh
elif key == ord('r'):
self.refresh()
# e: edit
elif key == ord('e'):
self._run_subprocess(['todo.sh', 'edit'])
# /: filter
elif key == ord('/'):
self._filter = ''
self._filtering = True
# d: done
elif self.has_selection and key == ord('d'):
self._run_subprocess(
['todo.sh', 'do', self.selected_id], retain_selection=False)
# n: nav
elif self.has_selection and key == ord('n'):
self._run_subprocess(['todo.sh', 'nav', self.selected_id])
# SPACE/RETURN: Enter item dialog
elif self.has_selection and key in (ord(' '), ord('\n')):
Dialog(self.screen, self.selected_item).run()
# -/=: Bump priority
elif self.has_selection and key in (ord('='), ord('-')):
delta = 1 if key == ord('=') else -1
new_priority = get_bumped_priority(self.selected_item, delta)
self._set_item_priority(self.selected_item, new_priority)
# A-Z: Set priority
elif self.has_selection and key >= ord('A') and key <= ord('Z'):
self._set_item_priority(self.selected_item, chr(key))
# 0: Remove priority
elif self.has_selection and key == ord('0'):
self._set_item_priority(self.selected_item, None)
# Mouse events
elif key == curses.KEY_MOUSE:
# Note: mouse support in curses seems to be pretty poor. I left this
# code in case someone wants to experiment with it.
_, _, row, _, state = curses.getmouse()
if state & 0x80000: # Wheel down
self._selected_line -= 1
elif state & | |
<reponame>artiya4u/Axela<filename>main.py
#! /usr/bin/env python
import os
import time
import sys
import alsaaudio
import requests
import json
from memcache import Client
import vlc
import threading
import email
import optparse
import tunein
import webrtcvad
from pocketsphinx.pocketsphinx import *
from creds import *
# Settings
device = "pulse" # Name of your microphone/sound card in arecord -L , Using Pulse audio by default
# Get arguments
parser = optparse.OptionParser()
parser.add_option('-s', '--silent',
                  dest="silent",
                  action="store_true",
                  default=False,
                  help="start without saying hello"
                  )
parser.add_option('-d', '--debug',
                  dest="debug",
                  action="store_true",
                  default=False,
                  help="display debug messages"
                  )
cmdopts, cmdargs = parser.parse_args()
silent = cmdopts.silent
debug = cmdopts.debug
# Setup
recorded = False
servers = ["127.0.0.1:11211"]  # local memcached used as a token cache
mc = Client(servers, debug=1)
# NOTE(review): rstrip() strips a *character set*, not a suffix — this only
# yields the script directory while the directory name doesn't end in
# characters of the filename; consider os.path.dirname instead.
path = os.path.realpath(__file__).rstrip(os.path.basename(__file__))
# Sphinx setup
trigger_phrase = "mom"  # wake word recognized by PocketSphinx
sphinx_data_path = path + "pocketsphinx/"
model_dir = sphinx_data_path + "/model/"
data_dir = sphinx_data_path + "/test/data"
# PocketSphinx configuration
config = Decoder.default_config()
# Set recognition model to US
config.set_string('-hmm', os.path.join(model_dir, 'en-us/en-us'))
config.set_string('-dict', os.path.join(model_dir, 'en-us/cmudict-en-us.dict'))
# Specify recognition key phrase
config.set_string('-keyphrase', trigger_phrase)
config.set_float('-kws_threshold', 1e-5)
# Hide the VERY verbose logging information
config.set_string('-logfn', '/dev/null')
# Process audio chunk by chunk. On keyword detected perform action and restart search
decoder = Decoder(config)
decoder.start_utt()
# Variables (mutable global state shared by the request/playback helpers)
p = ""
nav_token = ""
stream_url = ""
stream_id = ""
position = 0
audio_playing = False
button_pressed = False
start = time.time()
tunein_parser = tunein.TuneIn(5000)
vad = webrtcvad.Vad(2)  # voice activity detector, aggressiveness 2
currVolume = 100
# constants
VAD_SAMPLERATE = 16000
VAD_FRAME_MS = 30
VAD_PERIOD = (VAD_SAMPLERATE / 1000) * VAD_FRAME_MS  # samples per VAD frame
VAD_SILENCE_TIMEOUT = 1000
VAD_THROWAWAY_FRAMES = 10
MAX_RECORDING_LENGTH = 8
MAX_VOLUME = 100
MIN_VOLUME = 30
class bcolors:
    """ANSI escape codes for colorized terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def internet_on():
    """Return True if the Amazon auth endpoint is reachable, else False."""
    print("Checking Internet Connection...")
    try:
        # FIX: bounded timeout so startup cannot hang forever on a dead
        # network; the response object itself was unused.
        requests.get('https://api.amazon.com/auth/o2/token', timeout=10)
        print("Connection {}OK{}".format(bcolors.OKGREEN, bcolors.ENDC))
        return True
    except requests.exceptions.RequestException:
        # FIX: narrowed from a bare except; only network-level failures
        # mean we are offline.
        print("Connection {}Failed{}".format(bcolors.WARNING, bcolors.ENDC))
        return False
def gettoken():
    """Return a valid AVS access token, using memcache as a cache and the
    OAuth refresh token to obtain a new one; False when no refresh token."""
    cached = mc.get("access_token")
    if cached:
        return cached
    if not refresh_token:
        return False
    payload = {
        "client_id": Client_ID,
        "client_secret": Client_Secret,
        "refresh_token": refresh_token,
        "grant_type": "refresh_token",
    }
    r = requests.post("https://api.amazon.com/auth/o2/token", data=payload)
    resp = json.loads(r.text)
    # Cache slightly under the one-hour token lifetime.
    mc.set("access_token", resp['access_token'], 3570)
    return resp['access_token']
def alexa_speech_recognizer():
    """Send the recorded utterance to the AVS speech recognizer endpoint
    and hand the response to process_response()."""
    # https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/speechrecognizer-requests
    if debug:
        print("{}Sending Speech Request...{}".format(bcolors.OKBLUE, bcolors.ENDC))
    start_time = time.time()
    url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
    headers = {'Authorization': 'Bearer %s' % gettoken()}
    # JSON metadata part of the multipart request; reports an idle player.
    d = {
        "messageHeader": {
            "deviceContext": [
                {
                    "name": "playbackState",
                    "namespace": "AudioPlayer",
                    "payload": {
                        "streamId": "",
                        "offsetInMilliseconds": "0",
                        "playerActivity": "IDLE"
                    }
                }
            ]
        },
        "messageBody": {
            "profile": "alexa-close-talk",
            "locale": "en-us",
            "format": "audio/L16; rate=16000; channels=1"
        }
    }
    # NOTE(review): the wav file is opened in text mode ('r'); this only
    # works on Python 2 — use 'rb' under Python 3.
    with open(path + 'media/recording.wav') as inf:
        files = [
            ('file', ('request', json.dumps(d), 'application/json; charset=UTF-8')),
            ('file', ('audio', inf, 'audio/L16; rate=16000; channels=1'))
        ]
        r = requests.post(url, headers=headers, files=files)
    if debug:
        print("{}Responded...{} Took {} secs".format(bcolors.OKBLUE, bcolors.ENDC, time.time() - start_time))
    process_response(r)
def alexa_getnextitem(nav_token):
    """Request the next audio item for *nav_token* from AVS, unless audio
    is currently playing; the response is handed to process_response()."""
    # https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/audioplayer-getnextitem-request
    # Short grace period so a playback that is just starting can set the
    # audio_playing flag before we check it.
    time.sleep(0.5)
    if not audio_playing:
        if debug:
            print("{}Sending GetNextItem Request...{}".format(bcolors.OKBLUE, bcolors.ENDC))
        url = 'https://access-alexa-na.amazon.com/v1/avs/audioplayer/getNextItem'
        headers = {'Authorization': 'Bearer %s' % gettoken(), 'content-type': 'application/json; charset=UTF-8'}
        d = {
            "messageHeader": {},
            "messageBody": {
                "navigationToken": nav_token
            }
        }
        r = requests.post(url, headers=headers, data=json.dumps(d))
        process_response(r)
def alexa_playback_progress_report_request(request_type, player_activity, stream_id):
    """Notify AVS of an AudioPlayer playback event.

    https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/audioplayer-events-requests

    Parameters:
        request_type: one of ERROR, FINISHED, IDLE, INTERRUPTED,
            PROGRESS_REPORT or STARTED (case-insensitive).
        player_activity: IDLE, PAUSED, or PLAYING (case-insensitive).
        stream_id: identifier for the current stream.

    Raises:
        ValueError: for an unrecognized request_type. (BUG FIX: the
            original if/elif chain left `url` unbound for unknown types,
            crashing later with an UnboundLocalError.)
    """
    if debug: print("{}Sending Playback Progress Report Request...{}".format(bcolors.OKBLUE, bcolors.ENDC))
    # Endpoint suffix per report type (dict dispatch replaces the if/elif chain).
    endpoints = {
        "ERROR": "playbackError",                    # player hit an issue during playback
        "FINISHED": "playbackFinished",              # playback completed
        "IDLE": "playbackIdle",                      # reached the end of the playlist
        "INTERRUPTED": "playbackInterrupted",        # e.g. interrupted by a stop directive
        "PROGRESS_REPORT": "playbackProgressReport", # periodic state report
        "STARTED": "playbackStarted",                # playback began
    }
    report = request_type.upper()
    if report not in endpoints:
        raise ValueError("Unknown playback report type: %s" % request_type)
    url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/" + endpoints[report]
    headers = {'Authorization': 'Bearer %s' % gettoken()}
    d = {
        "messageHeader": {},
        "messageBody": {
            "playbackState": {
                "streamId": stream_id,
                "offsetInMilliseconds": 0,
                "playerActivity": player_activity.upper()
            }
        }
    }
    r = requests.post(url, headers=headers, data=json.dumps(d))
    # AVS answers 204 No Content on success.
    if r.status_code != 204:
        print("{}(alexa_playback_progress_report_request Response){} {}".format(bcolors.WARNING, bcolors.ENDC, r))
    else:
        if debug: print(
            "{}Playback Progress Report was {}Successful!{}".format(bcolors.OKBLUE, bcolors.OKGREEN, bcolors.ENDC))
def process_response(r):
    """Parse a multipart AVS response and act on its directives.

    The body may contain an application/json part (directives) and one or
    more audio/mpeg parts (speech clips). Side effects: resets/updates the
    module globals nav_token, stream_url, stream_id, currVolume; writes MP3
    parts into the tmpcontent folder; plays speech; may trigger follow-up
    requests (silence_listener + alexa_speech_recognizer) or spawn playback
    threads.
    """
    global nav_token, stream_url, stream_id, currVolume, isMute
    if debug: print("{}Processing Request Response...{}".format(bcolors.OKBLUE, bcolors.ENDC))
    # Reset playback bookkeeping before examining the new response.
    nav_token = ""
    stream_url = ""
    stream_id = ""
    if r.status_code == 200:
        # Rebuild a MIME document so the multipart body can be parsed.
        # NOTE(review): concatenating str with r.content implies Python 2
        # (str == bytes) -- confirm before porting to Python 3.
        data = "Content-Type: " + r.headers['content-type'] + '\r\n\r\n' + r.content
        msg = email.message_from_string(data)
        for payload in msg.get_payload():
            if payload.get_content_type() == "application/json":
                j = json.loads(payload.get_payload())
                if debug: print("{}JSON String Returned:{} {}".format(bcolors.OKBLUE, bcolors.ENDC, json.dumps(j)))
            elif payload.get_content_type() == "audio/mpeg":
                # Save each speech clip under its Content-ID for playback below.
                filename = path + "tmpcontent/" + payload.get('Content-ID').strip("<>") + ".mp3"
                with open(filename, 'wb') as f:
                    f.write(payload.get_payload())
            else:
                if debug: print(
                    "{}NEW CONTENT TYPE RETURNED: {} {}".format(bcolors.WARNING, bcolors.ENDC,
                                                                payload.get_content_type()))
        # Now process the response
        # NOTE(review): `j` is only bound if a JSON part was present; a 200
        # response without one would raise NameError here -- confirm intent.
        if 'directives' in j['messageBody']:
            if len(j['messageBody']['directives']) == 0:
                if debug:
                    print("0 Directives received")
            # First pass: play any synthesized speech.
            for directive in j['messageBody']['directives']:
                if directive['namespace'] == 'SpeechSynthesizer':
                    if directive['name'] == 'speak':
                        play_audio(path + "tmpcontent/" + directive['payload']['audioContent'].lstrip("cid:") + ".mp3")
            for directive in j['messageBody']['directives']:  # if Alexa expects a response
                # this is included in the same string as above if a response was expected
                if directive['namespace'] == 'SpeechRecognizer':
                    if directive['name'] == 'listen':
                        if debug:
                            timeout = directive['payload']['timeoutIntervalInMillis']
                            print("{}Further Input Expected, timeout in: {} {}ms".format(bcolors.OKBLUE,
                                                                                        bcolors.ENDC,
                                                                                        timeout))
                        play_audio(path + 'media/beep.wav', 0, 100)
                        # NOTE(review): the divisor 116 looks like an empirical
                        # ms -> frames conversion -- confirm its derivation.
                        timeout = directive['payload']['timeoutIntervalInMillis'] / 116
                        # listen until the timeout from Alexa
                        silence_listener(timeout)
                        # now process the response
                        alexa_speech_recognizer()
                elif directive['namespace'] == 'AudioPlayer':
                    # do audio stuff - still need to honor the playBehavior
                    if directive['name'] == 'play':
                        nav_token = directive['payload']['navigationToken']
                        for stream in directive['payload']['audioItem']['streams']:
                            if stream['progressReportRequired']:
                                stream_id = stream['streamId']
                                playBehavior = directive['payload']['playBehavior']
                            # "cid:" URLs refer to MP3 parts saved above; anything
                            # else is a remote stream URL.
                            if stream['streamUrl'].startswith("cid:"):
                                content = path + "tmpcontent/" + stream['streamUrl'].lstrip("cid:") + ".mp3"
                            else:
                                content = stream['streamUrl']
                            # Play in a background thread so parsing can continue.
                            p_thread = threading.Thread(target=play_audio,
                                                        args=(content, stream['offsetInMilliseconds']))
                            p_thread.start()
                elif directive['namespace'] == "Speaker":
                    # speaker control such as volume
                    if directive['name'] == 'SetVolume':
                        vol_token = directive['payload']['volume']
                        type_token = directive['payload']['adjustmentType']
                        if type_token == 'relative':
                            currVolume += int(vol_token)
                        else:
                            currVolume = int(vol_token)
                        # Clamp to the allowed volume range.
                        if currVolume > MAX_VOLUME:
                            currVolume = MAX_VOLUME
                        elif currVolume < MIN_VOLUME:
                            currVolume = MIN_VOLUME
                        if debug: print("new volume = {}".format(currVolume))
        elif 'audioItem' in j['messageBody']:  # additional audio item (no directives)
            nav_token = j['messageBody']['navigationToken']
            for stream in j['messageBody']['audioItem']['streams']:
                if stream['progressReportRequired']:
                    stream_id = stream['streamId']
                if stream['streamUrl'].startswith("cid:"):
                    content = path + "tmpcontent/" + stream['streamUrl'].lstrip("cid:") + ".mp3"
                else:
                    content = stream['streamUrl']
                p_thread = threading.Thread(target=play_audio, args=(content, stream['offsetInMilliseconds']))
                p_thread.start()
        return
    elif r.status_code == 204:
        # Empty response: nothing to do, not an error.
        if debug:
            print("{}Request Response is null {}(This is OKAY!){}".format(bcolors.OKBLUE,
                                                                          bcolors.OKGREEN,
                                                                          bcolors.ENDC))
    else:
        print("{}(process_response Error){} Status Code: {}".format(bcolors.WARNING,
                                                                    bcolors.ENDC,
                                                                    r.status_code))
    r.connection.close()
def play_audio(file_name, offset=0, over_ride_volume=0):
    """Play a local file or stream URL through VLC and block until playback ends.

    Parameters:
        file_name: local path or URL; radiotime.com playlist URLs are first
            resolved to a direct stream via tunein_playlist().
        offset: NOTE(review): accepted (callers pass offsetInMilliseconds)
            but never used -- confirm whether seeking was intended.
        over_ride_volume: when non-zero, use this volume instead of the
            global currVolume.

    Side effects: rebinds the global player `p`, sets audio_playing, and
    relies on state_callback() to clear audio_playing when playback stops.
    """
    global currVolume
    if file_name.find('radiotime.com') != -1:
        file_name = tunein_playlist(file_name)
    global nav_token, p, audio_playing
    if debug:
        print("{}Play_Audio Request for:{} {}".format(bcolors.OKBLUE, bcolors.ENDC, file_name))
    i = vlc.Instance('--aout=alsa')  # , '--alsa-audio-device=mono', '--file-logging', '--logfile=vlc-log.txt')
    m = i.media_new(file_name)
    p = i.media_player_new()
    p.set_media(m)
    mm = m.event_manager()
    mm.event_attach(vlc.EventType.MediaStateChanged, state_callback, p)
    audio_playing = True
    if over_ride_volume == 0:
        p.audio_set_volume(currVolume)
    else:
        p.audio_set_volume(over_ride_volume)
    p.play()
    # BUG FIX: the original busy-wait (`while audio_playing: continue`)
    # pinned a CPU core for the whole playback; sleep briefly between polls.
    while audio_playing:
        time.sleep(0.05)
def tunein_playlist(url):
    """Resolve a TuneIn playlist URL to a direct stream URL.

    Fetches the playlist, parses its first line with the global
    tunein_parser, and returns the first stream URL found, or "" when
    nothing could be parsed.
    """
    global tunein_parser
    if debug:
        print("TUNE IN URL = {}".format(url))
    response = requests.get(url)
    first_line = response.content.split('\n')[0]
    stream_urls = tunein_parser.parse_stream_url(first_line)
    return stream_urls[0] if len(stream_urls) != 0 else ""
def state_callback(event, player):
    """VLC MediaStateChanged callback: sync module playback state with AVS.

    Fires playback progress reports in background threads (so the VLC
    callback returns quickly), clears audio_playing when playback stops,
    and chains to the queued stream or the next AVS item when a track ends.
    """
    global nav_token, audio_playing, stream_url, stream_id
    state = player.get_state()
    # VLC state codes:
    # 0: 'NothingSpecial'
    # 1: 'Opening'
    # 2: 'Buffering'
    # 3: 'Playing'
    # 4: 'Paused'
    # 5: 'Stopped'
    # 6: 'Ended'
    # 7: 'Error'
    if debug: print("{}Player State:{} {}".format(bcolors.OKGREEN, bcolors.ENDC, state))
    if state == 3:  # Playing
        # Only streams that requested progress reports have a stream_id.
        if stream_id != "":
            r_thread = threading.Thread(target=alexa_playback_progress_report_request,
                                        args=("STARTED", "PLAYING", stream_id))
            r_thread.start()
    elif state == 5:  # Stopped
        audio_playing = False
        if stream_id != "":
            r_thread = threading.Thread(target=alexa_playback_progress_report_request,
                                        args=("INTERRUPTED", "IDLE", stream_id))
            r_thread.start()
        # A stop wipes all pending playback state.
        stream_url = ""
        stream_id = ""
        nav_token = ""
    elif state == 6:  # Ended
        audio_playing = False
        if stream_id != "":
            r_thread = threading.Thread(target=alexa_playback_progress_report_request,
                                        args=("FINISHED", "IDLE", stream_id))
            r_thread.start()
            stream_id = ""
        # Chain playback: a queued stream URL wins over asking AVS for
        # the next item.
        if stream_url != "":
            p_thread = threading.Thread(target=play_audio, args=(stream_url,))
            stream_url = ""
            p_thread.start()
        elif nav_token != "":
            g_thread = threading.Thread(target=alexa_getnextitem, args=(nav_token,))
            g_thread.start()
    elif state == 7:  # Error
        audio_playing = False
        if stream_id != "":
            r_thread = threading.Thread(target=alexa_playback_progress_report_request,
                                        args=("ERROR", "IDLE", stream_id))
            r_thread.start()
        stream_url = ""
        stream_id = ""
        nav_token = ""
def silence_listener(throwaway_frames):
# ---- (unreadable extraction artifact removed) ----
' + file_dir + file_name)
os.system('cp -r ' + file_dir + file_name + '_nodeg ' + file_dir + file_name)
os.system('rm -rf ' + file_dir + file_name + '_nodeg')
return()
def task_pbcorr(
    self,
    target = None,
    product = None,
    config = None,
    in_tag = 'orig',
    out_tag = 'pbcorr',
    extra_ext_in = '',
    extra_ext_out = '',
    check_files = True,
    ):
    """
    For one target, product, config combination, primary beam correct
    the interferometer data.

    Parameters
    ----------
    target, product, config : str
        Combination to process.
    in_tag, out_tag : str
        fname_dict tags selecting the input cube and the output
        (pb-corrected) cube names.
    extra_ext_in, extra_ext_out : str
        Extra filename extensions for input/output name lookup.
    check_files : bool
        If True, skip (with a warning) when inputs are missing.
    """
    # Generate file names
    # Input and output live in the same postprocess directory.
    indir = self._kh.get_postprocess_dir_for_target(target)
    outdir = self._kh.get_postprocess_dir_for_target(target)
    fname_dict_in = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_in)
    fname_dict_out = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_out)
    infile = fname_dict_in[in_tag]
    outfile = fname_dict_out[out_tag]
    pbfile = fname_dict_in['pb']
    # Check input file existence
    # CASA images are directories, hence isdir().
    if check_files:
        if not (os.path.isdir(indir+infile)):
            logger.warning("Missing "+indir+infile)
            return()
        if not (os.path.isdir(indir+pbfile)):
            logger.warning("Missing "+indir+pbfile)
            return()
    # Apply the primary beam correction to the data.
    logger.info("")
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
    logger.info("Primary beam correction for:")
    logger.info(str(target)+" , "+str(product)+" , "+str(config))
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
    logger.info("")
    logger.info("Using ccr.primary_beam_correct")
    logger.info("Correcting to "+outfile)
    logger.info("Correcting from "+infile)
    logger.info("Correcting using "+pbfile)
    # Only touch data when not a dry run and CASA is actually available.
    if (not self._dry_run) and casa_enabled:
        ccr.primary_beam_correct(
            infile=indir+infile,
            outfile=outdir+outfile,
            pbfile=indir+pbfile,
            overwrite=True)
    return()
def task_round_beam(
    self,
    target = None,
    product = None,
    config = None,
    in_tag = 'pbcorr',
    out_tag = 'pbcorr_round',
    extra_ext_in = '',
    extra_ext_out = '',
    force_beam_as = None,
    check_files = True,
    ):
    """
    Convolve one target/product/config cube to a round beam.

    Via the force_beam_as keyword this task can also convolve the data
    to a fixed (round) angular resolution.
    """
    # Resolve directories and file names for this combination.
    data_dir = self._kh.get_postprocess_dir_for_target(target)
    out_dir = self._kh.get_postprocess_dir_for_target(target)
    names_in = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_in)
    names_out = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_out)
    infile = names_in[in_tag]
    outfile = names_out[out_tag]
    # Bail out early when the input cube is absent.
    if check_files and not os.path.isdir(data_dir + infile):
        logger.warning("Missing "+infile)
        return()
    # Convolve the data to have a round beam.
    logger.info("")
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
    logger.info("Convolving to a round beam for:")
    logger.info(str(target)+" , "+str(product)+" , "+str(config))
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
    logger.info("")
    logger.info("Using ccr.convolve_to_round_beam")
    logger.info("Convolving from "+infile)
    logger.info("Convolving to "+outfile)
    if force_beam_as is not None:
        logger.info("Forcing beam to "+str(force_beam_as))
    if (not self._dry_run) and casa_enabled:
        ccr.convolve_to_round_beam(
            infile=data_dir+infile,
            outfile=out_dir+outfile,
            force_beam=force_beam_as,
            overwrite=True)
    return()
def task_stage_singledish(
    self,
    target = None,
    product = None,
    config = None,
    template_tag = 'pbcorr_round',
    out_tag = 'prepped_sd',
    extra_ext_in = '',
    extra_ext_out = '',
    check_files = True,
    ):
    """
    For one target, product, config combination, copy the single
    dish data and align it to the interferometric grid.

    Parameters
    ----------
    target, product, config : str
        Combination to process.
    template_tag : str
        fname_dict tag of the interferometric cube used as the
        alignment template.
    out_tag : str
        fname_dict tag under which the staged single dish cube is written.
    extra_ext_in, extra_ext_out : str
        Extra filename extensions for input/output name lookup.
    check_files : bool
        If True, skip (with a warning) when inputs are missing.
    """
    # Generate file names
    # NOTE(review): indir is empty -- 'orig_sd' presumably already
    # resolves to a full path; confirm against the key handler.
    indir = ''
    outdir = self._kh.get_postprocess_dir_for_target(target)
    tempdir = self._kh.get_postprocess_dir_for_target(target)
    fname_dict_in = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_in)
    fname_dict_out = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_out)
    template = fname_dict_in[template_tag]
    infile = fname_dict_in['orig_sd']
    outfile = fname_dict_out[out_tag]
    # Check input file existence
    # The single dish input may be a CASA image (directory) or a flat
    # file, so accept either.
    if check_files:
        if (not (os.path.isdir(indir+infile))) and \
                (not (os.path.isfile(indir+infile))):
            logger.warning("Missing "+infile)
            return()
        if not (os.path.isdir(tempdir+template)):
            logger.warning("Missing "+tempdir+template)
            return()
    # Stage the singledish data for feathering
    logger.info("")
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
    logger.info("Preparing single dish data for:")
    logger.info(str(target)+" , "+str(product)+" , "+str(config))
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
    logger.info("")
    logger.info("Using cfr.prep_sd_for_feather.")
    logger.info("Prepping "+outfile)
    logger.info("Original file "+infile)
    logger.info("Using interferometric template "+template)
    if (not self._dry_run) and casa_enabled:
        cfr.prep_sd_for_feather(
            sdfile_in=indir+infile,
            sdfile_out=outdir+outfile,
            interf_file=tempdir+template,
            do_import=True,
            do_dropdeg=True,
            do_align=True,
            do_checkunits=True,
            overwrite=True)
    return()
def task_make_interf_weight(
    self,
    target = None,
    product = None,
    config = None,
    image_tag = 'pbcorr_round',
    in_tag = 'pb',
    input_type = 'pb',
    scale_by_noise = True,
    out_tag = 'weight',
    extra_ext_in = '',
    extra_ext_out = '',
    check_files = True,
    ):
    """
    For one target, product, config combination, make a 'weight'
    image for use in linearly mosaicking the cube with other,
    overlapping cubes. This task targets interferometric data.

    Parameters
    ----------
    target, product, config : str
        Combination to process.
    image_tag : str
        fname_dict tag of the image used for noise measurement.
    in_tag : str
        fname_dict tag of the input (primary beam) image.
    input_type : str
        Input image type forwarded to cmr.generate_weight_file.
    scale_by_noise : bool
        Scale the weights by the measured noise.
    out_tag : str
        fname_dict tag under which the weight image is written.
    extra_ext_in, extra_ext_out : str
        Extra filename extensions for input/output name lookup.
    check_files : bool
        If True, skip (with a warning) when inputs are missing.
    """
    # Generate file names
    indir = self._kh.get_postprocess_dir_for_target(target)
    outdir = self._kh.get_postprocess_dir_for_target(target)
    fname_dict_in = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_in)
    fname_dict_out = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_out)
    image_file = fname_dict_in[image_tag]
    infile = fname_dict_in[in_tag]
    outfile = fname_dict_out[out_tag]
    # Check input file existence
    if check_files:
        if not (os.path.isdir(indir+infile)):
            logger.warning("Missing "+infile)
            return()
        if not (os.path.isdir(indir+image_file)):
            logger.warning("Missing "+image_file)
            return()
    # Create a weight image for use linear mosaicking targets that
    # are part of a linear mosaic
    logger.info("")
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&")
    logger.info("Making weight file for:")
    logger.info(str(target)+" , "+str(product)+" , "+str(config))
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&")
    logger.info("")
    logger.info("Using cmr.generate_weight_file.")
    logger.info("Making weight file "+outfile)
    logger.info("Based off of primary beam file "+infile)
    logger.info("Measuring noise from file "+image_file)
    if (not self._dry_run) and casa_enabled:
        cmr.generate_weight_file(
            image_file = indir+image_file,
            input_file = indir+infile,
            input_type = input_type,
            # BUG FIX: write the output under outdir (the original used
            # indir; the two are identical today, but outdir is the intent
            # and matches the sibling tasks).
            outfile = outdir + outfile,
            scale_by_noise = scale_by_noise,
            overwrite=True)
    return()
def task_make_singledish_weight(
    self,
    target = None,
    product = None,
    config = None,
    image_tag = 'prepped_sd',
    out_tag = 'sd_weight',
    extra_ext_in = '',
    extra_ext_out = '',
    check_files = True,
    ):
    """
    For one target, product, config combination, make a 'weight'
    image for use in linearly mosaicking the cube with other,
    overlapping cubes. This task targets single dish data.

    Parameters
    ----------
    target, product, config : str
        Combination to process.
    image_tag : str
        fname_dict tag of the staged single dish image used for the
        noise measurement.
    out_tag : str
        fname_dict tag under which the weight image is written.
    extra_ext_in, extra_ext_out : str
        Extra filename extensions for input/output name lookup.
    check_files : bool
        If True, skip (with a warning) when inputs are missing.
    """
    # Generate file names
    indir = self._kh.get_postprocess_dir_for_target(target)
    outdir = self._kh.get_postprocess_dir_for_target(target)
    fname_dict_in = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_in)
    fname_dict_out = self._fname_dict(
        target=target, config=config, product=product, extra_ext=extra_ext_out)
    image_file = fname_dict_in[image_tag]
    outfile = fname_dict_out[out_tag]
    # Check input file existence
    if check_files:
        if not (os.path.isdir(indir+image_file)):
            logger.warning("Missing "+image_file)
            return()
    # Make a weight file for single dish targets that
    # are part of a linear mosaic
    logger.info("")
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&")
    logger.info("Making single dish weight file for:")
    logger.info(str(target)+" , "+str(product)+" , "+str(config))
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&")
    logger.info("")
    logger.info("Using cmr.generate_weight_file.")
    logger.info("Making weight file "+outfile)
    logger.info("Measuring noise from file "+image_file)
    # NOTE(review): the output is written under indir (same value as
    # outdir here); sibling tasks use outdir -- confirm the intent.
    if (not self._dry_run) and casa_enabled:
        cmr.generate_weight_file(
            image_file = indir+image_file,
            input_value = 1.0,
            input_type = 'weight',
            outfile = indir + outfile,
            scale_by_noise = True,
            overwrite=True)
    return()
def task_feather(
    self,
    target = None,
    product = None,
    config = None,
    interf_tag = 'pbcorr_round',
    sd_tag = 'prepped_sd',
    out_tag = 'pbcorr_round',
    extra_ext_in = '',
    extra_ext_out = '',
    apodize = False,
    apod_ext = 'pb',
    copy_weights = True,
    check_files = True,
    ):
    """
    For one target, product, config combination, feather together
    a single dish and interferometric data set. Note that
    apodization is exposed as an option. Also note that the
    configuration of the input and output will differ (an
    interferometric configuration comes in, a feather
    configuration comes out). Optionally, propagate the weights
    from the interferometric side to become the weights for the
    new feathered data.

    Parameters
    ----------
    target, product, config : str
        Combination to process (config is the interferometric config).
    interf_tag, sd_tag : str
        fname_dict tags for the interferometric and single dish inputs.
    out_tag : str
        fname_dict tag (under the feather config) for the output.
    extra_ext_in, extra_ext_out : str
        Extra filename extensions for input/output name lookup.
    apodize : bool
        Taper both inputs by the apod_ext image before feathering.
    apod_ext : str
        fname_dict tag of the apodization image (default: primary beam).
    copy_weights : bool
        Copy the interferometric weight image to the feathered output.
    check_files : bool
        Accepted for interface symmetry with the sibling tasks; see the
        review note below.
    """
    # Generate file names
    indir = self._kh.get_postprocess_dir_for_target(target)
    outdir = self._kh.get_postprocess_dir_for_target(target)
    fname_dict_in = self._fname_dict(
        target=target, config=config, product=product,
        extra_ext=extra_ext_in)
    # Note that feather changes the config
    feather_config = self._kh.get_feather_config_for_interf_config(
        interf_config=config)
    fname_dict_out = self._fname_dict(
        target=target, config=feather_config, product=product,
        extra_ext=extra_ext_out)
    interf_file = fname_dict_in[interf_tag]
    sd_file = fname_dict_in[sd_tag]
    # Without a feather config there is nothing to produce.
    if len(fname_dict_out) == 0:
        logger.info("No feather config found for:")
        logger.info(str(target) + " , "+str(product)+" , "+str(config))
        return()
    outfile = fname_dict_out[out_tag]
    # Error checking
    # Check input file existence
    # NOTE(review): unlike the sibling tasks, no input-existence check is
    # actually performed here even when check_files is True -- confirm.
    # Feather the single dish and interferometer data
    logger.info("")
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
    logger.info("Feathering interferometer and single dish data for:")
    logger.info(str(target)+" , "+str(product)+" , "+str(config))
    logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
    logger.info("")
    logger.info("Using cfr.feather_two_cubes.")
    logger.info("Feathering "+outfile)
    logger.info("Feathering interferometric data "+interf_file)
    logger.info("Feathering single dish data "+sd_file)
    # Feather has a couple of algorithmic choices
    # associated with it. Run the method that the
    # user has selected.
    if apodize:
        apod_file = fname_dict_in[apod_ext]
        logger.info("Apodizing using file "+apod_file)
        if (not self._dry_run) and casa_enabled:
            cfr.feather_two_cubes(
                interf_file=indir+interf_file,
                sd_file=indir+sd_file,
                out_file=outdir+outfile,
                do_blank=True,
                do_apodize=True,
                apod_file=indir+apod_file,
                apod_cutoff=0.0,
                overwrite=True)
    else:
        if (not self._dry_run) and casa_enabled:
            cfr.feather_two_cubes(
                interf_file=indir+interf_file,
                sd_file=indir+sd_file,
                out_file=outdir+outfile,
                do_blank=True,
                do_apodize=False,
                apod_file=None,
                apod_cutoff=-1.0,
                overwrite=True)
    # Optionally reuse the interferometric weights for the feathered cube.
    if copy_weights:
        interf_weight_exists = False
        interf_weight_file = fname_dict_in['weight']
        if os.path.isdir(indir+interf_weight_file):
            interf_weight_exists = True
        else:
            logger.info("Interferometric weight file not found "+interf_weight_file)
        if interf_weight_exists:
            logger.info("")
            logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
            logger.info("Copying weights for:")
            logger.info(str(target)+" , "+str(product)+" , "+str(config))
            logger.info("&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%")
            logger.info("")
            out_weight_file=fname_dict_out['weight']
            logger.info("Copying from "+interf_weight_file)
            logger.info("Copying to "+out_weight_file)
            if (not self._dry_run) and casa_enabled:
                ccr.copy_dropdeg(infile=indir+interf_weight_file,
                                 outfile=outdir+out_weight_file,
                                 overwrite=True)
    return()
def task_compress(
self,
target = None,
product = None,
config = None,
in_tag = 'pbcorr_round',
out_tag = 'pbcorr_trimmed',
do_trimrind = True,
do_pb_too = True,
in_pb_tag = 'pb',
out_pb_tag = 'pb_trimmed',
extra_ext_in = '',
extra_ext_out = '',
check_files = True
):
"""
For one target, product, config combination, compress the cube
to the smallest reasonable volume. Also align the primary beam
file out onto this grid.
"""
# Generate file names
indir = self._kh.get_postprocess_dir_for_target(target)
outdir = self._kh.get_postprocess_dir_for_target(target)
# ---- (unreadable extraction artifact removed) ----
# !pip install segmentation-models
import keras
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from PIL import Image
from segmentation_models import Unet
from segmentation_models.backbones import get_preprocessing
from keras.models import load_model
batch_size = 16  # samples per generator batch
img_resize_shape = (128, 800)  # (height, width) the network sees (source downsampled 2x)
img_source_shape = (256, 1600)  # (height, width) of the raw competition images
in_channels = 3  # RGB input channels
out_channels = 4  # one output mask channel per defect class (e1..e4)
path = '/kaggle/input/severstal-steel-defect-detection/'  # competition data root
epochs = 1  # training epochs for the demo run
class DataGenerator(keras.utils.Sequence):
    """Batch generator for the Severstal steel-defect images.

    Yields (images, masks) batches in 'train' mode and image batches only
    in 'test' mode. Images are resized from img_source_shape down to
    img_resize_shape; masks are decoded from the RLE columns 'e1'..'e4'
    of the supplied dataframe.
    """

    def __init__(self, df, subset="train", shuffle=False, preprocess=None):
        """Store the dataframe and batching options.

        df: dataframe with an 'ImageId' column (plus 'e1'..'e4' RLE
            columns when subset == 'train').
        subset: 'train' or 'test' -- selects the image folder and whether
            masks are produced.
        shuffle: reshuffle the row order at the end of every epoch.
        preprocess: optional callable applied to each image batch.
        """
        super().__init__()
        self.df = df
        self.shuffle = shuffle
        self.subset = subset
        self.batch_size = batch_size
        self.preprocess = preprocess
        self.info = {}  # sample index -> image file name (used for plot titles)
        if self.subset == "train":
            self.data_path = path + 'train_images/'
        elif self.subset == "test":
            self.data_path = path + 'test_images/'
        self.on_epoch_end()

    def __len__(self):
        """Number of full batches (any remainder rows are dropped)."""
        return int(np.floor(len(self.df) / self.batch_size))

    def on_epoch_end(self):
        """Rebuild (and optionally shuffle) the row index order."""
        self.indexes = np.arange(len(self.df))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __getitem__(self, index):
        """Return batch `index`: (x, y) for 'train', x alone otherwise."""
        x = np.empty((self.batch_size, img_resize_shape[0], img_resize_shape[1], in_channels), dtype=np.float32)
        y = np.empty((self.batch_size, img_resize_shape[0], img_resize_shape[1], out_channels), dtype=np.int8)
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        for i, f in enumerate(self.df['ImageId'].iloc[indexes]):
            self.info[index * self.batch_size + i] = f
            # PIL resize takes (width, height).
            x[i,] = Image.open(self.data_path + f).resize((img_resize_shape[1], img_resize_shape[0]))
            if self.subset == 'train':
                for j in range(4):
                    y[i, :, :, j] = rle2maskResize(self.df['e' + str(j + 1)].iloc[indexes[i]])
        if self.preprocess is not None:
            x = self.preprocess(x)
        # BUG FIX: the original `return x, y if self.subset == 'train' else x`
        # parsed as `return x, (y if ... else x)`, so non-train batches were
        # returned as the tuple (x, x) instead of just x.
        if self.subset == 'train':
            return x, y
        return x
def rle2maskResize(rle):
    """Decode an RLE defect string into a binary mask at img_resize_shape.

    Empty/NaN input yields an all-zero mask. Otherwise the run-length
    pairs are expanded on the full-resolution grid (column-major, as the
    competition encodes them) and the result is downsampled 2x in each
    dimension by striding.
    """
    # CONVERT RLE TO MASK
    if pd.isnull(rle) or rle == '':
        return np.zeros(img_resize_shape, dtype=np.uint8)
    flat = np.zeros(img_source_shape[1] * img_source_shape[0], dtype=np.uint8)
    runs = np.asarray([int(token) for token in rle.split()])
    # Pairs of (1-based start, length).
    for start, length in zip(runs[0::2] - 1, runs[1::2]):
        flat[int(start):int(start + length)] = 1
    # order='F' because the RLE indexing is column-major.
    return flat.reshape(img_source_shape, order='F')[::2, ::2]
def mask2contour(mask, width=3):
    """Return the contour of a binary mask as a boolean array.

    XORs the mask against copies shifted left and up by `width` pixels,
    which marks bands of that width along the horizontal and vertical
    boundaries, then ORs the two edge maps together.
    """
    h, w = mask.shape[0], mask.shape[1]
    shifted_left = np.concatenate([mask[:, width:], np.zeros((h, width))], axis=1)
    horiz_edges = np.logical_xor(mask, shifted_left)
    shifted_up = np.concatenate([mask[width:, :], np.zeros((width, w))], axis=0)
    vert_edges = np.logical_xor(mask, shifted_up)
    return np.logical_or(horiz_edges, vert_edges)
def mask2pad(mask, pad=2):
    """Dilate a binary mask by OR-ing in shifted copies of itself.

    Shifts the (cumulatively updated) mask up, down, left and right by
    each offset in range(1, pad, 2) -- the original stride-2 stepping is
    preserved, so the effective dilation grows with `pad` but is not
    exactly `pad` pixels.
    """
    h, w = mask.shape[0], mask.shape[1]
    offsets = range(1, pad, 2)
    # Shift up.
    for k in offsets:
        moved = np.concatenate([mask[k:, :], np.zeros((k, w))], axis=0)
        mask = np.logical_or(mask, moved)
    # Shift down.
    for k in offsets:
        moved = np.concatenate([np.zeros((k, w)), mask[:-k, :]], axis=0)
        mask = np.logical_or(mask, moved)
    # Shift left.
    for k in offsets:
        moved = np.concatenate([mask[:, k:], np.zeros((h, k))], axis=1)
        mask = np.logical_or(mask, moved)
    # Shift right.
    for k in offsets:
        moved = np.concatenate([np.zeros((h, k)), mask[:, :-k]], axis=1)
        mask = np.logical_or(mask, moved)
    return mask
def dice_coef(y_true, y_pred, smooth=1):
    """Dice coefficient (the competition metric), smoothed to avoid 0/0.

    Computed as (2*|A∩B| + smooth) / (|A| + |B| + smooth) over the
    flattened tensors.
    """
    K = keras.backend
    yt = K.flatten(y_true)
    yp = K.flatten(y_pred)
    overlap = K.sum(yt * yp)
    return (2. * overlap + smooth) / (K.sum(yt) + K.sum(yp) + smooth)
def data_prep():
    """Load train.csv and pivot it into one row per image (global `train2`).

    Produces columns ImageId, e1..e4 (RLE strings per defect class, ''
    when absent) and 'count' (number of defect classes present).
    """
    global train2
    train = pd.read_csv(path + 'train.csv')
    # RESTRUCTURE TRAIN DATAFRAME
    # NOTE(review): assumes the legacy 'ImageId_ClassId' column format
    # ("xxx.jpg_1") -- confirm against the CSV actually in use.
    train['ImageId'] = train['ImageId_ClassId'].map(lambda x: x.split('.')[0] + '.jpg')
    # Each image occupies 4 consecutive rows (classes 1..4): take every
    # 4th row for the ids and spread the four EncodedPixels into e1..e4.
    train2 = pd.DataFrame({'ImageId': train['ImageId'][::4]})
    train2['e1'] = train['EncodedPixels'][::4].values
    train2['e2'] = train['EncodedPixels'][1::4].values
    train2['e3'] = train['EncodedPixels'][2::4].values
    train2['e4'] = train['EncodedPixels'][3::4].values
    train2.reset_index(inplace=True, drop=True)
    train2.fillna('', inplace=True)
    train2['count'] = np.sum(train2.iloc[:, 1:] != '', axis=1).values
    train2.head(10)  # NOTE(review): no-op outside a notebook cell -- result is discarded
    print(train.shape)
    print(train2.shape)
def data_inspection():
    """Plot sample images with their defect masks overlaid as contours.

    Samples 4 images per defect class, runs them through DataGenerator,
    and displays each with colour-coded contour overlays
    (yellow=1, green=2, blue=3, magenta=4). Uses module globals as
    scratch space, matching the notebook's original style.
    """
    global defects, train_batches, i, batch, k, img, extra, j, msk
    # DEFECTIVE IMAGE SAMPLES
    defects = list(train2[train2['e1'] != ''].sample(4).index)
    defects += list(train2[train2['e2'] != ''].sample(4).index)
    defects += list(train2[train2['e3'] != ''].sample(4).index)
    defects += list(train2[train2['e4'] != ''].sample(4).index)
    # DATA GENERATOR
    train_batches = DataGenerator(train2[train2.index.isin(defects)], shuffle=True)
    print('Images and masks from our Data Generator')
    print('KEY: yellow=defect1, green=defect2, blue=defect3, magenta=defect4')
    # DISPLAY IMAGES WITH DEFECTS
    for i, batch in enumerate(train_batches):
        plt.figure(figsize=(14, 50))  # 20,18
        for k in range(16):
            plt.subplot(16, 1, k + 1)
            img = batch[0][k,]
            img = Image.fromarray(img.astype('uint8'))
            img = np.array(img)
            extra = ' has defect'
            for j in range(4):
                msk = batch[1][k, :, :, j]
                # Pad first, then take the contour, so the outline sits
                # just outside the defect region.
                msk = mask2pad(msk, pad=3)
                msk = mask2contour(msk, width=2)
                if np.sum(msk) != 0: extra += ' ' + str(j + 1)
                if j == 0:  # yellow
                    img[msk == 1, 0] = 235
                    img[msk == 1, 1] = 235
                elif j == 1:
                    img[msk == 1, 1] = 210  # green
                elif j == 2:
                    img[msk == 1, 2] = 255  # blue
                elif j == 3:  # magenta
                    img[msk == 1, 0] = 255
                    img[msk == 1, 2] = 255
            plt.title(train_batches.info[16 * i + k] + extra)
            plt.axis('off')
            plt.imshow(img)
        plt.subplots_adjust(wspace=0.05)
        plt.show()
def network_setup():
    """Build and train a resnet34 U-Net on an 80/20 split of train2.

    Side effects: sets the module globals preprocess, model, idx,
    train_batches and valid_batches; trains `model` in place.
    """
    global preprocess, model, idx, train_batches, valid_batches
    # LOAD UNET WITH PRETRAINING FROM IMAGENET
    preprocess = get_preprocessing('resnet34')  # for resnet, img = (img-110.0)/1.0
    model = Unet('resnet34', input_shape=(img_resize_shape[0], img_resize_shape[1], in_channels), classes=out_channels,
                 activation='sigmoid')
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[dice_coef])
    # TRAIN AND VALIDATE MODEL -- simple 80/20 split by row order.
    idx = int(0.8 * len(train2))
    print()
    train_batches = DataGenerator(train2.iloc[:idx], shuffle=True, preprocess=preprocess)
    valid_batches = DataGenerator(train2.iloc[idx:], preprocess=preprocess)
    # NOTE(review): `history` is local and discarded, so training curves
    # cannot be inspected afterwards; fit_generator is deprecated in
    # newer Keras releases.
    history = model.fit_generator(train_batches, validation_data=valid_batches, epochs=epochs, verbose=1)
def network_inspection():
    """Visually compare ground-truth defect masks with model predictions.

    Two passes over the validation split (rows train2.iloc[idx:], with
    `idx` set by network_setup): first for images that contain defects,
    then for images with defects 1/2/4 but no defect 3 (where any
    defect-3 prediction is flagged as an ERROR). Left column: image with
    contour overlays; right column: predicted probability map.
    """
    global defects, valid_batches, preds, i, batch, k, img, extra, j, msk
    # PREDICT FROM VALIDATION SET (ONLY IMAGES WITH DEFECTS)
    val_set = train2.iloc[idx:]
    defects = list(val_set[val_set['e1'] != ''].sample(6).index)
    defects += list(val_set[val_set['e2'] != ''].sample(6).index)
    defects += list(val_set[val_set['e3'] != ''].sample(14).index)
    defects += list(val_set[val_set['e4'] != ''].sample(6).index)
    valid_batches = DataGenerator(val_set[val_set.index.isin(defects)], preprocess=preprocess)
    preds = model.predict_generator(valid_batches, verbose=1)
    # PLOT PREDICTIONS
    # Re-create the generator without preprocessing so the plotted
    # images keep their original pixel values.
    valid_batches = DataGenerator(val_set[val_set.index.isin(defects)])
    print('Plotting predictions...')
    print('KEY: yellow=defect1, green=defect2, blue=defect3, magenta=defect4')
    for i, batch in enumerate(valid_batches):
        plt.figure(figsize=(20, 36))
        for k in range(16):
            plt.subplot(16, 2, 2 * k + 1)
            img = batch[0][k,]
            img = Image.fromarray(img.astype('uint8'))
            img = np.array(img)
            dft = 0  # last defect class present (1..4), 0 if none
            extra = ' has defect '
            for j in range(4):
                msk = batch[1][k, :, :, j]
                if np.sum(msk) != 0:
                    dft = j + 1
                    extra += ' ' + str(j + 1)
                msk = mask2pad(msk, pad=2)
                msk = mask2contour(msk, width=3)
                if j == 0:  # yellow
                    img[msk == 1, 0] = 235
                    img[msk == 1, 1] = 235
                elif j == 1:
                    img[msk == 1, 1] = 210  # green
                elif j == 2:
                    img[msk == 1, 2] = 255  # blue
                elif j == 3:  # magenta
                    img[msk == 1, 0] = 255
                    img[msk == 1, 2] = 255
            if extra == ' has defect ': extra = ''
            plt.title('Train ' + train2.iloc[16 * i + k, 0] + extra)
            plt.axis('off')
            plt.imshow(img)
            plt.subplot(16, 2, 2 * k + 2)
            if dft != 0:
                msk = preds[16 * i + k, :, :, dft - 1]
                plt.imshow(msk)
            else:
                plt.imshow(np.zeros((128, 800)))
            plt.axis('off')
            # NOTE(review): when dft == 0 this reuses the last contour
            # mask from the loop above for `mx` -- confirm intent.
            mx = np.round(np.max(msk), 3)
            plt.title('Predict Defect ' + str(dft) + ' (max pixel = ' + str(mx) + ')')
        plt.subplots_adjust(wspace=0.05)
        plt.show()
    # PREDICT FROM VALIDATION SET (ONLY IMAGES WITH DEFECTS 1, 2, 4)
    val_set = train2.iloc[idx:]
    val_set2 = val_set[(val_set['count'] != 0) & (val_set['e3'] == '')].sample(16)
    valid_batches = DataGenerator(val_set2, preprocess=preprocess)
    preds = model.predict_generator(valid_batches, verbose=1)
    # PLOT PREDICTIONS
    valid_batches = DataGenerator(val_set2)
    print('Plotting predictions...')
    print('KEY: yellow=defect1, green=defect2, blue=defect3, magenta=defect4')
    for i, batch in enumerate(valid_batches):
        plt.figure(figsize=(20, 36))
        for k in range(16):
            plt.subplot(16, 2, 2 * k + 1)
            img = batch[0][k,]
            img = Image.fromarray(img.astype('uint8'))
            img = np.array(img)
            dft = 0
            three = False  # truthy when the sample actually has defect 3
            for j in range(4):
                msk = batch[1][k, :, :, j]
                if (j == 2) & (np.sum(msk) != 0):
                    three = np.sum(msk)
                msk = mask2pad(msk, pad=2)
                msk = mask2contour(msk, width=3)
                if j == 0:  # yellow
                    img[msk == 1, 0] = 235
                    img[msk == 1, 1] = 235
                elif j == 1:
                    img[msk == 1, 1] = 210  # green
                elif j == 2:
                    img[msk == 1, 2] = 255  # blue
                elif j == 3:  # magenta
                    img[msk == 1, 0] = 255
                    img[msk == 1, 2] = 255
            extra = '';
            extra2 = ''
            if not three:
                extra = 'NO DEFECT 3'
                extra2 = 'ERROR '
            plt.title('Train ' + train2.iloc[16 * i + k, 0] + ' ' + extra)
            plt.axis('off')
            plt.imshow(img)
            plt.subplot(16, 2, 2 * k + 2)
            # Always display the defect-3 prediction channel for this pass.
            dft = 3
            if dft != 0:
                msk = preds[16 * i + k, :, :, dft - 1]
                plt.imshow(msk)
            else:
                plt.imshow(np.zeros((128, 800)))
            plt.axis('off')
            mx = np.round(np.max(msk), 3)
            plt.title(extra2 + 'Predict Defect ' + str(dft) + ' (max pixel = ' + str(mx) + ')')
        plt.subplots_adjust(wspace=0.05)
        plt.show()
def post_porcess_threshold():
global valid_batches, preds, i, j
# PREDICT FROM VALIDATION SET (USE ALL)
valid_batches = DataGenerator(train2.iloc[idx:], preprocess=preprocess)
preds = model.predict_generator(valid_batches, verbose=1)
# PLOT RESULTS
pix_min = 250
for THRESHOLD in [0.1, 0.25, 0.50, 0.75, 0.9]:
print('######################################')
print('## Threshold =', THRESHOLD, 'displayed below ##')
print('######################################')
correct = [[], [], [], []];
incorrect = [[], [], [], []]
# ---- (unreadable extraction artifact removed) ----
# src/pyflask/api.py
import json
import logging
import logging.handlers
import os
import sys
import config
from biotools import getUserDetails, loginToBioTools, registerTool, validateTool
from figshare import (
createNewFigshareItem,
deleteFigshareArticle,
getFigshareFileUploadStatus,
uploadFileToFigshare,
publishFigshareArticle,
)
from flask import Flask, request
from flask_cors import CORS
from flask_restx import Api, Resource, reqparse
from github import (
getFileFromRepo,
getRepoContentTree,
getRepoContributors,
getRepoReleases,
getUserRepositories,
uploadFileToGithub,
)
from metadata import createCitationCFF, createMetadata
from utilities import (
createFile,
deleteFile,
fileExistInFolder,
foldersPresent,
openFileExplorer,
readFolderContents,
requestJSON,
zipFolder,
)
from zenodo import (
addMetadataToZenodoDeposition,
createNewZenodoDeposition,
createNewZenodoDepositionVersion,
deleteZenodoDeposition,
getAllZenodoDepositions,
getAZenodoDeposition,
publishZenodoDeposition,
removeFileFromZenodoDeposition,
uploadFileToZenodoDeposition,
)
# Semver version string served by the /api_version endpoint.
API_VERSION = "1.4.0"
app = Flask(__name__)
# Swagger UI expansion mode: "list" shows operations collapsed; use
# "full" if you want to see all the details
app.config.SWAGGER_UI_DOC_EXPANSION = "list"
# Fresh random secret per process; anything signed with it does not survive a restart.
SECRET_KEY = os.urandom(32)
app.config["SECRET_KEY"] = SECRET_KEY
# Allow cross-origin requests (the Electron/Vue front end runs on a different origin).
CORS(app)
# configure root logger: rotating log file under ~/.fairshare/logs
LOG_FOLDER = os.path.join(os.path.expanduser("~"), ".fairshare", "logs")
LOG_FILENAME = "api.log"
LOG_PATH = os.path.join(LOG_FOLDER, LOG_FILENAME)
if not os.path.exists(LOG_FOLDER):
    os.makedirs(LOG_FOLDER)
# Add the log message handler to the logger (5 MB per file, 3 rotated backups)
handler = logging.handlers.RotatingFileHandler(
    LOG_PATH, maxBytes=5 * 1024 * 1024, backupCount=3
)
# create logging formatter with millisecond-precision timestamps
logFormatter = logging.Formatter(
    fmt="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
handler.setFormatter(logFormatter)
app.logger.addHandler(handler)
app.logger.setLevel(logging.DEBUG)
# flask-restx API wrapper; interactive Swagger docs are served at /docs.
api = Api(
    app,
    version=API_VERSION,
    title="FAIRshare backend api",
    description="The backend api system for the Electron Vue app",
    doc="/docs",
)
@api.route("/fairshare_server_shutdown", endpoint="shutdown")
class Shutdown(Resource):
    def post(self):
        """Stop the backend server via Werkzeug's shutdown hook."""
        # NOTE(review): the "werkzeug.server.shutdown" environ hook was removed
        # in Werkzeug 2.1 — confirm the pinned Werkzeug version still provides it.
        func = request.environ.get("werkzeug.server.shutdown")
        api.logger.info("Shutting down server")
        if func is None:
            # Not under the Werkzeug dev server (e.g. behind a WSGI server).
            print("Not running with the Werkzeug Server")
            return
        func()
@api.route("/api_version", endpoint="apiVersion")
class ApiVersion(Resource):
    def get(self):
        """Report the semver version number of the running backend API."""
        version = API_VERSION
        api.logger.info(f"API_VERSION: {API_VERSION}")
        return version
@api.route("/echo", endpoint="echo")
class HelloWorld(Resource):
    @api.response(200, "Success")
    @api.response(400, "Validation Error")
    def get(self):
        """Liveness probe: confirms the backend is reachable."""
        reply = "Server active!"
        return reply
###############################################################################
# bio.tools operations
###############################################################################
biotools = api.namespace("biotools", description="bio.tools operations")
@biotools.route("/env", endpoint="BiotoolsURL")
class BiotoolsURL(Resource):
    def get(self):
        """Report which bio.tools server this backend talks to.

        dev.bio.tools corresponds to the testing environment; bio.tools
        corresponds to production.
        """
        server_url = config.BIO_TOOLS_SERVER_URL
        return server_url
@biotools.route("/login", endpoint="BioToolsLogin")
class BioToolsLogin(Resource):
    @biotools.doc(
        responses={200: "Success"},
        params={
            "username": "Username of the account",
            "password": "<PASSWORD>",
        },
    )
    def post(self):
        """Authenticate against bio.tools with the supplied credentials."""
        parser = reqparse.RequestParser()
        # both credentials are mandatory string fields
        for field in ("username", "password"):
            parser.add_argument(field, type=str, required=True)
        credentials = parser.parse_args()
        return loginToBioTools(credentials["username"], credentials["password"])
@biotools.route("/user", endpoint="BioToolsUserDetails")
class BioToolsUserDetails(Resource):
    @biotools.doc(
        responses={200: "Success"},
        params={
            "token": "Token of the account",
        },
    )
    def get(self):
        """Fetch the bio.tools account details for the given token."""
        parser = reqparse.RequestParser()
        parser.add_argument("token", type=str, required=True)
        request_args = parser.parse_args()
        return getUserDetails(request_args["token"])
@biotools.route("/tool/validate", endpoint="BioToolsValidate")
class BioToolsValidate(Resource):
    @biotools.doc(
        responses={200: "Success"},
        params={
            "token": "Token of the account",
            "data": "Data to validate",
        },
    )
    def post(self):
        """Run a bio.tools validation pass over the submitted payload."""
        parser = reqparse.RequestParser()
        for field in ("token", "data"):
            parser.add_argument(field, type=str, required=True)
        args = parser.parse_args()
        # "data" arrives as a JSON-encoded string; decode before validating
        payload = json.loads(args["data"])
        return validateTool(args["token"], payload)
@biotools.route("/tool/register", endpoint="BioToolsRegisterTool")
class BioToolsRegisterTool(Resource):
    @biotools.doc(
        responses={200: "Success"},
        params={
            "token": "Token of the account",
            "data": "Data for tool registration",
        },
    )
    def post(self):
        """Register a new tool on bio.tools."""
        parser = reqparse.RequestParser()
        for field in ("token", "data"):
            parser.add_argument(field, type=str, required=True)
        args = parser.parse_args()
        # "data" arrives as a JSON-encoded string; decode before registering
        payload = json.loads(args["data"])
        return registerTool(args["token"], payload)
###############################################################################
# Metadata operations
###############################################################################
metadata = api.namespace("metadata", description="Metadata operations")
@metadata.route("/create", endpoint="CreateMetadata")
class CreateMetadata(Resource):
    @metadata.doc(
        responses={200: "Success"},
        params={
            "data_types": "Types of data.",
            "data_object": "Full data object to create metadata from. Should have keys from the `data_types` parameter",  # noqa: E501
            "virtual_file": "Parameter to generate a virtual file",
        },
    )
    def post(self):
        """Create the codemetadata json file.

        `data_types` and `data_object` are JSON-encoded strings and are
        decoded here before being handed to `createMetadata`.
        """
        parser = reqparse.RequestParser()
        # required=True: a missing argument now fails fast with a 400 error
        # instead of reaching json.loads(None) below and crashing with a 500.
        parser.add_argument(
            "data_types", type=str, required=True, help="Types of data "
        )
        parser.add_argument(
            "data_object",
            type=str,
            required=True,
            help="Complete data object to create metadata",
        )
        parser.add_argument(
            "virtual_file",
            type=bool,
            help="Parameter to generate a virtual file",
        )
        args = parser.parse_args()
        data_types = json.loads(args["data_types"])
        data = json.loads(args["data_object"])
        virtual_file = args["virtual_file"]
        return createMetadata(data_types, data, virtual_file)
@metadata.route("/citation/create", endpoint="CreateCitationCFF")
class CreateCitationCFF(Resource):
    @metadata.doc(
        responses={200: "Success"},
        params={
            "data_types": "Types of data.",
            "data_object": "Full data object to create metadata from. Should have keys from the `data_types` parameter",  # noqa: E501
            "virtual_file": "Parameter to generate a virtual file",
        },
    )
    def post(self):
        """Create the citation cff file.

        `data_types` and `data_object` are JSON-encoded strings and are
        decoded here before being handed to `createCitationCFF`.
        """
        parser = reqparse.RequestParser()
        # required=True: a missing argument now fails fast with a 400 error
        # instead of reaching json.loads(None) below and crashing with a 500.
        parser.add_argument(
            "data_types", type=str, required=True, help="Types of data "
        )
        parser.add_argument(
            "data_object",
            type=str,
            required=True,
            help="Complete data object to create metadata",
        )
        parser.add_argument(
            "virtual_file",
            type=bool,
            help="Parameter to generate a virtual file",
        )
        args = parser.parse_args()
        data_types = json.loads(args["data_types"])
        data = json.loads(args["data_object"])
        virtual_file = args["virtual_file"]
        return createCitationCFF(data_types, data, virtual_file)
###############################################################################
# Figshare operations
###############################################################################
figshare = api.namespace("figshare", description="Figshare operations")
@figshare.route("/item", endpoint="FigshareItem")
class FigshareItem(Resource):
    @figshare.doc(
        responses={200: "Success", 401: "Authentication error"},
        params={
            "access_token": "figshare access token required with every request.",
            "metadata": "json string with metadata to add to the item",
        },
    )
    def post(self):
        """Create a new figshare article and add metadata. Returns the figshare article id."""  # noqa: E501
        parser = reqparse.RequestParser()
        parser.add_argument(
            "access_token",
            type=str,
            required=True,
            help="access_token is required. accessToken needs to be of type str",
        )
        parser.add_argument(
            "metadata",
            type=str,
            required=True,
            help="metadata is required. metadata needs to be a json string",
        )
        args = parser.parse_args()
        access_token = args["access_token"]
        metadata = args["metadata"]
        return createNewFigshareItem(access_token, metadata)
    @figshare.doc(
        responses={200: "Success", 401: "Authentication error"},
        params={
            "access_token": "figshare access token required with every request.",
            "article_id": "article id for the item",
        },
    )
    def delete(self):
        """Delete a Figshare article."""
        # (docstring above previously said "Delete a zenodo deposition" —
        # a copy-paste from the Zenodo handler; this endpoint deletes a
        # Figshare article.)
        parser = reqparse.RequestParser()
        parser.add_argument(
            "access_token",
            type=str,
            required=True,
            help="access_token is required. accessToken needs to be of type str",
        )
        parser.add_argument(
            "article_id",
            type=str,
            required=True,
            help="article_id is required. article_id needs to be of type str",
        )
        args = parser.parse_args()
        access_token = args["access_token"]
        article_id = args["article_id"]
        return deleteFigshareArticle(access_token, article_id)
@figshare.route("/item/publish", endpoint="FigsharePublish")
class FigsharePublish(Resource):
    @figshare.doc(
        responses={200: "Success", 401: "Authentication error"},
        params={
            "access_token": "Figshare access token required with every request",
            "article_id": "article id for the item",
        },
    )
    def post(self):
        """Publish the draft Figshare article identified by article_id."""
        parser = reqparse.RequestParser()
        required_args = {
            "access_token": "access_token is required. accessToken needs to be of type str",
            "article_id": "article_id is required. article_id needs to be of type str",
        }
        for arg_name, arg_help in required_args.items():
            parser.add_argument(arg_name, type=str, required=True, help=arg_help)
        args = parser.parse_args()
        return publishFigshareArticle(args["access_token"], args["article_id"])
@figshare.route("/item/files/upload", endpoint="FigshareFileUpload")
class FigshareFileUpload(Resource):
    @figshare.doc(
        responses={200: "Success", 401: "Authentication error"},
        params={
            "access_token": "figshare access token required with every request.",
            "article_id": "figshare article id",
            "file_path": "path to the file to upload",
        },
    )
    def post(self):
        """Attach a local file to an existing Figshare article."""
        parser = reqparse.RequestParser()
        required_args = {
            "access_token": "access_token is required. accessToken needs to be of type str",
            "article_id": "article_id is required. article_id needs to be of type str",
            "file_path": "file_path is required. file_path needs to be of type str",
        }
        for arg_name, arg_help in required_args.items():
            parser.add_argument(arg_name, type=str, required=True, help=arg_help)
        args = parser.parse_args()
        return uploadFileToFigshare(
            args["access_token"], args["article_id"], args["file_path"]
        )

    @figshare.doc(
        responses={200: "Success", 401: "Authentication error"},
        params={},
    )
    def get(self):
        """Report the status of the file upload currently in progress."""
        return getFigshareFileUploadStatus()
###############################################################################
# Zenodo API operations
###############################################################################
zenodo = api.namespace("zenodo", description="Zenodo operations")
@zenodo.route("/env", endpoint="zenodoURL")
class zenodoURL(Resource):
    def get(self):
        """Report which Zenodo server this backend talks to.

        sandbox.zenodo.org corresponds to the testing environment;
        zenodo.org corresponds to production.
        """
        server_url = config.ZENODO_SERVER_URL
        return server_url
@zenodo.route("/deposition", endpoint="zenodoDeposition")
class zenodoDeposition(Resource):
    @staticmethod
    def _parse_token_and_id():
        # GET and DELETE take the same two required string arguments; build
        # the parser once here so both handlers stay in sync.
        parser = reqparse.RequestParser()
        required_args = {
            "access_token": "access_token is required. accessToken needs to be of type str",
            "deposition_id": "deposition_id is required. deposition_id needs to be of type str",
        }
        for arg_name, arg_help in required_args.items():
            parser.add_argument(arg_name, type=str, required=True, help=arg_help)
        return parser.parse_args()

    @zenodo.doc(
        responses={
            200: "Success",
            401: "Authentication error",
            400: "Bad request",
        },
        params={"access_token": "Zenodo access token required with every request."},
    )
    def post(self):
        """Create a new empty Zenodo deposition."""
        parser = reqparse.RequestParser()
        parser.add_argument(
            "access_token",
            type=str,
            required=True,
            help="access_token is required. accessToken needs to be of type str",
        )
        token = parser.parse_args()["access_token"]
        return createNewZenodoDeposition(token)

    @zenodo.doc(
        responses={200: "Success", 401: "Authentication error"},
        params={
            "access_token": "Zenodo access token required with every request.",
            "deposition_id": "Zenodo deposition id. For new versions the new deposit id is required",  # noqa: E501
        },
    )
    def get(self):
        """Get a single Zenodo deposition."""
        args = self._parse_token_and_id()
        return getAZenodoDeposition(args["access_token"], args["deposition_id"])

    @zenodo.doc(
        responses={200: "Success", 401: "Authentication error"},
        params={
            "access_token": "Zenodo access token required with every request.",
            "deposition_id": "Zenodo deposition id. For new versions the new deposit id is required",  # noqa: E501
        },
    )
    def delete(self):
        """Delete a Zenodo deposition."""
        args = self._parse_token_and_id()
        return deleteZenodoDeposition(args["access_token"], args["deposition_id"])
@zenodo.route("/depositions", endpoint="zenodoGetAll")
class zenodoGetAll(Resource):
    @zenodo.doc(
        responses={200: "Success", 401: "Authentication error"},
        params={"access_token": "Zenodo access token required with every request."},
    )
    def get(self):
        """List every Zenodo deposition owned by the token's account."""
        parser = reqparse.RequestParser()
        parser.add_argument(
            "access_token",
            type=str,
            required=True,
            help="access_token is required. accessToken needs to be of type str",
        )
        token = parser.parse_args()["access_token"]
        return getAllZenodoDepositions(token)
@zenodo.route("/deposition/files/upload", endpoint="zenodoUploadFile")
class zenodoUploadFile(Resource):
@zenodo.doc(
responses={200: "Success", 401: "Authentication error"},
| |
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to do all of the above. The idea is simple: instead of moving
# data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import math, re, decimal, warnings, datetime
from collections.abc import Iterable
from typing import Union
# VerticaPy Modules
import verticapy
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *
##
#
# __ __ ______ ______ __ __ __ __ __ __ __
# /\ \ / / /\ ___\ /\ __ \ /\ \ /\ \/\ \ /\ "-./ \ /\ "-.\ \
# \ \ \'/ \ \ \____ \ \ \/\ \ \ \ \____ \ \ \_\ \ \ \ \-./\ \ \ \ \-. \
# \ \__| \ \_____\ \ \_____\ \ \_____\ \ \_____\ \ \_\ \ \_\ \ \_\\"\_\
# \/_/ \/_____/ \/_____/ \/_____/ \/_____/ \/_/ \/_/ \/_/ \/_/
#
#
# ---#
class vColumn(str_sql):
"""
---------------------------------------------------------------------------
Python object which that stores all user transformations. If the vDataFrame
represents the entire relation, a vColumn can be seen as one column of that
relation. vColumns simplify several processes with its abstractions.
Parameters
----------
alias: str
vColumn alias.
transformations: list, optional
List of the different transformations. Each transformation must be similar
to the following: (function, type, category)
parent: vDataFrame, optional
Parent of the vColumn. One vDataFrame can have multiple children vColumns
whereas one vColumn can only have one parent.
catalog: dict, optional
Catalog where each key corresponds to an aggregation. vColumns will memorize
the already computed aggregations to gain in performance. The catalog will
be updated when the parent vDataFrame is modified.
Attributes
----------
alias, str : vColumn alias.
catalog, dict : Catalog of pre-computed aggregations.
parent, vDataFrame : Parent of the vColumn.
transformations, str : List of the different transformations.
"""
#
# Special Methods
#
# ---#
def __init__(
self, alias: str, transformations: list = [], parent=None, catalog: dict = {}
):
self.parent, self.alias, self.transformations = (
parent,
alias,
[elem for elem in transformations],
)
self.catalog = {
"cov": {},
"pearson": {},
"spearman": {},
"spearmand": {},
"kendall": {},
"cramer": {},
"biserial": {},
"regr_avgx": {},
"regr_avgy": {},
"regr_count": {},
"regr_intercept": {},
"regr_r2": {},
"regr_slope": {},
"regr_sxx": {},
"regr_sxy": {},
"regr_syy": {},
}
for elem in catalog:
self.catalog[elem] = catalog[elem]
# ---#
    def __getitem__(self, index):
        """Return a slice (as a vDataFrame) or a single element of the vColumn."""
        if isinstance(index, slice):
            # SQL OFFSET/LIMIT cannot express a step, so only contiguous
            # slices are supported.
            assert index.step in (1, None), ValueError(
                "vColumn doesn't allow slicing having steps different than 1."
            )
            index_stop = index.stop
            index_start = index.start
            if not (isinstance(index_start, int)):
                index_start = 0
            if index_start < 0:
                # negative start counts from the end, like Python sequences
                index_start += self.parent.shape()[0]
            if isinstance(index_stop, int):
                if index_stop < 0:
                    index_stop += self.parent.shape()[0]
                limit = index_stop - index_start
                if limit <= 0:
                    # empty slice -> LIMIT 0 (no rows)
                    limit = 0
                limit = " LIMIT {}".format(limit)
            else:
                # open-ended slice: omit the LIMIT clause entirely
                limit = ""
            query = "(SELECT {} FROM {}{} OFFSET {}{}) VERTICAPY_SUBTABLE".format(
                self.alias,
                self.parent.__genSQL__(),
                self.parent.__get_last_order_by__(),
                index_start,
                limit,
            )
            return vDataFrameSQL(query)
        elif isinstance(index, int):
            # cast float columns so the driver hands back a Python float
            cast = "::float" if self.category() == "float" else ""
            if index < 0:
                index += self.parent.shape()[0]
            query = "SELECT {}{} FROM {}{} OFFSET {} LIMIT 1".format(
                self.alias,
                cast,
                self.parent.__genSQL__(),
                self.parent.__get_last_order_by__(),
                index,
            )
            return executeSQL(
                query=query,
                title="Getting the vColumn element.",
                method="fetchfirstelem",
            )
        else:
            # any other index is treated as an attribute lookup
            return getattr(self, index)
# ---#
def __len__(self):
return int(self.count())
# ---#
def __nonzero__(self):
return self.count() > 0
# ---#
def __repr__(self):
return self.head(limit=verticapy.options["max_rows"]).__repr__()
# ---#
def _repr_html_(self):
return self.head(limit=verticapy.options["max_rows"])._repr_html_()
# ---#
    def __setattr__(self, attr, val):
        # Write straight into the instance dict — presumably to bypass any
        # descriptor/attribute machinery inherited from str_sql; confirm
        # against the base class before changing.
        self.__dict__[attr] = val
#
# Methods
#
# ---#
def aad(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'aad' (Average Absolute Deviation).
Returns
-------
float
aad
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["aad"]).values[self.alias][0]
# ---#
def abs(self):
"""
---------------------------------------------------------------------------
Applies the absolute value function to the input vColumn.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
return self.apply(func="ABS({})")
# ---#
def add(self, x: float):
"""
---------------------------------------------------------------------------
Adds the input element to the vColumn.
Parameters
----------
x: float
If the vColumn type is date like (date, datetime ...), the parameter 'x'
will represent the number of seconds, otherwise it will represent a number.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("x", x, [int, float])])
if self.isdate():
return self.apply(func="TIMESTAMPADD(SECOND, {}, {})".format(x, "{}"))
else:
return self.apply(func="{} + ({})".format("{}", x))
# ---#
    def add_copy(self, name: str):
        """
        ---------------------------------------------------------------------------
        Adds a copy vColumn to the parent vDataFrame.

        Parameters
        ----------
        name: str
            Name of the copy.

        Returns
        -------
        vDataFrame
            self.parent

        See Also
        --------
        vDataFrame.eval : Evaluates a customized expression.
        """
        check_types([("name", name, [str])])
        # quote the identifier; embedded double quotes become underscores
        name = quote_ident(name.replace('"', "_"))
        assert name.replace('"', ""), EmptyParameter(
            "The parameter 'name' must not be empty"
        )
        assert not (self.parent.is_colname_in(name)), NameError(
            f"A vColumn has already the alias {name}.\nBy changing the parameter 'name', you'll be able to solve this issue."
        )
        # copy carries over the transformation chain and the aggregation cache
        new_vColumn = vColumn(
            name,
            parent=self.parent,
            transformations=[item for item in self.transformations],
            catalog=self.catalog,
        )
        # expose the copy on the parent under both the quoted and unquoted name
        setattr(self.parent, name, new_vColumn)
        setattr(self.parent, name[1:-1], new_vColumn)
        self.parent._VERTICAPY_VARIABLES_["columns"] += [name]
        self.parent.__add_to_history__(
            "[Add Copy]: A copy of the vColumn {} named {} was added to the vDataFrame.".format(
                self.alias, name
            )
        )
        return self.parent
# ---#
    def aggregate(self, func: list):
        """
        ---------------------------------------------------------------------------
        Aggregates the vColumn using the input functions.

        Parameters
        ----------
        func: list
            List of the different aggregation.
                aad            : average absolute deviation
                approx_unique  : approximative cardinality
                count          : number of non-missing elements
                cvar           : conditional value at risk
                dtype          : vColumn type
                iqr            : interquartile range
                kurtosis       : kurtosis
                jb             : Jarque-Bera index
                mad            : median absolute deviation
                max            : maximum
                mean           : average
                median         : median
                min            : minimum
                mode           : most occurent element
                percent        : percent of non-missing elements
                q%             : q quantile (ex: 50% for the median)
                prod           : product
                range          : difference between the max and the min
                sem            : standard error of the mean
                skewness       : skewness
                sum            : sum
                std            : standard deviation
                topk           : kth most occurent element (ex: top1 for the mode)
                topk_percent   : kth most occurent element density
                unique         : cardinality (count distinct)
                var            : variance
                    Other aggregations could work if it is part of
                    the DB version you are using.

        Returns
        -------
        tablesample
            An object containing the result. For more information, see
            utilities.tablesample.

        See Also
        --------
        vDataFrame.analytic : Adds a new vColumn to the vDataFrame by using an advanced
            analytical function on a specific vColumn.
        """
        # delegate to the parent, restricted to this column, then transpose
        # so the result is keyed by the column alias
        return self.parent.aggregate(func=func, columns=[self.alias]).transpose()
    # shorthand alias for aggregate
    agg = aggregate
# ---#
def apply(self, func: str, copy_name: str = ""):
"""
---------------------------------------------------------------------------
Applies a function to the vColumn.
Parameters
----------
func: str,
Function in pure SQL used to transform the vColumn.
The function variable must be composed of two flower brackets {}. For
example to apply the function: x -> x^2 + 2 use "POWER({}, 2) + 2".
copy_name: str, optional
If not empty, a copy will be created using the input Name.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.apply : Applies functions to the input vColumns.
vDataFrame.applymap : Applies a function to all the vColumns.
vDataFrame.eval : Evaluates a customized expression.
"""
if isinstance(func, str_sql):
func = str(func)
check_types([("func", func, [str]), ("copy_name", copy_name, [str])])
try:
try:
ctype = get_data_types(
"SELECT {} AS | |
346 1 0 106
1 347 1 0 105
1 348 1 0 104
1 349 1 0 103
1 350 1 0 102
1 351 1 0 101
1 328 1 0 98
1 329 1 0 97
1 330 1 0 96
1 333 1 0 93
1 334 1 0 92
1 335 1 0 91
1 352 1 0 90
1 353 1 0 89
1 354 1 0 88
1 358 1 0 252
1 359 1 0 251
1 360 1 0 250
1 371 1 0 249
1 372 1 0 248
1 373 1 0 247
1 374 1 0 246
1 375 1 0 245
1 376 1 0 244
1 377 1 0 243
1 378 1 0 242
1 379 1 0 241
1 380 1 0 240
1 381 1 0 239
1 382 1 0 238
1 383 1 0 237
1 384 1 0 236
1 361 1 0 233
1 362 1 0 232
1 363 1 0 231
1 366 1 0 228
1 367 1 0 227
1 368 1 0 226
1 385 1 0 225
1 386 1 0 224
1 387 1 0 223
1 435 1 0 114
1 436 1 0 113
1 403 1 0 112
1 404 1 0 111
1 405 1 0 110
1 406 1 0 109
1 407 1 0 108
1 430 1 0 105
1 449 1 0 104
1 392 1 0 103
1 429 1 0 98
1 450 1 0 97
1 391 1 0 96
1 398 1 0 93
1 399 1 0 92
1 400 1 0 91
1 393 1 0 90
1 394 1 0 89
1 395 1 0 88
1 445 1 0 249
1 446 1 0 248
1 422 1 0 247
1 423 1 0 246
1 424 1 0 245
1 425 1 0 244
1 426 1 0 243
1 440 1 0 240
1 451 1 0 239
1 411 1 0 238
1 439 1 0 233
1 452 1 0 232
1 410 1 0 231
1 417 1 0 228
1 418 1 0 227
1 419 1 0 226
1 412 1 0 225
1 413 1 0 224
1 414 1 0 223
1 325 1 0 87
1 326 1 0 86
1 327 1 0 85
1 328 1 0 84
1 329 1 0 83
1 330 1 0 82
1 331 1 0 81
1 332 1 0 80
1 333 1 0 79
1 334 1 0 78
1 335 1 0 77
1 336 1 0 76
1 337 1 0 75
1 338 1 0 74
1 339 1 0 73
1 340 1 0 72
1 341 1 0 71
1 342 1 0 70
1 343 1 0 69
1 344 1 0 68
1 345 1 0 67
1 346 1 0 66
1 347 1 0 65
1 348 1 0 64
1 349 1 0 63
1 350 1 0 62
1 351 1 0 61
1 352 1 0 60
1 353 1 0 59
1 354 1 0 58
1 355 1 0 57
1 356 1 0 56
1 357 1 0 55
1 358 1 0 222
1 359 1 0 221
1 360 1 0 220
1 361 1 0 219
1 362 1 0 218
1 363 1 0 217
1 364 1 0 216
1 365 1 0 215
1 366 1 0 214
1 367 1 0 213
1 368 1 0 212
1 369 1 0 211
1 370 1 0 210
1 371 1 0 209
1 372 1 0 208
1 373 1 0 207
1 374 1 0 206
1 375 1 0 205
1 376 1 0 204
1 377 1 0 203
1 378 1 0 202
1 379 1 0 201
1 380 1 0 200
1 381 1 0 199
1 382 1 0 198
1 383 1 0 197
1 384 1 0 196
1 385 1 0 195
1 386 1 0 194
1 387 1 0 193
1 388 1 0 192
1 389 1 0 191
1 390 1 0 190
1 328 1 0 87
1 329 1 0 86
1 330 1 0 85
1 347 1 0 84
1 348 1 0 83
1 349 1 0 82
1 352 1 0 79
1 353 1 0 78
1 354 1 0 77
1 331 1 0 74
1 332 1 0 73
1 333 1 0 72
1 334 1 0 71
1 335 1 0 70
1 336 1 0 69
1 337 1 0 68
1 338 1 0 67
1 339 1 0 66
1 340 1 0 65
1 341 1 0 64
1 342 1 0 63
1 343 1 0 62
1 344 1 0 61
1 355 1 0 60
1 356 1 0 59
1 357 1 0 58
1 361 1 0 222
1 362 1 0 221
1 363 1 0 220
1 380 1 0 219
1 381 1 0 218
1 382 1 0 217
1 385 1 0 214
1 386 1 0 213
1 387 1 0 212
1 364 1 0 209
1 365 1 0 208
1 366 1 0 207
1 367 1 0 206
1 368 1 0 205
1 369 1 0 204
1 370 1 0 203
1 371 1 0 202
1 372 1 0 201
1 373 1 0 200
1 374 1 0 199
1 375 1 0 198
1 376 1 0 197
1 377 1 0 196
1 388 1 0 195
1 389 1 0 194
1 390 1 0 193
1 403 1 0 87
1 404 1 0 86
1 405 1 0 85
1 398 1 0 84
1 399 1 0 83
1 400 1 0 82
1 438 1 0 79
1 453 1 0 78
1 409 1 0 77
1 437 1 0 72
1 454 1 0 71
1 408 1 0 70
1 431 1 0 67
1 432 1 0 66
1 393 1 0 65
1 394 1 0 64
1 395 1 0 63
1 396 1 0 62
1 397 1 0 61
1 422 1 0 222
1 423 1 0 221
1 424 1 0 220
1 417 1 0 219
1 418 1 0 218
1 419 1 0 217
1 448 1 0 214
1 455 1 0 213
1 428 1 0 212
1 447 1 0 207
1 456 1 0 206
1 427 1 0 205
1 441 1 0 202
1 442 1 0 201
1 412 1 0 200
1 413 1 0 199
1 414 1 0 198
1 415 1 0 197
1 416 1 0 196
1 457 1 0 186
1 458 1 0 185
1 459 1 0 184
1 460 1 0 183
1 461 1 0 182
1 462 1 0 181
1 463 1 0 180
1 464 1 0 179
1 465 1 0 178
1 466 1 0 177
1 467 1 0 176
1 468 1 0 175
1 469 1 0 174
1 470 1 0 173
1 471 1 0 172
1 472 1 0 171
1 473 1 0 170
1 474 1 0 169
1 475 1 0 168
1 476 1 0 167
1 477 1 0 166
1 478 1 0 165
1 479 1 0 164
1 480 1 0 163
1 481 1 0 162
1 482 1 0 161
1 483 1 0 160
1 484 1 0 159
1 485 1 0 158
1 486 1 0 157
1 487 1 0 156
1 488 1 0 155
1 489 1 0 154
1 490 1 0 321
1 491 1 0 320
1 492 1 0 319
1 493 1 0 318
1 494 1 0 317
1 495 1 0 316
1 496 1 0 315
1 497 1 0 314
1 498 1 0 313
1 499 1 0 312
1 500 1 0 311
1 501 1 0 310
1 502 1 0 309
1 503 1 0 308
1 504 1 0 307
1 505 1 0 306
1 506 1 0 305
1 507 1 0 304
1 508 1 0 303
1 509 1 0 302
1 510 1 0 301
1 511 1 0 300
1 512 1 0 299
1 513 1 0 298
1 514 1 0 297
1 515 1 0 296
1 516 1 0 295
1 517 1 0 294
1 518 1 0 293
1 519 1 0 292
1 520 1 0 291
1 521 1 0 290
1 522 1 0 289
1 458 1 0 186
1 459 1 0 185
1 461 1 0 183
1 462 1 0 182
1 464 1 0 180
1 465 1 0 179
1 466 1 0 178
1 467 1 0 177
1 468 1 0 176
1 469 1 0 175
1 471 1 0 173
1 472 1 0 172
1 473 1 0 171
1 474 1 0 170
1 475 1 0 169
1 476 1 0 168
1 478 1 0 166
1 479 1 0 165
1 480 1 0 164
1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.