input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
'''
pass
def is_property_readonly(self):
    '''Stub: report whether an RNA property is read-only (auto-generated, empty body).
    '''
    pass
def is_property_set(self):
    '''Stub: report whether an RNA property has been set (auto-generated, empty body).
    '''
    pass
def items(self):
    '''Stub: (key, value) pairs of custom (ID) properties (auto-generated, empty body).
    '''
    pass
def keyframe_delete(self):
    '''Stub: delete a keyframe of a property (auto-generated, empty body).
    '''
    pass
def keyframe_insert(self):
    '''Stub: insert a keyframe on a property (auto-generated, empty body).
    '''
    pass
def keys(self):
    '''Stub: keys of custom (ID) properties (auto-generated, empty body).
    '''
    pass
def path_from_id(self):
    '''Stub: RNA path relative to the owning ID datablock (auto-generated, empty body).
    '''
    pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
              filter_ext, filter_path, display_name, add_operator):
    '''Stub: populate the menu from files found in searchpaths (auto-generated, empty body).
    '''
    pass
def path_resolve(self):
    '''Stub: resolve an RNA data path (auto-generated, empty body).
    '''
    pass
def pop(self):
    '''Stub: remove and return a custom (ID) property (auto-generated, empty body).
    '''
    pass
def prepend(self, draw_func):
    '''Stub: add draw_func to the start of this menu type's draw functions (auto-generated, empty body).
    '''
    pass
def property_overridable_library_set(self):
    '''Stub: enable/disable library overrides for a property (auto-generated, empty body).
    '''
    pass
def property_unset(self):
    '''Stub: reset a property to its default (auto-generated, empty body).
    '''
    pass
def remove(self, draw_func):
    '''Stub: remove draw_func from this menu type's draw functions (auto-generated, empty body).
    '''
    pass
def type_recast(self):
    '''Stub: return this struct re-cast to its most derived type (auto-generated, empty body).
    '''
    pass
def values(self):
    '''Stub: values of custom (ID) properties (auto-generated, empty body).
    '''
    pass
class VIEW3D_MT_edit_mesh_normals(bpy_types.Menu, bpy_types._GenericUI):
    '''Auto-generated API stub of Blender's mesh edit-mode "Normals" menu.

    All members mirror the bpy_types.Menu / bpy_struct interface; every body
    is an empty placeholder. Descriptions are inferred from the Blender
    Python API — confirm against the bpy docs of the matching Blender
    version.
    '''
    # Menu label shown in the UI; populated by Blender's RNA at runtime.
    bl_label = None
    ''' '''
    # RNA type definition of this class; populated by Blender at runtime.
    bl_rna = None
    ''' '''
    # Owning ID datablock of this struct; populated by Blender at runtime.
    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: add draw_func to the end of this menu type's draw functions.
        '''
        pass

    def as_pointer(self):
        '''Stub: return the memory address of the wrapped struct.
        '''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: find a registered RNA subclass by identifier.
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: find a registered Python subclass by identifier.
        '''
        pass

    def draw(self, _context):
        '''Stub: draw callback populating the menu layout.
        '''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: draw the menu as a collapsible UI element.
        '''
        pass

    def draw_preset(self, _context):
        '''Stub: draw entries for presets found in preset paths.
        '''
        pass

    def driver_add(self):
        '''Stub: add driver(s) to the given property.
        '''
        pass

    def driver_remove(self):
        '''Stub: remove driver(s) from the given property.
        '''
        pass

    def get(self):
        '''Stub: dict-style lookup of a custom (ID) property.
        '''
        pass

    def is_extended(self):
        '''Stub: report whether the draw function was extended.
        '''
        pass

    def is_property_hidden(self):
        '''Stub: report whether a property is hidden.
        '''
        pass

    def is_property_overridable_library(self):
        '''Stub: report whether a property supports library overrides.
        '''
        pass

    def is_property_readonly(self):
        '''Stub: report whether a property is read-only.
        '''
        pass

    def is_property_set(self):
        '''Stub: report whether a property has been set.
        '''
        pass

    def items(self):
        '''Stub: (key, value) pairs of custom (ID) properties.
        '''
        pass

    def keyframe_delete(self):
        '''Stub: delete a keyframe of a property.
        '''
        pass

    def keyframe_insert(self):
        '''Stub: insert a keyframe on a property.
        '''
        pass

    def keys(self):
        '''Stub: keys of custom (ID) properties.
        '''
        pass

    def path_from_id(self):
        '''Stub: RNA path relative to the owning ID datablock.
        '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: populate the menu from files found in searchpaths.
        '''
        pass

    def path_resolve(self):
        '''Stub: resolve an RNA data path.
        '''
        pass

    def pop(self):
        '''Stub: remove and return a custom (ID) property.
        '''
        pass

    def prepend(self, draw_func):
        '''Stub: add draw_func to the start of this menu type's draw functions.
        '''
        pass

    def property_overridable_library_set(self):
        '''Stub: enable/disable library overrides for a property.
        '''
        pass

    def property_unset(self):
        '''Stub: reset a property to its default.
        '''
        pass

    def remove(self, draw_func):
        '''Stub: remove draw_func from this menu type's draw functions.
        '''
        pass

    def type_recast(self):
        '''Stub: return this struct re-cast to its most derived type.
        '''
        pass

    def values(self):
        '''Stub: values of custom (ID) properties.
        '''
        pass
class VIEW3D_MT_edit_mesh_normals_average(bpy_types.Menu,
                                          bpy_types._GenericUI):
    '''Auto-generated API stub of Blender's mesh edit-mode normals "Average"
    submenu.

    All members mirror the bpy_types.Menu / bpy_struct interface; every body
    is an empty placeholder. Descriptions are inferred from the Blender
    Python API — confirm against the bpy docs of the matching Blender
    version.
    '''
    # Menu label shown in the UI; populated by Blender's RNA at runtime.
    bl_label = None
    ''' '''
    # RNA type definition of this class; populated by Blender at runtime.
    bl_rna = None
    ''' '''
    # Owning ID datablock of this struct; populated by Blender at runtime.
    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: add draw_func to the end of this menu type's draw functions.
        '''
        pass

    def as_pointer(self):
        '''Stub: return the memory address of the wrapped struct.
        '''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: find a registered RNA subclass by identifier.
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: find a registered Python subclass by identifier.
        '''
        pass

    def draw(self, _context):
        '''Stub: draw callback populating the menu layout.
        '''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: draw the menu as a collapsible UI element.
        '''
        pass

    def draw_preset(self, _context):
        '''Stub: draw entries for presets found in preset paths.
        '''
        pass

    def driver_add(self):
        '''Stub: add driver(s) to the given property.
        '''
        pass

    def driver_remove(self):
        '''Stub: remove driver(s) from the given property.
        '''
        pass

    def get(self):
        '''Stub: dict-style lookup of a custom (ID) property.
        '''
        pass

    def is_extended(self):
        '''Stub: report whether the draw function was extended.
        '''
        pass

    def is_property_hidden(self):
        '''Stub: report whether a property is hidden.
        '''
        pass

    def is_property_overridable_library(self):
        '''Stub: report whether a property supports library overrides.
        '''
        pass

    def is_property_readonly(self):
        '''Stub: report whether a property is read-only.
        '''
        pass

    def is_property_set(self):
        '''Stub: report whether a property has been set.
        '''
        pass

    def items(self):
        '''Stub: (key, value) pairs of custom (ID) properties.
        '''
        pass

    def keyframe_delete(self):
        '''Stub: delete a keyframe of a property.
        '''
        pass

    def keyframe_insert(self):
        '''Stub: insert a keyframe on a property.
        '''
        pass

    def keys(self):
        '''Stub: keys of custom (ID) properties.
        '''
        pass

    def path_from_id(self):
        '''Stub: RNA path relative to the owning ID datablock.
        '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: populate the menu from files found in searchpaths.
        '''
        pass

    def path_resolve(self):
        '''Stub: resolve an RNA data path.
        '''
        pass

    def pop(self):
        '''Stub: remove and return a custom (ID) property.
        '''
        pass

    def prepend(self, draw_func):
        '''Stub: add draw_func to the start of this menu type's draw functions.
        '''
        pass

    def property_overridable_library_set(self):
        '''Stub: enable/disable library overrides for a property.
        '''
        pass

    def property_unset(self):
        '''Stub: reset a property to its default.
        '''
        pass

    def remove(self, draw_func):
        '''Stub: remove draw_func from this menu type's draw functions.
        '''
        pass

    def type_recast(self):
        '''Stub: return this struct re-cast to its most derived type.
        '''
        pass

    def values(self):
        '''Stub: values of custom (ID) properties.
        '''
        pass
class VIEW3D_MT_edit_mesh_normals_select_strength(bpy_types.Menu,
                                                  bpy_types._GenericUI):
    '''Auto-generated API stub of Blender's mesh edit-mode normals
    "Select by Face Strength" submenu.

    All members mirror the bpy_types.Menu / bpy_struct interface; every body
    is an empty placeholder. Descriptions are inferred from the Blender
    Python API — confirm against the bpy docs of the matching Blender
    version.
    '''
    # Menu label shown in the UI; populated by Blender's RNA at runtime.
    bl_label = None
    ''' '''
    # RNA type definition of this class; populated by Blender at runtime.
    bl_rna = None
    ''' '''
    # Owning ID datablock of this struct; populated by Blender at runtime.
    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: add draw_func to the end of this menu type's draw functions.
        '''
        pass

    def as_pointer(self):
        '''Stub: return the memory address of the wrapped struct.
        '''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: find a registered RNA subclass by identifier.
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: find a registered Python subclass by identifier.
        '''
        pass

    def draw(self, _context):
        '''Stub: draw callback populating the menu layout.
        '''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: draw the menu as a collapsible UI element.
        '''
        pass

    def draw_preset(self, _context):
        '''Stub: draw entries for presets found in preset paths.
        '''
        pass

    def driver_add(self):
        '''Stub: add driver(s) to the given property.
        '''
        pass

    def driver_remove(self):
        '''Stub: remove driver(s) from the given property.
        '''
        pass

    def get(self):
        '''Stub: dict-style lookup of a custom (ID) property.
        '''
        pass

    def is_extended(self):
        '''Stub: report whether the draw function was extended.
        '''
        pass

    def is_property_hidden(self):
        '''Stub: report whether a property is hidden.
        '''
        pass

    def is_property_overridable_library(self):
        '''Stub: report whether a property supports library overrides.
        '''
        pass

    def is_property_readonly(self):
        '''Stub: report whether a property is read-only.
        '''
        pass

    def is_property_set(self):
        '''Stub: report whether a property has been set.
        '''
        pass

    def items(self):
        '''Stub: (key, value) pairs of custom (ID) properties.
        '''
        pass

    def keyframe_delete(self):
        '''Stub: delete a keyframe of a property.
        '''
        pass

    def keyframe_insert(self):
        '''Stub: insert a keyframe on a property.
        '''
        pass

    def keys(self):
        '''Stub: keys of custom (ID) properties.
        '''
        pass

    def path_from_id(self):
        '''Stub: RNA path relative to the owning ID datablock.
        '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: populate the menu from files found in searchpaths.
        '''
        pass

    def path_resolve(self):
        '''Stub: resolve an RNA data path.
        '''
        pass

    def pop(self):
        '''Stub: remove and return a custom (ID) property.
        '''
        pass

    def prepend(self, draw_func):
        '''Stub: add draw_func to the start of this menu type's draw functions.
        '''
        pass

    def property_overridable_library_set(self):
        '''Stub: enable/disable library overrides for a property.
        '''
        pass

    def property_unset(self):
        '''Stub: reset a property to its default.
        '''
        pass

    def remove(self, draw_func):
        '''Stub: remove draw_func from this menu type's draw functions.
        '''
        pass

    def type_recast(self):
        '''Stub: return this struct re-cast to its most derived type.
        '''
        pass

    def values(self):
        '''Stub: values of custom (ID) properties.
        '''
        pass
class VIEW3D_MT_edit_mesh_normals_set_strength(bpy_types.Menu,
                                               bpy_types._GenericUI):
    '''Auto-generated API stub of Blender's mesh edit-mode normals
    "Set Face Strength" submenu.

    All members mirror the bpy_types.Menu / bpy_struct interface; every body
    is an empty placeholder. Descriptions are inferred from the Blender
    Python API — confirm against the bpy docs of the matching Blender
    version.
    '''
    # Menu label shown in the UI; populated by Blender's RNA at runtime.
    bl_label = None
    ''' '''
    # RNA type definition of this class; populated by Blender at runtime.
    bl_rna = None
    ''' '''
    # Owning ID datablock of this struct; populated by Blender at runtime.
    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: add draw_func to the end of this menu type's draw functions.
        '''
        pass

    def as_pointer(self):
        '''Stub: return the memory address of the wrapped struct.
        '''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: find a registered RNA subclass by identifier.
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: find a registered Python subclass by identifier.
        '''
        pass

    def draw(self, _context):
        '''Stub: draw callback populating the menu layout.
        '''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: draw the menu as a collapsible UI element.
        '''
        pass

    def draw_preset(self, _context):
        '''Stub: draw entries for presets found in preset paths.
        '''
        pass

    def driver_add(self):
        '''Stub: add driver(s) to the given property.
        '''
        pass

    def driver_remove(self):
        '''Stub: remove driver(s) from the given property.
        '''
        pass

    def get(self):
        '''Stub: dict-style lookup of a custom (ID) property.
        '''
        pass

    def is_extended(self):
        '''Stub: report whether the draw function was extended.
        '''
        pass

    def is_property_hidden(self):
        '''Stub: report whether a property is hidden.
        '''
        pass

    def is_property_overridable_library(self):
        '''Stub: report whether a property supports library overrides.
        '''
        pass

    def is_property_readonly(self):
        '''Stub: report whether a property is read-only.
        '''
        pass

    def is_property_set(self):
        '''Stub: report whether a property has been set.
        '''
        pass

    def items(self):
        '''Stub: (key, value) pairs of custom (ID) properties.
        '''
        pass

    def keyframe_delete(self):
        '''Stub: delete a keyframe of a property.
        '''
        pass

    def keyframe_insert(self):
        '''Stub: insert a keyframe on a property.
        '''
        pass

    def keys(self):
        '''Stub: keys of custom (ID) properties.
        '''
        pass

    def path_from_id(self):
        '''Stub: RNA path relative to the owning ID datablock.
        '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: populate the menu from files found in searchpaths.
        '''
        pass

    def path_resolve(self):
        '''Stub: resolve an RNA data path.
        '''
        pass

    def pop(self):
        '''Stub: remove and return a custom (ID) property.
        '''
        pass

    def prepend(self, draw_func):
        '''Stub: add draw_func to the start of this menu type's draw functions.
        '''
        pass

    def property_overridable_library_set(self):
        '''Stub: enable/disable library overrides for a property.
        '''
        pass

    def property_unset(self):
        '''Stub: reset a property to its default.
        '''
        pass

    def remove(self, draw_func):
        '''Stub: remove draw_func from this menu type's draw functions.
        '''
        pass

    def type_recast(self):
        '''Stub: return this struct re-cast to its most derived type.
        '''
        pass

    def values(self):
        '''Stub: values of custom (ID) properties.
        '''
        pass
class VIEW3D_MT_edit_mesh_select_by_trait(bpy_types.Menu,
                                          bpy_types._GenericUI):
    '''Auto-generated API stub of Blender's mesh edit-mode
    "Select All by Trait" menu.

    All members mirror the bpy_types.Menu / bpy_struct interface; every body
    is an empty placeholder. Descriptions are inferred from the Blender
    Python API — confirm against the bpy docs of the matching Blender
    version.
    '''
    # Menu label shown in the UI; populated by Blender's RNA at runtime.
    bl_label = None
    ''' '''
    # RNA type definition of this class; populated by Blender at runtime.
    bl_rna = None
    ''' '''
    # Owning ID datablock of this struct; populated by Blender at runtime.
    id_data = None
    ''' '''

    def append(self, draw_func):
        '''Stub: add draw_func to the end of this menu type's draw functions.
        '''
        pass

    def as_pointer(self):
        '''Stub: return the memory address of the wrapped struct.
        '''
        pass

    def bl_rna_get_subclass(self):
        '''Stub: find a registered RNA subclass by identifier.
        '''
        pass

    def bl_rna_get_subclass_py(self):
        '''Stub: find a registered Python subclass by identifier.
        '''
        pass

    def draw(self, context):
        '''Stub: draw callback populating the menu layout (this menu's
        original implementation reads the context, hence no underscore).
        '''
        pass

    def draw_collapsible(self, context, layout):
        '''Stub: draw the menu as a collapsible UI element.
        '''
        pass

    def draw_preset(self, _context):
        '''Stub: draw entries for presets found in preset paths.
        '''
        pass

    def driver_add(self):
        '''Stub: add driver(s) to the given property.
        '''
        pass

    def driver_remove(self):
        '''Stub: remove driver(s) from the given property.
        '''
        pass

    def get(self):
        '''Stub: dict-style lookup of a custom (ID) property.
        '''
        pass

    def is_extended(self):
        '''Stub: report whether the draw function was extended.
        '''
        pass

    def is_property_hidden(self):
        '''Stub: report whether a property is hidden.
        '''
        pass

    def is_property_overridable_library(self):
        '''Stub: report whether a property supports library overrides.
        '''
        pass

    def is_property_readonly(self):
        '''Stub: report whether a property is read-only.
        '''
        pass

    def is_property_set(self):
        '''Stub: report whether a property has been set.
        '''
        pass

    def items(self):
        '''Stub: (key, value) pairs of custom (ID) properties.
        '''
        pass

    def keyframe_delete(self):
        '''Stub: delete a keyframe of a property.
        '''
        pass

    def keyframe_insert(self):
        '''Stub: insert a keyframe on a property.
        '''
        pass

    def keys(self):
        '''Stub: keys of custom (ID) properties.
        '''
        pass

    def path_from_id(self):
        '''Stub: RNA path relative to the owning ID datablock.
        '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        '''Stub: populate the menu from files found in searchpaths.
        '''
        pass

    def path_resolve(self):
        '''Stub: resolve an RNA data path.
        '''
        pass

    def pop(self):
        '''Stub: remove and return a custom (ID) property.
        '''
        pass

    def prepend(self, draw_func):
        '''Stub: add draw_func to the start of this menu type's draw functions.
        '''
        pass

    def property_overridable_library_set(self):
        '''Stub: enable/disable library overrides for a property.
        '''
        pass

    def property_unset(self):
        '''Stub: reset a property to its default.
        '''
        pass

    def remove(self, draw_func):
        '''Stub: remove draw_func from this menu type's draw functions.
        '''
        pass

    def type_recast(self):
        '''Stub: return this struct re-cast to its most derived type.
        '''
        pass

    def values(self):
        '''Stub: values of custom (ID) properties.
        '''
        pass
class VIEW3D_MT_edit_mesh_select_linked(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
| |
markers is None:
markers = [','] * data.shape[1]
if markers_sizes is None:
markers_sizes = [10] * data.shape[1]
if colors is None:
colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
if alphas is None:
alphas = data.shape[1] * [1]
if zorders is None:
zorders = data.shape[1] * [0]
while len(colors) < data.shape[1]:
colors += [tuple(np.random.random(3))]
if linewidths is None:
linewidths = [1] * data.shape[1]
lgd = None
for count in range(data.shape[1]):
if labels is not None:
axes.plot(data[:, count], label='%s' % labels[count],
color=colors[count],
linewidth=linewidths[count],
marker=markers[count], alpha=alphas[count],
zorder=zorders[count],
markersize=markers_sizes[count])
lgd = co.plot_oper.put_legend_outside_plot(axes,
already_reshaped=True)
else:
axes.plot(data[:, count],
color=colors[count],
linewidth=linewidths[count],
marker=markers[count], alpha=alphas[count],
zorder=zorders[count],
markersize=markers_sizes[count])
if title:
if info is not None:
plt.title(self.testname +
'\n Dataset: ' + self.testdataname +
'\n' + info.title())
else:
plt.title(self.testname +
'\n Dataset ' + self.testdataname)
info = ''
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if ylim is not None:
plt.ylim(ylim)
if xlim is not None:
plt.xlim(xlim)
if save:
self.save_plot(fig, lgd, display_all=display_all, info=info)
return fig, lgd, axes
def init_testing(self, data=None, online=True, save=True, load=True,
                 testname=None, scores_savepath=None,
                 scores_filter_shape=5,
                 std_small_filter_shape=co.CONST['STD_small_filt_window'],
                 std_big_filter_shape=co.CONST['STD_big_filt_window'],
                 testdatapath=None, save_results=True):
    '''
    Initializes paths and names used in testing to save, load and visualize
    data.
    Built as a convenience method, in case <self.run_testing> gets overriden.

    data: test-set identifier/path (only used offline, for the display name).
    online: True for frame-by-frame testing, False for offline batch testing.
    save/load: whether score results are to be written / read back.
    testname: optional override of the plot/report title.
    scores_savepath: explicit path for the pickled scores; derived from the
        classifier info when None.
    scores_filter_shape, std_*_filter_shape: filter window sizes stored on
        the instance for the score post-processing stage.
    testdatapath: path of a pseudo-online test set (online mode only).
    Returns True (callers treat a falsy return as "initialization failed").
    '''
    self.parameters['testing'] = True
    self.parameters['testing_params']['online'] = online
    # Reset whichever per-run state applies to the chosen mode.
    if online:
        self.reset_online_test()
    else:
        self.reset_offline_test()
    self.scores_filter_shape = scores_filter_shape
    self.std_small_filter_shape = std_small_filter_shape
    self.std_big_filter_shape = std_big_filter_shape
    self.online = online
    # Build the human-readable test name used in titles and catalogs.
    if testname is not None:
        self.testname = testname.title()
    else:
        self.testname = (self.name + ' ' + self.classifiers_used).title()
    if self.add_info is not None:
        self.testname += ' ' + self.add_info.title()
    self.parameters['testing_params']['current'] = self.testname
    # Display name of the dataset under test.
    if online:
        if testdatapath is not None:
            # Pseudo-online run on a recorded dataset.
            self.testdataname = ('online (using '
                                 + os.path.basename(testdatapath) + ')')
        else:
            self.testdataname = 'online'
    else:
        self.testdataname = os.path.basename(data)
    if not self.online:
        if self.test_ind is not None:
            # Look up (or allocate) the results-folder index for this test id
            # in the saved-tests catalog.
            available_tests_ids = co.file_oper.load_labeled_data(
                ['Testing'],
                just_catalog=True,
                include_all_catalog=True)
            if available_tests_ids is None:
                fold_name = '0'
            else:
                curr_test_id = self.tests_ids[self.available_tests.
                                              index(self.test_name)]
                if str(curr_test_id) in available_tests_ids:
                    fold_name = str(available_tests_ids[str(curr_test_id)])
                else:
                    fold_name = str(len(available_tests_ids))
        else:
            self.test_name = 'Online'
            try:
                fold_name = os.path.join(*[co.CONST['results_fold'],
                                           'Classification', 'Online'])
            except OSError:
                fold_name = '0'
        if self.test_ind is not None:
            self.save_fold = os.path.join(
                co.CONST['results_fold'], 'Classification', self.test_name,
                fold_name)
            co.makedir(self.save_fold)
    if save or load:
        # NOTE(review): fold_name is overwritten here but never read again
        # inside this method — presumably a leftover; verify before removing.
        fold_name = self.classifier_folder
        if scores_savepath is None:
            # Derived name: "<dataset>_scores_for_<full classifier info>.pkl"
            self.scores_savepath = self.testdataname + '_scores_for_'
            self.scores_savepath += self.full_info.replace(' ',
                                                           '_').lower()
            self.scores_savepath += '.pkl'
        else:
            self.scores_savepath = scores_savepath
    return True
def run_testing(self, data=None, derot_angle=None, derot_center=None,
                online=True,
                scores_filter_shape=5,
                std_small_filter_shape=co.CONST['STD_small_filt_window'],
                std_big_filter_shape=co.CONST['STD_big_filt_window'],
                ground_truth_type=None,
                img_count=None, save=True, scores_savepath=None,
                load=False, testname=None, display_scores=True,
                construct_gt=True, just_scores=False, testdatapath=None,
                compute_perform=True,
                save_results=True):
    '''
    Test Classifiers using data (.png files) located in <data>. If <online>, the
    testing is online, with <data> being a numpy array, which has been
    firstly processed by <hand_segmentation_alg>. The scores retrieved from
    testing are filtered using a box filter of shape <box_filter_shape>.
    The running mean along a buffer
    of the data is computed with a running window of length
    <mean_filter_shape>. The ground truth for the testing data is given by
    <ground_truth_type> (for further info about the variable refer to
    <co.gd_oper.construct_ground_truth>). If the training is online, the count of
    the frame is passed by <img_count>. If <save> is True,
    testing results are saved to <scores_savepath>, or a path constructed
    by the configuration. <testname> overrides the first line of the plots.
    If <load> is True and <scores_save_path> exists, testing is bypassed and all the
    necessary results are loaded from memory. If <just_scores> is True, the
    classification stage is not done and only scores are computed. If
    <testdatapath> is not <None> and <online> is True, then it will be
    assumed that a pseudoonline testing is taking place.

    Returns (True, scores) offline, or (scores_exist, score) online;
    returns False if initialization fails.
    '''
    loaded = False
    if not online:
        # Resolve <data> to a known test index/name; accepted forms are a
        # catalog name, a path whose basename is a catalog name, or a single
        # action name.
        LOG.info('Testing:' + data)
        try:
            self.test_ind = self.available_tests.index(data)
            self.test_name = data
        except BaseException:
            # NOTE(review): BaseException also catches KeyboardInterrupt;
            # ValueError from .index() is presumably what was meant.
            if data.split(os.sep)[-1] in self.available_tests:
                self.test_ind = (
                    self.available_tests.index(data.split(os.sep)[-1]))
                self.test_name = data.split(os.sep)[-1]
            elif data in self.dynamic_actions or data in self.passive_actions:
                self.test_ind = None
            elif data.split(os.sep)[-1] in self.dynamic_actions or \
                    data.split(os.sep)[-1] in self.passive_actions:
                self.test_ind = None
            else:
                raise Exception('test data must be inside test_save_path,' +
                                ' check config.yaml')
        # Default ground-truth CSV lives next to the other ground truths.
        if construct_gt and ground_truth_type is None:
            ground_truth_type = os.path.join(
                co.CONST['ground_truth_fold'],
                self.test_name + '.csv')
    elif isinstance(data, tuple):
        # Online input may come packed as (frame, derotation angle, center).
        derot_angle = data[1]
        derot_center = data[2]
        data = data[0]
    # (Re-)initialize paths/names; offline runs re-initialize every time.
    if not self.testing_initialized or not online:
        if not self.init_testing(data=data,
                                 online=online,
                                 save=save,
                                 load=load,
                                 testname=testname,
                                 scores_savepath=scores_savepath,
                                 scores_filter_shape=scores_filter_shape,
                                 std_small_filter_shape=std_small_filter_shape,
                                 std_big_filter_shape=std_big_filter_shape,
                                 testdatapath=testdatapath,
                                 save_results=save_results):
            return False
    if not online:
        # Try to reuse previously computed scores when allowed.
        if self.test_ind is not None and (
                load and self.accuracies[self.available_tests.index(self.test_name)]
                is not None):
            LOG.info('Tests already performed, loaded data')
            try:
                self.scores = self.results['Scores']
                loaded = True
            except:
                # NOTE(review): bare except silently ignores any failure and
                # falls through to recomputation; narrow if possible.
                pass
        if not loaded:
            # Compute features for the whole test set, then classify only the
            # rows with finite features; the rest stay NaN in self.scores.
            if self.test_ind is not None:
                testdata = self.offline_testdata_processing(
                    os.path.join(co.CONST['test_save_path'],
                                 self.test_name))
            else:
                testdata = self.offline_testdata_processing(
                    data)
            try:
                self.test_ind = self.available_tests.index(data)
            except BaseException:
                self.test_ind = None
            LOG.info(self.full_info + ':')
            LOG.info('Testing Classifiers using testdata with size: '
                     + str(testdata.shape))
            # Row mask of samples whose features are all finite.
            fmask = np.prod(np.isfinite(testdata), axis=1).astype(bool)
            fin_scores = self.unified_classifier.decide(
                testdata[fmask, :])
            self.scores = np.zeros(
                (testdata.shape[0], fin_scores.shape[1]))
            # None assigned into a float array becomes NaN.
            self.scores[:] = None
            self.scores[fmask] = fin_scores
            if self.test_ind is not None:
                self.testdata[self.test_ind]['Results']['Scores'] = self.scores
        if construct_gt:
            LOG.info('Constructing ground truth vector..')
            self.test_ground_truth, self.test_breakpoints = co.gd_oper.construct_ground_truth(
                os.path.join(co.CONST['test_save_path'], self.test_name),
                classes_namespace=self.train_classes,
                length=self.scores.shape[0],
                ground_truth_type=ground_truth_type,
                ret_breakpoints=True)
            utterances_inds = co.gd_oper.merge_utterances_vectors(
                co.gd_oper.create_utterances_vectors(
                    self.test_breakpoints, len(self.test_ground_truth)),
                self.train_classes)
        if not just_scores:
            # Full classification + evaluation against the ground truth.
            # NOTE(review): utterances_inds is only bound when construct_gt is
            # True — this branch presumably assumes construct_gt; verify.
            self.classify_offline(save=save, display=display_scores,
                                  compute_perform=compute_perform,
                                  extraction_method=
                                  self.parameters[
                                      'testing_params']['post_scores_processing_method'])
            self.correlate_with_ground_truth(save=save,
                                             display=display_scores,
                                             compute_perform=compute_perform,
                                             utterances_inds=utterances_inds)
            self.display_scores_and_time(save=save)
        if self.test_ind is not None:
            co.file_oper.save_labeled_data(['Testing'] + self.tests_ids[
                self.test_ind], self.testdata[self.test_ind])
        if not just_scores:
            if display_scores:
                # Extra diagnostic plots for the CSTD post-processing method.
                if self.parameters['testing_params'][
                        'post_scores_processing_method'] == 'CSTD':
                    self.plot_result(np.concatenate((
                        self.less_filtered_scores_std[:, None],
                        self.high_filtered_scores_std[:, None]), axis=1),
                        info='Scores Statistics',
                        xlabel='Frames',
                        labels=['STD', 'STD Mean'],
                        colors=['r', 'g'],
                        save=save)
                    # Normalized difference of the two STD curves, used as an
                    # indicator of action start/end points.
                    mean_diff = (np.array(self.high_filtered_scores_std) -
                                 np.array(self.less_filtered_scores_std))
                    mean_diff = (mean_diff) / float(np.max(np.abs(mean_diff[
                        np.isfinite(mean_diff)])))
                    plots = [mean_diff]
                    labels = ['ScoresSTD - ScoresSTDMean']
                    if self.test_ground_truth is not None:
                        # Center and scale the ground truth to overlay it.
                        plots += [((self.test_ground_truth - np.mean(self.test_ground_truth[
                            np.isfinite(self.test_ground_truth)])) / float(
                            np.max(self.test_ground_truth[
                                np.isfinite(self.test_ground_truth)])))[:, None]]
                        labels += ['Ground Truth']
                    self.plot_result(np.concatenate(plots, axis=1), labels=labels,
                                     info='Metric of actions starting and ending ' +
                                     'points', xlabel='Frames', save=save)
            if display_scores:
                self.plot_result(self.scores,
                                 labels=self.train_classes,
                                 info='Scores',
                                 xlabel='Frames',
                                 save=save,
                                 )
        return True, self.scores
    else:
        '''
        input is processed from hand_segmentation_alg (any data
        processed in such way, that the result is the same with my processing,
        is acceptable, eg. Dexter)
        There must be a continuous data streaming (method called in every
        loop), even if the result of the previous algorithm is None
        '''
        scores_exist, score = self.process_online_data(data, img_count,
                                                       derot_angle,
                                                       derot_center,
                                                       just_scores=just_scores)
        return scores_exist, score
def visualize_action(self, action, save=True,
                     save_name=None, *args, **kwargs):
    '''
    Visualizes action or a testing dataset using predefined locations in
    config.yaml and the method co.draw_oper.plot_utterances.

    action: name of the action/dataset (underscores allowed; mapped to the
        title-cased folder name on disk).
    save: when True the resulting figure is written as a PDF under
        <results_loc>.
    save_name: optional basename for the PDF; defaults to 'Full' + action.
    Extra *args/**kwargs are forwarded to co.draw_oper.plot_utterances.
    Returns the created matplotlib figure.
    '''
    dataset_loc = '/media/vassilis/Thesis/Datasets/PersonalFarm/'
    results_loc = '/home/vassilis/Thesis/KinectPainting/Results/DataVisualization'
    ground_truth, breakpoints, labels = co.gd_oper.load_ground_truth(
        action, ret_labs=True, ret_breakpoints=True)
    testing = True
    images_base_loc = os.path.join(dataset_loc, 'actions',
                                   'sets' if not testing else 'whole_result')
    images_loc = os.path.join(
        images_base_loc, action.replace('_', ' ').title())
    imgs, masks, sync, angles, centers, samples_indices = co.imfold_oper.load_frames_data(
        images_loc, masks_needed=True)
    import cv2
    # First pass: find each frame's hand-blob center and the maximum
    # bounding-box dimensions across all frames.
    masks_centers = []
    xdim = 0
    ydim = 0
    for mask, img in zip(masks, imgs):
        # NOTE(review): [1] selects the contours list on OpenCV 3.x; on
        # OpenCV 2.x/4.x findContours returns (contours, hierarchy) and the
        # index would have to be [0] — confirm the installed version.
        conts = cv2.findContours(
            mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[1]
        conts_areas = [cv2.contourArea(cont) for cont in conts]
        # Skip frames with (almost) no foreground inside the mask.
        if np.sum(mask * img > 0) < 500:
            masks_centers.append(None)
        else:
            cont = conts[np.argmax(conts_areas)]
            x, y, w, h = cv2.boundingRect(cont)
            if w == 0 or h == 0:
                masks_centers.append(None)
            else:
                # Integer division keeps centers usable as indices (the
                # original '/' produced floats under Python 3).
                masks_centers.append([y + h // 2, x + w // 2])
                xdim = max(w, xdim)
                ydim = max(h, ydim)
    # Second pass: crop a fixed-size (ydim, xdim) window around each center,
    # zero-padded at image borders.
    cropped_imgs = []
    for img, center in zip(imgs, masks_centers):
        if center is not None:
            # Bugfix: the column clamp used img.shape[0] (rows) instead of
            # img.shape[1] (columns).
            cropped_img = img[max(0, center[0] - ydim // 2):
                              min(img.shape[0], center[0] + ydim // 2),
                              max(0, center[1] - xdim // 2):
                              min(img.shape[1], center[1] + xdim // 2)]
            inp_img = np.zeros((ydim, xdim))
            inp_img[:cropped_img.shape[0],
                    :cropped_img.shape[1]] = cropped_img
            cropped_imgs.append(inp_img)
        else:
            cropped_imgs.append(None)
    fig = co.draw_oper.plot_utterances(frames=cropped_imgs,
                                       frames_sync=sync,
                                       ground_truth=ground_truth,
                                       breakpoints=breakpoints,
                                       labels=labels,
                                       dataset_name=action,
                                       *args, **kwargs)
    if save:
        if save_name is None:
            save_name = 'Full' + action
        fig.savefig(os.path.join(results_loc,
                                 save_name + '.pdf'))
    return fig
'''
categories_to_zoom=None,
#categories_to_zoom = self.dynamic_actions,
show_breaks=True, show_occ_tab=False,
show_zoomed_occ=True, show_im_examples=False,
show_fig_title=True,
examples_num=15
'''
def apply_to_training(
self, method, excluded_actions=None, *args, **kwargs):
'''
Apply a method to training data
'''
prev_root = ''
prev_action = ''
res = []
actions = (self.passive_actions +
self.dynamic_actions)
if excluded_actions is not None:
for action in excluded_actions:
actions.remove(action)
paths = os.listdir(co.CONST['actions_path'])
for action in actions:
if action not in paths:
actions.remove(action)
if not actions:
raise Exception('Badly given actions_path in config.yaml')
dirs = [os.path.join(co.CONST['actions_path'], action) for action in
actions]
for direc in dirs:
for root, dirs, _ in os.walk(direc):
separated_root = os.path.normpath(
root).split(
os.path.sep)
if root != prev_root and str.isdigit(
separated_root[-1]) and separated_root[
-2] != co.CONST['hnd_mk_fold_name']:
prev_root = root
if separated_root[-2] == | |
= ['i', 'information', 'INFO', 'INFORMATION'])
async def info(ctx, member: discord.Member = None):
member = member or ctx.author
delta1 = datetime.timedelta(hours = 3, minutes = 0)
mesinf = ctx.message.created_at + delta1
nowtime1 = mesinf.strftime("%X")
avatar = member.avatar_url
info1 = member.created_at + delta1
info_format = info1.strftime('%d.%m.%Y @ %X по МСК')
info2 = member.joined_at + delta1
info1_format = info2.strftime('%d.%m.%Y @ %X по МСК')
roles = [role.mention for role in member.roles[1:]]
status = str(member.status)
embed = discord.Embed(description = f'Сейчас {status}', color = 0x428325)
embed.set_author(name = f'Информация о {member.name}#{member.discriminator}', icon_url = avatar)
embed.add_field(name = "Аккаунт создан: ", value = f'`{info_format}`', inline = False)
embed.add_field(name = "Когда присоединился: ", value = f'`{info1_format}`', inline = False)
embed.add_field(name = "Имя юзера и его тег: ", value = f'`{member.name}#{member.discriminator}`', inline = False)
embed.add_field(name = "Никнейм на Dark Neon City: ", value = f'`{member.display_name}`', inline = False)
embed.add_field(name = f"Роли [{len(member.roles) - 1}]: ", value = ' '.join(reversed(roles)), inline = False)
embed.set_thumbnail(url = avatar)
embed.set_footer(text = f"supports by quantprod | Сегодня, в {nowtime1} по МСК")
await ctx.send(embed = embed)
# Message-deletion commands
@Bot.command(aliases = ['c_m', 'CLEAR_MEMBER'])
@commands.has_any_role("admin", "Смотрящий", "elite")
async def clear_member(ctx, user: discord.Member, amount = 15):
    # Purge the invoking command message, then the target user's messages.
    await ctx.message.delete()

    def written_by_target(msg):
        return msg.author == user

    await ctx.channel.purge(limit = amount, check = written_by_target)
    # Post a confirmation embed and take it down again after five seconds.
    author = ctx.message.author
    confirmation = await ctx.send(embed = discord.Embed(
        description = f'✅ {author.mention}, *удаление сообщений юзера прошло успешно!*',
        color = 0x428325))
    await asyncio.sleep(5)
    await confirmation.delete()
@Bot.command(aliases = ['c', 'CLEAR'])
@commands.has_any_role("admin", "Смотрящий", "elite")
async def clear(ctx, amount = 30):
    # Purge the invoking command message plus the last `amount` messages.
    await ctx.message.delete()
    await ctx.channel.purge(limit = amount)
    # Post a confirmation embed and take it down again after five seconds.
    author = ctx.message.author
    confirmation = await ctx.send(embed = discord.Embed(
        description = f'✅ {author.mention}, *удаление сообщений прошло успешно!*',
        color = 0x428325))
    await asyncio.sleep(5)
    await confirmation.delete()
# Role assignment/removal via the bot
@Bot.command(aliases = ['remove_role'])
@commands.has_any_role("admin", "Смотрящий")
async def add_role(ctx, member: discord.Member, role = None, *, roles = None):
delta1 = datetime.timedelta(hours = 3, minutes = 0)
mesinf = ctx.message.created_at + delta1
nowtime1 = mesinf.strftime("%X")
await ctx.message.delete()
channel = Bot.get_channel(526464840672346112) #логи
author = ctx.message.author
role_vip = discord.utils.get(ctx.guild.roles, name = "Vip")
role_moder = discord.utils.get(ctx.guild.roles, name = "moder")
role_dmoder = discord.utils.get(ctx.guild.roles, name = "dmoder")
role_cyber = discord.utils.get(ctx.guild.roles, name = "Cyber")
role_unit = discord.utils.get(ctx.guild.roles, name = "Unit")
role_admin = discord.utils.get(ctx.guild.roles, name = "admin")
if role == 'Vip':
await member.add_roles(role_vip)
if roles == 'Unit':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `Vip` **с роли** `Unit`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `Vip` на сервере Dark Neon City!**')
await member.remove_roles(role_unit)
elif roles == 'Cyber':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `Vip` **с роли** `Cyber`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `Vip` на сервере Dark Neon City!**')
await member.remove_roles(role_cyber)
elif roles == 'dmoder':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Vip` **с роли** `dmoder`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Vip` на сервере Dark Neon City!**')
await member.remove_roles(role_dmoder)
elif roles == 'moder':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Vip` **с роли** `moder`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Vip` на сервере Dark Neon City!**')
await member.remove_roles(role_moder)
elif roles == 'admin':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Vip` **с роли** `admin`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Vip` на сервере Dark Neon City!**')
await member.remove_roles(role_admin)
elif role == 'admin':
await member.add_roles(role_admin)
if roles == 'Unit':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `admin` **с роли** `Unit`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `admin` на сервере Dark Neon City!**')
await member.remove_roles(role_unit)
elif roles == 'Cyber':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `admin` **с роли** `Cyber`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `admin` на сервере Dark Neon City!**')
await member.remove_roles(role_cyber)
elif roles == 'dmoder':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `admin` **с роли** `dmoder`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `admin` на сервере Dark Neon City!**')
await member.remove_roles(role_dmoder)
elif roles == 'Vip':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `admin` **с роли** `Vip`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `admin` на сервере Dark Neon City!**')
await member.remove_roles(role_vip)
elif roles == 'moder':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `admin` **с роли** `moder`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `admin` на сервере Dark Neon City!**')
await member.remove_roles(role_vip)
elif role == 'moder':
await member.add_roles(role_moder)
if roles == 'Unit':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `moder` **с роли** `Unit`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `moder` на сервере Dark Neon City!**')
await member.remove_roles(role_unit)
elif roles == 'Cyber':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `moder` **с роли** `Cyber`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `moder` на сервере Dark Neon City!**')
await member.remove_roles(role_cyber)
elif roles == 'dmoder':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `moder` **с роли** `dmoder`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `moder` на сервере Dark Neon City!**')
await member.remove_roles(role_dmoder)
elif roles == 'Vip':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `moder` **с роли** `Vip`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `moder` на сервере Dark Neon City!**')
await member.remove_roles(role_vip)
elif roles == 'admin':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `moder` **с роли** `admin`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `moder` на сервере Dark Neon City!**')
await member.remove_roles(role_admin)
elif role == 'dmoder':
await member.add_roles(role_dmoder)
if roles == 'Unit':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `dmoder` **с роли** `Unit`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `dmoder` на сервере Dark Neon City!**')
await member.remove_roles(role_unit)
elif roles == 'Cyber':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `dmoder` **с роли** `Cyber`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `dmoder` на сервере Dark Neon City!**')
await member.remove_roles(role_cyber)
elif roles == 'moder':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `dmoder` **с роли** `moder`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `dmoder` на сервере Dark Neon City!**')
await member.remove_roles(role_moder)
elif roles == 'Vip':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `dmoder` **с роли** `Vip`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `dmoder` на сервере Dark Neon City!**')
await member.remove_roles(role_vip)
elif roles == 'admin':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `dmoder` **с роли** `admin`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `dmoder` на сервере Dark Neon City!**')
await member.remove_roles(role_admin)
elif role == 'Cyber':
await member.add_roles(role_cyber)
if roles == 'Unit':
await channel.send(f'**Пользователь** {author.mention} **повысил** `{member.name}#{member.discriminator}` **до роли** `Cyber` **с роли** `Unit`, **сегодня в** `{nowtime1}`.')
await member.send('**Поздравляю, теперь ты `Cyber` на сервере Dark Neon City!**')
await member.remove_roles(role_unit)
elif roles == 'moder':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Cyber` **с роли** `moder`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Cyber` на сервере Dark Neon City!**')
await member.remove_roles(role_moder)
elif roles == 'dmoder':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Cyber` **с роли** `dmoder`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Cyber` на сервере Dark Neon City!**')
await member.remove_roles(role_dmoder)
elif roles == 'Vip':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Cyber` **с роли** `Vip`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Cyber` на сервере Dark Neon City!**')
await member.remove_roles(role_vip)
elif roles == 'admin':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Cyber` **с роли** `admin`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Cyber` на сервере Dark Neon City!**')
await member.remove_roles(role_admin)
elif role == 'Unit':
await member.add_roles(role_unit)
if roles == 'Cyber':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Unit` **с роли** `Cyber`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Unit` на сервере Dark Neon City!**')
await member.remove_roles(role_cyber)
elif roles == 'moder':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Unit` **с роли** `moder`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Unit` на сервере Dark Neon City!**')
await member.remove_roles(role_moder)
elif roles == 'dmoder':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Unit` **с роли** `dmoder`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Unit` на сервере Dark Neon City!**')
await member.remove_roles(role_dmoder)
elif roles == 'Vip':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Unit` **с роли** `Vip`, **сегодня в** `{nowtime1}`.')
await member.send('**Упс, но теперь ты `Unit` на сервере Dark Neon City!**')
await member.remove_roles(role_vip)
elif roles == 'admin':
await channel.send(f'**Пользователь** {author.mention} **понизил** `{member.name}#{member.discriminator}` **до роли** `Unit` **с роли** `admin`, | |
"""Numeric routines
Author: Dr.-Ing. <NAME>
This module offers a collection of tensor routines using numpy.
"""
# %% Import
import numpy as np
import tensor_routines as tr
# %% Products
def td(a: np.ndarray, b: np.ndarray, n: int) -> np.ndarray:
    """Tensor dot.

    Thin wrapper around ``np.tensordot`` that contracts the last `n`
    axes of `a` with the first `n` axes of `b`.

    Parameters
    ----------
    a : np.ndarray
        First tensor.
    b : np.ndarray
        Second tensor.
    n : int
        Number of neighbouring axes to contract.

    Returns
    -------
    np.ndarray
        Contraction result.

    Examples
    --------
    Contract 2 neighbour axes

    >>> a = np.random.rand(2, 3, 4, 5)
    ... b = np.random.rand(4, 5, 6, 7)
    ... c = td(a, b, 2)  # c_ijkl = a_ijmn b_mnkl
    ... print(c.shape)
    (2, 3, 6, 7)
    """
    return np.tensordot(a, b, axes=n)
def sp(a: np.ndarray, b: np.ndarray) -> float:
    """Scalar product.

    Full contraction of two tensors of equal shape, i.e. the sum of
    the elementwise products a_ijkl... b_ijkl...

    Parameters
    ----------
    a : np.ndarray
        Tensor.
    b : np.ndarray
        Tensor with ``b.shape == a.shape``.

    Returns
    -------
    float
        a_ijkl... b_ijkl...

    Examples
    --------
    >>> a = np.random.rand(2, 3, 4)
    ... b = np.random.rand(2, 3, 4)
    ... c = sp(a, b)  # c = a_ijk b_ijk
    """
    # Contracting over all axes of `a` yields a scalar.
    return np.tensordot(a, b, axes=a.ndim)
def nf(a: np.ndarray) -> float:
    """Frobenius norm.

    Square root of the sum of all squared tensor components of a
    real-valued tensor, delegated to ``np.linalg.norm``.

    Parameters
    ----------
    a : np.ndarray
        Tensor.

    Returns
    -------
    float
        Frobenius norm of `a`.

    Examples
    --------
    >>> a = np.random.rand(2, 3, 4)
    ... nf(a) - np.sqrt(np.sum(a**2))
    """
    return np.linalg.norm(a)
def tp(*args) -> np.ndarray:
    """Tensor product.

    Left-folds ``np.tensordot(..., 0)`` over the argument list, so
    ``tp(a0, a1, a2) == tp(tp(a0, a1), a2)``.

    Parameters
    ----------
    args : sequence
        Sequence of tensors.

    Returns
    -------
    np.ndarray
        Tensor product of all arguments.

    Examples
    --------
    >>> a = np.random.rand(2, 3, 4)
    ... b = np.random.rand(7, 8, 9)
    ... c = tp(a, b)  # c_ijkmno = a_ijk b_mno
    ... c.shape == (2, 3, 4, 7, 8, 9)
    """
    out = args[0]
    # axes=0 performs an outer product, appending the factor's axes.
    for factor in args[1:]:
        out = np.tensordot(out, factor, 0)
    return out
def lm(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Linear map.

    Contracts all axes of `b` with the trailing axes of `a`, in order.
    E.g. for a fifth-order `a` and third-order `b`:
    c_ij = a_ijmno b_mno.

    Parameters
    ----------
    a : np.ndarray
        Tensor.
    b : np.ndarray
        Tensor with ``a.ndim >= b.ndim`` and matching common axes.

    Returns
    -------
    np.ndarray
        Result of the linear map.

    Examples
    --------
    >>> a = np.random.rand(2, 3, 4, 5, 6)
    ... b = np.random.rand(4, 5, 6)
    ... c = lm(a, b)
    ... c.shape == (2, 3)
    True
    """
    # Contract as many trailing axes of `a` as `b` has axes in total.
    return np.tensordot(a, b, axes=b.ndim)
def rp(q: np.ndarray, a: np.ndarray) -> np.ndarray:
    """Rayleigh product.

    Compute the Rayleigh product ``b = rp(q, a)`` defined as

        b_{i_1 i_2 ...} = q_{i_1 j_1} q_{i_2 j_2} ... a_{j_1 j_2 ...}

    where the second-order tensor q in R^{n_out x n_in} maps
    a in R^{n_in x n_in x ...} to b in R^{n_out x n_out x ...}.

    Implementation: each loop pass contracts q into the LAST axis of
    `a` and then rolls that new axis to the front, so after `a.ndim`
    passes every axis has been transformed exactly once and the axis
    order is restored.

    Parameters
    ----------
    q : np.ndarray
        Second-order tensor.
    a : np.ndarray
        Tensor with constant axis dimension, e.g., a.shape = (4, 4, 4, 4).

    Returns
    -------
    np.ndarray
        Rayleigh product.
    """
    n = a.ndim
    # Cyclic permutation that moves the newly contracted axis to the front.
    p = [n-1] + list(range(n-1)) # index permutation per Q
    for _ in range(n):
        # td(a, q.T, 1) contracts the last axis of `a` with q's output axis.
        a = np.transpose(td(a, np.transpose(q), 1), p)
    return a
# %% Transpositions
def tt(a: np.ndarray, t: list) -> np.ndarray:
    """Tensor dimension transposition.

    Shortcut for ``np.transpose(a, t)``.

    Note that numpy permutes DIMENSIONS, not indices: the result's
    axis k is the input's axis ``t[k]``, i.e.
    ``tt(a, t)[*i_t] == a[i_0, i_1, ...]`` with
    ``i_t = (i_{t_0}, i_{t_1}, ...)``. This is the inverse convention
    of an index transposition (e.g. Mathematica's TensorTranspose).

    Parameters
    ----------
    a : np.ndarray
        Any numpy array.
    t : list
        Dimension permutation with ``a.ndim == len(t)``.

    Returns
    -------
    np.ndarray
        Transposed array.

    Examples
    --------
    >>> a = np.random.rand(2, 3, 4, 5)
    ... tt(a, (2, 0, 3, 1)).shape == (4, 2, 5, 3)
    True
    """
    return np.transpose(a, t)
def tt_r(a: np.ndarray) -> np.ndarray:
    """Right index pair transposition.

    Transpose the right (last) index pair of the fourth-order tensor `a`.

    Parameters
    ----------
    a : np.ndarray
        Fourth-order tensor, a_ijkl

    Returns
    -------
    np.ndarray
        Fourth-order tensor, a_ijlk
    """
    # Permutation (0, 1, 3, 2) swaps only the last two axes (k <-> l).
    return tt(a, (0, 1, 3, 2))
def tt_l(a: np.ndarray) -> np.ndarray:
    """Left index pair transposition.

    Transpose the left (first) index pair of the fourth-order tensor `a`.

    Parameters
    ----------
    a : np.ndarray
        Fourth-order tensor, a_ijkl

    Returns
    -------
    np.ndarray
        Fourth-order tensor, a_jikl
    """
    # Permutation (1, 0, 2, 3) swaps only the first two axes (i <-> j).
    return tt(a, (1, 0, 2, 3))
def tt_m(a: np.ndarray) -> np.ndarray:
    """Major transposition.

    Exchange the left and the right index pair of a fourth-order
    tensor: a_ijkl -> a_klij.

    Parameters
    ----------
    a : np.ndarray
        Fourth-order tensor, a_ijkl

    Returns
    -------
    np.ndarray
        Fourth-order tensor, a_klij
    """
    return np.transpose(a, (2, 3, 0, 1))
# %% Symmetrizations
def sym_2(a: np.ndarray) -> np.ndarray:
    """Symmetric part of a second-order tensor.

    Parameters
    ----------
    a : np.ndarray
        Second-order tensor.

    Returns
    -------
    np.ndarray
        Symmetric part, (a + a^T)/2.
    """
    transposed = np.transpose(a)
    return (a + transposed)/2
def sym_r(a: np.ndarray) -> np.ndarray:
    """Symmetrize right index pair.

    Average the fourth-order tensor a_ijkl with its right-pair
    transposition a_ijlk.

    Parameters
    ----------
    a : np.ndarray
        Fourth-order tensor, a_ijkl

    Returns
    -------
    np.ndarray
        Fourth-order tensor, symmetric with respect to kl.
    """
    right_swapped = np.transpose(a, (0, 1, 3, 2))
    return (a + right_swapped)/2
def sym_l(a: np.ndarray) -> np.ndarray:
    """Symmetrize left index pair.

    Average the fourth-order tensor a_ijkl with its left-pair
    transposition a_jikl.

    Parameters
    ----------
    a : np.ndarray
        Fourth-order tensor, a_ijkl

    Returns
    -------
    np.ndarray
        Fourth-order tensor, symmetric with respect to ij.
    """
    left_swapped = np.transpose(a, (1, 0, 2, 3))
    return (a + left_swapped)/2
def sym_lr(a: np.ndarray) -> np.ndarray:
    """Symmetrize left and right index pair.

    Symmetrize the fourth-order tensor a_ijkl first in its left and
    then in its right index pair.

    Parameters
    ----------
    a : np.ndarray
        Fourth-order tensor, a_ijkl

    Returns
    -------
    np.ndarray
        Fourth-order tensor, symmetric with respect to ij and kl.
    """
    # Left-pair symmetrization, then right-pair symmetrization
    # (same operation order as sym_r(sym_l(a))).
    left_sym = (a + np.transpose(a, (1, 0, 2, 3)))/2
    return (left_sym + np.transpose(left_sym, (0, 1, 3, 2)))/2
def sym_m(a: np.ndarray) -> np.ndarray:
    """Symmetrize major index pair.

    Average the fourth-order tensor a_ijkl with its major
    transposition a_klij.

    Parameters
    ----------
    a : np.ndarray
        Fourth-order tensor, a_ijkl

    Returns
    -------
    np.ndarray
        Fourth-order tensor fulfilling a_ijkl = a_klij.
    """
    major_swapped = np.transpose(a, (2, 3, 0, 1))
    return (a + major_swapped)/2
def sym_mm(a: np.ndarray) -> np.ndarray:
    """Symmetrize minor and major index pairs of a fourth-order tensor.

    Parameters
    ----------
    a : np.ndarray
        Fourth-order tensor, a_ijkl.

    Returns
    -------
    np.ndarray
        Fourth-order tensor fulfilling a_ijkl = a_jikl = a_klij.
    """
    # Same operation order as sym_m(sym_lr(a)): left pair, right pair,
    # then the major pair.
    left_sym = (a + np.transpose(a, (1, 0, 2, 3)))/2
    minor_sym = (left_sym + np.transpose(left_sym, (0, 1, 3, 2)))/2
    return (minor_sym + np.transpose(minor_sym, (2, 3, 0, 1)))/2
# %% Isotropic
def iso_ev(a: np.ndarray) -> np.ndarray:
    """Project eigenvalues of an isotropic tensor.

    For a second-order tensor ``a = lambda ID_2 + aniso_part`` the
    scalar ``lambda`` is returned. For a fourth-order tensor
    ``a = sum_i lambda_i P_ISOi + aniso_part`` the array of the
    ``lambda_i`` is returned.

    Parameters
    ----------
    a : np.ndarray
        Isotropic tensor.
        * Second order: a.shape == (3, 3)
        * Fourth order: a.shape == (3, 3, 3, 3)

    Returns
    -------
    np.ndarray
        * Second order: lambda
        * Fourth order: 1D-array with [lambda_1,2,3]

    Examples
    --------
    >>> iso_ev(7*np.eye(3)) == 7
    True
    >>> (iso_ev(13*P_ISO_1 + 20*P_ISO_2) == np.array([13, 20])).all()
    True
    """
    if a.shape == (3, 3, 3, 3):
        # Fourth order: normalized projection onto each isotropic projector.
        return np.array([
            np.tensordot(a, proj, a.ndim)/np.tensordot(proj, proj, proj.ndim)
            for proj in P_ISO
        ])
    # Second order: lambda = (a : ID_2)/3.
    return np.tensordot(a, ID_2, a.ndim)/3
def iso_t(lam: np.ndarray) -> np.ndarray:
"""Isotropic tensor.
Create an isotropic second- or fourth-order tensor from the
given eigenvalue(s).
Parameters
----------
lam : float or np.ndarray
* Second order: float corresponding to lambda for lambda ID_2
* Fourth order: 1D-array = [lambda_1,2|3] for
sum_{i=1}^3 lambda_i P_ISO_i
* [lambda_1, lambda_2] for only two first isotropic projects
* [lambda_1,2,3] for all isotropic | |
import matplotlib.pyplot as plt
from matplotlib import pyplot
import numpy as np
np.set_printoptions(suppress=True)
import os
import pickle
import sys
import json
from importlib import reload
reload(pyplot)
sys.path.insert(0, './src')
# Column layout of the history_future_usc array: feature name -> column index.
col_names = [
    'episode_id', 'time_step',
    'e_veh_id', 'f_veh_id', 'm_veh_id',
    'm_veh_exists', 'e_veh_att',
    'e_veh_speed', 'f_veh_speed', 'm_veh_speed',
    'e_veh_action', 'f_veh_action', 'm_veh_action',
    'aggressiveness',
    'desired_v', 'desired_tgap', 'min_jamx', 'max_act', 'min_act',
    'el_delta_v', 'el_delta_x', 'em_delta_v', 'em_delta_x',
    'em_delta_y', 'delta_x_to_merge']
hf_usc_indexs = {name: index for index, name in enumerate(col_names)}
# %%
"""
Needed methods
"""
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
def latent_samples(model, sample_index):
    """Sample latent codes from the model prior for the given samples.

    Defect fixed: the original computed a local `merger_cs` slice of
    `future_m_veh_c` that was never used; the dead code is removed.

    Parameters
    ----------
    model :
        Trained model exposing `h_seq_encoder` and `belief_net`.
    sample_index : array-like
        Row indices into the module-level `history_sca` array.

    Returns
    -------
    list
        `[z_idm, z_att]` latent samples drawn from the prior.
    """
    # Drop the two leading id columns (episode_id, time_step).
    h_seq = history_sca[sample_index, :, 2:]
    enc_h = model.h_seq_encoder(h_seq)
    latent_dis_param = model.belief_net(enc_h, dis_type='prior')
    z_idm, z_att = model.belief_net.sample_z(latent_dis_param)
    return [z_idm, z_att]
def latent_vis(zsamples_n):
    """Scatter-plot `zsamples_n` prior latent samples in 3D.

    Draws latent samples for randomly chosen validation examples and
    colour-codes each point by the episode's aggressiveness value.
    Relies on the module-level globals `val_samples`, `model`,
    `history_future_usc` and `hf_usc_indexs`.

    NOTE(review): indexing `sampled_z[:, 2]` assumes the idm latent
    space has at least 3 dimensions — confirm against the model config.
    """
    fig = pyplot.figure(figsize=(4, 4))
    examples_to_vis = np.random.choice(val_samples, zsamples_n, replace=False)
    # [0] selects z_idm from the [z_idm, z_att] pair.
    sampled_z = latent_samples(model, examples_to_vis)[0].numpy()
    #===============
    # First subplot
    #===============
    # set up the axes for the first plot
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    ax.xaxis.set_tick_params(pad=1, which='both')
    ax.yaxis.set_tick_params(pad=1, which='both')
    ax.zaxis.set_tick_params(pad=1, which='both')
    # x ticks
    ax.set_xticks([-2, 0, 2], minor=False)
    ax.set_xlim(-2.5, 2.5)
    # y ticks
    ax.set_yticks([-4, -2, 0, 2], minor=False)
    ax.set_ylim(-4.5, 2.5)
    # z ticks
    ax.set_zticks([-6, -3, 0, 3], minor=False)
    ax.set_zlim(-6.5, 3.5)
    ax.minorticks_off()
    # Colour each latent sample by its episode's aggressiveness.
    aggressiveness = history_future_usc[examples_to_vis, 0, hf_usc_indexs['aggressiveness']]
    color_shade = aggressiveness
    latent_plot = ax.scatter(sampled_z[:, 0], sampled_z[:, 1], sampled_z[:, 2],
                             s=5, c=color_shade, cmap='rainbow', edgecolors='black', linewidth=0.2)
    # axins = inset_axes(ax,
    #                     width="5%",
    #                     height="90%",
    #                     loc='right',
    #                     borderpad=-2
    #                    )
    #
    # fig.colorbar(latent_plot, cax=axins, ticks=np.arange(0, 1.1, 0.2))
    ax.grid(False)
    ax.view_init(30, 50)
    #===============
    # Second subplot
    #===============
    # set up the axes for the second plot
    # ax = fig.add_subplot(1, 2, 2, projection='3d')
    # ax.set_xticks([])
    # ax.set_yticks([])
    # ax.set_zticks([])
    # latent_plot = ax.scatter(sampled_z[:, 0], sampled_z[:, 1], sampled_z[:, 2],
    #                   s=5, c=color_shade, cmap='rainbow', edgecolors='black', linewidth=0.2)
    #
    # axins = inset_axes(ax,
    #                     width="5%",
    #                     height="90%",
    #                     loc='right',
    #                     borderpad=-2
    #                    )
    #
    # fig.colorbar(latent_plot, cax=axins)
    # cbar = fig.colorbar(latent_plot, cax=axins)
    # ax.tick_params(pad=1)
    # ax.grid(False)
    # ax.view_init(30, 50)
    # # ax.set_xlabel('$z_{1}$', labelpad=1)
    # # ax.set_ylabel('$z_{2}$', labelpad=1)
    # # ax.set_zlabel('$z_{3}$', labelpad=1)
    # plt.subplots_adjust(wspace=0.2, hspace=None)
    return ax
def vectorise(step_row, traces_n):
    """Tile a single-step row `traces_n` times along the first axis."""
    return np.repeat(step_row, repeats=traces_n, axis=0)
def fetch_traj(data, sample_index, colum_index):
    """Return the flattened state sequence for one feature column.

    Drops time step 29, the transition point between the history and
    the future segment of the sequence.
    """
    # data shape: [sample_index, time, feature]
    column = data[sample_index, :, colum_index:colum_index+1]
    without_transition = np.delete(column, 29, axis=1)
    return without_transition.flatten()
# %%
"""
Load data
"""
# Sequence lengths (in steps) used when the arrays were generated.
history_len = 30 # steps
rollout_len = 30
data_id = '031'
dataset_name = 'sim_data_'+data_id
data_arr_name = 'data_arrays_h{history_len}_f{rollout_len}'.format(\
                history_len=history_len, rollout_len=rollout_len)
data_files_dir = './src/models/experiments/data_files/'+dataset_name+'/'
with open(data_files_dir+data_arr_name+'.pickle', 'rb') as handle:
    data_arrays = pickle.load(handle)
history_future_usc, history_sca, future_sca, future_idm_s, \
                future_m_veh_c, future_e_veh_a = data_arrays
# Cast the arrays fed to the model to float32.
history_sca = np.float32(history_sca)
future_idm_s = np.float32(future_idm_s)
future_m_veh_c = np.float32(future_m_veh_c)
history_future_usc.shape
# Episode-level 70/30 train/validation split with a fixed seed so the
# split is reproducible across runs.
all_epis = np.unique(history_sca[:, 0, 0])
np.random.seed(2021)
np.random.shuffle(all_epis)
train_epis = all_epis[:int(len(all_epis)*0.7)]
val_epis = np.setdiff1d(all_epis, train_epis)
# np.where(train_epis == 64)
# Sample rows whose episode id (column 0) belongs to each split.
train_samples = np.where(history_future_usc[:, 0:1, 0] == train_epis)[0]
val_samples = np.where(history_future_usc[:, 0:1, 0] == val_epis)[0]
# history_sca.shape
train_samples.shape
# %%
"""
Load model (with config file)
"""
# Experiment checkpoint to restore; its config.json sits next to the weights.
model_name = 'neural_idm_139'
epoch_count = '20'
exp_path = './src/models/experiments/'+model_name+'/model_epo'+epoch_count
exp_dir = os.path.dirname(exp_path)
with open(exp_dir+'/'+'config.json', 'rb') as handle:
    config = json.load(handle)
print(json.dumps(config, ensure_ascii=False, indent=4))
from models.core import neural_idm
reload(neural_idm)
from models.core.neural_idm import NeurIDMModel
model = NeurIDMModel(config)
# expect_partial(): silence warnings about checkpoint variables (e.g.
# optimizer slots) that are not restored for inference.
model.load_weights(exp_path).expect_partial()
with open(data_files_dir+'env_scaler.pickle', 'rb') as handle:
    model.forward_sim.env_scaler = pickle.load(handle)
with open(data_files_dir+'dummy_value_set.pickle', 'rb') as handle:
    model.forward_sim.dummy_value_set = pickle.load(handle)
# model.forward_sim.attention_temp = 10
# %%
"""
Plot loss
"""
with open(exp_dir+'/'+'losses.pickle', 'rb') as handle:
    losses = pickle.load(handle)
# Reconstruction (MSE) loss curves.
plt.figure()
plt.plot(losses['test_mseloss'], label='test_mseloss')
plt.plot(losses['train_mseloss'], label='train_mseloss')
plt.grid()
plt.legend()
# KL-divergence loss curves.
plt.figure()
plt.plot(losses['test_klloss'], label='test_klloss')
plt.plot(losses['train_klloss'], label='train_klloss')
plt.legend()
plt.grid()
# %%
"""
Compare losses
"""
# Overlay the loss curves of several experiments for comparison.
losses = {}
# for name in ['neural_idm_105', 'neural_idm_106']:
for name in ['neural_idm_139', 'neural_idm_138']:
    with open('./src/models/experiments/'+name+'/'+'losses.pickle', 'rb') as handle:
        losses[name] = pickle.load(handle)
plt.figure()
for name, loss in losses.items():
    plt.plot(loss['test_mseloss'], label=name)
    # plt.plot(loss['train_mseloss'], label='train_mseloss')
plt.grid()
plt.legend()
plt.figure()
for name, loss in losses.items():
    plt.plot(loss['test_klloss'], label=name)
    # plt.plot(loss['train_mseloss'], label='train_mseloss')
plt.grid()
plt.legend()
# %%
"""
Find bad examples
"""
import tensorflow as tf
# examples_to_vis = val_samples
# val_samples.shape
def get_avg_loss_across_sim(examples_to_vis):
    """Per-sample action error of the forward simulation.

    Rolls the model out from the prior for the given sample indices
    and returns, for each sample, the rollout-averaged error between
    predicted and true ego actions.
    """
    # Drop the two leading id columns (episode_id, time_step).
    merger_cs = future_m_veh_c[examples_to_vis, :, 2:]
    h_seq = history_sca[examples_to_vis, :, 2:]
    future_idm_ss = future_idm_s[examples_to_vis, :, 2:]
    enc_h = model.h_seq_encoder(h_seq)
    latent_dis_param = model.belief_net(enc_h, dis_type='prior')
    sampled_z = model.belief_net.sample_z(latent_dis_param)
    proj_belief = model.belief_net.belief_proj(sampled_z)
    idm_params = model.idm_layer(proj_belief)
    act_seq, att_scores = model.forward_sim.rollout([idm_params, proj_belief, \
                                    future_idm_ss, merger_cs])
    true_actions = future_e_veh_a[examples_to_vis, :, 2:]
    # NOTE(review): square followed by **0.5 is the elementwise absolute
    # error; tf.abs would express this more directly.
    loss = (tf.square(tf.subtract(act_seq, true_actions)))**0.5
    return tf.reduce_mean(loss, axis=1).numpy()
# loss = get_avg_loss_across_sim(train_samples[0:15])
# Mean action error for the first 5000 validation samples.
loss = get_avg_loss_across_sim(val_samples[0:5000])
_ = plt.hist(loss, bins=150)
# _ = plt.hist(loss[loss<0.1], bins=150)
# Flag samples whose average error exceeds 1 for manual inspection.
bad_samples = np.where(loss > 1)
# %%
# %%
"""
Latent visualisation - aggressiveness used for color coding the latent samples
"""
latent_vis(zsamples_n=5000)
latent_vis(zsamples_n=5000)
plt.savefig("nidm_latent.png", dpi=500)
# plt.savefig("latent.png", dpi=500)
# %%
# Global matplotlib font settings for the figures below.
params = {
    'font.size' : 12,
    'font.family' : 'Palatino Linotype',
    }
plt.rcParams.update(params)
# %%
# %%
"""
Visualisation of model predictions. Use this for debugging.
"""
# Scan validation samples in order and plot diagnostics for up to 10
# examples whose ego attention is nonzero around the history/future
# transition (steps 25-35).
# NOTE(review): indentation reconstructed from syntax; Example_pred is
# taken to increment only when an example is actually plotted — confirm.
Example_pred = 0
i = 0
covered_episodes = []
model.forward_sim.attention_temp = 1
traces_n = 50
# np.where((history_future_usc[:, 0, 0] == 26) & (history_future_usc[:, 0, 2] == 4))
sepcific_samples = []
distribution_name = 'prior'
# distribution_name = 'posterior'
# for i in bad_samples[0]:
# for i in sepcific_samples:
# for i in [2815]:
# for i in bad_samples[00]:
while Example_pred < 10:
    sample_index = [val_samples[i]]
    # sample_index = [train_samples[i]]
    # sample_index = [i]
    i += 1
    e_veh_att = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['e_veh_att'])
    m_veh_exists = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['m_veh_exists'])
    aggressiveness = history_future_usc[sample_index, 0, hf_usc_indexs['aggressiveness']][0]
    em_delta_y = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['em_delta_y'])
    episode = future_idm_s[sample_index, 0, 0][0]
    # if episode not in covered_episodes:
    # if 4 == 4:
    # Only one example per episode; require attention near the transition.
    if episode not in covered_episodes and e_veh_att[25:35].mean() > 0:
    # if episode not in covered_episodes and e_veh_att.mean() == 0:
        covered_episodes.append(episode)
        # Tile the single sample traces_n times to draw that many rollouts.
        merger_cs = vectorise(future_m_veh_c[sample_index, :, 2:], traces_n)
        h_seq = vectorise(history_sca[sample_index, :, 2:], traces_n)
        future_idm_ss = vectorise(future_idm_s[sample_index, :, 2:], traces_n)
        enc_h = model.h_seq_encoder(h_seq)
        if distribution_name == 'posterior':
            f_seq = vectorise(future_sca[sample_index, :, 2:], traces_n)
            enc_f = model.f_seq_encoder(f_seq)
            _, latent_dis_param = model.belief_net([enc_h, enc_f], dis_type='both')
        elif distribution_name == 'prior':
            latent_dis_param = model.belief_net(enc_h, dis_type='prior')
        z_idm, z_att = model.belief_net.sample_z(latent_dis_param)
        proj_idm = model.belief_net.z_proj_idm(np.concatenate([z_idm, h_seq[:, -1, :]], axis=-1))
        proj_att = model.belief_net.z_proj_att(np.concatenate([z_att, h_seq[:, -1, :]], axis=-1))
        idm_params = model.idm_layer(proj_idm)
        act_seq, att_scores = model.forward_sim.rollout([idm_params, proj_att, \
                                        future_idm_ss, merger_cs])
        act_seq, att_scores = act_seq.numpy(), att_scores.numpy()
        # Text panel with sample metadata and true vs predicted IDM params.
        plt.figure(figsize=(5, 4))
        episode_id = history_future_usc[sample_index, 0, hf_usc_indexs['episode_id']][0]
        e_veh_id = history_future_usc[sample_index, 0, hf_usc_indexs['e_veh_id']][0]
        time_0 = int(history_future_usc[sample_index, 0, hf_usc_indexs['time_step']][0])
        time_steps = range(time_0, time_0+59)
        info = [str(item)+' '+'\n' for item in [episode_id, time_0, e_veh_id, aggressiveness]]
        plt.text(0.1, 0.4,
                 'experiment_name: '+ model_name+'_'+epoch_count +' '+'\n'
                 'episode_id: '+ info[0] +
                 'time_0: '+ info[1] +
                 'e_veh_id: '+ info[2] +
                 'aggressiveness: '+ info[3] +
                 'step_20_speed: '+ str(future_idm_ss[0, 0, 0])
                 , fontsize=10)
        true_params = []
        for param_name in ['desired_v', 'desired_tgap', 'min_jamx', 'max_act', 'min_act']:
            true_pram_val = history_future_usc[sample_index, 0, hf_usc_indexs[param_name]][0]
            true_params.append(round(true_pram_val, 2))
        plt.text(0.1, 0.3, 'true: '+ str(true_params))
        plt.text(0.1, 0.1, 'pred: '+ str(idm_params.numpy()[:, :].mean(axis=0).round(2)))
        # Action traces: true actions plus the sampled model rollouts.
        plt.figure(figsize=(5, 3))
        traj = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['f_veh_action'])
        plt.plot(time_steps, traj, color='purple')
        traj = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['e_veh_action'])
        plt.plot(time_steps, traj, color='black', linewidth=2)
        traj = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['m_veh_action'])
        plt.plot(time_steps, traj, color='red')
        plt.legend(['f_veh_action', 'e_veh_action', 'm_veh_action'])
        for sample_trace_i in range(traces_n):
            plt.plot(time_steps[29:], act_seq[sample_trace_i, :, :].flatten(),
                     color='grey', alpha=0.4)
            # plt.plot(time_steps[29:], act_seq[sample_trace_i, :, :].flatten(), color='grey')
        # plt.ylim(-3, 3)
        plt.title(str(sample_index[0]) + ' -- Action')
        plt.grid()
        # Attention traces: true flag plus the sampled attention scores.
        plt.figure(figsize=(5, 3))
        # plt.plot(e_veh_att[:40] , color='black')
        plt.plot(time_steps , e_veh_att, color='red')
        plt.plot([time_steps[29], time_steps[29]], [0, 1], color='black')
        for sample_trace_i in range(traces_n):
            plt.plot(time_steps[29:], att_scores[sample_trace_i, :].flatten(), color='grey')
        plt.title(str(sample_index[0]) + ' -- Attention')
        ##########
        # m_veh_id = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['m_veh_id'])
        # plt.figure(figsize=(5, 3))
        # plt.plot(m_veh_id, color='black')
        # plt.title(str(sample_index[0]) + ' -- m_veh_id')
        # plt.grid()
        ######\######
        ##########
        plt.figure(figsize=(5, 3))
        plt.plot(m_veh_exists, color='black')
        plt.title(str(sample_index[0]) + ' -- m_veh_exists')
        plt.grid()
        ######\######
        plt.figure(figsize=(5, 3))
        plt.plot(time_steps , em_delta_y, color='red')
        # plt.plot([0, 40], [-0.37, -0.37], color='green')
        # plt.plot([0, 40], [-1, -1], color='red')
        # plt.plot([0, 40], [-1.5, -1.5], color='red')
        plt.title(str(sample_index[0]) + ' -- em_delta_y')
        plt.grid()
        ############
        ##########
        # ax = latent_vis(2000)
        # ax.scatter(z_idm[:, 0], z_idm[:, 1], z_idm[:, 2], s=15, color='black')
        ##########
        """
        # plt.plot(desired_vs)
        # plt.grid()
        # plt.plot(desired_tgaps)
        # plt.grid()
        plt.figure(figsize=(5, 3))
        desired_vs = idm_params.numpy()[:, 0]
        desired_tgaps = idm_params.numpy()[:, 1]
        plt.scatter(desired_vs, desired_tgaps, color='grey')
        plt.scatter(24.7, 1.5, color='red')
        plt.xlim(15, 40)
        plt.ylim(0, 3)
        #
        # plt.scatter(30, 1, color='red')
        # plt.xlim(25, 35)
        # plt.ylim(0, 2)
        plt.title(str(sample_index[0]) + ' -- Param')
        plt.grid()
        """
        Example_pred += 1
# %%
""" Set scientific plot format
"""
plt.rcParams['text.latex.preamble']=[r"\usepackage{lmodern}"]
#Options
params = {
'font.size' : 20,
'font.family' : 'EB Garamond',
}
plt.rcParams.update(params)
plt.style.use(['science','ieee'])
# %%
# %%
"""Prediction for an specific sample from the dataset
"""
# model.arbiter.attention_temp = 5
traces_n = 50
model.forward_sim.attention_temp = 5
# sample_index = [12374]
sample_index = [3958]
e_veh_att = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['e_veh_att'])
m_veh_exists = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['m_veh_exists'])
aggressiveness = history_future_usc[sample_index, 0, hf_usc_indexs['aggressiveness']][0]
em_delta_y = fetch_traj(history_future_usc, sample_index, hf_usc_indexs['em_delta_y'])
episode = future_idm_s[sample_index, 0, 0][0]
# merger_cs.shape
# plt.plot(history_future_usc[sample_index, 20: ,hf_usc_indexs['delta_x_to_merge']][0])
# plt.plot(merger_cs[0, :, -1])
merger_cs = vectorise(future_m_veh_c[sample_index, :, 2:], traces_n)
# merger_cs[:, 10, -1] = 0
h_seq = vectorise(history_sca[sample_index, :, 2:], traces_n)
future_idm_ss = vectorise(future_idm_s[sample_index, :, 2:], traces_n)
enc_h = model.h_seq_encoder(h_seq)
latent_dis_param = model.belief_net(enc_h, dis_type='prior')
z_idm, z_att = model.belief_net.sample_z(latent_dis_param)
proj_idm = model.belief_net.z_proj_idm(np.concatenate([z_idm, h_seq[:, -1, :]], axis=-1))
proj_att = model.belief_net.z_proj_att(np.concatenate([z_att, h_seq[:, -1, :]], axis=-1))
idm_params = model.idm_layer(proj_idm).numpy()
act_seq, att_scores = model.forward_sim.rollout([idm_params, proj_att, \
future_idm_ss, merger_cs])
act_seq, att_scores = act_seq.numpy(), att_scores.numpy()
att_ = att_scores.flatten()
plt.figure(figsize=(5, 3))
episode_id = history_future_usc[sample_index, 0, hf_usc_indexs['episode_id']][0]
e_veh_id = history_future_usc[sample_index, 0, hf_usc_indexs['e_veh_id']][0]
time_0 = history_future_usc[sample_index, 0, hf_usc_indexs['time_step']][0]
aggressiveness = history_future_usc[sample_index, 0, hf_usc_indexs['aggressiveness']][0]
info = [str(item)+' '+'\n' for item in [episode_id, time_0, e_veh_id, aggressiveness]]
plt.text(0.1, 0.5,
'episode_id: '+ info[0] +
'time_0: '+ info[1] +
'e_veh_id: '+ info[2] +
'aggressiveness: '+ info[3]
, | |
--resource-group {} --priority {}
--target-condition \"{}\" --labels {} --content '{}'"""
.format(config_ids[1], LIVE_HUB, LIVE_RG, priority, condition, '"{generic_dict}"', '{configuration_payload}'),
checks=[
self.check('id', config_ids[1]),
self.check('priority', priority),
self.check('targetCondition', condition),
self.check('labels', self.kwargs['generic_dict']),
self.check('content.deviceContent', json.loads(
self.kwargs['configuration_payload'])['content']['deviceContent']),
self.check('metrics', empty_metrics)])
# With connection string
self.cmd("iot hub configuration create -c {} --login {} --pri {} --tc \"{}\" --lab {} -k '{}' -m '{}'"
.format(config_ids[2], LIVE_HUB_CS, priority, condition, '"{generic_dict}"', content_path, metrics_path),
checks=[
self.check('id', config_ids[2]),
self.check('priority', priority),
self.check('targetCondition', condition),
self.check('labels', self.kwargs['generic_dict']),
self.check('content.deviceContent', json.loads(
self.kwargs['configuration_payload'])['content']['deviceContent']),
self.check('metrics.queries', json.loads(self.kwargs['metrics_payload'])['queries'])])
self.cmd("""iot hub configuration create --config-id {} --hub-name {} --resource-group {} --priority {}
--target-condition \"{}\" --labels {} --content '{}' --metrics '{}'"""
.format(config_ids[3], LIVE_HUB, LIVE_RG, priority, condition, '"{generic_dict}"',
'{configuration_payload}', '{metrics_payload}'),
checks=[
self.check('id', config_ids[3]),
self.check('priority', priority),
self.check('targetCondition', condition),
self.check('labels', self.kwargs['generic_dict']),
self.check('content.deviceContent', json.loads(
self.kwargs['configuration_payload'])['content']['deviceContent']),
self.check('metrics.queries', json.loads(self.kwargs['metrics_payload'])['queries'])])
self.cmd("""iot hub configuration create --config-id {} --hub-name {} --resource-group {} --priority {}
--content '{}' """
.format(config_ids[4], LIVE_HUB, LIVE_RG, priority, '{configuration_payload}'),
checks=[
self.check('id', config_ids[4]),
self.check('priority', priority),
self.check('targetCondition', ''),
self.check('content.deviceContent', json.loads(
self.kwargs['configuration_payload'])['content']['deviceContent'])])
# With connection string
self.cmd('iot hub configuration show -c {} --login {}'.format(config_ids[0], LIVE_HUB_CS),
checks=[
self.check('id', config_ids[0]),
self.check('priority', priority),
self.check('targetCondition', condition),
self.check('labels', self.kwargs['generic_dict']),
self.check('metrics', empty_metrics)])
self.cmd('iot hub configuration show -c {} --login {}'.format(config_ids[3], LIVE_HUB_CS),
checks=[
self.check('id', config_ids[3]),
self.check('priority', priority),
self.check('targetCondition', condition),
self.check('labels', self.kwargs['generic_dict']),
self.check('metrics.queries', json.loads(self.kwargs['metrics_payload'])['queries'])])
self.cmd('iot hub configuration show --config-id {} --hub-name {} --resource-group {}'.format(config_ids[2],
LIVE_HUB, LIVE_RG),
checks=[
self.check('id', config_ids[2]),
self.check('priority', priority),
self.check('targetCondition', condition),
self.check('labels', self.kwargs['generic_dict']),
self.check('metrics.queries', json.loads(self.kwargs['metrics_payload'])['queries'])])
priority = random.randint(1, 10)
condition = "tags.building=43 and tags.environment='dev'"
self.kwargs['generic_dict_updated'] = {'key': 'super_value'}
self.cmd('iot hub configuration update -c {} -n {} -g {} --set priority={} targetCondition="{}" labels={}'
.format(config_ids[0], LIVE_HUB, LIVE_RG, priority, condition, '"{generic_dict_updated}"'),
checks=[
self.check('id', config_ids[0]),
self.check('priority', priority),
self.check('targetCondition', condition),
self.check('labels', self.kwargs['generic_dict_updated'])])
# With connection string
self.cmd('iot hub configuration update -c {} --login {} --set priority={} targetCondition="{}" labels={}'
.format(config_ids[0], LIVE_HUB_CS, priority, condition, '"{generic_dict_updated}"'),
checks=[
self.check('id', config_ids[0]),
self.check('priority', priority),
self.check('targetCondition', condition),
self.check('labels', self.kwargs['generic_dict_updated'])])
# Error via type enforcer
self.cmd('iot hub configuration update -c {} --login {} --set priority={} targetCondition="{}" labels={}'
.format(config_ids[0], LIVE_HUB_CS, priority, condition, '"{bad_format}"'), expect_failure=True)
self.cmd('iot hub configuration update -c {} --login {} --set content={}'
.format(config_ids[0], LIVE_HUB_CS, '"{bad_format}"'), expect_failure=True)
# Metrics
user_metric_name = 'mymetric'
system_metric_name = 'appliedCount'
config_output = self.cmd('iot hub configuration show --login {} --config-id {}'.format(
LIVE_HUB_CS, config_ids[2])).get_output_in_json()
self.cmd('iot hub configuration show-metric --metric-id {} --login {} --config-id {} --metric-type {}'
.format(user_metric_name, LIVE_HUB_CS, config_ids[2], 'user'),
checks=[
self.check('metric', user_metric_name),
self.check('query', config_output['metrics']['queries'][user_metric_name])
])
# With connection string
self.cmd('iot hub configuration show-metric -m {} --login {} -c {} --metric-type {}'
.format('doesnotexist', LIVE_HUB_CS, config_ids[2], 'user'), expect_failure=True)
self.cmd('iot hub configuration show-metric -m {} --login {} -c {} --metric-type {}'
.format(system_metric_name, LIVE_HUB_CS, config_ids[2], 'system'),
checks=[
self.check('metric', system_metric_name),
self.check('query', config_output['systemMetrics']['queries'][system_metric_name])
])
config_list_check = [
self.check('length([*])', 5),
self.exists("[?id=='{}']".format(config_ids[0])),
self.exists("[?id=='{}']".format(config_ids[1])),
self.exists("[?id=='{}']".format(config_ids[2])),
self.exists("[?id=='{}']".format(config_ids[3])),
self.exists("[?id=='{}']".format(config_ids[4]))
]
self.cmd("iot hub configuration list -n {} -g {}".format(LIVE_HUB, LIVE_RG),
checks=config_list_check)
# With connection string
self.cmd("iot hub configuration list --login {}".format(LIVE_HUB_CS),
checks=config_list_check)
# Error top of -1 does not work with configurations
self.cmd("iot hub configuration list -n {} -g {} --top -1".format(LIVE_HUB, LIVE_RG), expect_failure=True)
# Error max top of 20 with configurations
self.cmd("iot hub configuration list -n {} -g {} --top 100".format(LIVE_HUB, LIVE_RG), expect_failure=True)
    def test_edge_deployments(self):
        """Lifecycle test for 'iot edge deployment' CLI commands against a live IoT Hub.

        Creates three deployments (schema v2 from a file, v2 from an inline JSON
        kwarg, and v1 from a file), then exercises show, update, show-metric,
        list (including --top validation) and explicit delete. Both the
        hub-name/resource-group and connection-string (--login) auth paths are
        covered.

        NOTE(review): relies on module-level LIVE_HUB / LIVE_RG / LIVE_HUB_CS
        pointing at real Azure resources -- this is an integration test.
        """
        self.kwargs['generic_dict'] = {'key': 'value'}
        config_count = 3
        names = self._create_entity_names(configs=config_count)
        config_ids = names['config_ids']

        # Deployment content: schema v2 uses 'modulesContent', v1 'moduleContent'
        content_path = os.path.join(CWD, 'test_config_modules_content.json')
        content_path_v1 = os.path.join(CWD, 'test_config_modules_content_v1.json')

        self.kwargs['configuration_payload'] = read_file_content(content_path)
        self.kwargs['configuration_payload_v1'] = read_file_content(content_path_v1)

        priority = random.randint(1, 10)
        condition = 'tags.building=9 and tags.environment=\'test\''

        # Create from file path - with connection string
        self.cmd("iot edge deployment create -d {} --login {} --pri {} --tc \"{}\" --lab {} -k '{}'"
                 .format(config_ids[0], LIVE_HUB_CS, priority, condition, '"{generic_dict}"', content_path),
                 checks=[
                     self.check('id', config_ids[0]),
                     self.check('priority', priority),
                     self.check('targetCondition', condition),
                     self.check('labels', self.kwargs['generic_dict']),
                     self.check('content.modulesContent', json.loads(
                         self.kwargs['configuration_payload'])['content']['modulesContent'])])

        # Create from inline payload kwarg - with hub name and resource group
        self.cmd("""iot edge deployment create --deployment-id {} --hub-name {} --resource-group {} --priority {}
                    --target-condition \"{}\" --labels {} --content '{}'"""
                 .format(config_ids[1], LIVE_HUB, LIVE_RG, priority, condition, '"{generic_dict}"', '{configuration_payload}'),
                 checks=[
                     self.check('id', config_ids[1]),
                     self.check('priority', priority),
                     self.check('targetCondition', condition),
                     self.check('labels', self.kwargs['generic_dict']),
                     self.check('content.modulesContent', json.loads(
                         self.kwargs['configuration_payload'])['content']['modulesContent'])])

        # Create from v1 schema file - note the singular 'moduleContent' key
        self.cmd("""iot edge deployment create --deployment-id {} --hub-name {} --resource-group {} --priority {}
                    --target-condition \"{}\" --labels {} --content '{}'"""
                 .format(config_ids[2], LIVE_HUB, LIVE_RG, priority, condition, '"{generic_dict}"', content_path_v1),
                 checks=[
                     self.check('id', config_ids[2]),
                     self.check('priority', priority),
                     self.check('targetCondition', condition),
                     self.check('labels', self.kwargs['generic_dict']),
                     self.check('content.modulesContent', json.loads(
                         # moduleContent for v1
                         self.kwargs['configuration_payload_v1'])['content']['moduleContent'])])

        # With connection string
        self.cmd('iot edge deployment show -d {} --login {}'.format(config_ids[1], LIVE_HUB_CS),
                 checks=[
                     self.check('id', config_ids[1]),
                     self.check('priority', priority),
                     self.check('targetCondition', condition),
                     self.check('labels', self.kwargs['generic_dict'])])

        self.cmd('iot edge deployment show --deployment-id {} --hub-name {} --resource-group {}'.format(config_ids[0],
                                                                                                        LIVE_HUB, LIVE_RG),
                 checks=[
                     self.check('id', config_ids[0]),
                     self.check('priority', priority),
                     self.check('targetCondition', condition),
                     self.check('labels', self.kwargs['generic_dict'])])

        # Update mutable properties (priority, target condition, labels)
        priority = random.randint(1, 10)
        condition = "tags.building=43 and tags.environment='dev'"
        self.kwargs['generic_dict_updated'] = {'key': 'super_value'}
        self.cmd('iot edge deployment update -d {} -n {} -g {} --set priority={} targetCondition="{}" labels={}'
                 .format(config_ids[0], LIVE_HUB, LIVE_RG, priority, condition, '"{generic_dict_updated}"'),
                 checks=[
                     self.check('id', config_ids[0]),
                     self.check('priority', priority),
                     self.check('targetCondition', condition),
                     self.check('labels', self.kwargs['generic_dict_updated'])])

        # With connection string
        self.cmd('iot edge deployment update -d {} --login {} --set priority={} targetCondition="{}" labels={}'
                 .format(config_ids[0], LIVE_HUB_CS, priority, condition, '"{generic_dict_updated}"'),
                 checks=[
                     self.check('id', config_ids[0]),
                     self.check('priority', priority),
                     self.check('targetCondition', condition),
                     self.check('labels', self.kwargs['generic_dict_updated'])])

        # Metrics - edge deployments expose system metrics only (e.g. appliedCount)
        system_metric_name = 'appliedCount'
        config_output = self.cmd('iot edge deployment show --login {} --config-id {}'.format(
            LIVE_HUB_CS, config_ids[2])).get_output_in_json()

        self.cmd('iot edge deployment show-metric --metric-id {} --config-id {} --hub-name {}'
                 .format(system_metric_name, config_ids[2], LIVE_HUB),
                 checks=[
                     self.check('metric', system_metric_name),
                     self.check('query', config_output['systemMetrics']['queries'][system_metric_name])
                 ])

        # With connection string; unknown metric id is expected to fail
        self.cmd('iot edge deployment show-metric -m {} --login {} -c {}'
                 .format('doesnotexist', LIVE_HUB_CS, config_ids[2]), expect_failure=True)

        self.cmd('iot edge deployment show-metric --metric-id {} --login {} --config-id {}'
                 .format(system_metric_name, LIVE_HUB_CS, config_ids[2]),
                 checks=[
                     self.check('metric', system_metric_name),
                     self.check('query', config_output['systemMetrics']['queries'][system_metric_name])
                 ])

        # All three created deployments should be listed
        config_list_check = [
            self.check('length([*])', 3),
            self.exists("[?id=='{}']".format(config_ids[0])),
            self.exists("[?id=='{}']".format(config_ids[1])),
            self.exists("[?id=='{}']".format(config_ids[2]))
        ]

        self.cmd("iot edge deployment list -n {} -g {}".format(LIVE_HUB, LIVE_RG),
                 checks=config_list_check)

        # With connection string
        self.cmd("iot edge deployment list --login {}".format(LIVE_HUB_CS),
                 checks=config_list_check)

        # Explicit delete for edge deployment
        # (deleted ids are removed from config_ids so teardown does not retry them)
        self.cmd("iot edge deployment delete -d {} -n {} -g {}".format(
            config_ids[1], LIVE_HUB, LIVE_RG))
        del config_ids[1]

        self.cmd("iot edge deployment delete -d {} --login {}".format(
            config_ids[0], LIVE_HUB_CS))
        del config_ids[0]

        # Error top of -1 does not work with configurations
        self.cmd("iot edge deployment list -n {} -g {} --top -1".format(LIVE_HUB, LIVE_RG), expect_failure=True)

        # Error max top of 20 with configurations
        self.cmd("iot edge deployment list -n {} -g {} --top 100".format(LIVE_HUB, LIVE_RG), expect_failure=True)
    @pytest.mark.skipif(not validate_min_python_version(3, 4, exit_on_fail=False), reason="minimum python version not satisfied")
    def test_uamqp_device_messaging(self):
        """C2D messaging round trip over the uamqp-based receive path.

        Sends cloud-to-device messages (with correlation id, app properties and
        ack requests), receives and completes/rejects them, then verifies that
        'send --wait' blocks for feedback from a simulated device and that
        '--wait' without an ack request fails.

        NOTE(review): requires a live hub (LIVE_HUB/LIVE_RG/LIVE_HUB_CS) and
        the uamqp dependency; gated on Python >= 3.4 via the skipif above.
        """
        device_count = 1
        names = self._create_entity_names(devices=device_count)
        device_ids = names['device_ids']

        # Edge-enabled (--ee) device identity for the round trip
        self.cmd('iot hub device-identity create -d {} -n {} -g {} --ee'.format(device_ids[0], LIVE_HUB, LIVE_RG),
                 checks=[self.check('deviceId', device_ids[0])])

        test_body = str(uuid4())
        test_props = 'key0=value0;key1=value1'
        test_cid = str(uuid4())

        # Default send: no ack requested -> 'ack' surfaces as 'none' on receive
        self.cmd('iot device c2d-message send -d {} --hub-name {} -g {} --data {} --cid {}'
                 .format(device_ids[0], LIVE_HUB, LIVE_RG, test_body, test_cid), checks=self.is_empty())

        result = self.cmd('iot device c2d-message receive -d {} --hub-name {} -g {}'.format(
            device_ids[0], LIVE_HUB, LIVE_RG)).get_output_in_json()
        assert result['data'] == test_body
        assert result['correlationId'] == test_cid
        assert result['ack'] == 'none'

        # Settle the message with the etag returned by receive
        etag = result['etag']
        self.cmd('iot device c2d-message complete -d {} --hub-name {} -g {} --etag {}'.format(
            device_ids[0], LIVE_HUB, LIVE_RG, etag), checks=self.is_empty())

        test_body = str(uuid4())
        test_cid = str(uuid4())

        # With connection string
        self.cmd('iot device c2d-message send -d {} --data {} --props {} --cid {} --ack {} --login {}'
                 .format(device_ids[0], test_body, test_props, test_cid, 'positive', LIVE_HUB_CS), checks=self.is_empty())

        result = self.cmd('iot device c2d-message receive -d {} --login {}'.format(
            device_ids[0], LIVE_HUB_CS)).get_output_in_json()
        assert result['data'] == test_body
        assert result['correlationId'] == test_cid
        assert result['ack'] == 'positive'

        etag = result['etag']
        self.cmd('iot device c2d-message reject -d {} --etag {} --login {}'.format(
            device_ids[0], etag, LIVE_HUB_CS), checks=self.is_empty())

        # Test waiting for ack from c2d send:
        # run a device simulator on a background thread so the hub has a
        # live device to acknowledge the message.
        from azext_iot.operations.hub import iot_simulate_device
        from azext_iot._factory import iot_hub_service_factory
        from azure.cli.core.mock import DummyCli
        cli_ctx = DummyCli()
        client = iot_hub_service_factory(cli_ctx)

        token, thread = execute_onthread(method=iot_simulate_device,
                                         args=[client, device_ids[0], LIVE_HUB, 'complete',
                                               'Ping from c2d ack wait test', 2, 5, 'http'],
                                         max_runs=5,
                                         return_handle=True)

        self.cmd('iot device c2d-message send -d {} --ack {} --login {} --wait -y'.format(device_ids[0], 'full', LIVE_HUB_CS))
        # Stop the simulator and make sure the worker thread has exited
        token.set()
        thread.join()

        # Error - invalid wait when no ack requested
        self.cmd('iot device c2d-message send -d {} --login {} --wait -y'.format(
            device_ids[0], LIVE_HUB_CS), expect_failure=True)
def test_device_messaging(self):
device_count = 1
names = self._create_entity_names(devices=device_count)
device_ids = names['device_ids']
self.cmd('iot hub device-identity create -d {} -n {} -g {} --ee'.format(device_ids[0], LIVE_HUB, LIVE_RG),
checks=[self.check('deviceId', device_ids[0])])
self.cmd('iot device c2d-message receive -d {} --hub-name {} -g {}'
.format(device_ids[0], LIVE_HUB, LIVE_RG), checks=self.is_empty())
# With connection string
self.cmd('iot device c2d-message receive -d {} --login {}'
.format(device_ids[0], LIVE_HUB_CS), checks=self.is_empty())
etag = '00000000-0000-0000-0000-000000000000'
self.cmd('iot device c2d-message complete -d {} --hub-name {} -g {} -e {}'
.format(device_ids[0], LIVE_HUB, LIVE_RG, etag), expect_failure=True)
# With connection string
self.cmd('iot device c2d-message complete -d | |
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Aug 08, 2018
"""
import unittest
from itertools import product
from collections import defaultdict
import warnings
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression, LinearRegression
from warnings import simplefilter, catch_warnings
from causallib.estimation import AIPW, PropensityFeatureStandardization, WeightedStandardization
from causallib.estimation import IPW
from causallib.estimation import Standardization, StratifiedStandardization
class TestDoublyRobustBase(unittest.TestCase):
    """Shared fixtures and behavioural checks for doubly-robust estimators.

    Concrete subclasses (AIPW, WeightedStandardization,
    PropensityFeatureStandardization) assign ``cls.estimator`` in their
    ``setUpClass`` and delegate to the ``ensure_*`` helpers below, so each
    estimator flavour is run through the same battery of checks.
    """

    @staticmethod
    def create_uninformative_tx_dataset():
        """Build a dataset where treatment carries no outcome information.

        The outcome depends on the covariate only (y = beta * X) while the
        treatment is a fixed half/half split, so the outcome model holds all
        the signal; a doubly-robust estimate is expected to agree with plain
        standardization (see ensure_uninformative_tx_leads_to_std_like_results).

        Returns:
            dict: {"X": DataFrame, "a": treatment Series, "y": outcome Series,
                   "beta": the true coefficient}
        """
        n = 100
        beta = 0.4
        X = pd.Series(np.random.normal(size=n))
        a = pd.Series([0] * (n // 2) + [1] * (n // 2))
        y = X.mul(beta)
        return {"X": X.to_frame(), "a": a, "y": y, "beta": beta}

    @staticmethod
    def create_uninformative_ox_dataset():
        """Build a dataset where covariates carry no outcome information.

        The outcome depends on treatment only (y = beta * a), so the weight
        (propensity) model holds all the signal; a doubly-robust estimate is
        expected to agree with IPW (see
        ensure_uninformative_ox_leads_to_ipw_like_results).

        Returns:
            dict: {"X": DataFrame (100x5 noise), "a": treatment Series,
                   "y": outcome Series, "beta": the true effect}
        """
        n = 100
        beta = 0.4
        X = pd.DataFrame(np.random.normal(size=(n, 5)))
        a = pd.Series([0] * (n // 2) + [1] * (n // 2))
        y = a.mul(beta)
        return {"X": X, "a": a, "y": y, "beta": beta}

    def fit_and_predict_all_learners(self, data, estimator):
        """Fit `estimator` and return population outcomes from the combined
        model and from its two components (outcome model and weight model).

        Returns:
            tuple: (doubly_robust_result, standardization_result, ipw_result)
        """
        estimator.fit(data["X"], data["a"], data["y"])
        with catch_warnings():
            simplefilter(action='ignore', category=UserWarning)  # for some of the models using y throws a UserWarning
            doubly_res = estimator.estimate_population_outcome(data["X"], data["a"], data["y"])
            std_res = estimator.outcome_model.estimate_population_outcome(data["X"], data["a"])
            ipw_res = estimator.weight_model.estimate_population_outcome(data["X"], data["a"], data["y"])
        return doubly_res, std_res, ipw_res

    def ensure_uninformative_tx_leads_to_std_like_results(self, estimator):
        """With uninformative treatment, the DR estimate should match the
        standardization component and differ from the IPW component."""
        data = self.create_uninformative_tx_dataset()

        doubly_res, std_res, ipw_res = self.fit_and_predict_all_learners(data, estimator)

        with self.subTest("Compare population outcome with Standardization"):
            self.assertAlmostEqual(doubly_res[0], std_res[0])
            self.assertAlmostEqual(doubly_res[1], std_res[1])
        with self.subTest("Compare population outcome with IPW"):
            self.assertNotAlmostEqual(doubly_res[0], ipw_res[0])
            self.assertNotAlmostEqual(doubly_res[1], ipw_res[1])

    def ensure_uninformative_ox_leads_to_ipw_like_results(self, estimator):
        """With uninformative covariates, the DR estimate should match both
        components (standardization degenerates to the same answer as IPW)."""
        data = self.create_uninformative_ox_dataset()

        doubly_res, std_res, ipw_res = self.fit_and_predict_all_learners(data, estimator)

        with self.subTest("Compare population outcome with Standardization"):
            self.assertAlmostEqual(doubly_res[0], std_res[0])
            self.assertAlmostEqual(doubly_res[1], std_res[1])
        with self.subTest("Compare population outcome with IPW"):
            self.assertAlmostEqual(doubly_res[0], ipw_res[0])
            self.assertAlmostEqual(doubly_res[1], ipw_res[1])

    def ensure_effect_recovery(self, n=1100):
        """Fit ``self.estimator`` on synthetic data with a known effect and
        check the estimated population effect is within 0.05 of the truth.

        Returns the generated data dict so callers can reuse it.
        """
        use_tmle_data = True
        if use_tmle_data:  # Align the datasets to the same attributes
            from causallib.tests.test_tmle import generate_data
            data = generate_data(n, 2, 0, 1, 1, seed=1)
            data['y'] = data['y_cont']
        else:
            data = self.create_uninformative_ox_dataset()
            data['treatment_effect'] = data['beta']

        self.estimator.fit(data['X'], data['a'], data['y'])
        y = data["y"] if isinstance(self.estimator, AIPW) else None  # Avoid warnings
        pop_outcomes = self.estimator.estimate_population_outcome(data['X'], data['a'], y)
        effect = pop_outcomes[1] - pop_outcomes[0]
        np.testing.assert_allclose(
            data['treatment_effect'], effect,
            atol=0.05
        )
        return data

    def ensure_is_fitted(self, estimator):
        """After fit, both component learners should expose fitted `coef_`."""
        data = self.create_uninformative_ox_dataset()
        estimator.fit(data["X"], data["a"], data["y"])

        self.assertTrue(hasattr(estimator.weight_model.learner, "coef_"))
        self.assertTrue(hasattr(estimator.outcome_model.learner, "coef_"))

    def ensure_data_is_separated_between_models(self, estimator, n_added_outcome_model_features):
        """Check `outcome_covariates`/`weight_covariates` route different
        column subsets to the two component models (verified by coef_ sizes).

        Args:
            n_added_outcome_model_features: extra features the DR flavour
                appends to the outcome model (e.g. the treatment column).
        """
        data = self.create_uninformative_ox_dataset()
        # Reinitialize estimator:
        estimator = estimator.__class__(estimator.outcome_model, estimator.weight_model,
                                        outcome_covariates=[0, 1, 2, 3], weight_covariates=[3, 4])
        estimator.fit(data["X"], data["a"], data["y"])

        self.assertEqual(estimator.outcome_model.learner.coef_.size,
                         len(estimator.outcome_covariates) + n_added_outcome_model_features)
        self.assertEqual(estimator.weight_model.learner.coef_.size, len(estimator.weight_covariates))

    def ensure_weight_refitting_refits(self, estimator):
        """Check the `refit_weight_model` flag: False keeps the (zeroed)
        weight model untouched, True refits it."""
        data = self.create_uninformative_ox_dataset()
        with self.subTest("Test first fit of weight_model did fit the model"):
            estimator.fit(data["X"], data["a"], data["y"])
            self.assertEqual(estimator.weight_model.learner.coef_.size, data["X"].shape[1])

        with self.subTest("Test no-refitting does not refit"):
            estimator.weight_model.learner.coef_ = np.zeros_like(estimator.weight_model.learner.coef_)
            estimator.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
            np.testing.assert_array_equal(estimator.weight_model.learner.coef_,
                                          np.zeros_like(estimator.weight_model.learner.coef_))

        with self.subTest("Test yes-refitting does indeed fit"):
            estimator.fit(data["X"], data["a"], data["y"], refit_weight_model=True)
            self.assertTrue(np.any(np.not_equal(estimator.weight_model.learner.coef_,
                                                np.zeros_like(estimator.weight_model.learner.coef_))))

    def ensure_model_combinations_work(self, estimator_class):
        """Smoke-test fit/predict/effect for every combination of weight and
        outcome model types supported by `estimator_class`."""
        data = self.create_uninformative_ox_dataset()
        for ipw, std in product([IPW], [Standardization, StratifiedStandardization]):
            with self.subTest("Test combination of {} and {} does not crash".format(ipw, std)):
                ipw = ipw(LogisticRegression())
                std = std(LinearRegression())
                dr = estimator_class(std, ipw)
                with self.subTest("Test fit"):
                    dr.fit(data["X"], data["a"], data["y"])
                    self.assertTrue(True)  # Dummy assert, didn't crash
                with self.subTest("Check prediction"):
                    ind_outcome = dr.estimate_individual_outcome(data["X"], data["a"])
                    y = data["y"] if isinstance(dr, AIPW) else None  # Avoid warnings
                    pop_outcome = dr.estimate_population_outcome(data["X"], data["a"], y)
                    dr.estimate_effect(ind_outcome[1], ind_outcome[0], agg="individual")
                    dr.estimate_effect(pop_outcome[1], pop_outcome[0])
                    self.assertTrue(True)  # Dummy assert, didn't crash

    def ensure_pipeline_learner(self):
        """Check the estimator accepts sklearn Pipelines as component learners.

        NOTE(review): rebinds ``self.estimator`` (shared class fixture) to a
        freshly constructed instance as a side effect.
        """
        from sklearn.preprocessing import StandardScaler, MinMaxScaler
        from sklearn.pipeline import make_pipeline
        data = self.create_uninformative_ox_dataset()
        weight_learner = make_pipeline(StandardScaler(), MinMaxScaler(), LogisticRegression())
        outcome_learner = make_pipeline(StandardScaler(), MinMaxScaler(), LinearRegression())

        for ipw, std in product([IPW], [Standardization, StratifiedStandardization]):
            with self.subTest("Test combination of {} and {} does not crash".format(ipw, std)):
                ipw_model = ipw(weight_learner)
                std_model = std(outcome_learner)
                with self.subTest("Test initialization with pipeline learner"):
                    self.estimator = self.estimator.__class__(std_model, ipw_model)
                    self.assertTrue(True)  # Dummy assert for not thrown exception

                with self.subTest("Test fit with pipeline learner"):
                    self.estimator.fit(data["X"], data["a"], data["y"])
                    self.assertTrue(True)  # Dummy assert for not thrown exception

                with self.subTest("Test 'predict' with pipeline learner"):
                    self.estimator.estimate_individual_outcome(data["X"], data["a"])
                    self.assertTrue(True)  # Dummy assert for not thrown exception

    def ensure_many_models(self, clip_min=None, clip_max=None):
        """Smoke-test fit/predict across a grid of sklearn classifiers
        (propensity) x regressors (outcome)."""
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
        from sklearn.neural_network import MLPClassifier
        from sklearn.neighbors import KNeighborsClassifier

        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        data = self.create_uninformative_ox_dataset()
        for propensity_learner in [GradientBoostingClassifier(n_estimators=10),
                                   RandomForestClassifier(n_estimators=100),
                                   MLPClassifier(hidden_layer_sizes=(5,)),
                                   KNeighborsClassifier(n_neighbors=20)]:
            weight_model = IPW(propensity_learner, clip_min=clip_min, clip_max=clip_max)
            propensity_learner_name = str(propensity_learner).split("(", maxsplit=1)[0]
            for outcome_learner in [GradientBoostingRegressor(n_estimators=10), RandomForestRegressor(n_estimators=10),
                                    MLPRegressor(hidden_layer_sizes=(5,)),
                                    ElasticNet(), RANSACRegressor(), HuberRegressor(), PassiveAggressiveRegressor(),
                                    KNeighborsRegressor(), SVR(), LinearSVR()]:
                outcome_learner_name = str(outcome_learner).split("(", maxsplit=1)[0]
                outcome_model = Standardization(outcome_learner)

                with self.subTest("Test fit & predict using {} & {}".format(propensity_learner_name,
                                                                            outcome_learner_name)):
                    model = self.estimator.__class__(outcome_model, weight_model)
                    model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
                    model.estimate_individual_outcome(data["X"], data["a"])
                    self.assertTrue(True)  # Fit did not crash
class TestAIPW(TestDoublyRobustBase):
    """Test suite for the AIPW (augmented IPW) doubly-robust estimator,
    covering both the plain and `overlap_weighting=True` variants."""

    @classmethod
    def setUpClass(cls):
        TestDoublyRobustBase.setUpClass()
        # Avoids regularization of the model:
        ipw = IPW(LogisticRegression(C=1e6, solver='lbfgs', max_iter=500), use_stabilized=False)
        std = Standardization(LinearRegression(normalize=True))
        cls.estimator = AIPW(std, ipw)

    def test_uninformative_tx_leads_to_std_like_results(self):
        """AIPW should mimic standardization when treatment is uninformative."""
        with self.subTest("`overlap_weighting=False`"):
            self.ensure_uninformative_tx_leads_to_std_like_results(self.estimator)

        with self.subTest("`overlap_weighting=True`"):
            self.estimator.overlap_weighting = True
            self.ensure_uninformative_tx_leads_to_std_like_results(self.estimator)
            self.estimator.overlap_weighting = False  # restore shared fixture

    def test_uninformative_ox_leads_to_ipw_like_results(self):
        """AIPW should mimic IPW when covariates are uninformative."""
        with self.subTest("`overlap_weighting=False`"):
            self.ensure_uninformative_ox_leads_to_ipw_like_results(self.estimator)

        with self.subTest("`overlap_weighting=True`"):
            self.estimator.overlap_weighting = True
            self.ensure_uninformative_ox_leads_to_ipw_like_results(self.estimator)
            self.estimator.overlap_weighting = False  # restore shared fixture

    def test_is_fitted(self):
        self.ensure_is_fitted(self.estimator)

    def test_data_is_separated_between_models(self):
        self.ensure_data_is_separated_between_models(self.estimator, 1)  # 1 treatment assignment feature

    def test_weight_refitting_refits(self):
        self.ensure_weight_refitting_refits(self.estimator)

    def test_model_combinations_work(self):
        self.ensure_model_combinations_work(AIPW)

    def test_pipeline_learner(self):
        self.ensure_pipeline_learner()

    def test_many_models(self):
        self.ensure_many_models()

    def test_effect_recovery(self):
        """Known synthetic effect should be recovered with either weighting."""
        with self.subTest("`overlap_weighting=False`"):
            self.ensure_effect_recovery()

        with self.subTest("`overlap_weighting=True`"):
            self.estimator.overlap_weighting = True
            self.ensure_effect_recovery()
            self.estimator.overlap_weighting = False  # restore shared fixture

    def test_effect_calculation_against_direct_effect_formula(self):
        """The modular estimate must equal the closed-form AIPW effect formula."""
        from causallib.datasets import load_nhefs
        data = load_nhefs()
        X, a, y = data.X, data.a, data.y
        a = a.astype(float)  # Test the propensity lookup for non-integer values
        estimator = AIPW(
            self.estimator.outcome_model, self.estimator.weight_model,
            overlap_weighting=True,
        )
        estimator.fit(X, a, y)
        # Estimate the effect from the model:
        effect_from_model = estimator.estimate_population_outcome(X, a, y)  # type:pd.Series
        effect_from_model = estimator.estimate_effect(effect_from_model[1], effect_from_model[0])["diff"]
        # Estimate the effect manually using the direct-effect formula
        # (not mitigated by counterfactual outcomes)
        ps = estimator.weight_model.learner.predict_proba(X)[:, 1]
        y_pred = estimator.outcome_model.estimate_individual_outcome(X, a)
        ey0, ey1 = y_pred[0].values, y_pred[1].values
        effect_from_formula = np.mean(
            ((a*y)/ps - (1-a)*y/(1-ps))  # IPW
            - (a-ps)/(ps*(1-ps)) * ((1 - ps)*ey1 + ps*ey0)  # Correction
        )
        np.testing.assert_allclose(effect_from_formula, effect_from_model)

    def test_binary_outcome_effect_recovery(self):
        """Binary-outcome ATE should match the propensity-based ground truth."""
        from causallib.tests.test_tmle import generate_data
        data = generate_data(1100, 2, 0, seed=0)
        data['y'] = data['y_bin']

        for overlap_weights in [False, True]:
            estimator = AIPW(
                Standardization(LogisticRegression(), predict_proba=True),
                IPW(LogisticRegression()),
                overlap_weighting=overlap_weights,
            )
            estimator.fit(data['X'], data['a'], data['y'])
            pop_outcomes = estimator.estimate_population_outcome(data['X'], data['a'], data['y'])
            effect = estimator.estimate_effect(pop_outcomes[1], pop_outcomes[0])['diff']
            np.testing.assert_allclose(
                data["y_propensity"][data['a'] == 1].mean() - data["y_propensity"][data['a'] == 0].mean(),
                effect,
                atol=0.1,
            )

    def test_multiple_treatments_error(self):
        """`overlap_weighting=True` supports binary treatment only, so fitting
        with a 3-class treatment must raise an AssertionError."""
        estimator = AIPW(
            self.estimator.outcome_model, self.estimator.weight_model,
            overlap_weighting=True,
        )
        data = self.create_uninformative_tx_dataset()
        data["a"].iloc[-data["a"].shape[0] // 4:] += 1  # Create a dummy third class
        with self.assertRaises(AssertionError):
            # Bug fix: the outcome argument was previously `data["a"]` (the
            # treatment vector passed twice); pass the actual outcome so the
            # failure can only stem from the 3-class treatment.
            estimator.fit(data["X"], data["a"], data["y"])
class TestWeightedStandardization(TestDoublyRobustBase):
    """Test suite for the WeightedStandardization doubly-robust estimator."""

    @classmethod
    def setUpClass(cls):
        TestDoublyRobustBase.setUpClass()
        # Avoids regularization of the model:
        weight_model = IPW(LogisticRegression(C=1e6, solver='lbfgs'), use_stabilized=False)
        outcome_model = Standardization(LinearRegression(normalize=True))
        cls.estimator = WeightedStandardization(outcome_model, weight_model)

    def test_uninformative_tx_leads_to_std_like_results(self):
        self.ensure_uninformative_tx_leads_to_std_like_results(self.estimator)

    def test_uninformative_ox_leads_to_ipw_like_results(self):
        self.ensure_uninformative_ox_leads_to_ipw_like_results(self.estimator)

    def test_is_fitted(self):
        self.ensure_is_fitted(self.estimator)

    def test_data_is_separated_between_models(self):
        self.ensure_data_is_separated_between_models(self.estimator, 1)  # 1 treatment assignment feature

    def test_weight_refitting_refits(self):
        self.ensure_weight_refitting_refits(self.estimator)

    def test_model_combinations_work(self):
        self.ensure_model_combinations_work(WeightedStandardization)

    def test_pipeline_learner(self):
        self.ensure_pipeline_learner()

    def test_many_models(self):
        """Smoke-test fitting across sklearn learners, split by whether the
        outcome learner accepts `sample_weight` in its fit method."""
        from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
        from sklearn.neural_network import MLPRegressor
        from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
        from sklearn.neighbors import KNeighborsRegressor
        from sklearn.svm import SVR, LinearSVR

        from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
        from sklearn.neural_network import MLPClassifier
        from sklearn.neighbors import KNeighborsClassifier

        from sklearn.exceptions import ConvergenceWarning
        warnings.filterwarnings('ignore', category=ConvergenceWarning)

        def learner_name(learner):
            # Human-readable class name for the subTest message
            return str(learner).split("(", maxsplit=1)[0]

        data = self.create_uninformative_ox_dataset()

        classifiers = [GradientBoostingClassifier(n_estimators=10),
                       RandomForestClassifier(n_estimators=100),
                       MLPClassifier(hidden_layer_sizes=(5,)),
                       KNeighborsClassifier(n_neighbors=20)]
        for classifier in classifiers:
            weight_model = IPW(classifier)
            classifier_name = learner_name(classifier)

            # These regressors support sample_weight, so fitting should succeed:
            for regressor in [GradientBoostingRegressor(n_estimators=10),
                              RandomForestRegressor(n_estimators=10),
                              RANSACRegressor(), HuberRegressor(), SVR(), LinearSVR()]:
                with self.subTest("Test fit using {} & {}".format(classifier_name, learner_name(regressor))):
                    model = self.estimator.__class__(Standardization(regressor), weight_model)
                    model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)
                    self.assertTrue(True)  # Fit did not crash

            # These regressors lack sample_weight support, so fitting must fail:
            for regressor in [MLPRegressor(hidden_layer_sizes=(5,)),
                              # ElasticNet(),  # supports sample_weights since v0.23, remove to support v<0.23
                              PassiveAggressiveRegressor(), KNeighborsRegressor()]:
                with self.subTest("Test fit using {} & {}".format(classifier_name, learner_name(regressor))):
                    model = self.estimator.__class__(Standardization(regressor), weight_model)
                    with self.assertRaises(TypeError):
                        # Joffe forces learning with sample_weights,
                        # not all ML models support that and so calling should fail
                        model.fit(data["X"], data["a"], data["y"], refit_weight_model=False)

    def test_effect_recovery(self):
        self.ensure_effect_recovery()
class TestPropensityFeatureStandardization(TestDoublyRobustBase):
@classmethod
def setUpClass(cls):
TestDoublyRobustBase.setUpClass()
# Avoids regularization of the model:
ipw = IPW(LogisticRegression(C=1e6, solver='lbfgs'), use_stabilized=False)
std = Standardization(LinearRegression(normalize=True))
cls.estimator = PropensityFeatureStandardization(std, ipw)
def fit_and_predict_all_learners(self, data, estimator):
X, a, y = data["X"], data["a"], data["y"]
self.estimator.fit(X, a, y)
doubly_res = self.estimator.estimate_population_outcome(X, a)
std_res = Standardization(LinearRegression(normalize=True)).fit(X, a, y).estimate_population_outcome(X, a)
ipw_res = | |
= y*tan(dec)
if not keys:
keys = ['x','y','z']
if not len(keys) == 3:
logger.error('get_rotation: provided keylist need to have three components.')
return stream #self
logger.info('get_rotation: Determining rotation angle towards a magnetic coordinate system assuming z to be vertical down.')
ind1 = KEYLIST.index(keys[0])
ind2 = KEYLIST.index(keys[1])
ind3 = KEYLIST.index(keys[2])
if len(self.ndarray[0]) > 0:
if len(self.ndarray[ind1]) > 0 and len(self.ndarray[ind2]) > 0 and len(self.ndarray[ind3]) > 0:
# get mean disregarding nans
xl = [el for el in self.ndarray[ind1] if not np.isnan(el)]
yl = [el for el in self.ndarray[ind2] if not np.isnan(el)]
if annualmeans:
meanx = annualmeans[0]
else:
meanx = np.mean(xl)+xcompensation
meany = np.mean(yl)
# get rotation angle so that meany == 0
#print ("Rotation",meanx, meany)
#zeroy = meanx*np.sin(ra)+meany*np.cos(ra)
#-meany/meanx = np.tan(ra)
rotangle = np.arctan2(-meany,meanx) * (180.) / np.pi
logger.info('getrotation: Rotation angle determined: {} deg'.format(rotangle))
return rotangle
def get_sampling_period(self):
"""
returns the dominant sampling frequency in unit ! days !
for time savings, this function only tests the first 1000 elements
"""
# For proper applictation - duplicates are removed
self = self.removeduplicates()
if len(self.ndarray[0]) > 0:
timecol = self.ndarray[0].astype(float)
else:
timecol= self._get_column('time')
# New way:
if len(timecol) > 1:
diffs = np.asarray(timecol[1:]-timecol[:-1])
diffs = diffs[~np.isnan(diffs)]
me = np.median(diffs)
st = np.std(diffs)
diffs = [el for el in diffs if el <= me+2*st and el >= me-2*st]
return np.median(diffs)
else:
return 0.0
"""
timedifflist = [[0,0]]
timediff = 0
if len(timecol) <= 1000:
testrange = len(timecol)
else:
testrange = 1000
print "Get_sampling_rate", np.asarray(timecol[1:]-timecol[:-1])
print "Get_sampling_rate", np.median(np.asarray(timecol[1:]-timecol[:-1]))*3600.*24.
for idx, val in enumerate(timecol[:testrange]):
if idx > 1 and not isnan(val):
timediff = np.round((val-timeprev),7)
found = 0
for tel in timedifflist:
if tel[1] == timediff:
tel[0] = tel[0]+1
found = 1
if found == 0:
timedifflist.append([1,timediff])
timeprev = val
#print self
if not len(timedifflist) == 0:
timedifflist.sort(key=lambda x: int(x[0]))
# get the most often found timediff
domtd = timedifflist[-1][1]
else:
logger.error("get_sampling_period: unkown problem - returning 0")
domtd = 0
if not domtd == 0:
return domtd
else:
try:
return timedifflist[-2][1]
except:
logger.error("get_sampling_period: could not identify dominant sampling rate")
return 0
"""
def samplingrate(self, **kwargs):
"""
DEFINITION:
returns a rounded value of the sampling rate
in seconds
and updates the header information
"""
# XXX include that in the stream reading process....
digits = kwargs.get('digits')
notrounded = kwargs.get('notrounded')
if not digits:
digits = 1
if not self.length()[0] > 1:
return 0.0
sr = self.get_sampling_period()*24*3600
unit = ' sec'
val = sr
# Create a suitable rounding function:
# Use simple rounds if sr > 60 secs
# Check accuracy for sr < 10 secs (three digits:
# if abs(sr-round(sr,0)) * 1000 e.g. (1.002 -> 2, 0.998 -> 2)
if sr < 0.05:
for i in range(0,5):
multi = 10**i
srfloor = np.floor(sr*multi)
if srfloor >= 1:
# found multiplicator
# now determine significance taking into account three more digits
digs = np.floor(np.abs(sr*multi-srfloor)*1000)
if digs<5: # round to zero
val = np.round(srfloor/multi,1)
else:
val = np.round(sr,5)
break
elif sr < 59:
for i in range(0,3):
multi = 10**i
srfloor = np.floor(sr*multi)
if srfloor >= 1:
# found multiplicator
# now determine significance taking into account three more digits
digs = np.floor(np.abs(sr*multi-srfloor)*1000)
if digs<5: # round to zero
val = np.round(srfloor/multi,1)
else:
val = np.round(sr,3)
break
else:
val = np.round(sr,1)
"""
if np.round(sr*10.,0) == 0:
val = np.round(sr,2)
#unit = ' Hz'
elif np.round(sr,0) == 0:
if 0.09 < sr < 0.11:
val = np.round(sr,digits)
else:
val = np.round(sr,2)
#unit = ' Hz'
else:
val = np.round(sr,0)
"""
if notrounded:
val = sr
self.header['DataSamplingRate'] = str(val) + unit
return val
    def integrate(self, **kwargs):
        """
        DESCRIPTION:
            Method to integrate selected columns with respect to time.
            -- Using scipy.integrate.cumtrapz (cumulative trapezoidal rule).
            The integral of column 'x' is stored in column 'dx', etc.
            NOTE(review): cumtrapz is deprecated in newer SciPy in favour of
            cumulative_trapezoid -- confirm the pinned SciPy version before
            changing.
        VARIABLES:
            optional:
            keys: (list) limited key-list to integrate; default ['x','y','z']
        RETURNS:
            self, with integrated values placed in the corresponding
            'd'+key columns.
        """
        logger.info('--- Integrating started at %s ' % str(datetime.now()))

        keys = kwargs.get('keys')
        if not keys:
            keys = ['x','y','z']

        # Fresh column container; time column is carried over when ndarray data exists
        array = [[] for key in KEYLIST]
        ndtype = False
        if len(self.ndarray[0])>0:
            ndtype = True
            t = self.ndarray[0]
            array[0] = t
        else:
            t = self._get_column('time')
        for key in keys:
            if ndtype:
                ind = KEYLIST.index(key)
                val = self.ndarray[ind]
                array[ind] = np.asarray(val)
            else:
                val = self._get_column(key)
            dval = sp.integrate.cumtrapz(np.asarray(val),t)
            dval = np.insert(dval, 0, 0) # Prepend 0 to maintain original length
            if ndtype:
                ind = KEYLIST.index('d'+key)
                array[ind] = np.asarray(dval)
            else:
                self._put_column(dval, 'd'+key)
        # NOTE(review): this overwrite also runs in the non-ndarray branch,
        # where `array` holds only empty columns -- presumably harmless there
        # since ndarray was empty to begin with, but verify against callers.
        self.ndarray = np.asarray(array)
        logger.info('--- integration finished at %s ' % str(datetime.now()))
        return self
def interpol(self, keys, **kwargs):
"""
DEFINITION:
Uses Numpy interpolate.interp1d to interpolate streams.
PARAMETERS:
Variables:
- keys: (list) List of keys to interpolate.
Kwargs:
- kind: (str) type of interpolation. Options:
linear = linear - Default
slinear = spline (first order)
quadratic = spline (second order)
cubic = spline (third order)
nearest = ?
zero = ?
(TODO: add these?)
- timerange: (timedelta object) default=timedelta(hours=1).
- fitdegree: (float) default=4.
- knotstep: (float < 0.5) determines the amount of knots:
amount = 1/knotstep ---> VERY smooth 0.1 | NOT VERY SMOOTH 0.001
RETURNS:
- func: (list) Contains the following:
list[0]: (dict) {'f+key': interpolate function}
list[1]: (float) date2num value of minimum timestamp
list[2]: (float) date2num value of maximum timestamp
EXAMPLE:
>>> int_data = pos_data.interpol(['f'])
APPLICATION:
"""
kind = kwargs.get('kind')
if not kind:
kind = 'linear'
if kind not in ['linear','slinear','quadratic','cubic','nearest','zero']:
logger.warning("interpol: Interpolation kind %s not valid. Using linear interpolation instead." % kind)
kind = 'linear'
ndtype = False
if len(self.ndarray[0]) > 0:
t = self.ndarray[0]
ndtype = True
else:
t = self._get_column('time')
nt,sv,ev = self._normalize(t)
sp = self.get_sampling_period()
functionkeylist = {}
logger.info("interpol: Interpolating stream with %s interpolation." % kind)
for key in keys:
if not key in NUMKEYLIST:
logger.error("interpol: Column key not valid!")
if ndtype:
ind = KEYLIST.index(key)
val = self.ndarray[ind].astype(float)
else:
val = self._get_column(key)
# interplolate NaN values
nans, xxx= nan_helper(val)
try: # Try to interpolate nan values
val[nans]= np.interp(xxx(nans), xxx(~nans), val[~nans])
except:
#val[nans]=int(nan)
pass
if len(val)>1:
exec('f'+key+' = interpolate.interp1d(nt, val, kind)')
exec('functionkeylist["f'+key+'"] = f'+key)
else:
logger.warning("interpol: interpolation of zero length data set - wont work.")
pass
logger.info("interpol: Interpolation complete.")
func = [functionkeylist, sv, ev]
return func
def interpolate_nans(self, keys):
""""
DEFINITION:
Provides a simple linear nan interpolator that returns the interpolated
data in the stream. Uses method that is already present elsewhere, e.g.
in filter, for easy and quick access.
PARAMETERS:
- keys: List of keys to interpolate.
RETURNS:
- stream: Original stream with nans replaced by linear interpolation.
"""
for key in keys:
if key not in NUMKEYLIST:
logger.error("interpolate_nans: {} is an invalid key! Cannot interpolate.".format(key))
y = self._get_column(key)
nans, x = nan_helper(y)
y[nans] = np.interp(x(nans), x(~nans), y[~nans])
self._put_column(y, key)
logger.info("interpolate_nans: Replaced nans in {} with linearly interpolated values.".format(key))
return self
def k_extend(self, **kwargs):
"""
DESCRIPTION:
Extending the k_scale from 9 to 28 values as used for the GFZ kp value
"""
k9_level = kwargs.get('k9_level')
if not k9_level:
if 'StationK9' in self.header:
# 1. Check header info
k9_level = self.header['StationK9']
else:
# 2. Set Potsdam default
k9_level = 500
fortscale = [0,7.5,15,30,60,105,180,300,495,750]
k_scale = [float(k9_level)*elem/750.0 for elem in fortscale]
newlst = []
klst = [0.,0.33,0.66,1.,1.33,1.66,2.,2.33,2.66,3.,3.33,3.66,4.,4.33,4.66,5.,5.33,5.66,6.,6.33,6.66,7.,7.33,7.66,8.,8.33,8.66,9.]
for idx,elem in enumerate(k_scale):
if idx > 0:
diff = elem - k_scale[idx-1]
newlst.append(elem-2*diff/3)
newlst.append(elem-diff/3)
newlst.append(elem)
indvar1 = KEYLIST.index('var1')
indvar2 = KEYLIST.index('var2')
ar = []
for elem in self.ndarray[indvar2]:
for count,val in enumerate(newlst):
if elem > val:
k = klst[count]
ar.append(k)
self.ndarray[indvar1] = np.asarray(ar)
return self
def k_fmi(self, **kwargs):
"""
DESCRIPTION:
Calculating k values following the fmi approach. The method uses three major steps:
Firstly, the record is eventually filtered to minute data, outliers are removed
(using default options) and gaps are interpolated. Ideally, these steps have been
contucted before, which allows for complete control of these steps.
Secondly, the last 27 hours are investigated. Starting from the last record, the last
three hour segment is taken and the fmi approach is applied. Finally, the provided
stream is analyzed from the beginning. Definite values are thus produced for the
previous | |
complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
SDS score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
"""
score_name = "SDS"
score_range = [1, 8]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 4)
_assert_value_range(data, score_range)
# Invert scores
data = invert(data, score_range=score_range, cols=to_idx([2, 4]))
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
def tb(
    data: pd.DataFrame,
    columns: Optional[Union[Sequence[str], pd.Index]] = None,
    subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
    """Compute the **Technology Commitment Questionnaire (TB – Technologiebereitschaft)**.

    Subscales and their item indices (count-by-one, i.e. the first question has index 1):

    * Technology Acceptance (Technikakzeptanz – ``TechAcc``): [1, 2, 3, 4]
    * Technology Competence Beliefs (Technikkompetenzüberzeugungen – ``TechComp``): [5, 6, 7, 8]
    * Technology Control Beliefs (Technikkontrollüberzeugungen – ``TechControl``): [9, 10, 11, 12]

    .. note::
        This implementation assumes a score range of [1, 5]. Use
        :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items beforehand if needed.

    .. warning::
        Column indices in ``subscales`` start at 1 (not 0), matching the usual
        numbering of questionnaire item columns.

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        dataframe with questionnaire data; either only the relevant item columns or a
        complete dataframe together with the ``columns`` parameter.
    columns : list of str or :class:`pandas.Index`, optional
        column names, in order, when ``data`` is a complete dataframe.
    subscales : dict, optional
        subscale names (keys) mapped to column names or count-by-1 column indices (values)
        if only specific subscales should be computed.

    Returns
    -------
    :class:`~pandas.DataFrame`
        TB score

    Raises
    ------
    ValueError
        if ``subscales`` is supplied and dict values are something else than a list of strings or ints
    :exc:`~biopsykit.utils.exceptions.ValidationError`
        if number of columns does not match
    :exc:`~biopsykit.utils.exceptions.ValueRangeError`
        if values are not within the required score range

    References
    ----------
    <NAME>., <NAME>., & <NAME>. (2016). Kurzskala. Technikbereitschaft (TB)[Technology commitment].
    In *ZIS-Zusammenstellung sozialwissenschaftlicher Items und Skalen (ed.)*.
    """
    score_name = "TB"
    score_range = [1, 5]

    # work on a copy so the caller's dataframe is never modified
    data = data.copy()

    if columns is not None:
        # slice the relevant questionnaire columns out of a wider dataframe
        _assert_has_columns(data, [columns])
        data = data.loc[:, columns]

    if subscales is None:
        _assert_num_columns(data, 12)
        subscales = {
            "TechAcc": [1, 2, 3, 4],
            "TechComp": [5, 6, 7, 8],
            "TechControl": [9, 10, 11, 12],
        }

    _assert_value_range(data, score_range)

    # Reverse-code items 5-8, i.e. all four items of "TechComp"
    # (the idx_dict values are positions within the subscale's item list)
    data = _invert_subscales(
        data, subscales=subscales, idx_dict={"TechComp": [0, 1, 2, 3]}, score_range=score_range
    )

    out = pd.DataFrame(_compute_questionnaire_subscales(data, score_name, subscales), index=data.index)

    # the total score is only defined when the full 12-item questionnaire is present
    if len(data.columns) == 12:
        out[score_name] = out.sum(axis=1)

    return out
def asku(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
    """Compute the **Allgemeine Selbstwirksamkeit Kurzskala (ASKU)**.

    Three-item short scale; the score is the mean of the three items.

    .. note::
        This implementation assumes a score range of [1, 5]. Use
        :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items beforehand if needed.

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        dataframe with questionnaire data; either exactly the three ASKU items or a
        complete dataframe together with the ``columns`` parameter.
    columns : list of str or :class:`pandas.Index`, optional
        names of the three ASKU columns, in order, when ``data`` is a complete dataframe.

    Returns
    -------
    :class:`~pandas.DataFrame`
        ASKU score

    Raises
    ------
    :exc:`~biopsykit.utils.exceptions.ValidationError`
        if number of columns does not match
    :exc:`~biopsykit.utils.exceptions.ValueRangeError`
        if values are not within the required score range

    References
    ----------
    <NAME>., <NAME>., <NAME>., & <NAME>. (2014). Allgemeine Selbstwirksamkeit Kurzskala (ASKU).
    In *Zusammenstellung sozialwissenschaftlicher Items und Skalen*.
    """
    score_name = "ASKU"
    score_range = [1, 5]

    data = data.copy()  # never mutate the caller's dataframe

    if columns is not None:
        # slice the three relevant items out of a wider dataframe
        _assert_has_columns(data, [columns])
        data = data.loc[:, columns]

    _assert_num_columns(data, 3)
    _assert_value_range(data, score_range)

    # scale score = item mean
    asku_score = data.mean(axis=1)
    return pd.DataFrame(asku_score, columns=[score_name])
def wpi(
    data: pd.DataFrame,
    columns: Optional[Union[Sequence[str], pd.Index]] = None,
    subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
    """Compute the **Wiener Patientenzufriedenheitsinventar (WPI)**.

    It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):

    * Access to Treatment (Zugang zur Behandlung – ``AccessTreatment``): [2, 3, 4, 5, 6]
    * Staff Competence (Kompetenz des Personals – ``StaffCompetence``): [11, 12]
    * Effectiveness of Treatment (Wirksamkeit der Behandlung – ``EffectTreatment``): [22, 23, 24]
    * Station Equipment (Stationsaustattung – ``StationEquipment``): [8, 9, 10]
    * Staff-Patient Relationship (Personal-Patientenbeziehung – ``Relation``): [7, 15, 16, 17, 18, 19, 21]
    * Information about and Influence on Disease (``Information``): [13, 14, 20]
    * Overall Satisfaction (Insgesamte Zufriedenheit – ``OverallSatisfaction``): [1]
    * Special Treatment Interventions (``TreatmentInterventions``): [25, 28, 29, 30, 31]
    * Education about Medications (Aufklärung über Medikamente – ``Education``): [26, 27]
    * Psychosocial Support Offer (Psychosoziales Unterstützungsangebot – ``Support``): [32, 33, 34, 35]

    .. note::
        This implementation assumes a score range of [1, 4] (for items 1-24) and [1, 5] (for items 25-35).
        Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
        beforehand.

    .. warning::
        Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
        questionnaire item columns, which typically also start with index 1!

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
        a complete dataframe if ``columns`` parameter is supplied.
    columns : list of str or :class:`pandas.Index`, optional
        list with column names in correct order.
        This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
        passed as ``data``.
    subscales : dict, optional
        A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
        if only specific subscales should be computed.

    Returns
    -------
    :class:`~pandas.DataFrame`
        WPI score

    Raises
    ------
    ValueError
        if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
    :exc:`~biopsykit.utils.exceptions.ValidationError`
        if number of columns does not match
    :exc:`~biopsykit.utils.exceptions.ValueRangeError`
        if values are not within the required score range

    References
    ----------
    <NAME>., <NAME>., & <NAME>. (2018). *WPI-Wiener Patientenzufriedenheitsinventar*.
    """
    score_name = "WPI"

    # create copy of data
    data = data.copy()

    if columns is not None:
        # if columns parameter is supplied: slice columns from dataframe
        _assert_has_columns(data, [columns])
        data = data.loc[:, columns]

    if subscales is None:
        _assert_num_columns(data, 35)
        subscales = {
            "AccessTreatment": [2, 3, 4, 5, 6],
            "StaffCompetence": [11, 12],
            "EffectTreatment": [22, 23, 24],
            "StationEquipment": [8, 9, 10],
            "Relation": [7, 15, 16, 17, 18, 19, 21],
            "Information": [13, 14, 20],
            "OverallSatisfaction": [1],
            "TreatmentInterventions": [25, 28, 29, 30, 31],
            "Education": [26, 27],
            "Support": [32, 33, 34, 35],
        }

    # expected value range per default subscale (items 1-24: [1, 4]; items 25-35: [1, 5])
    score_ranges = {
        "AccessTreatment": [1, 4],
        "StaffCompetence": [1, 4],
        "EffectTreatment": [1, 4],
        "StationEquipment": [1, 4],
        "Relation": [1, 4],
        "Information": [1, 4],
        "OverallSatisfaction": [1, 4],
        "TreatmentInterventions": [1, 5],
        "Education": [1, 5],
        "Support": [1, 5],
    }

    for subscale, subscale_idx in subscales.items():
        score_range = score_ranges.get(subscale)
        if score_range is None:
            # Fix: custom subscale names previously raised KeyError here.
            # Infer the range from the item indices: items >= 25 are rated on
            # [1, 5], all others on [1, 4]; a mixed subscale is checked against
            # the permissive range [1, 5].
            int_idx = [i for i in subscale_idx if isinstance(i, int)]
            score_range = [1, 5] if any(i >= 25 for i in int_idx) else [1, 4]
        _assert_value_range(data.iloc[:, to_idx(subscale_idx)], score_range)

    wpi_data = _compute_questionnaire_subscales(data, score_name, subscales, agg_type="mean")
    return pd.DataFrame(wpi_data, index=data.index)
def eval_clinic(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Evaluation of the Current Clinic Stay Questionnaire (EvalClinic)**.
.. note::
This implementation assumes a score range of [1, 6].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire | |
########################################################################
#
# Date: 2013 Authors: <NAME>, <NAME>
#
# <EMAIL>
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: <NAME> and TSRI 2013
#
#########################################################################
#
# $Header: /opt/cvs/AutoDockFR/gridScorer.py,v 1.22 2014/08/18 21:36:04 pradeep Exp $
#
# $Id: gridScorer.py,v 1.22 2014/08/18 21:36:04 pradeep Exp $
#
from AutoDockFR.trilinterp import trilinterp
import os.path
from time import time
from bhtree import bhtreelib
from math import sqrt
from MolKit.molecule import AtomSet
import numpy
class GridScorer:
"""
Scorer using affinity grids
"""
    def __init__(self, mapFileList):
        """Build a grid scorer from a list of AutoGrid affinity map files.

        mapFileList -- filenames of the form <name>.<atomType>.map; the atom
        type is parsed from the second-to-last filename extension.
        """
        self.atypes = {}      # setName -> {atomType: [atom indices]}
        self.charge = {}      # setName -> list of partial charges
        self.abscharge = {}   # setName -> list of absolute partial charges
        # read autogrid maps we assume the maps exist and are name 1abf_rec.C.map
        from Volume.IO.AutoGridReader import ReadAutoGrid
        reader = ReadAutoGrid()
        maps = {}
        self.mapFileList = mapFileList
        self.maxValues = {} # atomType: maxValue in grid
        for mapName in mapFileList:
            # e.g. "1abf_rec.C.map" -> atype "C"
            atype = os.path.splitext(os.path.splitext(mapName)[0])[1][1:]
            print atype, mapName
            maps[atype] = reader.read(mapName, 0)
            self.maxValues[atype] = max(maps[atype].data.flatten())
        self.maxGridVal = max(self.maxValues.values())
        self.maps = maps
        # grid geometry is taken from the electrostatics map ('e'); it is
        # assumed to share origin/spacing/shape with all other maps
        self.gridOrigin = ox, oy, oz = maps['e'].origin
        self.spacing = sx, sy, sz = maps['e'].stepSize
        nbptx, nbpty, nbptz = maps['e'].data.shape
        sizeX, sizeY, sizeZ = self.boxDim = ((nbptx-1)*sx, (nbpty-1)*sy, (nbptz-1)*sz)
        self.gridEnd = (ox+sizeX, oy+sizeY, oz+sizeZ)
        self.inv_spacing = [1./x for x in maps['e'].stepSize]
        self.nbPoints = nbptx*nbpty*nbptz
def addAtomSet(self, atoms, setName):
# find all atom types in ligand and
# build list of atom indices for each type
atypes = {}.fromkeys(atoms.autodock_element)
for k in atypes.keys():
atypes[k] = []
# put atom indices into atom type list i.e. 'c': [0, 2,5,7,8]
for i, a in enumerate(atoms):
atypes[a.autodock_element].append(i)
self.atypes[setName] = atypes
self.charge[setName] = atoms.charge
self.abscharge[setName] = [abs(x) for x in self.charge[setName]]
    def getSurface(self, dockingObject):
        """
        identify MSMS surface components that is surrounding the ligand

        Computes the reduced surface of the rigid receptor atoms, picks the
        surface component closest to the ligand, triangulates it, and (when
        'rmsdRecRef' is set) checks that all moving receptor side chains lie
        inside that cavity (raises ValueError otherwise).
        Returns (verts, normals) of the triangulated component.
        """
        import mslib
        ats = dockingObject.rigidRecAtoms
        radii = ats[0].top.defaultRadii()
        coords = ats.coords
        srf = mslib.MSMS(coords=coords, radii=radii)
        # compute reduced surface for all components
        srf.compute_rs(probe_radius=1.5, allComponents=1)
        # find the component closest to the ligand
        # 1 - get vertices for each component
        comp = srf.rsr.fst
        compVerts = []
        allRSv = {}
        # walk the linked list of reduced-surface components, collecting the
        # receptor-atom coordinates referenced by each component's faces
        while comp:
            print comp.nbf
            face = comp.ffa
            vd = {}
            while face:
                a, b, c = face._s()
                vd[a] = coords[a]
                vd[b] = coords[b]
                vd[c] = coords[c]
                face = face.nxt
            allRSv.update(vd)
            comp = comp.nxt
            compVerts.append(vd)
        # find smallest distance from ligand atom to RSVertex
        lig = dockingObject.ligand
        ligAtomsCoords = lig.allAtoms.coords
        vertInd = allRSv.keys()
        vertInd.sort()
        rsvCoords = []
        for ind in vertInd:
            rsvCoords.append(allRSv[ind])
        bht = bhtreelib.BHtree( rsvCoords, None, 10)
        results = numpy.zeros(5000, 'i')
        dist2 = numpy.zeros(5000, 'f')
        mini = 10000
        minInd = None
        # closest RS vertex to any ligand atom (4.0 A search radius per atom)
        for ligAtCoord in ligAtomsCoords:
            nb = bht.closePointsDist2(tuple(ligAtCoord), 4.0, results, dist2)
            for ind, d2 in zip(results[:nb], dist2[:nb]):
                if d2 < mini:
                    mini = d2
                    minInd = ind
        minInd = vertInd[minInd]
        # find the components that contain minInd
        comps = []
        for i in range(len(compVerts)):
            if minInd in compVerts[i].keys():
                comps.append(i)
        print 'closest receptor atom', minInd, allRSv[minInd], ats[minInd], ats[minInd].coords, comps
        if len(comps)>1:
            # use the largest one ! . this might not always be right !
            maxi = 0
            for c in comps:
                if len(compVerts[c])> maxi:
                    comp = c
                    maxi = len(compVerts[c])
            print "WARNING %d components found %s using largest one %d"%(
                len(comps), comps, comp)
        else:
            comp = comps[0]
        print 'Using component %d of the molecular surface'%comp
        # triangulate only the selected component
        srf.compute_ses(component=comp)
        srf.triangulate(component=comp, density=6.0)
        vf, vi, f = srf.getTriangles()
        verts = vf[:, :3]
        normals = vf[:, 3:6]
        # exclude vertices on edges of analytical surface to avoid
        # singular vertices with bad normals
        bhverts = []
        bhnormals = []
        for i in xrange(len(vf)):
            if vi[i][0] >= 0:
                bhverts.append(verts[i])
                bhnormals.append(normals[i])
        # verify that moving receptor atoms are in the cavity
        if dockingObject.setting['rmsdRecRef']:
            bhts = bhtreelib.BHtree( bhverts, None, 10)
            Aresults = numpy.zeros(len(bhverts), 'i')
            Adist2 = numpy.zeros(len(bhverts), 'f')
            movingRecAtoms = dockingObject.sortedRecRefAts
            #outsideAtoms = []
            for atom in movingRecAtoms:
                if atom.element=='H':
                    # hydrogens are ignored by the in-pocket test below
                    atom.outside = None
                    continue
                pt = atom.coords
                cut = 2.0
                nb = 0
                # grow the search radius until at least one surface vertex is found
                while nb==0:
                    nb = bhts.closePointsDist2(tuple(pt), cut, Aresults, Adist2)
                    if nb == 0:
                        cut += 1.0
                closestSurfInd = Aresults[numpy.argmin(Adist2[:nb])]
                clSP = bhverts[closestSurfInd]
                clN = bhnormals[closestSurfInd]
                # vector for surface to atom
                v = [ pt[0]-clSP[0], pt[1]-clSP[1], pt[2]-clSP[2]]
                # dot product of surface normal with v
                dot = clN[0]*v[0] + clN[1]*v[1] + clN[2]*v[2]
                if dot < 0:
                    # implments that only atoms more the 1.0 outside surface
                    # make the side chain be rejected
                    #n = sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2])
                    #print 'ATOM outside cavity ', atom.full_name(), 'by', n
                    #if n>1.0:
                    #    outsideAtoms.append(atom)
                    # tag atom as outside
                    atom.outside = True
                else:
                    atom.outside= False
            #import pdb
            #pdb.set_trace()
            from MolKit.protein import ResidueSet
            notInPocket = ResidueSet([])
            # a residue is "not in pocket" when every heavy atom is outside
            for res in movingRecAtoms.parent.uniq():
                nba = 0
                nbaout = 0
                for a in movingRecAtoms:
                    if a.parent != res: continue
                    if a.element != 'H':
                        nba +=1 # one more atom in this residue
                        #print a.name
                        if a.outside:
                            #print "OUT",a.name
                            nbaout +=1
                    del a.outside
                #print 'AAAA', res.name, nba, nbaout
                if nbaout==nba: # all are out
                    notInPocket.append(res)
            if len(notInPocket):
                print 'Side chain atoms for residue are not in pocket', notInPocket.full_name()
                raise ValueError
            #if len(outsideAtoms):
            #    print 'Residue(s) are not in pocket, please remove from flexRec'
            #    for res in AtomSet(outsideAtoms).parent.uniq():
            #        #if len(res.atoms) == len(outsideAtoms):
            #        print "    ", res.name
            #        raise ValueError
        return verts, normals
def fixTranslation(self, dockingObject, fixMaps=False):
##
## limit translations to favorable grid points
##
name = dockingObject.setting['Receptor']
name = os.path.splitext(os.path.basename(name))[0]
## FIXME .. we can;t be sure ligandTree.root.motion.motionList[1]
## is the boxtranslation
# get handle to translation and boxTranslation motion objects
boxTrans = dockingObject.gnm.ligandTree.root.motion.motionList[1]
# we configure the size of the boxTrans box to match the AutoDockGrid so that
# good genes that percents of the box size will be correct
boxTrans.configure(boxDim=self.boxDim)
# get affinity map for root atom
rootAtom = dockingObject.ligand.ROOT
rootMap = self.maps[rootAtom.autodock_element].data
mapECutOff = dockingObject.setting['mapECutOff']
if fixMaps:
maps = self.maps
ox, oy, oz = self.gridOrigin
sx, sy, sz = maps['e'].stepSize
nbptx, nbpty, nbptz = maps['e'].data.shape
sizeX, sizeY, sizeZ = self.boxDim
goodPtsGene = []
goodc = []
coords = []
cut = 0.0
print 'reducing box'
###import numpy
result = numpy.zeros( (500,), 'i' )
dist2 = numpy.zeros( (500,), 'f' )
bht = dockingObject.receptorBht
# get parameters of search box to select points in AD grids that
# fall into search box adn are not too close to the protein
# search box edge lengths
# AutoGrid map parameter
sbdimx, sbdimy, sbdimz = self.boxDim
sbox, sboy, sboz = ox, oy, oz
sbex, sbey, sbez = ox+sbdimx, oy+sbdimy, oz+sbdimz
if dockingObject.setting['useXmlBox']: # only take grid points that fall inside the ligand's
# search box size
sbdimx, sbdimy, sbdimz = boxTrans.boxDim # translation box
# search box center
sbcenterx, sbcentery, sbcenterz = trans.point2
# search box origin
sbox, sboy, sboz = sbcenterx-(sbdimx*.5), sbcentery-(sbdimy*.5), sbcenterz-(sbdimz*.5)
# search box end
sbex, sbey, sbez = sbcenterx+(sbdimx*.5), sbcentery+(sbdimy*.5), sbcenterz+(sbdimz*.5)
t0 = time()
inBoxPtsCounter = 0
#import pdb
#pdb.set_trace()
mini = 0.0
maxi = 1.0
removedPtsInd = []
keptijk = []
for i in range(nbptx+1):
x = ox+i*sx
if x<sbox or x>sbex: continue
for j in range(nbpty+1):
y = oy+j*sy
if y<sboy or y>sbey: continue
for k in range(nbptz+1):
z = oz+k*sz
if z<sboz or z>sbez: continue
inBoxPtsCounter += 1
# check if grid point is far enough from closest receptor atom
nb = bht.closePointsDist2((x,y,z), 1.0, result, dist2)
if nb>500:
raise
keep = True
for aind, anum in enumerate(result[:nb]):
# #if dist2[aind] < 12.0: # grid point is too close to receptor atom
keep = False
break
if keep:
goodc.append( (x, y, z) )
keptijk.append((i,j,k))
# compute percentage for this point in AutoGrip Map
gx, gy, gz = (x-sbox)/sbdimx, (y-sboy)/sbdimy, (z-sboz)/sbdimz
assert mini-gx<0.00001 and gx-maxi<0.00001 and \
mini-gy<0.00001 and gy-maxi<0.00001 and \
mini-gz<0.00001 and gz-maxi<0.00001
goodPtsGene.append( (gx, gy, gz) )
else:
removedPtsInd.append((i,j,k))
coords.append( (x, y, z) )
verts, normals = self.getSurface(dockingObject)
srfVerticesBHT = bhtreelib.BHtree(verts, None, 10)
reallyGood = []
reallyGoodGenes = []
anchorGood = []
anchorGoodGenes = []
removed = []
keptijk2 = []
t0 = time()
for pt, gene, ijk in zip(goodc, goodPtsGene, keptijk):
cut = 2.0
nb = 0
while nb==0:
nb = srfVerticesBHT.closePointsDist2(tuple(pt), cut, result, dist2)
cut += 2.
vertInd = result[numpy.argmin(dist2[:nb])]
vx, vy, vz | |
# Source repository: obulpathi/boltcoinpy
import os
import pytest
from pyethereum import tester, utils
import serpent
# customize VM log output to your needs
# hint: use 'py.test' with the '-s' option to dump logs to the console
tester.set_logging_level(2)
gasprice = 0
startgas = 10000
# Test EVM contracts
serpent_code = '''
def main(a,b):
return(a ^ b)
'''
evm_code = serpent.compile(serpent_code)
def test_evm():
    """Deploy the compiled contract and check exponentiation: 2 ^ 5 == 32."""
    state = tester.state()
    contract = state.evm(evm_code)
    out = state.send(tester.k0, contract, 0, funid=0, abi=[2, 5])
    assert out == [32]
# Test serpent compilation of variables using _with_, doing a simple
# arithmetic calculation 20 * 30 + 10 = 610
sixten_code =\
'''
(with 'x 10
    (with 'y 20
        (with 'z 30
            (seq
                (set 'a (add (mul (get 'y) (get 'z)) (get 'x)))
                (return (ref 'a) 32)
            )
        )
    )
)
'''
def test_sixten():
    # compile the LLL snippet straight into code at a fixed address
    s = tester.state()
    c = '1231231231231234564564564564561231231231'
    s.block.set_code(c, tester.serpent.compile_lll(sixten_code))
    o1 = s.send(tester.k0, c, 0, [])
    assert o1 == [610]
# Test Serpent's import mechanism
# Contract that doubles its argument (and logs it).
mul2_code = \
'''
def double(v):
    log(v)
    return(v*2)
'''
filename = "mul2_qwertyuioplkjhgfdsa.se"
# Creates a mul2 contract from the file above and calls double(5) on it.
returnten_code = \
'''
extern mul2: [double]
x = create("%s")
return(x.double(5))
''' % filename
def test_returnten():
    """create() a sub-contract from a file and call it: double(5) == 10."""
    s = tester.state()
    # Fix: use a context manager so the file handle is closed deterministically
    # (the original open(...).write(...) leaked the handle), and remove the
    # temporary file even if contract creation fails.
    with open(filename, 'w') as f:
        f.write(mul2_code)
    try:
        c = s.contract(returnten_code)
        o1 = s.send(tester.k0, c, 0, [])
    finally:
        os.remove(filename)
    assert o1 == [10]
# Test a simple namecoin implementation
# First-come-first-served key/value registry: a key can only be set once.
namecoin_code =\
'''
def main(k, v):
    if !self.storage[k]:
        self.storage[k] = v
        return(1)
    else:
        return(0)
'''
def test_namecoin():
    s = tester.state()
    c = s.contract(namecoin_code)
    # registering a fresh name succeeds ...
    o1 = s.send(tester.k0, c, 0, funid=0, abi=['"george"', 45])
    assert o1 == [1]
    # ... re-registering the same name fails ...
    o2 = s.send(tester.k0, c, 0, funid=0, abi=['"george"', 20])
    assert o2 == [0]
    # ... and a different name succeeds again
    o3 = s.send(tester.k0, c, 0, funid=0, abi=['"harry"', 60])
    assert o3 == [1]
    assert s.block.to_dict()
# Test a simple currency implementation
# Token contract: creator is seeded with 1000 units; send() transfers with
# balance checking, query() reads balances.
currency_code = '''
data balances[2^160]
def init():
    self.balances[msg.sender] = 1000
def query(addr):
    return(self.balances[addr])
def send(to, value):
    from = msg.sender
    fromvalue = self.balances[from]
    if fromvalue >= value:
        self.balances[from] = fromvalue - value
        self.balances[to] = self.balances[to] + value
        log(from, to, value)
        return(1)
    else:
        return(0)
'''
def test_currency():
    s = tester.state()
    c = s.contract(currency_code, sender=tester.k0)
    # transfer within the creator's balance succeeds
    o1 = s.send(tester.k0, c, 0, funid=1, abi=[tester.a2, 200])
    assert o1 == [1]
    # transfer exceeding the remaining balance (800) fails
    o2 = s.send(tester.k0, c, 0, funid=1, abi=[tester.a2, 900])
    assert o2 == [0]
    # balances reflect exactly one successful transfer
    o3 = s.send(tester.k0, c, 0, funid=0, abi=[tester.a0])
    assert o3 == [800]
    o4 = s.send(tester.k0, c, 0, funid=0, abi=[tester.a2])
    assert o4 == [200]
# Test a data feed
# Key/value store writable only by its creator; anyone can read.
data_feed_code = '''
data creator
data values[]
def init():
    self.creator = msg.sender
def set(k, v):
    if msg.sender == self.creator:
        self.values[k] = v
        return(1)
    else:
        return(0)
def get(k):
    return(self.values[k])
'''
def test_data_feeds():
    s = tester.state()
    c = s.contract(data_feed_code, sender=tester.k0)
    # unset key reads as 0
    o2 = s.send(tester.k0, c, 0, funid=1, abi=[500])
    assert o2 == [0]
    # the creator may set a value ...
    o3 = s.send(tester.k0, c, 0, funid=0, abi=[500, 19])
    assert o3 == [1]
    o4 = s.send(tester.k0, c, 0, funid=1, abi=[500])
    assert o4 == [19]
    # ... a non-creator may not ...
    o5 = s.send(tester.k1, c, 0, funid=0, abi=[500, 726])
    assert o5 == [0]
    # ... and the creator can update it (726 is relied on by test_hedge)
    o6 = s.send(tester.k0, c, 0, funid=0, abi=[500, 726])
    assert o6 == [1]
    # state and contract address are reused by dependent tests
    return s, c
# Test an example hedging contract, using the data feed. This tests
# contracts calling other contracts
# Return codes: 1 = party one registered, [2, fiat] = party two registered,
# 3 = margin call, 4 = matured payout, 5 = still running.
hedge_code = '''
extern datafeed: [set, get]
data partyone
data partytwo
data hedgeValue
data datafeed
data index
data fiatValue
data maturity
def main(datafeed, index):
    if !self.partyone:
        self.partyone = msg.sender
        self.hedgeValue = msg.value
        self.datafeed = datafeed
        self.index = index
        return(1)
    elif !self.partytwo:
        ethvalue = self.hedgeValue
        if msg.value >= ethvalue:
            self.partytwo = msg.sender
            c = self.datafeed.get(self.index)
            othervalue = ethvalue * c
            self.fiatValue = othervalue
            self.maturity = block.timestamp + 500
            return([2, othervalue]:a)
    else:
        othervalue = self.fiatValue
        ethvalue = othervalue / self.datafeed.get(self.index)
        if ethvalue >= self.balance:
            send(self.partyone, self.balance)
            return(3)
        elif block.timestamp > self.maturity:
            send(self.partytwo, self.balance - ethvalue)
            send(self.partyone, ethvalue)
            return(4)
        else:
            return(5)
'''
def test_hedge():
    # reuse the data feed state where index 500 was set to 726
    s, c = test_data_feeds()
    c2 = s.contract(hedge_code, sender=tester.k0)
    # Have the first party register, sending 10^16 wei and
    # asking for a hedge using currency code 500
    o1 = s.send(tester.k0, c2, 10**16, funid=0, abi=[c, 500])
    assert o1 == [1]
    # Have the second party register. It should receive the
    # amount of units of the second currency that it is
    # entitled to. Note that from the previous test this is
    # set to 726
    o2 = s.send(tester.k2, c2, 10**16)
    assert o2 == [2, 7260000000000000000]
    snapshot = s.snapshot()
    # Set the price of the asset down to 300 wei
    o3 = s.send(tester.k0, c, 0, funid=0, abi=[500, 300])
    assert o3 == [1]
    # Finalize the contract. Expect code 3, meaning a margin call
    o4 = s.send(tester.k0, c2, 0)
    assert o4 == [3]
    s.revert(snapshot)
    # Don't change the price. Finalize, and expect code 5, meaning
    # the time has not expired yet
    o5 = s.send(tester.k0, c2, 0)
    assert o5 == [5]
    s.mine(100, tester.a3)
    # Mine ten blocks, and try. Expect code 4, meaning a normal execution
    # where both get their share
    o6 = s.send(tester.k0, c2, 0)
    assert o6 == [4]
# Test the LIFO nature of call
arither_code = '''
def init():
    self.storage[0] = 10
def f1():
    self.storage[0] += 1
def f2():
    self.storage[0] *= 10
    self.f1()
    self.storage[0] *= 10
def f3():
    return(self.storage[0])
'''
def test_lifo():
    s = tester.state()
    c = s.contract(arither_code)
    # f2: (10*10 + 1) * 10 = 1010 -- the nested call to f1 executes
    # immediately (LIFO), between the two multiplications
    s.send(tester.k0, c, 0, funid=1, abi=[])
    o2 = s.send(tester.k0, c, 0, funid=2, abi=[])
    assert o2 == [1010]
# Test suicides and suicide reverts
# mainloop self-destructs then burns gas; entry calls mainloop in a sub-call
# with a limited gas allowance so the sub-call can run out of gas.
suicider_code = '''
def mainloop(rounds):
    self.storage[15] = 40
    self.suicide()
    i = 0
    while i < rounds:
        i += 1
def entry(rounds):
    self.storage[15] = 20
    self.mainloop(rounds, gas=tx.gas - 100)
def ping_ten():
    return(10)
def suicide():
    suicide(0)
def ping_storage15():
    return(self.storage[15])
'''
def test_suicider():
    s = tester.state()
    c = s.contract(suicider_code)
    prev_gas_limit = tester.gas_limit
    tester.gas_limit = 8000
    # Run normally: suicide processes, so the attempt to ping the
    # contract fails
    s.send(tester.k0, c, 0, funid=0, abi=[1, 10])
    o2 = s.send(tester.k0, c, 0, funid=0, abi=[2])
    assert o2 == []
    c = s.contract(suicider_code)
    # Run the suicider in such a way that it suicides in a sub-call,
    # then runs out of gas, leading to a revert of the suicide and the
    # storage mutation
    s.send(tester.k0, c, 0, funid=1, abi=[8000])
    # Check that the suicide got reverted
    o2 = s.send(tester.k0, c, 0, funid=2, abi=[])
    assert o2 == [10]
    # Check that the storage op got reverted
    o3 = s.send(tester.k0, c, 0, funid=4, abi=[])
    assert o3 == [20]
    tester.gas_limit = prev_gas_limit
# Test reverts
# non_recurse succeeds; recurse loops forever and exhausts its gas allowance,
# so everything it did must be rolled back.
reverter_code = '''
def entry():
    self.non_recurse(gas=1000)
    self.recurse(gas=1000)
def non_recurse():
    send(7, 9)
    self.storage[8080] = 4040
    self.storage[160160] = 2020
def recurse():
    send(8, 9)
    self.storage[8081] = 4039
    self.storage[160161] = 2019
    self.recurse()
    self.storage["waste_some_gas"] = 0
'''
def test_reverter():
    s = tester.state()
    c = s.contract(reverter_code, endowment=10**15)
    s.send(tester.k0, c, 0, funid=0, abi=[0])
    # non_recurse completed: storage write and send to address 7 persist
    assert s.block.get_storage_data(c, 8080) == 4040
    assert s.block.get_balance('0'*39+'7') == 9
    # recurse ran out of gas: its storage write and send were rolled back
    assert s.block.get_storage_data(c, 8081) == 0
    assert s.block.get_balance('0'*39+'8') == 0
# Test stateless contracts
# Contract that adds its argument to storage slot 1.
add1_code = \
'''
def main(x):
    self.storage[1] += x
'''
filename2 = "stateless_qwertyuioplkjhgfdsa.se"
# Calls add1.main both normally and with call=code (callcode): the two
# call=code invocations mutate the *caller's* storage, the normal ones the
# callee's, so the caller's slot 1 ends up 4 + 60 = 64.
callcode_test_code = \
'''
extern add1: [main]
x = create("%s")
x.main(6)
x.main(4, call=code)
x.main(60, call=code)
x.main(40)
return(self.storage[1])
''' % filename2
def test_callcode():
    """call=code executes the callee's code against the caller's storage."""
    s = tester.state()
    # Fix: close the file handle deterministically (the original
    # open(...).write(...) leaked it) and guarantee cleanup of the temporary
    # contract file even if contract creation fails.
    with open(filename2, 'w') as f:
        f.write(add1_code)
    try:
        c = s.contract(callcode_test_code)
        o1 = s.send(tester.k0, c, 0)
    finally:
        os.remove(filename2)
    assert o1 == [64]
# https://github.com/ethereum/serpent/issues/8
array_code = '''
a = array(1)
a[0] = 1
return(a, 1)
'''
def test_array():
    """A one-element array written then returned should yield [1]."""
    state = tester.state()
    contract = state.contract(array_code)
    result = state.send(tester.k0, contract, 0, [])
    assert result == [1]
# Same regression as array_code, with an unrelated local in between to
# check that the array's memory location survives other allocations.
array_code2 = '''
a = array(1)
something = 2
a[0] = 1
return(a, 1)
'''
def test_array2():
    """The array result must be unaffected by intervening locals."""
    state = tester.state()
    contract = state.contract(array_code2)
    result = state.send(tester.k0, contract, 0, [])
    assert result == [1]
array_code3 = """
a = array(3)
return(a, 3)
"""
def test_array3():
    """An untouched 3-element array should come back as three zeros."""
    state = tester.state()
    contract = state.contract(array_code3)
    result = state.send(tester.k0, contract, 0, [])
    assert result == [0, 0, 0]
calltest_code = """
def main():
self.first(1, 2, 3, 4, 5)
self.second(2, 3, 4, 5, 6)
self.third(3, 4, 5, 6, 7)
def first(a, b, c, d, e):
self.storage[1] = a * 10000 + b * 1000 + c * 100 + d * 10 + e
def second(a, b, c, d, e):
self.storage[2] = a * 10000 + b * 1000 + c * 100 + d * 10 + e
def third(a, b, c, d, e):
self.storage[3] = a * 10000 + b * 1000 + c * 100 + d * 10 + e
def get(k):
return(self.storage[k])
"""
def test_calls():
    """funid dispatch order is main/first/second/third/get (0-4):
    main() seeds slots 1-3 via self-calls, then direct calls to the
    setters overwrite individual slots."""
    s = tester.state()
    c = s.contract(calltest_code)
    # main(): runs all three setters internally.
    s.send(tester.k0, c, 0, funid=0, abi=[])
    assert [12345] == s.send(tester.k0, c, 0, funid=4, abi=[1])
    assert [23456] == s.send(tester.k0, c, 0, funid=4, abi=[2])
    assert [34567] == s.send(tester.k0, c, 0, funid=4, abi=[3])
    # Direct call to first() replaces slot 1 only.
    s.send(tester.k0, c, 0, funid=1, abi=[4, 5, 6, 7, 8])
    assert [45678] == s.send(tester.k0, c, 0, funid=4, abi=[1])
    # Direct call to second() replaces slot 2 only.
    s.send(tester.k0, c, 0, funid=2, abi=[5, 6, 7, 8, 9])
    assert [56789] == s.send(tester.k0, c, 0, funid=4, abi=[2])
storage_object_test_code = """
extern moo: [ping, query_chessboard, query_stats, query_items, query_person, testping, testping2]
data chessboard[8][8]
data users[100](health, x, y, items[5])
data person(head, arms[2](elbow, fingers[5]), legs[2])
def ping():
self.chessboard[0][0] = 1
self.chessboard[0][1] = 2
| |
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
def channel_standardization(image):
    '''
    Standardization of image channel wise => standard score.

    Parameters:
        image: Input image; mean/std are taken over the last (channel) axis.
    Return:
        Standardized image, s.t. each value becomes
        (pixel_value - channel_mean) / channel_std, with positions whose
        std is zero mapped to 0 instead of NaN.
    '''
    mean_val = np.mean(image, axis=-1)
    std_dev_val = np.std(image, axis=-1)
    # Silence the divide-by-zero / 0/0 warnings a zero std produces; the
    # resulting NaN entries are zeroed immediately below.
    with np.errstate(divide='ignore', invalid='ignore'):
        output = (image - np.expand_dims(mean_val, axis=-1)) / (np.expand_dims(std_dev_val, axis=-1))
    # some val for std.dev = 0
    cast = np.nan_to_num(output)
    return cast
def concat_recursive(a, b, max_count, count=0):
    '''
    Concatenate image stack ``a`` with ``max_count - 1 - count`` copies of
    image stack ``b`` along axis 0.

    @param a: Top first image stacks
    @param b: Following image stacks
    @param max_count: total number of stacks in the result
    @param count: number of stacks already accumulated into ``a``
    @return: the concatenated stacks (``a`` itself when nothing to append)

    Note: the original mixed a while-loop with a recursive call whose
    return value was discarded, redoing the same concatenations many
    times; this version performs one concatenate with identical output.
    '''
    repeats = max_count - 1 - count
    if repeats <= 0:
        return a
    return np.concatenate([a] + [b] * repeats, axis=0)
# ------------------------------------------------------functions for tf_records----------------------------
def _bytes_feature(value):
    """Returns a bytes_list from a string / byte."""
    eager_tensor_type = type(tf.constant(0))
    # BytesList cannot unpack a string out of an EagerTensor, so extract
    # the raw value first.
    if isinstance(value, eager_tensor_type):
        value = value.numpy()
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _float_feature(value):
    """Returns a float_list from a float / double."""
    float_list = tf.train.FloatList(value=[value])
    return tf.train.Feature(float_list=float_list)
def _int64_feature(value):
    """Returns an int64_list from a bool / enum / int / uint."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def serialize_example(image):
    '''
    Wrap a (serialized) image tensor in a TFRecords Example under the
    'image' key and return the serialized proto bytes.
    '''
    features = tf.train.Features(feature={'image': _bytes_feature(image)})
    example_proto = tf.train.Example(features=features)
    return example_proto.SerializeToString()
def write_tfrecords(tfrecord_dir, image_paths):
    '''
    Serialize each image in ``image_paths`` and append it as a record to
    the TFRecord file at ``tfrecord_dir``.
    '''
    with tf.io.TFRecordWriter(tfrecord_dir) as writer:
        for image in image_paths:
            record = serialize_example(tf.io.serialize_tensor(image))
            writer.write(record)
def read_tfrecord(serialized_example):
    '''
    Parse one serialized Example back into its image tensor.
    '''
    schema = {'image': tf.io.FixedLenFeature((), tf.string)}
    parsed = tf.io.parse_single_example(serialized_example, schema)
    # The stored bytes were produced by tf.io.serialize_tensor.
    return tf.io.parse_tensor(parsed['image'], out_type=float)
def parse_tfrecord(tf_dir):
    """Open the TFRecord file(s) at ``tf_dir`` and decode every record."""
    return tf.data.TFRecordDataset(tf_dir).map(read_tfrecord)
# --------------------------------------------------------------------------------------------------------
def std_norm(slice):
    """
    Removes 1% of the top and bottom intensities (by clipping to the
    1st/99th percentiles) and standardizes the input 2D slice to zero
    mean / unit variance. A constant slice is returned unchanged (after
    clipping) to avoid dividing by a zero standard deviation.
    """
    b = np.percentile(slice, 99)
    t = np.percentile(slice, 1)
    slice = np.clip(slice, t, b)
    # Hoisted: the original computed np.std(slice) twice.
    std = np.std(slice)
    if std == 0:
        return slice
    return (slice - np.mean(slice)) / std
def min_max_norm(images):
    """
    Min max normalization of images.

    Parameters:
        images: Input stacked image list
    Return:
        Image list after min max normalization; all zeros when the input
        is constant (empty range).
    """
    hi = np.max(images)
    lo = np.min(images)
    if hi == lo:
        return np.zeros_like(images)
    return (images - lo) / (hi - lo)
def normalize_modalities(Slice, mode=None):
    """
    Performs normalization on each modality (last-axis channel) of input.

    :param Slice: image stack with modalities on the last axis
    :param mode: 'std' for standard normalization, 'minmax' for minmax
        normalization
    :return: float32 array with every modality normalized independently
    """
    # NOTE: validation via assert vanishes under ``python -O``; kept as an
    # assert to preserve the exception type existing callers may expect,
    # but fixed to an identity check against None.
    assert mode is not None, "Please in put [mode] type! 'std' for standard normalization, 'minmax' for minmax normalization"
    normalized_slices = np.zeros_like(Slice).astype(np.float32)
    # Generalized: iterate over however many modalities are present
    # (the original hard-coded 4 channels).
    for slice_ix in range(Slice.shape[-1]):
        if mode == 'std':
            normalized_slices[..., slice_ix] = std_norm(Slice[..., slice_ix])
        elif mode == 'minmax':
            normalized_slices[..., slice_ix] = min_max_norm(Slice[..., slice_ix])
    return normalized_slices
def dicesq(y_true, y_pred, smooth=1e-5):
    '''
    Modified dice coefficient as refer to: https://arxiv.org/abs/1606.04797
    :param y_true: Ground truth
    :param y_pred: Prediction from the model
    :param smooth: additive term keeping the denominator non-zero
    :return: Modified dice coefficient
    '''
    numerator = 2 * tf.reduce_sum(y_true * y_pred)
    denominator = tf.reduce_sum(y_true ** 2) + tf.reduce_sum(y_pred ** 2) + smooth
    return numerator / denominator
def dicesq_loss(y_true, y_pred):
    '''
    Modified dice coefficient loss (1 - dicesq).
    :param y_true: Ground truth
    :param y_pred: Prediction from the model
    '''
    coefficient = dicesq(y_true, y_pred)
    return 1 - coefficient
def dice_coef(y_true, y_pred, smooth=1e-5):
    '''
    Dice coefficient for tensorflow
    :param y_true: Ground truth
    :param y_pred: Prediction from the model
    :param smooth: additive smoothing keeping the ratio defined when both
        inputs are empty
    :return: dice coefficient as a numpy scalar (via .numpy(), so this
        requires eager execution)
    '''
    # if input is not flatten
    # NOTE(review): this compares tf.rank tensors with `and`, which in
    # eager mode evaluates their truthiness; confirm inputs are always
    # eager tensors here.
    if (tf.rank(y_true) != 1 and tf.rank(y_pred) != 1):
        y_true = tf.reshape(y_true, [-1])  # flatten
        y_pred = tf.reshape(y_pred, [-1])  # flatten
    # casting for label from int32 to float32 for computation
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)
    intersection = tf.reduce_sum(y_true * y_pred)
    # if intersection==0:
    #     return 0.0
    # else:
    dc = (2.0 * intersection + smooth) / \
        (tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) + smooth)
    return dc.numpy()
def dice_coef_loss(y_true, y_pred):
    '''
    Dice coefficient loss for IOU
    '''
    overlap = dice_coef(y_true, y_pred)
    return 1 - overlap
def dice_coef_bool(y_true, y_pred):
    '''
    Dice coefficient for tensorflow (boolean version)
    * None differiantiable!
    :param y_true: Ground truth
    :param y_pred: Prediction from the model
    :return: dice coefficient (Python float; 1.0 when both masks are empty)
    '''
    # Flatten if input is not already 1-D.
    if (tf.rank(y_true) != 1 and tf.rank(y_pred) != 1):
        y_true = tf.reshape(y_true, [-1])  # flatten
        y_pred = tf.reshape(y_pred, [-1])  # flatten
    # Any non-zero label counts as foreground.
    y_true = tf.cast(y_true, tf.bool)
    y_pred = tf.cast(y_pred, tf.bool)
    intersection = tf.math.count_nonzero(y_true & y_pred)
    size_i1 = tf.math.count_nonzero(y_true)
    size_i2 = tf.math.count_nonzero(y_pred)
    summation = size_i1 + size_i2
    if summation != 0:
        dc = (2.0 * tf.cast(intersection, tf.float32) / tf.cast(summation, tf.float32)).numpy()
    else:
        # Both masks empty: define dice as perfect agreement.
        dc = 1.0
    return dc
def ss_metric(y_true, y_pred, label_type='binary', mode='global', smooth=1e-5):
    '''
    Compute sensitivity and specificity for groundtruth and prediction
    :param y_true: Ground truth
    :param y_pred: Prediction from the model
    :param label_type: 'binary': input labels is binarized
                       'multi': mutli class labels
    :param mode: 'local' compute the sensitivity label wise
                 'global' compute the sensitivity overall
    :param smooth: smoothing term avoiding division by zero
    :return: sensitivity & specificity
    '''
    # Flatten if input is not already 1-D.
    if (tf.rank(y_true) != 1 and tf.rank(y_pred) != 1):
        y_true = tf.reshape(y_true, [-1])  # flatten
        y_pred = tf.reshape(y_pred, [-1])  # flatten
    # label types
    if label_type == 'binary':
        tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
        sensitivity = (tp + smooth) / (tp + fn + smooth)
        specificity = (tn + smooth) / (tn + fp + smooth)
    if label_type == 'multi':
        cm = confusion_matrix(y_true, y_pred, labels=[0, 1, 2, 3])
        # true positive rate
        if mode == 'global':
            tp = np.trace(cm)
            tp_fn = np.sum(cm)
        else:  # local
            tp = np.diag(cm)
            tp_fn = np.sum(cm, 1)
        sensitivity = (tp + smooth) / (tp_fn + smooth)
        # true negative rate: for class i, the true negatives on the
        # diagonal are every OTHER class's diagonal hits. BUG FIX: the
        # original excluded entries by *value* (neg != diag[i]), which
        # wrongly dropped classes whose diagonal count happened to equal
        # diag[i]; exclude by index instead.
        diag = np.diag(cm)
        tn = np.sum(diag) - diag
        # Off-diagonal entries are false positives. BUG FIX: copy before
        # zeroing the diagonal — the original aliased cm (cm_copy = cm)
        # and clobbered it in place.
        cm_off = cm.copy()
        np.fill_diagonal(cm_off, 0)
        if mode == 'global':
            tn = np.sum(tn)
            fp = np.sum(cm_off)
        else:  # local
            fp = np.sum(cm_off, 0)
        specificity = (tn + smooth) / (tn + fp + smooth)
    return sensitivity, specificity
def compute_metric(y_true, y_pred, label_type='binary'):
    '''
    This function compute the metrics specify by BraTS competition
    which is dice coefficient, sensitivity, specificity
    :param y_true: Ground truth image
    :param y_pred: Prediction image from the model
    :param label_type: 'binary': input labels is binarized
                       'multi': mutli class labels
    :return: dice coefficient, sensitivity & specificity list
             with order ['core', 'enhancing', 'complete']
    '''
    # Keep the originals so each tumour type can re-mask from scratch
    # even though y_true/y_pred are rebound below.
    y_list = [y_true, y_pred]
    tumours = ['core', 'enhancing', 'complete']
    dc_output = []
    sens_output = []
    spec_output = []
    # compute dice coefficient for each tumour type
    for tumour_type in tumours:
        if label_type == 'multi':
            # label 1, 3(4)
            if tumour_type == 'core':
                y_true, y_pred = [np.where(((lbl == 1) | (lbl == 3)), lbl, 0) for lbl in y_list]
            # label 3(4)
            if tumour_type == 'enhancing':
                y_true, y_pred = [np.where(lbl == 3, lbl, 0) for lbl in y_list]
            # label 1,2,3,
            if tumour_type == 'complete':
                y_true, y_pred = [np.where(lbl > 0, lbl, 0) for lbl in y_list]
        if label_type == 'binary':
            # label 1, 3(4) =>1
            if tumour_type == 'core':
                y_true, y_pred = [np.where(((lbl == 1) | (lbl == 3)), 1, 0) for lbl in y_list]
            # label 3(4) =>1
            if tumour_type == 'enhancing':
                y_true, y_pred = [np.where(lbl == 3, 1, 0) for lbl in y_list]
            # label 1,2,3 =>1
            if tumour_type == 'complete':
                y_true, y_pred = [np.where(lbl > 0, 1, 0) for lbl in y_list]
        # Per-batch accumulators for the multi-image branch below.
        dc_list = []
        sens_list = []
        spec_list = []
        # only single images [240, 240]
        if y_true.ndim == 2:
            dc = dice_coef_bool(y_true, y_pred)
            sensitivity, specificity = ss_metric(y_true, y_pred)
            # append for each tumour type
            dc_output.append(dc)
            sens_output.append(sensitivity)
            spec_output.append(specificity)
        # batched images [?,240,240]
        else:
            for idx in range(len(y_true)):
                y_true_f = tf.reshape(y_true[idx], [-1])  # flatten
                y_pred_f = tf.reshape(y_pred[idx], [-1])  # flatten
                dc = dice_coef_bool(y_true_f, y_pred_f)
                sensitivity, specificity = ss_metric(y_true_f, y_pred_f)
                # store values
                dc_list.append(dc)
                sens_list.append(sensitivity)
                spec_list.append(specificity)
            # output [BATCH_SIZE, tumours_type]
            # taking the mean along the batch axis
            mean_ = lambda x: np.mean(x)
            dc_batch_mean = mean_(dc_list)
            sens_batch_mean = mean_(sens_list)
            spec_batch_mean = mean_(spec_list)
            # append for each tumour type
            dc_output.append(dc_batch_mean)
            sens_output.append(sens_batch_mean)
            spec_output.append(spec_batch_mean)
    # for each list the order is as following=> 'core','enhancing','complete'
    return dc_output, sens_output, spec_output
def compute_metric_dc(y_true, y_pred):
"""
This function compute the dice coefficient metrics specify by BraTS competition
:param y_true: Ground truth image
:param y_pred: Prediction image from the model
| |
#!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
from collections import OrderedDict
import numpy
from titus.producer.transformation import Transformation
import titus.prettypfa
### metrics are nested objects like Euclidean(AbsDiff()), and they may
### be user-defined. They mirror the metrics and similarity functions
### available in PFA.
### interfaces
def _NotImplementedError():
    """Lambda-friendly helper that raises NotImplementedError when called."""
    raise NotImplementedError
class Similarity(object):
    """Trait for similarity functions in Numpy and PFA (compare two scalars, return a non-negative number)."""
    def __init__(self):
        # Placeholder; concrete subclasses replace this with a vectorized
        # comparison of dataset against cluster.
        self.calculate = lambda dataset, cluster: _NotImplementedError()

    def pfa(self):
        """Return the PFA representation of this similarity; abstract here."""
        raise NotImplementedError
class Metric(object):
    """Trait for metric functions in Numpy and PFA (compare two vectors, return a non-negative number)."""
    def __init__(self):
        # Placeholder; concrete subclasses replace this with a vectorized
        # per-row distance between dataset and cluster.
        self.calculate = lambda dataset, cluster: _NotImplementedError()

    def pfa(self):
        """Return the PFA representation of this metric; abstract here."""
        raise NotImplementedError
### similarity
class AbsDiff(Similarity):
    """Absolute difference similarity function for Numpy and PFA."""
    def __init__(self):
        # Vectorized |dataset - cluster|.
        def _absdiff(dataset, cluster):
            return numpy.absolute(dataset - cluster)
        self.calculate = _absdiff

    def pfa(self):
        """PFA form: a reference to the built-in metric.absDiff function."""
        return {"fcn": "metric.absDiff"}
class GaussianSimilarity(Similarity):
    """Gaussian similarity function for Numpy and PFA."""
    def __init__(self, sigma):
        self.sigma = sigma
        # 2^(-(x-y)^2 / sigma^2), written via exp/log(2).
        def _gaussian(dataset, cluster):
            return numpy.exp(-numpy.log(2) * numpy.square(dataset - cluster) / sigma**2)
        self.calculate = _gaussian

    def pfa(self):
        """PFA form: an inline function delegating to metric.gaussianSimilarity."""
        arg_x = "similarityX"
        arg_y = "similarityY"
        return {"params": [{arg_x: "double"}, {arg_y: "double"}],
                "ret": "double",
                "do": {"metric.gaussianSimilarity": [arg_x, arg_y, self.sigma]}}
### metrics
class Euclidean(Metric):
    """Euclidean metric for Numpy and PFA."""
    def __init__(self, similarity):
        self.similarity = similarity
        # sqrt of the row-wise sum of squared per-component similarities.
        def _euclidean(dataset, cluster):
            deltas = similarity.calculate(dataset, cluster)
            return numpy.sqrt(numpy.sum(numpy.square(deltas), axis=1))
        self.calculate = _euclidean

    def pfa(self, x, y):
        """PFA form: metric.euclidean over the wrapped similarity and x, y."""
        return {"metric.euclidean": [self.similarity.pfa(), x, y]}
class SquaredEuclidean(Metric):
    """Squared euclidean metric for Numpy and PFA."""
    def __init__(self, similarity):
        # Row-wise sum of squared per-component similarities (no sqrt).
        self.calculate = lambda dataset, cluster: numpy.sum(numpy.square(similarity.calculate(dataset, cluster)), axis=1)
        self.similarity = similarity
    def pfa(self, x, y):
        # NOTE(review): x and y are immediately shadowed by the fixed names
        # "metricX"/"metricY", unlike Euclidean.pfa which honors its
        # arguments — confirm this asymmetry is intentional.
        x = "metricX"
        y = "metricY"
        return {"metric.squaredEuclidean": [self.similarity.pfa(), x, y]}
class Chebyshev(Metric):
    """Chebyshev (maximum) metric for Numpy and PFA."""
    def __init__(self, similarity):
        # L-infinity: the largest per-component similarity in each row.
        self.calculate = lambda dataset, cluster: numpy.max(similarity.calculate(dataset, cluster), axis=1)
        self.similarity = similarity
    def pfa(self, x, y):
        # NOTE(review): x and y are shadowed by fixed names "metricX"/"metricY"
        # (Euclidean.pfa honors its arguments) — confirm intentional.
        x = "metricX"
        y = "metricY"
        return {"metric.chebyshev": [self.similarity.pfa(), x, y]}
class Taxicab(Metric):
    """Taxicab (sum) metric for Numpy and PFA."""
    def __init__(self, similarity):
        # L1: row-wise sum of per-component similarities.
        self.calculate = lambda dataset, cluster: numpy.sum(similarity.calculate(dataset, cluster), axis=1)
        self.similarity = similarity
    def pfa(self, x, y):
        # NOTE(review): x and y are shadowed by fixed names "metricX"/"metricY"
        # (Euclidean.pfa honors its arguments) — confirm intentional.
        x = "metricX"
        y = "metricY"
        return {"metric.taxicab": [self.similarity.pfa(), x, y]}
class Minkowski(Metric):
    """Minkowski metric for Numpy and PFA."""
    def __init__(self, similarity, p):
        # BUG FIX: ``numpy.pow`` does not exist in the numpy versions
        # contemporary with this code (the function is ``numpy.power``),
        # so the original lambda raised AttributeError when invoked.
        self.calculate = lambda dataset, cluster: numpy.power(numpy.sum(numpy.power(similarity.calculate(dataset, cluster), p), axis=1), 1.0/p)
        self.similarity = similarity
        self.p = p
    def pfa(self, x, y):
        # NOTE(review): x and y are shadowed by fixed names, matching the
        # sibling metrics (but not Euclidean) — confirm intentional.
        x = "metricX"
        y = "metricY"
        return {"metric.minkowski": [self.similarity.pfa(), x, y, self.p]}
### stopping conditions are functions that take iterationNumber (int),
# corrections (Python list of Numpy arrays), datasetSize (int) and
# return bool (continue iterating if True)
### they may be user-defined or constructed from these functions like
# whileall(printChange("6.4f"), halfChange(0.001), clusterJumped()) to
# print iteration data, stop when at least half change by less than
# 0.001, and keep going if one jumped
def printValue(format="g"):
"""Generates a "stopping condition" that prints the current value and never stops.
:type format: string
:param format: format string ("g" is general number, "8.3f" is 8-characters wide, 3-digits after the decimal floating point, etc.)
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
state = {"ready": False}
def out(iterationNumber, corrections, values, datasetSize):
if not state["ready"]:
state["j"] = "{:5s} (jump)"
for v in values:
if v is not None:
state["n"] = "{0:5s}" + "".join((" {%d:%s}" % (i + 1, format)) for i in xrange(len(v)))
break
if "n" in state:
state["ready"] = True
print "iter values"
print "----------------------------------"
for index, v in enumerate(values):
if index == 0:
it = repr(iterationNumber)
else:
it = ""
if v is None:
print state["j"].format(it)
else:
print state["n"].format(it, *v)
return True
return out
def printChange(format="g"):
"""Generates a "stopping condition" that prints changes in values and never stops.
:type format: string
:param format: format string ("g" is general number, "8.3f" is 8-characters wide, 3-digits after the decimal floating point, etc.)
:rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
:return: stopping condition function
"""
state = {"ready": False}
def out(iterationNumber, corrections, values, datasetSize):
if not state["ready"]:
state["j"] = "{:5s} (jump)"
for corr in corrections:
if corr is not None:
state["n"] = "{0:5s}" + "".join((" {%d:%s}" % (i + 1, format)) for i in xrange(len(corr)))
break
if "n" in state:
state["ready"] = True
print "iter changes"
print "----------------------------------"
for index, corr in enumerate(corrections):
if index == 0:
it = repr(iterationNumber)
else:
it = ""
if corr is None:
print state["j"].format(it)
else:
print state["n"].format(it, *corr)
return True
return out
def clusterJumped():
    """Generates a stopping condition that stops if no clusters jumped (reset to a random point because of encounting nan).
    :rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
    :return: stopping condition function
    """
    def _no_jumps(iterationNumber, corrections, values, datasetSize):
        # A None correction marks a jumped cluster; keep iterating only
        # when every correction is real.
        return all(x is not None for x in corrections)
    return _no_jumps
def maxIterations(number):
    """Generates a stopping condition that stops after a given number of iterations have passed.
    :rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
    :return: stopping condition function
    """
    def _below_limit(iterationNumber, corrections, values, datasetSize):
        # Continue only while the iteration count is under the limit.
        return iterationNumber < number
    return _below_limit
def allChange(threshold):
    """Generates a stopping condition that stops if all cluster changes are less than a threshold.
    :type threshold: number
    :param threshold: maximum change allowed for all clusters
    :rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
    :return: stopping condition function
    """
    def _still_moving(iterationNumber, corrections, values, datasetSize):
        # Continue while at least one non-jumped cluster moved by the
        # threshold or more in some component.
        return not all((numpy.absolute(x) < threshold).all() for x in corrections if x is not None)
    return _still_moving
def halfChange(threshold):
    """Generates a stopping condition that stops if half of the cluster changes are less than a threshold.
    :type threshold: number
    :param threshold: maximum change allowed for half of the clusters
    :rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
    :return: stopping condition function
    """
    def _half_still_moving(iterationNumber, corrections, values, datasetSize):
        # Fraction of non-jumped clusters whose every component moved
        # less than the threshold; continue while under one half.
        settled = numpy.sum([(numpy.absolute(x) < threshold).all() for x in corrections if x is not None], dtype=numpy.dtype(float))
        total = numpy.sum([x is not None for x in corrections], dtype=numpy.dtype(float))
        return settled / total < 0.5
    return _half_still_moving
def whileall(*conditions):
    """Generates a stopping condition that continues while all of its subconditions continue.
    :type conditions: stopping condition functions
    :param conditions: subconditions
    :rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
    :return: stopping condition function
    """
    def _all_continue(*args):
        # Short-circuits like all(): the first failing condition stops.
        for condition in conditions:
            if not condition(*args):
                return False
        return True
    return _all_continue
def whileany(*conditions):
    """Generates a stopping condition that continues while any of its subconditions continue.
    :type conditions: stopping condition functions
    :param conditions: subconditions
    :rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
    :return: stopping condition function
    """
    def _any_continues(*args):
        # Short-circuits like any(): the first passing condition continues.
        for condition in conditions:
            if condition(*args):
                return True
        return False
    return _any_continues
def moving():
    """Generates a stopping condition that stops when all clusters change less than 1e-15 and none are jumping (reset to a random point because of encounting nan).
    :rtype: callable that takes iterationNumber, corrections, values, datasetSize as arguments
    :return: stopping condition function
    """
    # Continue only while no cluster jumped AND at least one cluster
    # still moved by 1e-15 or more.
    no_jumps = clusterJumped()
    still_changing = allChange(1e-15)
    return whileall(no_jumps, still_changing)
### the KMeans class
class KMeans(object):
"""Represents a k-means optimization by storing a dataset and performing all operations *in-place*.
Usually, you would construct the object, possibly stepup, then optimize and export to pfaDocument.
"""
def __init__(self, numberOfClusters, dataset, weights=None, metric=Euclidean(AbsDiff()), minPointsInCluster=None, maxPointsForClustering=None):
"""Construct a KMeans object, initializing cluster centers to unique, random points from the dataset.
:type numberOfClusters: positive integer
:param numberOfClusters: number of clusters (the "k" in k-means)
:type dataset: 2-d Numpy array
:param dataset: dataset to cluster; ``dataset.shape[0]`` is the number of records (rows), ``dataset.shape[1]`` is the number of dimensions for each point (columns)
:type weights: 1-d Numpy array or ``None``
:param weights: how much to weight each point in the ``dataset``: must have shape equal to ``(dataset.shape[0],)``; ``0`` means ignore the dataset, ``1`` means normal weight; | |
``_CompoundClause`` has only one
item, and that item matches the given item.
"""
if not isinstance(other, _CompoundClause):
if len(self.clauses) == 1:
return self.clauses[0].compare(other)
if ClauseList.compare(self, other):
return self.operator == other.operator
else:
return False
class _CalculatedClause(ClauseList, ColumnElement):
    """Describe a calculated SQL expression that has a type, like ``CASE``.
    Extends ``ColumnElement`` to provide column-level comparison
    operators.
    """
    def __init__(self, name, *clauses, **kwargs):
        # Optional 'type' and 'engine' keyword arguments; the remaining
        # positional clauses go to ClauseList.
        self.name = name
        self.type = sqltypes.to_instance(kwargs.get('type', None))
        self._engine = kwargs.get('engine', None)
        ClauseList.__init__(self, *clauses)
    # Column-collection key: the clause name, or a placeholder when anonymous.
    key = property(lambda self:self.name or "_calc_")
    def copy_container(self):
        # Deep-copy the child clauses into a new instance.
        # NOTE(review): ``name`` is not forwarded here, so the first copied
        # clause binds to the ``name`` parameter — looks suspicious; confirm
        # against callers before relying on copies of named clauses.
        clauses = [clause.copy_container() for clause in self.clauses]
        return _CalculatedClause(type=self.type, engine=self._engine, *clauses)
    def get_children(self, **kwargs):
        return self.clauses
    def accept_visitor(self, visitor):
        visitor.visit_calculatedclause(self)
    def _bind_param(self, obj):
        # Literal values compared against this clause become bind params
        # typed like the clause itself.
        return _BindParamClause(self.name, obj, type=self.type, unique=True)
    def select(self):
        # Convenience: SELECT <this clause>.
        return select([self])
    def scalar(self):
        # Execute SELECT <this clause> and return the first column of the
        # first row.
        return select([self]).scalar()
    def execute(self):
        return select([self]).execute()
    def _compare_type(self, obj):
        # Comparisons against this clause use the clause's own type.
        return self.type
class _Function(_CalculatedClause, FromClause):
    """Describe a SQL function.
    Extends ``_CalculatedClause``, turn the *clauselist* into function
    arguments, also adds a `packagenames` argument.
    """
    def __init__(self, name, *clauses, **kwargs):
        self.name = name
        self.type = sqltypes.to_instance(kwargs.get('type', None))
        # Dotted prefix (e.g. schema or package) rendered before the name.
        self.packagenames = kwargs.get('packagenames', None) or []
        self._engine = kwargs.get('engine', None)
        # parens=True: function arguments always render inside parentheses.
        ClauseList.__init__(self, parens=True, *clauses)
    key = property(lambda self:self.name)
    def append(self, clause):
        # Literal argument values become bind parameters named after the
        # function; None becomes a NULL clause.
        if _is_literal(clause):
            if clause is None:
                clause = null()
            else:
                clause = _BindParamClause(self.name, clause, shortname=self.name, type=None, unique=True)
        self.clauses.append(clause)
    def copy_container(self):
        # Deep-copy arguments into a new function clause with the same
        # name, type, and package path.
        clauses = [clause.copy_container() for clause in self.clauses]
        return _Function(self.name, type=self.type, packagenames=self.packagenames, engine=self._engine, *clauses)
    def get_children(self, **kwargs):
        return self.clauses
    def accept_visitor(self, visitor):
        visitor.visit_function(self)
class _Cast(ColumnElement):
    """Represents ``CAST(clause AS totype)`` as a column expression."""
    def __init__(self, clause, totype, **kwargs):
        # Plain Python values are wrapped as literal clauses first.
        if not hasattr(clause, 'label'):
            clause = literal(clause)
        self.type = sqltypes.to_instance(totype)
        self.clause = clause
        self.typeclause = _TypeClause(self.type)
        # NOTE(review): **kwargs is accepted but never used — confirm
        # whether callers rely on passing extra arguments here.
    def get_children(self, **kwargs):
        return self.clause, self.typeclause
    def accept_visitor(self, visitor):
        visitor.visit_cast(self)
    def _get_from_objects(self):
        return self.clause._get_from_objects()
    def _make_proxy(self, selectable, name=None):
        # A named cast proxies as a typed column on the target selectable;
        # an anonymous one is passed through unchanged.
        if name is not None:
            co = _ColumnClause(name, selectable, type=self.type)
            co.orig_set = self.orig_set
            selectable.columns[name]= co
            return co
        else:
            return self
class _FunctionGenerator(object):
    """Generate ``_Function`` objects based on getattr calls."""
    def __init__(self, engine=None):
        self.__engine = engine
        self.__names = list()
    def __getattr__(self, name):
        # Each attribute access records a path component (package names
        # followed by the function name) and returns self for chaining,
        # e.g. func.pkg.fn(...).
        self.__names.append(name)
        return self
    def __call__(self, *c, **kwargs):
        # The last recorded name is the function; everything before it is
        # the package path.
        kwargs.setdefault('engine', self.__engine)
        packages = self.__names[0:-1]
        funcname = self.__names[-1]
        return _Function(funcname, packagenames=packages, *c, **kwargs)
class _BinaryClause(ClauseElement):
    """Represent two clauses with an operator in between."""
    def __init__(self, left, right, operator, type=None):
        self.left = left
        self.right = right
        self.operator = operator
        self.type = sqltypes.to_instance(type)
        self.parens = False
        # Nested binary expressions and selectables get parenthesized so
        # evaluation order is preserved when rendered.
        if isinstance(self.left, _BinaryClause) or hasattr(self.left, '_selectable'):
            self.left.parens = True
        if isinstance(self.right, _BinaryClause) or hasattr(self.right, '_selectable'):
            self.right.parens = True
    def copy_container(self):
        # NOTE(review): the copy does not carry over ``type`` (or parens)
        # — confirm this matches caller expectations.
        return self.__class__(self.left.copy_container(), self.right.copy_container(), self.operator)
    def _get_from_objects(self):
        return self.left._get_from_objects() + self.right._get_from_objects()
    def get_children(self, **kwargs):
        return self.left, self.right
    def accept_visitor(self, visitor):
        visitor.visit_binary(self)
    def swap(self):
        # Exchange the left and right sides in place.
        c = self.left
        self.left = self.right
        self.right = c
    def compare(self, other):
        """compares this _BinaryClause against the given _BinaryClause."""
        return (
            isinstance(other, _BinaryClause) and self.operator == other.operator and
            self.left.compare(other.left) and self.right.compare(other.right)
        )
class _BinaryExpression(_BinaryClause, ColumnElement):
    """A binary clause usable as a column expression.

    Mixing ``ColumnElement`` into ``_BinaryClause`` makes the expression
    a selectable unit, so it can appear in the column list of a SELECT
    as well as inside a WHERE criterion.
    """
    pass
class _BooleanExpression(_BinaryExpression):
    """represents a boolean expression."""
    def __init__(self, *args, **kwargs):
        # 'negate' holds the operator to use when this expression is
        # negated (e.g. '=' <-> '!='); popped so the superclass never
        # sees it.
        self.negate = kwargs.pop('negate', None)
        super(_BooleanExpression, self).__init__(*args, **kwargs)
    def _negate(self):
        # When a negated operator is known, swap operator/negate rather
        # than wrapping the whole expression in NOT.
        if self.negate is not None:
            return _BooleanExpression(self.left, self.right, self.negate, negate=self.operator, type=self.type)
        else:
            return super(_BooleanExpression, self)._negate()
class _Exists(_BooleanExpression):
    """Renders as ``EXISTS (<correlated subquery>)``."""
    def __init__(self, *args, **kwargs):
        # The subquery always correlates with the enclosing statement.
        kwargs['correlate'] = True
        s = select(*args, **kwargs)
        _BooleanExpression.__init__(self, _TextClause("EXISTS"), s, None)
    def _hide_froms(self):
        # The subquery's tables must not leak into the enclosing
        # statement's FROM list.
        return self._get_from_objects()
class Join(FromClause):
    """A FROM-clause element representing ``left JOIN right ON onclause``."""
    def __init__(self, left, right, onclause=None, isouter = False):
        # Joins operate on the selectable form of each side.
        self.left = left._selectable()
        self.right = right._selectable()
        # Without an explicit ON clause, derive one from foreign keys.
        if onclause is None:
            self.onclause = self._match_primaries(self.left, self.right)
        else:
            self.onclause = onclause
        self.isouter = isouter
    name = property(lambda s: "Join object on " + s.left.name + " " + s.right.name)
    def _locate_oid_column(self):
        # Delegate the OID column to the left side of the join.
        return self.left.oid_column
    def _exportable_columns(self):
        # Expose the columns of both sides.
        return [c for c in self.left.columns] + [c for c in self.right.columns]
    def _proxy_column(self, column):
        # Joined columns are keyed by their fully-qualified label to
        # avoid name collisions between the two sides.
        self._columns[column._label] = column
        if column.primary_key:
            self._primary_key.add(column)
        for f in column.foreign_keys:
            self._foreign_keys.add(f)
        return column
    def _match_primaries(self, primary, secondary):
        """Build an ON clause by pairing foreign keys with the columns they reference."""
        crit = []
        constraints = util.Set()
        for fk in secondary.foreign_keys:
            if fk.references(primary):
                crit.append(primary.corresponding_column(fk.column) == fk.parent)
                constraints.add(fk.constraint)
                self.foreignkey = fk.parent
        # Also scan the reverse direction, unless joining a table to itself.
        if primary is not secondary:
            for fk in primary.foreign_keys:
                if fk.references(secondary):
                    crit.append(secondary.corresponding_column(fk.column) == fk.parent)
                    constraints.add(fk.constraint)
                    self.foreignkey = fk.parent
        if len(crit) == 0:
            raise exceptions.ArgumentError("Cant find any foreign key relationships between '%s' and '%s'" % (primary.name, secondary.name))
        elif len(constraints) > 1:
            raise exceptions.ArgumentError("Cant determine join between '%s' and '%s'; tables have more than one foreign key constraint relationship between them. Please specify the 'onclause' of this join explicitly." % (primary.name, secondary.name))
        elif len(crit) == 1:
            return (crit[0])
        else:
            return and_(*crit)
    def _group_parenthesized(self):
        # A join embedded in another expression must be parenthesized.
        return True
    def _get_folded_equivalents(self, equivs=None):
        # Collect the joined columns, emitting only one column from each
        # set the ON clause proves equal (used by fold_equivalents).
        if equivs is None:
            equivs = util.Set()
        class LocateEquivs(NoColumnVisitor):
            def visit_binary(self, binary):
                # 'a = b' with matching column names marks both sides
                # as equivalent.
                if binary.operator == '=' and binary.left.name == binary.right.name:
                    equivs.add(binary.right)
                    equivs.add(binary.left)
        LocateEquivs().traverse(self.onclause)
        collist = []
        # Recurse into nested joins on either side.
        if isinstance(self.left, Join):
            left = self.left._get_folded_equivalents(equivs)
        else:
            left = list(self.left.columns)
        if isinstance(self.right, Join):
            right = self.right._get_folded_equivalents(equivs)
        else:
            right = list(self.right.columns)
        used = util.Set()
        for c in left + right:
            if c in equivs:
                # Emit only the first column of each equivalent set.
                if c.name not in used:
                    collist.append(c)
                    used.add(c.name)
            else:
                collist.append(c)
        return collist
    def select(self, whereclause = None, fold_equivalents=False, **kwargs):
        """Create a ``Select`` from this ``Join``.
        whereclause
          the WHERE criterion that will be sent to the ``select()`` function
        fold_equivalents
          based on the join criterion of this ``Join``, do not include equivalent
          columns in the column list of the resulting select. this will recursively
          apply to any joins directly nested by this one as well.
        \**kwargs
          all other kwargs are sent to the underlying ``select()`` function
        """
        if fold_equivalents:
            collist = self._get_folded_equivalents()
        else:
            collist = [self.left, self.right]
        return select(collist, whereclause, from_obj=[self], **kwargs)
    def get_children(self, **kwargs):
        return self.left, self.right, self.onclause
    def accept_visitor(self, visitor):
        visitor.visit_join(self)
    engine = property(lambda s:s.left.engine or s.right.engine)
    def alias(self, name=None):
        """Create a ``Select`` out of this ``Join`` clause and return an ``Alias`` of it.
        The ``Select`` is not correlating.
        """
        return self.select(use_labels=True, correlate=False).alias(name)
    def _hide_froms(self):
        # Tables inside this join must not also appear as standalone
        # FROM entries in the enclosing statement.
        return self.left._get_from_objects() + self.right._get_from_objects()
    def _get_from_objects(self):
        return [self] + self.onclause._get_from_objects() + self.left._get_from_objects() + self.right._get_from_objects()
class Alias(FromClause):
    """Represents ``selectable AS name``: another selectable wrapped
    under a (possibly generated) alias name."""
    def __init__(self, selectable, alias = None):
        # Unwrap nested aliases so 'original' is the underlying
        # table/select.
        baseselectable = selectable
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.selectable
        self.original = baseselectable
        self.selectable = selectable
        if alias is None:
            # Derive a base name from the underlying selectable when
            # possible, truncate it, then uniquify with a random hex
            # suffix.
            if self.original.named_with_column():
                alias = getattr(self.original, 'name', None)
            if alias is None:
                alias = 'anon'
            elif len(alias) > 15:
                alias = alias[0:15]
            alias = alias + "_" + hex(random.randint(0, 65535))[2:]
        self.name = alias
        self.case_sensitive = getattr(baseselectable, "case_sensitive", True)
    def supports_execution(self):
        return self.original.supports_execution()
    def _locate_oid_column(self):
        # Proxy the wrapped selectable's OID column under this alias.
        if self.selectable.oid_column is not None:
            return self.selectable.oid_column._make_proxy(self)
        else:
            return None
    def named_with_column(self):
        return True
    def _exportable_columns(self):
        #return self.selectable._exportable_columns()
        return self.selectable.columns
    def get_children(self, **kwargs):
        for c in self.c:
            yield c
        yield self.selectable
    def accept_visitor(self, visitor):
        visitor.visit_alias(self)
    def _get_from_objects(self):
        return [self]
    def _group_parenthesized(self):
        # An alias renders as a single name; no parentheses needed.
        return False
    engine = property(lambda s: s.selectable.engine)
class _Label(ColumnElement):
    """A named label (``expr AS name``) wrapping another column element."""

    def __init__(self, name, obj, type=None):
        self.name = name
        # Collapse nested labels: always wrap the innermost element.
        while isinstance(obj, _Label):
            obj = obj.obj
        self.obj = obj
        self.case_sensitive = getattr(obj, "case_sensitive", True)
        self.type = sqltypes.to_instance(type)
        # Side effect: force the wrapped element to render in parentheses.
        obj.parens=True

    # A label's lookup key and SQL label are simply its name.
    key = property(lambda s: s.name)
    _label = property(lambda s: s.name)
    orig_set = property(lambda s:s.obj.orig_set)

    def get_children(self, **kwargs):
        return self.obj,

    def accept_visitor(self, visitor):
        visitor.visit_label(self)

    def _get_from_objects(self):
        # FROM objects come from the labeled expression.
        return self.obj._get_from_objects()

    def _make_proxy(self, selectable, name = None):
        # Proxy under the label's name; fall back to a plain column for
        # elements that are not themselves Selectable.
        if isinstance(self.obj, Selectable):
            return self.obj._make_proxy(selectable, name=self.name)
        else:
            return column(self.name)._make_proxy(selectable=selectable)
legal_characters = util.Set(string.ascii_letters + string.digits + '_')
class _ColumnClause(ColumnElement):
"""Represent a textual column clause in a SQL statement.
May or may not be bound to an underlying ``Selectable``.
"""
def __init__(self, text, selectable=None, type=None, _is_oid=False, case_sensitive=True, is_literal=False):
    """Construct a textual column clause.

    text: the column name or literal SQL text.
    selectable: optional parent table/selectable this column belongs to.
    type: optional type object/class, coerced to an instance.
    _is_oid: True when this column represents the OID pseudo-column.
    is_literal: True when `text` is raw SQL, not a quotable identifier.
    """
    self.key = self.name = text
    self.table = selectable
    self.type = sqltypes.to_instance(type)
    self._is_oid = _is_oid
    # cached label; computed lazily by _get_label()
    self.__label = None
    self.case_sensitive = case_sensitive
    self.is_literal = is_literal
def _get_label(self):
    """Compute (and cache) the label used for this column in result sets.

    Returns None for literal columns, "tablename_columnname" for columns
    bound to a named table (with collision/length mangling), otherwise the
    bare column name.  The result is filtered to legal identifier chars.
    """
    # for a "literal" column, we've no idea what the text is
    # therefore no 'label' can be automatically generated
    if self.is_literal:
        return None
    if self.__label is None:
        if self.table is not None and self.table.named_with_column():
            self.__label = self.table.name + "_" + self.name
            # On a name collision, or if the label would be too long for
            # some databases (>= 30 chars), truncate and add a random
            # hex suffix to keep it unique.
            if self.table.c.has_key(self.__label) or len(self.__label) >= 30:
                self.__label = self.__label[0:24] + "_" + hex(random.randint(0, 65535))[2:]
        else:
            self.__label = self.name
        # strip anything that is not a letter, digit or underscore
        self.__label = "".join([x for x in self.__label if x in legal_characters])
    return self.__label

_label = property(_get_label)
def label(self, name):
# if going off the "__label" property and its None, we have
# no label; return self
if name is None:
return self
else:
| |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import csv
import math
import pytz
import six.moves.http_client
from datetime import datetime
from time import time
from random import shuffle
from six.moves.urllib.parse import urlencode, urlsplit, urlunsplit
from cgi import parse_qs
from graphite.compat import HttpResponse
from graphite.user_util import getProfileByUsername
from graphite.util import json, unpickle, pickle, msgpack, BytesIO
from graphite.storage import extractForwardHeaders
from graphite.logger import log
from graphite.render.evaluator import evaluateTarget
from graphite.render.attime import parseATTime
from graphite.functions import loadFunctions, PieFunction
from graphite.render.hashing import hashRequest, hashData
from graphite.render.glyph import GraphTypes
from graphite.tags.models import Series, Tag, TagValue, SeriesTag # noqa # pylint: disable=unused-import
from django.http import HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.cache import add_never_cache_headers, patch_response_headers
from six.moves import zip
loadFunctions()
def renderView(request):
    """Main graphite rendering endpoint.

    Parses request options, consults the request cache and (for line graphs)
    the data cache, evaluates the requested targets, then dispatches to a
    format-specific renderer; falls back to rendering a graph image.
    Responses are cached unless caching was disabled for this request.
    """
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    # TODO: Make that a namedtuple or a class.
    requestContext = {
        'startTime' : requestOptions['startTime'],
        'endTime' : requestOptions['endTime'],
        'now': requestOptions['now'],
        'localOnly' : requestOptions['localOnly'],
        'template' : requestOptions['template'],
        'tzinfo' : requestOptions['tzinfo'],
        'forwardHeaders': requestOptions['forwardHeaders'],
        'data' : [],
        'prefetched' : {},
        'xFilesFactor' : requestOptions['xFilesFactor'],
    }
    data = requestContext['data']
    response = None

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        response = cache.get(requestKey)
        if response:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' % (time() - start))
            return response
        log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                # literal "name:value" slice
                try:
                    name,value = target.split(':',1)
                    value = float(value)
                # NOTE(review): bare except — should be `except ValueError`;
                # as written it also swallows unrelated errors.
                except:
                    raise ValueError("Invalid target '%s'" % target)
                data.append( (name,value) )
            else:
                # evaluate the target and collapse each series to one value
                seriesList = evaluateTarget(requestContext, target)
                for series in seriesList:
                    func = PieFunction(requestOptions['pieMode'])
                    data.append( (series.name, func(requestContext, series) or 0 ))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        cachedData = None
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime, requestOptions['xFilesFactor'])
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)

        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else: # Have to actually retrieve the data now
            targets = requestOptions['targets']
            data.extend(evaluateTarget(requestContext, targets))
            if useCache:
                # dataKey is defined here because useCache was true above
                cache.add(dataKey, data, cacheTimeout)

        renderStart = time()
        # Data-only output formats; anything unrecognized falls through to
        # image rendering below.
        format = requestOptions.get('format')
        if format == 'csv':
            response = renderViewCsv(requestOptions, data)
        elif format == 'json':
            response = renderViewJson(requestOptions, data)
        elif format == 'dygraph':
            response = renderViewDygraph(requestOptions, data)
        elif format == 'rickshaw':
            response = renderViewRickshaw(requestOptions, data)
        elif format == 'raw':
            response = renderViewRaw(requestOptions, data)
        elif format == 'pickle':
            response = renderViewPickle(requestOptions, data)
        elif format == 'msgpack':
            response = renderViewMsgPack(requestOptions, data)

    # if response wasn't generated above, render a graph image
    if not response:
        format = 'image'
        renderStart = time()
        response = renderViewGraph(graphOptions, requestOptions, data)

    if useCache:
        cache.add(requestKey, response, cacheTimeout)
        # Instruct the client to cache the response for the same duration.
        patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
        add_never_cache_headers(response)

    log.rendering('%s rendering time %6f' % (format, time() - renderStart))
    log.rendering('Total request processing time %6f' % (time() - start))
    return response
def renderViewGraph(graphOptions, requestOptions, data):
    """Render the series data to an image (png/svg/pdf) response.

    Rendering is delegated to a remote renderer when REMOTE_RENDERING is
    configured; SVG output additionally supports JSONP wrapping.
    """
    graphOptions['data'] = data
    if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions['graphType'], graphOptions,
                                  requestOptions['forwardHeaders'])
    else:
        image = doImageRender(requestOptions['graphClass'], graphOptions)

    fmt = graphOptions['outputFormat']
    if fmt == 'pdf':
        return buildResponse(image, 'application/x-pdf')
    if fmt == 'svg':
        if 'jsonp' in requestOptions:
            return HttpResponse(
                content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
                content_type='text/javascript')
        return buildResponse(image, 'image/svg+xml')
    return buildResponse(image, 'image/png')
def renderViewCsv(requestOptions, data):
    """Render series data as CSV rows of (series name, local timestamp, value)."""
    response = HttpResponse(content_type='text/csv')
    writer = csv.writer(response, dialect='excel')
    for series in data:
        for offset, value in enumerate(series):
            when = datetime.fromtimestamp(series.start + offset * series.step,
                                          requestOptions['tzinfo'])
            writer.writerow((series.name, when.strftime("%Y-%m-%d %H:%M:%S"), value))
    return response
def renderViewJson(requestOptions, data):
    """Render series data as JSON (or JSONP), honouring the maxDataPoints
    and noNullPoints request options.

    Each series becomes {"target", "tags", "datapoints": [[value, ts], ...]}.
    """
    series_data = []
    if 'maxDataPoints' in requestOptions and any(data):
        maxDataPoints = requestOptions['maxDataPoints']
        if maxDataPoints == 1:
            # Collapse each series into one consolidated point at its start.
            for series in data:
                series.consolidate(len(series))
                datapoints = list(zip(series, [int(series.start)]))
                series_data.append(dict(target=series.name, tags=series.tags, datapoints=datapoints))
        else:
            startTime = min([series.start for series in data])
            endTime = max([series.end for series in data])
            timeRange = endTime - startTime
            for series in data:
                numberOfDataPoints = timeRange/series.step
                if maxDataPoints < numberOfDataPoints:
                    # Consolidate down to at most maxDataPoints points.
                    valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
                    secondsPerPoint = int(valuesPerPoint * series.step)
                    # Nudge start over a little bit so that the consolidation bands align with each call
                    # removing 'jitter' seen when refreshing.
                    nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
                    series.start = series.start + nudge
                    valuesToLose = int(nudge/series.step)
                    # NOTE(review): range starts at 1, so only valuesToLose-1
                    # leading points are dropped — presumably intentional
                    # (keeps one boundary value); confirm against upstream.
                    for r in range(1, valuesToLose):
                        del series[0]
                    series.consolidate(valuesPerPoint)
                    timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
                else:
                    timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
                datapoints = list(zip(series, timestamps))
                series_data.append(dict(target=series.name, tags=series.tags, datapoints=datapoints))
    elif 'noNullPoints' in requestOptions and any(data):
        # Emit only non-null, non-NaN points, as (value, timestamp) pairs;
        # series with no remaining points are omitted entirely.
        for series in data:
            values = []
            for (index,v) in enumerate(series):
                if v is not None and not math.isnan(v):
                    timestamp = series.start + (index * series.step)
                    values.append((v,timestamp))
            if len(values) > 0:
                series_data.append(dict(target=series.name, tags=series.tags, datapoints=values))
    else:
        for series in data:
            timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
            datapoints = list(zip(series, timestamps))
            series_data.append(dict(target=series.name, tags=series.tags, datapoints=datapoints))

    # Textual post-processing: json.dumps emits Python spellings (None/NaN/
    # Infinity) that are not valid JSON, so rewrite them in the output string.
    output = json.dumps(series_data, indent=(2 if requestOptions.get('pretty') else None)).replace('None,', 'null,').replace('NaN,', 'null,').replace('Infinity,', '1e9999,')
    if 'jsonp' in requestOptions:
        response = HttpResponse(
            content="%s(%s)" % (requestOptions['jsonp'], output),
            content_type='text/javascript')
    else:
        response = HttpResponse(
            content=output,
            content_type='application/json')
    return response
def renderViewDygraph(requestOptions, data):
    """Render series data in the Dygraph JSON format:
    {"labels": [...], "data": [[ts, v1, v2, ...], ...]}.
    """
    labels = ['Time']
    output = '{}'
    if data:
        # assumes every series shares data[0]'s start/end/step — TODO confirm;
        # a shorter series would raise IndexError below.
        datapoints = [[ts] for ts in range(data[0].start, data[0].end, data[0].step)]
        for series in data:
            labels.append(series.name)
            for i, point in enumerate(series):
                # map non-JSON float values to Dygraph-compatible spellings
                if point is None:
                    point = 'null'
                elif point == float('inf'):
                    point = 'Infinity'
                elif point == float('-inf'):
                    point = '-Infinity'
                elif math.isnan(point):
                    point = 'null'
                datapoints[i].append(point)
        # "%s000" turns second timestamps into millisecond timestamps
        line_template = '[%%s000%s]' % ''.join([', %s'] * len(data))
        lines = [line_template % tuple(points) for points in datapoints]
        output = '{"labels" : %s, "data" : [%s]}' % (json.dumps(labels), ', '.join(lines))
    if 'jsonp' in requestOptions:
        response = HttpResponse(
            content="%s(%s)" % (requestOptions['jsonp'], output),
            content_type='text/javascript')
    else:
        response = HttpResponse(
            content=output,
            content_type='application/json')
    return response
def renderViewRickshaw(requestOptions, data):
    """Render series data as Rickshaw-compatible JSON: a list of
    {"target", "datapoints": [{"x": ts, "y": value}, ...]} objects."""
    series_data = []
    for series in data:
        points = [{'x' : ts, 'y' : val}
                  for ts, val in zip(range(series.start, series.end, series.step),
                                     series)]
        series_data.append( dict(target=series.name, datapoints=points) )
    output = json.dumps(series_data, indent=(2 if requestOptions.get('pretty') else None))
    if 'jsonp' in requestOptions:
        return HttpResponse(
            content="%s(%s)" % (requestOptions['jsonp'], output),
            content_type='text/javascript')
    return HttpResponse(
        content=output,
        content_type='application/json')
def renderViewRaw(requestOptions, data):
    """Render series data in graphite's plain-text "raw" format:
    name,start,end,step|v1,v2,...  (one line per series)."""
    response = HttpResponse(content_type='text/plain')
    for series in data:
        header = "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step)
        values = ','.join(repr(v) for v in series)
        response.write(header)
        response.write(values)
        response.write('\n')
    return response
def renderViewPickle(requestOptions, data):
    """Render series data as a pickled list of series-info dicts.

    protocol=-1 selects the highest pickle protocol available.
    """
    response = HttpResponse(content_type='application/pickle')
    seriesInfo = [series.getInfo() for series in data]
    pickle.dump(seriesInfo, response, protocol=-1)
    return response
def renderViewMsgPack(requestOptions, data):
    """Render series data as a msgpack-encoded list of series-info dicts."""
    response = HttpResponse(content_type='application/x-msgpack')
    seriesInfo = [series.getInfo() for series in data]
    # use_bin_type distinguishes bytes from str in the msgpack encoding
    msgpack.dump(seriesInfo, response, use_bin_type=True)
    return response
def parseOptions(request):
queryParams = request.GET.copy()
queryParams.update(request.POST)
# Start with some defaults
graphOptions = {'width' : 330, 'height' : 250}
requestOptions = {}
graphType = queryParams.get('graphType','line')
if graphType not in GraphTypes:
raise AssertionError("Invalid graphType '%s', must be one of %s"
% (graphType,list(GraphTypes)))
graphClass = GraphTypes[graphType]
# Fill in the requestOptions
requestOptions['graphType'] = graphType
requestOptions['graphClass'] = graphClass
requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
cacheTimeout = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
requestOptions['targets'] = []
requestOptions['forwardHeaders'] = extractForwardHeaders(request)
# Extract the targets out of the queryParams
mytargets = []
# Normal format: ?target=path.1&target=path.2
if len(queryParams.getlist('target')) > 0:
mytargets = queryParams.getlist('target')
# Rails/PHP/jQuery common practice format: ?target[]=path.1&target[]=path.2
elif len(queryParams.getlist('target[]')) > 0:
mytargets = queryParams.getlist('target[]')
# Collect the targets
for target in mytargets:
requestOptions['targets'].append(target)
template = dict()
for key, val in queryParams.items():
if key.startswith("template["):
template[key[9:-1]] = val
requestOptions['template'] = template
if 'pickle' in queryParams:
requestOptions['format'] = 'pickle'
if 'rawData' in queryParams:
requestOptions['format'] = 'raw'
if 'format' in queryParams:
requestOptions['format'] = queryParams['format']
if 'jsonp' in queryParams:
requestOptions['jsonp'] = queryParams['jsonp']
requestOptions['pretty'] = bool(queryParams.get('pretty'))
if 'noCache' in queryParams:
requestOptions['noCache'] = True
if 'maxDataPoints' in queryParams and queryParams['maxDataPoints'].isdigit():
requestOptions['maxDataPoints'] = int(queryParams['maxDataPoints'])
if 'noNullPoints' in queryParams:
requestOptions['noNullPoints'] = True
requestOptions['localOnly'] = queryParams.get('local') == '1'
# Fill in the graphOptions
format = requestOptions.get('format')
if format == 'svg':
graphOptions['outputFormat'] = 'svg'
elif format == 'pdf':
graphOptions['outputFormat'] = 'pdf'
else:
| |
from JumpScale import j
from stat import *
import brotli
import hashlib
import functools
import subprocess
import pwd
import grp
import os
import sys
import re
class FListFactory(object):
    """JumpScale factory registered as ``j.tools.flist``; hands out flist helpers."""

    def __init__(self):
        # location under which JumpScale registers this factory
        self.__jslocation__ = "j.tools.flist"

    def get_flist(self):
        '''
        Return a new, empty FList object.
        '''
        return FList()

    def get_archiver(self):
        '''
        Return a FListArchiver object.
        This is used to push flist content to IPFS.
        '''
        return FListArchiver()
class FList(object):
    """
    FList (sometimes "plist") files contain a plain/text representation of
    a complete file system tree.

    FList stands for "file list" (plist for "path list"); the format maps a
    file to its md5 hash, which allows retrieving the file remotely and
    getting its metadata separately.

    FList is formatted to support POSIX ACL, file type representation and
    extra data (used internally to describe some file types).

    A flist file contains one entry per file; fields are separated by "|".
    A filename must not contain the pipe character, otherwise it is not
    supported at all.

    Line format supported by this library:

    filepath|hash|filesize|uname|gname|permissions|filetype|ctime|mtime|extended

    - filepath: the complete file path on the filesystem
    - hash: md5 checksum of the file
      - if the file is a special file (block, symlink, ...), use this hash:
        md5("flist:" + filename (fullpath) + ":" + mtime)
    - filesize: size in bytes
    - uname: username owner of the file (numeric uid if the name is unknown)
    - gname: groupname owner of the file (numeric gid if the name is unknown)
    - permissions: octal representation of the posix permissions
    - filetype: integer representing the file type:
      - 0: socket       (S_IFSOCK)
      - 1: symlink      (S_IFLNK)
      - 2: regular file (S_IFREG)
      - 3: block device (S_IFBLK)
      - 4: directory    (S_IFDIR) (used for empty directories)
      - 5: char. device (S_IFCHR)
      - 6: fifo pipe    (S_IFIFO)
    - ctime: unix timestamp of the creation time
    - mtime: unix timestamp of the modification time
    - extended: optional extra data related to the file type:
      - symlink: the target of the link
      - block/char device: "major,minor" numbers
    """

    # Internal layout of one self._data row (field indexes used throughout):
    # [0] path  [1] hash  [2] size  [3] uname  [4] gname  [5] permissions
    # [6] filetype  [7] ctime  [8] mtime  [9] extended

    def __init__(self):
        self._data = []   # one 10-field row per entry (layout above)
        self._hash = {}   # hash -> list of row indexes
        self._path = {}   # path -> row index

    def parse(self, filename):
        """Load an flist file, replacing any previously loaded content.

        Returns the row index of the last parsed entry.
        """
        del self._data[:]
        self._hash.clear()
        self._path.clear()
        index = 0
        with open(filename) as flist:
            for line in flist:
                line = line.strip()
                if not line:
                    continue  # tolerate blank/trailing lines
                f = line.split('|')
                # BUGFIX: rows must be indexed by their path (field 0); the
                # previous code indexed by hash (f[1]), which keyed the path
                # table on hashes and broke every per-path lookup after parse.
                index = self._indexForPath(f[0])
                self._data[index] = [
                    f[0],        # path
                    f[1],        # hash
                    int(f[2]),   # size
                    f[3],        # uname
                    f[4],        # gname
                    f[5],        # permission
                    int(f[6]),   # filetype
                    int(f[7]),   # ctime
                    int(f[8]),   # mtime
                    f[9]         # extended
                ]
                # keep the hash -> indexes mapping in sync so that
                # filesFromHash() also works on parsed flists
                self._hash.setdefault(f[1], []).append(index)
        return index

    #
    # Getters
    #
    def _indexsFromHash(self, hash):
        """Return the list of row indexes carrying *hash*, or None."""
        if hash not in self._hash:
            return None
        return self._hash[hash]

    def getHashList(self):
        """Return the hash of every entry (duplicates included)."""
        return [row[1] for row in self._data]

    def filesFromHash(self, hash):
        """Return all file paths whose content matches *hash*."""
        paths = []
        ids = self._indexsFromHash(hash)
        for x in ids:
            paths.append(self._data[x][0])
        return paths

    def _getItem(self, filename, index):
        """Return field *index* of the row for *filename*, or None if unknown."""
        # .get() so unknown paths return None instead of raising KeyError
        # (which made the `is not None` guard below dead code)
        id = self._path.get(filename)
        if id is not None:
            return self._data[id][index]
        return None

    def getHash(self, filename):
        return self._getItem(filename, 1)

    def getType(self, filename):
        # FIXME: filetype decoding not implemented; always returns None
        type = self._getItem(filename, 0)
        if type is None:
            return None
        return None

    def isRegular(self, filename):
        """True when *filename* is recorded as a regular file (filetype 2)."""
        return self._getItem(filename, 6) == 2

    def getSize(self, filename):
        return self._getItem(filename, 2)

    def getMode(self, filename):
        return self._getItem(filename, 5)

    def getOwner(self, filename):
        return self._getItem(filename, 3)

    def getGroup(self, filename):
        return self._getItem(filename, 4)

    def getExtended(self, filename):
        # FIXME: not implemented; the extended field lives at index 9
        return -1

    def getCreationTime(self, filename):
        return self._getItem(filename, 7)

    def getModificationTime(self, filename):
        return self._getItem(filename, 8)

    #
    # Setters
    #
    def _indexForPath(self, filename):
        """Return the row index for *filename*, allocating a new row if needed."""
        if filename not in self._path:
            self._data.append([None] * 10)
            id = len(self._data) - 1
            self._data[id][0] = filename
            self._path[filename] = id
        return self._path[filename]

    def _setItem(self, filename, value, index):
        """Write *value* into field *index* of the row for *filename*."""
        id = self._indexForPath(filename)
        if id is None:
            return None
        self._data[id][index] = value
        return value

    def setHash(self, filename, value):
        self._setItem(filename, value, 1)
        # maintain the reverse hash -> indexes mapping
        id = self._indexForPath(filename)
        if value in self._hash:
            self._hash[value].append(id)
        else:
            self._hash[value] = [id]
        return value

    def setType(self, filename, value):
        """Store the numeric filetype decoded from a raw st_mode *value*."""
        # testing regular first, it will probably be the most common type
        if S_ISREG(value):
            return self._setItem(filename, 2, 6)
        # special file types
        if S_ISSOCK(value):
            return self._setItem(filename, 0, 6)
        if S_ISLNK(value):
            return self._setItem(filename, 1, 6)
        if S_ISBLK(value):
            return self._setItem(filename, 3, 6)
        if S_ISCHR(value):
            return self._setItem(filename, 5, 6)
        if S_ISFIFO(value):
            return self._setItem(filename, 6, 6)
        # keep track of empty directories
        if S_ISDIR(value):
            return self._setItem(filename, 4, 6)
        return None

    def setSize(self, filename, value):
        return self._setItem(filename, value, 2)

    def setMode(self, filename, value):
        return self._setItem(filename, value, 5)

    def setOwner(self, filename, value):
        return self._setItem(filename, value, 3)

    def setGroup(self, filename, value):
        return self._setItem(filename, value, 4)

    def setExtended(self, filename, value):
        """Fill the extended field from a stat struct *value*."""
        path = self._getItem(filename, 0)
        if S_ISLNK(value.st_mode):
            # symlink: store the link target
            xtd = os.readlink(path)
            return self._setItem(filename, xtd, 9)
        if S_ISBLK(value.st_mode) or S_ISCHR(value.st_mode):
            # device: store "major,minor"
            id = '%d,%d' % (os.major(value.st_rdev), os.minor(value.st_rdev))
            return self._setItem(filename, id, 9)
        return self._setItem(filename, "", 9)

    def setModificationTime(self, filename, value):
        # BUGFIX: mtime lives in field 8; this previously wrote field 7,
        # clobbering ctime and disagreeing with getModificationTime()
        # and the documented "ctime|mtime" field order.
        return self._setItem(filename, int(value), 8)

    def setCreationTime(self, filename, value):
        # BUGFIX: ctime lives in field 7 (was writing field 8, see above)
        return self._setItem(filename, int(value), 7)

    #
    # Builder
    #
    def _build(self, filename):
        """Stat *filename* and record all of its metadata fields."""
        stat = os.stat(filename, follow_symlinks=False)
        mode = oct(stat.st_mode)[4:]
        # grab username from userid; if unknown, fall back to the numeric id
        try:
            uname = pwd.getpwuid(stat.st_uid).pw_name
        except KeyError:
            uname = stat.st_uid
        # grab groupname from groupid; if unknown, fall back to the numeric id
        try:
            gname = grp.getgrgid(stat.st_gid).gr_name
        except KeyError:
            gname = stat.st_gid
        # hash the content only for regular files; otherwise hash a synthetic
        # name (the hash is the file's "id" and can never be empty)
        if not S_ISREG(stat.st_mode):
            hashstr = "flist:%s:%d" % (filename, stat.st_mtime)
            hash = j.data.hash.md5_string(hashstr)
        else:
            hash = j.data.hash.md5(filename)
        self.setHash(filename, hash)
        self.setType(filename, stat.st_mode)
        self.setSize(filename, stat.st_size)
        self.setMode(filename, mode)
        self.setOwner(filename, uname)
        self.setGroup(filename, gname)
        self.setExtended(filename, stat)
        self.setModificationTime(filename, stat.st_mtime)
        self.setCreationTime(filename, stat.st_ctime)

    def __valid(self, fname, excludes):
        """Return False when *fname* matches any compiled exclude pattern."""
        for ex in excludes:
            if ex.match(fname):
                return False
        return True

    def build(self, path, excludes=None):
        """Walk *path* and populate the flist.

        excludes: optional list of regex strings; matching paths are skipped.
        Only works on an empty FList: returns the number of entries built,
        or None if data was already loaded.
        """
        if len(self._data) > 0:
            # this can only be done on an empty list
            return None
        # None default avoids the shared-mutable-default pitfall;
        # precompile the exclusion patterns once
        compiled = [re.compile(ex) for ex in (excludes or [])]
        for dirpath, dirs, files in os.walk(path, followlinks=True):
            for dirname in dirs:
                fname = os.path.join(dirpath, dirname)
                if not self.__valid(fname, compiled):
                    continue
                # only empty directories get an entry of their own
                if j.sal.fs.isEmptyDir(fname):
                    self._build(fname)
            for filename in files:
                fname = os.path.join(dirpath, filename)
                if not self.__valid(fname, compiled):
                    continue
                self._build(fname)
        return len(self._data)

    #
    # Exporting
    #
    def dumps(self, trim=''):
        """Serialize to flist text; *trim* is a path prefix stripped from entries."""
        data = []
        for f in self._data:
            p = f[0]
            if p.startswith(trim):
                p = p[len(trim):]
            line = "%s|%s|%d|%s|%s|%s|%d|%d|%d|%s" % (
                p, f[1], f[2], f[3], f[4], f[5], f[6], f[7], f[8], f[9]
            )
            data.append(line)
        return "\n".join(data) + "\n"

    def _debug(self):
        """Print the shallow memory usage of the internal tables."""
        tableMain = sys.getsizeof(self._data)
        tableHash = sys.getsizeof(self._hash)
        tablePath = sys.getsizeof(self._path)
        print("Main table: %.2f ko" % (float(tableMain) / 1024))
        print("Hash table: %.2f ko" % (float(tableHash) / 1024))
        print("Path table: %.2f ko" % (float(tablePath) / 1024))
class FListArchiver:
# This is a not efficient way, the only other possibility
# is to call brotli binary to compress big file if needed
# currently, this in-memory way is used
def __init__(self, ipfs_cfgdir=None):
    """Resolve the ipfs binary location and the IPFS_PATH env prefix.

    ipfs_cfgdir: optional explicit IPFS configuration directory; by default
    $cfgDir/ipfs/main is resolved through the local cuisine helper.
    """
    cl = j.tools.cuisine.local
    self._ipfs = cl.core.command_location('ipfs')
    if not ipfs_cfgdir:
        self._env = 'IPFS_PATH=%s' % cl.core.args_replace('$cfgDir/ipfs/main')
    else:
        self._env = 'IPFS_PATH=%s' % ipfs_cfgdir
def _compress(self, source, destination):
    """Brotli-compress the *source* file into *destination* (quality 6).

    The whole file is read into memory; fine for flist-sized payloads.
    """
    with open(source, 'rb') as src:
        payload = src.read()
    with open(destination, "wb") as dst:
        dst.write(brotli.compress(payload, quality=6))
def push_to_ipfs(self, source):
    """Add *source* to IPFS and return the resulting content hash.

    Raises RuntimeError when the output of `ipfs add` cannot be parsed.
    """
    # NOTE(review): shell=True with an interpolated path — *source* is
    # internally generated here, but single quotes in a filename would
    # break the quoting; a list argv with env= would be safer.
    cmd = "%s %s add '%s'" % (self._env, self._ipfs, source)
    out = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    # expected output shape: "added <hash> <name>"
    m = re.match(r'^added (.+) (.+)$', out.stdout.decode())
    if m is None:
        raise RuntimeError('invalid output from ipfs add: %s' % out)
    return m.group(1)
def build(self, flist, backend):
hashes = flist.getHashList()
if not os.path.exists(backend):
os.makedirs(backend)
for hash in hashes:
files = flist.filesFromHash(hash)
# skipping non regular files
if not flist.isRegular(files[0]):
continue
print("Processing: %s" % hash)
root = "%s/%s/%s" % (backend, hash[0:2], hash[2:4])
file = hash
target = "%s/%s" % (root, file)
if not os.path.exists(root):
os.makedirs(root)
# compressing the file
self._compress(files[0], target)
# adding it to ipfs network
hash = self.push_to_ipfs(target)
print("Network hash: %s" % hash)
# updating flist hash with | |
and other
supported objects. Please check documentation of `tf.parse_example` for all
supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
`_FeatureColumn` (input_fc). One possible implementation of
_parse_example_spec is as follows:
```python
spec = {'raw': tf.FixedLenFeature(...)}
spec.update(input_fc._parse_example_spec)
return spec
```
"""
pass
def _reset_config(self):
    """Resets the configuration in the column.

    Some feature columns e.g. embedding or shared embedding columns might
    have some state that is needed to be reset sometimes. Use this method
    in that scenario.

    Default implementation is a no-op; stateful subclasses override it.
    """
class _DenseColumn(_FeatureColumn):
    """Represents a column which can be represented as `Tensor`.

    WARNING: Do not subclass this layer unless you know what you are doing:
    the API is subject to future changes.

    Some examples of this type are: numeric_column, embedding_column,
    indicator_column.
    """

    __metaclass__ = abc.ABCMeta  # py2-style ABC declaration

    @abc.abstractproperty
    def _variable_shape(self):
        """`TensorShape` of `_get_dense_tensor`, without batch dimension."""
        pass

    @abc.abstractmethod
    def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
        """Returns a `Tensor`.

        The output of this function will be used by model-builder-functions. For
        example the pseudo code of `input_layer` will be like:

        ```python
        def input_layer(features, feature_columns, ...):
          outputs = [fc._get_dense_tensor(...) for fc in feature_columns]
          return tf.concat(outputs)
        ```

        Args:
          inputs: A `_LazyBuilder` object to access inputs.
          weight_collections: List of graph collections to which Variables (if any
            will be created) are added.
          trainable: If `True` also add variables to the graph collection
            `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).

        Returns:
          `Tensor` of shape [batch_size] + `_variable_shape`.
        """
        pass
def _create_weighted_sum(column,
                         builder,
                         units,
                         sparse_combiner,
                         weight_collections,
                         trainable,
                         weight_var=None):
    """Creates a weighted sum for a dense/categorical column for linear_model.

    Dispatches to the categorical implementation (which also needs the
    sparse_combiner) or the dense one, based on the column's type.
    """
    shared_kwargs = dict(
        column=column,
        builder=builder,
        units=units,
        weight_collections=weight_collections,
        trainable=trainable,
        weight_var=weight_var)
    if isinstance(column, _CategoricalColumn):
        return _create_categorical_column_weighted_sum(
            sparse_combiner=sparse_combiner, **shared_kwargs)
    return _create_dense_column_weighted_sum(**shared_kwargs)
def _create_dense_column_weighted_sum(column,
                                      builder,
                                      units,
                                      weight_collections,
                                      trainable,
                                      weight_var=None):
    """Create a weighted sum of a dense column for linear_model.

    Flattens the column's dense tensor to [batch_size, num_elements] and
    multiplies it by a [num_elements, units] weight matrix (created here,
    zero-initialized, unless weight_var is supplied).
    """
    tensor = column._get_dense_tensor(  # pylint: disable=protected-access
        builder,
        weight_collections=weight_collections,
        trainable=trainable)
    num_elements = column._variable_shape.num_elements()  # pylint: disable=protected-access
    batch_size = array_ops.shape(tensor)[0]
    # collapse all non-batch dimensions so matmul sees a rank-2 tensor
    tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
    if weight_var is not None:
        weight = weight_var
    else:
        weight = variable_scope.get_variable(
            name='weights',
            shape=[num_elements, units],
            initializer=init_ops.zeros_initializer(),
            trainable=trainable,
            collections=weight_collections)
    return math_ops.matmul(tensor, weight, name='weighted_sum')
class _CategoricalColumn(_FeatureColumn):
    """Represents a categorical feature.

    WARNING: Do not subclass this layer unless you know what you are doing:
    the API is subject to future changes.

    A categorical feature typically handled with a `tf.SparseTensor` of IDs.
    """

    __metaclass__ = abc.ABCMeta  # py2-style ABC declaration

    # (id_tensor, weight_tensor) pair returned by _get_sparse_tensors
    IdWeightPair = collections.namedtuple(  # pylint: disable=invalid-name
        'IdWeightPair', ['id_tensor', 'weight_tensor'])

    @abc.abstractproperty
    def _num_buckets(self):
        """Returns number of buckets in this sparse feature."""
        pass

    @abc.abstractmethod
    def _get_sparse_tensors(self,
                            inputs,
                            weight_collections=None,
                            trainable=None):
        """Returns an IdWeightPair.

        `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
        weights.

        `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
        `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
        `SparseTensor` of `float` or `None` to indicate all weights should be
        taken to be 1. If specified, `weight_tensor` must have exactly the same
        shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
        output of a `VarLenFeature` which is a ragged matrix.

        Args:
          inputs: A `LazyBuilder` as a cache to get input tensors required to
            create `IdWeightPair`.
          weight_collections: List of graph collections to which variables (if any
            will be created) are added.
          trainable: If `True` also add variables to the graph collection
            `GraphKeys.TRAINABLE_VARIABLES` (see `tf.get_variable`).
        """
        pass
def _create_categorical_column_weighted_sum(column,
                                            builder,
                                            units,
                                            sparse_combiner,
                                            weight_collections,
                                            trainable,
                                            weight_var=None):
    # pylint: disable=g-doc-return-or-yield,g-doc-args
    """Create a weighted sum of a categorical column for linear_model.

    Note to maintainer: As implementation details, the weighted sum is
    implemented via embedding_lookup_sparse toward efficiency. Mathematically,
    they are the same.

    To be specific, conceptually, categorical column can be treated as multi-hot
    vector. Say:

    ```python
    x = [0 0 1]  # categorical column input
    w = [a b c]  # weights
    ```
    The weighted sum is `c` in this case, which is same as `w[2]`.

    Another example is

    ```python
    x = [0 1 1]  # categorical column input
    w = [a b c]  # weights
    ```
    The weighted sum is `b + c` in this case, which is same as `w[2] + w[3]`.

    For both cases, we can implement weighted sum via embedding_lookup with
    sparse_combiner = "sum".
    """
    sparse_tensors = column._get_sparse_tensors(  # pylint: disable=protected-access
        builder,
        weight_collections=weight_collections,
        trainable=trainable)
    # Flatten to [batch_size, -1] so the lookup treats each row as one example.
    id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
        array_ops.shape(sparse_tensors.id_tensor)[0], -1
    ])
    weight_tensor = sparse_tensors.weight_tensor
    if weight_tensor is not None:
        # weights must be reshaped the same way as the ids they scale
        weight_tensor = sparse_ops.sparse_reshape(
            weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
    if weight_var is not None:
        weight = weight_var
    else:
        weight = variable_scope.get_variable(
            name='weights',
            shape=(column._num_buckets, units),  # pylint: disable=protected-access
            initializer=init_ops.zeros_initializer(),
            trainable=trainable,
            collections=weight_collections)
    return embedding_ops.safe_embedding_lookup_sparse(
        weight,
        id_tensor,
        sparse_weights=weight_tensor,
        combiner=sparse_combiner,
        name='weighted_sum')
class _SequenceDenseColumn(_FeatureColumn):
  """Represents dense sequence data."""

  __metaclass__ = abc.ABCMeta

  # (dense_tensor, sequence_length) pair returned by
  # `_get_sequence_dense_tensor`: the dense values and the per-example
  # sequence lengths.
  TensorSequenceLengthPair = collections.namedtuple(  # pylint: disable=invalid-name
      'TensorSequenceLengthPair', ['dense_tensor', 'sequence_length'])

  @abc.abstractmethod
  def _get_sequence_dense_tensor(
      self, inputs, weight_collections=None, trainable=None):
    """Returns a `TensorSequenceLengthPair`."""
    pass
class _LazyBuilder(object):
"""Handles caching of transformations while building the model.
`_FeatureColumn` specifies how to digest an input column to the network. Some
feature columns require data transformations. This class caches those
transformations.
Some features may be used in more than one place. For example, one can use a
bucketized feature by itself and a cross with it. In that case we
should create only one bucketization op instead of creating ops for each
feature column separately. To handle re-use of transformed columns,
`_LazyBuilder` caches all previously transformed columns.
Example:
We're trying to use the following `_FeatureColumn`s:
```python
bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
keywords = fc.categorical_column_with_hash_buckets("keywords", ...)
age_X_keywords = fc.crossed_column([bucketized_age, "keywords"])
... = linear_model(features,
[bucketized_age, keywords, age_X_keywords]
```
If we transform each column independently, then we'll get duplication of
bucketization (one for cross, one for bucketization itself).
The `_LazyBuilder` eliminates this duplication.
"""
def __init__(self, features):
"""Creates a `_LazyBuilder`.
Args:
features: A mapping from feature column to objects that are `Tensor` or
`SparseTensor`, or can be converted to same via
`sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
signifies a base feature (not-transformed). A `_FeatureColumn` key
means that this `Tensor` is the output of an existing `_FeatureColumn`
which can be reused.
"""
self._features = features.copy()
self._feature_tensors = {}
def get(self, key):
"""Returns a `Tensor` for the given key.
A `str` key is used to access a base feature (not-transformed). When a
`_FeatureColumn` is passed, the transformed feature is returned if it
already exists, otherwise the given `_FeatureColumn` is asked to provide its
transformed output, which is then cached.
Args:
key: a `str` or a `_FeatureColumn`.
Returns:
The transformed `Tensor` corresponding to the `key`.
Raises:
ValueError: if key is not found or a transformed `Tensor` cannot be
computed.
"""
if key in self._feature_tensors:
# FeatureColumn is already transformed or converted.
return self._feature_tensors[key]
if key in self._features:
feature_tensor = self._get_raw_feature_as_tensor(key)
self._feature_tensors[key] = feature_tensor
return feature_tensor
if isinstance(key, six.string_types):
raise ValueError('Feature {} is not in features dictionary.'.format(key))
if not isinstance(key, _FeatureColumn):
raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
'Provided: {}'.format(key))
column = key
logging.debug('Transforming feature_column %s.', column)
transformed = column._transform_feature(self) # pylint: disable=protected-access
if transformed is None:
raise ValueError('Column {} is not supported.'.format(column.name))
self._feature_tensors[column] = transformed
return transformed
def _get_raw_feature_as_tensor(self, key):
"""Gets the raw_feature (keyed by `key`) as `tensor`.
The raw feature is converted to (sparse) tensor and maybe expand dim.
For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if
the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will
error out as it is not supported.
Args:
key: A `str` key to access the raw feature.
Returns:
A `Tensor` or `SparseTensor`.
Raises:
ValueError: if the raw feature has rank 0.
"""
raw_feature = self._features[key]
feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
raw_feature)
def expand_dims(input_tensor):
# Input_tensor must have rank 1.
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return sparse_ops.sparse_reshape(
input_tensor, [array_ops.shape(input_tensor)[0], 1])
else:
return array_ops.expand_dims(input_tensor, -1)
rank = feature_tensor.get_shape().ndims
if rank is not None:
if rank == 0:
raise ValueError(
'Feature (key: {}) cannot have rank 0. Give: {}'.format(
key, feature_tensor))
return feature_tensor if rank != 1 else expand_dims(feature_tensor)
# Handle dynamic rank.
with ops.control_dependencies([
check_ops.assert_positive(
array_ops.rank(feature_tensor),
message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))]):
return control_flow_ops.cond(
math_ops.equal(1, array_ops.rank(feature_tensor)),
lambda: expand_dims(feature_tensor),
lambda: feature_tensor)
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _shape_offsets(shape):
"""Returns moving offset for each dimension given shape."""
offsets = []
for dim in reversed(shape):
if | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ResponseBase(Model):
    """Response base.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Identifiable

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    """

    # msrest validation rules: which attributes must be present before a
    # request can be serialized.
    _validation = {
        '_type': {'required': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and msrest types.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
    }

    # Polymorphic discriminator map: the server-supplied ``_type`` value
    # selects the concrete subclass during deserialization.
    _subtype_map = {
        '_type': {'Identifiable': 'Identifiable'}
    }

    def __init__(self, **kwargs) -> None:
        super(ResponseBase, self).__init__(**kwargs)
        # Discriminator value; populated by the msrest machinery/server.
        self._type = None
class Identifiable(ResponseBase):
    """Defines the identity of a resource.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Response

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    """

    # msrest validation rules; ``id`` is server-populated and read-only.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
    }

    # Python attribute -> wire key/type mapping for (de)serialization.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
    }

    # Discriminator map continuing the polymorphic hierarchy below this class.
    _subtype_map = {
        '_type': {'Response': 'Response'}
    }

    def __init__(self, **kwargs) -> None:
        super(Identifiable, self).__init__(**kwargs)
        # Read-only; filled in by the service on responses.
        self.id = None
        self._type = 'Identifiable'
class Response(Identifiable):
    """Defines a response. All schemas that could be returned at the root of a
    response should inherit from this.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Answer, Thing, ErrorResponse

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    """

    # msrest validation rules; all data fields are server-populated.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'web_search_url': {'readonly': True},
    }

    # Python attribute -> wire key/type mapping (note camelCase wire keys).
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
    }

    # Discriminator map for the next level of the polymorphic hierarchy.
    _subtype_map = {
        '_type': {'Answer': 'Answer', 'Thing': 'Thing', 'ErrorResponse': 'ErrorResponse'}
    }

    def __init__(self, **kwargs) -> None:
        super(Response, self).__init__(**kwargs)
        # Read-only; filled in by the service on responses.
        self.web_search_url = None
        self._type = 'Response'
class Answer(Response):
    """Defines an answer.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: SearchResultsAnswer, TrendingTopics

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar follow_up_queries:
    :vartype follow_up_queries:
     list[~azure.cognitiveservices.search.newssearch.models.Query]
    """

    # msrest validation rules; all data fields are server-populated.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'web_search_url': {'readonly': True},
        'follow_up_queries': {'readonly': True},
    }

    # Python attribute -> wire key/type mapping; '[Query]' means a list of
    # Query models.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
    }

    # Discriminator map for the concrete answer types below this class.
    _subtype_map = {
        '_type': {'SearchResultsAnswer': 'SearchResultsAnswer', 'TrendingTopics': 'TrendingTopics'}
    }

    def __init__(self, **kwargs) -> None:
        super(Answer, self).__init__(**kwargs)
        # Read-only; filled in by the service on responses.
        self.follow_up_queries = None
        self._type = 'Answer'
class Thing(Response):
    """Defines a thing.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: NewsTopic, CreativeWork, Organization

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar name: The name of the thing represented by this object.
    :vartype name: str
    :ivar url: The URL to get more information about the thing represented by
     this object.
    :vartype url: str
    :ivar image: An image of the item.
    :vartype image:
     ~azure.cognitiveservices.search.newssearch.models.ImageObject
    :ivar description: A short description of the item.
    :vartype description: str
    :ivar alternate_name: An alias for the item
    :vartype alternate_name: str
    :ivar bing_id: An ID that uniquely identifies this item.
    :vartype bing_id: str
    """

    # msrest validation rules; all data fields are server-populated.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'web_search_url': {'readonly': True},
        'name': {'readonly': True},
        'url': {'readonly': True},
        'image': {'readonly': True},
        'description': {'readonly': True},
        'alternate_name': {'readonly': True},
        'bing_id': {'readonly': True},
    }

    # Python attribute -> wire key/type mapping (note camelCase wire keys).
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'image': {'key': 'image', 'type': 'ImageObject'},
        'description': {'key': 'description', 'type': 'str'},
        'alternate_name': {'key': 'alternateName', 'type': 'str'},
        'bing_id': {'key': 'bingId', 'type': 'str'},
    }

    # Discriminator map; note the wire value 'News/Topic' contains a slash
    # and deliberately differs from the Python class name 'NewsTopic'.
    _subtype_map = {
        '_type': {'News/Topic': 'NewsTopic', 'CreativeWork': 'CreativeWork', 'Organization': 'Organization'}
    }

    def __init__(self, **kwargs) -> None:
        super(Thing, self).__init__(**kwargs)
        # Read-only; filled in by the service on responses.
        self.name = None
        self.url = None
        self.image = None
        self.description = None
        self.alternate_name = None
        self.bing_id = None
        self._type = 'Thing'
class CreativeWork(Thing):
    """The most generic kind of creative work, including books, movies,
    photographs, software programs, etc.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Article, MediaObject

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar name: The name of the thing represented by this object.
    :vartype name: str
    :ivar url: The URL to get more information about the thing represented by
     this object.
    :vartype url: str
    :ivar image: An image of the item.
    :vartype image:
     ~azure.cognitiveservices.search.newssearch.models.ImageObject
    :ivar description: A short description of the item.
    :vartype description: str
    :ivar alternate_name: An alias for the item
    :vartype alternate_name: str
    :ivar bing_id: An ID that uniquely identifies this item.
    :vartype bing_id: str
    :ivar thumbnail_url: The URL to a thumbnail of the item.
    :vartype thumbnail_url: str
    :ivar provider: The source of the creative work.
    :vartype provider:
     list[~azure.cognitiveservices.search.newssearch.models.Thing]
    :ivar date_published: The date on which the CreativeWork was published.
    :vartype date_published: str
    :ivar video: A video of the item.
    :vartype video:
     ~azure.cognitiveservices.search.newssearch.models.VideoObject
    """

    # msrest validation rules; all data fields are server-populated.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'web_search_url': {'readonly': True},
        'name': {'readonly': True},
        'url': {'readonly': True},
        'image': {'readonly': True},
        'description': {'readonly': True},
        'alternate_name': {'readonly': True},
        'bing_id': {'readonly': True},
        'thumbnail_url': {'readonly': True},
        'provider': {'readonly': True},
        'date_published': {'readonly': True},
        'video': {'readonly': True},
    }

    # Python attribute -> wire key/type mapping; '[Thing]' is a list of Thing
    # models.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'image': {'key': 'image', 'type': 'ImageObject'},
        'description': {'key': 'description', 'type': 'str'},
        'alternate_name': {'key': 'alternateName', 'type': 'str'},
        'bing_id': {'key': 'bingId', 'type': 'str'},
        'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
        'provider': {'key': 'provider', 'type': '[Thing]'},
        'date_published': {'key': 'datePublished', 'type': 'str'},
        'video': {'key': 'video', 'type': 'VideoObject'},
    }

    # Discriminator map for the concrete creative-work types.
    _subtype_map = {
        '_type': {'Article': 'Article', 'MediaObject': 'MediaObject'}
    }

    def __init__(self, **kwargs) -> None:
        super(CreativeWork, self).__init__(**kwargs)
        # Read-only; filled in by the service on responses.
        self.thumbnail_url = None
        self.provider = None
        self.date_published = None
        self.video = None
        self._type = 'CreativeWork'
class Article(CreativeWork):
"""Article.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: NewsArticle
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by
| |
from netutils.config.parser import ConfigLine
data = [
ConfigLine(config_line="version 17.1", parents=()),
ConfigLine(config_line="service timestamps debug datetime msec", parents=()),
ConfigLine(config_line="service timestamps log datetime msec", parents=()),
ConfigLine(config_line="service call-home", parents=()),
ConfigLine(config_line="platform qfp utilization monitor load 80", parents=()),
ConfigLine(config_line="platform punt-keepalive disable-kernel-core", parents=()),
ConfigLine(config_line="platform console serial", parents=()),
ConfigLine(config_line="hostname jcy-bb-01", parents=()),
ConfigLine(config_line="boot-start-marker", parents=()),
ConfigLine(config_line="boot-end-marker", parents=()),
ConfigLine(config_line="vrf definition MANAGEMENT", parents=()),
ConfigLine(config_line=" address-family ipv4", parents=("vrf definition MANAGEMENT",)),
ConfigLine(config_line=" exit-address-family", parents=("vrf definition MANAGEMENT", " address-family ipv4")),
ConfigLine(config_line=" address-family ipv6", parents=("vrf definition MANAGEMENT",)),
ConfigLine(config_line=" exit-address-family", parents=("vrf definition MANAGEMENT", " address-family ipv6")),
ConfigLine(config_line="logging userinfo", parents=()),
ConfigLine(config_line="no aaa new-model", parents=()),
ConfigLine(config_line="call-home", parents=()),
ConfigLine(config_line=" contact-email-addr <EMAIL>", parents=("call-home",)),
ConfigLine(config_line=' profile "CiscoTAC-1"', parents=("call-home",)),
ConfigLine(config_line=" active", parents=("call-home", ' profile "CiscoTAC-1"')),
ConfigLine(config_line=" destination transport-method http", parents=("call-home", ' profile "CiscoTAC-1"')),
ConfigLine(config_line="no ip domain lookup", parents=()),
ConfigLine(config_line="ip domain name infra.ntc.com", parents=()),
ConfigLine(config_line="login on-success log", parents=()),
ConfigLine(config_line="subscriber templating", parents=()),
ConfigLine(config_line="multilink bundle-name authenticated", parents=()),
ConfigLine(config_line="crypto pki trustpoint TP-self-signed-1088426642", parents=()),
ConfigLine(config_line=" enrollment selfsigned", parents=("crypto pki trustpoint TP-self-signed-1088426642",)),
ConfigLine(
config_line=" subject-name cn=IOS-Self-Signed-Certificate-1088426642",
parents=("crypto pki trustpoint TP-self-signed-1088426642",),
),
ConfigLine(config_line=" revocation-check none", parents=("crypto pki trustpoint TP-self-signed-1088426642",)),
ConfigLine(
config_line=" rsakeypair TP-self-signed-1088426642",
parents=("crypto pki trustpoint TP-self-signed-1088426642",),
),
ConfigLine(config_line="crypto pki trustpoint SLA-TrustPoint", parents=()),
ConfigLine(config_line=" enrollment pkcs12", parents=("crypto pki trustpoint SLA-TrustPoint",)),
ConfigLine(config_line=" revocation-check crl", parents=("crypto pki trustpoint SLA-TrustPoint",)),
ConfigLine(config_line="crypto pki certificate chain TP-self-signed-1088426642", parents=()),
ConfigLine(
config_line=" certificate self-signed 01", parents=("crypto pki certificate chain TP-self-signed-1088426642",)
),
ConfigLine(
config_line=" 30820330 30820218 A0030201 02020101 300D0609 2A864886 F70D0101 05050030",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 31312F30 2D060355 04031326 494F532D 53656C66 2D536967 6E65642D 43657274",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 69666963 6174652D 31303838 34323636 3432301E 170D3231 30333039 30333233",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 30325A17 0D333030 31303130 30303030 305A3031 312F302D 06035504 03132649",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 4F532D53 656C662D 5369676E 65642D43 65727469 66696361 74652D31 30383834",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 32363634 32308201 22300D06 092A8648 86F70D01 01010500 0382010F 00308201",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 0A028201 0100E61B 372A60D2 0A1D58E3 0EC5CEBD 1EA5BF18 3F6BF297 2162DB79",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" ECBF511A 8B14DF23 35EEF4A8 E219B625 E5ED6575 EAD5914A 905E07A2 C2298199",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" D143673A 13C6212A 5A2C034F C250F52F F1187F51 0F003B7F 23642CAE 00A1413C",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 270D4E53 55E7EDB4 1A7B69B6 4EB4B5E7 12ACA95C 07F22B02 80C65739 25C09B82",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 788C4DC2 9729D9DB F0471C13 E2082BDA 3C525850 56684AEF FCE2C18B 1A042FC1",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 20D1403B 837AA3EB D91F6055 0F5FFFE3 E7A9D3C8 09C454C0 CC2E793E B28B60CB",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 3428B6FD 28280899 90E4D168 57AA9005 2FC602B9 D40BCD5E 6AD97F2D 986B1A90",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" F76DE19B 870223D0 86093C37 62D81873 968EC939 53BA36D6 61650732 0124089F",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 7B9445D8 8EF90203 010001A3 53305130 0F060355 1D130101 FF040530 030101FF",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 301F0603 551D2304 18301680 1419973D 5607ACE0 D2127B39 A1F6AC00 2C7216A8",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" F4301D06 03551D0E 04160414 19973D56 07ACE0D2 127B39A1 F6AC002C 7216A8F4",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 300D0609 2A864886 F70D0101 05050003 82010100 0D01C4E1 544034EE F516AB65",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 9DB7D85D EC3CC16F E1BCD286 3C7F351F D86F3B9B D17EC2BA 75B597E9 F2E3C0A7",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" D4FFF39F F5152741 F0551772 135243FA 458E3F8B 77832597 1E18656E 3AA732BD",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 40F899EA 16969EB1 15D8656E 12E57B7A BD7221E6 BAF7D210 2B2E601C 7D047BBA",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" B5F736A4 4969D25B 8C3B2D80 1DB93EC2 80C021E7 A8045D05 3D775D18 ECC4A82A",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 158B85E7 89C03B6D 7D46BF23 0DC1684C 98F1E82D 898A56C6 A2442C46 15EC810B",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 80148784 767C8EE1 5F37AA98 57306ABB 0FF717AB E42D2949 346D867D A619CAC1",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 24AD8BB1 7696E51B 2E1BA9E8 5883E396 FE8C8DB3 96636DF0 3E963FFC 749EF8D4",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" 3C1D27CD 60B9C28E 5A32B0B7 76E74DB4 48D752EB",
parents=("crypto pki certificate chain TP-self-signed-1088426642", " certificate self-signed 01"),
),
ConfigLine(
config_line=" \tquit",
parents=(
"crypto pki certificate chain TP-self-signed-1088426642",
" certificate self-signed 01",
" 3C1D27CD 60B9C28E 5A32B0B7 76E74DB4 48D752EB",
),
),
ConfigLine(config_line="crypto pki certificate chain SLA-TrustPoint", parents=()),
ConfigLine(config_line=" certificate ca 01", parents=("crypto pki certificate chain SLA-TrustPoint",)),
ConfigLine(
config_line=" 30820321 30820209 A0030201 02020101 300D0609 2A864886 F70D0101 0B050030",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 32310E30 0C060355 040A1305 43697363 6F312030 1E060355 04031317 43697363",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 6F204C69 63656E73 696E6720 526F6F74 20434130 1E170D31 33303533 30313934",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 3834375A 170D3338 30353330 31393438 34375A30 32310E30 0C060355 040A1305",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 43697363 6F312030 1E060355 04031317 43697363 6F204C69 63656E73 696E6720",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 526F6F74 20434130 82012230 0D06092A 864886F7 0D010101 05000382 010F0030",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 82010A02 82010100 A6BCBD96 131E05F7 145EA72C 2CD686E6 17222EA1 F1EFF64D",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" CBB4C798 212AA147 C655D8D7 9471380D 8711441E 1AAF071A 9CAE6388 8A38E520",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 1C394D78 462EF239 C659F715 B98C0A59 5BBB5CBD 0CFEBEA3 700A8BF7 D8F256EE",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 4AA4E80D DB6FD1C9 60B1FD18 FFC69C96 6FA68957 A2617DE7 104FDC5F EA2956AC",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 7390A3EB 2B5436AD C847A2C5 DAB553EB 69A9A535 58E9F3E3 C0BD23CF 58BD7188",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 68E69491 20F320E7 948E71D7 AE3BCC84 F10684C7 4BC8E00F 539BA42B 42C68BB7",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" C7479096 B4CB2D62 EA2F505D C7B062A4 6811D95B E8250FC4 5D5D5FB8 8F27D191",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" C55F0D76 61F9A4CD 3D992327 A8BB03BD 4E6D7069 7CBADF8B DF5F4368 95135E44",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" DFC7C6CF 04DD7FD1 02030100 01A34230 40300E06 03551D0F 0101FF04 04030201",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 06300F06 03551D13 0101FF04 05300301 01FF301D 0603551D 0E041604 1449DC85",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 4B3D31E5 1B3E6A17 606AF333 3D3B4C73 E8300D06 092A8648 86F70D01 010B0500",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 03820101 00507F24 D3932A66 86025D9F E838AE5C 6D4DF6B0 49631C78 240DA905",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 604EDCDE FF4FED2B 77FC460E CD636FDB DD44681E 3A5673AB 9093D3B1 6C9E3D8B",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" D98987BF E40CBD9E 1AECA0C2 2189BB5C 8FA85686 CD98B646 5575B146 8DFC66A8",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 467A3DF4 4D565700 6ADF0F0D CF835015 3C04FF7C 21E878AC 11BA9CD2 55A9232C",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 7CA7B7E6 C1AF74F6 152E99B7 B1FCF9BB E973DE7F 5BDDEB86 C71E3B49 1765308B",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 5FB0DA06 B92AFE7F 494E8A9E 07B85737 F3A58BE1 1A48A229 C37C1E69 39F08678",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 80DDCD16 D6BACECA EEBC7CF9 8428787B 35202CDC 60E4616A B623CDBD 230E3AFB",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" 418616A9 4093E049 4D10AB75 27E86F73 932E35B5 8862FDAE 0275156F 719BB2F0",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01"),
),
ConfigLine(
config_line=" D697DF7F 28", parents=("crypto pki certificate chain SLA-TrustPoint", " certificate ca 01")
),
ConfigLine(
config_line=" \tquit",
parents=("crypto pki certificate chain SLA-TrustPoint", " certificate | |
# Repository: NikkaZ/dbtvault_spark
from behave import fixture
@fixture
def pit(context):
    """
    Define the structures and metadata to perform PIT load

    Populates the behave ``context`` with everything the PIT (point-in-time)
    scenarios need: hashing rules, derived columns, dbtvault structure
    metadata (hub, satellites, PIT table), stage column lists and dbt seed
    column types.
    """
    context.vault_structure_type = "pit"

    # Hash-key definitions per staging model: a CUSTOMER_PK hashed from the
    # natural key, plus a HASHDIFF over that stage's payload columns.
    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        },
        "STG_CUSTOMER_LOGIN": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
                         }
        },
        "STG_CUSTOMER_PROFILE": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["DASHBOARD_COLOUR", "DISPLAY_NAME"]
                         }
        }
    }

    # Every staging model derives EFFECTIVE_FROM from its load date.
    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        },
        "STG_CUSTOMER_LOGIN": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        },
        "STG_CUSTOMER_PROFILE": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        }
    }

    # dbtvault structure metadata: one hub fed by all three stages, a
    # satellite per stage, and the PIT table joining the hub to all three
    # satellites.
    context.vault_structure_columns = {
        "HUB_CUSTOMER": {
            "source_model": ["STG_CUSTOMER_DETAILS",
                             "STG_CUSTOMER_LOGIN",
                             "STG_CUSTOMER_PROFILE"],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS": {
            "source_model": "STG_CUSTOMER_DETAILS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_LOGIN": {
            "source_model": "STG_CUSTOMER_LOGIN",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["LAST_LOGIN_DATE", "DEVICE_USED"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_PROFILE": {
            "source_model": "STG_CUSTOMER_PROFILE",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["DASHBOARD_COLOUR", "DISPLAY_NAME"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "PIT_CUSTOMER": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            # For each satellite, which columns the PIT snapshots (pk/ldts).
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    },
                    "SAT_CUSTOMER_LOGIN": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    },
                    "SAT_CUSTOMER_PROFILE": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                    "STG_CUSTOMER_LOGIN": "LOAD_DATE",
                    "STG_CUSTOMER_PROFILE": "LOAD_DATE"
                },
            "src_ldts": "LOAD_DATE"
        }
    }

    # Column lists for the raw stage seed tables.
    context.stage_columns = {
        "RAW_STAGE_DETAILS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATE",
             "SOURCE"]
        ,
        "RAW_STAGE_LOGIN":
            ["CUSTOMER_ID",
             "LAST_LOGIN_DATE",
             "DEVICE_USED",
             "LOAD_DATE",
             "SOURCE"]
        ,
        "RAW_STAGE_PROFILE":
            ["CUSTOMER_ID",
             "DASHBOARD_COLOUR",
             "DISPLAY_NAME",
             "LOAD_DATE",
             "SOURCE"]
    }

    # dbt seed column typing for every table the scenario creates/compares.
    context.seed_config = {
        "RAW_STAGE_DETAILS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "RAW_STAGE_LOGIN": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "LAST_LOGIN_DATE": "DATETIME",
                "DEVICE_USED": "VARCHAR",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "RAW_STAGE_PROFILE": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "DASHBOARD_COLOUR": "VARCHAR",
                "DISPLAY_NAME": "VARCHAR",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "HUB_CUSTOMER": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_DETAILS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_LOGIN": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "DEVICE_USED": "VARCHAR",
                "LAST_LOGIN_DATE": "DATETIME",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_PROFILE": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "DASHBOARD_COLOUR": "VARCHAR",
                "DISPLAY_NAME": "VARCHAR",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATE": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "AS_OF_DATE": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME"
            }
        },
        "PIT_CUSTOMER": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME",
                "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)",
                "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME",
                "SAT_CUSTOMER_PROFILE_PK": "BINARY(16)",
                "SAT_CUSTOMER_PROFILE_LDTS": "DATETIME"
            }
        }
    }
@fixture
def pit_one_sat(context):
    """
    Define the structures and metadata to perform PIT load

    Sets up a single-satellite PIT scenario on the behave ``context`` in
    two parallel variants: a DATE-grained set (HUB_CUSTOMER /
    SAT_CUSTOMER_DETAILS / PIT_CUSTOMER[_HG]) and a DATETIME-grained set
    (the ``_TS`` suffixed models).  The attributes set here
    (``hashed_columns``, ``derived_columns``, ``vault_structure_columns``,
    ``stage_columns``, ``seed_config``) are read by the test step
    implementations elsewhere in the suite.
    """
    context.vault_structure_type = "pit"

    # Columns hashed in each staging model; HASHDIFF is a composite hash
    # over the satellite payload columns.
    context.hashed_columns = {
        "STG_CUSTOMER_DETAILS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        },
        "STG_CUSTOMER_DETAILS_TS": {
            "CUSTOMER_PK": "CUSTOMER_ID",
            "HASHDIFF": {"is_hashdiff": True,
                         "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
                         }
        }
    }

    # EFFECTIVE_FROM is derived from the load timestamp of each variant.
    context.derived_columns = {
        "STG_CUSTOMER_DETAILS": {
            "EFFECTIVE_FROM": "LOAD_DATE"
        },
        "STG_CUSTOMER_DETAILS_TS": {
            "EFFECTIVE_FROM": "LOAD_DATETIME"
        }
    }

    # Metadata for each vault structure to be built (hubs, satellites and
    # the PIT tables themselves).
    context.vault_structure_columns = {
        "HUB_CUSTOMER": {
            "source_model": ["STG_CUSTOMER_DETAILS",
                             ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "HUB_CUSTOMER_TS": {
            "source_model": ["STG_CUSTOMER_DETAILS_TS",
                             ],
            "src_pk": "CUSTOMER_PK",
            "src_nk": "CUSTOMER_ID",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS": {
            "source_model": "STG_CUSTOMER_DETAILS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATE",
            "src_source": "SOURCE"
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "source_model": "STG_CUSTOMER_DETAILS_TS",
            "src_pk": "CUSTOMER_PK",
            "src_hashdiff": "HASHDIFF",
            "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
            "src_eff": "EFFECTIVE_FROM",
            "src_ldts": "LOAD_DATETIME",
            "src_source": "SOURCE"
        },
        # Each PIT tracks the single satellite keyed/timestamped per variant.
        "PIT_CUSTOMER": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                },
            "src_ldts": "LOAD_DATE"
        },
        "PIT_CUSTOMER_TS": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                },
            "src_ldts": "LOAD_DATETIME"
        },
        # _LG mirrors the _TS configuration; _HG mirrors the base one.
        "PIT_CUSTOMER_LG": {
            "source_model": "HUB_CUSTOMER_TS",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS_TS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATETIME"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
                },
            "src_ldts": "LOAD_DATETIME"
        },
        "PIT_CUSTOMER_HG": {
            "source_model": "HUB_CUSTOMER",
            "src_pk": "CUSTOMER_PK",
            "as_of_dates_table": "AS_OF_DATE",
            "satellites":
                {
                    "SAT_CUSTOMER_DETAILS": {
                        "pk":
                            {"PK": "CUSTOMER_PK"},
                        "ldts":
                            {"LDTS": "LOAD_DATE"}
                    }
                },
            "stage_tables":
                {
                    "STG_CUSTOMER_DETAILS": "LOAD_DATE",
                },
            "src_ldts": "LOAD_DATE"
        }
    }

    # Column order of the raw staging seed files.
    context.stage_columns = {
        "RAW_STAGE_DETAILS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATE",
             "SOURCE"],
        "RAW_STAGE_DETAILS_TS":
            ["CUSTOMER_ID",
             "CUSTOMER_NAME",
             "CUSTOMER_ADDRESS",
             "CUSTOMER_DOB",
             "LOAD_DATETIME",
             "SOURCE"]
    }

    # dbt seed column-type overrides for every table involved in the test.
    context.seed_config = {
        "RAW_STAGE_DETAILS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR"
            }
        },
        "RAW_STAGE_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_ID": "VARCHAR",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "HUB_CUSTOMER": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR"
            }
        },
        "HUB_CUSTOMER_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "CUSTOMER_ID": "VARCHAR",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_DETAILS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATE",
                "LOAD_DATE": "DATE",
                "SOURCE": "VARCHAR"
            }
        },
        "SAT_CUSTOMER_DETAILS_TS": {
            "+column_types": {
                "CUSTOMER_PK": "BINARY(16)",
                "HASHDIFF": "BINARY(16)",
                "CUSTOMER_NAME": "VARCHAR",
                "CUSTOMER_ADDRESS": "VARCHAR",
                "CUSTOMER_DOB": "DATE",
                "EFFECTIVE_FROM": "DATETIME",
                "LOAD_DATETIME": "DATETIME",
                "SOURCE": "VARCHAR"
            }
        },
        "AS_OF_DATE": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME"
            }
        },
        "PIT_CUSTOMER": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_TS": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_LG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME"
            }
        },
        "PIT_CUSTOMER_HG": {
            "+column_types": {
                "AS_OF_DATE": "DATETIME",
                "CUSTOMER_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)",
                "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME"
            }
        }
    }
@fixture
def pit_two_sats(context):
"""
Define the structures and metadata to perform PIT load
"""
context.vault_structure_type = "pit"
context.hashed_columns = {
"STG_CUSTOMER_DETAILS": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
}
},
"STG_CUSTOMER_DETAILS_TS": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"]
}
},
"STG_CUSTOMER_LOGIN": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
}
},
"STG_CUSTOMER_LOGIN_TS": {
"CUSTOMER_PK": "CUSTOMER_ID",
"HASHDIFF": {"is_hashdiff": True,
"columns": ["DEVICE_USED", "LAST_LOGIN_DATE"]
}
}
}
context.derived_columns = {
"STG_CUSTOMER_DETAILS": {
"EFFECTIVE_FROM": "LOAD_DATE"
},
"STG_CUSTOMER_DETAILS_TS": {
"EFFECTIVE_FROM": "LOAD_DATETIME"
},
"STG_CUSTOMER_LOGIN": {
"EFFECTIVE_FROM": "LOAD_DATE"
},
"STG_CUSTOMER_LOGIN_TS": {
"EFFECTIVE_FROM": "LOAD_DATETIME"
}
}
context.vault_structure_columns = {
"HUB_CUSTOMER": {
"source_model": ["STG_CUSTOMER_DETAILS",
],
"src_pk": "CUSTOMER_PK",
"src_nk": "CUSTOMER_ID",
"src_ldts": "LOAD_DATE",
"src_source": "SOURCE"
},
"HUB_CUSTOMER_TS": {
"source_model": ["STG_CUSTOMER_DETAILS_TS",
],
"src_pk": "CUSTOMER_PK",
"src_nk": "CUSTOMER_ID",
"src_ldts": "LOAD_DATETIME",
"src_source": "SOURCE"
},
"SAT_CUSTOMER_DETAILS": {
"source_model": "STG_CUSTOMER_DETAILS",
"src_pk": "CUSTOMER_PK",
"src_hashdiff": "HASHDIFF",
"src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATE",
"src_source": "SOURCE"
},
"SAT_CUSTOMER_DETAILS_TS": {
"source_model": "STG_CUSTOMER_DETAILS_TS",
"src_pk": "CUSTOMER_PK",
"src_hashdiff": "HASHDIFF",
"src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"],
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATETIME",
"src_source": "SOURCE"
},
"SAT_CUSTOMER_LOGIN": {
"source_model": "STG_CUSTOMER_LOGIN",
"src_pk": "CUSTOMER_PK",
"src_hashdiff": "HASHDIFF",
"src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"],
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATE",
"src_source": "SOURCE"
},
"SAT_CUSTOMER_LOGIN_TS": {
"source_model": "STG_CUSTOMER_LOGIN_TS",
"src_pk": "CUSTOMER_PK",
"src_hashdiff": "HASHDIFF",
"src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"],
"src_eff": "EFFECTIVE_FROM",
"src_ldts": "LOAD_DATETIME",
"src_source": "SOURCE"
},
"PIT_CUSTOMER": {
"source_model": "HUB_CUSTOMER",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
},
"SAT_CUSTOMER_LOGIN": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS": "LOAD_DATE",
"STG_CUSTOMER_LOGIN": "LOAD_DATE"
},
"src_ldts": "LOAD_DATE"
},
"PIT_CUSTOMER_TS": {
"source_model": "HUB_CUSTOMER_TS",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS_TS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATETIME"}
},
"SAT_CUSTOMER_LOGIN_TS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATETIME"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
"STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME",
},
"src_ldts": "LOAD_DATETIME"
},
"PIT_CUSTOMER_LG": {
"source_model": "HUB_CUSTOMER_TS",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS_TS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATETIME"}
},
"SAT_CUSTOMER_LOGIN_TS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATETIME"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME",
"STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME",
},
"src_ldts": "LOAD_DATETIME"
},
"PIT_CUSTOMER_HG": {
"source_model": "HUB_CUSTOMER",
"src_pk": "CUSTOMER_PK",
"as_of_dates_table": "AS_OF_DATE",
"satellites":
{
"SAT_CUSTOMER_DETAILS": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
},
"SAT_CUSTOMER_LOGIN": {
"pk":
{"PK": "CUSTOMER_PK"},
"ldts":
{"LDTS": "LOAD_DATE"}
}
},
"stage_tables":
{
"STG_CUSTOMER_DETAILS": "LOAD_DATE",
"STG_CUSTOMER_LOGIN": "LOAD_DATE",
},
"src_ldts": "LOAD_DATE"
}
}
context.stage_columns = {
"RAW_STAGE_DETAILS":
["CUSTOMER_ID",
"CUSTOMER_NAME",
"CUSTOMER_ADDRESS",
"CUSTOMER_DOB",
"LOAD_DATE",
"SOURCE"],
"RAW_STAGE_DETAILS_TS":
["CUSTOMER_ID",
"CUSTOMER_NAME",
"CUSTOMER_ADDRESS",
"CUSTOMER_DOB",
"LOAD_DATETIME",
"SOURCE"],
"RAW_STAGE_LOGIN":
["CUSTOMER_ID",
"LAST_LOGIN_DATE",
"DEVICE_USED",
"LOAD_DATE",
"SOURCE"],
"RAW_STAGE_LOGIN_TS":
["CUSTOMER_ID",
"LAST_LOGIN_DATE",
"DEVICE_USED",
"LOAD_DATETIME",
"SOURCE"]
}
context.seed_config = {
"RAW_STAGE_DETAILS": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR",
"CUSTOMER_NAME": "VARCHAR",
"CUSTOMER_ADDRESS": "VARCHAR",
"CUSTOMER_DOB": "DATE",
"LOAD_DATE": "DATE",
"SOURCE": "VARCHAR"
}
},
"RAW_STAGE_DETAILS_TS": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR",
"CUSTOMER_NAME": "VARCHAR",
"CUSTOMER_ADDRESS": "VARCHAR",
"CUSTOMER_DOB": "DATE",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR"
}
},
"RAW_STAGE_LOGIN": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR",
"LAST_LOGIN_DATE": "DATETIME",
"DEVICE_USED": "VARCHAR",
"LOAD_DATE": "DATE",
"SOURCE": "VARCHAR"
}
},
"RAW_STAGE_LOGIN_TS": {
"+column_types": {
"CUSTOMER_ID": "VARCHAR",
"LAST_LOGIN_DATE": "DATETIME",
"DEVICE_USED": "VARCHAR",
"LOAD_DATETIME": "DATETIME",
"SOURCE": "VARCHAR"
}
},
"HUB_CUSTOMER": | |
``node`` which will have ``size``
leaves (themselves not necessarily leaves of the actual tree).
Parameters
----------
node : frozenset[int]
The node of the tree to start with.
size : int
How many subtree leaves to aim for.
search : {'bfs', 'dfs', 'random'}, optional
How to build the tree:
- 'bfs': breadth first expansion
- 'dfs': depth first expansion (largest nodes first)
- 'random': random expansion
Returns
-------
sub_leaves : tuple[frozenset[int]]
Nodes which are subtree leaves.
branches : tuple[frozenset[int]]
Nodes which are between the subtree leaves and root.
"""
# nodes which are subtree leaves
branches = []
# actual tree leaves - can't expand
real_leaves = []
# nodes to expand
queue = [node]
while (len(queue) + len(real_leaves) < size) and queue:
if search == 'bfs':
p = queue.pop(0)
elif search == 'dfs':
p = queue.pop(-1)
elif search == 'random':
p = queue.pop(random.randint(0, len(queue) - 1))
if len(p) == 1:
real_leaves.append(p)
continue
# the left child is always >= in weight that right child
# if we append it last then ``.pop(-1)`` above perform the
# depth first search sorting by node subgraph size
l, r = self.children[p]
queue.append(r)
queue.append(l)
branches.append(p)
# nodes at the bottom of the subtree
sub_leaves = queue + real_leaves
return tuple(sub_leaves), tuple(branches)
def calc_subtree_candidates(self, pwr=2, what='flops'):
    """Score the tree's intermediate nodes as candidate subtree roots.

    Parameters
    ----------
    pwr : int, optional
        Flattening power: each weight is scaled as
        ``(w / max_w)**(1 / pwr)``, so a larger ``pwr`` makes the
        scores more uniform (more explorative when sampled).
    what : {'flops', 'size'}, optional
        Whether to score nodes by their local contraction cost or by
        their tensor size.

    Returns
    -------
    candidates : list
        The intermediate nodes, sorted by descending score.
    weights : list[float]
        The matching normalized scores.

    Raises
    ------
    ValueError
        If ``what`` is not one of the supported options.
    """
    candidates = list(self.children)

    if what == 'size':
        weights = [self.get_size(x) for x in candidates]
    elif what == 'flops':
        weights = [self.get_flops(x) for x in candidates]
    else:
        # previously an unrecognized option fell through to a confusing
        # ``NameError`` on ``weights`` - fail early and explicitly
        raise ValueError(
            "`what` should be 'flops' or 'size', got {!r}.".format(what))

    max_weight = max(weights)

    # normalize - raw weights can be bigger than numpy int/float allows
    weights = [float(w / max_weight)**(1 / pwr) for w in weights]

    # sort by descending score
    candidates, weights = zip(
        *sorted(zip(candidates, weights), key=lambda x: -x[1]))

    return list(candidates), list(weights)
def subtree_reconfigure(
    self,
    subtree_size=8,
    subtree_search='bfs',
    weight_what='flops',
    weight_pwr=2,
    select='max',
    maxiter=500,
    seed=None,
    minimize='flops',
    inplace=False,
    progbar=False,
):
    """Reconfigure subtrees of this tree with locally optimal paths.

    Parameters
    ----------
    subtree_size : int, optional
        The size of subtree to consider. Cost is exponential in this.
    subtree_search : {'bfs', 'dfs', 'random'}, optional
        How to build the subtrees:

            - 'bfs': breadth-first-search creating balanced subtrees
            - 'dfs': depth-first-search creating imbalanced subtrees
            - 'random': random subtree building

    weight_what : {'flops', 'size'}, optional
        When assessing nodes to build and optimize subtrees from whether to
        score them by the (local) contraction cost, or tensor size.
    weight_pwr : int, optional
        When assessing nodes to build and optimize subtrees from, how to
        scale their score into a probability: ``score**(1 / weight_pwr)``.
        The larger this is the more explorative the algorithm is when
        ``select='random'``.
    select : {'max', 'min', 'random'}, optional
        What order to select node subtrees to optimize:

            - 'max': choose the highest score first
            - 'min': choose the lowest score first
            - 'random': choose randomly weighted on score -- see
              ``weight_pwr``.

    maxiter : int, optional
        How many subtree optimizations to perform, the algorithm can
        terminate before this if all subtrees have been optimized.
    seed : int, optional
        A random seed (seeds python system random module).
    minimize : {'flops', 'size', 'write', 'combo'}, optional
        What cost to track when pruning branches: contraction flops,
        maximum intermediate size, summed intermediate size ('write'),
        or ``flops // 2 + 500 * size`` ('combo').
    inplace : bool, optional
        Whether to perform the reconfiguration inplace or not.
    progbar : bool, optional
        Whether to show live progress of the reconfiguration.

    Returns
    -------
    ContractionTree
    """
    tree = self if inplace else self.copy()

    # ensure these have been computed and thus are being tracked
    tree.total_flops()
    tree.max_size()

    optimizer = DynamicProgramming(minimize=minimize)

    # different caches as we might want to reconfigure one before other
    # NOTE(review): the cache is stored on ``self`` even when
    # ``inplace=False`` operates on a copy -- presumably intentional so
    # repeated calls share progress; confirm.
    self.already_optimized.setdefault(minimize, set())
    already_optimized = self.already_optimized[minimize]

    if seed is not None:
        random.seed(seed)

    # NOTE(review): initial candidates are computed from ``self``, not
    # ``tree`` -- identical at this point if ``copy()`` duplicates the
    # children map; verify.
    candidates, weights = self.calc_subtree_candidates(
        pwr=weight_pwr, what=weight_what)

    if progbar:
        import tqdm
        pbar = tqdm.tqdm()
        pbar.set_description(_describe_tree(tree))

    r = 0
    try:
        while candidates and r < maxiter:
            # pick the index of the next subtree root according to
            # ``select`` (any other value raises NameError on ``i``)
            if select == 'max':
                i = 0
            elif select == 'min':
                i = -1
            elif select == 'random':
                i, = random.choices(
                    range(len(candidates)), weights=weights)

            weights.pop(i)
            sub_root = candidates.pop(i)

            # get a subtree to possibly reconfigure
            sub_leaves, sub_branches = tree.get_subtree(
                sub_root, size=subtree_size, search=subtree_search)

            sub_leaves = frozenset(sub_leaves)

            # check if its already been optimized
            if sub_leaves in already_optimized:
                continue

            # else remove the branches, keeping track of current cost
            if minimize == 'flops':
                current_cost = tree.get_flops(sub_root) // 2
            elif minimize == 'write':
                current_cost = tree.get_size(sub_root)
            elif minimize == 'size':
                current_cost = tree.get_size(sub_root)
            elif minimize == 'combo':
                current_cost = (
                    tree.get_flops(sub_root) // 2 +
                    500 * tree.get_size(sub_root))

            # accumulate the cost of (and remove) every interior branch
            for node in sub_branches:
                if minimize == 'flops':
                    current_cost += tree.get_flops(node) // 2
                elif minimize == 'write':
                    current_cost += tree.get_size(node)
                elif minimize == 'size':
                    # 'size' tracks the maximum, not a sum
                    current_cost = max(
                        current_cost, tree.get_size(node))
                elif minimize == 'combo':
                    current_cost += (
                        tree.get_flops(node) // 2 +
                        500 * tree.get_size(node))
                tree.remove_node(node)

            # make the optimizer more efficient by supplying accurate cap
            optimizer.cost_cap = current_cost

            # and reoptimize the leaves
            tree.contract(sub_leaves, optimize=optimizer)
            already_optimized.add(sub_leaves)

            r += 1
            if progbar:
                pbar.update()
                pbar.set_description(_describe_tree(tree))

            # if we have reconfigured simply re-add all candidates
            candidates, weights = tree.calc_subtree_candidates(
                pwr=weight_pwr, what=weight_what)
    finally:
        if progbar:
            pbar.close()

    return tree
# In-place alias: ``tree.subtree_reconfigure_(...)`` is equivalent to
# ``tree.subtree_reconfigure(..., inplace=True)``.
subtree_reconfigure_ = functools.partialmethod(
    subtree_reconfigure, inplace=True)
def subtree_reconfigure_forest(
self,
num_trees=8,
num_restarts=10,
restart_fraction=0.5,
subtree_maxiter=100,
subtree_size=10,
subtree_search=('random', 'bfs'),
subtree_select=('random',),
subtree_weight_what=('flops', 'size'),
subtree_weight_pwr=(2,),
parallel='auto',
parallel_maxiter_steps=4,
minimize='flops',
progbar=False,
inplace=False,
):
"""'Forested' version of ``subtree_reconfigure`` which is more
explorative and can be parallelized. It stochastically generates
a 'forest' reconfigured trees, then only keeps some fraction of these
to generate the next forest.
Parameters
----------
num_trees : int, optional
The number of trees to reconfigure at each stage.
num_restarts : int, optional
The number of times to halt, prune and then restart the
tree reconfigurations.
restart_fraction : float, optional
The fraction of trees to keep at each stage and generate the next
forest from.
subtree_maxiter : int, optional
Number of subtree reconfigurations per step.
``num_restarts * subtree_maxiter`` is the max number of total
subtree reconfigurations for the final tree produced.
subtree_size : int, optional
The size of subtrees to search for and reconfigure.
subtree_search : tuple[{'random', 'bfs', 'dfs'}], optional
Tuple of options for the ``search`` kwarg of
:meth:`ContractionTree.subtree_reconfigure` to randomly sample.
subtree_select : tuple[{'random', 'max', 'min'}], optional
Tuple of options for the ``select`` kwarg of
:meth:`ContractionTree.subtree_reconfigure` to randomly sample.
subtree_weight_what : tuple[{'flops', 'size'}], optional
Tuple of options for the ``weight_what`` kwarg of
:meth:`ContractionTree.subtree_reconfigure` to randomly sample.
subtree_weight_pwr : tuple[int], optional
Tuple of options for the ``weight_pwr`` kwarg of
:meth:`ContractionTree.subtree_reconfigure` to randomly sample.
parallel : 'auto', False, True, int, or distributed.Client
Whether to parallelize the search.
parallel_maxiter_steps : int, optional
If parallelizing, how many steps to break each reconfiguration into
in order to evenly saturate many processes.
minimize : {'flops', 'size'}, optional
Whether to minimize the total flops or maximum size of the
contraction tree.
progbar : bool, optional
Whether to show live progress.
inplace : bool, optional
Whether to perform the subtree reconfiguration inplace.
Returns
-------
ContractionTree
"""
tree = self if inplace else self.copy()
# candidate trees
num_keep = max(1, int(num_trees * restart_fraction))
# how to rank the trees
score = get_score_fn(minimize)
# set up the initial 'forest' and parallel machinery
pool = parse_parallel_arg(parallel)
if pool is not None:
try:
from dask.distributed import secede, rejoin
secede() # for nested parallelism
is_dask_worker = True
except (ImportError, ValueError):
is_dask_worker = False
# store the trees as futures for the entire process
forest = [pool.scatter(tree)]
maxiter = subtree_maxiter // parallel_maxiter_steps
else:
forest = [tree]
maxiter = subtree_maxiter
if progbar:
import tqdm
pbar = tqdm.tqdm(total=num_restarts)
pbar.set_description(_describe_tree(tree))
try:
for _ in range(num_restarts):
# on the next round take only the best trees
forest = itertools.cycle(forest[:num_keep])
# select some random configurations
saplings = [{
'tree': next(forest),
'maxiter': maxiter,
'minimize': minimize,
'subtree_size': subtree_size,
'subtree_search': random.choice(subtree_search),
'select': random.choice(subtree_select),
'weight_pwr': random.choice(subtree_weight_pwr),
'weight_what': random.choice(subtree_weight_what),
} for _ in range(num_trees)]
if pool is None:
forest = [_reconfigure_tree(**s) for s in saplings]
res = [{'tree': t, **_get_tree_info(t)} for t in forest]
else:
# submit in smaller steps to saturate processes
for _ in range(parallel_maxiter_steps):
for s in saplings:
s['tree'] = pool.submit(
_reconfigure_tree, pure=False, **s)
# compute scores remotely then gather
forest_futures = [s['tree'] for s in saplings]
res_futures = [pool.submit(_get_tree_info, t, pure=False)
| |
0, 0, 1, -360, 360],
[24, 319, 0.007, 0.068, 0.134, 9900, 0, 0, 0, 0, 1, -360, 360],
[25, 26, 0.036, 0.071, 0.034, 9900, 0, 0, 0, 0, 1, -360, 360],
[26, 27, 0.045, 0.12, 0.065, 9900, 0, 0, 0, 0, 1, -360, 360],
[26, 320, 0.043, 0.13, 0.014, 9900, 0, 0, 0, 0, 1, -360, 360],
[33, 34, 0, 0.063, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[33, 38, 0.0025, 0.012, 0.013, 9900, 0, 0, 0, 0, 1, -360, 360],
[33, 40, 0.006, 0.029, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360],
[33, 41, 0.007, 0.043, 0.026, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 42, 0.001, 0.008, 0.042, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 72, 0.012, 0.06, 0.008, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 76, 0.006, 0.014, 0.002, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 77, 0.01, 0.029, 0.003, 9900, 0, 0, 0, 0, 1, -360, 360],
[36, 88, 0.004, 0.027, 0.043, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 38, 0.008, 0.047, 0.008, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 40, 0.022, 0.064, 0.007, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 41, 0.01, 0.036, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 49, 0.017, 0.081, 0.048, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 89, 0.102, 0.254, 0.033, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 90, 0.047, 0.127, 0.016, 9900, 0, 0, 0, 0, 1, -360, 360],
[38, 41, 0.008, 0.037, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360],
[38, 43, 0.032, 0.087, 0.04, 9900, 0, 0, 0, 0, 1, -360, 360],
[39, 42, 0.0006, 0.0064, 0.404, 9900, 0, 0, 0, 0, 1, -360, 360],
[40, 48, 0.026, 0.154, 0.022, 9900, 0, 0, 0, 0, 1, -360, 360],
[41, 42, 0, 0.029, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[41, 49, 0.065, 0.191, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360],
[41, 51, 0.031, 0.089, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360],
[42, 46, 0.002, 0.014, 0.806, 9900, 0, 0, 0, 0, 1, -360, 360],
[43, 44, 0.026, 0.072, 0.035, 9900, 0, 0, 0, 0, 1, -360, 360],
[43, 48, 0.095, 0.262, 0.032, 9900, 0, 0, 0, 0, 1, -360, 360],
[43, 53, 0.013, 0.039, 0.016, 9900, 0, 0, 0, 0, 1, -360, 360],
[44, 47, 0.027, 0.084, 0.039, 9900, 0, 0, 0, 0, 1, -360, 360],
[44, 54, 0.028, 0.084, 0.037, 9900, 0, 0, 0, 0, 1, -360, 360],
[45, 60, 0.007, 0.041, 0.312, 9900, 0, 0, 0, 0, 1, -360, 360],
[45, 74, 0.009, 0.054, 0.411, 9900, 0, 0, 0, 0, 1, -360, 360],
[46, 81, 0.005, 0.042, 0.69, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 73, 0.052, 0.145, 0.073, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 113, 0.043, 0.118, 0.013, 9900, 0, 0, 0, 0, 1, -360, 360],
[48, 107, 0.025, 0.062, 0.007, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 51, 0.031, 0.094, 0.043, 9900, 0, 0, 0, 0, 1, -360, 360],
[51, 52, 0.037, 0.109, 0.049, 9900, 0, 0, 0, 0, 1, -360, 360],
[52, 55, 0.027, 0.08, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360],
[53, 54, 0.025, 0.073, 0.035, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 55, 0.035, 0.103, 0.047, 9900, 0, 0, 0, 0, 1, -360, 360],
[55, 57, 0.065, 0.169, 0.082, 9900, 0, 0, 0, 0, 1, -360, 360],
[57, 58, 0.046, 0.08, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360],
[57, 63, 0.159, 0.537, 0.071, 9900, 0, 0, 0, 0, 1, -360, 360],
[58, 59, 0.009, 0.026, 0.005, 9900, 0, 0, 0, 0, 1, -360, 360],
[59, 61, 0.002, 0.013, 0.015, 9900, 0, 0, 0, 0, 1, -360, 360],
[60, 62, 0.009, 0.065, 0.485, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 64, 0.016, 0.105, 0.203, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 144, 0.001, 0.007, 0.013, 9900, 0, 0, 0, 0, 1, -360, 360],
[63, 526, 0.0265, 0.172, 0.026, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 211, 0.051, 0.232, 0.028, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 79, 0.051, 0.157, 0.023, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 71, 0.032, 0.1, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 528, 0.02, 0.1234, 0.028, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 72, 0.036, 0.131, 0.068, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 73, 0.034, 0.099, 0.047, 9900, 0, 0, 0, 0, 1, -360, 360],
[72, 77, 0.018, 0.087, 0.011, 9900, 0, 0, 0, 0, 1, -360, 360],
[72, 531, 0.0256, 0.193, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[73, 76, 0.021, 0.057, 0.03, 9900, 0, 0, 0, 0, 1, -360, 360],
[73, 79, 0.018, 0.052, 0.018, 9900, 0, 0, 0, 0, 1, -360, 360],
[74, 88, 0.004, 0.027, 0.05, 9900, 0, 0, 0, 0, 1, -360, 360],
[74, 562, 0.0286, 0.2013, 0.379, 9900, 0, 0, 0, 0, 1, -360, 360],
[76, 77, 0.016, 0.043, 0.004, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 78, 0.001, 0.006, 0.007, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 80, 0.014, 0.07, 0.038, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 552, 0.0891, 0.2676, 0.029, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 609, 0.0782, 0.2127, 0.022, 9900, 0, 0, 0, 0, 1, -360, 360],
[78, 79, 0.006, 0.022, 0.011, 9900, 0, 0, 0, 0, 1, -360, 360],
[78, 84, 0, 0.036, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[79, 211, 0.099, 0.375, 0.051, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 211, 0.022, 0.107, 0.058, 9900, 0, 0, 0, 0, 1, -360, 360],
[81, 194, 0.0035, 0.033, 0.53, 9900, 0, 0, 0, 0, 1, -360, 360],
[81, 195, 0.0035, 0.033, 0.53, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 86, 0.008, 0.064, 0.128, 9900, 0, 0, 0, 0, 1, -360, 360],
[86, 87, 0.012, 0.093, 0.183, 9900, 0, 0, 0, 0, 1, -360, 360],
[86, 323, 0.006, 0.048, 0.092, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 91, 0.047, 0.119, 0.014, 9900, 0, 0, 0, 0, 1, -360, 360],
[90, 92, 0.032, 0.174, 0.024, 9900, 0, 0, 0, 0, 1, -360, 360],
[91, 94, 0.1, 0.253, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
[91, 97, 0.022, 0.077, 0.039, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 103, 0.019, 0.144, 0.017, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 105, 0.017, 0.092, 0.012, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 97, 0.278, 0.427, 0.043, 9900, 0, 0, 0, 0, 1, -360, 360],
[97, 100, 0.022, 0.053, 0.007, 9900, 0, 0, 0, 0, 1, -360, 360],
[97, 102, 0.038, 0.092, 0.012, 9900, 0, 0, 0, 0, 1, -360, 360],
[97, 103, 0.048, 0.122, 0.015, 9900, 0, 0, 0, 0, 1, -360, 360],
[98, 100, 0.024, 0.064, 0.007, 9900, 0, 0, 0, 0, 1, -360, 360],
[98, 102, 0.034, 0.121, 0.015, 9900, 0, 0, 0, 0, 1, -360, 360],
[99, 107, 0.053, 0.135, 0.017, 9900, 0, 0, 0, 0, 1, -360, 360],
[99, 108, 0.002, 0.004, 0.002, 9900, 0, 0, 0, 0, 1, -360, 360],
[99, 109, 0.045, 0.354, 0.044, 9900, 0, 0, 0, 0, 1, -360, 360],
[99, 110, 0.05, 0.174, 0.022, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 102, 0.016, 0.038, 0.004, 9900, 0, 0, 0, 0, 1, -360, 360],
[102, 104, 0.043, 0.064, 0.027, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 105, 0.019, 0.062, 0.008, 9900, 0, 0, 0, 0, 1, -360, 360],
[104, 108, 0.076, 0.13, 0.044, 9900, 0, 0, 0, 0, 1, -360, 360],
[104, | |
default direction
if type_ == '@direction' and type_ in ctx:
rval = ctx[type_]
# get specific entry information
if key in ctx['mappings']:
entry = ctx['mappings'][key]
if entry is None:
return None
# return whole entry
if type_ is None:
rval = entry
# return entry value for type
elif type_ in entry:
rval = entry[type_]
return rval
@staticmethod
def parse_nquads(input_):
    """
    Parses RDF in the form of N-Quads.

    :param input_: the N-Quads input string to parse.

    :return: an RDF dataset: a dict mapping graph name ('@default' for
        the default graph) to a list of unique triples, where each
        triple is a dict with 'subject', 'predicate' and 'object'
        entries of the form ``{'type': ..., 'value': ...}`` (literal
        objects additionally carry 'datatype' and possibly 'language').

    :raises JsonLdError: if a non-empty line does not match the quad
        grammar.
    """
    # define partial regexes
    # NOTE: group order across the assembled pattern determines the
    # match.groups() indices used below:
    #   0: subject IRI     1: subject bnode
    #   2: predicate IRI
    #   3: object IRI      4: object bnode   5: literal value
    #   6: literal datatype IRI              7: literal language
    #   8: graph IRI       9: graph bnode
    iri = '(?:<([^:]+:[^>]*)>)'
    bnode = '(_:(?:[A-Za-z][A-Za-z0-9]*))'
    plain = '"([^"\\\\]*(?:\\\\.[^"\\\\]*)*)"'
    datatype = '(?:\\^\\^' + iri + ')'
    language = '(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*))'
    literal = '(?:' + plain + '(?:' + datatype + '|' + language + ')?)'
    ws = '[ \\t]+'
    wso = '[ \\t]*'
    eoln = r'(?:\r\n)|(?:\n)|(?:\r)'
    empty = r'^' + wso + '$'

    # define quad part regexes
    subject = '(?:' + iri + '|' + bnode + ')' + ws
    property = iri + ws
    object = '(?:' + iri + '|' + bnode + '|' + literal + ')' + wso
    graph = '(?:\\.|(?:(?:' + iri + '|' + bnode + ')' + wso + '\\.))'

    # Note: Notice that the graph position does not include literals
    # even though they are specified as a possible value in the
    # N-Quads note (http://sw.deri.org/2008/07/n-quads/). This is
    # intentional, as literals in that position are not supported by the
    # RDF data model or the JSON-LD data model.
    # See: https://github.com/digitalbazaar/pyld/pull/19

    # full quad regex
    quad = r'^' + wso + subject + property + object + graph + wso + '$'

    # build RDF dataset
    dataset = {}

    # split N-Quad input into lines
    lines = re.split(eoln, input_)
    line_number = 0
    for line in lines:
        line_number += 1

        # skip empty lines
        if re.search(empty, line) is not None:
            continue

        # parse quad
        match = re.search(quad, line)
        if match is None:
            raise JsonLdError(
                'Error while parsing N-Quads invalid quad.',
                'jsonld.ParseError', {'line': line_number})
        match = match.groups()

        # create RDF triple
        triple = {'subject': {}, 'predicate': {}, 'object': {}}

        # get subject
        if match[0] is not None:
            triple['subject'] = {'type': 'IRI', 'value': match[0]}
        else:
            triple['subject'] = {'type': 'blank node', 'value': match[1]}

        # get predicate
        triple['predicate'] = {'type': 'IRI', 'value': match[2]}

        # get object
        if match[3] is not None:
            triple['object'] = {'type': 'IRI', 'value': match[3]}
        elif match[4] is not None:
            triple['object'] = {'type': 'blank node', 'value': match[4]}
        else:
            triple['object'] = {'type': 'literal'}
            # undo the escaping applied by ``to_nquad``
            unescaped = (
                match[5]
                .replace('\\"', '\"')
                .replace('\\t', '\t')
                .replace('\\n', '\n')
                .replace('\\r', '\r')
                .replace('\\\\', '\\'))
            if match[6] is not None:
                triple['object']['datatype'] = match[6]
            elif match[7] is not None:
                # language-tagged strings get the rdf:langString datatype
                triple['object']['datatype'] = RDF_LANGSTRING
                triple['object']['language'] = match[7]
            else:
                triple['object']['datatype'] = XSD_STRING
            triple['object']['value'] = unescaped

        # get graph name ('@default' is used for the default graph)
        name = '@default'
        if match[8] is not None:
            name = match[8]
        elif match[9] is not None:
            name = match[9]

        # initialize graph in dataset
        if name not in dataset:
            dataset[name] = [triple]
        # add triple if unique to its graph
        else:
            unique = True
            triples = dataset[name]
            for t in dataset[name]:
                if JsonLdProcessor._compare_rdf_triples(t, triple):
                    unique = False
                    break
            if unique:
                triples.append(triple)

    return dataset
@staticmethod
def to_nquads(dataset):
"""
Converts an RDF dataset to N-Quads.
:param dataset: the RDF dataset to convert.
:return: the N-Quads string.
"""
quads = []
for graph_name, triples in dataset.items():
for triple in triples:
if graph_name == '@default':
graph_name = None
quads.append(JsonLdProcessor.to_nquad(triple, graph_name))
quads.sort()
return ''.join(quads)
@staticmethod
def to_nquad(triple, graph_name=None):
"""
Converts an RDF triple and graph name to an N-Quad string (a single
quad).
:param triple: the RDF triple or quad to convert (a triple or quad
may be passed, if a triple is passed then `graph_name` should be
given to specify the name of the graph the triple is in, `None`
for the default graph).
:param graph_name: the name of the graph containing the triple, None
for the default graph.
:return: the N-Quad string.
"""
s = triple['subject']
p = triple['predicate']
o = triple['object']
g = triple.get('name', {'value': graph_name})['value']
quad = ''
# subject is an IRI
if s['type'] == 'IRI':
quad += '<' + s['value'] + '>'
else:
quad += s['value']
quad += ' '
# property is an IRI
if p['type'] == 'IRI':
quad += '<' + p['value'] + '>'
else:
quad += p['value']
quad += ' '
# object is IRI, bnode, or literal
if o['type'] == 'IRI':
quad += '<' + o['value'] + '>'
elif(o['type'] == 'blank node'):
quad += o['value']
else:
escaped = (
o['value']
.replace('\\', '\\\\')
.replace('\t', '\\t')
.replace('\n', '\\n')
.replace('\r', '\\r')
.replace('\"', '\\"'))
quad += '"' + escaped + '"'
if o['datatype'] == RDF_LANGSTRING:
if o['language']:
quad += '@' + o['language']
elif o['datatype'] != XSD_STRING:
quad += '^^<' + o['datatype'] + '>'
# graph
if g is not None:
if not g.startswith('_:'):
quad += ' <' + g + '>'
else:
quad += ' ' + g
quad += ' .\n'
return quad
@staticmethod
def arrayify(value):
    """
    If value is an array, returns value, otherwise returns an array
    containing value as the only element.

    :param value: the value.

    :return: an array.
    """
    if _is_array(value):
        return value
    return [value]
@staticmethod
def _compare_rdf_triples(t1, t2):
"""
Compares two RDF triples for equality.
:param t1: the first triple.
:param t2: the second triple.
:return: True if the triples are the same, False if not.
"""
for attr in ['subject', 'predicate', 'object']:
if(t1[attr]['type'] != t2[attr]['type'] or
t1[attr]['value'] != t2[attr]['value']):
return False
if t1['object'].get('language') != t2['object'].get('language'):
return False
if t1['object'].get('datatype') != t2['object'].get('datatype'):
return False
return True
def _compact(self, active_ctx, active_property, element, options):
"""
Recursively compacts an element using the given active context. All
values must be in expanded form before this method is called.
:param active_ctx: the active context to use.
:param active_property: the compacted property with the element to
compact, None for none.
:param element: the element to compact.
:param options: the compaction options.
:return: the compacted value.
"""
# recursively compact array
if _is_array(element):
rval = []
for e in element:
# compact, dropping any None values
e = self._compact(active_ctx, active_property, e, options)
if e is not None:
rval.append(e)
if options['compactArrays'] and len(rval) == 1:
# use single element if no container is specified
container = JsonLdProcessor.arrayify(
JsonLdProcessor.get_context_value(
active_ctx, active_property, '@container'))
if not container:
rval = rval[0]
return rval
# use any scoped context on active_property
ctx = JsonLdProcessor.get_context_value(
active_ctx, active_property, '@context')
if ctx is not None:
active_ctx = self._process_context(
active_ctx, ctx, options,
propagate=True,
override_protected=True)
# recursively compact object
if _is_object(element):
if(options['link'] and '@id' in element and
element['@id'] in options['link']):
# check for a linked element to reuse
linked = options['link'][element['@id']]
for link in linked:
if link['expanded'] == element:
return link['compacted']
# do value compaction on @values and subject references
if _is_value(element) or _is_subject_reference(element):
rval = self._compact_value(
active_ctx, active_property, element, options)
if options['link'] and _is_subject_reference(element):
# store linked element
options['link'].setdefault(element['@id'], []).append(
{'expanded': element, 'compacted': rval})
return rval
# if expanded property is @list and we're contained within a list
# container, recursively compact this item to an array
if _is_list(element):
container = JsonLdProcessor.arrayify(
JsonLdProcessor.get_context_value(
active_ctx, active_property, '@container'))
if '@list' in container:
return self._compact(active_ctx, active_property, element['@list'], options)
# FIXME: avoid misuse of active property as an expanded property?
inside_reverse = (active_property == '@reverse')
rval = {}
# original context before applying property-scoped and local contexts
input_ctx = active_ctx
# revert to previous context, if there is one,
# and element is not a value object or a node reference
if not _is_value(element) and not _is_subject_reference(element):
active_ctx = self._revert_to_previous_context(active_ctx)
property_scoped_ctx = JsonLdProcessor.get_context_value(
input_ctx, active_property, '@context')
if property_scoped_ctx is not None:
active_ctx = self._process_context(
active_ctx, property_scoped_ctx, options,
propagate=True,
override_protected=True)
if options['link'] and '@id' in element:
# store linked element
options['link'].setdefault(element['@id'], []).append(
{'expanded': element, 'compacted': rval})
# find all type-scoped contexts based on current context, prior to updating it
type_ctx = active_ctx
# | |
entry is selected in the list of commands """
Model, TreeIter = Selection.get_selected()
if TreeIter != None:
CmdStartMark = self.CLITextbuffer.get_mark("CmdId")
Start = self.CLITextbuffer.get_iter_at_mark(CmdStartMark)
end = self.CLITextbuffer.get_end_iter()
self.CLITextbuffer.delete(Start, end) # replace what was typed by the command
self.CLITextbuffer.insert(Start, Model[TreeIter][0])
self.UpdtateAssistantPopover("")
def DisplayAssistantPopover(self, text):
""" Create and show the assistant popover """
if self.CLIManager.GetHideSyntaxAssistantParam() is False:
self.Popover = Gtk.Popover.new(self.CLITextview)
self.PopoverLabel = Gtk.Label()
self.PopoverLabel.set_markup(text)
self.Popover.add(self.PopoverLabel)
Pos = self.CLITextbuffer.get_end_iter()
Location = self.CLITextview.get_iter_location(Pos)
WinLocation = Location
WinLocation.x, WinLocation.y = self.CLITextview.buffer_to_window_coords(Gtk.TextWindowType.TEXT,Location.x,Location.y)
self.Popover.set_pointing_to(WinLocation)
self.Popover.set_modal(False) # The popover doesn't take user interaction
self.Popover.set_position(Gtk.PositionType.BOTTOM)
self.Popover.show_all()
self.AssistantPopoverActive = True;
def DestroyAssistantPopover(self):
""" Destroy th assistant pover and reset the associated flag """
if self.AssistantPopoverActive == True:
self.Popover.destroy()
self.AssistantPopoverActive = False
def UpdtateAssistantPopover(self, text):
""" Update the popover content and pointing tip position """
if self.AssistantPopoverActive == True:
if text != "": # Label not updated when empty string
self.PopoverLabel.set_markup(text)
Pos = self.CLITextbuffer.get_end_iter()
Location = self.CLITextview.get_iter_location(Pos)
# convert the buffer coords to window coords so the popover is always visible
# even when the window is scrolled
WinLocation = Location
WinLocation.x, WinLocation.y = self.CLITextview.buffer_to_window_coords(Gtk.TextWindowType.TEXT,Location.x,Location.y)
# Update pointing tip position
self.Popover.set_pointing_to(WinLocation)
def IsAssistantPopoverActive(self):
""" Tells if the assistant popover is currently displayed """
return self.AssistantPopoverActive
def IsOneSuggestion(self):
""" Tells if there is only one command corresponding to what is typed """
text = self.PopoverLabel.get_text()
# Check if there's more than one line in the label
if "\n" in text:
return False
else:
return True
def AddToHistory(self, command):
""" Add a command to history. Called after the user sent a command """
if command != "": # Do not insert blank lines
self.CLIHistory.append(command)
self.CLIHistoryOffset = len(self.CLIHistory) # Update offset value
def HistoryStepBackward(self):
""" Make a step backward in the history and display the entry """
if self.CLIHistoryOffset > 0:
self.CLIHistoryOffset -= 1
CmdStartMark = self.CLITextbuffer.get_mark("CmdId")
Start = self.CLITextbuffer.get_iter_at_mark(CmdStartMark)
end = self.CLITextbuffer.get_end_iter()
self.CLITextbuffer.delete(Start, end) # what was typed will be replaced by history entry
self.CLITextbuffer.insert(Start, self.CLIHistory[(self.CLIHistoryOffset)]) # Insert entry
self.UpdtateAssistantPopover("") # Update assistant popover if displayed
def HistoryStepForward(self):
""" Make a step forward in the history and display the entry """
if self.CLIHistoryOffset <= (len(self.CLIHistory) - 1):
self.CLIHistoryOffset += 1
if self.CLIHistoryOffset == len(self.CLIHistory): # Offset incremented just before
Entry = "" # Not an entry in the history
else:
Entry = self.CLIHistory[self.CLIHistoryOffset]
else: # Do not increment offset
Entry = ""
CmdStartMark = self.CLITextbuffer.get_mark("CmdId")
Start = self.CLITextbuffer.get_iter_at_mark(CmdStartMark)
end = self.CLITextbuffer.get_end_iter()
self.CLITextbuffer.delete(Start, end) # replace what was typed by the command
self.CLITextbuffer.insert(Start, Entry)
if Entry == "":
self.DestroyAssistantPopover() # Destroy popover if any
else:
self.UpdtateAssistantPopover("") # Update Popover
def AddConnectionsMenuActions(self, ActionGroup):
ActionGroup.add_actions([
("ConnectionsMenu", None, "Connections"),
("SelectConnection", None, "Select", None, None,
None),
("EditConnection", None, "Edit", None, None,
self.OnMenuEditConnections),
("Connect", Gtk.STOCK_CONNECT, "Connect", None, "Connect",
self.OnMenuConnect),
("Disconnect", Gtk.STOCK_DISCONNECT, "Disconnect", None, "Disconnect",
self.OnMenuDisconnect),
("ClearConsole", Gtk.STOCK_CLEAR, "Clear", None, "Clear console",
self.OnMenuClear)])
#Get default connection type
if self.CLIManager.ConManager.GetConnectionType() == "UDP":
DefaultType = 1
elif self.CLIManager.ConManager.GetConnectionType() == "TCP":
DefaultType = 2
ActionGroup.add_radio_actions([
("SelectUDP", None, "UDP", None, None, 1),
("SelectTCP", None, "TCP", None, None, 2)
], DefaultType, self.OnMenuConnectionTypeChanged)
def AddFileMenuActions(self, ActionGroup):
ActionGroup.add_actions([
("FileMenu", None, "File"),
("FileOpen", Gtk.STOCK_OPEN, "Open", None, "Open file (.set)",
self.OnMenuImportFromSet),
("ImportFromSource", Gtk.STOCK_CONVERT, "Import from source", None, None,
self.OnMenuImportFromSource),
("SaveAs", Gtk.STOCK_FLOPPY, "Save As", None, None,
self.OnMenuSaveAs)])
FilequitAction = Gtk.Action("FileQuit", None, None, Gtk.STOCK_QUIT)
FilequitAction.connect("activate", self.OnMenuFileQuit)
ActionGroup.add_action(FilequitAction)
def AddOptionsMenuActions(self, ActionGroup):
ActionGroup.add_actions([
("OptionsMenu", None, "Options"),
("CLIColor", None, "CLI color"),
("CLIFontSelection", None, "CLI font", None, None, self.OnOptionSelectFont) ])
EscapeChar = Gtk.ToggleAction("HideEscapeChar", "Hide escape sequences", \
None, None)
EscapeChar.connect("toggled", self.OnOptionEscapeCharToggled)
EscapeChar.set_active(self.CLIManager.GetHideEscapeParam())
ActionGroup.add_action(EscapeChar)
SyntaxAssistant = Gtk.ToggleAction("HideAssistantPopover", "Hide syntax assistant", \
None, None)
SyntaxAssistant.connect("toggled", self.OnOptionSyntaxAssistantToggled)
SyntaxAssistant.set_active(self.CLIManager.GetHideSyntaxAssistantParam())
ActionGroup.add_action(SyntaxAssistant)
ActionGroup.add_actions([
("ColorNone", None, "None", None, None, self.OnOptionSelectColor),
("ColorSea", None, "Sea", None, None, self.OnOptionSelectColor),
("ColorConsole", None, "Console", None, None, self.OnOptionSelectColor) ])
def OnMenuFileQuit(self, widget):
""" Called when the cross is clicked or quit from file menu """
Gtk.main_quit()
def OnOptionSelectColor(self, widget):
""" Called when the user select a color scheme for the CLI """
self.CLIManager.SetCLIColorConfig(widget.get_name())
self.SetCLIColor(widget.get_name())
def OnOptionSelectFont(self,widget):
""" Called when the user select a font for the CLI """
FontDialog = Gtk.FontChooserDialog("CLI font", self)
FontDialog.set_font_desc(self.CLIFont)
FontDialog.show()
Response = FontDialog.run()
if Response == Gtk.ResponseType.OK:
self.CLIFont = FontDialog.get_font_desc()
FontName = FontDialog.get_font()
self.CLIManager.SetCLIFontConfig(FontName) # Save as user preference
self.CLITextview.override_font(self.CLIFont) # Update CLI font
FontDialog.destroy()
def ParseColor(self, color):
""" Parse colors to set Bg and Fg colors according to selected color scheme """
_color = Gdk.RGBA()
_color.parse(color)
return _color
def SetCLIColor(self, ColorStyle):
""" Called when the CLI color must be changed """
if ColorStyle == "ColorNone":
self.CLITextview.override_background_color(Gtk.StateFlags.NORMAL, self.ParseColor("white"))
self.CLITextview.override_color(Gtk.StateFlags.NORMAL, self.ParseColor("black"))
elif ColorStyle == "ColorSea":
self.CLITextview.override_background_color(Gtk.StateFlags.NORMAL, self.ParseColor("#123A4A"))
self.CLITextview.override_color(Gtk.StateFlags.NORMAL, self.ParseColor("turquoise2"))
elif ColorStyle == "ColorConsole":
self.CLITextview.override_background_color(Gtk.StateFlags.NORMAL, self.ParseColor("black"))
self.CLITextview.override_color(Gtk.StateFlags.NORMAL, self.ParseColor("white"))
def OnOptionEscapeCharToggled(self, widget):
""" Called when the hide escape char option state is changed """
if widget.get_active():
self.CLIManager.SetHideEscapeParam(True)
self.SetVisibleColumn(True)
else:
self.CLIManager.SetHideEscapeParam(False)
self.SetVisibleColumn(False)
def SetVisibleColumn(self, Param):
""" Set the visible help column according to the option state """
if Param is True:
#The column without escape chars (3) is set to visible whereas the other is not
self.CmdSetTreeview.get_column(2).set_visible(False)
self.CmdSetTreeview.get_column(3).set_visible(True)
else:
#The column with escape chars (2) is set to visible whereas the other is not
self.CmdSetTreeview.get_column(2).set_visible(True)
self.CmdSetTreeview.get_column(3).set_visible(False)
def OnOptionSyntaxAssistantToggled(self,widget):
""" Called when the syntax assistant popover option state is changed """
if widget.get_active():
self.CLIManager.SetHideSyntaxAssistantParam(True)
self.DestroyAssistantPopover() # Option disabled so the popover should be destroyed immediately
else:
self.CLIManager.SetHideSyntaxAssistantParam(False)
def OnMenuClear(self, widget):
""" Called when the clear button from the toolbar is pressed """
# Empty the whole textview
Start, End = self.CLITextbuffer.get_bounds()
self.CLITextbuffer.delete(Start, End)
self.CLITextbuffer.insert_at_cursor("> ",2)
# Update the mark
self.CLITextbuffer.delete_mark_by_name("CmdId")
self.CLITextbuffer.create_mark("CmdId", self.CLITextbuffer.get_end_iter(), True)
def OnMenuConnectionTypeChanged(self, widget, current):
""" Called when the connection type to be used has changed """
if current.get_name() == "SelectUDP":
self.CLIManager.ConManager.SetConnectionType("UDP")
elif current.get_name() == "SelectTCP":
self.CLIManager.ConManager.SetConnectionType("TCP")
def DataHandler(self, data):
""" Callback to handle data received from the socket """
CmdStartMark = self.CLITextbuffer.get_mark("CmdId")
Start = self.CLITextbuffer.get_iter_at_mark(CmdStartMark)
end = self.CLITextbuffer.get_end_iter()
self.CLITextbuffer.delete(Start, end)
self.CLITextbuffer.insert(Start, data)
self.CLITextbuffer.insert_at_cursor("\n> ",3)
# Update the mark
self.CLITextbuffer.delete_mark_by_name("CmdId")
self.CLITextbuffer.create_mark("CmdId", self.CLITextbuffer.get_end_iter(), True)
self.CLITextview.scroll_to_mark(self.CLITextbuffer.get_insert(),0.0,True,0.5,0.5)
def OnMenuConnect(self, widget):
""" Called when the user ask for opening the port/establish connection """
Error = self.CLIManager.ConManager.Connect(self.DataHandler)
self.SetConnectionStatusInTitle()
self.AppStatusbar.Connect(Error) #Update the status bar
def OnMenuDisconnect(self, widget):
""" Called when the user ask for closing the socket/connection """
self.CLIManager.ConManager.Disconnect()
self.SetConnectionStatusInTitle()
self.AppStatusbar.Disconnect() #Update status bar
def OnMenuEditConnections(self, widget):
""" build the dialog for editing connections parameters """
Dialog = ConnectionsDialog(self)
Dialog.show()
Response = Dialog.run()
if Response == Gtk.ResponseType.OK:
if Dialog.HasConfigBeenModified():
Dialog.SaveConfig() #Save modifications
#No action if cancel button is pressed
Dialog.destroy()
def OnMenuImportFromSet(self, widget):
""" Called when the user request to open a file """
self.ImportFrom('List')
def OnMenuImportFromSource(self, widget):
""" Called when the user request to import a file from sources """
self.ImportFrom('Source')
def ImportFrom(self, FileType):
""" Called when a list of commands should be loaded to the gtk liststore """
if FileType == 'Source':
DialogTitle = "Select source file"
# Filters for the file chooser
FilterMain = Gtk.FileFilter()
FilterMain.set_name("C files")
FilterMain.add_pattern("*.c")
FilterAll = Gtk.FileFilter()
FilterAll.set_name("All")
FilterAll.add_pattern("*")
elif FileType == 'List':
DialogTitle = "Select list of commands file (.set)"
# Filters for the file chooser
FilterMain = Gtk.FileFilter()
FilterMain.set_name(".set")
FilterMain.add_pattern("*.set")
FilterAll = Gtk.FileFilter()
FilterAll.set_name("All")
FilterAll.add_pattern("*")
Dialog = Gtk.FileChooserDialog(DialogTitle, self,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
Dialog.add_filter(FilterMain)
Dialog.add_filter(FilterAll)
Response = Dialog.run()
Filename = Dialog.get_filename() # Filename is nonetype when cancel button pressed
Dialog.destroy() # Dialog not needed anymore
if Response == Gtk.ResponseType.OK:
#Check if a set is already loaded
if self.CLIManager.IsCommandsSetLoaded():
AddToListDialog = AppendToListDialog(self)
AddToListDialog.show()
AppendResponse = AddToListDialog.run()
AddToListDialog.destroy() # Dialog not needed anymore
if AppendResponse == Gtk.ResponseType.CANCEL:
return # Import cancelled
if AppendResponse == Gtk.ResponseType.NO:
#liststore should be emptied first
self.CommandsListstore.clear()
self.CLIManager.SetCommandsSetLoaded(False)
#parse the file and load the generated list
Parser = CmdParser()
Parser.CmdParse(Filename, FileType, self.CommandsListstore)
self.CLIManager.SetHideEscapeCharColumn(self.CommandsListstore)
self.SetVisibleColumn(self.CLIManager.GetHideEscapeParam())
self.CLIManager.SetCommandsSetLoaded(True)
self.AppStatusbar.FileImported(Filename)
def OnMenuSaveAs(self,widget):
""" Called when the user request to save the commands currently loaded """
if self.CLIManager.IsCommandsSetLoaded():
Dialog = Gtk.FileChooserDialog("Save as", self,
Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
FilterAll = Gtk.FileFilter()
FilterAll.set_name("All")
FilterAll.add_pattern("*")
Dialog.add_filter(FilterAll)
Response = Dialog.run()
if Response == Gtk.ResponseType.OK:
Filename = Dialog.get_filename()
if not Filename.endswith ('.set'):
Filename += '.set'
self.CLIManager.GenerateCommandsSetFile(Filename, self.CommandsListstore )
self.AppStatusbar.FileSaved(Filename)
Dialog.destroy()
else:
Dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,
Gtk.ButtonsType.CANCEL, "Error")
Dialog.format_secondary_text("A set of commands must be loaded")
Dialog.run()
Dialog.destroy()
def CreateUIManager(self):
UIManager = Gtk.UIManager()
UIManager.add_ui_from_string(UI_MENU)
| |
if conp else 'P'
# get descriptor
name_desc = ''
other_args = {'conp': conp} if test_variable else {}
tester = dRopidT if not test_variable else dRopidE
if rxn_type == reaction_type.plog:
name_desc = '_plog'
tester = dRopi_plog_dT if not test_variable else dRopi_plog_dE
other_args['maxP'] = rate_info['plog']['max_P']
elif rxn_type == reaction_type.cheb:
name_desc = '_cheb'
tester = dRopi_cheb_dT if not test_variable else dRopi_cheb_dE
other_args['maxP'] = np.max(rate_info['cheb']['num_P'])
other_args['maxT'] = np.max(rate_info['cheb']['num_T'])
rtol = 1e-3
atol = 1e-7
def _small_compare(kc, our_vals, ref_vals, mask):
# get the condition extractor
extractor, cond, x, y = self.__get_comp_extractor(kc, mask)
if extractor is None:
# no need to test
return True
# find where there isn't a match
outv = extractor(our_vals, (cond, x, y))
refv = extractor(ref_vals, (cond, x, y), is_answer=True)
check = np.where(
np.logical_not(np.isclose(outv, refv, rtol=rtol)))[0]
correct = True
if check.size:
# check that our values are zero (which is correct)
correct = np.all(outv[check] == 0)
# and that the reference values are "small"
correct &= np.all(np.abs(refv[check]) <= atol)
return correct
# get compare mask
comp = self._get_compare(fd_jac)
kc = [kernel_call('dRopi{}_d{}'.format(name_desc, var_name),
comp.ref_answer, check=False,
strict_name_match=True,
allow_skip=test_variable,
input_mask=['kf', 'kr', 'conc'] + input_mask,
**args),
kernel_call('dRopi{}_d{}_ns'.format(name_desc, var_name),
comp.ref_answer, compare_mask=[comp],
compare_axis=comp.compare_axis, chain=_chainer,
strict_name_match=True, allow_skip=True,
rtol=rtol, atol=atol, other_compare=_small_compare,
input_mask=['db', 'rop_rev', 'rop_fwd'],
**args)]
return self._generic_jac_tester(tester, kc, **other_args)
    @attr('long')
    @with_check_inds(check_inds={
        # axis 1: only rows of simple (neither PLOG nor Chebyshev) reactions
        1: lambda self: self.__get_check(
            lambda rxn: not (isinstance(rxn, ct.PlogReaction)
                        or isinstance(rxn, ct.ChebyshevReaction))),
        # axis 2: the temperature column of the Jacobian
        2: np.array([0])
    })
    def test_dRopidT(self):
        """Check d(rate of progress)/dT for simple (non-P-dependent) reactions."""
        self.__run_ropi_test()
    @attr('long')
    @with_check_inds(check_inds={
        # axis 1: only rows of PLOG reactions
        1: lambda self: self.__get_check(
            lambda rxn: isinstance(rxn, ct.PlogReaction)),
        # axis 2: the temperature column of the Jacobian
        2: np.array([0])
    })
    def test_dRopi_plog_dT(self):
        """Check d(rate of progress)/dT for PLOG reactions."""
        self.__run_ropi_test(reaction_type.plog)
    @attr('long')
    @with_check_inds(check_inds={
        # axis 1: only rows of Chebyshev reactions
        1: lambda self: self.__get_check(
            lambda rxn: isinstance(rxn, ct.ChebyshevReaction)),
        # axis 2: the temperature column of the Jacobian
        2: np.array([0])
    })
    def test_dRopi_cheb_dT(self):
        """Check d(rate of progress)/dT for Chebyshev reactions."""
        self.__run_ropi_test(reaction_type.cheb)
    @attr('long')
    @with_check_inds(check_inds={
        # find states where the last species conc should be zero, as this
        # can cause some problems in the FD Jac
        0: lambda self: np.where(self.store.concs[:, -1] != 0)[0],
        # axis 1: only rows of simple (neither PLOG nor Chebyshev) reactions
        1: lambda self: self.__get_check(
            lambda rxn: not (isinstance(rxn, ct.PlogReaction)
                        or isinstance(rxn, ct.ChebyshevReaction))),
        # axis 2: the extra-variable (E) column of the Jacobian
        2: np.array([1])
    })
    def test_dRopi_dE(self):
        """Check d(rate of progress)/dE for simple reactions, conp and conv."""
        self.__run_ropi_test(test_variable=True, conp=True)
        self.__run_ropi_test(test_variable=True, conp=False)
    @attr('long')
    @with_check_inds(check_inds={
        # find states where the last species conc should be zero, as this
        # can cause some problems in the FD Jac
        0: lambda self: np.where(self.store.concs[:, -1] != 0)[0],
        # axis 1: only rows of PLOG reactions
        1: lambda self: self.__get_check(
            lambda rxn: isinstance(rxn, ct.PlogReaction)),
        # axis 2: the extra-variable (E) column of the Jacobian
        2: np.array([1])
    })
    def test_dRopi_plog_dE(self):
        """Check d(rate of progress)/dE for PLOG reactions, conp and conv."""
        self.__run_ropi_test(reaction_type.plog, True, conp=True)
        self.__run_ropi_test(reaction_type.plog, True, conp=False)
    @attr('long')
    @with_check_inds(check_inds={
        # find states where the last species conc should be zero, as this
        # can cause some problems in the FD Jac
        0: lambda self: np.where(self.store.concs[:, -1] != 0)[0],
        # axis 1: only rows of Chebyshev reactions
        1: lambda self: self.__get_check(
            lambda rxn: isinstance(rxn, ct.ChebyshevReaction)),
        # axis 2: the extra-variable (E) column of the Jacobian
        2: np.array([1])
    })
    def test_dRopi_cheb_dE(self):
        """Check d(rate of progress)/dE for Chebyshev reactions, conp and conv."""
        self.__run_ropi_test(reaction_type.cheb, True, conp=True)
        self.__run_ropi_test(reaction_type.cheb, True, conp=False)
    def __get_non_ad_params(self, conp):
        """Build a (namestore, rate_info, opts) triple for a plain C target.

        :param conp: True for the constant-pressure assumption, False for
            constant-volume.
        :return: tuple of (NameStore, jacobian index info, loopy_options)
            using order='C' / lang='c' (i.e. no auto-differentiation setup).
        """
        reacs = self.store.reacs
        specs = self.store.specs
        rate_info = determine_jac_inds(reacs, specs, RateSpecialization.fixed)
        opts = loopy_options(order='C', lang='c')
        namestore = arc.NameStore(opts, rate_info, conp, self.store.test_size)
        return namestore, rate_info, opts
    @with_check_inds(check_inds={
        # axis 1: the temperature row of the Jacobian
        1: np.array([0]),
        # axis 2: the temperature column of the Jacobian
        2: np.array([0]),
        }, custom_checks={
        # find NaN's
        0: lambda self, conp: np.setdiff1d(
            np.arange(self.store.test_size), np.unique(np.where(np.isnan(
                self.__get_full_jac(conp)))[0]), assume_unique=True)
        }
    )
    def __run_dtdot_dt(self, conp):
        """Compare the dTdot/dT kernel against the finite-difference Jacobian.

        :param conp: True for constant-pressure, False for constant-volume;
            selects the matching phi/dphi arrays and specific-heat data.
        """
        # get the full jacobian
        fd_jac = self.__get_full_jac(conp)
        spec_heat = self.store.spec_cp if conp else self.store.spec_cv
        namestore, rate_info, opts = self.__get_non_ad_params(conp)
        phi = self.store.phi_cp if conp else self.store.phi_cv
        dphi = self.store.dphi_cp if conp else self.store.dphi_cv
        # mixture-averaged specific heat (weighted by concentrations)
        spec_heat = np.sum(self.store.concs * spec_heat, axis=1)
        jac = fd_jac.copy()
        # reset values to be populated
        self._set_at(jac, 0)
        # get dcp (temperature derivative of the specific heats) via a
        # one-off kernel run
        args = {'phi': lambda x: np.array(
            phi, order=x, copy=True)}
        dc_name = 'dcp' if conp else 'dcv'
        dc = kernel_runner(_get_poly_wrapper(dc_name, conp),
                           self.store.test_size, args)(
            opts, namestore, self.store.test_size)[dc_name]
        # assemble the kernel-call arguments common to conp/conv
        args = {'conc': lambda x: np.array(
            self.store.concs, order=x, copy=True),
            'dphi': lambda x: np.array(
            dphi, order=x, copy=True),
            'phi': lambda x: np.array(
            phi, order=x, copy=True),
            'jac': lambda x: np.array(
            jac, order=x, copy=True),
            'wdot': lambda x: np.array(
            self.store.species_rates, order=x, copy=True)
        }
        if conp:
            # constant pressure: enthalpies and cp data
            args.update({
                'h': lambda x: np.array(
                    self.store.spec_h, order=x, copy=True),
                'cp': lambda x: np.array(
                    self.store.spec_cp, order=x, copy=True),
                'dcp': lambda x: np.array(
                    dc, order=x, copy=True),
                'cp_tot': lambda x: np.array(
                    spec_heat, order=x, copy=True)})
        else:
            # constant volume: internal energies, cv data and the volume
            args.update({
                'u': lambda x: np.array(
                    self.store.spec_u, order=x, copy=True),
                'cv': lambda x: np.array(
                    self.store.spec_cv, order=x, copy=True),
                'dcv': lambda x: np.array(
                    dc, order=x, copy=True),
                'cv_tot': lambda x: np.array(
                    spec_heat, order=x, copy=True),
                'V_arr': lambda x: np.array(
                    self.store.V, order=x, copy=True)})
        # find NaN's
        comp = self._get_compare(fd_jac)
        kc = kernel_call('dTdot_dT', comp.ref_answer, check=True,
                         compare_mask=[comp], compare_axis=comp.compare_axis,
                         equal_nan=True, other_compare=self.our_nan_compare,
                         **args)
        return self._generic_jac_tester(dTdotdT, kc, conp=conp)
    @attr('long')
    def test_dTdot_dT(self):
        """Check d(dT/dt)/dT under both conp and conv assumptions."""
        # test conp
        self.__run_dtdot_dt(True)
        # test conv
        self.__run_dtdot_dt(False)
def __run_dci_thd_dvar(self, rxn_type=reaction_type.thd, test_variable=False,
conp=True):
# setup the namestore and options
namestore, rate_info = self._make_namestore(conp)
ad_opts = namestore.loopy_opts
# setup arguements
# get our form of rop_fwd / rop_rev
fwd_removed, rev_removed = self.__get_removed()
kf, kf_fall = self.__get_kf_and_fall()
args = {
'pres_mod': lambda x: np.zeros_like(
self.store.ref_pres_mod, order=x),
'conc': lambda x: np.zeros_like(self.store.concs, order=x),
'wdot': lambda x: np.zeros_like(self.store.species_rates, order=x),
'rop_fwd': lambda x: np.array(fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(rev_removed, order=x, copy=True),
'rop_net': lambda x: np.zeros_like(self.store.rxn_rates, order=x),
'thd_conc': lambda x: np.zeros_like(self.store.ref_thd, order=x),
'kf': lambda x: np.zeros_like(kf, order=x),
'kf_fall': lambda x: np.zeros_like(kf_fall, order=x),
'Pr': lambda x: np.zeros_like(self.store.ref_Pr, order=x),
'Fi': lambda x: np.zeros_like(self.store.ref_Fall, order=x),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x)
}
if rxn_type == falloff_form.troe:
args.update({
'Fcent': lambda x: np.zeros((
self.store.test_size, self.store.troe_inds.size), order=x),
'Atroe': lambda x: np.zeros((
self.store.test_size, self.store.troe_inds.size), order=x),
'Btroe': lambda x: np.zeros((
self.store.test_size, self.store.troe_inds.size), order=x),
})
elif rxn_type == falloff_form.sri:
args.update({
'X': lambda x: np.zeros((
self.store.test_size, self.store.sri_inds.size), order=x)
})
if conp:
args.update({
'P_arr': lambda x: np.array(self.store.P, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True)
})
else:
args.update({
'V_arr': lambda x: np.array(self.store.V, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cv, order=x, copy=True)
})
# obtain the finite difference jacobian
kc = kernel_call('dci_dT', [None], **args)
# create the editor
edit = editor(
namestore.T_arr if not test_variable else namestore.E_arr,
namestore.n_dot, self.store.test_size,
order=ad_opts.order)
rate_sub = None
if rxn_type == falloff_form.lind:
rate_sub = get_lind_kernel
elif rxn_type == falloff_form.sri:
rate_sub = get_sri_kernel
elif rxn_type == falloff_form.troe:
rate_sub = get_troe_kernel
# tell the editor to raise a skip test if we don't have this type of falloff
# / rxn
edit.set_skip_on_missing(rate_sub)
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, conp,
extra_funcs=[x for x in [get_concentrations, get_thd_body_concs,
get_simple_arrhenius_rates,
_get_fall_call_wrapper(),
get_reduced_pressure_kernel, rate_sub,
get_rxn_pres_mod, get_rop_net,
get_spec_rates] if x is not None])
# setup args
# create rop net w/o pres mod
args = {
'rop_fwd': lambda x: np.array(fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(rev_removed, order=x, copy=True),
# 'conc': lambda x: np.zeros_like(self.store.concs, order=x),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
}
if conp:
args.update({
'P_arr': lambda x: np.array(self.store.P, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True)
})
else:
args.update({
'V_arr': lambda x: np.array(self.store.V, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cv, order=x, copy=True)
})
if test_variable:
args.update({
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True)
})
tester = dci_thd_dT if not test_variable else dci_thd_dE
if rxn_type != reaction_type.thd:
args.update({
'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'Fi': lambda x: np.array(
self.store.ref_Fall, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True)
})
if rxn_type == falloff_form.lind:
tester = dci_lind_dT if not test_variable else dci_lind_dE
elif rxn_type == falloff_form.sri:
tester = dci_sri_dT if not test_variable else dci_sri_dE
X = self.__get_sri_params(namestore)
args.update({'X': lambda x: np.array(X, order=x, copy=True)})
elif rxn_type == falloff_form.troe:
tester = dci_troe_dT if not test_variable else dci_troe_dE
Fcent, Atroe, Btroe = self.__get_troe_params(namestore)
args.update({
'Fcent': lambda x: np.array(Fcent, order=x, copy=True),
'Atroe': lambda x: np.array(Atroe, order=x, copy=True),
'Btroe': lambda x: np.array(Btroe, order=x, copy=True)
})
# get the compare mask
comp = self._get_compare(fd_jac)
if test_variable and conp:
# need to adjust the reference answer to account for the addition
# of the net ROP resulting from the volume derivative in this
# Jacobian entry.
starting_jac = np.zeros(namestore.jac.shape)
from ..utils import get_nu
for i, rxn in enumerate(self.store.reacs):
# this is a bit tricky: in order to get the proper derivatives
# in the auto-differentiation version, we set the falloff
# blending term to zero for reaction types
# not under consideration. This has the side effect of forcing
# the net ROP for these reactions to be zero in the AD-code
#
# Therefore, we must exclude the ROP for falloff/chemically
# activated reactions when looking at the third body
# derivatives
def __is_fall(rxn):
return (reaction_type.fall in rxn.type or
reaction_type.chem in | |
target space
- p : distribution in the source space
- q : distribution in the target space
- L : loss function to account for the misfit between the similarity matrices
Parameters
----------
C1 : ndarray, shape (ns, ns)
Metric cost matrix in the source space
C2 : ndarray, shape (nt, nt)
Metric cost matrix in the target space
p : ndarray, shape (ns,)
Distribution in the source space.
q : ndarray, shape (nt,)
Distribution in the target space.
loss_fun : str
loss function used for the solver either 'square_loss' or 'kl_loss'
max_iter : int, optional
Max number of iterations
tol : float, optional
Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
armijo : bool, optional
If True the steps of the line-search is found via an armijo research. Else closed form is used.
If there is convergence issues use False.
Returns
-------
gw_dist : float
Gromov-Wasserstein distance
log : dict
        convergence information and coupling matrix
References
----------
    .. [12] Peyré, Gabriel, Marco Cuturi, and Justin Solomon,
"Gromov-Wasserstein averaging of kernel and distance matrices."
International Conference on Machine Learning (ICML). 2016.
.. [13] Mémoli, Facundo. Gromov–Wasserstein distances and the
metric approach to object matching. Foundations of computational
mathematics 11.4 (2011): 417-487.
"""
constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun)
G0 = p[:, None] * q[None, :]
def f(G):
return gwloss(constC, hC1, hC2, G)
def df(G):
return gwggrad(constC, hC1, hC2, G)
res, log_gw = cg(p, q, 0, 1, f, df, G0, log=True, armijo=armijo, C1=C1, C2=C2, constC=constC, **kwargs)
log_gw['gw_dist'] = gwloss(constC, hC1, hC2, res)
log_gw['T'] = res
if log:
return log_gw['gw_dist'], log_gw
else:
return log_gw['gw_dist']
def fused_gromov_wasserstein(M, C1, C2, p, q, loss_fun='square_loss', alpha=0.5, armijo=False, log=False, **kwargs):
    r"""Computes the FGW transport between two graphs see [24]

    .. math::
        \gamma = arg\min_\gamma (1-\alpha)*<\gamma,M>_F + \alpha* \sum_{i,j,k,l}
        L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}

        s.t. \gamma 1 = p
             \gamma^T 1 = q
             \gamma \geq 0

    where :
    - M is the (ns,nt) metric cost matrix
    - p and q are source and target weights (sum to 1)
    - L is a loss function to account for the misfit between the similarity matrices

    The algorithm used for solving the problem is conditional gradient as discussed in [24]_

    Parameters
    ----------
    M : ndarray, shape (ns, nt)
        Metric cost matrix between features across domains
    C1 : ndarray, shape (ns, ns)
        Metric cost matrix representative of the structure in the source space
    C2 : ndarray, shape (nt, nt)
        Metric cost matrix representative of the structure in the target space
    p : ndarray, shape (ns,)
        Distribution in the source space
    q : ndarray, shape (nt,)
        Distribution in the target space
    loss_fun : str, optional
        Loss function used for the solver
    alpha : float, optional
        Trade-off parameter (0 < alpha < 1)
    armijo : bool, optional
        If True the steps of the line-search is found via an armijo research. Else closed form is used.
        If there is convergence issues use False.
    log : bool, optional
        record log if True
    **kwargs : dict
        parameters can be directly passed to the ot.optim.cg solver

    Returns
    -------
    gamma : ndarray, shape (ns, nt)
        Optimal transportation matrix for the given parameters.
    log : dict
        Log dictionary return only if log==True in parameters.

    References
    ----------
    .. [24] <NAME>, <NAME>, <NAME>, <NAME>
        and <NAME> "Optimal Transport for structured data with
        application on graphs", International Conference on Machine Learning
        (ICML). 2019.
    """
    constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun)

    # Product coupling as the conditional-gradient starting point.
    G0 = p[:, None] * q[None, :]

    def f(G):
        # Gromov-Wasserstein structure loss of the coupling G.
        return gwloss(constC, hC1, hC2, G)

    def df(G):
        # Gradient of the structure loss with respect to G.
        return gwggrad(constC, hC1, hC2, G)

    if log:
        res, log = cg(p, q, (1 - alpha) * M, alpha, f, df, G0,
                      armijo=armijo, C1=C1, C2=C2, constC=constC, log=True, **kwargs)
        # The last recorded loss is the FGW distance; [-1] avoids the
        # needless reversed copy that [::-1][0] used to build.
        log['fgw_dist'] = log['loss'][-1]
        return res, log
    else:
        return cg(p, q, (1 - alpha) * M, alpha, f, df, G0,
                  armijo=armijo, C1=C1, C2=C2, constC=constC, **kwargs)
def fused_gromov_wasserstein2(M, C1, C2, p, q, loss_fun='square_loss', alpha=0.5, armijo=False, log=False, **kwargs):
    r"""Computes the FGW distance between two graphs see [24]

    .. math::
        \min_\gamma (1-\alpha)*<\gamma,M>_F + \alpha* \sum_{i,j,k,l}
        L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}

        s.t. \gamma 1 = p
             \gamma^T 1 = q
             \gamma \geq 0

    where :
    - M is the (ns,nt) metric cost matrix
    - p and q are source and target weights (sum to 1)
    - L is a loss function to account for the misfit between the similarity matrices

    The algorithm used for solving the problem is conditional gradient as discussed in [1]_

    Parameters
    ----------
    M : ndarray, shape (ns, nt)
        Metric cost matrix between features across domains
    C1 : ndarray, shape (ns, ns)
        Metric cost matrix representative of the structure in the source space.
    C2 : ndarray, shape (nt, nt)
        Metric cost matrix representative of the structure in the target space.
    p : ndarray, shape (ns,)
        Distribution in the source space.
    q : ndarray, shape (nt,)
        Distribution in the target space.
    loss_fun : str, optional
        Loss function used for the solver.
    alpha : float, optional
        Trade-off parameter (0 < alpha < 1)
    armijo : bool, optional
        If True the steps of the line-search is found via an armijo research.
        Else closed form is used. If there is convergence issues use False.
    log : bool, optional
        Record log if True.
    **kwargs : dict
        Parameters can be directly passed to the ot.optim.cg solver.

    Returns
    -------
    fgw_dist : float
        FGW distance for the given parameters.
    log : dict
        Log dictionary return only if log==True in parameters.

    References
    ----------
    .. [24] <NAME>, <NAME>, <NAME>, <NAME>
        and <NAME>
        "Optimal Transport for structured data with application on graphs"
        International Conference on Machine Learning (ICML). 2019.
    """
    constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun)

    # Product coupling as the conditional-gradient starting point.
    G0 = p[:, None] * q[None, :]

    def f(G):
        # Gromov-Wasserstein structure loss of the coupling G.
        return gwloss(constC, hC1, hC2, G)

    def df(G):
        # Gradient of the structure loss with respect to G.
        return gwggrad(constC, hC1, hC2, G)

    # BUG FIX: the solver's log dict used to be assigned to the *parameter*
    # `log`, so `if log:` always saw a truthy dict and the documented
    # "return only the distance when log=False" contract was never honored.
    # Keep the solver log in its own name and test the caller's flag.
    res, log_fgw = cg(p, q, (1 - alpha) * M, alpha, f, df, G0,
                      armijo=armijo, C1=C1, C2=C2, constC=constC, log=True, **kwargs)
    log_fgw['fgw_dist'] = log_fgw['loss'][-1]
    log_fgw['T'] = res
    if log:
        return log_fgw['fgw_dist'], log_fgw
    else:
        return log_fgw['fgw_dist']
def entropic_gromov_wasserstein(C1, C2, p, q, loss_fun, epsilon,
max_iter=1000, tol=1e-9, verbose=False, log=False):
"""
Returns the gromov-wasserstein transport between (C1,p) and (C2,q)
(C1,p) and (C2,q)
The function solves the following optimization problem:
.. math::
GW = arg\min_T \sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}-\epsilon(H(T))
s.t. T 1 = p
T^T 1= q
T\geq 0
Where :
- C1 : Metric cost matrix in the source space
- C2 : Metric cost matrix in the target space
- p : distribution in the source space
- q : distribution in the target space
- L : loss function to account for the misfit between the similarity matrices
- H : entropy
Parameters
----------
C1 : ndarray, shape (ns, ns)
Metric cost matrix in the source space
C2 : ndarray, shape (nt, nt)
Metric costfr matrix in the target space
p : ndarray, shape (ns,)
Distribution in the source space
q : ndarray, shape (nt,)
Distribution in the target space
loss_fun : string
Loss function used for the solver either 'square_loss' or 'kl_loss'
epsilon : float
Regularization term >0
max_iter : int, optional
Max number of iterations
tol : float, optional
Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
Record log if True.
Returns
-------
T : ndarray, shape (ns, nt)
Optimal coupling between the two spaces
References
----------
.. [12] <NAME>, <NAME>, and <NAME>,
"Gromov-Wasserstein averaging of kernel and distance matrices."
International Conference on Machine Learning (ICML). 2016.
"""
C1 = np.asarray(C1, dtype=np.float64)
C2 = np.asarray(C2, dtype=np.float64)
T = np.outer(p, q) # Initialization
constC, hC1, hC2 = init_matrix(C1, C2, p, q, loss_fun)
cpt = 0
err = 1
if log:
log = {'err': []}
while (err > tol and cpt < max_iter):
Tprev = T
# compute the gradient
tens = gwggrad(constC, hC1, hC2, T)
T = sinkhorn(p, q, | |
40: OoO0O00 / O0
if 60 - 60: iIii1I11I1II1 / Oo0Ooo / oO0o + iII111i
if 66 - 66: iIii1I11I1II1 . O0 * IiII . ooOoO0o + i1IIi
if 83 - 83: o0oOOo0O0Ooo / II111iiii + I1IiiI - iII111i + OoO0O00
iiiii1ii11iI . mask_address ( iiiii1ii11iI . mask_len )
if 67 - 67: I1Ii111 - OoOoOO00 . i11iIiiIii - I1Ii111 . i11iIiiIii
lprint ( ( "Least specific prefix computed from ddt-cache for EID {} " + "using auth-prefix {} is {}" ) . format ( green ( eid . print_address ( ) , False ) ,
# I11i + I11i
OO00oo0Oo , iiiii1ii11iI . print_prefix ( ) ) )
return ( iiiii1ii11iI )
if 42 - 42: OoOoOO00 % I1IiiI * Oo0Ooo * II111iiii + O0 - II111iiii
if 97 - 97: I1IiiI
if 87 - 87: I11i + iIii1I11I1II1
if 91 - 91: oO0o
if 58 - 58: i11iIiiIii / Ii1I - OoooooooOO
if 25 - 25: i1IIi * ooOoO0o % OOooOOo / I1IiiI
if 75 - 75: i11iIiiIii
if 38 - 38: iIii1I11I1II1
def lisp_ms_compute_neg_prefix ( eid , group ) :
    """Compute the least-specific ("negative") EID-prefix and group-prefix
    for a Map-Referral, plus the referral action to return.

    This code is machine-obfuscated; the `if N - N:` statements are no-op
    filler left by the obfuscator. Obfuscated-name key (inferred from the
    lprint messages and call sites -- verify against upstream lisp.py):
      iiiii1ii11iI - candidate negative EID-prefix being computed
      o0o00o0      - candidate negative group-prefix
      o0o0o        - covering authoritative prefix, if one exists
      oo0ooo       - DDT cache entry found for eid/group
      iiIii        - cache walked to narrow the negative EID-prefix
      OOo000       - referral action code
    Returns [neg-eid-prefix, neg-group-prefix, action].
    """
    # Start from zero-length copies of both addresses; the walks below
    # widen mask_len to the least-specific non-conflicting prefix.
    iiiii1ii11iI = lisp_address ( eid . afi , "" , 0 , 0 )
    iiiii1ii11iI . copy_address ( eid )
    iiiii1ii11iI . mask_len = 0
    o0o00o0 = lisp_address ( group . afi , "" , 0 , 0 )
    o0o00o0 . copy_address ( group )
    o0o00o0 . mask_len = 0
    o0o0o = None
    if 48 - 48: I1Ii111
    if 91 - 91: ooOoO0o / II111iiii % iIii1I11I1II1
    if 70 - 70: i1IIi - II111iiii / I1IiiI + OoooooooOO + i11iIiiIii / i1IIi
    if 80 - 80: i1IIi - iIii1I11I1II1 + OoooooooOO + ooOoO0o / IiII - I1ii11iIi11i
    if 90 - 90: I1IiiI * ooOoO0o - I11i + O0 - I11i
    # Unicast lookup (no group): key the DDT cache by EID.
    if ( group . is_null ( ) ) :
        oo0ooo = lisp_ddt_cache . lookup_cache ( eid , False )
        if ( oo0ooo == None ) :
            # No covering DDT entry: return host routes, not authoritative.
            iiiii1ii11iI . mask_len = iiiii1ii11iI . host_mask_len ( )
            o0o00o0 . mask_len = o0o00o0 . host_mask_len ( )
            return ( [ iiiii1ii11iI , o0o00o0 , LISP_DDT_ACTION_NOT_AUTH ] )
        if 59 - 59: OOooOOo % II111iiii
        iiIii = lisp_sites_by_eid
        if ( oo0ooo . is_auth_prefix ( ) ) : o0o0o = oo0ooo . eid
    else :
        # Multicast lookup: key the DDT cache by group address.
        oo0ooo = lisp_ddt_cache . lookup_cache ( group , False )
        if ( oo0ooo == None ) :
            iiiii1ii11iI . mask_len = iiiii1ii11iI . host_mask_len ( )
            o0o00o0 . mask_len = o0o00o0 . host_mask_len ( )
            return ( [ iiiii1ii11iI , o0o00o0 , LISP_DDT_ACTION_NOT_AUTH ] )
        if 38 - 38: IiII . IiII
        if ( oo0ooo . is_auth_prefix ( ) ) : o0o0o = oo0ooo . group
        if 53 - 53: II111iiii + Ii1I * o0oOOo0O0Ooo
        # Widen the negative group-prefix across the registered-sites cache.
        group , o0o0o , o0o00o0 = lisp_sites_by_eid . walk_cache ( lisp_neg_prefix_walk , ( group , o0o0o , o0o00o0 ) )
        if 47 - 47: Ii1I % OOooOOo . Oo0Ooo
        if 94 - 94: Ii1I - iIii1I11I1II1 + I1IiiI - iIii1I11I1II1 . o0oOOo0O0Ooo
        o0o00o0 . mask_address ( o0o00o0 . mask_len )
        if 3 - 3: O0 / I11i + OoOoOO00 % IiII / i11iIiiIii
        lprint ( ( "Least specific prefix computed from site-cache for " + "group EID {} using auth-prefix {} is {}" ) . format ( group . print_address ( ) , o0o0o . print_prefix ( ) if ( o0o0o != None ) else "'not found'" ,
        # (obfuscator filler comments removed)
        o0o00o0 . print_prefix ( ) ) )
        if 69 - 69: ooOoO0o * OoO0O00 % o0oOOo0O0Ooo * o0oOOo0O0Ooo
        # For (S,G) lookups the EID walk below uses the entry's source cache.
        iiIii = oo0ooo . source_cache
    if 35 - 35: I1IiiI . OOooOOo * OoO0O00 . I1ii11iIi11i - I1IiiI
    if 5 - 5: i1IIi * II111iiii
    if 64 - 64: I1IiiI * iIii1I11I1II1 % I1Ii111
    if 22 - 22: OoooooooOO + I1Ii111 . o0oOOo0O0Ooo * Oo0Ooo
    if 61 - 61: iIii1I11I1II1
    # Covering auth-prefix found => DELEGATION_HOLE, otherwise NOT_AUTH.
    OOo000 = LISP_DDT_ACTION_DELEGATION_HOLE if ( o0o0o != None ) else LISP_DDT_ACTION_NOT_AUTH
    if 95 - 95: I1ii11iIi11i + IiII * Ii1I - IiII
    if 58 - 58: I1ii11iIi11i - oO0o % I11i * O0
    if 43 - 43: OoOoOO00 + O0
    if 71 - 71: ooOoO0o * I1IiiI / I1ii11iIi11i
    if 8 - 8: I1Ii111 / iIii1I11I1II1
    if 29 - 29: i11iIiiIii % i1IIi + oO0o . I1ii11iIi11i
    # Walk the chosen cache, widening the negative EID-prefix as far as
    # possible without covering any more-specific entry.
    eid , o0o0o , iiiii1ii11iI = iiIii . walk_cache ( lisp_neg_prefix_walk ,
        ( eid , o0o0o , iiiii1ii11iI ) )
    if 51 - 51: OOooOOo + o0oOOo0O0Ooo . OOooOOo
    if 23 - 23: iIii1I11I1II1 + OoO0O00 / I1IiiI
    if 48 - 48: OoOoOO00 + I11i + oO0o . I1IiiI
    if 7 - 7: iII111i * i1IIi % OoOoOO00 % Ii1I . I1IiiI
    iiiii1ii11iI . mask_address ( iiiii1ii11iI . mask_len )
    if 53 - 53: OOooOOo / I11i + OOooOOo / I1IiiI / OoO0O00
    lprint ( ( "Least specific prefix computed from site-cache for EID {} " + "using auth-prefix {} is {}" ) . format ( green ( eid . print_address ( ) , False ) ,
    # (obfuscator filler comments removed)
    o0o0o . print_prefix ( ) if ( o0o0o != None ) else "'not found'" , iiiii1ii11iI . print_prefix ( ) ) )
    if 67 - 67: O0 . I1Ii111 + ooOoO0o
    if 88 - 88: I1Ii111 . O0 - oO0o + i1IIi % Oo0Ooo
    return ( [ iiiii1ii11iI , o0o00o0 , OOo000 ] )
if 39 - 39: I1Ii111 - I1IiiI
if 18 - 18: i1IIi
if 42 - 42: II111iiii - i1IIi . oO0o % OOooOOo % ooOoO0o - i11iIiiIii
if 23 - 23: OOooOOo + iIii1I11I1II1 - i1IIi
if 72 - 72: OOooOOo . I1IiiI * O0 + i11iIiiIii - iII111i
if 79 - 79: o0oOOo0O0Ooo + I1ii11iIi11i
if 46 - 46: I11i
if 78 - 78: IiII / II111iiii
def lisp_ms_send_map_referral ( lisp_sockets , map_request , ecm_source , port ,
action , eid_prefix , group_prefix ) :
if 55 - 55: Oo0Ooo
OOo0O0O0o0 = map_request . target_eid
O0o00oOOOO00 = map_request . target_group
oOO000 = map_request . nonce
if 80 - 80: o0oOOo0O0Ooo - I1Ii111 * O0 * iIii1I11I1II1
if ( action == LISP_DDT_ACTION_MS_ACK ) : oo0o = 1440
if 59 - 59: I1ii11iIi11i + I11i / OoO0O00
if 36 - 36: o0oOOo0O0Ooo + ooOoO0o * I11i
if 81 - 81: OOooOOo * I11i - I1ii11iIi11i
if 82 - 82: I1ii11iIi11i * II111iiii - OoooooooOO % iII111i * I1IiiI % OoOoOO00
O0OOoOoOO = lisp_map_referral ( )
O0OOoOoOO . record_count = 1
O0OOoOoOO . nonce = oOO000
IIii1i = O0OOoOoOO . encode ( )
O0OOoOoOO . print_map_referral ( )
if 81 - 81: I11i + o0oOOo0O0Ooo / iII111i
IiiIIiIi1i11i = False
if 35 - 35: ooOoO0o % I11i * I1ii11iIi11i
if 10 - 10: OoO0O00 + OoooooooOO + I1Ii111
if 57 - 57: Ii1I % Ii1I * Oo0Ooo % i11iIiiIii
if 12 - 12: oO0o . Oo0Ooo . I1IiiI - i11iIiiIii / o0oOOo0O0Ooo
if 54 - 54: i11iIiiIii + I1Ii111 . I1Ii111 * I1ii11iIi11i % I1Ii111 - OoooooooOO
if 76 - 76: IiII + i1IIi + i11iIiiIii . oO0o
if ( action == LISP_DDT_ACTION_SITE_NOT_FOUND ) :
eid_prefix , group_prefix , action = lisp_ms_compute_neg_prefix ( OOo0O0O0o0 ,
O0o00oOOOO00 )
oo0o = 15
if 23 - 23: ooOoO0o - OoO0O00 + oO0o . OOooOOo - I1IiiI
if ( action == LISP_DDT_ACTION_MS_NOT_REG ) : oo0o = 1
if ( action == LISP_DDT_ACTION_MS_ACK ) : oo0o = 1440
if ( action == LISP_DDT_ACTION_DELEGATION_HOLE ) : oo0o = 15
if ( action == LISP_DDT_ACTION_NOT_AUTH ) : oo0o = 0
if 66 - 66: iII111i % iII111i
oooO0oOOOO0 = False
OOO0Oo0o = 0
oo0ooo = lisp_ddt_cache_lookup ( OOo0O0O0o0 , O0o00oOOOO00 , False )
if ( oo0ooo != None ) :
OOO0Oo0o = len ( oo0ooo | |
{
'first_name': '',
'last_name': '',
'phone_number': '',
'newsletter': True,
'owner': True
}
form = MyInfoForm(data=data)
self.assertFalse(form.is_valid())
def test_my_info_form_bad_names(self):
data = {
'first_name': '$',
'last_name': 'E',
'phone_number': '415-413-4403',
'newsletter': True,
'owner': True
}
form = MyInfoForm(data=data)
self.assertFalse(form.is_valid())
def test_my_info_form_bad_phone_number(self):
data = {
'first_name': 'Eric',
'last_name': 'Emmerson',
'phone_number': '555-555-5555',
'newsletter': True,
'owner': True
}
form = MyInfoForm(data=data)
self.assertFalse(form.is_valid())
def test_my_info_form_valid(self):
data = {
'first_name': 'Eric',
'last_name': 'Emmerson',
'phone_number': '415-413-4403',
'newsletter': True,
'owner': True
}
form = MyInfoForm(data=data)
self.assertTrue(form.is_valid())
"""
Test household_profile view and form
"""
def test_household_profile_view(self):
self.client = Client()
logged_in = self.client.login(username='fred', password='password')
self.assertEquals(logged_in, True)
rig= RigType.objects.get(rig_type='Motorhome')
use = UseType.objects.get(use_type='Full-time')
income = IncomeType.objects.get(income_type='Self-employed')
# Test create
data = {
'start_year': 1992,
'members_in_household': 2,
'oldest_birthyear': 1952,
'rig_type': rig.pk,
'use_type': use.pk,
'income_type': income.pk,
'pets_dog': 1,
'pets_cat': 0,
'pets_other': 1,
'children': 0,
'children_status': 0,
'grandchildren': 0,
'grandchildren_status': 0
}
response = self.client.post(reverse('household:maintain_household'), data=data, secure=True)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username='fred')
account = Account.objects.get(user=user)
household = RVHousehold.objects.get(householdmembers__member_account=account)
self.assertEquals(household.start_year, 1992)
my_group = MyBudgetGroup.objects.get(my_group_name='Health Care')
self.assertEquals(str(my_group), 'Health Care')
my_category = MyBudgetCategory.objects.get(my_category_name='Insurance')
self.assertEquals(str(my_category), 'Insurance')
# Test update
data = {
'start_year': 1994,
'members_in_household': 2,
'oldest_birthyear': 1952,
'rig_type': rig.pk,
'use_type': use.pk,
'income_type': income.pk,
'pets_dog': 1,
'pets_cat': 1,
'pets_other': 0,
'children': 0,
'children_status': 0,
'grandchildren': 0,
'grandchildren_status': 0
}
response = self.client.post(reverse('household:maintain_household'), data=data, secure=True)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username='fred')
account = Account.objects.get(user=user)
household = RVHousehold.objects.get(householdmembers__member_account=account)
self.assertEquals(household.start_year, 1994)
def test_household_profile_form_empty(self):
data = {
'start_year': 0,
'members_in_household': 0,
'oldest_birthyear': 0,
'rig_type': 0,
'use_type': 0,
'income_type': 0,
'pets_dog': 0,
'pets_cat': 0,
'pets_other': 0,
'children': 0,
'children_status': 0,
'grandchildren': 0,
'grandchildren_status': 0
}
form = HouseholdProfileForm(data=data)
self.assertFalse(form.is_valid())
def test_household_profile_form_need_rig_use_income(self):
data = {
'start_year': 2011,
'members_in_household': 2,
'oldest_birthyear': 1963,
'rig_type': 0,
'use_type': 0,
'income_type': 0,
'pets_dog': 0,
'pets_cat': 0,
'pets_other': 0,
'children': 0,
'children_status': 0,
'grandchildren': 0,
'grandchildren_status': 0
}
form = HouseholdProfileForm(data=data)
self.assertFalse(form.is_valid())
def test_household_profile_form_too_many_pets(self):
data = {
'start_year': 2011,
'members_in_household': 2,
'oldest_birthyear': 1963,
'rig_type': 1,
'use_type': 1,
'income_type': 1,
'pets_dog': 11,
'pets_cat': 0,
'pets_other': 0,
'children': 0,
'children_status': 0,
'grandchildren': 0,
'grandchildren_status': 0
}
form = HouseholdProfileForm(data=data)
self.assertFalse(form.is_valid())
def test_household_profile_form_invalid_children_status(self):
data = {
'start_year': 2011,
'members_in_household': 2,
'oldest_birthyear': 1963,
'rig_type': 1,
'use_type': 1,
'income_type': 1,
'pets_dog': 1,
'pets_cat': 0,
'pets_other': 0,
'children': 1,
'children_status': 0, # <--
'grandchildren': 1,
'grandchildren_status': 0 # <--
}
form = HouseholdProfileForm(data=data)
self.assertFalse(form.is_valid())
def test_household_profile_form_invalid_children_count(self):
data = {
'start_year': 2011,
'members_in_household': 2,
'oldest_birthyear': 1963,
'rig_type': 1,
'use_type': 1,
'income_type': 1,
'pets_dog': 1,
'pets_cat': 0,
'pets_other': 0,
'children': 0, # <--
'children_status': 1,
'grandchildren': 0, # <--
'grandchildren_status': 1
}
form = HouseholdProfileForm(data=data)
self.assertFalse(form.is_valid())
def test_household_profile_form_valid(self):
data = {
'start_year': 2011,
'members_in_household': 2,
'oldest_birthyear': 1963,
'rig_type': 1,
'use_type': 1,
'income_type': 1,
'pets_dog': 1,
'pets_cat': 1,
'pets_other': 0,
'children': 0,
'children_status': 0,
'grandchildren': 0,
'grandchildren_status': 0
}
form = HouseholdProfileForm(data=data)
self.assertTrue(form.is_valid())
"""
Test household_members view, form, and ajax calls
"""
def test_household_members_view(self):
self.client = Client()
# Redirect for expired subscription
logged_in = self.client.login(username='greg', password='password')
self.assertEquals(logged_in, True)
response = self.client.get(reverse('household:maintain_members'), follow=True, secure=True)
chain = response.redirect_chain[0]
self.assertEquals(chain[0], '/household/settings')
self.assertEquals(chain[1], 302)
self.assertEquals(response.status_code, 200)
# Redirect because non-owner cannot invite other members
logged_in = self.client.login(username='annie', password='password')
self.assertEquals(logged_in, True)
response = self.client.get(reverse('household:maintain_members'), follow=True, secure=True)
chain = response.redirect_chain[0]
self.assertEquals(chain[0], '/household/settings')
self.assertEquals(chain[1], 302)
self.assertEquals(response.status_code, 200)
# Test invitation
logged_in = self.client.login(username='harry', password='password')
self.assertEquals(logged_in, True)
data = {
'email': '<EMAIL>'
}
response = self.client.post(reverse('household:maintain_members'), data=data, secure=True)
self.assertEqual(response.status_code, 200)
current = response.context['current'].filter(username='annie')
self.assertEquals(len(current), 1)
pending = response.context['pending'].filter(email='<EMAIL>')
self.assertEquals(len(pending), 1)
invite = HouseholdInvite.objects.get(email='<EMAIL>')
self.assertEquals(str(invite), '<EMAIL>')
def test_member_invite_form_email_already_exists_account(self):
data = {
'email': '<EMAIL>'
}
form = InviteMemberForm(data=data)
self.assertFalse(form.is_valid())
def test_member_invite_form_email_already_exists_invite(self):
data = {
'email': '<EMAIL>'
}
form = InviteMemberForm(data=data)
self.assertFalse(form.is_valid())
def test_member_invite_form_email_invalid(self):
data = {
'email': 'nono.nono.com'
}
form = InviteMemberForm(data=data)
self.assertFalse(form.is_valid())
def test_member_invite_form_email_valid(self):
data = {
'email': '<EMAIL>'
}
form = InviteMemberForm(data=data)
self.assertTrue(form.is_valid())
def test_ajax_delete_invite(self):
self.client = Client()
logged_in = self.client.login(username='harry', password='password')
self.assertEquals(logged_in, True)
# Invalid data tests
data0 = {
'id': '$', # <--
'user': 'harry'
}
response = self.client.post('/household/ajax/delete-invite/', data=data0, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'ERROR')
data1 = {
'id': 99999999,
'user': '$rie9%!' # <--
}
response = self.client.post('/household/ajax/delete-invite/', data=data1, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'ERROR')
# Invalid id; does not exist
data2 = {
'id': 99999999,
'user': 'harry'
}
response = self.client.post('/household/ajax/delete-invite/', data=data2, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'ERROR')
# Validate user name provided is same as user name logged in
data3 = {
'id': 99999999,
'user': 'greg'
}
response = self.client.post('/household/ajax/delete-invite/', data=data3, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'ERROR')
invite = HouseholdInvite.objects.get(email='<EMAIL>')
data = {
'id': invite.pk,
'user': 'harry'
}
response = self.client.post('/household/ajax/delete-invite/', data=data, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'OK')
invite = HouseholdInvite.objects.filter(email='<EMAIL>')
self.assertEquals(len(invite), 0)
def test_ajax_change_member_status(self):
self.client = Client()
logged_in = self.client.login(username='harry', password='password')
self.assertEquals(logged_in, True)
# Invalid data tests
data0 = {
'username': 'annie',
'user': '$', # <--
'status': 'Deactivate'
}
response = self.client.post('/household/ajax/change-member-status/', data=data0, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'ERROR')
data1 = {
'username': 'annie',
'user': 'harry',
'status': 'Off' # <--
}
response = self.client.post('/household/ajax/change-member-status/', data=data1, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'ERROR')
# User (username) does not exist
data2 = {
'username': 'ann-marie',
'user': 'harry',
'status': 'Deactivate'
}
response = self.client.post('/household/ajax/change-member-status/', data=data2, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'ERROR')
# User (username) is not member of given household (user)
data3 = {
'username': 'annie',
'user': 'greg',
'status': 'Deactivate'
}
response = self.client.post('/household/ajax/change-member-status/', data=data3, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'ERROR')
# Deactivate user (username) account
data_a = {
'username': 'annie',
'user': 'harry',
'status': 'Deactivate'
}
response = self.client.post('/household/ajax/change-member-status/', data=data_a, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'OK')
user = User.objects.get(username='annie')
self.assertFalse(user.is_active)
# Reactivate user (username) account
data_b = {
'username': 'annie',
'user': 'harry',
'status': 'Activate'
}
response = self.client.post('/household/ajax/change-member-status/', data=data_b, secure=True)
result = json.loads(response.content)
self.assertEquals(response.status_code, 200)
self.assertEquals(result['status'], 'OK')
user = User.objects.get(username='annie')
self.assertTrue(user.is_active)
"""
Test household_vehicles view, form, and ajax calls
"""
def test_household_vehicles_view(self):
self.client = Client()
logged_in = self.client.login(username='harry', password='password')
self.assertEquals(logged_in, True)
# Setup by getting objects for foreign keys
vehicle_type = VehicleType.objects.get(type='Motorhome')
vehicle_make = VehicleMake.objects.get(make='Tiffin')
vehicle_model = VehicleModel.objects.get(model_name='Allegro Bus')
purchase_type = VehiclePurchaseType.objects.get(purchase_type='Used-Private')
satisfaction = Satisfaction.objects.get(satisfaction_index=5)
vehicle_status = VehicleStatus.objects.get(vehicle_status='Owner')
# Data to create vehicle record
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-type': vehicle_type,
'form-0-make': vehicle_make,
'form-0-model_name': vehicle_model,
'form-0-model_year': 2006,
'form-0-fuel': 'Diesel',
'form-0-purchase_year': 2015,
'form-0-purchase_price': 50000.00,
'form-0-purchase_type': purchase_type,
'form-0-finance': 'Cash',
'form-0-satisfaction': satisfaction,
'form-0-status': vehicle_status,
'form-0-gone_year': 0
}
response = self.client.post(reverse('household:maintain_vehicles'), data=data, secure=True)
self.assertEqual(response.status_code, 200)
def test_vehicle_form_empty(self):
data = {
'type': 0,
'make': 0,
'model_name': 0,
'model_year': 0,
'fuel': 0,
'purchase_year': 0,
'purchase_price': 0,
'purchase_type': 0,
'finance': 0,
'satisfaction': 0,
'status': 0,
'gone_year': 0
}
form = VehicleForm(data=data)
self.assertFalse(form.is_valid())
def test_vehicle_form_bad_model_year(self):
data = {
'type': 1,
'make': 1,
'model_name': 1,
'model_year': 1800,
'fuel': 1,
'purchase_year': 2016,
'purchase_price': 50.00,
'purchase_type': 1,
'finance': 1,
'satisfaction': 1,
'status': 1,
'gone_year': 0
}
form = VehicleForm(data=data)
self.assertFalse(form.is_valid())
data['model_year'] = 2080
form = VehicleForm(data=data)
self.assertFalse(form.is_valid())
def test_vehicle_form_bad_purchase_year(self):
data = {
'type': 1,
'make': 1,
'model_name': 1,
'model_year': 2016,
'fuel': 1,
'purchase_year': 1900, # <-- too far in the past
'purchase_price': 50.00,
'purchase_type': 1,
'finance': 1,
'satisfaction': 1,
'status': 1,
'gone_year': 0
}
form = VehicleForm(data=data)
self.assertFalse(form.is_valid())
data['purchase_year'] = 2080 # <-- too far into the future
form = VehicleForm(data=data)
self.assertFalse(form.is_valid())
def test_vehicle_form_bad_purchase_price(self):
data = {
'type': 1,
'make': 1,
'model_name': 1,
'model_year': 2016,
'fuel': 1,
'purchase_year': 2016,
'purchase_price': .99,
'purchase_type': 1,
'finance': 1,
'satisfaction': 1,
'status': 1,
'gone_year': 0
}
form = VehicleForm(data=data)
self.assertFalse(form.is_valid())
def test_vehicle_form_gone_year_in_future(self):
data = {
'type': 1,
'make': 1,
'model_name': 1,
'model_year': 2012,
'fuel': 1,
'purchase_year': 2014,
'purchase_price': 50000.00,
'purchase_type': 1,
'finance': 1,
'satisfaction': 1,
'status': 1,
'gone_year': 2080
}
form = VehicleForm(data=data)
self.assertFalse(form.is_valid())
def test_vehicle_form_valid(self):
data = {
'type': 1,
'make': 1,
'model_name': 1,
'model_year': 2012,
'fuel': 1,
'purchase_year': 2014,
'purchase_price': 50000.00,
'purchase_type': 1,
'finance': 1,
'satisfaction': 1,
'status': 1,
'gone_year': 0
}
form = VehicleForm(data=data)
self.assertTrue(form.is_valid())
def test_ajax_makes_by_type(self):
vehicle_type = VehicleType.objects.get(type='Motorhome')
response = self.client.get('/household/ajax/makes-by-type/' + str(vehicle_type.pk) + '/', secure=True)
result = json.loads(response.content)
make = VehicleMake.objects.filter(filter=vehicle_type.filter)[0]
self.assertTrue(str(make.pk) in result)
def test_ajax_models_by_make(self):
vehicle_make = VehicleMake.objects.get(make='Tiffin')
response = self.client.get('/household/ajax/models-by-make/' + str(vehicle_make.pk) | |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
import os
import six
from toscaparser.elements.interfaces import InterfacesDef
from toscaparser.functions import GetInput
from toscaparser.nodetemplate import NodeTemplate
from toscaparser.utils.gettextutils import _
# HOT resource section keys; the chained tuple assignment also binds each
# constant (TYPE, PROPERTIES, ...) to its lowercase section string.
# NOTE(review): "MEDADATA" is a misspelling of METADATA, but renaming the
# constant would break any module that imports it by this name.
SECTIONS = (TYPE, PROPERTIES, MEDADATA, DEPENDS_ON, UPDATE_POLICY,
            DELETION_POLICY) = \
           ('type', 'properties', 'metadata',
            'depends_on', 'update_policy', 'deletion_policy')
# TOSCA policy types this translator recognizes.
policy_type = ['tosca.policies.Placement',
               'tosca.policies.Scaling',
               'tosca.policies.Scaling.Cluster']
# Module-level logger shared by the heat-translator package.
log = logging.getLogger('heat-translator')
class HotResource(object):
'''Base class for TOSCA node type translation to Heat resource type.'''
def __init__(self, nodetemplate, name=None, type=None, properties=None,
metadata=None, depends_on=None,
update_policy=None, deletion_policy=None, csar_dir=None):
log.debug(_('Translating TOSCA node type to HOT resource type.'))
self.nodetemplate = nodetemplate
if name:
self.name = name
else:
self.name = nodetemplate.name
self.type = type
self.properties = properties or {}
self.csar_dir = csar_dir
# special case for HOT softwareconfig
cwd = os.getcwd()
if type == 'OS::Heat::SoftwareConfig':
config = self.properties.get('config')
if isinstance(config, dict):
if self.csar_dir:
os.chdir(self.csar_dir)
implementation_artifact = os.path.abspath(config.get(
'get_file'))
else:
implementation_artifact = config.get('get_file')
if implementation_artifact:
filename, file_extension = os.path.splitext(
implementation_artifact)
file_extension = file_extension.lower()
# artifact_types should be read to find the exact script
# type, unfortunately artifact_types doesn't seem to be
# supported by the parser
if file_extension == '.ansible' \
or file_extension == '.yaml' \
or file_extension == '.yml':
self.properties['group'] = 'ansible'
if file_extension == '.pp':
self.properties['group'] = 'puppet'
if self.properties.get('group') is None:
self.properties['group'] = 'script'
os.chdir(cwd)
self.metadata = metadata
# The difference between depends_on and depends_on_nodes is
# that depends_on defines dependency in the context of the
# HOT template and it is used during the template output.
# Depends_on_nodes defines the direct dependency between the
# tosca nodes and is not used during the output of the
# HOT template but for internal processing only. When a tosca
# node depends on another node it will be always added to
# depends_on_nodes but not always to depends_on. For example
# if the source of dependency is a server, the dependency will
# be added as properties.get_resource and not depends_on
if depends_on:
self.depends_on = depends_on
self.depends_on_nodes = depends_on
else:
self.depends_on = []
self.depends_on_nodes = []
self.update_policy = update_policy
self.deletion_policy = deletion_policy
self.group_dependencies = {}
# if hide_resource is set to true, then this resource will not be
# generated in the output yaml.
self.hide_resource = False
    def handle_properties(self):
        # the property can hold a value or the intrinsic function get_input
        # for value, copy it
        # for get_input, convert to get_param
        # NOTE(review): this base implementation is a stub -- it iterates the
        # node template's properties but performs no translation; subclasses
        # presumably override it to emit HOT properties. TODO confirm.
        for prop in self.nodetemplate.get_properties_objects():
            pass
def handle_life_cycle(self):
hot_resources = []
deploy_lookup = {}
# TODO(anyone): sequence for life cycle needs to cover different
# scenarios and cannot be fixed or hard coded here
operations_deploy_sequence = ['create', 'configure', 'start']
operations = HotResource.get_all_operations(self.nodetemplate)
# create HotResource for each operation used for deployment:
# create, start, configure
# ignore the other operations
# observe the order: create, start, configure
# use the current HotResource for the first operation in this order
# hold the original name since it will be changed during
# the transformation
node_name = self.name
reserve_current = 'NONE'
for operation in operations_deploy_sequence:
if operation in operations.keys():
reserve_current = operation
break
# create the set of SoftwareDeployment and SoftwareConfig for
# the interface operations
hosting_server = None
if self.nodetemplate.requirements is not None:
hosting_server = self._get_hosting_server()
sw_deployment_resouce = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resouce.server_key
servers = sw_deployment_resouce.servers
sw_deploy_res = sw_deployment_resouce.software_deployment
# hosting_server is None if requirements is None
hosting_on_server = hosting_server if hosting_server else None
base_type = HotResource.get_base_type_str(
self.nodetemplate.type_definition)
# if we are on a compute node the host is self
if hosting_on_server is None and base_type == 'tosca.nodes.Compute':
hosting_on_server = self.name
servers = {'get_resource': self.name}
cwd = os.getcwd()
for operation in operations.values():
if operation.name in operations_deploy_sequence:
config_name = node_name + '_' + operation.name + '_config'
deploy_name = node_name + '_' + operation.name + '_deploy'
if self.csar_dir:
os.chdir(self.csar_dir)
get_file = os.path.abspath(operation.implementation)
else:
get_file = operation.implementation
hot_resources.append(
HotResource(self.nodetemplate,
config_name,
'OS::Heat::SoftwareConfig',
{'config':
{'get_file': get_file}},
csar_dir=self.csar_dir))
if operation.name == reserve_current and \
base_type != 'tosca.nodes.Compute':
deploy_resource = self
self.name = deploy_name
self.type = sw_deploy_res
self.properties = {'config': {'get_resource': config_name},
server_key: servers}
deploy_lookup[operation] = self
else:
sd_config = {'config': {'get_resource': config_name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
deploy_lookup[operation] = deploy_resource
lifecycle_inputs = self._get_lifecycle_inputs(operation)
if lifecycle_inputs:
deploy_resource.properties['input_values'] = \
lifecycle_inputs
os.chdir(cwd)
# Add dependencies for the set of HOT resources in the sequence defined
# in operations_deploy_sequence
# TODO(anyone): find some better way to encode this implicit sequence
group = {}
op_index_min = None
op_index_max = -1
for op, hot in deploy_lookup.items():
# position to determine potential preceding nodes
op_index = operations_deploy_sequence.index(op.name)
if op_index_min is None or op_index < op_index_min:
op_index_min = op_index
if op_index > op_index_max:
op_index_max = op_index
for preceding_op_name in \
reversed(operations_deploy_sequence[:op_index]):
preceding_hot = deploy_lookup.get(
operations.get(preceding_op_name))
if preceding_hot:
hot.depends_on.append(preceding_hot)
hot.depends_on_nodes.append(preceding_hot)
group[preceding_hot] = hot
break
if op_index_max >= 0:
last_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_max]))
else:
last_deploy = None
# save this dependency chain in the set of HOT resources
self.group_dependencies.update(group)
for hot in hot_resources:
hot.group_dependencies.update(group)
roles_deploy_resource = self._handle_ansiblegalaxy_roles(
hot_resources, node_name, servers)
# add a dependency to this ansible roles deploy to
# the first "classic" deploy generated for this node
if roles_deploy_resource and op_index_min:
first_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_min]))
first_deploy.depends_on.append(roles_deploy_resource)
first_deploy.depends_on_nodes.append(roles_deploy_resource)
return hot_resources, deploy_lookup, last_deploy
def _handle_ansiblegalaxy_roles(self, hot_resources, initial_node_name,
hosting_on_server):
artifacts = self.get_all_artifacts(self.nodetemplate)
install_roles_script = ''
sw_deployment_resouce = \
HOTSoftwareDeploymentResources(hosting_on_server)
server_key = sw_deployment_resouce.server_key
sw_deploy_res = sw_deployment_resouce.software_deployment
for artifact_name, artifact in artifacts.items():
artifact_type = artifact.get('type', '').lower()
if artifact_type == 'tosca.artifacts.ansiblegalaxy.role':
role = artifact.get('file', None)
if role:
install_roles_script += 'ansible-galaxy install ' + role \
+ '\n'
if install_roles_script:
# remove trailing \n
install_roles_script = install_roles_script[:-1]
# add shebang and | to use literal scalar type (for multiline)
install_roles_script = '|\n#!/bin/bash\n' + install_roles_script
config_name = initial_node_name + '_install_roles_config'
deploy_name = initial_node_name + '_install_roles_deploy'
hot_resources.append(
HotResource(self.nodetemplate, config_name,
'OS::Heat::SoftwareConfig',
{'config': install_roles_script},
csar_dir=self.csar_dir))
sd_config = {'config': {'get_resource': config_name},
server_key: hosting_on_server}
deploy_resource = \
HotResource(self.nodetemplate, deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
return deploy_resource
def handle_connectsto(self, tosca_source, tosca_target, hot_source,
hot_target, config_location, operation):
# The ConnectsTo relationship causes a configuration operation in
# the target.
# This hot resource is the software config portion in the HOT template
# This method adds the matching software deployment with the proper
# target server and dependency
if config_location == 'target':
hosting_server = hot_target._get_hosting_server()
hot_depends = hot_target
elif config_location == 'source':
hosting_server = self._get_hosting_server()
hot_depends = hot_source
sw_deployment_resouce = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resouce.server_key
servers = sw_deployment_resouce.servers
sw_deploy_res = sw_deployment_resouce.software_deployment
deploy_name = tosca_source.name + '_' + tosca_target.name + \
'_connect_deploy'
sd_config = {'config': {'get_resource': self.name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config,
depends_on=[hot_depends], csar_dir=self.csar_dir)
connect_inputs = self._get_connect_inputs(config_location, operation)
if connect_inputs:
deploy_resource.properties['input_values'] = connect_inputs
return deploy_resource
    def handle_expansion(self):
        """Hook for expanding this resource into additional HOT resources;
        intentionally a no-op for the base resource type."""
        pass
    def handle_hosting(self):
        """Resolve the hosting server reference of a software deployment.

        Normalizes the 'server'/'servers' properties: drops a redundant
        'server' when 'servers' is present, and replaces a NodeTemplate
        reference with the plain resource name that HOT expects.
        """
        # handle hosting server for the OS:HEAT::SoftwareDeployment
        # from the TOSCA nodetemplate, traverse the relationship chain
        # down to the server
        sw_deploy_group = \
            HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_GROUP_RESOURCE
        sw_deploy = HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_RESOURCE
        # 'servers' wins when both keys were populated
        if self.properties.get('servers') and \
                self.properties.get('server'):
            del self.properties['server']
        if self.type == sw_deploy_group or self.type == sw_deploy:
            # skip if already have hosting
            # If type is NodeTemplate, look up corresponding HotResrouce
            host_server = self.properties.get('servers') \
                or self.properties.get('server')
            if host_server is None:
                raise Exception(_("Internal Error: expecting host "
                                  "in software deployment"))
            elif isinstance(host_server.get('get_resource'), NodeTemplate):
                # NOTE(review): writes into 'server' even though host_server
                # may have come from 'servers' -- confirm this asymmetry is
                # intentional
                self.properties['server']['get_resource'] = \
                    host_server['get_resource'].name
            elif isinstance(host_server, dict) and \
                    not host_server.get('get_resource'):
                self.properties['servers'] = \
                    host_server
def top_of_chain(self):
dependent = self.group_dependencies.get(self)
if dependent is None:
return self
else:
return dependent.top_of_chain()
    # this function allows providing substacks as external files
# those files will be dumped along the output file.
#
# return a dict of filename-content
    def extract_substack_templates(self, base_filename, hot_template_version):
        """Return substack templates as a {filename: content} dict to be
        dumped alongside the output file; none by default."""
        return {}
# this function asks the resource to embed substacks
# into | |
<reponame>johnmgregoire/JCAPdatavis<filename>echem_plate_ui.py
import time
import os, os.path
import sys
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import operator
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import numpy.ma as ma
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import pylab
import pickle
from echem_plate_math import *
from echem_plate_fcns import *
# repository root: two directory levels above this file
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
from matplotlib.ticker import FuncFormatter
from matplotlib.ticker import ScalarFormatter
# force matplotlib's Qt4 backend binding to PyQt4 (not PySide)
matplotlib.rcParams['backend.qt4'] = 'PyQt4'
def myexpformat_4digs(x, pos):
    """Matplotlib FuncFormatter callback: render x in 3-decimal scientific
    notation.  `pos` is required by the FuncFormatter signature but unused.
    """
    # (removed long-dead commented-out variant that trimmed the exponent)
    return '%.3e' %x
# module-level tick formatters: scientific notation vs. plain numbers
ExpTickLabels=FuncFormatter(myexpformat_4digs)
RegTickLabels=matplotlib.ticker.ScalarFormatter()
def autotickformat(ax, x=False, y=False, ndec=3):
    """Pick exponential vs. regular tick labels per axis by magnitude.

    For each axis enabled via x/y, switch to scientific-notation tick
    labels when the axis limits fall below 10**-ndec or above 10**ndec
    in magnitude; otherwise use the regular formatter.
    """
    for bl, xax, lims in zip([x, y], [ax.xaxis, ax.yaxis], [ax.get_xlim(), ax.get_ylim()]):
        if bl:
            try:
                doit=numpy.max(numpy.log10(numpy.abs(numpy.array(lims))))<(-ndec)
                doit=doit or numpy.min(numpy.log10(numpy.abs(numpy.array(lims))))>ndec
            except:
                # NOTE(review): bare except -- presumably guards log10 of a
                # zero limit; worth narrowing to the actual failure
                print 'error on axis formatter for lims ', lims
                continue
            if doit:
                xax.set_major_formatter(ExpTickLabels)
            else:
                xax.set_major_formatter(RegTickLabels)
def autocolorbarformat(lims, ndec=3):
    """Return the tick formatter suited to the given colorbar limits.

    Returns the exponential formatter when the limits fall below
    10**-ndec or above 10**ndec in magnitude, the regular formatter
    otherwise, and None when the magnitude test itself fails.
    """
    try:
        doit=numpy.max(numpy.log10(numpy.abs(numpy.array(lims))))<(-ndec)
        doit=doit or numpy.min(numpy.log10(numpy.abs(numpy.array(lims))))>ndec
    except:
        # NOTE(review): bare except -- presumably guards log10 of a zero
        # limit; worth narrowing
        print 'error on axis formatter for lims ', lims
        return
    if doit:
        return ExpTickLabels
    else:
        return RegTickLabels
# remember the launch directory, then pull in sibling plotting / DB packages
wd=os.getcwd()
sys.path.append(os.path.join(PyCodePath,'PythonCompositionPlots'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
from quaternary_FOM_stackedtern2 import *
from quaternary_FOM_stackedtern20 import *
from quaternary_FOM_stackedtern30 import *
from quaternary_FOM_stackedtern9of100 import *
from quaternary_FOM_bintern import *
sys.path.append(os.path.join(PyCodePath,'JCAPPyDBComm'))
try:
    from mysql_dbcommlib import *
except:
    print 'JCAPPyDBComm not found, do not use option 1'
    pass
sys.path.append(os.path.join(PyCodePath, 'PythonCodeSecureFiles'))
try:
    # paths.py provides EchemSavePath; cd there when available
    from paths import *
    if os.path.isdir(EchemSavePath):
        os.chdir(EchemSavePath)
    # NOTE(review): message below says 'PythonCompositionPlots' but this
    # block imports from PythonCodeSecureFiles -- likely a copy/paste slip
except:
    print 'PythonCompositionPlots not found'
    pass
class messageDialog(QDialog):
    """Minimal Ok/Cancel confirmation dialog with a configurable title.

    Accepting the dialog also triggers ExitRoutine, which subclasses may
    override to run work on confirmation.
    """
    def __init__(self, parent=None, title=''):
        super(messageDialog, self).__init__(parent)
        self.setWindowTitle(title)
        mainlayout=QGridLayout()
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
        QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
        QObject.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
        mainlayout.addWidget(self.buttonBox, 0, 0)
        QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
        # bugfix: the layout was built and populated but never attached to
        # the dialog (every other dialog in this file calls setLayout)
        self.setLayout(mainlayout)
    def ExitRoutine(self):
        """Hook run on accept; default does nothing."""
        return
def mygetopenfile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
    """Show a file-open dialog and return the chosen path as unicode.

    When no parent widget is supplied, a throwaway QApplication/QWidget
    pair is created so the dialog can run standalone.
    """
    startpath=os.path.join(xpath, filename).replace('\\','/')
    if parent is not None:
        return unicode(QFileDialog.getOpenFileName(parent,''.join(['Select file to open: ', markstr]),startpath))
    xapp = QApplication(sys.argv)
    xparent = QWidget()
    try:
        return unicode(QFileDialog.getOpenFileName(xparent,''.join(['Select file to open:', markstr]),startpath))
    finally:
        xparent.destroy()
        xapp.quit()
def mygetopenfiles(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
    """Show a multi-file-open dialog and return the chosen paths as a
    list of str.  A temporary QApplication/QWidget pair is created when
    no parent widget is supplied.
    """
    startpath=os.path.join(xpath, filename).replace('\\','/')
    if parent is None:
        xapp = QApplication(sys.argv)
        xparent = QWidget()
        chosen=QFileDialog.getOpenFileNames(xparent,''.join(['Select file to open:', markstr]),startpath)
        xparent.destroy()
        xapp.quit()
    else:
        chosen=QFileDialog.getOpenFileNames(parent,''.join(['Select file to open: ', markstr]),startpath)
    return [str(s) for s in chosen]
def mygetsavefile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
    """Show a file-save dialog and return the chosen path as unicode.

    A temporary QApplication/QWidget pair is created when no parent
    widget is supplied.
    """
    startpath=os.path.join(xpath, filename).replace('\\','/')
    caption=''.join(['Select file for save: ', markstr])
    if parent is not None:
        return unicode(QFileDialog.getSaveFileName(parent,caption,startpath))
    xapp = QApplication(sys.argv)
    xparent = QWidget()
    try:
        return unicode(QFileDialog.getSaveFileName(xparent,caption,startpath))
    finally:
        xparent.destroy()
        xapp.quit()
def mygetdir(parent=None, xpath="%s" % os.getcwd(),markstr='' ):
    """Show a directory-selection dialog and return the choice as unicode.

    A temporary QApplication/QWidget pair is created when no parent
    widget is supplied.
    """
    caption=''.join(['Select directory:', markstr])
    if parent is not None:
        return unicode(QFileDialog.getExistingDirectory(parent,caption, xpath))
    xapp = QApplication(sys.argv)
    xparent = QWidget()
    try:
        return unicode(QFileDialog.getExistingDirectory(xparent,caption, xpath))
    finally:
        xparent.destroy()
        xapp.quit()
def userinputcaller(parent, inputs=[('testnumber', int)], title='Enter values', cancelallowed=True):
    """Repeatedly show a userinputDialog until every field converts cleanly.

    Returns the list of converted values, or None when the user cancels
    and cancelallowed is True.  `inputs` is a list of (label, type)
    tuples; the default list is never mutated (only rebound below).
    """
    problem=True
    while problem:
        idialog=userinputDialog(parent, inputs, title)
        idialog.exec_()
        problem=idialog.problem
        if not idialog.ok and cancelallowed:
            return None
        # re-seed the next dialog with the strings just entered so the
        # user only fixes the offending field
        inputs=[(tup[0], tup[1], s) for tup, s in zip(inputs, idialog.inputstrlist)]
    return idialog.ans
class userinputDialog(QDialog):
    """Dialog presenting one labeled QLineEdit per requested input field.

    `inputs` is a list of (label, type) or (label, type, initial_string)
    tuples; on accept, ExitRoutine converts each entry and collects the
    results in self.ans, setting self.problem on any conversion failure.
    """
    def __init__(self, parent, inputs=[('testnumber', int, '')], title='Enter values'):
        # NOTE(review): mutable default `inputs` -- stored on self but never
        # mutated here, so safe in practice
        super(userinputDialog, self).__init__(parent)
        self.setWindowTitle(title)
        mainlayout=QGridLayout()
        self.parent=parent
        self.inputs=inputs
        self.lelist=[]
        # one label + line-edit column per requested input
        for i, tup in enumerate(self.inputs):
            lab=QLabel()
            lab.setText(tup[0])
            le=QLineEdit()
            if len(tup)>2:
                le.setText(tup[2])
            self.lelist+=[le]
            mainlayout.addWidget(lab, 0, i, 1, 1)
            mainlayout.addWidget(le, 1, i, 1, 1)
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
        QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
        mainlayout.addWidget(self.buttonBox, 2, 0, len(inputs), 1)
        QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
        self.setLayout(mainlayout)
        QMetaObject.connectSlotsByName(self)
        self.problem=False
        self.ok=False
    def ExitRoutine(self):
        """Convert each line-edit string to its requested type into self.ans."""
        self.ok=True
        self.problem=False
        self.ans=[]
        self.inputstrlist=[str(le.text()).strip() for le in self.lelist]
        for s, tup in zip(self.inputstrlist, self.inputs):
            if tup[1]==str:
                try:
                    self.ans+=[s]
                except:
                    self.problem=True
                    break
            else:
                try:
                    # NOTE(review): myeval evaluates the user's string -- only
                    # acceptable for trusted interactive input
                    n=myeval(s)
                    self.ans+=[tup[1](n)]
                except:
                    self.problem=True
                    break
        if self.problem:
            # tup still refers to the field whose conversion failed
            idialog=messageDialog(self, 'problem with conversion of ' + tup[0])
            idialog.exec_()
class selectdbsessionsDialog(QDialog):
    """Dialog of checkboxes for choosing DB experiment sessions to analyze.

    The first `maxsessions` sessions get one checkbox each (unchecked);
    any remaining sessions are collapsed into a single pre-checked
    checkbox.  On accept, self.selectinds holds the selected session
    indices.
    """
    def __init__(self, parent, ex_trange_techl, maxsessions=15, title='Select DB experiment sessions to analyze'):
        super(selectdbsessionsDialog, self).__init__(parent)
        self.setWindowTitle(title)
        mainlayout=QVBoxLayout()
        self.cblist=[]
        # cbinds maps each checkbox to the list of session indices it covers
        self.cbinds=[]
        for count, (ex, (t0, t1), techl) in enumerate(ex_trange_techl[:maxsessions]):
            cb=QCheckBox()
            cb.setText('exp %d: %s to %s, %s' %(ex, str(t0), str(t1), ','.join(techl)))
            cb.setChecked(False)
            mainlayout.addWidget(cb)
            self.cblist+=[cb]
            self.cbinds+=[[count]]
        if len(ex_trange_techl)>maxsessions:
            # one aggregate checkbox for all sessions beyond maxsessions
            cb=QCheckBox()
            ex, (t0, t1), techl=ex_trange_techl[maxsessions]
            ex2, (t02, t12), techl2=ex_trange_techl[-1]
            techl=list(set(techl+techl2))
            cb.setText('exp %d-%d: %s to %s, %s' %(ex, ex2, str(t0), str(t12), ','.join(techl)))
            cb.setChecked(True)
            mainlayout.addWidget(cb)
            self.cblist+=[cb]
            self.cbinds+=[range(maxsessions, len(ex_trange_techl))]
            # NOTE(review): redundant -- setChecked(True) was already called
            # a few lines above on the same checkbox
            cb.setChecked(True)
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
        QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
        mainlayout.addWidget(self.buttonBox)
        QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
        self.setLayout(mainlayout)
        QMetaObject.connectSlotsByName(self)
    def ExitRoutine(self):
        """Collect the indices covered by every checked checkbox."""
        self.selectinds=[]
        for cb, l in zip(self.cblist, self.cbinds):
            if cb.isChecked():
                self.selectinds+=l
class MainMenu(QMainWindow):
    """Top-level window that immediately launches the echem visualization
    dialog and closes any DB connection the dialog opened."""
    def __init__(self, previousmm, execute=True, **kwargs):#, TreeWidg):
        super(MainMenu, self).__init__(None)
        #self.setupUi(self)
        self.setWindowTitle('Echem Visualization')
        self.echem=echemvisDialog(self, **kwargs)
        if execute:
            self.echem.exec_()
            # bugfix: compare ints with ==, not `is` -- identity of small
            # ints is a CPython implementation detail and `x is 1` is a
            # SyntaxWarning on modern Python
            if self.echem.dbdatasource == 1:
                try:
                    self.echem.dbc.db.close()
                except:
                    # best-effort close; connection may never have opened
                    pass
class echem10axesWidget(QDialog):
    """Dialog showing a 10-axes stacked-ternary composition plot."""
    def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
        # NOTE(review): mutable default `ellabels` -- only read, never mutated
        super(echem10axesWidget, self).__init__(parent)
        mainlayout=QVBoxLayout()
        self.plotw=plotwidget(self)
        self.plotw.fig.clf()
        # build the stacked ternary axes on the plot widget's figure
        self.axl, self.stpl=make10ternaxes(fig=self.plotw.fig, ellabels=ellabels)
        mainlayout.addWidget(self.plotw)
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
        QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
        mainlayout.addWidget(self.buttonBox)
        self.setLayout(mainlayout)
    def plot(self, d, cb=True):
        """Scatter d['comps'] colored by d['fom'] onto the stacked axes.

        `d` supplies 'comps', 'fom', 'cmap', 'norm' and optionally
        'fomlabel' (used as the colorbar label).
        """
        if 'fomlabel' in d.keys():
            cblabel=d['fomlabel']
        else:
            cblabel=''
        scatter_10axes(d['comps'], d['fom'], self.stpl, s=18, edgecolors='none', cb=cb, cblabel=cblabel, cmap=d['cmap'], norm=d['norm'])
class echem20axesWidget(QDialog):
    """Dialog showing a 20-axes stacked-ternary composition plot."""
    def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
        # NOTE(review): mutable default `ellabels` -- only read, never mutated
        super(echem20axesWidget, self).__init__(parent)
        mainlayout=QVBoxLayout()
        self.plotw=plotwidget(self)
        self.plotw.fig.clf()
        # build the stacked ternary axes on the plot widget's figure
        self.axl, self.stpl=make20ternaxes(fig=self.plotw.fig, ellabels=ellabels)
        mainlayout.addWidget(self.plotw)
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
        QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
        mainlayout.addWidget(self.buttonBox)
        self.setLayout(mainlayout)
    def plot(self, d, cb=True):
        """Scatter d['comps'] colored by d['fom'] onto the stacked axes.

        `d` supplies 'comps', 'fom', 'cmap', 'norm' and optionally
        'fomlabel' (used as the colorbar label).
        """
        if 'fomlabel' in d.keys():
            cblabel=d['fomlabel']
        else:
            cblabel=''
        scatter_20axes(d['comps'], d['fom'], self.stpl, s=18, edgecolors='none', cb=cb, cblabel=cblabel, cmap=d['cmap'], norm=d['norm'])
class echem30axesWidget(QDialog):
    """Dialog showing a 30-axes stacked-ternary composition plot."""
    def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
        # NOTE(review): mutable default `ellabels` -- only read, never mutated
        super(echem30axesWidget, self).__init__(parent)
        mainlayout=QVBoxLayout()
        self.plotw=plotwidget(self)
        self.plotw.fig.clf()
        # build the stacked ternary axes on the plot widget's figure
        self.axl, self.stpl=make30ternaxes(fig=self.plotw.fig, ellabels=ellabels)
        mainlayout.addWidget(self.plotw)
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
        QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
        mainlayout.addWidget(self.buttonBox)
        self.setLayout(mainlayout)
    def plot(self, d, cb=True):
        """Scatter d['comps'] colored by d['fom'] onto the stacked axes.

        `d` supplies 'comps', 'fom', 'cmap', 'norm' and optionally
        'fomlabel' (used as the colorbar label).
        """
        if 'fomlabel' in d.keys():
            cblabel=d['fomlabel']
        else:
            cblabel=''
        scatter_30axes(d['comps'], d['fom'], self.stpl, s=18, edgecolors='none', cb=cb, cblabel=cblabel, cmap=d['cmap'], norm=d['norm'])
class echem100axesWidget(QDialog):
    """Dialog showing a 9-of-100 stacked-ternary composition plot."""
    def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
        # NOTE(review): mutable default `ellabels` -- only read, never mutated
        super(echem100axesWidget, self).__init__(parent)
        mainlayout=QVBoxLayout()
        self.plotw=plotwidget(self)
        self.plotw.fig.clf()
        # build the stacked ternary axes on the plot widget's figure
        self.axl, self.stpl=make9of100ternaxes(fig=self.plotw.fig, ellabels=ellabels)
        mainlayout.addWidget(self.plotw)
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
        QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
        mainlayout.addWidget(self.buttonBox)
        self.setLayout(mainlayout)
    def plot(self, d, cb=True):
        """Scatter d['comps'] colored by d['fom'] onto the stacked axes.

        `d` supplies 'comps', 'fom', 'cmap', 'norm' and optionally
        'fomlabel' (used as the colorbar label).
        """
        if 'fomlabel' in d.keys():
            cblabel=d['fomlabel']
        else:
            cblabel=''
        scatter_9of100axes(d['comps'], d['fom'], self.stpl, s=20, edgecolors='none', cb=cb, cblabel=cblabel, cmap=d['cmap'], norm=d['norm'])
class echem4axesWidget(QDialog):
    """Dialog showing a 4-axes stacked-ternary composition plot."""
    def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
        # NOTE(review): mutable default `ellabels` -- only read, never mutated
        super(echem4axesWidget, self).__init__(parent)
        mainlayout=QVBoxLayout()
        self.plotw=plotwidget(self)
        self.plotw.fig.clf()
        # build the stacked ternary axes on the plot widget's figure
        self.axl, self.stpl=make4ternaxes(fig=self.plotw.fig, ellabels=ellabels)
        mainlayout.addWidget(self.plotw)
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
        QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
        mainlayout.addWidget(self.buttonBox)
        self.setLayout(mainlayout)
    def plot(self, d, cb=True):
        """Scatter d['comps'] colored by d['fom'] onto the stacked axes.

        `d` supplies 'comps', 'fom', 'cmap', 'norm' and optionally
        'fomlabel' (used as the colorbar label).
        """
        if 'fomlabel' in d.keys():
            cblabel=d['fomlabel']
        else:
            cblabel=''
        scatter_4axes(d['comps'], d['fom'], self.stpl, edgecolors='none', cb=cb, cblabel=cblabel, cmap=d['cmap'], norm=d['norm'])
class echembinWidget(QDialog):
    """Dialog plotting FOM along binary composition lines, with an inset."""
    def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
        # NOTE(review): mutable default `ellabels` -- only read, never mutated
        super(echembinWidget, self).__init__(parent)
        mainlayout=QVBoxLayout()
        self.plotw=plotwidget(self)
        self.plotw.fig.clf()
        # main binary-lines axes plus the inset legend/key axes
        self.axbin, self.axbininset=plotbinarylines_axandinset(fig=self.plotw.fig, ellabels=ellabels)
        mainlayout.addWidget(self.plotw)
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
        QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
        mainlayout.addWidget(self.buttonBox)
        self.setLayout(mainlayout)
    def plot(self, d, cb=True, ellabels=['A', 'B', 'C', 'D']):
        """Plot binary composition lines from d['comps']/d['fom'].

        NOTE(review): the `ellabels` parameter is unused here -- the labels
        are taken from d['ellabels'] instead; confirm which is intended.
        """
        if 'fomlabel' in d.keys():
            cblabel=d['fomlabel']
        else:
            cblabel=''
        plotbinarylines_quat(self.axbin, d['comps'], d['fom'], markersize=10, ellabels=d['ellabels'], linewidth=2)
        self.axbin.set_xlabel('binary composition', fontsize=16)
        self.axbin.set_ylabel(cblabel, fontsize=16)
class echemvisDialog(QDialog):
def __init__(self, parent=None, title='', folderpath=None):
super(echemvisDialog, self).__init__(parent)
self.parent=parent
# self.echem30=echem30axesWidget()
# self.echem30.show()
self.plotillumkey=None
#Uncomment out next line to disable db access
#folderpath=PyCodePath
if folderpath is None:
self.dbdatasource=userinputcaller(self, inputs=[('DBsource?', int, '1')], title='Change to 0 to read for local harddrive.')[0]
if self.dbdatasource is 1:
self.dbc=None#self.createdbsession()
elif self.dbdatasource is 2:
if sys.platform.startswith('linux'):
self.kcomputers='/media/hteshare/computers'
self.kexperiments='/media/hteshare/experiments'
matplotlib.rcParams['font.family']='Bitstream Vera Sans'
elif sys.platform.startswith('win'):
self.kcomputers='K:\\computers'
self.kexperiments='K:\\experiments'
elif sys.platform.startswith('darwin'):
self.kcomputers='/Volumes/home/computers'
self.kexperiments='/Volumes/home/experiments'
else:
self.kcomputers="%s" % os.getcwd()
self.kexperiments="%s" % os.getcwd()
try:
print 'kcomputers is ' + self.kcomputers
except:
pass
else:
self.dbdatasource=0
self.techniquedictlist=[]
self.plotw_select=plotwidget(self)
# self.plotw_select.axes.set_xlabel('')
# self.plotw_select.axes.set_ylabel('')
#self.plotw_select.axes.set_aspect(1)
self.plotw_plate=plotwidget(self)
# self.plotw_plate.axes.set_xlabel('')
# self.plotw_plate.axes.set_ylabel('')
self.plotw_plate.axes.set_aspect(1)
self.plotw_tern=plotwidget(self)
# self.plotw_tern.axes.set_xlabel('')
# self.plotw_tern.axes.set_ylabel('')
#self.plotw_tern.axes.set_aspect(1)
self.plotw_quat=plotwidget(self, projection3d=True)
# self.plotw_quat.axes.set_xlabel('')
# self.plotw_quat.axes.set_ylabel('')
#self.plotw_quat.axes.set_aspect(1)
self.plotw_aux=plotwidget(self)
# self.plotw_aux.axes.set_xlabel('')
# self.plotw_aux.axes.set_ylabel('')
#self.plotw_aux.axes.set_aspect(1)
axrect=[0.82, 0.1, 0.04, 0.8]
self.plotw_plate.fig.subplots_adjust(left=0, right=axrect[0]-.01)
self.cbax_plate=self.plotw_plate.fig.add_axes(axrect)
self.plotw_tern.fig.subplots_adjust(left=0, right=axrect[0]-.01)
self.cbax_tern=self.plotw_tern.fig.add_axes(axrect)
self.plotw_quat.fig.subplots_adjust(left=0, right=axrect[0]-.01)
self.cbax_quat=self.plotw_quat.fig.add_axes(axrect)
self.plotw_select.fig.subplots_adjust(left=.2)
self.plotw_aux.fig.subplots_adjust(left=.2)
QObject.connect(self.plotw_plate, SIGNAL("genericclickonplot"), self.plateclickprocess)
#in options, always make an option that does not require user input at index 0
CVops=[\
['Imax', ['I(A)'], []], \
['Imin', ['I(A)'], []], \
['E_Ithresh', ['I(A)','Ewe(V)'], [['Ithresh(A)', float, '1e-5'], ['Num consec points', int, '20'], ['0 for below, 1 for above', int, '1'], ['Thresh not reached value', float, '1']]], \
['Eh in I=Io Exp(E/Eh)', ['I(A)', 'Ewe(V)'], []], \
['Io in I=Io Exp(E/Eh)', ['I(A)', 'Ewe(V)'], []], \
['Iphoto_max', ['Illum', 'I(A)', 'Ewe(V)', 't(s)'], [['frac of Illum segment start', float, '0.4'], ['frac of Illum segment end', float, '0.95'], ['frac of Dark segment start', float, '0.4'], ['frac of Dark segment end', float, '0.95'], ['Illum signal key', str, 'Toggle'], ['Illum signal time shift (s)', float, '0.'], ['Illum Threshold', float, '0.5'], ['Illum Invert', int, '0'], ['num illum cycles', int, '2'], ['0 from beginning, 1 from end', int, '1']]], \
['Iphoto_min', ['Illum', 'I(A)', 'Ewe(V)', 't(s)'], [['frac of Illum segment start', float, '0.4'], ['frac of Illum segment end', float, '0.95'], ['frac of Dark segment start', float, '0.4'], ['frac of Dark segment end', float, '0.95'], ['Illum signal key', str, 'Toggle'], ['Illum signal time shift (s)', float, '0.'], ['Illum Threshold', float, '0.5'], ['Illum Invert', int, '0'], ['num illum cycles', int, '2'], ['0 from beginning, 1 from end', int, '1']]], \
['None', ['I(A)', 'Ewe(V)'], []], \
]
OCVops=[\
['Efin', ['Ewe(V)'], []], \
['Eave', ['Ewe(V)', 't(s)'], [['Interval(s)', float, '2.'], ['Num StdDev outlier', float, '2.'], ['Num Pts in Window', int, '999999'], ['0 from beginning, 1 from end', int, '1']]], \
['Ess', ['Ewe(V)'], [['Weight | |
dictionary for the above functions to use as your input arguments,
# note it needs the ssh_conn argument which is an ConnectHandler object
# problems you can run into:
- the ssh_conn times out
- this can happen on large files and other files that take a long time
    - make sure your vty timeout doesn't expire before the transfer is complete
- trying to do a transfer while bouncing though a bastion host
    - the scp ssh control channel won't work
- in cisco ios you can use inline transfer mode to bounce though the bastion host
- the inline transfer happens all within one ssh channel,
- note: inline transfer will do a ttl file transfer and only works with text files
- you can't send binary files in inline transfer mode
- transfer_file only works with a limited group of ios platforms, meaning the device_type doesn't support scp
- this is a small list!
# A bastion host is a special-purpose computer on a network specifically designed and configured to withstand attacks.
# The computer generally hosts a single application, for example a proxy server,
# and all other services are removed or limited to reduce the threat to the computer
# example code
device1 = {
'host': 'cisco1.lasthop.io',
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'cisco_ios',
'session_log': 'my_session.txt',
'global_delay_factor': 2,
}
scp_dict = {
'source_file': 'testx.txt',
'dest_file': 'testx.txt',
'direction': 'put',
'file_system': 'bootflash:',
'overwrite_file': True
}
# we name the connection ssh_conn because that is the name of the argument that file_transfer takes
ssh_conn = netmiko.ConnectHandler(device1**)
# note you need to input the ssh_conn connection as the first input
# This is the connection object NOT the dictionary we used to set that connection up
# we update our scp_dict dictionary so we can use the dictionary as an argument in file_transfer
scp_dict.update({'ssh_conn': ssh_conn})
# we name our variable transfer_dict rather than output because file_transfer returns a dictionary
transfer_dict = netmiko.file_transfer(**scp_dict)
print(transfer_dict)
# notice transfer_dict returns a dictionary with information about our file_transfer
# file_exist - tells us if the dest_file exists on the remote machine and whether its md5 hash matches source_file
# (True of False)
# file_transferred - tells us if the file_transfer actually happened
# if file_exist=True, and file_transferred=True you know something weird happened
# file_verified - tells us of md5 hash verification was used in our file_transfer function
# note md5 hash verification can be turned off as an argument in file_transfer
"""
# using netmiko's save_config() example
"""
# this method is just like running wr mem in cisco
# example code
device1 = {
'host': 'cisco1.lasthop.io',
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'cisco_ios',
'session_log': 'my_session.txt',
'global_delay_factor': 2,
}
net_connect = netmiko.ConnectHandler(**device1)
print(net_connect.find_prompt())
output = net_connect.send_config_from_file('my_config_file.txt')
print(output)
# save the config changes so they remain after a reboot (the equivalent to running wr mem)
# this is supported on a lot of the operating systems in netmiko
save_out = net_connect.save_config()
print(save_out)
"""
# ssh key example
"""
# setup to use a ssh key for a given user inside cisco ios is cumbersome,
# once keys are setup they are bound to the username
# if you use ssh keys ConnectHandler no longer requires the argument password but now requires:
use_keys - must be set to True (default is False)
key_file - filepath string to the ssh key you use to connect to the device with
# note you can use ssh proxies
# https://pynet.twb-tech.com/blog/automation/netmiko-proxy.html?__s=4tlrjds8fu5yh73gu7u4
-proxies do not work well if you are running windows, much harder to set up
# note we now need the key_file and use_keys arguments.
# The ssh key must also be bound to the username in the device we are connecting to
device1 = {
'host': 'cisco1.lasthop.io',
'username': 'pyclass',
'device_type': 'cisco_ios',
'session_log': 'my_session.txt',
'use_keys': True,
'key_file': 'filepath/to/the/ssh/key',
'fast_cli': True
}
net_connect = netmiko.ConnectHandler(**device1)
print(net_connect.find_prompt())
output = net_connect.send_command("show ip arp", use_textfsm=True)
pprint(output)
net_connect.disconnect()
"""
# interacting with devices though python outside of script / programs
"""
# example code
device1 = {
'host': 'cisco1.lasthop.io',
'username': 'pyclass',
'password': '<PASSWORD>',
'device_type': 'cisco_ios',
}
net_connect = netmiko.ConnectHandler(**device1)
print(net_connect.find_prompt())
output = net_connect.send_command("show ip arp", use_textfsm=True)
print(output)
#net_connect.disconnect()
# if you dont disconnect you can drop into the interpreter shell after running the python script using ipythons -i
# ipython -i test_dict.py
# you should get a python command prompt after the script runs
# in this command prompt you can now run more commands and interact with the connected device
# though netmiko python methods
dir(net_connect)
help(net_connect.config_mode)
# you usually don't use the method config_mode() because you use net_connect.send_config_set() in your programs
# manually enter config mode
net_connect.config_mode()
net_connect.exit_config_mode()
# check if you are in enable mode
net_connect.enable()
#write and read to established channels
# this is a low level way of sending ssh commands and reading the ssh output
# write_channel works with ssh, telnet and console, channels
# note you need to add a \n to the end of the command you are sending
net_connect.write_channel("show ip int brief\n")
# read from channel
#(there needs to be some time between when this is ran and when you run the read command)
output = net_connect.read_channel()
# you can change the device type when you are logged into the terminal server of endpoint device
# this is usually done by manually logging into the device using read_channel() and write_channel()
# more on this can be found under netmiko issues
net_connect.redispatch()
"""
########################## HOMEWORK ############################
# Exercises:
"""
My solutions to the exercises can be found at:
https://github.com/ktbyers/pyplus_course/tree/master/class2/exercises?__s=4tlrjds8fu5yh73gu7u4
"""
# Problem 1
"""
1. Use the extended 'ping' command and Netmiko on the 'cisco4' router.
This should prompt you for additional information as follows:
cisco4#ping
Protocol [ip]:
Target IP address: 8.8.8.8
Repeat count [5]:
Datagram size [100]:
Timeout in seconds [2]:
Extended commands [n]:
Sweep range of sizes [n]:
Type escape sequence to abort.
Sending 5, 100-byte ICMP Echos to 8.8.8.8, timeout is 2 seconds:
!!!!!
Success rate is 100 percent (5/5), round-trip min/avg/max = 1/1/4 ms
a. Use send_command_timing() to handle the additional prompting from this 'ping' command.
Specify a target IP address of '8.8.8.8'
b. Use send_command() and the expect_string argument to handle the additional prompting.
Once again specify a target IP address of '8.8.8.8'.
"""
# Problem 2
"""
2. Create a Netmiko connection to the 'nxos2' device using a global_delay_factor of 2.
Execute 'show lldp neighbors detail' and print the returned output to standard output.
Execute 'show lldp neighbors detail' a second time using send_command() with a delay_factor of 8.
Print the output of this command to standard output.
Use the Python datetime library to record the execution time of both of these commands.
Print these execution times to standard output.
"""
#Problem 3
"""
3. On your AWS lab server, look at the ntc-templates index file (at ~/ntc-templates/templates/index).
Look at some of the commands available for cisco_ios
(you can use 'cat ~/ntc-templates/templates/index | grep cisco_ios' to see this).
Also look at some of the abbreviated forms of Cisco IOS commands that are supported in the index file.
Create a script using Netmiko that executes 'show version'
and 'show lldp neighbors' against the Cisco4 device with use_textfsm=True.
What is the outermost data structure that is returned from 'show lldp neighbors'
(dictionary, list, string, something else)?
The Cisco4 device should only have one LLDP entry (the HPE switch that this router connects to). From this LLDP data,
print out the remote device's interface. In other words,
print out the port number on the HPE switch that Cisco4 connects into.
"""
# Problem 4
"""
4. Use Netmiko and the send_config_set() method to configure the following on the Cisco3 router.
ip name-server 1.1.1.1
ip name-server 1.0.0.1
ip domain-lookup
Experiment with fast_cli=True to see how long the script takes to execute (with and without this option enabled).
Verify DNS lookups on the router are now working by executing 'ping google.com'.
Verify from this that you receive a ping response back.
"""
# Problem 5
"""
5. On both the NXOS1 and NXOS2 switches configure five VLANs including VLAN names
(just pick 5 VLAN numbers between 100 - 999).
Use Netmiko's send_config_from_file() method to accomplish this.
Also use Netmiko's save_config() method to save the changes to the startup-config.
"""
# Problem 6
"""
6. Using SSH and netmiko connect to the Cisco4 router.
In your device definition, specify both a 'secret' and a 'session_log'. Your device definition should look as follows:
password = getpass()
device = {
| |
<reponame>Eric-Bradford/Nominal_NMPC
from pylab import *
import numpy as np
import math
from Problem_definition import *
from casadi import *
from scipy.io import savemat
import pickle
class NMPC:
def __init__(self):
    """Build the complete NMPC problem.

    Loads the DAE model and run specifications, constructs the orthogonal
    collocation scheme, assembles the NLP (decision variables, constraints,
    objective) and instantiates the IPOPT solver.
    """
    # Variable definitions
    # Unpack the model as produced by Problem_definition.DAE_system():
    # symbolic states/algebraics/inputs, dynamics, objective terms,
    # constraint function gfcn with penalty weights G/R, bounds, and
    # noise/estimator data.
    self.xd, self.xa, self.u, self.ODEeq, self.Aeq, self.Obj_M, \
    self.Obj_L, self.R, self.ng, self.gfcn, self.G, self.u_min, self.state_positive, \
    self.u_max, self.states, self.algebraics, self.inputs, \
    self.Sigma_d, self.Sigma_m, self.nm, self.xu, self.mun, self.covun,\
    self.path, self.hfcn, self.Sigma_Q, self.Sigma_R = DAE_system()
    # Run configuration: initial state, horizon, discretisation and solver options.
    self.x0, self.tf, self.nk, self.shrinking_horizon, self.deg, \
    self.cp, self.nicp, self.simulation_time, self.opts, \
    self.number_of_repeats, self.xhat0, self.Sigmahat0 = specifications()
    self.h = self.tf/self.nk/self.nicp  # length of one integration interval
    # Problem dimensions: differential states, algebraic states, controls.
    self.nd, self.na = SX.size(self.xd)[0], SX.size(self.xa)[0]
    self.nu = SX.size(self.u)[0]
    self.state_estimator = state_estimator
    # Internal function calls
    self.C, self.D = self.collocation_points()  # collocation/continuity coefficients
    self.ffcn = self.model_fcn()                # DAE residual function
    self.NV, self.V, self.vars_lb, self.vars_ub, self.vars_init, self.XD, \
    self.XA, self.U, self.con = self.NLP_specification()
    self.vars_init, self.vars_lb, self.vars_ub, self.g, self.lbg, self.ubg, \
    self.lambdav, self.XD, self.XA, self.U, self.cfcn, self.lambdac \
        = self.set_constraints()
    self.Obj = self.set_objective()
    self.solver = self.create_solver()
def collocation_points(self):
    """Compute the collocation coefficient matrices.

    Returns:
        C: (deg+1, deg+1) array -- time derivatives of the Lagrange basis
           evaluated at the collocation points (eq 10.19b in Biegler).
        D: (deg+1,) array -- Lagrange basis evaluated at tau = 1, i.e. the
           continuity-equation coefficients.
    """
    deg, cp, nk, h = self.deg, self.cp, self.nk, self.h
    C = np.zeros((deg+1,deg+1)) # Coefficients of the collocation equation
    D = np.zeros(deg+1) # Coefficients of the continuity equation
    # All collocation time points
    tau = SX.sym("tau") # Collocation point
    # NOTE: 'collocation_points' below resolves to the casadi function of the
    # same name (via 'from casadi import *'), not to this method.
    tau_root = [0] + collocation_points(deg,cp)
    T = np.zeros((nk,deg+1))
    for i in range(nk):
        for j in range(deg+1):
            T[i][j] = h*(i + tau_root[j])  # absolute time of each point (not used further here)
    # For all collocation points: eq 10.4 or 10.17 in Biegler's book
    # Construct Lagrange polynomials to get the polynomial basis at the collocation point
    for j in range(deg+1):
        L = 1
        for j2 in range(deg+1):
            if j2 != j:
                L *= (tau-tau_root[j2])/(tau_root[j]-tau_root[j2])
        lfcn = Function('lfcn', [tau],[L])
        # Evaluate the polynomial at the final time to get the coefficients of the continuity equation
        D[j] = lfcn(1.0)
        # Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the collocation equation
        tfcn = Function('tfcn', [tau],[tangent(L,tau)])
        for j2 in range(deg+1):
            C[j][j2] = tfcn(tau_root[j2])
    return C, D
def model_fcn(self):
    """Build the implicit DAE residual function.

    Returns a casadi Function ffcn(t, xddot, xd, xa, xu, u, p_s) whose
    output stacks the ODE residuals (scaled RHS minus state derivative)
    followed by the algebraic equations, both scaled by p_s.
    """
    t = SX.sym("t")
    p_s = SX.sym("p_s")
    xddot = SX.sym("xddot", self.nd)
    res = []
    # Differential part: f_i(x)*p_s - xdot_i = 0
    for i in range(self.nd):
        res = vertcat(res, self.ODEeq[i] * p_s - xddot[i])
    # Algebraic part: g_i(x)*p_s = 0
    for i in range(self.na):
        res = vertcat(res, self.Aeq[i] * p_s)
    return Function('ffcn',
                    [t, xddot, self.xd, self.xa, self.xu, self.u, p_s],
                    [res])
def NLP_specification(self):
    """Allocate the NLP decision vector, its bound/initial-guess arrays and
    the containers for the discretised states and controls.

    Returns (NV, V, vars_lb, vars_ub, vars_init, XD, XA, U, con).
    """
    nk, deg, nicp = self.nk, self.deg, self.nicp
    nd, na, nu, ng = self.nd, self.na, self.nu, self.ng
    # Counts of collocated differential states, algebraic states and
    # piecewise-constant controls.
    NXD = nicp * nk * (deg + 1) * nd
    NXA = nicp * nk * deg * na
    NU = nk * nu
    NV = NXD + NXA + NU
    total = NV + nk * ng  # soft-constraint slack variables appended at the tail
    # NLP decision vector and parameter vector
    # (parameters: current state, per-interval time scaling, previous control).
    V = MX.sym("V", total)
    con = MX.sym("con", nd + nk + nu)
    # Bounds and initial guess, filled in later by set_constraints().
    vars_lb = np.zeros(total)
    vars_ub = np.zeros(total)
    vars_init = np.zeros(total)
    # Containers for the discretised trajectories.
    XD = np.resize(np.array([], dtype=MX), (nk, nicp, deg + 1))
    XA = np.resize(np.array([], dtype=MX), (nk, nicp, deg))
    U = np.resize(np.array([], dtype=MX), nk)
    return NV, V, vars_lb, vars_ub, vars_init, XD, XA, U, con
def set_constraints(self):
    """Discretise the DAE by orthogonal collocation and build the NLP
    constraint system.

    Returns initial guesses and bounds for the decision vector, the
    constraint expressions g with bounds lbg/ubg, the soft-constraint slack
    variables (lambdav) and expressions (lambdac), the filled state/control
    containers and a helper function cfcn extracting the first control.
    """
    nk, nicp, deg, C, h = self.nk, self.nicp, self.deg, self.C, self.h
    ffcn, D = self.ffcn, self.D
    nd, na, nu, nx = self.nd, self.na, self.nu, self.nd+self.na
    u_min,u_max = self.u_min, self.u_max
    ng, gfcn, mun = self.ng, self.gfcn, self.mun
    con, V, NV, path = self.con, self.V, self.NV, self.path
    vars_lb, vars_ub = self.vars_lb, self.vars_ub
    XD, XA, U, vars_init = self.XD, self.XA, self.U, self.vars_init
    # One slack vector per control interval for the soft constraints.
    lambdac = [MX.zeros(ng)]*nk
    # Parameter layout: [x_current (nd), p_s time scaling (nk), u_previous (nu)].
    x_current = con[:nd]
    p_s = con[nd:nd+nk]
    # Slack variables live at the tail of the decision vector V.
    lambdav = V[-nk*ng:]
    # Flat initial guesses (all ones) for states and controls.
    xD_init = np.array((nk*nicp*(deg+1))*[[1.]*nd])
    xA_init = np.array((nk*nicp*(deg+1))*[[1.]*na])
    u_init = np.array((nk*nicp*(deg+1))*[[1.]*nu])
    vars_lb[-nk*ng:] = np.zeros(nk*ng)     # slacks are non-negative
    vars_ub[-nk*ng:] = np.ones(nk*ng)*inf
    offset = 0
    # Default state bounds: unbounded.
    xD_min, xD_max = np.array([-inf]*nx), np.array([inf]*nx)
    xDf_min,xDf_max = np.array([-inf]*nx), np.array([inf]*nx)  # NOTE(review): unused locals
    xA_min, xA_max = np.array([-inf]*na), np.array([inf]*na)
    # Get collocated states and parametrized control
    for k in range(nk):
        # Collocated states
        for i in range(nicp):
            #
            for j in range(deg+1):
                # Get the expression for the state vector
                XD[k][i][j] = V[offset:offset+nd]
                if j !=0:
                    XA[k][i][j-1] = V[offset+nd:offset+nd+na]
                # Add the initial condition
                index = (deg+1)*(nicp*k+i) + j
                if k==0 and j==0 and i==0:
                    # Very first point: differential states only.
                    vars_init[offset:offset+nd] = xD_init[index,:]
                    vars_lb[offset:offset+nd] = xD_min
                    vars_ub[offset:offset+nd] = xD_max
                    offset += nd
                else:
                    if j!=0:
                        # Interior collocation point: differential + algebraic states.
                        vars_init[offset:offset+nx] = np.append(xD_init[index,:],xA_init[index,:])
                        vars_lb[offset:offset+nx] = np.append(xD_min,xA_min)
                        vars_ub[offset:offset+nx] = np.append(xD_max,xA_max)
                        offset += nx
                    else:
                        # Start of a finite element: differential states only.
                        vars_init[offset:offset+nd] = xD_init[index,:]
                        vars_lb[offset:offset+nd] = xD_min
                        vars_ub[offset:offset+nd] = xD_max
                        offset += nd
        # Parametrized controls (one constant control per interval k).
        U[k] = V[offset:offset+nu]
        vars_lb[offset:offset+nu] = u_min
        vars_ub[offset:offset+nu] = u_max
        vars_init[offset:offset+nu] = u_init[index,:]
        offset += nu
    assert(offset==NV)
    # Constraint function for the NLP
    g = []
    lbg = []
    ubg = []
    # Initial value constraint
    g += [XD[0][0][0] - x_current]
    lbg.append(np.zeros(nd))
    ubg.append(np.zeros(nd))
    # For all finite elements
    for k in range(nk):
        for i in range(nicp):
            # For all collocation points
            for j in range(1,deg+1):
                # Get an expression for the state derivative at the collocation point
                xp_jk = 0
                for j2 in range (deg+1):
                    xp_jk += C[j2][j]*XD[k][i][j2] # get the time derivative of the differential states (eq 10.19b)
                # Add collocation equations to the NLP
                fk = ffcn(0.,xp_jk/h,XD[k][i][j],XA[k][i][j-1],MX(mun),U[k],p_s[k])
                g += [fk[:nd]] # impose system dynamics (for the differential states (eq 10.19b))
                lbg.append(np.zeros(nd)) # equality constraints
                ubg.append(np.zeros(nd)) # equality constraints
                g += [fk[nd:]] # impose system dynamics (for the algebraic states (eq 10.19b))
                lbg.append(np.zeros(na)) # equality constraints
                ubg.append(np.zeros(na)) # equality constraints
            # NOTE(review): result discarded -- appears to be dead code.
            np.resize(np.array([],dtype=SX),(nk,nicp,deg))
            # Get an expression for the state at the end of the finite element
            if k > 0:
                xf_k = 0
                for j in range(deg+1):
                    xf_k += D[j]*XD[k-1][i][j]
                # Add continuity equation to NLP
                if i==nicp-1:
                    g += [XD[k][0][0] - xf_k]
                else:
                    g += [XD[k-1][i+1][0] - xf_k]
                lbg.append(np.zeros(nd))
                ubg.append(np.zeros(nd))
    # Helper that extracts the first control move from a solution vector.
    cfcn = Function('cfcn',[V],[U[0]])
    offset2 = 0
    for gg in range(ng):
        if path[gg]:
            # Path constraint: enforced (softly) at the end of every interval.
            for ii in range(nk):
                # Soft constraints
                lambdac[ii][gg] = lambdav[offset2:offset2+ng][gg]
                g += [gfcn(XD[ii][nicp-1][deg],XA[ii][nicp-1][deg-1],U[ii])[gg]-lambdac[ii][gg]]
                lbg.append([-inf]*1)
                ubg.append([0.]*1)
                if gg == ng-1:
                    offset2 += ng
        else:
            # Terminal constraint: enforced only on the last interval.
            ii = nk-1
            # Soft constraints
            lambdac[ii][gg] = lambdav[offset2:offset2+ng][gg]
            g += [gfcn(XD[ii][nicp-1][deg],XA[ii][nicp-1][deg-1],U[ii])[gg]-lambdac[ii][gg]]
            lbg.append([-inf]*1)
            ubg.append([0.]*1)
            if gg == ng-1:
                offset2 += ng
    return vars_init, vars_lb, vars_ub, g, lbg, ubg, lambdav, XD, XA, U, cfcn, lambdac
def set_objective(self):
    """Assemble the scalar NLP objective.

    Obj = soft-constraint penalty (lg' G lg) + control-move penalty
    (du' R du) + Lagrange term + Mayer term, with the stage terms scaled
    by the per-interval parameter p_s.
    """
    lambdac, G, R = self.lambdac, self.G, self.R
    nk, nicp, deg = self.nk, self.nicp, self.deg
    U, XD, XA = self.U, self.XD, self.XA
    nd, na, nu, nx = self.nd, self.na, self.nu, self.nd+self.na
    ng, con, Obj_L, Obj_M = self.ng, self.con, self.Obj_L, self.Obj_M
    # Parameter slices (same layout as in set_constraints).
    p_s = con[nd:nd+nk]
    u_previous = con[nd+nk:nd+nk+nu]
    Obj = MX.zeros(1)
    # Soft-constraints for nonlinear constraints: quadratic penalty on slacks.
    lg = SX.sym('lg',ng)
    ps = SX.sym('ps')
    lfcn = Function('lfcn',[lg,ps],[mtimes(mtimes(transpose(lg),G),lg)*ps])
    for k in range(nk):
        Obj += lfcn(lambdac[k],p_s[k])
    # Control penalty: penalise changes in u between consecutive intervals.
    u1 = SX.sym('u1',nu)
    u2 = SX.sym('u2',nu)
    dufcn = Function('dufcn',[u1,u2,ps],[mtimes(mtimes(transpose(u2-u1),R),u2-u1)*ps])
    deltau = MX.zeros(1)
    for k in range(nk-1):
        if k == 0:
            # First move is measured against the previously applied control.
            deltau += dufcn(u_previous,U[k],p_s[k])
        else:
            deltau += dufcn(U[k],U[k+1],p_s[k])
    Obj += deltau
    # Lagrange term of objective, evaluated at the end of each interval.
    lagrange = MX.zeros(1)
    for k in range(nk):
        lagrange += Obj_L(XD[k][nicp-1][deg],XA[k][nicp-1][deg-1],U[k])*p_s[k]
    Obj += lagrange
    # Mayer term of objective (terminal cost).
    Obj += Obj_M(XD[nk-1][nicp-1][deg],XA[nk-1][nicp-1][deg-1],U[-1])
    return Obj
def create_solver(self):
    """Wrap the assembled objective and constraints into an IPOPT NLP solver."""
    constraints = vertcat(*self.g)
    nlp_def = {'x': self.V, 'p': self.con, 'f': self.Obj, 'g': constraints}
    return nlpsol("solver", "ipopt", nlp_def, self.opts)
def initialization(self):
tf, deltat, nu, nd = self.tf, self.tf/self.nk, self.nu, self.nd
number_of_repeats, na, ng = self.number_of_repeats, self.na, self.ng
time_loop = []
U_pasts = np.zeros((number_of_repeats,int(math.ceil(tf/deltat)),nu))
Xd_pasts = np.zeros((int(math.ceil(tf/deltat))*100+1,number_of_repeats,nd))
Xd_pastse = np.zeros((int(math.ceil(tf/deltat))+1,number_of_repeats,nd))
Xa_pasts = np.zeros((int(math.ceil(tf/deltat))*100,number_of_repeats,na))
Con_pasts = np.zeros((int(math.ceil(tf/deltat))*100,number_of_repeats,ng))
t_past = [0.]
u_nmpc = np.array([0.]*nu)
time_loop = []
return U_pasts, Xd_pasts, Xd_pastse, Xa_pasts, Con_pasts, u_nmpc, time_loop
def initialization_loop(self):
x_hat0 = self.xhat0
Sigmahat0 = self.Sigmahat0
x0 = self.x0
lbg, ubg, ng = self.lbg, self.ubg, self.ng
vars_lb, vars_ub, vars_init = self.vars_lb, self.vars_ub, self.vars_init
tf, deltat, nu, nd = self.tf, self.tf/self.nk, self.nu, self.nd
number_of_repeats, na = self.number_of_repeats, self.na
arg = | |
so, either y-2 and y-1 are the red cols or
# neither is a a red col
# if have moved the red car, then y is on the right side of the car
# and red_car_end_a = y
if x == 2 and y-2 in self._red_car_cols():
red_car_end_a = self._red_car_end_a + 1
else:
red_car_end_a = self._red_car_end_a
ret.add( RHState(nbr_board,red_car_end_a ) )
# y is on the right side of the truck
for x,y in mv_right_truck:
nbr_board = np.copy(self._board)
nbr_board[x,y] = RHConstants.HORIZONTAL_TRUCK
nbr_board[x,y-3] = RHConstants.BLANK_SPACE
ret.add( RHState(nbr_board,self._red_car_end_a) )
return(ret)
# ####################################################
#
# Display Routines
#
# #####################################################
###################################
#
# HTML Table Logic
#
###################################
def HTMLTable(self, order = 'GamePlay'):
    """
    Return well-formed HTML Table markup to represent the board
    valid order options:
    - generative - displays for easily walking state generation algorithm
      Color coding scheme of cars/trucks is different
    - Color coding aligns with game play.
      All states within one component will have consistent coloring
      scheme.

    NOTE(review): the 'order' argument is not referenced in the body --
    confirm whether the 'generative' option was ever wired up.
    """
    # 6x6 grids of cell background colors and display symbols.
    html_colors = np.array([RHConstants.BLANK_COLOR_RGB] *36).reshape(6,6)
    html_symbols = np.array([' ']*36).reshape(6,6)
    # Keep track of which color/symbol will be painted next
    car_index = 0
    truck_index = 0
    v = self._vehicles()
    hcars = v['hcars']
    vcars = v['vcars']
    htrucks = v['htrucks']
    vtrucks = v['vtrucks']
    for car in hcars:
        for r,c in car:
            # The horizontal car on row 2 in the red-car columns is the red car.
            if r == 2 and c in self._red_car_cols():
                html_colors[r,c] = RHConstants.RED_COLOR_RGB
                html_symbols[r,c] = RHConstants.RED_SYMBOL
            else:
                html_colors[r,c] = RHConstants.CAR_COLORS_RGB[car_index]
                html_symbols[r,c] = RHConstants.CAR_SYMBOLS[car_index]
        car_index +=1
    for car in vcars:
        for r,c in car:
            html_colors[r,c] = RHConstants.CAR_COLORS_RGB[car_index]
            html_symbols[r,c] = RHConstants.CAR_SYMBOLS[car_index]
        car_index +=1
    for truck in htrucks:
        for r,c in truck:
            html_colors[r,c] = RHConstants.TRUCK_COLORS_RGB[truck_index]
            html_symbols[r,c] = RHConstants.TRUCK_SYMBOLS[truck_index]
        truck_index +=1
    for truck in vtrucks:
        for r,c in truck:
            html_colors[r,c] = RHConstants.TRUCK_COLORS_RGB[truck_index]
            html_symbols[r,c] = RHConstants.TRUCK_SYMBOLS[truck_index]
        truck_index +=1
    # !!!! TODO - craft text of html table
    # NOTE(review): unlike the SVG path, no '#' is prefixed to the hex color
    # in bgcolor here -- confirm target browsers accept bare hex values.
    html = '<table>'
    for r in range(6):
        html = html + '<tr>'
        for c in range(6):
            html = html + '<td bgcolor = "' + html_colors[r,c] + '" '
            html = html + ' style="width:30px; height:30px; vertical-align:middle; text-align:center">'
            html = html + html_symbols[r,c]
            html = html + '</td>'
        # ['<td bgcolor="%s" style="width:30px; height:30px; vertical-align:middle; text-align:center">%s</td>'
        html = html + '</tr>'
    html = html + '</table>'
    return(html)
##############################
#
# SVG Display Logic
#
##############################
def _init_svg(self):
    """Initialise SVG drawing state: the cached drawing handle, board
    geometry constants, and the pixel size of each vehicle type."""
    self._svg_dwg = None               # drawing is built lazily
    self._svg_space_size = 30          # pixels per board cell
    self._svg_board_size = 6 * self._svg_space_size
    self._svg_border = 3               # gap between a piece and its cell edges
    self._svg_round_radius = 5         # corner radius of piece rectangles

    def span(cells):
        # Pixel extent of a piece covering `cells` board cells, minus borders.
        return cells * self._svg_space_size - 2 * self._svg_border

    self._svg_size = {
        RHConstants.HORIZONTAL_CAR: (span(2), span(1)),
        RHConstants.VERTICAL_CAR: (span(1), span(2)),
        RHConstants.VERTICAL_TRUCK: (span(1), span(3)),
        RHConstants.HORIZONTAL_TRUCK: (span(3), span(1)),
    }
# !!!! TODO - consider having a 6x6 array of colors and text
def svg_neighbors(self):
    '''Placeholder -- not implemented.

    Intended to cycle through svg_pieces, using each piece's color to
    denote the corresponding edge color. Planned data structure for
    verts[]:
        edge_color: RHConstants Color
        nbr_state: RHState
    verts = {}
    '''
    pass
def svg_edge_colors(self):
    '''Placeholder -- not implemented.

    When RHComponent sets out to draw a neighborhood and navigate through
    the component, it needs to know the number of vertical/horizontal
    edges. These don't structurally change during navigation of a
    component.
    '''
    pass
def svg_edge_color(self,other):
    '''Return the display color of the piece whose single move transforms
    self into other.

    Used by RHComponent when building out SVGs. Compares the two boards
    cell-wise; exactly one piece is assumed to differ by one move.
    '''
    diff = other._board - self._board
    # positive marks cell filled by moving piece from self to other
    # negative marks cell emptied by moving piece from self to other
    [pos_row,pos_col] = np.array(np.where(diff>0)).transpose()[0]
    [neg_row,neg_col] = np.array(np.where(diff<0)).transpose()[0]
    orientation = diff[diff>0][0]
    if orientation in [RHConstants.VERTICAL_CAR, RHConstants.VERTICAL_TRUCK]:
        self_end_a_col = pos_col # arbitrary choice for col
        # Recover where end 'a' of the piece sat in self before the move.
        if pos_row > neg_row:
            self_end_a_row = pos_row + 1
        else:
            self_end_a_row = neg_row
    if orientation in [RHConstants.HORIZONTAL_CAR, RHConstants.HORIZONTAL_TRUCK]:
        self_end_a_row = pos_row #arbitrary choice for row
        if pos_col > neg_col:
            self_end_a_col = neg_col
        else:
            self_end_a_col = pos_col + 1
    # BUG FIX: the original iterated `self.svg_pieces` (the bound method
    # object, which is not iterable) instead of calling it. Calling the
    # accessor also guarantees the piece list has been built.
    edge_piece = [x for x in self.svg_pieces() if
                  x['end_a_row'] == self_end_a_row and
                  x['end_a_col'] == self_end_a_col][0]
    return (edge_piece['color'])
def svg_pieces(self):
    '''
    Add color coding to the pieces in a predictable way such that all
    states in a connected component are drawn with the same piece coloring.
    Also intended to be used by RHComponent to permit drawing a graph
    from the perspective of a single state.

    Returns a list of dicts with keys: orientation, end_a_row, end_a_col,
    color, text. The result is cached in self._svg_pieces.
    '''
    # Return the cached assignment if already computed.
    if self._svg_pieces:
        return self._svg_pieces
    # Work on copies so .pop() can hand out colors/symbols in a fixed order.
    car_colors = ['#' + x for x in RHConstants.CAR_COLORS_RGB]
    car_symbols = RHConstants.CAR_SYMBOLS[:]
    truck_colors = ['#' + x for x in RHConstants.TRUCK_COLORS_RGB ]
    truck_symbols = RHConstants.TRUCK_SYMBOLS[:]
    red_car_color = '#' + RHConstants.RED_COLOR_RGB
    red_car_symbol = RHConstants.RED_SYMBOL
    ret = []
    verticals = [ x for x in self._get_pieces() if x['orientation'] \
                  in [RHConstants.VERTICAL_CAR, RHConstants.VERTICAL_TRUCK] ]
    horizontals = [ x for x in self._get_pieces() if x['orientation'] \
                    in [RHConstants.HORIZONTAL_CAR, RHConstants.HORIZONTAL_TRUCK] ]
    # Scan columns left-to-right, pieces top-to-bottom, so color assignment
    # is a deterministic function of the board layout.
    for col in range(6):
        col_verts = [x for x in verticals if x['end_a_col'] == col]
        sorted_verts = sorted(col_verts, key=lambda x:x['end_a_row'])
        for v in sorted_verts:
            piece = {}
            piece['orientation'] = v['orientation']
            piece['end_a_col'] = v['end_a_col']
            piece['end_a_row'] = v['end_a_row']
            if piece['orientation'] == RHConstants.VERTICAL_CAR:
                piece['color'] = car_colors.pop()
                piece['text'] = car_symbols.pop()
            else:
                piece['color'] = truck_colors.pop()
                piece['text'] = truck_symbols.pop()
            ret.append(piece)
    # Then rows top-to-bottom, pieces left-to-right; the red car keeps its
    # fixed color and symbol.
    for row in range(6):
        row_horiz = [x for x in horizontals if x['end_a_row'] == row]
        sorted_horiz = sorted(row_horiz, key=lambda x:x['end_a_col'])
        for h in sorted_horiz:
            piece = {}
            piece['orientation'] = h['orientation']
            piece['end_a_col'] = h['end_a_col']
            piece['end_a_row'] = h['end_a_row']
            if piece['orientation'] == RHConstants.HORIZONTAL_CAR:
                if piece['end_a_row'] == 2 and piece['end_a_col'] in self._red_car_cols():
                    piece['color'] = red_car_color
                    piece['text'] = red_car_symbol
                else:
                    piece['color'] = car_colors.pop()
                    piece['text'] = car_symbols.pop()
            else:
                piece['color'] = truck_colors.pop()
                piece['text'] = truck_symbols.pop()
            ret.append(piece)
    self._svg_pieces = ret
    return(self._svg_pieces)
'''
suppress edge algo:
have array of dicts for the various nbhrd compass points
x:
y:
color:
js_id:
visibility:
in RHState:
- build neighborhood by moving svg_pieces
- have 4 lists of dicts
- up_nbrs = []
- left_nbrs = []
- down_nbrs = []
- right_nbrs = []
'''
def svg(self):
    """Return SVG markup for this state's board, building and caching the
    drawing on first use."""
    if self._svg_dwg is not None:
        return self._svg_dwg.tostring()
    # Build out SVG if not already built out
    dwg = self._svg_base()
    # ROBUSTNESS FIX: go through the svg_pieces() accessor so the piece
    # list is computed on demand; the original iterated self._svg_pieces
    # directly, which fails when svg() is the first SVG routine called
    # (the cache attribute is still unset/None).
    for piece in self.svg_pieces():
        row = piece['end_a_row']
        col = piece['end_a_col']
        orient = piece['orientation']
        color = piece['color']
        text = piece['text']
        dwg = self._svg_add_piece(dwg,row,col,orient,color,text)
    self._svg_dwg = dwg
    return self._svg_dwg.tostring()
def _svg_base(self):
    """Create the empty board drawing: background rectangle plus a 6x6 grid."""
    s = self._svg_space_size
    board = self._svg_board_size
    dwg = svgwrite.Drawing('nosave.svg',(board,board),debug=True)
    dwg.add(dwg.rect(insert=(0,0),size=(board,board),\
                     fill='#'+RHConstants.BLANK_COLOR_RGB))
    # CONSISTENCY FIX: derive grid geometry from the configured sizes
    # instead of the hard-coded 30/180 of the original, so a change to
    # _svg_space_size in _init_svg keeps the grid aligned. Values are
    # identical for the current s=30, board=180.
    for x in range(7):
        dwg.add(dwg.line((s*x,0),(s*x,board),stroke='black',stroke_width=2))
        dwg.add(dwg.line((0,s*x),(board,s*x),stroke='black',stroke_width=2))
    return dwg
#dwg.add(dwg.rect(insert=(65, 35), size=(50, 20),rx=5,ry=5,fill='green', stroke_width=3))
#def svg_add_piece(dwg,nd_x,nd_y,orientation,color,text):
def _svg_add_piece(self,dwg,nd_row,nd_col,piece_type,color,text):
    """Draw one vehicle (rounded rectangle plus centred label) onto dwg
    and return the drawing."""
    space = self._svg_space_size
    border = self._svg_border
    # Top-left corner of the piece in pixel coordinates.
    x0 = nd_col * space + border
    y0 = nd_row * space + border
    width, height = self._svg_size[piece_type]
    body = dwg.rect(insert=(x0, y0),
                    size=(width, height),
                    rx=self._svg_round_radius,
                    ry=self._svg_round_radius,
                    fill=color,
                    stroke_width=3)
    dwg.add(body)
    # Centre the symbol inside the rectangle.
    centre = (x0 + width / 2.0, y0 + height / 2.0)
    dwg.add(dwg.text(text, insert=centre,
                     style='fill:black;text-anchor:middle;alignment-baseline:central'))
    return dwg
'''
A vehicle data type:
orientation: RHConstants.VERTICAL_CAR
end_a_col
end_a_row
'''
def _svg_vehicles(self):
''' 9/1/19 - experimental approach to putting svg logic in one place
Not being used
two dicts for each piece on the board
rect_args:
x
y
size
rx
ry
fill_color
stroke_width
text_args:
text
c_x
y_x
style
'''
ret = {'verticals':[], 'horizontals':[]}
# by creating this copies of the color/symbol arrays, we can use
# arrray.pop() to cycle through the colors and avoid index math steps
car_colors = ['#' + x for x in RHConstants.CAR_COLORS_RGB]
car_symbols = RHConstants.CAR_SYMBOLS[:]
truck_colors = ['#' + x for x in RHConstants.TRUCK_COLORS_RGB ]
truck_symbols = RHConstants.TRUCK_SYMBOLS[:]
red_car_color = '#' + RHConstants.RED_COLOR_RGB
red_car_symbool = RHConstants.RED_SYMBOL
text_style = 'fill:black;text-anchor:middle;alignment-baseline:central'
discovered = np.aray([False]*36).reshape(6,6)
ret = {'vcars':[], 'hcars':[], 'vtrucks':[],'htrucks':[]}
for row in range(6):
for col in range(6):
x = col | |
== '__init__':
# __init__ can be incompatible -- it's a special case.
return
first = base1[name]
second = base2[name]
first_type = first.type
if first_type is None and isinstance(first.node, FuncDef):
first_type = self.function_type(first.node)
second_type = second.type
if second_type is None and isinstance(second.node, FuncDef):
second_type = self.function_type(second.node)
# TODO: What if some classes are generic?
if (isinstance(first_type, FunctionLike) and
isinstance(second_type, FunctionLike)):
# Method override
first_sig = method_type(first_type)
second_sig = method_type(second_type)
ok = is_subtype(first_sig, second_sig)
elif first_type and second_type:
ok = is_equivalent(first_type, second_type)
else:
if first_type is None:
self.msg.cannot_determine_type_in_base(name, base1.name(), ctx)
if second_type is None:
self.msg.cannot_determine_type_in_base(name, base2.name(), ctx)
ok = True
if not ok:
self.msg.base_class_definitions_incompatible(name, base1, base2,
ctx)
def visit_import_from(self, node: ImportFrom) -> Type:
    """Type check a 'from ... import ...' statement (delegates to check_import)."""
    self.check_import(node)
def visit_import_all(self, node: ImportAll) -> Type:
    """Type check a 'from ... import *' statement (delegates to check_import)."""
    self.check_import(node)
def check_import(self, node: ImportBase) -> Type:
    """Verify that each name bound by an import is compatible with any
    previously declared type of the local name it is assigned to."""
    for assign in node.assignments:
        lvalue = assign.lvalues[0]
        lvalue_type, _, __ = self.check_lvalue(lvalue)
        if lvalue_type is None:
            # TODO: This is broken.
            lvalue_type = AnyType()
        message = '{} "{}"'.format(messages.INCOMPATIBLE_IMPORT_OF,
                                   cast(NameExpr, assign.rvalue).name)
        # Treat the import as a simple assignment of the imported name
        # to the local name and check it like any other assignment.
        self.check_simple_assignment(lvalue_type, assign.rvalue, node,
                                     msg=message, lvalue_name='local name',
                                     rvalue_name='imported name')
#
# Statements
#
def visit_block(self, b: Block) -> Type:
    """Type check each statement of a block in order, stopping early once
    the binder reports that control flow breaks out of the block."""
    if b.is_unreachable:
        return None
    for s in b.body:
        self.accept(s)
        if self.binder.breaking_out:
            break
def visit_assignment_stmt(self, s: AssignmentStmt) -> Type:
    """Type check an assignment statement.
    Handle all kinds of assignment statements (simple, indexed, multiple).
    """
    # Check the rightmost lvalue first; lvalue type inference is enabled
    # only when there is no explicit type annotation (s.type is None).
    self.check_assignment(s.lvalues[-1], s.rvalue, s.type is None)
    if len(s.lvalues) > 1:
        # Chained assignment (e.g. x = y = ...).
        # Make sure that rvalue type will not be reinferred.
        if s.rvalue not in self.type_map:
            self.accept(s.rvalue)
        rvalue = self.temp_node(self.type_map[s.rvalue], s)
        for lv in s.lvalues[:-1]:
            self.check_assignment(lv, rvalue, s.type is None)
def check_assignment(self, lvalue: Node, rvalue: Node, infer_lvalue_type: bool = True) -> None:
    """Type check a single assignment: lvalue = rvalue."""
    if isinstance(lvalue, TupleExpr) or isinstance(lvalue, ListExpr):
        # Destructuring assignment; handled recursively per item.
        self.check_assignment_to_multiple_lvalues(lvalue.items, rvalue, lvalue,
                                                  infer_lvalue_type)
    else:
        lvalue_type, index_lvalue, inferred = self.check_lvalue(lvalue)
        if lvalue_type:
            if isinstance(lvalue_type, PartialType) and lvalue_type.type is None:
                # Try to infer a proper type for a variable with a partial None type.
                rvalue_type = self.accept(rvalue)
                if isinstance(rvalue_type, NoneTyp):
                    # This doesn't actually provide any additional information -- multiple
                    # None initializers preserve the partial None type.
                    return
                if is_valid_inferred_type(rvalue_type):
                    var = lvalue_type.var
                    partial_types = self.find_partial_types(var)
                    if partial_types is not None:
                        # The partial type is resolved here: record the full
                        # type on the Var and drop it from the partial set.
                        if not self.current_node_deferred:
                            if experiments.STRICT_OPTIONAL:
                                # None initializer widens the type to Optional.
                                var.type = UnionType.make_simplified_union(
                                    [rvalue_type, NoneTyp()])
                            else:
                                var.type = rvalue_type
                        else:
                            var.type = None
                        del partial_types[var]
                        lvalue_type = var.type
                else:
                    # Try to infer a partial type. No need to check the return value, as
                    # an error will be reported elsewhere.
                    self.infer_partial_type(lvalue_type.var, lvalue, rvalue_type)
            elif (is_literal_none(rvalue) and
                    isinstance(lvalue, NameExpr) and
                    isinstance(lvalue.node, Var) and
                    lvalue.node.is_initialized_in_class):
                # Allow None's to be assigned to class variables with non-Optional types.
                rvalue_type = lvalue_type
            else:
                rvalue_type = self.check_simple_assignment(lvalue_type, rvalue, lvalue)
            if rvalue_type and infer_lvalue_type:
                # Record the (possibly narrowed) type in the binder.
                self.binder.assign_type(lvalue,
                                        rvalue_type,
                                        lvalue_type,
                                        self.typing_mode_weak())
        elif index_lvalue:
            # Assignment through __setitem__ (e.g. d[k] = v).
            self.check_indexed_assignment(index_lvalue, rvalue, rvalue)
        if inferred:
            # A brand-new variable: infer its type from the rvalue.
            self.infer_variable_type(inferred, lvalue, self.accept(rvalue),
                                     rvalue)
def check_assignment_to_multiple_lvalues(self, lvalues: List[Node], rvalue: Node,
                                         context: Context,
                                         infer_lvalue_type: bool = True) -> None:
    """Check an unpacking assignment (a, b = ... / [a, b] = ...), pairing
    lvalues with rvalue items when the rvalue is a literal tuple/list."""
    if isinstance(rvalue, TupleExpr) or isinstance(rvalue, ListExpr):
        # Recursively go into Tuple or List expression rhs instead of
        # using the type of rhs, because this allowed more fine grained
        # control in cases like: a, b = [int, str] where rhs would get
        # type List[object]
        rvalues = rvalue.items
        if self.check_rvalue_count_in_assignment(lvalues, len(rvalues), context):
            # Split the lvalues and rvalues around a possible *star lvalue.
            star_index = next((i for i, lv in enumerate(lvalues) if
                               isinstance(lv, StarExpr)), len(lvalues))
            left_lvs = lvalues[:star_index]
            star_lv = cast(StarExpr,
                           lvalues[star_index]) if star_index != len(lvalues) else None
            right_lvs = lvalues[star_index + 1:]
            left_rvs, star_rvs, right_rvs = self.split_around_star(
                rvalues, star_index, len(lvalues))
            lr_pairs = list(zip(left_lvs, left_rvs))
            if star_lv:
                # The star target receives the middle rvalues as a list.
                rv_list = ListExpr(star_rvs)
                rv_list.set_line(rvalue.get_line())
                lr_pairs.append((star_lv.expr, rv_list))
            lr_pairs.extend(zip(right_lvs, right_rvs))
            for lv, rv in lr_pairs:
                self.check_assignment(lv, rv, infer_lvalue_type)
    else:
        # Non-literal rvalue: fall back to type-based multi-assignment.
        self.check_multi_assignment(lvalues, rvalue, context, infer_lvalue_type)
def check_rvalue_count_in_assignment(self, lvalues: List[Node], rvalue_count: int,
                                     context: Context) -> bool:
    """Report an error and return False if rvalue_count values cannot be
    unpacked into the given lvalues; otherwise return True."""
    has_star = any(isinstance(lv, StarExpr) for lv in lvalues)
    if has_star:
        # A star target absorbs any surplus, so only too-few values fails.
        if len(lvalues) - 1 > rvalue_count:
            self.msg.wrong_number_values_to_unpack(rvalue_count,
                                                   len(lvalues) - 1, context)
            return False
        return True
    # Without a star target the counts must match exactly.
    if rvalue_count != len(lvalues):
        self.msg.wrong_number_values_to_unpack(rvalue_count,
                                               len(lvalues), context)
        return False
    return True
def check_multi_assignment(self, lvalues: List[Node],
                           rvalue: Node,
                           context: Context,
                           infer_lvalue_type: bool = True,
                           msg: str = None) -> None:
    """Check the assignment of one rvalue to a number of lvalues."""
    # NOTE(review): the 'msg' parameter is not used in this body -- confirm
    # whether callers still pass it.
    # Infer the type of an ordinary rvalue expression.
    rvalue_type = self.accept(rvalue)  # TODO maybe elsewhere; redundant
    undefined_rvalue = False
    if isinstance(rvalue_type, AnyType):
        # Any on the right-hand side: each lvalue simply becomes Any.
        for lv in lvalues:
            if isinstance(lv, StarExpr):
                lv = lv.expr
            self.check_assignment(lv, self.temp_node(AnyType(), context), infer_lvalue_type)
    elif isinstance(rvalue_type, TupleType):
        # Known tuple shape: match items positionally.
        self.check_multi_assignment_from_tuple(lvalues, rvalue, rvalue_type,
                                               context, undefined_rvalue, infer_lvalue_type)
    else:
        # Fall back to treating the rvalue as a general iterable.
        self.check_multi_assignment_from_iterable(lvalues, rvalue_type,
                                                  context, infer_lvalue_type)
def check_multi_assignment_from_tuple(self, lvalues: List[Node], rvalue: Node,
                                      rvalue_type: TupleType, context: Context,
                                      undefined_rvalue: bool,
                                      infer_lvalue_type: bool = True) -> None:
    """Check unpacking of a tuple-typed rvalue into lvalues, honouring a
    possible *star target."""
    if self.check_rvalue_count_in_assignment(lvalues, len(rvalue_type.items), context):
        star_index = next((i for i, lv in enumerate(lvalues)
                           if isinstance(lv, StarExpr)), len(lvalues))
        left_lvs = lvalues[:star_index]
        star_lv = cast(StarExpr, lvalues[star_index]) if star_index != len(lvalues) else None
        right_lvs = lvalues[star_index + 1:]
        if not undefined_rvalue:
            # Infer rvalue again, now in the correct type context.
            lvalue_type = self.lvalue_type_for_inference(lvalues, rvalue_type)
            rvalue_type = cast(TupleType, self.accept(rvalue, lvalue_type))
        left_rv_types, star_rv_types, right_rv_types = self.split_around_star(
            rvalue_type.items, star_index, len(lvalues))
        # Check each lvalue against the type of its matching rvalue item.
        for lv, rv_type in zip(left_lvs, left_rv_types):
            self.check_assignment(lv, self.temp_node(rv_type, context), infer_lvalue_type)
        if star_lv:
            # The star target receives the middle items as a list.
            nodes = [self.temp_node(rv_type, context) for rv_type in star_rv_types]
            list_expr = ListExpr(nodes)
            list_expr.set_line(context.get_line())
            self.check_assignment(star_lv.expr, list_expr, infer_lvalue_type)
        for lv, rv_type in zip(right_lvs, right_rv_types):
            self.check_assignment(lv, self.temp_node(rv_type, context), infer_lvalue_type)
def lvalue_type_for_inference(self, lvalues: List[Node], rvalue_type: TupleType) -> Type:
star_index = next((i for i, lv in enumerate(lvalues)
if isinstance(lv, StarExpr)), len(lvalues))
left_lvs = lvalues[:star_index]
star_lv = cast(StarExpr, lvalues[star_index]) if star_index != len(lvalues) else None
right_lvs = lvalues[star_index + 1:]
left_rv_types, star_rv_types, right_rv_types = self.split_around_star(
rvalue_type.items, star_index, len(lvalues))
type_parameters = [] # type: List[Type]
def append_types_for_inference(lvs: List[Node], rv_types: List[Type]) -> None:
for lv, rv_type in zip(lvs, rv_types):
sub_lvalue_type, index_expr, inferred = self.check_lvalue(lv)
if sub_lvalue_type:
type_parameters.append(sub_lvalue_type)
else: # index lvalue
# TODO Figure out more precise type context, probably
# based on the type signature of the _set method.
type_parameters.append(rv_type)
append_types_for_inference(left_lvs, left_rv_types)
if star_lv:
sub_lvalue_type, index_expr, inferred = self.check_lvalue(star_lv.expr)
if sub_lvalue_type:
type_parameters.extend([sub_lvalue_type] * len(star_rv_types))
else: # index lvalue
# TODO Figure out more precise type context, probably
# based on the type signature of the _set method.
type_parameters.extend(star_rv_types)
append_types_for_inference(right_lvs, right_rv_types)
return TupleType(type_parameters, self.named_type('builtins.tuple'))
def split_around_star(self, items: List[T], star_index: int,
length: int) -> Tuple[List[T], List[T], List[T]]:
"""Splits a list of items in three to match another list of length 'length'
that contains a starred expression at 'star_index' in the following way:
star_index = 2, length = 5 (i.e., [a,b,*,c,d]), items = [1,2,3,4,5,6,7]
returns in: ([1,2], [3,4,5], [6,7])
"""
nr_right_of_star = length - star_index - 1
right_index = nr_right_of_star if -nr_right_of_star != 0 else len(items)
left = items[:star_index]
star = items[star_index:right_index]
right = items[right_index:]
return (left, star, right)
def type_is_iterable(self, type: Type) -> bool:
return (is_subtype(type, self.named_generic_type('typing.Iterable',
[AnyType()])) and
isinstance(type, Instance))
def check_multi_assignment_from_iterable(self, lvalues: List[Node], rvalue_type: Type,
context: Context,
infer_lvalue_type: bool = True) -> None:
if self.type_is_iterable(rvalue_type):
item_type = self.iterable_item_type(cast(Instance, rvalue_type))
for lv in lvalues:
if isinstance(lv, StarExpr):
self.check_assignment(lv.expr, self.temp_node(rvalue_type, context),
infer_lvalue_type)
else:
self.check_assignment(lv, self.temp_node(item_type, context),
infer_lvalue_type)
else:
self.msg.type_not_iterable(rvalue_type, context)
    def check_lvalue(self, lvalue: Node) -> Tuple[Type, IndexExpr, Var]:
        """Analyze an assignment target.

        Returns (lvalue_type, index_lvalue, inferred):
          * lvalue_type: analyzed type of an already-defined target
          * index_lvalue: the IndexExpr for an indexed target (x[i] = ...)
          * inferred: the Var whose type this assignment will define
        At most one of the three is not None.
        """
        lvalue_type = None  # type: Type
        index_lvalue = None  # type: IndexExpr
        inferred = None  # type: Var
        if self.is_definition(lvalue):
            if isinstance(lvalue, NameExpr):
                inferred = cast(Var, lvalue.node)
                assert isinstance(inferred, Var)
            else:
                # Definition through a member expression; still type-check
                # the base expression.
                m = cast(MemberExpr, lvalue)
                self.accept(m.expr)
                inferred = m.def_var
        elif isinstance(lvalue, IndexExpr):
            index_lvalue = lvalue
        elif isinstance(lvalue, MemberExpr):
            lvalue_type = self.expr_checker.analyze_ordinary_member_access(lvalue,
                                                                 True)
            self.store_type(lvalue, lvalue_type)
        elif isinstance(lvalue, NameExpr):
            lvalue_type = self.expr_checker.analyze_ref_expr(lvalue, lvalue=True)
            self.store_type(lvalue, lvalue_type)
        elif isinstance(lvalue, TupleExpr) or isinstance(lvalue, ListExpr):
            # Nested unpacking target: recurse into the items and build a
            # tuple type from the element lvalue types.
            types = [self.check_lvalue(sub_expr)[0] for sub_expr in lvalue.items]
            lvalue_type = TupleType(types, self.named_type('builtins.tuple'))
        else:
            lvalue_type = self.accept(lvalue)
        return lvalue_type, index_lvalue, inferred
def is_definition(self, s: Node) -> bool:
if isinstance(s, NameExpr):
if s.is_def:
return True
# If the node type is not defined, this must the first assignment
# that we process => this is a definition, even though the semantic
# analyzer did not recognize this as such. This can arise in code
# that uses isinstance checks, if type checking of the primary
# definition is skipped due to an always False type check.
node = s.node
if isinstance(node, Var):
return node.type is None
elif isinstance(s, MemberExpr):
| |
= get_children(elem, ['date'])
B236['date'] = resolve_date(children['date'])
return B236
def resolve_B230(elem: etree.Element) -> dict:
    '''
    <B230>: Other dates
    '''
    children = get_children(elem, ['B236', 'B238EP', 'B238'])
    # Every child tag is optional; resolve those that are present.
    B230 = {}
    for tag, resolve in (('B236', resolve_B236),
                         ('B238EP', resolve_B238EP),
                         ('B238', resolve_B238)):
        if tag in children:
            B230[tag] = resolve(children[tag])
    return B230
def resolve_B220(elem: etree.Element) -> dict:
    '''
    <B220>: Application filing date
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    B220 = dict()
    children = get_children(elem, ['date'])
    B220['date'] = resolve_date(children['date'])
    return B220
def resolve_B210(elem: etree.Element) -> str:
    '''
    [root]<B210>: Application number
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B200(elem: etree.Element) -> dict:
    '''
    <B200>: Domestic filing data
    '''
    children = get_children(elem, ['B210', 'B220', 'B230', 'B240', 'B250', 'B251EP', 'B260', 'B270'])
    # <B210> and <B220> are mandatory; the remaining children are optional.
    B200 = {
        'B210': resolve_B210(children['B210']),
        'B220': resolve_B220(children['B220']),
    }
    optional = (('B230', resolve_B230), ('B240', resolve_B240),
                ('B250', resolve_B250), ('B251EP', resolve_B251EP),
                ('B260', resolve_B260), ('B270', resolve_B270))
    for tag, resolve in optional:
        if tag in children:
            B200[tag] = resolve(children[tag])
    return B200
def resolve_B190(elem: etree.Element) -> str:
    '''
    <B190>: Publishing country or organisation
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B1552(elem: etree.Element) -> str:
    '''
    <B1552>: Part text
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B1551(elem: etree.Element) -> str:
    '''
    <B1551>: Part language
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B155(elem: etree.Element) -> list:
    '''
    <B155>: Affected parts of document
    '''
    children_group = get_children_group(elem, {'g': ['B1551', 'B1552']})
    # One entry per <g> group: part language plus part text.
    return [
        {'B1551': resolve_B1551(g['B1551']),
         'B1552': resolve_B1552(g['B1552'])}
        for g in children_group['g']
    ]
def resolve_B1542(elem: etree.Element) -> str:
    '''
    <B1542>: Note text
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B1541(elem: etree.Element) -> str:
    '''
    <B1541>: Note language
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B154(elem: etree.Element) -> list:
    '''
    <B154>: Standard notes
    Returns one {'B1541': language, 'B1542': text} dict per <g> group.
    '''
    B154 = list()
    children_group = get_children_group(elem, {
        'g': ['B1541', 'B1542']
    })
    for g in children_group['g']:
        # Fixed copy-paste defect: the B1541/B1542 children were previously
        # routed through resolve_B1551/resolve_B1552 (the B155 resolvers).
        # Both pairs currently return elem.text so behavior is unchanged,
        # but the matching resolvers are now used for consistency.
        B154.append({
            'B1541': resolve_B1541(g['B1541']),
            'B1542': resolve_B1542(g['B1542'])
        })
    return B154
def resolve_B153(elem: etree.Element) -> str:
    '''
    <B153>: Affected INID codes
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B151(elem: etree.Element) -> str:
    '''
    <B151>: Supplementary correction code
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B150(elem: etree.Element) -> dict:
    '''
    <B150>: Patent correction information
    '''
    children = get_children(elem, ['B151', 'B153', 'B154', 'B155'])
    # All four children are optional.
    B150 = {}
    for tag, resolve in (('B151', resolve_B151), ('B153', resolve_B153),
                         ('B154', resolve_B154), ('B155', resolve_B155)):
        if tag in children:
            B150[tag] = resolve(children[tag])
    return B150
def resolve_B140(elem: etree.Element) -> dict:
    '''
    <B140>: Date of publication
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    B140 = dict()
    children = get_children(elem, ['date'])
    B140['date'] = resolve_date(children['date'])
    return B140
def resolve_B132EP(elem: etree.Element) -> str:
    '''
    <B132EP>: Original kind code
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B130(elem: etree.Element) -> str:
    '''
    <B130>: Kind of document
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B121EP(elem: etree.Element) -> str:
    '''
    <B121EP>: Any Descriptive text for B121 (EPO)
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B121(elem: etree.Element) -> str:
    '''
    <B121>: Plain language designation of the kind of document
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B120(elem: etree.Element) -> dict:
    '''
    <B120>: Plain language designation
    '''
    children = get_children(elem, ['B121', 'B121EP'])
    # <B121> is mandatory; <B121EP> is optional.
    result = {'B121': resolve_B121(children['B121'])}
    if 'B121EP' in children:
        result['B121EP'] = resolve_B121EP(children['B121EP'])
    return result
def resolve_B110(elem: etree.Element) -> str:
    '''
    <B110>: Publication number of the document (EPO or WIPO)
    '''
    # The publication number is the element's plain text content.
    number = elem.text
    return number
def resolve_B100(elem: etree.Element) -> dict:
    '''
    <B100>: Document identification
    Mandatory children: B110, B130, B140, B190; B120, B132EP, B150 optional.
    '''
    B100 = dict()
    children = get_children(elem, ['B110', 'B120', 'B130', 'B132EP', 'B140', 'B150', 'B190'])
    B100['B110'] = resolve_B110(children['B110'])
    if 'B120' in children:
        B100['B120'] = resolve_B120(children['B120'])
    # Fixed copy-paste defect: <B130> was resolved with resolve_B110.  Both
    # currently return elem.text, so behavior is unchanged, but the matching
    # resolver is now used for consistency.
    B100['B130'] = resolve_B130(children['B130'])
    if 'B132EP' in children:
        B100['B132EP'] = resolve_B132EP(children['B132EP'])
    B100['B140'] = resolve_B140(children['B140'])
    if 'B150' in children:
        B100['B150'] = resolve_B150(children['B150'])
    B100['B190'] = resolve_B190(children['B190'])
    return B100
def resolve_B0933EP(elem: etree.Element) -> dict:
    '''
    <B0933EP>: Full payment received on
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    B0933EP = dict()
    children = get_children(elem, ['date'])
    B0933EP['date'] = resolve_date(children['date'])
    return B0933EP
def resolve_B0932EP(elem: etree.Element) -> dict:
    '''
    <B0932EP>: Translation received on
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    B0932EP = dict()
    children = get_children(elem, ['date'])
    B0932EP['date'] = resolve_date(children['date'])
    return B0932EP
def resolve_B0931EP(elem: etree.Element) -> dict:
    '''
    <B0931EP>: Despatched on
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    B0931EP = dict()
    children = get_children(elem, ['date'])
    B0931EP['date'] = resolve_date(children['date'])
    return B0931EP
def resolve_B093EP(elem: etree.Element) -> dict:
    '''
    <B093EP>: Limitation procedure - Limitation request allowed fields
    '''
    children = get_children(elem, ['B0931EP', 'B0932EP', 'B0933EP'])
    # <B0931EP> is mandatory; the other two children are optional.
    result = {'B0931EP': resolve_B0931EP(children['B0931EP'])}
    for tag, resolve in (('B0932EP', resolve_B0932EP),
                         ('B0933EP', resolve_B0933EP)):
        if tag in children:
            result[tag] = resolve(children[tag])
    return result
def resolve_B0914EP(elem: etree.Element) -> dict:
    '''
    <B0914EP>: Date of payment
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    B0914EP = dict()
    children = get_children(elem, ['date'])
    B0914EP['date'] = resolve_date(children['date'])
    return B0914EP
def resolve_B0913EP(elem: etree.Element) -> str:
    '''
    <B0913EP>: Limitation kind
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B0912EP(elem: etree.Element) -> str:
    '''
    <B0912EP>: Decision code
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B0911EP(elem: etree.Element) -> dict:
    '''
    <B0911EP>: Date of filing
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    B0911EP = dict()
    children = get_children(elem, ['date'])
    B0911EP['date'] = resolve_date(children['date'])
    return B0911EP
def resolve_B091EP(elem: etree.Element) -> dict:
    '''
    <B091EP>: Limitation procedure - Initial filing
    '''
    children = get_children(elem, ['B0911EP', 'B0912EP', 'B0913EP', 'B0914EP'])
    # <B0911EP> is mandatory; the remaining children are optional.
    result = {'B0911EP': resolve_B0911EP(children['B0911EP'])}
    for tag, resolve in (('B0912EP', resolve_B0912EP),
                         ('B0913EP', resolve_B0913EP),
                         ('B0914EP', resolve_B0914EP)):
        if tag in children:
            result[tag] = resolve(children[tag])
    return result
def resolve_B0900EP(elem: etree.Element) -> dict:
    '''
    <B0900EP>: sub-group of B090EP
    '''
    B0900EP = {'attr': get_attr(elem)}
    children = get_children(elem, ['B091EP', 'B093EP'])
    # <B091EP> is repeatable: get_children yields a list for several
    # occurrences and a single element otherwise -- normalise to a list.
    entries = children['B091EP']
    if not isinstance(entries, list):
        entries = [entries]
    B0900EP['B091EP'] = [resolve_B091EP(entry) for entry in entries]
    if 'B093EP' in children:
        B0900EP['B093EP'] = resolve_B093EP(children['B093EP'])
    return B0900EP
def resolve_B090EP(elem: etree.Element) -> dict:
    '''
    <B090EP>: Limitation procedure
    '''
    children = get_children(elem, ['B0900EP'])
    # <B0900EP> is repeatable; normalise a single occurrence to a list.
    entries = children['B0900EP']
    if not isinstance(entries, list):
        entries = [entries]
    return {'B0900EP': [resolve_B0900EP(entry) for entry in entries]}
def resolve_B078EP(elem: etree.Element) -> dict:
    '''
    <B078EP>: Date of 'No opposition filed'
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    B078EP = dict()
    children = get_children(elem, ['date'])
    B078EP['date'] = resolve_date(children['date'])
    return B078EP
def resolve_B0756EP(elem: etree.Element) -> str:
    '''
    <B0756EP>: Kind of decision : R01 to R12
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B0755EP(elem: etree.Element) -> dict:
    '''
    <B0755EP>: Date of decision
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    # Fixed copy-paste defect: the local dict was named B0753EP (copied from
    # resolve_B0753EP); behavior is unchanged.
    B0755EP = dict()
    children = get_children(elem, ['date'])
    B0755EP['date'] = resolve_date(children['date'])
    return B0755EP
def resolve_B0753EP(elem: etree.Element) -> dict:
    '''
    <B0753EP>: Date of notice of petition (May be included at a later date)
    Returns {'date': ...}; the <date> child is mandatory here.
    '''
    B0753EP = dict()
    children = get_children(elem, ['date'])
    B0753EP['date'] = resolve_date(children['date'])
    return B0753EP
def resolve_B0752EP(elem: etree.Element) -> str:
    '''
    <B0752EP>: Petitioner code
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B0751EP(elem: etree.Element) -> str:
    '''
    <B0751EP>: Appeal number
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B0750EP(elem: etree.Element) -> dict:
    '''
    Petition for review number (As we can have multiple petitions for review). Comprises a unique number for a given year, e.g. R0001/08
    '''
    tags = ['B0751EP', 'B0752EP', 'B0753EP', 'B0755EP', 'B0756EP']
    resolvers = (resolve_B0751EP, resolve_B0752EP, resolve_B0753EP,
                 resolve_B0755EP, resolve_B0756EP)
    B0750EP = {'attr': get_attr(elem)}
    children = get_children(elem, tags)
    # Every child tag is optional.
    for tag, resolve in zip(tags, resolvers):
        if tag in children:
            B0750EP[tag] = resolve(children[tag])
    return B0750EP
def resolve_B075EP(elem: etree.Element) -> dict:
    '''
    <B075EP>: Petition for review
    '''
    children = get_children(elem, ['B0750EP'])
    # <B0750EP> is repeatable; normalise a single occurrence to a list.
    entries = children['B0750EP']
    if not isinstance(entries, list):
        entries = [entries]
    return {'B0750EP': [resolve_B0750EP(entry) for entry in entries]}
def resolve_B070EP(elem: etree.Element) -> str:
    '''
    <B070EP>: B publication technical field (subsequently filed technical information)
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B053EP(elem: etree.Element) -> str:
    '''
    <B053EP>: Additional remarks
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B051EP(elem: etree.Element) -> str:
    '''
    <B051EP>: Language
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B052EP(elem: etree.Element) -> str:
    '''
    <B052EP>: Free Text
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B050EP(elem: etree.Element) -> list:
    '''
    <B050EP>: Free text
    '''
    children_group = get_children_group(elem, {'text': ['B051EP', 'B052EP']})
    # One entry per <text> group: language plus free-text content.
    return [
        {'B051EP': resolve_B051EP(text['B051EP']),
         'B052EP': resolve_B052EP(text['B052EP'])}
        for text in children_group['text']
    ]
def resolve_B015EP(elem: etree.Element) -> str:
    '''
    <B015EP>: Number of copies to be printed
    Returns the element's plain text content.
    '''
    return elem.text
def resolve_B011EP(elem: etree.Element) -> dict:
    '''
    <B011EP>: Serial number date and states
    Returns a dict with mandatory 'date' and 'dnum' entries and, when
    present, a 'ctry' list with one resolved entry per <ctry> child.
    '''
    B011EP = dict()
    children = get_children(elem, ['date', 'dnum', 'ctry'])
    B011EP['date'] = resolve_date(children['date'])
    B011EP['dnum'] = resolve_dnum(children['dnum'])
    if 'ctry' in children:
        # Bug fix: the loop previously *assigned* each resolved <ctry> to
        # B011EP['ctry'], keeping only the last country (and the
        # single-element branch stored a scalar despite the list
        # initialisation).  Follow the pattern used by the other repeatable
        # tags in this module (e.g. resolve_B0900EP) and collect a list.
        ctry_children = children['ctry']
        if not isinstance(ctry_children, list):
            ctry_children = [ctry_children]
        B011EP['ctry'] = [resolve_ctry(ctry) for ctry in ctry_children]
    return B011EP
def resolve_B010EP(elem: etree.Element) -> dict:
    '''
    <B010EP>: Other rights and legal means of execution
    '''
    children = get_children(elem, ['B011EP'])
    # <B011EP> is repeatable; normalise a single occurrence to a list.
    entries = children['B011EP']
    if not isinstance(entries, list):
        entries = [entries]
    return {'B011EP': [resolve_B011EP(entry) for entry in entries]}
def resolve_B009EP(elem: etree.Element) -> dict:
'''
<B009EP>: Text from B725EP tag in the three EPO official languages
Note: The text language order is German, English and French.
'''
B009EP = dict()
children = get_children(elem, ['text'])
B009EP['text'] = []
if isinstance(children['text'], list):
for | |
a list of the leaf nodes of a tree
including the root
"""
leaves = [node for node in tree if tree.degree(node) <= 1]
string_leaves = [l for l in leaves if (type(l) == str) and (l != 'root')]
number_leaves = [l for l in leaves if isinstance(l, Number)]
number_leaves.sort()
string_leaves.sort()
if include_root & ('root' in tree):
string_leaves = string_leaves + ['root']
return number_leaves + string_leaves
def get_internal_nodes(tree):
    """
    Returns a sorted list of the non-leaf nodes of a tree.

    A node is treated as internal when its degree is at least 2.
    """
    internal = [node for node in tree if tree.degree(node) >= 2]
    return sorted(internal)
def get_leaf_descendants(tree, node):
    """
    Returns a list of the leaf nodes of the tree that are
    descendants of node (node itself, if it is a leaf).
    """
    # A node with no outgoing edges is a leaf.
    if tree.out_degree(node) == 0:
        return [node]
    # Collect leaves of every child subtree.  Using extend avoids the
    # quadratic list re-concatenation of the original; the unreachable
    # trailing 'return' after the if/else was removed.
    leaf_descendants = []
    for child in tree.successors(node):
        leaf_descendants.extend(get_leaf_descendants(tree, child))
    return leaf_descendants
def compute_leaf_times(tree, num_leaves):
    """
    Computes the list of times of the leaves by adding 'time_to_parent' along the path to 'root'
    """
    # Only the barcode array is needed here: its row count is the number of
    # leaves.  (The 'num_leaves' argument is unused; kept for callers.)
    _, barcodes = extract_data_arrays(tree)
    n_leaves = barcodes.shape[0]
    times = np.zeros(n_leaves)
    for leaf in range(n_leaves):
        node = leaf
        # Walk up the parent links, accumulating edge times, until 'root'.
        while node != 'root':
            times[leaf] += tree.nodes[node]['time_to_parent']
            node = next(tree.predecessors(node))
    return times
def extract_data_arrays(tree):
    """
    Returns arrays of the RNA expression and barcodes from leaves of the tree
    Each row of each array is a cell
    """
    leaves = get_leaves(tree, include_root = False)
    cells = [tree.nodes[leaf]['cell'] for leaf in leaves]
    expressions = np.array([cell.x for cell in cells])
    barcodes = np.array([cell.barcode for cell in cells])
    return expressions, barcodes
def extract_ancestor_data_arrays(late_tree, time, params):
    """
    Returns arrays of the RNA expression and barcodes for ancestors of leaves of the tree
    Each row of each array is a leaf node

    The ancestors are the leaves of the tree truncated at 'time'; every leaf
    of late_tree receives the expression/barcode of its ancestor at that
    time.  Raises ValueError when a truncated-tree leaf cannot be matched
    (by its cell's seed) back to a child in late_tree.
    """
    leaves = get_leaves(late_tree, include_root = False)
    # Truncate a copy of the tree at 'time'; its leaves are the ancestors.
    early_tree = truncate_tree(late_tree, time, params)
    cells_early = get_leaves(early_tree, include_root = False)
    # Rows stay NaN until the matching ancestor is found.
    expressions = np.nan*np.ones([len(leaves), params.num_genes])
    barcodes = np.nan*np.ones([len(leaves), params.barcode_length])
    for cell in cells_early:
        parent = next(early_tree.predecessors(cell))
        # Match the truncated leaf to the corresponding late_tree node via
        # the simulation seed of its cell.
        late_tree_cell = None
        for child in late_tree.successors(parent):
            if late_tree.nodes[child]['cell'].seed == early_tree.nodes[cell]['cell'].seed:
                late_tree_cell = child
                break
        if late_tree_cell == None:
            raise ValueError("A leaf in early_tree does not appear in late_tree. Cannot find coupling." +
                             "\nCheck whether either tree has been modified since truncating.")
        # All leaf descendants of the matched node share this ancestor.
        descendants = get_leaf_descendants(late_tree, late_tree_cell)
        expressions[descendants, :] = early_tree.nodes[cell]['cell'].x
        barcodes[descendants, :] = early_tree.nodes[cell]['cell'].barcode
    return expressions, barcodes
# Truncating a tree at an earlier endpoint
def truncate_tree(tree, new_end_time, params, inplace = False, current_node = 'root', next_leaf_to_add = 0):
    """
    Removes all nodes at times greater than new_end_time
    and adds new leaves at exactly new_end_time
    params: simulation parameters used to create tree

    Returns the truncated tree in the top-level call; the recursive
    (inplace) calls return the pair (tree, next_leaf_to_add).  New leaves
    are labeled with consecutive integers starting at next_leaf_to_add.
    """
    if not inplace:
        tree = copy.deepcopy(tree)
    # Make sure absolute 'time' annotations exist before comparing times.
    if not ('time' in tree.nodes['root']):
        add_node_times_from_division_times(tree)
    if tree.nodes[current_node]['time'] >= new_end_time:
        # This subtree crosses the cutoff: re-simulate the parent's cell up
        # to new_end_time and replace the whole subtree by one new leaf.
        parent = next(tree.predecessors(current_node))
        initial_cell = tree.nodes[parent]['cell'].deepcopy()
        # Reuse the removed child's seed so the new leaf can be matched to
        # late-tree nodes later (see get_true_coupling).
        initial_cell.seed = tree.nodes[current_node]['cell'].seed
        new_cell = sim.evolve_cell(initial_cell,
                                   new_end_time - tree.nodes[parent]['time'],
                                   params)
        remove_node_and_descendants(tree, current_node)
        tree.add_node(next_leaf_to_add)
        tree.nodes[next_leaf_to_add]['time'] = new_end_time
        tree.nodes[next_leaf_to_add]['time_to_parent'] = new_end_time - tree.nodes[parent]['time']
        tree.nodes[next_leaf_to_add]['cell'] = new_cell
        tree.add_edge(parent, next_leaf_to_add, time = new_end_time - tree.nodes[parent]['time'])
        next_leaf_to_add = next_leaf_to_add + 1
    else:
        # not just tree.successors(node) because that changes as nodes are removed
        children = [child for child in tree.successors(current_node)]
        for child in children:
            tree, next_leaf_to_add = truncate_tree(tree,
                                                   new_end_time,
                                                   params,
                                                   inplace = True,
                                                   current_node = child,
                                                   next_leaf_to_add = next_leaf_to_add)
    if inplace:
        # proxy for "if in recursive case"
        return tree, next_leaf_to_add
    else:
        return tree
def remove_node_and_descendants(tree, node):
    """
    Removes a node and all its descendants from the tree,
    along with the edge from the node's parent.
    """
    # Materialize the child list first: the successor view changes while
    # nodes are being removed.
    for child in list(tree.successors(node)):
        remove_node_and_descendants(tree, child)
    parent = next(tree.predecessors(node))
    tree.remove_edge(parent, node)
    tree.remove_node(node)
    return tree
def resample_cells(tree, params, current_node = 'root', inplace = False):
    """
    Runs a new simulation of the cell evolution on a fixed tree

    The topology and the 'time_to_parent' annotations are kept; every cell
    below current_node is re-drawn by evolving a copy of its parent's cell
    (with a fresh seed) along the existing edge time.  Returns the
    (possibly copied) tree.
    """
    if not inplace:
        tree = copy.deepcopy(tree)
    for child in tree.successors(current_node):
        initial_cell = tree.nodes[current_node]['cell'].deepcopy()
        # Fresh seed so the re-simulation is independent of the original run.
        initial_cell.reset_seed()
        tree.nodes[child]['cell'] = sim.evolve_cell(initial_cell,
                                                    tree.nodes[child]['time_to_parent'],
                                                    params)
        resample_cells(tree, params, current_node = child, inplace = True)
    return tree
def get_true_coupling(early_tree, late_tree):
    """
    Returns the coupling between leaves of early_tree and their descendants in
    late_tree. Assumes that early_tree is a truncated version of late_tree

    The marginal over the early cells is uniform; if cells have different
    numbers of descendants, the marginal over late cells will not be uniform.
    """
    # assumes get_leaves() includes 'root' by default, hence the -1 -- confirm
    num_cells_early = len(get_leaves(early_tree)) - 1
    num_cells_late = len(get_leaves(late_tree)) - 1
    coupling = np.zeros([num_cells_early, num_cells_late])
    cells_early = get_leaves(early_tree, include_root = False)
    for cell in cells_early:
        parent = next(early_tree.predecessors(cell))
        # Match the early leaf to its late_tree counterpart by cell seed.
        late_tree_cell = None
        for child in late_tree.successors(parent):
            if late_tree.nodes[child]['cell'].seed == early_tree.nodes[cell]['cell'].seed:
                late_tree_cell = child
                break
        if late_tree_cell == None:
            raise ValueError("A leaf in early_tree does not appear in late_tree. Cannot find coupling." +
                             "\nCheck whether either tree has been modified since truncating.")
        descendants = get_leaf_descendants(late_tree, late_tree_cell)
        # Mass 1/num_cells_early per early cell, spread uniformly over its
        # leaf descendants.
        coupling[cell, descendants] = 1/(num_cells_early*len(descendants))
    return coupling
def get_lineage_distances_across_time(early_tree, late_tree):
    """
    Returns the matrix of lineage distances between leaves of early_tree and leaves in
    late_tree. Assumes that early_tree is a truncated version of late_tree
    """
    # assumes get_leaves() includes 'root' by default, hence the -1 -- confirm
    num_cells_early = len(get_leaves(early_tree)) - 1
    num_cells_late = len(get_leaves(late_tree)) - 1
    d = np.zeros([num_cells_early, num_cells_late])
    cells_early = get_leaves(early_tree, include_root = False)
    cells_late = get_leaves(late_tree, include_root = False)
    # get length of path up to parent of early_cell and back to early_cell
    for late_cell in cells_late:
        # Shortest-path times from this late leaf to every node, on the
        # undirected version of late_tree.
        distance_dictionary, tmp = nx.single_source_dijkstra(nx.Graph(late_tree),
                                                             late_cell,
                                                             weight = 'time')
        for early_cell in cells_early:
            d[early_cell, late_cell] = (distance_dictionary[next(early_tree.predecessors(early_cell))]
                                        + early_tree.nodes[early_cell]['time_to_parent'])
    # correct distances to descendants
    for early_cell in cells_early:
        parent = next(early_tree.predecessors(early_cell))
        # Match the early leaf to its late_tree counterpart by cell seed.
        late_cell = None
        for child in late_tree.successors(parent):
            if late_tree.nodes[child]['cell'].seed == early_tree.nodes[early_cell]['cell'].seed:
                late_cell = child
                break
        if late_cell is not None:
            descendants = get_leaf_descendants(late_tree, late_cell)
            # Paths to the early cell's own descendants do not go up to its
            # parent and back, so remove the doubled parent edge.
            d[early_cell, descendants] = (d[early_cell, descendants]
                                          - 2*early_tree.nodes[early_cell]['time_to_parent'])
    return d
# For OT using the lineage information, we want to have nodes for ancestors at the early sampling time
def add_nodes_at_time(tree, time_to_add, current_node = 'root', num_nodes_added = 0):
    """
    Splits every edge (u,v) where u['time'] < time_to_add < v['time']
    into (u, w) and (w, v) with w['time'] = time_to_add
    Newly added nodes {w} are labeled as tuples (time_to_add, i)
    The input tree should be annotated with node times already

    Returns the number of nodes added so far (used as the label counter
    for the recursion).
    """
    if tree.nodes[current_node]['time'] == time_to_add:
        # Do not add a new node if we already have one at the correct time
        return num_nodes_added
    else:
        current_children = [child for child in tree.successors(current_node)]
        # Not iterating over edges directly because we're adding edges along the way
        for child in current_children:
            if tree.nodes[child]['time'] > time_to_add:
                # The edge crosses time_to_add: insert the new node.
                split_edge(tree, (current_node, child), (time_to_add, num_nodes_added))
                # Add annotations and correct old ones
                tree.nodes[(time_to_add, num_nodes_added)]['time'] = time_to_add
                tree.nodes[(time_to_add, num_nodes_added)]['time_to_parent'] = time_to_add - tree.nodes[current_node]['time']
                tree.nodes[child]['time_to_parent'] = tree.nodes[child]['time'] - time_to_add
                tree.edges[current_node, (time_to_add, num_nodes_added)]['time'] = time_to_add - tree.nodes[current_node]['time']
                tree.edges[(time_to_add, num_nodes_added), child]['time'] = tree.nodes[child]['time'] - time_to_add
                num_nodes_added = num_nodes_added + 1
            elif tree.nodes[child]['time'] <= time_to_add:
                # The child is still before (or at) the cutoff: recurse.
                num_nodes_added = add_nodes_at_time(tree,
                                                    time_to_add,
                                                    current_node = child,
                                                    num_nodes_added = num_nodes_added)
        return num_nodes_added
def split_edge(tree, edge, new_node):
    """
    Replace edge (u, v) by u -> new_node -> v.

    Note: the two new edges are added without attributes; the caller is
    responsible for annotating them (e.g. with 'time').
    """
    parent, child = edge
    tree.remove_edge(parent, child)
    tree.add_node(new_node)
    tree.add_edge(parent, new_node)
    tree.add_edge(new_node, child)
    return
def get_components(graph, edge_length_key = 'time'):
    """
    Returns subgraph views corresponding to connected components of the graph
    if edges of infinite length are removed
    Parameters
    ----------
    graph: NetworkX graph
    edge_length_key: default 'time'
    Returns
    -------
    subgraphs: List of NetworkX subgraph views
    """
    # copying graph as undirected
    g = nx.Graph(graph)
    # Edges marked with infinite length act as component separators.
    edges_to_remove = [e for e in g.edges if g.edges[e][edge_length_key] == np.inf]
    g.remove_edges_from(edges_to_remove)
    component_node_sets = nx.connected_components(g)
    # Return views of the *original* graph restricted to each component.
    return [graph.subgraph(component_nodes) for component_nodes in component_node_sets]
def add_conditional_means_and_variances(tree, observed_nodes):
"""
Adds the mean and variance of the posterior on 'x' for each of the unobserved
nodes, conditional on the observed values of 'x' in observed_nodes,
assuming that differences along edges are Gaussian with variance equal to
the length of the edge.
In doing so, also adds inverse time annotations to edges.
If no nodes in tree are observed, inverse time annotations are added but
conditional means and variances are not (as there is nothing to condition on).
"""
node_list = [n for n in tree.nodes]
add_inverse_times_to_edges(tree)
l = nx.laplacian_matrix(nx.Graph(tree), nodelist = node_list, weight = 'inverse time')
unobserved_nodes = [n for n in node_list if not n in observed_nodes]
# Resorting so the order of indices in all matrices match
# and | |
import tensorflow as tf
import math
from tqdm import tqdm
from hmc import hmc
from tensorflow.python.platform import flags
from torch.utils.data import DataLoader, Dataset
from models import DspritesNet
from utils import optimistic_restore, ReplayBuffer
import os.path as osp
import numpy as np
from rl_algs.logger import TensorBoardOutputFormat
from scipy.misc import imsave
import os
from custom_adam import AdamOptimizer
# ---- Data loading / experiment bookkeeping -------------------------------
flags.DEFINE_integer('batch_size', 256, 'Size of inputs')
flags.DEFINE_integer('data_workers', 4, 'Number of workers to do things')
flags.DEFINE_string('logdir', 'cachedir', 'directory for logging')
flags.DEFINE_string('savedir', 'cachedir', 'location where log of experiments will be stored')
flags.DEFINE_integer('num_filters', 64, 'number of filters for conv nets -- 32 for miniimagenet, 64 for omniglot.')
flags.DEFINE_float('step_lr', 500, 'size of gradient descent size')
flags.DEFINE_string('dsprites_path', '/root/Data/dsprites-dataset/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz', 'path to dsprites characters')
# ---- Model architecture options ------------------------------------------
flags.DEFINE_bool('cclass', True, 'not cclass')
flags.DEFINE_bool('proj_cclass', False, 'use for backwards compatibility reasons')
flags.DEFINE_bool('spec_norm', True, 'Whether to use spectral normalization on weights')
flags.DEFINE_bool('use_bias', True, 'Whether to use bias in convolution')
flags.DEFINE_bool('use_attention', False, 'Whether to use self attention in network')
# ---- Task selection ------------------------------------------------------
flags.DEFINE_bool('plot_curve', False, 'Generate a curve of results')
flags.DEFINE_integer('num_steps', 20, 'number of steps to optimize the label')
flags.DEFINE_string('task', 'conceptcombine', 'conceptcombine, labeldiscover, gentest, genbaseline, etc.')
flags.DEFINE_bool('joint_shape', False, 'whether to use pos_size or pos_shape')
flags.DEFINE_bool('joint_rot', False, 'whether to use pos_size or pos_shape')
# Conditions on which models to use
flags.DEFINE_bool('cond_pos', True, 'whether to condition on position')
flags.DEFINE_bool('cond_rot', True, 'whether to condition on rotation')
flags.DEFINE_bool('cond_shape', True, 'whether to condition on shape')
flags.DEFINE_bool('cond_scale', True, 'whether to condition on scale')
# ---- Checkpoints to restore each conditional model from ------------------
flags.DEFINE_string('exp_size', 'dsprites_2018_cond_size', 'name of experiments')
flags.DEFINE_string('exp_shape', 'dsprites_2018_cond_shape', 'name of experiments')
flags.DEFINE_string('exp_pos', 'dsprites_2018_cond_pos_cert', 'name of experiments')
flags.DEFINE_string('exp_rot', 'dsprites_cond_rot_119_00', 'name of experiments')
flags.DEFINE_integer('resume_size', 169000, 'First iteration to resume')
flags.DEFINE_integer('resume_shape', 477000, 'Second iteration to resume')
flags.DEFINE_integer('resume_pos', 8000, 'Second iteration to resume')
flags.DEFINE_integer('resume_rot', 690000, 'Second iteration to resume')
flags.DEFINE_integer('break_steps', 300, 'steps to break')
# Whether to train for gentest
flags.DEFINE_bool('train', False, 'whether to train on generalization into multiple different predictions')
FLAGS = flags.FLAGS
class DSpritesGen(Dataset):
    # Dataset for the generalization ("gentest") experiments: combines one
    # latent slice where only position varies with one where only
    # size/shape/rotation varies, plus an optional fraction 'frac' of the
    # remaining combinations.
    def __init__(self, data, latents, frac=0.0):
        l = latents
        # NOTE(review): the indexing below implies latent columns
        # [_, shape, scale, orientation, posX, posY] -- confirm against the
        # dsprites latents_values layout.
        if FLAGS.joint_shape:
            mask_size = (l[:, 3] == 30 * np.pi / 39) & (l[:, 4] == 16/31) & (l[:, 5] == 16/31) & (l[:, 2] == 0.5)
        elif FLAGS.joint_rot:
            mask_size = (l[:, 1] == 1) & (l[:, 4] == 16/31) & (l[:, 5] == 16/31) & (l[:, 2] == 0.5)
        else:
            mask_size = (l[:, 3] == 30 * np.pi / 39) & (l[:, 4] == 16/31) & (l[:, 5] == 16/31) & (l[:, 1] == 1)
        # Slice where only position varies (shape, orientation, scale fixed).
        mask_pos = (l[:, 1] == 1) & (l[:, 3] == 30 * np.pi / 39) & (l[:, 2] == 0.5)
        data_pos = data[mask_pos]
        l_pos = l[mask_pos]
        data_size = data[mask_size]
        l_size = l[mask_size]
        # Tile the position slice so the two subsets are roughly balanced.
        n = data_pos.shape[0] // data_size.shape[0]
        data_pos = np.tile(data_pos, (n, 1, 1))
        l_pos = np.tile(l_pos, (n, 1))
        self.data = np.concatenate((data_pos, data_size), axis=0)
        self.label = np.concatenate((l_pos, l_size), axis=0)
        # NOTE(review): ~(mask_size & mask_pos) only excludes samples in BOTH
        # slices; if the intent is to exclude everything already added above,
        # ~(mask_size | mask_pos) would be needed -- confirm.
        mask_neg = (~(mask_size & mask_pos)) & ((l[:, 1] == 1) & (l[:, 3] == 30 * np.pi / 39))
        data_add = data[mask_neg]
        l_add = l[mask_neg]
        # Mix in a random fraction 'frac' of the held-out combinations.
        perm_idx = np.random.permutation(data_add.shape[0])
        select_idx = perm_idx[:int(frac*perm_idx.shape[0])]
        data_add = data_add[select_idx]
        l_add = l_add[select_idx]
        self.data = np.concatenate((self.data, data_add), axis=0)
        self.label = np.concatenate((self.label, l_add), axis=0)
        self.identity = np.eye(3)
    def __len__(self):
        return self.data.shape[0]
    def __getitem__(self, index):
        im = self.data[index]
        # Noise image used to initialize the sampler.
        im_corrupt = 0.5 + 0.5 * np.random.randn(64, 64)
        if FLAGS.joint_shape:
            # One-hot shape label (shape ids are 1-based).
            label_size = np.eye(3)[self.label[index, 1].astype(np.int32) - 1]
        elif FLAGS.joint_rot:
            # Encode orientation as (cos, sin) to avoid the wrap-around.
            label_size = np.array([np.cos(self.label[index, 3]), np.sin(self.label[index, 3])])
        else:
            label_size = self.label[index, 2:3]
        label_pos = self.label[index, 4:]
        return (im_corrupt, im, label_size, label_pos)
def labeldiscover(sess, kvs, data, latents, save_exp_dir):
    """Recover the scale latent of images by gradient descent on the energy.

    Starting from a random label guess, builds FLAGS.num_steps of noisy
    gradient-descent graph ops on the size-conditioned energy w.r.t. the
    label, then reports the mean absolute error of the recovered scale
    against the ground truth over 30 batches.

    NOTE(review): save_exp_dir is accepted but unused here -- confirm.
    """
    LABEL_SIZE = kvs['LABEL_SIZE']
    model_size = kvs['model_size']
    weight_size = kvs['weight_size']
    x_mod = kvs['X_NOISE']
    label_output = LABEL_SIZE
    # Langevin-style refinement chain over the label (graph construction).
    for i in range(FLAGS.num_steps):
        label_output = label_output + tf.random_normal(tf.shape(label_output), mean=0.0, stddev=0.03)
        e_noise = model_size.forward(x_mod, weight_size, label=label_output)
        label_grad = tf.gradients(e_noise, [label_output])[0]
        # label_grad = tf.Print(label_grad, [label_grad])
        label_output = label_output - 1.0 * label_grad
        # Scale latents live in [0.5, 1.0].
        label_output = tf.clip_by_value(label_output, 0.5, 1.0)
    diffs = []
    for i in range(30):
        s = i*FLAGS.batch_size
        d = (i+1)*FLAGS.batch_size
        data_i = data[s:d]
        latent_i = latents[s:d]
        # Random initial guess drawn from the valid scale range.
        latent_init = np.random.uniform(0.5, 1, (FLAGS.batch_size, 1))
        feed_dict = {x_mod: data_i, LABEL_SIZE:latent_init}
        size_pred = sess.run([label_output], feed_dict)[0]
        size_gt = latent_i[:, 2:3]
        diffs.append(np.abs(size_pred - size_gt).mean())
    print(np.array(diffs).mean())
def genbaseline(sess, kvs, data, latents, save_exp_dir, frac=0.0):
    """Train/evaluate a feedforward baseline for compositional generalization.

    Args:
        sess: TensorFlow session used for training and evaluation.
        kvs: dict of prebuilt graph tensors/models (unused; kept for a
            uniform experiment interface with the other entry points).
        data: array of dSprites images.
        latents: ground-truth latent factors aligned with ``data``.
        save_exp_dir: directory for the checkpoint and sample image.
        frac: fraction of held-out latent combinations mixed into training.

    Returns:
        Mean squared reconstruction error over the held-out set.
    """
    # tf.reset_default_graph()
    if FLAGS.joint_shape:
        model_baseline = DspritesNetGen(num_filters=FLAGS.num_filters, label_size=5)
        LABEL = tf.placeholder(shape=(None, 5), dtype=tf.float32)
    else:
        model_baseline = DspritesNetGen(num_filters=FLAGS.num_filters, label_size=3)
        LABEL = tf.placeholder(shape=(None, 3), dtype=tf.float32)
    weights_baseline = model_baseline.construct_weights('context_baseline_{}'.format(frac))
    X_feed = tf.placeholder(shape=(None, 2*FLAGS.num_filters), dtype=tf.float32)
    X_label = tf.placeholder(shape=(None, 64, 64), dtype=tf.float32)
    X_out = model_baseline.forward(X_feed, weights_baseline, label=LABEL)
    loss_sq = tf.reduce_mean(tf.square(X_out - X_label))
    optimizer = AdamOptimizer(1e-3)
    gvs = optimizer.compute_gradients(loss_sq)
    # Drop variables without gradients so apply_gradients does not choke.
    gvs = [(k, v) for (k, v) in gvs if k is not None]
    train_op = optimizer.apply_gradients(gvs)
    dataloader = DataLoader(DSpritesGen(data, latents, frac=frac), batch_size=FLAGS.batch_size, num_workers=6, drop_last=True, shuffle=True)
    datafull = data
    itr = 0
    saver = tf.train.Saver()
    vs = optimizer.variables()
    sess.run(tf.global_variables_initializer())
    if FLAGS.train:
        for _ in range(5):
            for data_corrupt, data, label_size, label_pos in tqdm(dataloader):
                data_corrupt = data_corrupt.numpy()
                label_size, label_pos = label_size.numpy(), label_pos.numpy()
                # The baseline decodes from random latent noise, not images.
                data_corrupt = np.random.randn(data_corrupt.shape[0], 2*FLAGS.num_filters)
                label_comb = np.concatenate([label_size, label_pos], axis=1)
                feed_dict = {X_feed: data_corrupt, X_label: data, LABEL: label_comb}
                output = [loss_sq, train_op]
                loss, _ = sess.run(output, feed_dict=feed_dict)
                itr += 1
        saver.save(sess, osp.join(save_exp_dir, 'model_genbaseline'))
    saver.restore(sess, osp.join(save_exp_dir, 'model_genbaseline'))
    l = latents
    # Held-out latent combinations never seen during training.
    if FLAGS.joint_shape:
        mask_gen = (l[:, 3] == 30 * np.pi / 39) * (l[:, 2] == 0.5)
    else:
        mask_gen = (l[:, 3] == 30 * np.pi / 39) * (l[:, 1] == 1) & (~((l[:, 2] == 0.5) | ((l[:, 4] == 16/31) & (l[:, 5] == 16/31))))
    data_gen = datafull[mask_gen]
    latents_gen = latents[mask_gen]
    losses = []
    for dat, latent in zip(np.array_split(data_gen, 10), np.array_split(latents_gen, 10)):
        data_init = np.random.randn(dat.shape[0], 2*FLAGS.num_filters)
        if FLAGS.joint_shape:
            latent_size = np.eye(3)[latent[:, 1].astype(np.int32) - 1]
            latent_pos = latent[:, 4:6]
            latent = np.concatenate([latent_size, latent_pos], axis=1)
            feed_dict = {X_feed: data_init, LABEL: latent, X_label: dat}
        else:
            feed_dict = {X_feed: data_init, LABEL: latent[:, [2,4,5]], X_label: dat}
        loss = sess.run([loss_sq], feed_dict=feed_dict)[0]
        # print(loss)
        losses.append(loss)
    print("Overall MSE for generalization of {} for fraction of {}".format(np.mean(losses), frac))
    data_try = data_gen[:10]
    data_init = np.random.randn(10, 2*FLAGS.num_filters)
    if FLAGS.joint_shape:
        # BUG FIX: index the raw held-out latents (latents_gen).  The loop
        # variable `latent` left over from the evaluation loop above was
        # already converted to one-hot + position columns, so indexing its
        # column 1 read a one-hot entry, not the shape id.
        latent_scale = np.eye(3)[latents_gen[:10, 1].astype(np.int32) - 1]
        latent_pos = latents_gen[:10, 4:]
    else:
        latent_scale = latents_gen[:10, 2:3]
        latent_pos = latents_gen[:10, 4:]
    latent_tot = np.concatenate([latent_scale, latent_pos], axis=1)
    feed_dict = {X_feed: data_init, LABEL: latent_tot}
    x_output = sess.run([X_out], feed_dict=feed_dict)[0]
    x_output = np.clip(x_output, 0, 1)
    im_name = "size_scale_combine_genbaseline.png"
    # Put each 64x64 tile inside a 66x66 white frame for the contact sheet.
    x_output_wrap = np.ones((10, 66, 66))
    data_try_wrap = np.ones((10, 66, 66))
    x_output_wrap[:, 1:-1, 1:-1] = x_output
    data_try_wrap[:, 1:-1, 1:-1] = data_try
    im_output = np.concatenate([x_output_wrap, data_try_wrap], axis=2).reshape(-1, 66*2)
    impath = osp.join(save_exp_dir, im_name)
    imsave(impath, im_output)
    print("Successfully saved images at {}".format(impath))
    return np.mean(losses)
def gentest(sess, kvs, data, latents, save_exp_dir):
X_NOISE = kvs['X_NOISE']
LABEL_SIZE = kvs['LABEL_SIZE']
LABEL_SHAPE = kvs['LABEL_SHAPE']
LABEL_POS = kvs['LABEL_POS']
LABEL_ROT = kvs['LABEL_ROT']
model_size = kvs['model_size']
model_shape = kvs['model_shape']
model_pos = kvs['model_pos']
model_rot = kvs['model_rot']
weight_size = kvs['weight_size']
weight_shape = kvs['weight_shape']
weight_pos = kvs['weight_pos']
weight_rot = kvs['weight_rot']
X = tf.placeholder(shape=(None, 64, 64), dtype=tf.float32)
datafull = data
# Test combination of generalization where we use slices of both training
x_final = X_NOISE
x_mod_size = X_NOISE
x_mod_pos = X_NOISE
for i in range(FLAGS.num_steps):
# use cond_pos
energies = []
x_mod_pos = x_mod_pos + tf.random_normal(tf.shape(x_mod_pos), mean=0.0, stddev=0.005)
e_noise = model_pos.forward(x_final, weight_pos, label=LABEL_POS)
# energies.append(e_noise)
x_grad = tf.gradients(e_noise, [x_final])[0]
x_mod_pos = x_mod_pos + tf.random_normal(tf.shape(x_mod_pos), mean=0.0, stddev=0.005)
x_mod_pos = x_mod_pos - FLAGS.step_lr * x_grad
x_mod_pos = tf.clip_by_value(x_mod_pos, 0, 1)
if FLAGS.joint_shape:
# use cond_shape
e_noise = model_shape.forward(x_mod_pos, weight_shape, label=LABEL_SHAPE)
elif FLAGS.joint_rot:
e_noise = model_rot.forward(x_mod_pos, weight_rot, label=LABEL_ROT)
else:
# use cond_size
e_noise = model_size.forward(x_mod_pos, weight_size, label=LABEL_SIZE)
# energies.append(e_noise)
# energy_stack = tf.concat(energies, axis=1)
# energy_stack = tf.reduce_logsumexp(-1*energy_stack, axis=1)
# energy_stack = tf.reduce_sum(energy_stack, axis=1)
x_grad = tf.gradients(e_noise, [x_mod_pos])[0]
x_mod_pos = x_mod_pos - FLAGS.step_lr * x_grad
x_mod_pos = tf.clip_by_value(x_mod_pos, 0, 1)
# for x_mod_size
# use cond_size
# e_noise = model_size.forward(x_mod_size, weight_size, label=LABEL_SIZE)
# x_grad = tf.gradients(e_noise, [x_mod_size])[0]
# x_mod_size = x_mod_size + tf.random_normal(tf.shape(x_mod_size), mean=0.0, stddev=0.005)
# x_mod_size = x_mod_size - FLAGS.step_lr * x_grad
# x_mod_size = tf.clip_by_value(x_mod_size, 0, 1)
# # use cond_pos
# e_noise = model_pos.forward(x_mod_size, weight_pos, label=LABEL_POS)
# x_grad = tf.gradients(e_noise, [x_mod_size])[0]
# x_mod_size = x_mod_size + tf.random_normal(tf.shape(x_mod_size), mean=0.0, stddev=0.005)
# x_mod_size = x_mod_size - FLAGS.step_lr * tf.stop_gradient(x_grad)
# x_mod_size = tf.clip_by_value(x_mod_size, 0, 1)
x_mod = x_mod_pos
x_final = x_mod
if FLAGS.joint_shape:
loss_kl = model_shape.forward(x_final, weight_shape, reuse=True, label=LABEL_SHAPE, stop_grad=True) + \
model_pos.forward(x_final, weight_pos, reuse=True, label=LABEL_POS, stop_grad=True)
energy_pos = model_shape.forward(X, weight_shape, reuse=True, label=LABEL_SHAPE) + \
model_pos.forward(X, weight_pos, reuse=True, label=LABEL_POS)
energy_neg = model_shape.forward(tf.stop_gradient(x_mod), weight_shape, reuse=True, label=LABEL_SHAPE) + \
model_pos.forward(tf.stop_gradient(x_mod), weight_pos, reuse=True, label=LABEL_POS)
elif FLAGS.joint_rot:
loss_kl = model_rot.forward(x_final, weight_rot, reuse=True, label=LABEL_ROT, stop_grad=True) + \
model_pos.forward(x_final, weight_pos, reuse=True, label=LABEL_POS, stop_grad=True)
energy_pos = model_rot.forward(X, weight_rot, reuse=True, label=LABEL_ROT) + \
model_pos.forward(X, weight_pos, | |
'Py3status'
mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
if file_ext.lower() == '.py':
py_mod = imp.load_source(mod_name, filepath)
if hasattr(py_mod, expected_class):
inst = py_mod.Py3status()
return (mod_name, inst)
def clear_cache(self):
"""
Reset the cache for all methods of this module.
"""
for meth in self.methods:
self.methods[meth]['cached_until'] = time()
if self.config['debug']:
syslog(LOG_INFO, 'clearing cache for method {}'.format(meth))
def load_methods(self, include_path, f_name):
"""
Read the given user-written py3status class file and store its methods.
Those methods will be executed, so we will deliberately ignore:
- private methods starting with _
- decorated methods such as @property or @staticmethod
- 'on_click' methods as they'll be called upon a click_event
- 'kill' methods as they'll be called upon this thread's exit
"""
module, class_inst = self.load_from_file(include_path + f_name)
if module and class_inst:
self.module_class = class_inst
for method in sorted(dir(class_inst)):
if method.startswith('_'):
continue
else:
m_type = type(getattr(class_inst, method))
if 'method' in str(m_type):
if method == 'on_click':
self.click_events = True
elif method == 'kill':
self.has_kill = True
else:
# the method_obj stores infos about each method
# of this module.
method_obj = {
'cached_until': time(),
'instance': None,
'last_output': {
'name': method,
'full_text': ''
},
'method': method,
'name': None,
'position': 0
}
self.methods[method] = method_obj
# done, syslog some debug info
if self.config['debug']:
syslog(
LOG_INFO,
'module {} click_events={} has_kill={} methods={}'.format(
self.module_name,
self.click_events,
self.has_kill,
self.methods.keys()
)
)
def click_event(self, event):
"""
Execute the 'on_click' method of this module with the given event.
"""
try:
click_method = getattr(self.module_class, 'on_click')
click_method(
self.i3status_thread.json_list,
self.i3status_thread.config,
event
)
except Exception:
err = sys.exc_info()[1]
msg = 'on_click failed with ({}) for event ({})'.format(err, event)
syslog(LOG_WARNING, msg)
    def run(self):
        """
        On a timely fashion, execute every method found for this module.
        We will respect and set a cache timeout for each method if the user
        didn't already do so.
        We will execute the 'kill' method of the module when we terminate.
        """
        while self.lock.is_set():
            # execute each method of this module
            for meth, obj in self.methods.items():
                my_method = self.methods[meth]
                # always check the lock
                if not self.lock.is_set():
                    break
                # respect the cache set for this method
                if time() < obj['cached_until']:
                    continue
                try:
                    # execute method and get its output
                    # user methods follow the (position, result) contract
                    method = getattr(self.module_class, meth)
                    position, result = method(
                        self.i3status_thread.json_list,
                        self.i3status_thread.config
                    )
                    # validate the result
                    assert isinstance(result, dict), "should return a dict"
                    assert 'full_text' in result, "missing 'full_text' key"
                    assert 'name' in result, "missing 'name' key"
                    # validate the position
                    assert isinstance(position, int), "position is not an int"
                    # initialize method object (first successful run only)
                    if my_method['name'] is None:
                        my_method['name'] = result['name']
                        if 'instance' in result:
                            my_method['instance'] = result['instance']
                        else:
                            my_method['instance'] = result['name']
                    # update method object cache
                    if 'cached_until' in result:
                        cached_until = result['cached_until']
                    else:
                        # no user-provided expiry: apply the global default
                        cached_until = time() + self.config['cache_timeout']
                    my_method['cached_until'] = cached_until
                    # update method object output
                    my_method['last_output'] = result
                    # update method object position
                    my_method['position'] = position
                    # debug info
                    if self.config['debug']:
                        syslog(
                            LOG_INFO,
                            'method {} returned {} '.format(meth, result) +
                            'for position {}'.format(position)
                        )
                except Exception:
                    err = sys.exc_info()[1]
                    syslog(
                        LOG_WARNING,
                        'user method {} failed ({})'.format(meth, err)
                    )
            # don't be hasty mate, let's take it easy for now
            sleep(self.config['interval'])
        # check and execute the 'kill' method if present
        if self.has_kill:
            try:
                kill_method = getattr(self.module_class, 'kill')
                kill_method(
                    self.i3status_thread.json_list,
                    self.i3status_thread.config
                )
            except Exception:
                # this would be stupid to die on exit
                pass
class Py3statusWrapper():
"""
This is the py3status wrapper.
"""
    def __init__(self):
        """
        Useful variables we'll need.
        """
        # Loaded user Module threads; populated by setup().
        self.modules = []
        # Master run flag: worker threads loop while this Event is set.
        self.lock = Event()
def get_config(self):
"""
Create the py3status based on command line options we received.
"""
# get home path
home_path = os.path.expanduser('~')
# defaults
config = {
'cache_timeout': 60,
'i3status_config_path': '/etc/i3status.conf',
'include_paths': ['{}/.i3/py3status/'.format(home_path)],
'interval': 1
}
# package version
try:
import pkg_resources
version = pkg_resources.get_distribution('py3status').version
except:
version = 'unknown'
config['version'] = version
# command line options
parser = argparse.ArgumentParser(
description='The agile, python-powered, i3status wrapper')
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-c', action="store",
dest="i3status_conf",
type=str,
default=config['i3status_config_path'],
help="path to i3status config file")
parser.add_argument('--debug', action="store_true",
help="be verbose in syslog")
parser.add_argument('-i', action="append",
dest="include_paths",
help="""include user-written modules from those
directories (default ~/.i3/py3status)""")
parser.add_argument('-n', action="store",
dest="interval",
type=float,
default=config['interval'],
help="update interval in seconds (default 1 sec)")
parser.add_argument('-s', '--standalone', action="store_true",
help="standalone mode, do not use i3status")
parser.add_argument('-t', action="store",
dest="cache_timeout",
type=int,
default=config['cache_timeout'],
help="""default injection cache timeout in seconds
(default 60 sec)""")
parser.add_argument('-v', '--version', action="store_true",
help="""show py3status version and exit""")
options = parser.parse_args()
# only asked for version
if options.version:
from platform import python_version
print(
'py3status version {} (python {})'.format(
config['version'],
python_version()
)
)
sys.exit(0)
# override configuration and helper variables
config['cache_timeout'] = options.cache_timeout
config['debug'] = options.debug
config['i3status_config_path'] = options.i3status_conf
if options.include_paths:
config['include_paths'] = options.include_paths
config['interval'] = options.interval
config['standalone'] = options.standalone
# all done
return config
def list_modules(self):
"""
Search import directories and files through given include paths.
This method is a generator and loves to yield.
"""
for include_path in sorted(self.config['include_paths']):
include_path = os.path.abspath(include_path) + '/'
if os.path.isdir(include_path):
for f_name in sorted(os.listdir(include_path)):
if f_name.endswith('.py'):
yield (include_path, f_name)
    def setup(self):
        """
        Setup py3status and spawn i3status/events/modules threads.
        """
        # set the Event lock
        self.lock.set()
        # setup configuration
        self.config = self.get_config()
        if self.config['debug']:
            syslog(
                LOG_INFO,
                'py3status started with config {}'.format(
                    self.config
                )
            )
        # setup i3status thread
        self.i3status_thread = I3status(
            self.lock,
            self.config['i3status_config_path'],
            self.config['standalone']
        )
        if self.config['standalone']:
            self.i3status_thread.mock()
        else:
            self.i3status_thread.start()
            # block until i3status produces its first output (or dies trying)
            while not self.i3status_thread.last_output:
                if not self.i3status_thread.is_alive():
                    err = self.i3status_thread.error
                    raise IOError(err)
                sleep(0.1)
        if self.config['debug']:
            syslog(
                LOG_INFO,
                'i3status thread {} with config {}'.format(
                    'started' if not self.config['standalone'] else 'mocked',
                    self.i3status_thread.config
                )
            )
        # setup input events thread
        self.events_thread = Events(self.lock, self.config, self.modules)
        self.events_thread.start()
        if self.config['debug']:
            syslog(LOG_INFO, 'events thread started')
        # suppress modules' output wrt issue #20
        if not self.config['debug']:
            sys.stdout = open('/dev/null', 'w')
            sys.stderr = open('/dev/null', 'w')
        # load and spawn modules threads
        for include_path, f_name in self.list_modules():
            try:
                my_m = Module(
                    self.lock,
                    self.config,
                    include_path,
                    f_name,
                    self.i3status_thread
                )
                # only start and handle modules with available methods
                if my_m.methods:
                    my_m.start()
                    self.modules.append(my_m)
                elif self.config['debug']:
                    syslog(
                        LOG_INFO,
                        'ignoring {} (no methods found)'.format(f_name)
                    )
            except Exception:
                # a broken user module must not kill the wrapper
                err = sys.exc_info()[1]
                msg = 'loading {} failed ({})'.format(f_name, err)
                self.i3_nagbar(msg, level='warning')
def i3_nagbar(self, msg, level='error'):
"""
Make use of i3-nagbar to display errors and warnings to the user.
We also make sure to log anything to keep trace of it.
"""
msg = 'py3status: {}. '.format(msg)
msg += 'please try to fix this and reload i3wm (Mod+Shift+R)'
try:
log_level = LOG_ERR if level == 'error' else LOG_WARNING
syslog(log_level, msg)
call(
['i3-nagbar', '-m', msg, '-t', level],
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w')
)
except:
pass
def stop(self):
"""
Clear the Event lock, this will break all threads' loops.
"""
try:
self.lock.clear()
if self.config['debug']:
syslog(LOG_INFO, 'lock cleared, exiting')
except:
pass
    def sig_handler(self, signum, frame):
        """
        Raise a Warning level exception when a user sends a SIGUSR1 signal.

        `signum` and `frame` follow the standard signal-handler signature
        required by signal.signal() and are unused here.
        """
        raise UserWarning("received USR1, forcing refresh")
def clear_modules_cache(self):
"""
For every module, reset the 'cached_until' of all its methods.
"""
for module in self.modules:
module.clear_cache()
def get_modules_output(self, json_list):
"""
Iterate over user modules and their output. Return the list ordered
as the user asked.
If two modules specify the same output index/position, the sorting will
be alphabetical.
"""
# prepopulate the list so that every usable index exists, thx @Lujeni
m_list = [
'' for value in range(
sum([len(x.methods) for x in self.modules]) + len(json_list)
)
]
# debug the ordering matrix
if self.config['debug']:
syslog(
LOG_INFO,
'ordering matrix {}'.format(list(range(len(m_list))))
)
# run through modules/methods output and insert them in reverse order
debug_msg = ''
for m in reversed(self.modules):
for meth in m.methods:
position = m.methods[meth]['position']
last_output = m.methods[meth]['last_output']
try:
assert position in range(len(m_list))
if m_list[position] == '':
m_list[position] = last_output
else:
if '' in m_list:
m_list.remove('')
m_list.insert(position, last_output)
except (AssertionError, IndexError):
# out of range indexes get placed at the end of the output
m_list.append(last_output)
finally:
# debug user module's index
if self.config['debug']:
debug_msg += '{}={} '.format(
meth,
m_list.index(last_output)
)
# debug the user modules ordering
if self.config['debug']:
syslog(
LOG_INFO,
'ordering user modules positions {}'.format(debug_msg.strip())
)
# append i3status json list to the modules' list in empty | |
import asyncio
import json
import logging
import sys
import time
from contextlib import suppress
from functools import partial
import pytest
from aiorpcx import *
from aiorpcx.session import Concurrency, SessionBase
from util import RaiseTest
# Compatibility shim: asyncio.all_tasks() was added in Python 3.7; on older
# interpreters fall back to the deprecated Task.all_tasks() classmethod.
if sys.version_info >= (3, 7):
    from asyncio import all_tasks
else:
    from asyncio import Task
    all_tasks = Task.all_tasks
def raises_method_not_found(message):
    """Shortcut asserting an RPCError with code METHOD_NOT_FOUND is raised."""
    code = JSONRPC.METHOD_NOT_FOUND
    return RaiseTest(code, message, RPCError)
class MyServerSession(RPCSession):
sessions = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.notifications = []
self.sessions.append(self)
assert self.session_kind == SessionKind.SERVER
@classmethod
async def current_server(self):
await sleep(0.05)
return self.sessions[0]
async def connection_lost(self):
await super().connection_lost()
self.sessions.remove(self)
async def handle_request(self, request):
handler = getattr(self, f'on_{request.method}', None)
invocation = handler_invocation(handler, request)
return await invocation()
async def on_send_bad_response(self, response):
message = json.dumps(response).encode()
await self._send_message(message)
async def on_echo(self, value):
return value
async def on_notify(self, thing):
self.notifications.append(thing)
async def on_bug(self):
raise ValueError
async def on_costly_error(self, cost):
raise RPCError(1, "that cost a bunch!", cost=cost)
async def on_disconnect(self, result=RPCError):
if result is RPCError:
raise ReplyAndDisconnect(RPCError(1, 'incompatible version'))
raise ReplyAndDisconnect(result)
async def on_sleepy(self):
await sleep(10)
def in_caplog(caplog, message):
    """Return True if *message* occurs in any captured log record."""
    for record in caplog.records:
        if message in record.message:
            return True
    return False
def caplog_count(caplog, message):
    """Return how many captured log records contain *message*."""
    total = 0
    for record in caplog.records:
        if message in record.message:
            total += 1
    return total
@pytest.fixture
def server_port(unused_tcp_port, event_loop):
    """Run a MyServerSession TCP server; yield its port; clean up on exit."""
    coro = serve_rs(MyServerSession, 'localhost', unused_tcp_port, loop=event_loop)
    server = event_loop.run_until_complete(coro)
    yield unused_tcp_port
    # asyncio.all_tasks() exists from 3.7; fall back for older versions.
    if hasattr(asyncio, 'all_tasks'):
        tasks = asyncio.all_tasks(event_loop)
    else:
        tasks = asyncio.Task.all_tasks(loop=event_loop)
    async def close_all():
        server.close()
        await server.wait_closed()
        # wait for any tasks spawned during the test to wind down
        if tasks:
            await asyncio.wait(tasks)
    event_loop.run_until_complete(close_all())
class TestRPCSession:
@pytest.mark.asyncio
    async def test_no_proxy(self, server_port):
        """Connecting through an unreachable SOCKS proxy raises OSError."""
        proxy = SOCKSProxy('localhost:79', SOCKS5, None)
        with pytest.raises(OSError):
            async with connect_rs('localhost', server_port, proxy=proxy) as session:
                pass
@pytest.mark.asyncio
    async def test_handlers(self, server_port):
        """Unknown methods raise METHOD_NOT_FOUND; notifications are silent."""
        async with timeout_after(0.1):
            async with connect_rs('localhost', server_port) as session:
                assert session.session_kind == SessionKind.CLIENT
                assert session.proxy() is None
                with raises_method_not_found('something'):
                    await session.send_request('something')
                await session.send_notification('something')
        # leaving the context closes the session
        assert session.is_closing()
@pytest.mark.asyncio
    async def test_send_request(self, server_port):
        """A simple echo request round-trips, and close tears down cleanly."""
        async with connect_rs('localhost', server_port) as session:
            assert await session.send_request('echo', [23]) == 23
        assert session.transport._closed_event.is_set()
        assert session.transport._process_messages_task.done()
@pytest.mark.asyncio
    async def test_send_request_buggy_handler(self, server_port):
        """A handler that raises maps to INTERNAL_ERROR on the client side."""
        async with connect_rs('localhost', server_port) as session:
            with RaiseTest(JSONRPC.INTERNAL_ERROR, 'internal server error', RPCError):
                await session.send_request('bug')
@pytest.mark.asyncio
    async def test_unexpected_response(self, server_port, caplog):
        """A response with an unknown request id is logged, not raised."""
        async with connect_rs('localhost', server_port) as session:
            # A request not a notification so we don't exit immediately
            response = {"jsonrpc": "2.0", "result": 2, "id": -1}
            with caplog.at_level(logging.DEBUG):
                await session.send_request('send_bad_response', (response, ))
        assert in_caplog(caplog, 'unsent request')
@pytest.mark.asyncio
    async def test_unanswered_request_count(self, server_port):
        """The server counts an in-flight request; the client stays at zero."""
        async with connect_rs('localhost', server_port) as session:
            server_session = await MyServerSession.current_server()
            assert session.unanswered_request_count() == 0
            assert server_session.unanswered_request_count() == 0
            # 'sleepy' never answers within the timeout
            async with ignore_after(0.01):
                await session.send_request('sleepy')
            assert session.unanswered_request_count() == 0
            assert server_session.unanswered_request_count() == 1
@pytest.mark.asyncio
    async def test_send_request_bad_args(self, server_port):
        """Args that are not a list/dict violate the protocol itself."""
        async with connect_rs('localhost', server_port) as session:
            # ProtocolError as it's a protocol violation
            with RaiseTest(JSONRPC.INVALID_ARGS, 'list', ProtocolError):
                await session.send_request('echo', "23")
@pytest.mark.asyncio
    async def test_send_request_timeout0(self, server_port):
        """A zero timeout fires before the request can complete."""
        async with connect_rs('localhost', server_port) as session:
            with pytest.raises(TaskTimeout):
                async with timeout_after(0):
                    await session.send_request('echo', [23])
@pytest.mark.asyncio
    async def test_send_request_timeout(self, server_port):
        """A timed-out request is cancelled without being a server error."""
        async with connect_rs('localhost', server_port) as session:
            server_session = await MyServerSession.current_server()
            with pytest.raises(TaskTimeout):
                async with timeout_after(0.01):
                    await session.send_request('sleepy')
        # Assert the server doesn't treat cancellation as an error
        assert server_session.errors == 0
@pytest.mark.asyncio
    async def test_error_base_cost(self, server_port):
        """Billing error_base_cost past the hard limit cuts the session off."""
        async with connect_rs('localhost', server_port) as session:
            server_session = await MyServerSession.current_server()
            server_session.error_base_cost = server_session.cost_hard_limit * 1.1
            # An empty message is a protocol error on the server side
            await session._send_message(b'')
            await sleep(0.05)
            assert server_session.errors == 1
            assert server_session.cost > server_session.cost_hard_limit
            # Check next request raises and cuts us off
            with pytest.raises(RPCError):
                await session.send_request('echo', [23])
            await sleep(0.02)
            assert session.is_closing()
@pytest.mark.asyncio
    async def test_RPCError_cost(self, server_port):
        """The cost attached to a handler-raised RPCError is billed."""
        async with connect_rs('localhost', server_port) as session:
            server_session = await MyServerSession.current_server()
            err = RPCError(0, 'message')
            assert err.cost == 0
            with pytest.raises(RPCError):
                await session.send_request('costly_error', [1000])
            # It can trigger a cost recalc which refunds a tad
            epsilon = 1
            assert server_session.cost > server_session.error_base_cost + 1000 - epsilon
@pytest.mark.asyncio
    async def test_send_notification(self, server_port):
        """Notifications reach the server handler and produce no response."""
        async with connect_rs('localhost', server_port) as session:
            server = await MyServerSession.current_server()
            await session.send_notification('notify', ['test'])
            # give the server loop a beat to process it
            await sleep(0.001)
            assert server.notifications == ['test']
@pytest.mark.asyncio
    async def test_force_close(self, server_port):
        """close(force_after=...) aborts the transport promptly."""
        async with connect_rs('localhost', server_port) as session:
            assert not session.transport._closed_event.is_set()
            await session.close(force_after=0.001)
        assert session.transport._closed_event.is_set()
@pytest.mark.asyncio
    async def test_force_close_abort_codepath(self, server_port):
        """force_after=0 exercises the immediate-abort code path."""
        async with connect_rs('localhost', server_port) as session:
            protocol = session.transport
            assert not protocol._closed_event.is_set()
            await session.close(force_after=0)
        assert protocol._closed_event.is_set()
@pytest.mark.asyncio
    async def test_verbose_logging(self, server_port, caplog):
        """Verbosity >= 4 logs the raw sent and received wire data."""
        async with connect_rs('localhost', server_port) as session:
            session.verbosity = 4
            with caplog.at_level(logging.DEBUG):
                await session.send_request('echo', ['wait'])
            assert in_caplog(caplog, "sending message b'{")
            assert in_caplog(caplog, "received data b'{")
@pytest.mark.asyncio
    async def test_framer_MemoryError(self, server_port, caplog):
        """Oversized unframed data is dropped with a log entry, not a crash."""
        async with connect_rs('localhost', server_port, framer=NewlineFramer(5)) as session:
            msg = 'w' * 50
            raw_msg = msg.encode()
            # Even though long it will be sent in one bit
            request = session.send_request('echo', [msg])
            assert await request == msg
            assert not caplog.records
            session.transport.data_received(raw_msg)  # Unframed; no \n
            await sleep(0)
            assert len(caplog.records) == 1
            assert in_caplog(caplog, 'dropping message over 5 bytes')
# @pytest.mark.asyncio
# async def test_resource_release(self, server_port):
# loop = asyncio.get_event_loop()
# tasks = all_tasks(loop)
# try:
# session = connect_rs('localhost', 0)
# await session.create_connection()
# except OSError:
# pass
# assert all_tasks(loop) == tasks
# async with connect_rs('localhost', server_port):
# pass
# await asyncio.sleep(0.01) # Let things be processed
# assert all_tasks(loop) == tasks
@pytest.mark.asyncio
async def test_pausing(self, server_port):
called = []
limit = None
def my_write(data):
called.append(data)
if len(called) == limit:
session.transport.pause_writing()
async with connect_rs('localhost', server_port) as session:
protocol = session.transport
assert protocol._can_send.is_set()
try:
protocol.transport.write = my_write
except AttributeError: # uvloop: transport.write is read-only
return
await session._send_message(b'a')
assert protocol._can_send.is_set()
assert called
called.clear()
async def monitor():
await sleep(0.002)
assert called == [b'A\n', b'very\n']
assert not protocol_can_send.is_set()
protocol.resume_writing()
assert protocol._can_send.is_set()
limit = 2
msgs = b'A very long and boring meessage'.split()
task = await spawn(monitor)
for msg in msgs:
await session._send_message(msg)
assert called == [session.transport._framer.frame(msg) for msg in msgs]
limit = None
# Check idempotent
protocol.resume_writing()
@pytest.mark.asyncio
    async def test_slow_connection_aborted(self, server_port):
        """A send stalled beyond max_send_delay is cancelled and the session closed."""
        async with connect_rs('localhost', server_port) as session:
            protocol = session.transport
            assert session.max_send_delay >= 10
            session.max_send_delay = 0.004
            protocol.pause_writing()
            assert not protocol._can_send.is_set()
            task = await spawn(session._send_message(b'a'))
            await sleep(0.1)
            assert task.cancelled()
            assert protocol._can_send.is_set()
            assert session.is_closing()
@pytest.mark.asyncio
    async def test_concurrency(self, server_port):
        """Incoming concurrency throttles with cost between soft and hard limits."""
        async with connect_rs('localhost', server_port) as session:
            # By default clients don't have a hard limit
            assert session.cost_hard_limit == 0
            session.cost_hard_limit = session.cost_soft_limit * 2
            # Prevent this interfering
            session.cost_decay_per_sec = 0
            # Test usage below soft limit
            session.cost = session.cost_soft_limit - 10
            session.recalc_concurrency()
            assert session._incoming_concurrency.max_concurrent == session.initial_concurrent
            assert session._cost_fraction == 0.0
            # Test usage at soft limit doesn't affect concurrency
            session.cost = session.cost_soft_limit
            session.recalc_concurrency()
            assert session._incoming_concurrency.max_concurrent == session.initial_concurrent
            assert session._cost_fraction == 0.0
            # Test usage half-way
            session.cost = (session.cost_soft_limit + session.cost_hard_limit) // 2
            session.recalc_concurrency()
            assert 1 < session._incoming_concurrency.max_concurrent < session.initial_concurrent
            assert 0.49 < session._cost_fraction < 0.51
            # Test at hard limit
            session.cost = session.cost_hard_limit
            session.recalc_concurrency()
            assert session._cost_fraction == 1.0
            # Test above hard limit disconnects
            session.cost = session.cost_hard_limit + 1
            session.recalc_concurrency()
            with pytest.raises(ExcessiveSessionCostError):
                async with session._incoming_concurrency:
                    pass
@pytest.mark.asyncio
    async def test_concurrency_no_limit_for_outgoing(self, server_port):
        """With a zero hard limit (client default) cost never throttles."""
        async with connect_rs('localhost', server_port) as session:
            # Prevent this interfering
            session.cost_decay_per_sec = 0
            # Test usage half-way
            session.cost = (RPCSession.cost_soft_limit + RPCSession.cost_hard_limit) // 2
            session.recalc_concurrency()
            assert session._incoming_concurrency.max_concurrent == session.initial_concurrent
            assert session._cost_fraction == 0
            # Test above hard limit does not disconnect
            session.cost = RPCSession.cost_hard_limit + 1
            session.recalc_concurrency()
            async with session._incoming_concurrency:
                pass
@pytest.mark.asyncio
    async def test_concurrency_decay(self, server_port):
        """Session cost decays over time at cost_decay_per_sec."""
        async with connect_rs('localhost', server_port) as session:
            session.cost_decay_per_sec = 100
            session.cost = 1000
            await sleep(0.1)
            session.recalc_concurrency()
            # ~10 units should have decayed; bounds are loose for timing slack
            assert 970 < session.cost < 992
@pytest.mark.asyncio
    async def test_concurrency_hard_limit_0(self, server_port):
        """A zero hard limit disables cost-based throttling entirely."""
        async with connect_rs('localhost', server_port) as session:
            session.cost = 1_000_000_000
            session.cost_hard_limit = 0
            session.recalc_concurrency()
            assert session._incoming_concurrency.max_concurrent == session.initial_concurrent
@pytest.mark.asyncio
    async def test_extra_cost(self, server_port):
        """The extra_cost() hook contributes to the session's cost fraction."""
        async with connect_rs('localhost', server_port) as session:
            # By default clients don't have a hard limit
            assert session.cost_hard_limit == 0
            session.cost_hard_limit = session.cost_soft_limit * 2
            session.extra_cost = lambda: session.cost_soft_limit + 1
            session.recalc_concurrency()
            assert 1 > session._cost_fraction > 0
            session.extra_cost = lambda: session.cost_hard_limit + 1
            session.recalc_concurrency()
            assert session._cost_fraction > 1
@pytest.mark.asyncio
    async def test_request_over_hard_limit(self, server_port):
        """A request to an over-budget server gets an excessive-usage RPCError."""
        async with connect_rs('localhost', server_port) as session:
            server = await MyServerSession.current_server()
            server.bump_cost(server.cost_hard_limit + 100)
            async with timeout_after(0.1):
                with pytest.raises(RPCError) as e:
                    await session.send_request('echo', [23])
            assert 'excessive resource usage' in str(e.value)
    @pytest.mark.asyncio
    async def test_request_sleep(self, server_port):
        """A server between its soft and hard limits delays request processing."""
        async with connect_rs('localhost', server_port) as session:
            server = await MyServerSession.current_server()
            # Put the server half-way between its soft and hard limits
            server.bump_cost((server.cost_soft_limit + server.cost_hard_limit) / 2)
            server.cost_sleep = 0.1
            t1 = time.time()
            await session.send_request('echo', [23])
            t2 = time.time()
            assert t2 - t1 > (server.cost_sleep / 2) * 0.9  # Fudge factor for Linux
@pytest.mark.asyncio
async def test_server_busy(self, server_port):
async with connect_rs('localhost', server_port) as session:
server = await | |
quadrant
q2 - first quadrant
q3 - first quadrant
q4 - first quadrant
data - focus on data (if angular spread is less then 10 deg
- groups (KEY) - key of keylist which defines color of points
(e.g. ('str2') in absolutes to select
different colors for different instruments
- legend (bool) - draws legend only if groups is given - default True
- legendposition (string) - draws the legend at chosen position (e.g. "upper right", "lower center") - default is "lower left"
- labellimit (integer)- maximum length of label in legend
- noshow: (bool) don't call show at the end, just returns figure handle
- outfile: (string) to save the figure, if path is not existing it will be created
- gridcolor: (string) Define grid color e.g. '0.5' greyscale, 'r' red, etc
- savedpi: (integer) resolution
- figure: (bool) True for GUI
REQUIRES:
- package operator for color selection
RETURNS:
- plot
ToDo:
- add alpha 95 calc
EXAMPLE:
>>> stream.stereoplot(focus='data',groups='str2')
"""
focus = kwargs.get('focus')
groups = kwargs.get('groups')
bgcolor = kwargs.get('bgcolor')
colorlist = kwargs.get('colorlist')
outfile = kwargs.get('outfile')
savedpi = kwargs.get('savedpi')
gridinccolor = kwargs.get('gridinccolor')
griddeccolor = kwargs.get('griddeccolor')
noshow = kwargs.get('noshow')
legend = kwargs.get('legend')
legendposition = kwargs.get('legendposition')
labellimit = kwargs.get('labellimit')
figure = kwargs.get('figure')
if not colorlist:
colorlist = ['b','r','g','c','m','y','k']
if not bgcolor:
bgcolor = '#d5de9c'
if not griddeccolor:
griddeccolor = '#316931'
if not gridinccolor:
gridinccolor = '#316931'
if not savedpi:
savedpi = 80
if not focus:
focus = 'all'
if not legend:
legend = 'True'
if not labellimit:
labellimit = 11
if not legendposition:
legendposition = "lower left"
if not self[0].typ == 'idff':
logger.error('Stereoplot: you need to provide idf data')
return
inc = self._get_column('x')
dec = self._get_column('y')
col = ['']
if groups:
sel = self._get_column(groups)
col = list(set(list(sel)))
if len(col) > 7:
col = col[:7]
if not len(dec) == len(inc):
logger.error('Stereoplot: check you data file - unequal inc and dec data?')
return
if not figure:
fig = plt.figure()
else:
fig = figure
ax = plt.gca()
ax.cla() # clear things for fresh plot
ax.set_aspect('equal')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
# Define koordinates:
basic1=plt.Circle((0,0),90,color=bgcolor,fill=True)
basic1a=plt.Circle((0,0),90,color=gridinccolor,fill=False)
basic2=plt.Circle((0,0),30,color=gridinccolor,fill=False,linestyle='dotted')
basic3=plt.Circle((0,0),60,color=gridinccolor,fill=False,linestyle='dotted')
basic4=plt.Line2D([0,0],[-90,90],color=griddeccolor,linestyle='dashed')
basic5=plt.Line2D([-90,90],[0,0],color=griddeccolor,linestyle='dashed')
fig.gca().add_artist(basic1)
fig.gca().add_artist(basic1a)
fig.gca().add_artist(basic2)
fig.gca().add_artist(basic3)
fig.gca().add_artist(basic4)
fig.gca().add_artist(basic5)
for j in range(len(col)):
color = colorlist[j]
xpos,ypos,xneg,yneg,xabs,y = [],[],[],[],[],[]
for i,el in enumerate(inc):
if groups:
if sel[i] == col[j]:
coinc = 90-np.abs(el)
sindec = np.sin(np.pi/180*dec[i])
cosdec = np.cos(np.pi/180*dec[i])
xabs.append(coinc*sindec)
y.append(coinc*cosdec)
if el < 0:
xneg.append(coinc*sindec)
yneg.append(coinc*cosdec)
else:
xpos.append(coinc*sindec)
ypos.append(coinc*cosdec)
else:
coinc = 90-np.abs(el)
sindec = np.sin(np.pi/180*dec[i])
cosdec = np.cos(np.pi/180*dec[i])
xabs.append(coinc*sindec)
y.append(coinc*cosdec)
if el < 0:
xneg.append(coinc*sindec)
yneg.append(coinc*cosdec)
else:
xpos.append(coinc*sindec)
ypos.append(coinc*cosdec)
xmax = np.ceil(max(xabs))
xmin = np.floor(min(xabs))
xdif = xmax-xmin
ymax = np.ceil(max(y))
ymin = np.floor(min(y))
ydif = ymax-ymin
maxdif = max([xdif,ydif])
mindec = np.floor(min(dec))
maxdec = np.ceil(max(dec))
mininc = np.floor(min(np.abs(inc)))
maxinc = np.ceil(max(np.abs(inc)))
if focus == 'data' and maxdif <= 10:
# decs
startdec = mindec
decline,inclst = [],[]
startinc = mininc
incline = []
while startdec <= maxdec:
xl = 90*np.sin(np.pi/180*startdec)
yl = 90*np.cos(np.pi/180*startdec)
decline.append([xl,yl,startdec])
startdec = startdec+1
while startinc <= maxinc:
inclst.append(90-np.abs(startinc))
startinc = startinc+1
if focus == 'all':
ax.set_xlim((-90,90))
ax.set_ylim((-90,90))
if focus == 'q1':
ax.set_xlim((0,90))
ax.set_ylim((0,90))
if focus == 'q2':
ax.set_xlim((-90,0))
ax.set_ylim((0,90))
if focus == 'q3':
ax.set_xlim((-90,0))
ax.set_ylim((-90,0))
if focus == 'q4':
ax.set_xlim((0,90))
ax.set_ylim((-90,0))
if focus == 'data':
ax.set_xlim((xmin,xmax))
ax.set_ylim((ymin,ymax))
#ax.annotate('Test', xy=(1.2, 25.2))
ax.plot(xpos,ypos,'o',color=color, label=col[j][:labellimit])
ax.plot(xneg,yneg,'o',color='white')
ax.annotate('60', xy=(0, 30))
ax.annotate('30', xy=(0, 60))
ax.annotate('0', xy=(0, 90))
ax.annotate('90', xy=(90, 0))
ax.annotate('180', xy=(0, -90))
ax.annotate('270', xy=(-90, 0))
if focus == 'data' and maxdif <= 10:
for elem in decline:
pline = plt.Line2D([0,elem[0]],[0,elem[1]],color=griddeccolor,linestyle='dotted')
xa = elem[0]/elem[1]*((ymax - ymin)/2+ymin)
ya = (ymax - ymin)/2 + ymin
annotext = "D:%i" % int(elem[2])
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_artist(pline)
for elem in inclst:
pcirc = plt.Circle((0,0),elem,color=gridinccolor,fill=False,linestyle='dotted')
xa = (xmax-xmin)/2 + xmin
ya = sqrt((elem*elem)-(xa*xa))
annotext = "I:%i" % int(90-elem)
ax.annotate(annotext, xy=(xa,ya))
fig.gca().add_artist(pcirc)
if groups and legend:
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels),key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=legendposition)
# 5. SAVE TO FILE (or show)
if figure:
return ax
if outfile:
path = os.path.split(outfile)[0]
if not path == '':
if not os.path.exists(path):
os.makedirs(path)
if fmt:
fig.savefig(outfile, format=fmt, dpi=savedpi)
else:
fig.savefig(outfile, dpi=savedpi)
elif noshow:
return fig
else:
plt.show()
    def trim(self, starttime=None, endtime=None, newway=False):
        """
    DEFINITION:
        Removing dates outside of range between start- and endtime.
        Returned stream has range starttime <= range < endtime.

    PARAMETERS:
    Variables:
        - starttime:    (datetime/str) Start of period to trim with
        - endtime:      (datetime/str) End of period to trim to
    Kwargs:
        - newway:       (bool) Testing method for non-destructive trimming
                        (only takes effect for the old LineStruct container;
                        the ndarray path copies only when newway is True)

    RETURNS:
        - stream:       (DataStream object) Trimmed stream

    EXAMPLE:
        >>> data = data.trim(starttime, endtime)

    APPLICATION:
        """
        # Sanity check: a reversed interval is a caller error.
        if starttime and endtime:
            if self._testtime(starttime) > self._testtime(endtime):
                logger.error('Trim: Starttime (%s) is larger than endtime (%s).' % (starttime,endtime))
                raise ValueError("Starttime is larger than endtime.")

        logger.info('Trim: Started from %s to %s' % (starttime,endtime))

        # ndtype: data lives in self.ndarray (new style) rather than in
        # the LineStruct container (old style).
        ndtype = False
        if self.ndarray[0].size > 0:
            ndtype = True
            # NOTE: destructively resets the old-style container to a
            # single placeholder element when ndarray data is present
            self.container = [LineStruct()]

        #-ndarrray---------------------------------------
        if not newway:
            newarray = list(self.ndarray)  # Converting array to list - better for append and other item function (because its not type sensitive)
        else:
            newstream = self.copy()
            newarray = list(newstream.ndarray)

        if starttime:
            starttime = self._testtime(starttime)
            if newarray[0].size > 0:   # time column present
                idx = (np.abs(newarray[0].astype(float)-date2num(starttime))).argmin()
                # Trim should start at point >= starttime, so check:
                if newarray[0][idx] < date2num(starttime):
                    idx += 1
                for i in range(len(newarray)):
                    if len(newarray[i]) >= idx:
                        newarray[i] = newarray[i][idx:]

        if endtime:
            endtime = self._testtime(endtime)
            if newarray[0].size > 0:   # time column present
                idx = 1 + (np.abs(newarray[0].astype(float)-date2num(endtime))).argmin()  # get the nearest index to endtime and add 1 (to get lengths correctly)
                #idx = 1+ (np.abs(self.ndarray[0]-date2num(endtime))).argmin() # get the nearest index to endtime
                if idx >= len(newarray[0]): ## prevent too large idx values
                    idx = len(newarray[0]) - 1
                # Walk backwards until the last kept value is < endtime
                # (range is half-open: starttime <= t < endtime)
                while True:
                    if not float(newarray[0][idx]) < date2num(endtime) and idx != 0: # Make sure that last value is smaller than endtime
                        idx -= 1
                    else:
                        break

                #self.ndarray = list(self.ndarray)
                for i in range(len(newarray)):
                    length = len(newarray[i])
                    if length >= idx:
                        newarray[i] = newarray[i][:idx+1]

        newarray = np.asarray(newarray,dtype=object)
        #-ndarrray---------------------------------------

        #--------------------------------------------------
        if newway and not ndtype:
            # Non-destructive trimming of stream
            trimmedstream = DataStream()
            trimmedstream.header = self.header
            starttime = self._testtime(starttime)
            endtime = self._testtime(endtime)
            stval = 0
            for idx, elem in enumerate(self):
                newline = LineStruct()
                if not isnan(elem.time):
                    if elem.time >= date2num(starttime) and elem.time < date2num(endtime):
                        newline.time = elem.time
                        # copy every key of the matching row onto the new line
                        for key in KEYLIST:
                            exec('newline.'+key+' = elem.'+key)
                        trimmedstream.add(newline)
            return trimmedstream
        #--------------------------------------------------

        if not ndtype:
            # Destructive old-style path: slice self.container in place.
            stream = DataStream()

            if starttime:
                # check starttime input
                starttime = self._testtime(starttime)
                stval = 0
                for idx, elem in enumerate(self):
                    if not isnan(elem.time):
                        if num2date(elem.time).replace(tzinfo=None) > starttime.replace(tzinfo=None):
                            #stval = idx-1 # changed because of latex output
                            stval = idx
                            break
                if stval < 0:
                    stval = 0
                self.container = self.container[stval:]

            # remove data prior to endtime input
            if endtime:
                # check endtime input
                endtime = self._testtime(endtime)
                edval = len(self)
                for idx, elem in enumerate(self):
                    if not isnan(elem.time):
                        if num2date(elem.time).replace(tzinfo=None) > endtime.replace(tzinfo=None):
                            edval = idx
                            #edval = idx-1
                            break
                self.container = self.container[:edval]

        if ndtype:
            return DataStream(self.container,self.header,newarray)
        else:
            return DataStream(self.container,self.header,self.ndarray)
def use_sectime(self, swap=False):
"""
DEFINITION:
Drop primary time stamp and replace by secondary time stamp if available.
If swap is True, then primary time stamp is moved to secondary column (and
not dropped).
"""
if not 'sectime' in self._get_key_headers():
logger.warning("use_sectime: did not find secondary time column in the streams keylist - returning unmodified timeseries")
return self
# Non destructive
stream = self.copy()
pos = KEYLIST.index('sectime')
tcol = stream.ndarray[0]
stream = stream._move_column('sectime','time')
if swap:
stream = stream._put_column(tcol,'sectime')
else:
stream = stream._drop_column('sectime')
return stream
def variometercorrection(self, variopath, thedate, **kwargs):
"""
DEFINITION:
##### THS METHOD IS USELESS....
##### Either select a certain time in absolute calculation (TODO)
##### or calculate daily means of basevalues which ar already corrected for
##### variotion --- leon 2016-03
Function to perform a variometercorrection of an absresult stream
towards the given datetime using the given variometer stream.
Returns a new absresult object with new datetime and corrected values
APPLICATION:
Useful to compare various absolute measurement e.g. form one | |
# coding: utf-8
# In[ ]:
# All CIFAR experiments in the Confident Learning paper.
### Benchmark test@1 accuracies for non-cl methods in the were implemented at Google using the internal codebase and are not un
# In[1]:
import numpy as np
import cleanlab
from cleanlab import baseline_methods
from cleanlab.latent_estimation import compute_confident_joint
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import confusion_matrix
import pandas as pd
import os
import sys
import json
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib
# Global plot typography for all figures produced below.
font = {'family': 'sans-serif',
        'sans-serif': ['Helvetica'],
        'weight': 'bold',
        'size': 22}
matplotlib.rc('font', **font)

# In[2]:

# Directory with one subfolder per (noise, sparsity) experiment containing the
# cross-validated predicted probabilities (psx) for CIFAR-10.
noisy_base_dir = '/home/cgn/cgn/cleanlab/examples/cifar/cifar10/'
base_dir = noisy_base_dir  # '/home/cgn/OLD_BAD_cifar10/'

# In[3]:

# Load the true (uncorrupted) CIFAR-10 training labels, keyed by filename.
rfn = '/datasets/datasets/cifar10/cifar10/train_filename2label.json'
with open(rfn, 'r') as rf:
    d = json.load(rf)
y = np.asarray([v for k, v in d.items()])
folders = [c for c in os.listdir(base_dir) if '__' in c]
results = []
for folder in sorted([f for f in folders if f != '__pycache__']):
print(folder)
psx_file = [z for z in os.listdir(base_dir + folder) if 'pyx' in z][0]
psx = np.load(base_dir + folder + "/" + psx_file)
#Make sure psx is the right shape
psx = psx[:,:10]
# Load noisy labels
frac_zero_noise_rates = folder.split('_')[-7]
noise_amount = folder.split('_')[-1]
if noise_amount == '8':
continue
rfn = 'cifar10_noisy_labels__frac_zero_noise_rates__0.{}__noise_amount__0.{}.json'.format(
frac_zero_noise_rates, noise_amount)
with open(noisy_base_dir + "cifar10_noisy_labels/" + rfn, 'r') as rf:
d = json.load(rf)
s = np.asarray([v for k,v in d.items()])
true_label_errors = s != y
acc = np.sum(s == y) / len(y)
print('accuracy of labels:', acc)
# Benchmarks
label_error_mask = np.zeros(len(s), dtype=bool)
label_error_indices = compute_confident_joint(
s, psx, return_indices_of_off_diagonals=True
)[1]
for idx in label_error_indices:
label_error_mask[idx] = True
conf_joint_only = label_error_mask
# # Confident learning optimized
# best_f1 = -1
# cl_opt = None
# for prune_method in ['prune_by_class', 'prune_by_noise_rate', 'both']:
# label_errs = cleanlab.pruning.get_noise_indices(
# s,
# psx,
# prune_method=prune_method,
# )
# f1 = precision_recall_fscore_support(
# y_true=true_label_errors,
# y_pred=label_errs,
# )[2][0]
# if f1 > best_f1:
# print(prune_method)
# best_f1 = f1
# cl_opt = label_errs
results.append({
'noise_amount_acc': acc,
'noise_amount': noise_amount,
'frac_zero_noise_rates': frac_zero_noise_rates,
'argmax' : confusion_matrix(
y_true=true_label_errors,
y_pred=baseline_methods.baseline_argmax(psx, s),
),
'argmax_cm': confusion_matrix(
y_true=true_label_errors,
y_pred=baseline_methods.baseline_argmax_confusion_matrix(psx, s),
),
'argmax_ccm': confusion_matrix(
y_true=true_label_errors,
y_pred=baseline_methods.baseline_argmax_calibrated_confusion_matrix(
psx, s),
),
'conf_joint_only': confusion_matrix(
y_true=true_label_errors,
y_pred=conf_joint_only,
),
'cl_pbnr': confusion_matrix(
y_true=true_label_errors,
y_pred=cleanlab.pruning.get_noise_indices(s, psx),
),
'cl_pbc': confusion_matrix(
y_true=true_label_errors,
y_pred=cleanlab.pruning.get_noise_indices(
s, psx, prune_method='prune_by_class'),
),
'cl_both': confusion_matrix(
y_true=true_label_errors,
y_pred=cleanlab.pruning.get_noise_indices(
s, psx, prune_method='both'),
),
# 'cl_opt': confusion_matrix(
# y_true=true_label_errors,
# y_pred=label_errs,
# ),
})
print()
# In[15]:
# Baseline label-error detection methods to score.
methods = ['argmax', 'argmax_ccm', 'argmax_cm', 'cl_pbnr', 'cl_pbc', 'cl_both', 'conf_joint_only']


# PEP 8 (E731): named functions instead of lambda assignments.
# Each takes a 2x2 confusion matrix x = [[tn, fp], [fn, tp]].
def precision_func(x):
    """Precision: tp / (tp + fp)."""
    return x[1][1] * 1.0 / (x[1][1] + x[0][1])


def recall_func(x):
    """Recall: tp / (tp + fn)."""
    return x[1][1] * 1.0 / (x[1][1] + x[1][0])


def acc_func(x):
    """Accuracy: (tp + tn) / total."""
    return (x[1][1] + x[0][0]) * 1.0 / (x[1][1] + x[1][0] + x[0][1] + x[0][0])


def f1_func(x):
    """F1: harmonic mean of precision and recall."""
    return 2 * precision_func(x) * recall_func(x) / (precision_func(x) + recall_func(x))


scoring = {
    'precision': precision_func,
    'recall': recall_func,
    'f1': f1_func,
    'acc': acc_func,
}
# Create pandas dataframe to view results
df = pd.DataFrame(results)
# Fraction of labels that are wrong, rounded to one decimal for grouping.
df['label_error_fraction'] = np.round(1 - df['noise_amount_acc'], 1)
# For each baseline method, apply each scoring method
for method in methods:
    for k, v in scoring.items():
        df[method + "_" + k] = df[method].apply(v)

# In[16]:

# Folder-name fragments are digit strings like '2' meaning 0.2 - convert once.
if type(df['frac_zero_noise_rates'].iloc[0]) is str:
    df['frac_zero_noise_rates'] = df['frac_zero_noise_rates'].apply(
        lambda x: int(x) / 10.)
if type(df['noise_amount'].iloc[0]) is str:
    df['noise_amount'] = df['noise_amount'].apply(
        lambda x: int(x) / 10.)
# In[100]:
# Mean metric per (label-error fraction, sparsity) setting; rows are methods,
# columns are settings. Sparsity 0.2/0.4 and the noise-free rows are excluded.
a = df[(df['frac_zero_noise_rates']!=.4) & (df['frac_zero_noise_rates']!=.2) & (df['label_error_fraction'] != 0)].groupby(['label_error_fraction', 'frac_zero_noise_rates']).mean()[['argmax_acc', 'cl_pbc_acc', 'cl_pbnr_acc', 'cl_both_acc', 'conf_joint_only_acc']].T.round(2)
a

# In[101]:

b = df[(df['frac_zero_noise_rates']!=.4) & (df['frac_zero_noise_rates']!=.2) & (df['label_error_fraction'] != 0)].groupby(['label_error_fraction', 'frac_zero_noise_rates']).mean()[['argmax_f1', 'cl_pbc_f1', 'cl_pbnr_f1', 'cl_both_f1', 'conf_joint_only_f1']].T.round(2)
b

# In[102]:

c = df[(df['frac_zero_noise_rates']!=.4) & (df['frac_zero_noise_rates']!=.2) & (df['label_error_fraction'] != 0)].groupby(['label_error_fraction', 'frac_zero_noise_rates']).mean()[['argmax_precision', 'cl_pbc_precision', 'cl_pbnr_precision', 'cl_both_precision', 'conf_joint_only_precision']].T.round(2)
c

# In[103]:

d = df[(df['frac_zero_noise_rates']!=.4) & (df['frac_zero_noise_rates']!=.2) & (df['label_error_fraction'] != 0)].groupby(['label_error_fraction', 'frac_zero_noise_rates']).mean()[['argmax_recall', 'cl_pbc_recall', 'cl_pbnr_recall', 'cl_both_recall', 'conf_joint_only_recall']].T.round(2)
d

# In[109]:

# Emit LaTeX source for the paper tables.
for z in [a, b, c, d]:
    print(z.to_latex())
# In[23]:
# df['noise'] = (1 - df['noise_amount_acc']).round(1)
columns = ['argmax', 'conf_joint_only', 'cl_pbc', 'cl_pbnr', 'cl_both']  # , 'argmax_cm', 'argmax_ccm', 'cl_pbnr', 'cl_both'
# One accuracy-vs-noise plot per label-sparsity setting.
for frac_zero_noise_rates in [0, 0.2, 0.4, 0.6]:
    print("\nfrac_zero_noise_rates:", frac_zero_noise_rates)
    sys.stdout.flush()
    # df[np.abs(df['frac_zero_noise_rates'] - frac_zero_noise_rates) < 1e-3].sort_values(by='label_error_fraction').set_index('label_error_fraction')[[c + "_precision" for c in columns]].plot(
    #     figsize=(20,5), linewidth=3)
    # df[np.abs(df['frac_zero_noise_rates'] - frac_zero_noise_rates) < 1e-3].sort_values(by='label_error_fraction').set_index('label_error_fraction')[[c + "_recall" for c in columns]].plot(
    #     figsize=(20,5), linewidth=3)
    # df[np.abs(df['frac_zero_noise_rates'] - frac_zero_noise_rates) < 1e-3].sort_values(by='label_error_fraction').set_index('label_error_fraction')[[c + "_f1" for c in columns]].plot(
    #     figsize=(20,5), linewidth=3)
    df[np.abs(df['frac_zero_noise_rates'] - frac_zero_noise_rates) < 1e-3].sort_values(by='label_error_fraction').set_index('label_error_fraction')[[c + "_acc" for c in columns if 'amount' not in c]].plot(
        figsize=(10, 5), linewidth=3)
    plt.show()

# In[22]:

# df['noise'] = (1 - df['noise_amount_acc']).round(1)
# Aggregate plots averaged over all sparsity settings, one per metric.
df[df['label_error_fraction'] > 0].groupby('label_error_fraction').mean()[[c for c in df.columns if 'precision' in c]].plot(
    figsize=(20, 10), linewidth=3)
df[df['label_error_fraction'] > 0].groupby('label_error_fraction').mean()[[c for c in df.columns if 'recall' in c]].plot(
    figsize=(20, 10), linewidth=3)
df[df['label_error_fraction'] > 0].groupby('label_error_fraction').mean()[[c for c in df.columns if 'f1' in c]].plot(
    figsize=(20, 10), linewidth=3)
df[df['label_error_fraction'] > 0].groupby('label_error_fraction').mean()[[c for c in df.columns if 'acc' in c and 'amount' not in c]].plot(
    figsize=(20, 10), linewidth=3)

# In[18]:

# Spot-check the rows at 40% label error.
df[df['label_error_fraction'] == 0.4]  # [[c for c in df.columns if 'acc' in c]]
# # Set up training experiments
# In[40]:
# Compute a label-error mask with each baseline method and write a boolean
# training mask per method (True in the saved mask = keep the example).
folders = [c for c in os.listdir(base_dir) if 'noise_amount' in c]
results = []
for folder in sorted(folders):
    print(folder)
    psx_file = [z for z in os.listdir(base_dir + folder) if 'pyx' in z][0]
    psx = np.load(base_dir + folder + "/" + psx_file)
    # Make sure psx is the right shape (first 10 columns = CIFAR-10 classes)
    psx = psx[:, :10]
    # Load noisy labels; noise settings are encoded in the folder name
    frac_zero_noise_rates = folder.split('_')[-7]
    noise_amount = folder.split('_')[-1]
    rfn = 'cifar10_noisy_labels__frac_zero_noise_rates__0.{}__noise_amount__0.{}.json'.format(
        frac_zero_noise_rates, noise_amount)
    with open(noisy_base_dir + "cifar10_noisy_labels/" + rfn, 'r') as rf:
        d = json.load(rf)
    s = np.asarray([v for k, v in d.items()])
    true_label_errors = s != y
    acc = np.sum(s == y) / len(y)
    print('accuracy of labels:', acc)

    # Benchmarks: off-diagonals of the confident joint flag label errors.
    label_error_mask = np.zeros(len(s), dtype=bool)
    label_error_indices = compute_confident_joint(
        s, psx, return_indices_of_off_diagonals=True
    )[1]
    # vectorized equivalent of setting each flagged index to True
    label_error_mask[label_error_indices] = True

    # One error mask per pruning method (insertion order fixes the save order).
    masks = {
        'conf_joint_only': label_error_mask,
        'argmax': baseline_methods.baseline_argmax(psx, s),
        'cl_pbc': cleanlab.pruning.get_noise_indices(
            s, psx, prune_method='prune_by_class'),
        'cl_pbnr': cleanlab.pruning.get_noise_indices(
            s, psx, prune_method='prune_by_noise_rate'),
        'cl_both': cleanlab.pruning.get_noise_indices(
            s, psx, prune_method='both'),
    }

    # Create folders for and store masks for training.
    for name, mask in masks.items():
        new_folder = base_dir + folder + "/train_pruned_{}/".format(name)
        try:
            os.mkdir(new_folder)
        except FileExistsError:
            pass
        # ~mask: True for examples NOT flagged as label errors (kept for training)
        np.save(new_folder + "train_mask.npy", ~mask)
    print()
# # Benchmarking learning with noisy labels accuracy
# In[42]:
import subprocess
# In[43]:
base = '/home/cgn/cgn/cleanlab/examples/cifar/cifar10/'

# In[44]:

experiments = ['train_pruned_argmax', 'train_pruned_cl_pbc', 'train_pruned_cl_pbnr', 'train_pruned_cl_both', 'train_pruned_conf_joint_only']

# In[45]:

# Evaluate every trained checkpoint on the test set by shelling out to the
# training script with --evaluate, parsing Acc@1/Acc@5 from its output.
results = []
for settings in sorted([f for f in os.listdir(base) if 'noise' in f]):
    for experiment in experiments:
        frac_zero_noise_rates = settings.split('_')[-7]
        noise_amount = settings.split('_')[-1]
        # Remove results with noise fraction 0.8 (way too high for any practical case)
        if noise_amount != '8':
            ckpt = base + settings + "/" + experiment + "/" + 'model_resnet50__masked_best.pth.tar'
            # narrow except: only fall back when the first script fails,
            # instead of a bare except that would also swallow KeyboardInterrupt
            try:
                cmd = 'python3 {}cifar10_train_crossval.py /datasets/datasets/cifar10/cifar10/ --resume {} --evaluate --gpu 0'.format(
                    base, ckpt)
                result = subprocess.check_output(cmd, shell=True)
            except subprocess.CalledProcessError:
                # some checkpoints were produced by the alternate script
                cmd = 'python3 {}cifar10_train_crossval2.py /datasets/datasets/cifar10/cifar10/ --resume {} --evaluate --gpu 0'.format(
                    base, ckpt)
                result = subprocess.check_output(cmd, shell=True)
            # output ends with a line like "* Acc@1 93.75 Acc@5 99.80"
            acc1, _, acc5 = result.split(b"* Acc@1 ")[-1].strip().split()
            acc1, acc5 = float(acc1), float(acc5)
            results.append({
                'experiment': experiment[13:],  # strip the 'train_pruned_' prefix
                'frac_zero_noise_rates': frac_zero_noise_rates,
                'noise_amount': noise_amount,
                'acc1': acc1,
                'acc5': acc5,
            })
            print(results[-1])
# In[46]:
# One row per method ('OURS: <method>'), columns = (noise, sparsity) settings.
df_results = pd.concat([
    z.sort_values(by=['noise_amount', 'frac_zero_noise_rates']).set_index(
        ['noise_amount', 'frac_zero_noise_rates']).drop(
        ['acc5', 'experiment'], axis=1).T.set_index([['OURS: ' + i]]) \
    for i, z in pd.DataFrame(results).groupby('experiment')
])

## Results on other models using google code by <NAME> (author of MentorNet)
mentornet = [[
    0.9378,  # 0 noise
    0.8493, 0.8514, 0.8319, 0.8342,  # 0.2 noise
    0.6444, 0.6423, 0.6238, 0.6146,  # 0.4 noise
    0.2996, 0.3160, 0.2930, 0.2786,  # 0.6 noise
]]
mentornet = pd.DataFrame(mentornet, columns=df_results.columns, index = ['mentornet'])
smodel = [[
    0.9375,  # 0 noise
    0.8000, 0.7996, 0.7974, 0.7910,  # 0.2 noise
    0.5856, 0.6121, 0.5913, 0.5752,  # 0.4 noise
    0.2845, 0.2853, 0.2793, 0.2726,  # 0.6 noise
]]
smodel = pd.DataFrame(smodel, columns=df_results.columns, index = ['smodel'])
reed = [[
    0.9372,  # 0 noise
    0.7809, 0.7892, 0.8076, 0.7927,  # 0.2 noise
    0.6048, 0.6041, 0.6124, 0.5860,  # 0.4 noise
    0.2904, 0.2939, 0.2913, 0.2677,  # 0.6 noise
]]
reed = pd.DataFrame(reed, columns=df_results.columns, index = ['reed'])
vanilla = [[
    0.935,  # 0 noise
    0.7843, 0.7916, 0.7901, 0.7825,  # 0.2 noise
    0.6022, 0.6077, 0.5963, 0.5727,  # 0.4 noise
    0.2696, 0.2966, 0.2824, 0.2681,  # 0.6 noise
]]
vanilla = pd.DataFrame(vanilla, columns=df_results.columns, index = ['vanilla'])

# In[47]:

# These are the results if we train our model with batch size 64 for all noise
# rates except 0.4 where we use 32 batch size.
# NOTE(review): DataFrame.append is removed in pandas >= 2.0; pd.concat is the
# modern replacement - confirm the pinned pandas version before upgrading.
cifar10_final_benchmarks = (df_results / 100).append(mentornet).append(smodel).append(reed).append(vanilla)
cifar10_final_benchmarks.to_csv('cifar10/benchmarks.csv')
# In[89]:
# Final table in | |
header_position == COLHEADER.ch_append:
if v_len + 1 >= maxlen:
# prepend the value, consider if we have to truncate it and omit the header altogether
value = value[:maxlen] + (' ' if maxlen == v_len + 1 else '') + ('...' if ellipsis else '')
header = ''
else:
header = header[:(maxlen - v_len - 1)] + ('...' if ellipsis else '')
else:
# header is set to '' by the collector
value = value[:maxlen] + ('...' if ellipsis else '')
return header, value
    def display_prefix(self, collector, header):
        """Draw the collector's prefix text on the current row.

        Returns the number of columns consumed: 0 when nothing is shown, -1
        when the prefix takes the whole line (it ends with a newline),
        otherwise the prefix length.
        NOTE(review): the 'header' argument is unused here.
        """
        prefix = self.data[collector]['prefix']
        if prefix:
            prefix_len = len(prefix)
            prefix_newline = prefix[-1] == '\n'
            # truncate the prefix if it doesn't fit the screen
            if prefix_len >= self.screen_x and prefix_newline:
                prefix = prefix[:max(self.screen_x - 1, 0)]
            elif prefix_len >= self.screen_x / 5 and not prefix_newline:
                # an inline prefix longer than 1/5 of the screen is suppressed
                return 0
            # full-line prefixes are highlighted, inline ones drawn normally
            color = (self.COLOR_INVERSE_HIGHLIGHT if prefix_newline else self.COLOR_NORMAL)
            self.screen.addnstr(self.next_y, 1, str(prefix), len(str(prefix)), color)
            if prefix_newline:
                return -1
            else:
                return prefix_len
        else:
            return 0
    def display_header(self, layout, align, types):
        """Render one bold header row using the column layout from layout_x.

        align/types map field name to its COLALIGN / COLTYPES value; missing
        fields fall back to ca_none / ct_string.
        """
        for field in layout:
            text = self._align_field(field, '', layout[field]['width'], align.get(field, COLALIGN.ca_none),
                                     types.get(field, COLTYPES.ct_string))
            self.screen.addnstr(self.next_y, layout[field]['start'], text, layout[field]['width'], self.COLOR_NORMAL |
                                curses.A_BOLD)
def calculate_fields_position(self, collector, xstart):
width = self.data[collector]['w']
fields = self._get_fields_sorted_by_position(collector)
to_hide = self.data[collector]['hide']
noautohide = self.data[collector]['noautohide']
candrop = [name for name in fields if name not in to_hide and not noautohide.get(name, False)]
return self.layout_x(xstart, width, fields, to_hide, candrop)
def show_status_of_invisible_fields(self, layout, status, xstart):
"""
Show red/blue bar to the left of the screen representing the most critical
status of the fields that are now shown.
"""
status_rest = self._invisible_fields_status(layout, status)
if status_rest != COLSTATUS.cs_ok:
color_rest = self._status_to_color(status_rest, False)
self.screen.addch(self.next_y, 0, ' ', color_rest)
@staticmethod
def _align_field(text, header, width, align, typ):
if align == COLALIGN.ca_none:
if typ == COLTYPES.ct_number:
align = COLALIGN.ca_right
else:
align = COLALIGN.ca_left
textlen = len(text) + len(header) + (1 if header and text else 0)
width_left = width - textlen
if align == COLALIGN.ca_right:
return '{0}{1}'.format(' ' * width_left, text)
if align == COLALIGN.ca_center:
left_space = width_left / 2
right_space = width_left - left_space
return '{0}{1}{2}'.format(' ' * left_space, text, ' ' * right_space)
return str(text)
def _get_fields_sorted_by_position(self, collector):
pos = self.data[collector]['pos']
sorted_by_pos = sorted(((x, pos[x]) for x in pos if pos[x] != -1), key=itemgetter(1))
return [f[0] for f in sorted_by_pos]
def _invisible_fields_status(self, layout, statuses):
highest_status = COLSTATUS.cs_ok
invisible = [col for col in statuses if col not in layout]
for col in invisible:
for no in statuses[col]:
if statuses[col][no] > highest_status:
highest_status = statuses[col][no]
if highest_status == COLSTATUS.cs_critical:
return COLSTATUS.cs_critical
return highest_status
    def layout_x(self, xstart, colwidth, colnames, colhidden, colcandrop):
        """ Figure out width and X start position for each column. Some of the columns
            can be hidden, if they are not important (determined at column defintion) and
            if we don't have enough space for them.

            Returns {name: {'start': x, 'width': w[, 'truncate': True]}} with
            only the columns that (at least partially) fit on the screen.
        """
        layout = {}
        # get only the columns that are not hidden
        col_remaining = [name for name in colnames if name not in colhidden]
        # calculate the available screen X dimensions and the width required by all columns
        width_available = self.screen_x - (xstart + 1)
        # we add width of all N fields + N-1 spaces between fields
        width_required = sum(colwidth[name] for name in col_remaining) + len(col_remaining) - 1
        if width_available < width_required and colcandrop and len(colcandrop) > 0:
            for name in colcandrop:
                if name in col_remaining:
                    # remove a column, re-calculate width
                    col_remaining.remove(name)
                    width_required -= colwidth[name] + 1
                    # drop non-essential columns
                    if width_required <= width_available:
                        break
        # we dropped what we can, now show the rest. Track the accumulated width to
        # figure out which columns won't fit.
        x = xstart
        total_remaining = len(col_remaining)
        for idx, name in enumerate(col_remaining):
            w = colwidth[name]
            layout[name] = {'start': x, 'width': w}
            x += w
            if idx != total_remaining - 1:
                # single-space separator between adjacent columns
                x += 1
            # the last possible X position is screen_x - 1, the position of the last character
            # of the current word is layout[name]['start'] + w - 1. The comparison below checks
            # that the field width doesn't exceed the screen boundaries.
            if layout[name]['start'] + w > self.screen_x:
                # if we can't fit even one character - just bail out and don't show the field
                if layout[name]['start'] > self.screen_x - 1:
                    del layout[name]
                else:
                    # truncate it to the length that fits the screen
                    layout[name]['truncate'] = True
                    layout[name]['width'] = self.screen_x - layout[name]['start']
                # oops, we ran across the screen boundary
                # all the columns after this one should be dropped
                break
        return layout
# some utility functions
def read_configuration(config_file_name):
    """Read per-database PostgreSQL connection options from an ini-style file.

    Returns {section: {option: value}} covering port/host/user/dbname, or None
    when no file name was given or the file is empty/unreadable.
    """
    # read PostgreSQL connection options
    config_data = {}
    if not config_file_name:
        return None
    config = ConfigParser.ConfigParser()
    # ConfigParser.read returns the list of files successfully parsed
    f = config.read(config_file_name)
    if not f:
        logger.error('Configuration file {0} is empty or not found'.format(config_file_name))
        return None
    # get through all defined databases
    for section in config.sections():
        config_data[section] = {}
        for argname in (
            'port',
            'host',
            'user',
            'dbname',
        ):
            try:
                val = config.get(section, argname)
            except ConfigParser.NoOptionError:
                val = None
            # might happen also if the option is there, but the value is not set
            if val is not None:
                config_data[section][argname] = val
    return config_data
def read_postmaster_pid(work_directory, dbname):
    """ Parses the postgres directory tree and extracts the pid of the postmaster process

        Returns the PID (as a string, the first line of postmaster.pid) or None
        when the file cannot be read.
    """
    try:
        # 'with' guarantees the file is closed on every path (replaces the
        # previous manual try/finally close)
        with open('{0}/postmaster.pid'.format(work_directory)) as fp:
            return fp.readline().strip()
    except Exception:
        # XXX: do not bail out in case we are collecting data for multiple PostgreSQL clusters
        # (narrowed from a bare 'except:' so Ctrl-C / SystemExit still propagate)
        logger.error('Unable to read postmaster.pid for {name} at {wd}\n HINT: \
make sure Postgres is running'.format(name=dbname, wd=work_directory))
        return None
# execution starts here
def loop(collectors, consumer, groups, output_method):
    """Run the main display loop; curses output goes through curses.wrapper,
    which sets up and restores the terminal around do_loop."""
    if output_method != OUTPUT_METHOD.curses:
        do_loop(None, groups, output_method, collectors, consumer)
    else:
        curses.wrapper(do_loop, groups, output_method, collectors, consumer)
def poll_keys(screen, output):
    """Poll one keypress from the curses screen and apply the matching toggle.

    Mutates the module-level display flags in place.  Returns False when the
    user pressed 'q' (quit), True otherwise.
    """
    global display_units, freeze, filter_aux, autohide_fields, notrim, realtime
    c = screen.getch()
    # NOTE(review): the flags are assumed to be plain booleans; `not x`
    # replaces the original `x is False` toggle idiom, which is equivalent
    # for bool values.
    if c == ord('u'):
        display_units = not display_units
    if c == ord('f'):
        freeze = not freeze
    if c == ord('s'):
        filter_aux = not filter_aux
    if c == ord('h'):
        output.toggle_help()
    if c == ord('a'):
        autohide_fields = not autohide_fields
    if c == ord('t'):
        notrim = not notrim
    if c == ord('r'):
        realtime = not realtime
    if c == ord('q'):
        # bail out immediately
        return False
    return True
def do_loop(screen, groups, output_method, collectors, consumer):
    """ Display output (or pass it through to ncurses)

    screen: the curses screen (required for OUTPUT_METHOD.curses, None otherwise)
    groups: partition/pg collector pairs handed to process_groups
    output_method: one of the OUTPUT_METHOD constants
    collectors: collector objects polled on every tick
    consumer: input-queue consumer drained once per iteration

    Runs forever until poll_keys() reports a quit request (curses mode only).
    """
    output = None
    # these module-level flags are toggled by poll_keys() between iterations
    global display_units
    global freeze
    global filter_aux
    global autohide_fields
    global notrim
    global realtime
    if output_method == OUTPUT_METHOD.curses:
        if screen is None:
            logger.error('No parent screen is passed to the curses application')
            sys.exit(1)
        else:
            # initialize the curses output class.
            output = CursesOutput(screen)
            if not output.is_color_supported:
                logger.error('Curses output requires a terminal that supports color')
                sys.exit(1)
    else:
        output = CommonOutput()
    while 1:
        # process input:
        consumer.consume()
        for st in collectors:
            # poll for key presses both before and after the (potentially
            # slow) collection step so quit/toggles stay responsive
            if output_method == OUTPUT_METHOD.curses:
                if not poll_keys(screen, output):
                    # bail out immediately
                    return
            st.set_units_display(display_units)
            st.set_ignore_autohide(not autohide_fields)
            st.set_notrim(notrim)
            process_single_collector(st)
            if output_method == OUTPUT_METHOD.curses:
                if not poll_keys(screen, output):
                    return
        if output_method == OUTPUT_METHOD.curses:
            process_groups(groups)
        # in the non-curses cases display actually shows the data and refresh
        # clears the screen, so we need to refresh before display to clear the old data.
        if options.clear_screen and output_method != OUTPUT_METHOD.curses:
            output.refresh()
        for st in collectors:
            output.display(st.output(output_method))
        # in the curses case, refresh shows the data queued by display
        if output_method == OUTPUT_METHOD.curses:
            output.refresh()
        # in realtime mode skip the sleep and loop as fast as possible
        if not realtime:
            time.sleep(TICK_LENGTH)
def process_single_collector(st):
    """ perform all heavy-lifting for a single collector, i.e. data collection,
        diff calculation, etc. This is meant to be run in a separate thread.
        When the global `freeze` flag is set, only the tick happens and no
        new data is read.
    """
    if isinstance(st, PgstatCollector):
        st.set_aux_processes_filter(filter_aux)
    st.tick()
    if freeze:
        return
    if st.needs_refresh():
        st.refresh()
    if st.needs_diffs():
        st.diff()
        return
    # if the server goes offline, we need to clear diffs here,
    # otherwise rows from the last successful reading will be
    # displayed forever
    st.clear_diffs()
def process_groups(groups):
    """Push each group's PostgreSQL status prefix into its partitions collector."""
    for group in groups.values():
        partitions, pg = group['partitions'], group['pg']
        partitions.ncurses_set_prefix(pg.ncurses_produce_prefix())
def is_postgres_process(pid):
# read /proc/stat, check for the PostgreSQL string
stat_file_name = '/proc/{0}/stat'.format(pid)
with open(stat_file_name, 'rU') as fp:
stat_fields = fp.read().strip().split()
if len(stat_fields) > 3 and | |
<gh_stars>100-1000
# custom.py
# Author: <NAME> <<EMAIL>>
import numpy
from .base import BaseSelection
from .base import BaseGraphSelection
class CustomSelection(BaseSelection):
    """A selector based off a custom, user-defined feature-based function.

    This selector wraps a custom function that is passed in by the user. This
    function should take in a matrix containing a subset of the ground set and
    output the standard measure of quality of the subset. Each row in the matrix
    should be an example and each column should be a feature value for the
    example (like a standard data matrix for machine learning).

    .. warning::
        If the function that the user wants to optimize is graph-based,
        i.e., that it determines the quality of a subset using similarities
        with other examples instead of the feature values directly,
        e.g. facility location, CustomGraphSelection should be used instead.

    .. note::
        Although apricot is built for submodular functions, there is no explicit
        restriction that the function passed in be submodular. Sometimes,
        supermodular functions can be reasonably maximized using the same
        greedy approaches used on submodular functions.

    Parameters
    ----------
    n_samples : int
        The number of samples to return.

    function : callable
        The feature-based set function that this selector should wrap.

    initial_subset : list, numpy.ndarray or None
        If provided, this should be a list of indices into the data matrix
        to use as the initial subset, or a group of examples that may not be
        in the provided data should be used as the initial subset. If indices,
        the provided array should be one-dimensional. If a group of examples,
        the data should be 2 dimensional.

    optimizer : string or optimizers.BaseOptimizer, optional
        The optimization approach to use for the selection. Default is
        'two-stage', which makes selections using the naive greedy algorithm
        initially and then switches to the lazy greedy algorithm. Must be
        one of

            'random' : randomly select elements (dummy optimizer)
            'modular' : approximate the function using its modular upper bound
            'naive' : the naive greedy algorithm
            'lazy' : the lazy (or accelerated) greedy algorithm
            'approximate-lazy' : the approximate lazy greedy algorithm
            'two-stage' : starts with naive and switches to lazy
            'stochastic' : the stochastic greedy algorithm
            'sample' : randomly take a subset and perform selection on that
            'greedi' : the GreeDi distributed algorithm
            'bidirectional' : the bidirectional greedy algorithm

        Default is 'two-stage'.

    optimizer_kwds : dict or None, optional
        Arguments to pass into the optimizer object upon initialization.
        Default is None, which is treated as an empty dict.

    function_kwds : dict or None, optional
        Arguments to pass into the function object for each call.
        Default is None, which is treated as an empty dict.

    n_jobs : int, optional
        The number of cores to use for processing. This value is multiplied
        by 2 when used to set the number of threads. If set to -1, use all
        cores and threads. Default is -1.

    random_state : int or RandomState or None, optional
        The random seed to use for the random selection process. Only used
        for stochastic greedy.

    verbose : bool
        Whether to print output during the selection process.

    Attributes
    ----------
    n_samples : int
        The number of samples to select.

    ranking : numpy.array int
        The selected samples in the order of their gain with the first number in
        the ranking corresponding to the index of the first sample that was
        selected by the greedy procedure.

    gains : numpy.array float
        The gain of each sample in the returned set when it was added to the
        growing subset. The first number corresponds to the gain of the first
        added sample, the second corresponds to the gain of the second added
        sample, and so forth.
    """

    def __init__(self, n_samples, function, initial_subset=None,
            optimizer='two-stage', optimizer_kwds=None, function_kwds=None,
            n_jobs=1, random_state=None, verbose=False):
        # `callable` covers plain functions, lambdas, and callable objects.
        if not callable(function):
            raise ValueError("Passed in function must be callable.")

        self.function = function
        # None sentinels instead of mutable `{}` defaults; materialize a
        # fresh dict per instance so instances never share state.
        self.function_kwds = {} if function_kwds is None else function_kwds

        super(CustomSelection, self).__init__(n_samples=n_samples,
            initial_subset=initial_subset, optimizer=optimizer,
            optimizer_kwds={} if optimizer_kwds is None else optimizer_kwds,
            n_jobs=n_jobs, random_state=random_state, verbose=verbose)

    def fit(self, X, y=None, sample_weight=None, sample_cost=None):
        """Run submodular optimization to select the examples.

        This method is a wrapper for the full submodular optimization process.
        It takes in some data set (and optionally labels that are ignored
        during this process) and selects `n_samples` from it in the greedy
        manner specified by the optimizer.

        This method will return the selector object itself, not the transformed
        data set. The `transform` method will then transform a data set to the
        selected points, or alternatively one can use the ranking stored in
        the `self.ranking` attribute. The `fit_transform` method will perform
        both optimization and selection and return the selected items.

        Parameters
        ----------
        X : list or numpy.ndarray, shape=(n, d)
            The data set to transform. Must be numeric.

        y : list or numpy.ndarray or None, shape=(n,), optional
            The labels to transform. If passed in this function will return
            both the data and the corresponding labels for the rows that have
            been selected.

        sample_weight : list or numpy.ndarray or None, shape=(n,), optional
            The weight of each example. Currently ignored in apricot but
            included to maintain compatibility with sklearn pipelines.

        sample_cost : list or numpy.ndarray or None, shape=(n,), optional
            The cost of each item. If set, indicates that optimization should
            be performed with respect to a knapsack constraint.

        Returns
        -------
        self : CustomSelection
            The fit step returns this selector object.
        """
        return super(CustomSelection, self).fit(X, y=y,
            sample_weight=sample_weight, sample_cost=sample_cost)

    def _initialize(self, X):
        """Validate/normalize the initial subset and compute its baseline gain."""
        super(CustomSelection, self)._initialize(X)

        if self.initial_subset is None:
            pass
        elif self.initial_subset.ndim == 2:
            if self.initial_subset.shape[1] != X.shape[1]:
                raise ValueError("The number of columns in the initial subset must " \
                    "match the number of columns in X.")
        elif self.initial_subset.ndim == 1:
            # a one-dimensional subset is a vector of row indices into X
            self.initial_subset = X[self.initial_subset]
        else:
            raise ValueError("The initial subset must be either a two dimensional" \
                " matrix of examples or a one dimensional mask.")

        if self.initial_subset is None:
            self.total_gain = 0
        else:
            # Pass function_kwds here as well, so the baseline is computed
            # with the same arguments as the incremental gains in
            # _calculate_gains; the original omitted them, making the two
            # values inconsistent whenever kwds affect the function's output.
            self.total_gain = self.function(self.initial_subset,
                **self.function_kwds)

    def _calculate_gains(self, X, idxs=None):
        """This function will return the gain that each example would give.

        This function will return the gains that each example would give if
        added to the selected set. When a matrix of examples is given, a
        vector will be returned showing the gain for each example. When
        a single element is passed in, it will return a single value."""

        idxs = idxs if idxs is not None else self.idxs
        gains = numpy.zeros(idxs.shape[0], dtype='float64')

        # x0 is a placeholder row that is overwritten with each candidate in
        # turn, so the concatenation is built only once.
        x0 = numpy.zeros((1, X.shape[1]))
        if self.initial_subset is not None:
            X_ = numpy.concatenate([self.initial_subset, self.subset, x0])
        else:
            X_ = numpy.concatenate([self.subset, x0])

        for i, idx in enumerate(idxs):
            X_[-1] = X[idx]
            gains[i] = self.function(X_, **self.function_kwds) - self.total_gain

        return gains

    def _calculate_sieve_gains(self, X, thresholds, idxs):
        """This function will update the internal statistics from a stream.

        This function will update the various internal statistics that are a
        part of the sieve algorithm for streaming submodular optimization. This
        function does not directly return gains but it updates the values
        used by a streaming optimizer.

        Raises
        ------
        NotImplementedError
            Streaming optimization is not supported for custom functions.
        """

        super(CustomSelection, self)._calculate_sieve_gains(X,
            thresholds, idxs)

        raise NotImplementedError

    def _select_next(self, X, gain, idx):
        """This function will add the given item to the selected set."""

        self.total_gain += gain

        super(CustomSelection, self)._select_next(
            X, gain, idx)
class CustomGraphSelection(BaseGraphSelection):
"""A selector based off a custom, user-defined graph-based function.
This selector wraps a custom graph-based function that is passed in by the user.
This function should take in a matrix containing a subset of the ground set and
output the standard measure of quality of the subset. Each row in the matrix
should be an example and each column should be the similarity to each element
in the ground set, with the number of columns being equal to the size of the
ground set.
.. note::
Although apricot is built for submodular functions, there is no explicit
restriction that the function passed in be submodular. Sometimes,
supermodular functions can be reasonably maximized using the same
greedy approaches used on submodular functions.
Parameters
----------
n_samples : int
The number of samples to return.
func : function
The graph-based set function to that this selector should wrap.
initial_subset : list, numpy.ndarray or None
If provided, this should be a list of indices into the data matrix
to use as the initial subset, or a group of examples that may not be
in the provided data should beused as the initial subset. If indices,
the provided array should be one-dimensional. If a group of examples,
the data should be 2 dimensional.
optimizer : string or optimizers.BaseOptimizer, optional
The optimization approach to use for the selection. Default is
'two-stage', which makes selections using the naive greedy algorithm
initially and then switches to the lazy greedy algorithm. Must be
one of
'random' : randomly select elements (dummy optimizer)
'modular' : approximate the function using its modular upper bound
'naive' : the naive greedy algorithm
'lazy' : the lazy (or accelerated) greedy algorithm
'approximate-lazy' : the approximate lazy greedy algorithm
'two-stage' : starts with naive and switches to lazy
'stochastic' : the stochastic greedy algorithm
'sample' : randomly take a subset and perform selection on that
'greedi' : the GreeDi distributed algorithm
'bidirectional' : the bidirectional greedy algorithm
Default is 'two-stage'.
optimizer_kwds : dict, optional
Arguments to pass into the optimizer object upon initialization.
Default is {}.
function_kwds : dict, optional
Arguments to pass into the function object for each call.
Default is {}.
n_jobs : int, optional
The number of cores to use for processing. This value is multiplied
by | |
<reponame>duboviy/teamcity-messages
# coding=utf-8
import contextlib
import os
import platform
import sys
import pytest
import virtual_environments
from diff_test_tools import expected_messages, SCRIPT
from service_messages import ServiceMessage, assert_service_messages, has_service_messages
from test_util import run_command, get_teamcity_messages_root
def construct_fixture():
    # Build the `venv` fixture dynamically: the set of pytest versions to
    # exercise depends on the interpreter running the suite (pytest 5+
    # dropped Python 2 support).
    params = [('pytest>=4,<5',)]
    if sys.version_info > (3, 0):
        params.append(('pytest>=5',))
    @pytest.fixture(scope='module', params=params)
    def venv(request):
        # prepare (or reuse) a virtualenv with the requested pytest version
        return virtual_environments.prepare_virtualenv(request.param)
    return venv
# register the fixture at module level under the name `venv` so pytest's
# collection machinery can discover it
globals()['venv'] = construct_fixture()
def fix_slashes(s):
    """Rewrite path separators in *s* to match the current platform's style."""
    on_windows = platform.system() == 'Windows'
    old_sep, new_sep = ('/', '\\') if on_windows else ('\\', '/')
    return s.replace(old_sep, new_sep)
@contextlib.contextmanager
def make_ini(content):
    """Create a pytest.ini with *content* for the duration of the with-block.

    The file is removed even when the body raises; the original version only
    removed it on normal exit (the yield was not wrapped in try/finally), so
    a failing test leaked the ini file and contaminated later tests.
    """
    path = os.path.join(get_teamcity_messages_root(), 'pytest.ini')
    with open(path, 'w+') as f:
        f.write(content)
    try:
        yield
    finally:
        os.remove(path)
# disable pytest-pep8 on 3.4 due to "No such file or directory: 'doc'" issue
# see https://bugs.funtoo.org/browse/FL-3596
if (sys.version_info[0] == 2 and sys.version_info >= (2, 7)) or (sys.version_info[0] == 3 and sys.version_info >= (3, 5)):
    def test_pytest_pep8(venv):
        """pytest-pep8 violations are reported as a failed synthetic PEP8 test."""
        venv_with_pep8 = virtual_environments.prepare_virtualenv(venv.packages + ("pytest-pep8",))
        output = run(venv_with_pep8, 'pep8_test.py', options="--pep8")
        pep8_test_name = "tests.guinea-pigs.pytest.pep8_test.PEP8"
        test_name = "tests.guinea-pigs.pytest.pep8_test.test_ok"
        ms = assert_service_messages(
            output,
            [
                ServiceMessage('testCount', {'count': "2"}),
                ServiceMessage('testStarted', {'name': pep8_test_name}),
                ServiceMessage('testFailed', {'name': pep8_test_name}),
                ServiceMessage('testFinished', {'name': pep8_test_name}),
                ServiceMessage('testStarted', {'name': test_name}),
                ServiceMessage('testFinished', {'name': test_name}),
            ])
        assert ms[2].params["details"].find("E302 expected 2 blank lines, found 1") > 0
def test_pytest_pylint(venv):
    """pytest-pylint violations are reported as a failed synthetic Pylint test."""
    venv_with_pylint = virtual_environments.prepare_virtualenv(venv.packages + ("pytest-pylint<0.14.0",))
    output = run(venv_with_pylint, 'pylint_test.py', options="--pylint")
    pylint_test_name = "tests.guinea-pigs.pytest.pylint_test.Pylint"
    test_name = "tests.guinea-pigs.pytest.pylint_test.test_ok"
    ms = assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "2"}),
            ServiceMessage('testStarted', {'name': pylint_test_name}),
            ServiceMessage('testFailed', {'name': pylint_test_name}),
            ServiceMessage('testFinished', {'name': pylint_test_name}),
            ServiceMessage('testStarted', {'name': test_name}),
            ServiceMessage('testFinished', {'name': test_name}),
        ])
    assert ms[2].params["details"].find("Unused import sys") > 0
def test_pytest_flake8(venv):
    """pytest-flake8 reports per-file FLAKE8 failures plus per-violation pep8 tests."""
    venv_with_pylint = virtual_environments.prepare_virtualenv(venv.packages + ("pytest-flake8",))
    file_names = ['./flake8_test1.py', './flake8_test2.py']
    output = run(venv_with_pylint, file_names, options="--flake8")
    file_paths = [os.path.realpath(os.path.join('tests', 'guinea-pigs', 'pytest', file_name))
                  for file_name in file_names]
    expected = [ServiceMessage('testCount', {'count': "4"})]
    for file_path in file_paths:
        test_base, _ = os.path.splitext(os.path.basename(file_path))
        flake8_test_name = "tests.guinea-pigs.pytest.{}.FLAKE8".format(test_base)
        pytest_name = "tests.guinea-pigs.pytest.{}.test_ok".format(test_base)
        expected.extend([
            ServiceMessage('testStarted', {'name': flake8_test_name}),
            ServiceMessage('testFailed', {'name': flake8_test_name}),
            ServiceMessage('testFinished', {'name': flake8_test_name}),
            ServiceMessage('testStarted', {'name': pytest_name}),
            ServiceMessage('testFinished', {'name': pytest_name}),
        ])
    for file_path in file_paths:
        # the '|' characters below are TeamCity service-message escapes for quotes
        test_message = "F401 |'sys|' imported but unused"
        test_name = "pep8: {}: {}".format(file_path.replace("\\", "/"), test_message)
        expected.extend([
            ServiceMessage('testStarted', {'name': test_name}),
            ServiceMessage('testFailed', {'name': test_name, 'message': test_message}),
            ServiceMessage('testFinished', {'name': test_name}),
        ])
    ms = assert_service_messages(output, expected)
    assert ms[2].params["details"].find(test_message.replace('|', '|||')) > 0
def test_hierarchy(venv):
    """Nested package/class tests produce fully-qualified dotted test names."""
    output = run(venv, 'namespace')
    test_name = 'tests.guinea-pigs.pytest.namespace.pig_test.TestSmoke.test_smoke'
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "1"}),
            ServiceMessage('testStarted', {'name': test_name, 'flowId': test_name}),
            ServiceMessage('testFinished', {'name': test_name, 'flowId': test_name}),
        ])
def test_force_tc_reporting(venv):
    """--teamcity forces service messages even without the TEAMCITY_VERSION env."""
    output = run(venv, 'namespace', options="--teamcity", set_tc_version=False)
    assert has_service_messages(output)
def test_tc_reporting(venv):
    """Service messages are emitted by default when running under TeamCity."""
    output = run(venv, 'namespace')
    assert has_service_messages(output)
def test_no_reporting_when_no_teamcity(venv):
    """No service messages are emitted outside of TeamCity by default."""
    output = run(venv, 'namespace', set_tc_version=False)
    assert not has_service_messages(output)
def test_reporting_disabled(venv):
    """--no-teamcity suppresses service messages even under TeamCity."""
    output = run(venv, 'namespace', set_tc_version=True, options="--no-teamcity")
    assert not has_service_messages(output)
def test_custom_test_items(venv):
    """Custom (non-function) pytest collection items are reported per item."""
    output = run(venv, 'custom')
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "2"}),
            ServiceMessage('testStarted', {'name': 'tests.guinea-pigs.pytest.custom.test_simple_yml.line1'}),
            ServiceMessage('testFinished', {'name': 'tests.guinea-pigs.pytest.custom.test_simple_yml.line1'}),
            ServiceMessage('testStarted', {'name': 'tests.guinea-pigs.pytest.custom.test_simple_yml.line2'}),
            ServiceMessage('testFinished', {'name': 'tests.guinea-pigs.pytest.custom.test_simple_yml.line2'}),
        ])
if sys.version_info >= (2, 6):
    # parametrized over several coverage/pytest-cov version pairs, including
    # the latest (empty version spec)
    @pytest.mark.parametrize("coverage_version, pytest_cov_version", [
        ("==4.4.2", "==2.7.1"),
        ("==4.5.4", "==2.7.1"),
        ("==5.0.1", "==2.7.1"),
        # latest
        ("", ""),
    ])
    def test_coverage(venv, coverage_version, pytest_cov_version):
        """Coverage runs report buildStatisticValue messages with line counts."""
        venv_with_coverage = virtual_environments.prepare_virtualenv(
            venv.packages + (
                "coverage" + coverage_version,
                "pytest-cov" + pytest_cov_version))
        output = run(venv_with_coverage, 'coverage_test', options="--cov coverage_test")
        test_name = "tests.guinea-pigs.pytest.coverage_test.coverage_test.test_covered_func"
        assert_service_messages(
            output,
            [
                ServiceMessage('testCount', {'count': "1"}),
                ServiceMessage('testStarted', {'name': test_name}),
                ServiceMessage('testFinished', {'name': test_name}),
                ServiceMessage('buildStatisticValue', {'key': 'CodeCoverageAbsLCovered', 'value': '9'}),
                ServiceMessage('buildStatisticValue', {'key': 'CodeCoverageAbsLTotal', 'value': '13'}),
                ServiceMessage('buildStatisticValue', {'key': 'CodeCoverageAbsLUncovered', 'value': '4'}),
            ])
def test_runtime_error(venv):
    """Exceptions and assertion failures inside tests are reported with details."""
    output = run(venv, 'runtime_error_test.py')
    ms = assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "2"}),
            ServiceMessage('testStarted', {'name': 'tests.guinea-pigs.pytest.runtime_error_test.test_exception'}),
            ServiceMessage('testFailed', {'flowId': 'tests.guinea-pigs.pytest.runtime_error_test.test_exception'}),
            ServiceMessage('testFinished', {'name': 'tests.guinea-pigs.pytest.runtime_error_test.test_exception'}),
            ServiceMessage('testStarted', {'name': 'tests.guinea-pigs.pytest.runtime_error_test.test_error'}),
            ServiceMessage('testFailed', {}),
            ServiceMessage('testFinished', {'name': 'tests.guinea-pigs.pytest.runtime_error_test.test_error'}),
        ])
    assert ms[2].params["details"].find("raise Exception") > 0
    assert ms[2].params["details"].find("oops") > 0
    assert ms[5].params["details"].find("assert 0 != 0") > 0
def test_unittest_error(venv):
    """unittest-style errors and failures are both reported as testFailed."""
    output = run(venv, 'unittest_error_test.py')
    ms = assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "2"}),
            ServiceMessage('testStarted', {'name': 'tests.guinea-pigs.pytest.unittest_error_test.TestErrorFail.test_error'}),
            ServiceMessage('testFailed', {}),
            ServiceMessage('testFinished', {'name': 'tests.guinea-pigs.pytest.unittest_error_test.TestErrorFail.test_error'}),
            ServiceMessage('testStarted', {'name': 'tests.guinea-pigs.pytest.unittest_error_test.TestErrorFail.test_fail'}),
            ServiceMessage('testFailed', {}),
            ServiceMessage('testFinished', {'name': 'tests.guinea-pigs.pytest.unittest_error_test.TestErrorFail.test_fail'}),
        ])
    assert ms[2].params["details"].find("raise Exception") > 0
    assert ms[2].params["details"].find("oops") > 0
    assert ms[5].params["details"].find("AssertionError") > 0
def test_fixture_error(venv):
    """A failing fixture marks every dependent test as 'test setup failed'."""
    output = run(venv, 'fixture_error_test.py')
    test1_name = 'tests.guinea-pigs.pytest.fixture_error_test.test_error1'
    test2_name = 'tests.guinea-pigs.pytest.fixture_error_test.test_error2'
    ms = assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "2"}),
            ServiceMessage('testStarted', {'name': test1_name, 'flowId': test1_name}),
            ServiceMessage('testFailed', {'name': test1_name,
                                          'message': 'test setup failed',
                                          'flowId': test1_name}),
            ServiceMessage('testFinished', {'name': test1_name}),
            ServiceMessage('testStarted', {'name': test2_name, 'flowId': test2_name}),
            ServiceMessage('testFailed', {'name': test2_name,
                                          'message': 'test setup failed',
                                          'flowId': test2_name}),
            ServiceMessage('testFinished', {'name': test2_name}),
        ])
    assert ms[2].params["details"].find("raise Exception") > 0
    assert ms[2].params["details"].find("oops") > 0
    assert ms[5].params["details"].find("raise Exception") > 0
    assert ms[5].params["details"].find("oops") > 0
def test_output(venv):
    """Captured stdout/stderr from setup, test and teardown become testStdOut/Err messages."""
    output = run(venv, 'output_test.py')
    test_name = 'tests.guinea-pigs.pytest.output_test.test_out'
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "1"}),
            ServiceMessage('testStarted', {'name': test_name, 'flowId': test_name, 'captureStandardOutput': 'false'}),
            ServiceMessage('blockOpened', {'name': 'test setup', 'flowId': test_name}),
            ServiceMessage('testStdOut', {'name': test_name, 'flowId': test_name, 'out': 'setup stdout|n'}),
            ServiceMessage('testStdErr', {'name': test_name, 'flowId': test_name, 'out': 'setup stderr|n'}),
            ServiceMessage('blockClosed', {'name': 'test setup'}),
            ServiceMessage('testStdOut', {'name': test_name, 'flowId': test_name, 'out': 'test stdout|n'}),
            ServiceMessage('testStdErr', {'name': test_name, 'flowId': test_name, 'out': 'test stderr|n'}),
            ServiceMessage('testFinished', {'name': test_name, 'flowId': test_name}),
            ServiceMessage('blockOpened', {'name': 'test teardown', 'flowId': test_name}),
            ServiceMessage('testStdOut', {'name': test_name, 'flowId': test_name, 'out': 'teardown stdout|n'}),
            ServiceMessage('testStdErr', {'name': test_name, 'flowId': test_name, 'out': 'teardown stderr|n'}),
            ServiceMessage('blockClosed', {'name': 'test teardown'}),
        ])
def test_class_with_method(venv):
    """Test methods report their bare method name via the metainfo attribute."""
    output = run(venv, 'class_with_method.py')
    assert_service_messages(
        output,
        [ServiceMessage('testCount', {'count': "1"})] +
        [ServiceMessage('testStarted', {"metainfo": "test_method"})] +
        [ServiceMessage('testFailed', {})] +
        [ServiceMessage('testFinished', {})]
    )
def test_chunked_output(venv):
    """Very large output is split into 50000-char chunks plus a remainder."""
    output = run(venv, 'chunked_output_test.py')
    full_line = 'x' * 50000
    # the guinea-pig test prints 1 MiB total; 20 full chunks plus this tail
    leftovers = 'x' * (1024 * 1024 - 50000 * 20)
    assert_service_messages(
        output,
        [ServiceMessage('testCount', {'count': "1"})] +
        [ServiceMessage('testStarted', {})] +
        [ServiceMessage('testStdOut', {'out': full_line})] * 20 +
        [ServiceMessage('testStdOut', {'out': leftovers})] +
        [ServiceMessage('testStdErr', {'out': full_line})] * 20 +
        [ServiceMessage('testStdErr', {'out': leftovers})] +
        [ServiceMessage('testFinished', {})]
    )
def test_output_no_capture(venv):
    """With -s, output is passed through raw and captureStandardOutput is true."""
    output = run(venv, 'output_test.py', options="-s")
    test_name = 'tests.guinea-pigs.pytest.output_test.test_out'
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "1"}),
            ServiceMessage('testStarted', {'name': test_name, 'flowId': test_name, 'captureStandardOutput': 'true'}),
            ServiceMessage('testFinished', {'name': test_name, 'flowId': test_name}),
        ])
    assert "setup stderr" in output
    assert "setup stdout" in output
    assert "test stderr" in output
    assert "test stdout" in output
    assert "teardown stderr" in output
    assert "teardown stdout" in output
def test_teardown_error(venv):
    """A teardown failure is reported as a separate synthetic failed test."""
    output = run(venv, 'teardown_error_test.py')
    teardown_test_id = 'tests.guinea-pigs.pytest.teardown_error_test.test_error_teardown'
    ms = assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "1"}),
            ServiceMessage('testStarted', {'name': 'tests.guinea-pigs.pytest.teardown_error_test.test_error'}),
            ServiceMessage('testFinished', {'name': 'tests.guinea-pigs.pytest.teardown_error_test.test_error'}),
            ServiceMessage('testStarted', {'name': teardown_test_id, 'flowId': teardown_test_id}),
            ServiceMessage('testFailed', {'flowId': teardown_test_id,
                                          'message': fix_slashes('tests/guinea-pigs/pytest/teardown_error_test.py') + ':13 (test_error)'}),
            ServiceMessage('testFinished', {'name': teardown_test_id, 'flowId': teardown_test_id}),
        ])
    assert ms[4].params["details"].find("raise Exception") > 0
    assert ms[4].params["details"].find("teardown oops") > 0
def test_module_error(venv):
    """A module-level import/collect error is reported as a failed collect test."""
    output = run(venv, 'module_error_test.py')
    ms = assert_service_messages(
        output,
        [
            ServiceMessage('testStarted', {'name': 'tests.guinea-pigs.pytest.module_error_test.top_level_collect'}),
            ServiceMessage('testFailed', {}),
            ServiceMessage('testFinished', {'name': 'tests.guinea-pigs.pytest.module_error_test.top_level_collect'}),
            ServiceMessage('testCount', {'count': "0"}),
        ])
    assert ms[1].params["details"].find("raise Exception") > 0
    assert ms[1].params["details"].find("module oops") > 0
def test_skip(venv):
    """Skipped tests emit testIgnored with the (possibly non-ASCII) skip reason."""
    if "pytest==2.7" in venv.packages:
        pytest.skip("Diff is broken for ancient pytest")
    output = run(venv, 'skip_test.py')
    test_name = 'tests.guinea-pigs.pytest.skip_test.test_function'
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "1"}),
            ServiceMessage('testStarted', {'name': test_name}),
            ServiceMessage('testIgnored', {'message': u'Skipped: skip reason причина', 'flowId': test_name}),
            ServiceMessage('testFinished', {'name': test_name}),
        ])
def test_monkey_patch_strftime(venv):
    """Reporting stays correct even if a test monkey-patches time.strftime."""
    output = run(venv, 'monkey_patch_strftime_test.py')
    test_name = 'tests.guinea-pigs.pytest.monkey_patch_strftime_test.test_monkeypatch'
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "1"}),
            ServiceMessage('testStarted', {'name': test_name}),
            ServiceMessage('testFinished', {'name': test_name}),
        ])
    # the patched strftime returns "spam"; it must not leak into timestamps
    assert output.find("spam") == -1
def test_collect_exception(venv):
    """An exception during collection reports output plus a failed collect test."""
    output = run(venv, 'collect_exception_test.py')
    test_name = 'tests.guinea-pigs.pytest.collect_exception_test.top_level_collect'
    ms = assert_service_messages(
        output,
        [
            ServiceMessage('testStarted', {'name': test_name, 'flowId': test_name}),
            ServiceMessage('testStdOut', {'out': 'Some output|n', 'flowId': test_name}),
            ServiceMessage('testFailed', {'flowId': test_name}),
            ServiceMessage('testFinished', {'name': test_name, 'flowId': test_name}),
            ServiceMessage('testCount', {'count': "0"}),
        ])
    assert ms[2].params["details"].find("runtime error") > 0
@pytest.mark.skipif("sys.version_info < (3, 6)", reason="requires Python 3.6+")
def test_rerun(venv):
    """--last-failed reruns only the previously failed test and reports it."""
    run(venv, 'test_rerun.py')
    output = run(venv, 'test_rerun.py', options='--last-failed')
    test_name = "tests.guinea-pigs.pytest.test_rerun.TestPyTest.testTwo"
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "1"}),
            ServiceMessage('testStarted', {'name': test_name, 'flowId': test_name}),
            ServiceMessage('testFailed', {'flowId': test_name}),
            ServiceMessage('testFinished', {'name': test_name, 'flowId': test_name}),
        ])
@pytest.mark.skipif("sys.version_info < (2, 7)", reason="requires Python 2.7+")
def test_collect_skip(venv):
    """A skip raised at collection time is reported as an ignored collect test."""
    output = run(venv, 'collect_skip_test.py')
    test_name = 'tests.guinea-pigs.pytest.collect_skip_test.top_level_collect'
    ms = assert_service_messages(
        output,
        [
            ServiceMessage('testStarted', {'name': test_name, 'flowId': test_name}),
            ServiceMessage('testStdOut', {'out': 'Some output|n', 'flowId': test_name}),
            ServiceMessage('testIgnored', {'flowId': test_name}),
            ServiceMessage('testFinished', {'name': test_name, 'flowId': test_name}),
            ServiceMessage('testCount', {'count': "0"}),
        ])
    assert ms[2].params["message"].find("skip reason") > 0
def test_params(venv):
    """Parametrized tests get escaped parameter values embedded in their names."""
    output = run(venv, 'params_test.py')
    # '|' below are TeamCity service-message escapes for quotes/brackets
    test1_name = 'tests.guinea-pigs.pytest.params_test.test_eval(3+5-8)'
    test2_name = "tests.guinea-pigs.pytest.params_test.test_eval(|'1_5|' + |'2|'-1_52)"
    test3_name = 'tests.guinea-pigs.pytest.params_test.test_eval(6*9-42)'
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "3"}),
            ServiceMessage('testStarted', {'name': test1_name, 'metainfo': 'test_eval|[3+5-8|]'}),
            ServiceMessage('testFinished', {'name': test1_name}),
            ServiceMessage('testStarted', {'name': test2_name}),
            ServiceMessage('testFinished', {'name': test2_name}),
            ServiceMessage('testStarted', {'name': test3_name}),
            ServiceMessage('testFailed', {'name': test3_name,
                                          'message': fix_slashes(
                                              'tests/guinea-pigs/pytest/params_test.py') + ':3 (test_eval|[6*9-42|])|n54 != 42|n'}),
            ServiceMessage('testFinished', {'name': test3_name}),
        ])
@pytest.mark.skipif("sys.version_info < (2, 5)", reason="broken on 2.4 somehow")
def test_params_2(venv):
    """Parameter values containing dots/URLs are sanitized in test names."""
    output = run(venv, 'params_test_2.py')
    test1_name = 'tests.guinea-pigs.pytest.params_test_2.test(None-https://facebook_com/)'
    test2_name = "tests.guinea-pigs.pytest.params_test_2.test(None-https://facebook_com/share_php?http://foo_com/)"
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "2"}),
            ServiceMessage('testStarted', {'name': test1_name}),
            ServiceMessage('testFinished', {'name': test1_name}),
            ServiceMessage('testStarted', {'name': test2_name}),
            ServiceMessage('testFinished', {'name': test2_name}),
        ])
@pytest.mark.skipif("sys.version_info < (2, 7) ", reason="requires Python 2.7")
def test_long_diff(venv):
    """Very long actual/expected values survive intact in testFailed messages."""
    output = run(venv, "../diff_assert_error_long.py")
    test_name = 'tests.guinea-pigs.diff_assert_error_long.test_test'
    assert_service_messages(
        output,
        [
            ServiceMessage('testCount', {'count': "1"}),
            ServiceMessage('testStarted', {'name': test_name}),
            ServiceMessage('testFailed', {'name': test_name, "actual": "foo" * 10000, "expected": "spam" * 10}),
            ServiceMessage('testFinished', {'name': test_name}),
        ])
@pytest.mark.skipif("sys.version_info < (2, 7) ", reason="requires Python 2.7")
def test_num_diff(venv):
output = run(venv, "../diff_assert_error_nums.py")
test_name = 'tests.guinea-pigs.diff_assert_error_nums.FooTest.test_test'
assert_service_messages(
output,
[
ServiceMessage('testCount', {'count': "1"}),
ServiceMessage('testStarted', {'name': test_name}),
ServiceMessage('testFailed', {'name': test_name, "actual": "123", "expected": "456"}),
ServiceMessage('testFinished', {'name': test_name}),
])
# index -1 from the list. Same list length.
else:
node_to_replace = self._head.get_back(self)
node.set_back(self, self._head.get_back(self).get_back(self))
node.set_next(self, self._head)
self._head.get_back(self).get_back(self).set_next(self, node)
self._head.set_back(self, node)
node_to_replace._links.pop(self._id)
# return a reference to the overriden node in case we need it
# return node_to_replace
return (position, node, node_to_replace)
# we are appending. Link the last node to its back, the first
# node of the list to its next. Make the former last node's
# next and the first node's back point to this node and increment
# self._length by one.
else:
last_node = self._head.get_back(self)
self._head.set_back(self, node)
last_node.set_next(self, node)
node.set_back(self, last_node)
node.set_next(self, self._head)
self._length += 1
# return a reference to the inserted node in case we need to use it
# for anything.
return (position, node)
# if there is one node in the list and we are overwriting
elif len(self) == 1 and overwrite:
# set the new node as the only node in the list and unlink the former one.
# Return a reference to the overriden node if needed for anything
node_to_replace = self._head
self._head = node
self._head.set_next(self, node)
self._head.set_back(self, node)
node_to_replace._links.pop(self._id)
return (0, self._head, node_to_replace)
# the list is not empty and we are not appending.
else:
# find the node to move forward by one
node_after = self._get_node_by_idx(position)
# link the inserted node back and next references to their now neighbour nodes,
# and the previous node's next and following node's back to it. Adjust length.
node.set_next(self, node_after)
node.set_back(self, node_after.get_back(self))
node_after.get_back(self).set_next(self, node)
node_after.set_back(self, node)
self._length += 1
# If the node was inserted at head position, self._head will now point to it.
if node_after is self._head:
self._head = node
# on overwrite=True, unlink the node after the inserted one.
# It is the same effect as replacing, or 'overwriting' a node.
# Also, readjust length since we are removing one node here.
# And return a reference to the overriden node if needed for anything later.
if overwrite:
node_after.get_next(self).set_back(self, node)
node.set_next(self, node_after.get_next(self))
node_after._links.pop(self._id)
self._length -= 1
# return node_after
return (position, node, node_after)
# return a reference to that node in case we need to use it for anything.
# return node
return (position, node)
def reverse(self):
    """
    Switches each node's 'back' and 'next' references and repositions the head
    reference to the last node in the list, effectively reversing it in place.
    This method is the equivalent to Python's list[::-1].
    """
    # Swap the 'next' and 'back' link entries for this list's id on every node.
    # NOTE(review): nodes are mutated while iterating over self; this relies on
    # the iterator's traversal order not being affected mid-loop -- confirm
    # against MSCDLL.__iter__ before restructuring this loop.
    for node in self:
        node._links[self._id]['next'], node._links[self._id]['back'] = \
            node._links[self._id]['back'], node._links[self._id]['next']
    # After the swap, get_next() from the old head reaches the former last
    # node; making it the head completes the in-place reversal.
    self._head = self._head.get_next(self)
def _get_node_by_idx(self, idx: int):
    """
    Takes an integer index (idx) and returns a reference to the node in that index
    position in the list. Accepts negative indexing, like a regular Python list.
    Walks from the head in whichever direction reaches the target in fewer hops.
    """
    if idx < -self._length or idx > self._length - 1:
        raise IndexError('Index out of range.')
    # Defensive guard only: an empty list always fails the range check above.
    assert self._length > 0, \
        'List is empty.'
    current = self._head
    # if we passed a negative index
    if idx < 0:
        # abs value of the negative index is MORE than half the length,
        # so the target is closer to the head moving forward
        if self._length // 2 < -idx:
            # iterate forward from head
            for _ in range(len(self) + idx):
                current = current.get_next(self)
        # abs value of the negative index is at most half the length,
        # so the target is closer to the tail
        else:
            # iterate backwards from head
            for _ in range(-idx):
                current = current.get_back(self)
    # a positive index was passed; same idea with directions adjusted:
    # the index falls in the first half, walk forward
    elif self._length // 2 > idx:
        for _ in range(idx):
            current = current.get_next(self)
    # the index falls in the second half, walk backwards from head
    else:
        for _ in range(len(self) - idx):
            current = current.get_back(self)
    return current
def _get_node_by_name(self, name):
    """
    Searches the list for a node whose name (id) equals ``name`` and returns
    it, or None when no such node exists. The search advances from both ends
    at once, so at most about half the list is visited.
    Only reliable on lists whose nodes have no repeated names.
    """
    assert self._length > 0, \
        'List is empty.'
    front = self._head
    rear = self._head.get_back(self)
    # One extra step covers the middle element on odd lengths.
    for _ in range(self._length // 2 + 1):
        if front._id == name:
            return front
        if rear._id == name:
            return rear
        front = front.get_next(self)
        rear = rear.get_back(self)
    return None
def _nodify(self, node_or_value):
    """
    Normalizes ``node_or_value`` into a MSCDLLNode registered with this list.
    A value that is not already a MSCDLLNode is wrapped in a new node; in
    either case this list's id is added as a key of the node's _links.
    Parameters:
        node_or_value (MSCDLLNode|any): a node to register with this list,
            or a value to wrap in a new node and register.
    Returns:
        (MSCDLLNode): a valid MSCDLLNode instance with this list set in its
        _links' keys.
    """
    node = (
        node_or_value
        if isinstance(node_or_value, MSCDLLNode)
        else MSCDLLNode(node_or_value)
    )
    node._links[self._id] = {}
    return node
def get_observer(self):
    """
    Returns this list's observer (its assigned MSCDLLObserver instance), or
    None when there is no observer, the observer is invalid, or the observer
    does not list this instance among its subscribers.
    """
    try:
        self._assert_subscription()
        return self._observer
    except AssertionError:
        return None
def get_observer_subscribers(self) -> list:
    """
    Returns the list of all linked lists observed by this list's assigned
    MSCDLLObserver instance. Returns None when this list has no observer, or
    the observer is invalid or does not track this instance as a subscriber.
    """
    try:
        self._assert_subscription()
    except AssertionError:
        return None
    else:
        return self._observer._subscribers
def subscribe(self, observer):
    """
    Subscribes this list to ``observer`` by calling the observer's
    subscribe(), which adds this list to its subscribers and records the
    observer on this list.
    Parameters:
        observer (MSCDLLObserver): an observer which this list will subscribe to.
    Returns:
        (MSCDLLObserver): a reference to the observer this list is now subscribed to.
    Raises:
        TypeError: when ``observer`` is not a MSCDLLObserver, or this list
            already has an assigned observer.
    """
    # Guard clause: reject non-observers and double subscriptions up front.
    if not isinstance(observer, MSCDLLObserver) or self._observer:
        raise TypeError(
            'An instance of MSCDLL can only subscribe to an instance of MSCDLLObserver, ' +
            'and must unsubscribe to its assigned observer (if any) before subscribing ' +
            'to a new one.'
        )
    observer.subscribe(self)
    return observer
def unsubscribe(self):
    """
    Calls for unsubscribe() in this list's assigned MSCDLLObserver instance,
    which removes this list from its subscribers.
    It also sets this list's _observer field to None.
    Returns:
        (MSCDLLObserver): a reference to the observer this list has unsubscribed from.
    Raises:
        TypeError: when this list is not subscribed to a valid observer.
    """
    try:
        self._assert_subscription()
    except AssertionError:
        # ``from None`` suppresses the internal AssertionError context:
        # callers only care about this method's TypeError contract.
        raise TypeError(
            'An instance of MSCDLL can only unsubscribe to an instance of MSCDLLObserver it is subscribed to.'
        ) from None
    observer = self._observer
    self._observer.unsubscribe(self)
    return observer
def _assert_subscription(self):
    """
    Checks if the list is subscribed to a valid MSCDLLObserver instance, and
    that MSCDLLObserver has the list as one of its subscribers. Returns True
    if so. Otherwise, it raises an AssertionError.
    NOTE(review): callers deliberately catch AssertionError from this method,
    so these must stay plain asserts (mind that they vanish under ``python -O``).
    """
    # The assigned observer must be a real MSCDLLObserver instance...
    assert isinstance(self._observer, MSCDLLObserver), \
        'This instance must be subscribed to a MSCDLLObserver to use any \'linked\' methods.'
    # ...and must actually track this list among its subscribers.
    assert self in self._observer._subscribers, \
        f'This instance is not subscribed to the assigned observer named {self._observer._id}.'
    return True
def linked_append(self, node_or_value, **kwargs) -> dict:
    """
    Calls for the assigned observer's _append method to append node_or_value
    to all of its subscribers. It uses insert() to do so, passing 'end' as a
    positional argument and kwargs['overwrite'] as False.
    Parameters:
        node_or_value (MSCDLLNode|any): the same one as this class' append().
    Returns:
        (dict): a dictionary whose keys are the names (ids) of all lists, and
        the return of each list's append() as their values.
    Raises:
        AssertionError: if this list is not subscribed to a valid observer.
    """
    # Fail fast when there is no valid observer to broadcast through.
    self._assert_subscription()
    return self._observer._append(node_or_value, **kwargs)
def linked_prepend(self, node_or_value, **kwargs) -> dict:
"""
Calls for the assigned observer's | |
# -*- coding: utf-8 -*-
"""
MagicTelecomAPILib.Controllers.AccountsController
This file was automatically generated by APIMATIC BETA v2.0 on 06/22/2016
"""
from MagicTelecomAPILib.APIHelper import APIHelper
from MagicTelecomAPILib.APIException import APIException
from MagicTelecomAPILib.Configuration import Configuration
from MagicTelecomAPILib.Http.HttpRequest import HttpRequest
from MagicTelecomAPILib.Http.HttpResponse import HttpResponse
from MagicTelecomAPILib.Http.RequestsClient import RequestsClient
from MagicTelecomAPILib.Controllers.BaseController import BaseController
class AccountsController(BaseController):
"""A Controller to access Endpoints in the MagicTelecomAPILib API."""
def __init__(self, http_client = None):
    """Constructor which allows a different HTTP client for this controller.
    Args:
        http_client: optional HTTP client instance; when None, the
            BaseController default client is used.
    """
    BaseController.__init__(self, http_client)
def create_caller_locations(self,
                            account_number,
                            caller_location_form):
    """Does a POST request to /accounts/{account_number}/caller_locations.
    Create a new caller location
    Args:
        account_number (string): Number of the account
        caller_location_form (CallerLocationForm): Caller Location Data
    Returns:
        mixed: Response from the API.
    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # The base uri for api requests
    query_builder = Configuration.BASE_URI
    # Prepare query string for API call
    query_builder += "/accounts/{account_number}/caller_locations"
    # Process optional template parameters
    query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
        "account_number": account_number
    })
    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)
    # Prepare headers. The generated code listed "X-Auth-Token" twice;
    # duplicate dict keys silently collapse, so it is declared once here.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "accept": "application/json",
        "content-type": "application/json; charset=utf-8",
        "X-Auth-Token": Configuration.x_auth_token
    }
    # Prepare the API call.
    http_request = self.http_client.post(query_url, headers=headers,
                                         parameters=APIHelper.json_serialize(caller_location_form))
    # Invoke the API call to fetch the response.
    response = self.http_client.execute_as_string(http_request)
    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        raise APIException("Resource not found", 404, response.raw_body)
    # Global error handling using HTTP status codes.
    self.validate_response(response)
    return response.raw_body
def get_caller_location_by_id(self,
                              account_number,
                              caller_location_id):
    """Does a GET request to /accounts/{account_number}/caller_locations/{caller_location_id}.
    Get a caller location by Id
    Args:
        account_number (string): Account Number
        caller_location_id (int): a caller location id
    Returns:
        mixed: Response from the API.
    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # The base uri for api requests
    query_builder = Configuration.BASE_URI
    # Prepare query string for API call
    query_builder += "/accounts/{account_number}/caller_locations/{caller_location_id}"
    # Process optional template parameters
    query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
        "account_number": account_number,
        "caller_location_id": caller_location_id
    })
    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)
    # Prepare headers. The generated code listed "X-Auth-Token" twice;
    # duplicate dict keys silently collapse, so it is declared once here.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "accept": "application/json",
        "X-Auth-Token": Configuration.x_auth_token
    }
    # Prepare the API call.
    http_request = self.http_client.get(query_url, headers=headers)
    # Invoke the API call to fetch the response.
    response = self.http_client.execute_as_string(http_request)
    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        raise APIException("Resource not found", 404, response.raw_body)
    # Global error handling using HTTP status codes.
    self.validate_response(response)
    return response.raw_body
def delete_caller_locations(self,
                            account_number):
    """Does a DELETE request to /accounts/{account_number}/caller_locations.
    Delete all caller locations
    Args:
        account_number (string): Account Number
    Returns:
        string: Response from the API.
    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # The base uri for api requests
    query_builder = Configuration.BASE_URI
    # Prepare query string for API call
    query_builder += "/accounts/{account_number}/caller_locations"
    # Process optional template parameters
    query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
        "account_number": account_number
    })
    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)
    # Prepare headers. The generated code listed "X-Auth-Token" twice;
    # duplicate dict keys silently collapse, so it is declared once here.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "X-Auth-Token": Configuration.x_auth_token
    }
    # Prepare the API call.
    http_request = self.http_client.delete(query_url, headers=headers)
    # Invoke the API call to fetch the response.
    response = self.http_client.execute_as_string(http_request)
    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        raise APIException("Resource not found", 404, response.raw_body)
    # Global error handling using HTTP status codes.
    self.validate_response(response)
    return str(response.raw_body)
def update_caller_location_by_id(self,
                                 account_number,
                                 caller_location_id,
                                 caller_location_form):
    """Does a PUT request to /accounts/{account_number}/caller_locations/{caller_location_id}.
    Update a caller location
    Args:
        account_number (string): Account Number
        caller_location_id (int): Caller Location Id
        caller_location_form (CallerLocationForm): Caller Location Params
    Returns:
        string: Response from the API.
    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # The base uri for api requests
    query_builder = Configuration.BASE_URI
    # Prepare query string for API call
    query_builder += "/accounts/{account_number}/caller_locations/{caller_location_id}"
    # Process optional template parameters
    query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
        "account_number": account_number,
        "caller_location_id": caller_location_id
    })
    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)
    # Prepare headers. The generated code listed "X-Auth-Token" twice;
    # duplicate dict keys silently collapse, so it is declared once here.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "content-type": "application/json; charset=utf-8",
        "X-Auth-Token": Configuration.x_auth_token
    }
    # Prepare the API call.
    http_request = self.http_client.put(query_url, headers=headers,
                                        parameters=APIHelper.json_serialize(caller_location_form))
    # Invoke the API call to fetch the response.
    response = self.http_client.execute_as_string(http_request)
    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        # Message normalized: the generated code had a stray leading space,
        # inconsistent with the other endpoints' "Resource not found".
        raise APIException("Resource not found", 404, response.raw_body)
    # Global error handling using HTTP status codes.
    self.validate_response(response)
    return str(response.raw_body)
def delete_caller_location_by_id(self,
                                 account_number,
                                 caller_location_id):
    """Does a DELETE request to /accounts/{account_number}/caller_locations/{caller_location_id}.
    Delete a caller location by id
    Args:
        account_number (string): Account Number
        caller_location_id (int): Caller Location Id
    Returns:
        string: Response from the API.
    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # The base uri for api requests
    query_builder = Configuration.BASE_URI
    # Prepare query string for API call
    query_builder += "/accounts/{account_number}/caller_locations/{caller_location_id}"
    # Process optional template parameters
    query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
        "account_number": account_number,
        "caller_location_id": caller_location_id
    })
    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)
    # Prepare headers. The generated code listed "X-Auth-Token" twice;
    # duplicate dict keys silently collapse, so it is declared once here.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "X-Auth-Token": Configuration.x_auth_token
    }
    # Prepare the API call.
    http_request = self.http_client.delete(query_url, headers=headers)
    # Invoke the API call to fetch the response.
    response = self.http_client.execute_as_string(http_request)
    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        # Message normalized: the generated code had a stray leading space,
        # inconsistent with the other endpoints' "Resource not found".
        raise APIException("Resource not found", 404, response.raw_body)
    # Global error handling using HTTP status codes.
    self.validate_response(response)
    return str(response.raw_body)
def get_caller_locations(self,
account_number,
page=None,
limit=None):
"""Does a GET request to /accounts/{account_number}/caller_locations.
Allow clients to get the list of caller locations for the specific
account.
Args:
account_number (string): Number of the account
page (int, optional): Zero based offset index for the results.
e.g. 0 would start at the first result and 10 would start at
the eleventh result
limit (int, optional): Maximum number of results to return in the
response.
Returns:
mixed: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/accounts/{account_number}/caller_locations"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"account_number": account_number
})
# Process optional query parameters
query_parameters = {
"page": page,
"limit": limit
}
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "APIMATIC 2.0",
"accept": "application/json",
"X-Auth-Token": Configuration.x_auth_token,
"X-Auth-Token": Configuration.x_auth_token
}
# Prepare the API call.
http_request = self.http_client.get(query_url, headers=headers, | |
# commons/process_mols.py
import math
import warnings
import pandas as pd
import dgl
import numpy as np
import scipy.spatial as spa
import torch
from Bio.PDB import get_surface, PDBParser, ShrakeRupley
from Bio.PDB.PDBExceptions import PDBConstructionWarning
from biopandas.pdb import PandasPdb
from rdkit import Chem
from rdkit.Chem import MolFromPDBFile, AllChem, GetPeriodicTable, rdDistGeom
from rdkit.Chem.rdPartialCharges import ComputeGasteigerCharges
from scipy import spatial
from scipy.special import softmax
from commons.geometry_utils import rigid_transform_Kabsch_3D, rigid_transform_Kabsch_3D_torch
from commons.logger import log
# Module-level singletons shared by the functions below (constructing a
# parser / periodic table repeatedly would be wasteful).
biopython_parser = PDBParser()
periodic_table = GetPeriodicTable()
# Vocabularies for the categorical features. Values outside an enumerated
# range map to the trailing 'misc' bucket via safe_index().
allowable_features = {
    'possible_atomic_num_list': list(range(1, 119)) + ['misc'],
    'possible_chirality_list': [
        'CHI_UNSPECIFIED',
        'CHI_TETRAHEDRAL_CW',
        'CHI_TETRAHEDRAL_CCW',
        'CHI_OTHER'
    ],
    'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],
    'possible_numring_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],
    'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],
    'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'],
    'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
    'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],
    'possible_hybridization_list': [
        'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc'
    ],
    'possible_is_aromatic_list': [False, True],
    'possible_is_in_ring3_list': [False, True],
    'possible_is_in_ring4_list': [False, True],
    'possible_is_in_ring5_list': [False, True],
    'possible_is_in_ring6_list': [False, True],
    'possible_is_in_ring7_list': [False, True],
    'possible_is_in_ring8_list': [False, True],
    'possible_amino_acids': ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',
                             'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL', 'HIP', 'HIE', 'TPO', 'HID', 'LEV', 'MEU',
                             'PTR', 'GLV', 'CYT', 'SEP', 'HIZ', 'CYM', 'GLM', 'ASQ', 'TYS', 'CYX', 'GLZ', 'misc'],
    'possible_atom_type_2': ['C*', 'CA', 'CB', 'CD', 'CE', 'CG', 'CH', 'CZ', 'N*', 'ND', 'NE', 'NH', 'NZ', 'O*', 'OD',
                             'OE', 'OG', 'OH', 'OX', 'S*', 'SD', 'SG', 'misc'],
    'possible_atom_type_3': ['C', 'CA', 'CB', 'CD', 'CD1', 'CD2', 'CE', 'CE1', 'CE2', 'CE3', 'CG', 'CG1', 'CG2', 'CH2',
                             'CZ', 'CZ2', 'CZ3', 'N', 'ND1', 'ND2', 'NE', 'NE1', 'NE2', 'NH1', 'NH2', 'NZ', 'O', 'OD1',
                             'OD2', 'OE1', 'OE2', 'OG', 'OG1', 'OH', 'OXT', 'SD', 'SG', 'misc'],
}
# (per-feature vocabulary sizes, scalar feature count) consumed by AtomEncoder.
# Ligand atoms: 16 categorical features + 1 scalar (Gasteiger charge).
lig_feature_dims = (list(map(len, [
    allowable_features['possible_atomic_num_list'],
    allowable_features['possible_chirality_list'],
    allowable_features['possible_degree_list'],
    allowable_features['possible_formal_charge_list'],
    allowable_features['possible_implicit_valence_list'],
    allowable_features['possible_numH_list'],
    allowable_features['possible_number_radical_e_list'],
    allowable_features['possible_hybridization_list'],
    allowable_features['possible_is_aromatic_list'],
    allowable_features['possible_numring_list'],
    allowable_features['possible_is_in_ring3_list'],
    allowable_features['possible_is_in_ring4_list'],
    allowable_features['possible_is_in_ring5_list'],
    allowable_features['possible_is_in_ring6_list'],
    allowable_features['possible_is_in_ring7_list'],
    allowable_features['possible_is_in_ring8_list'],
])), 1)  # number of scalar features
# Receptor atoms: 4 categorical features + 2 scalars (sasa, bfactor).
rec_atom_feature_dims = (list(map(len, [
    allowable_features['possible_amino_acids'],
    allowable_features['possible_atomic_num_list'],
    allowable_features['possible_atom_type_2'],
    allowable_features['possible_atom_type_3'],
])), 2)
# Receptor residues: amino-acid identity + 2 scalars (sasa, bfactor).
rec_residue_feature_dims = (list(map(len, [
    allowable_features['possible_amino_acids']
])), 2)
def lig_atom_featurizer(mol):
    """Builds the per-atom feature matrix for a ligand molecule.

    Each row holds 16 categorical feature indices (into allowable_features)
    followed by the atom's Gasteiger partial charge.
    Returns a (num_atoms, 17) torch tensor.
    """
    # Gasteiger charges come out NaN for 93 molecules in all of PDBbind;
    # those entries fall back to 0 below.
    ComputeGasteigerCharges(mol)
    ring_info = mol.GetRingInfo()
    features = []
    for atom_idx, atom in enumerate(mol.GetAtoms()):
        charge = atom.GetDoubleProp('_GasteigerCharge')
        if np.isnan(charge) or np.isinf(charge):
            charge = 0.
        row = [
            safe_index(allowable_features['possible_atomic_num_list'], atom.GetAtomicNum()),
            allowable_features['possible_chirality_list'].index(str(atom.GetChiralTag())),
            safe_index(allowable_features['possible_degree_list'], atom.GetTotalDegree()),
            safe_index(allowable_features['possible_formal_charge_list'], atom.GetFormalCharge()),
            safe_index(allowable_features['possible_implicit_valence_list'], atom.GetImplicitValence()),
            safe_index(allowable_features['possible_numH_list'], atom.GetTotalNumHs()),
            safe_index(allowable_features['possible_number_radical_e_list'], atom.GetNumRadicalElectrons()),
            safe_index(allowable_features['possible_hybridization_list'], str(atom.GetHybridization())),
            allowable_features['possible_is_aromatic_list'].index(atom.GetIsAromatic()),
            safe_index(allowable_features['possible_numring_list'], ring_info.NumAtomRings(atom_idx)),
        ]
        # Ring-membership flags for ring sizes 3 through 8, in order.
        row.extend(
            allowable_features['possible_is_in_ring%d_list' % size].index(
                ring_info.IsAtomInRingOfSize(atom_idx, size))
            for size in (3, 4, 5, 6, 7, 8)
        )
        row.append(charge)
        features.append(row)
    return torch.tensor(features)
# Shared solvent-accessible-surface-area (SASA) calculator, reused by the
# receptor featurizers below.
sr = ShrakeRupley(probe_radius=1.4,  # in A. Default is 1.40 roughly the radius of a water molecule.
                  n_points=100)  # resolution of the surface of each atom. Default is 100. A higher number of points results in more precise measurements, but slows down the calculation.
def rec_atom_featurizer(rec, surface_indices):
    """Featurizes the surface atoms and C-alpha atoms of a receptor.

    Args:
        rec: Bio.PDB model whose atoms are iterated via rec.get_atoms().
        surface_indices: indices (into the atom iteration order) of atoms
            considered part of the surface.
    Returns:
        (c_alpha_feats, surface_feats): two float32 tensors of rows
        [aa_index, atomic_num_index, atom_type_2_index, atom_type_3_index,
        sasa, bfactor].
    """
    surface_atom_feat = []
    c_alpha_feat = []
    # Membership is tested once per atom; a set makes that O(1) instead of
    # O(n) per test when a list is passed in.
    surface_index_set = set(surface_indices)
    sr.compute(rec, level="A")
    for i, atom in enumerate(rec.get_atoms()):
        is_surface = i in surface_index_set
        if is_surface or atom.name == 'CA':
            atom_name, element = atom.name, atom.element
            sasa = atom.sasa
            bfactor = atom.bfactor
            if element == 'CD':
                # presumably a mislabeled delta-carbon rather than cadmium --
                # TODO(review): confirm against the upstream PDB cleaning step.
                element = 'C'
            assert not element == ''
            assert not np.isinf(bfactor)
            assert not np.isnan(bfactor)
            assert not np.isinf(sasa)
            assert not np.isnan(sasa)
            try:
                atomic_num = periodic_table.GetAtomicNumber(element)
            except Exception:
                # Unknown element symbol: -1 lands in the 'misc' bucket via
                # safe_index. (Narrowed from a bare except, which would also
                # have swallowed KeyboardInterrupt/SystemExit.)
                atomic_num = -1
            atom_feat = [safe_index(allowable_features['possible_amino_acids'], atom.get_parent().get_resname()),
                         safe_index(allowable_features['possible_atomic_num_list'], atomic_num),
                         safe_index(allowable_features['possible_atom_type_2'], (atom_name + '*')[:2]),
                         safe_index(allowable_features['possible_atom_type_3'], atom_name),
                         sasa,
                         bfactor]
            if is_surface:
                surface_atom_feat.append(atom_feat)
            if atom.name == 'CA':
                c_alpha_feat.append(atom_feat)
    return torch.tensor(c_alpha_feat, dtype=torch.float32), torch.tensor(surface_atom_feat, dtype=torch.float32)
def get_receptor_atom_subgraph(rec, rec_coords, lig, lig_coords=None, graph_cutoff=4, max_neighbor=8, subgraph_radius=7):
    """Builds a DGL graph over the receptor atoms near the ligand.

    Atoms within ``subgraph_radius`` Angstrom of any ligand atom become nodes;
    edges connect atoms closer than ``graph_cutoff``, capped at
    ``max_neighbor`` neighbors per node.

    Args:
        rec: Bio.PDB model of the receptor.
        rec_coords: list of per-residue coordinate arrays (concatenated here).
        lig: RDKit molecule; its conformer supplies coordinates when
            ``lig_coords`` is None.
        lig_coords: optional explicit ligand coordinates.
        graph_cutoff: edge distance threshold in Angstrom.
        max_neighbor: per-node neighbor cap (None disables the cap).
        subgraph_radius: ligand-proximity threshold for node selection.
    Returns:
        dgl.DGLGraph with 'feat', 'x', 'mu_r_norm' node data and 'feat' edge data.
    """
    # Bugfix: the original used ``lig_coords == None`` / ``max_neighbor != None``.
    # ``==`` on a numpy array broadcasts elementwise and makes the ``if``
    # ambiguous; identity comparison is required.
    lig_coords = lig.GetConformer().GetPositions() if lig_coords is None else lig_coords
    rec_coords = np.concatenate(rec_coords, axis=0)
    sr.compute(rec, level="A")
    lig_rec_distance = spa.distance.cdist(lig_coords, rec_coords)
    subgraph_indices = np.where(np.min(lig_rec_distance, axis=0) < subgraph_radius)[0]
    subgraph_coords = rec_coords[subgraph_indices]
    distances = spa.distance.cdist(subgraph_coords, subgraph_coords)
    src_list = []
    dst_list = []
    dist_list = []
    mean_norm_list = []
    # Loop-invariant: length scales for the soft neighborhood weighting.
    sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))
    for i in range(len(subgraph_coords)):
        dst = list(np.where(distances[i, :] < graph_cutoff)[0])
        dst.remove(i)  # drop the self edge (distance 0 always passes the cutoff)
        if max_neighbor is not None and len(dst) > max_neighbor:
            dst = list(np.argsort(distances[i, :]))[1: max_neighbor + 1]
        if len(dst) == 0:
            dst = list(np.argsort(distances[i, :]))[1:2]  # choose second because first is i itself
            log(f'The graph_cutoff {graph_cutoff} was too small for one c_alpha such that it had no neighbors. So we connected it to the closest other c_alpha')
        assert i not in dst
        src = [i] * len(dst)
        src_list.extend(src)
        dst_list.extend(dst)
        valid_dist = list(distances[i, dst])
        dist_list.extend(valid_dist)
        valid_dist_np = distances[i, dst]
        weights = softmax(- valid_dist_np.reshape((1, -1)) ** 2 / sigma, axis=1)  # (sigma_num, neigh_num)
        assert weights[0].sum() > 1 - 1e-2 and weights[0].sum() < 1.01
        diff_vecs = subgraph_coords[src, :] - subgraph_coords[dst, :]  # (neigh_num, 3)
        mean_vec = weights.dot(diff_vecs)  # (sigma_num, 3)
        denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1))  # (sigma_num,)
        mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator  # (sigma_num,)
        mean_norm_list.append(mean_vec_ratio_norm)
    assert len(src_list) == len(dst_list)
    assert len(dist_list) == len(dst_list)
    graph = dgl.graph((torch.tensor(src_list), torch.tensor(dst_list)), num_nodes=len(subgraph_coords), idtype=torch.int32)
    _, features = rec_atom_featurizer(rec, surface_indices=list(subgraph_indices))
    graph.ndata['feat'] = features
    graph.edata['feat'] = distance_featurizer(dist_list, divisor=1)  # avg distance = 7. So divisor = (4/7)*7 = 4
    graph.ndata['x'] = torch.from_numpy(subgraph_coords.astype(np.float32))
    graph.ndata['mu_r_norm'] = torch.from_numpy(np.array(mean_norm_list).astype(np.float32))
    log('number of subgraph nodes = ', len(subgraph_coords), ' number of edges in subgraph = ', len(dist_list))
    return graph
def rec_residue_featurizer(rec):
    """Per-residue receptor features: [amino-acid index, SASA, CA b-factor].

    Args:
        rec: Bio.PDB model; residues are iterated via rec.get_residues().
    Returns:
        (N_res, 3) float32 tensor.
    Raises:
        AssertionError: if a residue has no CA atom or a non-finite
            sasa/bfactor value.
    """
    feature_list = []
    sr.compute(rec, level="R")
    for residue in rec.get_residues():
        sasa = residue.sasa
        # Bugfix: the original left ``bfactor`` holding the previous
        # residue's value (or undefined on the first residue) when a residue
        # had no CA atom; fail loudly instead of silently reusing stale data.
        bfactor = None
        for atom in residue:
            if atom.name == 'CA':
                bfactor = atom.bfactor
        assert bfactor is not None, 'residue without a CA atom'
        assert not np.isinf(bfactor)
        assert not np.isnan(bfactor)
        assert not np.isinf(sasa)
        assert not np.isnan(sasa)
        feature_list.append([safe_index(allowable_features['possible_amino_acids'], residue.get_resname()),
                             sasa,
                             bfactor])
    return torch.tensor(feature_list, dtype=torch.float32)  # (N_res, 3)
class AtomEncoder(torch.nn.Module):
    """Embeds rows of mixed categorical + scalar atom features into emb_dim.

    ``feature_dims`` is a tuple: a list with the vocabulary size of each
    categorical feature, followed by the number of trailing scalar features.
    Categorical columns are embedded and summed; scalar columns go through a
    single linear layer (when ``use_scalar_feat`` is True).
    """

    def __init__(self, emb_dim, feature_dims, use_scalar_feat=True, n_feats_to_use=None):
        super(AtomEncoder, self).__init__()
        self.use_scalar_feat = use_scalar_feat
        self.n_feats_to_use = n_feats_to_use
        self.atom_embedding_list = torch.nn.ModuleList()
        self.num_categorical_features = len(feature_dims[0])
        self.num_scalar_features = feature_dims[1]
        for feat_idx, vocab_size in enumerate(feature_dims[0]):
            embedding = torch.nn.Embedding(vocab_size, emb_dim)
            torch.nn.init.xavier_uniform_(embedding.weight.data)
            self.atom_embedding_list.append(embedding)
            # Optionally cap how many categorical features get embeddings.
            if feat_idx + 1 == self.n_feats_to_use:
                break
        if self.num_scalar_features > 0:
            self.linear = torch.nn.Linear(self.num_scalar_features, emb_dim)

    def forward(self, x):
        # Columns: categorical feature indices first, then scalar features.
        assert x.shape[1] == self.num_categorical_features + self.num_scalar_features
        encoded = 0
        for feat_idx in range(self.num_categorical_features):
            encoded = encoded + self.atom_embedding_list[feat_idx](x[:, feat_idx].long())
            if feat_idx + 1 == self.n_feats_to_use:
                break
        if self.num_scalar_features > 0 and self.use_scalar_feat:
            encoded = encoded + self.linear(x[:, self.num_categorical_features:])
        if torch.isnan(encoded).any():
            log('nan')
        return encoded
def safe_index(l, e):
    """
    Return index of element e in list l. If e is not present, return the last
    index (by convention the 'misc' bucket of the allowable_features lists).
    """
    try:
        return l.index(e)
    except ValueError:
        # list.index raises only ValueError when the element is missing; the
        # original bare except would also have masked real programming errors
        # (e.g. passing a non-list).
        return len(l) - 1
def get_receptor_from_cleaned(rec_path):
    """Parse a cleaned receptor PDB and collect backbone coordinates.

    Asserts every residue has a complete N/CA/C backbone (the file is
    expected to be pre-cleaned).

    :param rec_path: path to the receptor PDB file
    :return: (biopython model, per-residue atom-coordinate arrays,
        C-alpha coords [n_res, 3], N coords [n_res, 3], C coords [n_res, 3])
    """
    with warnings.catch_warnings():
        # The parser is noisy on slightly malformed PDBs; silence it.
        warnings.filterwarnings("ignore", category=PDBConstructionWarning)
        structure = biopython_parser.get_structure('random_id', rec_path)
        rec = structure[0]
    coords = []
    c_alpha_coords = []
    n_coords = []
    c_coords = []
    for residue in rec.get_residues():
        atom_positions = []
        ca_pos = n_pos = c_pos = None
        for atom in residue:
            pos = list(atom.get_vector())
            if atom.name == 'CA':
                ca_pos = pos
            elif atom.name == 'N':
                n_pos = pos
            elif atom.name == 'C':
                c_pos = pos
            atom_positions.append(pos)
        assert ca_pos is not None and n_pos is not None and c_pos is not None
        c_alpha_coords.append(ca_pos)
        n_coords.append(n_pos)
        c_coords.append(c_pos)
        coords.append(np.array(atom_positions))
    c_alpha_coords = np.stack(c_alpha_coords, axis=0)  # [n_residues, 3]
    n_coords = np.stack(n_coords, axis=0)              # [n_residues, 3]
    c_coords = np.stack(c_coords, axis=0)              # [n_residues, 3]
    return rec, coords, c_alpha_coords, n_coords, c_coords
def get_receptor(rec_path, lig, cutoff):
conf = lig.GetConformer()
lig_coords = conf.GetPositions()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=PDBConstructionWarning)
structure = biopython_parser.get_structure('random_id', rec_path)
rec = structure[0]
min_distances = []
coords = []
c_alpha_coords = []
n_coords = []
c_coords = []
valid_chain_ids = []
lengths = []
for i, chain in enumerate(rec):
chain_coords = [] # num_residues, num_atoms, 3
chain_c_alpha_coords = []
chain_n_coords = []
chain_c_coords = []
chain_is_water = False
count = 0
invalid_res_ids = []
for res_idx, residue in enumerate(chain):
if residue.get_resname() == 'HOH':
chain_is_water = True
residue_coords = []
c_alpha, n, c = None, None, None
for atom in residue:
if atom.name == 'CA':
c_alpha = list(atom.get_vector())
if atom.name == 'N':
n = list(atom.get_vector())
if atom.name == 'C':
c = list(atom.get_vector())
residue_coords.append(list(atom.get_vector()))
# TODO: Also include the chain_coords.append(np.array(residue_coords)) for non amino acids such that they can be used when using the atom representation of the receptor
if c_alpha != None and n != None and c != None: # only append residue if it is an amino acid and not some weired molecule that is part of the complex
chain_c_alpha_coords.append(c_alpha)
chain_n_coords.append(n)
chain_c_coords.append(c)
chain_coords.append(np.array(residue_coords))
count += 1
else:
invalid_res_ids.append(residue.get_id())
for res_id in invalid_res_ids:
chain.detach_child(res_id)
if len(chain_coords) > 0:
all_chain_coords = np.concatenate(chain_coords, axis=0)
distances = spatial.distance.cdist(lig_coords, all_chain_coords)
min_distance = | |
"""RGN model for 3D protein structure prediction.
The implicit ordering of tensor dimensions is:
[NUM_STEPS, BATCH_SIZE, NUM_DIHEDRALS, NUM_DIMENSIONS]
Tensors have this orientation unless otherwise labeled.
"""
import os
from glob import glob
from copy import deepcopy
from itertools import zip_longest
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
from tensorflow.python.ops import control_flow_ops
from geom_ops import *
from net_ops import *
from utils import *
import rnn
import transformer
# Public interface
SCOPE = 'RGN'  # top-level TF variable/name scope for the whole model
DUMMY_LOSS = -1.  # sentinel loss value (presumably used when no real loss applies — confirm at call sites)
LOSS_SCALING_FACTOR = 0.01  # this is to convert recorded losses to angstroms
class RGNModel(object):
"""Recurrent geometric network model
Attributes:
mode: train or predict
config: parameter dictionary
"""
# static variable to control creation of new objects and starting the model
_is_started = False
# ??? Should this be called model number not number of models?
_num_models = 0
def __init__(self, mode, config):
    """Sets up configurations and invokes the TF graph.

    :param mode: 'training' selects the trainable head graph; any other
        value builds an evaluation/prediction model
    :param config: configuration object with io / architecture /
        regularization / curriculum / optimization / initialization sections
    :raise RuntimeError: on inconsistent curriculum settings, or when a
        model has already been started (only one live model is allowed)
    """
    # Make sure model hasn't been started.
    if not RGNModel._is_started:
        self.mode = mode
        self.config = deepcopy(config)

        # Set up public methods based on mode (for initial state):
        # training exposes start(); other modes expose evaluate()/predict().
        if mode == 'training':
            self.start = self._start
        else:
            self.evaluate = self._evaluate
            self.predict = self._predict

        # Process config for derived properties
        io = self.config.io
        arch = self.config.architecture
        reg = self.config.regularization
        curr = self.config.curriculum
        opt = self.config.optimization
        init = self.config.initialization

        # Curriculum mode and behavior must be set together or not at all.
        if curr['mode'] is None and curr['behavior'] is not None:
            raise RuntimeError(
                'Must set curriculum mode if curriculum behavior is set.')
        elif curr['mode'] is not None and curr['behavior'] is None:
            raise RuntimeError(
                'Must set curriculum behavior if curriculum mode is set.')

        # Model name: default to a unique counter-based name.
        if io['name'] is None:
            io['name'] = 'model_' + str(RGNModel._num_models)
            RGNModel._num_models = RGNModel._num_models + 1

        # Alphabet-related: load the mixing alphabet from CSV if given.
        # Columns 0-5 are dropped — presumably metadata; confirm the
        # alphabet file format.
        arch['alphabet'] = np.loadtxt(io['alphabet_file'], delimiter = ',')[:, 6:] \
                           if io['alphabet_file'] is not None else None
        # set alphabet size if implicit (derived from the loaded array)
        if arch['alphabet'] is not None:
            arch['alphabet_size'] = len(arch['alphabet'])
        # having multiple alphabets is isomorphic to not reusing alphabet
        arch['single_or_no_alphabet'] = type(arch['alphabet_size']) is not list
        arch['is_alphabetized'] = 'alphabet' in arch['tertiary_output']

        # angularization: tertiary output expressed as angles
        arch['is_angularized'] = 'angular' in arch['tertiary_output']

        # optimization: adadelta calls its decay parameter 'rho'
        if opt['optimizer'] == 'adadelta':
            opt.update({'rho': opt['decay']})

        # initialization: broadcast scalar initializer settings to a
        # per-layer list so downstream code can index by layer
        if arch['higher_order_layers']:
            for key in ['recurrent_init']:
                if type(init[key]) is not list:
                    init[key] = [init[key]] * len(arch['recurrent_layer_size'])
        if arch['recurrent_nonlinear_out_proj_size'] is not None:
            for key in ['recurrent_nonlinear_out_proj_init']:
                if type(init[key]) is not list:
                    init[key] = [init[key]] * len(arch['recurrent_nonlinear_out_proj_size'])

        # regularization: same per-layer broadcasting for keep/zoneout probs
        for key in ['recurrent_input_keep_probability',
                    'recurrent_output_keep_probability',
                    'recurrent_keep_probability',
                    'recurrent_state_zonein_probability',
                    'recurrent_memory_zonein_probability',
                    'alphabet_keep_probability',
                    'alphabet_normalization']:
            if type(reg[key]) is not list:
                reg[key] = [reg[key]] * len(arch['recurrent_layer_size'])

        # create graph
        self._create_graph(mode, self.config)
    else:
        raise RuntimeError('Model already started; cannot create new objects.')
def _create_graph(self, mode, config):
"""Creates TensorFlow computation graph depending on the mode.
Builds the head (training) graph to start, train, and checkpoint a model.
Or create any number of 'evaluation' models that depend on the head model,
but with additional data sets, different model semantics (e.g. no dropout)
for the evaluation, and logging of their performance.
Two types of internal variables stored in each object:
ops collections, like training_ops, evaluation_ops, etc.
As the graph is built up, ops are added to these lists.
various nodes that are like TF methods, like the initializer, saver, etc,
which are stored in the object and are accessed by various methods when necessary.
Args:
mode: training or predicting
config: dictionary of configuration parameters
"""
# set up appropriate op collections based on mode (for initial state)
if mode == 'training':
# collection of ops to be run at each step of training
self._training_ops = training_ops = {}
# collection of ops for diagnostics like weight norms and curriculum quantiles
self._diagnostic_ops = diagnostic_ops = {}
else:
# collection of ops for evaluation of losses
self._evaluation_ops = evaluation_ops = {}
# collection of ops for the last evaluation in a multi-invocation evaluation
self._last_evaluation_ops = last_evaluation_ops = {}
# collection of ops for prediction of structures
self._prediction_ops = prediction_ops = {}
# set variable scoping, op scoping, and place on appropriate device
with tf.variable_scope(SCOPE, reuse=(mode == 'evaluation')) as scope, \
tf.name_scope(SCOPE + '/' + config.io['name'] + '/'), \
tf.device(_device_function_constructor(
**{k: config.computing[k] for k in ('functions_on_devices', 'default_device')})):
# set graph seed
if mode == 'training':
tf.set_random_seed(config.initialization['graph_seed'])
# Create curriculum state and tracking variables if needed.
if config.curriculum['mode'] is not None:
# Variable to hold current curriculum iteration
curriculum_step = tf.get_variable(
name='curriculum_step',
shape=[],
trainable=False,
initializer=tf.constant_initializer(config.curriculum['base']))
if mode == 'training':
diagnostic_ops.update({'curriculum_step': curriculum_step})
# Set up data ports
if mode == 'training':
self._coordinator = tf.train.Coordinator()
if config.curriculum['mode'] == 'length':
max_length = tf.reduce_min(
[curriculum_step, config.optimization['num_steps']])
max_length = tf.cast(max_length, tf.int32)
else:
max_length = config.optimization['num_steps']
dataflow_config = merge_dicts(
config.io,
config.initialization,
config.optimization,
config.queueing)
ids, primaries, evolutionaries, secondaries, tertiaries, \
masks, num_stepss = _dataflow(dataflow_config, max_length)
# Set up inputs
inputs = _inputs(
merge_dicts(config.architecture, config.initialization),
primaries,
evolutionaries)
# Compute dRMSD weights
# Masks out meaningless (longer than sequence) pairwise distances
# Incorporates curriculum weights
weights_config = merge_dicts(
config.optimization,
config.curriculum,
config.loss,
config.io)
weights, flat_curriculum_weights = _weights(
weights_config,
masks,
curriculum_step if config.curriculum['mode'] == 'loss' else None)
if mode == 'training' and config.curriculum['mode'] == 'loss':
diagnostic_ops.update({'flat_curriculum_weights': flat_curriculum_weights})
# create alphabet if needed and if it will be shared between layers,
# otherwise set to None so that _dihedrals takes care of it
alphabet_config = merge_dicts(
config.architecture,
config.initialization)
if alphabet_config['is_alphabetized'] \
and alphabet_config['single_or_no_alphabet']:
alphabet = _alphabet(mode, alphabet_config)
if mode == 'training' and config.io['log_alphabet']:
diagnostic_ops.update({'alphabet': alphabet})
else:
alphabet = None
for case in switch(config.architecture['internal_representation']):
if case('transformer'):
transformer_config = merge_dicts(
config.initialization,
config.architecture,
config.regularization,
config.optimization)
inputs2 = tf.transpose(inputs, perm=[1,0,2])
recurrent_outputs = transformer._encoder_model(
inputs2,
transformer_config,
mode
)
recurrent_outputs = tf.transpose(
recurrent_outputs,
perm=[1,0,2])
elif case('recurrent'):
# Create recurrent layer(s) that translate
# primary sequences into internal representation
recurrence_config = merge_dicts(
config.initialization,
config.architecture,
config.regularization,
config.optimization,
config.computing, config.io)
# inputs: [NUM_STEPS, BATCH_SIZE, RECURRENT_LAYER_SIZE]
# recurrent_outputs: [NUM_STEPS, BATCH_SIZE, RECURRENT_LAYER_SIZE]
recurrent_outputs, recurrent_states = rnn._higher_recurrence(
mode,
recurrence_config,
inputs,
num_stepss,
alphabet=alphabet)
elif case('none'):
recurrent_outputs = inputs
else:
raise ValueError('Not an available internal representation.')
# Tertiary structure generation
if config.loss['tertiary_weight'] > 0:
# Convert internal representation to
# (thru some number of possible ways) dihedral angles
dihedrals_config = merge_dicts(
config.initialization,
config.optimization,
config.architecture,
config.regularization,
config.io)
dihedrals_config.update({
k: dihedrals_config[k][-1] for k in [
'alphabet_keep_probability',
'alphabet_normalization']})
if not dihedrals_config['single_or_no_alphabet']:
dihedrals_config.update({
'alphabet_size': dihedrals_config['alphabet_size'][-1]})
dihedrals = _dihedrals(
mode,
dihedrals_config,
recurrent_outputs,
alphabet=alphabet)
# Convert dihedrals into full 3D structures and compute dRMSDs
coordinates = _coordinates(
merge_dicts(
config.computing,
config.optimization,
config.queueing),
dihedrals)
drmsds = _drmsds(
merge_dicts(
config.optimization,
config.loss,
config.io),
coordinates,
tertiaries,
weights)
if mode == 'evaluation':
prediction_ops.update({
'ids': ids,
'coordinates': coordinates,
'num_stepss': num_stepss,})
# 'recurrent_states': recurrent_states})
# Losses
if config.loss['include']:
filters = {grp: id_filter(ids, grp) \
for grp in config.io['evaluation_sub_groups']} \
if mode == 'evaluation' else {}
filters.update({'all': tf.tile([True], tf.shape(ids))})
for group_id, group_filter in filters.items():
with tf.variable_scope(group_id):
# Tertiary loss
effective_tertiary_loss = 0.
if config.loss['tertiary_weight'] > 0:
if config.queueing['num_evaluation_invocations'] > 1 \
and mode == 'training':
raise RuntimeError('Cannot use multiple invocations with training mode.')
else:
# Compute tertiary loss quotient parts by reducing dRMSDs
# based on normalization behavior
tertiary_loss_numerator, tertiary_loss_denominator = _reduce_loss_quotient(
merge_dicts(config.loss, config.io, config.optimization),
drmsds,
masks,
group_filter,
name_prefix='tertiary_loss')
# Handles multiple invocations and gracefully degrades for single invocations.
# Variables are created below _per_ evaluation model, which is a deviation from my general design
# the scope of those variables is the evaluation model's, _not_ the training model's as usual
tertiary_loss, min_loss_achieved, min_loss_op, \
update_accu_op, reduce_accu_op = _accumulate_loss(
merge_dicts(config.io, config.queueing),
tertiary_loss_numerator,
tertiary_loss_denominator,
name_prefix='tertiary_loss')
if mode == 'evaluation':
evaluation_ops.update({
'update_accumulator_' + group_id + '_op': update_accu_op})
last_evaluation_ops.update({
'tertiary_loss_' + group_id: tertiary_loss * LOSS_SCALING_FACTOR,
'reduce_accumulator_' + group_id + '_op': reduce_accu_op,
'min_tertiary_loss_achieved_' + group_id: min_loss_achieved * LOSS_SCALING_FACTOR,
'min_tertiary_loss_' + group_id + '_op': min_loss_op})
if config.io['log_model_summaries']:
tf.add_to_collection(
config.io['name'] + '_tertiary_losses',
tertiary_loss)
effective_tertiary_loss = config.loss['tertiary_weight'] * tertiary_loss
# Final loss and related housekeeping
loss = tf.identity(effective_tertiary_loss, name='loss')
update_ops = | |
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Classes and constants for the representation of repositories
:author: <NAME>
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import os
try:
# Python 3
# pylint: disable=F0401,E0611
import urllib.request as urllib
import urllib.parse as urlparse
except ImportError:
# Python 2
# pylint: disable=F0401
import urlparse
import urllib
# Pelix utilities
from pelix.utilities import is_string
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Version
__version_info__ = (1, 1, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
class Artifact(object):
    """
    Basic representation of an artifact, i.e. a bundle (Java),
    a module (Python), ...
    """
    def __init__(self, language, name, version=None, filename=None):
        """
        Sets up the artifact

        :param language: Language of implementation of the artifact (mandatory)
        :param name: Name of the artifact (will be file name if empty)
        :param version: Version of the artifact (optional)
        :param filename: Path to the artifact file (optional)
        :raise ValueError: Invalid parameter
        """
        # Validate parameters
        if not language:
            raise ValueError("Artifact language can't be empty")
        if not name and not filename:
            raise ValueError("Artifact name and filename can't be both empty")

        # Store values
        self.__language = language.lower()
        self.__name = name or filename
        self.__version = Version(version)
        self.__file = None
        # Go through the ``file`` property setter to normalize the path
        self.file = filename

    def __hash__(self):
        """
        Computes the hash of this object
        """
        # NOTE: the hash depends on ``file``, which is mutable — do not
        # mutate ``file`` while the artifact is used as a dict/set key.
        return hash(repr(self))

    def __eq__(self, other):
        """
        Tests the equality with another artifact
        """
        if isinstance(other, Artifact):
            # Same (non-empty) file, same artifact.
            # FIX: the file must actually be set — previously two unrelated
            # in-memory artifacts (both with file=None) compared equal here
            # because ``None == None``.
            if self.__file is not None and self.__file == other.__file:
                return True

            # All other attributes must be equal
            return self.__language == other.__language \
                and self.__name == other.__name \
                and self.__version == other.__version
        elif is_string(other):
            # Compare by name
            return self.__name == other
        else:
            # Unknown type
            return NotImplemented

    def __ne__(self, other):
        """
        Tests the inequality with another artifact
        """
        equality = self.__eq__(other)
        if equality is NotImplemented:
            return NotImplemented
        return not equality

    def __lt__(self, other):
        """
        Compares this artifact with another
        """
        if not isinstance(other, Artifact):
            # Not an artifact
            return NotImplemented
        if self.__name == other.__name:
            # Same name: compare versions
            return self.__version < other.__version
        else:
            # Name order
            return self.__name < other.__name

    def __le__(self, other):
        """
        Compares this artifact with another
        """
        equals = self.__eq__(other)
        if equals is NotImplemented:
            return NotImplemented
        return equals or self.__lt__(other)

    def __gt__(self, other):
        """
        Compares this artifact with another
        """
        if not isinstance(other, Artifact):
            # Not an artifact
            return NotImplemented
        if self.__name == other.__name:
            # Same name: compare versions
            return self.__version > other.__version
        else:
            # Name order
            return self.__name > other.__name

    def __ge__(self, other):
        """
        Compares this artifact with another
        """
        equals = self.__eq__(other)
        if equals is NotImplemented:
            return NotImplemented
        return equals or self.__gt__(other)

    def __repr__(self):
        """
        String representation of the artifact
        """
        return "Artifact('{art.language}', '{art.name}', " \
               "{art.version!r}, '{art.file}')".format(art=self)

    def __str__(self):
        """
        Human-readable representation
        """
        return "{art.name}-{art.version}".format(art=self)

    @property
    def file(self):
        """
        Path to the artifact file
        """
        return self.__file

    @file.setter
    def file(self, filename):
        """
        Sets the path to the artifact file (normalized to a real path)
        """
        if not filename:
            self.__file = None
        else:
            self.__file = os.path.realpath(filename)

    @property
    def language(self):
        """
        Language of implementation of the artifact (read-only)
        """
        return self.__language

    @property
    def name(self):
        """
        Name of the artifact (read-only)
        """
        return self.__name

    @property
    def version(self):
        """
        Version of the artifact (read-only)
        """
        return self.__version

    @property
    def url(self):
        """
        The URL to the bundle file, if known (None when no file is set)
        """
        if self.__file:
            return urlparse.urljoin('file:', urllib.pathname2url(self.__file))
# ------------------------------------------------------------------------------
class Factory(object):
    """
    A component factory descriptor: its name, implementation language,
    component model, and the artifact that provides it.
    """
    def __init__(self, name, language, model, artifact):
        """
        Sets up the component factory

        :param name: Name of the component factory
        :param language: Language of implementation of the factory
        :param model: The component model that can handle the factory
        :param artifact: Artifact that provides this factory
        :raise ValueError: Invalid parameters
        """
        # Reject empty/missing mandatory fields, first failure wins
        for value, message in ((name, "Empty factory name"),
                               (language, "No language given"),
                               (model, "No component model given")):
            if not value:
                raise ValueError(message)
        if artifact is None:
            raise ValueError("Invalid artifact")

        # Read-only members (exposed through properties below)
        self.__name = name
        self.__language = language
        self.__model = model
        self.__artifact = artifact

        # Provided services
        self.provides = set()

    def __repr__(self):
        """
        String representation of the factory
        """
        return "Factory('{0}', '{1}', '{2}', {3!r})".format(
            self.__name, self.__language, self.__model, self.__artifact)

    def __str__(self):
        """
        Human-readable representation
        """
        return "{0} ({1})".format(self.__name, self.__model)

    @property
    def artifact(self):
        """
        Artifact that provides the factory (read-only)
        """
        return self.__artifact

    @property
    def language(self):
        """
        Component factory implementation language (read-only)
        """
        return self.__language

    @property
    def model(self):
        """
        Component model (read-only)
        """
        return self.__model

    @property
    def name(self):
        """
        Component factory name (read-only)
        """
        return self.__name
# ------------------------------------------------------------------------------
class Version(object):
"""
Represents a version (OSGi style)
"""
def __init__(self, version_str=None):
    """
    Parses the given version string, if given

    :param version_str: optional version representation handed to
        ``parse`` (defined outside this view)
    """
    # ``version``: number components (presumably a tuple of ints once
    # parsed — confirm in ``parse``); None until something is parsed.
    self.version = None
    # ``qualifier``: trailing tag (e.g. "SNAPSHOT"); empty when absent.
    self.qualifier = ''
    if version_str is not None:
        self.parse(version_str)
def __str__(self):
    """
    String representation: dotted numbers, '-qualifier' suffix if any,
    '0.0.0' when unparsed
    """
    if self.version is None:
        return '0.0.0'
    text = '.'.join(str(part) for part in self.version)
    if self.qualifier:
        text = '{0}-{1}'.format(text, self.qualifier)
    return text
def __repr__(self):
    """
    Object string representation, e.g. Version('1.2.3')
    """
    return "Version('{0}')".format(str(self))
def __normalize_cmp(self, other):
    """
    Returns both version tuples brought to a comparable, equal length
    (shorter one zero-padded); unparsed versions become (0, 0, 0)
    """
    mine = self.version
    theirs = other.version
    if mine is None or theirs is None:
        # Do nothing if one of the versions is None
        return mine or (0, 0, 0), theirs or (0, 0, 0)

    # Zero-pad whichever side is shorter
    width_diff = len(mine) - len(theirs)
    if width_diff < 0:
        mine = tuple(mine) + (0,) * (-width_diff)
    elif width_diff > 0:
        theirs = tuple(theirs) + (0,) * width_diff
    return mine, theirs
def __lt__(self, other):
    """
    Compares this version with another
    """
    if not isinstance(other, Version):
        # Only comparable with versions
        return NotImplemented
    mine, theirs = self.__normalize_cmp(other)
    if mine != theirs:
        return mine < theirs
    # Numbers tie: the qualifier decides
    return self.qualifier < other.qualifier
def __le__(self, other):
    """
    Compares this version with another
    """
    outcome = self.__eq__(other)
    if outcome is NotImplemented:
        return NotImplemented
    if outcome:
        return True
    return self.__lt__(other)
def __eq__(self, other):
    """
    Compares this version with another: numbers and qualifier must match
    """
    if not isinstance(other, Version):
        # Only comparable with versions
        return NotImplemented
    mine, theirs = self.__normalize_cmp(other)
    return self.qualifier == other.qualifier and mine == theirs
def __ne__(self, other):
    """
    Compares this version with another
    """
    outcome = self.__eq__(other)
    return NotImplemented if outcome is NotImplemented else not outcome
def __gt__(self, other):
    """
    Compares this version with another
    """
    if not isinstance(other, Version):
        # Only comparable with versions
        return NotImplemented
    mine, theirs = self.__normalize_cmp(other)
    if mine != theirs:
        return mine > theirs
    # Numbers tie: the qualifier decides
    return self.qualifier > other.qualifier
def __ge__(self, other):
    """
    Compares this version with another
    """
    outcome = self.__eq__(other)
    if outcome is NotImplemented:
        return NotImplemented
    if outcome:
        return True
    return self.__gt__(other)
def __add__(self, other):
    """
    Adds a version tuple or object. Trims or enlarges the current version
    tuple to the size of the given one.

    Overlapping components are summed; the increment's extra components
    are kept as-is; when the increment is shorter, the result is
    zero-padded back to this version's width (i.e. our extra components
    are reset to 0). The increment's qualifier wins over ours, if set.

    :param other: a Version or anything ``Version()`` can parse
    :return: a new Version
    """
    if not isinstance(other, Version):
        other = Version(other)
    # (The previous implementation checked ``other is None`` here and had
    # two unreachable loop branches; ``Version(x)`` never returns None.)

    local = self.version
    local_len = len(local)
    increment = other.version

    # Element-wise sum over the overlap...
    new_version = [mine + theirs for mine, theirs in zip(local, increment)]
    # ...then keep the increment's extra parts ("enlarge")...
    new_version.extend(increment[local_len:])
    # ...and zero-pad back to our width when the increment was shorter.
    while len(new_version) < local_len:
        new_version.append(0)

    # Add the qualifier, if any
    qualifier = other.qualifier or self.qualifier
    if qualifier:
        new_version.append(qualifier)
    return Version(new_version)
def matches(self, other):
"""
Tests if this version matches the given one
"""
if other is None:
# None matches | |
/ seg
t = 0
for i in range(seg):
xb = base * cos(t)
yb = base * sin(t)
xc = cap * cos(t)
yc = cap * sin(t)
verts.append((xb, yb, zb))
verts.append((xc, yc, zc))
t += angle
for i in range(seg):
f = i * 2
if i == seg - 1:
faces.append([0, 1, f + 1, f])
else:
faces.append([f + 2, f + 3, f + 1, f])
if base != 0:
base_face = []
for i in range(seg - 1, -1, -1):
p = i * 2
base_face.append(p)
faces.append(base_face)
if cap != 0:
cap_face = []
for i in range(seg):
p = i * 2 + 1
cap_face.append(p)
faces.append(cap_face)
mesh = pov_define_mesh(mesh, verts, [], faces, "PovCone", True)
if not ob:
ob_base = object_utils.object_data_add(context, mesh, operator=None)
ob = ob_base.object
ob.pov.object_as = "CONE"
ob.pov.cone_base_radius = base
ob.pov.cone_cap_radius = cap
ob.pov.cone_height = height
ob.pov.cone_base_z = zb
ob.pov.cone_cap_z = zc
class POVRAY_OT_cone_add(bpy.types.Operator):
    # Operator: add a POV-Ray cone primitive (proxy mesh + POV parameters).
    bl_idname = "pov.cone_add"
    bl_label = "Cone"
    bl_description = "Add Cone"
    bl_options = {'REGISTER', 'UNDO'}
    COMPAT_ENGINES = {'POVRAY_RENDER'}

    # XXX Keep it in sync with __init__'s RenderPovSettingsConePrimitive
    # If someone knows how to define operators' props from a func, I'd be delighted to learn it!
    base = FloatProperty(
        name = "Base radius", description = "The first radius of the cone",
        default = 1.0, min = 0.01, max = 100.0)
    cap = FloatProperty(
        name = "Cap radius", description = "The second radius of the cone",
        default = 0.3, min = 0.0, max = 100.0)
    seg = IntProperty(
        name = "Segments", description = "Radial segmentation of the proxy mesh",
        default = 16, min = 3, max = 265)
    height = FloatProperty(
        name = "Height", description = "Height of the cone",
        default = 2.0, min = 0.01, max = 100.0)

    @classmethod
    def poll(cls, context):
        # Only available when POV-Ray is the active render engine.
        engine = context.scene.render.engine
        return (engine in cls.COMPAT_ENGINES)

    def execute(self, context):
        # Delegate mesh/POV setup to the shared helper; None = create new object.
        pov_cone_define(context, self, None)
        self.report({'INFO'}, "This native POV-Ray primitive won't have any vertex to show in edit mode")
        return {'FINISHED'}
class POVRAY_OT_cone_update(bpy.types.Operator):
    # Operator: rebuild the cone proxy mesh from the object's stored POV settings.
    bl_idname = "pov.cone_update"
    bl_label = "Update"
    bl_description = "Update Cone"
    bl_options = {'REGISTER', 'UNDO'}
    COMPAT_ENGINES = {'POVRAY_RENDER'}

    @classmethod
    def poll(cls, context):
        # Needs an existing mesh object and the POV-Ray engine.
        engine = context.scene.render.engine
        ob = context.object
        return (ob and ob.data and ob.type == 'MESH' and engine in cls.COMPAT_ENGINES)

    def execute(self, context):
        # Wipe the current proxy geometry in edit mode...
        bpy.ops.object.mode_set(mode="EDIT")
        bpy.ops.mesh.reveal()
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.delete(type='VERT')
        bpy.ops.object.mode_set(mode="OBJECT")
        # ...then let the shared helper regenerate it from the stored POV params.
        pov_cone_define(context, None, context.object)
        return {'FINISHED'}
#########################################################################################################
class POVRAY_OT_isosurface_box_add(bpy.types.Operator):
    # Operator: add an isosurface contained by a box (proxy: hidden cube mesh).
    bl_idname = "pov.addisosurfacebox"
    bl_label = "Isosurface Box"
    bl_description = "Add Isosurface contained by Box"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self,context):
        # Create the proxy cube on the first layer only.
        layers = 20*[False]
        layers[0] = True
        bpy.ops.mesh.primitive_cube_add(layers = layers)
        ob = context.object
        bpy.ops.object.mode_set(mode="EDIT")
        self.report({'INFO'}, "This native POV-Ray primitive "
                    "won't have any vertex to show in edit mode")
        # Hide the proxy vertices; POV-Ray generates the real surface at render time.
        bpy.ops.mesh.hide(unselected=False)
        bpy.ops.object.mode_set(mode="OBJECT")
        ob.pov.object_as = "ISOSURFACE"
        ob.pov.contained_by = 'box'
        ob.name = 'Isosurface'
        return {'FINISHED'}
class POVRAY_OT_isosurface_sphere_add(bpy.types.Operator):
    # Operator: add an isosurface contained by a sphere (proxy: hidden ico sphere).
    bl_idname = "pov.addisosurfacesphere"
    bl_label = "Isosurface Sphere"
    bl_description = "Add Isosurface contained by Sphere"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self,context):
        # Create the proxy sphere on the first layer only.
        layers = 20*[False]
        layers[0] = True
        bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=4,layers=layers)
        ob = context.object
        bpy.ops.object.mode_set(mode="EDIT")
        self.report({'INFO'}, "This native POV-Ray primitive "
                    "won't have any vertex to show in edit mode")
        # Hide the proxy vertices; POV-Ray generates the real surface at render time.
        bpy.ops.mesh.hide(unselected=False)
        bpy.ops.object.mode_set(mode="OBJECT")
        bpy.ops.object.shade_smooth()
        ob.pov.object_as = "ISOSURFACE"
        ob.pov.contained_by = 'sphere'
        ob.name = 'Isosurface'
        return {'FINISHED'}
class POVRAY_OT_sphere_sweep_add(bpy.types.Operator):
    # Operator: add a NURBS curve that is exported as a POV-Ray sphere_sweep.
    bl_idname = "pov.addspheresweep"
    bl_label = "Sphere Sweep"
    bl_description = "Create Sphere Sweep along curve"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self,context):
        # Create the curve on the first layer only.
        layers = 20*[False]
        layers[0] = True
        bpy.ops.curve.primitive_nurbs_curve_add(layers = layers)
        ob = context.object
        ob.name = ob.data.name = "PovSphereSweep"
        ob.pov.curveshape = "sphere_sweep"
        # Bevel gives the curve visible thickness in the viewport preview.
        ob.data.bevel_depth = 0.02
        ob.data.bevel_resolution = 4
        ob.data.fill_mode = 'FULL'
        #ob.data.splines[0].order_u = 4
        return {'FINISHED'}
class POVRAY_OT_blob_add(bpy.types.Operator):
    # Operator: add a metaball sphere exported as a POV-Ray blob.
    bl_idname = "pov.addblobsphere"
    bl_label = "Blob Sphere"
    bl_description = "Add Blob Sphere"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Place the metaball on the first layer only.
        layers = [False] * 20
        layers[0] = True
        bpy.ops.object.metaball_add(type='BALL', layers=layers)
        context.object.name = "Blob"
        return {'FINISHED'}
class POVRAY_OT_rainbow_add(bpy.types.Operator):
    # Operator: add a spot lamp stand-in that is exported as a POV-Ray rainbow.
    bl_idname = "pov.addrainbow"
    bl_label = "Rainbow"
    bl_description = "Add Rainbow"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self,context):
        cam = context.scene.camera
        bpy.ops.object.lamp_add(type='SPOT', radius=1)
        ob = context.object
        ob.data.show_cone = False
        ob.data.spot_blend = 0.5
        # NOTE(review): clip range / distance are derived from the camera
        # distance — presumably to frame the rainbow arc; confirm against
        # the POV exporter's rainbow handling.
        ob.data.shadow_buffer_clip_end = 0
        ob.data.shadow_buffer_clip_start = 4*cam.location.length
        ob.data.distance = cam.location.length
        # Zero energy: the lamp itself must not light the scene.
        ob.data.energy = 0
        ob.name = ob.data.name = "PovRainbow"
        ob.pov.object_as = "RAINBOW"

        #obj = context.object
        # Keep the rainbow aimed at the camera, mirrored through the origin.
        bpy.ops.object.constraint_add(type='DAMPED_TRACK')

        ob.constraints["Damped Track"].target = cam
        ob.constraints["Damped Track"].track_axis = 'TRACK_NEGATIVE_Z'
        ob.location = -cam.location

        #refocus on the actual rainbow
        bpy.context.scene.objects.active = ob
        ob.select=True
        return {'FINISHED'}
class POVRAY_OT_height_field_add(bpy.types.Operator, ImportHelper):
    # Operator: add a POV-Ray height_field from an image file. Builds a
    # proxy grid mesh (subdivided according to ``quality``) textured with
    # the image, and stores the image path for native POV-Ray export.
    bl_idname = "pov.addheightfield"
    bl_label = "Height Field"
    bl_description = "Add Height Field "
    bl_options = {'REGISTER', 'UNDO'}

    # XXX Keep it in sync with __init__'s hf Primitive
    # filename_ext = ".png"

    # filter_glob = StringProperty(
    #         default="*.exr;*.gif;*.hdr;*.iff;*.jpeg;*.jpg;*.pgm;*.png;*.pot;*.ppm;*.sys;*.tga;*.tiff;*.EXR;*.GIF;*.HDR;*.IFF;*.JPEG;*.JPG;*.PGM;*.PNG;*.POT;*.PPM;*.SYS;*.TGA;*.TIFF",
    #         options={'HIDDEN'},
    #         )
    quality = IntProperty(name = "Quality",
                    description = "",
                    default = 100, min = 1, max = 100)
    hf_filename = StringProperty(maxlen = 1024)
    hf_gamma = FloatProperty(
            name="Gamma",
            description="Gamma",
            min=0.0001, max=20.0, default=1.0)
    hf_premultiplied = BoolProperty(
            name="Premultiplied",
            description="Premultiplied",
            default=True)
    hf_smooth = BoolProperty(
            name="Smooth",
            description="Smooth",
            default=False)
    hf_water = FloatProperty(
            name="Water Level",
            description="Water Level",  # FIX: was misspelled "Wather Level"
            min=0.00, max=1.00, default=0.0)
    hf_hierarchy = BoolProperty(
            name="Hierarchy",
            description="Height field hierarchy",
            default=True)

    def execute(self,context):
        props = self.properties
        impath = bpy.path.abspath(self.filepath)
        img = bpy.data.images.load(impath)
        im_name = img.name
        im_name, file_extension = os.path.splitext(im_name)
        # Texture/material that previews the height map on the proxy mesh.
        hf_tex = bpy.data.textures.new('%s_hf_image'%im_name, type = 'IMAGE')
        hf_tex.image = img
        mat = bpy.data.materials.new('Tex_%s_hf'%im_name)
        hf_slot = mat.texture_slots.create(-1)
        hf_slot.texture = hf_tex
        layers = 20*[False]
        layers[0] = True
        # Grid resolution scales with the requested quality (100 => full size).
        quality = props.quality
        res = 100/quality
        w,h = hf_tex.image.size[:]
        w = int(w/res)
        h = int(h/res)
        bpy.ops.mesh.primitive_grid_add(x_subdivisions=w, y_subdivisions=h,radius = 0.5,layers=layers)
        ob = context.object
        ob.name = ob.data.name = '%s'%im_name
        ob.data.materials.append(mat)
        bpy.ops.object.mode_set(mode="EDIT")
        bpy.ops.mesh.noise(factor=1)
        bpy.ops.object.mode_set(mode="OBJECT")

        #needs a loop to select by index?
        #bpy.ops.object.material_slot_remove()
        #material just left there for now
        mat.texture_slots.clear(-1)
        # Hide the proxy vertices; POV-Ray generates the real surface at render time.
        bpy.ops.object.mode_set(mode="EDIT")
        bpy.ops.mesh.hide(unselected=False)
        bpy.ops.object.mode_set(mode="OBJECT")
        ob.pov.object_as = 'HEIGHT_FIELD'
        ob.pov.hf_filename = impath
        return {'FINISHED'}
############################TORUS############################################
def pov_torus_define(context, op, ob):
    """Create or rebuild the proxy mesh for a POV-Ray torus.

    ``op`` is the add-operator (fresh torus) and ``ob`` an existing object
    being updated; exactly one of the two is expected to be set.
    """
    if op:
        # fresh add: parameters come from the operator properties
        mas = op.mas
        mis = op.mis
        mar = op.mar
        mir = op.mir
    else:
        assert(ob)
        # update: parameters come from the object's stored pov settings
        mas = ob.pov.torus_major_segments
        mis = ob.pov.torus_minor_segments
        mar = ob.pov.torus_major_radius
        mir = ob.pov.torus_minor_radius
        #keep object rotation and location for the add object operator
        obrot = ob.rotation_euler
        obloc = ob.location
        # wipe the old proxy geometry and rebuild it in place
        bpy.ops.object.mode_set(mode="EDIT")
        bpy.ops.mesh.reveal()
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.delete(type='VERT')
        bpy.ops.mesh.primitive_torus_add(rotation = obrot, location = obloc, major_segments=mas, minor_segments=mis,major_radius=mar, minor_radius=mir)
        bpy.ops.mesh.hide(unselected=False)
        bpy.ops.object.mode_set(mode="OBJECT")
    if not ob:
        # add path: create the proxy and persist parameters for later updates
        bpy.ops.mesh.primitive_torus_add(major_segments=mas, minor_segments=mis,major_radius=mar, minor_radius=mir)
        ob = context.object
        ob.name = ob.data.name = "PovTorus"
        ob.pov.object_as = "TORUS"
        ob.pov.torus_major_segments = mas
        ob.pov.torus_minor_segments = mis
        ob.pov.torus_major_radius = mar
        ob.pov.torus_minor_radius = mir
        # hide proxy vertices so the native primitive is not hand-edited
        bpy.ops.object.mode_set(mode="EDIT")
        bpy.ops.mesh.hide(unselected=False)
        bpy.ops.object.mode_set(mode="OBJECT")
class POVRAY_OT_torus_add(bpy.types.Operator):
    """Add a native POV-Ray torus primitive (edit-mode proxy is hidden)."""
    bl_idname = "pov.addtorus"
    bl_label = "Torus"
    bl_description = "Add Torus"
    bl_options = {'REGISTER', 'UNDO'}

    # XXX Keep it in sync with __init__'s torus Primitive
    mas = IntProperty(name = "Major Segments",
                description = "",
                default = 48, min = 3, max = 720)
    mis = IntProperty(name = "Minor Segments",
                description = "",
                default = 12, min = 3, max = 720)
    mar = FloatProperty(name = "Major Radius",
                description = "",
                default = 1.0)
    mir = FloatProperty(name = "Minor Radius",
                description = "",
                default = 0.25)

    def execute(self, context):
        """Create the torus proxy mesh and tag it for the exporter.

        pov_torus_define() reads mas/mis/mar/mir directly off ``self``;
        the old unused local copies of the properties were removed.
        """
        pov_torus_define(context, self, None)
        self.report({'INFO'}, "This native POV-Ray primitive "
                              "won't have any vertex to show in edit mode")
        return {'FINISHED'}
class POVRAY_OT_torus_update(bpy.types.Operator):
    """Rebuild an existing POV torus proxy from its stored pov settings."""
    bl_idname = "pov.torus_update"
    bl_label = "Update"
    bl_description = "Update Torus"
    bl_options = {'REGISTER', 'UNDO'}
    COMPAT_ENGINES = {'POVRAY_RENDER'}
    @classmethod
    def poll(cls, context):
        # available only for mesh objects while the POV-Ray engine is active
        engine = context.scene.render.engine
        ob = context.object
        return (ob and ob.data and ob.type == 'MESH' and engine in cls.COMPAT_ENGINES)
    def execute(self, context):
        # ob is passed, op is None: parameters are read from ob.pov
        pov_torus_define(context, None, context.object)
        return {'FINISHED'}
###################################################################################
class POVRAY_OT_prism_add(bpy.types.Operator):
    """Add a POV-Ray prism as a closed 2D poly curve extruded along Z."""
    bl_idname = "pov.addprism"
    bl_label = "Prism"
    bl_description = "Create Prism"
    bl_options = {'REGISTER', 'UNDO'}
    # number of sides of the regular-polygon cross-section
    prism_n = IntProperty(name = "Sides",
                description = "Number of sides",
                default = 5, min = 3, max = 720)
    prism_r = FloatProperty(name = "Radius",
                description = "Radius",
                default = 1.0)
    def execute(self,context):
        """Build the polygon curve object and tag it as a POV prism."""
        props = self.properties
        loftData = bpy.data.curves.new('Prism', type='CURVE')
        loftData.dimensions = '2D'
        loftData.resolution_u = 2
        loftData.show_normal_face = False
        # extrusion along Z gives the prism its height
        loftData.extrude = 2
        n=props.prism_n
        r=props.prism_r
        coords = []
        z = 0
        angle = 0
        # sample the regular n-gon on a circle of radius r
        for p in range(n):
            x = r*cos(angle)
            y = r*sin(angle)
            coords.append((x,y,z))
            angle+=pi*2/n
        poly = loftData.splines.new('POLY')
        poly.points.add(len(coords)-1)
        for i, coord in enumerate(coords):
            x,y,z = coord
            # 4th component is the point weight
            poly.points[i].co = (x, y, z, 1)
        # close the polygon
        poly.use_cyclic_u = True
        ob = bpy.data.objects.new('Prism_shape', loftData)
        scn = bpy.context.scene
        scn.objects.link(ob)
        scn.objects.active = ob
        ob.select = True
        ob.pov.curveshape = "prism"
        ob.name = ob.data.name = "Prism"
        return {'FINISHED'}
##############################PARAMETRIC######################################
def pov_parametric_define(context, op, ob):
if op:
u_min = op.u_min
u_max = op.u_max
v_min = op.v_min
v_max | |
<gh_stars>0
from __future__ import print_function
import numpy as np
import sys
try:
import keplertools.CyKeplerSTM
except ImportError:
pass
"""
Kepler State Transition Matrix
Class container for defining a planetary system (or group of planets in multiple
systems) via their gravitational parameters and state vectors. Contains methods
for propagating state vectors forward in time via the Kepler state transition
matrix.
Constructor takes the following arguments:
x0 (ndarray):
6n vector of stacked positions and velocities for n planets
mu (ndarray):
n vector of standard gravitational parameters mu = G(m+m_s) where m is
the planet mass, m_s is the star mass and G is the gravitational
constant
epsmult (float):
default multiplier on floating point precision, used as convergence
metric. Higher values mean faster convergence, but sacrifice precision.
    prefVallado (bool):
        If True, always try the Vallado algorithm first, otherwise try Shepperd first.
        Defaults False.
noc (bool):
Do not attempt to use cythonized code even if found. Defaults False.
Step function (updateState) takes the following arguments:
dt (float):
time step
All units must be consistent (i.e., if position is AU and velocity
is AU/day, mu must be in AU^3/day^2).
Two algorithms are implemented, both using Battin/Goodyear universal variables.
The first is from Shepperd (1984), using a continued fraction to solve the Kepler equation.
The second is from Vallado (2004), using Newton iteration to solve the time equation.
One algorithm is used preferentially, and the other is called only in the case of convergence
failure on the first. All convergence is calculated to machine precision of the data type and
variable size, scaled by a user-selected multiple.
"""
class planSys:
    def __init__(self, x0, mu, epsmult=4.0, prefVallado=False, noc=False):
        """Validate inputs, choose the algorithm order and cache the state."""
        # determine number of planets and validate input
        nplanets = x0.size / 6.0
        if nplanets - np.floor(nplanets) > 0:
            raise Exception("The length of x0 must be a multiple of 6.")
        if mu.size != nplanets:
            raise Exception("The length of mu must be the length of x0 divided by 6")
        self.nplanets = int(nplanets)
        self.mu = np.squeeze(mu)
        # np.squeeze collapses a single-element array to 0-d; keep mu indexable
        if self.mu.size == 1:
            self.mu = np.array(mu)
        self.epsmult = epsmult
        # second entry is the fallback tried on convergence failure (takeStep)
        if prefVallado:
            self.algOrder = [self.calcSTM_vallado, self.calcSTM]
        else:
            self.algOrder = [self.calcSTM, self.calcSTM_vallado]
        # create position and velocity index matrices
        tmp = np.reshape(np.arange(self.nplanets * 6), (self.nplanets, 6)).T
        self.rinds = tmp[0:3]
        self.vinds = tmp[3:6]
        # use the cython extension only when its import at module load succeeded
        if not (noc) and ("keplertools.CyKeplerSTM" in sys.modules):
            self.havec = True
        else:
            self.havec = False
        self.updateState(np.squeeze(x0))
    def updateState(self, x0):
        """Store a new stacked state vector and refresh derived constants."""
        self.x0 = x0
        r0 = self.x0[self.rinds]
        v0 = self.x0[self.vinds]
        # constants
        self.r0norm = np.sqrt(np.sum(r0 ** 2.0, 0))  # ||r0||
        self.v0norm2 = np.sum(v0 * v0, 0)  # ||v0||^2
        self.nu0 = np.sum(r0 * v0, 0)  # r0 \cdot v0
        self.beta = 2 * self.mu / self.r0norm - self.v0norm2  # -2E
        self.alpha = self.beta / self.mu
        self.nu0osmu = self.nu0 / np.sqrt(self.mu)
def takeStep(self, dt):
if self.havec:
try:
tmp = keplertools.CyKeplerSTM.CyKeplerSTM(
self.x0, dt, self.mu, self.epsmult
)
self.updateState(tmp)
return
except:
print("Cython propagation failed. Falling back to python.")
try:
Phi = self.algOrder[0](dt)
except ValueError as detail:
print("First algorithm error: %s\n Trying second algorithm." % (detail))
Phi = self.algOrder[1](dt)
self.updateState(np.dot(Phi, self.x0))
    def calcSTM(self, dt):
        """Shepperd (1984) universal-variable state transition matrix.

        Newton-iterates the universal Kepler time equation in the anomaly
        ``u`` (continued fraction evaluated by contFrac), then assembles
        the block-diagonal 6n x 6n STM from the Lagrange f, g, F, G
        coefficients. Raises ValueError when the iteration does not
        converge within 1000 steps.
        """
        # allocate
        u = np.zeros(self.nplanets)
        deltaU = np.zeros(self.beta.size)
        t = np.zeros(self.nplanets)
        counter = 0
        # For elliptic orbits, calculate period effects
        eorbs = self.beta > 0
        if any(eorbs):
            P = 2 * np.pi * self.mu[eorbs] * self.beta[eorbs] ** (-3.0 / 2.0)
            n = np.floor((dt + P / 2 - 2 * self.nu0[eorbs] / self.beta[eorbs]) / P)
            deltaU[eorbs] = 2 * np.pi * n * self.beta[eorbs] ** (-5.0 / 2.0)
        # loop until convergence of the time array to the time step
        while (np.max(np.abs(t - dt)) > self.epsmult * np.spacing(dt)) and (
            counter < 1000
        ):
            q = self.beta * u ** 2.0 / (1 + self.beta * u ** 2.0)
            U0w2 = 1.0 - 2.0 * q
            U1w2 = 2.0 * (1.0 - q) * u
            temp = self.contFrac(q)
            U = 16.0 / 15.0 * U1w2 ** 5.0 * temp + deltaU
            U0 = 2.0 * U0w2 ** 2.0 - 1.0
            U1 = 2.0 * U0w2 * U1w2
            U2 = 2.0 * U1w2 ** 2.0
            U3 = self.beta * U + U1 * U2 / 3.0
            r = self.r0norm * U0 + self.nu0 * U1 + self.mu * U2
            t = self.r0norm * U1 + self.nu0 * U2 + self.mu * U3
            # Newton update of the universal anomaly
            u = u - (t - dt) / (4.0 * (1.0 - q) * r)
            counter += 1
        if counter == 1000:
            raise ValueError(
                "Failed to converge on t: %e/%e"
                % (np.max(np.abs(t - dt)), self.epsmult * np.spacing(dt))
            )
        # Kepler solution
        f = 1 - self.mu / self.r0norm * U2
        g = self.r0norm * U1 + self.nu0 * U2
        F = -self.mu * U1 / r / self.r0norm
        G = 1 - self.mu / r * U2
        Phi = np.zeros([6 * self.nplanets] * 2)
        # one 6x6 [fI gI; FI GI] block per planet on the diagonal
        for j in np.arange(self.nplanets):
            st = j * 6
            Phi[st : st + 6, st : st + 6] = np.vstack(
                (
                    np.hstack((np.eye(3) * f[j], np.eye(3) * g[j])),
                    np.hstack((np.eye(3) * F[j], np.eye(3) * G[j])),
                )
            )
        return Phi
    def contFrac(self, x, a=5.0, b=0.0, c=5.0 / 2.0):
        """Evaluate the continued fraction used by calcSTM, elementwise on x.

        Defaults correspond to the G function in Shepperd's algorithm.
        Raises ValueError if the fraction has not converged after 1000
        iterations (most likely divergence).
        """
        # initialize
        k = 1 - 2 * (a - b)
        l = 2 * (c - 1)
        d = 4 * c * (c - 1)
        n = 4 * b * (c - a)
        A = np.ones(x.size)
        B = np.ones(x.size)
        G = np.ones(x.size)
        # seed previous value away from G so the loop runs at least once
        Gprev = np.zeros(x.size) + 2
        counter = 0
        # loop until convergence of continued fraction
        while (np.max(np.abs(G - Gprev)) > self.epsmult * np.max(np.spacing(G))) and (
            counter < 1000
        ):
            # recurrence for the next convergent
            k = -k
            l = l + 2.0
            d = d + 4.0 * l
            n = n + (1.0 + k) * l
            A = d / (d - n * A * x)
            B = (A - 1.0) * B
            Gprev = G
            G = G + B
            counter += 1
        if counter == 1000:
            raise ValueError(
                "Failed to converge on G, most likely due to divergence in continued fractions."
            )
        return G
def calcSTM_vallado(self, dt):
# classify orbits
epsval = 1e-12
eorbs = self.alpha >= epsval
porbs = np.abs(self.alpha) < epsval
horbs = self.alpha <= -epsval
xi = np.zeros(self.nplanets)
if np.any(eorbs):
atmp = self.alpha[eorbs]
tmp = np.sqrt(self.mu[eorbs]) * dt * atmp
circinds = np.abs(atmp - 1) > epsval
if any(circinds):
tmp[circinds] *= 0.97
xi[eorbs] = tmp
if np.any(porbs):
r0 = self.x0[self.rinds]
v0 = self.x0[self.vinds]
h = np.cross(r0[:, porbs].T, v0[:, porbs].T).T
p = np.sum(h * h, 0) / self.mu[porbs]
s = np.arctan2(1.0, (3.0 * np.sqrt(self.mu[porbs] / p ** 3.0) * dt)) / 2.0
w = np.arctan((np.tan(s)) ** (1.0 / 3.0))
xi[porbs] = sqrt(p) * 2.0 / tan(2 * w)
self.alpha[porbs] = 0
if np.any(horbs):
a = 1.0 / (self.alpha[horbs])
xi[horbs] = (
np.sign(dt)
* np.sqrt(-a)
* np.log(
-2
* self.mu[horbs]
* self.alpha[horbs]
* dt
/ (
self.nu0[horbs]
+ np.sign(dt)
* np.sqrt(-self.mu[horbs] * self.alpha[horbs])
* (1.0 - self.r0norm[horbs] * self.alpha[horbs])
)
)
)
# loop
counter = 0
r = self.r0norm
xiup = 10.0 * np.max(np.abs(np.hstack((xi, r))))
while (
np.max(np.abs(xiup))
> self.epsmult * np.spacing(np.max(np.abs(np.hstack((xi, r)))))
) and (counter < 1000):
ps = xi ** 2.0 * self.alpha
c2, c3 = self.psi2c2c3(ps)
r = (
xi ** 2.0 * c2
+ self.nu0osmu * xi * (1 - ps * c3)
+ self.r0norm * (1 - ps * c2)
)
xiup = (
np.sqrt(self.mu) * dt
- xi ** 3.0 * c3
- self.nu0osmu * xi ** 2.0 * c2
- self.r0norm * xi * (1 - ps * c3)
) / r
xi += xiup
counter += 1
if counter == 1000:
raise ValueError(
"Failed to converge on xi: %e/%e"
% (
np.max(np.abs(xiup)),
self.epsmult * np.spacing(np.max(np.abs(np.hstack((xi, r))))),
)
)
# kepler solution
f = 1.0 - xi ** 2.0 / self.r0norm * c2
g = dt - xi ** 3.0 / np.sqrt(self.mu) * c3
F = np.sqrt(self.mu) / r / self.r0norm * xi * (ps * c3 - 1.0)
G = 1.0 - xi ** 2.0 / r * c2
Phi = | |
from contextlib import contextmanager
from StringIO import StringIO
import logging
from posix import rmdir
import unittest
import os
from time import time
from eventlet import GreenPool
from hashlib import md5
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from copy import copy
import math
import tarfile
from eventlet.wsgi import Input
from zerocloud import objectquery
from swift.common import utils
from test.unit import FakeLogger, create_random_numbers, get_sorted_numbers, \
create_tar
from test.unit import trim
from swift.common.swob import Request
from swift.common.utils import mkdirs, normalize_timestamp, get_logger
from swift.obj.server import ObjectController
from test_proxyquery import ZEROVM_DEFAULT_MOCK
from zerocloud.common import ACCESS_READABLE, ACCESS_WRITABLE, ACCESS_CDR, \
parse_location, ACCESS_RANDOM
from zerocloud import TAR_MIMES
from zerocloud.configparser import ZvmNode
from zerocloud.thread_pool import WaitPool, Zuid
def get_headers(self):
    """Return this TarInfo's pax headers as a Title-Cased, utf-8 dict.

    Intended to be monkey-patched onto tarfile.TarInfo so the tests can
    read the HTTP-style headers zerocloud stores in the pax header block.
    """
    def _encode(text):
        # pax headers may be unicode; tests compare against byte strings
        return text.encode('utf-8') if isinstance(text, unicode) else text

    return dict((_encode(name).title(), _encode(val))
                for name, val in self.pax_headers.items())
# Patch TarInfo so every tar member exposes its pax headers via get_headers().
tarfile.TarInfo.get_headers = get_headers
class FakeLoggingHandler(logging.Handler):
    """Logging handler that records emitted messages per level for assertions."""

    _LEVELS = ('debug', 'info', 'warning', 'error', 'critical')

    def __init__(self, *args, **kwargs):
        # reset() must run first so self.messages exists before any emit()
        self.reset()
        logging.Handler.__init__(self, *args, **kwargs)

    def emit(self, record):
        # file each formatted message under its lower-cased level name
        self.messages[record.levelname.lower()].append(record.getMessage())

    def reset(self):
        """Clear all captured messages, one empty list per level."""
        self.messages = dict((level, []) for level in self._LEVELS)
class FakeApp(ObjectController):
    """ObjectController test double with an on-demand failure switch."""
    def __init__(self, conf):
        ObjectController.__init__(self, conf)
        # sync after every byte so tests see writes on disk immediately
        self.bytes_per_sync = 1
        # set to True to make every subsequent request raise
        self.fault = False
    def __call__(self, env, start_response):
        if self.fault:
            raise Exception
        ObjectController.__call__(self, env, start_response)
class OsMock():
    """Partial os-module stand-in used by objectquery error-path tests.

    close() and unlink() record that they were called and then raise
    OSError; the remaining calls delegate to the real os module.
    """
    def __init__(self):
        self.closed = False
        self.unlinked = False
        self.path = os.path
        self.SEEK_SET = os.SEEK_SET
    def close(self, fd):
        # record the attempt, then fail like a bad descriptor would
        self.closed = True
        raise OSError
    def unlink(self, fd):
        self.unlinked = True
        raise OSError
    def write(self, fd, str):
        return os.write(fd, str)
    def read(self, fd, bufsize):
        return os.read(fd, bufsize)
    def lseek(self, fd, pos, how):
        return os.lseek(fd, pos, how)
class TestObjectQuery(unittest.TestCase):
    def setUp(self):
        """Build a throwaway object server + objectquery middleware stack."""
        utils.HASH_PATH_SUFFIX = 'endcap'
        self.testdir = \
            os.path.join(mkdtemp(), 'tmp_test_object_server_ObjectController')
        mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
        self.conf = {
            'devices': self.testdir,
            'mount_check': 'false',
            'disable_fallocate': 'true',
            'zerovm_sysimage_devices': ('sysimage1 /opt/zerovm/sysimage1 '
                                        'sysimage2 /opt/zerovm/sysimage2')
        }
        self.obj_controller = FakeApp(self.conf)
        self.app = objectquery.ObjectQueryMiddleware(
            self.obj_controller, self.conf, logger=FakeLogger())
        # generous output cap so tests never hit the limit
        self.app.zerovm_maxoutput = 1024 * 1024 * 10
        self.zerovm_mock = None
        self.uid_generator = Zuid()
    def tearDown(self):
        """ Tear down for testing swift.object_server.ObjectController """
        rmtree(os.path.dirname(self.testdir))
        # remove the temporary zerovm mock executable, if one was written
        if self.zerovm_mock:
            os.unlink(self.zerovm_mock)
    def setup_zerovm_query(self, mock=None):
        """Install a (mock) zerovm executable and seed the test fixtures.

        Writes the optional mock script to a temp file, points the
        middleware at it, PUTs the random-numbers object, and precomputes
        the payloads/etags the individual tests assert against.
        """
        # ensure that python executable is used
        zerovm_mock = ZEROVM_DEFAULT_MOCK
        if mock:
            fd, zerovm_mock = mkstemp()
            os.write(fd, mock)
            os.close(fd)
            self.zerovm_mock = zerovm_mock
        self.app.zerovm_exename = ['python', zerovm_mock]
        # do not set it lower than 2 * BLOCKSIZE (2 * 512)
        # it will break tar RPC protocol
        self.app.app.network_chunk_size = 2 * 512
        randomnumbers = create_random_numbers(10)
        self.create_object(randomnumbers)
        self._nexescript = 'return pickle.dumps(sorted(id))'
        self._sortednumbers = get_sorted_numbers()
        self._randomnumbers_etag = md5()
        self._randomnumbers_etag.update(randomnumbers)
        self._randomnumbers_etag = self._randomnumbers_etag.hexdigest()
        self._sortednumbers_etag = md5()
        self._sortednumbers_etag.update(self._sortednumbers)
        self._sortednumbers_etag = self._sortednumbers_etag.hexdigest()
        self._nexescript_etag = md5()
        self._nexescript_etag.update(self._nexescript)
        self._nexescript_etag = self._nexescript_etag.hexdigest()
        self._stderr = '\nfinished\n'
        # pickle of an empty list, returned for empty input
        self._emptyresult = '(l.'
        self._emptyresult_etag = md5()
        self._emptyresult_etag.update(self._emptyresult)
        self._emptyresult_etag = self._emptyresult_etag.hexdigest()
    def create_object(self, body, path='/sda1/p/a/c/o'):
        """PUT ``body`` at ``path`` and assert the object was created."""
        timestamp = normalize_timestamp(time())
        headers = {'X-Timestamp': timestamp,
                   'Content-Type': 'application/octet-stream'}
        req = Request.blank(path,
                            environ={'REQUEST_METHOD': 'PUT'}, headers=headers)
        req.body = body
        resp = req.get_response(self.app)
        self.assertEquals(resp.status_int, 201)
    def zerovm_object_request(self):
        """Return a POST that runs zerovm with GET access to /a/c/o."""
        req = Request.blank('/sda1/p/a/c/o',
                            environ={'REQUEST_METHOD': 'POST'},
                            headers={'Content-Type': 'application/x-gtar',
                                     'x-zerovm-execute': '1.0',
                                     'x-account-name': 'a',
                                     'x-zerovm-access': 'GET'})
        # every execution needs a unique session id
        req.headers['x-zerocloud-id'] = self.uid_generator.get()
        return req
    def zerovm_free_request(self):
        """Return a POST that runs zerovm with no object access."""
        req = Request.blank('/sda1/p/a',
                            environ={'REQUEST_METHOD': 'POST'},
                            headers={'Content-Type': 'application/x-gtar',
                                     'x-zerovm-execute': '1.0',
                                     'x-account-name': 'a',
                                     'x-zerovm-access': ''})
        # every execution needs a unique session id
        req.headers['x-zerocloud-id'] = self.uid_generator.get()
        return req
    def test_tmpdir_mkstemp_creates_dir(self):
        """TmpDir.mkstemp must recreate a missing tmp directory."""
        tmpdir = os.path.join(self.testdir, 'sda1', 'tmp')
        os.rmdir(tmpdir)
        with objectquery.TmpDir(tmpdir, 'sda1').mkstemp():
            self.assert_(os.path.exists(tmpdir))
    def __test_QUERY_realzvm(self):
        """Manual smoke test against a real zerovm binary.

        NOTE(review): disabled via the double-underscore prefix (unittest
        does not collect it); relies on hard-coded local paths.
        """
        orig_exe = self.app.zerovm_exename
        orig_sysimages = self.app.zerovm_sysimage_devices
        try:
            self.app.zerovm_sysimage_devices['python-image'] = (
                '/media/40G/zerovm-samples/zshell/zpython2/python.tar'
            )
            self.setup_zerovm_query()
            self.app.zerovm_exename = ['/opt/zerovm/bin/zerovm']
            req = self.zerovm_free_request()
            req.headers['x-zerovm-daemon'] = 'asdf'
            conf = ZvmNode(1, 'python', parse_location(
                'file://python-image:python'), args='hello.py')
            conf.add_new_channel('stdout', ACCESS_WRITABLE)
            conf.add_new_channel(
                'python-image', ACCESS_READABLE | ACCESS_RANDOM)
            conf.add_new_channel('image', ACCESS_CDR, removable='yes')
            conf = conf.dumps()
            sysmap = StringIO(conf)
            image = open('/home/kit/python-script.tar', 'rb')
            with self.create_tar({'sysmap': sysmap, 'image': image}) as tar:
                length = os.path.getsize(tar)
                req.body_file = Input(open(tar, 'rb'), length)
                resp = req.get_response(self.app)
                print ['x-zerovm-daemon', resp.headers.get('x-zerovm-daemon',
                                                           '---')]
                print ['x-nexe-cdr-line', resp.headers['x-nexe-cdr-line']]
                if resp.content_type in TAR_MIMES:
                    # dump each member of the response tar for inspection
                    fd, name = mkstemp()
                    for chunk in resp.app_iter:
                        os.write(fd, chunk)
                    os.close(fd)
                    tar = tarfile.open(name)
                    names = tar.getnames()
                    members = tar.getmembers()
                    for n, m in zip(names, members):
                        print [n, tar.extractfile(m).read()]
                else:
                    print resp.body
        finally:
            # always restore the patched middleware configuration
            self.app.zerovm_exename = orig_exe
            self.app.zerovm_sysimage_devices = orig_sysimages
    def test_QUERY_sort(self):
        """End-to-end sort job: pickled result arrives as 'stdout' in a tar."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO(self._nexescript)
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            # spool the response tar to disk so tarfile can read it
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            self.assertEqual(members[-1].size, len(self._sortednumbers))
            file = tar.extractfile(members[-1])
            self.assertEqual(file.read(), self._sortednumbers)
            # execution metadata is reported via x-nexe-* response headers
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEquals(
                resp.headers['content-type'], 'application/x-gtar')
            # self.assertEqual(self.app.logger.log_dict['info'][0][0][0],
            #                  'Zerovm CDR: 0 0 0 0 1 46 2 56 0 0 0 0')
    def test_QUERY_sort_textout(self):
        """Same sort job but returning a plain-text (repr) result."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        nexefile = StringIO('return str(sorted(id))')
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel('stdout', ACCESS_WRITABLE)
        conf = conf.dumps()
        sysmap = StringIO(conf)
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            # spool the response tar to disk so tarfile can read it
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            file = tar.extractfile(members[-1])
            self.assertEqual(file.read(), '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]')
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEquals(
                resp.headers['content-type'], 'application/x-gtar')
            # self.assertEqual(self.app.logger.log_dict['info'][0][0][0],
            #                  'Zerovm CDR: 0 0 0 0 1 46 2 40 0 0 0 0')
    def test_QUERY_http_message(self):
        """stdout declared message/http: response headers land in pax headers."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel(
            'stdout', ACCESS_WRITABLE, content_type='message/http')
        conf = conf.dumps()
        sysmap = StringIO(conf)
        # mock nexe emits a full HTTP response; middleware must strip the
        # status line/headers from the body and expose them as tar headers
        nexefile = StringIO(trim(r'''
            resp = '\n'.join([
                'HTTP/1.1 200 OK',
                'Content-Type: application/json',
                'X-Object-Meta-Key1: value1',
                'X-Object-Meta-Key2: value2',
                '', ''
            ])
            out = str(sorted(id))
            return resp + out
            '''))
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            file = tar.extractfile(members[-1])
            self.assertEqual(file.read(), '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]')
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            self.assertEquals(
                resp.headers['content-type'], 'application/x-gtar')
            # parsed HTTP headers are exposed via the patched get_headers()
            stdout_headers = members[-1].get_headers()
            self.assertEqual(stdout_headers['Content-Type'],
                             'application/json')
            self.assertEqual(stdout_headers['X-Object-Meta-Key1'],
                             'value1')
            self.assertEqual(stdout_headers['X-Object-Meta-Key2'],
                             'value2')
    def test_QUERY_cgi_message(self):
        """stdout declared message/cgi: CGI-style headers land in pax headers."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel(
            'stdout', ACCESS_WRITABLE, content_type='message/cgi')
        conf = conf.dumps()
        sysmap = StringIO(conf)
        # CGI output has headers but no HTTP status line
        nexefile = StringIO(trim(r'''
            resp = '\n'.join([
                'Content-Type: application/json',
                'X-Object-Meta-Key1: value1',
                'X-Object-Meta-Key2: value2',
                '', ''
            ])
            out = str(sorted(id))
            return resp + out
            '''))
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            file = tar.extractfile(members[-1])
            self.assertEqual(file.read(), '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]')
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEquals(
                resp.headers['content-type'], 'application/x-gtar')
            # parsed CGI headers are exposed via the patched get_headers()
            stdout_headers = members[-1].get_headers()
            self.assertEqual(stdout_headers['Content-Type'],
                             'application/json')
            self.assertEqual(stdout_headers['X-Object-Meta-Key1'],
                             'value1')
            self.assertEqual(stdout_headers['X-Object-Meta-Key2'],
                             'value2')
    def test_QUERY_invalid_http_message(self):
        """Malformed message/http output is passed through unparsed."""
        self.setup_zerovm_query()
        req = self.zerovm_object_request()
        conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
        conf.add_new_channel(
            'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
        conf.add_new_channel(
            'stdout', ACCESS_WRITABLE, content_type='message/http')
        conf = conf.dumps()
        sysmap = StringIO(conf)
        # 'Status:' is CGI syntax, not a valid HTTP status line
        nexefile = StringIO(trim('''
            resp = '\\n'.join(['Status: 200 OK',
                'Content-Type: application/json', '', ''])
            out = str(sorted(id))
            return resp + out
            '''))
        with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
            length = os.path.getsize(tar)
            req.body_file = Input(open(tar, 'rb'), length)
            req.content_length = length
            resp = req.get_response(self.app)
            fd, name = mkstemp()
            for chunk in resp.app_iter:
                os.write(fd, chunk)
            os.close(fd)
            self.assertEqual(os.path.getsize(name), resp.content_length)
            tar = tarfile.open(name)
            names = tar.getnames()
            members = tar.getmembers()
            self.assertIn('stdout', names)
            self.assertEqual(names[-1], 'stdout')
            file = tar.extractfile(members[-1])
            # body keeps the unparsed pseudo-headers
            self.assertEqual(file.read(),
                             'Status: 200 OK\n'
                             'Content-Type: application/json\n\n'
                             '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]')
            self.assertEqual(resp.headers['x-nexe-retcode'], '0')
            self.assertEqual(resp.headers['x-nexe-status'], 'ok.')
            self.assertEqual(resp.headers['x-nexe-validation'], '0')
            self.assertEqual(resp.headers['x-nexe-system'], 'sort')
            timestamp = normalize_timestamp(time())
            self.assertEqual(math.floor(float(resp.headers['X-Timestamp'])),
                             math.floor(float(timestamp)))
            self.assertEqual(
                resp.headers['content-type'], 'application/x-gtar')
            # no parsed headers: content type stays message/http
            stdout_headers = members[-1].get_headers()
            self.assertEqual(stdout_headers['Content-Type'], 'message/http')
def test_QUERY_invalid_nexe(self):
self.setup_zerovm_query()
req = self.zerovm_object_request()
nexefile = StringIO('INVALID')
conf = ZvmNode(1, 'sort', parse_location('swift://a/c/exe'))
conf.add_new_channel(
'stdin', ACCESS_READABLE, parse_location('swift://a/c/o'))
conf.add_new_channel('stdout', ACCESS_WRITABLE)
conf = conf.dumps()
sysmap = StringIO(conf)
with create_tar({'boot': nexefile, 'sysmap': sysmap}) as tar:
length = os.path.getsize(tar)
req.body_file = Input(open(tar, 'rb'), length)
req.content_length = length
resp = req.get_response(self.app)
fd, name = mkstemp()
for chunk in resp.app_iter:
os.write(fd, chunk)
os.close(fd)
self.assertEqual(os.path.getsize(name), resp.content_length)
tar = tarfile.open(name)
names = tar.getnames()
members | |
<gh_stars>10-100
#
# python_grabber
#
# Authors:
# <NAME> <<EMAIL>>
#
# Copyright (C) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import numpy as np
import os.path
from enum import Enum
from comtypes.persist import IPropertyBag
from pygrabber.dshow_core import *
from pygrabber.windows_media import *
from pygrabber.dshow_ids import *
from pygrabber.win_api_extra import *
class StateGraph(Enum):
    """Run state of the DirectShow filter graph."""
    Stopped = 0
    Paused = 1
    Running = 2
class RecordingFormat(Enum):
    """Container format used when recording to file."""
    AVI = 0
    ASF = 1
class FilterType(Enum):
    """Role a filter plays inside the capture graph."""
    video_input = 0
    audio_input = 1
    video_compressor = 2
    audio_compressor = 3
    sample_grabber = 4
    render = 5
    file_sink = 6
    muxer = 7
    smart_tee = 8
class Filter:
    # Wrapper around a Direct Show filter
    def __init__(self, instance, name, capture_builder):
        # instance: the underlying COM filter object;
        # capture_builder: graph builder used for pin lookups
        self.instance = instance
        self.capture_builder = capture_builder
        self.Name = name
        self.out_pins = []
        self.in_pins = []
        self.reload_pins()
    def get_out(self):
        """Return the first output pin."""
        return self.out_pins[0]
    def get_in(self, index=0):
        """Return the input pin at ``index`` (first by default)."""
        return self.in_pins[index]
    def find_pin(self, direction, category=None, type=None, unconnected=True):
        """Locate a matching pin via the capture builder; None if absent."""
        try:
            return self.capture_builder.FindPin(self.instance, direction, category, type, unconnected, 0)
        except COMError:
            return None # assuming preview pin not found
    def reload_pins(self):
        """Re-enumerate the filter's pins into in_pins / out_pins."""
        # 0 = in, 1 = out
        self.out_pins = []
        self.in_pins = []
        enum = self.instance.EnumPins()
        pin, count = enum.Next(1)
        while count > 0:
            if pin.QueryDirection() == 0:
                self.in_pins.append(pin)
            else:
                self.out_pins.append(pin)
            pin, count = enum.Next(1)
    def set_properties(self):
        """Show the filter's property page dialog."""
        show_properties(self.instance)
    def get_name(self):
        """Return the filter name as reported by the graph."""
        filter_info = self.instance.QueryFilterInfo()
        return wstring_at(filter_info.achName)
    def print_info(self):
        """Print a human-readable pin listing (debug helper)."""
        print(f"Pins of: {self.get_name()}")
        enum = self.instance.EnumPins()
        pin, count = enum.Next(1)
        while count > 0:
            info = pin.QueryPinInfo()
            direction, name = (info.dir, wstring_at(info.achName))
            print(f"PIN {'in' if direction == 0 else 'out'} - {name}")
            pin, count = enum.Next(1)
class VideoInput(Filter):
    """Video capture device filter with stream-format helpers."""
    def __init__(self, args, capture_builder):
        # args: (filter instance, device name) pair
        Filter.__init__(self, args[0], args[1], capture_builder)
    def get_current_format(self):
        """Return (width, height) of the currently selected output format."""
        stream_config = self.get_out().QueryInterface(IAMStreamConfig)
        media_type = stream_config.GetFormat()
        p_video_info_header = cast(media_type.contents.pbFormat, POINTER(VIDEOINFOHEADER))
        bmp_header = p_video_info_header.contents.bmi_header
        return bmp_header.biWidth, bmp_header.biHeight
    def get_formats(self):
        """Return the device's supported formats as a list of dicts.

        Frame intervals are in 100 ns units, hence 10000000 / interval
        for frames per second.
        """
        # https://docs.microsoft.com/en-us/windows/win32/directshow/configure-the-video-output-format
        stream_config = self.get_out().QueryInterface(IAMStreamConfig)
        media_types_count, _ = stream_config.GetNumberOfCapabilities()
        result = []
        for i in range(0, media_types_count):
            media_type, capability = stream_config.GetStreamCaps(i)
            p_video_info_header = cast(media_type.contents.pbFormat, POINTER(VIDEOINFOHEADER))
            bmp_header = p_video_info_header.contents.bmi_header
            result.append({
                'index': i,
                'media_type_str': subtypes[str(media_type.contents.subtype)],
                'width': bmp_header.biWidth,
                'height': bmp_header.biHeight,
                'min_framerate': 10000000 / capability.MinFrameInterval,
                'max_framerate': 10000000 / capability.MaxFrameInterval
            })
            #print(f"{capability.MinOutputSize.cx}x{capability.MinOutputSize.cx} - {capability.MaxOutputSize.cx}x{capability.MaxOutputSize.cx}")
        return result
    def set_format(self, format_index):
        """Select one of the formats returned by get_formats()."""
        stream_config = self.get_out().QueryInterface(IAMStreamConfig)
        media_type, _ = stream_config.GetStreamCaps(format_index)
        stream_config.SetFormat(media_type)
    def show_format_dialog(self):
        """Open the driver's own format-selection property page."""
        show_properties(self.get_out())
class AudioInput(Filter):
    """Filter wrapping an audio capture device."""

    def __init__(self, args, capture_builder):
        """``args`` is an (instance, name) pair as returned by the device enumerator."""
        super().__init__(args[0], args[1], capture_builder)
class VideoCompressor(Filter):
    """Filter wrapping a video compressor."""

    def __init__(self, args, capture_builder):
        """``args`` is an (instance, name) pair as returned by the device enumerator."""
        super().__init__(args[0], args[1], capture_builder)
class AudioCompressor(Filter):
    """Filter wrapping an audio compressor."""

    def __init__(self, args, capture_builder):
        """``args`` is an (instance, name) pair as returned by the device enumerator."""
        super().__init__(args[0], args[1], capture_builder)
class Render(Filter):
def __init__(self, instance, capture_builder):
Filter.__init__(self, instance, "Render", capture_builder)
try:
self.video_window = self.instance.QueryInterface(IVideoWindow)
except COMError:
self.video_window = None # probably interface IVideoWindow not supported because using NullRender
def configure_video_window(self, handle):
# must be called after the graph is connected
self.video_window.put_Owner(handle)
self.video_window.put_WindowStyle(WS_CHILD | WS_CLIPSIBLINGS)
def set_window_position(self, x, y, width, height):
self.video_window.SetWindowPosition(x, y, width, height)
class SampleGrabber(Filter):
    """Wrapper around the DirectShow Sample Grabber filter (ISampleGrabber)."""

    def __init__(self, capture_builder):
        com_instance = client.CreateObject(GUID(clsids.CLSID_SampleGrabber), interface=qedit.IBaseFilter)
        Filter.__init__(self, com_instance, "Sample Grabber", capture_builder)
        self.sample_grabber = self.instance.QueryInterface(ISampleGrabber)
        self.callback = None

    def set_callback(self, callback, which_method_to_callback):
        """Register the grab callback; the second argument selects which ISampleGrabber method is invoked."""
        self.callback = callback
        self.sample_grabber.SetCallback(callback, which_method_to_callback)

    def set_media_type(self, media_type, media_subtype):
        """Restrict the grabber to the given major/sub media type GUID strings."""
        accepted = qedit._AMMediaType()
        accepted.majortype = GUID(media_type)
        accepted.subtype = GUID(media_subtype)
        self.sample_grabber.SetMediaType(accepted)

    def get_resolution(self):
        """Return (width, height) of the media type the grabber is connected with."""
        connected = self.sample_grabber.GetConnectedMediaType()
        header = cast(connected.pbFormat, POINTER(VIDEOINFOHEADER)).contents.bmi_header
        return header.biWidth, header.biHeight

    def initialize_after_connection(self):
        """Propagate the negotiated resolution to the registered callback (call once the graph is connected)."""
        self.callback.image_resolution = self.get_resolution()
class SmartTee(Filter):
    """Wrapper around the DirectShow Smart Tee filter."""

    def __init__(self, capture_builder):
        com_instance = client.CreateObject(GUID(clsids.CLSID_SmartTee), interface=qedit.IBaseFilter)
        Filter.__init__(self, com_instance, "Smart Tee", capture_builder)
class Muxer(Filter):
    """Wrapper around a multiplexer filter."""

    def __init__(self, args, capture_builder):
        # Unlike the capture-device wrappers, ``args`` is the bare COM instance.
        super().__init__(args, "Muxer", capture_builder)
class SystemDeviceEnum:
    """Thin wrapper around the ICreateDevEnum system device enumerator."""

    def __init__(self):
        self.system_device_enum = client.CreateObject(clsids.CLSID_SystemDeviceEnum, interface=ICreateDevEnum)

    def get_available_filters(self, category_clsid):
        """Return the display names of all filters registered under ``category_clsid``."""
        filter_enumerator = self.system_device_enum.CreateClassEnumerator(GUID(category_clsid), dwFlags=0)
        moniker, count = filter_enumerator.Next(1)
        result = []
        while count > 0:
            result.append(get_moniker_name(moniker))
            moniker, count = filter_enumerator.Next(1)
        return result

    def get_filter_by_index(self, category_clsid, index):
        """Return (IBaseFilter instance, display name) of the ``index``-th filter in a category.

        Raises:
            ValueError: if the category holds fewer than ``index + 1`` filters.
        """
        filter_enumerator = self.system_device_enum.CreateClassEnumerator(GUID(category_clsid), dwFlags=0)
        moniker, count = filter_enumerator.Next(1)
        i = 0
        while i != index and count > 0:
            moniker, count = filter_enumerator.Next(1)
            i = i + 1
        # FIX: the original returned a stale/invalid moniker when the
        # enumeration ran out before reaching ``index`` (Next fetched 0 items).
        if count == 0:
            raise ValueError('No filter at index', index, category_clsid)
        return moniker.BindToObject(0, 0, qedit.IBaseFilter._iid_).QueryInterface(qedit.IBaseFilter), \
            get_moniker_name(moniker)
class FilterFactory:
    """Creates the Filter wrapper matching each FilterType value."""

    def __init__(self, system_device_enum, capture_builder):
        self.system_device_enum = system_device_enum
        self.capture_builder = capture_builder

    def build_filter(self, filter_type, id):
        """Instantiate the wrapper for ``filter_type``.

        ``id`` is interpreted per type: a device index for input/compressor
        filters, a CLSID string for renderers, the muxer COM instance for
        muxers, and ignored for sample grabber / smart tee.

        Raises:
            ValueError: for an unrecognized filter type.
        """
        enum = self.system_device_enum
        builders = {
            FilterType.video_input: lambda: VideoInput(
                enum.get_filter_by_index(DeviceCategories.VideoInputDevice, id), self.capture_builder),
            FilterType.audio_input: lambda: AudioInput(
                enum.get_filter_by_index(DeviceCategories.AudioInputDevice, id), self.capture_builder),
            FilterType.video_compressor: lambda: VideoCompressor(
                enum.get_filter_by_index(DeviceCategories.VideoCompressor, id), self.capture_builder),
            FilterType.audio_compressor: lambda: AudioCompressor(
                enum.get_filter_by_index(DeviceCategories.AudioCompressor, id), self.capture_builder),
            FilterType.render: lambda: Render(
                client.CreateObject(GUID(id), interface=qedit.IBaseFilter), self.capture_builder),
            FilterType.sample_grabber: lambda: SampleGrabber(self.capture_builder),
            FilterType.muxer: lambda: Muxer(id, self.capture_builder),
            FilterType.smart_tee: lambda: SmartTee(self.capture_builder),
        }
        try:
            builder = builders[filter_type]
        except KeyError:
            raise ValueError('Cannot create filter', filter_type, id) from None
        return builder()
class MediaType:
    """Builds a qedit._AMMediaType from major/sub type GUID strings."""

    def __init__(self, majortype_guid, subtype_guid):
        media_type = qedit._AMMediaType()
        media_type.majortype = GUID(majortype_guid)
        media_type.subtype = GUID(subtype_guid)
        self.instance = media_type
class WmProfileManager:
    """Loads the Windows Media system profiles through IWMProfileManager2."""

    def __init__(self):
        self.profile_manager = POINTER(IWMProfileManager2)()
        WMCreateProfileManager(byref(self.profile_manager))
        # 0x00080000 selects the system-profile version to enumerate.
        self.profile_manager.SetSystemProfileVersion(0x00080000)
        self.profiles, self.profiles_names = self.__load_profiles()

    def __load_profiles(self):
        """Return (profiles, names) for every system profile known to the manager."""
        profile_count = self.profile_manager.GetSystemProfileCount()
        profiles = [self.profile_manager.LoadSystemProfile(idx) for idx in range(profile_count)]
        names = []
        name_buffer = create_unicode_buffer(200)
        for profile in profiles:
            buffer_len = DWORD(200)
            profile.GetName(name_buffer, pointer(buffer_len))
            names.append(name_buffer.value)
        return profiles, names
class FilterGraph:
    def __init__(self):
        """Build the core DirectShow objects: the filter graph, its control and
        event interfaces, a capture graph builder bound to the graph, the
        system device enumerator, a filter factory and the WM profile manager.
        """
        self.filter_graph = client.CreateObject(clsids.CLSID_FilterGraph, interface=qedit.IFilterGraph)
        # Additional interface views on the same underlying graph object.
        self.graph_builder = self.filter_graph.QueryInterface(qedit.IGraphBuilder)
        self.media_control = self.filter_graph.QueryInterface(quartz.IMediaControl)
        self.media_event = self.filter_graph.QueryInterface(quartz.IMediaEvent)
        self.capture_builder = client.CreateObject(clsids.CLSID_CaptureGraphBuilder2, interface=ICaptureGraphBuilder2)
        self.capture_builder.SetFiltergraph(self.filter_graph)
        self.system_device_enum = SystemDeviceEnum()
        self.filter_factory = FilterFactory(self.system_device_enum, self.capture_builder)
        self.wm_profile_manager = WmProfileManager()
        self.filters = {}  # FilterType -> Filter wrapper currently added to the graph
        self.recording_format = None
        self.is_recording = False
def __add_filter(self, filter_type, filter_id):
assert not(filter_type in self.filters)
filter = self.filter_factory.build_filter(filter_type, filter_id)
self.filters[filter_type] = filter
self.filter_graph.AddFilter(filter.instance, filter.Name)
    def add_video_input_device(self, index):
        """Add the index-th video capture device (see get_input_devices) to the graph."""
        self.__add_filter(FilterType.video_input, index)
    def add_audio_input_device(self, index):
        """Add the index-th audio capture device (see get_audio_devices) to the graph."""
        self.__add_filter(FilterType.audio_input, index)
    def add_video_compressor(self, index):
        """Add the index-th video compressor (see get_video_compressors) to the graph."""
        self.__add_filter(FilterType.video_compressor, index)
    def add_audio_compressor(self, index):
        """Add the index-th audio compressor (see get_audio_compressors) to the graph."""
        self.__add_filter(FilterType.audio_compressor, index)
def add_sample_grabber(self, callback):
self.__add_filter(FilterType.sample_grabber, None)
sample_grabber = self.filters[FilterType.sample_grabber]
sample_grabber_cb = SampleGrabberCallback(callback)
sample_grabber.set_callback(sample_grabber_cb, 1)
sample_grabber.set_media_type(MediaTypes.Video, MediaSubtypes.RGB24)
    def add_null_render(self):
        """Add a NullRenderer filter (discards frames; no on-screen preview)."""
        self.__add_filter(FilterType.render, clsids.CLSID_NullRender)
    def add_default_render(self):
        """Add the default video renderer filter."""
        self.__add_filter(FilterType.render, clsids.CLSID_VideoRendererDefault)
    def add_video_mixing_render(self):
        """Add the Video Mixing Renderer filter."""
        self.__add_filter(FilterType.render, clsids.CLSID_VideoMixingRenderer)
def add_file_writer_and_muxer(self, filename):
extension = os.path.splitext(filename)[1].upper()
mediasubtype = MediaSubtypes.ASF if extension == ".WMV" else MediaSubtypes.AVI
self.recording_format = RecordingFormat.ASF if extension == ".WMV" else RecordingFormat.AVI
mux, filesink = self.capture_builder.SetOutputFileName(GUID(mediasubtype), filename)
self.filters[FilterType.muxer] = self.filter_factory.build_filter(FilterType.muxer, mux)
    def configure_asf_compressor(self):
        """Placeholder: ASF (WMV) profile configuration is not implemented yet."""
        pass
        # TODO: configure the ASF writer through IConfigAsfWriter, e.g.:
        # asf_config = self.mux.QueryInterface(IConfigAsfWriter)
        # print(asf_config.GetCurrentProfileGuid())
        #profile = asf_config.GetCurrentProfile()
def prepare_preview_graph(self):
assert FilterType.video_input in self.filters
assert FilterType.render in self.filters
if FilterType.sample_grabber not in self.filters:
self.graph_builder.Connect(self.filters[FilterType.video_input].get_out(),
self.filters[FilterType.render].get_in())
else:
self.graph_builder.Connect(self.filters[FilterType.video_input].get_out(),
self.filters[FilterType.sample_grabber].get_in())
self.graph_builder.Connect(self.filters[FilterType.sample_grabber].get_out(),
self.filters[FilterType.render].get_in())
self.filters[FilterType.sample_grabber].initialize_after_connection()
self.is_recording = False
    def __get_capture_and_preview_pins(self):
        """Return (preview_pin, capture_pin) output pins of the video input filter.

        If the device does not expose both pin categories, a Smart Tee filter
        is inserted behind whichever pin exists, and the tee's two output pins
        are used instead.
        """
        preview_pin = self.filters[FilterType.video_input].find_pin(PIN_OUT, category=GUID(PinCategory.Preview))
        capture_pin = self.filters[FilterType.video_input].find_pin(PIN_OUT, category=GUID(PinCategory.Capture))
        if (preview_pin is None) or (capture_pin is None):
            self.__add_filter(FilterType.smart_tee, None)
            smart_tee = self.filters[FilterType.smart_tee]
            # NOTE(review): if *both* pins were None this would connect None —
            # assumes the device exposes at least one of the two categories.
            self.graph_builder.Connect(capture_pin if capture_pin is not None else preview_pin, smart_tee.get_in())
            # assuming the 1st output pin of the smart tee filter is always the capture one
            capture_pin, preview_pin = smart_tee.out_pins
        return preview_pin, capture_pin
    def prepare_recording_graph(self):
        """Wire the graph for recording: capture pin -> (compressor) -> muxer,
        preview pin -> renderer, and optionally audio input -> (compressor) -> muxer.

        ASF muxers take uncompressed streams directly (the ASF writer
        compresses internally); the AVI path goes through the explicit
        video/audio compressor filters.
        """
        # in theory we could use self.capture_builder.RenderStream,
        # but it is not working when including the video compressor :-(
        assert FilterType.video_input in self.filters
        assert FilterType.render in self.filters
        assert FilterType.muxer in self.filters
        preview_pin, capture_pin = self.__get_capture_and_preview_pins()
        if self.recording_format == RecordingFormat.ASF:
            # ASF writer: video on input pin 1, audio on input pin 0.
            self.graph_builder.Connect(capture_pin,
                                       self.filters[FilterType.muxer].get_in(1))
            self.graph_builder.Connect(self.filters[FilterType.audio_input].get_out(),
                                       self.filters[FilterType.muxer].get_in(0))
            self.graph_builder.Connect(preview_pin, self.filters[FilterType.render].get_in())
        else:
            self.graph_builder.Connect(capture_pin, self.filters[FilterType.video_compressor].get_in())
            self.graph_builder.Connect(self.filters[FilterType.video_compressor].get_out(),
                                       self.filters[FilterType.muxer].get_in())
            self.graph_builder.Connect(preview_pin, self.filters[FilterType.render].get_in())
            if FilterType.audio_input in self.filters:
                self.graph_builder.Connect(self.filters[FilterType.audio_input].get_out(),
                                           self.filters[FilterType.audio_compressor].get_in())
                self.filters[FilterType.muxer].reload_pins()
                # when you connect an input pin of the muxer, an additional input pin is added
                self.graph_builder.Connect(self.filters[FilterType.audio_compressor].get_out(),
                                           self.filters[FilterType.muxer].get_in(1))
        self.is_recording = True
    def configure_render(self, handle):
        """Embed the renderer's video window into the parent window given by ``handle``."""
        self.filters[FilterType.render].configure_video_window(handle)
def update_window(self, width, height):
if FilterType.render in self.filters:
img_w, img_h = self.filters[FilterType.video_input].get_current_format()
scale_w = width / img_w
scale_h = height / img_h
scale = min(scale_w, scale_h, 1)
self.filters[FilterType.render].set_window_position(0, 0, int(img_w * scale), int(img_h * scale))
    def run(self):
        """Start (or resume) the graph."""
        self.media_control.Run()
    def stop(self):
        """Stop the graph; safe to call even when it was never prepared."""
        if self.media_control is not None:
            # calling stop without calling prepare
            self.media_control.Stop()
        # if self.video_window is not None:
        #     self.video_window.put_Visible(False)
        #     self.video_window.put_Owner(0)
    def pause(self):
        """Pause the graph."""
        self.media_control.Pause()
    def get_state(self):
        """Return the graph state as a StateGraph value, waiting for any pending transition."""
        return StateGraph(self.media_control.GetState(0xFFFFFFFF)) # 0xFFFFFFFF = infinite timeout
    def get_input_devices(self):
        """Names of the available video capture devices."""
        return self.system_device_enum.get_available_filters(DeviceCategories.VideoInputDevice)
    def get_audio_devices(self):
        """Names of the available audio capture devices."""
        return self.system_device_enum.get_available_filters(DeviceCategories.AudioInputDevice)
    def get_video_compressors(self):
        """Names of the available video compressors."""
        return self.system_device_enum.get_available_filters(DeviceCategories.VideoCompressor)
    def get_audio_compressors(self):
        """Names of the available audio compressors."""
        return self.system_device_enum.get_available_filters(DeviceCategories.AudioCompressor)
    def get_asf_profiles(self):
        """Names of the loaded Windows Media system profiles."""
        return self.wm_profile_manager.profiles_names
def grab_frame(self):
if FilterType.sample_grabber in self.filters:
self.filters[FilterType.sample_grabber].callback.grab_frame()
return True
else:
return False
    def get_input_device(self):
        """Return the VideoInput wrapper currently in the graph (KeyError if absent)."""
        return self.filters[FilterType.video_input]
def remove_filters(self):
enum_filters = self.filter_graph.EnumFilters()
filt, count = enum_filters.Next(1)
| |
with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-minLength-1.xsd",
instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-minLength-1-3.xml",
class_name="NistschemaSvIvListNmtokenMinLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtoken_min_length_nistxml_sv_iv_list_nmtoken_min_length_1_4(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by minLength=5 (instance 1-4)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-minLength-1.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-minLength-1-4.xml",
        class_name="NistschemaSvIvListNmtokenMinLength1",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_min_length_nistxml_sv_iv_list_nmtoken_min_length_1_5(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by minLength=5 (instance 1-5)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-minLength-1.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-minLength-1-5.xml",
        class_name="NistschemaSvIvListNmtokenMinLength1",
        version="1.1",
        structure_style="filenames",
    )
def test_list_nmtoken_max_length_4_nistxml_sv_iv_list_nmtoken_max_length_5_1(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=10 (instance 5-1)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-5.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-5-1.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength5",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_4_nistxml_sv_iv_list_nmtoken_max_length_5_2(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=10 (instance 5-2)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-5.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-5-2.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength5",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_4_nistxml_sv_iv_list_nmtoken_max_length_5_3(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=10 (instance 5-3)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-5.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-5-3.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength5",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_4_nistxml_sv_iv_list_nmtoken_max_length_5_4(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=10 (instance 5-4)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-5.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-5-4.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength5",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_4_nistxml_sv_iv_list_nmtoken_max_length_5_5(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=10 (instance 5-5)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-5.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-5-5.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength5",
        version="1.1",
        structure_style="filenames",
    )
def test_list_nmtoken_max_length_3_nistxml_sv_iv_list_nmtoken_max_length_4_1(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=8 (instance 4-1)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-4.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-4-1.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength4",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_3_nistxml_sv_iv_list_nmtoken_max_length_4_2(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=8 (instance 4-2)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-4.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-4-2.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength4",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_3_nistxml_sv_iv_list_nmtoken_max_length_4_3(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=8 (instance 4-3)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-4.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-4-3.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength4",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_3_nistxml_sv_iv_list_nmtoken_max_length_4_4(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=8 (instance 4-4)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-4.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-4-4.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength4",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_3_nistxml_sv_iv_list_nmtoken_max_length_4_5(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=8 (instance 4-5)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-4.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-4-5.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength4",
        version="1.1",
        structure_style="filenames",
    )
def test_list_nmtoken_max_length_2_nistxml_sv_iv_list_nmtoken_max_length_3_1(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=7 (instance 3-1)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-3.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-3-1.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength3",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_2_nistxml_sv_iv_list_nmtoken_max_length_3_2(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=7 (instance 3-2)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-3.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-3-2.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength3",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_2_nistxml_sv_iv_list_nmtoken_max_length_3_3(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=7 (instance 3-3)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-3.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-3-3.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength3",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_2_nistxml_sv_iv_list_nmtoken_max_length_3_4(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=7 (instance 3-4)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-3.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-3-4.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength3",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_2_nistxml_sv_iv_list_nmtoken_max_length_3_5(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=7 (instance 3-5)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-3.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-3-5.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength3",
        version="1.1",
        structure_style="filenames",
    )
def test_list_nmtoken_max_length_1_nistxml_sv_iv_list_nmtoken_max_length_2_1(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=6 (instance 2-1)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-2.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-2-1.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength2",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_1_nistxml_sv_iv_list_nmtoken_max_length_2_2(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=6 (instance 2-2)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-2.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-2-2.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength2",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_1_nistxml_sv_iv_list_nmtoken_max_length_2_3(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=6 (instance 2-3)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-2.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-2-3.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength2",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_1_nistxml_sv_iv_list_nmtoken_max_length_2_4(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=6 (instance 2-4)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-2.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-2-4.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength2",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_1_nistxml_sv_iv_list_nmtoken_max_length_2_5(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=6 (instance 2-5)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-2.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-2-5.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength2",
        version="1.1",
        structure_style="filenames",
    )
def test_list_nmtoken_max_length_nistxml_sv_iv_list_nmtoken_max_length_1_1(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=5 (instance 1-1)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-1.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-1-1.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength1",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_nistxml_sv_iv_list_nmtoken_max_length_1_2(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=5 (instance 1-2)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-1.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-1-2.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength1",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_nistxml_sv_iv_list_nmtoken_max_length_1_3(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=5 (instance 1-3)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-1.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-1-3.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength1",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_nistxml_sv_iv_list_nmtoken_max_length_1_4(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=5 (instance 1-4)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-1.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-1-4.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength1",
        version="1.1",
        structure_style="filenames",
    )


def test_list_nmtoken_max_length_nistxml_sv_iv_list_nmtoken_max_length_1_5(mode, save_output, output_format):
    """Binding test for list/NMTOKEN constrained by maxLength=5 (instance 1-5)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-maxLength-1.xsd",
        instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-maxLength-1-5.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength1",
        version="1.1",
        structure_style="filenames",
    )
def test_list_name_white_space_nistxml_sv_iv_list_name_white_space_1_1(mode, save_output, output_format):
    """Binding test for list/Name constrained by whiteSpace=collapse (instance 1-1)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-whiteSpace-1.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-whiteSpace-1-1.xml",
        class_name="NistschemaSvIvListNameWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )


def test_list_name_white_space_nistxml_sv_iv_list_name_white_space_1_2(mode, save_output, output_format):
    """Binding test for list/Name constrained by whiteSpace=collapse (instance 1-2)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-whiteSpace-1.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-whiteSpace-1-2.xml",
        class_name="NistschemaSvIvListNameWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )


def test_list_name_white_space_nistxml_sv_iv_list_name_white_space_1_3(mode, save_output, output_format):
    """Binding test for list/Name constrained by whiteSpace=collapse (instance 1-3)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-whiteSpace-1.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-whiteSpace-1-3.xml",
        class_name="NistschemaSvIvListNameWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )


def test_list_name_white_space_nistxml_sv_iv_list_name_white_space_1_4(mode, save_output, output_format):
    """Binding test for list/Name constrained by whiteSpace=collapse (instance 1-4)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-whiteSpace-1.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-whiteSpace-1-4.xml",
        class_name="NistschemaSvIvListNameWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )


def test_list_name_white_space_nistxml_sv_iv_list_name_white_space_1_5(mode, save_output, output_format):
    """Binding test for list/Name constrained by whiteSpace=collapse (instance 1-5)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-whiteSpace-1.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-whiteSpace-1-5.xml",
        class_name="NistschemaSvIvListNameWhiteSpace1",
        version="1.1",
        structure_style="filenames",
    )
def test_list_name_enumeration_4_nistxml_sv_iv_list_name_enumeration_5_1(mode, save_output, output_format):
    """Binding test for list/Name constrained by an enumeration facet (instance 5-1)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-5.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-5-1.xml",
        class_name="NistschemaSvIvListNameEnumeration5",
        version="1.1",
        structure_style="filenames",
    )


def test_list_name_enumeration_4_nistxml_sv_iv_list_name_enumeration_5_2(mode, save_output, output_format):
    """Binding test for list/Name constrained by an enumeration facet (instance 5-2)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-5.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-5-2.xml",
        class_name="NistschemaSvIvListNameEnumeration5",
        version="1.1",
        structure_style="filenames",
    )


def test_list_name_enumeration_4_nistxml_sv_iv_list_name_enumeration_5_3(mode, save_output, output_format):
    """Binding test for list/Name constrained by an enumeration facet (instance 5-3)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-5.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-5-3.xml",
        class_name="NistschemaSvIvListNameEnumeration5",
        version="1.1",
        structure_style="filenames",
    )


def test_list_name_enumeration_4_nistxml_sv_iv_list_name_enumeration_5_4(mode, save_output, output_format):
    """Binding test for list/Name constrained by an enumeration facet (instance 5-4)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-5.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-5-4.xml",
        class_name="NistschemaSvIvListNameEnumeration5",
        version="1.1",
        structure_style="filenames",
    )


def test_list_name_enumeration_4_nistxml_sv_iv_list_name_enumeration_5_5(mode, save_output, output_format):
    """Binding test for list/Name constrained by an enumeration facet (instance 5-5)."""
    assert_bindings(
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-5.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-5-5.xml",
        class_name="NistschemaSvIvListNameEnumeration5",
        version="1.1",
        structure_style="filenames",
    )
# Auto-generated NIST XML Schema conformance tests: type list/Name restricted
# by the enumeration facet, schema variant 4, instances 1-5.
def test_list_name_enumeration_3_nistxml_sv_iv_list_name_enumeration_4_1(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-4.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-4-1.xml",
        class_name="NistschemaSvIvListNameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_3_nistxml_sv_iv_list_name_enumeration_4_2(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-4.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-4-2.xml",
        class_name="NistschemaSvIvListNameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_3_nistxml_sv_iv_list_name_enumeration_4_3(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-4.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-4-3.xml",
        class_name="NistschemaSvIvListNameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_3_nistxml_sv_iv_list_name_enumeration_4_4(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-4.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-4-4.xml",
        class_name="NistschemaSvIvListNameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_3_nistxml_sv_iv_list_name_enumeration_4_5(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-4.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-4-5.xml",
        class_name="NistschemaSvIvListNameEnumeration4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
# Auto-generated NIST XML Schema conformance tests: type list/Name restricted
# by the enumeration facet, schema variant 3, instances 1-5.
def test_list_name_enumeration_2_nistxml_sv_iv_list_name_enumeration_3_1(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-3.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-3-1.xml",
        class_name="NistschemaSvIvListNameEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_2_nistxml_sv_iv_list_name_enumeration_3_2(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-3.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-3-2.xml",
        class_name="NistschemaSvIvListNameEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_2_nistxml_sv_iv_list_name_enumeration_3_3(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-3.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-3-3.xml",
        class_name="NistschemaSvIvListNameEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_2_nistxml_sv_iv_list_name_enumeration_3_4(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-3.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-3-4.xml",
        class_name="NistschemaSvIvListNameEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_2_nistxml_sv_iv_list_name_enumeration_3_5(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-3.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-3-5.xml",
        class_name="NistschemaSvIvListNameEnumeration3",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
# Auto-generated NIST XML Schema conformance tests: type list/Name restricted
# by the enumeration facet, schema variant 2, instances 1-5.
def test_list_name_enumeration_1_nistxml_sv_iv_list_name_enumeration_2_1(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-2.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-2-1.xml",
        class_name="NistschemaSvIvListNameEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_1_nistxml_sv_iv_list_name_enumeration_2_2(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-2.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-2-2.xml",
        class_name="NistschemaSvIvListNameEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_1_nistxml_sv_iv_list_name_enumeration_2_3(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-2.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-2-3.xml",
        class_name="NistschemaSvIvListNameEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_1_nistxml_sv_iv_list_name_enumeration_2_4(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-2.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-2-4.xml",
        class_name="NistschemaSvIvListNameEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_list_name_enumeration_1_nistxml_sv_iv_list_name_enumeration_2_5(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-2.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-2-5.xml",
        class_name="NistschemaSvIvListNameEnumeration2",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
# Auto-generated NIST XML Schema conformance test: type list/Name restricted
# by the enumeration facet, schema variant 1, instance 1.
def test_list_name_enumeration_nistxml_sv_iv_list_name_enumeration_1_1(mode, save_output, output_format):
    """
    Type list/Name is restricted by facet enumeration.
    """
    assert_bindings(
        schema="nistData/list/Name/Schema+Instance/NISTSchema-SV-IV-list-Name-enumeration-1.xsd",
        instance="nistData/list/Name/Schema+Instance/NISTXML-SV-IV-list-Name-enumeration-1-1.xml",
        class_name="NistschemaSvIvListNameEnumeration1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def test_list_name_enumeration_nistxml_sv_iv_list_name_enumeration_1_2(mode, save_output, output_format):
"""
| |
>>> G.in_edges(2, 'all')
(tensor([0, 1]), tensor([2, 2]), tensor([1, 2]))
>>> G.in_edges(2, 'eid')
tensor([1, 2])
For multiple nodes:
>>> G.in_edges([1, 2])
(tensor([0, 0, 1]), tensor([1, 2, 2]))
>>> G.in_edges([1, 2], 'all')
(tensor([0, 0, 1]), tensor([1, 2, 2]), tensor([0, 1, 2]))
"""
v = utils.toindex(v)
src, dst, eid = self._graph.in_edges(v)
if form == 'all':
return (src.tousertensor(), dst.tousertensor(), eid.tousertensor())
elif form == 'uv':
return (src.tousertensor(), dst.tousertensor())
elif form == 'eid':
return eid.tousertensor()
else:
raise DGLError('Invalid form:', form)
def out_edges(self, v, form='uv'):
    """Return the outbound edges of the given node(s).

    Parameters
    ----------
    v : int, list, tensor
        The node(s) whose outgoing edges are queried.
    form : str, optional
        The return form. Currently support:
        - 'all' : a tuple (u, v, eid)
        - 'uv'  : a pair (u, v), default
        - 'eid' : one eid tensor

    Returns
    -------
    tuple of tensors or tensor
        For 'all', `(eu, ev, eid)` where `eid[i]` is the ID of the outbound
        edge from `eu[i]` to `ev[i]`; for 'uv', `(eu, ev)` where `ev[i]` is
        the destination of an outbound edge from `eu[i]`; for 'eid', the IDs
        of all edges leaving any of the nodes in `v`.

    Raises
    ------
    DGLError
        If `form` is not one of 'all', 'uv', 'eid'.

    Examples
    --------
    The following example uses PyTorch backend.

    >>> G = dgl.DGLGraph()
    >>> G.add_nodes(3)
    >>> G.add_edges([0, 0, 1], [1, 2, 2])  # (0, 1), (0, 2), (1, 2)
    >>> G.out_edges(0)
    (tensor([0, 0]), tensor([1, 2]))
    >>> G.out_edges(0, 'all')
    (tensor([0, 0]), tensor([1, 2]), tensor([0, 1]))
    >>> G.out_edges(0, 'eid')
    tensor([0, 1])
    >>> G.out_edges([0, 1])
    (tensor([0, 0, 1]), tensor([1, 2, 2]))
    """
    nodes = utils.toindex(v)
    src, dst, eid = self._graph.out_edges(nodes)
    # Guard-clause dispatch on the requested return form.
    if form == 'eid':
        return eid.tousertensor()
    if form == 'uv':
        return (src.tousertensor(), dst.tousertensor())
    if form == 'all':
        return (src.tousertensor(), dst.tousertensor(), eid.tousertensor())
    raise DGLError('Invalid form:', form)
def all_edges(self, form='uv', sorted=False):
    """Return all the edges of the graph.

    Parameters
    ----------
    form : str, optional
        The return form. Currently support:
        - 'all' : a tuple (u, v, eid)
        - 'uv'  : a pair (u, v), default
        - 'eid' : one eid tensor
    sorted : bool
        True if the returned edges are sorted by their src and dst ids.
        (The name shadows the builtin `sorted`; kept for API compatibility.)

    Returns
    -------
    tuple of tensors or tensor
        For 'all', `(u, v, eid)` where `eid[i]` is the ID of the edge
        between `u[i]` and `v[i]`; for 'uv', `(u, v)` — a pair appearing
        `n` times if `n` parallel edges exist; for 'eid', all edge IDs.

    Raises
    ------
    DGLError
        If `form` is not one of 'all', 'uv', 'eid'.

    Examples
    --------
    The following example uses PyTorch backend.

    >>> G = dgl.DGLGraph()
    >>> G.add_nodes(3)
    >>> G.add_edges([0, 0, 1], [1, 2, 2])  # (0, 1), (0, 2), (1, 2)
    >>> G.all_edges()
    (tensor([0, 0, 1]), tensor([1, 2, 2]))
    >>> G.all_edges('all')
    (tensor([0, 0, 1]), tensor([1, 2, 2]), tensor([0, 1, 2]))
    """
    src, dst, eid = self._graph.edges(sorted)
    # Guard-clause dispatch on the requested return form.
    if form == 'eid':
        return eid.tousertensor()
    if form == 'uv':
        return (src.tousertensor(), dst.tousertensor())
    if form == 'all':
        return (src.tousertensor(), dst.tousertensor(), eid.tousertensor())
    raise DGLError('Invalid form:', form)
def in_degree(self, v):
    """Return the in-degree of node `v`.

    Thin wrapper delegating to the backing graph index.

    Parameters
    ----------
    v : int
        The node ID.

    Returns
    -------
    int
        The in-degree.

    Examples
    --------
    >>> G = dgl.DGLGraph()
    >>> G.add_nodes(3)
    >>> G.add_edges([0, 0, 1], [1, 2, 2])  # (0, 1), (0, 2), (1, 2)
    >>> G.in_degree(2)
    2

    See Also
    --------
    in_degrees
    """
    return self._graph.in_degree(v)
def in_degrees(self, v=ALL):
    """Return the array `d` of in-degrees of the node array `v`.

    `d[i]` is the in-degree of node `v[i]`.

    Parameters
    ----------
    v : list, tensor, optional
        The node ID array. Default is to return the degrees of all nodes.

    Returns
    -------
    d : tensor
        The in-degree array.

    Examples
    --------
    The following example uses PyTorch backend.

    >>> G = dgl.DGLGraph()
    >>> G.add_nodes(3)
    >>> G.add_edges([0, 0, 1], [1, 2, 2])  # (0, 1), (0, 2), (1, 2)
    >>> G.in_degrees([1, 2])
    tensor([1, 2])

    See Also
    --------
    in_degree
    """
    # ALL is expanded to the full node-id range before the index lookup.
    index = utils.toindex(slice(0, self.number_of_nodes()) if is_all(v) else v)
    return self._graph.in_degrees(index).tousertensor()
def out_degree(self, v):
    """Return the out-degree of node `v`.

    Thin wrapper delegating to the backing graph index.

    Parameters
    ----------
    v : int
        The node ID.

    Returns
    -------
    int
        The out-degree.

    Examples
    --------
    >>> G = dgl.DGLGraph()
    >>> G.add_nodes(3)
    >>> G.add_edges([0, 0, 1], [1, 2, 2])  # (0, 1), (0, 2), (1, 2)
    >>> G.out_degree(0)
    2

    See Also
    --------
    out_degrees
    """
    return self._graph.out_degree(v)
def out_degrees(self, v=ALL):
    """Return the array `d` of out-degrees of the node array `v`.

    `d[i]` is the out-degree of node `v[i]`.

    Parameters
    ----------
    v : list, tensor
        The node ID array. Default is to return the degrees of all nodes.

    Returns
    -------
    d : tensor
        The out-degree array.

    Examples
    --------
    The following example uses PyTorch backend.

    >>> G = dgl.DGLGraph()
    >>> G.add_nodes(3)
    >>> G.add_edges([0, 0, 1], [1, 2, 2])  # (0, 1), (0, 2), (1, 2)
    >>> G.out_degrees([0, 1])
    tensor([2, 1])

    See Also
    --------
    out_degree
    """
    # ALL is expanded to the full node-id range before the index lookup.
    index = utils.toindex(slice(0, self.number_of_nodes()) if is_all(v) else v)
    return self._graph.out_degrees(index).tousertensor()
def to_networkx(self, node_attrs=None, edge_attrs=None):
    """Convert this graph to a networkx graph.

    The edge id will be saved as the 'id' edge attribute. Feature copying
    is not implemented yet, so `node_attrs`/`edge_attrs` are currently
    ignored and a warning is emitted.

    Parameters
    ----------
    node_attrs : iterable of str, optional
        The node attributes to be copied (currently unused).
    edge_attrs : iterable of str, optional
        The edge attributes to be copied (currently unused).

    Returns
    -------
    networkx.DiGraph
        The nx graph.
    """
    graph = self._graph.to_networkx()
    # TODO(minjie): copy node/edge features into nx attributes
    dgl_warning('to_networkx currently does not support converting'
                ' node/edge features automatically.')
    return graph
def from_networkx(self, nx_graph, node_attrs=None, edge_attrs=None):
    """Convert from networkx graph.

    If the 'id' edge attribute exists, edges are added following the edge
    id order. Otherwise, the order is undefined.

    Parameters
    ----------
    nx_graph : networkx.DiGraph
        The nx graph.
    node_attrs : iterable of str, optional
        The node attributes that need to be copied.
    edge_attrs : iterable of str, optional
        The edge attributes that need to be copied.
    """
    # Reset this graph first: existing structure and features are dropped,
    # then the frames are resized to match the imported topology.
    self.clear()
    self._graph.from_networkx(nx_graph)
    self._node_frame.add_rows(self.number_of_nodes())
    self._edge_frame.add_rows(self.number_of_edges())
    self._msg_graph.add_nodes(self._graph.number_of_nodes())
    # copy attributes
    def _batcher(lst):
        # Stack tensor-valued features along a new leading batch dim;
        # otherwise wrap the plain Python values into one tensor.
        if F.is_tensor(lst[0]):
            return F.cat([F.unsqueeze(x, 0) for x in lst], dim=0)
        else:
            return F.tensor(lst)
    if node_attrs is not None:
        attr_dict = {attr : [] for attr in node_attrs}
        for nid in range(self.number_of_nodes()):
            for attr in node_attrs:
                attr_dict[attr].append(nx_graph.nodes[nid][attr])
        for attr in node_attrs:
            self._node_frame[attr] = _batcher(attr_dict[attr])
    if edge_attrs is not None:
        attr_dict = {attr : [] for attr in edge_attrs}
        src, dst, _ = self._graph.edges()
        # NOTE(review): edge features are fetched via nx_graph.edges[u, v],
        # which is ambiguous for multigraphs with parallel edges — confirm
        # callers only pass simple DiGraphs here.
        for u, v in zip(src.tolist(), dst.tolist()):
            for attr in edge_attrs:
                attr_dict[attr].append(nx_graph.edges[u, v][attr])
        for attr in edge_attrs:
            self._edge_frame[attr] = _batcher(attr_dict[attr])
def from_scipy_sparse_matrix(self, a):
    """ Convert from scipy sparse matrix.

    Clears any existing graph data, rebuilds the structure from the
    adjacency matrix and resizes the feature frames to match.

    Parameters
    ----------
    a : scipy sparse matrix
        The graph's adjacency matrix
    """
    self.clear()
    self._graph.from_scipy_sparse_matrix(a)
    self._node_frame.add_rows(self.number_of_nodes())
    self._edge_frame.add_rows(self.number_of_edges())
    self._msg_graph.add_nodes(self._graph.number_of_nodes())
def node_attr_schemes(self):
    """Return the node feature schemes.

    Returns
    -------
    dict of str to schemes
        The schemes of node feature columns.
    """
    return self._node_frame.schemes
def edge_attr_schemes(self):
    """Return the edge feature schemes.

    Returns
    -------
    dict of str to schemes
        The schemes of edge feature columns.
    """
    return self._edge_frame.schemes
def set_n_initializer(self, initializer, field=None):
    """Set the initializer for empty node features.

    Initializer is a callable that returns a tensor given the shape, data type
    and device context.

    Parameters
    ----------
    initializer : callable
        The initializer.
    field : str, optional
        The feature field name. Default is to set an initializer for all the
        feature fields.

    See Also
    --------
    dgl.init.base_initializer
    """
    self._node_frame.set_initializer(initializer, field)
def set_e_initializer(self, initializer, field=None):
    """Set the initializer for empty edge features.

    Initializer is a callable that returns a tensor given the shape, data type
    and device context.

    Parameters
    ----------
    initializer : callable
        The initializer.
    field : str, optional
        The feature field name. Default is to set an initializer for all the
        feature fields.

    See Also
    --------
    dgl.init.base_initializer
    """
    self._edge_frame.set_initializer(initializer, field)
@property
def nodes(self):
    """Return a node view that can be used to set/get feature data."""
    return NodeView(self)
@property
def ndata(self):
"""Return the data view of all the nodes."""
| |
"""ACIL_GetImage is a module developed for the internal use of the Applied Chest Imaging Laboratory to download
cases stored in MAD server via ssh.
It works both in Unix/Mac/Windows, and it uses an internal SSH key created specifically for this purpose, so it
doesn't need that the user has an authorized SSH key installed.
First version: <NAME> (ACIL, <EMAIL>). Sept 2014"""
import os, sys
from __main__ import vtk, qt, ctk, slicer
from collections import OrderedDict
import subprocess

# Add the CIP common library to the path if it has not been loaded yet.
# The direct import works when CIP is already on the PythonPath; otherwise
# the except branch probes the two known on-disk layouts and retries.
try:
    from CIP.logic.SlicerUtil import SlicerUtil
except Exception as ex:
    currentpath = os.path.dirname(os.path.realpath(__file__))
    # We assume that CIP_Common is in the development structure
    path = os.path.normpath(currentpath + '/../../Scripted/CIP_Common')
    if not os.path.exists(path):
        # We assume that CIP is a subfolder (Slicer behaviour)
        path = os.path.normpath(currentpath + '/CIP')
    sys.path.append(path)
    print(("The following path was manually added to the PythonPath in CIP_GetImage: " + path))
    from CIP.logic.SlicerUtil import SlicerUtil
from CIP.logic import Util
import CIP.ui as CIPUI
class CIP_GetImage:
    """Load cases from a SSH server or other device.

    Slicer module entry point: it only fills in the module metadata shown in
    Slicer's module selector; the actual UI lives in CIP_GetImageWidget.
    """
    def __init__(self, parent):
        """Constructor for main class"""
        self.parent = parent
        #ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = "CIP GetImage"
        self.parent.categories = ["Chest Imaging Platform.Modules"]
        self.parent.dependencies = []
        self.parent.contributors = ["<NAME>", "Applied Chest Imaging Laboratory", "Brigham and Women's Hospital"]
        self.parent.helpText = "This is an internal module to load images from MAD repository via SSH"
        self.parent.acknowledgementText = SlicerUtil.ACIL_AcknowledgementText
class CIP_GetImageWidget:
    """Visual object (widget) for the CIP_GetImage module."""
    # Study ids. Convention: Descriptive text (key) / Name of the folder in the server
    studyIds = OrderedDict()
    studyIds["Study 1"] = "Study1"
    studyIds["Study 2"] = "Study2"
    studyIds["Other"] = "Other"
    # Image types. You can add as many as different volume types you have
    # Convention:
    #   Descriptive text (key)
    #   Files extension/suffix (example: "processed")
    imageTypes = OrderedDict()
    imageTypes["CT"] = ""  # Default. No extension
    imageTypes["CT Processed"] = "processed"  # "processed" suffix
    # Label maps types. Idem
    # Convention:
    #   Descriptive text (key)
    #   Checked by default
    #   Files extension (example: case_partialLungLabelMap.nrrd)
    labelMapTypes = OrderedDict()
    labelMapTypes["Partial Lung"] = (False, "_partialLungLabelMap")
    labelMapTypes["Body Composition"] = (False, "_bodyComposition")
    labelMapTypes["Body Composition (interactive)"] = (False, "_interactiveBodyComposition")
def __init__(self, parent=None):
    """Widget constructor.

    When no parent widget is supplied (module run stand-alone), a fresh
    qMRMLWidget is created, wired to the MRML scene, set up and shown;
    otherwise the given parent is reused as-is.
    """
    if parent:
        self.parent = parent
    else:
        self.parent = slicer.qMRMLWidget()
        self.parent.setLayout(qt.QVBoxLayout())
        self.parent.setMRMLScene(slicer.mrmlScene)
    self.layout = self.parent.layout()
    if not parent:
        self.setup()
        self.parent.show()
def setup(self):
    """Init the widget: build the whole UI (mandatory parameters section,
    optional parameters section, dev-only reload button) and wire signals."""
    self.modulePath = SlicerUtil.getModuleFolder("CIP_GetImage")
    self.resourcesPath = os.path.join(self.modulePath, "CIP_GetImage_Resources")
    self.StudyId = ""
    self.logic = CIP_GetImageLogic(self.modulePath)
    # Widget to load cases faster
    self.loadSaveDatabuttonsWidget = CIPUI.LoadSaveDataWidget(parentWidget=self.parent)
    self.loadSaveDatabuttonsWidget.setup(moduleName="CIP_GetImage")
    #
    # Obligatory parameters area
    #
    parametersCollapsibleButton = ctk.ctkCollapsibleButton()
    parametersCollapsibleButton.text = "Image data"
    self.layout.addWidget(parametersCollapsibleButton)
    parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
    # Study radio buttons
    label = qt.QLabel()
    label.text = "Select the study:"
    parametersFormLayout.addRow(label)
    self.rbgStudy=qt.QButtonGroup()
    for key in self.studyIds:
        rbStudyid = qt.QRadioButton(key)
        self.rbgStudy.addButton(rbStudyid)
        parametersFormLayout.addWidget(rbStudyid)
    # Free-text study id, only visible when "Other" is selected
    self.txtOtherStudy = qt.QLineEdit()
    self.txtOtherStudy.hide()
    parametersFormLayout.addWidget(self.txtOtherStudy)
    # Case id
    self.txtCaseId = qt.QLineEdit()
    parametersFormLayout.addRow("Case ID ", self.txtCaseId)
    # Image types
    label = qt.QLabel()
    label.text = "Select the images that you want to load:"
    parametersFormLayout.addRow(label)
    self.cbsImageTypes = []
    for key in self.imageTypes:
        check = qt.QCheckBox()
        check.checked = True
        check.setText(key)
        parametersFormLayout.addWidget(check)
        self.cbsImageTypes.append(check)
    # Label maps
    label = qt.QLabel()
    label.text = "Select the label maps that you want to load:"
    parametersFormLayout.addRow(label)
    # Labelmap types checkboxes
    self.cbsLabelMapTypes = []
    for key in self.labelMapTypes:
        check = qt.QCheckBox()
        check.setText(key)
        check.checked = self.labelMapTypes[key][0]
        parametersFormLayout.addWidget(check)
        self.cbsLabelMapTypes.append(check)
    # Load image Button
    self.downloadButton = qt.QPushButton("Download")
    self.downloadButton.toolTip = "Load the image"
    #self.downloadButton.enabled = False
    self.downloadButton.setStyleSheet("background-color: green; font-weight:bold; color:white" )
    parametersFormLayout.addRow(self.downloadButton)
    self.downloadButton.connect('clicked (bool)', self.onDownloadButton)
    # Information message
    self.lblDownloading = qt.QLabel()
    self.lblDownloading.text = "Downloading images. Please wait..."
    self.lblDownloading.hide()
    parametersFormLayout.addRow(self.lblDownloading)
    #
    # Optional Parameters
    #
    optionalParametersCollapsibleButton = ctk.ctkCollapsibleButton()
    optionalParametersCollapsibleButton.text = "Optional parameters"
    self.layout.addWidget(optionalParametersCollapsibleButton)
    optionalParametersFormLayout = qt.QFormLayout(optionalParametersCollapsibleButton)
    # Local storage (Slicer temporary path)
    self.localStoragePath = "{0}/CIP".format(slicer.app.temporaryPath)
    if not os.path.exists(self.localStoragePath):
        os.makedirs(self.localStoragePath)
        # Make sure that everybody has write permissions (sometimes there are problems because of umask)
        os.chmod(self.localStoragePath, 0o777)
    self.storagePathButton = ctk.ctkDirectoryButton()
    self.storagePathButton.directory = self.localStoragePath
    optionalParametersFormLayout.addRow("Local directory: ", self.storagePathButton)
    # Connection type (SSH, "normal")
    label = qt.QLabel()
    label.text = "Connection type:"
    optionalParametersFormLayout.addRow(label)
    self.rbgConnectionType=qt.QButtonGroup()
    self.rbSSH = qt.QRadioButton("SSH (secure connection)")
    self.rbSSH.setChecked(True)
    self.rbgConnectionType.addButton(self.rbSSH)
    optionalParametersFormLayout.addWidget(self.rbSSH)
    self.rbCP = qt.QRadioButton("Common")
    self.rbgConnectionType.addButton(self.rbCP)
    optionalParametersFormLayout.addWidget(self.rbCP)
    # SSH Server login
    self.txtServer = qt.QLineEdit()
    s = SlicerUtil.settingGetOrSetDefault("CIP_GetImage", "server", "This is your ssh user and server. Example: myuser@192.168.1.1")
    self.txtServer.text = s # This is your ssh user and server. Example: my<EMAIL>@192.168.1.1"
    optionalParametersFormLayout.addRow("Server:", self.txtServer)
    # Server root path
    self.txtServerpath = qt.QLineEdit()
    s = SlicerUtil.settingGetOrSetDefault("CIP_GetImage", "serverRootPath", "This is your root path to search for files. Ex: /Cases/Processed")
    self.txtServerpath.text = s # This is your root path to search for files. Ex: /Cases/Processed
    optionalParametersFormLayout.addRow("Server root path:", self.txtServerpath)
    # SSH Private key
    self.txtPrivateKeySSH = qt.QLineEdit()
    s = SlicerUtil.settingGetOrSetDefault("CIP_GetImage", "sshKey", "")
    self.txtPrivateKeySSH.text = s # this is the full path to your ssh key if you need it. Be aware of Unix/Windows comaptibility (hint: use os.path.join)
    # Please notice that you won't need a SSH key if your computer already has one locally installed"
    optionalParametersFormLayout.addRow("SSH private key (leave blank for computer's default): ", self.txtPrivateKeySSH)
    # Cache mode
    self.cbCacheMode = qt.QCheckBox("Cache mode activated")
    self.cbCacheMode.setChecked(True) # Cache mode is activated by default
    optionalParametersFormLayout.addRow("", self.cbCacheMode)
    # Clean cache Button
    self.cleanCacheButton = qt.QPushButton("Clean cache")
    self.cleanCacheButton.toolTip = "Remove all the local cached files"
    optionalParametersFormLayout.addRow(self.cleanCacheButton)
    optionalParametersCollapsibleButton.collapsed = True
    if SlicerUtil.IsDevelopment:
        # reload button (development builds only)
        self.reloadButton = qt.QPushButton("Reload (just development)")
        self.reloadButton.toolTip = "Reload this module (for development purposes)."
        self.reloadButton.name = "Reload"
        self.layout.addWidget(self.reloadButton)
        self.reloadButton.connect('clicked()', self.onReload)
    # Add vertical spacer
    self.layout.addStretch(1)
    # Connections
    self.rbgStudy.connect("buttonClicked (QAbstractButton*)", self.onRbStudyClicked)
    self.txtOtherStudy.connect("textEdited (QString)", self.onTxtOtherStudyEdited)
    self.rbgConnectionType.connect("buttonClicked (QAbstractButton*)", self.onRbgConnectionType)
    self.storagePathButton.connect("directorySelected(QString)", self.onTmpDirChanged)
    self.cleanCacheButton.connect('clicked (bool)', self.onCleanCacheButtonClicked)
def saveSettings(self):
    """Persist the current connection settings so they are reused in
    future sessions."""
    for key, value in (("sshKey", self.txtPrivateKeySSH.text),
                       ("server", self.txtServer.text),
                       ("serverRootPath", self.txtServerpath.text)):
        SlicerUtil.setSetting("CIP_GetImage", key, value)
def cleanup(self):
    # Persist connection settings when the widget is torn down.
    self.saveSettings()
#
# Events handling
#
def onDownloadButton(self):
    """Click in download button.

    Validates that both a study and a case id were provided, then asks the
    logic object to download the selected image types / label maps.
    """
    # Check if there is a Study and Case introduced
    self.CaseId = self.txtCaseId.text.strip()
    if self.CaseId and self.StudyId:
        self.lblDownloading.show()
        slicer.app.processEvents()
        # Get the selected image types and label maps
        imageTypes = [self.imageTypes[cb.text] for cb in [check for check in self.cbsImageTypes if check.isChecked()]]
        labelMapExtensions = [self.labelMapTypes[cb.text] for cb in [check for check in self.cbsLabelMapTypes if check.isChecked()]]
        result = self.logic.loadCase(self.txtServer.text, self.txtServerpath.text, self.StudyId, self.txtCaseId.text, imageTypes, labelMapExtensions, self.localStoragePath, self.cbCacheMode.checkState(), self.rbSSH.isChecked(), self.txtPrivateKeySSH.text)
        self.lblDownloading.hide()
        if (result == Util.ERROR):
            self.msgBox = qt.QMessageBox(qt.QMessageBox.Warning, 'Error', "There was an error when downloading some of the images of this case. It is possible that some of the selected images where not available in the server. Please review the log console for more details.\nSuggested actions:\n-Empty cache\n-Restart Slicer")
            self.msgBox.show()
    else:
        # Show info message
        # NOTE(review): this `else` pairs with the study/case validation
        # above (the message text indicates so); the indentation in the
        # source was ambiguous — confirm against upstream.
        self.msgBox = qt.QMessageBox(qt.QMessageBox.Information, 'Attention', "Please make sure that you have selected a study and a case")
        self.msgBox.show()
def onRbStudyClicked(self, button):
    """Study radio buttons clicked (any of them)."""
    self.StudyId = self.studyIds[button.text]
    # "Other" reveals a free-text box whose content overrides the study id
    self.txtOtherStudy.visible = (button.text == "Other")
    if (self.txtOtherStudy.visible):
        self.StudyId = self.txtOtherStudy.text.strip()
    #self.checkDownloadButtonEnabled()
def onRbgConnectionType(self, button):
    """Connection-type radio toggled: the server and SSH-key fields are
    only meaningful for SSH connections."""
    self.txtServer.enabled = self.txtPrivateKeySSH.enabled = self.rbSSH.isChecked()
    #self.txtPrivateKeySSH.enabled = self.rbSSH.checked
def onTxtOtherStudyEdited(self, text):
    """Any letter typed in the "Other study" text box: it becomes the
    effective study id."""
    self.StudyId = text
    #self.checkDownloadButtonEnabled()
def onCleanCacheButtonClicked(self):
    """Clean cache button clicked. Remove all the files in the current local storage path directory"""
    import shutil
    # Remove directory
    shutil.rmtree(self.localStoragePath, ignore_errors=True)
    # Recreate it (this is a safe method for symbolic links)
    os.makedirs(self.localStoragePath)
    # Make sure that everybody has write permissions (sometimes there are problems because of umask)
    os.chmod(self.localStoragePath, 0o777)
def onTmpDirChanged(self, d):
    """Local storage directory selector changed; subsequent downloads go to `d`."""
    print(("Temp dir changed. New dir: " + d))
    self.localStoragePath = d
def onReload(self, moduleName="CIP_GetImage"):
"""Reload the module. Just for development purposes. This is a combination of the old and new style in modules writing"""
try:
slicer.util.reloadScriptedModule(moduleName)
except:
#Generic reload method for any scripted | |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 19:28:45 2018
@author: xingrongtech
"""
from quantities.quantity import Quantity
from .num import Num
from .const import Const
from .numitem import NumItem
from .lsym import LSym
from .system.unit_open import openUnit, closeUnit
from .system.format_units import format_units_unicode, format_units_latex
from .system.exceptions import itemNotSameLengthException, itemNotSameTypeException, itemNotSameKeysException, expressionInvalidException
class LSymItem():
    '''LSymItem is a LaTeX symbol-group class: it batch-computes a group of
    values and generates both the algebraic and the numeric LaTeX
    expressions for them.'''
    # Static switch controlling whether the algebraic expression is kept
    # separate from the numeric (calculation) expression.
    sepSymCalc = False
    __lsyms = []     # LSym members of the group (list, or dict when subscripts are given)
    __sepSym = None  # shared symbolic LSym used when sepSymCalc is True
    __index = 0      # iteration cursor
    __q = 1          # attached quantities unit
def __init__(self, sym, sNumItem, unit=None, subs=None):
'''初始化一个LSymItem符号组
【参数说明】
1.sym(str):符号组的符号。
2.sNumItem:符号组对应的数值数组。sNumItem可以是以下数据类型:
(1)NumItem:直接给出符号组对应的NumItem数组。
(2)str:对于还没有转换成Num的数值,将数值以空格隔开,表示成字符串表达式,以此生成符号组对应的NumItem数组。
(3)list<Num>:对于已经转换成Num的数值,将数值用list表示出,以此生成符号组对应的NumItem数组。
3.unit(可选,str):单位。当unit为None时,会选择list<Num>中第1个元素的unit,或者NumItem的unit作为LSymItem的unit,否则没有单位。默认unit=None。
4.subs(可选,str):符号组中每个符号的下标,以空格隔开。在LSymItem.sepSymCalc为False的前提下,当subs为None时,会按照0、1、2...给每个符号索引,1、2、3...给每个符号编号;给出subs时,按照subs中给出的编号给每个符号索引和编号。默认subs=None。'''
if sym == None and sNumItem == None:
return
if unit != None:
self.__q = Quantity(1., unit) if type(unit) == str else unit
if type(sNumItem) == str:
try:
sNumItem = NumItem(sNumItem)
except:
raise expressionInvalidException('用于创建符号组的数组部分的参数无效')
elif type(sNumItem) == list and type(sNumItem[0]) == Num:
if unit == None:
self.__q = sNumItem[0]._Num__q
elif type(sNumItem) == NumItem:
if unit == None:
self.__q = sNumItem._NumItem__arr[0]._Num__q
else:
raise expressionInvalidException('用于创建符号组的参数无效')
if unit != None:
for ni in sNumItem:
ni._Num__q = self.__q
if LSymItem.sepSymCalc: #将代数表达式与数值表达式分离
if subs == None: #未给出下标时,lsyms为list
self.__lsyms = [LSym(None, ni) for ni in sNumItem]
else: #给出下标时,lsyms为dict
subs = subs.split(' ')
if len(sNumItem) != len(subs):
raise itemNotSameLengthException('给出subs时,sNumItem和subs必须为等长列表')
self.__lsyms = {}
for i in range(len(sNumItem)):
self.__lsyms[subs[i]] = LSym(None, sNumItem[i])
self.__sepSym = LSym(sym, None)
else:
if subs == None: #未给出下标时,lsyms为list
self.__lsyms = [LSym('{' + sym + '}', ni) for ni in sNumItem]
for i in range(len(sNumItem)):
self.__lsyms[i]._LSym__symText += '_{' + str(i+1) + '}'
else: #给出下标时,lsyms为dict
subs = subs.split(' ')
if len(sNumItem) != len(subs):
raise itemNotSameLengthException('给出subs时,sNumItem和subs必须为等长列表')
self.__lsyms = {}
for i in range(len(sNumItem)):
self.__lsyms[subs[i]] = LSym('{%s}_{%s}' % (sym, subs[i]), sNumItem._NumItem__arr[i])
    def refreshSym(self, sym):
        '''Refresh the symbol of the group.

        After this call the algebraic expression is rebuilt from the new
        symbol and the calculation expression is rebuilt from each member's
        current numeric value, i.e. the group is re-initialized with the new
        symbol and the existing values.

        Args:
            sym (str): the new symbol.
        '''
        ##############################################
        def lsymSetCal(li):
            '''Set the calculation-related fields of one LSym.'''
            # NOTE(review): the Num class is detected by comparing
            # str(type(...)) against the class path, presumably to avoid a
            # circular import of analyticlab.num -- confirm before
            # refactoring this to isinstance().
            if type(li._LSym__sNum) == int or type(li._LSym__sNum) == float:
                li._LSym__calText = '%g' % li._LSym__sNum
            elif str(type(li._LSym__sNum)) == "<class 'analyticlab.num.Num'>":
                if li._LSym__sNum._Num__sciDigit() != 0:
                    li._LSym__calPrior = 2
                li._LSym__calText = '{' + li._LSym__sNum.dlatex() + '}'
            if li._LSym__sNum != None:  # for an original symbol the priority may change because of a negative value or scientific notation
                if li._LSym__sNum < 0:  # negative values get priority 0
                    li._LSym__calPrior = 0
                elif str(type(li._LSym__sNum)) == "<class 'analyticlab.num.Num'>" and li._LSym__sNum._Num__sciDigit() != 0:  # scientific notation gets priority 2
                    li._LSym__calPrior = 2
                else:
                    li._LSym__calPrior = 6
            else:
                li._LSym__calPrior = 6
        ##############################################
        if LSymItem.sepSymCalc:  # separated mode: update sepSym's symText and each lsym's calText
            self.__sepSym._LSym__symText = '{' + sym + '}'
            if type(self.__lsyms) == list:
                for li in self.__lsyms:
                    lsymSetCal(li)
            else:
                for ki in self.__lsyms.keys():
                    lsymSetCal(self.__lsyms[ki])
        else:  # non-separated mode: update both symText and calText of each lsym
            if type(self.__lsyms) == list:
                for i in range(len(self.__lsyms)):
                    self.__lsyms[i]._LSym__symText = '{%s}_{%d}' % (sym, i+1)
                    lsymSetCal(self.__lsyms[i])
            else:
                for ki in self.__lsyms.keys():
                    self.__lsyms[ki]._LSym__symText = '{%s}_{%s}' % (sym, ki)
                    lsymSetCal(self.__lsyms[ki])
def __newInstance(self):
return LSymItem(None, None)
def __qUpdate(self):
if type(self.__lsyms) == list:
for li in self.__lsyms:
li._LSym__sNum._Num__q = self.__q
else:
for li in self.__lsyms.values():
li._LSym__sNum._Num__q = self.__q
def __sepSymCalc(self):
return LSymItem.sepSymCalc
def __getitem__(self, index):
if type(index) == int or type(index) == str:
return self.__lsyms[index]
elif type(index) == slice:
new = LSymItem(None, None)
new.__lsyms = self.__lsyms[index]
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym
new.__q = self.__q
return new
def __len__(self):
return len(self.__lsyms)
def __next__(self):
if self.__index >= len(self.__lsyms):
self.__index = 0
raise StopIteration
else:
result = self.__lsyms[self.__index]
self.__index += 1
return result
    def __str__(self):
        '''Return a plain-text description of the LaTeX symbol group.'''
        if LSymItem.sepSymCalc:
            # Separated mode: one shared symbol mapped onto all the values.
            # NOTE(review): this branch iterates self.__lsyms directly, which
            # yields keys (not LSym objects) when lsyms is a dict -- confirm
            # separated groups with subscripts are ever printed.
            expr = '%s -> [%s]' % (self.__sepSym, ', '.join([li._LSym__calText for li in self.__lsyms]))
        else:
            expr = '['
            if type(self.__lsyms) == list:
                expr += ', '.join(['`%s`->`%s`' % (li._LSym__symText, li._LSym__calText) for li in self.__lsyms])
            else:
                expr += ', '.join(['%s: `%s`->`%s`' % (ki, self.__lsyms[ki]._LSym__symText, self.__lsyms[ki]._LSym__calText) for ki in self.__lsyms.keys()])
            expr += ']'
        # Append the unicode-rendered unit, if the group has one.
        unitExpr = format_units_unicode(self.__q)
        if unitExpr != '':
            expr += ' ' + unitExpr
        return expr
def __repr__(self):
return self.__str__()
    def _repr_latex_(self):
        '''IPython/Jupyter rich-display hook: render the group as LaTeX.'''
        if LSymItem.sepSymCalc:
            # Separated mode: shared symbol \to the list of numeric texts.
            # NOTE(review): iterates self.__lsyms directly, so a dict-based
            # group would yield keys instead of LSym objects here.
            expr = r'%s \to \left[%s\right]' % (self.__sepSym, ','.join([li._LSym__calText for li in self.__lsyms]))
        else:
            expr = r'\left['
            if type(self.__lsyms) == list:
                expr += ','.join([r'%s \to %s' % (li._LSym__symText, li._LSym__calText) for li in self.__lsyms])
            else:
                expr += ','.join([r'%s: %s \to %s' % (ki, self.__lsyms[ki]._LSym__symText, self.__lsyms[ki]._LSym__calText) for ki in self.__lsyms.keys()])
            expr += r'\right]'
        return '$%s%s$' % (expr, format_units_latex(self.__q))
def getSepSym(self):
'''当代数表达式与数值表达式分离时,用于获得分离出来的LaTeX符号。
【返回值】
LSym:分离出来的LaTeX符号。
'''
return self.__sepSym
def resetUnit(self, unit=None):
'''重设LSymItem符号组中各符号对应数值的单位
【参数说明】
unit(可选,str):重设后的单位。默认unit=None,即没有单位。'''
if self.__sNum != None:
if unit == None:
self.__q = 1
else:
self.__q = Quantity(1., unit) if type(unit) == str else unit
self.__qUpdate()
def __abs__(self):
new = LSymItem(None, None)
if type(self.__lsyms) == list:
new.__lsyms = [ni.__abs__() for ni in self.__lsyms]
else:
new.__lsyms = {ki: self.__lsyms[ki].__abs__() for ki in self.__lsyms.keys()}
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__abs__()
new.__q = self.__q
return new
def __neg__(self):
new = LSymItem(None, None)
if type(self.__lsyms) == list:
new.__lsyms = [ni.__neg__() for ni in self.__lsyms]
else:
new.__lsyms = {ki: self.__lsyms[ki].__neg__() for ki in self.__lsyms.keys()}
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__neg__()
new.__q = self.__q
return new
def __add__(self, obj):
new = LSymItem(None, None)
if type(obj) == LSymItem:
if len(self) != len(obj):
raise itemNotSameLengthException('进行符号组运算的两个符号组元素个数必须一致')
if type(self.__lsyms) != type(obj.__lsyms):
raise itemNotSameTypeException('进行符号组运算的两个符号组必须是同种类型')
if type(self.__lsyms) == list:
new.__lsyms = []
for i in range(len(self)):
new.__lsyms.append(self.__lsyms[i].__add__(obj.__lsyms[i]))
else:
try:
new.__lsyms = {ki: self.__lsyms[ki].__neg__() for ki in self.__lsyms.keys()}
for ki in self.__lsyms.keys():
new.__lsyms[ki] = self.__lsyms[ki].__add__(obj.__lsyms[ki])
except:
raise itemNotSameKeysException('进行符号组运算的两个符号组下标必须一致')
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__add__(obj.__sepSym)
else:
if type(self.__lsyms) == list:
new.__lsyms = [ni.__add__(obj) for ni in self.__lsyms]
else:
new.__lsyms = {ki: self.__lsyms[ki].__add__(obj) for ki in self.__lsyms.keys()}
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__add__(obj)
new.__q = self.__q
return new
def __radd__(self, obj):
new = LSymItem(None, None)
if type(obj) == LSymItem:
if len(self) != len(obj):
raise itemNotSameLengthException('进行符号组运算的两个符号组元素个数必须一致')
if type(self.__lsyms) != type(obj.__lsyms):
raise itemNotSameTypeException('进行符号组运算的两个符号组必须是同种类型')
if type(self.__lsyms) == list:
new.__lsyms = []
for i in range(len(self)):
new.__lsyms.append(self.__lsyms[i].__radd__(obj.__lsyms[i]))
else:
try:
new.__lsyms = {}
for ki in self.__lsyms.keys():
new.__lsyms[ki] = self.__lsyms[ki].__radd__(obj.__lsyms[ki])
except:
raise itemNotSameKeysException('进行符号组运算的两个符号组下标必须一致')
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__radd__(obj.__sepSym)
else:
if type(self.__lsyms) == list:
new.__lsyms = [ni.__radd__(obj) for ni in self.__lsyms]
else:
new.__lsyms = {ki: self.__lsyms[ki].__radd__(obj) for ki in self.__lsyms.keys()}
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__radd__(obj)
new.__q = self.__q
return new
def __sub__(self, obj):
new = LSymItem(None, None)
if type(obj) == LSymItem:
if len(self) != len(obj):
raise itemNotSameLengthException('进行符号组运算的两个符号组元素个数必须一致')
if type(self.__lsyms) != type(obj.__lsyms):
raise itemNotSameTypeException('进行符号组运算的两个符号组必须是同种类型')
if type(self.__lsyms) == list:
new.__lsyms = []
for i in range(len(self)):
new.__lsyms.append(self.__lsyms[i].__sub__(obj.__lsyms[i]))
else:
try:
new.__lsyms = {}
for ki in self.__lsyms.keys():
new.__lsyms[ki] = self.__lsyms[ki].__sub__(obj.__lsyms[ki])
except:
raise itemNotSameKeysException('进行符号组运算的两个符号组下标必须一致')
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__sub__(obj.__sepSym)
else:
if type(self.__lsyms) == list:
new.__lsyms = [ni.__sub__(obj) for ni in self.__lsyms]
else:
new.__lsyms = {ki: self.__lsyms[ki].__sub__(obj) for ki in self.__lsyms.keys()}
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__sub__(obj)
new.__q = self.__q
return new
def __rsub__(self, obj):
new = LSymItem(None, None)
if type(obj) == LSymItem:
if len(self) != len(obj):
raise itemNotSameLengthException('进行符号组运算的两个符号组元素个数必须一致')
if type(self.__lsyms) != type(obj.__lsyms):
raise itemNotSameTypeException('进行符号组运算的两个符号组必须是同种类型')
if type(self.__lsyms) == list:
new.__lsyms = []
for i in range(len(self)):
new.__lsyms.append(self.__lsyms[i].__rsub__(obj.__lsyms[i]))
else:
try:
new.__lsyms = {}
for ki in self.__lsyms.keys():
new.__lsyms[ki] = self.__lsyms[ki].__rsub__(obj.__lsyms[ki])
except:
raise itemNotSameKeysException('进行符号组运算的两个符号组下标必须一致')
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__rsub__(obj.__sepSym)
else:
if type(self.__lsyms) == list:
new.__lsyms = [ni.__rsub__(obj) for ni in self.__lsyms]
else:
new.__lsyms = {ki: self.__lsyms[ki].__rsub__(obj) for ki in self.__lsyms.keys()}
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__rsub__(obj)
new.__q = self.__q
return new
def __mul__(self, obj):
closeUnit()
new = LSymItem(None, None)
if type(obj) == LSymItem:
if len(self) != len(obj):
raise itemNotSameLengthException('进行符号组运算的两个符号组元素个数必须一致')
if type(self.__lsyms) != type(obj.__lsyms):
raise itemNotSameTypeException('进行符号组运算的两个符号组必须是同种类型')
if type(self.__lsyms) == list:
new.__lsyms = []
for i in range(len(self)):
new.__lsyms.append(self.__lsyms[i].__mul__(obj.__lsyms[i]))
else:
try:
new.__lsyms = {}
for ki in self.__lsyms.keys():
new.__lsyms[ki] = self.__lsyms[ki].__mul__(obj.__lsyms[ki])
except:
raise itemNotSameKeysException('进行符号组运算的两个符号组下标必须一致')
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__mul__(obj.__sepSym)
new.__q = self.__q * obj.__q
else:
if type(self.__lsyms) == list:
new.__lsyms = [ni.__mul__(obj) for ni in self.__lsyms]
else:
new.__lsyms = {ki: self.__lsyms[ki].__mul__(obj) for ki in self.__lsyms.keys()}
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__mul__(obj)
if type(obj) == LSym:
new.__q = self.__q * obj._LSym__sNum._Num__q
elif type(obj) == Const:
new.__q = self.__q * obj._Const__q
else:
new.__q = self.__q
new.__qUpdate()
openUnit()
return new
def __rmul__(self, obj):
closeUnit()
new = LSymItem(None, None)
if type(obj) == LSymItem:
if len(self) != len(obj):
raise itemNotSameLengthException('进行符号组运算的两个符号组元素个数必须一致')
if type(self.__lsyms) != type(obj.__lsyms):
raise itemNotSameTypeException('进行符号组运算的两个符号组必须是同种类型')
if type(self.__lsyms) == list:
new.__lsyms = []
for i in range(len(self)):
new.__lsyms.append(self.__lsyms[i].__rmul__(obj.__lsyms[i]))
else:
try:
new.__lsyms = {}
for ki in self.__lsyms.keys():
new.__lsyms[ki] = self.__lsyms[ki].__rmul__(obj.__lsyms[ki])
except:
raise itemNotSameKeysException('进行符号组运算的两个符号组下标必须一致')
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__rmul__(obj.__sepSym)
new.__q = obj.__q * self.__q
else:
if type(self.__lsyms) == list:
new.__lsyms = [ni.__rmul__(obj) for ni in self.__lsyms]
else:
new.__lsyms = {ki: self.__lsyms[ki].__rmul__(obj) for ki in self.__lsyms.keys()}
if LSymItem.sepSymCalc:
new.__sepSym = self.__sepSym.__rmul__(obj)
| |
# trainer/train_las_bmuf_otfaug.py
"""
LAS model/rescorer Training Script
"""
#system utils
import sys
import argparse
import importlib
import os
import os.path
import math
import numpy as np
#torch related
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from torch._six import inf
#supposed to be after kaldi import
from utils.logger import Logger
from kaldi.matrix import _matrix_ext, DoubleMatrix
from kaldi.util import io
from trainer.bmuf import BmufTrainer
# Let cuDNN auto-tune convolution algorithms (fastest for fixed-size inputs).
cudnn.benchmark = True
# NOTE(review): presumably the rank of the BMUF master worker -- confirm
# against BmufTrainer's usage.
MASTER_NODE = 0
class LASLossCompute():
    """
    Utility class used to calculate LAS model loss.

    Args:
        dec_proj (nn.Module): decoder projection layer
        enc_proj (nn.Module): encoder projection layer
        dec_loss_scale (float): decoder (cross-entropy) loss scale
        enc_loss_scale (float): encoder (CTC) loss scale
        padding_idx (int): padding index ignored by the decoder loss
    """
    def __init__(self, dec_proj, enc_proj,
                 dec_loss_scale=1.0, enc_loss_scale=0.0,
                 padding_idx=-1):
        super(LASLossCompute, self).__init__()
        self.padding_idx = padding_idx
        self.dec_loss_scale = dec_loss_scale
        self.enc_loss_scale = enc_loss_scale
        self.dec_proj = dec_proj
        self.enc_proj = enc_proj
        # FIX: size_average=False is a long-deprecated alias of
        # reduction='sum' (the documented legacy mapping); the replacement
        # is numerically identical.
        self.dec_criterion = nn.NLLLoss(ignore_index=padding_idx,
                                        reduction='sum')
        self.enc_criterion = nn.CTCLoss()
        # variables that need .backward() once self._shards() iteration ends
        self.variables = []
    def _compute_loss(self, output, target):
        """Summed NLL loss of the projected decoder output vs target."""
        # FIX: pass dim explicitly; for the bottled 2-D tensor the legacy
        # implicit default resolved to dim=1 == dim=-1, so this only
        # silences the deprecation warning.
        output = F.log_softmax(self.dec_proj(self._bottle(output)), dim=-1)
        target = target.contiguous().view(-1)
        loss = self.dec_criterion(output, target)
        return loss
    def _compute_ctc_loss(self, output, lens, target):
        """CTC loss of the projected encoder output against the label
        sequences extracted from target."""
        target = target.view(target.size(0), -1).transpose(0, 1)
        # prepare label and label_size for ctc loss
        # exclude padding_idx, SOS = 0 and EOS = 1
        # NOTE(review): the 'valid token' test is target < padding_idx, which
        # only works when padding_idx is larger than every real label id --
        # confirm against the caller's padding_idx.
        mask = torch.lt(target, self.padding_idx)
        mask = mask * torch.gt(target, 1)
        label = target[mask].cpu().int()
        mask = mask.cpu().int()
        label_size = torch.sum(mask.data, 1)
        # FIX: torch.autograd.Variable is a deprecated no-op wrapper;
        # plain tensors behave identically here.
        enc_loss = self.enc_criterion(output, label,
                                      torch.from_numpy(lens),
                                      label_size)
        return enc_loss
    def monolithic_compute_loss(self, output, enc_output,
                                enc_lens, target):
        """
        Compute the loss monolithically, not dividing into shards.

        Returns:
            (dec_loss, enc_loss) as python floats; no backward is run.
        """
        dec_loss = 0.0
        enc_loss = 0.0
        if self.dec_loss_scale > 0.0:
            # exclude SOS for target
            xent_loss = self.dec_loss_scale * self._compute_loss(output,
                                                                 target[1:])
            dec_loss += xent_loss.item()
        if self.enc_loss_scale > 0.0:
            l, b, _ = enc_output.size()
            enc_pout = self.enc_proj(self._bottle(enc_output)).view(l, b, -1)
            ctc_loss = self.enc_loss_scale * \
                    self._compute_ctc_loss(enc_pout, enc_lens, target)
            enc_loss += ctc_loss.item()
        return dec_loss, enc_loss
    def sharded_compute_loss(self, output, enc_output, enc_lens, target):
        """
        Compute the loss in shards for memory efficiency.

        backward() is called inside this method; gradients for the encoder
        part are stashed in self.variables for a later backward pass.
        """
        dec_loss = 0.0
        enc_loss = 0.0
        # compute sharded xent loss for decoder
        if self.dec_loss_scale > 0.0:
            # exclude SOS for target
            xent_loss = self.dec_loss_scale * self._compute_loss(output,
                                                                 target[1:])
            xent_loss.backward()
            dec_loss += xent_loss.item()
        # we can not really do sharded loss for the encoder
        # as CTC is a sequential loss, after the last shard
        # compute CTC loss monolithically, see self._shards
        if self.enc_loss_scale > 0.0:
            # perform encoder joint CTC training
            l, b, _ = enc_output.size()
            # FIX: Variable(..., volatile=False) is deprecated (modern torch
            # rejects the volatile kwarg); detach().requires_grad_(True) is
            # the equivalent modern spelling of "cut the graph here".
            enc_output_detached = enc_output.detach().requires_grad_(True)
            enc_pout = self.enc_proj(self._bottle(enc_output_detached)).view(l, b, -1)
            ctc_loss = self.enc_loss_scale * \
                    self._compute_ctc_loss(enc_pout,
                                           enc_lens,
                                           target)
            ctc_loss.backward()
            enc_loss += ctc_loss.item()
            self.variables.append((enc_output, enc_output_detached.grad.data))
        return dec_loss, enc_loss
    def _bottle(self, v):
        """Collapse (T, B, C) to (T*B, C) for per-token projection."""
        return v.view(-1, v.size(2))
###run one epoch of training/validation###
def run_one_epoch(epoch, model, log_f,
                  args, bmuf_trainer, training):
    """
    Run one epoch of training (or validation).

    Args:
        epoch (int): zero based epoch index
        model (torch.nn.Module): model
        log_f (file): logging file
        args: arguments from outer scope (command line plus derived fields)
        bmuf_trainer: initialized BmufTrainer used for periodic model sync
        training (bool): training or validation

    Returns:
        float: decoder loss per token aggregated across all workers,
               or NaN if a BMUF sync fails.
    """
    log_f.write('===> Epoch {} <===\n'.format(epoch))
    total_num_batches = args.num_epochs * args.num_batches_per_epoch
    num_batches_processed = epoch * args.num_batches_per_epoch
    # Exponential decay of the learning rate from initial_lr to final_lr
    # over the whole planned run.
    lr = args.initial_lr * math.exp(num_batches_processed *
                                    math.log(args.final_lr /
                                             args.initial_lr) /
                                    total_num_batches)
    log_f.write('===Using Learning Rate {}===\n'.format(lr))
    optimizer = optim.SGD(model.parameters(), lr,
                          momentum=args.momentum,
                          nesterov=True)
    dec_loss_logger = Logger(args.log, args.log_per_n_frames, ['DecLoss'])
    # set a large value to prevent progressive logging
    enc_loss_logger = Logger(args.log, 1e15, ['EncLoss'])
    las_loss = LASLossCompute(model.dec_proj,
                              model.enc_proj,
                              args.dec_loss_scale,
                              args.enc_loss_scale,
                              padding_idx=args.padding_idx)
    # for batchnorm and dropout
    if training:
        model.train()
        if args.shared_encoder is not None:
            # the frozen shared front-end always stays in eval mode
            args.shared_encoder.eval()
    else:
        model.eval()
        optimizer = None
    if args.sampling_decoder:
        # scheduled sampling: ramp sampling_prob up by 0.1 per epoch once
        # increase_sampling_prob_epoch is reached, capped at 0.4
        if epoch >= args.increase_sampling_prob_epoch:
            args.sampling_prob = args.sampling_prob + 0.1
            if args.sampling_prob > 0.4:
                args.sampling_prob = 0.4
        model.decoder.set_sampling_prob(args.sampling_prob)
    # enable decoder when forward/backward
    # used when doing encoder pretraining
    enable_decoder = args.dec_loss_scale > 0.0
    # enable encoder when forward/backward
    # used when doing decoder pretraining
    enable_encoder = not args.pretrain_decoder
    for num_done, (data_cpu, target_cpu, len_cpu, _) in \
        enumerate(args.dataloader(args.data_lst, args.rir,
                                  args.noise, args)):
        if training:
            optimizer.zero_grad()
        if data_cpu is not None:
            data_batch = data_cpu.cuda(args.local_rank)
            target_batch = target_cpu.long().cuda(args.local_rank)
            if args.cmvn_stats:
                # cmn: per-utterance mean subtraction, then global
                # offset/scale from the cmvn statistics
                data_batch -= data_batch.mean(dim=1).unsqueeze(dim=1)
                data_batch += args.offset.unsqueeze(dim=0).unsqueeze(dim=0)
                data_batch *= args.scale.unsqueeze(dim=0).unsqueeze(dim=0)
            if args.shared_encoder is not None:
                # shared encoder forward if any
                with torch.no_grad():
                    data_batch = args.shared_encoder(data_batch)
                # re-calculate lens due to shared encoder
                len_batch = len_cpu
                len_batch = len_batch - args.encoder_lctx - args.encoder_rctx
                len_batch = len_batch // args.encoder_stride + \
                    torch.ne((len_batch%args.encoder_stride), 0).int()
                # re-calculate lens due to model itself
                len_batch = len_batch - args.model_lctx - args.model_rctx
                len_batch = len_batch // args.model_stride + \
                    torch.ne((len_batch%args.model_stride), 0).int()
            # NOTE(review): len_batch is only assigned in the shared-encoder
            # branch above; if shared_encoder is None the forward below reads
            # an undefined name -- confirm callers always set shared_encoder.
            # B, T, C -> T, B, C
            data_batch.transpose_(0, 1)
            target_batch.transpose_(0, 1)
            target_batch = torch.unsqueeze(target_batch, 2)
            dec_state = None
            # forward pass
            outputs, _, dec_state, enc_outputs = \
                model.forward(data_batch, target_batch,
                              len_batch,
                              dec_state, enable_decoder,
                              enable_encoder)
            if training:
                # note that backward is
                # done inside function
                dec_loss, enc_loss = las_loss.sharded_compute_loss(outputs,
                                                                   enc_outputs,
                                                                   len_batch,
                                                                   target_batch)
                if args.grad_clip > 0:
                    # FIX: clip_grad_norm (no underscore) is the deprecated
                    # alias of clip_grad_norm_; arguments and the clipping
                    # performed are identical.
                    torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                   args.grad_clip,
                                                   norm_type=inf)
                optimizer.step()
            # NOTE(review): in validation mode (training False) dec_loss /
            # enc_loss are not computed for this batch, so the logging below
            # reuses stale values (or raises on the first batch) -- confirm
            # whether validation is ever routed through this function.
        else: # empty batch
            dec_loss, enc_loss = 0.0, 0.0
        if training:
            if num_done != 0 and num_done % args.sync_period == 0:
                # periodic BMUF model sync across workers
                if not bmuf_trainer.update_and_sync():
                    return float('nan')
                # recompute the decayed lr and rebuild the optimizer
                num_batches_processed = (epoch * args.num_batches_per_epoch
                                         + num_done)
                lr = args.initial_lr * math.exp(num_batches_processed *
                                                math.log(args.final_lr /
                                                         args.initial_lr) /
                                                total_num_batches)
                optimizer = optim.SGD(model.parameters(), lr,
                                      momentum=args.momentum,
                                      nesterov=True)
        # exclude padding idx
        if data_cpu is not None:
            tokens = torch.numel(torch.nonzero(torch.lt(target_batch.data,
                                                        args.padding_idx)))
            frames = len_batch.sum().item()
            dec_loss_logger.update_and_log(tokens, [dec_loss])
            enc_loss_logger.update_and_log(frames, [enc_loss])
    if training:
        # final sync for the remainder batches
        if not bmuf_trainer.update_and_sync():
            return float('nan')
        tot_loss, tot_num = dec_loss_logger.summarize_and_log()
        enc_loss_logger.summarize_and_log()
    else:
        tot_loss, tot_num = dec_loss_logger.summarize_and_log()
    # aggregate across workers
    loss_tensor = torch.FloatTensor([tot_loss, float(tot_num)])
    loss_tensor = loss_tensor.cuda(args.local_rank)
    bmuf_trainer.sum_reduce(loss_tensor)
    bmuf_trainer.broadcast(loss_tensor)
    reduced_loss = loss_tensor[0] / loss_tensor[1]
    return reduced_loss.item()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='LAS training')
parser.add_argument('nnet_proto', type=str,
help='pytorch NN proto definition filename')
parser.add_argument('data_lst', type=str,
help='list of mrk, seq, ali files for data')
parser.add_argument('log', type=str,
help='log file for the job')
parser.add_argument('output_dir', type=str,
help='path to save the final model')
parser.add_argument('--init_model', type=str, default=None,
help='initial model')
parser.add_argument('--shared_encoder_model', type=str, default=None,
help='initial model')
#Model Options
parser.add_argument('--encoder_type', type=str, default='rnn',
choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'],
help="""Type of encoder layer to use.""")
parser.add_argument('--decoder_type', type=str, default='rnn',
choices=['rnn', 'transformer', 'cnn'],
help='Type of decoder layer to use.')
parser.add_argument('--layers', type=int, default=-1,
help='Number of layers in enc/dec.')
parser.add_argument('--enc_layers', type=int, default=2,
help='Number of layers in the encoder')
parser.add_argument('--dec_layers', type=int, default=2,
help='Number of layers in the decoder')
parser.add_argument('--rnn_size', type=int, default=512,
help='Size of LSTM hidden states')
parser.add_argument('--input_feed', type=int, default=1,
help="""Feed the context vector at each time step as
additional input (via concatenation with the word
embeddings) to the decoder.""")
parser.add_argument('--input_feed_multihead', type=int, default=0,
help="""Feed the context vector at each time step as
additional input (via concatenation with the word
embeddings) to the decoder.""")
parser.add_argument('--num_heads', type=int, default=0,
help=""" Number of heads for multihead attention""")
parser.add_argument('--rnn_type', type=str, default='LSTM',
choices=['LSTM', 'GRU', 'SRU'],
help="""The gate type to use in the RNNs""")
parser.add_argument('--downsampler_type', type=str, default='rnn',
choices=['rnn', 'cnn', 'dnn'],
help="""type of downsampler""")
parser.add_argument('--use_downsampler', action='store_true',
help='enable downampler between encoder and decoder')
parser.add_argument('--downsampler_layers', type=int, default=1,
help="""number of downsampler layers""")
parser.add_argument('--downsampler_rate', type=int, default=2,
help="""downsampling rate""")
parser.add_argument('--sampling_decoder', action='store_true',
help='enable sampling decoder')
parser.add_argument('--sampling_prob', type=float, default=0.0,
help='sampling probability when sampling from'
' previous decoder output')
parser.add_argument('--embd_dim', type=int, default=300,
help='embeddings dimension for decoder')
parser.add_argument('--input_dim', type=int, default=300,
help='input dimension of neural network')
parser.add_argument('--output_dim', type=int, default=8000,
help='output dimension of neural network')
parser.add_argument('--encoder_lctx', type=int, default=0,
help='shared encoder left context')
parser.add_argument('--encoder_rctx', type=int, default=0,
help='shared encoder right context')
parser.add_argument('--encoder_stride', type=int, default=1,
help='shared encoder stride, ie., '
'subsampling in the shared encoder')
parser.add_argument('--model_lctx', type=int, default=0,
help='model left context')
parser.add_argument('--model_rctx', type=int, default=0,
help='model right context')
parser.add_argument('--model_stride', type=int, default=1,
help='model stride, ie., '
'subsampling in the model')
parser.add_argument('--brnn', action="store_true",
help="Deprecated, use `encoder_type`.")
parser.add_argument('--brnn_merge', default='concat',
choices=['concat', 'sum'],
help="Merge action for the bidir hidden states")
parser.add_argument('--context_gate', type=str, default=None,
choices=['source', 'target', 'both'],
help="""Type of context gate to use.
Do not select for no context gate.""")
parser.add_argument('--pretrain_decoder', action='store_true',
help='is current task decoder pretraing, ie LM training')
parser.add_argument('--cmn', action="store_true",
help="apply cepstrum mean normalizaiton per utterance")
parser.add_argument('--cmvn_stats', type=str, default=None,
help='cmvn_stats file')
# Attention options
parser.add_argument('--global_attention', type=str, default='mlp',
choices=['dot', 'general', 'mlp'],
help="""The attention type to use:
dotprot or general (Luong) or MLP (Bahdanau)""")
# Genenerator and loss options.
parser.add_argument('--copy_attn', action="store_true",
help='Train copy attention layer.')
parser.add_argument('--copy_attn_force', action="store_true",
help='When available, train to copy.')
parser.add_argument('--coverage_attn', action="store_true",
help='Train a coverage attention layer.')
parser.add_argument('--lambda_coverage', type=float, default=1,
help='Lambda value for coverage.')
parser.add_argument('--optim', type=str, default='sgd',
choices=['sgd', 'adam', 'adadelta'],
help="""optimizer to use """)
parser.add_argument('--grad_clip', type=float, default=-1.0,
help='gradient clipping threshold, valid when greater than zero')
parser.add_argument('--lr', type=float, default=1.0,
help='initial learning rate')
parser.add_argument('--initial_lr', type=float, default=1.0,
help='initial learning rate')
parser.add_argument('--final_lr', type=float, | |
are deleted. For more information, see Stopping an instance.
Args:
request: (ComputeInstancesStopRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Stop')
return self._RunMethod(
config, request, global_params=global_params)
  class LicensesService(base_api.BaseApiService):
    """Service class for the licenses resource."""
    # NOTE: auto-generated apitools wrapper; the method-config literals
    # below mirror the Compute API discovery document and must stay exact.
    _NAME = u'licenses'
    def __init__(self, client):
      super(ComputeAlpha.LicensesService, self).__init__(client)
      # Declarative per-method HTTP configuration, keyed by method name.
      self._method_configs = {
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.licenses.get',
              ordered_params=[u'project', u'license'],
              path_params=[u'license', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/licenses/{license}',
              request_field='',
              request_type_name=u'ComputeLicensesGetRequest',
              response_type_name=u'License',
              supports_download=False,
          ),
          }
      self._upload_configs = {
          }
    def Get(self, request, global_params=None):
      """Returns the specified license resource.

      Args:
        request: (ComputeLicensesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (License) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
  class MachineTypesService(base_api.BaseApiService):
    """Service class for the machineTypes resource."""
    # NOTE: auto-generated apitools wrapper; each _method_configs entry is a
    # declarative description of one REST call (verb, URL template, params)
    # taken from the Compute API discovery document -- keep literals exact.
    _NAME = u'machineTypes'
    def __init__(self, client):
      super(ComputeAlpha.MachineTypesService, self).__init__(client)
      self._method_configs = {
          'AggregatedList': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.machineTypes.aggregatedList',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/aggregated/machineTypes',
              request_field='',
              request_type_name=u'ComputeMachineTypesAggregatedListRequest',
              response_type_name=u'MachineTypeAggregatedList',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.machineTypes.get',
              ordered_params=[u'project', u'zone', u'machineType'],
              path_params=[u'machineType', u'project', u'zone'],
              query_params=[],
              relative_path=u'projects/{project}/zones/{zone}/machineTypes/{machineType}',
              request_field='',
              request_type_name=u'ComputeMachineTypesGetRequest',
              response_type_name=u'MachineType',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'compute.machineTypes.list',
              ordered_params=[u'project', u'zone'],
              path_params=[u'project', u'zone'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/zones/{zone}/machineTypes',
              request_field='',
              request_type_name=u'ComputeMachineTypesListRequest',
              response_type_name=u'MachineTypeList',
              supports_download=False,
          ),
          }
      self._upload_configs = {
          }
    def AggregatedList(self, request, global_params=None):
      """Retrieves the list of machine type resources grouped by scope.

      Args:
        request: (ComputeMachineTypesAggregatedListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (MachineTypeAggregatedList) The response message.
      """
      config = self.GetMethodConfig('AggregatedList')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Get(self, request, global_params=None):
      """Returns the specified machine type resource.

      Args:
        request: (ComputeMachineTypesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (MachineType) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    def List(self, request, global_params=None):
      """Retrieves the list of machine type resources available to the specified project.

      Args:
        request: (ComputeMachineTypesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (MachineTypeList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
class NetworksService(base_api.BaseApiService):
"""Service class for the networks resource."""
_NAME = u'networks'
def __init__(self, client):
super(ComputeAlpha.NetworksService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'compute.networks.delete',
ordered_params=[u'project', u'network'],
path_params=[u'network', u'project'],
query_params=[],
relative_path=u'projects/{project}/global/networks/{network}',
request_field='',
request_type_name=u'ComputeNetworksDeleteRequest',
response_type_name=u'Operation',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'compute.networks.get',
ordered_params=[u'project', u'network'],
path_params=[u'network', u'project'],
query_params=[],
relative_path=u'projects/{project}/global/networks/{network}',
request_field='',
request_type_name=u'ComputeNetworksGetRequest',
response_type_name=u'Network',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'compute.networks.insert',
ordered_params=[u'project'],
path_params=[u'project'],
query_params=[],
relative_path=u'projects/{project}/global/networks',
request_field=u'network',
request_type_name=u'ComputeNetworksInsertRequest',
response_type_name=u'Operation',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'compute.networks.list',
ordered_params=[u'project'],
path_params=[u'project'],
query_params=[u'filter', u'maxResults', u'pageToken'],
relative_path=u'projects/{project}/global/networks',
request_field='',
request_type_name=u'ComputeNetworksListRequest',
response_type_name=u'NetworkList',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Deletes the specified network resource.
Args:
request: (ComputeNetworksDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
    """Fetch the network named by the request.

    Args:
      request: (ComputeNetworksGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Network) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Get'), request, global_params=global_params)
def Insert(self, request, global_params=None):
    """Create a network in the project from the data in the request.

    Args:
      request: (ComputeNetworksInsertRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Insert'), request, global_params=global_params)
def List(self, request, global_params=None):
    """List the networks available to the project named by the request.

    Args:
      request: (ComputeNetworksListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (NetworkList) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('List'), request, global_params=global_params)
class ProjectsService(base_api.BaseApiService):
    """Service class for the projects resource."""

    _NAME = u'projects'

    def __init__(self, client):
        """Bind this service to *client* and register the generated
        wire-level metadata for each compute.projects method."""
        super(ComputeAlpha.ProjectsService, self).__init__(client)
        # Generated configuration -- values mirror the compute.projects
        # discovery document; do not edit by hand.
        self._method_configs = {
            'Get': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.projects.get',
                ordered_params=[u'project'],
                path_params=[u'project'],
                query_params=[],
                relative_path=u'projects/{project}',
                request_field='',
                request_type_name=u'ComputeProjectsGetRequest',
                response_type_name=u'Project',
                supports_download=False,
            ),
            'SetCommonInstanceMetadata': base_api.ApiMethodInfo(
                http_method=u'POST',
                method_id=u'compute.projects.setCommonInstanceMetadata',
                ordered_params=[u'project'],
                path_params=[u'project'],
                query_params=[],
                relative_path=u'projects/{project}/setCommonInstanceMetadata',
                # body comes from the request's 'metadata' field
                request_field=u'metadata',
                request_type_name=u'ComputeProjectsSetCommonInstanceMetadataRequest',
                response_type_name=u'Operation',
                supports_download=False,
            ),
            'SetUsageExportBucket': base_api.ApiMethodInfo(
                http_method=u'POST',
                method_id=u'compute.projects.setUsageExportBucket',
                ordered_params=[u'project'],
                path_params=[u'project'],
                query_params=[],
                relative_path=u'projects/{project}/setUsageExportBucket',
                request_field=u'usageExportLocation',
                request_type_name=u'ComputeProjectsSetUsageExportBucketRequest',
                response_type_name=u'Operation',
                supports_download=False,
            ),
            # Deprecated variant kept for backward compatibility; see the
            # SetUsageExportCloudStorageBucket docstring below.
            'SetUsageExportCloudStorageBucket': base_api.ApiMethodInfo(
                http_method=u'POST',
                method_id=u'compute.projects.setUsageExportCloudStorageBucket',
                ordered_params=[u'project'],
                path_params=[u'project'],
                query_params=[],
                relative_path=u'projects/{project}/setUsageExportCloudStorageBucket',
                request_field=u'usageExportLocation',
                request_type_name=u'ComputeProjectsSetUsageExportCloudStorageBucketRequest',
                response_type_name=u'Operation',
                supports_download=False,
            ),
        }
        # No resumable/media upload endpoints on this resource.
        self._upload_configs = {
        }

    def Get(self, request, global_params=None):
        """Returns the specified project resource.

        Args:
          request: (ComputeProjectsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments

        Returns:
          (Project) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    def SetCommonInstanceMetadata(self, request, global_params=None):
        """Sets metadata common to all instances within the specified project using the data included in the request.

        Args:
          request: (ComputeProjectsSetCommonInstanceMetadataRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments

        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('SetCommonInstanceMetadata')
        return self._RunMethod(
            config, request, global_params=global_params)

    def SetUsageExportBucket(self, request, global_params=None):
        """Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.

        Args:
          request: (ComputeProjectsSetUsageExportBucketRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments

        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('SetUsageExportBucket')
        return self._RunMethod(
            config, request, global_params=global_params)

    def SetUsageExportCloudStorageBucket(self, request, global_params=None):
        """[Deprecated] Use setUsageExportBucket instead.

        Args:
          request: (ComputeProjectsSetUsageExportCloudStorageBucketRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments

        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('SetUsageExportCloudStorageBucket')
        return self._RunMethod(
            config, request, global_params=global_params)
class RegionOperationsService(base_api.BaseApiService):
    """Service class for the regionOperations resource."""

    _NAME = u'regionOperations'

    def __init__(self, client):
        """Bind this service to *client* and register the generated
        wire-level metadata for each compute.regionOperations method."""
        super(ComputeAlpha.RegionOperationsService, self).__init__(client)
        # Generated configuration -- values mirror the compute.regionOperations
        # discovery document; do not edit by hand.
        self._method_configs = {
            'Delete': base_api.ApiMethodInfo(
                http_method=u'DELETE',
                method_id=u'compute.regionOperations.delete',
                ordered_params=[u'project', u'region', u'operation'],
                path_params=[u'operation', u'project', u'region'],
                query_params=[],
                relative_path=u'projects/{project}/regions/{region}/operations/{operation}',
                request_field='',
                request_type_name=u'ComputeRegionOperationsDeleteRequest',
                response_type_name=u'ComputeRegionOperationsDeleteResponse',
                supports_download=False,
            ),
            'Get': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.regionOperations.get',
                ordered_params=[u'project', u'region', u'operation'],
                path_params=[u'operation', u'project', u'region'],
                query_params=[],
                relative_path=u'projects/{project}/regions/{region}/operations/{operation}',
                request_field='',
                request_type_name=u'ComputeRegionOperationsGetRequest',
                response_type_name=u'Operation',
                supports_download=False,
            ),
            'List': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.regionOperations.list',
                ordered_params=[u'project', u'region'],
                path_params=[u'project', u'region'],
                query_params=[u'filter', u'maxResults', u'pageToken'],
                relative_path=u'projects/{project}/regions/{region}/operations',
                request_field='',
                request_type_name=u'ComputeRegionOperationsListRequest',
                response_type_name=u'OperationList',
                supports_download=False,
            ),
        }
        # No resumable/media upload endpoints on this resource.
        self._upload_configs = {
        }

    def Delete(self, request, global_params=None):
        """Deletes the specified region-specific operation resource.

        Args:
          request: (ComputeRegionOperationsDeleteRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments

        Returns:
          (ComputeRegionOperationsDeleteResponse) The response message.
        """
        config = self.GetMethodConfig('Delete')
        return self._RunMethod(
            config, request, global_params=global_params)

    def Get(self, request, global_params=None):
        """Retrieves the specified region-specific operation resource.

        Args:
          request: (ComputeRegionOperationsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments

        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    def List(self, request, global_params=None):
        """Retrieves the list of operation resources contained within the specified region.

        Args:
          request: (ComputeRegionOperationsListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments

        Returns:
          (OperationList) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)
class RegionsService(base_api.BaseApiService):
    """Service class for the regions resource."""

    _NAME = u'regions'

    def __init__(self, client):
        """Bind this service to *client* and register the generated
        wire-level metadata for each compute.regions method."""
        super(ComputeAlpha.RegionsService, self).__init__(client)
        # Generated configuration -- values mirror the compute.regions
        # discovery document; do not edit by hand.
        self._method_configs = {
            'Get': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.regions.get',
                ordered_params=[u'project', u'region'],
                path_params=[u'project', u'region'],
                query_params=[],
                relative_path=u'projects/{project}/regions/{region}',
                request_field='',
                request_type_name=u'ComputeRegionsGetRequest',
                response_type_name=u'Region',
                supports_download=False,
            ),
            'List': base_api.ApiMethodInfo(
                http_method=u'GET',
                method_id=u'compute.regions.list',
                ordered_params=[u'project'],
                path_params=[u'project'],
                query_params=[u'filter', u'maxResults', u'pageToken'],
                relative_path=u'projects/{project}/regions',
                request_field='',
                request_type_name=u'ComputeRegionsListRequest',
                response_type_name=u'RegionList',
                supports_download=False,
            ),
        }
        # No resumable/media upload endpoints on this resource.
        self._upload_configs = {
        }

    def Get(self, request, global_params=None):
        """Returns the specified region resource.

        Args:
          request: (ComputeRegionsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments

        Returns:
          (Region) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    def List(self, request, global_params=None):
        """Retrieves the list of region resources available to the specified project.

        Args:
          request: (ComputeRegionsListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments

        Returns:
          (RegionList) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)
class RoutesService(base_api.BaseApiService):
"""Service class for the routes resource."""
_NAME = u'routes'
def __init__(self, client):
    """Bind this service to *client* and register the wire-level metadata
    (HTTP verb, URL template, parameter lists, request/response type names)
    for every method of the compute.routes resource.
    """
    super(ComputeAlpha.RoutesService, self).__init__(client)
    # Generated configuration -- values mirror the compute.routes
    # discovery document; do not edit by hand.
    self._method_configs = {
        'Delete': base_api.ApiMethodInfo(
            http_method=u'DELETE',
            method_id=u'compute.routes.delete',
            ordered_params=[u'project', u'route'],
            path_params=[u'project', u'route'],
            query_params=[],
            relative_path=u'projects/{project}/global/routes/{route}',
            request_field='',
            request_type_name=u'ComputeRoutesDeleteRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'Get': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.routes.get',
            ordered_params=[u'project', u'route'],
            path_params=[u'project', u'route'],
            query_params=[],
            relative_path=u'projects/{project}/global/routes/{route}',
            request_field='',
            request_type_name=u'ComputeRoutesGetRequest',
            response_type_name=u'Route',
            supports_download=False,
        ),
        'Insert': base_api.ApiMethodInfo(
            http_method=u'POST',
            method_id=u'compute.routes.insert',
            ordered_params=[u'project'],
            path_params=[u'project'],
            query_params=[],
            relative_path=u'projects/{project}/global/routes',
            # Insert sends the Route payload from the request's
            # 'route' field as the HTTP body.
            request_field=u'route',
            request_type_name=u'ComputeRoutesInsertRequest',
            response_type_name=u'Operation',
            supports_download=False,
        ),
        'List': base_api.ApiMethodInfo(
            http_method=u'GET',
            method_id=u'compute.routes.list',
            ordered_params=[u'project'],
            path_params=[u'project'],
            query_params=[u'filter', u'maxResults', u'pageToken'],
            relative_path=u'projects/{project}/global/routes',
            request_field='',
            request_type_name=u'ComputeRoutesListRequest',
            response_type_name=u'RouteList',
            supports_download=False,
        ),
    }
    # No resumable/media upload endpoints on this resource.
    self._upload_configs = {
    }
def Delete(self, request, global_params=None):
    """Delete the route named by the request.

    Args:
      request: (ComputeRoutesDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Delete'), request, global_params=global_params)
def Get(self, request, global_params=None):
    """Fetch the route named by the request.

    Args:
      request: (ComputeRoutesGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Route) The response message.
    """
    return self._RunMethod(
        self.GetMethodConfig('Get'), request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a route resource in the specified project using the data included in the request.
Args:
request: (ComputeRoutesInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = | |
<reponame>diassor/CollectorCity-Market-Place<gh_stars>100-1000
import datetime
import logging
import random
from urlparse import urlparse
from django.db import transaction
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
import braintree
import subscriptions
from django.shortcuts import render_to_response
# Pick the braintree environment once at import time: production when the
# BRAINTREE_PRODUCTION flag is enabled in Django settings, sandbox otherwise.
# (The misspelled name "ENVIROMENT" is kept -- it is this module's public API.)
BRAINTREE_ENVIROMENT = (
    braintree.Environment.Production
    if getattr(settings, 'BRAINTREE_PRODUCTION', False)
    else braintree.Environment.Sandbox
)
class BraintreeGateway():
    """Gateway wrapper around the braintree SDK for one merchant account.

    Constructing an instance configures the (process-global) braintree SDK;
    the remaining methods are thin helpers over customers, credit cards,
    subscriptions, transactions and the transparent-redirect checkout flow,
    plus sandbox test-data helpers.
    """

    def __init__(self, merchant_id, public_key, private_key):
        # NOTE(review): braintree.Configuration is global state -- the most
        # recently constructed gateway determines the credentials used by
        # every subsequent braintree call in this process.
        braintree.Configuration.configure(
            BRAINTREE_ENVIROMENT,
            merchant_id,
            public_key,
            private_key
        )

    def __unicode__(self):
        return "BrainTree"

    def sandbox_get_valid_cc(self, card="Visa"):
        """Return a random braintree sandbox card number of the given brand
        that will be accepted by the sandbox gateway."""
        cc_numbers = {
            "Visa": ["4111111111111111", "4005519200000004", "4009348888881881", "4012000033330026", "4012000077777777", "4012888888881881", "4217651111111119", "4500600000000061"],
            "MasterCard": ["5555555555554444"],
            "American Express": ["378282246310005", "371449635398431"],
            "Discover": ["6011111111111117"],
        }
        return random.choice(cc_numbers[card])

    def sandbox_get_invalid_cc(self, card="Visa"):
        """Return the braintree sandbox card number of the given brand that
        is always rejected."""
        cc_numbers = {
            "Visa": "4222222222222",
            "MasterCard": "5105105105105100",
            "American Express": "378734493671000",
            "Discover": "6011000990139424",
        }
        return cc_numbers[card]

    def sandbox_get_amount(self, type="SUCCESS"):
        """Return an amount that triggers a given sandbox gateway response.

        Braintree sandbox behavior:
        * $0.01 - $1999.99 simulate a successful authorization
        * $2000.00 - $2046.00 (and $3000.00) decline with the matching
          processor response
        * $2047.00 - $2099.00 simulate the generic "Processor Declined"

        BUGFIX: random.randrange excludes its upper bound and the old SUCCESS
        range started at 0 (outside the documented $0.01+ window); ranges are
        now 1-1999, 2000-2046 and 2047-2099 inclusive.
        """
        if type == "SUCCESS":
            return float(random.randrange(1, 2000))
        elif type == "DECLINE":
            return float(random.randrange(2000, 2047))
        elif type == "PROCESSO_DECLINE":
            # NOTE: misspelled key kept -- callers pass this literal string.
            return float(random.randrange(2047, 2100))
        else:
            return float(15)

    def update_customer_shopname(self, customer_id, shop_id=None, shop_name=None):
        """Update a braintree customer's website (shop name) and/or the
        shop_id custom field.

        BUGFIX: the two guards were swapped -- shop_id used to gate the
        website update and shop_name used to gate the custom-field update,
        so passing only one of them updated nothing (or raised on the
        missing value).
        """
        data = {}
        if shop_name is not None:
            data["website"] = shop_name
        if shop_id is not None:
            data["custom_fields"] = {"shop_id": str(shop_id)}
        result = braintree.Customer.update(customer_id, data)
        return result

    def create_credit_card(self, customer_id, cc_number, cc_security_number, cc_expiration_date, street, city, state, zip):
        """Attach a new credit card with a US billing address to an existing
        braintree customer.

        BUGFIX: the billing_address dict contained the key
        "country_code_alpha2" twice ('country' then "US"); the duplicate is
        removed (behavior unchanged -- country is "US" either way).
        """
        country = "US"  # only US billing addresses are supported here
        result = braintree.CreditCard.create({
            "customer_id": customer_id,
            "number": cc_number,
            "cvv": cc_security_number,
            "expiration_date": cc_expiration_date,
            "billing_address": {
                "street_address": street,
                "extended_address": "-",
                "locality": city,
                "region": state,
                "postal_code": zip,
                "country_code_alpha2": country,
            }
        })
        return result

    def new_customer_credit_card(self, customer_id, cardholder_name, cc_number, cc_expiration_date, cc_security_number):
        """Attach a new, verified credit card to an existing customer and make
        it the customer's default payment method."""
        result = braintree.CreditCard.create({
            "customer_id": customer_id,
            "number": cc_number,
            "cvv": cc_security_number,
            "expiration_date": cc_expiration_date,
            "cardholder_name": cardholder_name,
            "options": {
                "make_default": True,
                "verify_card": True,
            }
        })
        return result

    def create_customer(self, first_name, last_name, email, cc_number, cc_expiration_date, cc_security_number, street, city, state, zip, shop_name, shop_id):
        """Create a braintree customer with a verified credit card, a US
        billing address and a shop_id custom field."""
        country = "US"
        extra = "-"
        result = braintree.Customer.create({
            "first_name": first_name,
            "last_name": last_name,
            "email": email,
            "website": shop_name,
            "credit_card": {
                "number": cc_number,
                "cvv": cc_security_number,
                "expiration_date": cc_expiration_date,
                "billing_address": {
                    "street_address": street,
                    "extended_address": extra,
                    "locality": city,
                    "region": state,
                    "postal_code": zip,
                    "country_code_alpha2": country,
                },
                "options": {
                    "verify_card": True,
                }
            },
            "custom_fields": {
                "shop_id": str(shop_id),
            }
        })
        return result

    def delete_customer(self, customer_id):
        """ Delete the customer with customer_id in braintree """
        result = braintree.Customer.delete(customer_id)
        return result.is_success

    def create_subscription(self, plan_id, token):
        """
        Create a new subscription with the token associated to an specific customer (previously registered).

        On a duplicate, braintree returns an ErrorResult with
        is_success == False and message == "Gateway Rejected: duplicate".

        @return: braintree.error_result.ErrorResult object | braintree.error_result.SuccessResult object
        """
        result = braintree.Subscription.create({
            "payment_method_token": token,
            "plan_id": plan_id,
        })
        return result

    def cancel_subscription(self, subscription_id):
        """Cancel an existing subscription."""
        result = braintree.Subscription.cancel(subscription_id)
        return result

    def change_subscription(self, subscription_id, new_plan_id):
        """ Change the customer subscription plan """
        # NOTE(review): price is hard-coded; presumably it should come from
        # the target plan -- confirm before relying on this.
        new_price = "145.00"
        result = braintree.Subscription.update(subscription_id, {"plan_id": new_plan_id, "price": new_price})
        return result

    def log_response(self, result):
        """ Process an API response """
        if result.is_success:
            logging.info("success! tx_id : %s" % result.transaction.id)
        elif result.transaction:
            logging.info("Error processing transaction:")
            logging.info("  code: " + result.transaction.processor_response_code)
            logging.info("  text: " + result.transaction.processor_response_text)
        else:
            for error in result.errors.deep_errors:
                logging.info("attribute: " + error.attribute)
                logging.info("code: " + error.code)
                logging.info("message: " + error.message)

    def charge_purchase(self, token, amount):
        """Charge *amount* against a stored payment method token.

        The transaction is only authorized here; see submit_for_settlement()
        to capture it.  braintree.Transaction.sale also accepts full
        credit_card / customer / billing / shipping dicts and an
        options.submit_for_settlement flag -- see the braintree docs.
        """
        result = braintree.Transaction.sale({
            "payment_method_token": token,
            "amount": amount,
        })
        return result

    def get_customer_details(self, customer_id):
        """ Return a customer Object with ID equals to customer_id """
        customer = braintree.Customer.find(customer_id)
        return customer

    def get_all_customers(self):
        """ Return all registered customers """
        collection = braintree.Customer.all()
        return [customer for customer in collection.items]

    def get_subscription_details(self, subscription_id):
        """ Get a Subscription Object """
        subscription = braintree.Subscription.find(subscription_id)
        return subscription

    def get_all_subscriptions(self):
        """ Get All Subscriptions (any status) """
        search_results = braintree.Subscription.search(
            braintree.SubscriptionSearch.status.in_list(
                braintree.Subscription.Status.Active,
                braintree.Subscription.Status.Canceled,
                braintree.Subscription.Status.Expired,
                braintree.Subscription.Status.PastDue,
                braintree.Subscription.Status.Pending))
        return [subscription for subscription in search_results.items]

    def get_active_subscriptions(self):
        """ Get All ACTIVE Subscriptions """
        search_results = braintree.Subscription.search([
            braintree.SubscriptionSearch.status == braintree.Subscription.Status.Active
        ])
        return [subscription for subscription in search_results.items]

    def get_past_due_subscriptions(self, days=2):
        """Return subscriptions that are *days* past due.

        BUGFIX: the old `if days is None` branches were byte-identical, so
        the conditional was dead code and has been collapsed.
        NOTE(review): the None branch probably intended to search without a
        days filter -- confirm before changing behavior.
        """
        search_results = braintree.Subscription.search([
            braintree.SubscriptionSearch.days_past_due == days
        ])
        return [subscription for subscription in search_results.items]

    def get_transaction_details(self, tx_id):
        """Return the braintree Transaction object for tx_id."""
        transaction = braintree.Transaction.find(tx_id)
        return transaction

    def is_submitted_for_settlement(self, tx_id):
        """True if the transaction is queued for settlement."""
        transaction = self.get_transaction_details(tx_id)
        return transaction.status == braintree.Transaction.Status.SubmittedForSettlement

    def is_settled(self, tx_id):
        """True if the transaction has settled."""
        transaction = self.get_transaction_details(tx_id)
        return transaction.status == braintree.Transaction.Status.Settled

    def is_authorized(self, tx_id):
        """True if the transaction is authorized but not yet captured."""
        transaction = self.get_transaction_details(tx_id)
        return transaction.status == braintree.Transaction.Status.Authorized

    def get_daily_transactions(self, day):
        """Return every transaction created on *day* (local gateway time)."""
        day_init = datetime.datetime(day.year, day.month, day.day, 0, 0, 0)
        day_end = datetime.datetime(day.year, day.month, day.day, 23, 59, 59)
        search_results = braintree.Transaction.search([
            braintree.TransactionSearch.created_at.between(day_init, day_end)
        ])
        result = [transaction for transaction in search_results.items]
        return result

    def get_transactions(self, day):
        """Return *day*'s transactions bucketed by outcome.

        Returns a dict with keys 'declined', 'failed', 'rejected' and
        'settled', each mapping to a list of Transaction objects whose
        corresponding status timestamp falls on *day*.
        """
        day_init = datetime.datetime(day.year, day.month, day.day, 0, 0, 0)
        day_end = datetime.datetime(day.year, day.month, day.day, 23, 59, 59)
        result = {}
        # ----------- Declined Transactions
        declined_results = braintree.Transaction.search([
            braintree.TransactionSearch.processor_declined_at.between(day_init, day_end),
        ])
        result['declined'] = [transaction for transaction in declined_results.items]
        # ----------- Failed Transactions
        failed_results = braintree.Transaction.search([
            braintree.TransactionSearch.failed_at.between(day_init, day_end),
        ])
        result['failed'] = [transaction for transaction in failed_results.items]
        # ----------- Rejected Transactions
        gateway_rejected_results = braintree.Transaction.search([
            braintree.TransactionSearch.gateway_rejected_at.between(day_init, day_end),
        ])
        result['rejected'] = [transaction for transaction in gateway_rejected_results.items]
        # ----------- Settled Transactions
        gateway_settled_results = braintree.Transaction.search([
            braintree.TransactionSearch.settled_at.between(day_init, day_end),
        ])
        result['settled'] = [transaction for transaction in gateway_settled_results.items]
        return result

    def get_expired_credit_cards(self):
        """Return the braintree collection of expired credit cards."""
        collection = braintree.CreditCard.expired()
        return collection

    def refund_transaction(self, tx_id):
        """Refund a (settled) transaction and return the refund result.

        BUGFIX: two dead expression statements (result.transaction.type /
        result.transaction.id) were removed; on a failed refund they raised
        AttributeError instead of letting the caller inspect the result.
        """
        transaction = self.get_transaction_details(tx_id)
        result = braintree.Transaction.refund(transaction.id)
        return result

    def render_button(self, cart, request):
        """Render the transparent-redirect payment form for *cart*.

        Returns an HTML <form> that posts the card details straight to
        braintree; braintree then redirects back to the braintree_confirm
        view with a signed query string.
        """
        import decimal
        context = decimal.Context(prec=20, rounding=decimal.ROUND_HALF_DOWN)
        decimal.setcontext(context)
        url = braintree.TransparentRedirect.url()
        #TODO: Replace this in production
        entry_point = request.build_absolute_uri(reverse("braintree_confirm"))
        amount = cart.total_with_taxes()
        logging.warn(amount)
        # braintree requires a two-decimal amount string
        amount = amount.quantize(decimal.Decimal('.01'))
        tr_data = braintree.Transaction.tr_data_for_sale({
            "transaction": {
                "type": "sale",
                "amount": str(amount),
            }
        }, entry_point)
        html = """
        <form action="%s" method="POST">
        <input type="hidden" name="tr_data" value="%s" />
        <label>Credit Card Number</label><input type="text" name="transaction[credit_card][number]" /><br/>
        <label>Expiration Date</label><input type="text" name="transaction[credit_card][expiration_date]" /><br/>
        <label>CVV</label><input type="text" name="transaction[credit_card][cvv]" /><br/>
        <button class="primaryAction small awesome" type="submit">Pay</button>
        </form>
        """ % (url, tr_data)
        logging.debug("---- BRAINTREE FORM ----- \n%s" % html)
        return html

    def confirm_purchase(self, query):
        """Confirm a transparent-redirect transaction from the redirect query string."""
        return braintree.TransparentRedirect.confirm(query)

    def submit_for_settlement(self, txn_id):
        """Capture a previously authorized transaction."""
        result = braintree.Transaction.submit_for_settlement(txn_id)
        return result
@transaction.commit_on_success
def confirm(request):
"""
Braintree will resend our form, and we should confirm resending the query (removing the leading ?)
http://example.com/path?http_status=200&id=vgqssrhqhxfhgrwz&hash=0c3c641f1de3ed1c732c54cab367355350603b28
"""
from payments.models import BraintreeShopSettings, BrainTreeTransaction
shop = request.shop
cart = request.cart
#### Verify Products Availability
if not cart.is_available():
request.flash['message'] = 'Items not longer available: '
for item in cart.items_not_availables():
request.flash['message'] += item.product.title
cart.remove_not_available_items()
return HttpResponseRedirect(reverse('my_shopping'))
query_string = "http_status=%s&id=%s&kind=%s&hash=%s" % (request.GET['http_status'], request.GET['id'], request.GET['kind'], request.GET['hash'])
braintree_settings = BraintreeShopSettings.objects.filter(shop = shop).get()
gw = BraintreeGateway(braintree_settings.merchant_id,
braintree_settings.public_key,
braintree_settings.private_key)
result = gw.confirm_purchase(query_string)
#Check if txn is authorized!
if result.is_success:
#TODO: At this point the transaction is authorized but NOT submitted for settlement,
#if we want, here we could do | |
<reponame>ace-ecosystem/ACE
# vim: sw=4:ts=4:et:cc=120
#
# Carbon Black Collector
#
import json
import datetime
import dateutil.parser
import logging
import saq
from saq import proxy
from saq.collectors import Collector, Submission
from saq.constants import *
from saq.error import report_exception
from saq.persistence import *
from saq.util import *
from saq.carbon_black import CBC_API
from cbapi.psc.threathunter import CbThreatHunterAPI
from cbapi.errors import ServerError, ClientError, ObjectNotFoundError
from cbinterface.psc.query import make_process_query
from cbinterface.psc.ubs import get_file_metadata, request_and_get_file
from cbinterface.helpers import get_os_independent_filepath
@persistant_property('last_end_time')
class CarbonBlackAlertCollector(Collector):
    """Collector for Carbon Black PSC Alerts.

    Periodically queries the CBC watchlist-alert API for OPEN alerts created
    since the last run, groups them by (watchlist, report, device) and
    submits each group to ACE as a single correlation Submission.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(service_config=saq.CONFIG['service_carbon_black_cloud_collector'],
                         workload_type='carbon_black',
                         *args, **kwargs)
        self.query_frequency = create_timedelta(self.service_config['query_frequency'])
        self.initial_range = create_timedelta(self.service_config['initial_range'])
        self.alert_queue = self.service_config.get('alert_queue', fallback=saq.constants.QUEUE_DEFAULT)

        # the alert API returns alerts on processes before the process data is
        # accessible via the process search API -- delay the query window to
        # give the data time to propagate for correlation
        self.time_delay = create_timedelta(self.service_config.get('query_time_delay', '00:05:00'))

        self.cb_url = self.service_config['url']
        self.token = self.service_config['token']
        self.org_key = self.service_config['org_key']
        self.cbapi = CbThreatHunterAPI(url=self.cb_url, token=self.token, org_key=self.org_key)
        # HACK: directly setting proxies as passing above reveals cbapi error
        self.cbapi.session.proxies = proxy.proxies()

        # only collect alerts from a specified list of watchlist IDs.
        # BUGFIX: "".split(',') returns [''] (not []), which made the
        # "no watchlists configured" guard unreachable and caused queries
        # with an empty watchlist id; blank entries are now filtered out.
        self.watchlist_id_list = [wid.strip() for wid
                                  in self.service_config.get('watchlist_id_list', "").split(',')
                                  if wid.strip()]

    def alert_search(self, start_time, end_time, watchlist_id):
        """Yield OPEN alerts for *watchlist_id* created in [start_time, end_time),
        paging through the CBC v6 alert search API in first_event_time order."""
        url = f"/appservices/v6/orgs/{self.cbapi.credentials.org_key}/alerts/watchlist/_search"
        criteria = {'watchlist_id': [watchlist_id],
                    'create_time': {'start': start_time.isoformat(), 'end': end_time.isoformat()},
                    'workflow': ["OPEN"]}
        sort = [{"field": "first_event_time", "order": "ASC"}]
        search_data = {"criteria": criteria, "rows": -1, "sort": sort}

        position = 0
        while True:
            search_data["start"] = position
            resp = self.cbapi.post_object(url, search_data)
            result = resp.json()
            total_results = result["num_found"]
            results = result.get("results", [])
            logging.info(f"got {len(results)+position} out of {total_results} total unorganized alerts.")
            # BUGFIX: if the API returns an empty page while num_found is
            # still ahead of position, the old loop never advanced and spun
            # forever; bail out instead.
            if not results:
                break
            for item in results:
                yield item
                position += 1
            if position >= total_results:
                break

    def execute_extended_collection(self):
        """Run one collection pass; return seconds until the next invocation."""
        try:
            self.collect_watchlist_alerts()
        except Exception as e:
            logging.error(f"unable to collect cbc watchlist alerts: {e}")
            report_exception()

        return self.query_frequency.total_seconds()

    def collect_watchlist_alerts(self):
        """Collect and submit watchlist alerts since the last recorded end time.

        Returns True on success, None when no watchlists are configured, and
        returns early (without persisting last_end_time) on service shutdown.
        """
        end_time = local_time() - self.time_delay
        start_time = self.last_end_time
        if start_time is None:
            start_time = end_time - self.initial_range

        if not self.watchlist_id_list:
            logging.error(f"for now, must specify watchlist IDs to collect alerts from.")
            return None

        logging.info(f"starting collection for watchlists: {self.watchlist_id_list}")

        # get all alerts and organize them by watchlist -> report -> device id
        alert_data_map = {}
        for watchlist_id in self.watchlist_id_list:
            logging.info(f"starting alert collection for watchlist {watchlist_id}")
            alert_data_map[watchlist_id] = {}
            for alert in self.alert_search(start_time, end_time, watchlist_id):
                # NOTE: Putting shutdown check here allows the service to be shutdown while processing alerts
                if self.service_shutdown_event.is_set():
                    return
                report_id = alert['report_id']
                device_id = alert['device_id']
                alert_data_map[watchlist_id].setdefault(report_id, {}) \
                                            .setdefault(device_id, []) \
                                            .append(alert)

            # submit the alerts - everything in the report_id/device_id grouped list
            for report_id in alert_data_map[watchlist_id].keys():
                for device_id in alert_data_map[watchlist_id][report_id].keys():
                    alert_data = alert_data_map[watchlist_id][report_id][device_id]
                    device_name = alert_data[0]["device_name"]
                    report_name = alert_data[0]['report_name']
                    # alerts are sorted in ascending first_event_time order,
                    # so the first alert carries the earliest event time
                    event_time = dateutil.parser.parse(alert_data[0]["first_event_time"])

                    observables = []
                    hostname = device_name[device_name.rfind('\\')+1:] if '\\' in device_name else device_name
                    observables.append({'type': F_HOSTNAME,
                                        'value': hostname})
                    for alert in alert_data:
                        # add weblink back to the CBC alert triage page
                        alert['weblink'] = f"{self.cb_url}/alerts?s[c][query_string][0]=alert_id%3A{alert['id']}"
                        observables.append({'type': F_FILE_NAME,
                                            'value': alert["process_name"]})
                        observables.append({'type': F_CBC_PROCESS_GUID,
                                            'value': alert["process_guid"],
                                            'time': event_time})
                        observables.append({'type': F_INDICATOR,
                                            'value': f"cbc:{alert['report_id']}/{alert.get('ioc_id')}"})
                        if alert.get("threat_cause_actor_sha256"):
                            observables.append({'type': F_SHA256,
                                                'value': alert["threat_cause_actor_sha256"]})
                        if alert.get("threat_cause_actor_md5"):
                            observables.append({'type': F_MD5,
                                                'value': alert["threat_cause_actor_md5"]})
                        if alert.get("device_internal_ip"):
                            observables.append({'type': F_IPV4,
                                                'value': alert["device_internal_ip"]})
                        if alert.get("device_external_ip"):
                            observables.append({'type': F_IPV4,
                                                'value': alert["device_external_ip"]})
                        if alert.get("device_username"):
                            username = alert["device_username"]
                            username = username[username.rfind('\\')+1:] if '\\' in username else username
                            observables.append({'type': F_USER,
                                                'value': username})

                    submission = Submission(
                        description=f"Carbon Black: {report_name} - {device_name} ({len(alert_data)})",
                        analysis_mode=ANALYSIS_MODE_CORRELATION,
                        tool='carbon_black',
                        tool_instance=self.cbapi.credentials.url,
                        type=ANALYSIS_TYPE_CARBON_BLACK,
                        event_time=event_time,
                        details=alert_data,
                        observables=observables,
                        tags=[],
                        files=[],
                        queue=self.alert_queue)
                    self.queue_submission(submission)

        self.last_end_time = end_time
        return True
@persistant_property('last_end_time')
class CarbonBlackCloudBinaryCollector(Collector):
"""Collector for Carbon Black UBS binaries.
This collector submits unsigned/unverified binaries seen
in "modloads" or process executions to ACE for analysis
    Files are stored according to their SHA256 hash and are not
    re-submitted for analysis if they already exist in storage.
"""
def __init__(self, *args, **kwargs):
super().__init__(service_config=saq.CONFIG['service_cbc_binary_collector'],
workload_type='carbon_black',
*args, **kwargs)
self.query_frequency = create_timedelta(self.service_config['query_frequency'])
self.initial_range = create_timedelta(self.service_config['initial_range'])
# the alert API returns alerts on processes before the process data is accessible via the process search API
# introducing this delay to give the data time to propagate for correlation
self.time_delay = create_timedelta(self.service_config.get('query_time_delay', '00:05:00'))
self.alert_queue = self.service_config.get('alert_queue', fallback=saq.constants.QUEUE_DEFAULT)
self.tracking_dir = os.path.join(saq.DATA_DIR, self.service_config['tracking_dir'])
self.modload_query = self.service_config.get('modload_query')
self.process_query = self.service_config.get('process_query')
def execute_extended_collection(self):
try:
self.collect_binaries()
except Exception as e:
logging.error(f"unable to collect cbc binaries: {e}")
report_exception()
return self.query_frequency.total_seconds()
def collect_binaries(self):
from dateutil.parser import parse as date_parse
if not CBC_API:
logging.critical("missing CBC API connection.")
return False
end_time = local_time() - self.time_delay
start_time = self.last_end_time
if start_time is None:
start_time = end_time - self.initial_range
# CB default timezone is GMT/UTC, same as ACE.
# hackery: remove TZ for avoiding org.apache.solr.common.SolrException: Invalid Date in Date Math String:'2021-04-28T16:00:00+00:00'
start_time = datetime.datetime.strptime(start_time.strftime("%Y-%m-%d %H:%M:%S"), "%Y-%m-%d %H:%M:%S")
end_time = datetime.datetime.strptime(end_time.strftime("%Y-%m-%d %H:%M:%S"), "%Y-%m-%d %H:%M:%S")
# make sure the storage structure exists
if not os.path.isdir(self.tracking_dir):
try:
os.makedirs(self.tracking_dir)
except Exception as e:
logging.error("unable to create directory {}: {}".format(self.tracking_dir, e))
if not self.modload_query and not self.process_query:
logging.error("No modload or process queries configured. There is nothing to do...")
return None
# map tracking unique binaries needing analysis
all_binaries = {}
# target unsigned DLLs loaded by the processes resulting from the modload_query
# NOTE: the modload_query is just a process query but the behavior is such that modloads are crawled for unsigned binaries
if self.modload_query:
# TODO: Refine the query in the docs.
# TODO: See if we can query the Events directly instead of crawling every modload event for every process result.
procs = None
try:
logging.info(f"making query='{self.modload_query}' between {start_time} and {end_time}")
procs = make_process_query(CBC_API, self.modload_query, start_time, end_time)
except Exception as e:
logging.error(f"problem querying CBC: {e}")
return False
if not procs:
logging.info(f"no results for query='{self.modload_query}' between {start_time} and {end_time}")
return None
logging.info(f"{len(procs)} results for query='{self.modload_query}' between {start_time} and {end_time}")
for p in procs:
logging.debug(f"getting suspect modloads from {p.get('process_guid')}")
try:
for ml in p.events(event_type="modload").and_(modload_publisher_state='FILE_SIGNATURE_STATE_NOT_SIGNED'):
if ml.get('modload_sha256') and ml.get('modload_sha256') not in all_binaries:
all_binaries[ml.get('modload_sha256')] = ml._info
except ServerError as e:
# XXX TODO create persistence to stop here and pick back up later so hashes don't get missed
logging.warning(f"problem collecting modload binary hashes for {p.get('process_guid')}. Can happen when the data set is very large.")
continue
else:
logging.debug(f"Modload query not defined.")
# get unsigned processes
if self.process_query:
procs = None
try:
logging.info(f"making query='{self.process_query}' between {start_time} and {end_time}")
procs = make_process_query(CBC_API, self.process_query, start_time, end_time)
except Exception as e:
logging.error(f"problem querying CBC: {e}")
return False
if not procs:
logging.info(f"no results for query='{self.process_query}' between {start_time} and {end_time}")
return None
logging.info(f"{len(procs)} results for query='{self.process_query}' between {start_time} and {end_time}")
for p in procs:
if p.get('process_sha256') and p.get('process_sha256') not in all_binaries:
logging.debug(f"adding suspect process identified by {p.get('process_guid')}")
all_binaries[p.get('process_sha256')] = p._info
else:
logging.debug(f"Process query not defined.")
logging.info(f"processing {len(all_binaries.keys())} binaries.")
# make sure sub-directories exist and
# remove files we already know about
skipped_count = 0
for sha256 in all_binaries.keys():
# make dirs be first 3 chars of sha256
subdir_path = os.path.join(self.tracking_dir, sha256[0:3])
if not os.path.exists(subdir_path):
try:
os.makedirs(subdir_path)
except Exception as e:
logging.error(f"unable to create directory {subdir_path}: {e}")
binary_data_path = os.path.join(subdir_path, f"{sha256}.json")
if os.path.exists(binary_data_path):
logging.debug(f"skipping already analyzed file: {binary_data_path}")
skipped_count += 1
continue
# get the file_path and file_name
file_path = all_binaries[sha256].get('modload_name')
if not file_path:
file_path = all_binaries[sha256].get('process_name')
file_name = get_os_independent_filepath(file_path).name or sha256
binary_path = os.path.join(subdir_path, file_name)
# get the binary and make the submission
downloaded = False
try:
downloaded = request_and_get_file(CBC_API, sha256, expiration_seconds=60, write_path=binary_path, compressed=False)
except Exception as e:
logging.error(f"problem downloading binary for {sha256}: {e}")
if downloaded:
ubs_file_data = None
try:
ubs_file_data = get_file_metadata(CBC_API, [sha256])
except Exception as e:
logging.error(f"failed to get metadata for {sha256}: {e}")
metadata = {'event_info': all_binaries[sha256],
'ubs': ubs_file_data}
event_time = all_binaries[sha256].get('event_timestamp')
if not event_time:
event_time = all_binaries[sha256].get('process_start_time')
event_time = date_parse(event_time)
observables = []
description = sha256
if file_path:
observables.append({'type': F_FILE_PATH,
'value': file_path})
description = file_path
process_guid = all_binaries[sha256].get('process_guid')
observables.append({'type': F_CBC_PROCESS_GUID,
'value': process_guid})
submission = Submission(
description = f"Carbon Black Binary: {description}",
analysis_mode = ANALYSIS_MODE_BINARY,
tool = 'carbon_black',
tool_instance = CBC_API.credentials.url,
type = 'cbc_binary',
event_time = event_time,
details = metadata,
observables = observables,
tags = [],
files = [binary_path],
queue = | |
if t not in tables.tags:
self.error('missing section: [tabletags-%s]' % t,halt=True)
if self.separator:
# Evaluate escape characters.
self.separator = literal_eval('"'+self.separator+'"')
#TODO: Move to class Tables
# Check global table parameters.
elif config.pagewidth is None:
self.error('missing [miscellaneous] entry: pagewidth')
elif config.pageunits is None:
self.error('missing [miscellaneous] entry: pageunits')
def validate_attributes(self):
"""Validate and parse table attributes."""
# Set defaults.
format = self.format
tags = self.tags
separator = self.separator
abswidth = float(config.pagewidth)
pcwidth = 100.0
for k,v in self.attributes.items():
if k == 'format':
if v not in self.FORMATS:
self.error('illegal %s=%s' % (k,v))
else:
format = v
elif k == 'tags':
if v not in tables.tags:
self.error('illegal %s=%s' % (k,v))
else:
tags = v
elif k == 'separator':
separator = v
elif k == 'width':
if not re.match(r'^\d{1,3}%$',v) or int(v[:-1]) > 100:
self.error('illegal %s=%s' % (k,v))
else:
abswidth = float(v[:-1])/100 * config.pagewidth
pcwidth = float(v[:-1])
# Calculate separator if it has not been specified.
if not separator:
separator = Table.SEPARATORS[format]
if format == 'csv':
if len(separator) > 1:
self.error('illegal csv separator=%s' % separator)
separator = ','
else:
if not is_re(separator):
self.error('illegal regular expression: separator=%s' %
separator)
self.parameters.format = format
self.parameters.tags = tags
self.parameters.separator = separator
self.abswidth = abswidth
self.pcwidth = pcwidth
def get_tags(self,params):
tags = self.get_param('tags',params)
assert(tags and tags in tables.tags)
return tables.tags[tags]
def get_style(self,prefix):
"""
Return the style dictionary whose name starts with 'prefix'.
"""
if prefix is None:
return None
names = self.styles.keys()
names.sort()
for name in names:
if name.startswith(prefix):
return self.styles[name]
else:
self.error('missing style: %s*' % prefix)
return None
    def parse_cols(self, cols, halign, valign):
        """
        Build list of column objects from table 'cols', 'halign' and 'valign'
        attributes.

        'cols' is either a plain column count or a comma separated list of
        column specs. Populates self.columns, resolves default alignments and
        validates/normalises the column widths (percent or proportional).
        """
        # [<multiplier>*][<align>][<width>][<style>]
        COLS_RE1 = r'^((?P<count>\d+)\*)?(?P<align>[<\^>.]{,3})?(?P<width>\d+%?)?(?P<style>[a-z]\w*)?$'
        # [<multiplier>*][<width>][<align>][<style>]
        COLS_RE2 = r'^((?P<count>\d+)\*)?(?P<width>\d+%?)?(?P<align>[<\^>.]{,3})?(?P<style>[a-z]\w*)?$'
        reo1 = re.compile(COLS_RE1)
        reo2 = re.compile(COLS_RE2)
        cols = str(cols)
        if re.match(r'^\d+$',cols):
            # Plain integer: that many default columns.
            for i in range(int(cols)):
                self.columns.append(Column())
        else:
            for col in re.split(r'\s*,\s*',cols):
                # Try the align-before-width form first, then width-before-align.
                mo = reo1.match(col)
                if not mo:
                    mo = reo2.match(col)
                if mo:
                    # A multiplier replicates the same spec 'count' times.
                    count = int(mo.groupdict().get('count') or 1)
                    for i in range(count):
                        self.columns.append(
                            Column(mo.group('width'), mo.group('align'),
                                   self.get_style(mo.group('style')))
                        )
                else:
                    self.error('illegal column spec: %s' % col,self.start)
        # Set column (and indirectly cell) default alignments.
        for col in self.columns:
            col.halign = col.halign or halign or document.attributes.get('halign') or 'left'
            col.valign = col.valign or valign or document.attributes.get('valign') or 'top'
        # Validate widths and calculate missing widths.
        # n: columns with an explicit width; percents: sum of '%' widths;
        # props: sum of proportional (unit-less) widths.
        n = 0; percents = 0; props = 0
        for col in self.columns:
            if col.width:
                if col.width[-1] == '%': percents += int(col.width[:-1])
                else: props += int(col.width)
                n += 1
        if percents > 0 and props > 0:
            self.error('mixed percent and proportional widths: %s'
                    % cols,self.start)
        pcunits = percents > 0
        # Fill in missing widths.
        if n < len(self.columns) and percents < 100:
            if pcunits:
                # Distribute the remaining percentage evenly over the
                # columns that have no explicit width.
                width = float(100 - percents)/float(len(self.columns) - n)
            else:
                width = 1
            for col in self.columns:
                if not col.width:
                    if pcunits:
                        col.width = str(int(width))+'%'
                        percents += width
                    else:
                        col.width = str(width)
                        props += width
        # Calculate column alignment and absolute and percent width values.
        percents = 0
        for col in self.columns:
            if pcunits:
                col.pcwidth = float(col.width[:-1])
            else:
                # Convert proportional width to a percentage of the total.
                col.pcwidth = (float(col.width)/props)*100
            col.abswidth = self.abswidth * (col.pcwidth/100)
            # Physical units get two decimals; pixel-like units are integral.
            if config.pageunits in ('cm','mm','in','em'):
                col.abswidth = '%.2f' % round(col.abswidth,2)
            else:
                col.abswidth = '%d' % round(col.abswidth)
            percents += col.pcwidth
            col.pcwidth = int(col.pcwidth)
        if round(percents) > 100:
            self.error('total width exceeds 100%%: %s' % cols,self.start)
        elif round(percents) < 100:
            self.error('total width less than 100%%: %s' % cols,self.start)
def build_colspecs(self):
"""
Generate column related substitution attributes.
"""
cols = []
i = 1
for col in self.columns:
colspec = self.get_tags(col.style).colspec
if colspec:
self.attributes['halign'] = col.halign
self.attributes['valign'] = col.valign
self.attributes['colabswidth'] = col.abswidth
self.attributes['colpcwidth'] = col.pcwidth
self.attributes['colnumber'] = str(i)
s = subs_attrs(colspec, self.attributes)
if not s:
message.warning('colspec dropped: contains undefined attribute')
else:
cols.append(s)
i += 1
if cols:
self.attributes['colspecs'] = writer.newline.join(cols)
def parse_rows(self, text):
"""
Parse the table source text into self.rows (a list of rows, each row
is a list of Cells.
"""
reserved = {} # Reserved cells generated by rowspans.
if self.parameters.format in ('psv','dsv'):
colcount = len(self.columns)
parsed_cells = self.parse_psv_dsv(text)
ri = 0 # Current row index 0..
ci = 0 # Column counter 0..colcount
row = []
i = 0
while True:
resv = reserved.get(ri) and reserved[ri].get(ci)
if resv:
# We have a cell generated by a previous row span so
# process it before continuing with the current parsed
# cell.
cell = resv
else:
if i >= len(parsed_cells):
break # No more parsed or reserved cells.
cell = parsed_cells[i]
i += 1
if cell.vspan > 1:
# Generate ensuing reserved cells spanned vertically by
# the current cell.
for j in range(1, cell.vspan):
if not ri+j in reserved:
reserved[ri+j] = {}
reserved[ri+j][ci] = cell.clone_reserve()
ci += cell.span
if ci <= colcount:
row.append(cell)
if ci >= colcount:
self.rows.append(row)
ri += 1
row = []
ci = 0
elif self.parameters.format == 'csv':
self.rows = self.parse_csv(text)
else:
assert True,'illegal table format'
# Check for empty rows containing only reserved (spanned) cells.
for ri,row in enumerate(self.rows):
empty = True
for cell in row:
if not cell.reserved:
empty = False
break
if empty:
message.warning('table row %d: empty spanned row' % (ri+1))
# Check that all row spans match.
for ri,row in enumerate(self.rows):
row_span = 0
for cell in row:
row_span += cell.span
if ri == 0:
header_span = row_span
if row_span < header_span:
message.warning('table row %d: does not span all columns' % (ri+1))
if row_span > header_span:
message.warning('table row %d: exceeds columns span' % (ri+1))
def subs_rows(self, rows, rowtype='body'):
"""
Return a string of output markup from a list of rows, each row
is a list of raw data text.
"""
tags = tables.tags[self.parameters.tags]
if rowtype == 'header':
rtag = tags.headrow
elif rowtype == 'footer':
rtag = tags.footrow
else:
rtag = tags.bodyrow
result = []
stag,etag = subs_tag(rtag,self.attributes)
for row in rows:
result.append(stag)
result += self.subs_row(row,rowtype)
result.append(etag)
return writer.newline.join(result)
    def subs_row(self, row, rowtype):
        """
        Substitute the list of Cells using the data tag.
        Returns a list of marked up table cell elements.

        'rowtype' is one of 'header', 'footer' or 'body' and selects the data
        tag and the cell style resolution rule.
        """
        result = []
        i = 0   # Current column index, advanced by each cell's span.
        for cell in row:
            if cell.reserved:
                # Skip vertically spanned placeholders.
                i += cell.span
                continue
            if i >= len(self.columns):
                break # Skip cells outside the header width.
            col = self.columns[i]
            # Expose the per-cell layout values to attribute substitution.
            self.attributes['halign'] = cell.halign or col.halign
            self.attributes['valign'] = cell.valign or col.valign
            self.attributes['colabswidth'] = col.abswidth
            self.attributes['colpcwidth'] = col.pcwidth
            self.attributes['colnumber'] = str(i+1)
            self.attributes['colspan'] = str(cell.span)
            self.attributes['colstart'] = self.attributes['colnumber']
            self.attributes['colend'] = str(i+cell.span)
            self.attributes['rowspan'] = str(cell.vspan)
            self.attributes['morerows'] = str(cell.vspan-1)
            # Fill missing column data with blanks.
            # NOTE(review): this branch looks unreachable -- the 'break'
            # above already guarantees i < len(self.columns) here.
            if i > len(self.columns) - 1:
                data = ''
            else:
                data = cell.data
            if rowtype == 'header':
                # Use table style unless overriden by cell style.
                colstyle = cell.style
            else:
                # If the cell style is not defined use the column style.
                colstyle = cell.style or col.style
            tags = self.get_tags(colstyle)
            presubs,postsubs = self.get_subs(colstyle)
            # Run the cell text through the pre-substitutions, the optional
            # filter command, then the post-substitutions.
            data = [data]
            data = Lex.subs(data, presubs)
            data = filter_lines(self.get_param('filter',colstyle),
                               data, self.attributes)
            data = Lex.subs(data, postsubs)
            if rowtype != 'header':
                ptag = tags.paragraph
                if ptag:
                    # Wrap each blank-line separated paragraph in the
                    # paragraph tag.
                    stag,etag = subs_tag(ptag,self.attributes)
                    text = '\n'.join(data).strip()
                    data = []
                    for para in re.split(r'\n{2,}',text):
                        data += dovetail_tags([stag],para.split('\n'),[etag])
            # Select the data tag matching the row type.
            if rowtype == 'header':
                dtag = tags.headdata
            elif rowtype == 'footer':
                dtag = tags.footdata
            else:
                dtag = tags.bodydata
            stag,etag = subs_tag(dtag,self.attributes)
            result = result + dovetail_tags([stag],data,[etag])
            i += cell.span
        return result
def parse_csv(self,text):
"""
Parse the table source text and return a list of rows, each row
is a list of Cells.
"""
import StringIO
import csv
rows = []
rdr = csv.reader(StringIO.StringIO('\r\n'.join(text)),
delimiter=self.parameters.separator, skipinitialspace=True)
try:
for row in rdr:
rows.append([Cell(data) for data in row])
except Exception:
self.error('csv parse error: %s' % row)
return rows
def parse_psv_dsv(self,text):
"""
Parse list of PSV or DSV table source text lines and return a list of
Cells.
"""
def append_cell(data, span_spec, op, align_spec, style):
op = op or '+'
if op == '*': # Cell multiplier.
span = Table.parse_span_spec(span_spec)[0]
for i in range(span):
cells.append(Cell(data, '1', align_spec, style))
elif op == '+': # Column spanner.
cells.append(Cell(data, span_spec, align_spec, style))
else:
self.error('illegal | |
"""
################################################################################
Parallel-and-asynchronous Stitching Script - Parallel version
Author: <NAME>
This script pulls the data generated through TissueCyte (or another microscope system) and
can perform image averaging correction on the images if requested, before calling ImageJ
from the command line to perform the stitching. You will need to have the plugin script
OverlapY.ijm installed in ImageJ in order for the difference in the X and Y overlap to be
registered. Otherwise the X overlap will be used for both.
The pipeline has been sped up in some areas by parallelising some functions.
Installation:
1) Navigate to the folder containing the parasyncstitchGM.py
2) Run 'pip install -r requirements.txt'
Instructions:
1) Run the script in a Python IDE (e.g. for Python 3 > exec(open('parasyncstitchicGM_v2.py').read()))
2) Fill in the parameters that you are asked for
Note: You can drag and drop folder paths (works on MacOS) or copy and paste the paths
Note: The temporary directory is required to speed up ImageJ loading of the files
Important updates:
06.03.19 - Updated the overlap and crop parameters to improve the image average result and
tiling artefacts.
11.03.19 - Included default values and parameter search from Mosaic file.
02.05.19 - Python 3 compatible
14.05.19 - Added slack integration
31.01.20 - Changed option for 16-bit output with improved scaling
05.02.20 - Removed 16-bit
05.02.20 - Added feathering to tile edge and stitching with max intensity
10.02.20 - Added average tile from flatfield imaging and uses that for correction
################################################################################
"""
import cv2, os, sys, warnings, time, glob, errno, subprocess, shutil, readline, re, tempfile, random
import numpy as np
import tifffile
from PIL import Image
from multiprocessing import Pool, cpu_count, Array, Manager
from functools import partial
from datetime import date
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
warnings.simplefilter('ignore', Image.DecompressionBombWarning)
Image.MAX_IMAGE_PIXELS = 1000000000
#=============================================================================================
# Slack notification
#=============================================================================================
#def slack_message(text, channel, username):
# from urllib import request, parse
# import json
#
# post = {
# "text": "{0}".format(text),
# "channel": "{0}".format(channel),
# "username": "{0}".format(username),
# "icon_url": "https://github.com/gm515/gm515.github.io/blob/master/Images/imperialstplogo.png?raw=true"
# }
#
# try:
# json_data = json.dumps(post)
# req = request.Request('https://hooks.slack.com/services/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',
# data=json_data.encode('ascii'),
# headers={'Content-Type': 'application/json'})
# resp = request.urlopen(req)
# except Exception as em:
# print("EXCEPTION: " + str(em))
#=============================================================================================
# Function to load images in parallel
#=============================================================================================
def load_tile(file, cropstart, cropend):
    """Load a single tile image, cropped and rotated ready for stitching.

    When 'file' carries the '_00' channel token (combine-all-channels mode),
    channels 01, 02 and 03 are loaded and merged with a pixel-wise maximum.
    Any read failure yields a zero tile of the cropped size so stitching can
    proceed past missing/corrupt tiles.
    """
    crop_box = (cropstart, cropstart+65, cropend, cropend+65)
    if '_00' in file:
        try:
            # BUG FIX: each channel now loads its own file; previously all
            # three variables read the '_01' image, making the maximum
            # projection a no-op.
            tileimage_ch1 = np.array(Image.fromarray(tifffile.imread(file.replace('_00', '_01'))).crop(crop_box).rotate(90))
            tileimage_ch2 = np.array(Image.fromarray(tifffile.imread(file.replace('_00', '_02'))).crop(crop_box).rotate(90))
            tileimage_ch3 = np.array(Image.fromarray(tifffile.imread(file.replace('_00', '_03'))).crop(crop_box).rotate(90))
            tileimage = np.maximum(np.maximum(tileimage_ch1, tileimage_ch2), tileimage_ch3)
        except (ValueError, IOError, OSError):
            tileimage = np.zeros((cropend-cropstart, cropend-cropstart))
    else:
        try:
            tileimage = np.array(Image.fromarray(tifffile.imread(file)).crop(crop_box).rotate(90))
        except (ValueError, IOError, OSError):
            tileimage = np.zeros((cropend-cropstart, cropend-cropstart))
    return tileimage
#=============================================================================================
# Function to check operating system
#=============================================================================================
# This check is to determine which file paths to use if run on the local Mac or Linux supercomputer
def get_platform():
    """Map sys.platform onto a friendly OS name used to pick tool paths.

    NOTE(review): modern Python reports Linux as 'linux' (not
    'linux1'/'linux2'), which is not in the table and is therefore returned
    unchanged; the __main__ block below compares against lowercase 'linux'
    and depends on that fall-through -- confirm before "fixing" the table.
    """
    friendly_names = {
        'linux1' : 'Linux',
        'linux2' : 'Linux',
        'darwin' : 'Mac',
        'win32'  : 'Windows',
    }
    # Unknown platforms fall through as the raw sys.platform string.
    return friendly_names.get(sys.platform, sys.platform)
#=============================================================================================
# Generate an intensity correction tile
#=============================================================================================
def generate_corr(tcpath, scanid, startsec, endsec):
    """Build a normalised intensity-correction tile for the scan.

    Samples up to 500 random tiles of the selected channel between
    'startsec' and 'endsec', averages them (treating zero pixels as
    missing), blurs and inverts the average into a multiplicative
    correction image, and feathers the tile edges so overlapping tiles
    blend smoothly. Returns the correction image as a float array.

    NOTE(review): reads the module-level 'channel' chosen in __main__;
    consider passing it in explicitly.
    """
    # Generate all possible section folder names in the scan directory.
    folderlist = []
    for section in range(startsec,endsec+1,1):
        if section <= 9:
            sectiontoken = '000'+str(section)
        elif section <= 99:
            sectiontoken = '00'+str(section)
        else:
            sectiontoken = '0'+str(section)
        folderlist.append(scanid+'-'+sectiontoken)
    # Collect every tile path for the requested channel.
    tilelist = []
    for folder in folderlist:
        tilelist += glob.glob(os.path.join(tcpath, folder, '*_0'+channel+'.tif'))
    average_num = 500
    if len(tilelist) < average_num:
        average_num = len(tilelist)
    print ('Generating correction tile from n='+str(average_num)+' of '+str(len(tilelist))+' total tiles.')
    tilelist = random.sample(tilelist, average_num)
    size = Image.open(tilelist[0]).size
    cropstart = int(round(0.014*size[0])) #0.0096 but leaves a bit too much on edge
    cropend = int(round(size[0]-cropstart+1))
    img_arr = np.array([np.array(Image.open(file).crop((cropstart, cropstart, cropend, cropend)).rotate(90)) for file in tilelist])
    img_arr = img_arr.astype(np.float32)
    # Average image, ignoring zero pixels (treated as missing data).
    img_arr_nan = img_arr
    img_arr_nan[img_arr_nan==0] = np.nan
    avgimage = np.nanmean(img_arr_nan, axis=0)
    # Blur the result to remove random artefacts.
    avgimage = cv2.GaussianBlur(avgimage, (1501,1501), 0)
    padsize = 144
    # Circular-arc falloff: 0 at the tile edge, 1 at 'padsize' pixels in.
    weight_func = lambda x : np.sqrt(padsize**2 - x**2)/padsize
    weight = np.flip(np.array([weight_func(x) for x in range(0, padsize+1)]))
    # Normalise the average image so all we need to do is multiply each tile
    # by the correction (returned).
    avgimage = avgimage/np.max(avgimage)
    avgimage = 2-avgimage
    # Feather the pad area by the weight.
    # BUG FIX: take a real snapshot -- 'copy = avgimage' merely aliased the
    # array, so rows already attenuated were attenuated again when the
    # columns were processed, double-darkening the corners.
    copy = avgimage.copy()
    for pos in np.flip(range(padsize+1)):
        avgimage[pos,:] = copy[pos,:]*weight[pos]
        avgimage[-pos,:] = copy[-pos,:]*weight[pos]
        avgimage[:,pos] = copy[:,pos]*weight[pos]
        avgimage[:,-pos] = copy[:,-pos]*weight[pos]
    return avgimage
#=============================================================================================
# Generate an intensity correction tile version 2 using flat field
#=============================================================================================
def generate_corr_v2(avg_tile_path):
    """Build a correction tile from a dedicated flat-field average image.

    Loads the pre-computed average tile, crops/rotates it to match the data
    tiles, blurs it, inverts it into a multiplicative correction and
    feathers the edges so overlapping tiles blend (see generate_corr).
    """
    avgimage = Image.open(avg_tile_path)
    size = avgimage.size
    cropstart = int(round(0.014*size[0])) #0.0096 but leaves a bit too much on edge
    cropend = int(round(size[0]-cropstart+1))
    avgimage = np.array(avgimage.crop((cropstart, cropstart, cropend, cropend)).rotate(90)).astype(np.float32)
    # Blur to suppress random artefacts before normalising.
    avgimage = cv2.GaussianBlur(avgimage, (201,201), 0)
    avgimage = avgimage/np.max(avgimage)
    avgimage = 2-avgimage
    padsize = 140
    # Circular-arc falloff: 0 at the tile edge, 1 at 'padsize' pixels in.
    weight_func = lambda x : np.sqrt(padsize**2 - x**2)/padsize
    weight = np.flip(np.array([weight_func(x) for x in range(0, padsize+1)]))
    # BUG FIX: snapshot the array before feathering; the old 'copy = avgimage'
    # aliased it, so the corner pixels were attenuated twice.
    copy = avgimage.copy()
    for pos in np.flip(range(padsize+1)):
        avgimage[pos,:] = copy[pos,:]*weight[pos]
        avgimage[-pos,:] = copy[-pos,:]*weight[pos]
        avgimage[:,pos] = copy[:,pos]*weight[pos]
        avgimage[:,-pos] = copy[:,-pos]*weight[pos]
    return avgimage
if __name__ == '__main__':
#=============================================================================================
# Function to check operating system
#=============================================================================================
if get_platform() == 'Mac':
imagejpath = '/Applications/Fiji.app/Contents/MacOS/ImageJ-macosx'
overlapypath = '"/Applications/Fiji.app/plugins/OverlapY.ijm"'
if get_platform() == 'linux':
imagejpath = '/opt/fiji/Fiji.app/ImageJ-linux64'
overlapypath = '"/opt/fiji/Fiji.app/plugins/OverlapY.ijm"'
if get_platform() == 'Windows':
imagejpath = 'fill in path to imagej executable'
overlapypath = '"fill in path to OverlapY.ijm"'
#=============================================================================================
# Input parameters
#=============================================================================================
print ('')
print ('------------------------------------------')
print (' Parameter Input ')
print ('------------------------------------------')
print ('')
print ('Fill in the following variables. To accept default value, leave response blank.')
print ('Please note this creates a temporary folder to hold images. You require at least 1 GB of free space.')
acknowledge = input('Press Enter to continue: ')
tcpath = input('Select TC data directory (drag-and-drop or type manually): ').rstrip()
startsec = input('Start section (default start): ')
endsec = input('End section (default end): ')
xoverlap = input('X overlap % (default 7.2): ')
yoverlap = input('Y overlap % (default.2): ')
channel = input('Channel to stitch (0 - combine all channels): ')
while not channel:
channel = input('Channel to stitch (0 - combine all channels): ')
avgcorr = input('Perform average correction? (y/n): ')
while avgcorr not in ('y', 'n'):
avgcorr = input('Perform average correction? (y/n): ')
convert = input('Perform additional downsize? (y/n): ')
while convert not in ('y', 'n'):
convert = input('Perform additional downsize? (y/n): ')
if convert == 'y':
downsize = input('Downsize amount (default 0.054 for 10 um/pixel): ')
# Handle default values
if not startsec:
startsec = 1
else:
startsec = int(float(startsec))
if endsec:
endsec = int(float(endsec))
if not xoverlap:
xoverlap = 7.2
else:
xoverlap = float(xoverlap)
if not yoverlap:
yoverlap = 7.2
else:
yoverlap = float(yoverlap)
if convert == 'y':
if not downsize:
downsize = 0.054
else:
downsize = float(downsize)
# Search the mosaic file for remaining parameters
mosaicfile = glob.glob(os.path.join(tcpath, 'Mosaic*.txt'))[0]
with open(mosaicfile, 'r') as f:
lines = f.readlines()
for line in lines:
if 'sections' in line:
trueendsec = int(re.split(':', line.rstrip())[-1])
if 'Sample ID' in line:
scanid = re.split(':', line.rstrip())[-1]
if 'mrows' in line:
xtiles = int(re.split(':', line.rstrip())[-1])
if 'mcolumns' in line:
ytiles = int(re.split(':', line.rstrip())[-1])
if 'layers' in line:
zlayers = int(re.split(':', line.rstrip())[-1])
if not endsec:
endsec = trueendsec
# Create stitch output folders
os.umask(0o000)
stitchpath = os.path.join(tcpath, scanid+'-Mosaic', 'Ch'+str(channel)+'_Stitched_Sections_'+str(date.today().strftime('%d_%m_%Y')))
try:
os.makedirs(stitchpath, 0o777)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if convert == 'y':
try:
os.makedirs(stitchpath+'_Downsized', 0o777)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# create temporary folder in home path
temppath = tempfile.mkdtemp(dir=os.path.expanduser('~'))
tilepath = temppath
crop = 0
filenamestruct = []
tstart = time.time()
zcount = ((startsec-1)*zlayers)+1
filenumber = 0
tilenumber = 0
lasttile = -1
tileimage = 0
corrtile = None
#=============================================================================================
# Stitching
#=============================================================================================
print ('')
print ('---------------------------------------------')
print (' Parasynchronous Stitching ')
print ('---------------------------------------------')
print ('')
tstart = time.time()
# Check that data exists
for section in range(startsec,endsec+1,1):
if section <= 9:
sectiontoken = '000'+str(section)
elif section <= 99:
sectiontoken = '00'+str(section)
else:
sectiontoken = '0'+str(section)
folder = scanid+'-'+sectiontoken
# Token variable hold
x = xtiles
y = ytiles
xstep = -1
for layer in range(1,zlayers+1,1):
completelayer = False
firsttile = xtiles*ytiles*((zlayers*(section-1))+layer-1)
lasttile = (xtiles*ytiles*((zlayers*(section-1))+layer))-1
# If last tile doesn't exist yet, wait for it
if glob.glob(os.path.join(tcpath, folder, '*-'+str(lasttile)+'_0*.tif')) == []:
while glob.glob(os.path.join(tcpath, folder, '*-'+str(lasttile)+'_0*.tif')) == []:
sys.stdout.write('\rLast tile not generated yet. Waiting.')
sys.stdout.flush()
time.sleep(3)
| |
# Licensed under MIT License.
# See LICENSE in the project root for license information.
"""Binary tree traversal.
Routines
--------
inorder_traverse(tree: `SupportedTreeType`, recursive: `bool`)
Perform in-order traversal.
preorder_traverse(tree: `SupportedTreeType`, recursive: `bool`)
Perform pre-order traversal.
postorder_traverse(tree: SupportedTreeType, recursive: `bool`)
Perform post-order traversal.
"""
# Time complexity for these traversals are all O(N).
# Space complexity for both recursive and non-recursive traversals:
# Average O(log N), worst case O(N), except for the level-order traversal,
# whose best case space complexity is O(1) (when each level has only one node) and
# the worst case O(N).
from typing import Any, Iterator, Union
from forest.binary_trees import avl_tree
from forest.binary_trees import binary_search_tree
from forest.binary_trees import binary_tree
# Alias for the supported node types. For type checking.
SupportedNodeType = Union[None, binary_search_tree.Node, avl_tree.Node]
"""Alias for the supported tree node types. For type checking."""
SupportedTreeType = Union[binary_search_tree.BinarySearchTree, avl_tree.AVLTree]
"""Alias for the supported tree types. For type checking."""
def inorder_traverse(
    tree: SupportedTreeType, recursive: bool = True
) -> binary_tree.Pairs:
    """Perform in-order traversal, a kind of depth-first traversal.

    Visits nodes in the order left subtree, current node, right subtree
    (LDR); for a binary search tree this yields keys in ascending order.

    Parameters
    ----------
    tree: `SupportedTreeType`
        An instance of the supported binary tree types.
    recursive: `bool`
        If True (default), use the recursive implementation; otherwise use
        the explicit-stack non-recursive one.

    Yields (as an Iterator)
    ------
    `Pairs`
        The next (key, data) pair in the in-order traversal.

    Examples
    --------
    >>> from forest.binary_trees import binary_search_tree
    >>> from forest.binary_trees import traversal
    >>> tree = binary_search_tree.BinarySearchTree()
    >>> tree.insert(key=23, data="23")
    >>> tree.insert(key=4, data="4")
    >>> tree.insert(key=30, data="30")
    >>> tree.insert(key=11, data="11")
    >>> tree.insert(key=7, data="7")
    >>> tree.insert(key=34, data="34")
    >>> tree.insert(key=20, data="20")
    >>> tree.insert(key=24, data="24")
    >>> tree.insert(key=22, data="22")
    >>> tree.insert(key=15, data="15")
    >>> tree.insert(key=1, data="1")
    >>> [item for item in traversal.inorder_traverse(tree)]
    [(1, '1'), (4, '4'), (7, '7'), (11, '11'), (15, '15'), (20, '20'),
    (22, '22'), (23, '23'), (24, '24'), (30, '30'), (34, '34')]
    """
    if not recursive:
        return _inorder_traverse_non_recursive(root=tree.root)
    return _inorder_traverse(node=tree.root)
def reverse_inorder_traverse(
    tree: SupportedTreeType, recursive: bool = True
) -> binary_tree.Pairs:
    """Perform reversed in-order traversal.

    Visits nodes in the order right subtree, current node, left subtree
    (RDL); for a binary search tree this yields keys in descending order.

    Parameters
    ----------
    tree : `SupportedTreeType`
        An instance of the supported binary tree types.
    recursive: `bool`
        If True (default), use the recursive implementation; otherwise use
        the explicit-stack non-recursive one.

    Yields
    ------
    `Pairs`
        The next (key, data) pair in the reversed in-order traversal.

    Examples
    --------
    >>> from forest.binary_trees import binary_search_tree
    >>> from forest.binary_trees import traversal
    >>> tree = binary_search_tree.BinarySearchTree()
    >>> tree.insert(key=23, data="23")
    >>> tree.insert(key=4, data="4")
    >>> tree.insert(key=30, data="30")
    >>> tree.insert(key=11, data="11")
    >>> tree.insert(key=7, data="7")
    >>> tree.insert(key=34, data="34")
    >>> tree.insert(key=20, data="20")
    >>> tree.insert(key=24, data="24")
    >>> tree.insert(key=22, data="22")
    >>> tree.insert(key=15, data="15")
    >>> tree.insert(key=1, data="1")
    >>> [item for item in traversal.reverse_inorder_traverse(tree)]
    [(34, '34'), (30, '30'), (24, '24'), (23, '23'), (22, '22'), (20, '20'),
    (15, '15'), (11, '11'), (7, '7'), (4, '4'), (1, '1')]
    """
    if not recursive:
        return _reverse_inorder_traverse_non_recursive(root=tree.root)
    return _reverse_inorder_traverse(node=tree.root)
def preorder_traverse(
    tree: SupportedTreeType, recursive: bool = True
) -> binary_tree.Pairs:
    """Perform Pre-Order traversal.

    Visits nodes in the order current node, left subtree, right subtree
    (DLR).

    Parameters
    ----------
    tree : `SupportedTreeType`
        An instance of the supported binary tree types.
    recursive: `bool`
        If True (default), use the recursive implementation; otherwise use
        the explicit-stack non-recursive one.

    Yields
    ------
    `Pairs`
        The next (key, data) pair in the pre-order traversal.

    Examples
    --------
    >>> from forest.binary_trees import binary_search_tree
    >>> from forest.binary_trees import traversal
    >>> tree = binary_search_tree.BinarySearchTree()
    >>> tree.insert(key=23, data="23")
    >>> tree.insert(key=4, data="4")
    >>> tree.insert(key=30, data="30")
    >>> tree.insert(key=11, data="11")
    >>> tree.insert(key=7, data="7")
    >>> tree.insert(key=34, data="34")
    >>> tree.insert(key=20, data="20")
    >>> tree.insert(key=24, data="24")
    >>> tree.insert(key=22, data="22")
    >>> tree.insert(key=15, data="15")
    >>> tree.insert(key=1, data="1")
    >>> [item for item in traversal.preorder_traverse(tree)]
    [(23, '23'), (4, '4'), (1, '1'), (11, '11'), (7, '7'), (20, '20'),
    (15, '15'), (22, '22'), (30, '30'), (24, '24'), (34, '34')]
    """
    if not recursive:
        return _preorder_traverse_non_recursive(root=tree.root)
    return _preorder_traverse(node=tree.root)
def postorder_traverse(
    tree: SupportedTreeType, recursive: bool = True
) -> binary_tree.Pairs:
    """Perform Post-Order traversal.

    Visits nodes in the order left subtree, right subtree, current node
    (LRD).

    Parameters
    ----------
    tree : `SupportedTreeType`
        An instance of the supported binary tree types.
    recursive: `bool`
        If True (default), use the recursive implementation; otherwise use
        the explicit-stack non-recursive one.

    Yields
    ------
    `Pairs`
        The next (key, data) pair in the post-order traversal.

    Examples
    --------
    >>> from forest.binary_trees import binary_search_tree
    >>> from forest.binary_trees import traversal
    >>> tree = binary_search_tree.BinarySearchTree()
    >>> tree.insert(key=23, data="23")
    >>> tree.insert(key=4, data="4")
    >>> tree.insert(key=30, data="30")
    >>> tree.insert(key=11, data="11")
    >>> tree.insert(key=7, data="7")
    >>> tree.insert(key=34, data="34")
    >>> tree.insert(key=20, data="20")
    >>> tree.insert(key=24, data="24")
    >>> tree.insert(key=22, data="22")
    >>> tree.insert(key=15, data="15")
    >>> tree.insert(key=1, data="1")
    >>> [item for item in traversal.postorder_traverse(tree)]
    [(1, '1'), (7, '7'), (15, '15'), (22, '22'), (20, '20'), (11, '11'),
    (4, '4'), (24, '24'), (34, '34'), (30, '30'), (23, '23')]
    """
    if not recursive:
        return _postorder_traverse_non_recursive(root=tree.root)
    return _postorder_traverse(node=tree.root)
def levelorder_traverse(tree: SupportedTreeType) -> binary_tree.Pairs:
    """Perform Level-Order traversal (breadth-first).

    Visits nodes level by level, from left to right, starting from the
    root node.

    Parameters
    ----------
    tree : `SupportedTreeType`
        An instance of the supported binary tree types.

    Yields
    ------
    `Pairs`
        The next (key, data) pair in the level-order traversal.

    Examples
    --------
    >>> from forest.binary_trees import binary_search_tree
    >>> from forest.binary_trees import traversal
    >>> tree = binary_search_tree.BinarySearchTree()
    >>> tree.insert(key=23, data="23")
    >>> tree.insert(key=4, data="4")
    >>> tree.insert(key=30, data="30")
    >>> tree.insert(key=11, data="11")
    >>> tree.insert(key=7, data="7")
    >>> tree.insert(key=34, data="34")
    >>> tree.insert(key=20, data="20")
    >>> tree.insert(key=24, data="24")
    >>> tree.insert(key=22, data="22")
    >>> tree.insert(key=15, data="15")
    >>> tree.insert(key=1, data="1")
    >>> [item for item in traversal.levelorder_traverse(tree)]
    [(23, '23'), (4, '4'), (30, '30'), (1, '1'), (11, '11'), (24, '24'),
    (34, '34'), (7, '7'), (20, '20'), (15, '15'), (22, '22')]
    """
    from collections import deque

    # An empty tree yields nothing. The previous implementation enqueued a
    # None root and crashed with AttributeError on `temp.key`.
    if tree.root is None:
        return
    # deque.popleft() is O(1); list.pop(0) shifts the whole list, O(n).
    queue = deque([tree.root])
    while queue:
        node = queue.popleft()
        yield (node.key, node.data)
        # The queue is FIFO, so enqueue the left child before the right one
        # to emit each level from left to right.
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)
def _inorder_traverse(node: SupportedNodeType) -> binary_tree.Pairs:
    # Recursive LDR generator: exhaust the left subtree, emit the current
    # node, then exhaust the right subtree. A missing (falsy) node ends the
    # recursion by yielding nothing.
    if not node:
        return
    yield from _inorder_traverse(node.left)
    yield (node.key, node.data)
    yield from _inorder_traverse(node.right)
def _inorder_traverse_non_recursive(root: SupportedNodeType) -> binary_tree.Pairs:
# An internal generator function (coz returning Pairs is an iterator).
# The algorithm is: (Remember that we want the final order to be LDR)
# 1. Create a stack. If the current node has a right child, we push (ie append)
# its right child to the stack and then push the node itself to the stack.
# 2. Then we move to current.left, and repeat Step 1.
# 3. When a node is popped from the stack, we produce the node if (the node has
# no right child) or (the node == the top root node).
if root is None:
raise StopIteration
stack = []
if root.right:
stack.append(root.right)
stack.append(root)
current = root.left
while True:
if current:
if current.right:
stack.append(current.right)
stack.append(current)
current = current.left
continue # this skips all the rest statements in the while-True loop
# If c.r is None, first we squeeze c into stack, then we move to c.l
stack.append(current)
current = current.left
else:
# If c is None, we are at the (locally) leftmost node
if stack:
# For a list, `if stack` means if it is non-empty (ie len(stack)>0).
# pop() returns the last value of a list. So if c is None, it means we
# had gone left one step too deep, so we retreat by one step by popping
current = stack.pop()
if current.right is None:
# We know that stack.pop() is c.parent, since c is its left child.
# So if c.parent.right is None, and c is None (remember that we are
# in the upmost `else`), we only need to yield c.parent, which is
# the now `current`. Then we set c to None again, so we go back to
# the start of this upmost `else` clause
yield (current.key, current.data)
current = None
continue
else:
if stack:
if current.right == stack[-1]:
# If c.r is the last in stack, the stack used to be [..,
                # c.r, c]. Since c < c.r,
import numpy as NP
import copy
from astropy.io import fits
from astropy.io import ascii
from astropy import coordinates as coord
from astropy.coordinates import Galactic, FK5
from astropy import units
import astropy.cosmology as CP
import scipy.constants as FCNST
from scipy import interpolate
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.cm as CM
from matplotlib.ticker import FuncFormatter
import healpy as HP
from mwapy.pb import primary_beam as MWAPB
import geometry as GEOM
import interferometry as RI
import catalog as SM
import constants as CNST
import my_DSP_modules as DSP
import my_operations as OPS
import primary_beams as PB
import baseline_delay_horizon as DLY
import lookup_operations as LKP
import ipdb as PDB
# 01) Plot pointings information
# 02) Plot power patterns for snapshots
# 03) Plot foreground models with power pattern contours for snapshots
# 04) Plot delay maps on sky for baselines of different orientations
# 05) Plot FHD data and simulations on all baselines combined
# 06) Plot FHD data to simulation ratio on all baselines combined
# 07) Plot uncertainties in FHD data to simulation ratio on all baselines combined
# 08) Plot ratio of differences between FHD data and simulation to expected error on all baselines combined
# 09) Plot histogram of fractional differences between FHD data and simulation
# 10) Plot noiseless delay spectra from simulations for diffuse, compact and all-sky models
# 11) Plot noiseless delay spectra for all sky models broken down by baseline orientation
# 12) Plot delay spectra on northward and eastward baselines along with delay maps and sky models (with and without power pattern contours)
# 13) Plot EoR window foreground contamination when baselines are selectively removed
# 14) Plot delay spectra before and after baselines are selectively removed
# 15) Plot Fourier space
# 16) Plot average thermal noise in simulations and data as a function of baseline length
# 17) Plot delay spectra of the MWA tile power pattern using a uniform sky model
# 18) Plot delay spectra of the all-sky model with dipole, MWA tile, and HERA dish antenna shapes
# 19) Plot delay spectrum of uniform sky model with a uniform power pattern
# Master switches for the numbered figure sections listed above: set
# plot_NN = True to generate figure NN on this run.
plot_01 = False
plot_02 = False
plot_03 = False
plot_04 = False
plot_05 = True
plot_06 = False
plot_07 = False
plot_08 = False
plot_09 = False
plot_10 = False
plot_11 = False
plot_12 = False
plot_13 = False
plot_14 = False
plot_15 = False
plot_16 = False
plot_17 = False
plot_18 = False
plot_19 = False
# PLT.ioff()
# Interactive matplotlib mode so figures render as they are produced.
PLT.ion()
# Exactly one project_* flag should be True: it selects project_dir, the
# sub-directory under the data root that all input/output paths use.
project_MWA = True
project_HERA = False
project_beams = False
project_drift_scan = False
project_global_EoR = False
if project_MWA: project_dir = 'project_MWA'
if project_HERA: project_dir = 'project_HERA'
if project_beams: project_dir = 'project_beams'
if project_drift_scan: project_dir = 'project_drift_scan'
if project_global_EoR: project_dir = 'project_global_EoR'
# Telescope / antenna-element configuration. The settings below are folded
# into identifier strings (telescope_str, ground_plane_str, ...) that name
# the simulation data files loaded later.
telescope_id = 'custom'
element_size = 0.74
element_shape = 'delta'
phased_array = True
# Known telescope IDs override element size/shape with canonical values.
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):
    element_size = 0.74
    element_shape = 'dipole'
elif telescope_id == 'vla':
    element_size = 25.0
    element_shape = 'dish'
elif telescope_id == 'gmrt':
    element_size = 45.0
    element_shape = 'dish'
elif telescope_id == 'hera':
    element_size = 14.0
    element_shape = 'dish'
elif telescope_id == 'custom':
    if (element_shape is None) or (element_size is None):
        raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')
    elif element_size <= 0.0:
        raise ValueError('Antenna element size must be positive.')
else:
    raise ValueError('telescope ID must be specified.')
# For custom telescopes, rewrite telescope_id into a descriptive token such
# as '1.5m_dish_array' used in data file names.
if telescope_id == 'custom':
    if element_shape == 'delta':
        telescope_id = 'delta'
    else:
        telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)
    if phased_array:
        telescope_id = telescope_id + '_array'
telescope_str = telescope_id+'_'
ground_plane = 0.3 # height of antenna element above ground plane
if ground_plane is None:
    ground_plane_str = 'no_ground_'
else:
    if ground_plane > 0.0:
        ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)
    else:
        raise ValueError('Height of antenna element above ground plane must be positive.')
delayerr = 0.05 # delay error rms in ns
if delayerr is None:
    delayerr_str = ''
    delayerr = 0.0
elif delayerr < 0.0:
    raise ValueError('delayerr must be non-negative.')
else:
    delayerr_str = 'derr_{0:.3f}ns'.format(delayerr)
# Convert from ns (user-facing) to seconds (used in computations).
delayerr *= 1e-9
gainerr = None # Gain error rms in dB
if gainerr is None:
    gainerr_str = ''
    gainerr = 0.0
elif gainerr < 0.0:
    raise ValueError('gainerr must be non-negative.')
else:
    gainerr_str = '_gerr_{0:.2f}dB'.format(gainerr)
nrand = 1 # Number of random realizations
if nrand is None:
    nrandom_str = ''
    nrand = 1
elif nrand < 1:
    raise ValueError('nrandom must be positive')
else:
    nrandom_str = '_nrand_{0:0d}_'.format(nrand)
# With no delay or gain errors there is nothing to randomize over.
if (delayerr_str == '') and (gainerr_str == ''):
    nrand = 1
    nrandom_str = ''
delaygain_err_str = delayerr_str + gainerr_str + nrandom_str
if project_MWA:
    delaygain_err_str = ''
# Site latitude in degrees (MRO, Western Australia).
latitude = -26.701
antenna_file = '/data3/t_nithyanandan/project_MWA/MWA_128T_antenna_locations_MNRAS_2012_Beardsley_et_al.txt'
max_bl_length = 200.0 # Maximum baseline length (in m)
# Load antenna positions and build the reference baseline set: generate all
# pairs, fold orientations into [0, 180), sort by length, then truncate.
ant_locs = NP.loadtxt(antenna_file, skiprows=6, comments='#', usecols=(0,1,2,3))
ref_bl, ref_bl_id = RI.baseline_generator(ant_locs[:,1:], ant_id=ant_locs[:,0].astype(int).astype(str), auto=False, conjugate=False)
ref_bl_length = NP.sqrt(NP.sum(ref_bl**2, axis=1))
ref_bl_orientation = NP.angle(ref_bl[:,0] + 1j * ref_bl[:,1], deg=True)
# Conjugate (negate) baselines with negative orientation so every baseline
# points into the eastern half-plane, then recompute orientations.
neg_ref_bl_orientation_ind = ref_bl_orientation < 0.0
ref_bl[neg_ref_bl_orientation_ind,:] = -1.0 * ref_bl[neg_ref_bl_orientation_ind,:]
ref_bl_orientation = NP.angle(ref_bl[:,0] + 1j * ref_bl[:,1], deg=True)
# Stable (mergesort) length sort keeps a deterministic baseline ordering.
sortind = NP.argsort(ref_bl_length, kind='mergesort')
ref_bl = ref_bl[sortind,:]
ref_bl_length = ref_bl_length[sortind]
ref_bl_orientation = ref_bl_orientation[sortind]
ref_bl_id = ref_bl_id[sortind]
n_bins_baseline_orientation = 4
# Keep only the shortest nmax_baselines baselines.
nmax_baselines = 2048
ref_bl = ref_bl[:nmax_baselines,:]
ref_bl_length = ref_bl_length[:nmax_baselines]
ref_bl_id = ref_bl_id[:nmax_baselines]
ref_bl_orientation = ref_bl_orientation[:nmax_baselines]
total_baselines = ref_bl_length.size
Tsys = 95.0 # System temperature in K
freq = 185.0e6 # center frequency in Hz
wavelength = FCNST.c / freq # in meters
redshift = CNST.rest_freq_HI / freq - 1
oversampling_factor = 2.0
n_sky_sectors = 1
sky_sector = None # if None, use all sky sector. Accepted values are None, 0, 1, 2, or 3
if sky_sector is None:
    sky_sector_str = '_all_sky_'
    n_sky_sectors = 1
    sky_sector = 0
else:
    sky_sector_str = '_sky_sector_{0:0d}_'.format(sky_sector)
# Baselines are processed in chunks of baseline_chunk_size; only the first
# n_bl_chunks chunks are used.
n_bl_chunks = 32
baseline_chunk_size = 64
baseline_bin_indices = range(0,total_baselines,baseline_chunk_size)
bl_chunk = range(len(baseline_bin_indices))
bl_chunk = bl_chunk[:n_bl_chunks]
# "Truncated" arrays restrict the baseline set to max_bl_length; lengths are
# recomputed from the (x, y) components only.
truncated_ref_bl = NP.copy(ref_bl)
truncated_ref_bl_id = NP.copy(ref_bl_id)
truncated_ref_bl_length = NP.sqrt(NP.sum(truncated_ref_bl[:,:2]**2, axis=1))
# truncated_ref_bl_length = NP.copy(ref_bl_length)
truncated_ref_bl_orientation = NP.copy(ref_bl_orientation)
truncated_total_baselines = truncated_ref_bl_length.size
if max_bl_length is not None:
    truncated_ref_bl_ind = ref_bl_length <= max_bl_length
    truncated_ref_bl = truncated_ref_bl[truncated_ref_bl_ind,:]
    truncated_ref_bl_id = truncated_ref_bl_id[truncated_ref_bl_ind]
    truncated_ref_bl_orientation = truncated_ref_bl_orientation[truncated_ref_bl_ind]
    truncated_ref_bl_length = truncated_ref_bl_length[truncated_ref_bl_ind]
    truncated_total_baselines = truncated_ref_bl_length.size
bl_orientation_str = ['South-East', 'East', 'North-East', 'North']
# Spectral-index scatter settings and foreground sky-model selection.
spindex_rms = 0.0
spindex_seed = None
spindex_seed_str = ''
if spindex_rms > 0.0:
    spindex_rms_str = '{0:.1f}'.format(spindex_rms)
else:
    spindex_rms = 0.0
if spindex_seed is not None:
    spindex_seed_str = '{0:0d}_'.format(spindex_seed)
# Alternative spectral-index realization (for comparison runs).
use_alt_spindex = False
alt_spindex_rms = 0.3
alt_spindex_seed = 95
alt_spindex_seed_str = ''
if alt_spindex_rms > 0.0:
    alt_spindex_rms_str = '{0:.1f}'.format(alt_spindex_rms)
else:
    alt_spindex_rms = 0.0
if alt_spindex_seed is not None:
    alt_spindex_seed_str = '{0:0d}_'.format(alt_spindex_seed)
# HEALPix resolution of the sky model; exactly one use_* catalog flag should
# be True (checked in priority order below when building fg_str).
nside = 64
use_GSM = True
use_DSM = False
use_CSM = False
use_NVSS = False
use_SUMSS = False
use_MSS = False
use_GLEAM = False
use_PS = False
obs_mode = 'custom'
avg_drifts = False
beam_switch = False
snapshot_type_str = ''
if avg_drifts:
    snapshot_type_str = 'drift_averaged_'
if beam_switch:
    snapshot_type_str = 'beam_switches_'
freq_resolution = 80e3 # in Hz (80 kHz); bw below comes out at 30.72 MHz
nchan = 384
bpass_shape = 'bhw'
max_abs_delay = 1.5 # in micro seconds
coarse_channel_resolution = 1.28e6 # in Hz
bw = nchan * freq_resolution
dsm_base_freq = 408e6 # Haslam map frequency
csm_base_freq = 1.420e9 # NVSS frequency
dsm_dalpha = 0.7/2 # Spread in spectral index in Haslam map
csm_dalpha = 0.7/2 # Spread in spectral index in NVSS
csm_jacobian_spindex = NP.abs(csm_dalpha * NP.log(freq/csm_base_freq))
dsm_jacobian_spindex = NP.abs(dsm_dalpha * NP.log(freq/dsm_base_freq))
# Short token naming the chosen foreground model in data file names.
if use_GSM:
    fg_str = 'asm'
elif use_DSM:
    fg_str = 'dsm'
elif use_CSM:
    fg_str = 'csm'
elif use_SUMSS:
    fg_str = 'sumss'
elif use_GLEAM:
    fg_str = 'gleam'
elif use_PS:
    fg_str = 'point'
elif use_NVSS:
    fg_str = 'nvss'
else:
    fg_str = 'other'
# Region-of-interest file (name assembled from all the identifier tokens
# above), effective area, phase center and cosmological conversion factors.
roifile = '/data3/t_nithyanandan/'+project_dir+'/roi_info_'+telescope_str+ground_plane_str+snapshot_type_str+obs_mode+'_gaussian_FG_model_'+fg_str+sky_sector_str+'nside_{0:0d}_'.format(nside)+delaygain_err_str+'Tsys_{0:.1f}K_{1:.1f}_MHz_{2:.1f}_MHz'.format(Tsys, freq/1e6, nchan*freq_resolution/1e6)+'.fits'
roi = RI.ROI_parameters(init_file=roifile)
telescope = roi.telescope
# Effective collecting area: (lambda/2)^2 per dipole/delta element, times 16
# for a 4x4 MWA-style phased tile; geometric area for dishes.
if (telescope['shape'] == 'dipole') or (telescope['shape'] == 'delta'):
    A_eff = (0.5*wavelength)**2
    if (telescope_id == 'mwa') or phased_array:
        A_eff *= 16
if telescope['shape'] == 'dish':
    A_eff = NP.pi * (0.5*element_size)**2
# Phase center at zenith in direction-cosine coordinates.
pc = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)
pc_coords = 'dircos'
h = 0.7 # Hubble constant coefficient
cosmodel100 = CP.FlatLambdaCDM(H0=100.0, Om0=0.27) # Using H0 = 100 km/s/Mpc
cosmodel = CP.FlatLambdaCDM(H0=h*100.0, Om0=0.27) # Using H0 = h * 100 km/s/Mpc
# Comoving depth spanned by the band and transverse comoving distance, both
# in h^-1 units because cosmodel100 uses H0 = 100 km/s/Mpc.
dr_z = (FCNST.c/1e3) * bw * (1+redshift)**2 / CNST.rest_freq_HI / cosmodel100.H0.value / cosmodel100.efunc(redshift) # in Mpc/h
r_z = cosmodel100.comoving_transverse_distance(redshift).value # in Mpc/h
volfactor1 = A_eff / wavelength**2 / bw
volfactor2 = r_z**2 * dr_z / bw
# Flux-density <-> brightness-temperature conversion factors.
Jy2K = wavelength**2 * CNST.Jy / (2*FCNST.k)
mJy2mK = NP.copy(Jy2K)
Jy2mK = 1e3 * Jy2K
mK2Jy = 1/Jy2mK
mK2mJy = 1/mJy2mK
K2Jy = 1/Jy2K
# Optional color-scale limits for delay spectrum plots (None = auto).
dspec_min = None
dspec_max = None
def kprll(eta, z):
    """Line-of-sight wavenumber k_parallel (h/Mpc) for delay eta (s) at redshift z."""
    numerator = 2 * NP.pi * eta * cosmodel100.H0.value * CNST.rest_freq_HI * cosmodel100.efunc(z)
    return numerator / FCNST.c / (1+z)**2 * 1e3
def kperp(u, z):
    """Transverse wavenumber k_perp (h/Mpc) for baseline length u (wavelengths) at redshift z."""
    transverse_distance = cosmodel100.comoving_transverse_distance(z).value
    return 2 * NP.pi * u / transverse_distance
##########################################
if plot_01:
# 01) Plot pointings information
pointing_file = '/data3/t_nithyanandan/project_MWA/Aug23_obsinfo.txt'
pointing_info_from_file = NP.loadtxt(pointing_file, comments='#', usecols=(1,2,3), delimiter=',')
obs_id = NP.loadtxt(pointing_file, comments='#', usecols=(0,), delimiter=',', dtype=str)
if (telescope_id == 'mwa') or (phased_array):
delays_str = NP.loadtxt(pointing_file, comments='#', usecols=(4,), delimiter=',', dtype=str)
delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]
delay_settings = NP.asarray(delays_list)
delay_settings *= 435e-12
delays = NP.copy(delay_settings)
n_snaps = pointing_info_from_file.shape[0]
pointing_info_from_file = pointing_info_from_file[:min(n_snaps, pointing_info_from_file.shape[0]),:]
obs_id = obs_id[:min(n_snaps, pointing_info_from_file.shape[0])]
if (telescope_id == 'mwa') or (phased_array):
delays = delay_settings[:min(n_snaps, pointing_info_from_file.shape[0]),:]
n_snaps = min(n_snaps, pointing_info_from_file.shape[0])
pointings_altaz = pointing_info_from_file[:,:2].reshape(-1,2)
pointings_altaz_orig = pointing_info_from_file[:,:2].reshape(-1,2)
lst = 15.0 * pointing_info_from_file[:,2]
lst_wrapped = lst + 0.0
lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0
lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))
lst = 0.5*(lst_edges[1:]+lst_edges[:-1])
t_snap = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3
pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')
pointings_hadec | |
#
# Copyright (c) 2017 Orange.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Datasource for configuration options"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from collections import OrderedDict
import datetime
import os
import six
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
import oslo_messaging as messaging
from congress.cfg_validator import parsing
from congress.cfg_validator import utils
from congress.datasources import constants
from congress.datasources import datasource_driver
from congress.dse2 import dse_node as dse
LOG = logging.getLogger(__name__)
FILE = u'file'
VALUE = u'binding'
OPTION = u'option'
OPTION_INFO = u'option_info'
INT_TYPE = u'int_type'
FLOAT_TYPE = u'float_type'
STR_TYPE = u'string_type'
LIST_TYPE = u'list_type'
RANGE_TYPE = u'range_type'
URI_TYPE = u'uri_type'
IPADDR_TYPE = u'ipaddr_type'
SERVICE = u'service'
HOST = u'host'
MODULE = u'module'
TEMPLATE = u'template'
TEMPLATE_NS = u'template_ns'
NAMESPACE = u'namespace'
class ValidatorDriver(datasource_driver.PollingDataSourceDriver):
"""Driver for the Configuration validation datasource"""
# pylint: disable=too-many-instance-attributes
DS_NAME = u'config'
    def __init__(self, name=None, args=None):
        """Initialize caches, the agent RPC client, and start polling.

        :param name: accepted for driver-interface compatibility but unused.
        :param args: datasource arguments forwarded to the parent driver.
        """
        # NOTE(review): `name` is ignored — the parent is always initialized
        # with DS_NAME; confirm this is intentional.
        super(ValidatorDriver, self).__init__(self.DS_NAME, args)
        # { template_hash -> {name, namespaces} }
        self.known_templates = {}
        # { namespace_hash -> namespace_name }
        self.known_namespaces = {}
        # set(config_hash)
        self.known_configs = set()
        # { template_hash -> (conf_hash, conf)[] }
        self.templates_awaited_by_config = {}
        self.agent_api = ValidatorAgentClient()
        self.rule_added = False
        # Register RPC endpoints only when the base class supports it.
        if hasattr(self, 'add_rpc_endpoint'):
            self.add_rpc_endpoint(ValidatorDriverEndpoints(self))
        # Must run last: starts polling once the driver is fully set up.
        self._init_end_start_poll()
# pylint: disable=no-self-use
def get_context(self):
"""context for RPC. To define"""
return {}
@staticmethod
def get_datasource_info():
"""Gives back a standardized description of the datasource"""
result = {}
result['id'] = 'config'
result['description'] = (
'Datasource driver that allows OS configs retrieval.')
result['config'] = {
'poll_time': constants.OPTIONAL,
'lazy_tables': constants.OPTIONAL}
return result
    @classmethod
    def get_schema(cls):
        """Return the table schemas: table name -> list of column descriptors.

        Each descriptor is a dict with 'name' and 'desc' keys; the *_TYPE
        tables hold per-type metadata keyed by option_id.
        """
        # NOTE(review): 'Arbitraty' below is a typo in a runtime string;
        # fixing it would change published table data, so it is left as-is.
        sch = {
            # option value
            VALUE: [
                {'name': 'option_id', 'desc': 'The represented option'},
                {'name': 'file_id',
                 'desc': 'The file containing the assignement'},
                {'name': 'val', 'desc': 'Actual value'}],
            OPTION: [
                {'name': 'id', 'desc': 'Id'},
                {'name': 'namespace', 'desc': ''},
                {'name': 'group', 'desc': ''},
                {'name': 'name', 'desc': ''}, ],
            # options metadata, omitted : dest
            OPTION_INFO: [
                {'name': 'option_id', 'desc': 'Option id'},
                {'name': 'type', 'desc': ''},
                {'name': 'default', 'desc': ''},
                {'name': 'deprecated', 'desc': ''},
                {'name': 'deprecated_reason', 'desc': ''},
                {'name': 'mutable', 'desc': ''},
                {'name': 'positional', 'desc': ''},
                {'name': 'required', 'desc': ''},
                {'name': 'sample_default', 'desc': ''},
                {'name': 'secret', 'desc': ''},
                {'name': 'help', 'desc': ''}],
            HOST: [
                {'name': 'id', 'desc': 'Id'},
                {'name': 'name', 'desc': 'Arbitraty host name'}],
            FILE: [
                {'name': 'id', 'desc': 'Id'},
                {'name': 'host_id', 'desc': 'File\'s host'},
                {'name': 'template',
                 'desc': 'Template specifying the content of the file'},
                {'name': 'name', 'desc': ''}],
            MODULE: [
                {'name': 'id', 'desc': 'Id'},
                {'name': 'base_dir', 'desc': ''},
                {'name': 'module', 'desc': ''}],
            SERVICE: [
                {'name': 'service', 'desc': ''},
                {'name': 'host', 'desc': ''},
                {'name': 'version', 'desc': ''}],
            TEMPLATE: [
                {'name': 'id', 'desc': ''},
                {'name': 'name', 'desc': ''}, ],
            TEMPLATE_NS: [
                {'name': 'template', 'desc': 'hash'},
                {'name': 'namespace', 'desc': 'hash'}],
            NAMESPACE: [
                {'name': 'id', 'desc': ''},
                {'name': 'name', 'desc': ''}],
            INT_TYPE: [
                {'name': 'option_id', 'desc': ''},
                {'name': 'min', 'desc': ''},
                {'name': 'max', 'desc': ''},
                {'name': 'choices', 'desc': ''}, ],
            FLOAT_TYPE: [
                {'name': 'option_id', 'desc': ''},
                {'name': 'min', 'desc': ''},
                {'name': 'max', 'desc': ''}, ],
            STR_TYPE: [
                {'name': 'option_id', 'desc': ''},
                {'name': 'regex', 'desc': ''},
                {'name': 'max_length', 'desc': ''},
                {'name': 'quotes', 'desc': ''},
                {'name': 'ignore_case', 'desc': ''},
                {'name': 'choices', 'desc': ''}, ],
            LIST_TYPE: [
                {'name': 'option_id', 'desc': ''},
                {'name': 'item_type', 'desc': ''},
                {'name': 'bounds', 'desc': ''}, ],
            IPADDR_TYPE: [
                {'name': 'option_id', 'desc': ''},
                {'name': 'version', 'desc': ''}, ],
            URI_TYPE: [
                {'name': 'option_id', 'desc': ''},
                {'name': 'max_length', 'desc': ''},
                {'name': 'schemes', 'desc': ''}, ],
            RANGE_TYPE: [
                {'name': 'option_id', 'desc': ''},
                {'name': 'min', 'desc': ''},
                {'name': 'max', 'desc': ''}, ],
        }
        return sch
def poll(self):
LOG.info("%s:: polling", self.name)
# Initialize published state to a sensible empty state.
# Avoids races with queries.
if self.number_of_updates == 0:
for tablename in set(self.get_schema()):
self.state[tablename] = set()
self.publish(tablename, self.state[tablename],
use_snapshot=False)
self.agent_api.publish_templates_hashes(self.get_context())
self.agent_api.publish_configs_hashes(self.get_context())
self.last_updated_time = datetime.datetime.now()
self.number_of_updates += 1
def process_config_hashes(self, hashes, host):
"""Handles a list of config files hashes and their retrieval.
If the driver can process the parsing and translation of the config,
it registers the configs to the driver.
:param hashes: A list of config files hashes
:param host: Name of the node hosting theses config files
"""
LOG.debug('Received configs list from %s' % host)
for cfg_hash in set(hashes) - self.known_configs:
config = self.agent_api.get_config(self.get_context(),
cfg_hash, host)
if self.process_config(cfg_hash, config, host):
self.known_configs.add(cfg_hash)
LOG.debug('Config %s from %s registered' % (cfg_hash, host))
@lockutils.synchronized('validator_process_template_hashes')
def process_template_hashes(self, hashes, host):
"""Handles a list of template hashes and their retrieval.
Uses lock to avoid multiple sending of the same data.
:param hashes: A list of templates hashes
:param host: Name of the node hosting theses config files
"""
LOG.debug('Process template hashes from %s' % host)
for t_h in set(hashes) - set(self.known_templates):
LOG.debug('Treating template hash %s' % t_h)
template = self.agent_api.get_template(self.get_context(), t_h,
host)
ns_hashes = template['namespaces']
for ns_hash in set(ns_hashes) - set(self.known_namespaces):
namespace = self.agent_api.get_namespace(
self.get_context(), ns_hash, host)
self.known_namespaces[ns_hash] = namespace
self.known_templates[t_h] = template
for (c_h, config) in self.templates_awaited_by_config.pop(t_h, []):
if self.process_config(c_h, config, host):
self.known_configs.add(c_h)
LOG.debug('Config %s from %s registered (late)' %
(c_h, host))
return True
def translate_service(self, host_id, service, version):
"""Translates a service infos to SERVICE table.
:param host_id: Host ID, should reference HOST.ID
:param service: A service name
:param version: A version name, can be None
"""
if not host_id or not service:
return
service_row = tuple(
map(utils.cfg_value_to_congress, (service, host_id, version)))
self.state[SERVICE].add(service_row)
def translate_host(self, host_id, host_name):
"""Translates a host infos to HOST table.
:param host_id: Host ID
:param host_name: A host name
"""
if not host_id:
return
host_row = tuple(
map(utils.cfg_value_to_congress, (host_id, host_name)))
self.state[HOST].add(host_row)
def translate_file(self, file_id, host_id, template_id, file_name):
"""Translates a file infos to FILE table.
:param file_id: File ID
:param host_id: Host ID, should reference HOST.ID
:param template_id: Template ID, should reference TEMPLATE.ID
"""
if not file_id or not host_id:
return
file_row = tuple(
map(utils.cfg_value_to_congress,
(file_id, host_id, template_id, file_name)))
self.state[FILE].add(file_row)
def translate_template_namespace(self, template_id, name, ns_ids):
"""Translates a template infos and its namespaces infos.
Modifies tables : TEMPLATE, NAMESPACE and TEMPLATE_NS
:param template_id: Template ID
:param name: A template name
:param ns_ids: List of namespace IDs, defining this template, should
reference NAMESPACE.ID
"""
if not template_id:
return
template_row = tuple(
map(utils.cfg_value_to_congress, (template_id, name)))
self.state[TEMPLATE].add(template_row)
for ns_h, ns_name in six.iteritems(ns_ids):
if not ns_h:
continue
namespace_row = tuple(map(utils.cfg_value_to_congress,
(ns_h, ns_name)))
self.state[NAMESPACE].add(namespace_row)
tpl_ns_row = tuple(
map(utils.cfg_value_to_congress, (template_id, ns_h)))
self.state[TEMPLATE_NS].add(tpl_ns_row)
    # pylint: disable=protected-access,too-many-branches
    def translate_type(self, opt_id, cfg_type):
        """Translates a type to the appropriate type table.

        :param opt_id: Option ID, should reference OPTION.ID
        :param cfg_type: An oslo ConfigType for the referenced option
        """
        if not opt_id:
            return
        # Dispatch on the oslo type; unknown types are silently ignored
        # (the final `else: return` below).
        if isinstance(cfg_type, types.String):
            tablename = STR_TYPE
            # oslo.config 5.2 begins to use a different representation of
            # choices (OrderedDict). We first convert back to simple list to
            # have consistent output regardless of oslo.config version
            if isinstance(cfg_type.choices, OrderedDict):
                choices = list(map(lambda item: item[0],
                                   cfg_type.choices.items()))
            else:
                choices = cfg_type.choices
            row = (cfg_type.regex, cfg_type.max_length, cfg_type.quotes,
                   cfg_type.ignore_case, choices)
        elif isinstance(cfg_type, types.Integer):
            tablename = INT_TYPE
            # oslo.config 5.2 begins to use a different representation of
            # choices (OrderedDict). We first convert back to simple list to
            # have consistent output regardless of oslo.config version
            if isinstance(cfg_type.choices, OrderedDict):
                choices = list(map(lambda item: item[0],
                                   cfg_type.choices.items()))
            else:
                choices = cfg_type.choices
            row = (cfg_type.min, cfg_type.max, choices)
        elif isinstance(cfg_type, types.Float):
            tablename = FLOAT_TYPE
            row = (cfg_type.min, cfg_type.max)
        elif isinstance(cfg_type, types.List):
            tablename = LIST_TYPE
            row = (type(cfg_type.item_type).__name__, cfg_type.bounds)
        elif isinstance(cfg_type, types.IPAddress):
            tablename = IPADDR_TYPE
            # The IP version is not stored directly; it is recovered by
            # comparing the bound version-checker method (hence the
            # protected-access pylint waiver above).
            if cfg_type.version_checker == cfg_type._check_ipv4:
                version = 4
            elif cfg_type.version_checker == cfg_type._check_ipv6:
                version = 6
            else:
                version = None
            row = (version,)
        elif isinstance(cfg_type, types.URI):
            tablename = URI_TYPE
            row = (cfg_type.max_length, cfg_type.schemes)
        elif isinstance(cfg_type, types.Range):
            tablename = RANGE_TYPE
            row = (cfg_type.min, cfg_type.max)
        else:
            return
        # Prefix every type row with the owning option's id.
        row = (opt_id,) + row
        # For lists, also record the element type under the same option id.
        if isinstance(cfg_type, types.List):
            self.translate_type(opt_id, cfg_type.item_type)
        self.state[tablename].add(
            tuple(map(utils.cfg_value_to_congress, row)))
def translate_value(self, file_id, option_id, value):
"""Translates a value to the VALUE table.
If value is a | |
# Repository: pquentin/trio-asyncio
# This code implements a clone of the asyncio mainloop which hooks into
# Trio.
import sys
import trio
import asyncio
import warnings
import threading
from contextvars import ContextVar
from async_generator import async_generator, yield_, asynccontextmanager
from ._util import run_aio_future, run_aio_generator
from ._async import TrioEventLoop
from ._deprecate import deprecated, warn_deprecated
try:
from trio.hazmat import wait_for_child
except ImportError:
from ._child import wait_for_child
# A substantial portion of the trio-asyncio test suite involves running the
# stock asyncio test suite with trio-asyncio imported. This is intended to
# test two things:
# - that trio-asyncio is a "good citizen": won't screw up other users of
# asyncio if Trio isn't running
# - that trio-asyncio provides an event loop that conforms to asyncio semantics,
# even if Trio is running
#
# It's hard to test both of these at once: in order to get good test
# coverage, we want normal asyncio calls to instantiate our loop, but
# the asyncio tests are full of tricky event loop manipulations, some
# of which expect to provide their own mock loop. We've compromised on
# the following.
#
# - The "actual" event loop policy (the one that would be returned by
# an unpatched asyncio.get_event_loop_policy()) is set at import
# time to a singleton instance of TrioPolicy, and never changed
# later. This is required for correct operation in CPython 3.7+,
# because the C _asyncio module caches the original
# asyncio.get_event_loop_policy() and calls it from its accelerated
# C get_event_loop() function. We want asyncio.get_event_loop() to be
# able to return a trio-asyncio event loop.
#
# - To cope with tests that set a custom policy, we monkeypatch
# asyncio.get_event_loop_policy() and set_event_loop_policy()
# so that they model an event loop policy that is thread-local when
# called outside of Trio context. Said policy is stored in at
# _faked_policy.policy. (Inside Trio context, we let get_event_loop_policy()
# return the singleton global TrioPolicy, and set_event_loop_policy()
# raises an exception.)
#
# - If you've previously called set_event_loop_policy() with a
# non-None argument in the current thread, then
# get_event_loop_policy() will return the thing that you passed to
# set_event_loop_policy().
#
# - If you haven't called set_event_loop_policy() in this thread
# yet, or the most recent call in this context had a None
# argument, then get_event_loop_policy() will return the asyncio
# event loop policy that was installed when trio_asyncio was
# imported.
#
# - Even though the user can set a per-thread policy and we'll echo it back,
# the "actual" global policy is still the TrioPolicy and we don't expose
# any way to change it. asyncio.get_event_loop() will use this TrioPolicy
# on 3.7+ no matter what we do, so we monkeypatch new_event_loop() and
# set_event_loop() to go through the TrioPolicy too (for consistency's sake)
# and let TrioPolicy forward to the appropriate actual policy specified by
# the user.
#
# - Inside a Trio context, TrioPolicy refuses to create a new event loop
# (you should use 'async with trio_asyncio.open_loop():' instead).
# Its get/set event loop methods access a contextvar (current_loop),
# which is normally set to the nearest enclosing open_loop() loop,
# but can be modified if you want to put some Trio tasks in a
# trio-asyncio event loop that doesn't correspond to their place in the
# Trio task tree.
#
# - Outside a Trio context when an event loop policy has been set,
# TrioPolicy delegates all three methods (new/get/set event loop)
# to that policy. Thus, if you install a custom policy, it will get
# used (trio-asyncio gets out of the way).
#
# - Outside a Trio context when no event loop policy has been set,
# the get/set event loop methods manage a thread-local event loop
# just like they do in default asyncio. However, new_event_loop() will
# create a synchronous trio-asyncio event loop (the kind that can
# be repeatedly started and stopped, which is helpful for many asyncio
# tests). Thus, if you don't install a custom policy, tests that use
# asyncio will exercise trio-asyncio.
class _FakedPolicy(threading.local):
    """Thread-local holder for a user-installed event loop policy.

    ``policy`` stays None until set_event_loop_policy() is called in a
    given thread; None means "fall back to the original policy".
    """
    policy = None


_faked_policy = _FakedPolicy()
def _in_trio_context():
    """Return True when the caller is running inside a Trio run."""
    try:
        trio.hazmat.current_task()
    except RuntimeError:
        # current_task() raises RuntimeError outside of a Trio run.
        return False
    return True
class _TrioPolicy(asyncio.events.BaseDefaultEventLoopPolicy):
    """Core of the trio-asyncio event loop policy.

    Routes new_event_loop()/get_event_loop()/set_event_loop() through the
    rules described in the module comment above: inside a Trio context the
    ``current_loop`` contextvar wins; outside, a user-installed policy
    (``_faked_policy.policy``) is consulted before falling back to default
    thread-local behavior.
    """

    @staticmethod
    def _loop_factory():
        # BaseDefaultEventLoopPolicy.new_event_loop() would normally call
        # this factory; our new_event_loop() override never reaches it.
        raise RuntimeError("Event loop creations shouldn't get here")

    def new_event_loop(self):
        """Create a new event loop (outside Trio context only).

        Delegates to the user-installed policy when one is set; otherwise
        returns a synchronous trio-asyncio loop (start/stop-able, which the
        asyncio test suite relies on).
        """
        if _in_trio_context():
            raise RuntimeError(
                "You're within a Trio environment.\n"
                "Use 'async with open_loop()' instead."
            )
        if _faked_policy.policy is not None:
            return _faked_policy.policy.new_event_loop()
        if 'pytest' not in sys.modules:
            # Outside the test suite, steer users toward open_loop().
            warn_deprecated(
                "Using trio-asyncio outside of a Trio event loop",
                "0.10.0",
                issue=None,
                instead=None,
            )

        # Imported lazily to avoid a circular import at module load time.
        from ._sync import SyncTrioEventLoop

        return SyncTrioEventLoop()

    def get_event_loop(self):
        """Get the current event loop.

        Note that this will auto-generate an event loop if none exists, for
        compatibility with asyncio.

        To get a Trio-compatible asyncio loop, use
        ``async with trio_asyncio.open_loop() as loop:``.

        To test whether an event loop is running, check the loop policy's
        ``.current_event_loop`` property.
        """
        try:
            task = trio.hazmat.current_task()
        except RuntimeError:
            pass
        else:
            # Trio context. Note: NOT current_loop.get()! If this is called from
            # asyncio code, current_task() is the trio-asyncio loop runner task,
            # which has the correct loop set in its contextvar; but (on Python
            # 3.7+) our current context is quite possibly something different, and
            # might have the wrong contextvar value (e.g. in the case of a
            # loop1.call_later() in loop2's context).
            return task.context.get(current_loop)

        # Not Trio context
        if _faked_policy.policy is not None:
            return _faked_policy.policy.get_event_loop()

        # This will return the thread-specific event loop set using
        # set_event_loop(), or if none has been set, will call back into
        # our new_event_loop() to make a SyncTrioEventLoop and set it as
        # this thread's event loop.
        return super().get_event_loop()

    @property
    def current_event_loop(self):
        """The currently-running event loop, if one exists."""
        loop = current_loop.get()
        if loop is None:
            loop = super().get_event_loop()
        return loop

    def set_event_loop(self, loop):
        """Set the current event loop."""
        if _in_trio_context():
            current_loop.set(loop)
        elif _faked_policy.policy is not None:
            _faked_policy.policy.set_event_loop(loop)
        else:
            super().set_event_loop(loop)
from asyncio import events as _aio_event
#####
def _new_policy_get():
    """Replacement for asyncio.get_event_loop_policy().

    Inside Trio, always the singleton TrioPolicy; outside, the user's
    thread-local policy if one was installed, else the original policy.
    """
    if _in_trio_context():
        return _trio_policy
    user_policy = _faked_policy.policy
    return user_policy if user_policy is not None else _original_policy
def _new_policy_set(new_policy):
    """Replacement for asyncio.set_event_loop_policy().

    Stores *new_policy* in thread-local storage; the process-global policy
    remains the singleton TrioPolicy (see module comment).

    :raises RuntimeError: if *new_policy* is a TrioPolicy, or if called
        from within a Trio context.
    :raises TypeError: if *new_policy* is neither None nor an event loop
        policy instance.
    """
    if isinstance(new_policy, TrioPolicy):
        raise RuntimeError("You can't set the Trio loop policy manually")
    if _in_trio_context():
        raise RuntimeError("You can't change the event loop policy in Trio context")
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, and this validates caller-supplied input.
    if not (new_policy is None
            or isinstance(new_policy, asyncio.AbstractEventLoopPolicy)):
        raise TypeError(
            "policy must be None or an AbstractEventLoopPolicy instance"
        )
    _faked_policy.policy = new_policy
# Install the policy monkeypatches.  Keep references to the original
# accessors so the machinery above can delegate to them.
_orig_policy_get = _aio_event.get_event_loop_policy
_orig_policy_set = _aio_event.set_event_loop_policy
_aio_event.get_event_loop_policy = _new_policy_get
_aio_event.set_event_loop_policy = _new_policy_set
# asyncio re-exports these names at top level, so patch them there too.
asyncio.get_event_loop_policy = _new_policy_get
asyncio.set_event_loop_policy = _new_policy_set
#####
# Patch the running-loop accessors, when this Python version has them
# (older versions lack _get_running_loop, hence the AttributeError guard).
try:
    _orig_run_get = _aio_event._get_running_loop
except AttributeError:
    pass
else:

    def _new_run_get():
        """Replacement for asyncio._get_running_loop()."""
        try:
            task = trio.hazmat.current_task()
        except RuntimeError:
            pass
        else:
            # Trio context. Note: NOT current_loop.get()!
            # See comment in _TrioPolicy.get_event_loop().
            return task.context.get(current_loop)
        # Not Trio context
        return _orig_run_get()

    # Must override the non-underscore-prefixed get_running_loop() too,
    # else will use the C-accelerated one which doesn't call the patched
    # _get_running_loop()
    def _new_run_get_or_throw():
        """Replacement for asyncio.get_running_loop()."""
        result = _new_run_get()
        if result is None:
            raise RuntimeError("no running event loop")
        return result

    _aio_event._get_running_loop = _new_run_get
    _aio_event.get_running_loop = _new_run_get_or_throw
#####
def _new_loop_get():
    """Replacement for asyncio.get_event_loop().

    Prefers the currently-running loop (if any); otherwise delegates to
    the TrioPolicy, which applies the thread-local/user-policy rules.
    """
    # Renamed the local so it no longer shadows the module-level
    # `current_loop` ContextVar.
    running_loop = _new_run_get()
    if running_loop is not None:
        return running_loop
    return _trio_policy.get_event_loop()
def _new_loop_set(new_loop):
    """Replacement for asyncio.set_event_loop(); delegates to TrioPolicy."""
    _trio_policy.set_event_loop(new_loop)


def _new_loop_new():
    """Replacement for asyncio.new_event_loop(); delegates to TrioPolicy."""
    return _trio_policy.new_event_loop()
# Install the loop-accessor monkeypatches, keeping the originals around.
_orig_loop_get = _aio_event.get_event_loop
_orig_loop_set = _aio_event.set_event_loop
_orig_loop_new = _aio_event.new_event_loop
_aio_event.get_event_loop = _new_loop_get
_aio_event.set_event_loop = _new_loop_set
_aio_event.new_event_loop = _new_loop_new
# asyncio re-exports these names at top level, so patch them there too.
asyncio.get_event_loop = _new_loop_get
asyncio.set_event_loop = _new_loop_set
asyncio.new_event_loop = _new_loop_new
#####
class TrioPolicy(_TrioPolicy, asyncio.DefaultEventLoopPolicy):
    """This is the loop policy that's active whenever we're in a Trio context."""

    def _init_watcher(self):
        # Lazily create the child watcher under the asyncio policy lock,
        # substituting TrioChildWatcher for the stock implementation.
        with asyncio.events._lock:
            if self._watcher is None:  # pragma: no branch
                self._watcher = TrioChildWatcher()
                if isinstance(threading.current_thread(), threading._MainThread):
                    self._watcher.attach_loop(current_loop.get())

            # NOTE(review): when the watcher was just created on the main
            # thread, attach_loop() runs a second time here — looks
            # redundant; confirm whether that is intentional.
            if self._watcher is not None and \
                    isinstance(threading.current_thread(), threading._MainThread):
                self._watcher.attach_loop(current_loop.get())

    def set_child_watcher(self, watcher):
        # Any non-Trio watcher is silently replaced by a TrioChildWatcher
        # attached to the same loop (instead of raising/warning).
        if watcher is not None:
            if not isinstance(watcher, TrioChildWatcher):
                # raise RuntimeError("You must use a TrioChildWatcher here. "
                #                    "Sorry.")
                # warnings.warn("You must use a TrioChildWatcher.")
                #
                loop = watcher._loop  # ugh.
                watcher.close()
                watcher = TrioChildWatcher()
                watcher.attach_loop(loop)
        super().set_child_watcher(watcher)
# Capture whatever policy was installed before import, then make the
# singleton TrioPolicy the "actual" global policy (see module comment).
_original_policy = _orig_policy_get()
_trio_policy = TrioPolicy()
_orig_policy_set(_trio_policy)

# Backwards compatibility -- unused
current_policy = ContextVar('trio_aio_policy', default=_trio_policy)

# The trio-asyncio loop for the current context; set by open_loop().
current_loop = ContextVar('trio_aio_loop', default=None)
class TrioChildWatcher(asyncio.AbstractChildWatcher if sys.platform != 'win32' else object):
"""Watches for child processes to exit using Trio APIs.
All TrioChildWatchers behave identically, so there's no reason to construct
your own. This is more or less an implementation detail, exposed publicly
because you can get your hands on it anyway (using
``asyncio.get_event_loop_policy
"""
# AbstractChildWatcher not available under Windows
def __init__(self):
super().__init__()
self._callbacks = {} # pid => handler
def attach_loop(self, loop):
self._loop = loop
async def _waitpid(self, pid, callback, *args):
returncode = await wait_for_child(pid)
callback(pid, returncode, *args)
def add_child_handler(self, pid, callback, *args):
"""Add a callback to run when a child process terminates."""
h = self._loop.run_trio(self._waitpid, pid, callback, *args)
self._callbacks[pid] = h
def remove_child_handler(self, pid):
"""Remove the | |
provides the same result.
# TODO: Write expansion of small multi-axis rotation tests
- Angles between axes: Since the entire system is to be rotated as one, rigid body, the orthogonality of the
three axes must be kept true. Tiny anomalies are acceptable due to float point accuracy.
- Linked rotation error: The error arising from many linked rotations should be measured in order to know how
often to adjust for this by for instance checking for axis orthogonality
"""
# Rotation specific methods
def _check_for_ndarray_element_similarity(m1: np.ndarray, m2: np.ndarray, threshold: float = 0.00001):
"""
Helper method for rotate_system_test()
"""
if np.shape(m1) != np.shape(m2):
return False
for i in range(np.shape(m1)[0]):
for j in range(np.shape(m2)[1]):
if abs(m1[i, j] - m2[i, j]) > threshold:
return False
return True
def _check_system_coordinate_movement_directions(m1: np.ndarray, m2: np.ndarray, diffs: list,
threshold: float = 0.00001):
"""
Helper method for rotate_system_test().
:param m1: The system before a minor rotation is applied.
:param m2: The resulting system after the rotation.
:param diffs: A list of assumptions regarding the relationship between pairs of coordinates. If a diffs-element
equals -1, check that the resulting coordinate is of a smaller value than the original one. If the
diffs-element equals 1, check the opposite. If 0 (which I doubt will be tested for in reality with rotation
around more than one axis) check that the two coordinates are within the threshold of each other. NB: This list
is written row-wise from the two ndarrays.
:param threshold: Only used for cases where two coordinates are assumed equal to each other.
"""
for i in range(np.shape(m1)[0]):
for j in range(np.shape(m1)[1]):
if diffs[i * np.shape(m1)[0] + j] == -1:
if m2[i, j] > m1[i, j]:
print(f'Wrong assumption on negative movement in {i, j}')
return False
if diffs[i * np.shape(m1)[0] + j] == 1:
if m2[i, j] < m1[i, j]:
print(f'Wrong assumption on positive movement in {i, j}')
return False
if diffs[i * np.shape(m1)[0] + j] == 0:
if abs(m2[i, j] - m1[i, j]) > threshold:
return False
return True
def _angle(v1, v2):
return np.arccos((v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]
/ (np.sqrt(v1[0] ** 2 + v1[1] ** 2 + v1[2] ** 2) *
np.sqrt(v2[0] ** 2 + v2[1] ** 2 + v2[2] ** 2))))
def _check_orthogonal_axes(system: np.ndarray, threshold: float = 0.0001):
    """
    Helper method for rotate_system_test().
    Control whether the angles between the axes are within some threshold of 90 degrees.
    """
    right_angle = 0.5 * np.pi
    # Check every distinct pair of column axes.
    for a, b in ((0, 1), (0, 2), (1, 2)):
        if abs(_angle(system[:, a], system[:, b]) - right_angle) > threshold:
            return False
    return True
def _stats_from_skewed_system(sys_1: np.ndarray, sys_2: np.ndarray):
    """
    Used for observing the error that stems from multiple roundtrip rotations.

    Returns (max_coordinate_drift, mean_coordinate_drift, max_angular_drift,
    mean_angular_drift, mean_orthogonality).
    """
    # Individual coordinate drift of each axis.
    coordinate_drift = np.abs(sys_2.flatten() - sys_1.flatten())
    max_coordinate_drift = np.max(coordinate_drift)
    mean_coordinate_drift = np.mean(coordinate_drift)
    # Angles between each original axis and its rotated counterpart.
    axis_drift = np.array([_angle(sys_1[:, k], sys_2[:, k]) for k in (0, 1, 2)])
    max_angular_drift = np.max(axis_drift)
    mean_angular_drift = np.mean(axis_drift)
    # Orthogonality of the resulting system: mean pairwise axis angle,
    # expressed as the deviation from 90 degrees.
    pair_angles = np.array([_angle(sys_2[:, 0], sys_2[:, 1]),
                            _angle(sys_2[:, 1], sys_2[:, 2]),
                            _angle(sys_2[:, 0], sys_2[:, 2])])
    mean_orthogonality = np.mean(pair_angles) - 0.5 * np.pi
    return max_coordinate_drift, mean_coordinate_drift, max_angular_drift, mean_angular_drift, mean_orthogonality
#######################################################################
# Test-run bookkeeping: the sub-test groups below add to these counters
# and append failed test labels to error_message.
print('_______ rotate_system_test() started _______\n')
error_message = 'FAILED\n' \
                'rotate_system_test()\n' \
                'Tests failed:'
rotation_tests = 0
passed_rotation_tests = 0
# Object under test.
float_service = fs.Rotations()
# Testing no rotation
def _no_rotation_tests():
    """Check that rotating by zero angles leaves the system unchanged.

    Returns (tests_run, tests_passed, error_message_fragment).
    """
    er_msg = ''
    r_tests = 0
    r_passed = 0
    # Identity system: unit axes as columns.
    system = np.array([[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0],
                       [0.0, 0.0, 1.0]])
    # Zero rotation about every axis.
    angles = np.array([0.0, 0.0, 0.0])
    test_sys = float_service.rotate_system(sys_ax=system,
                                           sensor_angles=angles)
    # 0.1
    r_tests += 1
    if not _check_for_ndarray_element_similarity(system, test_sys):
        er_msg += ' 0.1,'
    else:
        r_passed += 1
    # 0.2
    # NOTE(review): 0.2 asserts the identical condition as 0.1 on the same
    # test_sys -- possibly a second scenario was intended here; confirm.
    r_tests += 1
    if not _check_for_ndarray_element_similarity(system, test_sys):
        er_msg += ' 0.2,'
    else:
        r_passed += 1
    return r_tests, r_passed, er_msg
# Fold this group's results into the overall tally.
t, p, m = _no_rotation_tests()
rotation_tests += t
passed_rotation_tests += p
error_message += m
###########################################################
# Elemental rotation tests
def _elemental_rotation_tests():
    """Run the six 90-degree single-axis (elemental) rotation checks.

    Returns (tests_run, tests_passed, error_message_fragment).
    """
    er_msg = ''
    r_tests = 0
    r_passed = 0
    identity = np.array([[1.0, 0.0, 0.0],
                         [0.0, 1.0, 0.0],
                         [0.0, 0.0, 1.0]])
    half_pi = 0.5 * np.pi
    # (label, rotation angles, expected system): +/-90 degrees about the
    # x-, y- and z-axis respectively.
    cases = (
        ('1.1', np.array([half_pi, 0, 0]), np.array([[1.0, 0.0, 0.0],
                                                     [0.0, 0.0, -1.0],
                                                     [0.0, 1.0, 0.0]])),
        ('1.2', np.array([-half_pi, 0, 0]), np.array([[1.0, 0.0, 0.0],
                                                      [0.0, 0.0, 1.0],
                                                      [0.0, -1.0, 0.0]])),
        ('1.3', np.array([0, half_pi, 0]), np.array([[0.0, 0.0, 1.0],
                                                     [0.0, 1.0, 0.0],
                                                     [-1.0, 0.0, 0.0]])),
        ('1.4', np.array([0, -half_pi, 0]), np.array([[0.0, 0.0, -1.0],
                                                      [0.0, 1.0, 0.0],
                                                      [1.0, 0.0, 0.0]])),
        ('1.5', np.array([0, 0, half_pi]), np.array([[0.0, -1.0, 0.0],
                                                     [1.0, 0.0, 0.0],
                                                     [0.0, 0.0, 1.0]])),
        ('1.6', np.array([0, 0, -half_pi]), np.array([[0.0, 1.0, 0.0],
                                                      [-1.0, 0.0, 0.0],
                                                      [0.0, 0.0, 1.0]])),
    )
    for label, angles, expected in cases:
        r_tests += 1
        rotated = float_service.rotate_system(identity, angles)
        if not _check_for_ndarray_element_similarity(rotated, expected):
            er_msg += ' ' + label + ','
        else:
            r_passed += 1
    return r_tests, r_passed, er_msg
# Fold this group's results into the overall tally.
t, p, m = _elemental_rotation_tests()
rotation_tests += t
passed_rotation_tests += p
error_message += m
###########################################################
# Linked rotation tests
def _linked_rotation_tests():
    """Run chained (multiple successive single-axis) rotation checks.

    Returns (tests_run, tests_passed, error_message_fragment).

    Fixes: the failure tag for test 2.2 was missing its trailing comma
    (inconsistent with 2.1/2.3); the unused el_rot_2 local was removed.
    """
    er_msg = ''
    r_tests = 0
    r_passed = 0
    system = np.array([[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0],
                       [0.0, 0.0, 1.0]])
    half_pi = 0.5 * np.pi
    # Elemental rotations used by the chains below.
    rot_x_pos = np.array([half_pi, 0, 0])
    rot_y_pos = np.array([0, half_pi, 0])
    rot_y_neg = np.array([0, -half_pi, 0])
    rot_z_pos = np.array([0, 0, half_pi])
    rot_z_neg = np.array([0, 0, -half_pi])

    def _chain(*rotations):
        # Apply the given rotations to the identity system, in order.
        sys_ax = system
        for angles in rotations:
            sys_ax = float_service.rotate_system(sys_ax=sys_ax,
                                                 sensor_angles=angles)
        return sys_ax

    # (label, rotation chain, expected resulting system)
    cases = (
        # Sys 1: 90 deg x-axis, 90 deg y-axis
        ('2.1', (rot_x_pos, rot_y_pos), np.array([[0.0, 0.0, 1.0],
                                                  [1.0, 0.0, 0.0],
                                                  [0.0, 1.0, 0.0]])),
        # Sys 2: 90 deg z-axis, -90 deg y-axis
        ('2.2', (rot_z_pos, rot_y_neg), np.array([[0.0, -1.0, 0.0],
                                                  [0.0, 0.0, -1.0],
                                                  [1.0, 0.0, 0.0]])),
        # Sys 3: 180 deg y-axis, -90 deg z-axis, 90 deg x-axis
        ('2.3', (rot_y_pos, rot_y_pos, rot_z_neg, rot_x_pos),
         np.array([[0.0, 0.0, 1.0],
                   [-1.0, 0.0, 0.0],
                   [0.0, -1.0, 0.0]])),
    )
    for label, rotations, expected in cases:
        r_tests += 1
        if not _check_for_ndarray_element_similarity(_chain(*rotations), expected):
            er_msg += ' ' + label + ','
        else:
            r_passed += 1
    return r_tests, r_passed, er_msg
# Fold this group's results into the overall tally.
t, p, m = _linked_rotation_tests()
rotation_tests += t
passed_rotation_tests += p
error_message += m
###########################################################
# Multi-axis rotation tests
def _multi_axis_rotation_tests():
"""
This test cannot be validated on the same terms as the other ones, by using check_for_ndarray_similarity().
This is because the other ones test for precise answers, while the only method we have to calculate the
result of this test(as of now) is the method being tested.
We can however look for signs as to whether it performs the way we want it to.
"""
er_msg = ''
r_tests = 0
r_passed | |
var ci = @{{classinfo}}.__array[index];
if (@{{isinstance}}(@{{object_}}, ci)) {
return true;
}
}
return false;
} else {
return @{{_isinstance}}(@{{object_}}, @{{classinfo}});
}
""")
# Single-class isinstance check, compiled to inline JavaScript by the pyjs
# translator (@{{...}} markers are resolved at compile time).  Walks the
# object's __mro__ / __$super_cache__ to decide membership.  The JS template
# string is runtime-significant and must not be edited casually.
def _isinstance(object_, classinfo):
    JS("""
    if (@{{object_}}.__is_instance__ === false && @{{classinfo}}.__is_instance__ === false) {
        @{{object_}} = @{{object_}}.__class__;
    } else if ( @{{object_}}.__is_instance__ !== true
                || @{{classinfo}}.__is_instance__ === null) {
        return false;
    }
    if (@{{object_}}.__is_instance__ === false && @{{classinfo}} === @{{type}}) {
        return true;
    } else if (@{{classinfo}}.__is_instance__ === false) {
        var hash = @{{classinfo}}.$H;
        if (hash in @{{object_}}.__$super_cache__) {
            return true;
        }
        if (@{{classinfo}} === @{{object}}) {
            return true;
        }
    } else {
        var __mro__ = @{{object_}}.__mro__.__array;
        var n = __mro__.length;
        while (--n >= 0) {
            if (__mro__[n] === @{{classinfo}}.__class__) return true;
        }
    }
    return false;
    """)
# pyjs implementation of the builtin issubclass(); validates both arguments
# via inline JS checks, expands tuple classinfo recursively, then defers to
# _issubtype for the single-class case.
def issubclass(class_, classinfo):
    if JS(""" typeof @{{class_}} == 'undefined' || @{{class_}} === null || @{{class_}}.__is_instance__ !== false """):
        raise TypeError("arg 1 must be a class")
    if isinstance(classinfo, tuple):
        # Any match within the tuple suffices, mirroring CPython behavior.
        for ci in classinfo:
            if issubclass(class_, ci):
                return True
        return False
    else:
        if JS(""" typeof @{{classinfo}} == 'undefined' || @{{classinfo}}.__is_instance__ !== false """):
            raise TypeError("arg 2 must be a class or tuple of classes")
        return _issubtype(class_, classinfo)
# Single-class subclass check: scans object_'s __mro__ for classinfo (either
# a class object or an instance's __class__).  Compiled to inline JS.
def _issubtype(object_, classinfo):
    JS("""
    if ( @{{object_}}.__is_instance__ === null
        || @{{classinfo}}.__is_instance__ === null) {
        return false;
    }
    var __mro__ = @{{object_}}.__mro__.__array;
    var n = __mro__.length;
    if (@{{classinfo}}.__is_instance__ === false) {
        while (--n >= 0) {
            if (@{{object_}}.__mro__.__array[n] === @{{classinfo}}) {
                return true;
            }
        }
        return false;
    }
    while (--n >= 0) {
        if (@{{object_}}.__mro__.__array[n] === @{{classinfo}}.__class__) return true;
    }
    return false;
    """)
# Presumably a JS code template consumed by the pyjs compiler for checked
# attribute access (the body is a docstring of JS source plus `pass`) --
# TODO confirm how the translator uses it before touching the string.
# NOTE(review): the template mixes `pyjs__testval`, `$pyjs__testval` and
# `pyjs_testval` spellings -- looks like typos; verify against the compiler.
def __getattr_check(attr, attr_left, attr_right, attrstr,
                    bound_methods, descriptors,
                    attribute_checking):
    """
    (function(){
    var $pyjs__testval;
    var v, vl; /* hmm.... */
    if (bound_methods || descriptors) {
        pyjs__testval = (v=(vl=attr_left)[attr_right]) == null ||
                        ((vl.__is_instance__) &&
                         typeof v == 'function');
        if (descriptors) {
            pyjs_testval = pyjs_testval ||
                           (typeof v['__get__'] == 'function');
        }
        pyjs__testval = (pyjs__testval ?
                         @{{getattr}}(vl, attr_right):
                         attr);
    } else {
        pyjs__testval = attr;
    }
    return (typeof $pyjs__testval=='undefined'?
            (function(){throw TypeError(attrstr + " is undefined");})():
            $pyjs__testval);
    )();
    """
    pass
_wrap_unbound_method = JS("""function(method) {
var fnwrap = function() {
if ($pyjs.options.arg_instance_type && arguments.length >= 1) {
$pyjs_check_instance_type(arguments[0], method, null);
}
if ($pyjs.options.arg_is_instance && arguments.length >= 1
&& (arguments[0] == null || arguments[0].__is_instance__ !== true)) {
$pyjs__exception_func_instance_expected(method.__name__, method.__class__.__name__, arguments[0]);
}
return method.apply(arguments[0], $pyjs_array_slice.call(arguments, 1));
};
fnwrap.__name__ = method.__name__;
fnwrap.__args__ = method.__args__;
fnwrap.__is_staticmethod__ = true;
fnwrap.__class__ = method.__class__;
fnwrap.__doc__ = method.__doc__ || '';
fnwrap.__is_instance__ = method.__is_instance__;
return fnwrap;
};
""")
_wrap_unchecked_unbound_method = JS("""function(method) {
var fnwrap = function() {
return method.apply(arguments[0], $pyjs_array_slice.call(arguments, 1));
};
fnwrap.__name__ = method.__name__;
fnwrap.__args__ = method.__args__;
fnwrap.__is_staticmethod__ = true;
fnwrap.__class__ = method.__class__;
fnwrap.__doc__ = method.__doc__ || '';
fnwrap.__is_instance__ = method.__is_instance__;
return fnwrap;
};
""")
# pyjs implementation of the builtin getattr(obj, name[, default]).  The
# whole body is a JS template: it remaps reserved names via attrib_remap,
# honors descriptors (__get__), returns the optional third argument when the
# attribute is missing, and binds plain functions to `obj` via a wrapper
# that copies the method's metadata.  The string is runtime-significant.
def getattr(obj, name):
    JS("""
    if (@{{obj}} === null || typeof @{{obj}} == 'undefined') {
        if (arguments.length != 3 || typeof @{{obj}} == 'undefined') {
            throw $pyce(@{{AttributeError}}("'" + @{{repr}}(@{{obj}}) + "' has no attribute '" + @{{name}} + "'"));
        }
        return arguments[2];
    }
    // always remap `name` if in attrib_remap so that manually executing
    // getattr() with `name` being a string will resolve correctly,
    // however, note that accessing some attributes on native JS objects won't
    // work i.e. `native_js_object.length` for example. This has to be accessed via
    // JS()!
    var re_mapped = '$$' + @{{name}} in attrib_remap ? '$$' + @{{name}} : @{{name}};
    var method = @{{obj}}[re_mapped];
    if (typeof method == 'undefined') {
        /*
        if (typeof @{{obj}} == 'function' && re_mapped == '__call__') {
            return @{{obj}};
        }
        if (@{{obj}}.__is_instance__ === true &&
            typeof @{{obj}}.__getattr__ == 'function') {
            // pass the pure name to __getattr__ not the remapped one! (the user
            // should not check for remapped names)
            if (arguments.length != 3) {
                return @{{obj}}.__getattr__(@{{name}});
            }
            else {
                try {
                    return @{{obj}}.__getattr__(@{{name}});
                } catch (e) {
                    if (@{{isinstance}}(e['$pyjs_exc'] || e, @{{AttributeError}})) {
                        return arguments[2];
                    }
                    throw e;
                }
            }
        }
        */
        if (arguments.length == 2) {
            throw $pyce(@{{AttributeError}}("'" + @{{repr}}(@{{obj}}) + "' has no attribute '" + @{{name}}+ "'"));
        }
        return arguments[2];
    }
    if (method === null || @{{obj}}.$_fast_super)
        return method;
    if (typeof method.__get__ == 'function') {
        if (@{{obj}}.__is_instance__) {
            return method.__get__(@{{obj}}, @{{obj}}.__class__);
        }
        return method.__get__(null, @{{obj}}.__class__);
    }
    if ( typeof method != 'function'
         || typeof method.__is_instance__ != 'undefined'
         || (method.__is_classmethod__ !== true
             && (@{{obj}}.__is_instance__ === false
                 || (typeof @{{obj}}.__class__ != 'undefined'
                     && @{{obj}}.hasOwnProperty(re_mapped))))
         || re_mapped == '__class__') {
        /*
        if (typeof method == 'function'
            && typeof method.__is_instance__ == 'undefined'
            && method.__is_classmethod__ !== true
            && method.__is_staticmethod__ !== true
            && @{{obj}}.__is_instance__ === false) {
            if ($pyjs.options.arg_instance_type || $pyjs.options.arg_is_instance) {
                return @{{_wrap_unbound_method}}(method);
            }
            return @{{_wrap_unchecked_unbound_method}}(method);
        }
        */
        return method;
    }
    var fnwrap = function() {
        if ($pyjs.options.arg_instance_type) {
            var first_arg = null;
            if (arguments.length >= 1)
                first_arg = arguments[0];
            $pyjs_check_instance_type(@{{obj}}, method, first_arg);
        }
        if ($pyjs.options.arg_is_instance) {
            $pyjs_check_if_instance(@{{obj}}, method, arguments);
        }
        return method.apply(@{{obj}}, $pyjs_array_slice.call(arguments));
    };
    // copy all attribues
    for (var attr in method) {
        if (attr !== '__is_staticmethod__' && attr !== '__is_classmethod__') {
            fnwrap[attr] = method[attr];
        }
    }
    if (typeof fnwrap.__name__ == 'undefined') {
        fnwrap.__name__ = re_mapped;
    }
    if (fnwrap.__args__ != null) {
        // Remove the bound instance from the args list
        fnwrap.__args__ = fnwrap.__args__.slice(0, 2).concat(fnwrap.__args__.slice(3));
    }
    fnwrap.__is_staticmethod__ = true;
    fnwrap.__class__ = @{{obj}}.__class__;
    fnwrap.__doc__ = method.__doc__ || '';
    return fnwrap;
    """)
# Deletion helper: prefers the object's __delete__ descriptor hook, else
# falls back to the JS `delete` operator.  Compiled to inline JS.
def _del(obj):
    JS("""
    if (typeof @{{obj}}.__delete__ == 'function') {
        @{{obj}}.__delete__(@{{obj}});
    } else {
        delete @{{obj}};
    }
    """)
# pyjs implementation of the builtin delattr(): dispatches to __delattr__
# when the instance defines one, otherwise removes the (remapped) property,
# honoring a __delete__ descriptor.  Raises AttributeError variants below.
# NOTE(review): the final guard reads `typeof obj != 'object' && typeof
# obj == 'function'`, and the NoneType message lacks a space between
# "object" and "has" -- looks off; confirm before changing (runtime string).
def delattr(obj, name):
    JS("""
    if (typeof @{{obj}}== 'undefined') {
        throw $pyce(@{{UndefinedValueError}}("delattr() on undefined"));
    }
    if (typeof @{{name}}!= 'string') {
        throw $pyce(@{{TypeError}}("attribute name must be string"));
    }
    if (@{{obj}}.__is_instance__ && typeof @{{obj}}.__delattr__ == 'function') {
        // pass in the pure name instead of the remapped one (users should not
        // care about remapping)!
        @{{obj}}.__delattr__(@{{name}});
        return;
    }
    var mapped_name = '$$' + @{{name}} in attrib_remap ? '$$' + @{{name}} : @{{name}};
    if ( @{{obj}}!== null
        && (typeof @{{obj}}== 'object' || typeof @{{obj}}== 'function')
        && (typeof(@{{obj}}[mapped_name]) != "undefined")
        &&(typeof(@{{obj}}[mapped_name]) != "function") ){
        if (@{{obj}}.__is_instance__
            && typeof @{{obj}}[mapped_name].__delete__ == 'function') {
            @{{obj}}[mapped_name].__delete__(@{{obj}});
        } else {
            delete @{{obj}}[mapped_name];
        }
        return;
    }
    if (@{{obj}}=== null) {
        throw $pyce(@{{AttributeError}}("'NoneType' object"+
                                        "has no attribute '"+@{{name}}+"'"));
    }
    if (typeof @{{obj}}!= 'object' && typeof @{{obj}}== 'function') {
        throw $pyce(@{{AttributeError}}("'"+typeof(@{{obj}})+
                                        "' object has no attribute '"+@{{name}}+"'"));
    }
    throw $pyce(@{{AttributeError}}(@{{obj}}.__name__+
                                    " instance has no attribute '"+ @{{name}}+"'"));
    """)
# pyjs implementation of the builtin setattr(): dispatches to a custom
# __setattr__ when present (and not object's default), remaps reserved
# names, and honors a __set__ data descriptor before plain assignment.
def setattr(obj, name, value):
    JS("""
    if (typeof @{{obj}}== 'undefined') {
        throw $pyce(@{{UndefinedValueError}}("setattr() on undefined"));
    }
    if (typeof @{{name}}!= 'string') {
        throw $pyce(@{{TypeError}}("attribute name must be string"));
    }
    if (@{{obj}}.__is_instance__ && typeof @{{obj}}.__setattr__ == 'function' && @{{obj}}.__setattr__ !== @{{object}}.__setattr__) {
        @{{obj}}.__setattr__(@{{name}}, @{{value}})
        return;
    }
    if ('$$' + @{{name}} in attrib_remap) {
        @{{name}}= '$$' + @{{name}};
    }
    if ( typeof @{{obj}}[@{{name}}] != 'undefined'
        && @{{obj}}.__is_instance__
        && @{{obj}}[@{{name}}] !== null
        && typeof @{{obj}}[@{{name}}].__set__ == 'function') {
        @{{obj}}[@{{name}}].__set__(@{{obj}}, @{{value}});
    } else {
        @{{obj}}[@{{name}}] = @{{value}};
    }
    """)
# pyjs implementation of the builtin hasattr(): remaps reserved names and
# tests property existence; functions always "have" __call__.
def hasattr(obj, name):
    JS("""
    if (typeof @{{obj}} == 'undefined') {
        throw $pyce(@{{UndefinedValueError}}("hasattr() on undefined"));
    }
    if (typeof @{{name}} != 'string') {
        throw $pyce(@{{TypeError}}("attribute name must be string"));
    }
    if (@{{obj}} === null) {
        return false;
    }
    if ('$$' + @{{name}} in attrib_remap) {
        @{{name}} = '$$' + @{{name}};
    }
    if (typeof @{{obj}} == 'function' && @{{name}} === '__call__') {
        return true;
    }
    if (typeof @{{obj}}[@{{name}}] == 'undefined') {
        return false;
    }
    //if (@{{obj}}!= 'object' && typeof @{{obj}}!= 'function')
    //    return false;
    return true;
    """)
# pyjs implementation of the builtin dir(): enumerates JS properties,
# translating '$$'-remapped names back to their Python spellings and
# skipping JS-internal properties.
def dir(obj):
    JS("""
    if (typeof @{{obj}}== 'undefined') {
        throw $pyce(@{{UndefinedValueError}}("dir() on undefined"));
    }
    var properties=@{{list}}.__new__(@{{list}});
    for (var property in @{{obj}}) {
        if (property.substring(0, 2) === '$$' && property in attrib_remap) {
            // handle back mapping of name
            properties.append(attrib_remap[property]);
        } else if ('$$' + property in attrib_remap) {
            // ignore JS-internal properties
            continue;
        } else {
            properties.append(property);
        }
    }
    return properties;
    """)
def filter(obj, method, sequence=None):
    """pyjs emulation of the builtin filter(), returning a list.

    Object context is LOST when a method is passed, hence the object must
    be passed separately; to emulate Python behaviour, this should be
    generated inline rather than as a function call.
    """
    if sequence is None:
        # Two-argument form: (predicate, sequence) -- shift the arguments.
        sequence = method
        method = obj
        return [item for item in sequence if method(item)]
    # Three-argument form: invoke the predicate with its object context.
    return [item for item in sequence if method.call(obj, item)]
def map(obj, method, | |
type as `s0`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (r0, r1).
r0: A `Tensor`. Has the same type as `s0`.
r1: A `Tensor`. Has the same type as `s0`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "BroadcastGradientArgs", name,
tld.op_callbacks, s0, s1)
_result = _BroadcastGradientArgsOutput._make(_result)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return broadcast_gradient_args_eager_fallback(
s0, s1, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"BroadcastGradientArgs", s0=s0, s1=s1, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"BroadcastGradientArgs", _inputs_flat, _attrs, _result)
_result = _BroadcastGradientArgsOutput._make(_result)
return _result
BroadcastGradientArgs = tf_export("raw_ops.BroadcastGradientArgs")(_ops.to_raw_op(broadcast_gradient_args))
def broadcast_gradient_args_eager_fallback(s0, s1, name, ctx):
  """Slow-path eager execution of the BroadcastGradientArgs op.

  Coerces both shape tensors to one integer dtype (int32 by default),
  executes the op and returns the named (r0, r1) output tuple.
  """
  dtype_attr, matched = _execute.args_to_matching_eager([s0, s1], ctx,
                                                        _dtypes.int32)
  s0, s1 = matched
  flat_inputs = [s0, s1]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"BroadcastGradientArgs", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BroadcastGradientArgs", flat_inputs, op_attrs,
                             outputs)
  return _BroadcastGradientArgsOutput._make(outputs)
@_dispatch.add_dispatch_list
@tf_export('broadcast_to')
def broadcast_to(input, shape, name=None):
  r"""Broadcast an array for a compatible shape.
  Broadcasting is the process of making arrays to have compatible shapes
  for arithmetic operations. Two shapes are compatible if for each
  dimension pair they are either equal or one of them is one. When trying
  to broadcast a Tensor to a shape, it starts with the trailing dimensions,
  and works its way forward.
  For example,
  >>> x = tf.constant([1, 2, 3])
  >>> y = tf.broadcast_to(x, [3, 3])
  >>> print(y)
  tf.Tensor(
  [[1 2 3]
   [1 2 3]
   [1 2 3]], shape=(3, 3), dtype=int32)
  In the above example, the input Tensor with the shape of `[1, 3]`
  is broadcasted to output Tensor with shape of `[3, 3]`.
  When doing broadcasted operations such as multiplying a tensor
  by a scalar, broadcasting (usually) confers some time or space
  benefit, as the broadcasted tensor is never materialized.
  However, `broadcast_to` does not carry with it any such benefits.
  The newly-created tensor takes the full memory of the broadcasted
  shape. (In a graph context, `broadcast_to` might be fused to
  subsequent operation and then be optimized away, however.)
  Args:
    input: A `Tensor`. A Tensor to broadcast.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      An 1-D `int` Tensor. The shape of the desired output.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Generated op wrapper: try the C++ eager fast path first, then the slow
  # eager fallback, and finally (or in graph mode) build a graph node.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # fast path: execute directly through the eager runtime
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BroadcastTo", name,
        tld.op_callbacks, input, shape)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # slow path: explicit input conversion, then eager execution
      return broadcast_to_eager_fallback(
          input, shape, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # give registered dispatchers a chance to handle the argument types
      result = _dispatch.dispatch(
            broadcast_to, (), dict(input=input, shape=shape, name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BroadcastTo", input=input, shape=shape, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          broadcast_to, (), dict(input=input, shape=shape, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tidx",
              _op._get_attr_type("Tidx"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BroadcastTo", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

BroadcastTo = tf_export("raw_ops.BroadcastTo")(_ops.to_raw_op(broadcast_to))
def broadcast_to_eager_fallback(input, shape, name, ctx):
  """Slow-path eager execution of the BroadcastTo op."""
  attr_t, (input,) = _execute.args_to_matching_eager([input], ctx)
  attr_tidx, (shape,) = _execute.args_to_matching_eager([shape], ctx,
                                                        _dtypes.int32)
  flat_inputs = [input, shape]
  op_attrs = ("T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"BroadcastTo", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BroadcastTo", flat_inputs, op_attrs, outputs)
  out, = outputs
  return out
@_dispatch.add_dispatch_list
@tf_export('debugging.check_numerics', v1=['debugging.check_numerics', 'check_numerics'])
@deprecated_endpoints('check_numerics')
def check_numerics(tensor, message, name=None):
  r"""Checks a tensor for NaN and Inf values.
  When run, reports an `InvalidArgument` error if `tensor` has any values
  that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
  Args:
    tensor: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    message: A `string`. Prefix of the error message.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  # Generated op wrapper: eager fast path -> eager fallback -> graph build.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "CheckNumerics", name,
        tld.op_callbacks, tensor, "message", message)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return check_numerics_eager_fallback(
          tensor, message=message, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # give registered dispatchers a chance to handle the argument types
      result = _dispatch.dispatch(
            check_numerics, (), dict(tensor=tensor, message=message,
                                     name=name)
          )
      if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return result
      raise
  # Add nodes to the TensorFlow graph.
  message = _execute.make_str(message, "message")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CheckNumerics", tensor=tensor, message=message, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          check_numerics, (), dict(tensor=tensor, message=message, name=name)
        )
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "message",
              _op.get_attr("message"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CheckNumerics", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

CheckNumerics = tf_export("raw_ops.CheckNumerics")(_ops.to_raw_op(check_numerics))
def check_numerics_eager_fallback(tensor, message, name, ctx):
  """Slow-path eager execution of the CheckNumerics op."""
  message = _execute.make_str(message, "message")
  dtype_attr, (tensor,) = _execute.args_to_matching_eager([tensor], ctx)
  flat_inputs = [tensor]
  op_attrs = ("T", dtype_attr, "message", message)
  outputs = _execute.execute(b"CheckNumerics", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("CheckNumerics", flat_inputs, op_attrs, outputs)
  out, = outputs
  return out
def check_numerics_v2(tensor, message, name=None):
  r"""Checks a tensor for NaN, -Inf and +Inf values.
  When run, reports an `InvalidArgument` error if `tensor` has any values
  that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
  Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf in the
  errors it throws.
  Args:
    tensor: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    message: A `string`. Prefix of the error message.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  # Generated op wrapper: eager fast path -> eager fallback -> graph build.
  # Note: unlike check_numerics (V1), this wrapper has no dispatch handling.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "CheckNumericsV2", name,
        tld.op_callbacks, tensor, "message", message)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return check_numerics_v2_eager_fallback(
          tensor, message=message, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  message = _execute.make_str(message, "message")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CheckNumericsV2", tensor=tensor, message=message, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "message",
              _op.get_attr("message"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CheckNumericsV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

CheckNumericsV2 = tf_export("raw_ops.CheckNumericsV2")(_ops.to_raw_op(check_numerics_v2))
def check_numerics_v2_eager_fallback(tensor, message, name, ctx):
  """Slow-path eager execution of the CheckNumericsV2 op."""
  message = _execute.make_str(message, "message")
  dtype_attr, (tensor,) = _execute.args_to_matching_eager([tensor], ctx)
  flat_inputs = [tensor]
  op_attrs = ("T", dtype_attr, "message", message)
  outputs = _execute.execute(b"CheckNumericsV2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("CheckNumericsV2", flat_inputs, op_attrs,
                             outputs)
  out, = outputs
  return out
def concat(concat_dim, values, name=None):
  r"""Concatenates tensors along one dimension.
  Args:
    concat_dim: A `Tensor` of type `int32`.
      0-D. The dimension along which to concatenate. Must be in the
      range [0, rank(values)).
    values: A list of at least 2 `Tensor` objects with the same type.
      The `N` Tensors to concatenate. Their ranks and types must match,
      and their sizes must match in all dimensions except `concat_dim`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `values`.
  """
  # Generated op wrapper: eager fast path -> eager fallback -> graph build.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Concat", name,
        tld.op_callbacks, concat_dim, values)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return concat_eager_fallback(
          concat_dim, values, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'concat' Op, not %r." % values)
  # the 'N' attr is inferred from the list length by _apply_op_helper
  _attr_N = len(values)
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Concat", concat_dim=concat_dim, values=values, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Concat", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

Concat = tf_export("raw_ops.Concat")(_ops.to_raw_op(concat))
def concat_eager_fallback(concat_dim, values, name, ctx):
if not isinstance(values, (list, tuple)):
raise TypeError(
"Expected list for 'values' argument to "
"'concat' Op, not %r." | |
by means
# of INIT-VALUE-REF elements. Unfortunately, these are not
# standard references so we have to go down a separate
# code path...
ref_elem = signal_elem.find(f'./ns:INIT-VALUE-REF',
self._xml_namespaces)
if ref_elem is None:
# no initial value found here
return None
literal_spec = \
self._follow_arxml_reference(
base_elem=signal_elem,
arxml_path=ref_elem.text,
dest_tag_name=ref_elem.attrib.get('DEST'),
refbase_name=ref_elem.attrib.get('BASE'))
if literal_spec is None:
# dangling reference...
return None
literal_value = \
literal_spec.find(f'./ns:VALUE', self._xml_namespaces)
return None if literal_value is None else literal_value.text
def _load_signal_byte_order(self, i_signal_to_i_pdu_mapping):
packing_byte_order = \
self._get_unique_arxml_child(i_signal_to_i_pdu_mapping,
'PACKING-BYTE-ORDER')
if packing_byte_order is not None \
and packing_byte_order.text == 'MOST-SIGNIFICANT-BYTE-FIRST':
return 'big_endian'
else:
return 'little_endian'
def _load_system_signal_unit(self, system_signal, compu_method):
res = self._get_unique_arxml_child(system_signal,
[
'PHYSICAL-PROPS',
'SW-DATA-DEF-PROPS-VARIANTS',
'&SW-DATA-DEF-PROPS-CONDITIONAL',
'&UNIT',
'DISPLAY-NAME'
])
if res is None and compu_method is not None:
# try to go via the compu_method
res = self._get_unique_arxml_child(compu_method,
[
'&UNIT',
'DISPLAY-NAME'
])
ignorelist = ( "NoUnit", )
if res is None or res.text in ignorelist:
return None
return res.text
    def _load_texttable(self, compu_method, decimal, is_float):
        """Load a TEXTTABLE compu method.

        Returns a ``(minimum, maximum, choices)`` tuple where *choices*
        maps raw values to their named values. The extrema over all
        scales are also stored on the *decimal* object.
        """
        minimum = None
        maximum = None
        choices = {}
        for compu_scale in self._get_arxml_children(compu_method,
                                                    [
                                                        '&COMPU-INTERNAL-TO-PHYS',
                                                        'COMPU-SCALES',
                                                        '*&COMPU-SCALE'
                                                    ]):
            lower_limit = \
                self._get_unique_arxml_child(compu_scale, 'LOWER-LIMIT')
            upper_limit = \
                self._get_unique_arxml_child(compu_scale, 'UPPER-LIMIT')
            vt = \
                self._get_unique_arxml_child(compu_scale, ['&COMPU-CONST', 'VT'])
            comments = self._load_comments(compu_scale)
            # range of the internal values of the scale.
            minimum_int_scale = \
                None \
                if lower_limit is None \
                else parse_number_string(lower_limit.text, is_float)
            maximum_int_scale = \
                None \
                if upper_limit is None \
                else parse_number_string(upper_limit.text, is_float)
            # for texttables the internal and the physical values are identical
            if minimum is None:
                minimum = minimum_int_scale
            elif minimum_int_scale is not None:
                minimum = min(minimum, minimum_int_scale)
            if maximum is None:
                maximum = maximum_int_scale
            elif maximum_int_scale is not None:
                maximum = max(maximum, maximum_int_scale)
            if vt is not None:
                # NOTE(review): assumes LOWER-LIMIT is present whenever a VT
                # entry exists; a scale with a VT but no LOWER-LIMIT would
                # raise AttributeError here -- confirm against the schema.
                value = parse_number_string(lower_limit.text, is_float)
                name = vt.text
                choices[value] = NamedSignalValue(value, name, comments)
        # NOTE(review): the raw (non-Decimal) extrema are stored here,
        # unlike _load_scale_linear_and_texttable() which stores Decimals.
        decimal.minimum = minimum
        decimal.maximum = maximum
        return minimum, maximum, choices
def _load_linear_factor_and_offset(self, compu_scale, decimal):
compu_rational_coeffs = \
self._get_unique_arxml_child(compu_scale, '&COMPU-RATIONAL-COEFFS')
if compu_rational_coeffs is None:
return None, None
numerators = self._get_arxml_children(compu_rational_coeffs,
['&COMPU-NUMERATOR', '*&V'])
if len(numerators) != 2:
raise ValueError(
'Expected 2 numerator values for linear scaling, but '
'got {}.'.format(len(numerators)))
denominators = self._get_arxml_children(compu_rational_coeffs,
['&COMPU-DENOMINATOR', '*&V'])
if len(denominators) != 1:
raise ValueError(
'Expected 1 denominator value for linear scaling, but '
'got {}.'.format(len(denominators)))
denominator = Decimal(denominators[0].text)
decimal.scale = Decimal(numerators[1].text) / denominator
decimal.offset = Decimal(numerators[0].text) / denominator
return float(decimal.scale), float(decimal.offset)
    def _load_linear(self, compu_method, decimal, is_float):
        """Load a LINEAR compu method (a single linear scale).

        Returns a ``(minimum, maximum, factor, offset)`` tuple describing
        the physical value range and scaling; the extrema are also stored
        on the *decimal* object.
        """
        compu_scale = self._get_unique_arxml_child(compu_method,
                                                   [
                                                       'COMPU-INTERNAL-TO-PHYS',
                                                       'COMPU-SCALES',
                                                       '&COMPU-SCALE'
                                                   ])
        lower_limit = self._get_unique_arxml_child(compu_scale, '&LOWER-LIMIT')
        upper_limit = self._get_unique_arxml_child(compu_scale, '&UPPER-LIMIT')
        # range of the internal values
        minimum_int = \
            None \
            if lower_limit is None \
            else parse_number_string(lower_limit.text, is_float)
        maximum_int = \
            None \
            if upper_limit is None \
            else parse_number_string(upper_limit.text, is_float)
        factor, offset = \
            self._load_linear_factor_and_offset(compu_scale, decimal)
        # identity scaling when no rational coefficients are specified
        factor = 1.0 if factor is None else factor
        offset = 0.0 if offset is None else offset
        # range of the physical values
        minimum = None if minimum_int is None else minimum_int*factor + offset
        maximum = None if maximum_int is None else maximum_int*factor + offset
        # NOTE(review): Decimal(float) keeps binary-float inexactness;
        # presumably acceptable here, but it differs from the exact
        # string-based Decimals built in _load_linear_factor_and_offset().
        decimal.minimum = None if minimum is None else Decimal(minimum)
        decimal.maximum = None if maximum is None else Decimal(maximum)
        return minimum, maximum, factor, offset
def _load_scale_linear_and_texttable(self, compu_method, decimal, is_float):
minimum = None
maximum = None
factor = 1
offset = 0
choices = {}
for compu_scale in self._get_arxml_children(compu_method,
[
'&COMPU-INTERNAL-TO-PHYS',
'COMPU-SCALES',
'*&COMPU-SCALE'
]):
lower_limit = \
self._get_unique_arxml_child(compu_scale, 'LOWER-LIMIT')
upper_limit = \
self._get_unique_arxml_child(compu_scale, 'UPPER-LIMIT')
vt = \
self._get_unique_arxml_child(compu_scale, ['&COMPU-CONST', 'VT'])
comments = self._load_comments(compu_scale)
# range of the internal values of the scale.
minimum_int_scale = \
None if lower_limit is None \
else parse_number_string(lower_limit.text, is_float)
maximum_int_scale = \
None if upper_limit is None \
else parse_number_string(upper_limit.text, is_float)
# TODO: make sure that no conflicting scaling factors and offsets
# are specified. For now, let's just assume that the ARXML file is
# well formed.
factor_scale, offset_scale = \
self._load_linear_factor_and_offset(compu_scale, decimal)
if factor_scale is not None:
factor = factor_scale
else:
factor_scale = 1.0
if offset_scale is not None:
offset = offset_scale
else:
offset_scale = 0.0
# range of the physical values of the scale.
if minimum is None:
minimum = minimum_int_scale*factor_scale + offset_scale
elif minimum_int_scale is not None:
minimum = min(minimum,
minimum_int_scale*factor_scale + offset_scale)
if maximum is None:
maximum = maximum_int_scale*factor_scale + offset_scale
elif maximum_int_scale is not None:
maximum = max(maximum,
maximum_int_scale*factor_scale + offset_scale)
if vt is not None:
assert(minimum_int_scale is not None \
and minimum_int_scale == maximum_int_scale)
value = minimum_int_scale
name = vt.text
choices[value] = NamedSignalValue(value, name, comments)
decimal.minimum = Decimal(minimum)
decimal.maximum = Decimal(maximum)
return minimum, maximum, factor, offset, choices
    def _load_system_signal(self, system_signal, decimal, is_float):
        """Gather range, scaling, choices, unit and comments of a
        SYSTEM-SIGNAL.

        Dispatches on the CATEGORY of the signal's compu method; signals
        without a compu method (or with an unimplemented category) keep
        identity scaling.
        """
        minimum = None
        maximum = None
        factor = 1
        offset = 0
        choices = None
        compu_method = self._get_compu_method(system_signal)
        # Unit and comment.
        unit = self._load_system_signal_unit(system_signal, compu_method)
        comments = self._load_comments(system_signal)
        if compu_method is not None:
            category = self._get_unique_arxml_child(compu_method, 'CATEGORY')
            if category is None:
                # if no category is specified, we assume that the
                # physical value of the signal corresponds to its
                # binary representation.
                return (minimum,
                        maximum,
                        factor,
                        offset,
                        choices,
                        unit,
                        comments)
            category = category.text
            if category == 'TEXTTABLE':
                minimum, maximum, choices = \
                    self._load_texttable(compu_method, decimal, is_float)
            elif category == 'LINEAR':
                minimum, maximum, factor, offset = \
                    self._load_linear(compu_method, decimal, is_float)
            elif category == 'SCALE_LINEAR_AND_TEXTTABLE':
                (minimum,
                 maximum,
                 factor,
                 offset,
                 choices) = self._load_scale_linear_and_texttable(compu_method,
                                                                  decimal,
                                                                  is_float)
            else:
                LOGGER.debug('Compu method category %s is not yet implemented.',
                             category)
        # normalize: a loader may have produced None factor/offset
        return \
            minimum, \
            maximum, \
            1 if factor is None else factor, \
            0 if offset is None else offset, \
            choices, \
            unit, \
            comments
def _load_signal_type(self, i_signal):
is_signed = False
is_float = False
base_type = self._get_sw_base_type(i_signal)
if base_type is not None:
base_type_encoding = \
self._get_unique_arxml_child(base_type, '&BASE-TYPE-ENCODING')
if base_type_encoding is None:
btt = base_type.find('./ns:SHORT-NAME', self._xml_namespaces)
btt = btt.text
raise ValueError(
f'BASE-TYPE-ENCODING in base type "{btt}" does not exist.')
base_type_encoding = base_type_encoding.text
if base_type_encoding in ('2C', '1C', 'SM'):
# types which use two-complement, one-complement or
# sign+magnitude encodings are signed. TODO (?): The
# fact that if anything other than two complement
# notation is used for negative numbers is not
# reflected anywhere. In practice this should not
# matter, though, since two-complement notation is
# basically always used for systems build after
# ~1970...
is_signed = True
elif base_type_encoding == 'IEEE754':
is_float = True
return is_signed, is_float
def _get_absolute_arxml_path(self,
base_elem,
arxml_path,
refbase_name=None):
"""Return the absolute ARXML path of a reference
Relative ARXML paths are converted into absolute ones.
"""
if arxml_path.startswith('/'):
# path is already absolute
return arxml_path
base_path = self._node_to_arxml_path[base_elem]
base_path_atoms = base_path.split("/")
# Find the absolute path specified by the applicable
# reference base. The spec says the matching reference
# base for the "closest" package should be used, so we
# traverse the ARXML path of the base element in reverse
# to find the first package with a matching reference
# base.
refbase_path = None
for i in range(len(base_path_atoms), 0, -1):
test_path = '/'.join(base_path_atoms[0:i])
test_node = self._arxml_path_to_node.get(test_path)
if test_node is not None \
and test_node.tag != f'{{{self.xml_namespace}}}AR-PACKAGE':
# the referenced XML node does not represent a
# package
continue
if refbase_name is None:
# the caller did not specify a BASE attribute,
# i.e., we ought to use the closest default
# reference base
refbase_path = \
self._package_default_refbase_path.get(test_path)
if refbase_path is None:
# bad luck: this package does not specify a
# default reference base
continue
else:
break
# the caller specifies a BASE attribute
refbase_path = \
self._package_refbase_paths.get(test_path, {}) \
.get(refbase_name)
if refbase_path is None:
# bad luck: this package does not specify a
# reference base with the specified name
continue
else:
break
if refbase_path is None:
raise ValueError(f"Unknown reference base '{refbase_name}' "
f"for relative ARXML reference '{arxml_path}'")
return f'{refbase_path}/{arxml_path}'
def _follow_arxml_reference(self,
base_elem,
arxml_path,
dest_tag_name=None,
refbase_name=None):
"""Resolve an ARXML reference
It returns the ElementTree node which corresponds to the given
path through the ARXML package structure. If no such node
exists, a None object is returned.
"""
arxml_path = self._get_absolute_arxml_path(base_elem,
arxml_path,
refbase_name)
# resolve the absolute reference: This is simple because | |
<reponame>gmberton/deep-visual-geo-localization-benchmark<filename>test.py
import faiss
import torch
import logging
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
def test_efficient_ram_usage(args, eval_ds, model, test_method="hard_resize"):
    """This function gives the same output as test(), but uses much less RAM.
    This can be useful when testing with large descriptors (e.g. NetVLAD) on large datasets (e.g. San Francisco).
    Obviously it is slower than test(), and can't be used with PCA.

    Instead of materializing all database descriptors, it streams database
    batches and fills a (queries x database) distance matrix incrementally.
    Returns a (recalls, recalls_str) tuple like test().
    """
    model = model.eval()
    # crop-based methods produce 5 descriptors (one per crop) per query
    if test_method == 'nearest_crop' or test_method == "maj_voting":
        distances = np.empty([eval_ds.queries_num * 5, eval_ds.database_num], dtype=np.float32)
    else:
        distances = np.empty([eval_ds.queries_num, eval_ds.database_num], dtype=np.float32)

    with torch.no_grad():
        if test_method == 'nearest_crop' or test_method == 'maj_voting':
            queries_features = np.ones((eval_ds.queries_num * 5, args.features_dim), dtype="float32")
        else:
            queries_features = np.ones((eval_ds.queries_num, args.features_dim), dtype="float32")
        logging.debug("Extracting queries features for evaluation/testing")
        queries_infer_batch_size = 1 if test_method == "single_query" else args.infer_batch_size
        eval_ds.test_method = test_method
        # queries are stored after the database images inside eval_ds
        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num+eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device=="cuda"))
        for inputs, indices in tqdm(queries_dataloader, ncols=100):
            if test_method == "five_crops" or test_method == "nearest_crop" or test_method == 'maj_voting':
                inputs = torch.cat(tuple(inputs))  # shape = 5*bs x 3 x 480 x 480
            features = model(inputs.to(args.device))
            if test_method == "five_crops":  # Compute mean along the 5 crops
                features = torch.stack(torch.split(features, 5)).mean(1)
            if test_method == "nearest_crop" or test_method == 'maj_voting':
                # each query occupies 5 consecutive rows (one per crop)
                start_idx = (indices[0] - eval_ds.database_num) * 5
                end_idx = start_idx + indices.shape[0] * 5
                indices = np.arange(start_idx, end_idx)
                queries_features[indices, :] = features.cpu().numpy()
            else:
                queries_features[indices.numpy()-eval_ds.database_num, :] = features.cpu().numpy()

        queries_features = torch.tensor(queries_features).type(torch.float32).cuda()

        logging.debug("Extracting database features for evaluation/testing")
        # For database use "hard_resize", although it usually has no effect because database images have same resolution
        eval_ds.test_method = "hard_resize"
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device=="cuda"))
        for inputs, indices in tqdm(database_dataloader, ncols=100):
            inputs = inputs.to(args.device)
            features = model(inputs)
            # fill one distance-matrix column per database descriptor
            for pn, (index, pred_feature) in enumerate(zip(indices, features)):
                distances[:, index] = ((queries_features-pred_feature)**2).sum(1).cpu().numpy()
        del features, queries_features, pred_feature

    predictions = distances.argsort(axis=1)[:, :max(args.recall_values)]

    if test_method == 'nearest_crop':
        # keep the distances of the sorted predictions, then merge the 5
        # crop rows of each query into one ranking
        distances = np.array([distances[row, index] for row, index in enumerate(predictions)])
        distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))
        for q in range(eval_ds.queries_num):
            # sort predictions by distance
            sort_idx = np.argsort(distances[q])
            predictions[q] = predictions[q, sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(predictions[q], return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]
        predictions = predictions[:, :20]  # keep only the closer 20 predictions for each
    elif test_method == 'maj_voting':
        distances = np.array([distances[row, index] for row, index in enumerate(predictions)])
        distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))
        for q in range(eval_ds.queries_num):
            # votings, modify distances in-place
            top_n_voting('top1', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top5', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top10', predictions[q], distances[q], args.majority_weight)
            # flatten dist and preds from 5, 20 -> 20*5
            # and then proceed as usual to keep only first 20
            dists = distances[q].flatten()
            preds = predictions[q].flatten()
            # sort predictions by distance
            sort_idx = np.argsort(dists)
            preds = preds[sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(preds, return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            # here the row corresponding to the first crop is used as a
            # 'buffer' for each query, and in the end the dimension
            # relative to crops is eliminated
            predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]
        predictions = predictions[:, 0, :20]  # keep only the closer 20 predictions for each query
    del distances

    #### For each query, check if the predictions are correct
    positives_per_query = eval_ds.get_positives()
    # args.recall_values by default is [1, 5, 10, 20]
    recalls = np.zeros(len(args.recall_values))
    for query_index, pred in enumerate(predictions):
        for i, n in enumerate(args.recall_values):
            # a hit at rank n counts as a hit for every larger cutoff too
            if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
                recalls[i:] += 1
                break
    recalls = recalls / eval_ds.queries_num * 100
    recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
    return recalls, recalls_str
def test(args, eval_ds, model, test_method="hard_resize", pca=None):
    """Compute features of the given dataset and compute the recalls.

    All database and query descriptors are materialized, then a faiss
    L2 index over the database is searched with the queries.
    Returns a (recalls, recalls_str) tuple.
    """
    assert test_method in ["hard_resize", "single_query", "central_crop", "five_crops",
                           "nearest_crop", "maj_voting"], f"test_method can't be {test_method}"
    if args.efficient_ram_testing:
        # delegate to the streaming, low-RAM implementation
        return test_efficient_ram_usage(args, eval_ds, model, test_method)
    model = model.eval()
    with torch.no_grad():
        logging.debug("Extracting database features for evaluation/testing")
        # For database use "hard_resize", although it usually has no effect because database images have same resolution
        eval_ds.test_method = "hard_resize"
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device=="cuda"))
        # crop-based methods store 5 descriptors (one per crop) per query
        if test_method == "nearest_crop" or test_method == 'maj_voting':
            all_features = np.empty((5 * eval_ds.queries_num + eval_ds.database_num, args.features_dim), dtype="float32")
        else:
            all_features = np.empty((len(eval_ds), args.features_dim), dtype="float32")
        for inputs, indices in tqdm(database_dataloader, ncols=100):
            features = model(inputs.to(args.device))
            features = features.cpu().numpy()
            if pca != None:
                features = pca.transform(features)
            all_features[indices.numpy(), :] = features

        logging.debug("Extracting queries features for evaluation/testing")
        queries_infer_batch_size = 1 if test_method == "single_query" else args.infer_batch_size
        eval_ds.test_method = test_method
        # queries are stored after the database images inside eval_ds
        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num+eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device=="cuda"))
        for inputs, indices in tqdm(queries_dataloader, ncols=100):
            if test_method == "five_crops" or test_method == "nearest_crop" or test_method == 'maj_voting':
                inputs = torch.cat(tuple(inputs))  # shape = 5*bs x 3 x 480 x 480
            features = model(inputs.to(args.device))
            if test_method == "five_crops":  # Compute mean along the 5 crops
                features = torch.stack(torch.split(features, 5)).mean(1)
            features = features.cpu().numpy()
            if pca != None:
                features = pca.transform(features)

            if test_method == "nearest_crop" or test_method == 'maj_voting':  # store the features of all 5 crops
                start_idx = eval_ds.database_num + (indices[0] - eval_ds.database_num) * 5
                end_idx = start_idx + indices.shape[0] * 5
                indices = np.arange(start_idx, end_idx)
                all_features[indices, :] = features
            else:
                all_features[indices.numpy(), :] = features

    queries_features = all_features[eval_ds.database_num:]
    database_features = all_features[:eval_ds.database_num]

    faiss_index = faiss.IndexFlatL2(args.features_dim)
    faiss_index.add(database_features)
    del database_features, all_features

    logging.debug("Calculating recalls")
    distances, predictions = faiss_index.search(queries_features, max(args.recall_values))

    if test_method == 'nearest_crop':
        # merge the 5 crop rankings of each query into one
        distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))
        for q in range(eval_ds.queries_num):
            # sort predictions by distance
            sort_idx = np.argsort(distances[q])
            predictions[q] = predictions[q, sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(predictions[q], return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]
        predictions = predictions[:, :20]  # keep only the closer 20 predictions for each query
    elif test_method == 'maj_voting':
        distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))
        predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))
        for q in range(eval_ds.queries_num):
            # votings, modify distances in-place
            top_n_voting('top1', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top5', predictions[q], distances[q], args.majority_weight)
            top_n_voting('top10', predictions[q], distances[q], args.majority_weight)
            # flatten dist and preds from 5, 20 -> 20*5
            # and then proceed as usual to keep only first 20
            dists = distances[q].flatten()
            preds = predictions[q].flatten()
            # sort predictions by distance
            sort_idx = np.argsort(dists)
            preds = preds[sort_idx]
            # remove duplicated predictions, i.e. keep only the closest ones
            _, unique_idx = np.unique(preds, return_index=True)
            # unique_idx is sorted based on the unique values, sort it again
            # here the row corresponding to the first crop is used as a
            # 'buffer' for each query, and in the end the dimension
            # relative to crops is eliminated
            predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]
        predictions = predictions[:, 0, :20]  # keep only the closer 20 predictions for each query

    #### For each query, check if the predictions are correct
    positives_per_query = eval_ds.get_positives()
    # args.recall_values by default is [1, 5, 10, 20]
    recalls = np.zeros(len(args.recall_values))
    for query_index, pred in enumerate(predictions):
        for i, n in enumerate(args.recall_values):
            if np.any(np.in1d(pred[:n], positives_per_query[query_index])):
                recalls[i:] += 1
                break
    # Divide by the number of queries*100, so the recalls are in percentages
    recalls = recalls / eval_ds.queries_num * 100
    recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(args.recall_values, recalls)])
    return recalls, recalls_str
def top_n_voting(topn, predictions, distances, maj_weight):
if topn == 'top1':
n = 1
selected = 0
elif topn == 'top5':
n = 5
selected = slice(0, 5)
elif topn == 'top10':
n = 10
selected = slice(0, 10)
# find predictions that repeat in the first, first five,
# or fist ten columns for each crop
vals, counts = np.unique(predictions[:, selected], return_counts=True)
# for each prediction that repeats more than once,
# subtract from its score
for val, count in | |
from typing import Callable, Union, Any
from enum import Enum
import numpy as np
from casadi import sum1, horzcat, if_else, vertcat, lt, MX, SX, Function
import biorbd
from .path_conditions import Bounds
from .penalty import PenaltyType, PenaltyFunctionAbstract, PenaltyOption, PenaltyNodes
from ..dynamics.ode_solver import OdeSolver
from ..misc.enums import Node, ControlType, InterpolationType
from ..misc.options import OptionList, OptionGeneric
class Constraint(PenaltyOption):
    """
    A placeholder for a constraint

    Attributes
    ----------
    min_bound: np.ndarray
        The vector of minimum bound of the constraint. Default is 0
    max_bound: np.ndarray
        The vector of maximal bound of the constraint. Default is 0
    """

    def __init__(
        self,
        constraint: Any,
        min_bound: Union[np.ndarray, float] = None,
        max_bound: Union[np.ndarray, float] = None,
        phase: int = 0,
        **params: Any,
    ):
        """
        Parameters
        ----------
        constraint: ConstraintFcn
            The chosen constraint
        min_bound: np.ndarray
            The vector of minimum bound of the constraint. Default is 0
        max_bound: np.ndarray
            The vector of maximal bound of the constraint. Default is 0
        phase: int
            The index of the phase to apply the constraint
        params:
            Generic parameters for options
        """
        # Anything that is not a ConstraintFcn member is assumed to be a
        # user-provided function and is wrapped as a CUSTOM constraint
        if isinstance(constraint, ConstraintFcn):
            custom_function = None
        else:
            custom_function = constraint
            constraint = ConstraintFcn.CUSTOM
        super(Constraint, self).__init__(penalty=constraint, phase=phase, custom_function=custom_function, **params)
        self.min_bound = min_bound
        self.max_bound = max_bound
class ConstraintList(OptionList):
    """
    A list of Constraint if more than one is required

    Methods
    -------
    add(self, constraint: Union[Callable, "ConstraintFcn"], **extra_arguments)
        Add a new Constraint to the list
    print(self)
        Print the ConstraintList to the console
    """

    def add(self, constraint: Union[Callable, Constraint, Any], **extra_arguments: Any):
        """
        Add a new constraint to the list

        Parameters
        ----------
        constraint: Union[Callable, Constraint, ConstraintFcn]
            The chosen constraint
        extra_arguments: dict
            Any parameters to pass to Constraint
        """
        # A pre-built Constraint is copied verbatim; anything else is forwarded
        # to the Constraint constructor through the generic option machinery
        if not isinstance(constraint, Constraint):
            super(ConstraintList, self)._add(option_type=Constraint, constraint=constraint, **extra_arguments)
        else:
            self.copy(constraint)

    def print(self):
        """
        Print the ConstraintList to the console
        """
        # TODO: Print all elements in the console
        raise NotImplementedError("Printing of ConstraintList is not ready yet")
class ConstraintFunction(PenaltyFunctionAbstract):
"""
Internal (re)implementation of the penalty functions
Methods
-------
inter_phase_continuity(ocp: OptimalControlProgram, pt: "PhaseTransition")
Add phase transition constraints between two phases.
add_to_penalty(ocp: OptimalControlProgram, nlp: NonLinearProgram, val: Union[MX, SX], penalty: Constraint)
Add the constraint to the constraint pool
clear_penalty(ocp: OptimalControlProgram, nlp: NonLinearProgram, penalty: Constraint)
Resets a penalty. A negative penalty index creates a new empty penalty.
_parameter_modifier(constraint: Constraint)
Apply some default parameters
_span_checker(constraint, nlp)
Check for any non sense in the requested times for the constraint. Raises an error if so
penalty_nature() -> str
Get the nature of the penalty
"""
class Functions:
"""
Implementation of all the constraint functions
Methods
-------
time_constraint(constraint: Constraint, pn: PenaltyNodes)
The time constraint is taken care elsewhere, but must be declared here. This function therefore does nothing
torque_max_from_actuators(constraint: Constraint, pn: PenaltyNodes, min_torque=None)
Non linear maximal values of joint torques computed from the torque-position-velocity relationship
non_slipping(constraint: Constraint, pn: PenaltyNodes,
tangential_component_idx: int, normal_component_idx: int, static_friction_coefficient: float)
Add a constraint of static friction at contact points allowing for small tangential forces. This constraint
assumes that the normal forces is positive
contact_force(constraint: Constraint, pn: PenaltyNodes, contact_force_idx: int)
Add a constraint of contact forces given by any forward dynamics with contact
"""
@staticmethod
def contact_force(
constraint: Constraint,
pn: PenaltyNodes,
contact_force_idx: int,
):
"""
Add a constraint of contact forces given by any forward dynamics with contact
Parameters
----------
constraint: Constraint
The actual constraint to declare
pn: PenaltyNodes
The penalty node elements
contact_force_idx: int
The index of the contact force to add to the constraint set
"""
for i in range(len(pn.u)):
ConstraintFunction.add_to_penalty(
pn.ocp,
pn.nlp,
pn.nlp.contact_forces_func(pn.x[i], pn.u[i], pn.p)[contact_force_idx, 0],
constraint,
)
        @staticmethod
        def non_slipping(
            constraint: Constraint,
            pn: PenaltyNodes,
            tangential_component_idx: int,
            normal_component_idx: int,
            static_friction_coefficient: float,
        ):
            """
            Add a constraint of static friction at contact points constraining for small tangential forces.
            This function make the assumption that normal_force is always positive
            That is mu*normal_force = tangential_force. To prevent from using a square root, the previous
            equation is squared

            Parameters
            ----------
            constraint: Constraint
                The actual constraint to declare
            pn: PenaltyNodes
                The penalty node elements
            tangential_component_idx: int
                Index of the tangential component of the contact force.
                [0] = x_indices, [1] = y_indices / or [0] = component
            normal_component_idx: int
                Index of the normal component of the contact force
            static_friction_coefficient: float
                Static friction coefficient
            """
            # Normalize both index arguments to lists so the indexing below is uniform
            if isinstance(tangential_component_idx, int):
                tangential_component_idx = [tangential_component_idx]
            elif not isinstance(tangential_component_idx, (tuple, list)):
                raise RuntimeError("tangential_component_idx must be a unique integer or a list of integer")
            if isinstance(normal_component_idx, int):
                normal_component_idx = [normal_component_idx]
            elif not isinstance(normal_component_idx, (tuple, list)):
                raise RuntimeError("normal_component_idx must be a unique integer or a list of integer")
            # Work with squared magnitudes to avoid a square root in ||F_t|| <= mu * F_n
            mu_squared = static_friction_coefficient ** 2
            # Two stacked one-sided constraints per node, each bounded in [0, inf)
            constraint.min_bound = np.array([0, 0])
            constraint.max_bound = np.array([np.inf, np.inf])
            for i in range(len(pn.u)):
                contact = pn.nlp.contact_forces_func(pn.x[i], pn.u[i], pn.p)
                normal_contact_force_squared = sum1(contact[normal_component_idx, 0]) ** 2
                if len(tangential_component_idx) == 1:
                    tangential_contact_force_squared = sum1(contact[tangential_component_idx[0], 0]) ** 2
                elif len(tangential_component_idx) == 2:
                    # Two components (x and y): squared norm of the tangential force
                    tangential_contact_force_squared = (
                        sum1(contact[tangential_component_idx[0], 0]) ** 2
                        + sum1(contact[tangential_component_idx[1], 0]) ** 2
                    )
                else:
                    raise (ValueError("tangential_component_idx should either be x and y or only one component"))
                # Since it is non-slipping normal forces are supposed to be greater than zero
                ConstraintFunction.add_to_penalty(
                    pn.ocp,
                    pn.nlp,
                    vertcat(
                        mu_squared * normal_contact_force_squared - tangential_contact_force_squared,
                        mu_squared * normal_contact_force_squared + tangential_contact_force_squared,
                    ),
                    constraint,
                )
        @staticmethod
        def torque_max_from_actuators(
            constraint: Constraint,
            pn: PenaltyNodes,
            min_torque=None,
        ):
            """
            Non linear maximal values of joint torques computed from the torque-position-velocity relationship

            Parameters
            ----------
            constraint: Constraint
                The actual constraint to declare
            pn: PenaltyNodes
                The penalty node elements
            min_torque: float
                Minimum joint torques. This prevent from having too small torques, but introduces an if statement
            """
            # TODO: Add index to select the u (control_idx)
            nlp = pn.nlp
            nq = nlp.mapping["q"].to_first.len
            # States are stacked [q; qdot]: split each node and expand through the mappings
            q = [nlp.mapping["q"].to_second.map(mx[:nq]) for mx in pn.x]
            qdot = [nlp.mapping["qdot"].to_second.map(mx[nq:]) for mx in pn.x]
            if min_torque and min_torque < 0:
                raise ValueError("min_torque cannot be negative in tau_max_from_actuators")
            func = biorbd.to_casadi_func("torqueMax", nlp.model.torqueMax, nlp.q, nlp.qdot)
            # Two stacked inequalities per control: tau + min_bound in [0, inf) and tau - max_bound in (-inf, 0]
            constraint.min_bound = np.repeat([0, -np.inf], nlp.nu)
            constraint.max_bound = np.repeat([np.inf, 0], nlp.nu)
            for i in range(len(pn.u)):
                bound = func(q[i], qdot[i])
                # NOTE(review): column 0 is used as the max torque and column 1 as the min
                # (see the mapping below) — confirm against biorbd's torqueMax convention
                if min_torque:
                    # Clamp both bounds away from zero so the solver never sees a vanishing torque limit
                    min_bound = nlp.mapping["tau"].to_first.map(
                        if_else(lt(bound[:, 1], min_torque), min_torque, bound[:, 1])
                    )
                    max_bound = nlp.mapping["tau"].to_first.map(
                        if_else(lt(bound[:, 0], min_torque), min_torque, bound[:, 0])
                    )
                else:
                    min_bound = nlp.mapping["tau"].to_first.map(bound[:, 1])
                    max_bound = nlp.mapping["tau"].to_first.map(bound[:, 0])
                ConstraintFunction.add_to_penalty(
                    pn.ocp, nlp, vertcat(*[pn.u[i] + min_bound, pn.u[i] - max_bound]), constraint
                )
@staticmethod
def time_constraint(
constraint: Constraint,
pn: PenaltyNodes,
**unused_param,
):
"""
The time constraint is taken care elsewhere, but must be declared here. This function therefore does nothing
Parameters
----------
constraint: Constraint
The actual constraint to declare
pn: PenaltyNodes
The penalty node elements
**unused_param: dict
Since the function does nothing, we can safely ignore any argument
"""
pass
@staticmethod
def add_or_replace(ocp, nlp, penalty: PenaltyOption):
"""
Doing some configuration before calling the super.add_or_replace function that prepares the adding of the
constraint to the constraint pool
Parameters
----------
ocp: OptimalControlProgram
A reference to the ocp
nlp: NonLinearProgram
A reference to the current phase of the ocp
penalty: PenaltyOption
The actual constraint to declare
"""
if penalty.type == ConstraintFcn.TIME_CONSTRAINT:
penalty.node = Node.END
PenaltyFunctionAbstract.add_or_replace(ocp, nlp, penalty)
    @staticmethod
    def inner_phase_continuity(ocp):
        """
        Add continuity constraints between each nodes of a phase.

        Parameters
        ----------
        ocp: OptimalControlProgram
            A reference to the ocp
        """
        # Dynamics must be sound within phases
        for i, nlp in enumerate(ocp.nlp):
            penalty = Constraint([])
            penalty.name = f"CONTINUITY {i}"
            penalty.list_index = -1  # negative index: allocate a fresh penalty slot
            ConstraintFunction.clear_penalty(ocp, None, penalty)
            # Loop over shooting nodes or use parallelization
            if ocp.n_threads > 1:
                # Integrate every interval at once; the defect is X[k+1] - integration(X[k])
                end_nodes = nlp.par_dynamics(horzcat(*nlp.X[:-1]), horzcat(*nlp.U), nlp.p)[0]
                val = horzcat(*nlp.X[1:]) - end_nodes
                ConstraintFunction.add_to_penalty(ocp, None, val.reshape((nlp.nx * nlp.ns, 1)), penalty)
            else:
                for k in range(nlp.ns):
                    # Create an evaluation node
                    if (
                        isinstance(nlp.ode_solver, OdeSolver.RK4)
                        or isinstance(nlp.ode_solver, OdeSolver.RK8)
                        or isinstance(nlp.ode_solver, OdeSolver.IRK)
                    ):
                        # These solvers need the control over the interval: a single
                        # value if piecewise-constant, both endpoints if linear
                        if nlp.control_type == ControlType.CONSTANT:
                            u = nlp.U[k]
                        elif nlp.control_type == ControlType.LINEAR_CONTINUOUS:
                            u = horzcat(nlp.U[k], nlp.U[k + 1])
                        else:
                            raise NotImplementedError(f"Dynamics with {nlp.control_type} is not implemented yet")
                        end_node = nlp.dynamics[k](x0=nlp.X[k], p=u, params=nlp.p)["xf"]
                    else:
                        end_node = nlp.dynamics[k](x0=nlp.X[k], p=nlp.U[k])["xf"]
                    # Save continuity constraints
                    val = end_node - nlp.X[k + 1]
                    ConstraintFunction.add_to_penalty(ocp, None, val, penalty)
@staticmethod
def inter_phase_continuity(ocp, pt):
"""
Add phase transition constraints between two phases.
Parameters
----------
ocp: OptimalControlProgram
A reference to the ocp
pt: PhaseTransition
The phase transition to add
"""
# Dynamics must be respected between phases
penalty = OptionGeneric()
penalty.name = f"PHASE_TRANSITION {pt.phase_pre_idx}->{pt.phase_pre_idx + 1}"
penalty.min_bound = 0
penalty.max_bound = 0
penalty.list_index = | |
ct
return None
def max_type_score(self, t):
return 0 if t not in self.tis else max([ti.ms for ti in self.tis[t]])
def set_as_list_or_singleton(v):
    """Deduplicate v; return 0 when empty, the sole element when unique, else a list."""
    distinct = list(set(v))
    if not distinct:
        return 0
    if len(distinct) == 1:
        return distinct[0]
    return distinct
def unique_cell_values(vs, maxListLength = 3):
    ''' Returns up to maxListLength unique values per cell: one representative
    (the longest raw string) per equivalence class of normalized phrases. '''
    def select_best_value(values):
        # The longest raw string is kept as the representative of its class
        return None if len(values) < 1 else sorted(values, key = lambda v: len(v), reverse = True)[0]
    def merger_by_token_list(tokens):
        # assumes normalize_and_validate_phrase returns an iterable of tokens — TODO confirm
        return ' '.join(tokens)
    em = defaultdict(set)  # equivalence map: normalized key -> raw variants
    for v in vs:
        if v is None or len(v) < 1: continue
        key = normalize_and_validate_phrase(v)
        if key is None: continue
        # Group raw values under their *normalized* form so variants of the same
        # phrase collapse together. BUG FIX: the original grouped by the raw value
        # (leaving `key` unused), which defeated the normalization step.
        em[merger_by_token_list(key)].add(v)
    return [select_best_value(variants) for variants in em.values()][:maxListLength]
# TODO improve this custom date matcher if necessary by doing a first pass on all values at type inference time:
# - matcher 1 with languages = ['fr', 'en'],
#   settings = { 'DATE_ORDER': 'MDY', 'PREFER_LANGUAGE_DATE_ORDER': False } (the more likely one)
# - matcher 2 with languages = ['fr', 'en'],
#   settings = { 'DATE_ORDER': 'DMY', 'PREFER_LANGUAGE_DATE_ORDER': False }
# - then retain the majority matcher for the subsequent type inference pass (and the normalization step as well)
# Shared dateparser instance used by CustomDateMatcher (module-level, so parser caches are reused)
DDP = DateDataParser(languages = ['fr', 'en'], settings = { 'PREFER_LANGUAGE_DATE_ORDER': True })
class CustomDateMatcher(TypeMatcher):
    """Date matcher backed by dateparser's DateDataParser (FR/EN locales).
    Registers a F_YEAR, F_MONTH or F_DATE full match depending on the detected period."""
    def __init__(self):
        super(CustomDateMatcher, self).__init__(F_DATE)
    @timed
    def match(self, c):
        # TODO check prioritization of ambiguous dates: the most complex case would be first encountering some FR date(s),
        # then an ambiguous date implicitly using US (not UK or AUS) locale (e.g. 03/04/2014 which should resolve to
        # March 4th and not April 3rd)...
        # Purely numeric cells are too ambiguous to trust as dates
        if c.value.isdigit():
            raise TypeError('{} cannot parse date from numeric value "{}"'.format(self, c))
        dd = DDP.get_date_data(c.value)
        # date_obj is the parsed datetime, period its granularity ('year'/'month'/'day')
        do, dp = dd['date_obj'], dd['period']
        if do is None:
            raise TypeError('{} found no date field from "{}"'.format(self, c))
        y = do.year
        # Reject years outside a plausible window for this data
        if y < 1870 or 2120 < y:
            raise ValueError('{} found no realistic date from "{}"'.format(self, c))
        ds = str(y)
        if dp == 'year':
            self.register_full_match(c, F_YEAR, 100, ds)
            return
        # MM/YYYY
        ds = "{:02}/{}".format(do.month, ds)
        if dp == 'month':
            self.register_full_match(c, F_MONTH, 100, ds)
        else:
            # Full precision: DD/MM/YYYY
            self.register_full_match(c, F_DATE, 100, "{:02}/{}".format(do.day, ds))
def score_phone_number(z):
    """Confidence score for a parsed number: 100 if valid, 75 if merely possible, 5 otherwise."""
    if phonenumbers.is_valid_number(z):
        return 100
    if phonenumbers.is_possible_number(z):
        return 75
    return 5
# NOTE(review): re-defined later in this file with error handling; the later definition wins at import time
def normalize_phone_number(z): return phonenumbers.format_number(z, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
class CustomTelephoneMatcher(TypeMatcher):
    """Phone-number matcher: parses the whole cell value by default, or scans for
    embedded numbers (partial matches) when constructed with partial=True."""
    def __init__(self, partial = False):
        super(CustomTelephoneMatcher, self).__init__(F_PHONE)
        self.partial = partial
    @timed
    def match(self, c):
        matched = False
        # BUG FIX: the original tested the bare name `partial` (a NameError at call
        # time); the flag is stored on the instance.
        if self.partial:
            for match in phonenumbers.PhoneNumberMatcher(c.value, 'FR'):
                # original string is in match.raw_string
                self.register_partial_match(c, self.t, 100, normalize_phone_number(match.number), (match.start, match.end))
                matched = True
        else:
            z = phonenumbers.parse(c.value, 'FR')
            score = score_phone_number(z)
            if score > 0:
                self.register_full_match(c, self.t, score, normalize_phone_number(z))
                matched = True
        if not matched:
            raise TypeError('Value "{}" did not match as phone number'.format(c.value))
# Various regexes for strict type matchers.
# Raw strings so the backslashes reach the regex engine verbatim (plain "\." / "\s"
# only work because they are unrecognized escapes, and warn on recent Pythons).
PAT_EMAIL = r"[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+"
PAT_URL = r"(https?|ftp)://[^\s/$.?#].[^\s]*"
# Person-name matcher-normalizer code
PRENOM_LEXICON = file_to_set('prenom')
PATRONYME_LEXICON = file_to_set('patronyme_fr')
# All patterns below are raw strings so regex escapes are passed through verbatim;
# the resulting pattern values are unchanged.
PAT_FIRST_NAME = '(%s)' % '|'.join([p for p in PRENOM_LEXICON])
PAT_LAST_NAME = r'([A-Z][A-Za-z]+\s?)+'
PAT_LAST_NAME_ALLCAPS = r'([A-Z][A-Z]+\s?)+'
PAT_INITIAL = r'([A-Z][\.\-\s]{1,3}){1,3}'
# Ignore case on these two
PAT_FIRST_LAST_NAME = r'\s*%s\s+(%s)\s*' % (PAT_FIRST_NAME, PAT_LAST_NAME)
PAT_LAST_FIRST_NAME = r'\s*(%s)\s+%s\s*' % (PAT_LAST_NAME, PAT_FIRST_NAME)
# Don't ignore case on those two
PAT_FIRSTINITIAL_LAST_NAME = r'\s*(%s)\s+((%s)|(%s))\s*' % (PAT_INITIAL, PAT_LAST_NAME, PAT_LAST_NAME_ALLCAPS)
PAT_LAST_FIRSTINITIAL_NAME = r'\s*((%s)|(%s))\s+(%s)\s*' % (PAT_LAST_NAME, PAT_LAST_NAME_ALLCAPS, PAT_INITIAL)
def pattern_with_word_boundary(p): return r'\b' + p + r'\b'
def regex_with_word_boundary(p, flags = 0): return re.compile(pattern_with_word_boundary(p), flags)
# (compiled regex, group index of the first name, group index of the last name);
# a last-name index of -1 means "whatever is left after removing the first name"
PERSON_NAME_EXTRACTION_PATS = [
    (regex_with_word_boundary(PAT_FIRST_NAME, re.IGNORECASE), 1, -1),
    (regex_with_word_boundary(r'%s\s+(%s)' % (PAT_FIRST_NAME, PAT_LAST_NAME), re.IGNORECASE), 1, 2),
    (regex_with_word_boundary(r'(%s)\s+%s' % (PAT_LAST_NAME, PAT_FIRST_NAME), re.IGNORECASE), 3, 1),
    (regex_with_word_boundary(r'(%s)\s+((%s)|(%s))' % (PAT_INITIAL, PAT_LAST_NAME, PAT_LAST_NAME_ALLCAPS)), 1, 2),
    (regex_with_word_boundary(r'((%s)|(%s))\s+(%s)' % (PAT_LAST_NAME, PAT_LAST_NAME_ALLCAPS, PAT_INITIAL)), 2, 1)
]
def validate_first_name(fst):
    """A plausible first name has at least two characters."""
    return len(fst) >= 2
def validate_last_name(lst):
    """A plausible last name has at least three characters."""
    return len(lst) >= 3
def validate_person_name(s):
    ''' Validator for items of type: full person name.
    Tries each extraction pattern in priority order and returns
    { F_FIRST: ..., F_LAST: ... } for the first plausible split, or None. '''
    for idx, (regex, first_gp, last_gp) in enumerate(PERSON_NAME_EXTRACTION_PATS):
        m = regex.match(s)
        if not m:
            continue
        logging.debug('Person name pattern #%d matched: %s', idx + 1, '; '.join(m.groups()))
        first = m.group(first_gp).strip()
        if last_gp >= 0:
            last = m.group(last_gp).strip()
        else:
            # No explicit last-name group: keep whatever remains after removing the first name
            last = s.replace(m.group(first_gp), '').strip()
        if validate_first_name(first) and validate_last_name(last):
            return { F_FIRST: first, F_LAST: last }
    return None
def validate_person_name_match(t):
    """Lift validate_person_name onto a (text, start, end) triple; None when invalid."""
    parsed = validate_person_name(t[0])
    if parsed is None:
        return None
    return (parsed, t[1], t[2])
def singleton_list(s, itemValidator, stripChars):
    ''' Parameters:
        itemValidator takes an input string and returns a dictionary
        {field name -> field value if the item is validated or else None}.
        stripChars is currently unused (kept for interface compatibility).
        Returns a list (of length one) whose element is a (field name, field value)
        dictionary, the raw input when no validator is given, or [] on failure. '''
    if itemValidator is None: return [s]
    try:
        d = itemValidator(s)
        return [] if d is None else [d]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are no
        # longer swallowed; any validator error still means "no item".
        return []
# Delimiter words/symbols between person names. Each alternative is escaped
# individually: the original escaped the whole 'et|and|&' string, which turned the
# '|' alternation into literal pipes so the pattern could only ever match the exact
# text "et|and|&". NOTE(review): there are no word boundaries, so 'et'/'and' also
# match inside words — confirm whether \b anchors are wanted before adding them.
DELIMITER_TOKENS_RE = re.compile('|'.join(re.escape(t) for t in ('et', 'and', '&')), re.IGNORECASE)
def parse_person_name_list(s, itemValidator, i1 = 0, delimiters = ',;\t', stripChars = ' <>[](){}"\''):
    ''' Parameters:
        itemValidator takes an input string and returns a list of mappings
        { field name: field value if the item is validated or else None }.
        NOTE(review): itemValidator is currently ignored — validate_person_name_match
        is hard-wired below; kept as-is to preserve the existing call contract.
        Returns a list of triples (item, startIndex, endIndex)
        where item is a (field name, field value) dictionary. '''
    # Replace textual delimiters ("et"/"and"/"&") by the primary character delimiter
    s0 = DELIMITER_TOKENS_RE.sub(delimiters[0], s)
    for d in delimiters:
        (s1, s2, s3) = s0.partition(d)
        if len(s2) == 1:
            # BUG FIX: the recursive tail used to sit on its own statement line after
            # the `return`, i.e. dead code, so only the head element was ever returned.
            # Parenthesizing makes the head + recursion a single expression again.
            return (singleton_list((s1, i1, i1 + len(s1)), validate_person_name_match, stripChars)
                    + parse_person_name_list(s3, validate_person_name_match, i1 + len(s1) + len(s2), delimiters = delimiters))
    return singleton_list((s0, i1, i1 + len(s0)), validate_person_name_match, stripChars)
##### START OF: specific, tailor-made parsing of person name lists
# BUG FIX: map() returns a one-shot iterator in Python 3 — the first membership test
# exhausted it, so every subsequent `token in FR_FIRSTNAMES` lookup was False.
# Materialized as frozensets for correct, O(1) repeated lookups.
FR_FIRSTNAMES = frozenset(map(str.lower, PRENOM_LEXICON))
FR_SURNAMES = frozenset(map(str.lower, PATRONYME_LEXICON))
F_FIRSTORLAST = F_FIRST + '|' + F_LAST  # pseudo-field for tokens that could be either
PN_STRIP_CHARS = ' <>[](){}"\''
PN_DELIMITERS = ',;+/'
# Canonical title -> accepted (lower-cased) variants
PN_TITLE_VARIANTS = {
    'M': ['mr', 'monsieur', 'mister', 'sir'], # Form "M." is handled separately
    'Mme': ['mme', 'mrs', 'madame', 'madam', 'ms', 'miss'],
    'Dr': ['dr', 'docteur', 'doctor'],
    'Pr': ['pr', 'professeur', 'prof', 'professor']
}
def person_name_singleton(s):
    """Wrap extract_person_name's result in a zero- or one-element list."""
    parsed = extract_person_name(s)
    if parsed is None:
        return []
    return [parsed]
def extract_person_name(s):
    ''' Token-level person-name extraction.
    Classifies each whitespace token of s as title, initial, first or last name
    (using the FR lexicons) and returns a defaultdict(set) keyed by field name,
    or None when no last name could be inferred. '''
    tokens = s.split()
    d = defaultdict(set)
    for token in tokens:
        if token[-1] in '-.':
            # Token such as "J." or "J-": candidate initial (single uppercase letter)
            t = token.strip('-.')
            if len(t) > 1 or not t.isalpha() or t.upper() != t: continue
            # BUG FIX: was `d[F_TITLE] = 'M'`, which replaced the defaultdict set with
            # a plain string and broke the later `d[F_TITLE].add(...)` calls.
            # "M." doubles as the French title "Monsieur", hence the extra entry.
            if t == 'M': d[F_TITLE].add('M')
            d[F_FIRST].add(t)
            continue
        t0 = token.strip('-.')
        if len(t0) < 2: continue
        t = t0.lower()
        title = None
        for (main, variants) in PN_TITLE_VARIANTS.items():
            if t0[1:].islower() and t in map(str.lower, variants):
                title = main
                break
        if title:
            d[F_TITLE].add(title)
            continue
        if t in FR_FIRSTNAMES:
            d[F_FIRST].add(t)
            d[F_FIRSTORLAST].add(t)
        elif t in FR_SURNAMES:
            d[F_LAST].add(t)
            d[F_FIRSTORLAST].add(t)
    # Disambiguation: ambiguous tokens fill whichever role is still empty
    if len(d[F_LAST]) < 1:
        if len(d[F_FIRSTORLAST]) > 0:
            d[F_LAST] = d[F_FIRSTORLAST] - d[F_FIRST]
            d[F_FIRST] = d[F_FIRST] - d[F_LAST]
            d[F_FIRSTORLAST] = d[F_LAST] - d[F_FIRST]
        if len(d[F_LAST]) < 1: return None
    if len(d[F_FIRST]) < 1:
        d[F_FIRST] = d[F_FIRSTORLAST] - d[F_LAST]
    del d[F_FIRSTORLAST]
    return d
##### END OF: specific, tailor-made parsing of person name lists
def parse_person_names(s):
    ''' Split s into person names and validate each one.
    Returns a list of triples (personName, startIndex, endIndex) where personName
    is a field-name -> value dictionary; entries that fail validation are dropped.
    Patterns currently handled by this parser:
    <Last>
    <Last> <First>
    <First> <Last>
    <FirstInitial> <Last>
    <Last> <FirstInitial>
    LIST<Person> (with any kind of delimiter) '''
    return parse_person_name_list(s, validate_person_name)
class CustomPersonNameMatcher(TypeMatcher):
    """Person-name matcher that delegates extraction and normalization to the
    custom_name_parsing module and registers a single joined full match."""
    def __init__(self):
        super(CustomPersonNameMatcher, self).__init__(F_PERSON)
    @timed
    def match(self, c):
        l = c.value
        parsed_names = custom_name_parsing.extractPersonNames(l)
        if len(parsed_names) < 1: return
        # One full match covering every extracted name, joined by '; '
        normalized_parsed_names = list([custom_name_parsing.joinPersonName(pn) for pn in parsed_names])
        self.register_full_match(c, self.t, 100, '; '.join(normalized_parsed_names))
# Phone number normalization
# NOTE(review): this re-definition shadows the earlier normalize_phone_number;
# unlike it, this one degrades to None instead of propagating errors.
def normalize_phone_number(x):
    """Format x in international notation; None when formatting fails."""
    try:
        return phonenumbers.format_number(x, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
    except Exception:
        # Narrowed from a bare `except:` (which also caught KeyboardInterrupt/SystemExit)
        return None
# Address matcher-normalizer class
# Our address fields -> the libpostal component labels that feed them
LIBPOSTAL_MAPPING = [
    (F_STREET, ['house_number', 'road', 'suburb', 'city_district']),
    (F_ZIP, ['state', 'postcode']),
    (F_CITY, ['city']),
    (F_COUNTRY, ['country']) ]
# Our address fields -> BAN (Base Adresse Nationale) property names
BAN_MAPPING = [
    (F_HOUSENUMBER, 'housenumber'),
    (F_STREET, 'street'),
    (F_ZIP, 'postcode'),
    (F_CITY, 'city') ]
class CustomAddressMatcher(TypeMatcher):
    """Address matcher built on libpostal's parse_address: registers one partial
    match per recognized component plus one full match on the reassembled address."""
    def __init__(self):
        super(CustomAddressMatcher, self).__init__(F_ADDRESS)
    @timed
    def match(self, c):
        # A purely numeric cell is unlikely to be an address
        if c.value.isdigit():
            logging.debug('Bailing out of %s for numeric value: %s', self, c)
            return
        parsed = parse_address(c.value)
        if not parsed: return
        ps = {key: value for (value, key) in parsed}  # libpostal yields (value, label) pairs
        v = c.value.lower()
        comps = list()
        for (f, lps) in LIBPOSTAL_MAPPING:
            comp = []
            for lp in lps:
                if lp not in ps: continue
                value = ps[lp]
                # Locate the component inside the original value to compute its span.
                # NOTE(review): rejoin presumably re-tokenizes/canonicalizes the string;
                # when that changes the length, fall back to the raw strings — confirm
                # rejoin's exact contract against its definition.
                superStr = rejoin(v)
                subStr = rejoin(value)
                if len(superStr) != len(subStr):
                    superStr = v
                    subStr = value
                span = check_non_consecutive_subsequence(superStr, subStr)
                if span is None:
                    logging.warning('%s could not find substring "%s" in original "%s"', self, subStr, superStr)
                else:
                    self.register_partial_match(c, f, 100, subStr, span)
                comp.append(value)
            if len(comp) > 0: comps.append(' '.join(comp))
        if len(comps) > 0:
            self.register_full_match(c, self.t, None, ' '.join(comps))
# Lexicon of French commune names, used to sanity-check city-level geocoder hits
COMMUNE_LEXICON = file_to_set('commune')
# When True, an unparseable address degrades to a country-level match on France
COUNTRY_FALLBACK = False
class FrenchAddressMatcher(LabelMatcher):
    """Address matcher backed by the BAN geocoder (api-adresse.data.gouv.fr),
    using the commune lexicon as a sanity check on coarse, city-level results."""
    def __init__(self):
        super(FrenchAddressMatcher, self).__init__(F_ADDRESS, COMMUNE_LEXICON, MATCH_MODE_EXACT)
    @timed
    def match(self, c):
        v = c.value_to_match()
        if len(v) < 4: return
        response = urllib.request.urlopen("http://api-adresse.data.gouv.fr/search/?q=" + urllib.parse.quote_plus(v))
        resp = response.read().decode('utf-8')
        data = json.loads(resp)
        if not data or 'features' not in data: return
        logging.debug('Returned %d results from api-adresse.data.gouv.fr for %s', len(data['features']), v)
        # Quick and dirty way to have two-tier results since based on BAN address matching results, when parsing a
        # coarse-grained entity (city or equivalent) the results at a finer level (street, etc.) are completely unreliable
        # as basically they are random, if not made-up, street addresses and districts.
        hits = [set(), set()]
        # BUG FIX: the inverted index used to be re-created *inside* the feature loop,
        # so the label lookup below could only ever see the last feature's properties.
        iIdx = dict()  # label -> properties, consulted after ranking
        for point in data['features']:
            if 'properties' not in point: continue
            props = point['properties']
            if 'type' not in props:
                logging.warning('Properties do not contain any geolocation feature type! %s', data)
                continue
            kind = props['type']
            if kind in ['housenumber', 'street', 'place', 'locality']: # Accurate enough, trust the result
                l = props['label']
                if l not in iIdx: iIdx[l] = props
                hits[1].add(l)
            elif kind in ['town', 'city', 'municipality']: # Sanity check on the commune name
                # BUG FIX: normalization used to overwrite `v` itself, corrupting later
                # iterations and the final scoring/fallback that need the raw value.
                nv = normalize_and_validate_phrase(v)
                if nv is not None and nv in self.labelsMap:
                    l = props['label']
                    if l not in iIdx: iIdx[l] = props
                    hits[0].add(l)
            else:
                logging.warning('Properties unexpected geolocation feature type: %s', kind)
        # Prefer lexicon-validated city hits; fall back to fine-grained hits otherwise
        scoreFilter = partial(address_filter_score, v)
        prioHits = sorted(hits[0] if len(hits[0]) > 0 else hits[1], key = scoreFilter, reverse = True)
        for h in prioHits:
            if scoreFilter(h) <= 100 or h not in iIdx:
                continue
            props = iIdx[h]
            res_address = ' '.join([props[banField] for (ourField, banField) in BAN_MAPPING if banField in props])
            if len(res_address) > 3:
                self.register_full_match(c, self.t, None, res_address)
                return
        if COUNTRY_FALLBACK:
            self.register_full_match(c, F_COUNTRY, 100, 'France')
        else:
            raise TypeError('Could not parse any address field value for "{}"'.format(v))
def address_filter_score(src, ref):
a1, a2 = case_phrase(src), case_phrase(ref)
return | |
# python/example_code/ses/ses_replicate_identities.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with Amazon Simple Email Service
(Amazon SES) and Amazon Route 53 to copy email and domain identity configuration
from one AWS Region to another.
"""
# snippet-start:[ses.python.ses_replicateidentities.complete]
import argparse
import json
import logging
from pprint import pprint
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
def get_identities(ses_client):
    """
    Gets the identities for the current Region. The Region is specified in the
    Boto3 Amazon SES client object.

    :param ses_client: A Boto3 Amazon SES client.
    :return: The list of email identities and the list of domain identities.
    """
    email_identities = []
    domain_identities = []
    try:
        paginator = ses_client.get_paginator('list_identities')
        for page in paginator.paginate(PaginationConfig={'PageSize': 20}):
            for identity in page['Identities']:
                # Identities containing '@' are addresses; the rest are domains.
                target = email_identities if '@' in identity else domain_identities
                target.append(identity)
        logger.info(
            "Found %s email and %s domain identities.", len(email_identities),
            len(domain_identities))
    except ClientError:
        logger.exception("Couldn't get identities.")
        raise
    else:
        return email_identities, domain_identities
def verify_emails(email_list, ses_client):
    """
    Starts verification of a list of email addresses. Verification causes an email
    to be sent to each address. To complete verification, the recipient must follow
    the instructions in the email.

    :param email_list: The list of email addresses to verify.
    :param ses_client: A Boto3 Amazon SES client.
    :return: The list of emails that were successfully submitted for verification.
    """
    submitted = []
    for address in email_list:
        try:
            ses_client.verify_email_identity(EmailAddress=address)
        except ClientError:
            # Best effort: a failed address is skipped, the rest still go through.
            logger.warning("Couldn't start verification of %s.", address)
        else:
            submitted.append(address)
            logger.info("Started verification of %s.", address)
    return submitted
def verify_domains(domain_list, ses_client):
    """
    Starts verification for a list of domain identities. This returns a token for
    each domain, which must be registered as a TXT record with the DNS provider for
    the domain.

    :param domain_list: The list of domains to verify.
    :param ses_client: A Boto3 Amazon SES client.
    :return: The generated domain tokens to use to completed verification.
    """
    tokens_by_domain = {}
    for domain in domain_list:
        try:
            token = ses_client.verify_domain_identity(Domain=domain)['VerificationToken']
        except ClientError:
            # Best effort: skip domains whose token could not be obtained.
            logger.warning("Couldn't get verification token for domain %s.", domain)
        else:
            tokens_by_domain[domain] = token
            logger.info("Got verification token %s for domain %s.", token, domain)
    return tokens_by_domain
def get_hosted_zones(route53_client):
    """
    Gets the Amazon Route 53 hosted zones for the current account.

    :param route53_client: A Boto3 Route 53 client.
    :return: The list of hosted zones (empty when the listing fails).
    """
    zones = []
    try:
        paginator = route53_client.get_paginator('list_hosted_zones')
        # Collect into a local first so a mid-pagination failure leaves zones empty.
        found = []
        for page in paginator.paginate(PaginationConfig={'PageSize': 20}):
            found.extend(page['HostedZones'])
        zones = found
        logger.info("Found %s hosted zones.", len(zones))
    except ClientError:
        logger.warning("Couldn't get hosted zones.")
    return zones
def find_domain_zone_matches(domains, zones):
    """
    Finds matches between Amazon SES verified domains and Route 53 hosted zones.
    Subdomain matches are taken when found, otherwise root domain matches are taken.

    :param domains: The list of domains to match.
    :param zones: The list of hosted zones to match.
    :return: The set of matched domain-zone pairs. When a match is not found, the
             domain is included in the set with a zone value of None.
    """
    matches = {}
    for domain in domains:
        matches[domain] = None
        labels = domain.split('.')
        # Walk from the most specific sub-domain up to the root domain and stop
        # at the first hosted zone whose name matches.
        for start in range(len(labels) - 1):
            candidate = '.'.join(labels[start:])
            # Route 53 zone names carry a trailing '.'; strip it for comparison.
            zone = next(
                (z for z in zones if z['Name'][:-1] == candidate), None)
            if zone is not None:
                matches[domain] = zone
                break
    return matches
def add_route53_verification_record(domain, token, zone, route53_client):
    """
    Adds a domain verification TXT record to the specified Route 53 hosted zone.
    When a TXT record already exists in the hosted zone for the specified domain,
    the existing values are preserved and the new token is added to the list.

    :param domain: The domain to add.
    :param token: The verification token for the domain.
    :param zone: The hosted zone where the domain verification record is added.
    :param route53_client: A Boto3 Route 53 client.
    """
    domain_token_record_set_name = f'_amazonses.{domain}'
    record_set_paginator = route53_client.get_paginator(
        'list_resource_record_sets')
    record_set_iterator = record_set_paginator.paginate(
        HostedZoneId=zone['Id'], PaginationConfig={'PageSize': 20})
    records = []
    # Scan the zone for an existing _amazonses TXT record set so that its
    # current values can be preserved by the UPSERT below.
    for record_set_page in record_set_iterator:
        try:
            txt_record_set = next(
                record_set for record_set
                in record_set_page['ResourceRecordSets']
                if record_set['Name'][:-1] == domain_token_record_set_name and
                record_set['Type'] == 'TXT')
            records = txt_record_set['ResourceRecords']
            logger.info(
                "Existing TXT record found in set %s for zone %s.",
                domain_token_record_set_name, zone['Name'])
            break
        except StopIteration:
            # No matching record set on this page; keep paginating.
            pass
    # json.dumps wraps the token in the double quotes Route 53 expects around TXT values.
    records.append({'Value': json.dumps(token)})
    changes = [{
        'Action': 'UPSERT',
        'ResourceRecordSet': {
            'Name': domain_token_record_set_name,
            'Type': 'TXT',
            'TTL': 1800,
            'ResourceRecords': records}}]
    try:
        route53_client.change_resource_record_sets(
            HostedZoneId=zone['Id'], ChangeBatch={'Changes': changes})
        logger.info(
            "Created or updated the TXT record in set %s for zone %s.",
            domain_token_record_set_name, zone['Name'])
    except ClientError as err:
        logger.warning(
            "Got error %s. Couldn't create or update the TXT record for zone %s.",
            err.response['Error']['Code'], zone['Name'])
def generate_dkim_tokens(domain, ses_client):
    """
    Generates DKIM tokens for a domain. These must be added as CNAME records to the
    DNS provider for the domain.

    :param domain: The domain to generate tokens for.
    :param ses_client: A Boto3 Amazon SES client.
    :return: The list of generated DKIM tokens (empty on failure).
    """
    try:
        tokens = ses_client.verify_domain_dkim(Domain=domain)['DkimTokens']
    except ClientError:
        logger.warning("Couldn't generate DKIM tokens for domain %s.", domain)
        return []
    logger.info("Generated %s DKIM tokens for domain %s.", len(tokens), domain)
    return tokens
def add_dkim_domain_tokens(hosted_zone, domain, tokens, route53_client):
    """
    Adds DKIM domain token CNAME records to a Route 53 hosted zone.

    :param hosted_zone: The hosted zone where the records are added.
    :param domain: The domain to add.
    :param tokens: The DKIM tokens for the domain to add.
    :param route53_client: A Boto3 Route 53 client.
    """
    try:
        # One UPSERT per token: <token>._domainkey.<domain> -> <token>.dkim.amazonses.com
        change_list = []
        for token in tokens:
            change_list.append({
                'Action': 'UPSERT',
                'ResourceRecordSet': {
                    'Name': f'{token}._domainkey.{domain}',
                    'Type': 'CNAME',
                    'TTL': 1800,
                    'ResourceRecords': [{'Value': f'{token}.dkim.amazonses.com'}]}})
        route53_client.change_resource_record_sets(
            HostedZoneId=hosted_zone['Id'], ChangeBatch={'Changes': change_list})
        logger.info(
            "Added %s DKIM CNAME records to %s in zone %s.", len(tokens),
            domain, hosted_zone['Name'])
    except ClientError:
        logger.warning(
            "Couldn't add DKIM CNAME records for %s to zone %s.", domain,
            hosted_zone['Name'])
def configure_sns_topics(identity, topics, ses_client):
    """
    Configures Amazon Simple Notification Service (Amazon SNS) notifications for
    an identity. The Amazon SNS topics must already exist.

    :param identity: The identity to configure.
    :param topics: The list of topics to configure. The choices are Bounce, Delivery,
                   or Complaint.
    :param ses_client: A Boto3 Amazon SES client.
    """
    for topic in topics:
        topic_arn = input(
            f"Enter the Amazon Resource Name (ARN) of the {topic} topic or press "
            f"Enter to skip: ")
        # An empty answer means "skip this notification type".
        if not topic_arn:
            continue
        try:
            ses_client.set_identity_notification_topic(
                Identity=identity, NotificationType=topic, SnsTopic=topic_arn)
        except ClientError:
            logger.warning(
                "Couldn't configure %s for %s notifications.", identity, topic)
        else:
            logger.info("Configured %s for %s notifications.", identity, topic)
def replicate(source_client, destination_client, route53_client):
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
print('-'*88)
print(f"Replicating Amazon SES identities and other configuration from "
f"{source_client.meta.region_name} to {destination_client.meta.region_name}.")
print('-'*88)
print(f"Retrieving identities from {source_client.meta.region_name}.")
source_emails, source_domains = get_identities(source_client)
print("Email addresses found:")
print(*source_emails)
print("Domains found:")
print(*source_domains)
print("Starting verification for email identities.")
dest_emails = verify_emails(source_emails, destination_client)
print("Getting domain tokens for domain identities.")
dest_domain_tokens = verify_domains(source_domains, destination_client)
# Get Route 53 hosted zones and match them with Amazon SES domains.
answer = input(
"Is the DNS configuration for your domains managed by Amazon Route 53 (y/n)? ")
use_route53 = answer.lower() == 'y'
hosted_zones = get_hosted_zones(route53_client) if use_route53 else []
if use_route53:
print("Adding or updating Route 53 TXT records for your domains.")
domain_zones = find_domain_zone_matches(dest_domain_tokens.keys(), hosted_zones)
for domain in domain_zones:
add_route53_verification_record(
domain, dest_domain_tokens[domain], domain_zones[domain],
route53_client)
else:
print("Use these verification tokens to create TXT records through your DNS "
"provider:")
pprint(dest_domain_tokens)
answer = input("Do you want to configure DKIM signing for your identities (y/n)? ")
if answer.lower() == 'y':
# Build a set of unique domains from email and domain identities.
domains = {email.split('@')[1] for email in dest_emails}
domains.update(dest_domain_tokens)
domain_zones = find_domain_zone_matches(domains, hosted_zones)
for domain, zone in domain_zones.items():
answer = input(
f"Do you want to configure DKIM signing for {domain} (y/n)? ")
if answer.lower() == 'y':
dkim_tokens = generate_dkim_tokens(domain, destination_client)
if use_route53 and zone is not None:
add_dkim_domain_tokens(zone, domain, dkim_tokens, route53_client)
else:
print(
"Add the following DKIM tokens as CNAME records through your "
"DNS provider:")
print(*dkim_tokens, sep='\n')
answer = input(
"Do you want to configure Amazon SNS notifications for your identities (y/n)? ")
if answer.lower() == 'y':
for identity in dest_emails + list(dest_domain_tokens.keys()):
answer = | |
self.document = document
self.target_handler = target_handler
self.compound_parser = compound_parser
self.filter_ = filter_
self.context = None # type: Optional[RenderContext]
self.output_defname = True
# Nesting level for lists.
self.nesting_level = 0
def set_context(self, context: RenderContext) -> None:
self.context = context
if self.context.domain == '':
self.context.domain = self.get_domain()
# XXX: fix broken links in XML generated by Doxygen when Doxygen's
# SEPARATE_MEMBER_PAGES is set to YES; this function should be harmless
# when SEPARATE_MEMBER_PAGES is NO!
#
# The issue was discussed here: https://github.com/doxygen/doxygen/pull/7971
#
# A Doxygen anchor consists of a 32-byte string version of the results of
# passing in the stringified identifier or prototype that is being "hashed".
# An "a" character is then prefixed to mark it as an anchor. Depending on how
# the identifier is linked, it can also get a "g" prefix to mean it is part
# of a Doxygen group. This results in an id having either 33 or 34 bytes
# (containing a "g" or not). Some identifiers, eg enumerators, get twice that
# length to have both a unique enum + unique enumerator, and sometimes they
# get two "g" characters as prefix instead of one.
def _fixup_separate_member_pages(self, refid: str) -> str:
if refid:
parts = refid.rsplit("_", 1)
if len(parts) == 2 and parts[1].startswith("1"):
anchorid = parts[1][1:]
if len(anchorid) in set([33, 34]) and parts[0].endswith(anchorid):
return parts[0][:-len(anchorid)] + parts[1]
elif len(anchorid) > 34:
index = 0
if anchorid.startswith('gg'):
index = 1
_len = 35
elif anchorid.startswith('g'):
_len = 34
else:
_len = 33
if parts[0].endswith(anchorid[index:_len]):
return parts[0][:-(_len - index)] + parts[1]
return refid
def get_refid(self, refid: str) -> str:
if self.app.config.breathe_separate_member_pages: # type: ignore
refid = self._fixup_separate_member_pages(refid)
if self.app.config.breathe_use_project_refids: # type: ignore
return "%s%s" % (self.project_info.name(), refid)
else:
return refid
def get_domain(self) -> str:
"""Returns the domain for the current node."""
def get_filename(node) -> Optional[str]:
"""Returns the name of a file where the declaration represented by node is located."""
try:
return node.location.file
except AttributeError:
return None
self.context = cast(RenderContext, self.context)
node_stack = self.context.node_stack
node = node_stack[0]
# An enumvalue node doesn't have location, so use its parent node for detecting
# the domain instead.
if isinstance(node, six.string_types) or node.node_type == "enumvalue":
node = node_stack[1]
filename = get_filename(node)
if not filename and node.node_type == "compound":
file_data = self.compound_parser.parse(node.refid)
filename = get_filename(file_data.compounddef)
return self.project_info.domain_for_file(filename) if filename else ''
def join_nested_name(self, names: List[str]) -> str:
dom = self.get_domain()
sep = '::' if not dom or dom == 'cpp' else '.'
return sep.join(names)
def run_directive(self, obj_type: str, declaration: str, contentCallback: ContentCallback,
options={}) -> List[Node]:
self.context = cast(RenderContext, self.context)
args = [obj_type, [declaration]] + self.context.directive_args[2:]
directive = DomainDirectiveFactory.create(self.context.domain, args)
assert issubclass(type(directive), BaseObject)
directive.breathe_content_callback = contentCallback # type: ignore
# Translate Breathe's no-link option into the standard noindex option.
if 'no-link' in self.context.directive_args[2]:
directive.options['noindex'] = True
for k, v in options.items():
directive.options[k] = v
config = self.app.env.config
if config.breathe_debug_trace_directives:
global _debug_indent
print("{}Running directive: .. {}:: {}".format(
' ' * _debug_indent,
directive.name, declaration))
_debug_indent += 1
self.nesting_level += 1
nodes = directive.run()
self.nesting_level -= 1
# TODO: the directive_args seems to be reused between different run_directives
# so for now, reset the options.
# Remove this once the args are given in a different manner.
for k, v in options.items():
del directive.options[k]
if config.breathe_debug_trace_directives:
_debug_indent -= 1
# Filter out outer class names if we are rendering a member as a part of a class content.
rst_node = nodes[1]
finder = NodeFinder(rst_node.document)
rst_node.walk(finder)
signode = finder.declarator
if self.context.child:
signode.children = [n for n in signode.children if not n.tagname == 'desc_addname']
return nodes
def handle_declaration(self, node, declaration: str, *, obj_type: str = None,
content_callback: ContentCallback = None,
display_obj_type: str = None,
declarator_callback: DeclaratorCallback = None,
options={}) -> List[Node]:
if obj_type is None:
obj_type = node.kind
if content_callback is None:
def content(contentnode):
contentnode.extend(self.description(node))
content_callback = content
declaration = declaration.replace('\n', ' ')
nodes_ = self.run_directive(obj_type, declaration, content_callback, options)
if self.app.env.config.breathe_debug_trace_doxygen_ids:
target = self.create_doxygen_target(node)
if len(target) == 0:
print("{}Doxygen target: (none)".format(' ' * _debug_indent))
else:
print("{}Doxygen target: {}".format(' ' * _debug_indent,
target[0]['ids']))
# <desc><desc_signature> and then one or more <desc_signature_line>
# each <desc_signature_line> has a sphinx_line_type which hints what is present in that line
assert len(nodes_) >= 1
desc = nodes_[1]
assert isinstance(desc, addnodes.desc)
assert len(desc) >= 1
sig = desc[0]
assert isinstance(sig, addnodes.desc_signature)
# if may or may not be a multiline signature
isMultiline = sig.get('is_multiline', False)
declarator = None # type: Optional[Declarator]
if isMultiline:
for line in sig:
assert isinstance(line, addnodes.desc_signature_line)
if line.sphinx_line_type == 'declarator':
declarator = line
else:
declarator = sig
assert declarator is not None
if display_obj_type is not None:
n = declarator[0]
assert isinstance(n, addnodes.desc_annotation)
assert n.astext()[-1] == " "
txt = display_obj_type + ' '
declarator[0] = addnodes.desc_annotation(txt, txt)
if not self.app.env.config.breathe_debug_trace_doxygen_ids:
target = self.create_doxygen_target(node)
declarator.insert(0, target)
if declarator_callback:
declarator_callback(declarator)
return nodes_
    def get_qualification(self) -> List[str]:
        """Return the scope names qualifying the current node, outermost first.

        An empty list is returned when rendering nested content
        (``nesting_level > 0``) or when a 'ref' node appears before any
        qualifying name has been collected.
        """
        if self.nesting_level > 0:
            return []
        config = self.app.env.config
        if config.breathe_debug_trace_qualification:
            def debug_print_node(n):
                return "node_type={}".format(n.node_type)
            global _debug_indent
            print("{}{}".format(_debug_indent * ' ',
                                debug_print_node(self.qualification_stack[0])))
            _debug_indent += 1
        names = []  # type: List[str]
        for node in self.qualification_stack[1:]:
            if config.breathe_debug_trace_qualification:
                print("{}{}".format(_debug_indent * ' ', debug_print_node(node)))
            if node.node_type == 'ref' and len(names) == 0:
                if config.breathe_debug_trace_qualification:
                    print("{}{}".format(_debug_indent * ' ', 'res='))
                return []
            if (node.node_type == 'compound' and
                    node.kind not in ['file', 'namespace', 'group']) or \
                    node.node_type == 'memberdef':
                # We skip the 'file' entries because the file name doesn't form part of the
                # qualified name for the identifier. We skip the 'namespace' entries because if we
                # find an object through the namespace 'compound' entry in the index.xml then we'll
                # also have the 'compounddef' entry in our node stack and we'll get it from that. We
                # need the 'compounddef' entry because if we find the object through the 'file'
                # entry in the index.xml file then we need to get the namespace name from somewhere
                names.append(node.name)
            if (node.node_type == 'compounddef' and node.kind == 'namespace'):
                # Nested namespaces include their parent namespace(s) in compoundname. ie,
                # compoundname is 'foo::bar' instead of just 'bar' for namespace 'bar' nested in
                # namespace 'foo'. We need full compoundname because node_stack doesn't necessarily
                # include parent namespaces and we stop here in case it does.
                names.extend(reversed(node.compoundname.split('::')))
                break
        # Names were collected innermost-first; flip to outermost-first.
        names.reverse()
        if config.breathe_debug_trace_qualification:
            print("{}res={}".format(_debug_indent * ' ', names))
            _debug_indent -= 1
        return names
# ===================================================================================
def get_fully_qualified_name(self):
names = []
node_stack = self.context.node_stack
node = node_stack[0]
# If the node is a namespace, use its name because namespaces are skipped in the main loop.
if node.node_type == 'compound' and node.kind == 'namespace':
names.append(node.name)
for node in node_stack:
if node.node_type == 'ref' and len(names) == 0:
return node.valueOf_
if (node.node_type == 'compound' and
node.kind not in ['file', 'namespace', 'group']) or \
node.node_type == 'memberdef':
# We skip the 'file' entries because the file name doesn't form part of the
# qualified name for the identifier. We skip the 'namespace' entries because if we
# find an object through the namespace 'compound' entry in the index.xml then we'll
# also have the 'compounddef' entry in our node stack and we'll get it from that. We
# need the 'compounddef' entry because if we find the object through the 'file'
# entry in the index.xml file then we need to get the namespace name from somewhere
names.insert(0, node.name)
if (node.node_type == 'compounddef' and node.kind == 'namespace'):
# Nested namespaces include their parent namespace(s) in compoundname. ie,
# compoundname is 'foo::bar' instead of just 'bar' for namespace 'bar' nested in
# namespace 'foo'. We need full compoundname because node_stack doesn't necessarily
# include parent namespaces and we stop here in case it does.
names.insert(0, node.compoundname)
break
return '::'.join(names)
def create_template_prefix(self, decl) -> str:
if not decl.templateparamlist:
return ""
nodes = self.render(decl.templateparamlist)
return 'template<' + ''.join(n.astext() for n in nodes) + '>' # type: ignore
def run_domain_directive(self, kind, names):
domain_directive = DomainDirectiveFactory.create(
self.context.domain, [kind, names] | |
per inch of the figure
plotting_column (str): the name of the column to plot
discrete_colours (bool): if true use a discrete colourmap
NColours (int): the number of colours to cycle through when making the colourmap
colour_log (bool): If true the colours are in log scale
Basin_remove_list (list): A lists containing either key or junction indices of basins you want to remove from plotting
Basin_rename_dict (dict): A dict where the key is either basin key or junction index, and the value is a new name for the basin denoted by the key
out_fname_prefix (str): The prefix of the image file. If blank uses the fname_prefix
show_basins (bool): If true, plot the basins
min_channel_point_size (float): The minimum size of a channel point in points
max_channel_point_size (float): The maximum size of a channel point in points
Returns:
        Shaded relief plot with the basins coloured by basin ID. Includes channels. These can be plotted by various metrics denoted by the plotting_column parameter.
Author: SMM
"""
# specify the figure size and format
# set figure sizes based on format
if size_format == "geomorphology":
fig_size_inches = 6.25
elif size_format == "big":
fig_size_inches = 16
else:
fig_size_inches = 4.92126
ax_style = "Normal"
# get the basin IDs to make a discrete colourmap for each ID
BasinInfoDF = PlotHelp.ReadBasinInfoCSV(DataDirectory, fname_prefix)
basin_keys = list(BasinInfoDF['basin_key'])
basin_keys = [int(x) for x in basin_keys]
basin_junctions = list(BasinInfoDF['outlet_junction'])
basin_junctions = [float(x) for x in basin_junctions]
print ('Basin keys are: ')
print (basin_keys)
# going to make the basin plots - need to have bil extensions.
print("I'm going to make the basin plots. Your topographic data must be in ENVI bil format or I'll break!!")
# get the rasters
raster_ext = '.bil'
#BackgroundRasterName = fname_prefix+raster_ext
HillshadeName = fname_prefix+'_hs'+raster_ext
BasinsName = fname_prefix+'_AllBasins'+raster_ext
ChiCoordName = fname_prefix+'_Maskedchi'+raster_ext
print (BasinsName)
Basins = LSDP.GetBasinOutlines(DataDirectory, BasinsName)
chi_csv_fname = DataDirectory+ChannelFileName
chi_csv_fname = DataDirectory+ChannelFileName
thisPointData = LSDMap_PD.LSDMap_PointData(chi_csv_fname)
# Remove data that has nodata values
thisPointData.selectValue(plotting_column,value = -9999, operator = "!=")
thisPointData.selectValue("basin_key",value = Basin_remove_list, operator = "!=")
#print("The new point data is:")
#print(thisPointData.GetLongitude())
# clear the plot
plt.clf()
# set up the base image and the map
print("I am showing the basins without text labels.")
MF = MapFigure(HillshadeName, DataDirectory,coord_type="UTM_km", colourbar_location="None")
# This adds the basins
if plot_chi_raster:
if show_basins:
MF.add_basin_plot(BasinsName,fname_prefix,DataDirectory, mask_list = Basin_remove_list, rename_dict = Basin_rename_dict, value_dict = value_dict, label_basins = add_basin_labels, show_colourbar = False, colourmap = "gray", alpha = 1, outlines_only = True)
MF.add_drape_image(ChiCoordName,DataDirectory,colourmap = "cubehelix",alpha=0.6,zorder = 0.5)
MF.add_point_data(thisPointData,column_for_plotting = plotting_column,scale_points = True,column_for_scaling = "drainage_area", show_colourbar = True, colourbar_location = cbar_loc,colorbarlabel = colorbarlabel, this_colourmap = cmap,scaled_data_in_log = True,max_point_size = max_channel_point_size, min_point_size = min_channel_point_size,zorder=0.4, colour_log = colour_log, discrete_colours = discrete_colours, NColours = NColours)
else:
if show_basins:
MF.add_basin_plot(BasinsName,fname_prefix,DataDirectory, mask_list = Basin_remove_list, rename_dict = Basin_rename_dict, value_dict = value_dict, label_basins = add_basin_labels, show_colourbar = False, colourmap = "gray", alpha = 0.7, outlines_only = False)
MF.add_point_data(thisPointData,column_for_plotting = plotting_column,scale_points = True,column_for_scaling = "drainage_area", show_colourbar = True, colourbar_location = cbar_loc,colorbarlabel = colorbarlabel, this_colourmap = cmap,scaled_data_in_log = True,max_point_size = 2, min_point_size = 0.5,zorder=10, colour_log = colour_log, discrete_colours = discrete_colours, NColours = NColours)
# Save the image
if len(out_fname_prefix) == 0:
ImageName = DataDirectory+fname_prefix+"_chicoord_and_basins."+fig_format
else:
ImageName = DataDirectory+out_fname_prefix+"_chicoord_and_basins."+fig_format
MF.save_fig(fig_width_inches = fig_size_inches, FigFileName = ImageName, axis_style = ax_style, FigFormat=fig_format, Fig_dpi = dpi)
def PrintChiStacked(DataDirectory,fname_prefix, ChannelFileName, cmap = "jet", cbar_loc = "bottom", size_format = "ESURF", fig_format = "png", dpi = 250,plotting_column = "source_key",discrete_colours = False, NColours = 10,colorbarlabel = "Colourbar", axis_data_name = "chi", plot_data_name = "m_chi", plotting_data_format = 'log', Basin_select_list = None, Basin_rename_dict = None, out_fname_prefix = "", first_basin = 0, last_basin = 0, figure_aspect_ratio = 2, X_offset = 5, rotate_labels=False):
    """
    This function prints chi profiles with stacks of chi or flow distance

    Args:
        DataDirectory (str): the data directory with the m/n csv files
        fname_prefix (str): The prefix for the m/n csv files
        ChannelFileName (str): The name of the csv file with the channel profile data
        cmap (str or colourmap): The colourmap to use for the plot
        cbar_loc (str): where you want the colourbar. Options are none, left, right, top and bottom.
        size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
        fig_format (str): An image format. png, pdf, eps, svg all valid
        dpi (int): The dots per inch of the figure
        plotting_column (str): the name of the column to plot
        discrete_colours (bool): if true use a discrete colourmap
        NColours (int): the number of colours to cycle through when making the colourmap
        colorbarlabel (str): the label printed next to the colourbar
        axis_data_name (str): the data used as the x axis ("chi" or "flow_distance")
        plot_data_name (str): the data name used to colour the plot
        plotting_data_format (str): set to 'log' to colour the data in log scale
        Basin_select_list (list): basin keys or junction indices of basins to include (None or empty means all basins)
        Basin_rename_dict (dict): maps a basin key or junction index to a replacement name
        out_fname_prefix (str): The prefix of the image file. If blank uses the fname_prefix
        first_basin (int): the index of the first basin in the stack
        last_basin (int): the index of the last basin in the stack
        figure_aspect_ratio (float): the width to height ratio of the figure
        X_offset (float): the horizontal offset between successive stacked profiles
        rotate_labels (bool): if true, rotate the basin labels

    Returns:
        Plots of chi or flow distance profiles
    """
    # Avoid mutable default arguments: normalise None to fresh containers so
    # repeated calls never share state through the defaults.
    if Basin_select_list is None:
        Basin_select_list = []
    if Basin_rename_dict is None:
        Basin_rename_dict = {}

    # specify the figure size and format
    # set figure sizes based on format
    if size_format == "geomorphology":
        fig_size_inches = 6.25
    elif size_format == "big":
        fig_size_inches = 16
    else:
        fig_size_inches = 4.92126
    ax_style = "Normal"
    # get the basin IDs to make a discrete colourmap for each ID
    BasinInfoDF = PlotHelp.ReadBasinInfoCSV(DataDirectory, fname_prefix)
    basin_keys = list(BasinInfoDF['basin_key'])
    basin_keys = [int(x) for x in basin_keys]
    basin_junctions = list(BasinInfoDF['outlet_junction'])
    basin_junctions = [float(x) for x in basin_junctions]
    print ('Basin keys are: ')
    print (basin_keys)
    chi_csv_fname = DataDirectory+ChannelFileName
    # Work out the image file name
    if len(out_fname_prefix) == 0:
        ImageName = DataDirectory+fname_prefix+"_stacked_chi."+fig_format
    else:
        ImageName = DataDirectory+out_fname_prefix+"_stacked_chi."+fig_format
    if axis_data_name == "flow_distance" and X_offset <= 10:
        # A chi-sized offset makes no sense in metres of flow distance, so fall
        # back to a sensible default.
        print("WARNING! You have a weird flow distance offset. I think it is the chi offset. Check your offset.")
        x_offset = 50000
    else:
        x_offset = X_offset
    # print("The colourbar is located on the "+cbar_loc)
    # print("Cmap is: "+cmap)
    print("About to go into the stacks. My x_offset is: " +str(x_offset)+ ", and my rename dict is:" )
    print(Basin_rename_dict)
    LSDCP.StackedProfilesGradient(chi_csv_fname, FigFileName = ImageName,
                       FigFormat = fig_format,elevation_threshold = 0,
                       first_basin = first_basin, last_basin = last_basin, basin_order_list = Basin_select_list,
                       basin_rename_dict = Basin_rename_dict,
                       this_cmap = cmap,axis_data_name = axis_data_name, colour_data_name = plot_data_name,
                       discrete_colours = discrete_colours, NColours = NColours,
                       colorbarlabel = colorbarlabel, cbar_loc = cbar_loc, X_offset = x_offset,
                       plotting_data_format = plotting_data_format,
                       label_sources = False, source_thinning_threshold = 0,
                       size_format = size_format, aspect_ratio = figure_aspect_ratio, dpi = dpi, rotate_labels=rotate_labels)
def PrintMultipleStacked(DataDirectory,fname_prefix, ChannelFileNameList, cmap = "jet", cbar_loc = "bottom", size_format = "ESURF", fig_format = "png", dpi = 250,discrete_colours = False, NColours = 10,colorbarlabel = "Colourbar", axis_data_name = "chi", plotting_data_format = 'log', Basin_select_list = [], Basin_rename_dict = {}, out_fname_prefix = "", first_basin = 0, last_basin = 0, figure_aspect_ratio = 2, X_offset = 5, rotate_labels=False):
"""
    This function takes a list of files and converts them to a stacked plot
Args:
DataDirectory (str): the data directory with the m/n csv files
fname_prefix (str): The prefix for the m/n csv files
        ChannelFileNameList (list of str): A list of strings with the full paths to the csv files containing the profile data
cmap (str or colourmap): The colourmap to use for the plot
cbar_loc (str): where you want the colourbar. Options are none, left, right, top and botton. The colourbar will be of the elevation.
If you want only a hillshade set to none and the cmap to "gray"
size_format (str): Either geomorphology or big. Anything else gets you a 4.9 inch wide figure (standard ESURF size)
fig_format (str): An image format. png, pdf, eps, svg all valid
dpi (int): The dots per inch of the figure
plotting_column (str): the name of the column to plot
discrete_colours (bool): if true use a discrete colourmap
NColours (int): the number of colours to cycle through when making the colourmap
Basin_remove_list (list): A lists containing either key | |
copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
    def nunique(self, dropna: bool = True) -> Series:
        """
        Return number of unique elements in the group.

        Parameters
        ----------
        dropna : bool, default True
            Don't count NaN values.

        Returns
        -------
        Series
            Number of unique values within each group.
        """
        ids, _, _ = self.grouper.group_info
        val = self.obj._values
        # Factorize so equal values share an integer code (-1 marks NaN).
        codes, _ = algorithms.factorize(val, sort=False)
        # Sort primarily by group id, secondarily by value code, so duplicate
        # values within a group become adjacent.
        sorter = np.lexsort((codes, ids))
        codes = codes[sorter]
        ids = ids[sorter]
        # group boundaries are where group ids change
        # unique observations are where sorted values change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        inc = np.r_[1, codes[1:] != codes[:-1]]
        # 1st item of each group is a new unique observation
        mask = codes == -1
        if dropna:
            inc[idx] = 1
            inc[mask] = 0
        else:
            # NaN counts as one unique value: zero out any NaN whose
            # predecessor is also NaN, then mark group heads as new.
            inc[mask & np.r_[False, mask[:-1]]] = 0
            inc[idx] = 1
        # Sum the "new unique" flags per group.
        out = np.add.reduceat(inc, idx).astype("int64", copy=False)
        if len(ids):
            # NaN/NaT group exists if the head of ids is -1,
            # so remove it from res and exclude its index from idx
            if ids[0] == -1:
                res = out[1:]
                idx = idx[np.flatnonzero(idx)]
            else:
                res = out
        else:
            res = out[1:]
        ri = self.grouper.result_index
        # we might have duplications among the bins
        if len(res) != len(ri):
            res, out = np.zeros(len(ri), dtype=out.dtype), res
            res[ids[idx]] = out
        result = self.obj._constructor(res, index=ri, name=self.obj.name)
        return self._reindex_output(result, fill_value=0)
    @doc(Series.describe)
    def describe(self, **kwargs):
        # Borrows Series.describe's docstring (via @doc) while delegating the
        # implementation to the shared GroupBy.describe.
        return super().describe(**kwargs)
    def value_counts(
        self,
        normalize: bool = False,
        sort: bool = True,
        ascending: bool = False,
        bins=None,
        dropna: bool = True,
    ):
        """
        Return counts of unique values within each group.

        Parameters mirror :meth:`Series.value_counts`; scalar ``bins`` and
        categorical values are delegated to ``Series.value_counts`` via
        ``apply``, everything else is computed with the vectorised path below.
        """
        from pandas.core.reshape.merge import get_join_indexers
        from pandas.core.reshape.tile import cut
        ids, _, _ = self.grouper.group_info
        val = self.obj._values
        def apply_series_value_counts():
            # Fallback: run Series.value_counts group by group.
            return self.apply(
                Series.value_counts,
                normalize=normalize,
                sort=sort,
                ascending=ascending,
                bins=bins,
            )
        if bins is not None:
            if not np.iterable(bins):
                # scalar bins cannot be done at top level
                # in a backward compatible way
                return apply_series_value_counts()
        elif is_categorical_dtype(val.dtype):
            # GH38672
            return apply_series_value_counts()
        # groupby removes null keys from groupings
        mask = ids != -1
        ids, val = ids[mask], val[mask]
        if bins is None:
            lab, lev = algorithms.factorize(val, sort=True)
            llab = lambda lab, inc: lab[inc]
        else:
            # lab is a Categorical with categories an IntervalIndex
            lab = cut(Series(val), bins, include_lowest=True)
            # error: "ndarray" has no attribute "cat"
            lev = lab.cat.categories  # type: ignore[attr-defined]
            # error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
            # argument types "Any", "bool", "Union[Any, float]"
            lab = lev.take(  # type: ignore[call-overload]
                # error: "ndarray" has no attribute "cat"
                lab.cat.codes,  # type: ignore[attr-defined]
                allow_fill=True,
                # error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
                # "_na_value"
                fill_value=lev._na_value,  # type: ignore[union-attr]
            )
            llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
        if is_interval_dtype(lab.dtype):
            # TODO: should we do this inside II?
            # error: "ndarray" has no attribute "left"
            # error: "ndarray" has no attribute "right"
            sorter = np.lexsort(
                (lab.left, lab.right, ids)  # type: ignore[attr-defined]
            )
        else:
            sorter = np.lexsort((lab, ids))
        ids, lab = ids[sorter], lab[sorter]
        # group boundaries are where group ids change
        idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
        idx = np.r_[0, idchanges]
        if not len(ids):
            idx = idchanges
        # new values are where sorted labels change
        lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
        inc = np.r_[True, lchanges]
        if not len(val):
            inc = lchanges
        inc[idx] = True  # group boundaries are also new values
        out = np.diff(np.nonzero(np.r_[inc, True])[0])  # value counts
        # num. of times each group should be repeated
        rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
        # multi-index components
        codes = self.grouper.reconstructed_codes
        codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
        # error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
        # expected "Index"
        levels = [ping.group_index for ping in self.grouper.groupings] + [
            lev  # type: ignore[list-item]
        ]
        names = self.grouper.names + [self.obj.name]
        if dropna:
            mask = codes[-1] != -1
            if mask.all():
                dropna = False
            else:
                out, codes = out[mask], [level_codes[mask] for level_codes in codes]
        if normalize:
            # Divide each count by its group's total (NaNs excluded if dropna).
            out = out.astype("float")
            d = np.diff(np.r_[idx, len(ids)])
            if dropna:
                m = ids[lab == -1]
                np.add.at(d, m, -1)
                acc = rep(d)[mask]
            else:
                acc = rep(d)
            out /= acc
        if sort and bins is None:
            cat = ids[inc][mask] if dropna else ids[inc]
            sorter = np.lexsort((out if ascending else -out, cat))
            out, codes[-1] = out[sorter], codes[-1][sorter]
        if bins is not None:
            # for compat. with libgroupby.value_counts need to ensure every
            # bin is present at every index level, null filled with zeros
            diff = np.zeros(len(out), dtype="bool")
            for level_codes in codes[:-1]:
                diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
            ncat, nbin = diff.sum(), len(levels[-1])
            left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
            right = [diff.cumsum() - 1, codes[-1]]
            _, idx = get_join_indexers(left, right, sort=False, how="left")
            out = np.where(idx != -1, out[idx], 0)
            if sort:
                sorter = np.lexsort((out if ascending else -out, left[0]))
                out, left[-1] = out[sorter], left[-1][sorter]
            # build the multi-index w/ full levels
            def build_codes(lev_codes: np.ndarray) -> np.ndarray:
                return np.repeat(lev_codes[diff], nbin)
            codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
            codes.append(left[-1])
        mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
        if is_integer_dtype(out.dtype):
            out = ensure_int64(out)
        return self.obj._constructor(out, index=mi, name=self.obj.name)
@doc(Series.nlargest)
def nlargest(self, n: int = 5, keep: str = "first"):
f = partial(Series.nlargest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= all group sizes.
result = self._python_apply_general(f, data, not_indexed_same=True)
return result
@doc(Series.nsmallest)
def nsmallest(self, n: int = 5, keep: str = "first"):
f = partial(Series.nsmallest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= all group sizes.
result = self._python_apply_general(f, data, not_indexed_same=True)
return result
@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)
class DataFrameGroupBy(GroupBy[DataFrame]):
_apply_allowlist = base.dataframe_apply_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas | |
[A[j + k] for j in range(1, self.column - k + 1)])
return W
else:
return A
HNF = hermiteNormalForm
    def _SimplifyHNF(self):
        """
        Shared worker for extHNF() and kernelAsModule().

        Performs the column elimination underlying the Hermite Normal
        Form computation, mirroring every column operation on a
        transformation matrix U.

        Returns (U, A, k): A is the reduced copy of self, U records the
        column operations, and columns 1..k of A are the eliminated
        (zero) columns.
        """
        A = self.copy()
        U = unitMatrix(A.column, A.coeff_ring)
        rings = self.coeff_ring
        # step 1 [Initialize]
        j0 = k = self.column
        for i in range(1, self.row + 1)[::-1]:
            while 1:
                # step 2 [Check zero]
                for j in range(1, j0)[::-1]:
                    if bool(A[i, j]):
                        break
                else: # j==1
                    break
                # step 3 [Euclidean step]
                # combine columns k and j so that column k gets gcd d in row i
                u, v, d = rings.extgcd(A[i, k], A[i, j])
                A_ik = ring.exact_division(A[i, k], d)
                A_ij = ring.exact_division(A[i, j], d)
                B = u * A[k] + v * A[j]
                A[j] = A_ik * A[j] - A_ij * A[k]
                A[k] = B
                # mirror exactly the same column operations on U
                B = u * U[k] + v * U[j]
                U[j] = A_ik * U[j] - A_ij * U[k]
                U[k] = B
            # step4 [Final reductions]
            b = A[i, k]
            if b < 0:
                # normalize the pivot to be non-negative
                A[k] = -A[k]
                U[k] = -U[k]
                b = -b
            if not bool(b):
                # zero pivot: reuse this column position for the next row
                k += 1
            else:
                # reduce entries to the right of the pivot modulo the pivot
                for j in range(k + 1, self.column + 1):
                    q = A[i, j] // b
                    A[j] = A[j] - q * A[k]
                    U[j] = U[j] - q * U[k]
            # step 5 [Finished?]
            k -= 1
            j0 = k
            if k == 0:
                break
            # go to step 2
        return (U, A, k)
def exthermiteNormalForm(self, non_zero=False):
# Modified Algorithm 2.4.5 in CCANT
"""
Find the Hermite Normal Form M for integer matrix.
Computing U which satisfied M=self*U.
Return matrices tuple,(U, M).
"""
U, A, k = self._SimplifyHNF()
if non_zero:
if k == self.column: # zero module
return None
new_A = createMatrix(
self.row, self.column - k,
[A[j + k] for j in range(1, self.column - k + 1)])
new_U = createMatrix(
self.column, self.column - k,
[U[j + k] for j in range(1, self.column - k + 1)])
return (new_U, new_A)
else:
return (U, A)
extHNF = exthermiteNormalForm
def kernelAsModule(self):
"""
Compute kernel as Z-module.
"""
U, A, k = self._SimplifyHNF()
if k == 0:
return None
else:
ker = createMatrix(
self.column, k, [U[j] for j in range(1, k + 1)])
return ker
class RingSquareMatrix(SquareMatrix, RingMatrix, ring.RingElement):
"""
RingSquareMatrix is a class for square matrices whose elements are in ring.
"""
    def __init__(self, row, column=0, compo=0, coeff_ring=0):
        """
        RingSquareMatrix(row [, column ,components, coeff_ring])
        RingSquareMatrix must be row == column .
        """
        # delegate to the shared Matrix initializer (defined on a base class)
        self._initialize(row, column, compo, coeff_ring)
    def __pow__(self, other):
        """
        powering self to integer.

        Uses binary (square-and-multiply) exponentiation.  A negative
        exponent requires an inverse() method; NoInverse is raised
        otherwise.  Exponent 0 yields the unit matrix.
        """
        n = +other  # unary plus; presumably normalizes integer-like types -- TODO confirm
        if not isinstance(n, int):
            raise TypeError("index must be an integer")
        power = unitMatrix(self.row, self.coeff_ring)
        # check n
        if n == 0:
            return power
        if n > 0:
            z = self.copy()
        else:
            # negative exponent: invert once, then raise to |n|
            if hasattr(self, "inverse"):
                n = abs(n)
                z = self.inverse()
            else:
                raise NoInverse()
        # square-and-multiply: z holds self**(2**step)
        while 1:
            if n & 1:
                power = power * z
            n //= 2
            if n == 0:
                return power
            z = z * z
    def toFieldMatrix(self):
        """RingSquareMatrix -> FieldSquareMatrix"""
        # in-place conversion: retag the instance's class and lift the
        # coefficient ring to its quotient field
        self.__class__ = FieldSquareMatrix
        self.coeff_ring = self.coeff_ring.getQuotientField()
    def getRing(self):
        """
        Return matrix ring of self.
        """
        # the ring of row x row matrices over the coefficient ring
        return MatrixRing.getInstance(self.row, self.getCoefficientRing())
    def isOrthogonalMatrix(self):
        """
        Check whether self is orthogonal matrix or not.
        Orthogonal matrix satisfies M*M^T equals unit matrix.
        """
        return self * self.transpose() == unitMatrix(self.row, self.coeff_ring)
def isAlternatingMatrix(self):
"""
Check whether self is alternating matrix or not.
Alternating (skew symmetric, or antisymmetric) matrix satisfies M=-M^T.
"""
for i in range(1, self.row + 1):
for j in range(i, self.column + 1):
if self[i, j] != -self[j, i]:
return False
return True
isAntisymmetricMatrix = isAlternatingMatrix
isSkewsymmetricMatrix = isAlternatingMatrix
def isSingular(self):
"""
Check determinant == 0 or not.
"""
return not bool(self.determinant())
def trace(self):
"""
Return trace of self.
"""
trace = self.coeff_ring.zero
for i in range(1, self.row + 1):
trace = trace + self[i, i]
return trace
    def determinant(self): # Algorithm 2.2.6 of Cohen's book
        """
        Return determinant of self.

        Uses fraction-free (Gauss-Bareiss style) elimination: every
        division by the previous pivot is exact in the coefficient ring.
        """
        M = self.copy()
        n = self.row
        c = self.coeff_ring.one  # previous pivot, divisor for the exact division below
        sign = True  # parity of the row swaps performed so far
        for k in range(1, n):
            p = M[k, k]
            if not bool(p): # p==0
                # zero pivot: search below for a row with a non-zero entry
                i = k + 1
                while not bool(M[i, k]):
                    if i == n:
                        # the entire column is zero => determinant is zero
                        return self.coeff_ring.zero
                    else:
                        i += 1
                for j in range(k, n + 1):
                    tmp = M[i, j]
                    M[i, j] = M[k, j]
                    M[k, j] = tmp
                sign = not(sign)  # a row swap flips the determinant's sign
                p = M[k, k]
            for i in range(k + 1, n + 1):
                for j in range(k + 1, n + 1):
                    t = p * M[i, j] - M[i, k] * M[k, j]
                    M[i, j] = ring.exact_division(t, c)
            c = p
        if sign:
            return M[n, n]
        else:
            return -M[n, n]
def cofactor(self, i, j):
"""
Return (i, j)-cofactor of self.
"""
cofactor = (self.subMatrix(i, j)).determinant()
if (i + j) & 1:
cofactor = -cofactor
return cofactor
    def commutator(self, other):
        """
        Return commutator defined as follows:
        [self, other] = self * other - other * self .
        """
        # the result is the zero matrix exactly when the matrices commute
        return self * other - other * self
    def characteristicMatrix(self):
        """
        Return the characteristic matrix (i.e. xI-A) of self.
        """
        # local import; presumably avoids a circular import at module load -- TODO confirm
        import nzmath.poly.uniutil as uniutil
        x = uniutil.polynomial({1:1}, self.coeff_ring)  # the monomial x
        return x * unitMatrix(self.row, x.getRing()) - self
    def _characteristicPolyList(self): # Algorithm 2.2.7 of Cohen's book
        """
        for characteristicPolynomial, adjugateMatrix
        Assume self.row >= 2.

        Returns (coeff, C): coeff holds the characteristic polynomial
        coefficients in ascending-degree order, C is the final auxiliary
        matrix (equal to +/- the adjugate; see adjugateMatrix).
        """
        unit = unitMatrix(self.row, self.coeff_ring)
        coeff = [self.coeff_ring.one, -self.trace()]
        C = self + coeff[-1] * unit
        i = 2
        while i < self.row:
            C = self * C
            # next coefficient is -trace(self*C)/i; division is exact (per the cited algorithm)
            coeff.append(ring.exact_division(-C.trace(), i))
            C = C + coeff[-1] * unit
            i += 1
        coeff.append(ring.exact_division(-(self * C).trace(), i))
        coeff.reverse()  # switch from descending to ascending degree order
        return coeff, C
    def characteristicPolynomial(self):
        """
        characteristicPolynomial() -> Polynomial

        Return the characteristic polynomial of self over the
        coefficient ring.
        """
        import nzmath.poly.uniutil
        genPoly = nzmath.poly.uniutil.polynomial
        if self.row == 1:
            # 1x1 case handled directly: x - a
            rings = self.coeff_ring
            return genPoly({0:-self.trace(), 1:rings.one}, rings)
        coeff = self._characteristicPolyList()[0]
        return genPoly(dict(enumerate(coeff)), self.coeff_ring)
def adjugateMatrix(self):
"""
Return adjugate(classical adjoint) matrix.
"""
if self.row == 1:
return unitMatrix(self.row, self.coeff_ring)
C = self._characteristicPolyList()[1]
if self.row & 1:
return C
else:
return -C
    def cofactorMatrix(self):
        """
        Return cofactor matrix.
        """
        # the cofactor matrix is the transpose of the adjugate
        return self.adjugateMatrix().transpose()
    cofactors = cofactorMatrix
    def smithNormalForm(self):# Algorithm 2.4.14 of Cohen's book
        """
        Find the Smith Normal Form for square non-singular integral matrix.
        Return the list of diagonal elements.

        Raises ValueError when the matrix is singular.  All reductions
        are performed modulo R = |det(self)|.
        """
        M = self.copy()
        n = M.row
        R = M.determinant()
        rings = self.coeff_ring
        if not bool(R):
            raise ValueError("Don't input singular matrix")
        if R < 0:
            R = -R
        lst = []
        while n != 1:
            j = n
            c = 0  # counts row-elimination operations performed in this pass
            # clear row n to the left of the pivot using column operations
            while j != 1:
                j -= 1
                if M[n, j]:
                    u, v, d = rings.extgcd(M[n, j], M[n, n])
                    B = v * M.getColumn(n) + u * M.getColumn(j)
                    M_nn = ring.exact_division(M[n, n], d)
                    M_nj = ring.exact_division(M[n, j], d)
                    M.setColumn(j, ((M_nn * M.getColumn(j)
                                     - M_nj * M.getColumn(n)) % R))
                    M.setColumn(n, (B % R))
            j = n
            # clear column n above the pivot using row operations
            while j != 1:
                j -= 1
                if M[j, n]:
                    u, v, d = rings.extgcd(M[j, n], M[n, n])
                    B = v * M.getRow(n) + u * M.getRow(j)
                    M_nn = ring.exact_division(M[n, n], d)
                    M_jn = ring.exact_division(M[j, n], d)
                    M.setRow(j, ((M_nn * M.getRow(j)
                                  - M_jn * M.getRow(n)) % R))
                    M.setRow(n, (B % R))
                    c += 1
            if c <= 0:
                # no row elimination happened: verify the pivot divides the
                # remaining minor; if not, mix in an offending row and retry
                b = M[n, n]
                flag = False
                if not bool(b):
                    b = R
                for k in range(1, n):
                    for l in range(1, n):
                        if (M[k, l] % b):
                            M.setRow(n, M.getRow(n) + M.getRow(k))
                            flag = True
                if not flag:
                    # pivot is final: record it and shrink the working minor
                    dd = rings.gcd(M[n, n], R)
                    lst.append(dd)
                    R = ring.exact_division(R, dd)
                    n -= 1
        dd = rings.gcd(M[1, 1], R)
        lst.append(dd)
        lst.reverse()  # diagonal entries from the top-left corner down
        return lst
    SNF = smithNormalForm
    elementary_divisor = smithNormalForm
def extsmithNormalForm(self):
"""
Find the Smith Normal Form M for square matrix,
Computing U,V which satisfied M=U*self*V.
Return matrices tuple,(U,V,M).
"""
M = self.copy()
n = M.row
U = unitMatrix(M.row, M.coeff_ring)
V = unitMatrix(M.row, M.coeff_ring)
rings = self.coeff_ring
while n != 1:
j = n
c = 0
while j != 1:
j -= 1
if M[n, j]:
u, v, d = rings.extgcd(M[n, j], M[n, n])
M_nn = ring.exact_division(M[n, n], d)
M_nj = ring.exact_division(M[n, | |
import ast
import json
import math
import re
import sys
import traceback
from six import string_types
##############################################################################################################
# This is a JSON parser that isn't as strict as the normal json libraries.
# I can't believe a library for this didn't already exist and I had to write it myself...
##############################################################################################################
class JsonParsingException(Exception):
    """Raised when parse_loosely_defined_json cannot make sense of its input."""
    pass
def parse_loosely_defined_json(text):
    """
    This function parses a string that represents a JSON object and isn't as strict as the normal json libraries.
    It has the following features:
    * Supports Unicode (implicitly, since it just uses whatever string format python is using)
    * Supports escape characters like normal JSON does.
    * Supports extra commas
    * Supports unquoted strings for several kinds of characters that can often occur in Rules and Options
    * Supports linebreaks in quoted strings (they are treated the same as writing \\n, except that any spaces and tabs following them are ignored.)
    * Supports both ' and " as quotation marks
    * Supports both null and None, so it works for parsing both Javascript and Python
    * Supports True/true and False/false for the same reason
    * Does not support infinite or NaN numbers
    * Has useful error messages

    Raises JsonParsingException (with position information) on malformed input.
    """
    parser = LooseJsonParser(text)
    raised_error = None
    raised_error_details = None
    try:
        res = parser.get_object()
    except Exception as e:
        # don't raise the exception here directly because python will "helpfully" chain the exceptions together
        raised_error = e
        raised_error_details = get_error_message_details()
    if raised_error is not None:
        # The parser may have stopped on its EOF sentinel object, whose repr
        # is meaningless to the user; report it as end-of-input instead.
        offending = parser.chars[parser.pos]
        if offending is parser.EOF:
            offending = '<end of input>'
        # BUGFIX: raised_error_details was collected but never used; include
        # the formatted traceback so the failing spot in the parser is visible.
        raise JsonParsingException(
            "exception while parsing text into JSON format.\nException occured at line %d, column %d, for character '%s':\n%s\n%s"
            % (parser.line, parser.col, offending, str(raised_error), raised_error_details,))
    # convert to JSON string and back again, just to be sure it works and any error arises now and not later
    res = json.loads(json.dumps(res))
    return res
class LooseJsonParser:
    """
    Recursive-descent parser behind parse_loosely_defined_json().

    The input is held as a list of characters terminated by a unique EOF
    sentinel object; self.pos / self.line / self.col track the current
    location for error reporting.
    """
    def __init__(self, text):
        self.pos = 0
        self.line = 1
        self.col = 1
        self.chars = list(text)
        # BUGFIX: raw string so the backslash before '-' reaches the regex
        # engine verbatim instead of triggering an invalid-escape
        # DeprecationWarning/SyntaxWarning on modern Python.
        self.unquoted_characters = r'[a-zA-Z0-9.?!\-_]'
        self.EOF = object()
        self.chars.append(self.EOF)
    def get_object(self):
        """
        Starting at the current position, continues parsing new characters until it has parsed a complete object, then returns that object.
        When this starts, self.pos should be at the first character of the object (or leading whitespace)
        and when it returns self.pos will be at the last character of the object.
        """
        task = None
        while self.pos < len(self.chars):
            char = self.chars[self.pos]
            if char == self.EOF:
                raise JsonParsingException("reached the end of the file without encountering anything to parse.")
            # update line and column on a linebreak
            if char == '\n':
                self.line += 1
                self.col = 1
            # how to handle the character depends on what is currently being done
            if task is None:
                if re.match(r'\s', char):
                    # while there is no task yet, ignore whitespace and continue looking for an object
                    pass
                elif char == '[':
                    task = 'building_list'
                    res_builder = []
                    expecting_comma = False
                elif char == '{':
                    task = 'building_dict'
                    res_builder = {}
                    stage = 'expecting_key'
                elif char == '"':
                    task = 'building_primitive'
                    quote_type = 'double_quotes'
                    res_builder = []
                    string_escape = False
                elif char == "'":
                    task = 'building_primitive'
                    quote_type = 'single_quotes'
                    res_builder = []
                    string_escape = False
                elif re.match(self.unquoted_characters, char):
                    task = 'building_primitive'
                    quote_type = 'no_quotes'
                    res_builder = [char]
                    string_escape = False
                    is_finished, res = self._unquoted_text_lookahead_and_optionally_finish(res_builder)
                    if is_finished:
                        return res
                else:
                    raise JsonParsingException("reached an unexpected character while looking for the start of the next object: %s" % char)
            elif task == 'building_list':
                if re.match(r'\s', char):
                    pass  # skip whitespace in a list
                elif char == ',':
                    if expecting_comma:
                        expecting_comma = False
                    else:
                        raise JsonParsingException("encountered multiple commas after another while parsing a list. Did you forget a list element?")
                elif char == ']':
                    # the end of the list has been reached.
                    return res_builder
                else:
                    if expecting_comma:
                        raise JsonParsingException("expected a comma before the next list element.")
                    else:
                        # recurse to get the next element
                        next_list_element = self.get_object()
                        res_builder.append(next_list_element)
                        expecting_comma = True
            elif task == 'building_dict':
                if re.match(r'\s', char):
                    pass  # skip whitespace in a dictionary
                elif char == '}':
                    if stage in ['expecting_key', 'expecting_comma']:
                        return res_builder
                    else:
                        raise JsonParsingException("the dictionary was closed too early. It's missing a value to go with the last key.")
                else:
                    if stage == 'expecting_key':
                        # recurse to get the next element, and verify it's a string and it's new
                        next_dict_key = self.get_object()
                        if not isinstance(next_dict_key, string_types):
                            # if the key is not a string, but is a primitive, coerce it into a string representing the JSON object
                            # (this uses str(json.dumps(next_dict_key)) instead of just str() so that None/null get turned to 'null' instead of 'None')
                            if isinstance(next_dict_key, (int, float, bool)):
                                next_dict_key = str(json.dumps(next_dict_key))
                        if next_dict_key in res_builder:
                            raise JsonParsingException("this string has already been used as a key of this dictionary. No duplicate keys are allowed:\n%s" % next_dict_key)
                        stage = 'expecting_colon'
                    elif stage == 'expecting_colon':
                        if char == ':':
                            stage = 'expecting_value'
                        else:
                            raise JsonParsingException("expected a colon separating the dictionary's key from its value")
                    elif stage == 'expecting_value':
                        # recurse to get the next element
                        next_dict_value = self.get_object()
                        res_builder[next_dict_key] = next_dict_value
                        stage = 'expecting_comma'
                    elif stage == 'expecting_comma':
                        if char == ',':
                            stage = 'expecting_key'
                        else:
                            raise JsonParsingException("expected a comma before the next dictionary key.")
                    else:
                        raise Exception("Programming error: undefined stage of dictionary parsing: %s" % stage)
            elif task == 'building_primitive':
                if quote_type in ['double_quotes', 'single_quotes']:
                    if quote_type == 'double_quotes':
                        limiting_quote = '"'
                    else:
                        limiting_quote = "'"
                    if char == limiting_quote and not string_escape:
                        # the end of the string has been reached. Build the string.
                        # before evaluating the string, do some preprocessing that makes linebreaks possible
                        tmp = []
                        encountered_linebreak = False
                        # NOTE: loop variable renamed from 'chr' to avoid shadowing the builtin
                        for ch in res_builder:
                            if ch == '\n':
                                encountered_linebreak = True
                                tmp.append('\\')
                                tmp.append('n')
                            elif (ch == ' ' or ch == '\t') and encountered_linebreak:
                                # ignore any spaces and tabs following a linebreak
                                pass
                            else:
                                encountered_linebreak = False
                                tmp.append(ch)
                        # combine the characters into a string and evaluate it
                        res = "".join(tmp)
                        res = ast.literal_eval(limiting_quote + res + limiting_quote)
                        return res
                    # add the current character to the list
                    res_builder.append(char)
                    # if a backslash occurs, enter escape mode unless escape mode is already active,
                    # else deactivate escape mode
                    if char == '\\' and not string_escape:
                        string_escape = True
                    else:
                        string_escape = False
                elif quote_type == 'no_quotes':
                    if not re.match(self.unquoted_characters, char):
                        raise Exception("Programming error: this should have never been reached because of _unquoted_text_lookahead_and_optionally_finish().")
                    # add the element
                    res_builder.append(char)
                    # look ahead, and possibly finish up
                    is_finished, res = self._unquoted_text_lookahead_and_optionally_finish(res_builder)
                    if is_finished:
                        return res
                else:
                    raise Exception("Programming error: undefined kind of string quotation: %s" % quote_type)
            else:
                raise Exception("Programming error: undefined task: %s" % task)
            # increment the position and column
            self.pos += 1
            self.col += 1
        raise JsonParsingException("Programming Error: reached the end of the file, but this should have been noticed earlier, when reaching the self.EOF object.")
    def _unquoted_text_lookahead_and_optionally_finish(self, res_builder):
        """
        Check if the next position is EOF or a character that is invalid for unquoted objects.
        If so, finish up and return the unquoted object.

        Returns (is_finished, value): value is the parsed primitive
        (bool / None / int / float / str) when is_finished is True.
        """
        next_char = self.chars[self.pos+1]
        if next_char != self.EOF and re.match(self.unquoted_characters, next_char):
            return (False, None)
        # we have encountered a value that is not a valid part of the parser
        # try parsing the result in various ways before returning it
        res = "".join(res_builder)
        # booleans
        if res in ['true', 'True']:
            return (True, True)
        if res in ['false', 'False']:
            return (True, False)
        # null / None
        if res in ['null', 'None']:
            return (True, None)
        # int
        try:
            return (True, int(res))
        except:
            pass
        # float
        error = None
        try:
            flt = float(res)
            if math.isnan(flt) or math.isinf(flt):
                error = "NaN and infinite are not valid JSON values!"
            else:
                return (True, flt)
        except:
            pass
        if error is not None:
            raise JsonParsingException(error)
        # default: string
        return (True, res)
def get_error_message_details(exception=None):
"""
Get a nicely formatted string for an error message collected with sys.exc_info().
"""
if exception is None:
exception = sys.exc_info()
exc_type, exc_obj, exc_trace = exception
trace = traceback.extract_tb(exc_trace)
| |
+ 1,
'IPAddress': ["192.168.0.2"],
'IPSubnet' : ["255.255.255.0"],
'DefaultIPGateway' : ["192.168.0.1"],
'AssignMethod' : 'static',
'DNSServers' : ['8.8.8.8','8.8.4.4']
})
elif answer['option'] == 'Search vendor':
while True:
mac_list = get_mac_list()
device = search_wildcard_vendor(mac_list, '*')
answer = prompt(
[{'type': 'input', 'name': 'option', 'message': 'Search by vendor name'}])
macs = []
names = []
name = answer['option'].lower()
for vndr in mac_list:
if name in vndr['vendor'].lower():
macs.append(vndr['mac'])
names.append(vndr['vendor'])
if len(names) == 0:
print("No vendor found with this name")
continue
answer = prompt(
[{'type': 'list', 'name': 'option', 'message': 'Select a vendor', 'choices': names}])
pos = names.index(answer['option'])
netwrk = HostInfoNetwork({
'Description': names[pos],
'SettingID': "{" + random_guid() + "}",
'MACAddress': random_mac_for_vendor(macs[pos]),
'DNSDomain': '',
'DNSHostName': '',
'Index': int(last_index) + 1,
'InterfaceIndex': int(last_index) + 1,
'IPAddress': ["192.168.0.2"],
'IPSubnet' : ["255.255.255.0"],
'DefaultIPGateway' : ["192.168.0.1"],
'AssignMethod' : 'static',
'DNSServers' : ['8.8.8.8','8.8.4.4']
})
break
elif answer['option'] == 'Basic':
netwrk = HostInfoNetwork({
'Description': 'Killer E2200 Gigabit Ethernet Controller',
'SettingID': "{" + random_guid() + "}",
'MACAddress': random_mac_for_vendor(search_wildcard_vendor(get_mac_list(), 'FCAA14')['mac']),
'DNSDomain': '',
'DNSHostName': '',
'Index': int(last_index) + 1,
'InterfaceIndex': int(last_index) + 1,
'IPAddress': ["192.168.0.2"],
'IPSubnet' : ["255.255.255.0"],
'DefaultIPGateway' : ["192.168.0.1"],
'AssignMethod' : 'static',
'DNSServers' : ['8.8.8.8','8.8.4.4']
})
netwrk = netwrk.edit_interactive()
return netwrk
    def to_json(self):
        """Serialise this network interface to the dict form consumed by
        from_json (WMI-style key names; confirm against the config schema)."""
        return {
            'Description': self.description,
            'SettingID': self.setting_id,
            'MACAddress': self.mac_address,
            'DNSDomain': self.dns_domain,
            'DNSHostName': self.dns_host_name,
            'Index': int(self.index),
            'InterfaceIndex': int(self.interface_index),
            'IPAddress': self.ip_address,
            'IPSubnet' : self.ip_subnet,
            'DefaultIPGateway' : self.ip_gateway,
            'AssignMethod' : self.assign_method,
            'DNSServers' : self.dns_servers
        }
    def from_json(net_object):
        """Rebuild a HostInfoNetwork from its to_json() dict representation."""
        return HostInfoNetwork(net_object)
class HostInfoWindowsVersion:
    """
    Windows version descriptor: product name plus the numeric version
    fields (major/minor/build/revision and the revision split).
    """

    # Presets used by the "Basic Edit" flow:
    # choice -> (name, major, minor, build, revision); all presets use
    # major_revision=1, minor_revision=0.
    _BASIC_PRESETS = {
        'Win10': ('Windows 10', 10, 0, 1909, 18363),
        'Win7': ('Windows 7', 6, 1, 7601, 65536),
        'Win2012': ('Windows Server 2012', 6, 2, 7600, 1),
        'Win2016': ('Windows Server 2016', 10, 0, 14393, 1607),
        'Win2019': ('Windows Server 2019', 10, 0, 19041, 1),
    }

    def __init__(self, name, major, minor, build, revision, major_revision, minor_revision):
        self.name = name
        self.major = major
        self.minor = minor
        self.build = build
        self.revision = revision
        self.major_revision = major_revision
        self.minor_revision = minor_revision
        self.win_type = get_win_type(self.name)

    def __str__(self):
        return "{} ({})".format(self.name, self.build)

    def edit_interactive(self):
        """Prompt-driven editor: 'Basic Edit' applies a preset from
        _BASIC_PRESETS, 'Full Edit' asks for every field individually."""
        answer = prompt([{'type': 'list', 'name': 'option',
                          'message': 'Select a QEMU cpu type:', 'choices': ['Basic Edit', 'Full Edit']}])
        if answer['option'] == 'Basic Edit':
            answer = prompt([{'type': 'list', 'name': 'option',
                              'message': 'Select a QEMU cpu type:', 'choices': ['Win10', 'Win7', 'Win2012', 'Win2016', 'Win2019']}])
            choice = answer['option']
            preset = self._BASIC_PRESETS.get(choice)
            if preset is not None:
                self.name, self.major, self.minor, self.build, self.revision = preset
                self.major_revision = 1
                self.minor_revision = 0
            self.win_type = choice
        else:
            # Full edit: one prompt per field.  Build and revision are kept
            # as the raw prompt strings (matching this tool's historical
            # behaviour); the other numeric fields are converted to int.
            field_prompts = (
                ('name', 'Select OS name', False),
                ('major', 'Select OS Major Version', True),
                ('minor', 'Select OS Minor Version', True),
                ('build', 'Select OS Build Version', False),
                ('revision', 'Select OS revision Version', False),
                ('major_revision', 'Select OS major revision Version', True),
                ('minor_revision', 'Select OS minor revision Version', True),
            )
            for attr, message, as_int in field_prompts:
                answer = prompt([{'type': 'input', 'name': 'option',
                                  'message': message, 'default': str(getattr(self, attr))}])
                value = answer['option']
                setattr(self, attr, int(value) if as_int else value)

    def to_json(self):
        """Serialise to the dict format consumed by from_json()."""
        return {
            'Name': self.name,
            'Major': self.major,
            'Minor': self.minor,
            'Build': self.build,
            'Revision': self.revision,
            'MajorRevision': self.major_revision,
            'MinorRevision': self.minor_revision
        }

    def from_json(version_file):
        """Rebuild from a to_json() dict; legacy files may lack 'Name'."""
        if 'Name' not in version_file:
            version_file['Name'] = 'Windows 10 Enterprise'
        return HostInfoWindowsVersion(version_file['Name'], version_file['Major'], version_file['Minor'], version_file['Build'], version_file['Revision'], version_file['MajorRevision'], version_file['MinorRevision'])
class HostInfoCpu:
    """CPU descriptor: marketing name, core/thread counts and vendor family."""

    def __init__(self, name, cores, threads):
        self.name = name
        self.family = "Intel"  # default; refreshed by detect_cpu_family()
        self.cores = cores
        self.threads = threads
        self.processor_type = "Haswell"

    def __str__(self):
        return "{} {}/{}".format(self.name, self.cores, self.threads)

    def edit_interactive(self):
        """Prompt for name, cores and threads; returns self for chaining."""
        answer = prompt([{'type': 'input', 'name': 'option',
                          'message': 'Select CPU name', 'default': self.name}])
        self.name = answer['option']
        self.detect_cpu_family()
        answer = prompt([{'type': 'input', 'name': 'option',
                          'message': 'Cores:', 'default': str(self.cores)}])
        self.cores = int(answer['option'])
        answer = prompt([{'type': 'input', 'name': 'option',
                          'message': 'Threads: ', 'default': str(self.threads)}])
        self.threads = int(answer['option'])
        #answer = prompt([{'type': 'list', 'name': 'option','message': 'Select a QEMU cpu type:', 'choices': list_processors(self.family)}])
        #self.processor_type = answer['option']
        return self

    def safe_name(self):
        """Return the CPU name with every non-alphanumeric character removed."""
        return re.sub(r'[^a-zA-Z0-9]', "", self.name)

    def detect_cpu_family(self):
        """Infer the vendor family ('Intel', 'AMD' or None) from the name."""
        for vendor in ("Intel", "AMD"):
            if vendor in self.name:
                self.family = vendor
                return
        self.family = None

    def to_json(self):
        """Serialise to the dict format consumed by from_json()."""
        return {
            'Name': self.name,
            'NumberOfCores': self.cores,
            'NumberOfLogicalProcessors': self.threads,
            'Family': self.family,
            'CpuType': self.processor_type
        }

    def from_json(version_file):
        """Rebuild a HostInfoCpu from its to_json() dict representation."""
        return HostInfoCpu(version_file['Name'], version_file['NumberOfCores'], version_file['NumberOfLogicalProcessors'])
class HostInfoRAM:
    """RAM descriptor: manufacturer string and capacity in bytes."""

    def __init__(self, manufacturer, capacity):
        self.manufacturer = manufacturer
        self.capacity = int(capacity)

    def __str__(self):
        return "{} {}".format(self.manufacturer, size_numeric_to_textual(self.capacity))

    def edit_interactive(self):
        """Prompt for manufacturer and capacity; returns self for chaining."""
        answer = prompt([{'type': 'input', 'name': 'option',
                          'message': 'Manufacturer', 'default': self.manufacturer}])
        self.manufacturer = answer['option']
        answer = prompt([{'type': 'input', 'name': 'option',
                          'message': 'Capacity:', 'default': str(size_numeric_to_textual(self.capacity))}])
        self.capacity = int(size_textual_to_numeric(answer['option']))
        return self

    def to_mib(self):
        # NOTE(review): divides by 10**6 (decimal megabytes) although the
        # name says MiB (2**20) — preserved as-is; confirm which unit the
        # callers actually expect.
        return int(self.capacity / 1000000)

    def to_json(self):
        """Serialise to the dict format consumed by from_json()."""
        return {
            'Manufacturer': self.manufacturer,
            'Capacity': self.capacity,
        }

    def from_json(version_file):
        """Rebuild a HostInfoRAM from its to_json() dict representation."""
        return HostInfoRAM(version_file['Manufacturer'], int(version_file['Capacity']))
class HostInfoWindowsAccounts:
    """
    Windows account descriptor, constructed from a dict of account
    properties (the PSComputerName/SID key names suggest a WMI-style
    record — confirm against the callers that build these dicts).
    """
    def __init__(self, account):
        self.name = account['Name']
        self.local_account = account['LocalAccount']
        self.account_type = account['AccountType']
        self.ps_computer_name = account['PSComputerName']
        self.description = account['Description']
        self.sid = account['SID']
        self.lockout = account['Lockout']
        self.password_changeable = account['PasswordChangeable']
        self.password_expires = account['PasswordExpires']
        self.password_required = account['PasswordRequired']
        # optional keys with fallbacks
        if 'Password' in account:
            self.password = account['Password']
        else:
            self.password = "<PASSWORD>!"
        if 'Domain' in account:
            self.domain = account['Domain']
        else:
            self.domain = self.ps_computer_name
    def edit_interactive(self):
        """Interactively edit the commonly changed fields; returns self."""
        # If needed change in cancamusa.json
        """
        property_names = list(map(lambda x: str(x), dir(self)))
        class_props = list(map(lambda x: str(x), dir(HostInfoWindowsAccounts)))
        for element in class_props:
            property_names.pop(property_names.index(element))
        """
        property_names = ['name','description','domain','password']
        for prop in property_names:
            if prop.startswith("_"):
                continue
            answer = prompt([{'type': 'input', 'name': 'option', 'message': 'Edit: ' +
                              str(prop), 'default': str(getattr(self, prop))}])
            setattr(self, prop, answer['option'])
        return self
    def __str__(self):
        return "{} : {}".format(self.name, self.description)
    def create_interactive(host_name="Windows"):
        """Build a default Administrator account and let the user edit it."""
        disk = HostInfoWindowsAccounts({
            'Name': 'Administrator',
            'LocalAccount': True,
            'AccountType': 512,
            'PSComputerName': host_name,
            'Description': "Cuenta integrada para el acceso como invitado al equipo o dominio",
            'SID': 'S-1-5-21-2718119982-1426233563-2378531167-500',
            'Lockout': False,
            'PasswordChangeable': True,
            'PasswordExpires': False,
            'PasswordRequired': True,
            'Domain' : host_name,
            'Password' : "<PASSWORD>!"
        })
        disk = disk.edit_interactive()
        return disk
    def to_json(self):
        """Serialise to the dict format accepted by __init__/from_json."""
        return {
            'Name': self.name,
            'LocalAccount': self.local_account,
            'AccountType': self.account_type,
            'PSComputerName': self.ps_computer_name,
            'Description': self.description,
            'SID': self.sid,
            'Lockout': self.lockout,
            'PasswordChangeable': self.password_changeable,
            'PasswordExpires': self.password_expires,
            'PasswordRequired': self.password_required,
            'Domain' : self.domain,
            # BUGFIX: this was the bare placeholder token `<PASSWORD>` (a
            # SyntaxError); serialise the actual password attribute.
            'Password' : self.password
        }
    def from_json(account):
        return HostInfoWindowsAccounts(account)
class HostInfo:
    def __init__(self, host_id=1000):
        # Defaults describe a generic Windows 10 workstation; every field
        # can be replaced interactively or via from_json().
        self.host_id = host_id
        self.disks = []
        self.bios = HostInfoBios(
            bios_database.BIOS_AMERICAN_MEGATREND_ALASKA_F5)
        self.computer_name = "Windows"
        self.networks = []
        self.os = HostInfoWindowsVersion('Windows 10 Enterprise',10, 0, 0, 0, 0, 0)
        self.accounts = []
        self.programs = []
        self.roles = HostInfoRoles([])
        self.cpus = []
        self.ram = HostInfoRAM("Crucial",size_textual_to_numeric("8G"))
        self.domain = None
        self.selected_img_idx = None  # index of the selected install image, if any
        self.selected_img_pth = None  # path of the selected install image, if any
        self.language = "en-EN"
    def set_language(self):
        """Interactively prompt for the Windows display language code."""
        answer = prompt([{'type': 'input', 'name': 'option','message': 'Set Windows Language (en-EN) ', 'default': str(self.language)}])
        self.language = answer['option']
def get_account_for_domain(self, domain):
for acc in self.accounts:
if acc.domain == domain:
return acc
return None
def add_disk(self, disk):
for dsk in self.disks:
if dsk.device_id == disk.device_id:
return
self.disks.append(disk)
    def add_network(self, net):
        """Append a network interface, bumping its index so it does not
        collide with interfaces already registered."""
        last_index = 0
        for nt in self.networks:
            if nt.index > last_index:
                # NOTE(review): +1 appears to reserve the next free index
                # past the current maximum — confirm intent
                last_index = nt.index + 1
        if net.index < last_index:
            net.index = last_index
        self.networks.append(net)
def __str__(self):
to_ret = "ComputerName: {}\n".format(self.computer_name)
to_ret += "BIOS:\n\t{}\n".format(self.bios)
to_ret += "Disks:\n"
for disk in self.disks:
to_ret += "\t{}\n".format(disk)
to_ret += "Accounts:\n"
for acc in self.accounts:
to_ret += "\t{}\n".format(acc)
to_ret += "Network Interfaces:\n"
for nt in self.networks:
to_ret += "\t{}\n".format(nt)
to_ret += "CPUs:\n"
for nt in self.cpus:
to_ret += "\t{}\n".format(nt)
to_ret += "RAM:\n\t{}\n".format(self.ram)
return to_ret
def to_json(self):
to_ret = {
'host_id': self.host_id,
'cpus': [],
'ram' : self.ram.to_json()
}
to_ret['disks'] = []
to_ret['accounts'] = []
to_ret['networks'] = []
for disk in self.disks:
to_ret['disks'].append(disk.to_json())
for acc in self.accounts:
to_ret['accounts'].append(acc.to_json())
for nt in self.networks:
to_ret['networks'].append(nt.to_json())
for cpu in self.cpus:
to_ret['cpus'].append(cpu.to_json())
to_ret['bios'] = self.bios.to_json()
to_ret['roles'] = self.roles.to_json()
to_ret["os"] = self.os.to_json()
to_ret["computer_name"] = self.computer_name
to_ret["domain"] = self.domain
if self.selected_img_idx != None:
to_ret['selected_img_idx'] = self.selected_img_idx
if self.selected_img_pth != None:
to_ret['selected_img_pth'] = self.selected_img_pth
to_ret['language'] = self.language
return to_ret
def from_json(obj):
host = HostInfo()
for disk in obj['disks']:
host.disks.append(HostInfoDisk.from_json(disk))
for acc in obj['accounts']:
acc2 = HostInfoWindowsAccounts.from_json(acc)
acc2.ps_computer_name = obj["computer_name"]
host.accounts.append(acc2)
for nt in obj['networks']:
host.networks.append(HostInfoNetwork.from_json(nt))
| |
# -*- coding: utf-8 -*-
"""
Create vertical grid in WRF using smooth stretching functions.
Authors
-------
<NAME> and <NAME>
- stretching functions
<NAME> and <NAME>
- plotting, testing
"""
import numpy as np
from scipy import integrate, interpolate
import os
import matplotlib.pyplot as plt
import xarray as xr
import math
import pandas as pd
g = 9.81
Rd = 287.06
cp = 1004.666
# %% grid creation methods
def linear_dz(ztop, dz0, dzmax=None, nz=None):
    """Create height levels from z=0 up to z=ztop with growing spacing.

    The lowest layer is dz0 thick and every following layer is thicker by
    a constant stretch factor c, chosen so that the levels exactly reach
    ztop.  Either nz or dzmax must be given: with nz the level count is
    fixed; with dzmax, levels are added until the topmost spacing no
    longer exceeds dzmax.

    Parameters
    ----------
    ztop : float
        top of the domain (m).
    dz0 : float
        grid spacing (m) in the lowest model layer.
    dzmax : float, optional
        Maximum grid spacing (m). Can be used instead of nz. The default is None.
    nz : int, optional
        Number of vertical levels. The default is None.

    Raises
    ------
    ValueError
        If neither nz nor dzmax are provided.

    Returns
    -------
    z : numpy array
        heights (m) of vertical levels.
    """
    find_nz = nz is None
    if find_nz:
        if dzmax is None:
            raise ValueError("For vertical grid method 0: if nz is not defined, "
                             "dzmax must be defined!")
        # first guess for the level count; raised below until dzmax holds
        nz = int(ztop/dzmax)
    while True:
        # the stretch factor c is the (single) positive real root of
        # dz0*(1 + c + ... + c**(nz-2)) = ztop
        roots = np.roots((nz - 2)*[dz0] + [dz0 - ztop])
        real_roots = roots[~np.iscomplex(roots)].real
        c = float(real_roots[real_roots > 0])
        if not find_nz:
            break
        # accept once the largest spacing is below dzmax, else add a level
        if dz0*c**(nz - 2) <= dzmax:
            break
        nz += 1
    z = np.zeros(nz)
    for k in range(1, nz):
        z[k] = dz0 + z[k - 1]*c
    return z
def tanh_method(ztop, dzmin, dzmax=None, nz=None, D1=0, alpha=1):
    """Build a vertical grid of up to three layers with tanh stretching.

    Layer 1 (0 to D1) has constant spacing dzmin, layer 2 stretches the
    spacing hyperbolically from dzmin towards dzmax, and layer 3 (up to
    ztop) has constant spacing dzmax.  The depth of layer 2 is computed
    automatically.  If nz is None, nz is derived from dzmax and the third
    layer is omitted; if dzmax is None, only two layers are used and the
    mean spacing of layer 2 follows from nz.

    Parameters
    ----------
    ztop : float
        domain top (m).
    dzmin : float
        spacing in the first layer (m).
    dzmax : float or None
        spacing in the third layer (m). If None, only two layers are used.
    nz : int or None
        number of levels. If None, it is derived from dzmax and the
        third layer is omitted.
    D1 : float, optional
        depth of the first layer (m); must be a multiple of dzmin.
        The default is 0.
    alpha : float, optional
        stretching coefficient. The default is 1.

    Returns
    -------
    z : numpy array of floats
        vertical levels (z[0] = 0).
    dz : numpy array of floats
        spacing for all levels.

    Raises
    ------
    ValueError
        If D1 is not a multiple of dzmin, or the layer decomposition
        yields a negative or non-integer number of spacings.
    """
    # number of spacings in layer 1; must come out integral
    n1 = D1/dzmin
    if n1 != int(n1):
        raise ValueError("Depth of layer 1 is not a multiple of its grid spacing!")
    n1 = int(n1)
    if nz is None:
        # derive the level count from the mean spacing of the stretched
        # layer, then adjust dzm and dzmax so the spacings exactly fill
        # ztop - D1 (dzm stays the arithmetic mean of dzmin and dzmax)
        dzm = (dzmin + dzmax)/2
        n2 = math.ceil((ztop-D1)/dzm)
        # recalculate dzm and dzmax
        dzm = (ztop - D1)/n2
        dzmax = 2*dzm - dzmin
        nz = n1 + n2 + 1
        n3 = 0  # no constant upper layer in this mode
    elif dzmax is None:  # only two layers: nz fixes the mean spacing
        n2 = nz - n1 - 1
        dzm = (ztop - D1)/n2
        n3 = 0
    else:
        # average spacing in intermediate layer
        dzm = (dzmin + dzmax)/2
        # determine n2 from the constraint D1 + n2*dzm + n3*dzmax = ztop
        # with n3 = nz - 1 - n2 - n1
        n2 = round((ztop - D1 + (n1 - nz + 1)*dzmax)/(dzm-dzmax))
        D2 = dzm*n2
        n3 = nz - 1 - n2 - n1
        D3 = dzmax*n3
        # ztop is recomputed from the rounded layer depths
        ztop = D1 + D2 + D3
        nz = n1 + n2 + n3 + 1
    # sanity check: layers 2 and 3 must hold a non-negative integer
    # number of spacings
    for i, n in enumerate((n2, n3)):
        if n != abs(int(n)):
            raise ValueError("Vertical grid creation failed!")
    # spacing in layer 2: tanh profile from dzmin to 2*dzm - dzmin,
    # antisymmetric about the layer midpoint so it averages to dzm
    ind = np.arange(1, n2+1)
    a = (1 + n2)/2
    dz2 = dzm + (dzmin - dzm)/np.tanh(2*alpha)*np.tanh(2*alpha*(ind-a)/(1-a))
    # assemble the spacings of all three layers and integrate to levels
    dz = np.concatenate((np.repeat(dzmin, n1), dz2, np.repeat(dzmax, n3)))
    z = np.insert(np.cumsum(dz), 0, 0).astype(float)
    np.testing.assert_allclose(ztop, z[-1])
    return z, dz
# %% thermodynamic functions
def T_std(ztop, dz=1, strat=True):
    """Temperature (K) of the US standard atmosphere.

    Uses Tsfc=15°C and dT/dz=-6.5 K/km from sea level up to ztop with a
    spacing of dz (m).  If strat=False, the tropospheric lapse rate is
    also used above 11 km; otherwise temperature is kept constant there,
    with a quadratic smoothing between troposphere and stratosphere.

    Parameters
    ----------
    ztop : float
        top height (m) of the returned profile.
    dz : float, optional
        vertical spacing (m) of the profile. The default is 1.
        NOTE(review): when strat is used and ztop > 11000, dz should
        evenly divide the 10-12 km blending levels so that the .loc
        lookups below hit existing coordinates -- confirm for dz != 1.
    strat : bool, optional
        keep temperature constant above 11 km. The default is True.

    Returns
    -------
    T : xr.DataArray
        temperature profile with coordinate "z".
    """
    T0 = 15 + 273.15
    # bug fix: the dz argument was previously ignored (hard-coded step 1);
    # the default dz=1 reproduces the old behavior exactly
    zvals = np.arange(0, ztop + dz, dz)
    T = T0 - 0.0065*zvals
    T = xr.DataArray(T, coords={"z": zvals}, dims=["z"])
    if (ztop > 11000) and strat:
        # isothermal stratosphere above 11 km; blank out 10-12 km and
        # refill with quadratic interpolation for a smooth transition
        T.loc[11000:] = T.loc[11000]
        T.loc[10000:12000] = np.nan
        T = T.interpolate_na("z", method="quadratic")
    return T
def height_to_pressure_std(z, p0=1013.25, return_da=False, strat=True):
    """Convert height (m) to pressure (hPa) for the US standard atmosphere.

    Integrates g/(Rd*T) over height and applies p = p0*exp(-integral).
    If strat=False, the tropospheric lapse rate is also used above 11 km.

    Parameters
    ----------
    z : float or array-like
        height(s) (m) to convert.
    p0 : float, optional
        surface pressure (hPa). The default is 1013.25.
    return_da : bool, optional
        return an xr.DataArray instead of a numpy array. The default is False.
    strat : bool, optional
        take the stratosphere into account. The default is True.

    Returns
    -------
    p : float, numpy array, or xr.DataArray
        pressure (hPa) at the requested heights; the scalar p0 is
        returned directly when all inputs are zero.
    """
    if np.array(z == 0).all():
        # all heights at sea level: surface pressure everywhere
        return p0
    ztop = np.array(z).max()
    T = T_std(ztop, strat=strat)
    # cumtrapz was renamed cumulative_trapezoid in SciPy 1.6 and removed
    # in SciPy 1.14 -- use whichever name is available
    _cumtrapz = getattr(integrate, "cumulative_trapezoid", None) or integrate.cumtrapz
    T_int = _cumtrapz(g / (Rd * T), T.z)
    T_int = np.insert(T_int, 0, 0)
    p = T.copy()
    p[:] = p0*np.exp(-T_int)
    p = p.interp(z=z)
    if not return_da:
        p = p.values
    return p
def theta_to_pressure(theta, z, p0=1000):
    """Calculate pressure from potential temperature and surface pressure.

    Integrates 1/theta over height and evaluates
    p = p0*(1 - g/cp * integral)**(cp/Rd), i.e. the dry hydrostatic
    equation expressed with potential temperature.

    Parameters
    ----------
    theta : array-like
        potential temperature (K) on levels z.
    z : array-like
        height levels (m); z[0] is the surface.
    p0 : float, optional
        surface pressure (hPa). The default is 1000.

    Returns
    -------
    p : numpy array
        pressure (hPa) on levels z (p[0] == p0).
    """
    # cumtrapz was renamed cumulative_trapezoid in SciPy 1.6 and removed
    # in SciPy 1.14 -- use whichever name is available
    _cumtrapz = getattr(integrate, "cumulative_trapezoid", None) or integrate.cumtrapz
    integral = -_cumtrapz(1/theta, z)
    c = cp/Rd
    pt = p0*(g/cp*integral + 1)**c
    p = np.concatenate(([p0], pt))
    return p
# %% create levels
def create_levels(ztop, dz0, method=0, nz=None, dzmax=None, strat=False,
theta=None, z_theta=None, sounding_path=None, p0=1000,
table=False, plot=False, savefig=False, **kwargs):
"""Create eta values for the vertical grid in WRF.
First the metric heights of the model levels are calculated from the input parameters.
Then these are converted to pressure levels using the potential temperature
array if given or the US standard atmosphere. The potential temperature array can
also be extracted from an idealized WRF input sounding file.
Finally the corresponding eta values are computed and the results are
displayed in a figure and in a table.
Parameters
----------
ztop : float
top of the domain (m).
dz0 : float
grid spacing (m) in the lowest model layer.
method : int, optional
Grid creation method:
0: linearly increasing dz from dz0 at z=0 to dzmax at z=ztop (default)
1: ARPS 3-layer tanh method
nz : int, optional
Number of vertical levels. The default is None.
dzmax : float, optional
Maxmimum grid spacing (m). Can be used instead of nz for methods 0 and 3
and is optional for method 2. The default is None.
strat : bool, optional
Take stratosphere into account when calculating pressure from height
using the US standard atmosphere.
The default is False.
theta : array-like, optional
Potential temperature used to convert height levels to pressure levels.
z_theta : array-like, optional
Height levels corresponding to theta values.
sounding_path : str or Path-like, optional
Path to idealized WRF input sounding used to extract the potential
temperature array theta (and z_theta).
p0 : float, optional
surface pressure (hPa). The default is 1000.
table : bool, optional
Print table. The default is False.
plot : bool, optional
Plot the vertical grid. The default is False.
savefig : bool, optional
Save the plot. The default is False.
**kwargs :
Keyword arguments passed to the underlying grid creation method.
Returns
-------
grid: xr.Dataset
Values for eta, height, pressure, grid spacing, and stretching ratio
for all vertical levels
"""
if "figloc" in kwargs:
figloc = kwargs.pop("figloc")
else:
figloc = "~/"
if method == 0:
# linearly increasing dz from dz0 at z=0 to dzmax at z=ztop
z = linear_dz(ztop, dz0, dzmax=dzmax, nz=nz)
elif method == 1:
# ARPS tanh method
z, _ = tanh_method(ztop, dz0, dzmax=dzmax, nz=nz, **kwargs)
else:
raise ValueError("Vertical grid method {} not implemented!".format(method))
# convert height to pressure levels
if sounding_path is not None:
sounding = pd.read_csv(sounding_path, skiprows=1, header=None, delim_whitespace=True)
z_theta, theta = sounding[0].values, sounding[1].values
if theta is not None:
if (z[-1] > z_theta[-1]) or (ztop > z_theta[-1]):
raise ValueError("Input theta must contain values up to ztop!")
interp_th = interpolate.interp1d(z_theta, theta, fill_value="extrapolate")
theta = interp_th(z)
p = theta_to_pressure(theta, z, p0=p0)
interp_p = interpolate.interp1d(z, p, fill_value="extrapolate")
ptop = interp_p(ztop)
else:
# use US standard atmosphere
ptop = height_to_pressure_std(ztop, p0=p0, strat=strat)
p = height_to_pressure_std(z, p0=p0, strat=strat)
if (method == 0) and (np.round(z[-1], 3) != ztop):
raise ValueError("Uppermost level ({}) is not at ztop ({})!".format(z[-1], ztop))
# Define stretched grid | |
= Var(within=Reals,bounds=(0,3.14159265358979),initialize=0.785398163397448)
m.x597 = Var(within=Reals,bounds=(0,3.14159265358979),initialize=0.785398163397448)
m.x598 = Var(within=Reals,bounds=(0,3.14159265358979),initialize=0.785398163397448)
m.x599 = Var(within=Reals,bounds=(0,3.14159265358979),initialize=0.785398163397448)
m.x600 = Var(within=Reals,bounds=(0,3.14159265358979),initialize=0.785398163397448)
m.x601 = Var(within=Reals,bounds=(0,3.14159265358979),initialize=0.785398163397448)
m.x602 = Var(within=Reals,bounds=(0,3.14159265358979),initialize=0.785398163397448)
m.x603 = Var(within=Reals,bounds=(0.785398163397448,0.785398163397448),initialize=0.785398163397448)
m.x604 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x605 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x606 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x607 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x608 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x609 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x610 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x611 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x612 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x613 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x614 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x615 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x616 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x617 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x618 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x619 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x620 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x621 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x622 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x623 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x624 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x625 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x626 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x627 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x628 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x629 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x630 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x631 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x632 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x633 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x634 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x635 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x636 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x637 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x638 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x639 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x640 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x641 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x642 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x643 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x644 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x645 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x646 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x647 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x648 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x649 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x650 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x651 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x652 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x653 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x654 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x655 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x656 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x657 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x658 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x659 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x660 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x661 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x662 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x663 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x664 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x665 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x666 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x667 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x668 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x669 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x670 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x671 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x672 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x673 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x674 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x675 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x676 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x677 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x678 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x679 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x680 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x681 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x682 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x683 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x684 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x685 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x686 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x687 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x688 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x689 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x690 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x691 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x692 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x693 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x694 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x695 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x696 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x697 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x698 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x699 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x700 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x701 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x702 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x703 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x704 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x705 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x706 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x707 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x708 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x709 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x710 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x711 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x712 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x713 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x714 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x715 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x716 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x717 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x718 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x719 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x720 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x721 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x722 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x723 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x724 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x725 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x726 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x727 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x728 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x729 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x730 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x731 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x732 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x733 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x734 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x735 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x736 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x737 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x738 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x739 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x740 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x741 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x742 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x743 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x744 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x745 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x746 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x747 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x748 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x749 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x750 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x751 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x752 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x753 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x754 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x755 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x756 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x757 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x758 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x759 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x760 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x761 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x762 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x763 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x764 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x765 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x766 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x767 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x768 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x769 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x770 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x771 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x772 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x773 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x774 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x775 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x776 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x777 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x778 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x779 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x780 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x781 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x782 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x783 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x784 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x785 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x786 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x787 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x788 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x789 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x790 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x791 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x792 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x793 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x794 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x795 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x796 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x797 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x798 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x799 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x800 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x801 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x802 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x803 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x804 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x805 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x806 = Var(within=Reals,bounds=(None,None),initialize=0.0418879020478639)
m.x807 = Var(within=Reals,bounds=(None,None),initialize=0.0628318530717959)
m.x808 = Var(within=Reals,bounds=(None,None),initialize=0.0837758040957278)
m.x809 = Var(within=Reals,bounds=(None,None),initialize=0.10471975511966)
m.x810 = Var(within=Reals,bounds=(None,None),initialize=0.125663706143592)
m.x811 = Var(within=Reals,bounds=(None,None),initialize=0.146607657167524)
m.x812 = Var(within=Reals,bounds=(None,None),initialize=0.167551608191456)
m.x813 = Var(within=Reals,bounds=(None,None),initialize=0.188495559215388)
m.x814 = Var(within=Reals,bounds=(None,None),initialize=0.20943951023932)
m.x815 = Var(within=Reals,bounds=(None,None),initialize=0.230383461263251)
m.x816 = Var(within=Reals,bounds=(None,None),initialize=0.251327412287183)
m.x817 = Var(within=Reals,bounds=(None,None),initialize=0.272271363311115)
m.x818 = Var(within=Reals,bounds=(None,None),initialize=0.293215314335047)
m.x819 = Var(within=Reals,bounds=(None,None),initialize=0.314159265358979)
m.x820 = Var(within=Reals,bounds=(None,None),initialize=0.335103216382911)
m.x821 = Var(within=Reals,bounds=(None,None),initialize=0.356047167406843)
m.x822 = Var(within=Reals,bounds=(None,None),initialize=0.376991118430775)
m.x823 = Var(within=Reals,bounds=(None,None),initialize=0.397935069454707)
m.x824 = Var(within=Reals,bounds=(None,None),initialize=0.418879020478639)
m.x825 = Var(within=Reals,bounds=(None,None),initialize=0.439822971502571)
m.x826 = Var(within=Reals,bounds=(None,None),initialize=0.460766922526503)
m.x827 = Var(within=Reals,bounds=(None,None),initialize=0.481710873550435)
m.x828 = Var(within=Reals,bounds=(None,None),initialize=0.502654824574367)
m.x829 = Var(within=Reals,bounds=(None,None),initialize=0.523598775598299)
m.x830 = Var(within=Reals,bounds=(None,None),initialize=0.544542726622231)
m.x831 = Var(within=Reals,bounds=(None,None),initialize=0.565486677646163)
m.x832 = Var(within=Reals,bounds=(None,None),initialize=0.586430628670095)
m.x833 = Var(within=Reals,bounds=(None,None),initialize=0.607374579694027)
m.x834 = Var(within=Reals,bounds=(None,None),initialize=0.628318530717959)
m.x835 = Var(within=Reals,bounds=(None,None),initialize=0.64926248174189)
m.x836 = Var(within=Reals,bounds=(None,None),initialize=0.670206432765822)
m.x837 = Var(within=Reals,bounds=(None,None),initialize=0.691150383789754)
m.x838 = Var(within=Reals,bounds=(None,None),initialize=0.712094334813686)
m.x839 = Var(within=Reals,bounds=(None,None),initialize=0.733038285837618)
m.x840 = Var(within=Reals,bounds=(None,None),initialize=0.75398223686155)
m.x841 = Var(within=Reals,bounds=(None,None),initialize=0.774926187885482)
m.x842 = Var(within=Reals,bounds=(None,None),initialize=0.795870138909414)
m.x843 = Var(within=Reals,bounds=(None,None),initialize=0.816814089933346)
m.x844 = Var(within=Reals,bounds=(None,None),initialize=0.837758040957278)
m.x845 = Var(within=Reals,bounds=(None,None),initialize=0.85870199198121)
m.x846 = Var(within=Reals,bounds=(None,None),initialize=0.879645943005142)
m.x847 = Var(within=Reals,bounds=(None,None),initialize=0.900589894029074)
m.x848 = Var(within=Reals,bounds=(None,None),initialize=0.921533845053006)
m.x849 = Var(within=Reals,bounds=(None,None),initialize=0.942477796076938)
m.x850 = Var(within=Reals,bounds=(None,None),initialize=0.96342174710087)
m.x851 = Var(within=Reals,bounds=(None,None),initialize=0.984365698124802)
m.x852 = Var(within=Reals,bounds=(None,None),initialize=1.00530964914873)
m.x853 = Var(within=Reals,bounds=(None,None),initialize=1.02625360017267)
m.x854 = Var(within=Reals,bounds=(None,None),initialize=1.0471975511966)
m.x855 = Var(within=Reals,bounds=(None,None),initialize=1.06814150222053)
m.x856 = Var(within=Reals,bounds=(None,None),initialize=1.08908545324446)
m.x857 = Var(within=Reals,bounds=(None,None),initialize=1.11002940426839)
m.x858 = Var(within=Reals,bounds=(None,None),initialize=1.13097335529233)
m.x859 = Var(within=Reals,bounds=(None,None),initialize=1.15191730631626)
m.x860 = Var(within=Reals,bounds=(None,None),initialize=1.17286125734019)
m.x861 = Var(within=Reals,bounds=(None,None),initialize=1.19380520836412)
m.x862 = Var(within=Reals,bounds=(None,None),initialize=1.21474915938805)
m.x863 = Var(within=Reals,bounds=(None,None),initialize=1.23569311041199)
m.x864 = Var(within=Reals,bounds=(None,None),initialize=1.25663706143592)
m.x865 = Var(within=Reals,bounds=(None,None),initialize=1.27758101245985)
m.x866 = Var(within=Reals,bounds=(None,None),initialize=1.29852496348378)
m.x867 = Var(within=Reals,bounds=(None,None),initialize=1.31946891450771)
m.x868 = Var(within=Reals,bounds=(None,None),initialize=1.34041286553164)
m.x869 = Var(within=Reals,bounds=(None,None),initialize=1.36135681655558)
m.x870 = Var(within=Reals,bounds=(None,None),initialize=1.38230076757951)
m.x871 = Var(within=Reals,bounds=(None,None),initialize=1.40324471860344)
m.x872 = Var(within=Reals,bounds=(None,None),initialize=1.42418866962737)
m.x873 = Var(within=Reals,bounds=(None,None),initialize=1.4451326206513)
m.x874 = Var(within=Reals,bounds=(None,None),initialize=1.46607657167524)
m.x875 = Var(within=Reals,bounds=(None,None),initialize=1.48702052269917)
m.x876 = Var(within=Reals,bounds=(None,None),initialize=1.5079644737231)
m.x877 = Var(within=Reals,bounds=(None,None),initialize=1.52890842474703)
m.x878 = Var(within=Reals,bounds=(None,None),initialize=1.54985237577096)
m.x879 = Var(within=Reals,bounds=(None,None),initialize=1.5707963267949)
m.x880 = Var(within=Reals,bounds=(None,None),initialize=1.59174027781883)
m.x881 = Var(within=Reals,bounds=(None,None),initialize=1.61268422884276)
m.x882 = Var(within=Reals,bounds=(None,None),initialize=1.63362817986669)
m.x883 = Var(within=Reals,bounds=(None,None),initialize=1.65457213089062)
m.x884 = Var(within=Reals,bounds=(None,None),initialize=1.67551608191456)
m.x885 = Var(within=Reals,bounds=(None,None),initialize=1.69646003293849)
m.x886 = Var(within=Reals,bounds=(None,None),initialize=1.71740398396242)
m.x887 = Var(within=Reals,bounds=(None,None),initialize=1.73834793498635)
m.x888 = Var(within=Reals,bounds=(None,None),initialize=1.75929188601028)
m.x889 = Var(within=Reals,bounds=(None,None),initialize=1.78023583703422)
m.x890 = Var(within=Reals,bounds=(None,None),initialize=1.80117978805815)
m.x891 = Var(within=Reals,bounds=(None,None),initialize=1.82212373908208)
m.x892 = Var(within=Reals,bounds=(None,None),initialize=1.84306769010601)
m.x893 = Var(within=Reals,bounds=(None,None),initialize=1.86401164112994)
m.x894 = Var(within=Reals,bounds=(None,None),initialize=1.88495559215388)
m.x895 = Var(within=Reals,bounds=(None,None),initialize=1.90589954317781)
m.x896 = Var(within=Reals,bounds=(None,None),initialize=1.92684349420174)
m.x897 = Var(within=Reals,bounds=(None,None),initialize=1.94778744522567)
m.x898 = Var(within=Reals,bounds=(None,None),initialize=1.9687313962496)
m.x899 = Var(within=Reals,bounds=(None,None),initialize=1.98967534727354)
m.x900 = Var(within=Reals,bounds=(None,None),initialize=2.01061929829747)
m.x901 = Var(within=Reals,bounds=(None,None),initialize=2.0315632493214)
m.x902 = Var(within=Reals,bounds=(None,None),initialize=2.05250720034533)
m.x903 = Var(within=Reals,bounds=(None,None),initialize=2.07345115136926)
m.x904 = Var(within=Reals,bounds=(None,None),initialize=2.0943951023932)
m.x905 = Var(within=Reals,bounds=(None,None),initialize=2.11533905341713)
m.x906 = Var(within=Reals,bounds=(None,None),initialize=2.13628300444106)
m.x907 = Var(within=Reals,bounds=(None,None),initialize=2.15722695546499)
m.x908 = Var(within=Reals,bounds=(None,None),initialize=2.17817090648892)
m.x909 = Var(within=Reals,bounds=(None,None),initialize=2.19911485751286)
m.x910 = Var(within=Reals,bounds=(None,None),initialize=2.22005880853679)
m.x911 = Var(within=Reals,bounds=(None,None),initialize=2.24100275956072)
m.x912 = Var(within=Reals,bounds=(None,None),initialize=2.26194671058465)
m.x913 = Var(within=Reals,bounds=(None,None),initialize=2.28289066160858)
m.x914 = Var(within=Reals,bounds=(None,None),initialize=2.30383461263252)
m.x915 = Var(within=Reals,bounds=(None,None),initialize=2.32477856365645)
m.x916 = Var(within=Reals,bounds=(None,None),initialize=2.34572251468038)
m.x917 = Var(within=Reals,bounds=(None,None),initialize=2.36666646570431)
m.x918 = Var(within=Reals,bounds=(None,None),initialize=2.38761041672824)
m.x919 = Var(within=Reals,bounds=(None,None),initialize=2.40855436775217)
m.x920 = Var(within=Reals,bounds=(None,None),initialize=2.42949831877611)
m.x921 = Var(within=Reals,bounds=(None,None),initialize=2.45044226980004)
m.x922 = Var(within=Reals,bounds=(None,None),initialize=2.47138622082397)
m.x923 = Var(within=Reals,bounds=(None,None),initialize=2.4923301718479)
m.x924 = Var(within=Reals,bounds=(None,None),initialize=2.51327412287183)
m.x925 = Var(within=Reals,bounds=(None,None),initialize=2.53421807389577)
m.x926 = Var(within=Reals,bounds=(None,None),initialize=2.5551620249197)
m.x927 = Var(within=Reals,bounds=(None,None),initialize=2.57610597594363)
m.x928 = Var(within=Reals,bounds=(None,None),initialize=2.59704992696756)
m.x929 = Var(within=Reals,bounds=(None,None),initialize=2.61799387799149)
m.x930 = Var(within=Reals,bounds=(None,None),initialize=2.63893782901543)
m.x931 = Var(within=Reals,bounds=(None,None),initialize=2.65988178003936)
m.x932 = Var(within=Reals,bounds=(None,None),initialize=2.68082573106329)
m.x933 = Var(within=Reals,bounds=(None,None),initialize=2.70176968208722)
m.x934 = Var(within=Reals,bounds=(None,None),initialize=2.72271363311115)
m.x935 = Var(within=Reals,bounds=(None,None),initialize=2.74365758413509)
m.x936 = Var(within=Reals,bounds=(None,None),initialize=2.76460153515902)
# Variables x937..x1004 of the generated model. Their initial values lie on a
# uniform grid (step ~2*pi/300, with x954 starting at pi). The literals from
# the generated file are kept verbatim so initialization stays bit-identical;
# setattr(m, name, Var(...)) is exactly what `m.name = Var(...)` does.
_x937_x1004_inits = (
    2.78554548618295, 2.80648943720688, 2.82743338823081, 2.84837733925475,
    2.86932129027868, 2.89026524130261, 2.91120919232654, 2.93215314335047,
    2.95309709437441, 2.97404104539834, 2.99498499642227, 3.0159289474462,
    3.03687289847013, 3.05781684949407, 3.078760800518, 3.09970475154193,
    3.12064870256586, 3.14159265358979, 3.16253660461372, 3.18348055563766,
    3.20442450666159, 3.22536845768552, 3.24631240870945, 3.26725635973338,
    3.28820031075732, 3.30914426178125, 3.33008821280518, 3.35103216382911,
    3.37197611485304, 3.39292006587698, 3.41386401690091, 3.43480796792484,
    3.45575191894877, 3.4766958699727, 3.49763982099664, 3.51858377202057,
    3.5395277230445, 3.56047167406843, 3.58141562509236, 3.6023595761163,
    3.62330352714023, 3.64424747816416, 3.66519142918809, 3.68613538021202,
    3.70707933123596, 3.72802328225989, 3.74896723328382, 3.76991118430775,
    3.79085513533168, 3.81179908635562, 3.83274303737955, 3.85368698840348,
    3.87463093942741, 3.89557489045134, 3.91651884147528, 3.93746279249921,
    3.95840674352314, 3.97935069454707, 4.000294645571, 4.02123859659493,
    4.04218254761887, 4.0631264986428, 4.08407044966673, 4.10501440069066,
    4.1259583517146, 4.14690230273853, 4.16784625376246, 4.18879020478639,
)
for _i, _v in enumerate(_x937_x1004_inits, start=937):
    setattr(m, "x%d" % _i, Var(within=Reals, bounds=(None, None), initialize=_v))
# x1005 and x1006 are pinned to zero via degenerate (0, 0) bounds.
m.x1005 = Var(within=Reals, bounds=(0, 0), initialize=0)
m.x1006 = Var(within=Reals, bounds=(0, 0), initialize=0)
# x1007..x1205 share one identical declaration (free variable, initialized at
# zero), so they are created in a loop; setattr(m, name, Var(...)) is exactly
# what the generated `m.name = Var(...)` lines do, in the same component order.
for _j in range(1007, 1206):
    setattr(m, "x%d" % _j, Var(within=Reals, bounds=(None, None), initialize=0))
# x1206 is pinned to zero via degenerate (0, 0) bounds.
m.x1206 = Var(within=Reals, bounds=(0, 0), initialize=0)
# x1207..x1235: variables clipped to [-1, 1] and started slightly off zero
# (0.001). All 29 declarations are identical, so they are emitted by a loop.
for _k in range(1207, 1236):
    setattr(m, "x%d" % _k, Var(within=Reals, bounds=(-1, 1), initialize=0.001))
m.x1236 | |
batch
# Reset things as necessary
enz_curr = []
enz_remain = list(enzymes) # all enzymes to start each batch
snps_curr_batch = dict()
print("Batch number: %d" % batch_num)
print("Number of enzymes to test: %d" % len(enz_remain))
print("Number of availalbe SNPs: %d" % len(available_snps))
while ((len(snps_curr_batch)<target_batch_size) & (len(enz_remain)>0)): # keep adding to batch
print("Target batch size: %d" % target_batch_size)
print("Current batch size: %d" % len(snps_curr_batch))
nmax=0 # start over with number of snps when go over enz again
print("Starting next iteration to find 1 enzyme to add")
for i,e in enumerate(enz_remain):
print("Iterations %d: Enzyme %s" % (i,e))
# Enzyme list to test, add one enzyme at a time
enz_test = list(enz_curr)
enz_test.append(e)
goodsnps = what_snps_with_this_enzyme_list(enz_test,good_cuts,bad_cuts,fragend_cuts,available_snps)
print(goodsnps)
# is this the best enzyme addition we've seen?
n = len(goodsnps)
print("By adding %s, we find %d SNPs" % (e,n))
if n>nmax:
nmax=n
imax=i
snpsmax=goodsnps
print("Better than before")
if nmax>0:
print("Best enzyme index: %d" % imax)
print(enz_remain)
print(enz_remain[imax])
print(nmax)
# is this enzyme addition better than without it?
if nmax>len(snps_curr_batch):
chosen_enz = enz_remain[imax]
print("Enzyme: %s" % chosen_enz)
enz_curr.append(chosen_enz)
print("Previous SNPs")
if len(snps_curr_batch)>0:
prev=set(snps_curr_batch.keys())
print(prev)
print("New SNPs")
new=set(snpsmax.keys())
print(new)
if len(snps_curr_batch)>0:
print("Lost from old set")
print(prev.difference(new))
print("Added in new set")
print(new.difference(prev))
snps_curr_batch = snpsmax
enz_remain.remove(chosen_enz) # remove i
print("#####################################")
else:
break
sys.stdout.flush()
# Done with batch - adjust snp lists accordingly
print("Batch %d final selection" % batch_num)
if len(snps_curr_batch)>min_batch_size:
if len(snps_curr_batch)<target_batch_size: # between min and target
snps_curr.update(snps_curr_batch) # add current batch to full list
# remove snps from available list
# also add batch info to snp dict
for s in snps_curr_batch.keys():
available_snps.remove(s)
snpdict[s]['batch']=batch_num
enzymes_for_batch[batch_num] = enz_curr
batch_num += 1
new_snps_curr_batch = snps_curr_batch
else: # batch is too big, only keep max number
new_snps_curr_batch = {k: snps_curr_batch[k] for k in list(snps_curr_batch)[:target_batch_size]}
snps_curr.update(new_snps_curr_batch) # add current batch to full list
# remove snps from available list
# also add batch info to snp dict
for s in new_snps_curr_batch.keys():
available_snps.remove(s)
snpdict[s]['batch']=batch_num
enzymes_for_batch[batch_num] = enz_curr
batch_num += 1
else:
too_small_batch.append(snps_curr_batch.keys())
break
print(new_snps_curr_batch)
print("All selected SNPs")
print(snps_curr)
print("Remaining SNPs")
print(available_snps)
print("################################")
print("All selected SNPs")
print(snps_curr)
print("Remaining SNPs: %d" % len(available_snps))
end = time.time(); print("Time elapsed: %0.2f" % (end-start))
sys.stdout.flush()
###############################################################################
# Given final snp list get enzymes assignments and cut distances
# Update SNP dictionary with pass, drop, reasons etc
# For each selected SNP: pick the enzyme giving the nearest upstream cut,
# record nearest up/downstream cut sites, derive the target amplicon sequence
# for primer search, and stash the cut-site details. Non-selected SNPs get an
# explicit drop reason.
print("Collecting information on final snp and enzyme list"); start = time.time()
for s in snpdict:
    if s in snps_curr:
        # Get enzyme list for this SNP's batch:
        enz_curr = enzymes_for_batch[snpdict[s]['batch']]
        # Find enzyme and cut site upstream. For a 'top'-strand SNP upstream
        # means the smallest coordinate; for the other strand, the largest.
        # Sentinels start 1kb beyond the SNP so any real cut replaces them.
        sel_enz=''
        min_cut=snpdict[s]['pos']+1000
        max_cut=snpdict[s]['pos']-1000
        for e in enz_curr:
            cuts=good_cuts[e][s]
            if len(cuts)>0:
                if snpdict[s]['strand']=='top':
                    m=min(cuts)
                    if m<min_cut:
                        min_cut=m
                        sel_enz=e
                else:
                    m=max(cuts)
                    if m>max_cut:
                        max_cut=m
                        sel_enz=e
        snpdict[s]['enzyme']=sel_enz
        snpdict[s]['enzyme_recog_site']=recogsites[sel_enz]
        if snpdict[s]['strand']=='top':
            snpdict[s]['nearest_upstream_cut']=min_cut
        else:
            snpdict[s]['nearest_upstream_cut']=max_cut
        # Find closest cut site downstream (default 300)
        downstreamcut_min=snpdict[s]['pos'] + config['frag_end_range'][0]
        downstreamcut_max=snpdict[s]['pos'] - config['frag_end_range'][0]
        for e in enz_curr:
            cuts=fragend_cuts[e][s]
            if len(cuts)>0:
                if snpdict[s]['strand']=='top':
                    m=max(cuts)
                    if m>downstreamcut_min:
                        downstreamcut_min=m
                else:
                    m=min(cuts)
                    if m<downstreamcut_max:
                        downstreamcut_max=m
        if snpdict[s]['strand']=='top':
            snpdict[s]['nearest_downstream_cut']=downstreamcut_min
        else:
            snpdict[s]['nearest_downstream_cut']=downstreamcut_max
        # More information
        snpdict[s]['dist_from_mut_to_upstream_cut']=np.abs(snpdict[s]['nearest_upstream_cut']-snpdict[s]['pos'])
        pos1=min(snpdict[s]['nearest_upstream_cut'],snpdict[s]['nearest_downstream_cut'])
        pos2=max(snpdict[s]['nearest_upstream_cut'],snpdict[s]['nearest_downstream_cut'])
        if ( (pos2-pos1) < int(config['PRIMER_PRODUCT_SIZE_RANGE'][0])):
            # Fragment between the two cuts is shorter than the smallest
            # allowed primer product - no room for an amplicon.
            snpdict[s]['status']='drop'
            snpdict[s]['drop_reason']='amplicon_too_short'
        else:
            snpdict[s]['target_seq_for_primer_search']=seq_dic[snpdict[s]['chrom']][pos1:pos2]
            snpdict[s]['targetseq_pos1']=pos1
            snpdict[s]['targetseq_pos2']=pos2
            snpdict[s]['targetseq_coordinates']="%s:%d-%d" % (snpdict[s]['chrom'],pos1+1,pos2)
        passedsnp_goodcuts='; '.join([e+':'+','.join([str(x) for x in good_cuts[e][s]]) for e in enz_curr])
        passedsnp_badcuts='; '.join([e+':'+','.join([str(x) for x in bad_cuts[e][s]]) for e in enz_curr])
        passedsnp_fragcuts='; '.join([e+':'+','.join([str(x) for x in fragend_cuts[e][s]]) for e in enz_curr])
        snpdict[s]['good_cuts']=passedsnp_goodcuts
        snpdict[s]['bad_cuts']=passedsnp_badcuts
        snpdict[s]['fragend_cuts']=passedsnp_fragcuts
    else:
        # Check if SNP had no chance - no enzyme that works with it alone
        if snpdict[s]['status']=='pass': # didn't fail earlier steps
            if s not in possible_enzyme_match_found.keys():
                snpdict[s]['status']='drop'
                snpdict[s]['drop_reason']='no_single_enzyme_matches'
            # BUGFIX: too_small_batch collects the key collections of rejected
            # batches (one entry per batch), so a plain `s in too_small_batch`
            # could never match an individual SNP id. Scan each entry instead.
            elif any(s in dropped_batch for dropped_batch in too_small_batch):
                snpdict[s]['status']='drop'
                snpdict[s]['drop_reason']='batch_too_small_from_enzyme_selection'
            else:
                failedsnp_goodcuts='; '.join([e+':'+','.join([str(x) for x in good_cuts[e][s]]) for e in enz_curr])
                failedsnp_badcuts='; '.join([e+':'+','.join([str(x) for x in bad_cuts[e][s]]) for e in enz_curr])
                failedsnp_fragcuts='; '.join([e+':'+','.join([str(x) for x in fragend_cuts[e][s]]) for e in enz_curr])
                #TODO different enzymes enz-curr for different batches
                # NOTE(review): enz_curr here is whatever an earlier iteration
                # (or the batch-selection phase) left behind; it may not be the
                # enzyme set relevant to this SNP - confirm (see TODO above).
                snpdict[s]['good_cuts']=failedsnp_goodcuts
                snpdict[s]['bad_cuts']=failedsnp_badcuts
                snpdict[s]['fragend_cuts']=failedsnp_fragcuts
                snpdict[s]['status']='drop'
                snpdict[s]['drop_reason']='enzyme_cut_compatibility'
print_snp_dict(snpdict,True)
end = time.time(); print("Time elapsed: %0.2f" % (end-start))
sys.stdout.flush()
###############################################################################
#Run PRIMER3
# For every SNP still marked 'pass': write a primer3 input record, run
# primer3_core, parse its key=value stdout into primer3results[snpid], and
# emit each returned candidate primer as a FASTA record for the downstream
# BLAT specificity check.
date=datetime.datetime.now().strftime('%Y-%m-%d.%H-%M')
# Timestamped file names keep re-runs from clobbering earlier outputs.
primer3file=config['output_folder']+"primer3.input."+config['sample']+"."+date+".txt"
blatqueryfile=config['output_folder']+"blat_query.fa."+config['sample']+"."+date+".txt"
blatresultfile=config['output_folder']+"blat_results.out."+config['sample']+"."+date+".txt"
print(primer3file)
print(blatqueryfile)
print(blatresultfile)
# make output directory if it doesn't exist
os.makedirs(os.path.dirname(primer3file), exist_ok=True)
primer3results=dict()
#primer3 = config['primer3']
primer3 = "primer3_core" # if installed in environment or on path
print("Running primer3")
sys.stdout.flush()
with open(blatqueryfile,'w') as blatf:
    for snpid,snpinfo in snpdict.items():
        if snpinfo['status']=='pass':
            print("snp")
            # NOTE(review): presumably write_primer3_input_file appends this
            # SNP's record to primer3file, so primer3 below re-reads all
            # records written so far - confirm the parsed output block
            # actually belongs to this snpid.
            write_primer3_input_file(
                primer3file,
                snpid,
                snpdict[snpid]['target_seq_for_primer_search'],
                snpdict[snpid]['strand'],
                snpdict[snpid]['dist_from_mut_to_upstream_cut'],
                config)
            print("")
            # Run primer3 on the input file and capture its stdout (key=value lines).
            p=subprocess.Popen("%s %s" % (primer3,primer3file), shell=True, stdout=subprocess.PIPE)
            primer3out, err = p.communicate()
            print(primer3out.decode('ascii'))
            print("")
            sys.stdout.flush()
            # Store all the primer3 results
            primer3results[snpid]=dict()
            for line in primer3out.decode('ascii').split('\n'):
                if line.startswith('PRIMER'):
                    t,val=line.split("=")
                    primer3results[snpid][t]=val
            if "PRIMER_PAIR_NUM_RETURNED=0" in primer3out.decode('ascii'):
                # primer3 ran but found no usable pair; keep its explanations.
                snpdict[snpid]['status']='drop'
                snpdict[snpid]['drop_reason']='primer3_nonefound'
                snpdict[snpid]['left_primer_explanation']=primer3results[snpid]['PRIMER_LEFT_EXPLAIN']
                snpdict[snpid]['right_primer_explanation']=primer3results[snpid]['PRIMER_RIGHT_EXPLAIN']
            elif "PRIMER_ERROR" in primer3out.decode('ascii'):
                snpdict[snpid]['status']='drop'
                snpdict[snpid]['drop_reason']='primer3_error_seelog'
            else: # primer pairs found!
                # Write every returned LEFT/RIGHT primer sequence to the BLAT query FASTA.
                for i in range(config['PRIMER_NUM_RETURN']):
                    t="PRIMER_LEFT_%d_SEQUENCE" % i
                    if t in primer3results[snpid].keys():
                        seq=primer3results[snpid][t]
                        blatf.write(">"+snpid+"_"+t+"\n")
                        blatf.write(seq+"\n")
                    t="PRIMER_RIGHT_%d_SEQUENCE" % i
                    if t in primer3results[snpid].keys():
                        seq=primer3results[snpid][t]
                        blatf.write(">"+snpid+"_"+t+"\n")
                        blatf.write(seq+"\n")
###############################################################################
# Run BLAT on all primer options
start=time.time()
print("Running BLAT on all primer options for all SNPs")
sys.stdout.flush()
run_blat(blatqueryfile,blatresultfile,config)
end = time.time(); print("Time elapsed: %0.2f" % (end-start))
sys.stdout.flush()
###############################################################################
# Process BLAT results
# Tally BLAT hits per primer, keyed snpid -> 'LEFT'/'RIGHT' -> primer index.
blat_hits=dict()
with open(blatresultfile,'r') as blatr:
    for line in blatr:
        fields = line.split()
        query_name = fields[9]
        name_parts = query_name.split("_")
        snpid = "_".join(name_parts[0:3])  # snp id spans the first three tokens
        side = name_parts[4]               # 'LEFT' or 'RIGHT'
        primer_index = name_parts[5]
        gap_count = int(fields[6])
        # Count a hit only when the alignment score is within the configured
        # number of mismatches of the full primer (query) length.
        primer_len = int(fields[10])
        score = int(fields[0])
        if score >= (primer_len - config['blat_num_mismatches']):
            print("%s - %d - %d - %d" % (query_name, primer_len, score, gap_count))
            side_counts = blat_hits.setdefault(snpid, dict()).setdefault(side, Counter())
            side_counts.update([primer_index])
###############################################################################
# Look for valid pairs of primers based on blat hits
# Also check for SNPs in primer pairs
# For each SNP: classify primer ids as 'perfect' (exact expected hit count) or
# 'ok' (below the tolerated maximum), prefer perfect/perfect pairs, fall back
# to perfect/ok with the fewest combined hits, drop pairs overlapping known
# SNPs/indels, and finally keep the pair with the largest product size.
valid_primer_pairs=dict()
best_primer_pair=dict()
for snpid,counts in blat_hits.items():
    perfectfound=False
    perfectleft =[]
    perfectright =[]
    okleft =[]
    okright =[]
    print(snpid)
    print(counts)
    print("")
    if 'LEFT' in counts:
        for pid,ct in counts['LEFT'].items():
            if ct==config['blat_perfect_num_hits']:
                perfectleft.append(pid)
                print("Perfect left: %s - %s" % (snpid,pid))
            elif ct<config['blat_ok_num_hits']:
                okleft.append(pid)
                print("OK left: %s - %s" % (snpid,pid))
    if 'RIGHT' in counts:
        for pid,ct in counts['RIGHT'].items():
            if ct==config['blat_perfect_num_hits']:
                perfectright.append(pid)
                print("Perfect right: %s - %s" % (snpid,pid))
            elif ct<config['blat_ok_num_hits']:
                okright.append(pid)
                print("OK right: %s - %s" % (snpid,pid))
    # check for perfect pair
    perfectpairs=list(set(perfectleft).intersection(perfectright))
    if len(perfectpairs)>0:
        valid_primer_pairs[snpid]=perfectpairs
        ps=valid_primer_pairs[snpid]
        # check perfect pairs for snps in primers before skipping next step
        # BUGFIX: iterate over a copy - the loop removes entries from the very
        # list it iterates (`ps` aliases valid_primer_pairs[snpid]), which
        # would silently skip the element following every removal.
        for p in list(ps):
            (primerstringL,primerstringR)=get_primer_coordinates(p,snpid,primer3results,snpdict)
            [snp_positionsL,seq_positions,snp_alt_bases,region_ref_seq] = snps_or_indels_in_region(bam,primerstringL,bam_ref_dic,basequal_cutoff=config['basequal_cutoff'],vaf_cutoff=config['vaf_cutoff'],indelaf_cutoff=config['indelaf_cutoff'],var_count_cutoff=config['var_count_cutoff'],indel_count_cutoff=config['indel_count_cutoff'])
            [snp_positionsR,seq_positions,snp_alt_bases,region_ref_seq] = snps_or_indels_in_region(bam,primerstringR,bam_ref_dic,basequal_cutoff=config['basequal_cutoff'],vaf_cutoff=config['vaf_cutoff'],indelaf_cutoff=config['indelaf_cutoff'],var_count_cutoff=config['var_count_cutoff'],indel_count_cutoff=config['indel_count_cutoff'])
            if (len(snp_positionsL)>0) or (len(snp_positionsR)>0):
                valid_primer_pairs[snpid].remove(p)
                print("Found SNP in primer pair: %s" % p)
                print("Left primer: %s" % primerstringL)
                print("Right primer: %s" % primerstringR)
        if len(valid_primer_pairs[snpid])>0:
            perfectfound=True
    if not perfectfound:
        # check for one perfect and one ok
        ok_perf_pairs=list(set(perfectleft).intersection(okright))
        ok_perf_pairs.extend(list(set(perfectright).intersection(okleft)))
        best_pairs=[]
        m=config['blat_perfect_num_hits']+config['blat_ok_num_hits'] # min hits combined across 2 primers
        for p in ok_perf_pairs:
            m_obs = blat_hits[snpid]['LEFT'][p] + blat_hits[snpid]['RIGHT'][p]
            if m_obs<m:
                m=m_obs
                best_pairs=[p]
            elif m_obs==m:
                best_pairs.append(p)
        valid_primer_pairs[snpid]=best_pairs
    print(valid_primer_pairs[snpid])
    print("Has valid primer pairs before SNP checking")
    # Further selection based on product size (larger is better)
    if len(valid_primer_pairs[snpid])>0:
        ps=valid_primer_pairs[snpid]
        ps.sort(key=float) # sort so ties are broken by lowest number, which has best score from primer3
        print(ps)
        ps_no_snps = list(ps)
        # Check valid primer pairs for snps in primer, drop if SNP in primer
        # (here the original list is iterated and removals go to the copy).
        for p in ps:
            (primerstringL,primerstringR)=get_primer_coordinates(p,snpid,primer3results,snpdict)
            [snp_positionsL,seq_positions,snp_alt_bases,region_ref_seq] = snps_or_indels_in_region(bam,primerstringL,bam_ref_dic,basequal_cutoff=config['basequal_cutoff'],vaf_cutoff=config['vaf_cutoff'],indelaf_cutoff=config['indelaf_cutoff'],var_count_cutoff=config['var_count_cutoff'],indel_count_cutoff=config['indel_count_cutoff'])
            [snp_positionsR,seq_positions,snp_alt_bases,region_ref_seq] = snps_or_indels_in_region(bam,primerstringR,bam_ref_dic,basequal_cutoff=config['basequal_cutoff'],vaf_cutoff=config['vaf_cutoff'],indelaf_cutoff=config['indelaf_cutoff'],var_count_cutoff=config['var_count_cutoff'],indel_count_cutoff=config['indel_count_cutoff'])
            if (len(snp_positionsL)>0) or (len(snp_positionsR)>0):
                print("Found SNP in primer pair: %s" % p)
                print("Left primer: %s" % primerstringL)
                print("Right primer: %s" % primerstringR)
                ps_no_snps.remove(p)
        # Still has valid primer options
        if len(ps_no_snps)>0:
            m=0
            for p in ps_no_snps:
                prodsize=int(primer3results[snpid]["PRIMER_PAIR_%s_PRODUCT_SIZE" % p])
                print("%d: %s" % (prodsize,p))
                if prodsize>m:
                    m=prodsize
                    bestprimer=p
                    print("bigger product")
            best_primer_pair[snpid]=bestprimer
            print(bestprimer)
        else:
            snpdict[snpid]['status']='drop'
            snpdict[snpid]['drop_reason']='blat_hits'
###############################################################################
print("Final filtering for batch size, duplicates, no primers found")
# Get batch size
# Pass 1: count passing SNPs with a chosen primer pair per batch, capping each
# batch at max_batch_size; SNPs without a pair are dropped.
batch_size_counter = Counter()
for snpid,snpinfo in snpdict.items():
    print(snpid)
    if snpinfo['status']=='pass':
        print("pass")
        if snpid in best_primer_pair:
            print("has primer pair")
            # Update batch size
            b = snpdict[snpid]['batch']
            batch_size_counter.update([b])
            # If we've reached max for this batch, drop remainder
            if batch_size_counter[b] > config['max_batch_size']:
                snpdict[snpid]['status']='drop'
                snpdict[snpid]['drop_reason']='max_batch_size_reached'
        # Drop anything without valid primer pair
        else:
            snpdict[snpid]['status']='drop'
            snpdict[snpid]['drop_reason']='SNP_in_primer'
# Find batches that are too small
dropbatch_small=[]
for b,batchsize in batch_size_counter.items():
    if batchsize < config['min_batch_size']:
        dropbatch_small.append(b)
# Drop small batches
for snpid,snpinfo in snpdict.items():
    print("SNP")
    print(snpid)
    if snpinfo['status']=='pass':
        b = snpdict[snpid]['batch']
        if b in dropbatch_small:
            snpdict[snpid]['status']='drop'
            snpdict[snpid]['drop_reason']='batch_size_too_small'
# Check for duplicates
passed_snps = [x for x in snpdict.keys() if snpdict[x]['status']=='pass']
for snpid,snpinfo in snpdict.items():
    print(snpid)
    if snpinfo['status']=='pass':
        # Check for same position on both strands in final list
        if snpdict[snpid]['strand']!=config['strand_preference']:
            # Reconstruct the same SNP's id on the preferred strand (was
            # hard-coded to 'top', which breaks whenever strand_preference
            # is set to the other strand).
            opposite_snpid='_'.join(snpid.split('_')[0:2]+[config['strand_preference']])
            if opposite_snpid in passed_snps: # preferred strand for same snp is in final list, drop this one
                snpdict[snpid]['status']='drop'
                snpdict[snpid]['drop_reason']='other_strand_same_snp_in_final_list'
#
###############################################################################
# Loop over current pass snps
# Update dict values for status based on primer results
print("Getting final snp info")
for snpid,snpinfo in snpdict.items():
if snpinfo['status']=='pass':
# Get assigned primer | |
import copy
import math
from dataclasses import dataclass, fields
from typing import Optional, Tuple, List, NamedTuple, Union, Iterable, Dict
from pathlib import Path
import pickle
from enum import Enum
import functools
from collections import Counter
import logging
import pandas as pd
import numpy as np
from .physionet import read_physionet_dataset, RespiratoryEventType, RespiratoryEvent, SleepStageType
class GroundTruthClass(Enum):
    """Per-sample classification target.

    Value 0 ('NoEvent') marks positions without an annotated respiratory
    event; values 1-4 mirror the members of RespiratoryEventType (see the
    translation table below).
    """
    NoEvent = 0
    CentralApnea = 1
    ObstructiveApnea = 2
    MixedApnea = 3
    Hypopnea = 4
logger = logging.getLogger(__name__)
# Translation table for RespiratoryEventType -> GroundTruthClass
RESPIRATORY_EVENT_TYPE__GROUND_TRUTH_CLASS = {RespiratoryEventType.CentralApnea: GroundTruthClass.CentralApnea,
                                              RespiratoryEventType.MixedApnea: GroundTruthClass.MixedApnea,
                                              RespiratoryEventType.ObstructiveApnea: GroundTruthClass.ObstructiveApnea,
                                              RespiratoryEventType.Hypopnea: GroundTruthClass.Hypopnea}
# Sanity check: every respiratory event type must map to a ground-truth class.
# GroundTruthClass additionally carries 'NoEvent', hence the "-1".
assert len(RespiratoryEventType) == len(GroundTruthClass)-1, \
    f"There seems at least one class to be missing in either of the types {RespiratoryEventType.__name__} or {GroundTruthClass.__name__}"
# One retrieved window: the signal slice, its center timestamp, and (if the
# dataset carries annotations) the ground-truth classes around that center.
WindowData = NamedTuple("WindowData", signals=pd.DataFrame, center_point=pd.Timedelta, ground_truth=Optional[pd.Series])
class SlidingWindowDataset:
    """
    Wrapper for PhysioNetDataset class. It adds the following features:
    - Preprocessing and generation of supporting data vectors
    - Caching to dramatically speed-up loading
    - Piecewise (window-based) retrieval of dataset data. Reference hereby is the center point of the to-be-retrieved window
    - Generation of ground truth vector. When retrieving a window, the ground truth class is delivered alongside,
      referring to the center point.
    """
    @dataclass
    class Config:
        # Frequency the raw signals are downsampled to before windowing.
        downsample_frequency_hz: float
        time_window_size: pd.Timedelta  # Length of the slided window. The outputted GT refers to its central point.
        time_window_stride: Union[pd.Timedelta, int] = 1  # Step width that we proceed with when outputting time window & ground truth vector
        ground_truth_vector_width: Union[pd.Timedelta, int] = 1  # Width of the outputted GT vector. If 'int' is passed: Must be a positive odd number!
        def __get_index_steps(self, value: pd.Timedelta, variable_name: str) -> int:
            # Convert a time span into a whole number of samples at the
            # downsample frequency; reject configs where it doesn't divide evenly.
            reference_timedelta_ = pd.to_timedelta(f"{1/self.downsample_frequency_hz * 1_000_000}us")
            index_steps_: float = value/reference_timedelta_
            assert int(index_steps_) == index_steps_, \
                f"Parameter '{variable_name}' ({value}) has no common factor with the given down-sample frequency ({self.downsample_frequency_hz} Hz)!"
            return int(index_steps_)
        def __post_init__(self):
            # Determine all the regarding index steps out of the given parameters
            window_index_steps_ = self.__get_index_steps(value=self.time_window_size, variable_name="time_window_size")
            if (window_index_steps_ % 2) == 0:
                # Force an odd window length so it has a well-defined center sample.
                window_index_steps_ += 1
            self.time_window_size__index_steps = window_index_steps_
            if isinstance(self.time_window_stride, pd.Timedelta):
                self.time_window_stride__index_steps = self.__get_index_steps(value=self.time_window_stride, variable_name="time_window_stride")
            elif isinstance(self.time_window_stride, int):
                self.time_window_stride__index_steps = self.time_window_stride
            else:
                raise NotImplementedError
            if isinstance(self.ground_truth_vector_width, pd.Timedelta):
                gt_index_steps_ = self.__get_index_steps(value=self.ground_truth_vector_width, variable_name="ground_truth_vector_width")
                if (gt_index_steps_ % 2) == 0:
                    gt_index_steps_ += 1  # For ground_truth_vector_width, we always wish to work with odd numbers!
            elif isinstance(self.ground_truth_vector_width, int):
                gt_index_steps_ = self.ground_truth_vector_width
            else:
                raise NotImplementedError
            assert gt_index_steps_ > 0 and (gt_index_steps_ % 2) == 1, \
                "When passing 'ground_truth_vector_width' as int, it must be a positive odd integer!"
            self.ground_truth_vector_width__index_steps = gt_index_steps_
    def __init__(self, config: Config, dataset_folder: Optional[Path], allow_caching: bool = True, cached_dataset_file: Optional[Path] = None):
        """Loads a dataset (from cache if possible) and precomputes window metadata.

        :param config: Windowing/downsampling configuration.
        :param dataset_folder: Folder of the raw PhysioNet dataset. May be None
                               if a readable 'cached_dataset_file' is given.
        :param allow_caching: If True, first tries to restore a cached pickle
                              and writes one after preprocessing.
        :param cached_dataset_file: Explicit cache file; defaults to
                                    '<dataset_folder>/preprocessed.pkl'.
        """
        self.config = config
        if allow_caching is False:
            assert cached_dataset_file is None, "Illegal parameter combination!"
        # Handle our path parameter logic
        assert dataset_folder is not None or cached_dataset_file is not None, \
            "At least one of both parameters 'dataset_folder' and 'cached_dataset_file' must contain a value!"
        self.dataset_folder: Optional[Path] = dataset_folder
        self.dataset_name: str = \
            dataset_folder.name if dataset_folder is not None else f"{cached_dataset_file.parent.name}/{cached_dataset_file.stem}"
        if cached_dataset_file is None:
            cached_dataset_file = self.dataset_folder.resolve() / "preprocessed.pkl"
        # Let's see if there is a cached version that we can load
        if allow_caching:
            success = self._try_read_cached_dataset(cached_dataset_file=cached_dataset_file)
            if success:
                # Fixed: use the module logger and 'self.dataset_name' here;
                # 'dataset_folder' may legitimately be None when loading purely
                # from 'cached_dataset_file', which used to crash this line.
                logger.debug(f"{self.dataset_name}: Using pre-cached dataset")
                return
        assert dataset_folder is not None, \
            "Since loading cached dataset file failed, we have to load from a dataset folder. But there is None!"
        assert dataset_folder.exists() and dataset_folder.is_dir(), \
            f"Given dataset folder '{dataset_folder.resolve()}' either not exists or is no folder."
        # Load the PhysioNet dataset from disk and apply some pre-processing
        try:
            ds = read_physionet_dataset(dataset_folder=dataset_folder)
            ds = ds.pre_clean().downsample(target_frequency=config.downsample_frequency_hz)
            self.signals = ds.signals[["ABD", "CHEST", "AIRFLOW", "SaO2"]].astype(np.float32)
            self.respiratory_events = ds.respiratory_events
            self.sleep_stage_events = ds.sleep_stage_events
            del ds
        except (KeyboardInterrupt, SystemExit):
            raise
        except BaseException as e:
            raise RuntimeError(f"Error parsing/preprocessing PhysioNet dataset '{dataset_folder.name}'") from e
        # Some examinations
        assert self.signals.index[0] <= config.time_window_size < self.signals.index[-1], \
            f"Chosen time_window_size '{config.time_window_size}' is too large for the given PhysioNet dataset!"
        # Determine some meta data. 'dist_' is the margin (in samples) that must
        # remain on each side of any valid center point.
        dist_ = int(max(self.config.time_window_size__index_steps/2, self.config.ground_truth_vector_width__index_steps/2))
        dist_ = max(2, dist_)  # Must be at least 2 to produce reasonable values
        self._valid_center_points: pd.TimedeltaIndex = self.signals.index[dist_:-dist_:self.config.time_window_stride__index_steps]
        self._idx__signal_int_index: List[int] = list(range(len(self.signals))[dist_:-dist_:self.config.time_window_stride__index_steps])
        assert len(self._valid_center_points) == len(self._idx__signal_int_index)
        # In case there are respiratory event annotations, generate our GroundTruth vector
        self.ground_truth_series: Optional[pd.Series] = None
        if self.respiratory_events is not None:
            gt_series = self._generate_ground_truth_series(signals_time_index=self.signals.index, respiratory_events=self.respiratory_events)
            assert len(gt_series) == len(self.signals)
            # Erase beginning/ending of our gt vector, length depending on our time-window-size & gt-vector-width
            edge_cut_indexes_lr = dist_ - int(self.config.ground_truth_vector_width__index_steps/2) - 1
            gt_series[:edge_cut_indexes_lr + 1] = np.nan
            # Fixed: only cut the tail for a strictly positive margin. With
            # edge_cut_indexes_lr == 0, 'gt_series[-0:]' would select (and
            # NaN-out) the ENTIRE series; negative values wiped almost all of it.
            if edge_cut_indexes_lr > 0:
                gt_series[-edge_cut_indexes_lr:] = np.nan
            self.ground_truth_series = gt_series
        # Serialize preprocessed dataset to disk
        if allow_caching:
            with open(file=cached_dataset_file, mode="wb") as file:
                pickle.dump(obj=self, file=file)
    @functools.cached_property
    def awake_series(self) -> Optional[pd.Series]:
        """
        Returns an "awake" series, in case we have event annotations available. Otherwise, the result is None.
        @return: Awake vector (1=awake, 0=asleep) as Series. The Series index corresponds to our signals index. None
                 if there are no event annotations available.
        """
        if self.sleep_stage_events is None:
            return None
        is_awake_mat = np.zeros(shape=(len(self.signals.index),), dtype="int8")
        for event in self.sleep_stage_events:
            # Each event sets the state from its start to the end of the
            # recording; later events overwrite the tail written by earlier ones.
            start_idx = self.signals.index.get_loc(event.start)
            value = 1 if event.sleep_stage_type == SleepStageType.Wakefulness else 0
            is_awake_mat[start_idx:] = value
        is_awake_series = pd.Series(data=is_awake_mat, index=self.signals.index, name="Is awake (ref. sleep stages)")
        return is_awake_series
    @staticmethod
    def _generate_ground_truth_series(signals_time_index: pd.TimedeltaIndex, respiratory_events: List[RespiratoryEvent]) -> pd.Series:
        """Rasterizes the annotated events onto the signal time grid (uint8 class values)."""
        gt_vector = np.ndarray(shape=(len(signals_time_index),))
        gt_vector[:] = GroundTruthClass.NoEvent.value
        for event in respiratory_events:
            # NOTE(review): the 'method=' kwarg of Index.get_loc was removed in
            # pandas 2.0 — this code targets older pandas versions.
            start_idx = signals_time_index.get_loc(key=event.start, method="nearest")
            end_idx = signals_time_index.get_loc(key=event.end, method="nearest")
            assert event.event_type in RESPIRATORY_EVENT_TYPE__GROUND_TRUTH_CLASS.keys(), \
                f"{event.event_type.name} seems not present in above dictionary (and likely in GroundTruthClass)"
            gt_class = RESPIRATORY_EVENT_TYPE__GROUND_TRUTH_CLASS[event.event_type]
            gt_vector[start_idx:end_idx] = gt_class.value
        gt_series = pd.Series(data=gt_vector, index=signals_time_index, dtype="uint8")
        return gt_series
    @functools.cached_property
    def gt_class_occurrences(self) -> Dict[GroundTruthClass, int]:
        """Offers a distribution of ground truth classes.

        NOTE(review): assumes ground-truth annotations exist; raises if
        'ground_truth_series' is None (check has_ground_truth() first).
        """
        gt_series__no_nans = self.ground_truth_series[~np.isnan(self.ground_truth_series)]
        counter = Counter(gt_series__no_nans)
        gt_class_occurrences: Dict[GroundTruthClass, int] = {klass: counter[klass.value] if klass.value in counter else 0 for klass in GroundTruthClass}
        return gt_class_occurrences
    def has_ground_truth(self) -> bool:
        """True if the underlying dataset carries respiratory event annotations."""
        return self.respiratory_events is not None
    def _try_read_cached_dataset(self, cached_dataset_file: Path) -> bool:
        """Tries to restore the preprocessed state from a pickle; returns success."""
        if not cached_dataset_file.is_file() or not cached_dataset_file.exists():
            return False
        try:
            with open(file=cached_dataset_file, mode="rb") as file:
                cached_dataset: SlidingWindowDataset = pickle.load(file)
            # Make sure the cached config matches the given config. We intentionally don't compare the configs directly!
            for field in fields(self.config):
                assert getattr(self.config, field.name) == getattr(cached_dataset.config, field.name)
            # Now, retrieve the cached data fields
            self._valid_center_points = cached_dataset._valid_center_points
            self._idx__signal_int_index = cached_dataset._idx__signal_int_index
            self.ground_truth_series = cached_dataset.ground_truth_series
            self.respiratory_events = cached_dataset.respiratory_events
            self.sleep_stage_events = cached_dataset.sleep_stage_events
            self.signals = cached_dataset.signals
            del cached_dataset
        except (KeyboardInterrupt, SystemExit):
            raise
        except BaseException:  # We intentionally catch everything here!
            return False
        return True
    def __getitem__(self, idx: int) -> WindowData:
        assert -len(self) <= idx < len(self), "Index out of bounds"
        center_point_index = self._idx__signal_int_index[idx]
        center_point_timedelta = self._valid_center_points[idx]
        features = self.signals.iloc[center_point_index-int(self.config.time_window_size__index_steps/2):center_point_index+int(self.config.time_window_size__index_steps/2)+1]
        assert len(features) == self.config.time_window_size__index_steps
        assert not np.any(np.isnan(features.values)), f"Oops, there's something NaN! dataset_name='{self.dataset_name}', idx={idx}"
        assert not np.any(np.isinf(features.values)), f"Oops, there's something inf! dataset_name='{self.dataset_name}', idx={idx}"
        gt_series = None
        if self.has_ground_truth():
            gt_numbers = self.ground_truth_series[center_point_index - int(self.config.ground_truth_vector_width__index_steps / 2):center_point_index + int(self.config.ground_truth_vector_width__index_steps / 2) + 1]
            assert len(gt_numbers) == self.config.ground_truth_vector_width__index_steps
            assert not np.any(np.isnan(gt_numbers))
            gt_classes = [GroundTruthClass(int(g)) for g in gt_numbers]
            gt_series = pd.Series(data=gt_classes, index=gt_numbers.index, name="Ground truth")
        return WindowData(signals=features, center_point=center_point_timedelta, ground_truth=gt_series)
    def __len__(self):
        return len(self._valid_center_points)
    @functools.cached_property
    def valid_center_points(self) -> pd.TimedeltaIndex:
        """
        Provides the range of valid center points. Center point refers to the middle of the configured time window.
        """
        return copy.deepcopy(self._valid_center_points)
    def get(self, center_point: Optional[pd.Timedelta] = None, raw_index: Optional[int] = None) -> WindowData:
        """
        Returns values for a specific time window. The position of the time window either refers to the raw index,
        or to the center of the time window.
        :param center_point: The function returns values centered around the given center point.
        :param raw_index: The function acts exactly like __getitem__
        """
        assert (center_point is None and raw_index is not None) or (center_point is not None and raw_index is None), \
            "Exactly one of the given arguments must be None!"
        if center_point is not None:
            valid_start_ = self._valid_center_points[0]
            valid_end_ = self._valid_center_points[-1]
            assert valid_start_ <= center_point <= valid_end_, \
                f"Given center point {center_point} not in range of valid center points ({valid_start_}..{valid_end_})!"
            # NOTE(review): 'method=' was removed from get_loc in pandas 2.0.
            idx = self._valid_center_points.get_loc(center_point, method="nearest")
            assert 0 <= idx < len(self)
        else:
            idx = raw_index
        return self[idx]
def test_sliding_window_dataset():
    """Smoke test: builds a SlidingWindowDataset from raw PhysioNet data
    (caching disabled) and exercises its main accessors.

    Requires the 'tr03-0005' training recording below DATA_PATH.
    """
    from util.paths import DATA_PATH  # removed unused normalize_robust import
    config = SlidingWindowDataset.Config(
        downsample_frequency_hz=5,
        time_window_size=pd.to_timedelta("5 minutes"),
        time_window_stride=5,
        ground_truth_vector_width=11
    )
    sliding_window_dataset = SlidingWindowDataset(config=config, dataset_folder=DATA_PATH/"training"/"tr03-0005", allow_caching=False)
    # Exercise the public surface; detailed checks happen via asserts inside the class.
    assert len(sliding_window_dataset) > 0
    _ = sliding_window_dataset.gt_class_occurrences
    _ = sliding_window_dataset[-1]
    valid_center_points = sliding_window_dataset.valid_center_points
    _ = sliding_window_dataset.get(center_point=valid_center_points[0])
    _ = sliding_window_dataset.get(center_point=pd.Timedelta("0 days 00:15:15.930000"))
def test_caching_speed():
| |
0, 0, 0, 0],
[1, 1, 1, 0, 1, 1, 1, 0],
[1, 0, 1, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 0, 1, 0],
[1, 1, 1, 0, 1, 0, 1, 0]
],
"off": [
[1, 1, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 5, 5, 0, 0, 0],
[1, 1, 1, 5, 0, 1, 1, 0],
[0, 0, 0, 5, 5, 1, 0, 0],
[0, 0, 0, 5, 0, 1, 1, 0],
[0, 0, 0, 5, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0]
]
}
    def custom_receive_midi(self, midi_bytes):
        # Default MIDI handler for menu modes; subclasses override this.
        # NOTE(review): returns True — presumably "event consumed"; confirm
        # against the dispatch logic in the main controller class.
        return True
def clear_lights(self):
self.apc.really_do_send_midi((NOTE_ON_STATUS, 68, 0))
self.apc.really_do_send_midi((NOTE_ON_STATUS, 69, 0))
self.apc.really_do_send_midi((NOTE_ON_STATUS, 70, 0))
self.apc.really_do_send_midi((NOTE_ON_STATUS, 71, 0))
    def gotoRootMenu(self):
        # Make the shifted root menu the active mode and repaint its lights.
        self.apc.mode = self.apc.rootMenu
        self.apc.mode.syncLights()
    def exitMode(self):
        # Leave menu mode entirely and hand the grid back to normal operation.
        self.apc.mode = None
        # # Use this to refresh lights if have been monkeying around with them to do comms:
        self.apc._update_hardware()
        # Another way - only worked once...
        # self._refresh_displays()
        # Another way to refresh lights if have been monkeying around but seems slower and heavier reboot op
        # self.refresh_state()
def paintLetter(self, letter):
for rowNum, rowArr in enumerate(letter):
rowStart = self.rowStarts[rowNum]
for colNum, val in enumerate(rowArr):
self.apc.really_do_send_midi((NOTE_ON_STATUS, rowStart + colNum, val))
# if val == 1:
# self.apc.really_do_send_midi((NOTE_ON_STATUS, rowStart + colNum, 1))
# else:
# self.apc.really_do_send_midi((NOTE_ON_STATUS, rowStart + colNum, 0))
    def paintNumber(self, number):
        """Render a (multi-digit) number onto the 8x8 pad grid.

        Digits come from the "small<digit>" glyphs in self.letters and are
        blitted side by side into one combined frame. With three or more
        digits there is no blank column between them, so adjacent digits
        alternate between colours 1 and 5 to stay distinguishable.
        """
        numberAsString = str(number)
        offset = 0  # next free column in the combined frame
        combined_letter = [
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0]
        ]
        color = 1
        inc = 2  # column advance past a digit's last lit column (1 = no gap)
        if len(numberAsString) >= 3:
            inc = 1
        for char in numberAsString:
            letter = self.letters["small" + char]
            lastCol = 0
            if inc == 1:
                # No space between digits - use color to differentiate
                if color == 1:
                    color = 5
                else:
                    color = 1
            for rowNum, rowArr in enumerate(letter):
                for colNum, val in enumerate(rowArr):
                    if val == 1:
                        lastCol = max(lastCol, colNum)
                        # Clip at the right edge of the 8-column grid.
                        if offset + colNum <= 7:
                            combined_letter[rowNum][offset + colNum] = color
            offset = offset + lastCol + inc
        self.paintLetter(combined_letter)
class ShiftedMenuMode(ModeBase):
    """Root menu (opened by double-tapping SHIFT).

    The grid shows the name glyph of the currently selected sub-mode;
    button 64 enters it, 65 exits the menu, 66/67 page through self.modes.
    """
    modeNum = 0
    def __init__(self, apc):
        ModeBase.__init__(self, apc)
        self.apc.show_message("menuMode")
        # Selectable sub-modes, in paging order.
        self.modes = [RecordBarsMode(apc), TapTempoMode(apc), MetronomeMode(apc), PaintMode(apc)]
        self.modeNum = 0
        # self.syncLights()
    def syncLights(self):
        # Paint the selected sub-mode's name glyph onto the grid.
        self.apc.log_message("Mode name")
        self.apc.log_message(self.modes[self.modeNum].getName())
        self.apc.log_message("grid")
        self.apc.log_message(self.letters[self.modes[self.modeNum].getName()])
        self.paintLetter(self.letters[self.modes[self.modeNum].getName()])
        # Set status of menu buttons
        self.clear_lights()
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 64, 1))
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 65, 1))
        # self.apc.really_do_send_midi((NOTE_ON_STATUS, 68, 0))
        # self.apc.really_do_send_midi((NOTE_ON_STATUS, 69, 0))
        # self.apc.really_do_send_midi((NOTE_ON_STATUS, 70, 0))
        # self.apc.really_do_send_midi((NOTE_ON_STATUS, 71, 0))
        # The next/previous buttons light up only while paging is possible.
        if self.modeNum < len(self.modes) - 1:
            self.apc.really_do_send_midi((NOTE_ON_STATUS, 67, 1))
        else:
            self.apc.really_do_send_midi((NOTE_ON_STATUS, 67, 0))
        if self.modeNum > 0:
            self.apc.really_do_send_midi((NOTE_ON_STATUS, 66, 1))
        else:
            self.apc.really_do_send_midi((NOTE_ON_STATUS, 66, 0))
    def custom_receive_midi(self, midi_bytes):
        if midi_bytes[0] & 240 == NOTE_ON_STATUS:
            note = midi_bytes[1]
            if note == 65:
                # Leave the menu system entirely.
                self.exitMode()
                return
            if note == 64:
                # Activate the currently selected sub-mode.
                self.apc.mode = self.modes[self.modeNum]
                self.apc.mode.syncLights()
                return
            if note == 66:
                # Page backward (clamped at the first entry).
                self.modeNum = self.modeNum - 1
                if self.modeNum < 0:
                    self.modeNum = 0
                self.syncLights()
            if note == 67:
                # Page forward (clamped at the last entry).
                self.modeNum = self.modeNum + 1
                if self.modeNum >= len(self.modes):
                    self.modeNum = len(self.modes) - 1
                self.syncLights()
        return False
class PaintMode(ModeBase):
    """Free-drawing mode: each grid pad cycles through the pad colours."""

    # Colour cycle per pad press: 0 (off) -> 1 -> 3 -> 5 -> 0; any other
    # stored value is left untouched.
    _COLOR_CYCLE = {0: 1, 1: 3, 3: 5, 5: 0}

    def __init__(self, apc):
        ModeBase.__init__(self, apc)
        # 8x8 canvas of colour velocities; all pads start dark.
        self.grid = [[0] * 8 for _ in range(8)]

    def getName(self):
        return "edit"

    def syncLights(self):
        # Repaint the canvas; only the "back" button (65) stays lit.
        self.paintLetter(self.grid)
        for button, velocity in ((64, 0), (65, 1), (66, 0), (67, 0)):
            self.apc.really_do_send_midi((NOTE_ON_STATUS, button, velocity))

    def custom_receive_midi(self, midi_bytes):
        if midi_bytes[0] & 240 == NOTE_ON_STATUS:
            pressed = midi_bytes[1]
            if pressed == 65:
                self.gotoRootMenu()
                return
            if pressed < 64:
                col = self.apc.getTrackIndex(pressed)
                row = self.apc.getClipIndex(pressed)
                current = self.grid[row][col]
                self.grid[row][col] = self._COLOR_CYCLE.get(current, current)
                self.syncLights()
        return False
class RecordBarsMode(ModeBase):
    """Menu page for viewing/adjusting the fixed record length in bars.

    Button 67 increments the length, 66 decrements it (never below 1);
    the grid paints the current number of bars.
    """

    def __init__(self, apc):
        ModeBase.__init__(self, apc)

    def getName(self):
        return "bar"

    def syncLights(self):
        self.paintNumber(self.apc.fixed_record_bar_length())
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 64, 0))
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 65, 1))
        # The decrement button lights only while the length can still shrink.
        decrement_light = 1 if self.apc.fixed_record_bar_length() > 1 else 0
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 66, decrement_light))
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 67, 1))

    def custom_receive_midi(self, midi_bytes):
        if midi_bytes[0] & 240 != NOTE_ON_STATUS:
            return False
        pressed = midi_bytes[1]
        if pressed == 65:
            self.gotoRootMenu()
        if pressed == 67:
            self.apc.set_fixed_record_bar_length(self.apc.fixed_record_bar_length() + 1)
            self.syncLights()
        if pressed == 66 and self.apc.fixed_record_bar_length() > 1:
            self.apc.set_fixed_record_bar_length(self.apc.fixed_record_bar_length() - 1)
            self.syncLights()
        return False
class TapTempoMode(ModeBase):
    """Menu page for tap tempo / BPM adjustment.

    Buttons 67/66 nudge the tempo up/down by one BPM; 65 returns to the
    root menu (rounding the tempo down); ANY other pressed note taps the
    tempo via the song's tap_tempo().
    """
    def __init__(self, apc):
        ModeBase.__init__(self, apc)
        # self.apc.__fixed_record_bar_length=2
    def getName(self):
        return "bpm"
    def syncLights(self):
        song = self.apc.song()
        # Show the current (integer) tempo on the grid.
        self.paintNumber(int(song.tempo))
        # self.apc.really_do_send_midi((NOTE_ON_STATUS, 64, 0))
        # self.apc.really_do_send_midi((NOTE_ON_STATUS, 65, 1))
        # if self.apc.fixed_record_bar_length() > 1:
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 64, 0))
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 66, 1))
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 67, 1))
        # else:
        #     self.apc.really_do_send_midi((NOTE_ON_STATUS, 66, 0))
        #     self.apc.really_do_send_midi((NOTE_ON_STATUS, 67, 1))
    def custom_receive_midi(self, midi_bytes):
        if midi_bytes[0] & 240 == NOTE_ON_STATUS:
            note = midi_bytes[1]
            if note == 67:
                # Nudge tempo up by one whole BPM.
                self.apc.song().tempo = int(self.apc.song().tempo) + 1
                self.syncLights()
                return
            if note == 66:
                # Nudge tempo down by one whole BPM.
                self.apc.song().tempo = int(self.apc.song().tempo) - 1
                self.syncLights()
                return
            if note == 65:
                self.gotoRootMenu()
                song = self.apc.song()
                # - round down tempo
                song.tempo = int(song.tempo)
            else:
                # Every remaining note acts as the tap-tempo key.
                song = self.apc.song()
                song.tap_tempo()
                # Tempo Tap key - round down tempo
                # song.tempo = int(song.tempo)
                self.syncLights()
        return False
class MetronomeMode(ModeBase):
    """Menu page that toggles Live's metronome.

    The pad grid shows the word "on" or "off" (glyphs from self.letters)
    to reflect the current state; button 67 or any grid pad toggles it.
    """

    def __init__(self, apc):
        ModeBase.__init__(self, apc)

    def getName(self):
        return "metronome"

    def _toggle(self):
        # Flip the song's metronome flag, then repaint grid and buttons.
        self.apc.song().metronome = not self.apc.song().metronome
        self.syncLights()

    def syncLights(self):
        song = self.apc.song()
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 64, 0))
        self.apc.really_do_send_midi((NOTE_ON_STATUS, 66, 0))
        if song.metronome:
            self.apc.really_do_send_midi((NOTE_ON_STATUS, 67, 2))
            self.paintLetter(self.letters["on"])
        else:
            self.apc.really_do_send_midi((NOTE_ON_STATUS, 67, 1))
            self.paintLetter(self.letters["off"])

    def custom_receive_midi(self, midi_bytes):
        if midi_bytes[0] & 240 != NOTE_ON_STATUS:
            return False
        pressed = midi_bytes[1]
        if pressed == 67:
            self._toggle()
            return
        if pressed < 64:
            # Any grid pad toggles the metronome too.
            self._toggle()
        if pressed == 65:
            self.gotoRootMenu()
        return False
class APC_mini_mle(APC_mini):
# @Overridden
SESSION_HEIGHT = 8
# @Overridden
HAS_TRANSPORT = False
# Locals :
shiftPressed = False
__fixed_record_bar_length = 8
lastShiftUpMillis = 0
firstShiftClickedNote = -1
lastShiftClickedNote = -1
noteDoubleClickMillis = 0
    def __init__(self, *a, **k):
        # Extend the stock APC mini script with the menu/mode machinery.
        super(APC_mini_mle, self).__init__(*a, **k)
        self.set_fixed_record_bar_length(8)
        self._suppress_send_midi = False
        self.mode = None  # active ModeBase subclass, or None for normal operation
        self.rootMenu = ShiftedMenuMode(self)
    def getTrackIndex(self, note):
        # Grid pads are notes 0-63, eight per row: column = note mod 8.
        return int(note % 8)
    def getClipIndex(self, note):
        # Row 0 of the session view maps to the TOP pad row, so invert note row.
        return 7 - int((note - self.getTrackIndex(note)) / 8)
    def _releaseShiftMenu(self, midi_bytes):
        """Handle SHIFT key release.

        Double-tapping SHIFT (two releases within 500 ms) opens the root
        menu. If grid pads were clicked while SHIFT was held: a single pad
        deletes that clip, a pair duplicates the first clip onto the last.
        """
        song = self.song()
        note = midi_bytes[1]
        if note != SHIFT_KEY:
            return False
        self.shiftPressed = False
        # Double tap on shift key => show advanced menu
        now = int(round(time.time() * 1000))
        if now - self.lastShiftUpMillis < 500:
            self.mode = self.rootMenu
            self.mode.syncLights()
        self.lastShiftUpMillis = now
        if self.firstShiftClickedNote >= 0:
            # Consume the shift-click selection recorded while SHIFT was held.
            firstNote = self.firstShiftClickedNote
            lastNote = self.lastShiftClickedNote
            self.firstShiftClickedNote = -1
            self.lastShiftClickedNote = -1
            fromTrackIndex = self.getTrackIndex(firstNote)
            fromTrack = song.tracks[fromTrackIndex]
            fromClipIndex = self.getClipIndex(firstNote)
            fromClipSlot = fromTrack.clip_slots[fromClipIndex]
            fromClip = fromClipSlot.clip
            if fromClip is not None:
                if lastNote < 0:
                    # Only one pad clicked: delete that clip.
                    fromClipSlot.delete_clip()
                else:
                    # Two pads clicked: copy the first clip onto the second slot.
                    toTrackIndex = self.getTrackIndex(lastNote)
                    toTrack = song.tracks[toTrackIndex]
                    toClipIndex = self.getClipIndex(lastNote)
                    toClipSlot = toTrack.clip_slots[toClipIndex]
                    # fromTrack.duplicate_clip_slot(fromClipIndex)
                    fromClipSlot.duplicate_clip_to(toClipSlot)
            self.show_message("")
        return False
def _applyShiftMenu(self, midi_bytes):
song = self.song()
note = midi_bytes[1]
if note == SHIFT_KEY:
self.shiftPressed = True
self.show_message("Shift + row 6 = metronome, row 7= undo")
if note == 88 and self.shiftPressed:
song.undo()
return True
if note == 87 and self.shiftPressed:
song.tempo = round(song.tempo)
song.metronome = not song.metronome
return True
# Double tap on note key => quantize
now = int(round(time.time() * 1000))
if note < 64:
trackIndex = self.getTrackIndex(note)
track = song.tracks[trackIndex]
clipIndex = self.getClipIndex(note)
clipSlot = track.clip_slots[clipIndex]
if now - self.noteDoubleClickMillis < 500:
if clipSlot.has_clip and clipSlot.clip.is_midi_clip:
self.log_message("APC quantize with mode 7")
clipSlot.clip.quantize(7, 1) # 4 or 7 - 4 should be 1/8 + 1/8T; 7 should be 1/16 + 1/16T ?
return True
if clipSlot.has_clip:
self.noteDoubleClickMillis = now
# Catch note selected | |
"""Provides a simple way to work with configuration files.
This module can translate a ConfigParser-style configuration file into a
simple dict, wrapping that with a :py:class:`Conf` class providing
convenient "get" functions that work much like dict's "get" method
(return None or the default value provided if the section or option is
missing) for various value types.
Simple Example Using :py:func:`read_conf`::
from sys import exit
from brim.conf import read_conf
conf = read_conf(['/etc/myco/myapp.conf', '~/.myapp.conf'])
if not conf.files:
exit('No configuration found.')
port = conf.get_int('server', 'port', 1234)
print 'Using port', port
By default, any error parsing the conf files or converting a value calls
sys.exit with an explanatory message. But you can override this behavior
by setting exit_on_read_exception to False and setting the
:py:meth:`Conf.error` method to your own method.
More Complex Example With Overrides::
from ConfigParser import Error
from sys import exit
from brim.conf import read_conf
try:
conf = read_conf(
['/etc/myco/myapp.conf', '~/.myapp.conf'],
exit_on_read_exception=False)
except Error as err:
exit('Config read error: ' + str(err))
if not conf.files:
exit('No configuration found.')
def custom_error(section, option, value, conversion_type, err):
if not isinstance(section, basestring):
section = '|'.join(section) # Handle iter of sections
raise Exception(
'Configuration value [%s] %s of %r cannot be converted '
'to %s.' % (section, option, value, conversion_type))
conf.error = custom_error
try:
port = conf.get_int('server', 'port', 1234)
except Exception as err:
exit('Config conversion error: ' + str(err))
print 'Using port', port
Another feature of read_conf is that if a conf file has a [brim]
additional_confs setting, the files listed there are also parsed.
This lets an end user make a conf file to be included by one or more
other conf files. Splitting configuration like this can make deployment
to clusters easier. For example::
[brim]
additional_confs = /etc/common.conf "/another file.conf" ~/.common.conf
"""
"""Copyright and License.
Copyright 2012-2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ConfigParser import Error, NoOptionError, NoSectionError, SafeConfigParser
from csv import reader
from os.path import expanduser
from sys import exit
from textwrap import wrap
# Accepted lowercase spellings for boolean conf values; see Conf.get_bool.
TRUE_VALUES = ['1', 'on', 't', 'true', 'y', 'yes']
"""A list of lowercase string values that equate to True."""
FALSE_VALUES = ['0', 'f', 'false', 'n', 'no', 'off']
"""A list of lowercase string values that equate to False."""
class Conf(object):
"""Wraps a configuration dict for richer access methods.
Within the dict store, each key is a section name and each value is
another dict. Each section dict key is an option name and each value
the actual value of the section/option within the conf. The list of
file names the configuration was read from may optionally be stored.
Normally Conf instances are created with the global function
:py:func:`read_conf` but you can construct Conf instances directly
as well::
# Normally...
conf = read_conf(['/etc/myco/myapp.conf', '~/.myapp.conf'])
# Directly...
conf = Conf({
'section1': {'option1.1': 'a', 'option1.2': 'b'},
'section2': {'option2.1': 'c', 'option2.2': 'd'}})
:param store: A dict representing the configuration, as described
above.
:param files: A list of file names the configuration was read from.
"""
def __init__(self, store, files=None):
self.store = store
"""A dict containing the configuration information.
Each dict key is a section name and each value is another dict.
Each section dict key is an option name and each value the
actual value of the section/option within the conf.
"""
self.files = files
"""A list of source conf file names the conf was read from."""
def get(self, section, option, default=None):
"""Returns the value of the section/option."""
if isinstance(section, basestring):
return (self.store.get(section) or {}).get(option) or default
else:
for section in section:
value = self.store.get(section)
if value:
value = value.get(option)
if value:
return value
return default
def get_bool(self, section, option, default):
"""Returns the boolean value of the section/option."""
value = self.get(section, option, default)
if value is True or value is False:
return value
if value.lower() in TRUE_VALUES:
return True
if value.lower() in FALSE_VALUES:
return False
self.error(section, option, value, 'boolean', None)
def get_int(self, section, option, default):
"""Returns the int value of the section/option."""
value = self.get(section, option, default)
try:
return int(value)
except ValueError as err:
self.error(section, option, value, 'int', err)
def get_float(self, section, option, default):
"""Returns the float value of the section/option."""
value = self.get(section, option, default)
try:
return float(value)
except ValueError as err:
self.error(section, option, value, 'float', err)
def get_path(self, section, option, default=None):
"""Returns the path value of the section/option.
This is different that just retrieving a string only in that it
calls os.path.expanduser on the value, translating ~/path and
~user/path constructs.
"""
value = self.get(section, option, default)
if not value:
return value
try:
return expanduser(value)
except AttributeError as err:
self.error(section, option, value, 'path', err)
def error(self, section, option, value, conversion_type, err):
"""Handles an error converting a section/option value.
This function is called when one of the "get" methods cannot
convert a value. By default, this method calls sys.exit with an
explanatory message, but you can override it by setting
Conf.error to another method.
An example of overriding to raise an Exception::
def _error(section, option, value, conversion_type, err):
raise Exception(
'Configuration value [%s] %s of %r cannot be '
'converted to %s.' %
(section, option, value, conversion_type))
conf = read_conf(['some.conf'])
conf.error = _error
Note that the section parameter may have been given as an
iterator of sections rather than just one section name.
:param section: The section name (or an iterator of section
names) within the conf that was read.
:param option: The option name within the section that was read.
:param value: The value read and failed conversion.
:param conversion_type: The name of the type of conversion that
failed, such as ``'boolean'``, ``'int'``, ``'float'``, or
``path``.
:param err: The Exception that was raised, if any, during the
conversion.
"""
if not isinstance(section, basestring):
section = '|'.join(section) # Handle iter of sections
exit(
'Configuration value [%s] %s of %r cannot be converted to %s.' %
(section, option, value, conversion_type))
def __str__(self):
    """Human-readable summary naming the source conf files and the store."""
    source = self.files or 'unknown files'
    return 'Conf based on {!r}: {!r}'.format(source, self.store)
def __repr__(self):
    """Debug representation; intentionally identical to str(self)."""
    return self.__str__()
def _read_conf(parser, conf_files_read, conf_file, exit_on_read_exception):
    """Read one conf file into *parser*, recursing into any
    [brim] additional_confs files it declares.

    :param parser: ConfigParser-style parser accumulating the values.
    :param conf_files_read: Running list of files successfully read so
        far; doubles as the recursion guard.
    :param conf_file: The conf file to read (~ expansion is applied).
    :param exit_on_read_exception: When True, parser errors call
        sys.exit; otherwise the parser Error propagates to the caller.
    """
    # Recursion guard: additional_confs chains could otherwise loop forever.
    if len(conf_files_read) > 50:
        msg = (
            'Tried to read more than 50 conf files.\n'
            'Recursion with [brim] additional_confs?\n' +
            '\n'.join(wrap(
                'Files read so far: ' + ' '.join(conf_files_read), width=79)))
        if exit_on_read_exception:
            exit(msg)
        else:
            raise Error(msg)
    # parser.read returns the list of files actually parsed; files the
    # parser cannot access simply don't appear in that list.
    if exit_on_read_exception:
        try:
            conf_files_read.extend(parser.read([expanduser(conf_file)]))
        except Error as err:
            exit(err)
    else:
        conf_files_read.extend(parser.read([expanduser(conf_file)]))
    try:
        additional_confs = parser.get('brim', 'additional_confs')
    except (NoSectionError, NoOptionError):
        pass
    else:
        # Consume the option so it cannot re-trigger once processed.
        parser.remove_option('brim', 'additional_confs')
        # additional_confs is a space-delimited list; presumably `reader`
        # is csv.reader, which also handles quoted names -- confirm at
        # the module's import block.
        for conf_file in list(
                reader([additional_confs], delimiter=' '))[0]:
            _read_conf(
                parser, conf_files_read, conf_file, exit_on_read_exception)
def read_conf(conf_files, exit_on_read_exception=True):
"""Returns a new :py:class:`Conf` instance.
The new instance is based on the results from reading the conf_files
into a ConfigParser.SafeConfigParser.
Note that if the parser does not have access to read a given file it
acts as if it did not exist.
If a conf file has a [brim] additional_confs setting, the files
listed there are also be parsed.
You may wish to check the :py:attr:`Conf.files` list to determine
which files, if any, were read to form the Conf instance.
On a parser error, calls sys.exit with an explanatory message by
default. If you set exit_on_read_exception to False, the
ConfigParser.Error be raised instead.
:param conf_files: An iterable of conf files or a string
representing a single conf file to read and translate.
Values in files further into the list override any values from
prior files. File names may use the ~/filename or ~user/filename
format and are expanded with os.path.expanduser.
:param exit_on_read_exception: A boolean that indicates whether
sys.exit should be called on error or if a ConfigParser.Error
should be raised instead.
:returns: A new :py:class:`Conf` instance representing the
configuration read from the conf_files.
"""
if isinstance(conf_files, basestring):
conf_files = [conf_files]
parser = SafeConfigParser()
conf_files_read = []
for conf_file in conf_files:
_read_conf(parser, conf_files_read, conf_file, exit_on_read_exception)
store | |
# Source repository: MScResearch/EEGprocessing
"""
This is nedfReader, a class to read ".nedf" files and obtain its data.
It is an object that contains data from nedf file,
and provides methods to obtain this information.
2018 Neuroelectrics Corporation
Created on Tue Feb 14 2018
@author: sergi and giulio (NE)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import json
import datetime
import os
import xml.etree.ElementTree as ET
class nedfReader(object):
"""NEDFReader object. Example of use:
>>> c = nedfReader("nedfdata/20180213122712_Patient01.nedf")
You must provide the relative or absolute path to the data file.
In general, output units are uV, uA and seconds (c.np_time) and mm/s^2 (np_acc).
(see http://wiki.neuroelectrics.com/index.php/Files_%26_Formats)
The EEG data for processing is kept in .np_eeg and is a numpy array (uV),
and its shape is (num eeg samples,num channels), e.g.,
>>> c.np_eeg.shape = (15000,32)
The stimulation data for processing is kept in .np_stim (uA) and is a numpy array,
and its shape is (num stim samples,num channels), e.g.,
>>> c.np_stim.shape = (30000,32)
The accelerometer data for processing is kept in .np_acc and is a numpy array,
and its shape is (num acc samples,num acc channels), e.g.,
>>> c.np_acc.shape = (3000,3)
The markers are kept in .np_markers and is a numpy array,
and its shape is (numsamples), e.g.,
>>> c.np_markers.shape = (15000)
Metadata information from the nedf header is returned as a json using method __get_info.
"""
def __init__(self, filepath, author="anonymous"):
    """Open *filepath* (a .nedf file), parse its XML header and decode
    the EEG/stimulation/accelerometer/marker streams into numpy arrays.

    :param filepath: Relative or absolute path to the .nedf file; the
        .nedf extension may be omitted.
    :param author: Free-form author tag stored on the instance.
    """
    self.filepath = filepath
    self.np_eeg = []  # will hold data in uV
    self.np_stim = []  # holds currents in uA
    self.np_acc = []  # mm/s^2
    self.np_markers = []
    self.np_time = []  # seconds
    self.eegstartdate_unixtime = 0
    self.basename = ""
    self.num_channels = 0
    self.electrodes = []
    self.samplesread = 0
    self.author = author
    enableINFO = False  # If True, prints array shapes and info.
    print()
    """ First we check if file exists with or without file extension."""
    if not os.path.isfile(filepath):
        if filepath.endswith('.nedf'):
            print(""" The path provided is not correct: \n {path} """.format(path=filepath))
            return
        else:
            if not os.path.isfile(filepath+'.nedf'):
                print(""" The path provided is not correct: \n {path} """.format(path=filepath))
                print(" Remember that this class only accepts files with .nedf extension ")
                return
            else:
                filepath = filepath + '.nedf'
    print("File found! {file}".format(file=filepath))
    # self.basename = filepath[filepath.rfind("/")+1:].replace(".nedf", "")
    self.basename = os.path.basename(filepath)[:-5]  # name without ".nedf"
    self.filenameroot = filepath[:-5]  # full path without ".nedf"
    file = open(filepath, 'rb')
    """ Here we are reading just the header.
    By definition of format the header cannot be larger than 10240 bytes"""
    content = file.read(10240)
    content = content.decode("utf-8")
    # Tag name of the root element, e.g. "nedf" from "<nedf ...>".
    nedftitle = content[1:content.find('>')]
    # This is the last character of xml header
    lastindex = content.find('</'+nedftitle+'>') + len('</'+nedftitle+'>')
    content = content[:lastindex]
    try:
        """ We try to convert the header string into a XML structure using ElementTree."""
        root = ET.fromstring(content)
    except:
        # NOTE(review): bare except also hides KeyboardInterrupt etc.
        print("Nedf header is incorrect. The xml is corrupted")
        return
    """ Using XmlDictConfig we obtain a dictionary from the ET object"""
    xmldict = XmlDictConfig(root)
    self.header = dict(xmldict)
    print("Reading file...")
    """ As we already started reading the file, next read will start from 10240 byte,
    and we will read the whole file at once and then close it. We store what we read
    in a bytearray. This is done for efficiency matters. """
    content2 = file.read()
    file.close()
    self.nedfbytes = bytearray(content2)
    self.nedfbytessize = len(self.nedfbytes)
    """ We initialize the object variables with header values"""
    if xmldict['NEDFversion'] == '1.4':
        try:
            self.isaccon = xmldict['AccelerometerData'] == 'ON'
            self.isstimon = 'STIMSettings' in xmldict
            self.iseegon = 'EEGSettings' in xmldict
            self.samples = 0
            # Channel count comes from whichever settings block exists.
            if self.iseegon:
                self.num_channels = int(xmldict['EEGSettings']['TotalNumberOfChannels'])
            else:
                self.num_channels = int(xmldict['STIMSettings']['TotalNumberOfChannels'])
            if self.iseegon:
                self.eegstartdate_unixtime = int(xmldict['StepDetails']['StartDate_firstEEGTimestamp'])
                # want this in calendar format
                valuedate = datetime.datetime.fromtimestamp(self.eegstartdate_unixtime / 1000.)
                self.eegstartdate = valuedate.strftime('%Y-%m-%d %H:%M:%S')
                # want a simple list of electrodes ordered by channel as in np_eeg
                electrodesDict = dict(xmldict['EEGSettings']['EEGMontage'])
                # Keys look like "ChannelN"; k[7:] extracts the number N.
                electrodesDictNumKey = {int(k[7:]): val for (k, val) in electrodesDict.items()}
                self.electrodes = [electrodesDictNumKey[k] for k in sorted(electrodesDictNumKey)]
                self.eegtotaltime = int(xmldict['EEGSettings']['EEGRecordingDuration'])  # seconds
                self.fs = int(xmldict['EEGSettings']['EEGSamplingRate'])
                self.samples = int(xmldict['EEGSettings']['NumberOfRecordsOfEEG'])
            if self.isstimon:
                self.stimtotaltime = int(xmldict['STIMSettings']['StimulationDuration'])+int(xmldict['STIMSettings']['RampDownDuration'])+int(xmldict['STIMSettings']['RampUpDuration'])+int(xmldict['STIMSettings']['ShamRampDuration'])
                if self.samples == 0:  # No EEG in the file
                    # Stim runs at 2x the EEG rate, hence the // 2.
                    self.samples = int(xmldict['STIMSettings']['NumberOfRecordsOfStimulation']) // 2
            if self.iseegon:
                self.np_eeg = (np.zeros(shape=(self.samples, self.num_channels), dtype="float32"))
            if self.isstimon:
                self.np_stim = (np.zeros(shape=(self.samples*2, self.num_channels), dtype="float32"))
            if self.isaccon:
                # NOTE(review): uses num_channels rather than the 3 acc
                # axes; the array is rebuilt from decoded data after
                # __processBytes, so this placeholder shape appears
                # unused -- confirm.
                self.np_acc = (np.zeros(shape=(self.samples//5, self.num_channels), dtype="float32"))
            self.np_markers = (np.zeros(shape=(1), dtype="int32"))
            self.np_time = (np.zeros(shape=(1), dtype="uint64"))
        except Exception as e:
            print("NEDF Header is missing some required fields: " + str(e))
            return
    elif xmldict['NEDFversion'] == '1.2':
        try:
            # NOTE(review): 'OFF' is a truthy string, so every later
            # `if self.isaccon:` branch DOES run for v1.2 files; compare
            # the boolean comparison used in the 1.4 branch. Looks like
            # it should be False -- confirm against real v1.2 files
            # before changing, since __processBytes byte alignment
            # depends on it.
            self.isaccon = 'OFF'
            self.isstimon = 'STIMSettings' in xmldict
            self.num_channels = int(xmldict['TotalNumberOfChannels'])
            self.eegstartdate_unixtime = int(xmldict['StartDateEEG'])
            # want this in calendar format
            valuedate = datetime.datetime.fromtimestamp(self.eegstartdate_unixtime / 1000.)
            self.eegstartdate = valuedate.strftime('%Y-%m-%d %H:%M:%S')
            # want a simple list of electrodes ordered by channel as in np_eeg
            electrodesDict = dict(xmldict['EEGMontage'])
            electrodesDictNumKey = {int(k[7:]): val for (k, val) in electrodesDict.items()}
            self.electrodes = [electrodesDictNumKey[k] for k in sorted(electrodesDictNumKey)]
            self.eegtotaltime = int(xmldict['NumberOfRecordsOfEEG']) / int(xmldict['EEGSamplingRate'])  # seconds
            self.fs = int(xmldict['EEGSamplingRate'])
            self.samples = int(xmldict['NumberOfRecordsOfEEG'])
            if self.isstimon:
                print('Not implemented. Contact to <EMAIL>')
            self.np_eeg = (np.zeros(shape=(self.samples, self.num_channels), dtype="float32"))
            if self.isaccon:
                self.np_acc = (np.zeros(shape=(self.samples // 5, self.num_channels), dtype="float32"))
            self.np_markers = (np.zeros(shape=(1), dtype="int32"))
            self.np_time = (np.zeros(shape=(1), dtype="uint64"))
        except Exception as e:
            print("NEDF Header is missing some required fields: " + str(e))
            return
    print("Header information has been correctly retrieved.")
    """ NEDF read is based on nedf file definition and sampling rate.
    Accelerometer sampling rate is 100 samples per second.
    EEG sampling rate is 500 samples per second.
    Stimulation sampling rate is 1000 samples per second.
    Based on that, we iterate taking EEG as reference. """
    supereeg, superstim, superacc, supermarkers, supertime = self.__processBytes()
    """Finally we create the numpy arrays from python lists"""
    self.np_acc = np.array(superacc, dtype="float32")
    self.np_eeg = np.array(supereeg, dtype="float32")/1000.  # uV (originally in nV)
    self.np_stim = np.array(superstim, dtype="float32")
    self.np_markers = np.array(supermarkers, dtype="float32")
    # create a time column in seconds from beginning of file
    np_time = np.array(supertime)/1000.  # go to seconds
    self.np_time = np.array(np_time, dtype="float32")
    print("Finished processing")
    if enableINFO:
        print()
        print("Data has been stored into the following self structures:")
        print()
        print(" > np_acc contains Accelerometer Data and it is shaped", self.np_acc.shape)
        print(" > np_eeg contains EEG Data (uV) and it is shaped", self.np_eeg.shape)
        if self.isstimon:
            print(" > np_stim contains Stimulation Data (uA) and it is shaped", self.np_stim.shape)
        print(" > np_markers contains Markers Data and it is shaped", self.np_markers.shape)
        print(" > np_time contains EEG corresponding Timestamps (s from start) and it is shaped", self.np_time.shape)
        print(" > np_acc contains acc data (mm/s^2) and it is shaped", self.np_time.shape)
        print()
        print("Header information can be obtained as a json using __get_info method or directly accessed:")
        print()
        print(" > self.filepath", self.filepath)
        print(" > self.eegstartdate_unixtime", self.eegstartdate_unixtime)
        print(" > self.basename", self.basename)
        print(" > self.num_channels", self.num_channels)
        if not self.isstimon:
            print(" > self.eegtotaltime", self.eegtotaltime)
        else:
            print(" > self.stimtotaltime", self.stimtotaltime)
        print(" > self.electrodes", "Keys:", list(electrodesDict.keys()))
        print(" > self.author", self.author)
def __processBytes(self):
self.bytesread = -1
counteracc = 5
supereeg = []
superacc = []
superstim = []
supermarkers = []
supertime = []
for i in range(self.samples):
supertime.append(i*2)
if self.isaccon:
if counteracc == 5:
counteracc = 1
accsample = []
for j in range(3):
if self.nedfbytessize - self.bytesread < 2:
print("[Error] Not enough bytes while reading Accelerometer")
return supereeg, superstim, superacc, supermarkers, supertime
byte1 = self.__getByte()
byte2 = self.__getByte()
accvar = byte1*256+byte2
if byte1 >= 128:
accvar = accvar - 65536
accsample.append(accvar)
if len(accsample):
superacc.append(accsample)
else:
counteracc += 1
if self.iseegon:
eegsample = []
for j in range(self.num_channels):
if self.nedfbytessize - self.bytesread < 3:
print(" > [Error] Not enough bytes while reading EEG")
return supereeg, superstim, superacc, supermarkers, supertime
byte1 = self.__getByte()
byte2 = self.__getByte()
byte3 = self.__getByte()
eegvar = byte1 * 65536 + byte2 * 256 + byte3
if byte1 >= 128:
eegvar = (16777216 * 255) + eegvar - (16777216 * 256)
eegvar = (eegvar * 2.4 * 1000000000) / 6.0 / 8388607.0
eegsample.append(eegvar)
supereeg.append(eegsample)
if self.isstimon:
for s in range(2):
stimsample = []
for j in range(self.num_channels):
if self.nedfbytessize - self.bytesread < 3:
print("[Error] Not enough bytes while reading Stimulation")
return supereeg, superstim, superacc, supermarkers, supertime
byte1 = self.__getByte()
byte2 = self.__getByte()
byte3 = self.__getByte()
stimvar = byte1 * 65536 + byte2 * 256 + byte3
| |
"solvers"]
info(" ".join(argv))
with subprocess.Popen(argv, stdout=subprocess.PIPE,
stderr=sys.stderr) as p:
with tarfile.open(mode="r|", fileobj=p.stdout) as t:
t.extractall(args.output_dir)
os.rename(os.path.join(args.output_dir, "solvers"), dest_dir)
#
##
##
#
# repository management
# Image references already pulled during this run (avoid repeated pulls).
FROM_UPTODATE = set()

def docker_uptodate_image(args, docker_argv, image):
    """Pull *image* once per run; subsequent calls for the same ref are no-ops."""
    if image in FROM_UPTODATE:
        return
    pull_cmd = docker_argv + ["pull", image]
    info(" ".join(pull_cmd))
    subprocess.check_call(pull_cmd)
    FROM_UPTODATE.add(image)
def docker_build(args, docker_argv, tag, root, build_args=None, Dockerfile=None):
    """Run ``docker build`` for *root*, tagging the result as *tag*.

    All non-templated FROM images in the Dockerfile are pulled first so
    the build uses up-to-date bases.

    :param args: Parsed CLI namespace (reads ``args.no_cache``).
    :param docker_argv: Base docker command (from check_docker()).
    :param tag: Tag for the built image (-t).
    :param root: Build context directory.
    :param build_args: Optional mapping passed as --build-arg entries.
        (Fixed: previously a mutable ``{}`` default argument.)
    :param Dockerfile: Optional explicit Dockerfile path (-f).
    """
    dockerfile_path = Dockerfile or os.path.join(root, "Dockerfile")
    with open(dockerfile_path) as fp:
        # Skip templated FROM lines (containing "{...}" placeholders).
        froms = [line.split()[1] for line in fp
                 if line.startswith("FROM") and "{" not in line]
    for base in froms:
        docker_uptodate_image(args, docker_argv, base)
    argv = docker_argv + ["build", "-t", tag, root]
    if args.no_cache:
        argv += ["--no-cache"]
    if Dockerfile:
        argv += ["-f", Dockerfile]
    for k, v in (build_args or {}).items():
        argv += ["--build-arg", f"{k}={v}"]
    info(" ".join(argv))
    subprocess.check_call(argv)
def build_images(args):
    """Build the Docker images selected by *args*.

    For each image up to three builds run: the per-solver builder image
    (compiles the solver), a shared base image (cached across solvers
    within this invocation via ``bases_uptodate``), and the final
    distribution image that combines the two.
    """
    docker_argv = check_docker()
    repo = Repository(args)
    # Base images already (re)built during this invocation.
    bases_uptodate = set()
    # Setup options forwarded only to the dist build.
    only_dist_opts = ["RDEPENDS"]
    # Setup options never forwarded as --build-arg.
    hide_opts = [
        "base_version",
        "base_from",
        "builder",
        "builder_base",
        "image_name",
    ]
    for name in repo.images:
        image = ImageManager(name, repo)
        setup = image.setup
        root = str(image.entry)
        # Every remaining string option becomes a --build-arg, with
        # {placeholders} substituted from the image variables.
        build_args = {k: v.format(**image.vars) for k,v in setup.items() if \
                k not in hide_opts and isinstance(v, str)}
        build_args.update(image.vars)
        # 1) Builder image.
        builder_path = setup["builder"]
        if not builder_path.startswith("generic/"):
            builder_path = os.path.join(image.entry, builder_path)
        builder_Dockerfile = os.path.join(builder_path, "Dockerfile")
        builder_target = f"{DOCKER_NS}/builder-{image.name}"
        if "builder_base" in setup:
            build_args["BUILDER_BASE"] = setup["builder_base"]
            docker_uptodate_image(args, docker_argv, setup["builder_base"])
        docker_build(args, docker_argv, builder_target, root,
                build_args=build_args, Dockerfile=builder_Dockerfile)
        # 2) Base image, built once per (version, from) pair.
        base_version = setup["base_version"]
        base_root = os.path.join("base", base_version)
        base_from = setup.get("base_from")
        base_args = {}
        base_tag = base_version
        if base_from:
            base_args["BASE"] = base_from
            # Make the FROM reference safe for use inside a tag.
            from_tag = base_from.replace("/","_").replace(":","-")
            base_tag = f"{base_version}-{from_tag}"
        base_target = f"{DOCKER_NS}/base:{base_tag}"
        if base_target not in bases_uptodate:
            docker_build(args, docker_argv, base_target, base_root, base_args)
            bases_uptodate.add(base_target)
        # 3) Distribution image (the final artifact).
        dist_version = setup.get("dist_version", "v1")
        dist_Dockerfile = f"generic/dist-{dist_version}/Dockerfile"
        dist_args = {
            "BASE": base_target,
            "BUILDER_BASE": builder_target,
            "IMAGE_NAME": image.name,
            "SOLVER": build_args["SOLVER"],
            "SOLVER_NAME": build_args["SOLVER_NAME"],
        }
        for k in only_dist_opts:
            if k in setup:
                dist_args[k] = setup[k]
        # The registry entry is embedded into the image as a json file;
        # create it inside the build context so docker can COPY it, and
        # always clean it up afterwards.
        fd, dbjson = tempfile.mkstemp(".json", "file", root)
        try:
            fp = os.fdopen(fd, "w")
            json.dump({image.solver: image.registry}, fp)
            fp.close()
            dist_args["dbjson"] = os.path.basename(dbjson)
            docker_build(args, docker_argv, f"{DOCKER_NS}/{image.name}",
                    root, dist_args, Dockerfile=dist_Dockerfile)
        finally:
            os.unlink(dbjson)
# Map well-known solver exit codes to human-readable outcome labels
# (124 is the exit code produced by the `timeout` utility).
_retstr = {
    10: "SAT",
    20: "UNSAT",
    124: "TIMEOUT",
}
def test_images(args):
    """Run the smoke-test suite (cnf/gz/proof/modes) against every
    selected image and report per-image failures; exits non-zero if any
    test fails.

    Exit codes 0, 10 (SAT), 20 (UNSAT) and 124 (timeout) all count as a
    passing run.
    """
    docker_args = ["-v", f"{os.path.abspath('tests')}:/data"]
    info(f"Testing with CNF {os.path.abspath('tests')}/{args.file}")
    # NOTE(review): clamps long timeouts down to 10s, presumably to keep
    # the test suite fast -- confirm the 600/10 values are intended.
    if args.timeout > 600:
        args.timeout = 10
    def call(test_name, image, image_args):
        """Run one test invocation and print a colored ok/fail verdict."""
        print(test_name, end="...", flush=True)
        ret = docker_runs(args, [image.name], docker_args=docker_args,
                image_args=image_args)
        msg = _retstr.get(ret, ret)
        if ret == 124 or ret == 0 or 10 <= ret <= 20:
            print(green("ok"), f"({msg})")
            return True
        else:
            print(red("fail"), f"({msg})")
            return False
    def test_cnf(image):
        image_args = [args.file]
        return call("cnf", image, image_args)
    def test_gz(image):
        image_args = [f"{args.file}.gz"]
        return call("gz", image, image_args)
    def test_proof(image):
        if not "argsproof" in image.registry:
            return True
        image_args = [args.file, "proof.tmp"]
        return call("proof", image, image_args)
    def test_modes(image):
        ok = True
        for mode in [k for k in image.registry if k.startswith("args")]:
            mode = mode[4:]  # strip the "args" prefix
            if not mode or mode == "proof":
                continue
            # BUGFIX: `mode` was sliced a second time (mode[4:]) here,
            # passing a truncated or empty mode name to the solver.
            image_args = ["--mode", mode, "aim-200-1_6-yes1-1.cnf.gz", "proof.tmp"]
            ok = call(mode, image, image_args) and ok
        return ok
    tests = [test_cnf, test_gz, test_proof, test_modes]
    failures = []
    repo = Repository(args)
    docker_argv = check_docker()
    for name in repo.images:
        prepare_image(args, docker_argv, f"{DOCKER_NS}/{name}")
    for name in repo.images:
        image = ImageManager(name, repo)
        info(f"Testing {image.name}")
        # test.__name__[5:] drops the "test_" prefix for the report.
        fails = [test.__name__[5:] for test in tests if not test(image)]
        if fails:
            failures.append((image, fails))
    if not failures:
        print(green("Bravo :-)"))
    else:
        print(red("Failed tests:"))
        for image, fails in failures:
            print(f"{image.name} failed tests {' '.join(fails)}")
    if failures:
        sys.exit(1)
def push_images(args):
    """Push every selected image to the registry under DOCKER_NS."""
    docker_argv = check_docker()
    for image in get_list(args):
        push_cmd = docker_argv + ["push", f"{DOCKER_NS}/{image}"]
        info(" ".join(push_cmd))
        subprocess.check_call(push_cmd)
def mrproper(args):
    """Remove local Docker images under DOCKER_NS, optionally filtered
    by args.pattern; with --pretend only print the rmi command."""
    docker_argv = check_docker()
    listing = subprocess.check_output(docker_argv + ["images", "-f",
                                      f"reference={DOCKER_NS}/*",
                                      "--format", "{{.Repository}}:{{.Tag}}"])
    targets = [line.strip() for line in listing.decode().split("\n") if line]
    if args.pattern:
        targets = fnmatch.filter(targets, f"{DOCKER_NS}/{args.pattern}")
    if not targets:
        # Nothing matched; nothing to delete.
        return
    targets.sort()
    argv = docker_argv + ["rmi"] + list(targets)
    if args.pretend:
        print(" ".join(argv))
    else:
        info(" ".join(argv))
        sys.exit(subprocess.call(argv))
def dependencies(args):
    """Print the external base images the selected images depend on,
    optionally pulling each one when --pull is given."""
    docker_argv = check_docker()
    repo = Repository(args)
    deps = set()
    for name in repo.images:
        image = ImageManager(name, repo)
        setup = image.setup
        if "builder_base" in setup:
            deps.add(setup["builder_base"])
        # BUGFIX: base_from is optional (build_images reads it with
        # setup.get()); indexing it directly raised KeyError for images
        # that define no base_from.
        base_from = setup.get("base_from")
        if base_from:
            deps.add(base_from)
    for image in deps:
        print(image)
        if args.pull:
            prepare_image(args, docker_argv, image)
def download_src(args):
    """Download each selected image's solver source archive(s) into
    args.output_dir, optionally into one sub-directory per entry/year.

    The download_url setting may use bash-style brace expansion to
    describe several URLs at once.
    """
    def brace_expand(s):
        """
        Bash-like brace+comma expansion
        Source: https://rosettacode.org/wiki/Brace_expansion
        """
        def getitem(s, depth=0):
            out = [""]
            while s:
                c = s[0]
                if depth and (c == ',' or c == '}'):
                    return out, s
                if c == '{':
                    x = getgroup(s[1:], depth + 1)
                    if x:
                        out, s = [a + b for a in out for b in x[0]], x[1]
                        continue
                if c == '\\' and len(s) > 1:
                    # Preserve escaped characters verbatim.
                    s, c = s[1:], c + s[1]
                out, s = [a + c for a in out], s[1:]
            return out, s
        def getgroup(s, depth):
            out, comma = [], False
            while s:
                g, s = getitem(s, depth)
                if not s: break
                out += g
                if s[0] == '}':
                    if comma: return out, s[1:]
                    # No comma: not a real group, keep the braces.
                    return ['{' + a + '}' for a in out], s[1:]
                if s[0] == ',':
                    comma, s = True, s[1:]
            return None
        return getitem(s)[0]
    os.makedirs(args.output_dir, exist_ok=True)
    repo = Repository(args)
    for name in repo.images:
        image = ImageManager(name, repo)
        setup = image.setup
        src_urls = setup["download_url"].format(**image.vars)
        src_urls = brace_expand(src_urls)
        for src_url in src_urls:
            if args.subdir_entry:
                os.makedirs(os.path.join(args.output_dir, str(image.entry)), exist_ok=True)
            try:
                with urlopen(src_url) as fp:
                    # Prefer the server-provided filename when present.
                    if "Content-Disposition" in fp.headers:
                        _, params = cgi.parse_header(fp.headers["Content-Disposition"])
                        filename = params['filename']
                    else:
                        filename = os.path.basename(src_url)
                    if "?" in filename:
                        # Strip any query-string remnants from the name.
                        filename = filename.split("?")[0]
                    if args.subdir_entry:
                        filename = os.path.join(str(image.entry), filename)
                    filename = os.path.join(args.output_dir, filename)
                    # BUGFIX: these two messages printed the literal
                    # placeholder "(unknown)" instead of the target file.
                    if os.path.exists(filename) and not args.overwrite:
                        error(f"{image.name}: {filename} already exists. Use --overwrite option to overwrite it")
                    print(f"{image.name}: downloading to {filename}...", end="", flush=True)
                    with open(filename, "wb") as dest:
                        dest.write(fp.read())
                    print(green("ok"))
                    if "zenodo.org" in src_url:
                        # Be polite with zenodo's rate limiting.
                        time.sleep(0.5)
            except HTTPError as e:
                error(f"{image.name}: error while downloading {src_url} ({e})",
                      exit=False)
            except Exception as e:
                error(f"{image.name}: error while downloading {src_url} ({type(e)} {e})",
                      exit=False)
def print_version(args):
    """Handler for the ``version`` sub-command: emit the script version."""
    sys.stdout.write(f"{__version__}\n")
#
##
def main(redirected=False):
if IN_REPOSITORY and not redirected and \
os.path.abspath(__file__) != os.path.abspath("satex.py"):
info(f"using {os.path.abspath('satex.py')}")
del sys.modules["satex"]
sys.path.insert(0, os.getcwd())
from satex import main
return main(redirected=True)
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
description="Helper script for managing SAT Heritage Docker images",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(f"""\
GitHub: https://github.com/sat-heritage/docker-images
DockerHub: https://hub.docker.com/u/satex
Version: {__version__}"""))
parser.add_argument("--refresh-list", default=False, action="store_true",
help="Force refresh of the list of images")
parser.add_argument("--pretend", "-p", default=False, action="store_true",
help="Print Docker commands without executing them")
subparsers = parser.add_subparsers(help="commands")
##
# options shared by several sub-commands
#
status_parser = argparse.ArgumentParser(add_help=False)
status_parser.add_argument("--unstable", action="store_true",
help="Consider images with non-ok and non-FIXME status")
status_parser.add_argument("--fixme", action="store_true",
help="Consider images with FIXME status")
status_parser.add_argument("--all", "-a", action="store_true",
help="Consider all images, with any status")
spec_parser = argparse.ArgumentParser(add_help=False)
spec_parser.add_argument("pattern",
help="Pattern for filtering images")
tracks_parser = argparse.ArgumentParser(add_help=False)
tracks_parser.add_argument("--track",
help="Filter solvers from the given track")
docker_parser = argparse.ArgumentParser(add_help=False)
docker_parser.add_argument("--pull", action="store_true",
help="Explicitly pull the image")
docker_parser.add_argument("-v", "--volume", action="append",
help="(Docker option) Mount a volume")
_docker_opts.append("volume")
docker_parser.add_argument("-e", "--env", action="append",
help="(Docker option) Set environment variables")
_docker_opts.append("env")
run_parser = argparse.ArgumentParser(add_help=False)
run_parser.add_argument("--timeout", type=int, default=3600,
help="Timeout for solver (in seconds; default: 3600)")
run_parser.add_argument("--fail-if-timeout", action="store_true",
help="Fail if timeout occurs")
#
##
##
# sub-commands
#
p = subparsers.add_parser("list",
help=f"List {DOCKER_NS} Docker images",
parents=[status_parser, tracks_parser])
p.add_argument("pattern", default="*", nargs="?",
help="Pattern for filtering images (default: *)")
p.set_defaults(func=print_list)
p = subparsers.add_parser("info",
help=f"Display information about the solver embedded in the given Docker images",
parents=[spec_parser, tracks_parser])
p.set_defaults(func=print_info)
p = subparsers.add_parser("run",
help=f"Run one or several {DOCKER_NS} Docker images",
parents=[spec_parser, status_parser, tracks_parser,
run_parser, docker_parser])
p.add_argument("--mode",
help="Select args mode")
p.add_argument("dimacs",
help="DIMACS file (possibly gzipped)")
p.add_argument("proof", nargs="?",
help="Output file for proof")
p.set_defaults(func=run_images)
p = subparsers.add_parser("run-raw",
help=f"Run one or several {DOCKER_NS} Docker images with direct call to solvers",
parents=[spec_parser, status_parser, tracks_parser,
run_parser, docker_parser])
p.add_argument("args", nargs=argparse.REMAINDER,
help="Arguments to docker image")
p.set_defaults(func=runraw_images)
p = subparsers.add_parser("shell",
help=f"Open shell within the given {DOCKER_NS} Docker image",
parents=[docker_parser])
p.add_argument("image", help="{DOCKER_NS} image")
p.set_defaults(func=run_shell)
p = subparsers.add_parser("extract",
help=f"Extract solvers binaries from {DOCKER_NS} Docker images",
parents=[spec_parser, status_parser, tracks_parser, docker_parser])
p.add_argument("output_dir", help="Output directory")
p.set_defaults(func=extract)
p = subparsers.add_parser("mrproper",
help=f"Remove all {DOCKER_NS} Docker images")
p.add_argument("pattern", default=None, nargs="?",
help="Pattern for filtering images")
p.set_defaults(func=mrproper)
if IN_REPOSITORY:
p = subparsers.add_parser("build",
help=f"Build {DOCKER_NS} Docker images",
parents=[spec_parser, status_parser, tracks_parser])
p.add_argument("--no-cache", action="store_true",
help="docker build option")
p.set_defaults(func=build_images)
p = subparsers.add_parser("test",
help=f"Test {DOCKER_NS} Docker images",
parents=[spec_parser, status_parser, run_parser, tracks_parser, docker_parser])
p.add_argument("--quiet", "-q", action="store_true")
p.add_argument("--file", "-f", default="aim-200-1_6-yes1-1.cnf",
help=".cnf test file (should also exists with .gz)")
p.set_defaults(func=test_images)
p = subparsers.add_parser("push",
help=f"Push {DOCKER_NS} Docker images",
parents=[spec_parser, status_parser, tracks_parser])
p.set_defaults(func=push_images)
p = subparsers.add_parser("image-deps",
help=f"List Docker image dependencies")
p.add_argument("--pull", action="store_true",
help="pull images")
p.set_defaults(func=dependencies)
p = subparsers.add_parser("fetch-sources",
help=f"Fetch solver sources",
parents=[spec_parser])
p.add_argument("output_dir", help="Output directory")
p.add_argument("--overwrite", help="Allow writing over existing files",
action="store_true", default=False)
p.add_argument("--subdir-entry", help="Output in sub-directory named as the entry (year)",
action="store_true", default=False)
p.set_defaults(func=download_src)
subparsers.add_parser("version",
help="Print script version")\
.set_defaults(func=print_version)
#
| |
= root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertFalse(test_observable.get_analysis(WaitAnalysis_B))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_C))
def test_post_analysis_after_false_return(self):
    """execute_post_analysis must still run when analysis returned False."""
    # the execute_post_analysis function should be called regardless of what happened during analysis
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test')
    root.save()
    root.schedule()
    engine = TestEngine(analysis_pools={'test_groups': 1})
    engine.enable_module('analysis_module_test_post_analysis', 'test_groups')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    # Reload from disk to inspect what the engine produced.
    root = RootAnalysis(storage_dir=root.storage_dir)
    root.load()
    test_observable = root.get_observable(test_observable.id)
    from saq.modules.test import PostAnalysisTestResult
    self.assertFalse(test_observable.get_analysis(PostAnalysisTestResult))
    self.assertEquals(log_count('execute_post_analysis called'), 1)
def test_maximum_cumulative_analysis_warning_time(self):
    """Global zero warning threshold logs 'ACE has been analyzing' once."""
    # setting this to zero should cause it to happen right away
    saq.CONFIG['global']['maximum_cumulative_analysis_warning_time'] = '0'
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_1')
    root.save()
    root.schedule()
    engine = TestEngine(analysis_pools={'test_groups': 1})
    engine.enable_module('analysis_module_basic_test', 'test_groups')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    self.assertEquals(log_count('ACE has been analyzing'), 1)
def test_maximum_cumulative_analysis_warning_time_analysis_mode(self):
    """Per-analysis-mode zero warning threshold triggers the same warning."""
    # same thing as before except we set the timeout for just the analysis mode
    # setting this to zero should cause it to happen right away
    saq.CONFIG['analysis_mode_test_groups']['maximum_cumulative_analysis_warning_time'] = '0'
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_1')
    root.save()
    root.schedule()
    engine = TestEngine(analysis_pools={'test_groups': 1})
    engine.enable_module('analysis_module_basic_test', 'test_groups')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    self.assertEquals(log_count('ACE has been analyzing'), 1)
def test_maximum_cumulative_analysis_fail_time(self):
    """Global zero fail threshold logs 'ACE took too long to analyze' once."""
    # setting this to zero should cause it to happen right away
    saq.CONFIG['global']['maximum_cumulative_analysis_fail_time'] = '0'
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_1')
    root.save()
    root.schedule()
    engine = TestEngine(analysis_pools={'test_groups': 1})
    engine.enable_module('analysis_module_basic_test', 'test_groups')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    self.assertEquals(log_count('ACE took too long to analyze'), 1)
def test_maximum_cumulative_analysis_fail_time_analysis_mode(self):
    """Per-analysis-mode zero fail threshold triggers the same failure log."""
    # same thing as before except we set the timeout for just the analysis mode
    # setting this to zero should cause it to happen right away
    saq.CONFIG['analysis_mode_test_groups']['maximum_cumulative_analysis_fail_time'] = '0'
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_1')
    root.save()
    root.schedule()
    engine = TestEngine(analysis_pools={'test_groups': 1})
    engine.enable_module('analysis_module_basic_test', 'test_groups')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    self.assertEquals(log_count('ACE took too long to analyze'), 1)
def test_maximum_analysis_time(self):
    """Global zero per-module limit logs 'excessive time' during analysis
    and again during final analysis."""
    # setting this to zero should cause it to happen right away
    saq.CONFIG['global']['maximum_analysis_time'] = '0'
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_4')
    root.save()
    root.schedule()
    engine = TestEngine(analysis_pools={'test_groups': 1})
    engine.enable_module('analysis_module_basic_test', 'test_groups')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    # will fire again in final analysis
    self.assertEquals(log_count('excessive time - analysis module'), 2)
def test_maximum_analysis_time_analysis_mode(self):
    """Per-analysis-mode zero per-module limit triggers the same two logs."""
    # same thing as before except we set the timeout for just the analysis mode
    # setting this to zero should cause it to happen right away
    saq.CONFIG['analysis_mode_test_groups']['maximum_analysis_time'] = '0'
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_4')
    root.save()
    root.schedule()
    engine = TestEngine(analysis_pools={'test_groups': 1})
    engine.enable_module('analysis_module_basic_test', 'test_groups')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    # will fire again in final analysis
    self.assertEquals(log_count('excessive time - analysis module'), 2)
def test_is_module_enabled(self):
    """Verify the dependency test module records the expected success/failure entries."""
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test')
    root.save()
    root.schedule()
    engine = TestEngine(analysis_pools={'test_groups': 1})
    engine.enable_module('analysis_module_dependency_test', 'test_groups')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    # reload the analysis from disk to inspect the results
    root = RootAnalysis(storage_dir=root.storage_dir)
    root.load()
    test_observable = root.get_observable(test_observable.id)
    from saq.modules.test import DependencyTestAnalysis, KEY_SUCCESS, KEY_FAIL
    analysis = test_observable.get_analysis(DependencyTestAnalysis)
    # iterate the dict values directly instead of keys()+indexed lookup
    for value in analysis.details[KEY_SUCCESS].values():
        self.assertTrue(value)
    for value in analysis.details[KEY_FAIL].values():
        self.assertFalse(value)
def test_analysis_mode_priority(self):
    """Work whose analysis mode matches the engine's pool is selected before other work."""
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_1')
    root.save()
    root.schedule()
    test_1_uuid = root.uuid
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_2')
    root.save()
    root.schedule()
    test_2_uuid = root.uuid
    engine = TestEngine(analysis_pools={'test_groups': 1})
    engine.enable_module('analysis_module_basic_test')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    # we should see test_2_uuid get selected BEFORE test_1_uuid gets selected
    results = [_.getMessage() for _ in search_log('got work item')]
    # (assertEquals is a deprecated alias removed in Python 3.12)
    self.assertEqual(len(results), 2)
    self.assertEqual(results.index('got work item RootAnalysis({})'.format(test_2_uuid)), 0)
def test_analysis_mode_no_priority(self):
    """Without an analysis pool priority, work items are selected FIFO."""
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_1')
    root.save()
    root.schedule()
    test_1_uuid = root.uuid
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    test_observable = root.add_observable(F_TEST, 'test_2')
    root.save()
    root.schedule()
    test_2_uuid = root.uuid
    engine = TestEngine(pool_size_limit=1)
    engine.enable_module('analysis_module_basic_test')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    # since we don't have any kind of priority set they should get selected in the
    # order they were inserted (FIFO), so test_1_uuid is selected BEFORE test_2_uuid
    results = [_.getMessage() for _ in search_log('got work item')]
    # (assertEquals is a deprecated alias removed in Python 3.12)
    self.assertEqual(len(results), 2)
    self.assertEqual(results.index('got work item RootAnalysis({})'.format(test_1_uuid)), 0)
def test_merge(self):
    """Verify merging one RootAnalysis into another carries over observables,
    analyses, files, relationships, and de-duplicates existing observables."""
    # first analysis
    root_1 = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root_1.initialize_storage()
    test_observable_1 = root_1.add_observable(F_TEST, 'test_1')
    existing_user_observable = root_1.add_observable(F_USER, 'admin')
    root_1.save()
    root_1.schedule()
    # second analysis we want to merge into the first
    root_2 = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root_2.initialize_storage()
    test_observable_2 = root_2.add_observable(F_TEST, 'merge_test_1')
    root_2.save()
    root_2.schedule()
    engine = TestEngine()
    engine.enable_module('analysis_module_basic_test')
    engine.enable_module('analysis_module_merge_test')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    from saq.modules.test import BasicTestAnalysis, MergeTestAnalysis
    root_1.load()
    test_observable_1 = root_1.get_observable(test_observable_1.id)
    self.assertIsNotNone(test_observable_1)
    basic_analysis = test_observable_1.get_analysis(BasicTestAnalysis)
    self.assertIsNotNone(basic_analysis)
    root_2.load()
    root_1.merge(basic_analysis, root_2)
    root_1.save()
    # now the basic analysis should have the test_observable_2
    test_observable_2 = root_1.get_observable(test_observable_2.id)
    self.assertIsNotNone(test_observable_2)
    # and it should have the merge analysis
    merge_analysis = test_observable_2.get_analysis(MergeTestAnalysis)
    self.assertIsNotNone(merge_analysis)
    # and that should have a new observable of its own
    # (assertEquals is a deprecated alias removed in Python 3.12)
    output_observable = merge_analysis.get_observables_by_type(F_TEST)
    self.assertEqual(len(output_observable), 1)
    output_observable = output_observable[0]
    self.assertEqual(output_observable.value, 'test_output')
    self.assertTrue(output_observable.has_tag('test'))
    # there should also be a file observable
    file_observable = merge_analysis.get_observables_by_type(F_FILE)
    self.assertEqual(len(file_observable), 1)
    file_observable = file_observable[0]
    with open(os.path.join(root_1.storage_dir, file_observable.value), 'r') as fp:
        self.assertEqual(fp.read(), 'test')
    # that should have a relationship to a URL observable
    self.assertEqual(len(file_observable.relationships), 1)
    self.assertEqual(file_observable.relationships[0].r_type, R_DOWNLOADED_FROM)
    url_observable = file_observable.relationships[0].target
    self.assertTrue(isinstance(url_observable, Observable))
    # NOTE(review): assertTrue's second argument is a failure *message*, so this
    # only checks truthiness of .value; it probably meant an equality/type check
    # against F_URL -- confirm intent before changing.
    self.assertTrue(url_observable.value, F_URL)
    # we also merged an existing observable
    # so we should see this observable twice
    existing_observable = root_1.get_observable(existing_user_observable.id)
    self.assertIsNotNone(existing_observable)
    instance_copy = merge_analysis.get_observables_by_type(F_USER)
    self.assertEqual(len(instance_copy), 1)
    self.assertEqual(instance_copy[0].id, existing_observable.id)
def test_error_reporting(self):
    """An analysis failure produces an error report file plus a copy of the storage dir."""
    # trigger the failure this way
    saq.CONFIG['global']['maximum_cumulative_analysis_fail_time'] = '0'

    # remember what was already in the error reporting directory
    def _enum_error_reporting():
        return set(os.listdir(os.path.join(saq.DATA_DIR, 'error_reports')))

    existing_reports = _enum_error_reporting()
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    observable = root.add_observable(F_TEST, 'test_3')
    root.save()
    root.schedule()
    engine = TestEngine()
    engine.copy_analysis_on_error = True
    engine.enable_module('analysis_module_basic_test')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    # look at what is in the error reporting directory now,
    # excluding what we found before, to find the new stuff
    new_reports = _enum_error_reporting() - existing_reports
    # we should have a single error report and a single storage directory
    # (assertEquals is a deprecated alias removed in Python 3.12)
    self.assertEqual(len(new_reports), 2)
    # one should be a file and the other a directory
    file_path = None
    dir_path = None
    for _file in new_reports:
        # the original wrapped this in a redundant nested os.path.join
        path = os.path.join(saq.DATA_DIR, 'error_reports', _file)
        if os.path.isfile(path):
            file_path = path
        if os.path.isdir(path):
            dir_path = path
    self.assertIsNotNone(file_path)
    self.assertIsNotNone(dir_path)
    # check that everything we expect to exist in the dir exists
    self.assertTrue(os.path.exists(os.path.join(dir_path, 'data.json')))
    self.assertTrue(os.path.exists(os.path.join(dir_path, 'saq.log')))
    self.assertTrue(os.path.isdir(os.path.join(dir_path, 'stats')))
    self.assertTrue(os.path.isdir(os.path.join(dir_path, '.ace')))
    # go ahead and remove these since we check for them after running tests
    # to review actual error reports
    shutil.rmtree(dir_path)
    os.remove(file_path)
def test_stats(self):
    """The engine writes a single non-empty per-module stats file under its stats dir."""
    stats_dir = os.path.join(saq.MODULE_STATS_DIR, 'ace')
    # clear engine statistics so we only see output from this run
    if os.path.exists(stats_dir):
        shutil.rmtree(stats_dir)
    root = create_root_analysis(uuid=str(uuid.uuid4()))
    root.initialize_storage()
    observable = root.add_observable(F_TEST, 'test_1')
    root.save()
    root.schedule()
    engine = TestEngine()
    engine.enable_module('analysis_module_basic_test')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    # there should be one subdir in the engine's stats dir
    # (listdir once instead of twice; assertEquals is deprecated -> assertEqual)
    subdirs = os.listdir(stats_dir)
    self.assertEqual(len(subdirs), 1)
    subdir = subdirs[0]
    # this should have a single stats file in it
    stats_files = os.listdir(os.path.join(stats_dir, subdir))
    self.assertEqual(len(stats_files), 1)
    # and it should not be empty
    self.assertGreater(os.path.getsize(os.path.join(stats_dir, subdir, stats_files[0])), 0)
def test_exclusion(self):
    """Observables excluded by the module must not receive BasicTestAnalysis."""
    root = create_root_analysis(uuid=str(uuid.uuid4()))
    root.initialize_storage()
    observable = root.add_observable(F_TEST, 'test_6')
    root.save()
    root.schedule()
    engine = TestEngine()
    engine.enable_module('analysis_module_basic_test')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    root = RootAnalysis(storage_dir=root.storage_dir)
    root.load()
    observable = root.get_observable(observable.id)
    self.assertIsNotNone(observable)
    from saq.modules.test import BasicTestAnalysis
    analysis = observable.get_analysis(BasicTestAnalysis)
    self.assertIsNotNone(analysis)
    # we should have two that were both excluded in different ways
    # (assertEquals is a deprecated alias removed in Python 3.12)
    self.assertEqual(len(analysis.observables), 2)
    # BUG FIX: the original loop immediately rebound new_observable to
    # analysis.observables[0], so only the FIRST observable was ever checked;
    # use the loop variable so every observable is verified
    for new_observable in analysis.observables:
        new_analysis = new_observable.get_analysis(BasicTestAnalysis)
        self.assertFalse(new_analysis)
def test_limited_analysis(self):
    """limit_analysis() restricts an observable to exactly the named module."""
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    observable = root.add_observable(F_TEST, 'test_1')
    observable.limit_analysis('basic_test')
    root.save()
    root.schedule()
    engine = TestEngine()
    engine.enable_module('analysis_module_basic_test')
    engine.enable_module('analysis_module_test_delayed_analysis')
    engine.enable_module('analysis_module_test_engine_locking')
    engine.enable_module('analysis_module_test_final_analysis')
    engine.enable_module('analysis_module_test_post_analysis')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    root = RootAnalysis(storage_dir=root.storage_dir)
    root.load()
    observable = root.get_observable(observable.id)
    self.assertIsNotNone(observable)
    # there should only be one analysis performed
    # (assertEquals is a deprecated alias removed in Python 3.12)
    self.assertEqual(len(observable.all_analysis), 1)
    from saq.modules.test import BasicTestAnalysis
    analysis = observable.get_analysis(BasicTestAnalysis)
    self.assertIsNotNone(analysis)
    self.assertTrue(len(search_log('analysis for test(test_1) limited to 1 modules (basic_test)')) > 0)
def test_limited_analysis_invalid(self):
    """Limiting to an unknown module name yields no analysis and logs a warning."""
    root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
    root.initialize_storage()
    observable = root.add_observable(F_TEST, 'test_1')
    observable.limit_analysis('basic_tast')  # deliberately misspelled module name
    root.save()
    root.schedule()
    engine = TestEngine()
    engine.enable_module('analysis_module_basic_test')
    engine.enable_module('analysis_module_test_delayed_analysis')
    engine.enable_module('analysis_module_test_engine_locking')
    engine.enable_module('analysis_module_test_final_analysis')
    engine.enable_module('analysis_module_test_post_analysis')
    engine.controlled_stop()
    engine.start()
    engine.wait()
    root = RootAnalysis(storage_dir=root.storage_dir)
    root.load()
    observable = root.get_observable(observable.id)
    self.assertIsNotNone(observable)
    # there should be no analysis at all
    # (assertEquals is a deprecated alias removed in Python 3.12)
    self.assertEqual(len(observable.all_analysis), 0)
    from saq.modules.test import BasicTestAnalysis
    analysis = observable.get_analysis(BasicTestAnalysis)
    self.assertIsNone(analysis)
    self.assertTrue(len(search_log('specified unknown limited analysis')) > 0)
#def test_cleanup(self):
#root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_cleanup')
#root.initialize_storage()
#root.save()
#root.schedule()
#engine = TestEngine()
#engine.controlled_stop()
#engine.start()
#engine.wait()
#self.assertFalse(os.path.isdir(root.storage_dir))
def test_cleanup_alt_workdir(self):
    """A root stored in the alternate workload directory is still removed in cleanup mode."""
    analysis = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_cleanup')
    # place the storage under the workload directory rather than the default location
    analysis.storage_dir = workload_storage_dir(analysis.uuid)
    analysis.initialize_storage()
    analysis.save()
    analysis.schedule()
    test_engine = TestEngine()
    test_engine.controlled_stop()
    test_engine.start()
    test_engine.wait()
    # cleanup mode must have deleted the storage directory
    self.assertFalse(os.path.isdir(analysis.storage_dir))
def test_no_cleanup(self):
    """A root in a mode without cleanup keeps its storage directory after analysis."""
    analysis = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_empty')
    analysis.initialize_storage()
    analysis.save()
    analysis.schedule()
    test_engine = TestEngine()
    test_engine.controlled_stop()
    test_engine.start()
    test_engine.wait()
    # no cleanup configured, so the storage directory must remain on disk
    self.assertTrue(os.path.isdir(analysis.storage_dir))
def test_cleanup_with_delayed_analysis(self):
# we are set to cleanup, however, we don't because we have delayed analysis
saq.CONFIG['analysis_mode_test_groups']['cleanup'] = 'yes'
root = | |
tstamp=None, asr=None, acd=None, action=None,):
self.i_connection_quality_stats = i_connection_quality_stats
self.i_connection = i_connection
self.tstamp = tstamp
self.asr = asr
self.acd = acd
self.action = action
def read(self, iprot):
    """Deserialize this struct from the given thrift input protocol.

    Thrift-generated: field ids/types must match thrift_spec; unknown
    fields are skipped for forward compatibility.
    """
    # Fast path: the C-accelerated binary protocol decodes the whole struct at once.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
        fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
        return
    # Slow path: read field-by-field until the STOP marker.
    iprot.readStructBegin()
    while True:
        (fname, ftype, fid) = iprot.readFieldBegin()
        if ftype == TType.STOP:
            break
        if fid == 1:
            if ftype == TType.I64:
                self.i_connection_quality_stats = iprot.readI64()
            else:
                iprot.skip(ftype)
        elif fid == 2:
            if ftype == TType.I64:
                self.i_connection = iprot.readI64()
            else:
                iprot.skip(ftype)
        elif fid == 3:
            if ftype == TType.I64:
                self.tstamp = iprot.readI64()
            else:
                iprot.skip(ftype)
        elif fid == 4:
            if ftype == TType.DOUBLE:
                self.asr = iprot.readDouble()
            else:
                iprot.skip(ftype)
        elif fid == 5:
            if ftype == TType.I32:
                self.acd = iprot.readI32()
            else:
                iprot.skip(ftype)
        elif fid == 6:
            if ftype == TType.STRING:
                # Python 2 returns bytes from readString, so decode there only.
                self.action = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
            else:
                iprot.skip(ftype)
        else:
            # unknown field id: skip to stay compatible with newer schemas
            iprot.skip(ftype)
        iprot.readFieldEnd()
    iprot.readStructEnd()
def write(self, oprot):
    """Serialize this struct to the given thrift output protocol.

    Thrift-generated: only fields that are not None are written.
    """
    # Fast path: the C-accelerated binary protocol encodes the whole struct at once.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
    oprot.writeStructBegin('ConnectionQualityStats')
    if self.i_connection_quality_stats is not None:
        oprot.writeFieldBegin('i_connection_quality_stats', TType.I64, 1)
        oprot.writeI64(self.i_connection_quality_stats)
        oprot.writeFieldEnd()
    if self.i_connection is not None:
        oprot.writeFieldBegin('i_connection', TType.I64, 2)
        oprot.writeI64(self.i_connection)
        oprot.writeFieldEnd()
    if self.tstamp is not None:
        oprot.writeFieldBegin('tstamp', TType.I64, 3)
        oprot.writeI64(self.tstamp)
        oprot.writeFieldEnd()
    if self.asr is not None:
        oprot.writeFieldBegin('asr', TType.DOUBLE, 4)
        oprot.writeDouble(self.asr)
        oprot.writeFieldEnd()
    if self.acd is not None:
        oprot.writeFieldBegin('acd', TType.I32, 5)
        oprot.writeI32(self.acd)
        oprot.writeFieldEnd()
    if self.action is not None:
        oprot.writeFieldBegin('action', TType.STRING, 6)
        # Python 2 requires bytes on the wire; Python 3 writeString handles str.
        oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else self.action)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
    """No required fields declared, so every instance is considered valid."""
    return None
def __repr__(self):
    """Debug representation listing every attribute as name=value."""
    rendered = ', '.join('%s=%r' % pair for pair in self.__dict__.items())
    return '%s(%s)' % (self.__class__.__name__, rendered)
def __eq__(self, other):
    """Two structs are equal iff they share a class and identical attributes."""
    if not isinstance(other, self.__class__):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Logical inverse of __eq__."""
    return not self.__eq__(other)
class RegisterError(TException):
    """Thrift exception raised when a registration fails.

    Attributes:
     - cause: numeric error cause code (i32)
     - i_call: identifier of the call that failed (i64)
    """

    # (field id, type, name, extra type info, default) per thrift field id
    thrift_spec = (
        None,  # 0
        (1, TType.I32, 'cause', None, None, ),  # 1
        (2, TType.I64, 'i_call', None, None, ),  # 2
    )

    def __init__(self, cause=None, i_call=None,):
        self.cause = cause
        self.i_call = i_call

    def read(self, iprot):
        """Deserialize this exception from *iprot* (thrift-generated)."""
        # Fast path via the C-accelerated binary protocol when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.cause = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I64:
                    self.i_call = iprot.readI64()
                else:
                    iprot.skip(ftype)
            else:
                # unknown field id: skip for forward compatibility
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this exception to *oprot*; None fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('RegisterError')
        if self.cause is not None:
            oprot.writeFieldBegin('cause', TType.I32, 1)
            oprot.writeI32(self.cause)
            oprot.writeFieldEnd()
        if self.i_call is not None:
            oprot.writeFieldBegin('i_call', TType.I64, 2)
            oprot.writeI64(self.i_call)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields, nothing to validate."""
        return

    def __str__(self):
        return repr(self)

    def __repr__(self):
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class TryBackupError(TException):
    """Field-less thrift exception signalling that a backup should be tried."""

    thrift_spec = (
    )

    def read(self, iprot):
        """Consume this (field-less) struct from the input protocol."""
        if (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None
                and fastbinary is not None):
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # no declared fields: skip anything that shows up on the wire
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this (field-less) struct to the output protocol."""
        if (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                and self.thrift_spec is not None
                and fastbinary is not None):
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('TryBackupError')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate for an empty struct."""
        return None

    def __str__(self):
        return repr(self)

    def __repr__(self):
        rendered = ', '.join('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class EagainError(TException):
    """Field-less thrift exception telling the caller to retry later (EAGAIN)."""

    thrift_spec = (
    )

    def read(self, iprot):
        """Consume this (field-less) struct from the input protocol."""
        if (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None
                and fastbinary is not None):
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # no declared fields: skip anything that shows up on the wire
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Emit this (field-less) struct to the output protocol."""
        if (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                and self.thrift_spec is not None
                and fastbinary is not None):
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('EagainError')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to validate for an empty struct."""
        return None

    def __str__(self):
        return repr(self)

    def __repr__(self):
        rendered = ', '.join('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, rendered)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class Billables(object):
    """Thrift struct describing the rating/billing parameters of a call leg.

    Attributes:
     - free_seconds: seconds billed at zero cost at call start (i64)
     - connect_fee: one-time fee applied on connect (double)
     - price_1: price of the first billing interval (double)
     - price_n: price of each subsequent interval (double)
     - interval_1: length of the first billing interval (i32)
     - interval_n: length of subsequent intervals (i32)
     - post_call_surcharge: surcharge applied after the call (double)
     - grace_period: seconds below which the call is not billed (i32)
     - prefix: matched dial prefix (string)
     - decimal_precision: decimal places used when rounding cost (i32)
     - cost_round_up: whether cost rounds up at that precision (bool)
    """

    # (field id, type, name, extra type info, default) per thrift field id
    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'free_seconds', None, None, ),  # 1
        (2, TType.DOUBLE, 'connect_fee', None, None, ),  # 2
        (3, TType.DOUBLE, 'price_1', None, None, ),  # 3
        (4, TType.DOUBLE, 'price_n', None, None, ),  # 4
        (5, TType.I32, 'interval_1', None, None, ),  # 5
        (6, TType.I32, 'interval_n', None, None, ),  # 6
        (7, TType.DOUBLE, 'post_call_surcharge', None, None, ),  # 7
        (8, TType.I32, 'grace_period', None, None, ),  # 8
        (9, TType.STRING, 'prefix', 'UTF8', None, ),  # 9
        (10, TType.I32, 'decimal_precision', None, None, ),  # 10
        (11, TType.BOOL, 'cost_round_up', None, None, ),  # 11
    )

    def __init__(self, free_seconds=None, connect_fee=None, price_1=None, price_n=None, interval_1=None, interval_n=None, post_call_surcharge=None, grace_period=None, prefix=None, decimal_precision=None, cost_round_up=None,):
        self.free_seconds = free_seconds
        self.connect_fee = connect_fee
        self.price_1 = price_1
        self.price_n = price_n
        self.interval_1 = interval_1
        self.interval_n = interval_n
        self.post_call_surcharge = post_call_surcharge
        self.grace_period = grace_period
        self.prefix = prefix
        self.decimal_precision = decimal_precision
        self.cost_round_up = cost_round_up

    def read(self, iprot):
        """Deserialize this struct from *iprot* (thrift-generated)."""
        # Fast path via the C-accelerated binary protocol when available.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec), iprot.string_length_limit, iprot.container_length_limit)
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.free_seconds = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.DOUBLE:
                    self.connect_fee = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.DOUBLE:
                    self.price_1 = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.DOUBLE:
                    self.price_n = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I32:
                    self.interval_1 = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.I32:
                    self.interval_n = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.DOUBLE:
                    self.post_call_surcharge = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.I32:
                    self.grace_period = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 9:
                if ftype == TType.STRING:
                    # Python 2 returns bytes from readString, so decode there only.
                    self.prefix = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 10:
                if ftype == TType.I32:
                    self.decimal_precision = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 11:
                if ftype == TType.BOOL:
                    self.cost_round_up = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                # unknown field id: skip for forward compatibility
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot*; None fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Billables')
        if self.free_seconds is not None:
            oprot.writeFieldBegin('free_seconds', TType.I64, 1)
            oprot.writeI64(self.free_seconds)
            oprot.writeFieldEnd()
        if self.connect_fee is not None:
            oprot.writeFieldBegin('connect_fee', TType.DOUBLE, 2)
            oprot.writeDouble(self.connect_fee)
            oprot.writeFieldEnd()
        if self.price_1 is not None:
            oprot.writeFieldBegin('price_1', TType.DOUBLE, 3)
            oprot.writeDouble(self.price_1)
            oprot.writeFieldEnd()
        if self.price_n is not None:
            oprot.writeFieldBegin('price_n', TType.DOUBLE, 4)
            oprot.writeDouble(self.price_n)
            oprot.writeFieldEnd()
        if self.interval_1 is not None:
            oprot.writeFieldBegin('interval_1', TType.I32, 5)
            oprot.writeI32(self.interval_1)
            oprot.writeFieldEnd()
        if self.interval_n is not None:
            oprot.writeFieldBegin('interval_n', TType.I32, 6)
            oprot.writeI32(self.interval_n)
            oprot.writeFieldEnd()
        if self.post_call_surcharge is not None:
            oprot.writeFieldBegin('post_call_surcharge', TType.DOUBLE, 7)
            oprot.writeDouble(self.post_call_surcharge)
            oprot.writeFieldEnd()
        if self.grace_period is not None:
            oprot.writeFieldBegin('grace_period', TType.I32, 8)
            oprot.writeI32(self.grace_period)
            oprot.writeFieldEnd()
        if self.prefix is not None:
            oprot.writeFieldBegin('prefix', TType.STRING, 9)
            # Python 2 requires bytes on the wire; Python 3 writeString handles str.
            oprot.writeString(self.prefix.encode('utf-8') if sys.version_info[0] == 2 else self.prefix)
            oprot.writeFieldEnd()
        if self.decimal_precision is not None:
            oprot.writeFieldBegin('decimal_precision', TType.I32, 10)
            oprot.writeI32(self.decimal_precision)
            oprot.writeFieldEnd()
        if self.cost_round_up is not None:
            oprot.writeFieldBegin('cost_round_up', TType.BOOL, 11)
            oprot.writeBool(self.cost_round_up)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """No required fields, nothing to validate."""
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class AccountBillables(object):
"""
Attributes:
- bparams
- area_name
- i_commission_agent
- commission_size
- i_wholesaler
- fresh_balance
- plan_only
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'bparams', | |
center_y + eps
# area
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.area == (30-10) * (40-20)
# project
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (10, 10))
assert 10 - eps < bb2.y1 < 10 + eps
assert 20 - eps < bb2.x1 < 20 + eps
assert 30 - eps < bb2.y2 < 30 + eps
assert 40 - eps < bb2.x2 < 40 + eps
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (20, 20))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (5, 5))
assert 10*0.5 - eps < bb2.y1 < 10*0.5 + eps
assert 20*0.5 - eps < bb2.x1 < 20*0.5 + eps
assert 30*0.5 - eps < bb2.y2 < 30*0.5 + eps
assert 40*0.5 - eps < bb2.x2 < 40*0.5 + eps
bb2 = bb.project((10, 10), (10, 20))
assert 10*1 - eps < bb2.y1 < 10*1 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*1 - eps < bb2.y2 < 30*1 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (20, 10))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*1 - eps < bb2.x1 < 20*1 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*1 - eps < bb2.x2 < 40*1 + eps
# extend
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.extend(all_sides=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+1
assert bb2.x1 == 20-1
assert bb2.x2 == 40+1
bb2 = bb.extend(all_sides=-1)
assert bb2.y1 == 10-(-1)
assert bb2.y2 == 30+(-1)
assert bb2.x1 == 20-(-1)
assert bb2.x2 == 40+(-1)
bb2 = bb.extend(top=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(right=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+1
bb2 = bb.extend(bottom=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+1
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(left=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-1
assert bb2.x2 == 40+0
# intersection
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_inter = bb1.intersection(bb2)
assert bb_inter.x1 == 39
assert bb_inter.x2 == 40
assert bb_inter.y1 == 10
assert bb_inter.y2 == 30
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
bb_inter = bb1.intersection(bb2, default=False)
assert bb_inter == False
# union
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_union = bb1.union(bb2)
assert bb_union.x1 == 20
assert bb_union.x2 == 59
assert bb_union.y1 == 10
assert bb_union.y2 == 30
# iou
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
iou = bb1.iou(bb2)
assert 1.0 - eps < iou < 1.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
iou = bb1.iou(bb2)
assert 0.0 - eps < iou < 0.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=10, y2=20, x2=20, label=None)
bb2 = ia.BoundingBox(y1=15, x1=15, y2=25, x2=25, label=None)
iou = bb1.iou(bb2)
area_union = 15 * 15
area_intersection = 5 * 5
iou_expected = area_intersection / area_union
assert iou_expected - eps < iou < iou_expected + eps
# is_fully_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_fully_within_image((100, 100, 3)) == True
assert bb.is_fully_within_image((20, 100, 3)) == False
assert bb.is_fully_within_image((100, 30, 3)) == False
assert bb.is_fully_within_image((1, 1, 3)) == False
# is_partly_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_partly_within_image((100, 100, 3)) == True
assert bb.is_partly_within_image((20, 100, 3)) == True
assert bb.is_partly_within_image((100, 30, 3)) == True
assert bb.is_partly_within_image((1, 1, 3)) == False
# is_out_of_image()
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=True) == False
assert bb.is_out_of_image((100, 100, 3), partly=False, fully=True) == False
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=False) == False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=True) == True
assert bb.is_out_of_image((20, 100, 3), partly=False, fully=True) == False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=False) == True
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=True) == True
assert bb.is_out_of_image((100, 30, 3), partly=False, fully=True) == False
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=False) == True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=True) == True
assert bb.is_out_of_image((1, 1, 3), partly=False, fully=True) == True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=False) == False
# cut_out_of_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_cut = bb.cut_out_of_image((100, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image(np.zeros((100, 100, 3), dtype=np.uint8))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((20, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 20
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((100, 30, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 30
# shift
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_top = bb.shift(top=0)
bb_right = bb.shift(right=0)
bb_bottom = bb.shift(bottom=0)
bb_left = bb.shift(left=0)
assert bb_top.y1 == 10
assert bb_top.x1 == 20
assert bb_top.y2 == 30
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20
assert bb_right.y2 == 30
assert bb_right.x2 == 40
assert bb_bottom.y1 == 10
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20
assert bb_left.y2 == 30
assert bb_left.x2 == 40
bb_top = bb.shift(top=1)
bb_right = bb.shift(right=1)
bb_bottom = bb.shift(bottom=1)
bb_left = bb.shift(left=1)
assert bb_top.y1 == 10+1
assert bb_top.x1 == 20
assert bb_top.y2 == 30+1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20-1
assert bb_right.y2 == 30
assert bb_right.x2 == 40-1
assert bb_bottom.y1 == 10-1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30-1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20+1
assert bb_left.y2 == 30
assert bb_left.x2 == 40+1
bb_top = bb.shift(top=-1)
bb_right = bb.shift(right=-1)
bb_bottom = bb.shift(bottom=-1)
bb_left = bb.shift(left=-1)
assert bb_top.y1 == 10-1
assert bb_top.x1 == 20
assert bb_top.y2 == 30-1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20+1
assert bb_right.y2 == 30
assert bb_right.x2 == 40+1
assert bb_bottom.y1 == 10+1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30+1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20-1
assert bb_left.y2 == 30
assert bb_left.x2 == 40-1
bb_mix = bb.shift(top=1, bottom=2, left=3, right=4)
assert bb_mix.y1 == 10+1-2
assert bb_mix.x1 == 20+3-4
assert bb_mix.y2 == 30+3-4
assert bb_mix.x2 == 40+1-2
# draw_on_image()
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[1:3+1, 1] = True
bb_mask[1:3+1, 3] = True
bb_mask[1, 1:3+1] = True
bb_mask[3, 1:3+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image == 0)
image_bb = bb.draw_on_image(image, color=[255, 0, 0], alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 0, 0])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image, color=128, alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [128, 128, 128])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image+100, color=[200, 200, 200], alpha=0.5, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [150, 150, 150])
assert np.all(image_bb[~bb_mask] == [100, 100, 100])
image_bb = bb.draw_on_image((image+100).astype(np.float32), color=[200, 200, 200], alpha=0.5, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.sum(np.abs((image_bb - [150, 150, 150])[bb_mask])) < 0.1
assert np.sum(np.abs((image_bb - [100, 100, 100])[~bb_mask])) < 0.1
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=False, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image[bb_mask] == [255, 255, 255])
| |
<filename>malloovia/lpsolver.py<gh_stars>1-10
# coding: utf-8
# import pandas as pd
"""Malloovia interface to LP solver"""
from typing import Sequence, List, Any
from itertools import product as cartesian_product
from inspect import ismethod
from collections import namedtuple
from uuid import uuid4
import os
import pulp # type: ignore
from pulp import (
LpContinuous,
LpInteger,
LpVariable,
lpSum,
LpProblem,
LpMinimize,
LpMaximize,
PulpSolverError,
COIN_CMD,
log,
subprocess,
)
from .solution_model import (
MallooviaHistogram,
ReservedAllocation,
AllocationInfo,
Status,
pulp_to_malloovia_status,
)
from .model import System, Workload, App, TimeUnit
# Monkey-patch pulp: give every LpProblem a ``bestBound`` attribute so the
# solver wrapper can store the best known objective bound alongside the problem.
LpProblem.bestBound = None # Add new attribute to pulp problems
class MallooviaLp:
"""Solves the allocation problem, using Linear Programming.
This class contains methods to create a linear programming problem
(using PuLP), to add restrictions and extra variables to it,
to solve it (using PuLP supported solvers), and to retrieve
the solution in a format amenable to further analysis and display.
The LP problem instantiates these variables:
- For reserved instances: ``Y_(_a,_ic)``, where ``Y`` is a fixed prefix,
``a`` is a string representation of each application and ``ic`` is the string
representation of each reserved instance class considered.
After solving the LP problem, the value of the variable is the number of
reserved machines of instance class `ic` for application `a`, for the whole
reservation period.
- For on-demand instances: ``X_(_a,_ic,_l)``, where ``X`` is a fixed prefix,
``a`` is a string representation of each application, ``ic`` is the string
representation of each on-demand instance class considered and ``l`` is a
string representation of a "workload tuple", which is a tuple of numbers,
e.g: ``(1230, 442, 123)``, each one representing the workload of one of the apps.
After solving the LP problem, the value of the variable is the number of
on-demand machines of instance class `ic` deployed for application `a` at a
timeslot which has a workload prediction equal to the tuple ``l``.
Intended usage:
1. Instantiate the class (see constructor parameters below).
2. Call object's ``.create_problem()``.
3. Call object's ``.solve()``.
4. Retrieve solution by calling object's ``.get_allocation()`` to get the solution
for all variables, or ``.get_reserved_allocation()`` to get ony the number of
reserved instances of each type.
5. Retrieve the cost of the solution via object's ``.get_solution()``.
You can use object's property ``pulp_problem`` to access the PuLP problem object
which represents the linear programming problem, to inspect or save it if required.
"""
    def __init__(
        self,
        system: System,
        workloads: Sequence[Workload],
        preallocation: ReservedAllocation = None,
        relaxed: bool = False,
    ) -> None:
        """Constructor:

        Args:
            system: namedtuple containing "name", "apps", "instance_classes"
                and "performances" for the problem to solve.
            workloads: list of workloads, one per app. Each workload
                is a namedtuple which contains a reference to the app, and a sequence
                of N numbers which is the prediction for the next N timeslots. This
                sequence must have the same length for all workloads in the list.
            preallocation: number of reserved instances which are
                preallocated. In phase I this parameter can be omitted (defaults to
                ``None``), and in phase II it should contain the object returned by
                ``get_reserved_allocation()`` after solving phase I.
            relaxed: if ``True``, the problem uses continuous variables
                instead of integer ones.
        """
        self.system = system
        # Ensure that the workloads received are ordered by the field app in the same
        # ordering than the list system.apps
        self.workloads = reorder_workloads(workloads, system.apps)
        if preallocation is None:
            # Phase I: no reserved instances are fixed in advance.
            self.fixed_vms = None
        else:
            assert len(preallocation.instance_classes) == len(
                preallocation.vms_number
            ), (
                "preallocation is wrong, the number of elements in instance_classes and in "
                "vms_number must be the same"
            )
            # Phase II: map each preallocated instance class to its fixed VM count.
            self.fixed_vms = dict(
                zip(preallocation.instance_classes, preallocation.vms_number)
            )
        self.relaxed = relaxed
        self.pulp_problem: Any = None  # Built later by create_problem()
        # Histogram mapping each distinct workload tuple to the number of
        # timeslots in which it appears.
        self.load_hist = get_load_hist_from_load(self.workloads)
        self.solver_called = False
        # CookedData stores some info required when building the problem, so that
        # this data is gathered only once, during __init__, and used when required
        CookedData = namedtuple(  # pylint: disable=invalid-name
            "CookedData",
            [
                "map_dem",
                "map_res",
                "instances_res",
                "instances_dem",
                "limiting_sets",
                "instance_prices",
                "instance_perfs",
            ],
        )
        # Separate the instances in two types: reserved and on-demand
        # Also create dictionaries for fast lookup of price and performance, converted
        # to the timeslot units
        instances_res = []
        instances_dem = []
        instance_prices = {}
        instance_perfs = {}
        # All workloads share the same time unit, which defines one timeslot.
        timeslot_length = self.workloads[0].time_unit
        for iclass in system.instance_classes:
            # Convert the price of the instance class to price-per-timeslot.
            instance_prices[iclass] = iclass.price / TimeUnit(iclass.time_unit).to(
                timeslot_length
            )
            for app in self.system.apps:
                # Convert the performance to performance-per-timeslot.
                instance_perfs[iclass, app] = self.system.performances.values[
                    iclass, app
                ] / TimeUnit(self.system.performances.time_unit).to(timeslot_length)
            if iclass.is_reserved:
                instances_res.append(iclass)
            else:
                instances_dem.append(iclass)
        # Compute the set of LimitingSets (clouds), extracted
        # from the instances
        limiting_sets = set()
        for iclass in system.instance_classes:
            limiting_sets.update(iclass.limiting_sets)
        # Store cooked data
        self.cooked = CookedData(
            map_dem=None,  # To be updated later by create_variables
            map_res=None,
            instances_res=instances_res,
            instances_dem=instances_dem,
            instance_prices=instance_prices,
            instance_perfs=instance_perfs,
            limiting_sets=limiting_sets,
        )
def _create_variables(self) -> None:
"""Creates the set of variables Y* and X* of the PuLP problem.
Override it if you need to create extra variables (first use
``super().create_variables()`` to call the base class method)."""
if self.relaxed:
kind = LpContinuous
else:
kind = LpInteger
# List all combinations of apps and instances and workloads
comb_res = cartesian_product(self.system.apps, self.cooked.instances_res)
comb_dem = cartesian_product(
self.system.apps, self.cooked.instances_dem, self.load_hist.keys()
)
map_res = LpVariable.dicts("Y", comb_res, 0, None, kind)
map_dem = LpVariable.dicts("X", comb_dem, 0, None, kind)
self.cooked = self.cooked._replace(map_res=map_res, map_dem=map_dem)
def _cost_function(self) -> None:
"""Adds to the LP problem the function to optimize.
The function to optimize is the cost of the deployment. It is computed as
the sum of all Y_a_ic multiplied by the length of the period and by the price/timeslot
of each reserved instance class plus all X_a_ic_l multiplied by the price/timeslot
of each on-demand instance class and by the number of times that workload ``l``
appears in the period."""
period_length = sum(self.load_hist.values())
self.pulp_problem += (
lpSum(
[
self.cooked.map_res[_a, _ic]
* self.cooked.instance_prices[_ic]
* period_length
for _a in self.system.apps
for _ic in self.cooked.instances_res
]
+ [
self.cooked.map_dem[_a, _ic, _l]
* self.cooked.instance_prices[_ic]
* self.load_hist[_l]
for _a in self.system.apps
for _ic in self.cooked.instances_dem
for _l in self.load_hist.keys()
]
),
"Objective: minimize cost",
)
def create_problem(self) -> "MallooviaLp":
"""Creates the PuLP problem with all variables and restrictions.
Returns:
pulp.LpProblem: instance of the PuLP problem.
"""
# Create the linear programming problem
self.pulp_problem = LpProblem(self.system.name, LpMinimize)
# Once we have the variables represented as tuples, we use
# the tuples to create the linear programming variables for pulp
self._create_variables()
# Create the goal function
self._cost_function()
# Add all restrictions indicated with functions *_restriction
# in this class
self._add_all_restrictions()
return self
def _add_all_restrictions(self) -> None:
"""This functions uses introspection to discover all implemented
methods whose name ends with ``_restriction``, and runs them all."""
for name in dir(self):
attribute = getattr(self, name)
if ismethod(attribute) and name.endswith("_restriction"):
attribute()
def performance_restriction(self) -> None:
"""Adds performance restriction to the problem.
This restriction forces, for each workload tuple, the performance of the
solution to be greater than or equal to that workload level for
all applications.
"""
for i, app in enumerate(self.system.apps):
perf_reserved = []
for ins in self.cooked.instances_res:
perf_reserved.append(
self.cooked.map_res[app, ins] * self.cooked.instance_perfs[ins, app]
)
for load in self.load_hist.keys():
perf_ondemand = []
for ins in self.cooked.instances_dem:
perf_ondemand.append(
self.cooked.map_dem[app, ins, load]
* self.cooked.instance_perfs[ins, app]
)
self.pulp_problem += (
lpSum(perf_reserved + perf_ondemand) >= load[i],
"Minimum performance for application {} "
"when workload is {}".format(app, load),
)
return
def limit_instances_per_class_restriction(
self
) -> None: # pylint: disable=invalid-name
"""Adds ``max_vms`` per instance class restriction.
If the ``ic`` instance has a ``max_vms`` attribute, this is a limit for all
``Y_*_ic`` and ``X_*_ic_*`` variables."""
for ins in self.system.instance_classes:
if ins.max_vms == 0:
continue # No limit for this instance class
if ins.is_reserved:
self.pulp_problem += (
lpSum(self.cooked.map_res[app, ins] for app in self.system.apps)
<= ins.max_vms,
"Max instances reserved " "instance class {}".format(ins),
)
else:
for load in self.load_hist.keys():
self.pulp_problem += (
lpSum(
self.cooked.map_dem[app, ins, load]
for app in self.system.apps
)
<= ins.max_vms,
"Max instances for on-demand instance "
"class {} when workload is {}".format(ins, load),
)
def set_fixed_instances_restriction(self) -> None:
"""Adds restrictions for variables with pre-fixed values.
For every ``ic`` in ``self.fixed_vms`` a restriction is
added which forces the total number of those instance classes in
the solution to be at equal to a given value | |
<filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/certificates/tests/test_api.py
"""Tests for the certificates Python API. """
import uuid
from contextlib import contextmanager
from datetime import datetime, timedelta
from unittest import mock
from unittest.mock import patch
import ddt
import pytz
from config_models.models import cache
from django.conf import settings
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import timezone
from edx_toggles.toggles.testutils import override_waffle_switch
from freezegun import freeze_time
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from testfixtures import LogCapture
from xmodule.data import CertificatesDisplayBehaviors
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.tests.factories import (
CourseEnrollmentFactory,
GlobalStaffFactory,
UserFactory
)
from common.djangoapps.util.testing import EventTestMixin
from lms.djangoapps.certificates.api import (
auto_certificate_generation_enabled,
available_date_for_certificate,
can_be_added_to_allowlist,
can_show_certificate_available_date_field,
can_show_certificate_message,
certificate_status_for_student,
certificate_downloadable_status,
create_certificate_invalidation_entry,
create_or_update_certificate_allowlist_entry,
display_date_for_certificate,
generate_certificate_task,
get_allowlist_entry,
get_allowlisted_users,
get_certificate_footer_context,
get_certificate_for_user,
get_certificate_for_user_id,
get_certificate_header_context,
get_certificate_invalidation_entry,
get_certificate_url,
get_certificates_for_user,
get_certificates_for_user_by_course_keys,
has_self_generated_certificates_enabled,
is_certificate_invalidated,
is_on_allowlist,
remove_allowlist_entry,
set_cert_generation_enabled,
)
from lms.djangoapps.certificates.config import AUTO_CERTIFICATE_GENERATION
from lms.djangoapps.certificates.models import (
CertificateGenerationConfiguration,
CertificateStatuses,
GeneratedCertificate,
)
from lms.djangoapps.certificates.tests.factories import (
CertificateAllowlistFactory,
GeneratedCertificateFactory,
CertificateInvalidationFactory
)
from lms.djangoapps.certificates.tests.test_generation_handler import ID_VERIFIED_METHOD, PASSING_GRADE_METHOD
from openedx.core.djangoapps.content.course_overviews.tests.factories import CourseOverviewFactory
from openedx.core.djangoapps.site_configuration.tests.test_util import with_site_configuration
CAN_GENERATE_METHOD = 'lms.djangoapps.certificates.generation_handler._can_generate_regular_certificate'
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
class WebCertificateTestMixin:
    """
    Mixin with helpers for testing Web Certificates.
    """
    def _setup_course_certificate(self):
        """
        Attach a single active certificate configuration to ``self.course``,
        enable the HTML certificate view, and persist the course in the
        modulestore.
        """
        certificate_config = {
            'id': 1,
            'name': 'Test Certificate Name',
            'description': 'Test Certificate Description',
            'course_title': 'tes_course_title',
            'signatories': [],
            'version': 1,
            'is_active': True,
        }
        self.course.certificates = {'certificates': [certificate_config]}
        self.course.cert_html_view_enabled = True
        self.course.save()
        self.store.update_item(self.course, self.user.id)
@ddt.ddt
class CertificateDownloadableStatusTests(WebCertificateTestMixin, ModuleStoreTestCase):
    """Tests for the `certificate_downloadable_status` helper function. """
    # Signals normally suppressed by ModuleStoreTestCase that these tests need.
    ENABLED_SIGNALS = ['course_published']

    def setUp(self):
        """Create a course whose certificate became available two days ago,
        plus one student holding a downloadable verified certificate and one
        student with no certificate at all."""
        super().setUp()
        self.student = UserFactory()
        self.student_no_cert = UserFactory()
        self.course = CourseFactory.create(
            org='edx',
            number='verified',
            display_name='Verified Course',
            end=datetime.now(pytz.UTC),
            self_paced=False,
            certificate_available_date=datetime.now(pytz.UTC) - timedelta(days=2)
        )
        GeneratedCertificateFactory.create(
            user=self.student,
            course_id=self.course.id,
            status=CertificateStatuses.downloadable,
            mode='verified'
        )
        self.request_factory = RequestFactory()

    def test_cert_status_with_generating(self):
        """A certificate still in 'generating' state reports is_generating
        and exposes no download URL or uuid."""
        cert_user = UserFactory()
        GeneratedCertificateFactory.create(
            user=cert_user,
            course_id=self.course.id,
            status=CertificateStatuses.generating,
            mode='verified'
        )
        assert certificate_downloadable_status(cert_user, self.course.id) ==\
            {'is_downloadable': False,
             'is_generating': True,
             'is_unverified': False,
             'download_url': None,
             'uuid': None}

    def test_cert_status_with_error(self):
        """A certificate in 'error' state is also reported as generating.

        NOTE(review): the expected dict below groups the 'error' status under
        ``is_generating=True`` — confirm this matches the grouping done inside
        ``certificate_downloadable_status``.
        """
        cert_user = UserFactory()
        GeneratedCertificateFactory.create(
            user=cert_user,
            course_id=self.course.id,
            status=CertificateStatuses.error,
            mode='verified'
        )
        assert certificate_downloadable_status(cert_user, self.course.id) ==\
            {'is_downloadable': False,
             'is_generating': True,
             'is_unverified': False,
             'download_url': None,
             'uuid': None}

    def test_without_cert(self):
        """A student with no certificate gets an all-False status dict."""
        assert certificate_downloadable_status(self.student_no_cert, self.course.id) ==\
            {'is_downloadable': False,
             'is_generating': False,
             'is_unverified': False,
             'download_url': None,
             'uuid': None}

    def verify_downloadable_pdf_cert(self):
        """
        Verifies certificate_downloadable_status returns the
        correct response for PDF certificates.
        """
        cert_user = UserFactory()
        cert = GeneratedCertificateFactory.create(
            user=cert_user,
            course_id=self.course.id,
            status=CertificateStatuses.downloadable,
            mode='verified',
            download_url='www.google.com',
        )
        assert certificate_downloadable_status(cert_user, self.course.id) ==\
            {'is_downloadable': True,
             'is_generating': False,
             'is_unverified': False,
             'download_url': 'www.google.com',
             'is_pdf_certificate': True,
             'uuid': cert.verify_uuid}

    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
    def test_pdf_cert_with_html_enabled(self):
        """A cert with an explicit download_url stays a PDF cert even when
        HTML view is enabled."""
        self.verify_downloadable_pdf_cert()

    def test_pdf_cert_with_html_disabled(self):
        """PDF certificate status when the HTML view feature is off."""
        self.verify_downloadable_pdf_cert()

    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
    def test_with_downloadable_web_cert(self):
        """A web certificate's download_url is the rendered-by-uuid path."""
        cert_status = certificate_status_for_student(self.student, self.course.id)
        assert certificate_downloadable_status(self.student, self.course.id) ==\
            {'is_downloadable': True,
             'is_generating': False,
             'is_unverified': False,
             'download_url': f'/certificates/{cert_status["uuid"]}',
             'is_pdf_certificate': False,
             'uuid': cert_status['uuid']}

    # (self_paced, cert_avail_delta, cert_downloadable_status, earned_but_not_available)
    @ddt.data(
        (False, timedelta(days=2), False, True),
        (False, -timedelta(days=2), True, None),
        (True, timedelta(days=2), True, None)
    )
    @ddt.unpack
    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
    @patch.dict(settings.FEATURES, {'ENABLE_V2_CERT_DISPLAY_SETTINGS': False})
    def test_cert_api_return_v1(self, self_paced, cert_avail_delta, cert_downloadable_status, earned_but_not_available):
        """
        Test 'downloadable status'
        """
        cert_avail_date = datetime.now(pytz.UTC) + cert_avail_delta
        self.course.self_paced = self_paced
        self.course.certificate_available_date = cert_avail_date
        self.course.save()
        self._setup_course_certificate()
        downloadable_status = certificate_downloadable_status(self.student, self.course.id)
        assert downloadable_status['is_downloadable'] == cert_downloadable_status
        assert downloadable_status.get('earned_but_not_available') == earned_but_not_available

    # (self_paced, cert_avail_delta, certificates_display_behavior,
    #  cert_downloadable_status, earned_but_not_available)
    @ddt.data(
        (True, timedelta(days=2), CertificatesDisplayBehaviors.END_WITH_DATE, True, None),
        (False, -timedelta(days=2), CertificatesDisplayBehaviors.EARLY_NO_INFO, True, None),
        (False, timedelta(days=2), CertificatesDisplayBehaviors.EARLY_NO_INFO, True, None),
        (False, -timedelta(days=2), CertificatesDisplayBehaviors.END, True, None),
        (False, timedelta(days=2), CertificatesDisplayBehaviors.END, False, True),
        (False, -timedelta(days=2), CertificatesDisplayBehaviors.END_WITH_DATE, True, None),
        (False, timedelta(days=2), CertificatesDisplayBehaviors.END_WITH_DATE, False, True),
    )
    @ddt.unpack
    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
    @patch.dict(settings.FEATURES, {'ENABLE_V2_CERT_DISPLAY_SETTINGS': True})
    def test_cert_api_return_v2(
        self,
        self_paced,
        cert_avail_delta,
        certificates_display_behavior,
        cert_downloadable_status,
        earned_but_not_available
    ):
        """
        Test 'downloadable status'
        """
        cert_avail_date = datetime.now(pytz.UTC) + cert_avail_delta
        self.course.self_paced = self_paced
        self.course.certificate_available_date = cert_avail_date
        self.course.certificates_display_behavior = certificates_display_behavior
        self.course.save()
        self._setup_course_certificate()
        downloadable_status = certificate_downloadable_status(self.student, self.course.id)
        assert downloadable_status['is_downloadable'] == cert_downloadable_status
        assert downloadable_status.get('earned_but_not_available') == earned_but_not_available
@ddt.ddt
class CertificateIsInvalid(WebCertificateTestMixin, ModuleStoreTestCase):
    """Tests for the `is_certificate_invalidated` helper function. """

    def setUp(self):
        """Create a student, a course (with its overview), and a global-staff
        user to act as the invalidator."""
        super().setUp()
        self.student = UserFactory()
        self.course = CourseFactory.create(
            org='edx',
            number='verified',
            display_name='Verified Course'
        )
        self.course_overview = CourseOverviewFactory.create(
            id=self.course.id
        )
        self.global_staff = GlobalStaffFactory()
        self.request_factory = RequestFactory()

    def test_method_with_no_certificate(self):
        """ Test the case when there is no certificate for a user for a specific course. """
        course = CourseFactory.create(
            org='edx',
            number='honor',
            display_name='Course 1'
        )
        # Also check query count for 'is_certificate_invalidated' method.
        with self.assertNumQueries(1):
            assert not is_certificate_invalidated(self.student, course.id)

    @ddt.data(
        CertificateStatuses.generating,
        CertificateStatuses.downloadable,
        CertificateStatuses.notpassing,
        CertificateStatuses.error,
        CertificateStatuses.unverified,
        CertificateStatuses.deleted,
        CertificateStatuses.unavailable,
    )
    def test_method_with_invalidated_cert(self, status):
        """ Verify that if certificate is marked as invalid than method will return
        True. """
        generated_cert = self._generate_cert(status)
        self._invalidate_certificate(generated_cert, True)
        assert is_certificate_invalidated(self.student, self.course.id)

    @ddt.data(
        CertificateStatuses.generating,
        CertificateStatuses.downloadable,
        CertificateStatuses.notpassing,
        CertificateStatuses.error,
        CertificateStatuses.unverified,
        CertificateStatuses.deleted,
        CertificateStatuses.unavailable,
    )
    def test_method_with_inactive_invalidated_cert(self, status):
        """ Verify that if certificate is valid but it's invalidated status is
        false than method will return false. """
        generated_cert = self._generate_cert(status)
        self._invalidate_certificate(generated_cert, False)
        assert not is_certificate_invalidated(self.student, self.course.id)

    @ddt.data(
        CertificateStatuses.generating,
        CertificateStatuses.downloadable,
        CertificateStatuses.notpassing,
        CertificateStatuses.error,
        CertificateStatuses.unverified,
        CertificateStatuses.deleted,
        CertificateStatuses.unavailable,
    )
    def test_method_with_all_statues(self, status):
        """ Verify method return True if certificate has valid status but it is
        marked as invalid in CertificateInvalidation table. """
        certificate = self._generate_cert(status)
        CertificateInvalidationFactory.create(
            generated_certificate=certificate,
            invalidated_by=self.global_staff,
            active=True
        )
        # Also check query count for 'is_certificate_invalidated' method.
        with self.assertNumQueries(2):
            assert is_certificate_invalidated(self.student, self.course.id)

    def _invalidate_certificate(self, certificate, active):
        """ DRY helper to mark a certificate as invalid (active flag controls
        whether the invalidation entry is in force). """
        CertificateInvalidationFactory.create(
            generated_certificate=certificate,
            invalidated_by=self.global_staff,
            active=active
        )
        # Invalidate user certificate
        certificate.invalidate()
        assert not certificate.is_valid()

    def _generate_cert(self, status):
        """ DRY helper to generate a verified certificate with the given status. """
        return GeneratedCertificateFactory.create(
            user=self.student,
            course_id=self.course.id,
            status=status,
            mode='verified'
        )
class CertificateGetTests(SharedModuleStoreTestCase):
    """Tests for the `test_get_certificate_for_user` helper function. """
    # Frozen "now" shared by all fixtures so created-timestamps are comparable.
    now = timezone.now()

    @classmethod
    def setUpClass(cls):
        """Freeze time, then create one student with certificates in a web-cert
        course, a PDF-cert course and a since-deleted course, plus a cert-less
        student and a course with no certificates."""
        cls.freezer = freeze_time(cls.now)
        cls.freezer.start()
        super().setUpClass()
        cls.student = UserFactory()
        cls.student_no_cert = UserFactory()
        cls.uuid = uuid.uuid4().hex
        cls.nonexistent_course_id = CourseKey.from_string('course-v1:some+fake+course')
        cls.web_cert_course = CourseFactory.create(
            org='edx',
            number='verified_1',
            display_name='Verified Course 1',
            cert_html_view_enabled=True
        )
        cls.pdf_cert_course = CourseFactory.create(
            org='edx',
            number='verified_2',
            display_name='Verified Course 2',
            cert_html_view_enabled=False
        )
        cls.no_cert_course = CourseFactory.create(
            org='edx',
            number='verified_3',
            display_name='Verified Course 3',
        )
        # certificate for the first course
        GeneratedCertificateFactory.create(
            user=cls.student,
            course_id=cls.web_cert_course.id,
            status=CertificateStatuses.downloadable,
            mode='verified',
            download_url='www.google.com',
            grade="0.88",
            verify_uuid=cls.uuid,
        )
        # certificate for the second course
        GeneratedCertificateFactory.create(
            user=cls.student,
            course_id=cls.pdf_cert_course.id,
            status=CertificateStatuses.downloadable,
            mode='honor',
            download_url='www.gmail.com',
            grade="0.99",
            verify_uuid=cls.uuid,
        )
        # certificate for a course that will be deleted
        GeneratedCertificateFactory.create(
            user=cls.student,
            course_id=cls.nonexistent_course_id,
            status=CertificateStatuses.downloadable
        )

    @classmethod
    def tearDownClass(cls):
        # Stop the time freeze started in setUpClass.
        super().tearDownClass()
        cls.freezer.stop()

    def test_get_certificate_for_user(self):
        """
        Test to get a certificate for a user for a specific course.
        """
        cert = get_certificate_for_user(self.student.username, self.web_cert_course.id)
        assert cert['username'] == self.student.username
        assert cert['course_key'] == self.web_cert_course.id
        assert cert['created'] == self.now
        assert cert['type'] == CourseMode.VERIFIED
        assert cert['status'] == CertificateStatuses.downloadable
        assert cert['grade'] == '0.88'
        assert cert['is_passing'] is True
        assert cert['download_url'] == 'www.google.com'

    def test_get_certificate_for_user_id(self):
        """
        Test to get a certificate for a user id for a specific course.
        """
        cert = get_certificate_for_user_id(self.student, self.web_cert_course.id)
        assert cert is not None
        assert cert.course_id == self.web_cert_course.id
        assert cert.mode == CourseMode.VERIFIED
        assert cert.status == CertificateStatuses.downloadable
        assert cert.grade == '0.88'

    def test_get_certificates_for_user(self):
        """
        Test to get all the certificates for a user
        """
        certs = get_certificates_for_user(self.student.username)
        assert len(certs) == 2
        assert certs[0]['username'] == self.student.username
        assert certs[1]['username'] == self.student.username
        assert certs[0]['course_key'] == self.web_cert_course.id
        assert certs[1]['course_key'] == self.pdf_cert_course.id
        assert certs[0]['created'] == self.now
        assert certs[1]['created'] == self.now
        assert certs[0]['type'] == CourseMode.VERIFIED
        assert certs[1]['type'] == CourseMode.HONOR
        assert certs[0]['status'] == CertificateStatuses.downloadable
        assert certs[1]['status'] == CertificateStatuses.downloadable
        assert certs[0]['is_passing'] is True
        assert certs[1]['is_passing'] is True
        assert certs[0]['grade'] == '0.88'
        assert certs[1]['grade'] == '0.99'
        assert certs[0]['download_url'] == 'www.google.com'
        assert certs[1]['download_url'] == 'www.gmail.com'

    def test_get_certificates_for_user_by_course_keys(self):
        """
        Test to get certificates for a user for certain course keys,
        in a dictionary indexed by those course keys.
        """
        certs = get_certificates_for_user_by_course_keys(
            user=self.student,
            course_keys={self.web_cert_course.id, self.no_cert_course.id},
        )
        # Only the course with a certificate appears in the result.
        assert set(certs.keys()) == {self.web_cert_course.id}
        cert = certs[self.web_cert_course.id]
        assert cert['username'] == self.student.username
        assert cert['course_key'] == self.web_cert_course.id
        assert cert['download_url'] == 'www.google.com'

    def test_no_certificate_for_user(self):
        """
        Test the case when there is no certificate for a user for a specific course.
        """
        assert get_certificate_for_user(self.student_no_cert.username, self.web_cert_course.id) is None

    def test_no_certificates_for_user(self):
        """
        Test the case when there are no certificates for a user.
        """
        assert get_certificates_for_user(self.student_no_cert.username) == []

    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
    def test_get_web_certificate_url(self):
        """
        Test the get_certificate_url with a web cert course
        """
        expected_url = reverse(
            'certificates:render_cert_by_uuid',
            kwargs=dict(certificate_uuid=self.uuid)
        )
        cert_url = get_certificate_url(
            user_id=self.student.id,
            course_id=self.web_cert_course.id,
            uuid=self.uuid
        )
        assert expected_url == cert_url
        # NOTE(review): the block below repeats the check above verbatim —
        # possibly a copy/paste leftover; confirm whether a second, different
        # scenario was intended here.
        expected_url = reverse(
            'certificates:render_cert_by_uuid',
            kwargs=dict(certificate_uuid=self.uuid)
        )
        cert_url = get_certificate_url(
            user_id=self.student.id,
            course_id=self.web_cert_course.id,
            uuid=self.uuid
        )
        assert expected_url == cert_url

    @patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
    def test_get_pdf_certificate_url(self):
        """
        Test the get_certificate_url with a pdf cert course
        """
        cert_url = get_certificate_url(
            user_id=self.student.id,
            course_id=self.pdf_cert_course.id,
            uuid=self.uuid
        )
        assert 'www.gmail.com' == cert_url

    def test_get_certificate_with_deleted_course(self):
        """
        Test the case when there is a certificate but the course was deleted.
        """
        assert get_certificate_for_user(self.student.username, self.nonexistent_course_id) is None
class GenerateUserCertificatesTest(ModuleStoreTestCase):
"""Tests for generating certificates for students. """
def setUp(self):
super().setUp()
self.user = UserFactory()
self.course_run = CourseFactory()
self.course_run_key = self.course_run.id # pylint: disable=no-member
self.enrollment = CourseEnrollmentFactory(
user=self.user,
course_id=self.course_run_key,
is_active=True,
mode=CourseMode.VERIFIED,
)
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': False})
def test_cert_url_empty_with_invalid_certificate(self):
| |
place where a Violation
# can be raised: it is caught and passed cleanly to the parent. If
# it happens anywhere else, or if any other exception is raised, the
# connection will be dropped.
# the downside to this approach is that .slice happens before
# .registerReference, so any late-validation being done in .slice
# will not be able to detect the fact that this object has already
# begun serialization. Validation performed in .next is ok.
# also note that if .slice is a generator, any exception it raises
# will not occur until .next is called, which happens *after* the
# slicer has been pushed. This check is only useful for .slice
# methods which are *not* generators.
itr = slicer.slice(topSlicer.streamable, self)
next = iter(itr).next
# we are now committed to sending the OPEN token, meaning that
# failures after this point will cause an ABORT/CLOSE to be sent
openID = None
if slicer.sendOpen:
openID = self.sendOpen()
if slicer.trackReferences:
topSlicer.registerReference(openID, obj)
# note that the only reason to hold on to the openID here is for
# the debug/optional copy in the CLOSE token. Consider ripping
# this code out if we decide to stop sending that copy.
slicertuple = (slicer, next, openID)
self.slicerStack.append(slicertuple)
def popSlicer(self):
slicer, next, openID = self.slicerStack.pop()
if openID is not None:
self.sendClose(openID)
if self.debugSend: print "pop", slicer
def describeSend(self):
where = []
for i in self.slicerStack:
try:
piece = i[0].describe()
except:
log.msg("Banana.describeSend")
log.err()
piece = "???"
where.append(piece)
return ".".join(where)
def setOutgoingVocabulary(self, vocabStrings):
"""Schedule a replacement of the outbound VOCAB table.
Higher-level code may call this at any time with a list of strings.
Immediately after the replacement has occured, the outbound VOCAB
table will contain all of the strings in vocabStrings and nothing
else. This table tells the token-sending code which strings to
abbreviate with short integers in a VOCAB token.
This function can be called at any time (even while the protocol is
in the middle of serializing and transmitting some other object)
because it merely schedules a replacement to occur at some point in
the future. A special marker (the ReplaceVocabSlicer) is placed in
the outbound queue, and the table replacement will only happend after
all the items ahead of that marker have been serialized. At the same
time the table is replaced, a (set-vocab..) sequence will be
serialized towards the far end. This insures that we set our outbound
table at the same 'time' as the far end starts using it.
"""
# build a VOCAB message, send it, then set our outgoingVocabulary
# dictionary to start using the new table
assert isinstance(vocabStrings, (list, tuple))
for s in vocabStrings:
assert isinstance(s, str)
vocabDict = dict(zip(vocabStrings, range(len(vocabStrings))))
s = ReplaceVocabSlicer(vocabDict)
# the ReplaceVocabSlicer does some magic to insure the VOCAB message
# does not use vocab tokens itself. This would be legal (sort of a
# differential compression), but confusing. It accomplishes this by
# clearing our self.outgoingVocabulary dict when it begins to be
# serialized.
self.send(s)
# likewise, when it finishes, the ReplaceVocabSlicer replaces our
# self.outgoingVocabulary dict when it has finished sending the
# strings. It is important that this occur in the serialization code,
# or somewhen very close to it, because otherwise there could be a
# race condition that could result in some strings being vocabized
# with the wrong keys.
def addToOutgoingVocabulary(self, value):
    """Schedule 'value' for addition to the outbound VOCAB table.

    This may be called at any time. If the string is already scheduled
    for addition, or if it is already in the VOCAB table, it will be
    ignored. (TODO: does this introduce an annoying-but-not-fatal race
    condition?) The string will not actually be added to the table until
    the outbound serialization queue has been serviced.
    """
    assert isinstance(value, str)
    if value in self.outgoingVocabulary:
        return
    if value in self.pendingVocabAdditions:
        return
    # BUG FIX: this used to be .add(str) -- adding the builtin 'str'
    # type object instead of the string itself -- so pending additions
    # were never actually recorded and duplicates could be scheduled.
    self.pendingVocabAdditions.add(value)
    s = AddVocabSlicer(value)
    self.send(s)
def outgoingVocabTableWasReplaced(self, newTable):
    # Called by the ReplaceVocabSlicer to install the new table.  It
    # must certainly *not* be called by higher-level user code.
    self.outgoingVocabulary = newTable
    # The next free index is one past the largest index now in use
    # (or 0 for an empty table).
    indices = newTable.values()
    if indices:
        self.nextAvailableOutgoingVocabularyIndex = max(indices) + 1
    else:
        self.nextAvailableOutgoingVocabularyIndex = 0
def allocateEntryInOutgoingVocabTable(self, string):
    """Reserve and return the next free outbound VOCAB index for 'string'.

    The string must not already be in the table; it is removed from
    the pending-additions set now that an index has been assigned.
    """
    assert string not in self.outgoingVocabulary
    # TODO: a softer failure mode for this assert is to re-send the
    # existing key. To make sure that really happens, though, we have to
    # remove it from the vocab table, otherwise we'll tokenize the
    # string. If we can insure that, then this failure mode would waste
    # time and network but would otherwise be harmless.
    #
    # return self.outgoingVocabulary[string]
    # BUG FIX: this used to be .remove(self.value), which raised
    # AttributeError (there is no such attribute) instead of removing
    # the string that is being allocated.
    self.pendingVocabAdditions.remove(string)
    index = self.nextAvailableOutgoingVocabularyIndex
    self.nextAvailableOutgoingVocabularyIndex = index + 1
    return index
def outgoingVocabTableWasAmended(self, index, string):
    # Record that 'string' is now abbreviated as 'index' on the wire.
    self.outgoingVocabulary.update({string: index})
# these methods define how we emit low-level tokens
def sendPING(self, number=0):
    # Emit a PING token; a nonzero sequence number is sent as a prefix.
    write = self.transport.write
    if number:
        int2b128(number, write)
    write(PING)
def sendPONG(self, number):
    # Emit a PONG token; a nonzero sequence number is sent as a prefix.
    write = self.transport.write
    if number:
        int2b128(number, write)
    write(PONG)
def sendOpen(self):
    # Allocate the next OPEN sequence id, emit the OPEN token, and
    # return the id so the caller can close the sequence later.
    openID, self.openCount = self.openCount, self.openCount + 1
    int2b128(openID, self.transport.write)
    self.transport.write(OPEN)
    return openID
def sendToken(self, obj):
    """Serialize one primitive token: an int/long, a float, or a string.

    Non-negative ints below 2**31 go out as INT, negatives in
    [-2**31, 0) as NEG; anything larger is sent as LONGINT/LONGNEG with
    an explicit byte-length prefix.  Strings present in the outbound
    vocabulary table are abbreviated as VOCAB tokens, otherwise they
    are sent verbatim as STRING tokens.

    Raises BananaError for any other type.
    """
    write = self.transport.write
    if isinstance(obj, (int, long)):
        if obj >= 2**31:
            # Too big for a plain INT token: ship the magnitude as a
            # length-prefixed byte string.
            s = long_to_bytes(obj)
            int2b128(len(s), write)
            write(LONGINT)
            write(s)
        elif obj >= 0:
            int2b128(obj, write)
            write(INT)
        elif -obj > 2**31:  # NEG is [-2**31, 0)
            s = long_to_bytes(-obj)
            int2b128(len(s), write)
            write(LONGNEG)
            write(s)
        else:
            int2b128(-obj, write)
            write(NEG)
    elif isinstance(obj, float):
        write(FLOAT)
        write(struct.pack("!d", obj))
    elif isinstance(obj, str):
        # 'in' instead of the deprecated dict.has_key().
        if obj in self.outgoingVocabulary:
            symbolID = self.outgoingVocabulary[obj]
            int2b128(symbolID, write)
            write(VOCAB)
        else:
            self.maybeVocabizeString(obj)
            int2b128(len(obj), write)
            write(STRING)
            write(obj)
    else:
        # call form instead of the Python-2-only 'raise X, msg' syntax
        raise BananaError("could not send object: %s" % repr(obj))
def maybeVocabizeString(self, string):
    """Hook for deciding whether to promote 'string' into the VOCAB table.

    Deliberately a no-op for now: the 'if False' guard keeps the
    intended call site in place without enabling it.
    """
    # TODO: keep track of the last 30 strings we've sent in full. If this
    # string appears more than 3 times on that list, create a vocab item
    # for it. Make sure we don't start using the vocab number until the
    # ADDVOCAB token has been queued.
    if False:
        self.addToOutgoingVocabulary(string)
def sendClose(self, openID):
    # Close the OPEN sequence identified by openID.
    write = self.transport.write
    int2b128(openID, write)
    write(CLOSE)
def sendAbort(self, count=0):
    # Emit an ABORT token carrying 'count'.
    write = self.transport.write
    int2b128(count, write)
    write(ABORT)
def sendError(self, msg):
    # Emit an ERROR token carrying 'msg' (truncated to fit the size
    # limit), then drop the connection.
    if not self.transport:
        return
    write = self.transport.write
    if len(msg) > SIZE_LIMIT:
        msg = msg[:SIZE_LIMIT-10] + "..."
    int2b128(len(msg), write)
    write(ERROR)
    write(msg)
    # now you should drop the connection
    self.transport.loseConnection()
def sendFailed(self, f):
# call this if an exception is raised in transmission. The Failure
# will be logged and the connection will be dropped. This is
# suitable for use as an errback handler.
print "SendBanana.sendFailed:", f
log.msg("Sendfailed.sendfailed")
log.err(f)
try:
if self.transport:
self.transport.loseConnection()
except:
print "exception during transport.loseConnection"
log.err()
try:
self.rootSlicer.connectionLost(f)
except:
print "exception during rootSlicer.connectionLost"
log.err()
### ReceiveBanana
# called with dataReceived()
# calls self.receivedObject()

# Receive-side configuration: class-level defaults, overridable per
# instance or subclass.
unslicerClass = RootUnslicer  # factory used by initUnslicer() for the stack root
debugReceive = False
logViolations = False
logReceiveErrors = True
# Keepalive/disconnect settings (presumably timeout intervals and their
# pending timer objects -- TODO confirm against the timer code elsewhere
# in this file).
useKeepalives = False
keepaliveTimeout = None
keepaliveTimer = None
disconnectTimeout = None
disconnectTimer = None
def initReceive(self):
    # Reset all receive-side state for a fresh connection.
    self.negotiated = False
    self.connectionAbandoned = False
    # to pre-negotiate, set the negotiation parameters and set
    # self.negotiated to True. It might instead make sense to fill
    # self.buffer with the inbound negotiation block.
    self.buffer = stringchain.StringChain()
    self.incomingVocabulary = {}
    # OPEN-sequence tracking: inOpen is set during the Index Phase,
    # and opentype accumulates the Index Tokens seen so far.
    self.inOpen = False
    self.opentype = []
    self.skipBytes = 0     # used to discard a single long token
    self.discardCount = 0  # used to discard non-primitive objects
    self.exploded = None   # last-ditch error catcher
def initUnslicer(self):
    # Build the receive stack with a single root unslicer on it and
    # reset the per-connection object-reference table.
    root = self.unslicerClass(self)
    self.rootUnslicer = root
    self.receiveStack = [root]
    self.objectCounter = 0
    self.objects = {}
def printStack(self, verbose=0):
    """Debugging aid: dump the current receive stack to stdout.

    With verbose set, also prints each unslicer's instance dict
    (minus the 'protocol' backreference, to keep the output short).
    """
    print "STACK:"
    for s in self.receiveStack:
        if verbose:
            d = s.__dict__.copy()
            del d['protocol']
            print " %s: %s" % (s, d)
        else:
            print " %s" % s
def setObject(self, count, obj):
    # Offer the (count -> obj) binding to every unslicer on the stack,
    # starting from the top (most recently pushed) and working down.
    for unslicer in reversed(self.receiveStack):
        unslicer.setObject(count, obj)
def getObject(self, count):
    """Look up object reference 'count', trying the top of the receive
    stack first and working down.

    Raises ValueError if no unslicer on the stack knows the reference.
    """
    for i in range(len(self.receiveStack)-1, -1, -1):
        obj = self.receiveStack[i].getObject(count)
        if obj is not None:
            return obj
    # call form instead of the Python-2-only 'raise X, msg' syntax
    raise ValueError("dangling reference '%d'" % count)
def replaceIncomingVocabulary(self, vocabDict):
    """Install the far end's VOCAB table.

    vocabDict maps small integer to string; this should be called in
    response to an OPEN(set-vocab) sequence.
    """
    self.incomingVocabulary = vocabDict
def addIncomingVocabulary(self, key, value):
# called in response | |
pilot; yet, wert thou as far
As that vast shore wash'd with the farthest sea,
I would adventure for such merchandise.
Jul. Thou knowest the mask of night is on my face;
Else would a maiden blush bepaint my cheek
For that which thou hast heard me speak to-night.
Fain would I dwell on form- fain, fain deny
What I have spoke; but farewell compliment!
Dost thou love me, I know thou wilt say 'Ay';
And I will take thy word. Yet, if thou swear'st,
Thou mayst prove false. At lovers' perjuries,
They say Jove laughs. O gentle Romeo,
If thou dost love, pronounce it faithfully.
Or if thou thinkest I am too quickly won,
I'll frown, and be perverse, and say thee nay,
So thou wilt woo; but else, not for the world.
In truth, fair Montague, I am too fond,
And therefore thou mayst think my haviour light;
But trust me, gentleman, I'll prove more true
Than those that have more cunning to be strange.
I should have been more strange, I must confess,
But that thou overheard'st, ere I was ware,
My true-love passion. Therefore pardon me,
And not impute this yielding to light love,
Which the dark night hath so discovered.
Rom. Lady, by yonder blessed moon I swear,
That tips with silver all these fruit-tree tops-
Jul. O, swear not by the moon, th' inconstant moon,
That monthly changes in her circled orb,
Lest that thy love prove likewise variable.
Rom. What shall I swear by?
Jul. Do not swear at all;
Or if thou wilt, swear by thy gracious self,
Which is the god of my idolatry,
And I'll believe thee.
Rom. If my heart's dear love-
Jul. Well, do not swear. Although I joy in thee,
I have no joy of this contract to-night.
It is too rash, too unadvis'd, too sudden;
Too like the lightning, which doth cease to be
Ere one can say 'It lightens.' Sweet, good night!
This bud of love, by summer's ripening breath,
May prove a beauteous flow'r when next we meet.
Good night, good night! As sweet repose and rest
Come to thy heart as that within my breast!
Rom. O, wilt thou leave me so unsatisfied?
Jul. What satisfaction canst thou have to-night?
Rom. Th' exchange of thy love's faithful vow for mine.
Jul. I gave thee mine before thou didst request it;
And yet I would it were to give again.
Rom. Would'st thou withdraw it? For what purpose, love?
Jul. But to be frank and give it thee again.
And yet I wish but for the thing I have.
My bounty is as boundless as the sea,
My love as deep; the more I give to thee,
The more I have, for both are infinite.
I hear some noise within. Dear love, adieu!
[Nurse] calls within.
Anon, good nurse! Sweet Montague, be true.
Stay but a little, I will come again. [Exit.]
Rom. O blessed, blessed night! I am afeard,
Being in night, all this is but a dream,
Too flattering-sweet to be substantial.
Enter Juliet above.
Jul. Three words, dear Romeo, and good night indeed.
If that thy bent of love be honourable,
Thy purpose marriage, send me word to-morrow,
By one that I'll procure to come to thee,
Where and what time thou wilt perform the rite;
And all my fortunes at thy foot I'll lay
And follow thee my lord throughout the world.
Nurse. (within) Madam!
Jul. I come, anon.- But if thou meanest not well,
I do beseech thee-
Nurse. (within) Madam!
Jul. By-and-by I come.-
To cease thy suit and leave me to my grief.
To-morrow will I send.
Rom. So thrive my soul-
Jul. A thousand times good night! Exit.
Rom. A thousand times the worse, to want thy light!
Love goes toward love as schoolboys from their books;
But love from love, towards school with heavy looks.
Enter Juliet again, [above].
Jul. Hist! Romeo, hist! O for a falconer's voice
To lure this tassel-gentle back again!
Bondage is hoarse and may not speak aloud;
Else would I tear the cave where Echo lies,
And make her airy tongue more hoarse than mine
With repetition of my Romeo's name.
Romeo!
Rom. It is my soul that calls upon my name.
How silver-sweet sound lovers' tongues by night,
Like softest music to attending ears!
Jul. Romeo!
Rom. My dear?
Jul. At what o'clock to-morrow
Shall I send to thee?
Rom. By the hour of nine.
Jul. I will not fail. 'Tis twenty years till then.
I have forgot why I did call thee back.
Rom. Let me stand here till thou remember it.
Jul. I shall forget, to have thee still stand there,
Rememb'ring how I love thy company.
Rom. And I'll still stay, to have thee still forget,
Forgetting any other home but this.
Jul. 'Tis almost morning. I would have thee gone-
And yet no farther than a wanton's bird,
That lets it hop a little from her hand,
Like a poor prisoner in his twisted gyves,
And with a silk thread plucks it back again,
So loving-jealous of his liberty.
Rom. I would I were thy bird.
Jul. Sweet, so would I.
Yet I should kill thee with much cherishing.
Good night, good night! Parting is such sweet sorrow,
That I shall say good night till it be morrow.
[Exit.]
Rom. Sleep dwell upon thine eyes, peace in thy breast!
Would I were sleep and peace, so sweet to rest!
Hence will I to my ghostly father's cell,
His help to crave and my dear hap to tell.
Exit
Scene III.
Friar Laurence's cell.
Enter Friar, [Laurence] alone, with a basket.
Friar. The grey-ey'd morn smiles on the frowning night,
Check'ring the Eastern clouds with streaks of light;
And flecked darkness like a drunkard reels
From forth day's path and Titan's fiery wheels.
Now, ere the sun advance his burning eye
The day to cheer and night's dank dew to dry,
I must up-fill this osier cage of ours
With baleful weeds and precious-juiced flowers.
The earth that's nature's mother is her tomb.
What is her burying grave, that is her womb;
And from her womb children of divers kind
We sucking on her natural bosom find;
Many for many virtues excellent,
None but for some, and yet all different.
O, mickle is the powerful grace that lies
In plants, herbs, stones, and their true qualities;
For naught so vile that on the earth doth live
But to the earth some special good doth give;
Nor aught so good but, strain'd from that fair use,
Revolts from true birth, stumbling on abuse.
Virtue itself turns vice, being misapplied,
And vice sometime's by action dignified.
Within the infant rind of this small flower
Poison hath residence, and medicine power;
For this, being smelt, with that part cheers each part;
Being tasted, slays all senses with the heart.
Two such opposed kings encamp them still
In man as well as herbs- grace and rude will;
And where the worser is predominant,
Full soon the canker death eats up that plant.
Enter Romeo.
Rom. Good morrow, father.
Friar. Benedicite!
What early tongue so sweet saluteth me?
Young son, it argues a distempered head
So soon to bid good morrow to thy bed.
Care keeps his watch in every old man's eye,
And where care lodges sleep will never lie;
But where unbruised youth with unstuff'd brain
Doth couch his limbs, there golden sleep doth reign.
Therefore thy earliness doth me assure
Thou art uprous'd with some distemp'rature;
Or if not so, then here I hit it right-
Our Romeo hath not been in bed to-night.
Rom. That last is true-the sweeter rest | |
# <gh_stars>1-10
#!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# This script builds a Python source distribution that can built be installed
# via pip install. This must be run in a git repository to determine the files
# to package. Also as a prerequisite, SWIG must be run as the generated files
# are part of the package. To create the distribution, in this directory, run
# "python setup_pip.py sdist", this creates a tar.gz file under ./dist .
from __future__ import print_function
import os, os.path, re, shutil, site, sys
from setuptools import setup, Distribution
from distutils.extension import Extension
import distutils.sysconfig
import distutils.ccompiler
from distutils.errors import CompileError, LinkError
import subprocess
from subprocess import call
import setuptools.command.install
import setuptools.command.build_ext
# msg --
# Print a message to stderr.
def msg(s):
    # Print a diagnostic to stderr, prefixed with this script's name.
    prefix = os.path.basename(__file__)
    sys.stderr.write(prefix + ": " + s + "\n")
# die --
# For failures, show a message and exit.
def die(s):
    # Report the failure and terminate with a nonzero exit status.
    msg(s)
    raise SystemExit(1)
# build_commands --
# Run a sequence of commands, and die if any fail.
def build_commands(commands, build_dir, build_env):
    # Run each shell command in build_dir with the given environment;
    # abort on the first nonzero exit status.
    for cmd in commands:
        callargs = ['sh', '-c', cmd]
        verbose_command = '"' + '" "'.join(callargs) + '"'
        print('running: ' + verbose_command)
        status = call(callargs, cwd=build_dir, env=build_env)
        if status != 0:
            die('build command failed: ' + verbose_command)
# check_needed_dependencies --
# Make a quick check of any needed library dependencies, and
# add to the library path and include path as needed. If a library
# is not found, it is not definitive.
def check_needed_dependencies(builtins, inc_paths, lib_paths):
    # Make a quick check of any needed library dependencies, and add to
    # the library path and include path as needed.  A missing library is
    # reported but is not treated as fatal, since this check is not
    # definitive.
    library_dirs = get_library_dirs()
    compiler = distutils.ccompiler.new_compiler()
    distutils.sysconfig.customize_compiler(compiler)
    compiler.set_library_dirs(library_dirs)
    missing = []
    for name, libname, instructions in builtins:
        found = compiler.find_library_file(library_dirs, libname)
        if found is not None:
            # Assume the usual <prefix>/lib layout and derive the
            # matching include directory from it.
            package_top = os.path.dirname(os.path.dirname(found))
            inc_paths.append(os.path.join(package_top, 'include'))
            lib_paths.append(os.path.join(package_top, 'lib'))
        else:
            msg(libname + ": missing")
            msg(instructions)
            msg("after installing it, set LD_LIBRARY_PATH or DYLD_LIBRARY_PATH")
            missing.append(libname)
    # XXX: we are not accounting for other directories that might be
    # discoverable via /sbin/ldconfig. It might be better to write a tiny
    # compile using -lsnappy, -lz...
    #
    #if len(missing) > 0:
    #    die("install packages for: " + str(missing))
# find_executable --
# Locate an executable in the PATH.
def find_executable(exename, path):
    # Locate an executable via `which`; die if it is not on the PATH.
    # The directory containing it is appended to `path` (a list) so a
    # usable PATH can be assembled later.
    proc = subprocess.Popen(['which', exename], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate('')
    out = str(out)  # needed for Python3
    if out == '':
        if err != '':
            err = ': "' + err + '"'
        die('"' + exename + '": not found in path' + err)
    dirname = os.path.dirname(out)
    if dirname not in path:
        path.append(dirname)
# get_build_path --
# Create a PATH that can be used for installation. Apparently,
# installation commands are run with a restricted PATH, and
# autoreconf/aclocal will not normally be found.
def get_build_path():
    # Create a PATH that can be used for installation: setup commands
    # run with a restricted PATH, and autoreconf/aclocal will not
    # normally be found there.
    extra_dirs = []
    for tool in ('autoreconf', 'aclocal'):
        find_executable(tool, extra_dirs)
    return os.environ['PATH'] + ':' + ':'.join(extra_dirs)
# get_compile_flags --
# Get system specific compile flags. Return a triple: C preprocessor
# flags, C compilation flags and linker flags.
def get_compile_flags(inc_paths, lib_paths):
    """Return platform-specific (cppflags, cflags, ldflags) lists."""
    if sys.platform == 'win32' and cc == 'msvc':
        cflags = ['/arch:SSE2', '/EHsc']
        cppflags = []
        ldflags = []
        # Windows untested and incomplete, don't claim that it works.
        die('Windows is not supported by this setup script')
    else:
        # Suppress warnings building SWIG generated code
        cflags = ['-w', '-Wno-sign-conversion', '-std=c11']
        cppflags = ['-I' + p for p in inc_paths]
        cppflags.append('-DHAVE_CONFIG_H')
        ldflags = ['-L' + p for p in lib_paths]
        if sys.platform == 'darwin':
            cflags += ['-arch', 'x86_64']
    return (cppflags, cflags, ldflags)
# get_sources_curdir --
# Get a list of sources from the current directory
def get_sources_curdir():
    # Ask git for the list of files tracked in HEAD; this only works
    # when run inside a git repository.
    devnull = open(os.devnull, 'w')
    gitproc = subprocess.Popen(
        ['git', 'ls-tree', '-r', '--name-only', 'HEAD^{tree}'],
        stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    sources = [line.rstrip() for line in gitproc.stdout.readlines()]
    err = gitproc.stderr.read()
    gitproc.wait()
    subret = gitproc.returncode
    if subret != 0 or err:
        msg("git command to get sources returned " + str(subret) +
            ", error=" + str(err))
        die("this command must be run in a git repository")
    return sources
# get_wiredtiger_versions --
# Read the version information from the RELEASE_INFO file.
def get_wiredtiger_versions(wt_dir):
    """Return (wt_ver, wt_full_ver) strings read from wt_dir/RELEASE_INFO."""
    versions = {}
    for line in open(os.path.join(wt_dir, 'RELEASE_INFO')):
        # Only evaluate the three VERSION assignment lines.
        if re.match(r'WIREDTIGER_VERSION_(?:MAJOR|MINOR|PATCH)=', line):
            exec(line, versions)
    wt_ver = '%d.%d' % (versions['WIREDTIGER_VERSION_MAJOR'],
                        versions['WIREDTIGER_VERSION_MINOR'])
    wt_full_ver = wt_ver + '.%d' % (versions['WIREDTIGER_VERSION_PATCH'])
    return (wt_ver, wt_full_ver)
# get_library_dirs
# Build a plausible set of library directories.
def get_library_dirs():
    # Build a plausible set of library directories, including anything
    # named by the usual library-path environment variables.
    dirs = [
        "/usr/local/lib",
        "/usr/local/lib64",
        "/lib/x86_64-linux-gnu",
        "/opt/local/lib",
        "/usr/lib",
        "/usr/lib64",
    ]
    for var in ('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'LIBRARY_PATH'):
        if var in os.environ:
            dirs.extend(os.environ[var].split(':'))
    # Deduplicate and keep only directories that actually exist.
    return list(set(filter(os.path.isdir, dirs)))
# source_filter
# Make any needed changes to the sources list. Any entry that
# needs to be moved is returned in a dictionary.
def source_filter(sources):
    """Filter `sources` down to the files we package.

    Returns (result, movers); movers maps a destination path to the
    source path it must be copied from (lang/python files move to the
    top level, and pip_init.py becomes the package __init__.py).
    """
    result = []
    movers = dict()
    py_dir = os.path.join('lang', 'python')
    pywt_dir = os.path.join(py_dir, 'wiredtiger')
    pywt_prefix = pywt_dir + os.path.sep
    for src in sources:
        if not re.match(source_regex, src):
            continue
        dest = src
        # move all lang/python files to the top level.
        if dest.startswith(pywt_prefix):
            dest = os.path.basename(dest)
            if dest == 'pip_init.py':
                dest = '__init__.py'
        if dest != src:
            movers[dest] = src
        result.append(dest)
    # Add SWIG generated files
    result.append('wiredtiger.py')
    movers['wiredtiger.py'] = os.path.join(pywt_dir, '__init__.py')
    result.append(os.path.join(py_dir, 'wiredtiger_wrap.c'))
    return result, movers
################################################################
# Do some initial setup and checks.
this_abs_script = os.path.abspath(__file__)
this_dir = os.path.dirname(this_abs_script)
# The first non-option word on the command line is the pip/setup
# command being run (e.g. "sdist", "install").
pip_command = None
for arg in sys.argv[1:]:
    if arg[0] != '-' and pip_command == None:
        pip_command = arg
        break
# Find the WiredTiger source tree: either we are running from
# lang/python inside the tree, or from a directory that has a LICENSE
# file at its top level (presumably an unpacked distribution -- TODO
# confirm).
if this_dir.endswith(os.sep + os.path.join('lang', 'python')):
    wt_dir = os.path.dirname(os.path.dirname(this_dir))
    os.chdir(wt_dir)
elif os.path.isfile(os.path.join(this_dir, 'LICENSE')):
    wt_dir = this_dir
else:
    die('running from an unknown directory')
# This script predates Python 3 support; refuse to run under it.
python3 = (sys.version_info[0] > 2)
if python3:
    die('Python3 is not yet supported')
# Ensure that Extensions won't be built for 32 bit,
# that won't work with WiredTiger.
if sys.maxsize < 2**32:
    die('need to be running on a 64 bit system, and have a 64 bit Python')
# Paths used by the rest of the build; the SWIG-generated shared
# object ends up next to the Python sources.
python_rel_dir = os.path.join('lang', 'python')
build_dir = os.path.join(wt_dir, 'build_posix')
makefile = os.path.join(build_dir, 'Makefile')
built_sentinal = os.path.join(build_dir, 'built.txt')
conf_make_dir = 'build_posix'
wt_swig_lib_name = os.path.join(python_rel_dir, '_wiredtiger.so')
################################################################
# Put together build options for the WiredTiger extension.
# Package metadata: the long description prepends a summary sentence
# to the project's top-level README.
short_description = 'high performance, scalable, production quality, ' + \
    'NoSQL, Open Source extensible platform for data management'
long_description = 'WiredTiger is a ' + short_description + '.\n\n' + \
    open(os.path.join(wt_dir, 'README')).read()
wt_ver, wt_full_ver = get_wiredtiger_versions(wt_dir)
build_path = get_build_path()
# We only need a small set of directories to build a WT library,
# we also include any files at the top level.
source_regex = r'^(?:(?:api|build_posix|ext|lang/python|src|dist)/|[^/]*$)'
# The builtins that we include in this distribution.
builtins = [
    # [ name, libname, instructions ]
    [ 'snappy', 'snappy',
      'Note: a suitable version of snappy can be found at\n' + \
      ' https://github.com/google/snappy/releases/download/' + \
      '1.1.3/snappy-1.1.3.tar.gz\n' + \
      'It can be installed via: yum install snappy snappy-devel' + \
      'or via: apt-get install libsnappy-dev' ],
    [ 'zlib', 'z',
      'Need to install zlib\n' + \
      'It can be installed via: apt-get install zlib1g' ]
]
builtin_names = [b[0] for b in builtins]
builtin_libraries = [b[1] for b in builtins]
# Here's the configure/make operations we perform before the python extension
# is linked.
configure_cmds = [
    './makemake --clean-and-make',
    './reconf',
    # force building a position independent library; it will be linked
    # into a single shared library with the SWIG interface code.
    'CFLAGS="${CFLAGS:-} -fPIC -DPIC" ' + \
    '../configure --enable-python --with-builtins=' + ','.join(builtin_names)
]
# build all the builtins, at the moment they are all compressors.
make_cmds = []
for name in builtin_names:
make_cmds.append('(cd ext/compressors/' + name + '/; | |
from functools import reduce
import re
# Module-level accumulators for STR() chords: the string literals
# emitted so far, and the running count (starts at -1; presumably
# advanced by the STR() return values -- TODO confirm with callers).
strings = []
number_of_strings = -1
def top_level_split(s):
    """
    Split `s` by top-level commas only.  Commas within parentheses are
    ignored, and a doubled comma (",,") does not act as a separator
    (one comma is kept in the current part).
    """
    # Parse the string tracking the parenthesis nesting depth.
    balance = 0
    parts = []
    part = ""
    for i in range(len(s)):
        c = s[i]
        part += c
        if c == '(':
            balance += 1
        elif c == ')':
            balance -= 1
        elif c == ',' and balance == 0 and (i + 1 == len(s) or s[i+1] != ','):
            # BUG FIX: guard the lookahead so a trailing top-level
            # comma no longer raises IndexError.
            part = part[:-1].strip()
            parts.append(part)
            part = ""
    # Capture last part
    if len(part):
        parts.append(part.strip())
    return parts
def new_chord(on_pseudolayer, keycodes_hash, has_counter, value1, value2, function, output_buffer, index):
    """Emit the C definitions for one chord and advance the chord index.

    Appends a state variable, an optional counter variable, and the
    PROGMEM Chord struct to output_buffer; returns the updated
    [output_buffer, index] pair.
    """
    suffix = str(index)
    lines = ["uint8_t state_" + suffix + " = IDLE;\n"]
    if has_counter:
        lines.append("uint8_t counter_" + suffix + " = 0;\n")
        counter_link = "&counter_" + suffix
    else:
        counter_link = "NULL"
    lines.append(
        "const struct Chord chord_" + suffix + " PROGMEM = {"
        + keycodes_hash + ", " + on_pseudolayer + ", &state_" + suffix
        + ", " + counter_link + ", " + str(value1) + ", " + str(value2)
        + ", " + function + "};\n")
    return [output_buffer + "".join(lines), index + 1]
# ----------------------------------------------------------------------
# Thin wrappers around new_chord(): each registers one chord bound to a
# specific C handler function (the string argument) and returns the
# updated [output_buffer, index] pair.
# ----------------------------------------------------------------------

# Plain keycode chord.
def KC(on_pseudolayer, keycodes_hash, keycode, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, keycode, 0, "single_dance", output_buffer, index)

# Autoshifted keycode (allocates a counter variable).
def AS(on_pseudolayer, keycodes_hash, keycode, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, True, keycode, 0, "autoshift_dance", output_buffer, index)

# Toggle autoshift on/off.
def AT(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "autoshift_toggle", output_buffer, index)

# Key on tap / pseudolayer on hold (allocates a counter variable).
def KL(on_pseudolayer, keycodes_hash, keycode, to_pseudolayer, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, True, keycode, to_pseudolayer, "key_layer_dance", output_buffer, index)

# Two-keycode dance (allocates a counter variable).
def KK(on_pseudolayer, keycodes_hash, keycode1, keycode2, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, True, keycode1, keycode2, "key_key_dance", output_buffer, index)

# Key/modifier dance.
def KM(on_pseudolayer, keycodes_hash, keycode, to_pseudolayer, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, keycode, to_pseudolayer, "key_mod_dance", output_buffer, index)

# Momentary pseudolayer switch.
def MO(on_pseudolayer, keycodes_hash, to_pseudolayer, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, 0, "temp_pseudolayer", output_buffer, index)

# Momentary pseudolayer switch with an explicit source pseudolayer.
def MO_alt(on_pseudolayer, keycodes_hash, from_pseudolayer, to_pseudolayer, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, from_pseudolayer, "temp_pseudolayer_alt", output_buffer, index)

# Lock chord.
def LOCK(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "lock", output_buffer, index)

# Permanent pseudolayer switch.
def DF(on_pseudolayer, keycodes_hash, to_pseudolayer, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, 0, "perm_pseudolayer", output_buffer, index)

# Firmware-layer switch.
def TO(on_pseudolayer, keycodes_hash, to_pseudolayer, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, 0, "switch_layer", output_buffer, index)

# One-shot key.
def OSK(on_pseudolayer, keycodes_hash, keycode, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, keycode, 0, "one_shot_key", output_buffer, index)

# One-shot pseudolayer.
def OSL(on_pseudolayer, keycodes_hash, to_pseudolayer, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, to_pseudolayer, 0, "one_shot_layer", output_buffer, index)

# Command-mode chord.
def CMD(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "command", output_buffer, index)

# Dynamic-macro controls.
def DM_RECORD(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "dynamic_macro_record", output_buffer, index)

def DM_NEXT(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "dynamic_macro_next", output_buffer, index)

def DM_END(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "dynamic_macro_end", output_buffer, index)

def DM_PLAY(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "dynamic_macro_play", output_buffer, index)

# Leader-key chord.
def LEAD(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "leader", output_buffer, index)

# Clear-state chord.
def CLEAR(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "clear", output_buffer, index)

# Reset chord.
def RESET(on_pseudolayer, keycodes_hash, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, False, 0, 0, "reset", output_buffer, index)

# String-output chord: also threads the string list and its count
# through the return value:
# [output_buffer, index, number_of_strings + 1, strings + [string_input]].
def STR(on_pseudolayer, keycodes_hash, string_input, output_buffer, index, number_of_strings, strings):
    [a, b] = new_chord(on_pseudolayer, keycodes_hash, False, number_of_strings, 0, "string_in", output_buffer, index)
    return [a, b, number_of_strings + 1, strings + [string_input]]

# Generic chord with a caller-supplied handler name (allocates a counter).
def M(on_pseudolayer, keycodes_hash, value1, value2, fnc, output_buffer, index):
    return new_chord(on_pseudolayer, keycodes_hash, True, value1, value2, fnc, output_buffer, index)
def expand_keycode_fnc(DEFINITION):
    """Expand a keymap token into a QMK keycode name.

    Single-character punctuation shorthands (like "-") are first mapped
    to their spelled-out names; any name recognized as a standard QMK
    keycode is then given the "KC_" prefix.  Unrecognized tokens are
    returned unchanged.
    """
    # Single-character shorthands for punctuation keycodes.  (The
    # original elif chain also contained an unreachable duplicate test
    # for "~"; each symbol is listed exactly once here.)
    punctuation = {
        "`": "GRAVE", "-": "MINUS", "=": "EQUAL", "[": "LBRACKET",
        "]": "RBRACKET", "\\": "BSLASH", ";": "SCOLON", "'": "QUOTE",
        ",": "COMMA", ".": "DOT", "/": "SLASH", "~": "TILDE",
        "*": "ASTERISK", "+": "PLUS", "(": "LEFT_PAREN",
        ")": "RIGHT_PAREN", "<": "LEFT_ANGLE_BRACKET",
        ">": "RIGHT_ANGLE_BRACKET", "{": "LEFT_CURLY_BRACE",
        "}": "RIGHT_CURLY_BRACE", "?": "QUESTION", ":": "COLON",
        "_": "UNDERSCORE", '"': "DOUBLE_QUOTE", "@": "AT", "#": "HASH",
        "$": "DOLLAR", "!": "EXCLAIM", "%": "PERCENT",
        "^": "CIRCUMFLEX", "&": "AMPERSAND", "|": "PIPE",
    }
    DEFINITION = punctuation.get(DEFINITION, DEFINITION)
    # Names (and aliases) of standard QMK keycodes.  NOTE(review): the
    # last entries already carry a KC_ prefix and so expand to
    # "KC_KC_..."; preserved from the original list -- confirm upstream.
    known_keycodes = {
        "A", "a", "B", "b", "C", "c", "D", "d", "E", "e",
        "F", "f", "G", "g", "H", "h", "I", "i", "J", "j",
        "K", "k", "L", "l", "M", "m", "N", "n", "O", "o",
        "P", "p", "Q", "q", "R", "r", "S", "s", "T", "t",
        "U", "u", "V", "v", "W", "w", "X", "x", "Y", "y",
        "Z", "z", "1", "2", "3", "4", "5", "6", "7", "8",
        "9", "0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
        "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15",
        "F16", "F17", "F18", "F19", "F20", "F21", "F22",
        "F23", "F24", "ENTER", "ENT", "ESCAPE", "ESC",
        "BSPACE", "BSPC", "TAB", "SPACE", "SPC", "NONUS_HASH",
        "NUHS", "NONUS_BSLASH", "NUBS", "COMMA", "COMM",
        "DOT", "SLASH", "SLSH", "TILDE", "TILD", "EXCLAIM",
        "EXLM", "AT", "HASH", "DOLLAR", "DLR", "PERCENT",
        "PERC", "CIRCUMFLEX", "CIRC", "AMPERSAND", "AMPR",
        "ASTERISK", "ASTR", "LEFT_PAREN", "LPRN", "RIGHT_PAREN",
        "RPRN", "UNDERSCORE", "UNDS", "PLUS", "LEFT_CURLY_BRACE",
        "LCBR", "RIGHT_CURLY_BRACE", "RCBR", "PIPE", "COLON",
        "COLN", "DOUBLE_QUOTE", "DQUO", "DQT",
        "LEFT_ANGLE_BRACKET", "LABK", "LT", "RIGHT_ANGLE_BRACKET",
        "RABK", "GT", "QUESTION", "QUES", "SCOLON", "SCLN",
        "QUOTE", "QUOT", "LBRACKET", "LBRC", "RBRACKET", "RBRC",
        "BSLASH", "BSLS", "MINUS", "MINS", "EQUAL", "EQL",
        "GRAVE", "GRV", "ZKHK", "CAPSLOCK", "CLCK", "CAPS",
        "SCROLLOCK", "SLCK", "BRMD", "NUMLOCK", "NLCK",
        "LOCKING_CAPS", "LCAP", "LOCKING_NUM", "LNUM",
        "LOCKING_SCROLL", "LSCR", "LCTRL", "LCTL", "LSHIFT",
        "LSFT", "LALT", "LGUI", "LCMD", "LWIN", "RCTRL",
        "RCTL", "RSHIFT", "RSFT", "RALT", "RGUI", "RCMD",
        "RWIN", "INT1", "RO", "INT2", "KANA", "INT3", "JYEN",
        "INT4", "HENK", "INT5", "MHEN", "INT6", "INT7",
        "INT8", "INT9", "LANG1", "HAEN", "LANG2", "HANJ",
        "LANG3", "LANG4", "LANG5", "LANG6", "LANG7", "LANG8",
        "LANG9", "PSCREEN", "PSCR", "PAUSE", "PAUS", "BRK",
        "BRMU", "INSERT", "INS", "HOME", "PGUP", "DELETE",
        "DEL", "END", "PGDOWN", "PGDN", "RIGHT", "RGHT",
        "LEFT", "DOWN", "UP", "APPLICATION", "APP", "POWER",
        "EXECUTE", "EXEC", "HELP", "MENU", "SELECT", "SLCT",
        "STOP", "AGAIN", "AGIN", "UNDO", "CUT", "COPY",
        "PASTE", "PSTE", "FIND", "MUTE", "VOLUP", "VOLDOWN",
        "ALT_ERASE", "ERAS", "SYSREQ", "CANCEL", "CLEAR",
        "CLR", "PRIOR", "RETURN", "SEPARATOR", "OUT", "OPER",
        "CLEAR_AGAIN", "CRSEL", "EXSEL", "SYSTEM_POWER",
        "PWR", "SYSTEM_SLEEP", "SLEP", "SYSTEM_WAKE", "WAKE",
        "AUDIO_MUTE", "AUDIO_VOL_UP", "VOLU",
        "AUDIO_VOL_DOWN", "VOLD", "MEDIA_NEXT_TRACK", "MNXT",
        "MEDIA_PREV_TRACK", "MPRV", "CPRV", "MEDIA_STOP", "MSTP",
        "MEDIA_PLAY_PAUSE", "MPLY", "MEDIA_SELECT", "MSEL",
        "MEDIA_EJECT", "EJCT", "MAIL", "CALCULATOR", "CALC",
        "MY_COMPUTER", "MYCM", "WWW_SEARCH", "WSCH", "WWW_HOME",
        "WHOM", "WWW_BACK", "WBAK", "WWW_FORWARD", "WFWD",
        "WWW_STOP", "WSTP", "WWW_REFRESH", "WREF",
        "WWW_FAVORITES", "WFAV", "MEDIA_FAST_FORWARD", "MFFD",
        "MEDIA_REWIND", "MRWD", "BRIGHTNESS_UP", "BRIU",
        "BRIGHTNESS_DOWN", "BRID", "KP_SLASH", "PSLS",
        "KP_ASTERISK", "PAST", "KP_MINUS", "PMNS", "KP_PLUS",
        "PPLS", "KP_ENTER", "PENT", "KP_1", "P1", "KP_2", "P2",
        "KP_3", "P3", "KP_4", "P4", "KP_5", "P5", "KP_6", "P6",
        "KP_7", "P7", "KP_8", "P8", "KP_9", "P9", "KP_0", "P0",
        "KP_DOT", "PDOT", "KP_EQUAL", "PEQL", "KP_COMMA", "PCMM",
        "MS_BTN1", "BTN1", "MS_BTN2", "BTN2", "MS_BTN3", "BTN3",
        "MS_BTN4", "BTN4", "MS_BTN5", "BTN5", "MS_BTN6", "BTN6",
        "MS_LEFT", "MS_L", "MS_DOWN", "MS_D", "MS_UP", "MS_U",
        "MS_RIGHT", "MS_R", "MS_WH_UP", "WH_U", "MS_WH_DOWN",
        "WH_D", "MS_WH_LEFT", "MS_WH_L", "MS_WH_RIGHT", "MS_WH_R",
        "KC_MS_ACCEL0", "ACL0", "KC_MS_ACCEL1", "ACL1",
        "KC_MS_ACCEL2", "ACL2",
    }
    if DEFINITION in known_keycodes:
        return "KC_" + DEFINITION
    else:
        return DEFINITION
def MK(on_pseudolayer, keycodes_hash, definition, output_buffer, index):
    # Generate the C source of a chord handler "function_<index>": press every
    # keycode listed in `definition` on ACTIVATED, release them all on
    # DEACTIVATED (also resetting the chord to IDLE) and again on RESTART.
    #
    # Parameters:
    #   on_pseudolayer, keycodes_hash: not used in the visible portion of this
    #       function (presumably consumed further down -- TODO confirm).
    #   definition: comma-separated keycode names, e.g. "LCTL, C".
    #   output_buffer: accumulated C source. Since str is immutable, `+=`
    #       only rebinds the local name -- the caller must receive the result
    #       via a return value (not visible in this excerpt).
    #   index: numeric suffix used in the generated C function name.
    #
    # NOTE(review): the element count comes from split(', ') but the elements
    # themselves from split(',') -- these disagree when a comma has no
    # trailing space; verify the expected input format.
    l = len(definition.split(', '))
    output_buffer += "void function_" + str(index) + "(const struct Chord* self) {\n"
    output_buffer += "    switch (*self->state) {\n"
    output_buffer += "        case ACTIVATED:\n"
    # Press each key when the chord activates.
    for i in range(0, l):
        val = definition.split(',')[i].strip()
        code = expand_keycode_fnc(val)  # normalize name to a KC_* identifier
        output_buffer += "            key_in(" + code + ");\n"
    output_buffer += "            break;\n"
    output_buffer += "        case DEACTIVATED:\n"
    # Release each key and put the chord back to IDLE on deactivation.
    for i in range(0, l):
        val = definition.split(',')[i].strip()
        code = expand_keycode_fnc(val)
        output_buffer += "            key_out(" + code + ");\n"
    output_buffer += "            *self->state = IDLE;\n"
    output_buffer += "            break;\n"
    output_buffer += "        case RESTART:\n"
    # On restart, only release the keys; the state is left untouched here.
    for i in range(0, l):
        val = definition.split(',')[i].strip()
        code = expand_keycode_fnc(val)
        output_buffer += "            key_out(" + code + ");\n"
    output_buffer += "            break;\n"
| |
pwd_mgr = urllib2.HTTPPasswordMgr()
pwd_mgr.add_password(realm, uri, self.user, self.passwd)
opener = urllib2.build_opener()
opener.add_handler(urllib2.HTTPDigestAuthHandler(pwd_mgr))
urllib2.install_opener(opener)
else:
valid_types = ", ".join(_allowedAuth)
raise NotImplementedError("Expecting one of: {0}, but received: {1}".format(valid_types,
self.http_auth))
# The header field name is capitalized in the request.add_header method.
for customHttpHeader in self.customHttpHeaders:
request.add_header(customHttpHeader, self.customHttpHeaders[customHttpHeader])
return request
def _query(self):
"""Internal method to execute the query. Returns the output of the
:func:`urllib2.urlopen` method of the :mod:`urllib2` Python library
:return: tuples with the raw request plus the expected format.
:raises QueryBadFormed: If the HTTP return code is ``400``.
:raises Unauthorized: If the HTTP return code is ``401``.
:raises EndPointNotFound: If the HTTP return code is ``404``.
:raises URITooLong: If the HTTP return code is ``414``.
:raises EndPointInternalError: If the HTTP return code is ``500``.
:raises urllib2.HTTPError: If the HTTP return code is different to ``400``, ``401``, ``404``, ``414``, ``500``.
"""
request = self._createRequest()
try:
if self.timeout:
response = urlopener(request, timeout=self.timeout)
else:
response = urlopener(request)
return response, self.returnFormat
except urllib2.HTTPError as e:
if e.code == 400:
raise QueryBadFormed(e.read())
elif e.code == 404:
raise EndPointNotFound(e.read())
elif e.code == 401:
raise Unauthorized(e.read())
elif e.code == 414:
raise URITooLong(e.read())
elif e.code == 500:
raise EndPointInternalError(e.read())
else:
raise e
def query(self):
"""
Execute the query.
Exceptions can be raised if either the URI is wrong or the HTTP sends back an error (this is also the
case when the query is syntactically incorrect, leading to an HTTP error sent back by the SPARQL endpoint).
The usual urllib2 exceptions are raised, which therefore cover possible SPARQL errors, too.
Note that some combinations of return formats and query types may not make sense. For example,
a SELECT query with Turtle response is meaningless (the output of a SELECT is not a Graph), or a CONSTRUCT
query with JSON output may be a problem because, at the moment, there is no accepted JSON serialization
of RDF (let alone one implemented by SPARQL endpoints). In such cases the returned media type of the result is
unpredictable and may differ from one SPARQL endpoint implementation to the other. (Endpoints usually fall
back to one of the "meaningful" formats, but it is up to the specific implementation to choose which
one that is.)
:return: query result
:rtype: :class:`QueryResult` instance
"""
return QueryResult(self._query())
def queryAndConvert(self):
"""Macro like method: issue a query and return the converted results.
:return: the converted query result. See the conversion methods for more details.
"""
res = self.query()
return res.convert()
def __str__(self):
"""This method returns the string representation of a :class:`SPARQLWrapper` object.
.. versionadded:: 1.8.3
:return: A human-readable string of the object.
:rtype: string
"""
fullname = self.__module__ + "." + self.__class__.__name__
items = ('"%s" : %r' % (k, v) for k, v in sorted(self.__dict__.items()))
str_dict_items = "{%s}" % (',\n'.join(items))
return "<%s object at 0x%016X>\n%s" % (fullname, id(self), str_dict_items)
#######################################################################################################
class QueryResult(object):
"""
Wrapper around an a query result. Users should not create instances of this class, it is
generated by a :func:`SPARQLWrapper.query` call. The results can be
converted to various formats, or used directly.
If used directly: the class gives access to the direct HTTP request results
``response`` obtained from the call to :func:`urllib.urlopen`.
It is a file-like object with two additional methods:
* ``geturl()`` to return the URL of the resource retrieved
* ``info()`` that returns the meta-information of the HTTP result as a dictionary-like object.
For convenience, these methods are also available on the :class:`QueryResult` instance.
The :func:`__iter__` and :func:`next` methods are also implemented (by mapping them to :attr:`response`). This means that the
common idiom ``for l in obj : do_something_with_line(l)`` would work, too.
:ivar response: the direct HTTP response; a file-like object, as return by the :func:`urllib2.urlopen` library call.
:ivar requestedFormat: The requested format. The possible values are: :data:`JSON`, :data:`XML`, :data:`RDFXML`, :data:`TURTLE`, :data:`N3`, :data:`RDF`, :data:`CSV`, :data:`TSV`, :data:`JSONLD`.
:type requestedFormat: string
"""
def __init__(self, result):
"""
:param result: HTTP response stemming from a :func:`SPARQLWrapper.query` call, or a tuple with the expected format: (response, format).
"""
if isinstance(result, tuple):
self.response = result[0]
self.requestedFormat = result[1]
else:
self.response = result
def geturl(self):
"""Return the URL of the original call.
:return: URL of the original call.
:rtype: string
"""
return self.response.geturl()
def info(self):
"""Return the meta-information of the HTTP result.
:return: meta-information of the HTTP result.
:rtype: dict
"""
return KeyCaseInsensitiveDict(self.response.info())
def __iter__(self):
"""Return an iterator object. This method is expected for the inclusion
of the object in a standard ``for`` loop.
"""
return self.response.__iter__()
def next(self):
"""Method for the standard iterator."""
return self.response.next()
def _convertJSON(self):
"""
Convert a JSON result into a Python dict. This method can be overwritten in a subclass
for a different conversion method.
:return: converted result.
:rtype: dict
"""
return json.loads(self.response.read().decode("utf-8"))
def _convertXML(self):
"""
Convert an XML result into a Python dom tree. This method can be overwritten in a
subclass for a different conversion method.
:return: converted result.
:rtype: :class:`xml.dom.minidom.Document`
"""
from xml.dom.minidom import parse
return parse(self.response)
def _convertRDF(self):
"""
Convert a RDF/XML result into an RDFLib Graph. This method can be overwritten
in a subclass for a different conversion method.
:return: converted result.
:rtype: :class:`rdflib.graph.Graph`
"""
try:
from rdflib.graph import ConjunctiveGraph
except ImportError:
from rdflib import ConjunctiveGraph
retval = ConjunctiveGraph()
# (DEPRECATED) this is a strange hack. If the publicID is not set, rdflib (or the underlying xml parser) makes a funny
# (DEPRECATED) (and, as far as I could see, meaningless) error message...
retval.load(self.response) # (DEPRECATED) publicID=' ')
return retval
def _convertN3(self):
"""
Convert a RDF Turtle/N3 result into a string. This method can be overwritten in a subclass
for a different conversion method.
:return: converted result.
:rtype: string
"""
return self.response.read()
def _convertCSV(self):
"""
Convert a CSV result into a string. This method can be overwritten in a subclass
for a different conversion method.
:return: converted result.
:rtype: string
"""
return self.response.read()
def _convertTSV(self):
"""
Convert a TSV result into a string. This method can be overwritten in a subclass
for a different conversion method.
:return: converted result.
:rtype: string
"""
return self.response.read()
def _convertJSONLD(self):
"""
Convert a RDF JSON-LD result into an RDFLib Graph. This method can be overwritten
in a subclass for a different conversion method.
:return: converted result
:rtype: :class:`rdflib.graph.Graph`
"""
from rdflib import ConjunctiveGraph
retval = ConjunctiveGraph()
retval.load(self.response, format='json-ld') # (DEPRECATED), publicID=' ')
return retval
def convert(self):
"""
Encode the return value depending on the return format:
* in the case of :data:`XML`, a DOM top element is returned
* in the case of :data:`JSON`, a json conversion will return a dictionary
* in the case of :data:`RDF/XML<RDFXML>`, the value is converted via RDFLib into a ``RDFLib Graph`` instance
* in the case of :data:`JSON-LD<JSONLD>`, the value is converted via RDFLib into a ``RDFLib Graph`` instance
* in the case of RDF :data:`Turtle<TURTLE>`/:data:`N3`, a string is returned
* in the case of :data:`CSV`/:data:`TSV`, a string is returned
* In all other cases the input simply returned.
:return: the converted query result. See the conversion methods for more details.
"""
def _content_type_in_list(real, expected):
""" Internal method for checking if the content-type header received matches any of the content types of the expected list.
:param real: The content-type header received.
:type real: string
:param expected: A list of expected content types.
:type expected: list
:return: Returns a boolean after checking if the content-type header received matches any of the content types of the expected list.
:rtype: boolean
"""
return True in [real.find(mime) != -1 for mime in expected]
def _validate_format(format_name, allowed, mime, requested):
""" Internal method for validating if the requested format is one of the allowed formats.
:param format_name: The format name (to be used in the warning message).
:type format_name: string
:param allowed: A list of allowed content types.
:type allowed: list
:param mime: The content-type header received (to be used in the warning message).
:type mime: string
:param requested: | |
<gh_stars>0
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.39884,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.690647,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.396106,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.48559,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.394236,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.65293,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0144583,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.104551,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.106928,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.104551,
'Execution Unit/Register Files/Runtime Dynamic': 0.121386,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.252639,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.661808,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.87685,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00420277,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00420277,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00371229,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00146535,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00153603,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0136539,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0384492,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.102792,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.34819,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.349129,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 0.852215,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0109624,
'L2/Runtime Dynamic': 0.0034841,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.72554,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.20083,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0805063,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0805064,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.10725,
'Load Store Unit/Runtime Dynamic': 1.67837,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198515,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.39703,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0704535,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0705696,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0572243,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.680294,
'Memory Management Unit/Runtime Dynamic': 0.127794,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 23.9819,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0203945,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.20786,
'Renaming Unit/Int Front End RAT/Subthreshold |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.