input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>atubbs/spitfire<filename>spitfire/compiler/optimizer.py
# Copyright 2007 The Spitfire Authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import __builtin__
import copy
import logging
import os.path
import re
from spitfire.compiler import ast
from spitfire.compiler import analyzer
from spitfire.compiler import walker
builtin_names = vars(__builtin__)
_BINOP_INVALID_COUNT = 1000 # Any value > 0 will work.
_BINOP_INITIAL_COUNT = 0
_BINOP_FIRST_PASS = 1
# Utility functions for searching up the tree.
def _get_parent_node_by_pred(node, pred, search_current=False):
"""Find the first parent node that satisfies a predicate function."""
if not search_current:
node = node.parent
while node is not None:
if pred(node):
return node
node = node.parent
return None
def _get_parent_node_by_type(node, node_type):
    """Find the first parent node that is an instance of `node_type`.

    `node_type` may be a single type or a tuple of types, exactly as
    accepted by isinstance().
    """
    def _is_wanted_type(candidate):
        return isinstance(candidate, node_type)
    return _get_parent_node_by_pred(node, _is_wanted_type)
def _get_parent_loop(node):
    """Return the nearest enclosing ast.ForNode, or None if not in a loop."""
    return _get_parent_node_by_type(node, ast.ForNode)
def _get_parent_block(node):
    """Return the nearest enclosing block node (function, loop or conditional)."""
    return _get_parent_node_by_type(
        node, (ast.FunctionNode, ast.ForNode, ast.IfNode, ast.ElseNode))
def _get_local_identifiers(node):
    """Collect identifier sets visible from `node`'s enclosing scopes.

    Walks up the parent chain accumulating, from every scoped ancestor
    (for/if/else/function), the local, partial-local and dirty-local
    identifiers. Stops at the enclosing FunctionNode.

    Returns:
      A tuple (local_identifiers, partial_local_identifiers,
      dirty_local_identifiers) of frozensets.
    """
    local_identifiers = []
    partial_local_identifiers = []
    dirty_local_identifiers = []

    def _collect_scope(scope):
        # All four node kinds contribute the same three identifier
        # categories; centralizing this removes the copy/paste blocks.
        local_identifiers.extend(scope.local_identifiers)
        partial_local_identifiers.extend(scope.partial_local_identifiers)
        dirty_local_identifiers.extend(scope.dirty_local_identifiers)

    # search the parent scopes
    # fixme: should this be recursive?
    node = node.parent
    while node is not None:
        if isinstance(node, ast.ForNode):
            # Loop variables are locals within the loop body.
            local_identifiers.extend(node.loop_variant_set)
            _collect_scope(node.scope)
        elif isinstance(node, ast.IfNode):
            _collect_scope(node.scope)
        elif isinstance(node, ast.ElseNode):
            # in this case, we don't want to go to the parent node, which is
            # the ast.IfNode - we want to go to the parent scope
            _collect_scope(node.scope)
            node = node.parent.parent
            continue
        elif isinstance(node, ast.FunctionNode):
            # The function is the outermost scope of interest.
            _collect_scope(node.scope)
            break
        node = node.parent
    return (frozenset(local_identifiers), frozenset(partial_local_identifiers),
            frozenset(dirty_local_identifiers))
def _get_identifiers_from_expression(node):
    """Find the IdentifierNodes present in an expression.

    Searches the flattened expression tree and returns the set of
    IdentifierNodes found. It does not traverse ast.GetAttrNode or any
    LiteralNodes such as ListLiteral or DictLiteral nodes.
    """
    identifiers = set()
    for candidate in walker.flatten_tree(node):
        if isinstance(candidate, ast.IdentifierNode):
            identifiers.add(candidate)
    return identifiers
def _is_clean(node, scope):
    """Determine if the node references any dirty identifiers in the scope.

    Returns False when the node reads any identifier that the scope marks
    as dirty - such a node depends on modifications made within its parent
    scope, so it is not safe to hoist.
    """
    referenced = _get_identifiers_from_expression(node)
    return referenced.isdisjoint(scope.dirty_local_identifiers)
def _get_common_aliased_expression_map(*scopes):
    """Get the common clean aliased expression map for multiple scopes.

    Returns a dict mapping each alias key that is present - and clean -
    in every given scope, to its alias from the first scope. Returns {}
    when no scopes are given.
    """
    if not scopes:
        return {}
    clean_key_sets = []
    for scope in scopes:
        clean_scope_keys = set()
        # Iterating the dict directly yields keys on both py2 and py3
        # (the old .iterkeys() call was py2-only).
        for alias in scope.aliased_expression_map:
            if _is_clean(alias, scope):
                clean_scope_keys.add(alias)
        clean_key_sets.append(clean_scope_keys)
    # set.intersection takes any number of sets; this replaces the py2-only
    # builtin reduce() with the idiomatic (and py3-safe) equivalent.
    common_clean_keys = clean_key_sets[0].intersection(*clean_key_sets[1:])
    return dict((key, scopes[0].aliased_expression_map[key])
                for key in common_clean_keys)
# Utility functions for generating names.
def _generate_filtered_placeholder(node):
    """Given a node, generate a name for the cached filtered placeholder"""
    # The name embeds the node's hash so equal expressions share one slot.
    return '_fph%08X' % ast.unsigned_hash(node)
def _generate_cached_resolved_placeholder(node):
    """Given a node, generate a name for the cached udn placeholder"""
    # The name embeds the node's hash so equal expressions share one slot.
    return '_rudn%08X' % ast.unsigned_hash(node)
class _BaseAnalyzer(object):
def __init__(self, ast_root, options, compiler):
    """Store the tree, options and compiler used by this analysis pass."""
    # Root of the template AST; mutated in place by optimize_ast().
    self.ast_root = ast_root
    # Option flags controlling which optimizations run.
    self.options = options
    # Compiler instance, used for error reporting and debug flags.
    self.compiler = compiler
    # Node types encountered that have no specialized analyze method.
    self.unoptimized_node_types = set()
    # Used as a flag to determine how many binops we have analyzed.
    self.binop_count = _BINOP_INVALID_COUNT
def optimize_ast(self):
    """Run the analysis over the whole tree and return the (mutated) root."""
    self.visit_ast(self.ast_root)
    if self.options.debug:
        print "unoptimized_node_types", self.unoptimized_node_types
    return self.ast_root
# build an AST node list from a single parse node
# need the parent in case we are going to delete a node
def visit_ast(self, node, parent=None):
    """Dispatch `node` to its analyze<NodeType> handler.

    Records the parent link first so handlers can walk or modify the tree
    upward (e.g. to delete the node from its parent). Falls back to
    default_optimize_node when no specialized handler exists.
    """
    node.parent = parent
    method_name = 'analyze%s' % node.__class__.__name__
    method = getattr(self, method_name, self.default_optimize_node)
    if method_name in self.compiler.debug_flags:
        print method_name, node
    return method(node)
def skip_analyze_node(self, node):
    """No-op handler for node types that need no optimization."""
    return

# Leaf node types carry no optimizable sub-expressions.
analyzeLiteralNode = skip_analyze_node
analyzeIdentifierNode = skip_analyze_node
analyzeTargetNode = skip_analyze_node
def default_optimize_node(self, node):
    """Fallback handler for node types without a specialized analyzer.

    Records the node's type so the set of unoptimized node types can be
    reported when debugging is enabled.
    """
    self.unoptimized_node_types.add(type(node))
# this function has some rules that are a bit unclean - you aren't actually
# looking for the 'parent' scope, but one you might insert nodes into. for
# instance, you skip over a ast.ForNode so that optimizetions are inserted
# in a loop-invariant fashion.
def get_parent_scope(self, node):
    """Return the scope that new nodes generated for `node` belong in.

    This is not strictly the lexical parent scope (see the comment above
    this method): e.g. a ForNode's expression_list resolves to the scope
    *outside* the loop so insertions stay loop-invariant, and an IfNode's
    test expression resolves past the conditional's own scope.
    """
    node_stack = [node]
    node = node.parent
    while node is not None:
        if type(node) == ast.FunctionNode:
            return node.scope
        elif type(node) == ast.IfNode:
            # elements of the test clause need to reference the next scope
            # "up" - usually the function, but could be another conditional
            # block fixme: if we ever implement "elif" this will have to get
            # fixed up
            if node_stack[-1] != node.test_expression:
                return node.scope
        elif type(node) == ast.ElseNode:
            return node.scope
        elif type(node) == ast.ForNode:
            # the loop expression itself belongs to the enclosing scope
            if node_stack[-1] != node.expression_list:
                return node.scope
        node_stack.append(node)
        node = node.parent
    # No enclosing function was found - report at the outermost node seen.
    self.compiler.error(
        analyzer.SemanticAnalyzerError("expected a parent function"),
        pos=node_stack[-1].pos)
def get_insert_block_and_point(self, node):
    """Find the block that can accept insertions on behalf of `node`.

    Walks up the parent chain until a block-type ancestor directly
    contains the current marker.

    Returns:
      A tuple (block, marker) where `marker` is the child of `block` on
      the path down to `node`; new nodes can be inserted before `marker`.
    """
    original_node = node
    insert_marker = node
    node = node.parent
    while node is not None:
        if isinstance(node, (ast.FunctionNode, ast.ForNode, ast.IfNode,
                             ast.ElseNode)):
            if insert_marker in node.child_nodes:
                return node, insert_marker
        insert_marker = node
        node = node.parent
    # Bug fix: `node` is always None when the loop falls through, so the
    # previous `pos=node.pos` raised AttributeError instead of reporting
    # the compiler error. Report at the node we started from; this is what
    # the (previously unused) `original_node` binding was for.
    self.compiler.error(
        analyzer.SemanticAnalyzerError("expected a parent block"),
        pos=original_node.pos)
def replace_in_parent_block(self, node, new_node):
    """Swap `node`'s insertion marker in its enclosing block for `new_node`."""
    block, marker = self.get_insert_block_and_point(node)
    block.replace(marker, new_node)
def reanalyzeConditionalNode(self, conditional_node):
    """Hoist clean, condition-invariant aliases out of a conditional block.

    For every alias in the conditional's scope that also exists in the
    parent block's scope and passes both invariance tests, move the
    alias assignment up to the parent block.
    """
    if (not self.options.hoist_conditional_aliases and
            not self.options.cache_filtered_placeholders):
        return
    parent_block, insertion_point = self.get_insert_block_and_point(
        conditional_node)
    if self.options.hoist_conditional_aliases:
        # Snapshot the items before iterating: hoisting mutates the
        # aliased_expression_map, and on py3 iterating a live .items()
        # view while the dict changes raises RuntimeError. (The unused
        # `parent_node` local was also dropped.)
        items = list(conditional_node.scope.aliased_expression_map.items())
        for alias_node, alias in items:
            assign_alias_node = ast.AssignNode(alias,
                                               alias_node,
                                               pos=alias_node.pos)
            if alias_node in parent_block.scope.aliased_expression_map:
                if self._is_condition_invariant(alias_node,
                                                conditional_node):
                    self.hoist(conditional_node, parent_block,
                               insertion_point, alias_node,
                               assign_alias_node)
def reanalyzeLoopNode(self, loop_node):
    """Hoist loop-invariant aliases out of a loop body.

    Aliases already present in the parent scope are hoisted via
    self.hoist(); aliases new to the parent scope are moved directly
    in front of the loop when they are loop-invariant.
    """
    if not self.options.hoist_loop_invariant_aliases:
        return
    parent_block, insertion_point = self.get_insert_block_and_point(
        loop_node)
    # Snapshot the items before iterating: hoisting mutates the
    # aliased_expression_map, and on py3 iterating a live .items() view
    # while the dict changes raises RuntimeError.
    for alias_node, alias in list(
            loop_node.scope.aliased_expression_map.items()):
        assign_alias = ast.AssignNode(alias, alias_node, pos=alias_node.pos)
        if alias_node in parent_block.scope.aliased_expression_map:
            if self._is_loop_invariant(alias_node, loop_node):
                self.hoist(loop_node, parent_block, insertion_point,
                           alias_node, assign_alias)
        else:
            # if this alias is not already used in the parent scope, that's
            # ok, hoist it if it's loop invariant
            if self._is_loop_invariant(alias_node, loop_node):
                loop_node.remove(assign_alias)
                parent_block.insert_before(loop_node, assign_alias)
                parent_block.scope.hoisted_aliases.append(alias_node)
def _is_condition_invariant(self, node, conditional_node):
    """Check condition-invariance with both the legacy and the clean test.

    The legacy test is known to be broken in some cases but correct in
    others; out of caution a node is only treated as invariant when it
    passes both checks.
    """
    if not self._is_condition_invariant_legacy(node, conditional_node):
        return False
    return _is_clean(node, conditional_node.scope)
def _is_condition_invariant_legacy(self, node, conditional_node):
node_dependency_set = self.get_node_dependencies(node)
condition_invariant = (
not node_dependency_set & conditional_node.scope.local_identifiers)
#print "is_condition_invariant:", condition_invariant
#print " locals:", conditional_node.scope.local_identifiers
#print " deps:", node_dependency_set
return condition_invariant
def _is_loop_invariant(self, node, loop_node):
    """Check loop-invariance with both the legacy and the clean test.

    The legacy test is known to be broken in some cases but correct in
    others; out of caution a node is only treated as invariant when it
    passes both checks.
    """
    if not self._is_loop_invariant_legacy(node, loop_node):
        return False
    return _is_clean(node, loop_node.scope)
def _is_loop_invariant_legacy(self, node, loop_node):
    """Legacy loop-invariance test.

    A node is considered invariant only when it neither depends on the
    loop's variant identifiers nor on any other node defined inside the
    loop (outside the node's own subtree).
    """
    node_dependency_set = self.get_node_dependencies(node)
    # print "is loop invariant node:", node
    # for x in node_dependency_set:
    # print " dep:", x
    # find dependencies within the loop node but outside the node we're
    # checking
    node_dependency_set_except_node_tree = node_dependency_set - set(
        walker.flatten_tree(node))
    dependencies_within_loop = set(walker.flatten_tree(
        loop_node)).intersection(node_dependency_set_except_node_tree)
    depends_on_loop_variants = bool(loop_node.loop_variant_set.intersection(
        node_dependency_set))
    # TODO: Disabling warnings for now. They are useless without
    # filenames. Also need to make sure all these cases are valid.
    # if not depends_on_loop_variants and dependencies_within_loop:
    #     # we can't assume this is invariant because it depends on other
    #     # nodes inside the loop. eventually we should hoist out both the
    #     # node and its dependencies.
    #     dependency_nodes = '\n'.join(' %s' % node.parent
    #                                  for node in dependencies_within_loop)
    #     logging.warning("Cannot hoist possible loop invariant: %s.", node)
    #     logging.warning("Please move following dependencies "
    #                     "out of the loop:\n%s", dependency_nodes)
    return not depends_on_loop_variants and not dependencies_within_loop
def get_node_dependencies(self, node):
node_dependency_set = set(walker.flatten_tree(node))
parent_block = _get_parent_block(node)
for n in list(node_dependency_set):
# when this is an identifier, you need to check all of the potential
# the dependencies for that symbol, which means doing some crawling
if isinstance(n, ast.IdentifierNode):
identifier = n
parent_block_to_check = parent_block
while parent_block_to_check:
for block_node in parent_block_to_check.child_nodes:
if isinstance(block_node, ast.AssignNode):
if block_node.left == identifier:
node_dependency_set.update(
self.get_node_dependencies(
block_node.right))
parent_block_to_check = None
break
elif isinstance(block_node, ast.IfNode):
# if you encounter a conditional in your chain, you
# depend on any dependencies of the condition itself
# FIXME: calling
# get_node_dependencies(block_node.test_expression)
# causes an infinite loop, but that is probably the
# correct way forward to address the dependency
| |
# plt.subplot(2, 1, 1, sharex=ax1)
# librosa.display.specshow(librosa.amplitude_to_db(self.chromSync, ref=np.max), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time',x_coords=self.onset_times_graph, cmap=cmap)
# plt.title('Spectre syncronisé')
#
# plt.subplot(2, 1, 2, sharex=ax1)
# librosa.display.specshow(librosa.amplitude_to_db(self.chromSyncSimpl, ref=np.max), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time', x_coords=self.onset_times_graph, cmap=cmap)
# plt.title('Simplifié')
# plt.tight_layout()
######
# librosa.display.specshow(librosa.amplitude_to_db(self.chromSyncSimpl, ref=np.max), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time', x_coords=self.onset_times_graph, cmap=cmap)
######
fig, ax = plt.subplots(figsize=(13, 7.5))
img = librosa.display.specshow(librosa.amplitude_to_db(self.chromSyncSimpl, ref=np.max), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time',x_coords=self.onset_times_graph, cmap=cmap)
# plt.title('Synchronised spectrum, β = {}, delay τ = {} s'.format(params.margin, T_att))
plt.title('Partial detection on synchronised spectrum, β = {}, with delay, δ = {}'.format(params.margin, params.δ))
for t in self.onset_times_graph:
ax.axvline(t, color = 'k',alpha=0.5, ls='--')
plt.axis('tight')
plt.tight_layout()
plt.show()
#Plot les spectrogrammes
if params.plot_chromDescr:
#Construction de la liste des descripteurs avec Chrom
spaceChrom = []
for descr in space:
if descr in ['concordance','concordance3','concordanceTot','roughness','crossConcordance','crossConcordanceTot','harmonicChange','diffConcordance']: spaceChrom.append(descr)
dimChrom = len(space)
times_plotChromDyn = [self.onset_times_graph[0]] + [t-0.25 for t in self.onset_times_graph[2:self.n_frames-1]] + [t+0.25 for t in self.onset_times_graph[2:self.n_frames-1]] + [self.onset_times_graph[self.n_frames]]
times_plotChromDyn.sort()
plt.figure(5,figsize=(13, 7.5))
# Partition
if params.plot_score & (len(self.score)!=0):
#plt.subplot(dim+1+s,1,s)
img=mpimg.imread(self.score)
score = plt.subplot(dimChrom+1,1,1)
plt.axis('off')
score.imshow(img)
plt.title(title +' '+instrument)
else:
ax1 = plt.subplot(dimChrom+1,1,1)
librosa.display.specshow(self.ChromDB, bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time', x_coords=self.times, cmap=cmap)
plt.title(title +' '+instrument)
for k, descr in enumerate(spaceChrom):
if (k==0) & params.plot_score & (len(self.score)!=0):
ax1 = plt.subplot(dimChrom+1,1,2)
else: plt.subplot(dimChrom+1, 1, k+2, sharex=ax1)
# Descripteurs statiques
if len(getattr(self, descr)) == self.n_frames:
if descr in ['roughness']:
librosa.display.specshow(getattr(self, 'chrom_'+descr), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time', x_coords=self.onset_times_graph, cmap=cmap)
else:
librosa.display.specshow(librosa.amplitude_to_db(getattr(self, 'chrom_'+descr), ref=np.max), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time', x_coords=self.onset_times_graph, cmap=cmap)
# Descripteurs dynamiques
else:
Max = np.amax(getattr(self, 'chrom_'+descr)[:,1:self.n_frames-2])
librosa.display.specshow(librosa.amplitude_to_db(np.insert(getattr(self, 'chrom_'+descr)[:,1:self.n_frames-2]/Max, range(self.n_frames-2),1, axis=1), ref=np.max), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time',x_coords=np.asarray(times_plotChromDyn), cmap=cmap)
plt.title('Spectre de ' + descr)
plt.tight_layout()
#Plot les descripteurs harmoniques
if params.plot_descr:
dim = len(space)
fig = plt.figure(6,figsize=(13, 7.5))
# Partition
if params.plot_score & (len(self.score)!=0):
#plt.subplot(dim+1+s,1,s)
img=mpimg.imread(self.score)
score = plt.subplot(dim+1,1,1)
plt.axis('off')
score.imshow(img)
plt.title(subTitle)
else:
ax1 = plt.subplot(dim+1,1,1)
librosa.display.specshow(self.ChromDB, bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time', x_coords=self.times,cmap=cmap)
# plt.title(title +' '+ instrument)
for k, descr in enumerate(space):
if (k==0) & params.plot_score & (len(self.score)!=0):
ax1 = plt.subplot(dim+1,1,2)
ax1.get_xaxis().set_visible(False)
else:
ax = plt.subplot(dim+1, 1, k+2, sharex=ax1)
ax.get_xaxis().set_visible(False)
# Je remplace les valeurs nan par 0
for i,val in enumerate(getattr(self,descr)):
if np.isnan(val): getattr(self,descr)[i] = 0
if len(getattr(self, descr)) == self.n_frames:
plt.vlines(self.onset_times_graph[1:self.n_frames], min(getattr(self, descr)), max(getattr(self, descr)[1:(self.n_frames-1)]), color='k', alpha=0.9, linestyle='--')
else:
plt.vlines(self.onset_times_graph[1:self.n_frames-1], min(getattr(self, descr)), max(getattr(self, descr)[1:(self.n_frames-1)]), color='k', alpha=0.9, linestyle='--')
plt.xlim(self.onset_times_graph[0],self.onset_times_graph[-1])
if not all(x>=0 for x in getattr(self, descr)[1:(self.n_frames-1)]):
plt.hlines(0,self.onset_times_graph[0], self.onset_times_graph[self.n_frames], alpha=0.5, linestyle = ':')
# Legend
context = ''
norm = ''
if params.plot_norm and (descr in params.dic_norm ): norm = '\n' + params.dic_norm[descr]
if descr in ['harmonicNovelty', 'harmonicityContext','roughnessContext','diffConcordanceContext','diffRoughnessContext'] :
if params.memory_size>=2: context = '\n' + 'Memory: {} chords, decr = {}'.format(params.memory_size, params.memory_decr_ponderation)
else: context = '\n' + 'Memory: {} chord, decr = {}'.format(params.memory_size, params.memory_decr_ponderation)
# Descripteurs statiques
if len(getattr(self, descr)) == self.n_frames:
plt.hlines(getattr(self, descr)[1:(self.n_frames-1)], self.onset_times_graph[1:(self.n_frames-1)], self.onset_times_graph[2:self.n_frames],color=['b','r','g','c','m','y','b','r','g'][k] , label=descr[0].upper() + descr[1:] + norm + context)
# Descripteurs dynamiques
elif len(getattr(self, descr)) == (self.n_frames-1):
if descr == 'diffRoughnessContext': plt.plot(self.onset_times_graph[2:(self.n_frames-1)], getattr(self, descr)[1:(self.n_frames-2)],['b','r','g','c','m','y','b','r','g'][k]+'o', label='DiffRoughness' + norm)
else: plt.plot(self.onset_times_graph[2:(self.n_frames-1)], getattr(self, descr)[1:(self.n_frames-2)],['b','r','g','c','m','y','b','r','g'][k]+'o', label=(descr[0].upper() + descr[1:]) + norm)
plt.hlines(getattr(self, descr)[1:(self.n_frames-2)], [t-0.5 for t in self.onset_times_graph[2:(self.n_frames-1)]], [t+0.5 for t in self.onset_times_graph[2:(self.n_frames-1)]], color=['b','r','g','c','m','y','b','r','g'][k], alpha=0.9, linestyle=':' )
# plt.plot(self.onset_times_graph[2:(self.n_frames-1)], [0.35, 0.21, 0.34, 0.23],['b','r','g','c','m','y','b','r','g'][1]+'o', label = 'Octave up')#label=(descr[0].upper() + descr[1:]) + norm + context)
# plt.hlines([0.35, 0.21, 0.34, 0.23], [t-0.5 for t in self.onset_times_graph[2:(self.n_frames-1)]], [t+0.5 for t in self.onset_times_graph[2:(self.n_frames-1)]], color=['b','r','g','c','m','y','b','r','g'][1], alpha=0.9, linestyle=':' )
# plt.hlines([0.61,0.47,0.59, 0.49], [t-0.5 for t in self.onset_times_graph[2:(self.n_frames-1)]], [t+0.5 for t in self.onset_times_graph[2:(self.n_frames-1)]], color=['b','r','g','c','m','y','b','r','g'][2], alpha=0.9, linestyle=':' )
# plt.plot(self.onset_times_graph[2:(self.n_frames-1)], [0.61,0.47,0.59, 0.49],['b','r','g','c','m','y','b','r','g'][2]+'o',label = 'Fourth down')#label=(descr[0].upper() + descr[1:]) + norm + context)
# plt.title('DiffRoughness, normalised')
plt.legend(frameon=True, framealpha=0.75)
plt.tight_layout()
#Plot descriptogramme + valeur numérique
if params.plot_OneDescr:
descr = space[0]
plt.figure(7,figsize=(10, 7.5))
############################
# Partition
if params.plot_score & (len(self.score)!=0):
img=mpimg.imread(self.score)
score = plt.subplot(3,1,1)
plt.axis('off')
score.imshow(img)
p = 1
else: p=0
ax1 = plt.subplot(p+2,1,p+1)
# Descripteurs statiques
if len(getattr(self, descr)) == self.n_frames:
if descr in ['concordanceTot', 'concordance3','roughness']:
librosa.display.specshow(getattr(self, 'chrom_'+descr), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time', x_coords=self.onset_times_graph, cmap=cmap)
elif descr == 'harmonicity':
librosa.display.specshow(np.power(getattr(self, 'chrom_'+descr),4)[0:4*BINS_PER_OCTAVE], bins_per_octave=BINS_PER_OCTAVE, fmin=f_corr_min, y_axis='cqt_note', x_axis='time', x_coords=self.onset_times_graph, cmap=cmap)
else:
librosa.display.specshow(librosa.amplitude_to_db(getattr(self, 'chrom_'+descr)[0:int(5*self.n_bins/6),:], ref=np.max), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time', x_coords=self.onset_times_graph, cmap=cmap,sr = self.sr)
# Descripteurs dynamiques
else:
times_plotChromDyn = [self.onset_times_graph[0]] + [t-0.75 for t in self.onset_times_graph[2:self.n_frames-1]] + [t+0.75 for t in self.onset_times_graph[2:self.n_frames-1]] + [self.onset_times_graph[self.n_frames]]
times_plotChromDyn.sort()
Max = np.amax(getattr(self, 'chrom_'+descr)[:,1:self.n_frames-2])
if descr == 'diffRoughnessContext':
librosa.display.specshow(np.insert(getattr(self, 'chrom_'+descr)[:,1:self.n_frames-2]/Max, range(self.n_frames-2),1, axis=1), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time',x_coords=np.asarray(times_plotChromDyn),cmap=cmap)
else:
librosa.display.specshow(librosa.amplitude_to_db(np.insert(getattr(self, 'chrom_'+descr)[:,1:self.n_frames-2]/Max, range(self.n_frames-2),1, axis=1), ref=np.max), bins_per_octave=BINS_PER_OCTAVE, fmin=self.fmin, y_axis='cqt_note', x_axis='time',x_coords=np.asarray(times_plotChromDyn),cmap=cmap)
for t in self.onset_times_graph:
ax1.axvline(t, color = 'k',alpha=0.5, ls='--')
if descr == 'harmonicity':
plt.title('Virtual pitch spectrum')
else:
# plt.title('DiffRoughness Spectrum')
plt.title(descr[0].upper()+descr[1:]+' spectrum')
plt.xlim(self.onset_times_graph[0],self.onset_times_graph[-1])
ax1.get_xaxis().set_visible(False)
# Plot Descr
ax2 = plt.subplot(p+2, 1, p+2)
if len(getattr(self, descr)) == self.n_frames:
plt.vlines(self.onset_times_graph[1:self.n_frames], min(getattr(self, descr)), max(getattr(self, descr)), color='k', alpha=0.9, linestyle='--')
else:
plt.vlines(self.onset_times_graph[1:self.n_frames-1], min(getattr(self, descr)),max(getattr(self, descr)), color='k', alpha=0.9, linestyle='--')
if not all(x>=0 for x in getattr(self, descr)):
plt.hlines(0,self.onset_times_graph[0], self.onset_times_graph[self.n_frames], alpha=0.5, linestyle = ':')
# Legend
context = ''
norm = ''
par = ''
if params.plot_norm and (descr in params.dic_norm ): norm = '\n' + params.dic_norm[descr]
if descr in ['harmonicNovelty', 'harmonicityContext','roughnessContext','diffConcordanceContext','diffRoughnessContext'] :
if params.memory_size>=2: context = '\n' + 'Memory: {} chords, decr = {}'.format(params.memory_size, params.memory_decr_ponderation)
else: context = '\n' + 'Memory: {} chord'.format(params.memory_size)
if descr in ['harmonicity']:
par = '\n{} partials'.format(params.κ)
# Descripteurs statiques
if len(getattr(self, descr)) == self.n_frames:
plt.hlines(getattr(self, descr)[1:(self.n_frames-1)], self.onset_times_graph[1:(self.n_frames-1)], self.onset_times_graph[2:self.n_frames], color=['b','r','g','c','m','y','b','r','g'][1], label= descr[0].upper() + descr[1:] + norm + context + par)
# Descripteurs dynamiques
elif len(getattr(self, descr)) == (self.n_frames-1):
plt.plot(self.onset_times_graph[2:(self.n_frames-1)], getattr(self, descr)[1:(self.n_frames-2)],['b','r','g','c','m','y','b','r','g'][0]+'o')
plt.hlines(getattr(self, descr)[1:(self.n_frames-2)], [t-0.5 for t in self.onset_times_graph[2:(self.n_frames-1)]], [t+0.5 for t in self.onset_times_graph[2:(self.n_frames-1)]], color=['b','r','g','c','m','y','b','r','g'][0], alpha=0.9, linestyle=':',label = descr[0].upper() + descr[1:] + norm + context)
plt.xlim(self.onset_times_graph[0],self.onset_times_graph[-1])
# plt.ylim(bottom=0)
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.1e'))
ax2.get_xaxis().set_visible(False)
plt.legend(frameon=True, framealpha=0.75)
plt.tight_layout()
#Plot représentations abstraites
if params.plot_abstr:
if len(space)==2 :
color = params.color_abstr
l1 = getattr(self, space[0])[1:len(getattr(self, space[0]))-1]
l2 = getattr(self, space[1])[1:len(getattr(self, space[1]))-1]
#Si un descripteur statique et un descripteur dynamique
if len(l1)<len(l2) : l2.pop(0)
elif len(l1)>len(l2) : l1.pop(0)
#Tronquage
if isinstance(end,int):
l1= l1[0:end]
l2= l2[0:end]
plt.figure(8)
ax = plt.subplot()
if params.link_abstr: plt.plot(l1, l2, color+'--')
plt.plot(l1, l2, color+'o')
for i in range(len(l1)):
ax.annotate(' {}'.format(i+1), (l1[i], l2[i]), color=params.color_abstr_numbers)
plt.xlabel(space[0][0].upper() + space[0][1:])
plt.ylabel(space[1][0].upper() + space[1][1:])
plt.title(title +' '+instrument + ' (' + space[0][0].upper() + space[0][1:] + ', ' + space[1][0].upper() + space[1][1:] + ')')
else:
color = params.color_abstr
l1 = getattr(self, space[0])[1:len(getattr(self, space[0]))-1]
l2 = getattr(self, space[1])[1:len(getattr(self, space[0]))-1]
l3 = getattr(self, space[2])[1:len(getattr(self, space[0]))-1]
fig = plt.figure(9)
ax = fig.add_subplot(111, projection='3d')
if params.link_abstr: plt.plot(l1, l2, l3, color+'--')
for i in range(len(l1)):
ax.scatter(l1[i], l2[i], l3[i], c=color, marker='o')
ax.text(l1[i], l2[i], l3[i], i+1, color=params.color_abstr_numbers)
ax.set_xlabel(space[0][0].upper() + space[0][1:])
ax.set_ylabel(space[1][0].upper() + space[1][1:])
ax.set_zlabel(space[2][0].upper() + space[2][1:])
ax.set_title(title +' '+instrument + ' (' + space[0][0].upper() + space[0][1:] + ', ' + space[1][0].upper() + space[1][1:] + ', ' + space[2][0].upper() + space[2][1:] + ')')
if params.plot_compParam:
# Représentation des nouveautés, comparaison des échelles de mémoire
with open ('nouv0', 'rb') as fp:
nouv0 = pickle.load(fp)
with open ('nouv1', 'rb') as fp:
nouv1 = pickle.load(fp)
with open ('nouv2', 'rb') as fp:
nouv2 = pickle.load(fp)
with open ('nouv3', 'rb') as fp:
nouv3 = pickle.load(fp)
with open ('nouv4', 'rb') as fp:
nouv4 = pickle.load(fp)
with open ('nouvFull', 'rb') as fp:
nouvFull = pickle.load(fp)
plt.figure(9,figsize=(13, 7))
img=mpimg.imread(self.score)
score = plt.subplot(2,1,1)
plt.axis('off')
score.imshow(img)
plt.title(title +' '+instrument)
plt.subplot(2, 1, 2)
plt.vlines(self.onset_times_graph[1:self.n_frames], 0, 1, color='k', alpha=0.9, linestyle='--')
plt.hlines(nouv0[1:(self.n_frames-1)], self.onset_times_graph[1:(self.n_frames-1)], self.onset_times_graph[2:self.n_frames], color=['b','r','g','c','m','y','b','r','g'][2], label='Memory: 0 chord')
# plt.hlines(nouv1[1:(self.n_frames-1)], self.onset_times_graph[1:(self.n_frames-1)], self.onset_times_graph[2:self.n_frames], color=['b','r','g','c','m','y','b','r','g'][3], label='Memory: 1 chord')
# plt.hlines(nouv2[1:(self.n_frames-1)], self.onset_times_graph[1:(self.n_frames-1)], self.onset_times_graph[2:self.n_frames], color=['b','r','g','c','m','y','b','r','g'][4], label='Memory: 2 chords')
plt.hlines(nouv3[1:(self.n_frames-1)], self.onset_times_graph[1:(self.n_frames-1)], self.onset_times_graph[2:self.n_frames], color=['b','r','g','c','m','y','b','r','g'][5], label='Memory: 3 chords')
# plt.hlines(nouv4[1:(self.n_frames-1)], self.onset_times_graph[1:(self.n_frames-1)], self.onset_times_graph[2:self.n_frames], color=['b','r','g','c','m','y','b','r','g'][6], label='Memory: 4 chords')
plt.hlines(nouvFull[1:(self.n_frames-1)], self.onset_times_graph[1:(self.n_frames-1)], self.onset_times_graph[2:self.n_frames], color=['b','r','g','c','m','y','b','r','g'][1], label='Memory: All chords')
plt.legend(frameon=True, framealpha=0.75)
plt.tight_layout()
plt.show()
def Points(self, space=None):
    """Build a (n_descriptors, T) array of descriptor values.

    For each descriptor name in `space`, takes its values (list or numpy
    array attribute on self) with the first and last frames trimmed.
    When static and dynamic descriptors are mixed (different lengths),
    longer (static) series are replaced by their first differences so
    every row has the same length.

    Args:
        space: descriptor attribute names; defaults to
            ['concordance', 'concordanceTot'].

    Returns:
        numpy array of shape (len(space), T).
    """
    # Bug fix: the old signature used a mutable default argument
    # (space=[...]), which is shared across calls; use None instead.
    if space is None:
        space = ['concordance', 'concordanceTot']
    series = []
    for descr in space:
        values = getattr(self, descr)
        if isinstance(values, list):
            series.append(values[1:-1])
        else:
            series.append(values.tolist()[1:-1])
    # Shortest series length: dynamic descriptors have one value fewer.
    shortest = min(map(len, series))
    for i in range(len(space)):
        if len(series[i]) > shortest:
            # Replace the static series by its first differences
            # (evolution of the descriptor), dropping the last element.
            for t in range(len(series[i]) - 1):
                series[i][t] = series[i][t + 1] - series[i][t]
            series[i].pop(-1)
    return np.asarray(series)
def Sort(self, space = ['concordance']):
descr = space[0]
L = getattr(self,descr)[1:self.n_frames-1]
indices, L_sorted = zip(*sorted(enumerate(L), key=itemgetter(1), reverse=params.sorted_reverse))
if params.plot_sorted:
if params.sorted_reverse: croiss = 'décroissante'
else: croiss = 'croissante'
sorted_score = 'Exemples/'+ title +'-'+descr+'-score.png'
plt.figure(1,figsize=(13, 7))
| |
import torch
import librosa
import numpy as np
import mir_eval
import separation_model as md
import data_set as dts
from shutil import copyfile
import os
class AudioSeparator:
r"""Implements a framework for using a SeparationModel to produce separated source for all files in the
validation set and measure the separation performances in terme of signal to distortion ratio (SDR),
signal to interference ratio (SIR) and signal to artifact ratio (SAR).
"""
@classmethod
def default_config(cls):
    r"""Get the required parameters for instantiating a AudioSeparator

    Only the checkpoint location and the output folder are needed here:
    the model and data-set configuration are stored inside the model
    checkpoint itself.

    Returns:
        dict containing the required parameters
    """
    return {
        "checkpoint_path": "",  # path to model checkpoint
        "separated_audio_folder": ""  # path to folder where to save the separated audio tracks.
    }
def __init__(self, data_set, model, config):
    r"""Constructor. Receives the AudioDataSet and the Model and stores them as class members.
    Note: The received data_set features should not be scaled or centered.
    Args:
        data_set (AudioDataSet): The data set with the mixtures to separate
        model (SeparationModel): The separation model for performing separation
        config (dict): Configuration dictionary with parameters for the model, dataset and self.
    """
    self.config = config
    self.data_set = data_set
    # Normalize or standardize the features, to have them ready to use as model input
    # NOTE(review): assumes config contains "shift" and "scaling" - these
    # come from the training config merged in from_checkpoint(); confirm.
    self.data_set.shift_and_scale(self.config["shift"], self.config["scaling"])
    self.model = model
    # Inference only: put the model in eval mode.
    self.model.eval()
    # Select CPU or the configured CUDA device for separation.
    self.device = torch.device("cpu") if not self.config["use_gpu"] \
        else torch.device("cuda:" + str(self.config["gpu_no"]))
@classmethod
def from_checkpoint(cls, config, which_data_set="test"):
    r"""Instantiate an AudioSeparator from a model checkpoint.

    Loads the model from its checkpoint. The checkpoint also contains the
    configuration dictionary required to create the data set split
    related to the set used to train the model.

    Args:
        config (dict): Configuration dictionary with the parameters in
            defined in 'default_config()'
        which_data_set (str): Identifier of the set type for the 'split'
            method of the AudiodataSet. 'train', 'test' or 'val'

    Returns:
        AudioSeparator using the model loaded from the checkpoint path
        in 'config'
    """
    checkpoint_file = config["checkpoint_path"]
    if not os.path.isfile(checkpoint_file):
        raise ValueError("File " + checkpoint_file + " is not a valid file.")
    print("Loading model ...'{}'".format(checkpoint_file))
    # Load everything on CPU; the separator moves to GPU later if asked.
    state = torch.load(checkpoint_file, 'cpu')
    # Start from the training-time configuration and override it with the
    # AudioSeparator's own parameters.
    train_config = state["config"]
    train_config.update(config)
    # Build the data set containing the audio to separate.
    data_set_class = dts.find_data_set_class(train_config["data_set_type"])
    val_set = data_set_class.split(train_config, which_data_set)
    # Build the SeparationModel and restore its trained parameters.
    model = md.SeparationModel(train_config, val_set.features_shape(),
                               val_set.n_classes())
    model.load_state_dict(state["model_state_dict"])
    return cls(val_set, model, train_config)
def separate_spectrogram(self, masks, features, features_idx):
r"""Apply masks to models input features to generate a spectrogram for each audio source.
There are many ways to use separation masks to produce spectrograms for each sources in the input features.
This function does the following:
- Rescale the masks to the shape of the SeparationModel input
(this is only useful if the MaskModel in the SeparationModel does not preserve the shape of its input
with padding)
- Shift the features to [0, +inf[, apply the mask and shift back.
(This is because the features can have negative values, and we want a value of 0 in the mask to
correspond to the lowest possible energy)
- The previous step provides us with 'masked features': these features should correspond to separated
sources. The last step is to convert back these features (scaled and centered log-Mel-spectrogram,
PCEN, ...) to a 'spectrogram' representation that can be converted back to audio with Inverse STFT.
Note: It has be found experimentally that apply the masks at the 'features' level give worst results than
converting the masks to 'spectrogram' representation and applying them directly to the mixture
spectrogram, because converting the features back to the spectrogram scale often implies to take the
exponential of the fetures which amplifies a lot the noise.
The other processing is performed by 'separate_spectrogram_in_lin_scale()'.
Args:
masks (torch.Tensor): Shape: [n_class, ~freq, ~time]. The masks produced by the separation model.
features (torch.Tensor): Shape [channel, freq, time]. The input features to the separation model.
features_idx (int): index of the features in data_set.features
Returns:
Spectrogram of the sources separated by the masks. shape: [n_sources, channel=1, Frequency, Time]
"""
# resize the masks to the size of the features (shape: [n_masks, channel, freq, time]
# This does something only if the masks have different shape than features (if MaskModel doesn't preserve shape)
masks = torch.nn.functional.interpolate(masks.unsqueeze(1),
size=(features.shape[1], features.shape[2]),
mode='bilinear',
align_corners=False)
# Multiply each mask with the features (shape: [n_masks, channel, features.shape[0], features.shape[1]]
shift = features.abs().max()
spectrograms = masks * (features + shift) - shift
# Undo the feature scaling and centering
self.data_set.rescale_to_initial(spectrograms, self.config["shift"], self.config["scaling"])
# From Log Mel spectrogram or PCEN to STFT magnitude (energy spectrogram)
return self.data_set.features_to_stft_magnitudes(spectrograms.cpu().numpy(), features_idx)
def separate_spectrogram_in_lin_scale(self, masks, features_shape, mixture_spectrogram):
r"""Apply masks to the mixture spectrogram to generate spectrograms for each separated sources.
The masks received in argument have the shape of the output of the MaskModel. In this function,
these masks will first be converted to the shape of the mixture energy spectrogram (inverse Mel scaling)
and then be directly applied to the mixture spectrogram.
Args:
masks (torch.tensor): Shape: [n_class, ~freq, ~time] The masks produced by the separation model
features_shape (torch.tensor.shape): Shape of the input features to the separation model.
mixture_spectrogram (np.ndarray): shape: [Frequency, Time] Mixture spectrogram.
Returns:
Spectrogram of the sources separated by the masks. shape: [n_sources, channel=1, Frequency, Time]
"""
# resize the masks to the size of the features (shape: [n_masks, channel, freq, time]
# This does something only if the masks have different shape than features (if MaskModel doesn't preserve shape)
masks = torch.nn.functional.interpolate(masks.unsqueeze(1),
size=(features_shape[1], features_shape[2]),
mode='bilinear',
align_corners=False)
# If Mel spectrogram were used as features: reverse Mel-scaling
# Here we use the same inverse processing as in the implementation of
# <NAME> et al. "A joint-separation-classification model for sound event detection of weakly-labelled
# data"; In: CoRR abs/1711.03037 (2017). axXiv: 1711.03037 URL: http://arxiv.org/abs/1711.03037
if self.config['feature_type'] != 'spectrogram':
masks = np.asarray([np.transpose(
self.data_set.mel_filterbank / (np.sum(self.data_set.mel_filterbank, axis=0) + 1e-8)) @ mask.numpy()
for mask in masks.squeeze()])
# Apply the masks to the mixture spectrogram. Mask.shape: [n_sources, channel=1, Frequency, Time]
# mixture_spectrogram.shape: [Frequency, Time]
# output.shape: [n_sources, channel=1, Frequency, Time]
return masks * mixture_spectrogram
def spectrogram_to_audio(self, spectrogram, phase):
r"""Compute waveform from spectrogram using inverse short-time Fourier transform.
Wrapper to call the istft function from the AudioDataSet class that performs the ISTFT with the
parameters corresponding to the STFT.
Args:
spectrogram (np.ndarray): shape: [Frequency, Time]. Magnitude of STFT result
phase (np.ndarray): shape: [Frequency, Time]. Phase of STFT result
Returns:
audio waveform. (1D np.ndarray)
"""
return self.data_set.istft(spectrogram * phase)
    def save_separated_audio(self, audios, filename):
        r"""Save the audio tracks in 'audios' to a subfolder of self.config['separated_audio_folder'].

        'audios' should be the sources separated by the SeparationModel for the audio mixture saved in 'filename'.
        The separated tracks are saved in a folder with the same name as their corresponding mixture
        (one .wav file per class); the mixture itself is also copied into the folder as 'original_mix.wav'.

        Args:
            audios (np.ndarray): shape: [n_sources, time]. Audio waveforms of the separated sources
            filename (str): Name of the file containing the audio mixture.
        """
        # Create folder with mixture name.
        # NOTE(review): os.makedirs raises FileExistsError if the folder already
        # exists; 'separate()' guards against a non-empty output root, so a
        # collision here indicates duplicate mixture filenames — confirm.
        folder_path = os.path.join(self.config["separated_audio_folder"], os.path.splitext(filename)[0])
        os.makedirs(folder_path)
        # Save each separated source, named after its class.
        for class_idx, audio in enumerate(audios):
            # audio.T: presumably converts to the (samples, channels) layout
            # librosa expects — verify against the caller's array layout.
            librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',
                                     audio.T,
                                     sr=self.data_set.config["sampling_rate"])
        # Also copy the mixture in the folder, for easy comparison.
        copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, "original_mix.wav"))
def separate(self, separation_method='in_lin'):
r"""Run separation with self.model for all the files in self.data_set and save the separated sources.
Args:
            separation_method (str): Identifier used to choose the method for applying the masks: either
                                     separate at the feature level ('separate_spectrogram') or at the energy
                                     spectrogram level ('separate_spectrogram_in_lin').
                                     Advised: 'in_lin'
"""
# Check if the output folder exists, if not creates it, otherwise inform user and stop execution
if not os.path.exists(self.config["separated_audio_folder"]):
os.makedirs(self.config["separated_audio_folder"])
else:
if os.listdir(self.config["separated_audio_folder"]): # if folder is not empty
raise ValueError('Output folders already exist | |
tegar_ID 1 1 6 1 1 4 1 2 \
1 5 8 1 tegar_ID 3 2 3 2 3 2 3 2 1 1 4 9 7 1 1 tegar_ID 1 te\
gar_ID 3 1 tegar_ID 1 3 2 6 1 3 2 1 2 tegar_ID 1 1 4 9 7 1 1 t\
egar_ID 1 tegar_ID 3 1 tegar_ID 1 1 tegar_ID 1 tegar_ID 1 1 2 teg\
ar_ID 9 9 1 tegar_ID 1 1 1 2 1 1 6 3 2 7 8 9 7 1 tegar_ID 9 1\
tegar_ID 1 6 9 1 1 4 1 1 4 1 1 1 1 1 4 5 8 1 tegar_ID 3 2 3 \
2 3 2 3 2 1 1 2 9 7 1 1 5 1 1 5 1 tegar_ID 1 tegar_ID 6 9 7 \
7 7 9 8 4 7 3 6 7 7 9 7 8 8 3 3 2 6 1 3 2 9 1 3 9 1 1 6 \
1 tegar_ID 1 1 tegar_ID 3 9 7 1 1 4 9 5 7 3 6 8 3 9 4 4 3 2 \
3 9 4 9 3 9 4 4 3 2 3 9 5 tegar_ID 3 9 4 4 3 2 3 9 5 1 3 9\
4 4 3 2 3 9 5 2 3 9 4 4 3 2 3 9 5 3 3 9 4 4 3 2 3 9 5 4\
3 9 4 4 3 2 3 9 5 5 3 9 4 4 3 2 3 9 5 6 3 9 4 4 3 2 3 9\
5 7 3 9 9 3 1 tegar_ID 7 7 6 5 8 8 9 5 8 3 8 4 8 2 9 5 7\
6 6 9 7 8 3 2 6 1 3 2 5 5 4 8 1 tegar_ID 1 tegar_ID 1 tegar\
_ID tegar_ID 1 tegar_ID 1 1 tegar_ID 2 3 2 1 1 4 1 1 7 1 1 tegar\
_ID 9 5 9 7 1 1 4 1 tegar_ID 3 1 1 2 9 7 1 1 4 1 1 5 1 tegar_\
ID 1 4 tegar_ID 4 1 5 8 1 tegar_ID 3 2 3 2 3 2 3 2 1 1 8 9 7\
1 1 4 9 5 9 7 1 1 4 1 tegar_ID 3 1 1 5 3 2 6 1 3 2 9 7 1 1\
4 1 tegar_ID 3 1 1 2 9 7 1 1 4 1 1 5 1 tegar_ID 1 4 6 6 5 1 \
1 4 1 tegar_ID 3 1 1 7 1 tegar_ID 9 1 tegar_ID 1 1 1 tegar_ID 1 \
1 6 8 tegar_ID 9 7 1 1 4 1 1 5 1 tegar_ID 1 1 1 4 4 tegar_ID 1\
tegar_ID tegar_ID 1 tegar_ID 1 1 1 5 9 9 1 1 4 1 tegar_ID 5 1 1\
2 1 1 6 1 tegar_ID 5 1 1 1 1 1 tegar_ID 6 1 3 9 9 2 1 1 tegar\
_ID 3 2 7 9 9 8 1 tegar_ID 2 1 1 7 1 1 5 9 9 9 7 1 1 6 1 teg\
ar_ID 1 3 2 1 2 1 1 1 1 1 1 7 1 1 4 3 2 1 1 2 1 2 1 1 1 6 1 \
tegar_ID 4 1 1 1 1 1 tegar_ID 3 2 1 1 5 9 9 1 1 4 1 tegar_ID 5 \
1 1 2 1 1 6 3 2 9 8 1 2 1 3 2 9 9 1 1 1 1 1 tegar_ID 1 1 8 \
1 tegar_ID 1 1 1 4 1 1 6 1 tegar_ID 5 1 1 tegar_ID 1 tegar_ID 3 \
3 2 9 7 1 1 tegar_ID 3 2 1 tegar_ID 5 1 1 tegar_ID 1 1 2 1 1 7 \
1 1 6 3 2 1 1 5 9 9 1 1 4 1 tegar_ID 5 1 1 2 1 1 6 3 2 1 1 6\
1 1 1 3 2 9 7 1 1 tegar_ID 3 2 1 1 1 1 1 7 1 1 6 1 1 2 1 1 \
7 1 1 6 3 2 1 1 5 9 9 1 1 4 1 tegar_ID 5 1 1 2 1 1 6 9 2 1 1\
tegar_ID 3 2 6 7 1 1 4 1 tegar_ID 1 9 7 1 1 6 1 tegar_ID 1 1 \
tegar_ID tegar_ID 3 2 6 6 1 2 1 3 2 5 8 3 2 8 4 1 tegar_ID 1 \
1 tegar_ID 3 9 7 1 1 4 4 5 7 3 6 8 3 9 4 1 1 tegar_ID 3 2 3 \
2 3 2 3 2 1 1 8 9 7 1 1 4 9 5 9 7 1 1 4 1 tegar_ID 3 1 1 5 \
4 6 9 7 1 tegar_ID tegar_ID 1 tegar_ID tegar_ID 9 5 9 7 1 1 4 1\
tegar_ID 3 1 1 7 1 tegar_ID 9 1 tegar_ID 1 1 1 tegar_ID 1 1 6 4\
tegar_ID 3 9 4 5 1 tegar_ID 5 3 9 4 4 3 2 3 9 4 5 4 5 1 teg\
ar_ID 5 1 1 tegar_ID 1 1 2 1 1 7 1 1 6 3 9 4 4 3 2 1 1 4 1 te\
gar_ID 1 1 1 3 1 1 7 1 tegar_ID 5 1 1 4 1 tegar_ID 1 1 tegar_ID \
tegar_ID 6 1 8 4 1 1 4 1 1 7 1 tegar_ID 1 4 4 3 2 1 tegar_ID 4\
1 tegar_ID 1 1 tegar_ID 8 1 1 2 6 1 3 9 1 tegar_ID 5 1 1 tegar\
_ID 1 1 2 1 1 7 1 1 6 3 2 1 1 2 1 2 1 1 1 6 1 tegar_ID 4 1 1 \
1 1 1 tegar_ID 3 2 1 1 5 9 9 1 1 4 1 tegar_ID 5 1 1 2 1 1 6 3\
2 1 1 tegar_ID 9 7 1 tegar_ID 9 1 tegar_ID 1 3 9 4 1 1 tegar_I\
D 3 2 3 2 3 2 3 2 1 1 8 9 7 1 1 4 9 5 9 7 1 1 4 1 tegar_ID \
3 1 1 5 4 6 9 7 1 tegar_ID tegar_ID 1 tegar_ID tegar_ID 9 5 9 7\
1 1 4 1 tegar_ID 3 1 1 7 1 tegar_ID 9 1 tegar_ID 1 1 1 tegar_ID\
1 1 6 4 tegar_ID 3 9 4 5 1 1 1 3 9 4 4 3 2 3 9 4 5 4 5 1 \
1 1 1 1 7 1 1 6 1 1 2 1 1 7 1 1 6 3 9 4 4 3 2 1 1 4 1 tegar_\
ID 1 1 1 3 1 1 7 1 tegar_ID 5 1 1 4 1 tegar_ID 1 1 tegar_ID tega\
r_ID 6 1 8 4 1 1 4 1 1 7 1 tegar_ID 1 4 4 3 2 1 tegar_ID 4 1 \
tegar_ID 1 1 tegar_ID 8 1 1 2 6 1 3 9 1 1 1 1 1 7 1 1 6 1 1 2 \
1 1 7 1 1 6 3 2 1 1 2 1 2 1 1 1 6 1 tegar_ID 4 1 1 1 1 1 tega\
r_ID 3 2 1 1 5 9 9 1 1 4 1 tegar_ID 5 1 1 2 1 1 6 3 2 1 1 teg\
ar_ID 9 7 1 tegar_ID 9 1 tegar_ID 1 3 9 4 1 1 tegar_ID 3 2 3 2\
3 2 3 2 1 1 4 1 tegar_ID 1 1 1 6 1 | |
#!/usr/bin/env python
# pyroute2 - ss2
# Copyright (C) 2018 <NAME>
#
# ss2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
import json
import socket
import re
import os
import argparse
from socket import (AF_INET,
AF_UNIX
)
try:
import psutil
except ImportError:
psutil = None
from pr2modules.netlink.diag import DiagSocket
from pr2modules.netlink.diag import (SS_ESTABLISHED,
SS_SYN_SENT,
SS_SYN_RECV,
SS_FIN_WAIT1,
SS_FIN_WAIT2,
SS_TIME_WAIT,
SS_CLOSE,
SS_CLOSE_WAIT,
SS_LAST_ACK,
SS_LISTEN,
SS_CLOSING,
SS_ALL,
SS_CONN)
from pr2modules.netlink.diag import (UDIAG_SHOW_NAME,
UDIAG_SHOW_VFS,
UDIAG_SHOW_PEER)
try:
from collections.abc import Mapping
from collections.abc import Callable
except ImportError:
from collections import Mapping
from collections import Callable
# UDIAG_SHOW_ICONS,
# UDIAG_SHOW_RQLEN,
# UDIAG_SHOW_MEMINFO
class UserCtxtMap(Mapping):
    """Read-only mapping from socket inode -> {user: {pid: process context}}.

    Built once at construction time from psutil's system-wide connection
    table; used to attach process ownership information (user, pid, command,
    file descriptors) to kernel socket statistics.
    """

    _sk_inode_re = re.compile(r"socket:\[(?P<ino>\d+)\]")

    _proc_sk_fd_cast = "/proc/%d/fd/%d"
    _BUILD_RECURS_PATH = ["inode",
                          "usr",
                          "pid",
                          "fd"]

    def _parse_inode(self, sconn):
        """Return the socket inode (str) behind a psutil connection's fd link."""
        sk_path = self._proc_sk_fd_cast % (sconn.pid, sconn.fd)
        sk_inode_raw = os.readlink(sk_path)
        # The fd link looks like "socket:[12345]"; anything else is unexpected.
        # Fix: the original called .group() on a possibly-None match, raising
        # AttributeError before its sanity check could fire.
        match = self._sk_inode_re.search(sk_inode_raw)
        if not match:
            raise RuntimeError("Unexpected kernel sk inode outline")
        return match.group('ino')

    def __recurs_enter(self,
                       _sk_inode=None,
                       _sk_fd=None,
                       _usr=None,
                       _pid=None,
                       _ctxt=None,
                       _recurs_path=None):
        """Walk one step of _BUILD_RECURS_PATH, creating nested dicts as needed.

        Fix: the original used a mutable default argument for _recurs_path.
        """
        _recurs_path = [] if _recurs_path is None else _recurs_path
        step = _recurs_path.pop(0)

        if self._BUILD_RECURS_PATH[0] == step:
            if _sk_inode not in self._data:
                self._data[_sk_inode] = {}

        elif self._BUILD_RECURS_PATH[1] == step:
            if _usr not in self._data[_sk_inode]:
                self._data[_sk_inode][_usr] = {}

        elif self._BUILD_RECURS_PATH[2] == step:
            if _pid not in self._data[_sk_inode][_usr]:
                self._data[_sk_inode][_usr][_pid] = _ctxt

        elif self._BUILD_RECURS_PATH[3] == step:
            self._data[_sk_inode][_usr][_pid]["fds"].append(_sk_fd)
            # "fd" is the last step: end recursion
            return

        else:
            raise RuntimeError("Unexpected step in recursion")

        # descend to the next step of the path
        self.__recurs_enter(_sk_inode=_sk_inode,
                            _sk_fd=_sk_fd,
                            _usr=_usr,
                            _pid=_pid,
                            _ctxt=_ctxt,
                            _recurs_path=_recurs_path)

    def _enter_item(self, usr, flow, ctxt):
        """Insert one psutil connection entry into the nested mapping."""
        if not flow.pid:
            # corner case of e.g. anonymous AddressFamily.AF_UNIX sockets
            return

        sk_inode = int(self._parse_inode(flow))
        sk_fd = flow.fd
        recurs_path = list(self._BUILD_RECURS_PATH)

        self.__recurs_enter(_sk_inode=sk_inode,
                            _sk_fd=sk_fd,
                            _usr=usr,
                            _pid=flow.pid,
                            _ctxt=ctxt,
                            _recurs_path=recurs_path)

    def _build(self):
        """Populate the mapping from the system-wide connection table."""
        for flow in psutil.net_connections(kind="all"):
            proc = psutil.Process(flow.pid)
            usr = proc.username()
            ctxt = {"cmd": proc.exe(),
                    "full_cmd": proc.cmdline(),
                    "fds": []}

            self._enter_item(usr, flow, ctxt)

    def __init__(self):
        # Fix: per-instance storage; the original used a class-level dict,
        # which would be shared between all instances.
        self._data = {}
        self._build()

    def __getitem__(self, key):
        return self._data[key]

    def __len__(self):
        return len(self._data)

    def __delitem__(self, key):
        raise RuntimeError("Not implemented")

    def __iter__(self):
        raise RuntimeError("Not implemented")
class Protocol(Callable):
    """Base class for per-protocol socket statistics collectors.

    Subclasses implement __call__ to query the kernel through a netlink
    diagnostics socket and print the refined statistics.
    """

    class Resolver:
        """Best-effort reverse DNS resolution."""

        @staticmethod
        def getHost(ip):
            """Return the host name for *ip*, or None when resolution fails."""
            try:
                return str(socket.gethostbyaddr(ip)[0])
            except Exception:
                # fail gracefully: callers treat None as "no name available"
                return None

    def __init__(self, sk_states, fmt='json'):
        self._states = sk_states
        # Resolve the output formatter by name, e.g. fmt='json' -> _fmt_json.
        self._fmt = getattr(self, "_fmt_%s" % fmt, None)

    def __call__(self, nl_diag_sk, args, usr_ctxt):
        raise RuntimeError('not implemented')

    def _fmt_json(self, refined_stats):
        return json.dumps(refined_stats, indent=4)
class UNIX(Protocol):
    """Unix-domain socket statistics collector."""

    def __init__(self, sk_states=SS_CONN, _fmt='json'):
        super(UNIX, self).__init__(sk_states, fmt=_fmt)

    def __call__(self, nl_diag_sk, args, usr_ctxt):
        # Ask the kernel for unix socket stats, including the bound path
        # name, VFS details and the peer socket inode.
        raw_stats = nl_diag_sk.get_sock_stats(states=self._states,
                                              family=AF_UNIX,
                                              show=(UDIAG_SHOW_NAME |
                                                    UDIAG_SHOW_VFS |
                                                    UDIAG_SHOW_PEER))
        refined = self._refine_diag_raw(raw_stats, usr_ctxt)
        print(self._fmt(refined))

    def _refine_diag_raw(self, raw_stats, usr_ctxt):
        """Convert raw netlink flow records into a printable dict."""
        refined = {'UNIX': {'flows': []}}

        def vfs_cb(raw_val):
            return {'inode': raw_val['udiag_vfs_ino'],
                    'dev': raw_val['udiag_vfs_dev']}

        # netlink attribute name -> (output key, optional value converter)
        attr_map = {'UNIX_DIAG_NAME': ('path_name', None),
                    'UNIX_DIAG_VFS': ('vfs', vfs_cb),
                    'UNIX_DIAG_PEER': ('peer_inode', None),
                    'UNIX_DIAG_SHUTDOWN': ('shutdown', None)}

        for raw_flow in raw_stats:
            flow = {'inode': raw_flow['udiag_ino']}
            for attr in raw_flow['attrs']:
                out_key, converter = attr_map[attr[0]]
                value = attr[1]
                flow[out_key] = converter(value) if converter else value
            refined['UNIX']['flows'].append(flow)

        # Attach process ownership context (user, pid, command) when known.
        if usr_ctxt:
            for flow in refined['UNIX']['flows']:
                try:
                    flow['usr_ctxt'] = usr_ctxt[flow['inode']]
                except KeyError:
                    # no context for this inode; might define sentinel val
                    pass

        return refined
class TCP(Protocol):
    """TCP (AF_INET) socket statistics collector."""

    # inet_diag extension identifiers (see linux/inet_diag.h)
    INET_DIAG_MEMINFO = 1
    INET_DIAG_INFO = 2
    INET_DIAG_VEGASINFO = 3
    INET_DIAG_CONG = 4

    def __init__(self, sk_states=SS_CONN, _fmt='json'):
        super(TCP, self).__init__(sk_states, fmt=_fmt)

        # Request all supported extensions as a bitmask: bit (N - 1) for
        # extension id N.
        IDIAG_EXT_FLAGS = [self.INET_DIAG_MEMINFO,
                           self.INET_DIAG_INFO,
                           self.INET_DIAG_VEGASINFO,
                           self.INET_DIAG_CONG]

        self.ext_f = 0
        for f in IDIAG_EXT_FLAGS:
            self.ext_f |= (1 << (f - 1))

    def __call__(self, nl_diag_sk, args, usr_ctxt):
        sstats = nl_diag_sk.get_sock_stats(states=self._states,
                                           family=AF_INET,
                                           extensions=self.ext_f)
        refined_stats = self._refine_diag_raw(sstats,
                                              args.resolve,
                                              usr_ctxt)
        printable = self._fmt(refined_stats)
        print(printable)

    def _refine_diag_raw(self, raw_stats, do_resolve, usr_ctxt):
        """Convert raw inet_diag flow records into a printable dict.

        Optionally attaches process ownership context (usr_ctxt) and
        reverse-resolved host names (do_resolve).
        """
        refined = {'TCP': {'flows': []}}

        idiag_refine_map = {'src': 'idiag_src',
                            'dst': 'idiag_dst',
                            'src_port': 'idiag_sport',
                            'dst_port': 'idiag_dport',
                            'inode': 'idiag_inode',
                            'iface_idx': 'idiag_if',
                            'retrans': 'idiag_retrans'}

        for raw_flow in raw_stats:
            vessel = {}
            for k1, k2 in idiag_refine_map.items():
                vessel[k1] = raw_flow[k2]

            for ext_bundle in raw_flow['attrs']:
                vessel = self._refine_extension(vessel, ext_bundle)

            refined['TCP']['flows'].append(vessel)

        if usr_ctxt:
            for flow in refined['TCP']['flows']:
                try:
                    sk_inode = flow['inode']
                    flow['usr_ctxt'] = usr_ctxt[sk_inode]
                except KeyError:
                    # might define sentinel val
                    pass

        if do_resolve:
            for flow in refined['TCP']['flows']:
                src_host = Protocol.Resolver.getHost(flow['src'])
                if src_host:
                    flow['src_host'] = src_host

                dst_host = Protocol.Resolver.getHost(flow['dst'])
                if dst_host:
                    flow['dst_host'] = dst_host

        return refined

    def _refine_extension(self, vessel, raw_ext):
        """Merge one inet_diag extension record into the flow dict."""
        k, content = raw_ext
        ext_refine_map = {'meminfo': {'r': 'idiag_rmem',
                                      'w': 'idiag_wmem',
                                      'f': 'idiag_fmem',
                                      't': 'idiag_tmem'}}

        if k == 'INET_DIAG_MEMINFO':
            mem_k = 'meminfo'
            vessel[mem_k] = {}
            for k1, k2 in ext_refine_map[mem_k].items():
                vessel[mem_k][k1] = content[k2]

        elif k == 'INET_DIAG_CONG':
            vessel['cong_algo'] = content

        elif k == 'INET_DIAG_INFO':
            vessel = self._refine_tcp_info(vessel, content)

        elif k == 'INET_DIAG_SHUTDOWN':
            pass

        return vessel

    # interim approach
    # tcpinfo call backs
    class InfoCbCore:
        """Callbacks used to normalize/convert raw tcp_info field values."""

        # normalizer
        @staticmethod
        def rto_n_cb(key, value, **ctx):
            """Microseconds -> milliseconds; 3000000 means 'unset' -> None."""
            out = None
            if value != 3000000:
                out = value / 1000.0

            return out

        @staticmethod
        def generic_1k_n_cb(key, value, **ctx):
            """Generic microseconds -> milliseconds normalizer."""
            return value / 1000.0

        # predicates
        @staticmethod
        def snd_thresh_p_cb(key, value, **ctx):
            """Only report ssthresh when below the 'infinite' 0xFFFF marker."""
            if value < 0xFFFF:
                return value

            return None

        @staticmethod
        def rtt_p_cb(key, value, **ctx):
            """Prefer the Vegas RTT estimate when valid, else tcpi_rtt (ms)."""
            tcp_info_raw = ctx['raw']

            try:
                if tcp_info_raw['tcpv_enabled'] != 0 and \
                        tcp_info_raw['tcpv_rtt'] != 0x7fffffff:
                    return tcp_info_raw['tcpv_rtt']
            except KeyError:
                # ill practice, yet except quicker path
                pass

            return tcp_info_raw['tcpi_rtt'] / 1000.0

        # converter
        @staticmethod
        def state_c_cb(key, value, **ctx):
            """Numeric socket state -> iproute2-style state string."""
            state_str_map = {SS_ESTABLISHED: "established",
                             SS_SYN_SENT: "syn-sent",
                             SS_SYN_RECV: "syn-recv",
                             SS_FIN_WAIT1: "fin-wait-1",
                             SS_FIN_WAIT2: "fin-wait-2",
                             SS_TIME_WAIT: "time-wait",
                             SS_CLOSE: "unconnected",
                             SS_CLOSE_WAIT: "close-wait",
                             SS_LAST_ACK: "last-ack",
                             SS_LISTEN: "listening",
                             SS_CLOSING: "closing"}

            return state_str_map[value]

        @staticmethod
        def opts_c_cb(key, value, **ctx):
            """Decode the tcpi_options bitmask into a list of option names."""
            tcp_info_raw = ctx['raw']

            # tcp_info opt flags
            TCPI_OPT_TIMESTAMPS = 1
            TCPI_OPT_SACK = 2
            TCPI_OPT_ECN = 8

            out = []

            opts = tcp_info_raw['tcpi_options']
            if (opts & TCPI_OPT_TIMESTAMPS):
                out.append("ts")
            if (opts & TCPI_OPT_SACK):
                out.append("sack")
            if (opts & TCPI_OPT_ECN):
                out.append("ecn")

            return out

    def _refine_tcp_info(self, vessel, tcp_info_raw):
        """Refine the raw tcp_info extension into vessel['tcp_info'].

        Mapping: raw field name -> (refined key, optional callback).
        A refined key of None (or callback 'skip') drops the field.
        """
        ti = TCP.InfoCbCore

        info_refine_tabl = {'tcpi_state': ('state', ti.state_c_cb),
                            'tcpi_pmtu': ('pmtu', None),
                            'tcpi_retrans': ('retrans', None),
                            'tcpi_ato': ('ato', ti.generic_1k_n_cb),
                            'tcpi_rto': ('rto', ti.rto_n_cb),
                            # TODO consider wscale baking
                            'tcpi_snd_wscale': ('snd_wscale', None),
                            'tcpi_rcv_wscale': ('rcv_wscale', None),
                            # TODO bps baking
                            'tcpi_snd_mss': ('snd_mss', None),
                            'tcpi_snd_cwnd': ('snd_cwnd', None),
                            'tcpi_snd_ssthresh': ('snd_ssthresh',
                                                  ti.snd_thresh_p_cb),
                            # TODO consider rtt agglomeration - needs nesting
                            'tcpi_rtt': ('rtt', ti.rtt_p_cb),
                            'tcpi_rttvar': ('rttvar', ti.generic_1k_n_cb),
                            'tcpi_rcv_rtt': ('rcv_rtt', ti.generic_1k_n_cb),
                            'tcpi_rcv_space': ('rcv_space', None),
                            'tcpi_options': ('opts', ti.opts_c_cb),
                            # unclear, NB not in use by iproute2 ss latest
                            'tcpi_last_data_sent': ('last_data_sent', None),
                            # Fix: this key was duplicated in the original dict
                            'tcpi_rcv_ssthresh': ('rcv_ssthresh', None),
                            'tcpi_segs_in': ('segs_in', None),
                            'tcpi_segs_out': ('segs_out', None),
                            'tcpi_data_segs_in': ('data_segs_in', None),
                            'tcpi_data_segs_out': ('data_segs_out', None),
                            'tcpi_lost': ('lost', None),
                            'tcpi_notsent_bytes': ('notsent_bytes', None),
                            'tcpi_rcv_mss': ('rcv_mss', None),
                            'tcpi_pacing_rate': ('pacing_rate', None),
                            'tcpi_retransmits': ('retransmits', None),
                            'tcpi_min_rtt': ('min_rtt', None),
                            'tcpi_rwnd_limited': ('rwnd_limited', None),
                            'tcpi_max_pacing_rate': ('max_pacing_rate', None),
                            'tcpi_probes': ('probes', None),
                            'tcpi_reordering': ('reordering', None),
                            'tcpi_last_data_recv': ('last_data_recv', None),
                            'tcpi_bytes_received': ('bytes_received', None),
                            'tcpi_fackets': ('fackets', None),
                            'tcpi_last_ack_recv': ('last_ack_recv', None),
                            'tcpi_last_ack_sent': ('last_ack_sent', None),
                            'tcpi_unacked': ('unacked', None),
                            'tcpi_sacked': ('sacked', None),
                            'tcpi_bytes_acked': ('bytes_acked', None),
                            'tcpi_delivery_rate_app_limited':
                            ('delivery_rate_app_limited', None),
                            'tcpi_delivery_rate': ('delivery_rate', None),
                            'tcpi_sndbuf_limited': ('sndbuf_limited', None),
                            'tcpi_ca_state': ('ca_state', None),
                            'tcpi_busy_time': ('busy_time', None),
                            'tcpi_total_retrans': ('total_retrans', None),
                            'tcpi_advmss': ('advmss', None),
                            'tcpi_backoff': (None, None),
                            'tcpv_enabled': (None, 'skip'),
                            'tcpv_rttcnt': (None, 'skip'),
                            'tcpv_rtt': (None, 'skip'),
                            'tcpv_minrtt': (None, 'skip'),
                            # BBR
                            'bbr_bw_lo': ('bbr_bw_lo', None),
                            'bbr_bw_hi': ('bbr_bw_hi', None),
                            'bbr_min_rtt': ('bbr_min_rtt', None),
                            'bbr_pacing_gain': ('bbr_pacing_gain', None),
                            'bbr_cwnd_gain': ('bbr_cwnd_gain', None),
                            # DCTCP
                            'dctcp_enabled': ('dctcp_enabled', None),
                            'dctcp_ce_state': ('dctcp_ce_state', None),
                            'dctcp_alpha': ('dctcp_alpha', None),
                            'dctcp_ab_ecn': ('dctcp_ab_ecn', None),
                            'dctcp_ab_tot': ('dctcp_ab_tot', None)}
        k_idx = 0
        cb_idx = 1

        info_k = 'tcp_info'
        vessel[info_k] = {}

        # BUG - pyroute2 diag - seems always last info instance from kernel
        if type(tcp_info_raw) != str:
            for k, v in tcp_info_raw.items():
                if k not in info_refine_tabl:
                    continue

                refined_k = info_refine_tabl[k][k_idx]
                cb = info_refine_tabl[k][cb_idx]
                # Fix: entries with a None refined key (e.g. 'tcpi_backoff')
                # previously leaked a None/"null" key into the JSON output.
                if refined_k is None or cb == 'skip':
                    continue

                refined_v = v
                if cb:
                    ctx = {'raw': tcp_info_raw}
                    refined_v = cb(k, v, **ctx)

                vessel[info_k][refined_k] = refined_v

        return vessel
def prepare_args():
parser = argparse.ArgumentParser(description="""
ss2 - socket statistics depictor meant as
a complete and convenient surrogate for
iproute2/misc/ss2""")
parser.add_argument('-x', '--unix',
help='Display Unix domain sockets.',
action='store_true')
parser.add_argument('-t', '--tcp',
help='Display TCP sockets.',
action='store_true')
parser.add_argument('-l', '--listen',
help='Display listening sockets.',
action='store_true')
parser.add_argument('-a', '--all',
help='Display all sockets.',
action='store_true')
parser.add_argument('-p', '--process',
help='show socket holding context',
action='store_true')
parser.add_argument('-r', '--resolve',
help='resolve host names in addition',
action='store_true')
| |
from __future__ import print_function
import os
import re
import sys
#from talibrt import abstract
# FIXME: initialize once, then shutdown at the end, rather than each call?
# FIXME: should we pass startIdx and endIdx into function?
# FIXME: don't return number of elements since it always equals allocation?
# Collect one whitespace-normalized string per exported TA-Lib-RT function
# prototype found in ta_func.h.
functions = []
# Candidate install prefixes for the ta-lib-rt headers.
include_paths = ['/usr/include', '/usr/local/include', '/opt/include', '/opt/local/include']
if sys.platform == 'win32':
    include_paths = [r'c:\ta-lib-rt\c\include']
header_found = False
for path in include_paths:
    ta_func_header = os.path.join(path, 'ta-lib-rt', 'ta_func.h')
    if os.path.exists(ta_func_header):
        header_found = True
        break
if not header_found:
    print('Error: ta-lib-rt/ta_func.h not found', file=sys.stderr)
    sys.exit(1)
with open(ta_func_header) as f:
    tmp = []
    for line in f:
        line = line.strip()
        # A non-empty 'tmp' means we are inside a multi-line prototype;
        # otherwise start accumulating at an exported TA_ declaration.
        if tmp or \
            line.startswith('TA_LIB_API TA_RetCode TA_') or \
            line.startswith('TA_LIB_API int TA_'):
            line = re.sub('/\*[^\*]+\*/', '', line)  # strip comments
            tmp.append(line)
            # A blank line terminates the prototype: flush it as one string.
            if not line:
                s = ' '.join(tmp)
                s = re.sub('\s+', ' ', s)
                functions.append(s)
                tmp = []
# strip "float" functions
functions = [s for s in functions if not s.startswith('TA_LIB_API TA_RetCode TA_S_')]
# strip non-indicators
functions = [s for s in functions if not s.startswith('TA_RetCode TA_Set')]
functions = [s for s in functions if not s.startswith('TA_RetCode TA_Restore')]
# NOTE(review): '"_State("' is a constant truthy string, so this filter keeps
# every function unchanged; presumably 'if "_State(" in s' (or its negation)
# was intended — confirm the intent before changing behavior.
functions = [s for s in functions if "_State("]
# Emit the static preamble of the generated Cython module: cimports, the
# TALibResult enum, input-validation helpers and output-array constructors.
print("""\
from cpython.ref cimport PyObject
cimport numpy as np
from numpy import nan
from cython import boundscheck, wraparound
#from cpython.pycapsule cimport PyCapsule_New, PyCapsule_IsValid, PyCapsule_GetPointer, PyCapsule_Destructor
# _ta_check_success: defined in _common.pxi
from enum import Enum
class TALibResult(Enum):
OK = 0
LIB_NOT_INITIALIZED = 1
BAD_PARAM = 2
ALLOC_ERR = 3
GROUP_NOT_FOUND = 4
FUNC_NOT_FOUND = 5
INVALID_HANDLE = 6
INVALID_PARAM_HOLDER = 7
INVALID_PARAM_HOLDER_TYPE = 8
INVALID_PARAM_FUNCTION = 9
INPUT_NOT_ALL_INITIALIZE = 10
OUTPUT_NOT_ALL_INITIALIZE = 11
OUT_OF_RANGE_START_INDEX = 12
OUT_OF_RANGE_END_INDEX = 13
INVALID_LIST_TYPE = 14
BAD_OBJECT = 15
NOT_SUPPORTED = 16
NEED_MORE_DATA = 17
IO_FAILED = 18
INTERNAL_ERROR = 5000
UNKNOWN_ERR = 65535
cdef double NaN = nan
cdef extern from "numpy/arrayobject.h":
int PyArray_TYPE(np.ndarray)
np.ndarray PyArray_EMPTY(int, np.npy_intp*, int, int)
int PyArray_FLAGS(np.ndarray)
np.ndarray PyArray_GETCONTIGUOUS(np.ndarray)
np.import_array() # Initialize the NumPy C API
cimport talibrt._ta_lib as lib
from talibrt._ta_lib cimport TA_RetCode
cdef np.ndarray check_array(np.ndarray real):
if PyArray_TYPE(real) != np.NPY_DOUBLE:
raise Exception("input array type is not double")
if real.ndim != 1:
raise Exception("input array has wrong dimensions")
if not (PyArray_FLAGS(real) & np.NPY_C_CONTIGUOUS):
real = PyArray_GETCONTIGUOUS(real)
return real
cdef np.npy_intp check_length2(np.ndarray a1, np.ndarray a2) except -1:
cdef:
np.npy_intp length
length = a1.shape[0]
if length != a2.shape[0]:
raise Exception("input array lengths are different")
return length
cdef np.npy_intp check_length3(np.ndarray a1, np.ndarray a2, np.ndarray a3) except -1:
cdef:
np.npy_intp length
length = a1.shape[0]
if length != a2.shape[0]:
raise Exception("input array lengths are different")
if length != a3.shape[0]:
raise Exception("input array lengths are different")
return length
cdef np.npy_intp check_length4(np.ndarray a1, np.ndarray a2, np.ndarray a3, np.ndarray a4) except -1:
cdef:
np.npy_intp length
length = a1.shape[0]
if length != a2.shape[0]:
raise Exception("input array lengths are different")
if length != a3.shape[0]:
raise Exception("input array lengths are different")
if length != a4.shape[0]:
raise Exception("input array lengths are different")
return length
cdef np.npy_int check_begidx1(np.npy_intp length, double* a1) except -1:
cdef:
double val
for i from 0 <= i < length:
val = a1[i]
if val != val:
continue
return i
else:
raise Exception("inputs are all NaN")
cdef np.npy_int check_begidx2(np.npy_intp length, double* a1, double* a2) except -1:
cdef:
double val
for i from 0 <= i < length:
val = a1[i]
if val != val:
continue
val = a2[i]
if val != val:
continue
return i
else:
raise Exception("inputs are all NaN")
cdef np.npy_int check_begidx3(np.npy_intp length, double* a1, double* a2, double* a3) except -1:
cdef:
double val
for i from 0 <= i < length:
val = a1[i]
if val != val:
continue
val = a2[i]
if val != val:
continue
val = a3[i]
if val != val:
continue
return i
else:
raise Exception("inputs are all NaN")
cdef np.npy_int check_begidx4(np.npy_intp length, double* a1, double* a2, double* a3, double* a4) except -1:
cdef:
double val
for i from 0 <= i < length:
val = a1[i]
if val != val:
continue
val = a2[i]
if val != val:
continue
val = a3[i]
if val != val:
continue
val = a4[i]
if val != val:
continue
return i
else:
raise Exception("inputs are all NaN")
cdef np.ndarray make_double_array(np.npy_intp length, int lookback):
cdef:
np.ndarray outreal
double* outreal_data
outreal = PyArray_EMPTY(1, &length, np.NPY_DOUBLE, np.NPY_DEFAULT)
outreal_data = <double*>outreal.data
for i from 0 <= i < min(lookback, length):
outreal_data[i] = NaN
return outreal
cdef np.ndarray make_int_array(np.npy_intp length, int lookback):
cdef:
np.ndarray outinteger
int* outinteger_data
outinteger = PyArray_EMPTY(1, &length, np.NPY_INT32, np.NPY_DEFAULT)
outinteger_data = <int*>outinteger.data
for i from 0 <= i < min(lookback, length):
outinteger_data[i] = 0
return outinteger
""")
# cleanup variable names to make them more pythonic
def cleanup(name):
    """Map a TA-Lib parameter name to its pythonic equivalent.

    Strips the 'in' / 'optIn' Hungarian prefixes and lowercases the
    remainder, e.g. 'inReal' -> 'real', 'optInTimePeriod' -> 'timeperiod'.
    """
    for prefix in ('optIn', 'in'):
        if name.startswith(prefix):
            return name[len(prefix):].lower()
    return name.lower()
# print functions
names = []
for f in functions:
if 'Lookback' in f: # skip lookback functions
continue
state_init = '_StateInit' in f
state_free = '_StateFree' in f
state_init_or_free = state_init or state_free
state_save = '_StateSave' in f
state_load = '_StateLoad' in f
state_save_or_load = state_save or state_load
state_calc = '_State(' in f
state_batch = '_BatchState' in f
i = f.index('(')
name = f[:i].split()[2]
args = f[i:].split(',')
args = [re.sub('[\(\);]', '', s).strip() for s in args]
shortname = name[3:]
indicator_name = shortname.rsplit('_', 1)[0]
names.append(shortname)
#func_info = abstract.Function(shortname).info
#defaults, documentation = abstract._get_defaults_and_docs(func_info)
defaults = ""
documentation = ""
print('@wraparound(False) # turn off relative indexing from end of lists')
print('@boundscheck(False) # turn off bounds-checking for entire function')
print('def %s(' % shortname, end=' ')
docs = [' %s(' % shortname]
i = 0
for arg in args:
var = arg.split()[-1]
if var in ('startIdx', 'endIdx'):
continue
if '_state' in var:
if state_init or state_load:
continue
elif 'out' in var:
break
if i > 0:
print(',', end=' ')
i += 1
if '_state' in var:
if state_free or state_save or state_calc or state_batch:
print('size_t state', end=' ')
elif '_file' in var:
print('int hFile', end=' ')
elif var.endswith('[]'):
var = cleanup(var[:-2])
assert arg.startswith('const double'), arg
print('np.ndarray %s not None' % var, end=' ')
docs.append(var)
docs.append(', ')
elif state_calc:
var = cleanup(var)
assert arg.startswith('const double'), arg
print('double %s' % var, end=' ')
docs.append(var)
docs.append(', ')
elif not state_batch and var.startswith('opt'):
var = cleanup(var)
default_arg = arg.split()[-1][len('optIn'):] # chop off typedef and 'optIn'
default_arg = default_arg[0].lower() + default_arg[1:] # lowercase first letter
if arg.startswith('double'):
if default_arg in defaults:
print('double %s=%s' % (var, defaults[default_arg]), end=' ')
else:
print('double %s=-4e37' % var, end=' ') # TA_REAL_DEFAULT
elif arg.startswith('int'):
if default_arg in defaults:
print('int %s=%s' % (var, defaults[default_arg]), end=' ')
else:
print('int %s=-2**31' % var, end=' ') # TA_INTEGER_DEFAULT
elif arg.startswith('TA_MAType'):
print('int %s=0' % var, end=' ') # TA_MAType_SMA
else:
assert False, arg
if '[, ' not in docs:
docs[-1] = ('[, ')
docs.append('%s=?' % var)
docs.append(', ')
docs[-1] = '])' if '[, ' in docs else ')'
if documentation:
tmp_docs = []
lower_case = False
documentation = documentation.split('\n')[2:] # discard abstract calling definition
for line in documentation:
if 'prices' not in line and 'price' in line:
line = line.replace('price', 'real')
if not line or line.isspace():
tmp_docs.append('')
else:
tmp_docs.append(' %s' % line) # add an indent of 4 spaces
docs.append('\n\n')
docs.append('\n'.join(tmp_docs))
docs.append('\n ')
print('):')
if (state_init):
state_name = cleanup(args[0].split()[-1])
print(' cdef:')
print(' void * %s' % state_name)
print(' TA_RetCode retCode')
print(' retCode = lib.%s(&' % name, end=' ')
for i, arg in enumerate(args):
if i > 0:
print(',', end=' ')
var = arg.split()[-1]
var = cleanup(var)
print(var, end='')
print(')')
print(' _ta_check_success("%s", retCode)' % name)
print(' return TALibResult(retCode), <size_t>_state')
print('')
print('')
continue
if (state_free):
print(' cdef:')
print(' void * _state;')
print(' TA_RetCode retCode')
print(' _state = <void*>state')
print(' retCode = lib.%s( & _state )' % name)
print(' _ta_check_success("%s", retCode)' % name)
print(' return TALibResult(retCode)')
print('')
continue
if (state_load):
state_name = cleanup(args[0].split()[-1])
print(' cdef:')
print(' void * %s' % state_name)
print(' TA_RetCode retCode')
print(' retCode = lib.%s(&%s, <FILE *>hFile)' % (name, state_name))
print(' _ta_check_success("%s", retCode)' % name)
print(' return TALibResult(retCode), <size_t>%s' % state_name)
print('')
print('')
continue
if (state_save):
save_name = "TA_%s_StateSave" % indicator_name
print(' cdef:')
print(' void * _state;')
print(' TA_RetCode retCode')
print(' const char* name = "%s"' % indicator_name)
print(' _state = <void*> state')
print(' retCode = lib.%s( _state, <FILE | |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
class xena(Exchange):
    def describe(self):
        """Return the static description of the Xena exchange.

        Merges Xena-specific capabilities, endpoint URLs, timeframes, the
        fee schedule, error-string mappings and default options over the
        generic ``Exchange.describe()`` result via ``deep_extend``.
        """
        return self.deep_extend(super(xena, self).describe(), {
            'id': 'xena',
            'name': 'Xena Exchange',
            'countries': ['VC', 'UK'],
            'rateLimit': 100,
            'certified': True,
            # unified-method capability flags consumed by the ccxt base class
            'has': {
                'CORS': False,
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createDepositAddress': True,
                'createOrder': True,
                'editOrder': True,
                'fetchBalance': True,
                'fetchClosedOrders': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchDeposits': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrderBook': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTime': True,
                'fetchTrades': True,
                'fetchWithdrawals': True,
                'withdraw': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/51840849/87489843-bb469280-c64c-11ea-91aa-69c6326506af.jpg',
                # note: public and private endpoints live on different hosts
                'api': {
                    'public': 'https://trading.xena.exchange/api',
                    'private': 'https://api.xena.exchange',
                },
                'www': 'https://xena.exchange',
                'doc': 'https://support.xena.exchange/support/solutions/44000808700',
                'fees': 'https://trading.xena.exchange/en/platform-specification/fee-schedule',
            },
            'timeframes': {
                '1m': '1m',
                '5m': '5m',
                '15m': '15m',
                '30m': '30m',
                '1h': '1h',
                '4h': '4h',
                '12h': '12h',
                '1d': '1d',
                '1w': '1w',
            },
            'api': {
                'public': {
                    'get': [
                        'common/currencies',
                        'common/instruments',
                        'common/features',
                        'common/commissions',
                        'common/news',
                        'market-data/candles/{marketId}/{timeframe}',
                        'market-data/market-watch',
                        'market-data/dom/{symbol}',
                        'market-data/candles/{symbol}/{timeframe}',
                        'market-data/trades/{symbol}',
                        'market-data/server-time',
                        'market-data/v2/candles/{symbol}/{timeframe}',
                        'market-data/v2/trades/{symbol}',
                        'market-data/v2/dom/{symbol}/',
                        'market-data/v2/server-time',
                    ],
                },
                'private': {
                    'get': [
                        'trading/accounts/{accountId}/order',
                        'trading/accounts/{accountId}/active-orders',
                        'trading/accounts/{accountId}/last-order-statuses',
                        'trading/accounts/{accountId}/positions',
                        'trading/accounts/{accountId}/positions-history',
                        'trading/accounts/{accountId}/margin-requirements',
                        'trading/accounts',
                        'trading/accounts/{accountId}/balance',
                        'trading/accounts/{accountId}/trade-history',
                        # 'trading/accounts/{accountId}/trade-history?symbol=BTC/USDT&client_order_id=EMBB8Veke&trade_id=220143254',
                        'transfers/accounts',
                        'transfers/accounts/{accountId}',
                        'transfers/accounts/{accountId}/deposit-address/{currency}',
                        'transfers/accounts/{accountId}/deposits',
                        'transfers/accounts/{accountId}/trusted-addresses',
                        'transfers/accounts/{accountId}/withdrawals',
                        'transfers/accounts/{accountId}/balance-history',
                        # 'transfers/accounts/{accountId}/balance-history?currency={currency}&from={time}&to={time}&kind={kind}&kind={kind}',
                        # 'transfers/accounts/{accountId}/balance-history?page={page}&limit={limit}',
                        # 'transfers/accounts/{accountId}/balance-history?txid=3e1db982c4eed2d6355e276c5bae01a52a27c9cef61574b0e8c67ee05fc26ccf',
                    ],
                    'post': [
                        'trading/order/new',
                        'trading/order/heartbeat',
                        'trading/order/cancel',
                        'trading/order/mass-cancel',
                        'trading/order/replace',
                        'trading/position/maintenance',
                        'transfers/accounts/{accountId}/withdrawals',
                        'transfers/accounts/{accountId}/deposit-address/{currency}',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.0005,
                    'taker': 0.001,
                    'tierBased': True,
                    'percentage': True,
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,
                    'withdraw': {},
                    'deposit': {},
                },
            },
            # maps server error strings to ccxt exception classes;
            # 'exact' requires full-string match, 'broad' a substring match
            'exceptions': {
                'exact': {
                    'Validation failed': BadRequest,
                    'Unknown derivative symbol': BadSymbol,  # {"error":"Unknown derivative symbol"}
                    'Unknown account': BadRequest,  # {"error":"Unknown account"}
                    'Wrong TransactTime': BadRequest,  # {"error":"Wrong TransactTime"}
                    'ClOrdId is empty': BadRequest,  # {"error":"ClOrdId is empty"}
                },
                'broad': {
                    'Invalid aggregation ratio or depth': BadRequest,
                    'address': InvalidAddress,
                    'Money not enough': InsufficientFunds,
                    'parse error': BadRequest,
                    'Not enough': InsufficientFunds,  # {"error":"Not enough free margin"}
                },
            },
            'options': {
                'defaultType': 'margin',  # 'margin',
                'accountId': None,  # '1012838157',
            },
        })
async def fetch_time(self, params={}):
response = await self.publicGetMarketDataV2ServerTime(params)
#
# {
# "msgType":"0",
# "transactTime":1594774454112817637
# }
#
transactTime = self.safe_integer(response, 'transactTime')
return int(transactTime / 1000000)
    async def fetch_markets(self, params={}):
        """Fetch all tradable instruments and map them onto unified markets.

        Non-margin instruments (e.g. indices) are kept in the result with
        their raw symbol; margin instruments are classified as 'future' or
        'swap' based on their marginType.
        """
        response = await self.publicGetCommonInstruments(params)
        #
        #     [
        #         {
        #             "id":"ETHUSD_3M_250920",
        #             "type":"Margin",
        #             "marginType":"XenaFuture",
        #             "symbol":"ETHUSD_3M_250920",
        #             "baseCurrency":"ETH",
        #             "quoteCurrency":"USD",
        #             "settlCurrency":"BTC",
        #             "tickSize":2,
        #             "minOrderQuantity":"1",
        #             "orderQtyStep":"1",
        #             "limitOrderMaxDistance":"10",
        #             "priceInputMask":"0000.00",
        #             "enabled":true,
        #             "liquidationMaxDistance":"0.01",
        #             "contractValue":"1",
        #             "contractCurrency":"BTC",
        #             "lotSize":"1",
        #             "tickValue":"0.00000001",  # linear contracts only
        #             "maxOrderQty":"175000",
        #             "maxPosVolume":"1750000",
        #             "mark":".ETHUSD_3M_250920",
        #             "underlying":".ETHUSD_TWAP",
        #             "openInterest":".ETHUSD_3M_250920_OpenInterest",
        #             "floatingPL":"BidAsk",  # perpetual contracts only
        #             "addUvmToFreeMargin":"ProfitAndLoss",
        #             "margin":{
        #                 "netting":"PositionsAndOrders",
        #                 "rates":[
        #                     {"maxVolume":"175000","initialRate":"0.05","maintenanceRate":"0.0125"},
        #                     {"maxVolume":"350000","initialRate":"0.1","maintenanceRate":"0.025"},
        #                     {"maxVolume":"500000","initialRate":"0.2","maintenanceRate":"0.05"},
        #                     {"maxVolume":"750000","initialRate":"0.3","maintenanceRate":"0.075"},
        #                     {"maxVolume":"1050000","initialRate":"0.4","maintenanceRate":"0.1"},
        #                     {"maxVolume":"1400000","initialRate":"0.5","maintenanceRate":"0.125"},
        #                     {"maxVolume":"1750000","initialRate":"1","maintenanceRate":"0.25"}
        #                 ],
        #                 "rateMultipliers":{
        #                     "LimitBuy":"1",
        #                     "LimitSell":"1",
        #                     "Long":"1",
        #                     "MarketBuy":"1",
        #                     "MarketSell":"1",
        #                     "Short":"1",
        #                     "StopBuy":"0",
        #                     "StopSell":"0"
        #                 }
        #             },
        #             "clearing":{"enabled":true,"index":".ETHUSD_3M_250920"},
        #             "premium":{"enabled":true,"index":".XBTUSD_Premium_IR_Corrected"},  # perpetual contracts only
        #             "riskAdjustment":{"enabled":true,"index":".RiskAdjustment_IR"},
        #             "expiration":{"enabled":true,"index":".ETHUSD_TWAP"},  # futures only
        #             "pricePrecision":3,
        #             "priceRange":{
        #                 "enabled":true,
        #                 "distance":"0.03",
        #                 "movingBoundary":"0",
        #                 "lowIndex":".ETHUSD_3M_250920_LOWRANGE",
        #                 "highIndex":".ETHUSD_3M_250920_HIGHRANGE"
        #             },
        #             "priceLimits":{
        #                 "enabled":true,
        #                 "distance":"0.5",
        #                 "movingBoundary":"0",
        #                 "lowIndex":".ETHUSD_3M_250920_LOWLIMIT",
        #                 "highIndex":".ETHUSD_3M_250920_HIGHLIMIT"
        #             },
        #             "inverse":true,  # inverse contracts only
        #             "serie":"ETHUSD",  # futures only
        #             "tradingStartDate":"2020-03-27 07:00:00",
        #             "expiryDate":"2020-09-25 08:00:00"  # futures only
        #         },
        #         {
        #             "type":"Index",
        #             "symbol":".ETHUSD_Premium_IR_Corrected",
        #             "tickSize":6,
        #             "enabled":true,
        #             "basis":365
        #         },
        #     ]
        #
        result = []
        for i in range(0, len(response)):
            market = response[i]
            type = self.safe_string_lower(market, 'type')
            id = self.safe_string(market, 'symbol')
            numericId = self.safe_string(market, 'id')
            marginType = self.safe_string(market, 'marginType')
            baseId = self.safe_string(market, 'baseCurrency')
            quoteId = self.safe_string(market, 'quoteCurrency')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            # default to the raw instrument symbol; only perpetual swaps
            # get the unified BASE/QUOTE form below
            symbol = id
            if type == 'margin':
                if marginType == 'XenaFuture':
                    type = 'future'
                elif marginType == 'XenaListedPerpetual':
                    type = 'swap'
                    symbol = base + '/' + quote
            future = (type == 'future')
            swap = (type == 'swap')
            # futures carry 'tickSize', indices carry 'pricePrecision'
            pricePrecision = self.safe_integer_2(market, 'tickSize', 'pricePrecision')
            precision = {
                'price': pricePrecision,
                'amount': 0,
            }
            maxCost = self.safe_float(market, 'maxOrderQty')
            minCost = self.safe_float(market, 'minOrderQuantity')
            limits = {
                'amount': {
                    'min': None,
                    'max': None,
                },
                'price': {
                    'min': None,
                    'max': None,
                },
                'cost': {
                    'min': minCost,
                    'max': maxCost,
                },
            }
            active = self.safe_value(market, 'enabled', False)
            inverse = self.safe_value(market, 'inverse', False)
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'numericId': numericId,
                'active': active,
                'type': type,
                'spot': False,
                'future': future,
                'swap': swap,
                'inverse': inverse,
                'precision': precision,
                'limits': limits,
                'info': market,
            })
        return result
    async def fetch_currencies(self, params={}):
        """Fetch the supported currencies and map them onto unified entries.

        The response is a dict keyed by currency id; each value carries the
        display name, precision, withdraw limits/fees and an enabled flag.
        """
        response = await self.publicGetCommonCurrencies(params)
        #
        #     {
        #         "BAB": {
        #             "name":"BAB",
        #             "title":"Bitcoin ABC",
        #             "blockchain":{
        #                 "name":"BAB",
        #                 "title":"Bitcoin ABC",
        #                 "deposit":{"confirmations":6},
        #                 "withdraw":{"confirmations":1},
        #                 "addressReuseAllowed":false,
        #                 "view":{
        #                     "uriTemplate":"bitcoinabc:%s?message=Xena Exchange",
        #                     "recommendedFee":"0.00001",
        #                     "transactionUrl":"https://blockchair.com/bitcoin-cash/transaction/${txId}",
        #                     "walletUrl":"https://blockchair.com/bitcoin-cash/address/${walletId}"
        #                 }
        #             },
        #             "precision":5,
        #             "withdraw":{"minAmount":"0.01","commission":"0.001"},
        #             "view":{
        #                 "color":"#DC7C08",
        #                 "site":"https://www.bitcoinabc.org"
        #             },
        #             "enabled":true
        #         },
        #     }
        ids = list(response.keys())
        result = {}
        for i in range(0, len(ids)):
            id = ids[i]
            currency = response[id]
            code = self.safe_currency_code(id)
            name = self.safe_string(currency, 'title')
            precision = self.safe_integer(currency, 'precision')
            enabled = self.safe_value(currency, 'enabled')
            # treat anything other than an explicit true as inactive
            active = (enabled is True)
            withdraw = self.safe_value(currency, 'withdraw', {})
            result[code] = {
                'id': id,
                'code': code,
                'info': currency,
                'name': name,
                'active': active,
                'fee': self.safe_float(withdraw, 'commission'),
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': None,
                        'max': None,
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'withdraw': {
                        'min': self.safe_float(withdraw, 'minAmount'),
                        'max': None,
                    },
                },
            }
        return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "symbol":".XBTUSD_3M_250920_MID",
# "firstPx":"9337.49",
# "lastPx":"9355.81",
# "highPx":"9579.42",
# "lowPx":"9157.63",
# "buyVolume":"0",
# "sellVolume":"0",
# "bid":"0",
# "ask":"0"
# }
#
timestamp = self.milliseconds()
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_float(ticker, 'lastPx')
open = self.safe_float(ticker, 'firstPx')
percentage = None
change = None
average = None
if (last is not None) and (open is not None):
change = last - open
average = self.sum(last, open) / 2
if open > 0:
percentage = change / open * 100
buyVolume = self.safe_float(ticker, 'buyVolume')
sellVolume = self.safe_float(ticker, 'sellVolume')
baseVolume = self.sum(buyVolume, sellVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highPx'),
'low': self.safe_float(ticker, 'lowPx'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
tickers = await self.fetch_tickers(None, params)
if symbol in tickers:
return tickers[symbol]
raise BadSymbol(self.id + ' fetchTicker could not find a ticker with symbol ' + symbol)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetMarketDataMarketWatch(params)
#
# [
# {
# "symbol":".XBTUSD_3M_250920_MID",
# "firstPx":"9337.49",
# "lastPx":"9355.81",
# "highPx":"9579.42",
# "lowPx":"9157.63",
# "buyVolume":"0",
# "sellVolume":"0",
# "bid":"0",
# "ask":"0"
# }
# ]
#
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book (depth of market) for a symbol.

        The raw entries are grouped by mdEntryType ('0' = bids, '1' = asks)
        before being handed to the generic parser. lastUpdateTime is in
        nanoseconds and is converted to milliseconds.
        """
        await self.load_markets()
        request = {
            'symbol': self.market_id(symbol),
        }
        if limit is not None:
            request['depth'] = limit
        response = await self.publicGetMarketDataV2DomSymbol(self.extend(request, params))
        #
        #     {
        #         "msgType":"W",
        #         "mdStreamId":"DOM:XBTUSD:aggregated",
        #         "lastUpdateTime":1594772683037691997,
        #         "mdBookType":"2",
        #         "symbol":"XBTUSD",
        #         "lowRangePx":"9132.24",
        #         "highRangePx":"9410.36",
        #         "lowLimitPx":"9132.24",
        #         "highLimitPx":"9410.36",
        #         "clearingPx":"9253.4",
        #         "bestBid":"9269.8",
        #         "bestAsk":"9275.9",
        #         "mdEntry":[
        #             {"mdEntryType":"1","mdEntryPx":"9275.9","mdEntrySize":"3000","numberOfOrders":1},
        #             {"mdEntryType":"1","mdEntryPx":"9277.7","mdEntrySize":"50000","numberOfOrders":1},
        #             {"mdEntryType":"1","mdEntryPx":"9277.8","mdEntrySize":"2000","numberOfOrders":1},
        #             {"mdEntryType":"0","mdEntryPx":"9269.8","mdEntrySize":"2000","numberOfOrders":1},
        #             {"mdEntryType":"0","mdEntryPx":"9267.9","mdEntrySize":"3000","numberOfOrders":1},
        #             {"mdEntryType":"0","mdEntryPx":"9267.8","mdEntrySize":"50000","numberOfOrders":1},
        #         ]
        #     }
        #
        mdEntry = self.safe_value(response, 'mdEntry', [])
        mdEntriesByType = self.group_by(mdEntry, 'mdEntryType')
        lastUpdateTime = self.safe_integer(response, 'lastUpdateTime')
        # nanoseconds -> milliseconds
        timestamp = int(lastUpdateTime / 1000000)
        return self.parse_order_book(mdEntriesByType, timestamp, '0', '1', 'mdEntryPx', 'mdEntrySize')
async def fetch_accounts(self, params={}):
response = await self.privateGetTradingAccounts(params)
#
# {
# "accounts": [
# {"id":8273231, "kind": "Spot"},
# {"id":10012833469, "kind": "Margin", "currency": "BTC"}
# ]
# }
#
accounts = self.safe_value(response, 'accounts')
result = []
for i in range(0, len(accounts)):
account = accounts[i]
accountId = self.safe_string(account, 'id')
currencyId = self.safe_string(account, 'currency')
code = self.safe_currency_code(currencyId)
type = self.safe_string_lower(account, 'kind')
result.append({
'id': accountId,
'type': type,
'currency': code,
'info': | |
y_train_batch = iter(X_train).next()
except ValueError:
x_train_batch = iter(X_train).next()
y_train_batch = None
else:
x_train_batch = X_train[:batch_size]
y_train_batch = y_train[:batch_size] if y_train is not None else None
if isinstance(X_test, DataLoader):
assert y_test is None, (
"If `X_test` is of type torch.utils.data.DataLoader, `y_test` must be None. The dataloader must " +
"return values for X and y when iterating."
)
if X_test is not None:
try:
x_test_batch, y_test_batch = iter(X_test).next()
except ValueError:
x_test_batch = iter(X_test).next()
y_test_batch = None
else:
x_test_batch = X_test[:batch_size] if X_test is not None else None
y_test_batch = y_test[:batch_size] if y_test is not None else None
return x_train_batch, y_train_batch, x_test_batch, y_test_batch
def _assert_shapes(self, X_train, y_train, X_test, y_test):
assert len(X_train.shape) == 2 or len(X_train.shape) == 4, (
"X_train must be either have 2 or 4 shape dimensions. Given: {}.".format(X_train.shape) +
"Try to use X_train.reshape(-1, 1) or X_train.reshape(-1, 1, height, width)."
)
assert X_train.shape[1:] == self.x_dim, (
"Wrong input shape for adversary / encoder. Given: {}. Needed: {}.".format(X_train.shape, self.x_dim)
)
if X_test is not None:
assert X_train.shape[1:] == X_test.shape[1:], (
"X_train and X_test must have same dimensions. Given: {} and {}.".format(X_train.shape[1:], X_test.shape[1:])
)
def _create_steps(self, steps):
""" Creates the `self.steps` dictionary.
The created dictionary must be of the form {"Network1": steps1, "Network2": steps2, ...}. During training
"Network1" will be trained for `steps1` steps, and so on.
Functionality similar to `self_define_optimizers()`.
Parameters
----------
steps : None, dict
If not None a dictionary of the form {"Network1": steps1, "Network2": steps2, ...} is expected.
This dictionary might also be partially filled.
"""
steps = {}
for name, neural_net in self.neural_nets.items():
steps[name] = 1
if steps is not None:
assert isinstance(steps, dict), "steps parameter must be of type dict. Given: {}.".format(type(steps))
steps = steps
for name, _ in self.neural_nets.items():
if name not in steps:
steps[name] = 1
self._check_dict_keys(steps, where="_create_steps")
return steps
    def _set_up_saver(self, print_every, save_model_every, save_images_every, save_losses_every, nr_batches):
        """ Calculates saving indicators if strings are passed. Additionally corresponding folders are created
        in `self.folder`.

        Each `*_every` argument may be an int (a batch count) or a string of
        the form "0.25e" which is translated into a batch count via
        `self._string_to_batchnr`. Also resets the training timers.

        Parameters
        ----------
        print_every, save_model_every, save_images_every, save_losses_every : int, str, None
            Saving / logging frequencies; strings are epoch fractions.
        nr_batches : int
            Number of batches per epoch, used to translate epoch fractions.

        Returns
        -------
        Returns all saving indicators as integers.
        """
        print_every = self._string_to_batchnr(log_string=print_every, nr_batches=nr_batches, name="print_every")
        if save_model_every is not None:
            save_model_every = self._string_to_batchnr(log_string=save_model_every, nr_batches=nr_batches, name="save_model_every")
            assert self.folder is not None, (
                "`folder` argument in constructor was set to `None`. `save_model_every` must be None or `folder` needs to be specified."
            )
            # model checkpoints land in <folder>/models/
            os.mkdir(os.path.join(self.folder, "models/"))
        if save_images_every is not None:
            save_images_every = self._string_to_batchnr(log_string=save_images_every, nr_batches=nr_batches, name="save_images_every")
            assert self.folder is not None, (
                "`folder` argument in constructor was set to `None`. `save_images_every` must be None or `folder` needs to be specified."
            )
            # generated sample images land in <folder>/images/
            os.mkdir(os.path.join(self.folder, "images/"))
        save_losses_every = self._string_to_batchnr(log_string=save_losses_every, nr_batches=nr_batches, name="save_losses_every")
        # reset the wall-clock bookkeeping used by _summarise_batch
        self.total_training_time = 0
        self.current_timer = time.perf_counter()
        self.batch_training_times = []
        return print_every, save_model_every, save_images_every, save_losses_every
def _string_to_batchnr(self, log_string, nr_batches, name):
""" Transforms string of the form "0.2e" into 0.2 and performs basic sanity checks.
"""
if isinstance(log_string, str):
assert log_string.endswith("e"), "If `{}` is string, must end with 'e' (for epoch), e.g. 0.25e.".format(name)
save_epochs = float(log_string.split("e")[0])
log_string = max([int(save_epochs*nr_batches), 1])
return log_string
#########################################################################
# Actions during training
#########################################################################
    def fit(self, X_train, X_test=None, epochs=5, batch_size=32, steps=None,
            print_every="1e", save_model_every=None, save_images_every=None, save_losses_every="1e", enable_tensorboard=False):
        """ Trains the model, iterating over all contained networks.
        Parameters
        ----------
        X_train : np.array or torch.utils.data.DataLoader
            Training data for the generative network. Usually images.
        X_test : np.array, optional
            Testing data for the generative network. Must have same shape as X_train.
        epochs : int, optional
            Number of epochs (passes over the training data set) performed during training.
        batch_size : int, optional
            Batch size used when creating the data loader from X_train. Ignored if torch.utils.data.DataLoader is passed
            for X_train.
        steps : dict, optional
            Dictionary with names of the networks to indicate how often they should be trained, i.e. {"Generator": 5} indicates
            that the generator is trained 5 times while all other networks are trained once.
        print_every : int, string, optional
            Indicates after how many batches the losses for the train data should be printed to the console. Can also be a string
            of the form "0.25e" (4 times per epoch), "1e" (once per epoch) or "3e" (every third epoch).
        save_model_every : int, string, optional
            Indicates after how many batches the model should be saved. Can also be a string
            of the form "0.25e" (4 times per epoch), "1e" (once per epoch) or "3e" (every third epoch).
        save_images_every : int, string, optional
            Indicates after how many batches the images for the losses and fixed_noise should be saved. Can also be a string
            of the form "0.25e" (4 times per epoch), "1e" (once per epoch) or "3e" (every third epoch).
        save_losses_every : int, string, optional
            Indicates after how many batches the losses for the train and test data should be calculated. Can also be a string
            of the form "0.25e" (4 times per epoch), "1e" (once per epoch) or "3e" (every third epoch).
        enable_tensorboard : bool, optional
            Flag to indicate whether subdirectory folder/tensorboard should be created to log losses and images.
        """
        if not self._init_run:
            raise ValueError("Run initializer of the AbstractGenerativeModel class is your subclass!")
        train_dataloader, test_dataloader, writer_train, writer_test, save_periods = self._set_up_training(
            X_train, y_train=None, X_test=X_test, y_test=None, epochs=epochs, batch_size=batch_size, steps=steps,
            print_every=print_every, save_model_every=save_model_every, save_images_every=save_images_every,
            save_losses_every=save_losses_every, enable_tensorboard=enable_tensorboard
        )
        max_batches = len(train_dataloader)
        # NOTE(review): `iter(...).next()` relies on the iterator exposing a
        # py2-style `.next()` method (older torch DataLoader iterators did);
        # on plain Python 3 iterators this would need `next(iter(...))` - confirm.
        test_x_batch = iter(test_dataloader).next().to(self.device).float() if X_test is not None else None
        print_every, save_model_every, save_images_every, save_losses_every = save_periods
        # peek at one batch to verify the dataloader yields only X (no labels)
        train_x_batch = iter(train_dataloader).next()
        if len(train_x_batch) != batch_size:
            raise ValueError(
                "Return value from train_dataloader has wrong shape. Should return object of size batch_size. " +
                "Did you pass a dataloader to `X_train` containing labels as well?"
            )
        self.train()
        if save_images_every is not None:
            # baseline snapshot of the fixed-noise samples before training
            self._log_images(images=self.generate(z=self.fixed_noise), step=0, writer=writer_train)
        for epoch in range(epochs):
            print("---"*20)
            print("EPOCH:", epoch+1)
            print("---"*20)
            for batch, X in enumerate(train_dataloader):
                batch += 1
                step = epoch*max_batches + batch
                X = X.to(self.device).float()
                Z = self.sample(n=len(X))
                # train each network for its configured number of steps
                for name, _ in self.neural_nets.items():
                    for _ in range(self.steps[name]):
                        self._losses = self.calculate_losses(X_batch=X, Z_batch=Z, who=name)
                        self._zero_grad(who=name)
                        self._backward(who=name)
                        self._step(who=name)
                if print_every is not None and step % print_every == 0:
                    self._losses = self.calculate_losses(X_batch=X, Z_batch=Z)
                    self._summarise_batch(
                        batch=batch, max_batches=max_batches, epoch=epoch,
                        max_epochs=epochs, print_every=print_every
                    )
                if save_model_every is not None and step % save_model_every == 0:
                    self.save(name="models/model_{}.torch".format(step))
                if save_images_every is not None and step % save_images_every == 0:
                    self._log_images(images=self.generate(z=self.fixed_noise), step=step, writer=writer_train)
                    self._save_losses_plot()
                if save_losses_every is not None and step % save_losses_every == 0:
                    self._log_losses(X_batch=X, Z_batch=Z, mode="Train")
                    if enable_tensorboard:
                        self._log_scalars(step=step, writer=writer_train)
                    if test_x_batch is not None:
                        self._log_losses(X_batch=test_x_batch, Z_batch=self.sample(n=len(test_x_batch)), mode="Test")
                        if enable_tensorboard:
                            self._log_scalars(step=step, writer=writer_test)
        self.eval()
        self._clean_up(writers=[writer_train, writer_test])
    @abstractmethod
    def calculate_losses(self, X_batch, Z_batch, who=None):
        """Compute the current losses for the contained networks.

        Must be implemented by subclasses. Called from `fit` with a batch of
        real samples and a batch of latent samples; expected to return a dict
        mapping network names to losses (presumably torch tensors supporting
        `.backward()` - see `_backward`). `who` selects a single network by
        name; if None, losses for all networks are expected.
        """
        pass
def _calculate_feature_loss(self, X_real, X_fake):
""" Calculates feature loss if `self.feature_layer` is not None.
Every network takes the `feature_layer` argument in its constructor.
If it is not None, a layer of the discriminator / critic should be specified.
A feature loss will be calculated which is the MSE between the output for real
and fake samples of the specified `self.feature_layer`.
Parameters
----------
X_real : torch.Tensor
Real samples.
X_fake : torch.Tensor
Fake samples.
"""
X_real_features = self.feature_layer(X_real)
X_fake_features = self.feature_layer(X_fake)
feature_loss = MSELoss()(X_real_features, X_fake_features)
return feature_loss
def _zero_grad(self, who=None):
if who is not None:
self.optimizers[who].zero_grad()
else:
[optimizer.zero_grad() for _, optimizer in self.optimizers.items()]
def _backward(self, who=None):
assert len(self._losses) != 0, "'self._losses' empty when performing '_backward'."
if who is not None:
self._losses[who].backward(retain_graph=True)
else:
[loss.backward(retain_graph=True) for _, loss in self._losses.items()]
def _step(self, who=None):
if who is not None:
self.optimizers[who].step()
else:
[optimizer.step() for _, optimizer in self.optimizers.items()]
#########################################################################
# Logging during training
#########################################################################
def _summarise_batch(self, batch, max_batches, epoch, max_epochs, print_every):
""" Print summary statistics after a specified amount of batches.
Parameters
----------
batch : int
Current batch.
max_batches : int
Maximum number of total batches.
epoch : int
Current epoch.
max_epochs : int
Maximum number of total epochs.
print_every : int
After how many batches the summary should be printed.
"""
step = epoch*max_batches + batch
max_steps = max_epochs*max_batches
remaining_batches = max_epochs*max_batches - step
print("Step: {} / {} (Epoch: {} / {}, Batch: {} / {})".format(
step, max_steps, epoch+1, max_epochs, batch, max_batches)
)
print("---"*20)
for name, loss in self._losses.items():
print("{}: {}".format(name, loss.item()))
self.batch_training_times.append(time.perf_counter() - self.current_timer)
self.total_training_time = np.sum(self.batch_training_times)
time_per_batch = np.mean(self.batch_training_times) / print_every
print("\n")
print("Time left: ~{} minutes (Steps remaining: | |
"Setting hidden line removal"
if ax.getp('hidden'):
# turn hidden line removal on
self._script += "hidden on,...\n"
else:
# turn hidden line removal off
self._script += "hidden off,...\n"
def _set_colorbar(self, ax):
"""Add a colorbar to the axis."""
if DEBUG:
print "Setting colorbar"
cbar = ax.getp('colorbar')
if cbar.getp('visible'):
# turn on colorbar
cbar_title = cbar.getp('cbtitle')
cbar_location = cbar.getp('cblocation')
self._script += "colorbar('%s'),..." % cbar_location
# FIXME: what about the title?
else:
# turn off colorbar
pass
def _set_caxis(self, ax):
"""Set the color axis scale."""
if DEBUG:
print "Setting caxis"
if ax.getp('caxismode') == 'manual':
cmin, cmax = ax.getp('caxis')
# NOTE: cmin and cmax might be None:
if cmin is None or cmax is None:
#cmin, cmax = [0,1]
self._script += "caxis manual,...\n"
else:
# set color axis scaling according to cmin and cmax
self._script += "caxis([%s,%s]),...\n" % (cmin, cmax)
else:
# use autoranging for color axis scale
self._script += "caxis auto,...\n"
def _set_colormap(self, ax):
"""Set the colormap."""
if DEBUG:
print "Setting colormap"
cmap = ax.getp('colormap')
# cmap is plotting package dependent
if cmap is not None:
self._script += "colormap %s,...\n" % cmap
else:
pass #self._script += "colormap default,...\n"
    def _set_view(self, ax):
        """Set viewpoint specification.

        Emits a Matlab view() command for 2D or 3D scenes. The advanced
        camera handling (roll, zoom, dolly, target, ...) below is fetched
        but currently disabled (commented out).
        """
        if DEBUG:
            print "Setting view"
        cam = ax.getp('camera')
        view = cam.getp('view')
        if view == 2:
            # setup a default 2D view
            self._script += "view(2),...\n"
        elif view == 3:
            az = cam.getp('azimuth')
            el = cam.getp('elevation')
            if az is None or el is None:
                # azimuth or elevation is not given. Set up a default
                # 3D view (az=-37.5 and el=30 is the default 3D view in
                # Matlab).
                self._script += "view(3),...\n"
            else:
                # set a 3D view according to az and el
                self._script += "view([%s,%s]),...\n" % (az,el)
            if cam.getp('cammode') == 'manual':
                # for advanced camera handling:
                roll = cam.getp('camroll')
                #if roll is not None:
                #    self._g.camroll(roll, nout=0)
                zoom = cam.getp('camzoom')
                #if zoom != 1:  # FIXME: Is this the right way?
                #    self._g.camzoom(zoom, nout=0)
                dolly = cam.getp('camdolly')
                #if dolly != (0,0,0):
                #    self._g.camdolly(list(dolly), nout=0)
                target = cam.getp('camtarget')
                position = cam.getp('campos')
                up_vector = cam.getp('camup')
                view_angle = cam.getp('camva')
                projection = cam.getp('camproj')
                #self._axargs.extend(['CameraTarget', target,
                #                     'CameraPosition', position,
                #                     'CameraPosition', position,
                #                     'CamearUpVector', up_vector,
                #                     'CameraViewAngle', view_angle,
                #                     'Projection', projection])
    def _set_axis_props(self, ax):
        """Apply all axis-level properties to the generated Matlab script.

        The call order is significant: `_set_axis_method` must run before
        `_set_limits`. When the axis is not visible, labels/box/grid are
        skipped and everything is turned off with `axis off`.
        """
        if DEBUG:
            print "Setting axis properties"
        self._set_title(ax)
        self._set_scale(ax)
        self._set_axis_method(ax)  # should be called before _set_limits.
        self._set_limits(ax)
        self._set_position(ax)
        self._set_daspect(ax)
        self._set_coordinate_system(ax)
        self._set_hidden_line_removal(ax)
        self._set_colorbar(ax)
        self._set_caxis(ax)
        self._set_colormap(ax)
        self._set_view(ax)
        if ax.getp('visible'):
            self._set_labels(ax)
            self._set_box(ax)
            self._set_grid(ax)
            self._script += "axis on,...\n"
        else:
            # turn off all axis labeling, tickmarks, and background
            self._script += "axis off,...\n"
def _get_linespecs(self, item):
"""
Return the line marker, line color, line style, and
line width of the item.
"""
marker = item.getp('linemarker')
color = item.getp('linecolor')
style = item.getp('linetype')
width = item.getp('linewidth')
return marker, color, style, width
def _add_line(self, item):
"""Add a 2D or 3D curve to the scene."""
if DEBUG:
print "Adding a line"
# get data:
x = item.getp('xdata')
y = item.getp('ydata')
z = item.getp('zdata')
# get line specifiactions:
marker, color, style, width = self._get_linespecs(item)
cmd = ""
cmd += "x = %s;\n" % list(x)
cmd += "y = %s;\n" % list(y)
if z is not None:
# zdata is given, add a 3D curve:
cmd += "z = %s;\n" % list(z)
cmd += "plot3(x,y,z"
else:
# no zdata, add a 2D curve:
cmd += "plot(x,y"
if color:
cmd += ",'Color','%s'" % color
if style:
cmd += ",'LineStyle','%s'" % style
if marker:
cmd += ",'Marker','%s'" % marker
if not style:
cmd += ",'LineStyle','none'"
if width:
cmd += ",'LineWidth',%g" % float(width)
cmd += ")\n"
self._script += cmd
    def _add_bar_graph(self, item, shading='faceted'):
        """Append Matlab code for a grouped bar graph to the script.

        `shading` other than 'faceted' suppresses the bar edges. Optional
        item properties: 'barwidth', 'barticks' (custom x tick labels) and
        'rotated_barticks' (currently a no-op).
        """
        if DEBUG:
            print "Adding a bar graph"
        # get data:
        x = item.getp('xdata')
        y = item.getp('ydata')
        # get line specifiactions:
        marker, color, style, width = self._get_linespecs(item)
        edgecolor = item.getp('edgecolor')
        if not edgecolor:
            edgecolor = 'k'
            # FIXME: edgecolor should be ax.getp('fgcolor') as default
        facecolor = item.getp('facecolor')
        if not facecolor:
            facecolor = color
        cmd = ""
        cmd += "x = %s;\n" % list(x)
        # NOTE(review): `rank` presumably comes from a numpy/scitools import
        # at the top of the file (array rank, not matrix rank) - confirm
        if rank(y) == 2:
            # 2D y: emit a Matlab matrix literal (rows separated by ';')
            cmd += "y = %s;\n" % str(y.tolist()).replace('],', '];')
        else:
            cmd += "y = %s;\n" % list(y)
        cmd += "bar(x,y"
        barwidth = item.getp('barwidth')
        if barwidth is not None:
            cmd += ",%s" % barwidth
        cmd += ",'grouped'"
        if facecolor:
            cmd += ",'FaceColor', '%s'" % facecolor
            # FIXME: Color can also be a three-tuple [r,g,b]
        if shading != 'faceted':
            cmd += ",'EdgeColor', 'none'"
        elif edgecolor:
            cmd += ",'EdgeColor', '%s'" % edgecolor
        cmd += ")\n"
        self._script += cmd
        barticks = item.getp('barticks')
        if barticks is not None:
            barticks = '; '.join(["'%s'" % s for s in barticks])
            self._script += "set(gca, 'XTickLabel', [%s])\n" % barticks
        if item.getp('rotated_barticks'):
            pass
def _add_surface(self, item, shading='faceted'):
if DEBUG:
print "Adding a surface"
x = squeeze(item.getp('xdata')) # grid component in x-direction
y = squeeze(item.getp('ydata')) # grid component in y-direction
z = asarray(item.getp('zdata')) # scalar field
c = item.getp('cdata') # pseudocolor data (can be None)
cmd = ""
if item.getp('indexing') == 'ij' and \
(shape(x) != shape(z) and shape(y) != shape(z)):
x,y = ndgrid(x,y,sparse=False)
if shape(x) != shape(z) and shape(y) != shape(z):
cmd += "x = %s;\n" % list(x)
cmd += "y = %s;\n" % list(y)
if item.getp('indexing') == 'ij':
cmd += "[X,Y] = ndgrid(x,y);\n"
else:
cmd += "[X,Y] = meshgrid(x,y);\n"
else:
cmd += "X = %s;\n" % str(x.tolist()).replace('],', '];')
cmd += "Y = %s;\n" % str(y.tolist()).replace('],', '];')
cmd += "Z = %s;\n" % str(z.tolist()).replace('],', '];')
if c is not None:
c = asarray(c)
cmd += "C = %s;\n" % str(c.tolist()).replace('],', '];')
# get line specifiactions:
marker, color, style, width = self._get_linespecs(item)
edgecolor = item.getp('edgecolor')
facecolor = item.getp('facecolor')
args = ""
if edgecolor:
args += ",'EdgeColor','%s'" % edgecolor
# FIXME: Color can also be a three-tuple [r,g,b]
if facecolor:
args += ",'FaceColor','%s'" % facecolor
if style:
args += ",'LineStyle','%s'" % style
if marker:
args += ",'Marker','%s'" % marker
if width:
args += ",'LineWidth',%s" % float(width)
if shading != 'faceted' and not color:
args += ",'EdgeColor','none','FaceColor','%s'" % shading
contours = item.getp('contours')
if contours:
# the current item is produced by meshc or surfc and we
# should therefore add contours at the bottom:
#self._add_contours(contours, placement='bottom')
pass
if item.getp('wireframe'):
# wireframe mesh (as produced by mesh or meshc)
if contours:
func = 'meshc'
else:
func = 'mesh'
else:
# colored surface (as produced by surf, surfc, or pcolor)
# use keyword argument shading to set the color shading mode
if contours:
func = 'surfc'
else:
if item.getp('function') == 'pcolor':
func = 'pcolor'
else:
func = 'surf'
if func in ['pcolor','meshc']:
# pcolor needs special treatment since it has no support for
# parameter/value pairs.
cmd += "h = %s(X,Y,Z);\n" % func
if c is not None:
args += ",'CData', 'C'"
if args:
cmd += "set(h%s),...\n" % args
else:
if c is not None:
args = ",C" + args
cmd += "%s(X,Y,Z%s),...\n" % (func,args)
self._script += cmd
def _add_contours(self, item, placement=None):
# The placement keyword can be either None or 'bottom'. The
# latter specifies that the contours should be placed at the
# bottom (as in meshc or surfc).
if DEBUG:
print "Adding contours"
x = squeeze(item.getp('xdata')) # grid component in x-direction
y = squeeze(item.getp('ydata')) # grid component in y-direction
z = asarray(item.getp('zdata')) # scalar field
cmd = ""
if item.getp('indexing') == 'ij' and \
(shape(x) != shape(z) and shape(y) != shape(z)):
x,y = ndgrid(x,y,sparse=False)
if shape(x) != shape(z) and shape(y) != shape(z):
cmd += "x = %s;\n" % list(x)
cmd += "y = %s;\n" % list(y)
if item.getp('indexing') == 'ij':
cmd += "[X,Y] = ndgrid(x,y);\n"
else:
cmd += "[X,Y] = meshgrid(x,y);\n"
else:
cmd += "X = %s;\n" % str(x.tolist()).replace('],', '];')
cmd += "Y = %s;\n" % str(y.tolist()).replace('],', '];')
cmd += "Z = %s;\n" % str(z.tolist()).replace('],', '];')
filled = item.getp('filled') # draw filled contour plot if True
args = ""
cvector = item.getp('cvector')
clevels = item.getp('clevels') # number of contour levels
if cvector is None:
# the contour levels are chosen automatically
| |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun May 30 17:28:21 2021
@author: OwYeong
"""
import cv2
import numpy as np
from matplotlib import pyplot as pt
from pykuwahara import kuwahara
import os
import glob
import argparse
import time
from my_utils import *
#Function Define Start
def resize_with_aspect_ratio(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize `image` to a target width OR height, preserving aspect ratio.

    If both `width` and `height` are None, the image is returned untouched.
    When `width` is given it takes precedence and the height is scaled to
    match; otherwise the width is scaled to match the requested height.
    `inter` selects the cv2 interpolation method.
    """
    orig_h, orig_w = image.shape[:2]

    if width is None and height is None:
        return image

    if width is not None:
        scale = width / float(orig_w)
        target_dim = (width, int(orig_h * scale))
    else:
        scale = height / float(orig_h)
        target_dim = (int(orig_w * scale), height)

    return cv2.resize(image, target_dim, interpolation=inter)
def get_dark_channel(image_color, window_size):
    """
    Dark channel prior.

    For every pixel, take the minimum of the B, G and R values, then take
    the minimum of that grayscale image over a window_size x window_size
    sliding window (implemented as a grayscale erosion).
    """
    blue, green, red = cv2.split(image_color)
    # Per-pixel minimum across the three channels -> grayscale image.
    channel_minimum = cv2.min(cv2.min(blue, green), red)
    # Grayscale erosion picks the lowest intensity inside each window.
    kernel = cv2.getStructuringElement(
        cv2.MORPH_RECT, (window_size, window_size))
    return cv2.erode(channel_minimum, kernel)
def is_sky_exist_in_image(imgcolor):
    """Heuristically decide whether a color image contains a sky region.

    Thresholds the dark channel with a biased Otsu threshold, labels the
    connected components, and looks for a component that hugs the top
    border (or is very wide) without touching the bottom border.
    Returns True if such a component is found, else False.
    """
    # Section: Generating dark channel
    darkChannel = get_dark_channel(imgcolor,105)# use a large window size to reduce the effect of artificial light
    thresholdOtsu,thresholdedDarkChannel = cv2.threshold(darkChannel.astype(np.uint8),0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    # Make the otsu threshold more bias, as we want to get more on sky area.
    adjustedThreshold = int(thresholdOtsu * 0.65)
    thresholdOtsu,thresholdedDarkChannel = cv2.threshold(darkChannel.astype(np.uint8),adjustedThreshold,255,cv2.THRESH_BINARY)
    num_of_labels, labeled_img = cv2.connectedComponents(thresholdedDarkChannel)
    M, N = labeled_img.shape
    is_sky_exist = False
    # NOTE(review): the original comment claimed 7/10, but the code requires
    # 3/10 of the image width to touch the top border.
    threshold = int(N * (3/10)) # 3/10 of the total columns must stick to top border to indicate a sky exist
    # Section: Check if sky exist
    # If
    #     at least 1 labelled region stick to the (threshold) of total image top border OR
    #     at least 1 labelled region has 70% of the image width AND
    #     the labelled region does not contain any pixel that stick to the bottom border of the image.
    # Then Sky exist.
    for label in range(1, num_of_labels):
        # for each labelled segment in image
        label_in_current_region = np.zeros((M, N), dtype=np.uint8)
        label_in_current_region[labeled_img == label] = 1 # generate the binary image of current labelled segment
        # Row-wise pixel counts of the current segment.
        labelledRegionWidthSum = np.sum(label_in_current_region, axis=1)
        number_of_pixel_stick_at_top_border = labelledRegionWidthSum[0] # In Current labelled segment, check number of pixel that stick to the top border of image
        max_width_of_labelled_region = np.max(labelledRegionWidthSum)
        isNoPixelTouchesBorderBottom = (labelledRegionWidthSum[-1] == 0)
        if (number_of_pixel_stick_at_top_border > threshold or max_width_of_labelled_region > int(N*0.7)) and isNoPixelTouchesBorderBottom:
            # current labelled segment, meet threshold
            is_sky_exist = True # sky exist
            return is_sky_exist # return result
    #After iterating all label, none of the segment meet the threshold.
    return is_sky_exist #return result
def variance_filter(img_gray, window_size=3):
    """
    Variance filter.

    Returns the local variance of `img_gray` inside a sliding
    window_size x window_size window, computed with the identity
    Var[X] = E[X^2] - (E[X])^2 using two vectorized box filters
    (an explicit 2D loop would be far too slow).
    """
    values = img_gray.astype(np.float64)

    def window_mean(arr):
        # Box filter = windowed mean; replicate the border pixels.
        return cv2.boxFilter(arr, -1, (window_size, window_size),
                             borderType=cv2.BORDER_REPLICATE)

    mean = window_mean(values)
    mean_of_squares = window_mean(values * values)
    return mean_of_squares - mean * mean
def find_hsv_upper_lower_threshold_for_skyarea_and_weather_condition(rough_sky_mask, hls_img):
    """
    This function will estimate the upper and lower threshold of sky area in hue, lightness, saturation channel
    based on the rough_sky_mask provided. It also estimates the weather condition of the image using the hls image.

    Returns (hls_lower_threshold, hls_upper_threshold, weather_condition) where the
    thresholds are 3-element numpy arrays in HLS order and weather_condition is one of
    "day", "dayCloudy", "night", "nightCloudy".
    NOTE(review): the second return value is named hsv_* below but, like the first,
    it actually holds HLS bounds (only the lightness component is constrained).
    """
    rough_sky_area_hls = cv2.bitwise_and(hls_img, hls_img, mask=rough_sky_mask) # extracted rough sky area
    M, N, _ = hls_img.shape
    h, l, s = cv2.split(rough_sky_area_hls)
    # -1 marks pixels outside the mask so they can be filtered out later.
    hlist = np.full((M, N), -1, dtype=np.float64)
    llist = np.full((M, N), -1, dtype=np.float64)
    slist = np.full((M, N), -1, dtype=np.float64)
    # Section: Calculate statistical information for hue, lightness, saturation channel
    hlist[rough_sky_mask == 255] = h[rough_sky_mask == 255] # copy pixel in answer mask to the list
    llist[rough_sky_mask == 255] = l[rough_sky_mask == 255] # copy pixel in answer mask to the list
    slist[rough_sky_mask == 255] = s[rough_sky_mask == 255] # copy pixel in answer mask to the list
    value_tolerance = 50 # tolerance value for light
    hflatten = hlist.flatten()[hlist.flatten() >= 0 ] # extract the pixels in answer mask and flat the array
    hflatten.sort()
    sflatten = slist.flatten()[slist.flatten() >= 0 ] # extract the pixels in answer mask and flat the array
    sflatten.sort()
    s_removed_noise = sflatten[int(sflatten.size*0.05): int(sflatten.size - int(sflatten.size*0.05))] # trim 5 percent from forward and backward direction
    saturation_max = 0 if (sflatten.size == 0) else s_removed_noise.max()
    saturation_min = 0 if (sflatten.size == 0) else s_removed_noise.min()
    lflatten = llist.flatten()[llist.flatten() >= 0 ] # extract the pixels in answer mask and flat the array
    lflatten.sort()
    l_removed_noise = lflatten[int(lflatten.size*0.05): int(lflatten.size - int(lflatten.size*0.05))] # trim 5 percent from forward and backward direction
    lightness_mean = 0 if (lflatten.size == 0) else int(np.mean(l_removed_noise))
    lightness_max = 0 if (lflatten.size == 0) else lflatten.max()
    lightness_min = 0 if (lflatten.size == 0) else lflatten.min()
    # Section: Estimate lightness threshold for sky area
    if lightness_mean> 200:
        #Day images, which high in light as the mean value of lightness in sky area is extremely high
        weather_condition = "day"
        lUpper = int(lightness_mean + value_tolerance)
        lLower = int(150) # sky is extremely bright, hence the minimum lightness would be 150 based on experiment
    elif lightness_mean> 100:
        weather_condition = "day"
        # Day image, but lower in light
        # Hence, we use max and min with a tolerance band instead of the mean
        lUpper = int(lightness_max + value_tolerance)
        lLower = int(lightness_min - value_tolerance)
    else:
        weather_condition = "night"
        #Night images, as mean value of lightness is less than 100
        lUpper = int(lightness_max + value_tolerance)
        lLower = int(lightness_min - 10)
    # Generate hls upper and lower threshold based on lightness channel as we found that lightness is most representative for a sky area
    hls_lower_threshold = np.array([0,8 if lLower < 8 else lLower,0])
    hsv_upper_threshold = np.array([255,255 if lUpper > 200 else lUpper,255])
    # If saturation spread is high, it is most likely cloudy as clouds are not saturated most of the time while sky area is saturated
    if np.abs(saturation_max - saturation_min) > 120 and weather_condition == "day" and lightness_mean<240:
        weather_condition = "dayCloudy"
    if np.abs(saturation_max - saturation_min) > 50 and weather_condition == "night":
        weather_condition = "nightCloudy"
    return hls_lower_threshold, hsv_upper_threshold, weather_condition
def generate_final_sky_mask(initialSkyMask):
num_of_labels, labeled_img = cv2.connectedComponents(initialSkyMask)
M, N = labeled_img.shape
largest_labelled_region = None
# Section: Find Largest labelled region in initialSkyMask
for label in range(1, num_of_labels):
# for each labelled segment in image
label_in_current_region = np.zeros((M, N), dtype=np.uint8)
label_in_current_region[labeled_img == label] = 1 # generate the binary image of current labelled segment
number_of_pixel_in_current_region = np.sum(label_in_current_region)
if largest_labelled_region is None:
largest_labelled_region = label_in_current_region
else:
if number_of_pixel_in_current_region > np.sum(largest_labelled_region):
largest_labelled_region = label_in_current_region
# Section: Noise filtering. If black pixels is surrounded by white pixel, change to white pixel
indicies_of_all_labelled_region = np.argwhere(largest_labelled_region == 1)
# Generate padding before performing
# Sky area in initialSkyMask will have a padding of 1 while non-sky area will have a padding of 0
max_row_in_labelled_region_left_border = None
max_row_in_labelled_region_right_border = None
if len(indicies_of_all_labelled_region[indicies_of_all_labelled_region[:,1]==0, :][:,0]) > 0:
max_row_in_labelled_region_left_border = np.max(indicies_of_all_labelled_region[indicies_of_all_labelled_region[:,1]==0, :][:,0])
if len(indicies_of_all_labelled_region[indicies_of_all_labelled_region[:,1]==N-1, :][:,0]) > 0:
max_row_in_labelled_region_right_border = np.max(indicies_of_all_labelled_region[indicies_of_all_labelled_region[:,1]==N-1, :][:,0])
padding_size = 1
paddedlargest_labelled_region = np.pad(largest_labelled_region, ((padding_size,padding_size),(padding_size,padding_size)), 'constant')
paddedlargest_labelled_region[0, :] = 1 # top border is 1
if max_row_in_labelled_region_left_border is not None:
paddedlargest_labelled_region[0:max_row_in_labelled_region_left_border, 0] = 1 # left border padding is 1, up to the max row in labelled region
if max_row_in_labelled_region_right_border is not None:
paddedlargest_labelled_region[0:max_row_in_labelled_region_right_border, N+1] = 1 # right border padding is 1, up to the max row in labelled region
complement_of_largest_region = 1 - largest_labelled_region #invery
num_of_labels_in_largest_labelled_region, complement_of_largest_region_labelled = cv2.connectedComponents(complement_of_largest_region)
padded_complement_of_largest_region_labelled = np.pad(complement_of_largest_region_labelled, ((padding_size,padding_size),(padding_size,padding_size)), 'constant') # allow spaces for dilation for pixel that stick to border
for label in range(1,num_of_labels_in_largest_labelled_region):
# for each black colored segment in image, we check wheter is surrounded by white pixel
label_in_current_region = np.zeros((M, N), dtype=np.uint8)
label_in_current_region[complement_of_largest_region_labelled==label] = 1
paddedlabel_in_current_region = np.pad(label_in_current_region, ((padding_size,padding_size),(padding_size,padding_size)), 'constant') # allow spaces for dilation for pixel that stick to border
crossSe = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3)) # 3x3 cross kernel
dilatedlabel_in_current_region = cv2.dilate(paddedlabel_in_current_region, crossSe,iterations = 1)
surrounded_pixel = dilatedlabel_in_current_region - paddedlabel_in_current_region # find surrounded pixel by using dilation
if | |
if (static_img is None or roi_affine is None
or static_affine is None):
raise ValueError("If using an affine to transform an ROI, "
"need to also specify all of the following",
"inputs: `static_img`, `roi_affine`, ",
"`static_affine`")
roi = reg.resample(roi, static_img, roi_affine, static_affine)
else:
# Assume it is a mapping:
if (isinstance(affine_or_mapping, str)
or isinstance(affine_or_mapping, nib.Nifti1Image)):
if reg_template is None or static_img is None:
raise ValueError(
"If using a mapping to transform an ROI, need to ",
"also specify all of the following inputs: ",
"`reg_template`, `static_img`")
affine_or_mapping = reg.read_mapping(affine_or_mapping,
static_img,
reg_template)
roi = auv.patch_up_roi(affine_or_mapping.transform_inverse(
roi,
interpolation='nearest')).astype(bool)
return roi
def load_volume(volume):
    """
    Load a volume

    Helper function

    Parameters
    ----------
    volume : ndarray or str
        3d volume to load.
        If string, it is used as a file path.
        If it is an ndarray, it is simply returned.

    Returns
    -------
    ndarray
    """
    viz_logger.info("Loading Volume...")
    # A string is treated as a path to a NIfTI file; anything else is
    # assumed to already be array data and passed through unchanged.
    if isinstance(volume, str):
        return nib.load(volume).get_fdata()
    return volume
class Viz:
    def __init__(self,
                 backend="fury"):
        """
        Set up visualization preferences.

        Parameters
        ----------
        backend : str, optional
            Should be either "fury" or "plotly".
            Default: "fury"

        Raises
        ------
        ImportError
            If the package for the requested backend is not installed.
        TypeError
            If `backend` names neither "fury" nor "plotly".
        """
        self.backend = backend
        if "fury" in backend:
            try:
                import AFQ.viz.fury_backend
            except ImportError:
                raise ImportError(viz_import_msg_error("fury"))
            self.visualize_bundles = AFQ.viz.fury_backend.visualize_bundles
            self.visualize_roi = AFQ.viz.fury_backend.visualize_roi
            self.visualize_volume = AFQ.viz.fury_backend.visualize_volume
            self.create_gif = AFQ.viz.fury_backend.create_gif
        elif "plotly" in backend:
            try:
                import AFQ.viz.plotly_backend
            except ImportError:
                raise ImportError(viz_import_msg_error("plotly"))
            self.visualize_bundles = AFQ.viz.plotly_backend.visualize_bundles
            self.visualize_roi = AFQ.viz.plotly_backend.visualize_roi
            self.visualize_volume = AFQ.viz.plotly_backend.visualize_volume
            self.create_gif = AFQ.viz.plotly_backend.create_gif
        else:
            # Fixed grammar of the error message ("backend contain" ->
            # "backend must contain").
            raise TypeError("Visualization backend must contain"
                            + " either 'plotly' or 'fury'. "
                            + "It is currently set to %s"
                            % backend)
def visualize_tract_profiles(tract_profiles, scalar="dti_fa", ylim=None,
                             n_boot=1000,
                             file_name=None,
                             positions=POSITIONS):
    """
    Visualize all tract profiles for a scalar in one plot

    Parameters
    ----------
    tract_profiles : string
        Path to CSV containing tract_profiles.

    scalar : string, optional
        Scalar to use in plots. Default: "dti_fa".

    ylim : list of 2 floats, optional
        Minimum and maximum value used for y-axis bounds.
        If None, ylim is not set.
        Default: None

    n_boot : int, optional
        Number of bootstrap resamples for seaborn to use
        to estimate the ci.
        Default: 1000

    file_name : string, optional
        File name to save figure to if not None. Default: None

    positions : dictionary, optional
        Dictionary that maps bundle names to position in plot.
        Default: POSITIONS

    Returns
    -------
    Matplotlib figure and axes.
    """
    # Wrap the single CSV in a one-dataset comparison object (no scalar
    # bounds applied) and delegate the actual plotting to it.
    comparison = GroupCSVComparison(
        None,
        [tract_profiles],
        ["my_tract_profiles"],
        scalar_bounds={'lb': {}, 'ub': {}})
    return comparison.tract_profiles(
        scalar=scalar,
        ylim=ylim,
        positions=positions,
        out_file=file_name,
        n_boot=n_boot)
class BrainAxes():
    '''
    Helper class.
    Creates and handles a grid of axes.
    Each axis corresponds to a bundle.
    Axis placement should roughly correspond
    to the actual bundle placement in the brain.
    '''

    def __init__(self, size=(5, 5), positions=POSITIONS):
        """
        Parameters
        ----------
        size : tuple of 2 ints, optional
            Grid shape (rows, columns). Default: (5, 5)
        positions : dictionary, optional
            Maps bundle names to (row, col) positions in the grid.
            Default: POSITIONS
        """
        self.size = size
        self.positions = positions
        # BUG FIX: the in-use flag grid previously hard-coded shape (5, 5)
        # and ignored the `size` argument; honor it instead.
        self.on_grid = np.zeros(self.size, dtype=bool)
        self.fig, self.axes = plt.subplots(
            self.size[0],
            self.size[1],
            frameon=False)
        plt.subplots_adjust(
            left=None,
            bottom=None,
            right=None,
            top=None,
            wspace=0.4,
            hspace=0.6)
        self.fig.set_size_inches((18, 18))
        # Temporary single-axis figure for bundles without a grid position.
        self.temp_fig, self.temp_axis = plt.subplots()
        self.temp_axis_owner = None

    def get_axis(self, bundle):
        '''
        Given a bundle, turn on and get an axis.
        If bundle not positioned, will claim the temporary axis.
        '''
        if bundle in self.positions.keys():
            self.on_grid[self.positions[bundle]] = True
            return self.axes[self.positions[bundle]]
        else:
            # Recreate the temporary figure when a new bundle claims it.
            if self.temp_axis_owner != bundle:
                plt.close(self.temp_fig)
                self.temp_fig, self.temp_axis = plt.subplots()
                self.temp_axis_owner = bundle
            return self.temp_axis

    def plot_line(self, bundle, x, y, data, ylabel, ylim, n_boot, alpha,
                  lineplot_kwargs, plot_subject_lines=True):
        '''
        Given a dataframe data with at least columns x, y,
        and subjectID, plot the mean of subjects with ci of 95
        in alpha and the individual subjects in alpha-0.2
        using sns.lineplot()
        '''
        ax = self.get_axis(bundle)
        if plot_subject_lines:
            sns.set(style="whitegrid", rc={"lines.linewidth": 1})
        else:
            sns.set(style="whitegrid", rc={"lines.linewidth": 6})
        # Mean across subjects with a bootstrapped 95% CI.
        sns.lineplot(
            x=x, y=y,
            data=data,
            estimator='mean', ci=95, n_boot=n_boot,
            legend=False, ax=ax, alpha=alpha,
            style=[True] * len(data.index), **lineplot_kwargs)

        if plot_subject_lines:
            # Fainter, thinner per-subject lines underneath the mean.
            sns.set(style="whitegrid", rc={"lines.linewidth": 0.5})
            sns.lineplot(
                x=x, y=y,
                data=data,
                ci=None, estimator=None, units='subjectID',
                legend=False, ax=ax, alpha=alpha - 0.2,
                style=[True] * len(data.index), **lineplot_kwargs)

        ax.set_title(bundle, fontsize=large_font)
        ax.set_ylabel(ylabel, fontsize=medium_font)
        ax.set_ylim(ylim)

    def format(self, disable_x=True):
        '''
        Call this functions once after all axes that you intend to use
        have been plotted on. Automatically formats brain axes.
        '''
        for i in range(self.size[0]):
            for j in range(self.size[1]):
                if not self.on_grid[i, j]:
                    self.axes[i, j].axis("off")
                # Drop redundant y labels when the left neighbor is in use.
                if j != 0 and self.on_grid[i][j - 1]:
                    self.axes[i, j].set_yticklabels([])
                    self.axes[i, j].set_ylabel("")
                # BUG FIX: bottom-row test previously hard-coded `i != 4`;
                # use the configured grid height instead.
                if disable_x or (i != self.size[0] - 1
                                 and self.on_grid[i + 1][j]):
                    self.axes[i, j].set_xticklabels([])
                    self.axes[i, j].set_xlabel("")
        self.fig.tight_layout()

    def save_temp_fig(self, fname):
        '''
        If using a temporary axis, save it out and clear it.
        Returns True if temporary axis was saved, false if no
        temporary axis was in use
        '''
        if self.temp_axis_owner is None:
            return False
        self.temp_fig.tight_layout()
        self.temp_fig.savefig(fname)
        plt.close(self.temp_fig)
        self.temp_axis_owner = None
        return True

    def is_using_temp_axis(self):
        # True while an unpositioned bundle owns the temporary axis.
        return (self.temp_axis_owner is not None)

    def close_all(self):
        '''
        Close all associated figures.
        '''
        plt.close(self.temp_fig)
        plt.close(self.fig)
class GroupCSVComparison():
"""
Compare different CSVs, using:
tract profiles, contrast indices,
scan-rescan reliability using Pearson's r.
"""
def __init__(self, out_folder, csv_fnames, names, is_mats=False,
             subjects=None,
             scalar_bounds={'lb': {'dti_fa': 0.2},
                            'ub': {'dti_md': 0.002}},
             bundles=None,
             percent_nan_tol=10,
             percent_edges_removed=10,
             mat_bundle_converter=BUNDLE_MAT_2_PYTHON,
             mat_column_converter=CSV_MAT_2_PYTHON,
             mat_scale_converter=SCALE_MAT_2_PYTHON):
    """
    Load in csv files, converting from matlab if necessary.

    Bug fix: the edge-trimming step (and the resulting profile length)
    previously used `percent_nan_tol` instead of `percent_edges_removed`,
    contradicting the documented behavior below.

    Parameters
    ----------
    out_folder : path, optional
        Folder where outputs of this class's methods will be saved.

    csv_fnames : list of filenames
        Filenames for the two CSVs containing tract profiles to compare.
        Will obtain subject list from the first file.

    names : list of strings
        Name to use to identify each CSV dataset.

    is_mats : bool or list of bools, optional
        Whether or not the csv was generated from Matlab AFQ or pyAFQ.
        Default: False

    subjects : list of num, optional
        List of subjects to consider.
        If None, will use all subjects in first dataset.
        Default: None

    scalar_bounds : dictionary, optional
        A dictionary with a lower bound and upper bound containting a
        series of scalar / threshold pairs used as a white-matter mask
        on the profiles (any values outside of the threshold will be
        marked NaN and not used or set to 0, depending on the case).
        Default: {'lb': {'dti_fa': 0.2}, 'ub': {'dti_md': 0.002}}

    bundles : list of strings, optional
        Bundles to compare.
        If None, use all bundles in the first profile group.
        Default: None

    percent_nan_tol : int, optional
        Percentage of NaNs tolerable. If a profile has less than this
        percentage of NaNs, NaNs are interpolated. If it has more,
        the profile is thrown out.
        Default: 10

    percent_edges_removed : int, optional
        Percentage of nodes to remove from the edges of the profile.
        Scalar values often change dramatically at the boundary between
        the grey matter and the white matter, and these effects can
        dominate plots. However, they are generally not interesting to us,
        and have low intersubject reliability.
        In a profile of 100 nodes, percent_edges_removed=10 would remove
        5 nodes from each edge.
        Default: 10

    mat_bundle_converter : dictionary, optional
        Dictionary that maps matlab bundle names to python bundle names.
        Default: BUNDLE_MAT_2_PYTHON

    mat_column_converter : dictionary, optional
        Dictionary that maps matlab column names to python column names.
        Default: CSV_MAT_2_PYTHON

    mat_scale_converter : dictionary, optional
        Dictionary that maps scalar names to how they should be scaled
        to match pyAFQ's scale for that scalar.
        Default: SCALE_MAT_2_PYTHON
    """
    self.logger = logging.getLogger('AFQ.csv')
    self.out_folder = out_folder
    self.percent_nan_tol = percent_nan_tol

    if isinstance(is_mats, bool):
        is_mats = [is_mats] * len(csv_fnames)

    self.profile_dict = {}
    for i, fname in enumerate(csv_fnames):
        profile = pd.read_csv(fname)
        if 'subjectID' in profile.columns:
            # Normalize subject IDs like "sub-01" to the integer 1.
            profile['subjectID'] = \
                profile['subjectID'].apply(
                    lambda x: int(
                        ''.join(c for c in x if c.isdigit())
                    ) if isinstance(x, str) else x)
        else:
            profile['subjectID'] = 0

        if is_mats[i]:
            # Translate Matlab AFQ naming/scaling to pyAFQ conventions.
            profile.rename(
                columns=mat_column_converter, inplace=True)
            profile['tractID'] = \
                profile['tractID'].apply(
                    lambda x: mat_bundle_converter[x])
            for scalar, scale in mat_scale_converter.items():
                profile[scalar] = \
                    profile[scalar].apply(lambda x: x * scale)

        # Apply the white-matter mask: out-of-bounds values become NaN.
        for bound, constraint in scalar_bounds.items():
            for scalar, threshold in constraint.items():
                profile[scalar] = \
                    profile[scalar].apply(
                        lambda x: self._threshold_scalar(
                            bound,
                            threshold,
                            x))

        if percent_edges_removed > 0:
            # FIX: trim by percent_edges_removed (was percent_nan_tol).
            profile = profile.drop(profile[np.logical_or(
                (profile["nodeID"] < percent_edges_removed // 2),
                (profile["nodeID"] >= 100 - (percent_edges_removed // 2))
            )].index)

        self.profile_dict[names[i]] = profile

    if subjects is None:
        self.subjects = self.profile_dict[names[0]]['subjectID'].unique()
    else:
        self.subjects = subjects
    # FIX: profile length reflects the trimmed edges (was percent_nan_tol).
    self.prof_len = 100 - (percent_edges_removed // 2) * 2
    if bundles is None:
        self.bundles = self.profile_dict[names[0]]['tractID'].unique()
        self.bundles.sort()
    else:
        self.bundles = bundles
    self.color_dict = gen_color_dict(self.bundles)
def _threshold_scalar(self, bound, threshold, val):
"""
Threshold scalars by a lower and upper bound.
"""
if bound == "lb":
if val > threshold:
return val
else:
return np.nan
| |
from collections import defaultdict, namedtuple
import csv
import itertools
from pathlib import Path
import sys
from csvw.metadata import Column, ForeignKey, Datatype, Link
import pycldf
import pyclts
from segments import Tokenizer, Profile
from online_cognacy_ident.align import normalized_levenshtein
# CLTS broad-IPA transcription system used to look up segment symbols.
bipa = pyclts.TranscriptionSystem("bipa")
# Default segments tokenizer (no custom orthography profile supplied).
tokenizer = Tokenizer()

"""
Dict mapping the column names that the Dataset class looks for to lists of
possible variants. Used in Dataset._read_header().
"""
RECOGNISED_COLUMN_NAMES = {
    'doculect': ['doculect', 'language', 'lang'],
    'concept': ['concept', 'gloss'],
    'asjp': ['asjp', 'transcription'],
    'cog_class': ['cog_class', 'cognate_class', 'cognate class', 'cogid']
}

"""
The named tuple used in the return values of the get_words, get_concepts, and
get_clusters methods of Dataset objects.
"""
# Fields: doculect (language), concept (gloss), sc (tuple of sound-class
# segments), id (form ID or None for plain csv datasets).
Word = namedtuple('Word', ['doculect', 'concept', 'sc', 'id'])
class DatasetError(ValueError):
    """Signals that something went wrong while reading a dataset."""
class Dataset:
    """
    Handles dataset reading. It is assumed that the dataset would be a csv/tsv
    file that contains at least one of the columns for each list of recognised
    column names.

    Usage:
        try:
            dataset = Dataset(path)
            for concept, words in dataset.concepts():
                print(concept)
        except DatasetError as err:
            print(err)
    """

    def __init__(self, path, dialect=None, transform=None):
        """
        Set the instance's props. Raise a DatasetError if the given file path
        does not exist.

        The dialect arg should be either a string identifying one of the csv
        dialects or None, in which case the dialect is inferred based on the
        file extension. Raise a ValueError if the given dialect is specified
        but unrecognised.

        The transform arg, if not None, is a function applied to each bipa
        Sound in order to convert the transcriptions into some other sound
        class model (e.g. ASJP).
        """
        if not Path(path).exists():
            raise DatasetError('Could not find file: {}'.format(path))

        if dialect is None:
            dialect = 'excel-tab' if path.endswith('.tsv') else 'excel'
        elif dialect not in csv.list_dialects():
            raise ValueError('Unrecognised csv dialect: {!s}'.format(dialect))

        self.path = path
        self.dialect = dialect
        self.transform = transform

        self.alphabet = None
        # Bug fix: previously this attribute was only created inside
        # _read_words(), so calling get_equilibrium() before any words had
        # been read raised AttributeError instead of triggering a read.
        self.equilibrium = None

    def _read_header(self, line, exclude=['cog_class']):
        """
        Return a {column name: index} dict, excluding the columns listed in the
        second func arg.

        Raise a DatasetError if not all required columns can be recovered.
        """
        d = {}
        column_names = {column: names
                        for column, names in RECOGNISED_COLUMN_NAMES.items()
                        if column not in exclude}

        for index, heading in enumerate(line):
            heading = heading.lower()
            for column, recognised_names in column_names.items():
                if heading in recognised_names:
                    d[column] = index
                    break

        for column in column_names.keys():
            if column not in d:
                raise DatasetError('Could not find the column for {}'.format(column))

        return d

    def _read_asjp(self, raw_trans):
        """
        Process a raw transcription value into a sound class transcription, eg. ASJP:
        (1) if the input string consists of multiple comma-separated entries,
        remove all but the first one;
        (2) tokenize into bipa Sounds (morpheme boundaries "." are split off);
        (3) if a transform function was given, apply it to each sound.

        Helper for the _read_words method.
        """
        trans = raw_trans.strip().split(',')[0].strip()
        trans = [bipa[x]
                 for part in trans.split(".")
                 for x in tokenizer(part, ipa=True).split()]
        if self.transform is not None:
            trans = [self.transform(s) for s in trans]
        return trans

    def _read_words(self, cog_sets=False):
        """
        Generate the [] of Word entries in the dataset. Raise a DatasetError if
        there is a problem reading the file.

        If the cog_sets flag is set, then yield (Word, cognate class) tuples.

        Side effect: repopulates self.equilibrium with raw segment counts.
        """
        try:
            with open(self.path, encoding='utf-8', newline='') as f:
                reader = csv.reader(f, dialect=self.dialect)
                header = self._read_header(next(reader),
                                           exclude=[] if cog_sets else ['cog_class'])
                self.equilibrium = defaultdict(float)
                for line in reader:
                    asjp = self._read_asjp(line[header['asjp']])
                    for i in asjp:
                        self.equilibrium[i] += 1.0
                    word = Word._make([
                        line[header['doculect']],
                        line[header['concept']],
                        tuple(asjp),
                        None])
                    if cog_sets:
                        yield word, line[header['cog_class']]
                    else:
                        yield word
        except csv.Error as err:
            # Chain the underlying csv error for easier debugging (it was
            # previously captured but silently discarded).
            raise DatasetError('Could not read file: {}'.format(self.path)) from err

    def get_equilibrium(self):
        """
        Return un-normalized equilibrium counts, reading the dataset first if
        it has not been read yet.
        """
        if self.equilibrium is None:
            self.get_words()
        return self.equilibrium

    def get_alphabet(self):
        """
        Return a sorted list of all characters found throughout transcriptions
        in the dataset.
        """
        if self.alphabet is not None:
            return self.alphabet

        self.alphabet = set()
        for word in self.get_words():
            self.alphabet |= set(word.sc)
        self.alphabet = sorted(self.alphabet)
        return self.alphabet

    def get_words(self):
        """
        Return the [] of Word named tuples comprising the dataset, excluding
        in-doculect synonyms; i.e. the output should include at most one word
        per doculect per concept.

        Raise a DatasetError if there is an error reading the file.
        """
        words = []
        seen = set()
        for word in self._read_words():
            key = (word.doculect, word.concept,)
            if key not in seen:
                seen.add(key)
                words.append(word)
        return words

    def get_concepts(self):
        """
        Return a {concept: words} dict mapping each concept in the dataset to a
        [] of Word tuples that belong to that concept. In-doculect synonyms are
        excluded.

        Raise a DatasetError if there is an error reading the dataset file.
        """
        d = defaultdict(list)
        for word in self.get_words():
            d[word.concept].append(word)
        return d

    def get_asjp_pairs(self, cutoff=1.0, as_int_tuples=False):
        """
        Return the list of the pairs of transcriptions of words from different
        languages but linked to the same concept.

        If the cutoff arg is less than 1.0, pairs with edit distance above that
        threshold are also ignored. If the other keyword arg is set, return the
        transcriptions as tuples of the letters' indices in self.alphabet.

        Raise a DatasetError if there is an error reading the dataset file.
        """
        pairs = []

        if as_int_tuples:
            alphabet = self.get_alphabet()

        for concept, words in self.get_concepts().items():
            for word1, word2 in itertools.combinations(words, 2):
                if word1.doculect == word2.doculect:
                    continue
                if normalized_levenshtein(word1.sc, word2.sc) > cutoff:
                    continue

                if as_int_tuples:
                    pair = (
                        tuple([alphabet.index(char) for char in word1.sc]),
                        tuple([alphabet.index(char) for char in word2.sc]))
                else:
                    pair = (word1.sc, word2.sc)

                pairs.append(pair)

        return pairs

    def get_clusters(self):
        """
        Return a {concept: cog_sets} dict where the values are frozen sets of
        frozen sets of Word tuples, comprising the set of cognate sets for that
        concept. In-doculect synonyms are excluded.

        Raise a DatasetError if the dataset does not include cognacy info or if
        there is a problem reading the file.
        """
        d = defaultdict(set)  # {(concept, cog_class): set of words}
        seen = set()  # set of (doculect, concept) tuples
        clusters = defaultdict(list)  # {concept: [frozenset of words, ..]}

        for word, cog_class in self._read_words(cog_sets=True):
            if (word.doculect, word.concept) not in seen:
                seen.add((word.doculect, word.concept))
                d[(word.concept, cog_class)].add(word)

        for (concept, cog_class), cog_set in d.items():
            clusters[concept].append(frozenset(cog_set))

        return {key: frozenset(value) for key, value in clusters.items()}
class CLDFDataset (Dataset):
    """A Dataset subclass for CLDF wordlists. """

    def __init__(self, path, transform=None):
        """Create, based on the path to a CLDF wordlist.

        This constructor assumes that a 'forms.csv' file is a metadata-free
        wordlist, and that any other file is a Wordlist metadata json file.

        Parameters
        ==========
        path: string or Path
            The path to a CLDF wordlist metadata file, or to a metadata-free
            forms.csv wordlist.
        transform: function Symbol→String or None
            A function to convert bipa Sounds into sound class symbols
            (use None for no conversion).
        """
        if str(path).endswith("forms.csv"):
            dataset = pycldf.Wordlist.from_data(path)
        else:
            dataset = pycldf.Wordlist.from_metadata(path)
        self.dataset = dataset
        self.transform = transform
        # populated lazily elsewhere (see get_alphabet on the parent class)
        # -- TODO confirm
        self.alphabet = None

    def _read_words(self, cog_sets=False):
        """Yield Word tuples built from the dataset's FormTable.

        If cog_sets is true, yield (Word, cognate_class) pairs instead,
        taking cognacy either directly from the FormTable or, when that
        column is absent, from a separate CognatesetTable keyed by form id.

        Side effect: (re)builds self.equilibrium, a {segment: count}
        defaultdict of raw segment frequencies over all yielded words.
        """
        # Resolve the column names actually used by this particular dataset.
        c_doculect = self.dataset["FormTable", "languageReference"].name
        c_concept = self.dataset["FormTable", "parameterReference"].name
        c_segments = self.dataset["FormTable", "segments"].name
        c_id = self.dataset["FormTable", "id"].name
        if cog_sets:
            try:
                # cognacy is stored directly on the form rows
                c_cog = self.dataset["FormTable", "cognatesetReference"].name
                lookup = False
            except KeyError:
                # cognacy lives in a separate table: build a form-id -> class map
                c_cog = self.dataset["CognatesetTable", "cognatesetReference"].name
                c_form = self.dataset["CognatesetTable", "formReference"].name
                # NOTE(review): an *empty* CognatesetTable leaves lookup falsy,
                # which sends the loop below down the row[c_cog] branch and
                # would raise KeyError -- confirm this cannot happen in practice.
                lookup = {}
                for row in self.dataset["CognatesetTable"].iterdicts():
                    lookup[row[c_form]] = row[c_cog]
        self.equilibrium = defaultdict(float)
        for row in self.dataset["FormTable"].iterdicts():
            if self.transform is None:
                asjp_segments = row[c_segments]
            else:
                # unknown BIPA sounds (empty name) map to the placeholder '0'
                asjp_segments = [self.transform(bipa[s]) if bipa[s].name else '0'
                        for s in row[c_segments]]
            if not asjp_segments:
                continue  # skip forms with no usable segments
            word = Word(
                row[c_doculect],
                row[c_concept],
                tuple(asjp_segments),
                row[c_id])
            for i in asjp_segments:
                self.equilibrium[i] += 1.0
            if cog_sets:
                if lookup:
                    yield word, lookup.get(word.id, None)
                else:
                    yield word, row[c_cog]
            else:
                yield word
class PairsDataset:
"""
Handles the reading of datasets stored in the training_data dir. These are
tsv files comprising ASJP word pairs with their respective edit distances.
Usage:
try:
dataset = PairsDataset(path)
word_pairs = dataset.get_asjp_pairs()
except DatasetError as err:
print(err)
"""
def __init__(self, path, transform=None):
    """
    Set the instance's props. Raise a DatasetError if the given file path
    does not exist.
    """
    if not Path(path).exists():
        raise DatasetError('Could not find file: {}'.format(path))
    self.path = path
    self.alphabet = None
    # fall back to plain str() when no transform is supplied
    if transform is None:
        self.transform = str
    else:
        self.transform = transform
def _read_pairs(self):
"""
Generate the (asjp, asjp, edit distance) entries from the dataset.
Raise a | |
<reponame>git-afsantos/hpl-rv-gen
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
# Copyright © 2021 <NAME>
###############################################################################
# Imports
###############################################################################
from __future__ import print_function, unicode_literals
from builtins import object, range # needed by PropertyMonitor
from collections import deque # needed by PropertyMonitor
from threading import Lock # needed by PropertyMonitor
import unittest
from hpl.parser import property_parser
from hplrv.constants import (
STATE_OFF, STATE_TRUE, STATE_FALSE,
STATE_INACTIVE, STATE_ACTIVE, STATE_SAFE
)
from hplrv.rendering import TemplateRenderer
from .common_data import *
from .absence_traces import *
from .existence_traces import *
from .precedence_traces import *
from .response_traces import *
from .prevention_traces import *
###############################################################################
# Test Case Generation
###############################################################################
def absence_properties():
    """Yield the (text, traces) example for every absence-pattern variant."""
    variants = (
        globally_no,
        globally_no_within,
        after_no,
        after_no_within,
        until_no,
        until_no_within,
        after_until_no,
        after_until_no_within,
    )
    for make_example in variants:
        yield make_example()
def existence_properties():
    """Yield the (text, traces) example for every existence-pattern variant."""
    variants = (
        globally_some,
        globally_some_within,
        after_some,
        after_some_within,
        until_some,
        until_some_within,
        after_until_some,
        after_until_some_within,
    )
    for make_example in variants:
        yield make_example()
def precedence_properties():
    """Yield the (text, traces) example for every precedence-pattern variant."""
    variants = (
        globally_requires,
        globally_requires_ref,
        globally_requires_within,
        globally_requires_ref_within,
        after_requires,
        after_requires_ref,
        after_requires_within,
        after_requires_ref_within,
        until_requires,
        until_requires_ref,
        until_requires_within,
        until_requires_ref_within,
        after_until_requires,
        after_until_requires_ref,
        after_until_requires_within,
        after_until_requires_ref_within,
    )
    for make_example in variants:
        yield make_example()
def response_properties():
    """Yield the (text, traces) example for every response-pattern variant.

    NOTE(review): unlike the other pattern families, the 'after' scope is
    missing the after_causes_ref() and after_causes_within() variants --
    confirm whether they exist in response_traces and were accidentally
    left out here.
    """
    variants = (
        globally_causes,
        globally_causes_ref,
        globally_causes_within,
        globally_causes_ref_within,
        after_causes,
        after_causes_ref_within,
        until_causes,
        until_causes_ref,
        until_causes_within,
        until_causes_ref_within,
        after_until_causes,
        after_until_causes_ref,
        after_until_causes_within,
        after_until_causes_ref_within,
    )
    for make_example in variants:
        yield make_example()
def prevention_properties():
    """Yield the (text, traces) example for every prevention-pattern variant."""
    variants = (
        globally_forbids,
        globally_forbids_ref,
        globally_forbids_within,
        globally_forbids_ref_within,
        after_forbids,
        after_forbids_ref,
        after_forbids_within,
        after_forbids_ref_within,
        until_forbids,
        until_forbids_ref,
        until_forbids_within,
        until_forbids_ref_within,
        after_until_forbids,
        after_until_forbids_ref,
        after_until_forbids_within,
        after_until_forbids_ref_within,
    )
    for make_example in variants:
        yield make_example()
def all_types_of_property():
    """Chain the (text, traces) examples of every supported property pattern."""
    families = (
        absence_properties,
        existence_properties,
        precedence_properties,
        response_properties,
        prevention_properties,
    )
    for family in families:
        for example in family():
            yield example
###############################################################################
# Test Loop
###############################################################################
def state_name(s):
    """Translate a monitor state constant into a human-readable label."""
    # scanned in order so the first matching constant wins, exactly like the
    # original if/elif chain
    labels = (
        (STATE_FALSE, 'FALSE'),
        (STATE_TRUE, 'TRUE'),
        (STATE_OFF, 'OFF'),
        (STATE_INACTIVE, 'INACTIVE'),
        (STATE_ACTIVE, 'ACTIVE'),
        (STATE_SAFE, 'SAFE'),
    )
    for constant, label in labels:
        if s == constant:
            return label
    if s is None:
        return '(none)'
    return 'STATE {}'.format(s)
def pretty_trace(trace):
    """Render a trace as one line per event for debugging output.

    Each event is tagged by its first element (an E_* constant) and also
    carries attribute access (e.topic, e.msg, e.state, e.drops) -- the
    events are presumably namedtuple-like; TODO confirm in common_data.
    Timestamps are just 1-based positions in the trace.
    """
    s = []
    t = 0
    for e in trace:
        t += 1
        etype = e[0]
        if etype == E_TIMER:
            s.append('@ {}: (Timer, -{}) -> {}'.format(
                t, e.drops, state_name(e.state)))
        elif etype == E_ACTIVATOR:
            s.append("@ {}: (Activator) '{}' {} -> {}".format(
                t, e.topic, e.msg, state_name(e.state)))
        elif etype == E_TERMINATOR:
            s.append("@ {}: (Terminator) '{}' {} -> {}".format(
                t, e.topic, e.msg, state_name(e.state)))
        elif etype == E_BEHAVIOUR:
            s.append("@ {}: (Behaviour) '{}' {} -> {}".format(
                t, e.topic, e.msg, state_name(e.state)))
        elif etype == E_TRIGGER:
            s.append("@ {}: (Trigger) '{}' {} -> {}".format(
                t, e.topic, e.msg, state_name(e.state)))
        else:
            # anything unrecognised is spam the monitor should ignore
            s.append("@ {}: (Spam) '{}' {}".format(
                t, e.topic, e.msg))
    return "\n".join(s)
def pretty_monitor(m):
    """Return a multi-line dump of a monitor's bookkeeping fields."""
    fields = [
        ('m._state', m._state),
        ('m.time_launch', m.time_launch),
        ('m.time_shutdown', m.time_shutdown),
        ('m.time_state', m.time_state),
        ('m.witness', m.witness),
        # _pool only exists on monitors with timed triggers
        ('m._pool', getattr(m, '_pool', None)),
    ]
    return "\n".join('{} = {}'.format(label, value) for label, value in fields)
def prod(iterable):
    """Multiply the elements together (1 for empty input), stopping early at 0."""
    result = 1
    for factor in iterable:
        result = result * factor
        if result == 0:
            return 0  # nothing can change the product any more
    return result
class TestMonitorClasses(unittest.TestCase):
def setUp(self):
    """Reset all per-example bookkeeping before each test method runs."""
    self._reset()
def test_examples(self):
    """Render a monitor for every example property and replay its traces.

    For each (text, traces) example: parse the HPL property, render its
    monitor source, instantiate the monitor, then drive every trace event
    by event, checking the monitor's state after each step.
    NOTE(review): the monitor instance appears to be shared across the
    traces of one property; on_launch presumably resets its state between
    traces -- confirm against the rendered template.
    """
    n = 0
    p = property_parser()
    r = TemplateRenderer()
    for text, traces in all_types_of_property():
        hp = p.parse(text)
        # timed requirement/response/prevention monitors keep a trigger
        # pool whose entries decay as time passes
        self.pool_decay = ((hp.pattern.is_requirement
            or hp.pattern.is_response
            or hp.pattern.is_prevention)
            and hp.pattern.has_max_time)
        py = r.render_monitor(hp)
        m = self._make_monitor(py)
        for trace in traces:
            n += 1
            self.hpl_string = text
            self._set_trace_string(trace, n)
            self._launch(hp, m)
            time = 0
            for event in trace:
                time += 1
                self._dispatch(m, event, time)
            self._shutdown(m)
            self._reset()
    print('Tested {} examples.'.format(n))
def _reset(self):
    """Clear all per-example bookkeeping (strings and callback records)."""
    for attr in ('debug_string', 'trace_string', 'hpl_string'):
        setattr(self, attr, '')
    # timestamps / (timestamp, witness) pairs captured by the monitor callbacks
    for attr in ('entered_scope', 'exited_scope',
                 'found_success', 'found_failure'):
        setattr(self, attr, [])
def _make_monitor(self, py):
    """Compile generated monitor source and return a fresh PropertyMonitor.

    `py` is the source emitted by TemplateRenderer; executing it defines a
    PropertyMonitor class. It is executed in a copy of this module's
    globals so the generated code can see the names it needs (deque,
    Lock, ...). The returned monitor has this test's callbacks attached
    and is verified to be in its pristine OFF state.
    """
    # Bug fix: a bare exec(py) followed by `PropertyMonitor()` only works on
    # Python 2 -- in Python 3, names bound by exec() inside a function are
    # not visible to the surrounding code. Execute into an explicit
    # namespace and fetch the class from it instead.
    namespace = dict(globals())
    exec(py, namespace)
    m = namespace['PropertyMonitor']()
    m.on_enter_scope = self._on_enter
    m.on_exit_scope = self._on_exit
    m.on_success = self._on_success
    m.on_violation = self._on_failure
    self._update_debug_string(m, -1)
    assert m._state == STATE_OFF, self.debug_string
    assert m.verdict is None, self.debug_string
    assert not m.witness, self.debug_string
    assert m.time_launch < 0, self.debug_string
    assert m.time_state < 0, self.debug_string
    assert m.time_shutdown < 0, self.debug_string
    return m
def _launch(self, hp, m):
    """Start the monitor at t=0 and check its initial state.

    Global/until scopes enter the scope immediately on launch; 'after'
    scopes stay inactive until an activator arrives. A second on_launch()
    must raise RuntimeError.
    """
    m.on_launch(0)
    self._update_debug_string(m, 0)
    if hp.scope.is_global or hp.scope.is_until:
        assert len(self.entered_scope) == 1, self.debug_string
        assert self.entered_scope[0] == 0, self.debug_string
    else:
        assert not self.entered_scope, self.debug_string
    assert not self.exited_scope, self.debug_string
    assert not self.found_success, self.debug_string
    assert not self.found_failure, self.debug_string
    assert m._state != STATE_OFF, self.debug_string
    assert m.verdict is None, self.debug_string
    assert not m.witness, self.debug_string
    assert m.time_launch == 0, self.debug_string
    assert m.time_state == 0, self.debug_string
    assert m.time_shutdown < 0, self.debug_string
    # launching twice must be rejected
    try:
        m.on_launch(0)
        self._update_debug_string(m, 0)
        assert False, self.debug_string
    except RuntimeError:
        pass # expected
def _dispatch(self, m, event, time):
    """Route an event to the checker matching its type tag (event[0])."""
    handlers = (
        (E_TIMER, self._dispatch_timer),
        (E_ACTIVATOR, self._dispatch_activator),
        (E_TERMINATOR, self._dispatch_terminator),
        (E_BEHAVIOUR, self._dispatch_behaviour),
        (E_TRIGGER, self._dispatch_trigger),
    )
    etype = event[0]
    for tag, handler in handlers:
        if etype == tag:
            handler(m, event, time)
            return
    # anything unrecognised is spam the monitor must ignore
    self._dispatch_spam(m, event, time)
def _dispatch_activator(self, m, event, time):
    """Feed an activator message and check it opens the scope.

    The message must be consumed, push exactly one scope entry at `time`,
    produce no verdict, and become the monitor's sole witness.
    """
    n = len(self.entered_scope)
    # activators are only meaningful while the monitor is out of scope
    assert m._state == STATE_INACTIVE, self.debug_string
    consumed = self._dispatch_msg(m, event.topic, event.msg, time)
    self._update_debug_string(m, time)
    assert consumed, self.debug_string
    assert len(self.entered_scope) == n + 1, self.debug_string
    assert self.entered_scope[-1] == time, self.debug_string
    assert len(self.exited_scope) == n, self.debug_string
    assert not self.found_success, self.debug_string
    assert not self.found_failure, self.debug_string
    assert m._state == event.state, self.debug_string
    assert m.verdict is None, self.debug_string
    assert len(m.witness) == 1, self.debug_string
    assert m.witness[-1].topic == event.topic, self.debug_string
    assert m.witness[-1].timestamp == time, self.debug_string
    assert m.witness[-1].msg == event.msg, self.debug_string
    assert m.time_state == time, self.debug_string
def _dispatch_terminator(self, m, event, time):
    """Feed a terminator message and check it closes the scope.

    The message must be consumed and record a scope exit at `time`; the
    expected verdict (if any) is carried by event.state and checked by
    _check_verdict. Returning to INACTIVE must clear witness and pool.
    """
    n = len(self.exited_scope)
    # terminators only make sense while the monitor is inside the scope
    assert m._state > STATE_INACTIVE, self.debug_string
    consumed = self._dispatch_msg(m, event.topic, event.msg, time)
    self._update_debug_string(m, time)
    assert consumed, self.debug_string
    assert len(self.exited_scope) == n + 1, self.debug_string
    assert self.exited_scope[-1] == time, self.debug_string
    assert len(self.entered_scope) == n + 1, self.debug_string
    self._check_verdict(m, event, time)
    if event.state == STATE_INACTIVE:
        assert not m.witness, self.debug_string
        assert not getattr(m, '_pool', None), self.debug_string
def _dispatch_behaviour(self, m, event, time):
    """Feed a behaviour message and check it is consumed without scope changes.

    event.state is None when no state transition is expected; otherwise the
    resulting state/verdict is checked by _check_verdict.
    """
    a = len(self.entered_scope)
    b = len(self.exited_scope)
    s = m._state
    # behaviour messages are only observed while the scope is open
    assert m._state == STATE_ACTIVE or m._state == STATE_SAFE, \
        self.debug_string
    consumed = self._dispatch_msg(m, event.topic, event.msg, time)
    self._update_debug_string(m, time)
    assert consumed, self.debug_string
    assert len(self.entered_scope) == a, self.debug_string
    assert len(self.exited_scope) == b, self.debug_string
    if event.state is None:
        assert m._state == s, self.debug_string
    self._check_verdict(m, event, time)
def _dispatch_trigger(self, m, event, time):
    """Feed a trigger message and check it lands in the monitor's pool.

    k is -1 when the monitor keeps no pool at all; otherwise the pool must
    not shrink and its newest record must be this message.
    """
    a = len(self.entered_scope)
    b = len(self.exited_scope)
    k = -1 if not hasattr(m, '_pool') else len(m._pool)
    s = m._state
    consumed = self._dispatch_msg(m, event.topic, event.msg, time)
    self._update_debug_string(m, time)
    assert consumed, self.debug_string
    assert len(self.entered_scope) == a, self.debug_string
    assert len(self.exited_scope) == b, self.debug_string
    if event.state is None:
        assert m._state == s, self.debug_string
    self._check_verdict(m, event, time)
    if k >= 0:
        assert len(m._pool) >= k, self.debug_string
        assert m._pool[-1].topic == event.topic, self.debug_string
        assert m._pool[-1].timestamp == time, self.debug_string
        assert m._pool[-1].msg == event.msg, self.debug_string
def _dispatch_spam(self, m, event, time):
    """Feed an irrelevant message and check the monitor ignores it.

    The message must not be consumed and must not change scope. With
    pool_decay (timed patterns) the pool may shrink as time advances;
    otherwise it must stay the same size.
    """
    a = len(self.entered_scope)
    b = len(self.exited_scope)
    k = len(getattr(m, '_pool', ()))
    n = len(m.witness)
    s = m._state
    t = m.time_state
    consumed = self._dispatch_msg(m, event.topic, event.msg, time)
    self._update_debug_string(m, time)
    assert not consumed, self.debug_string
    assert len(self.entered_scope) == a, self.debug_string
    assert len(self.exited_scope) == b, self.debug_string
    if self.pool_decay:
        assert len(getattr(m, '_pool', ())) <= k, self.debug_string
    else:
        assert len(getattr(m, '_pool', ())) == k, self.debug_string
    self._check_automatic_transition(m, event.state, time, s, t)
def _dispatch_msg(self, m, topic, msg, time):
    """Forward msg to the monitor's per-topic callback; return its result."""
    handler = getattr(m, 'on_msg_' + topic)
    return handler(msg, time)
def _dispatch_timer(self, m, event, time):
    """Advance the monitor clock and check only automatic effects happen.

    A timer tick never enters/exits scope; it may only drop timed-out pool
    entries and perform the automatic transition encoded in event.state.
    """
    a = len(self.entered_scope)
    b = len(self.exited_scope)
    k = len(getattr(m, '_pool', ()))
    s = m._state
    t = m.time_state
    m.on_timer(time)
    self._update_debug_string(m, time)
    assert len(self.entered_scope) == a, self.debug_string
    assert len(self.exited_scope) == b, self.debug_string
    assert len(getattr(m, '_pool', ())) <= k, self.debug_string
    self._check_automatic_transition(m, event.state, time, s, t)
def _shutdown(self, m):
    """Stop the monitor at t=1000 and check the final bookkeeping.

    A second on_shutdown() must raise RuntimeError.
    """
    m.on_shutdown(1000)
    self._update_debug_string(m, 1000)
    assert m._state == STATE_OFF, self.debug_string
    assert m.time_launch == 0, self.debug_string
    assert m.time_state >= 0, self.debug_string
    assert m.time_shutdown == 1000, self.debug_string
    # shutting down twice must be rejected
    try:
        m.on_shutdown(2000)
        self._update_debug_string(m, 2000)
        assert False, self.debug_string
    except RuntimeError:
        pass # expected
def _check_verdict(self, m, event, time):
if event.state is not None:
assert m._state == event.state, self.debug_string
if event.state == STATE_TRUE:
assert len(self.found_success) == 1, self.debug_string
assert self.found_success[0][0] == time, self.debug_string
assert self.found_success[0][1] == m.witness, self.debug_string
assert m.verdict is True, self.debug_string
assert len(m.witness) >= 1, self.debug_string
assert m.witness[-1].topic == event.topic, self.debug_string
assert m.witness[-1].timestamp == time, self.debug_string
assert m.witness[-1].msg == event.msg, self.debug_string
assert m.time_state == time, self.debug_string
elif event.state == STATE_FALSE:
assert len(self.found_failure) == 1, self.debug_string
assert self.found_failure[0][0] == time, self.debug_string
assert self.found_failure[0][1] == m.witness, self.debug_string
assert m.verdict is False, self.debug_string
assert len(m.witness) >= 1, self.debug_string
assert m.witness[-1].topic == event.topic, self.debug_string
assert m.witness[-1].timestamp == time, self.debug_string
assert m.witness[-1].msg == event.msg, self.debug_string
assert m.time_state == time, self.debug_string
else:
assert not self.found_success, self.debug_string
assert not self.found_failure, self.debug_string
assert | |
<filename>GA.py
####### PART 1.A - EA #######
# Name : <NAME>
# Student ID : HW00281038
# Date : Oct. 1st 2017
##############################
import random
import math
import numpy as np
import itertools
import copy
import time
import pandas as pd
import matplotlib.pyplot as plt
import profile
import functools
import operator
import time
from random import shuffle
import heapq
from statistics import mean
from operator import methodcaller
# TSP_Cost function implemented in Cython. Note : Would have to recompile in order to rename...
import test_fast
def checkData(data):
    """Validate a candidate tour: 29 cities, no duplicates.

    NOTE(review): the unconditional `return True` below short-circuits the
    checks -- validation is effectively disabled and everything after it is
    dead code. Presumably left in on purpose to turn off the (dataset
    specific, hard-coded 29-city) checks used by the assert() calls in
    GA.crossover/nextGeneration; confirm before re-enabling.
    """
    return True
    if len(data) != 29:
        return False
    if len(data) > len(set(data)):
        return False
    return True
def checkCityDistances():
    """Debug helper: build the identity tour over all cities and plot it.

    Requires Traveler.setSettings() to have been called so that
    Traveler.encoding is populated. Despite the name, the distance-printing
    part is commented out and only the geoPlot remains.
    """
    trav = Traveler(range(0,Traveler.encoding['lenght']))
    # drop city 0 -- presumably the fixed start city; confirm against encoding
    del(trav.data[0])
    #trav.data.append(trav.data[0])
    #for x in range(1,Traveler.encoding['lenght']-1):
    #    distance = test_fast.TSP_Cost(Traveler.encoding['dataset'][trav.data[x]][1], Traveler.encoding['dataset'][trav.data[x]][2], Traveler.encoding['dataset'][trav.data[x+1]][1], Traveler.encoding['dataset'][trav.data[x+1]][2])
    #    print(f"Distance between city {x} and city {x+1} : {distance}")
    geoPlot(trav)
def fitnessPlotFromFile():
    """Load the last recorded fitness history from disk and plot it.

    Reads logs/last_fitness_record, skipping the header line; each
    remaining line is 'generation;fitness'. Raises OSError if the log file
    does not exist -- TODO decide whether a missing file should be handled.
    """
    # Use a context manager so the handle is closed promptly (the original
    # bare open() in the comprehension leaked the file object).
    with open("logs/last_fitness_record", 'r') as record:
        data = [line.strip() for line in record][1:]
    # convert each 'gen;fitness' line into a pair of ints
    history = [list(map(int, entry.split(';'))) for entry in data]
    fitnessPlot(history, 0, True)
def fitnessPlot(fitness, last, new_figure = False): # Part of this should be moved to the init phase so that it is not executed multiple times unnecessarily
    """Plot best-fitness-per-generation history.

    fitness: list of (generation, fitness) pairs.
    last: plot only the trailing `last` entries; 0 plots everything
        (because [-0:] is the whole list).
    new_figure: draw on a separate figure 500 instead of reusing and
        clearing the live figure 300.
    """
    if new_figure:
        plt.figure(500)
    else:
        plt.figure(300)
        plt.clf()  # reuse the live figure: wipe the previous curve
    gen = [x[0] for x in fitness[-last:]]
    fit = [x[1] for x in fitness[-last:]]
    plt.plot(gen, fit)
    plt.xlabel('Generation count')
    plt.ylabel('Best individual fitness')
    plt.title('Fitness vs generations')
    #plt.text(gen[0]+10, fit[0], f'Current fitness : {fit[-1]}')
    # NOTE(review): no labeled artists -- this emits a warning; confirm intent
    plt.legend()
    plt.draw()
    plt.pause(0.01)  # let the GUI event loop refresh the figure
def geoPlot(best):
    """Plot the tour of `best` as connected city coordinates on figure 200.

    best: a Traveler whose .data lists city indices into
        Traveler.encoding['dataset'] (rows are (id?, lat, lon) -- columns 1
        and 2 are used as y and x; TODO confirm column meaning).
    """
    plt.figure(200)
    # Bug fix: close the tour in a local copy instead of best.data.append();
    # the original mutated the caller's individual, growing its chromosome
    # by one city on every call.
    tour = list(best.data) + [best.data[0]]
    DATA = Traveler.encoding['dataset']
    for idx in range(len(tour) - 1):
        plt.plot((DATA[tour[idx]][2], DATA[tour[idx + 1]][2]),
                 (DATA[tour[idx]][1], DATA[tour[idx + 1]][1]), marker='o')
    plt.draw()
    plt.pause(0.001)  # let the GUI event loop refresh the figure
class GA:
stall_options = {'abort': 1, 'rm-dup':2, 'rm-dup-bts':3, 'ignore':4}
not_init = True
    def __init__(self, config):
        """Build a GA run from a config object exposing a .settings dict."""
        self.settings = config.settings
        # 'lenght' [sic] is the key used throughout the project -- keep the typo.
        # One gene fewer than cities -- presumably city 0 is a fixed start;
        # TODO confirm against the Traveler encoding.
        self.settings['encoding']['lenght'] = len(self.settings['encoding']['dataset'])-1
        self.settings['encoding']['span'] = list(range(1,len(self.settings['encoding']['dataset'])))
        self.pop_size = self.settings['pop']['pop_size'] # Shorter alias
        Traveler.setSettings(self.settings)
        self.init_pop()
        # (generation, fitness) history -- appended to outside this view
        self.fitness_record = []
def init_pop(self):
self.population = []
# Create a 10*$(pop_size) population
for x in range(0,self.pop_size*self.settings['pop']['init_factor']):
self.population.append(Traveler())
# Keep the best ones
self.sortPopulation()
self.population = self.population[:self.pop_size]
    def crossover(self, parents_ids):
        """Produce two children from the parents at `parents_ids`.

        Only the 'pmx' (partially mapped crossover) branch builds and
        returns the [child1, child2] list -- see the NOTE on 'one-point-co'.
        """
        algo_name = self.settings['algo']['crossover']
        #print(f"Using crossover {algo_name}")
        if algo_name == 'one-point-co':
            # NOTE(review): `pop` is undefined here (NameError if this branch
            # ever runs) and the branch falls through returning None while
            # callers expect a list of children. Looks like stale/dead code
            # -- confirm before use.
            for x in pop:
                cross_indiv = self.population[random.randrange(0,self.pop_size)]
                x.crossover(cross_indiv)
        elif algo_name == 'pmx':
            p_fit = []
            p_fit.append(self.population[parents_ids[0]].getFitness())
            p_fit.append(self.population[parents_ids[1]].getFitness())
            # pick a random slice [x1, x2] to exchange between the parents
            x1_t = random.randrange(0,self.settings['encoding']['lenght'])
            x2_t = random.randrange(0,self.settings['encoding']['lenght'])
            x1 = min([x1_t,x2_t]) # need x1 <= x2, otherwise list slices don't work
            x2 = max([x1_t,x2_t])
            chunk1 = self.population[parents_ids[0]].data[x1:x2+1]
            chunk2 = self.population[parents_ids[1]].data[x1:x2+1]
            # position-wise value mappings between the two exchanged chunks
            coor1 = {}
            coor2 = {}
            for idx, x in enumerate(chunk1):
                coor1[x] = chunk2[idx]
            for idx, x in enumerate(chunk2):
                coor2[x] = chunk1[idx]
            child1_data = [None] * self.settings['encoding']['lenght']
            child2_data = [None] * self.settings['encoding']['lenght']
            child1_data[x1:x2+1] = chunk2[:]
            child2_data[x1:x2+1] = chunk1[:]
            # fill the positions outside the slice, chasing the mapping so a
            # city already placed by the exchanged chunk is never duplicated
            for idx in range(0, self.settings['encoding']['lenght']):
                if idx < x1 or idx > x2:
                    p1_val = self.population[parents_ids[0]].data[idx]
                    if p1_val not in coor2:
                        child1_data[idx] = p1_val
                    else:
                        while p1_val in coor2:
                            p1_val = coor2[p1_val]
                        child1_data[idx] = p1_val
            for idx in range(0, self.settings['encoding']['lenght']):
                if idx < x1 or idx > x2:
                    p2_val = self.population[parents_ids[1]].data[idx]
                    if p2_val not in coor1:
                        child2_data[idx] = p2_val
                    else:
                        while p2_val in coor1:
                            p2_val = coor1[p2_val]
                        child2_data[idx] = p2_val
            assert(checkData(child2_data))
            assert(checkData(child1_data))
            children_arr = []
            children_arr.append(Traveler(child1_data))
            children_arr.append(Traveler(child2_data))
            return children_arr
def select(self, nb, override_algo = None):
if override_algo == None:
select_algo = self.settings['algo']['select']
else:
select_algo = override_algo
ret_pop = []
for _ in range(0,nb):
if select_algo[0] == 'bts':
#print(f"Using select {select_algo}")
# Tournament population
trm_ids = random.sample(range(0, len(self.population)), int(select_algo[1] * len(self.population) / 100)) # Can't use pop size if using elitsm, len(pop) != pop_size for now
best_id = trm_ids[0]
best_fitness = self.population[best_id].getFitness()
# Get best individual from tournament
for idx in trm_ids:
fitness = self.population[idx].getFitness() # Avoid recalculating fitness everytime
if fitness < best_fitness:
best_id = idx
best_fitness = fitness
# Append selected individual to the list
ret_pop.append(best_id)
return ret_pop
def roulette(self, nb, individuals):
# roulette with high biais
if(nb >= len(individuals)):
raise Exception("Roulette must have more input individuals than output individuals : nb < len(individuals)")
if(nb == 0 or len(individuals) <= 1):
raise Exception("Roulette input count must be greater than 1 - output must be greater than 0")
indiv_fitness = []
for indiv in individuals:
indiv_fitness.append(indiv.getFitness())
# Product much faster than exponentiation. 6-7x
sum_fitness = sum(indiv_fitness)
real_fitness = [(sum_fitness-x)*(sum_fitness-x) for x in indiv_fitness]
indiv_fitness_norm = [x/sum(real_fitness) for x in real_fitness]
assert(round(sum(indiv_fitness_norm), 9) == 1.0) # Level to which numpy doesn't complain if sum != 1.0. Ex : p=[0.01,0.98999999] is fine
idx = []
for n in range(nb):
new_id = np.random.choice(range(len(individuals)), p=indiv_fitness_norm)
while new_id in idx: # Not optimized...
new_id = np.random.choice(range(len(individuals)), p=indiv_fitness_norm)
idx.append(new_id)
return [individuals[id_] for id_ in idx]
    def nextGeneration(self):
        """Advance the population by one generation.

        Dispatches on settings['algo']['update']:
          * ('elitism', pct)  -- keep the best pct%, refill via crossover
            children or mutated copies of the survivors.
          * other schemes     -- run pct% * pop_size selection rounds, each
            producing two (crossed-over and/or mutated) children that may
            replace their parents ('replace-parent', 'proba-replace-parent')
            or the current worst individuals ('replace-worst').
        """
        update_algo = self.settings['algo']['update']
        co_algo = self.settings['algo']['crossover']
        if update_algo[0] == 'elitism':
            self.sortPopulation()
            # Current best individuals
            kept_index = math.floor(update_algo[1] * self.pop_size / 100)
            # Keep only the best ones !
            old_pop = self.population
            self.population = self.population[:kept_index]
            if co_algo != None:
                # Replenish population with children coming from crossover + mutated
                for _ in range(0,int((self.pop_size - kept_index)/2)):
                    children = self.crossover(self.select(2))
                    for child in children:
                        self.population.append(child)
                assert(self.population != old_pop)
            # Truncation algorithm
            else:
                # Replenish population with mutated copies of the best ones
                # NOTE(review): this can overshoot pop_size within one pass
                # since it appends len(best) copies per iteration -- confirm.
                while(len(self.population) != self.pop_size):
                    best = self.population # Temporary variable, can't append to the list being iterated over
                    for x in best:
                        new_indiv = Traveler(x.data)
                        new_indiv.mutate()
                        self.population.append(new_indiv)
        else:
            # Update rounds
            for _ in range(0, int(update_algo[1] * self.pop_size / 100)):
                # Select
                parents_ids = self.select(2)
                p_fit = []
                p_fit.append(self.population[parents_ids[0]].getFitness())
                p_fit.append(self.population[parents_ids[1]].getFitness())
                # Crossover
                if co_algo != None:
                    children = self.crossover(parents_ids)
                    assert(len(children) == 2)
                    assert(checkData(children[0].data))
                    assert(checkData(children[1].data))
                    # crossover must not have touched the parents themselves
                    assert(self.population[parents_ids[0]].getFitness() == p_fit[0])
                    assert(self.population[parents_ids[1]].getFitness() == p_fit[1])
                else:
                    children = [Traveler(self.population[x].data) for x in parents_ids]
                # Mutate
                for x in children:
                    x.mutate()
                # So that we replace optimally. Ex : p1 = 3, p2 = 7 must be replaced by ch1 = 9, ch2 = 5 in this order -> result : 7,9, otherwise 5,9
                children.sort(key=methodcaller('getFitness'), reverse=not self.settings['encoding']['maximize'])
                if self.population[parents_ids[0]].getFitness() > self.population[parents_ids[1]].getFitness():
                    parents_ids[0], parents_ids[1] = parents_ids[1], parents_ids[0]
                if update_algo[0] == 'proba-replace-parent':
                    # Roulette over {children + parents}: the two winners take
                    # the parents' slots.
                    indiv = children
                    indiv.extend([self.population[id_] for id_ in parents_ids])
                    replacement = self.roulette(2,indiv)
                    for idx in range(len(replacement)):
                        self.population[parents_ids[idx]] = replacement[idx]
                if update_algo[0] == 'replace-parent':
                    # Replace (parents) -- only when the child improves on its parent
                    for idx in range(0, 2):
                        ch_fit = children[idx].getFitness()
                        if ch_fit < p_fit[idx]:
                            self.population[parents_ids[idx]] = children[idx]
                            assert(ch_fit < p_fit[0] or ch_fit < p_fit[1])
                elif update_algo[0] == 'replace-worst':
                    #print(f"Using update {update_algo}")
                    self.sortPopulation()
                    for idx in range(0, 2):
                        ch_fit = children[idx].getFitness()
                        worst_fit = self.population[-2+idx].getFitness() # -2 + 0 = -2 : 2sd worst, replaced by best children, -2 + 1 = -1 : worst, replaced by worst child
                        if ch_fit < worst_fit:
                            self.population[-2+idx] = children[idx]
        # Used to check for any "population contamination" - ie. the data field of 2 individuals are pointing at the same memory space - they are linked -> reduced diversity.
        #for x in range(0, self.pop_size):
        #    for y in range(0,self.pop_size):
        #        if x != y:
        #            assert(self.population[x] is not self.population[y])
        #            assert(self.population[x].data is not self.population[y].data)
# Used to re-fill the population. Necessary when removing duplicates or using 'elitism' update scheme
def fill(self):
while(len(self.population) < self.pop_size):
self.population.append(Traveler())
def sortPopulation(self):
self.population.sort(key=methodcaller('getFitness'), reverse=self.settings['encoding']['maximize'])
def getPopFitness(self, size=0):
if size == 0:
size = self.pop_size
return [x.getFitness() for x in self.population[0:size]]
    # Returns a string containing information about the current generation population
    def getPop(self, size = 0, pop_list = None):
        """Render a human-readable summary of pop_list (default: population).

        Relies on self.gen_count (set by start()) and the class-level
        Traveler.created_count counter; size=0 prints the whole list.
        """
        if pop_list == None:
            pop_list = self.population
        if size == 0:
            size = len(pop_list)
        text = [str(x.id) + " - Fitness : " + str(x.getFitness()) for x in pop_list[:size]]
        string = '\n'.join(str(x) for x in text)
        return "Generation : {}\n".format(self.gen_count) + str(string) + "\nTraveler created count : {}".format(Traveler.created_count) + "\n"
# Starts the GA
def start(self):
self.gen_count = 0
# Varibles used to stop the GA on specific goals
max_gen = self.settings['stop']['max_gen']
max_time = self.settings['stop']['max_time']
| |
% i):
layer = classifiers.hidden(layer, 2*hidden_size,
hidden_func=hidden_func,
hidden_keep_prob=hidden_keep_prob)
with tf.variable_scope('FC-top'):
layers = classifiers.hiddens(layer, 2*[hidden_size],
hidden_func=hidden_func,
hidden_keep_prob=hidden_keep_prob)
layer1, layer2 = layers.pop(0), layers.pop(0)
with tf.variable_scope('Classifier'):
if self.diagonal:
logits = classifiers.diagonal_bilinear_classifier(
layer1, layer2, len(self),
hidden_keep_prob=hidden_keep_prob,
add_linear=add_linear)
else:
logits = classifiers.bilinear_classifier(
layer1, layer2, len(self),
hidden_keep_prob=hidden_keep_prob,
add_linear=add_linear)
bucket_size = tf.shape(layer)[-2]
#-------------------------------------------------------
# Process the targets
# c (*) (n x m) + (n x m)
#targets = len(self) * unlabeled_targets + self.placeholder
targets = bucket_size * self.placeholder + unlabeled_targets
#-------------------------------------------------------
# Process the logits
# (n x m x c x m) -> (n x m x cm)
reshaped_logits = tf.reshape(logits, tf.stack([-1, bucket_size, bucket_size * len(self)]))
#-------------------------------------------------------
# Compute probabilities/cross entropy
# (n x m x cm) -> (n x m x cm)
probabilities = tf.nn.softmax(reshaped_logits)
# (n x m x cm) -> (n x m x c x m)
probabilities = tf.reshape(probabilities, tf.stack([-1, bucket_size, len(self), bucket_size]))
# (n x m x c x m) -> (n x m x m x c)
probabilities = tf.transpose(probabilities, [0,1,3,2])
# (n x m), (n x m x cm), (n x m) -> ()
loss = tf.losses.sparse_softmax_cross_entropy(targets, reshaped_logits, weights=token_weights)
#-------------------------------------------------------
# Compute predictions/accuracy
# (n x m x cm) -> (n x m)
predictions = tf.argmax(reshaped_logits, axis=-1, output_type=tf.int32)
# (n x m), () -> (n x m)
unlabeled_predictions = tf.mod(predictions, bucket_size)
# (n x m) (*) (n x m) -> (n x m)
correct_tokens = nn.equal(predictions, targets) * token_weights
correct_unlabeled_tokens = nn.equal(unlabeled_predictions, unlabeled_targets) * token_weights
# (n x m) -> (n)
tokens_per_sequence = tf.reduce_sum(token_weights, axis=-1)
# (n x m) -> (n)
correct_tokens_per_sequence = tf.reduce_sum(correct_tokens, axis=-1)
correct_unlabeled_tokens_per_sequence = tf.reduce_sum(correct_unlabeled_tokens, axis=-1)
# (n), (n) -> (n)
correct_sequences = nn.equal(tokens_per_sequence, correct_tokens_per_sequence)
correct_unlabeled_sequences = nn.equal(tokens_per_sequence, correct_unlabeled_tokens_per_sequence)
#-----------------------------------------------------------
# Populate the output dictionary
outputs = {}
outputs['recur_layer'] = recur_layer
outputs['unlabeled_targets'] = unlabeled_targets
outputs['probabilities'] = probabilities
outputs['unlabeled_loss'] = tf.constant(0.)
outputs['loss'] = loss
outputs['unlabeled_predictions'] = unlabeled_predictions
outputs['label_predictions'] = predictions
outputs['n_correct_unlabeled_tokens'] = tf.reduce_sum(correct_unlabeled_tokens)
outputs['n_correct_unlabeled_sequences'] = tf.reduce_sum(correct_unlabeled_sequences)
outputs['n_correct_tokens'] = tf.reduce_sum(correct_tokens)
outputs['n_correct_sequences'] = tf.reduce_sum(correct_sequences)
return outputs
#=============================================================
# TODO make this compatible with zipped files
def count(self, train_conllus):
""""""
for train_conllu in train_conllus:
# with codecs.open(train_conllu, encoding='utf-8', errors='ignore') as f:
with open(train_conllu,encoding='utf8') as f:
reader=f.readlines()
for line in reader:
line = line.strip()
if line and not line.startswith('#'):
line = line.split('\t')
token = line[self.conllu_idx] # conllu_idx is provided by the CoNLLUVocab
self._count(token)
self.index_by_counts()
return True
def _count(self, token):
if not self.cased:
token = token.lower()
self.counts[token] += 1
return
    #=============================================================
    def get_bos(self):
        """Return the beginning-of-sentence token string."""
        return self.BOS_STR
    #=============================================================
    def get_eos(self):
        """Return the end-of-sentence token string."""
        return self.EOS_STR
    #=============================================================
    # Config-backed hyperparameters: each property re-reads this vocab's
    # config section on every access.
    @property
    def diagonal(self):
        # use the diagonal variant of the bilinear classifier
        return self._config.getboolean(self, 'diagonal')
    @property
    def add_linear(self):
        # add linear terms to the (diagonal) bilinear classifier
        return self._config.getboolean(self, 'add_linear')
    @property
    def loss_interpolation(self):
        # mixing weight between component losses -- TODO confirm usage site
        return self._config.getfloat(self, 'loss_interpolation')
    @property
    def drop_func(self):
        # resolve the configured dropout-function name to a callable on the
        # embeddings module; fail loudly on an unknown name
        drop_func = self._config.getstr(self, 'drop_func')
        if hasattr(embeddings, drop_func):
            return getattr(embeddings, drop_func)
        else:
            raise AttributeError("module '{}' has no attribute '{}'".format(embeddings.__name__, drop_func))
    @property
    def decomposition_level(self):
        return self._config.getint(self, 'decomposition_level')
    @property
    def n_layers(self):
        # number of fully-connected layers before the classifier
        return self._config.getint(self, 'n_layers')
    @property
    def factorized(self):
        # factorize into unlabeled arc + label scoring -- TODO confirm
        return self._config.getboolean(self, 'factorized')
    @property
    def hidden_size(self):
        return self._config.getint(self, 'hidden_size')
    @property
    def embed_size(self):
        return self._config.getint(self, 'embed_size')
    @property
    def embed_keep_prob(self):
        # keep probability (1 - dropout) for embeddings
        return self._config.getfloat(self, 'embed_keep_prob')
    @property
    def hidden_keep_prob(self):
        # keep probability (1 - dropout) for hidden layers
        return self._config.getfloat(self, 'hidden_keep_prob')
    @property
    def hidden_func(self):
        # resolve the configured nonlinearity name to a callable on the
        # nonlin module; fail loudly on an unknown name
        hidden_func = self._config.getstr(self, 'hidden_func')
        if hasattr(nonlin, hidden_func):
            return getattr(nonlin, hidden_func)
        else:
            raise AttributeError("module '{}' has no attribute '{}'".format(nonlin.__name__, hidden_func))
@property
def compare_precision(self):
    """True when the configured treebank ('tb' in the DEFAULT section) is
    'ptb' or 'ctb'; False otherwise or when the option is missing.

    The original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; narrowed to Exception.
    """
    try:
        return self._config.get('DEFAULT', 'tb') in ('ptb', 'ctb')
    except Exception:
        # Missing section/option: treat as "not PTB/CTB".
        return False
#***************************************************************
class GraphTokenVocab(TokenVocab):
""""""
_depth = -1
#=============================================================
def __init__(self, *args, **kwargs):
    """Initialize a graph-token vocab.

    Forces a rank-3 placeholder shape (batch x seq x seq — TODO confirm
    axis meaning against the base class) before delegating to TokenVocab.
    """
    kwargs['placeholder_shape'] = [None, None, None]
    super(GraphTokenVocab, self).__init__(*args, **kwargs)
    return
#=============================================================
def get_bilinear_discriminator(self, layer, token_weights, variable_scope=None, reuse=False):
    """Build the unlabeled-edge bilinear discriminator graph.

    Args:
      layer: recurrent-layer output tensor (presumably n x m x d — TODO confirm).
      token_weights: (n x m x m) weights/mask over token pairs.
      variable_scope: optional scope name; defaults to self.classname.
      reuse: when True, dropout is disabled (keep prob forced to 1).

    Returns:
      dict with targets, probabilities, losses and TP/FP/FN counts.
    """
    recur_layer = layer
    hidden_keep_prob = 1 if reuse else self.hidden_keep_prob
    add_linear = self.add_linear
    with tf.variable_scope(variable_scope or self.classname):
        for i in six.moves.range(0, self.n_layers-1):
            with tf.variable_scope('FC-%d' % i):
                layer = classifiers.hidden(layer, 2*self.hidden_size,
                                           hidden_func=self.hidden_func,
                                           hidden_keep_prob=hidden_keep_prob)
        # BUG FIX: was "'FC-top' % i", which raises TypeError (no format
        # placeholder) and referenced the loop variable outside the loop.
        # The plain 'FC-top' scope matches get_bilinear_classifier.
        with tf.variable_scope('FC-top'):
            layers = classifiers.hiddens(layer, 2*[self.hidden_size],
                                         hidden_func=self.hidden_func,
                                         hidden_keep_prob=hidden_keep_prob)
        layer1, layer2 = layers.pop(0), layers.pop(0)
        with tf.variable_scope('Discriminator'):
            if self.diagonal:
                logits = classifiers.diagonal_bilinear_discriminator(
                    layer1, layer2,
                    hidden_keep_prob=hidden_keep_prob,
                    add_linear=add_linear)
            else:
                logits = classifiers.bilinear_discriminator(
                    layer1, layer2,
                    hidden_keep_prob=hidden_keep_prob,
                    add_linear=add_linear)
    #-----------------------------------------------------------
    # Process the targets
    # (n x m x m) -> (n x m x m)
    unlabeled_targets = nn.greater(self.placeholder, 0)
    #-----------------------------------------------------------
    # Compute probabilities/cross entropy
    # (n x m x m) -> (n x m x m)
    probabilities = tf.nn.sigmoid(logits)
    # (n x m x m), (n x m x m x c), (n x m x m) -> ()
    loss = tf.losses.sigmoid_cross_entropy(unlabeled_targets, logits, weights=token_weights)
    #-----------------------------------------------------------
    # Compute predictions/accuracy
    # (n x m x m x c) -> (n x m x m)
    predictions = nn.greater(logits, 0, dtype=tf.int32) * token_weights
    # (n x m x m) (*) (n x m x m) -> (n x m x m)
    true_positives = predictions * unlabeled_targets
    # (n x m x m) -> ()
    n_predictions = tf.reduce_sum(predictions)
    n_targets = tf.reduce_sum(unlabeled_targets)
    n_true_positives = tf.reduce_sum(true_positives)
    # () - () -> ()
    n_false_positives = n_predictions - n_true_positives
    n_false_negatives = n_targets - n_true_positives
    # (n x m x m) -> (n)
    n_targets_per_sequence = tf.reduce_sum(unlabeled_targets, axis=[1,2])
    n_true_positives_per_sequence = tf.reduce_sum(true_positives, axis=[1,2])
    # (n) x 2 -> ()
    n_correct_sequences = tf.reduce_sum(nn.equal(n_true_positives_per_sequence, n_targets_per_sequence))
    #-----------------------------------------------------------
    # Populate the output dictionary
    outputs = {}
    outputs['recur_layer'] = recur_layer
    outputs['unlabeled_targets'] = unlabeled_targets
    outputs['probabilities'] = probabilities
    outputs['unlabeled_loss'] = loss
    outputs['loss'] = loss
    outputs['unlabeled_predictions'] = predictions
    outputs['n_unlabeled_true_positives'] = n_true_positives
    outputs['n_unlabeled_false_positives'] = n_false_positives
    outputs['n_unlabeled_false_negatives'] = n_false_negatives
    outputs['n_correct_unlabeled_sequences'] = n_correct_sequences
    # Unlabeled and labeled keys alias the same tensors here; the labeled
    # classifier overwrites the labeled ones when factorized.
    outputs['predictions'] = predictions
    outputs['n_true_positives'] = n_true_positives
    outputs['n_false_positives'] = n_false_positives
    outputs['n_false_negatives'] = n_false_negatives
    outputs['n_correct_sequences'] = n_correct_sequences
    return outputs
#=============================================================
def get_bilinear_classifier(self, layer, outputs, token_weights, variable_scope=None, reuse=False, debug=False):
    """Build the labeled-edge bilinear classifier on top of the
    discriminator's `outputs` dict (factorized prediction).

    Args:
      layer: recurrent-layer output tensor (presumably n x m x d — TODO confirm).
      outputs: dict from get_bilinear_discriminator; read for
        'unlabeled_predictions', 'unlabeled_targets', 'probabilities',
        'loss', and updated in place.
      token_weights: (n x m x m) weights/mask over token pairs.
      variable_scope: optional scope name; defaults to self.field.
      reuse: when True, dropout is disabled (keep prob forced to 1).
      debug: unused here.

    Returns:
      the same `outputs` dict, augmented with label predictions/losses.
    """
    recur_layer = layer
    hidden_keep_prob = 1 if reuse else self.hidden_keep_prob
    add_linear = self.add_linear
    with tf.variable_scope(variable_scope or self.field):
        for i in six.moves.range(0, self.n_layers-1):
            with tf.variable_scope('FC-%d' % i):
                layer = classifiers.hidden(layer, 2*self.hidden_size,
                                           hidden_func=self.hidden_func,
                                           hidden_keep_prob=hidden_keep_prob)
        # Two parallel top FC layers: one for heads, one for dependents.
        with tf.variable_scope('FC-top'):
            layers = classifiers.hiddens(layer, 2*[self.hidden_size],
                                         hidden_func=self.hidden_func,
                                         hidden_keep_prob=hidden_keep_prob)
        layer1, layer2 = layers.pop(0), layers.pop(0)
        with tf.variable_scope('Classifier'):
            if self.diagonal:
                logits = classifiers.diagonal_bilinear_classifier(
                    layer1, layer2, len(self),
                    hidden_keep_prob=hidden_keep_prob,
                    add_linear=add_linear)
            else:
                logits = classifiers.bilinear_classifier(
                    layer1, layer2, len(self),
                    hidden_keep_prob=hidden_keep_prob,
                    add_linear=add_linear)
    #-----------------------------------------------------------
    # Process the targets
    # (n x m x m)
    label_targets = self.placeholder
    unlabeled_predictions = outputs['unlabeled_predictions']
    unlabeled_targets = outputs['unlabeled_targets']
    #-----------------------------------------------------------
    # Process the logits
    # (n x m x c x m) -> (n x m x m x c)
    transposed_logits = tf.transpose(logits, [0,1,3,2])
    #-----------------------------------------------------------
    # Compute the probabilities/cross entropy
    # (n x m x m) -> (n x m x m x 1)
    # stop_gradient: the label loss must not backprop through edge probs.
    head_probabilities = tf.expand_dims(tf.stop_gradient(outputs['probabilities']), axis=-1)
    # (n x m x m x c) -> (n x m x m x c)
    label_probabilities = tf.nn.softmax(transposed_logits) * tf.to_float(tf.expand_dims(token_weights, axis=-1))
    # (n x m x m), (n x m x m x c), (n x m x m) -> ()
    # Label loss is only taken over gold edges (weights masked by targets).
    label_loss = tf.losses.sparse_softmax_cross_entropy(label_targets, transposed_logits, weights=token_weights*unlabeled_targets)
    #-----------------------------------------------------------
    # Compute the predictions/accuracy
    # (n x m x m x c) -> (n x m x m)
    #print('23333')
    predictions = tf.argmax(transposed_logits, axis=-1, output_type=tf.int32)
    # (n x m x m) (*) (n x m x m) -> (n x m x m)
    # "true positive" = correct label on a predicted edge; "correct label
    # token" = correct label on a gold edge.
    true_positives = nn.equal(label_targets, predictions) * unlabeled_predictions
    correct_label_tokens = nn.equal(label_targets, predictions) * unlabeled_targets
    # (n x m x m) -> ()
    n_unlabeled_predictions = tf.reduce_sum(unlabeled_predictions)
    n_unlabeled_targets = tf.reduce_sum(unlabeled_targets)
    n_true_positives = tf.reduce_sum(true_positives)
    n_correct_label_tokens = tf.reduce_sum(correct_label_tokens)
    # () - () -> ()
    n_false_positives = n_unlabeled_predictions - n_true_positives
    n_false_negatives = n_unlabeled_targets - n_true_positives
    # (n x m x m) -> (n)
    n_targets_per_sequence = tf.reduce_sum(unlabeled_targets, axis=[1,2])
    n_true_positives_per_sequence = tf.reduce_sum(true_positives, axis=[1,2])
    n_correct_label_tokens_per_sequence = tf.reduce_sum(correct_label_tokens, axis=[1,2])
    # (n) x 2 -> ()
    n_correct_sequences = tf.reduce_sum(nn.equal(n_true_positives_per_sequence, n_targets_per_sequence))
    n_correct_label_sequences = tf.reduce_sum(nn.equal(n_correct_label_tokens_per_sequence, n_targets_per_sequence))
    #-----------------------------------------------------------
    # Populate the output dictionary
    rho = self.loss_interpolation
    outputs['label_predictions'] = predictions
    outputs['label_targets'] = label_targets
    # Joint probability = P(edge) * P(label | edge).
    outputs['probabilities'] = label_probabilities * head_probabilities
    outputs['label_loss'] = label_loss
    # Combination of labeled loss and unlabeled loss
    outputs['loss'] = 2*((1-rho) * outputs['loss'] + rho * label_loss)
    # outputs['loss'] = label_loss * self.loss_rel_interpolation + outputs['loss'] * self.loss_edge_interpolation
    outputs['n_true_positives'] = n_true_positives
    outputs['n_false_positives'] = n_false_positives
    outputs['n_false_negatives'] = n_false_negatives
    outputs['n_correct_sequences'] = n_correct_sequences
    outputs['n_correct_label_tokens'] = n_correct_label_tokens
    outputs['n_correct_label_sequences'] = n_correct_label_sequences
    return outputs
@property
def loss_rel_interpolation(self):
    """Config float for the relation-loss weight; falls back to the
    generic 'loss_interpolation' option when not configured.

    Narrowed the original bare ``except:`` to Exception so that
    KeyboardInterrupt/SystemExit are not swallowed.
    """
    try:
        return self._config.getfloat(self, 'loss_rel_interpolation')
    except Exception:
        return self._config.getfloat(self, 'loss_interpolation')
@property
def loss_edge_interpolation(self):
    """Config float for the edge-loss weight; falls back to the
    complement of 'loss_interpolation' when not configured.

    Narrowed the original bare ``except:`` to Exception so that
    KeyboardInterrupt/SystemExit are not swallowed.
    """
    try:
        return self._config.getfloat(self, 'loss_edge_interpolation')
    except Exception:
        return 1-self._config.getfloat(self, 'loss_interpolation')
#=============================================================
def get_unfactored_bilinear_classifier(self, layer, token_weights, variable_scope=None, reuse=False):
    """Build the unfactored labeled-edge classifier (edge existence and
    label predicted jointly by one softmax; class 0 means "no edge").

    Args:
      layer: recurrent-layer output tensor (presumably n x m x d — TODO confirm).
      token_weights: (n x m x m) weights/mask over token pairs.
      variable_scope: optional scope name; defaults to self.field.
      reuse: when True, dropout is disabled (keep prob forced to 1).

    Returns:
      dict with targets, probabilities, loss and labeled/unlabeled counts.
    """
    recur_layer = layer
    hidden_keep_prob = 1 if reuse else self.hidden_keep_prob
    add_linear = self.add_linear
    with tf.variable_scope(variable_scope or self.field):
        for i in six.moves.range(0, self.n_layers-1):
            with tf.variable_scope('FC-%d' % i):
                layer = classifiers.hidden(layer, 2*self.hidden_size,
                                           hidden_func=self.hidden_func,
                                           hidden_keep_prob=hidden_keep_prob)
        # BUG FIX: was "'FC-top' % i" (TypeError: no format placeholder,
        # loop variable used outside the loop) and called
        # classifiers.hidden with a list of sizes even though the result
        # is .pop(0)'d twice below — classifiers.hiddens (which the
        # factorized variants use) is the list-returning version.
        with tf.variable_scope('FC-top'):
            layers = classifiers.hiddens(layer, 2*[self.hidden_size],
                                         hidden_func=self.hidden_func,
                                         hidden_keep_prob=hidden_keep_prob)
        layer1, layer2 = layers.pop(0), layers.pop(0)
        with tf.variable_scope('Classifier'):
            if self.diagonal:
                logits = classifiers.diagonal_bilinear_classifier(
                    layer1, layer2, len(self),
                    hidden_keep_prob=hidden_keep_prob,
                    add_linear=add_linear)
            else:
                logits = classifiers.bilinear_classifier(
                    layer1, layer2, len(self),
                    hidden_keep_prob=hidden_keep_prob,
                    add_linear=add_linear)
    #-----------------------------------------------------------
    # Process the targets
    targets = self.placeholder
    # (n x m x m) -> (n x m x m)
    unlabeled_targets = nn.greater(targets, 0)
    #-----------------------------------------------------------
    # Process the logits
    # (n x m x c x m) -> (n x m x m x c)
    transposed_logits = tf.transpose(logits, [0,1,3,2])
    #-----------------------------------------------------------
    # Compute probabilities/cross entropy
    # (n x m x m x c) -> (n x m x m x c)
    probabilities = tf.nn.softmax(transposed_logits) * tf.to_float(tf.expand_dims(token_weights, axis=-1))
    # (n x m x m), (n x m x m x c), (n x m x m) -> ()
    loss = tf.losses.sparse_softmax_cross_entropy(targets, transposed_logits, weights=token_weights)
    #-----------------------------------------------------------
    # Compute predictions/accuracy
    # (n x m x m x c) -> (n x m x m)
    predictions = tf.argmax(transposed_logits, axis=-1, output_type=tf.int32) * token_weights
    # (n x m x m) -> (n x m x m); nonzero label == predicted edge
    unlabeled_predictions = nn.greater(predictions, 0)
    # (n x m x m) (*) (n x m x m) -> (n x m x m)
    unlabeled_true_positives = unlabeled_predictions * unlabeled_targets
    true_positives = nn.equal(targets, predictions) * unlabeled_true_positives
    # (n x m x m) -> ()
    n_predictions = tf.reduce_sum(unlabeled_predictions)
    n_targets = tf.reduce_sum(unlabeled_targets)
    n_unlabeled_true_positives = tf.reduce_sum(unlabeled_true_positives)
    n_true_positives = tf.reduce_sum(true_positives)
    # () - () -> ()
    n_unlabeled_false_positives = n_predictions - n_unlabeled_true_positives
    n_unlabeled_false_negatives = n_targets - n_unlabeled_true_positives
    n_false_positives = n_predictions - n_true_positives
    n_false_negatives = n_targets - n_true_positives
    # (n x m x m) -> (n)
    n_targets_per_sequence = tf.reduce_sum(unlabeled_targets, axis=[1,2])
    n_unlabeled_true_positives_per_sequence = tf.reduce_sum(unlabeled_true_positives, axis=[1,2])
    n_true_positives_per_sequence = tf.reduce_sum(true_positives, axis=[1,2])
    # (n) x 2 -> ()
    n_correct_unlabeled_sequences = tf.reduce_sum(nn.equal(n_unlabeled_true_positives_per_sequence, n_targets_per_sequence))
    n_correct_sequences = tf.reduce_sum(nn.equal(n_true_positives_per_sequence, n_targets_per_sequence))
    #-----------------------------------------------------------
    # Populate the output dictionary
    outputs = {}
    outputs['recur_layer'] = recur_layer
    outputs['unlabeled_targets'] = unlabeled_targets
    outputs['label_targets'] = self.placeholder
    outputs['probabilities'] = probabilities
    # Unfactored model has no separate edge loss.
    outputs['unlabeled_loss'] = tf.constant(0.)
    outputs['loss'] = loss
    outputs['unlabeled_predictions'] = unlabeled_predictions
    outputs['label_predictions'] = predictions
    outputs['n_unlabeled_true_positives'] = n_unlabeled_true_positives
    outputs['n_unlabeled_false_positives'] = n_unlabeled_false_positives
    outputs['n_unlabeled_false_negatives'] = n_unlabeled_false_negatives
    outputs['n_correct_unlabeled_sequences'] = n_correct_unlabeled_sequences
    outputs['n_true_positives'] = n_true_positives
    outputs['n_false_positives'] = n_false_positives
    outputs['n_false_negatives'] = n_false_negatives
    outputs['n_correct_sequences'] = n_correct_sequences
    return outputs
#=============================================================
def _count(self, node):
    """Count the relation label of every 'head:rel' edge in a CoNLL-U
    graph field (edges separated by '|'; '_' or '' means no edges)."""
    if node in ('_', ''):
        return
    for edge in node.split('|'):
        _head, rel = edge.split(':', 1)
        self.counts[rel] += 1
#=============================================================
def add(self, token):
    """Alias for :meth:`index`: parse and index a graph-edge token."""
    return self.index(token)
#=============================================================
# token should be: 1:rel|2:acl|5:dep
def index(self, token):
    """Parse a graph field like '1:rel|2:acl|5:dep' into a list of
    (head, label_index) pairs; '_' yields an empty list."""
    if token == '_':
        return []
    pairs = [edge.split(':', 1) for edge in token.split('|')]
    return [(int(head), super(GraphTokenVocab, self).__getitem__(semrel))
            for head, semrel in pairs]
#=============================================================
# index should be [(1, 12), (2, 4), (5, 2)]
def token(self, | |
#!python
#
# Copyright 2021 <EMAIL>. Internal use only; all rights reserved
#
# L1/L2 stress tester. Starts with one funded "funder" account which allocates
# funds to various child processes. The child processes then randomly perform
# various operations like onboarding L1->L2, exiting L2->L1, staking liquidity
# pools, or sending payments to each other. More capabilities can be added
# in the future e.g. trading ERC20 tokens, simulated gambling, auctions,
# multi-level marketing where a child account recruits others and then collects
# a fee on their future transactions, ...
#
# Child processes will only be performing one action at a time, chosen randomly
# and with probabilities intended to keep most of the activity on the L2 chain.
# However some L1 operations are included to ensure that there is some background
# activity which is not part of the rollup framework.
import os,sys
from web3 import Web3
import threading
import signal
import time
from random import *
import queue
import requests,json
from web3.gas_strategies.time_based import fast_gas_price_strategy
from web3.middleware import geth_poa_middleware
from web3.logs import STRICT, IGNORE, DISCARD, WARN
import logging
from utils import Account,Addrs,Context,LoadEnv,lPrint,wPrint
# --- Tunable parameters for the stress tester ---
num_workers = 1
min_active_per = 5 # For every <min_active_per> children, require 1 to stay on L2 (disallows exit operations)
max_fail = 0 # Ignore this many op failures. Next one will start a shutdown
# Balance thresholds (wei): below min_balance a child is idled; children
# are funded up to fund_balance; LP staking requires min_lp_balance.
min_balance = Web3.toWei(0.025, 'ether')
fund_balance = Web3.toWei(0.5, 'ether')
min_lp_balance = Web3.toWei(11.0, 'ether')
# Emit warning messages if any child has been waiting "slow_secs" or more. Shut down at "stuck_secs".
slow_secs = 600
stuck_secs = 1800
giveup_secs = 2700
# Number of children funded per batch transaction.
fund_batch = 10
# Command line: <target> selects the environment, <num_children> the worker count.
if len(sys.argv) < 3:
    print("Usage:",sys.argv[0],"<target> <num_children>")
    exit(1)
num_children = int(sys.argv[2])
assert(num_children <= 1000) # Not a hard limit, but does affect log formatting
env = LoadEnv()
A = Addrs(env)
boba_addrs = A.boba_addrs
# Fail if the parameters would exceed the allowed funding limit
assert(num_children * (min_balance * 10) <= Web3.toWei(env['max_fund_eth'],'ether'))
class Child:
    """One simulated account: holds its keys, which chain it is on, and
    per-operation bookkeeping (timestamps, gas, watch flags)."""
    def __init__(self, A, num, acct, parent):
        self.num = num          # child index, used in logs
        self.acct = acct        # Account (address + key)
        # Optional parameter to force all children to start on one chain
        if 'start_chain' in env and env['start_chain'] > 0:
            self.on_chain = env['start_chain']
        else:
            # Alternate children between L1 and L2 (even -> 2, odd -> 1).
            self.on_chain = 2 - (self.num % 2)
        # Per-chain flags; index 0 is unused so chain number indexes directly
        # (presumably — confirm against the operations code).
        self.approved = [False]*3
        self.staked = [False]*3
        self.staked_NG = False
        self.parent = parent
        self.ts = []            # timestamps of the operation in flight
        self.op = None          # name of the operation in flight, or None
        self.need_tx = False    # True while waiting on a tx receipt
        self.exiting = False    # True once this child is shutting down
        self.gasEstimate = 0
        self.gasUsed = 0
        self.preload = [] # Override the RNG to step through a repeatable sequence of operations
        A.addr_names[acct.address] = "Child_" + str(num)
    # Could cache L1, L2 balances
    def buildAndSubmit(self, ctx, func, params):
        """Build, sign and submit a contract transaction on this child's
        current chain; retries once on submission failure (bumping the
        gas price on L2). Returns the tx hash from send_raw_transaction."""
        params['from'] = self.acct.address
        params['nonce'] = ctx.rpc[self.on_chain].eth.get_transaction_count(self.acct.address)
        params['chainId'] = ctx.chainIds[self.on_chain]
        gp = ctx.rpc[self.on_chain].eth.gasPrice
        if self.on_chain == 1:
            # EIP-1559 style fees on L1 only.
            params['maxFeePerGas'] = gp
            # FIXME - get from env
            params['maxPriorityFeePerGas'] = min(Web3.toWei(1,'gwei'), gp)
        tx = func.buildTransaction(params)
        #print("TX",tx)
        self.gasEstimate = tx['gas']
        # FIXME - get gasMultiplier from env
        if self.on_chain == 1:
            # Pad the gas limit on L1 to avoid borderline out-of-gas.
            # tx['gas'] = int(tx['gas'] * 2.0)
            tx['gas'] = int(tx['gas'] + 50000)
        ret = None
        try:
            signed_tx = ctx.rpc[self.on_chain].eth.account.sign_transaction(tx, self.acct.key)
            ret = ctx.rpc[self.on_chain].eth.send_raw_transaction(signed_tx.rawTransaction)
        except Exception as e:
            # FIXME - check for gas-price error specifically.
            print("***FAILED Sumbission, will retry once")
            if self.on_chain == 2:
                # Refresh the (legacy) gas price before the retry.
                print("Old gas price:", tx['gasPrice'])
                tx['gasPrice'] = ctx.rpc[self.on_chain].eth.gasPrice
                print("New gas price:", tx['gasPrice'])
            signed_tx = ctx.rpc[self.on_chain].eth.account.sign_transaction(tx, self.acct.key)
            ret = ctx.rpc[self.on_chain].eth.send_raw_transaction(signed_tx.rawTransaction)
        return ret
class shutdown:
    """Process-wide mutable state shared across threads (used as a
    namespace, never instantiated)."""
    level = 0      # 0 = running, 1 = graceful shutdown, >= 2 = hard stop
    num_done = 0   # children that have finished exiting
    num_fails = 0  # failed operations seen so far (compared to max_fail)
    total_ops = 0 # FIXME - saving time by sticking this here. Move to its own stats object or other thread-safe place.
    batchGas = 0
def nuke(delay_secs):
    """Last-resort shutdown: wait *delay_secs* seconds, then kill the
    process immediately (bypasses atexit/finally handlers)."""
    print("*** Forced exit in ",delay_secs,"seconds ***")
    # could try to flush buffers, close files, etc
    time.sleep(delay_secs)
    os._exit(1)
def myAssert(cond):
    """assert() wrapper: on the first failure, escalate shutdown.level to 2
    and schedule a forced process exit before raising AssertionError."""
    if not cond and shutdown.level < 2:
        shutdown.level = 2
        threading.Thread(target=nuke, args=(10,)).start()
    assert cond
def ctrlC(sig, frame):
    """SIGINT handler: each Ctrl-C raises shutdown.level by one and dumps
    the current watch/queue state; at level >= 2 the default handler is
    restored so a further Ctrl-C kills the process."""
    print("SIGNAL",sig,frame)
    shutdown.level += 1
    if shutdown.level >= 2:
        signal.signal(signal.SIGINT, signal.SIG_DFL)
    print("")
    print("+---------------------+")
    print("Shutdown level: ", shutdown.level)
    print("listLock:", listLock.locked())
    print("txWatch items:", len(txWatch))
    if shutdown.level > 1:
        # Detailed dumps only on repeated Ctrl-C.
        for i in txWatch.keys():
            print(" ",Web3.toHex(i))
    print("evMissed: ", evMissed)
    print("evWatch items:", len(evWatch))
    if shutdown.level > 1:
        for i in evWatch.keys():
            print(" ",i)
    print("readyQueue size:", readyQueue.qsize())
    print("idleQueue size:", idleQueue.qsize())
    print("numDone:", shutdown.num_done,"of",num_children)
    if shutdown.level > 1:
        for c in children:
            if not c.exiting:
                print("*** Child",c.num,"acct",c.acct.address,"in op",c.op,"on chain",c.on_chain,"ts",c.ts,"need_tx",c.need_tx)
    print("+---------------------+")
    print("")
signal.signal(signal.SIGINT, ctrlC)
# Shared state: locks, watch tables, and the work queues.
listLock = threading.Lock()   # guards txWatch / evWatch
evWatch = dict()              # address -> Child waiting on an event
txWatch = dict()              # tx hash -> Child waiting on a receipt
evMissed = []
readyQueue = queue.Queue()    # children ready to pick a new operation
idleQueue = queue.Queue()     # children parked until they regain funds
os.makedirs("./logs", exist_ok=True)
account_log = open("./logs/accounts-" + env['name'] + ".log","a")
logLock = threading.Lock()    # guards op_log writes
op_log = open("./logs/op.log","a")
op_log.write("# Started at " + time.asctime(time.gmtime()) + " with " + str(num_children)+ " children and " + str(num_workers) + " worker threads\n")
addrs = []
children = []
# Per-chain gas prices; index 0 unused so the chain number indexes directly.
gasPrice = [0]*3
# Rinkeby seems to work at 0.5 gwei, ~75s
gasPrice[1] = Web3.toWei(env['gas_price_gwei'][0],'gwei') # FIXME - try to estimate it (fails on local network)
gasPrice[2] = Web3.toWei(env['gas_price_gwei'][1],'gwei') # This one is fixed
funder = Account(Web3.toChecksumAddress(env['funder_acct'][0]),env['funder_acct'][1])
gCtx = Context(env,A,"./logs/mainloop.log","M")
lPrint (gCtx.log, "Versions: L1=" + gCtx.rpc[1].clientVersion + ", L2=" + gCtx.rpc[2].clientVersion)
lPrint (gCtx.log, "Detected chain IDs: L1=" + str(gCtx.chainIds[1]) + ", L2=" + str(gCtx.chainIds[2]))
funder.setNonces(gCtx.rpc)
def Fund(ctx, fr, to, chain, amount, n=None):
    """Send *amount* wei (minus the 21000-gas fee) from account *fr* to
    address *to* on *chain*; *n* overrides the nonce when given.
    Returns the tx hash from send_raw_transaction."""
    if n is None:
        n = ctx.rpc[chain].eth.get_transaction_count(fr.address)
    gas_price = ctx.rpc[chain].eth.gasPrice
    # The fee is deducted from `amount`, so the fee must fit inside it.
    myAssert(21000*gas_price < amount)
    tx = {
        'nonce': n,
        'from': fr.address,
        'to': to,
        'gas': 21000,
        'chainId': ctx.chainIds[chain],
        'gasPrice': gas_price,
        'value': Web3.toWei(amount - 21000*gas_price, 'wei'),
    }
    signed_txn = ctx.rpc[chain].eth.account.sign_transaction(tx, fr.key)
    return ctx.rpc[chain].eth.send_raw_transaction(signed_txn.rawTransaction)
def xFund(ctx, c, to, amount, n=None):
    """Child-to-address transfer on the child's current chain; like Fund()
    but subtracts a small random jitter so transfer amounts are unique in
    the logs. Returns the tx hash."""
    amount -= randint(0,65535)
    if n is None:
        n = ctx.rpc[c.on_chain].eth.get_transaction_count(c.acct.address)
    tx = {
        'nonce': n,
        'from':c.acct.address,
        'to':to,
        'gas':21000,
        'chainId': ctx.chainIds[c.on_chain],
    }
    c.gasEstimate = 21000
    tx['gasPrice'] = ctx.rpc[c.on_chain].eth.gasPrice
    # Fee comes out of `amount`, so it must fit inside it.
    myAssert(21000*tx['gasPrice'] < amount)
    tx['value'] = Web3.toWei(amount - 21000*tx['gasPrice'], 'wei')
    signed_txn = ctx.rpc[c.on_chain].eth.account.sign_transaction(tx, c.acct.key)
    ret = ctx.rpc[c.on_chain].eth.send_raw_transaction(signed_txn.rawTransaction)
    return ret
def Start(ctx, c, op):
    """Mark child *c* as beginning operation *op*: reset gas counters,
    record the start timestamp, and append an OP_START record to op.log."""
    myAssert(c.op is None)
    myAssert(not c.ts)  # Ensure no stale timestamps from a previous op
    c.op = op
    c.gasEstimate = 0
    c.gasUsed = 0
    c.ts.append(time.time())
    record = f"OP_START,{c.num:03d},{op},{c.on_chain},{c.ts[0]:.8f}\n"
    with logLock:
        op_log.write(record)
        op_log.flush()
# Register the txhash to watch for. All watched operations do this
def Watch(ctx, c, op, tx=None):
#print("Child",c.num,"START for", op)
c.ts.append(time.time())
s = "OP_WATCH," + "{:03d}".format(c.num) + "," + op + "," + str(c.on_chain)
start_at = c.ts[0]
s += "," + "{:014.8f}".format(c.ts[1] - start_at)
if tx:
c.need_tx = True
listLock.acquire()
txWatch[tx] = c
listLock.release()
s = s + "," + Web3.toHex(tx)
s = s + "\n"
#ctx.log.write(s)
logLock.acquire()
op_log.write(s)
op_log.flush()
logLock.release()
# Wrapper to watch for an event as well as a tx receipt
def WatchEv(ctx, c, op, tx=None):
    """Like Watch(), but additionally registers the child's address in
    evWatch so the event-watcher thread can complete the operation."""
    myAssert(tx is not None)
    with listLock:
        evWatch[c.acct.address] = c
    Watch(ctx, c, op, tx)
def Finish(c,success=1):
    """Complete child *c*'s current operation: log an OP_DONE_ record with
    gas usage and per-phase timings, then route the child to readyQueue
    (success), idleQueue (failure), or count it as done (exiting)."""
    myAssert(c.op)
    tNow = time.time()
    c.ts.append(tNow)
    op_str = "OP_DONE_," + "{:03d}".format(c.num) + "," + c.op + "," + str(c.on_chain) + "," + str(success)
    op_str += "," + str(c.gasUsed) + "/" + str(c.gasEstimate)
    if c.gasUsed > c.gasEstimate:
        # Flag under-estimates in the log; should not normally happen.
        print("*** Used more gas than estimate, child",c.num,"op",c.op)
        op_str += "<<<"
    c.gasEstimate = 0
    c.gasUsed = 0
    # Remaining timestamps are logged as offsets from the start time.
    start_at = c.ts.pop(0)
    for t in c.ts:
        op_str += "," + "{:014.8f}".format(t - start_at)
    op_str += "\n"
    logLock.acquire()
    shutdown.total_ops += 1
    op_log.write(op_str)
    op_log.flush()
    logLock.release()
    old_op = c.op
    c.ts = []
    c.op = None
    if c.exiting:
        print("Child",c.num,"is done")
        shutdown.num_done += 1
    elif success:
        readyQueue.put(c)
    else:
        print("Putting child",c.num,"into idleQueue after failed operation:", old_op)
        shutdown.num_fails += 1
        if shutdown.num_fails > max_fail and shutdown.level == 0:
            print("*** Maximum failure count reached, starting shutdown")
            shutdown.level = 1
        idleQueue.put(c)
# Periodically take a child out of the idleQueue and see if it has gained enough funds to be put back
# into the readyQueue.
def idle_manager(env):
    """Background thread: recycles idled children back into readyQueue once
    either chain balance reaches min_balance. `loopCheck` remembers the
    first child seen so a full unproductive scan of the queue can be
    detected (then the sleep is lengthened, or the run is ended when every
    child is idle). Runs until shutdown.level reaches 2."""
    loopCheck = None
    while shutdown.level < 2:
        c = None
        items = idleQueue.qsize()
        if items == 0:
            #print("idle_manager idleQueue empty")
            time.sleep(20)
            continue
        c = idleQueue.get()
        if shutdown.level > 0:
            # During shutdown just drain idle children back to ready.
            readyQueue.put(c)
            continue
        # Prefer re-activating on L2, then L1.
        bal = gCtx.rpc[2].eth.get_balance(c.acct.address)
        if bal >= min_balance:
            c.on_chain = 2
            print("idle_manager re-activating child",c.num,"on chain", c.on_chain)
            loopCheck = None
            readyQueue.put(c)
            continue
        bal = gCtx.rpc[1].eth.get_balance(c.acct.address)
        if bal >= min_balance:
            c.on_chain = 1
            print("idle_manager re-activating child",c.num,"on chain", c.on_chain)
            loopCheck = None
            readyQueue.put(c)
            continue
        interval = 2
        if loopCheck is None:
            loopCheck = c.num
        elif loopCheck == c.num:
            # Seen this child twice with no re-activation in between:
            # the whole queue was scanned fruitlessly, so back off.
            interval = 20
            loopCheck = None
            # If every child is idle and we've scanned the whole queue once, might as well quit.
            if idleQueue.qsize() >= num_children and shutdown.level == 0:
                print("Welp, looks like we're done here.")
                shutdown.level = 1
        idleQueue.put(c)
        #print("idle_manager did not reactivate child",c.num,",will sleep for", interval)
        time.sleep(interval)
    print("idle_manager done")
def AddLiquidity(ctx, c,amount):
if c.staked[c.on_chain]:
# FIXME - do a withdrawal in this case
lPrint(ctx.log, "Child " + str(c.num) | |
# source repository: gthreepwood/yats (gh_stars: 0)
#
# (C) Copyright 2003-2011 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""DIGEST-MD5 authentication mechanism for PyXMPP SASL implementation.
Normative reference:
- `RFC 2831 <http://www.ietf.org/rfc/rfc2831.txt>`__
"""
from __future__ import absolute_import, division
__docformat__ = "restructuredtext en"
from binascii import b2a_hex
import re
import logging
import hashlib
from .core import ClientAuthenticator, ServerAuthenticator
# NOTE: `Failure` was imported twice on the next line; deduplicated.
from .core import Failure, Response, Challenge, Success
from .core import sasl_mechanism, default_nonce_factory
logger = logging.getLogger("pyxmpp2.sasl.digest_md5")
QUOTE_RE = re.compile(br"(?<!\\)\\(.)")
PARAM_RE = re.compile(br'^(?P<var>[^=]+)\=(?P<val>(\"(([^"\\]+)|(\\\")'
br'|(\\\\))+\")|([^",]+))(\s*\,\s*(?P<rest>.*))?$')
def _unquote(data):
"""Unquote quoted value from DIGEST-MD5 challenge or response.
If `data` doesn't start or doesn't end with '"' then return it unchanged,
remove the quotes and escape backslashes otherwise.
:Parameters:
- `data`: a quoted string.
:Types:
- `data`: `bytes`
:return: the unquoted string.
:returntype: `bytes`
"""
if not data.startswith(b'"') or not data.endswith(b'"'):
return data
return QUOTE_RE.sub(b"\\1", data[1:-1])
def _quote(data):
"""Prepare a string for quoting for DIGEST-MD5 challenge or response.
Don't add the quotes, only escape '"' and "\\" with backslashes.
:Parameters:
- `data`: a raw string.
:Types:
- `data`: `bytes`
:return: `data` with '"' and "\\" escaped using "\\".
:returntype: `bytes`
"""
data = data.replace(b'\\', b'\\\\')
data = data.replace(b'"', b'\\"')
return data
def _h_value(data):
"""H function of the DIGEST-MD5 algorithm (MD5 sum).
:Parameters:
- `data`: a byte string.
:Types:
- `data`: `bytes`
:return: MD5 sum of the string.
:returntype: `bytes`"""
# pylint: disable-msg=E1101
return hashlib.md5(data).digest()
def _kd_value(k_val, s_val):
"""KD function of the DIGEST-MD5 algorithm.
:Parameters:
- `k_val`: a byte string.
- `s_val`: a byte string.
:Types:
- `k_val`: `bytes`
- `s_val`: `bytes`
:return: MD5 sum of the strings joined with ':'.
:returntype: `bytes`"""
return _h_value(b":".join((k_val, s_val)))
def _make_urp_hash(username, realm, passwd):
"""Compute MD5 sum of username:realm:password.
:Parameters:
- `username`: a username.
- `realm`: a realm.
- `passwd`: a password.
:Types:
- `username`: `bytes`
- `realm`: `bytes`
- `passwd`: `bytes`
:return: the MD5 sum of the parameters joined with ':'.
:returntype: `bytes`"""
if realm is None:
realm = b""
return _h_value(b":".join((username, realm, passwd)))
def _compute_response(urp_hash, nonce, cnonce, nonce_count, authzid,
                      digest_uri):
    """Compute DIGEST-MD5 'response' value (RFC 2831 section 2.1.2.1).

    :Parameters:
        - `urp_hash`: MD5 sum of username:realm:password.
        - `nonce`: nonce value from a server challenge.
        - `cnonce`: cnonce value from the client response.
        - `nonce_count`: nonce count value, already encoded as the
          zero-padded 8-digit lowercase-hex byte string (see the caller,
          which formats it with "{0:08x}" and encodes to us-ascii).
        - `authzid`: authorization id (may be empty/None to omit it).
        - `digest_uri`: digest-uri value.
    :Types:
        - `urp_hash`: `bytes`
        - `nonce`: `bytes`
        - `cnonce`: `bytes`
        - `nonce_count`: `bytes`
        - `authzid`: `bytes`
        - `digest_uri`: `bytes`
    :return: the computed response value (32 hex-digit `bytes`).
    :returntype: `bytes`"""
    # pylint: disable-msg=C0103,R0913
    logger.debug("_compute_response{0!r}".format((urp_hash, nonce, cnonce,
                                        nonce_count, authzid,digest_uri)))
    # A1 includes authzid only when one was supplied (RFC 2831 2.1.2.1).
    if authzid:
        a1 = b":".join((urp_hash, nonce, cnonce, authzid))
    else:
        a1 = b":".join((urp_hash, nonce, cnonce))
    # A2 for a client request uses the "AUTHENTICATE" method prefix.
    a2 = b"AUTHENTICATE:" + digest_uri
    return b2a_hex(_kd_value(b2a_hex(_h_value(a1)), b":".join((
            nonce, nonce_count, cnonce, b"auth", b2a_hex(_h_value(a2))))))
def _compute_response_auth(urp_hash, nonce, cnonce, nonce_count, authzid,
                           digest_uri):
    """Compute DIGEST-MD5 'rspauth' value (server's final confirmation,
    RFC 2831 section 2.1.3). Differs from the client response only in A2,
    which has an empty method prefix here.

    :Parameters:
        - `urp_hash`: MD5 sum of username:realm:password.
        - `nonce`: nonce value from a server challenge.
        - `cnonce`: cnonce value from the client response.
        - `nonce_count`: nonce count value, already encoded as the
          zero-padded 8-digit lowercase-hex byte string.
        - `authzid`: authorization id (may be empty/None to omit it).
        - `digest_uri`: digest-uri value.
    :Types:
        - `urp_hash`: `bytes`
        - `nonce`: `bytes`
        - `cnonce`: `bytes`
        - `nonce_count`: `bytes`
        - `authzid`: `bytes`
        - `digest_uri`: `bytes`
    :return: the computed rspauth value (32 hex-digit `bytes`).
    :returntype: `bytes`"""
    # pylint: disable-msg=C0103,R0913
    logger.debug("_compute_response_auth{0!r}".format((urp_hash, nonce, cnonce,
                                        nonce_count, authzid, digest_uri)))
    if authzid:
        a1 = b":".join((urp_hash, nonce, cnonce, authzid))
    else:
        a1 = b":".join((urp_hash, nonce, cnonce))
    a2 = b":" + digest_uri
    return b2a_hex(_kd_value(b2a_hex(_h_value(a1)), b":".join((
            nonce, nonce_count, cnonce, b"auth", b2a_hex(_h_value(a2))))))
@sasl_mechanism("DIGEST-MD5", 70)
class DigestMD5ClientAuthenticator(ClientAuthenticator):
"""Provides DIGEST-MD5 SASL authentication for a client.
Authentication properties used:
- ``"username"`` - user name (required)
- ``"authzid"`` - authorization id (optional)
- ``"service-type"`` - service type as required by the DIGEST-MD5
protocol (required)
- ``"service-domain"`` - service domain (the 'serv-name' or 'host' part
of diges-uri of DIGEST-MD5) (required)
- ``"service-hostname"`` - service host name (the 'host' par of
diges-uri of DIGEST-MD5) (required)
- ``"realm"`` - the realm to use if needed (optional)
- ``"realms"`` - list of acceptable realms (optional)
Authentication properties returned:
- ``"username"`` - user name
- ``"authzid"`` - authorization id
"""
# pylint: disable-msg=R0902
def __init__(self):
    """Initialize a `DigestMD5ClientAuthenticator` object.

    All per-session state starts as None and is populated by start()."""
    ClientAuthenticator.__init__(self)
    self.username = None
    self.authzid = None
    self.realm = None
    self.nonce_count = None
    self.response_auth = None
    self.rspauth_checked = None
    self.in_properties = None
@classmethod
def are_properties_sufficient(cls, properties):
    """True when all properties required by DIGEST-MD5 (username,
    password, service-type, service-domain) are present."""
    required = ("username", "password", "service-type", "service-domain")
    return all(key in properties for key in required)
def start(self, properties):
    """Begin DIGEST-MD5 authentication: capture the authentication
    properties, reset per-session state, and send the (empty) initial
    response — the server speaks first in this mechanism."""
    self.username = properties["username"]
    self.authzid = properties.get("authzid", "")
    self.in_properties = properties
    self.nonce_count = 0
    self.realm = None
    self.response_auth = None
    self.rspauth_checked = False
    return Response(None)
def challenge(self, challenge):
    """Process a challenge and return the response.

    Parses the comma-separated var=value parameters of the first server
    challenge (realm, nonce, qop, charset, algorithm), validating each,
    then builds the client response. A second challenge (after
    `response_auth` is set) is handled by `_final_challenge`.

    :Parameters:
        - `challenge`: the challenge from server.
    :Types:
        - `challenge`: `bytes`
    :return: the response or a failure indicator.
    :returntype: `sasl.Response` or `sasl.Failure`"""
    # pylint: disable-msg=R0911,R0912
    if not challenge:
        logger.debug("Empty challenge")
        return Failure("bad-challenge")
    # workaround for some buggy implementations
    challenge = challenge.split(b'\x00')[0]
    if self.response_auth:
        # Second round: server is confirming with rspauth.
        return self._final_challenge(challenge)
    realms = []
    nonce = None
    charset = "iso-8859-1"  # RFC 2831 default when no charset param given
    # Consume one var=value parameter per iteration; PARAM_RE leaves the
    # unparsed remainder in group 'rest' (None at the end, ending the loop).
    while challenge:
        match = PARAM_RE.match(challenge)
        if not match:
            logger.debug("Challenge syntax error: {0!r}".format(challenge))
            return Failure("bad-challenge")
        challenge = match.group("rest")
        var = match.group("var")
        val = match.group("val")
        logger.debug("{0!r}: {1!r}".format(var, val))
        if var == b"realm":
            realms.append(_unquote(val))
        elif var == b"nonce":
            if nonce:
                # RFC 2831: exactly one nonce is allowed.
                logger.debug("Duplicate nonce")
                return Failure("bad-challenge")
            nonce = _unquote(val)
        elif var == b"qop":
            qopl = _unquote(val).split(b",")
            if b"auth" not in qopl:
                # Only qop=auth (no integrity/privacy layers) is supported.
                logger.debug("auth not supported")
                return Failure("not-implemented")
        elif var == b"charset":
            if val != b"utf-8":
                logger.debug("charset given and not utf-8")
                return Failure("bad-challenge")
            charset = "utf-8"
        elif var == b"algorithm":
            if val != b"md5-sess":
                logger.debug("algorithm given and not md5-sess")
                return Failure("bad-challenge")
    if not nonce:
        # A nonce is mandatory in the initial challenge.
        logger.debug("nonce not given")
        return Failure("bad-challenge")
    return self._make_response(charset, realms, nonce)
    def _make_response(self, charset, realms, nonce):
        """Make a response for the first challenge from the server.
        :Parameters:
            - `charset`: charset name from the challenge.
            - `realms`: realms list from the challenge.
            - `nonce`: nonce value from the challenge.
        :Types:
            - `charset`: `bytes`
            - `realms`: `bytes`
            - `nonce`: `bytes`
        :return: the response or a failure indicator.
        :returntype: `sasl.Response` or `sasl.Failure`"""
        # pylint: disable-msg=R0914,R0915
        params = []
        realm = self._get_realm(realms, charset)
        if isinstance(realm, Failure):
            return realm
        elif realm:
            realm = _quote(realm)
            params.append(b'realm="' + realm + b'"')
        try:
            username = self.username.encode(charset)
        except UnicodeError:
            logger.debug("Couldn't encode username to {0!r}".format(charset))
            return Failure("incompatible-charset")
        username = _quote(username)
        params.append(b'username="' + username + b'"')
        # The client nonce can be supplied via the "nonce_factory"
        # property (e.g. by tests); otherwise a random one is generated.
        cnonce = self.in_properties.get(
            "nonce_factory", default_nonce_factory)()
        cnonce = _quote(cnonce)
        params.append(b'cnonce="' + cnonce + b'"')
        params.append(b'nonce="' + nonce + b'"')
        # nc is the 8-digit lowercase-hex count of requests sent with
        # this server nonce.
        self.nonce_count += 1
        nonce_count = "{0:08x}".format(self.nonce_count).encode("us-ascii")
        params.append(b'nc=' + nonce_count)
        params.append(b'qop=auth')
        serv_type = self.in_properties["service-type"]
        serv_type = serv_type.encode("us-ascii")
        serv_name = self.in_properties["service-domain"]
        host = self.in_properties.get("service-hostname", serv_name)
        serv_name = serv_name.encode("idna")
        host = host.encode("idna")
        # digest-uri is "serv-type/host" or "serv-type/host/serv-name"
        # when the service domain differs from the host name.
        if serv_name and serv_name != host:
            digest_uri = b"/".join((serv_type, host, serv_name))
        else:
            digest_uri = b"/".join((serv_type, host))
        digest_uri = _quote(digest_uri)
        params.append(b'digest-uri="' + digest_uri + b'"')
        if self.authzid:
            try:
                authzid = self.authzid.encode(charset)
            except UnicodeError:
                logger.debug("Couldn't encode authzid to {0!r}".format(charset))
                return Failure("incompatible-charset")
            authzid = _quote(authzid)
        else:
            authzid = b""
        try:
            epasswd = self.in_properties["password"].encode(charset)
        except UnicodeError:
            logger.debug("Couldn't encode password to {0!r}"
                         .format(charset))
            return Failure("incompatible-charset")
        # NOTE(review): logging the (encoded) password is a security
        # risk — consider removing this debug statement.
        logger.debug("Encoded password: {0!r}".format(epasswd))
        # H(username:realm:password) -- the long-lived secret hash.
        urp_hash = _make_urp_hash(username, realm, epasswd)
        response = _compute_response(urp_hash, nonce, cnonce, nonce_count,
                                     authzid, digest_uri)
        # Pre-compute the expected rspauth value so the server's final
        # challenge can be verified later by _final_challenge().
        self.response_auth = _compute_response_auth(urp_hash, nonce, cnonce,
                                                    nonce_count, authzid, digest_uri)
        params.append(b'response=' + response)
        if authzid:
            params.append(b'authzid="' + authzid + b'"')
        return Response(b",".join(params))
def _get_realm(self, realms, charset):
"""Choose a realm from the list specified by the server.
:Parameters:
- `realms`: the realm list.
- `charset`: encoding of realms on the list.
:Types:
- `realms`: `list` of `bytes`
- `charset`: `bytes`
:return: the realm chosen or a failure indicator.
:returntype: `bytes` or `Failure`"""
if realms:
realm = realms[0]
ap_realms = self.in_properties.get("realms")
if ap_realms is not None:
realms = (unicode(r, charset) for r in realms)
for ap_realm in ap_realms:
if ap_realm in realms:
realm = ap_realm
break
realm = realm.decode(charset)
else:
realm | |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import base64
import json
from mcfw.properties import azzert, long_property, unicode_property, typed_property, bool_property, \
unicode_list_property, long_list_property
from mcfw.rpc import arguments
from mcfw.utils import Enum
from rogerthat.dal.profile import get_search_config
from rogerthat.models import ServiceIdentity, ServiceTranslation, MessageFlowDesign
from rogerthat.settings import get_server_settings
from rogerthat.to import TO
from rogerthat.to.activity import GeoPointWithTimestampTO
from rogerthat.to.friends import GetUserInfoRequestTO, GetUserInfoResponseTO
from rogerthat.to.profile import SearchConfigTO
from rogerthat.to.system import ProfileAddressTO, ProfilePhoneNumberTO
from rogerthat.translations import localize
from rogerthat.utils import is_flag_set, get_epoch_from_datetime
from rogerthat.utils.app import get_human_user_from_app_user
from rogerthat.utils.service import remove_slash_default, get_identity_from_service_identity_user
class APIKeyTO(object):
    """Transfer object describing a single service API key."""
    timestamp = long_property('1')
    name = unicode_property('2')
    key = unicode_property('3')
    @staticmethod
    def fromDBAPIKey(model):
        """Build an APIKeyTO from an APIKey datastore model."""
        to = APIKeyTO()
        to.timestamp = model.timestamp
        to.name = unicode(model.name)
        # The API key value itself is the datastore key name.
        to.key = model.key().name()
        return to
class ServiceLanguagesTO(object):
    """Supported languages of a service: codes plus display strings."""
    allLanguages = unicode_list_property('1')
    allLanguagesStr = unicode_list_property('2')
    nonDefaultSupportedLanguages = unicode_list_property('3')
    defaultLanguage = unicode_property('4')
    defaultLanguageStr = unicode_property('5')
class ServiceCallbackConfigurationRegexTO(object):
    """Transfer object for a regex-based service callback configuration."""
    created = long_property('created')
    name = unicode_property('name')
    uri = unicode_property('uri')
    regexes = unicode_list_property('regexes')
    callbacks = long_property('callbacks')
    custom_headers = unicode_property('custom_headers')
    @staticmethod
    def fromModel(model):
        """Build a TO from the corresponding datastore model."""
        result = ServiceCallbackConfigurationRegexTO()
        result.created = get_epoch_from_datetime(model.created)
        result.name = model.name
        result.uri = model.uri
        result.regexes = model.regexes
        result.callbacks = model.callbacks
        # Serialize the header dict to JSON text; empty string when unset.
        if model.custom_headers:
            result.custom_headers = json.dumps(model.custom_headers).decode('utf8')
        else:
            result.custom_headers = u''
        return result
class ServiceConfigurationInfoTO(object):
    """Basic service API configuration: keys and auto-login info."""
    apiKeys = typed_property('1', APIKeyTO, True)
    sik = unicode_property('3')
    autoLoginUrl = unicode_property('4')
class ServiceConfigurationTO(ServiceConfigurationInfoTO):
    """Full service configuration, including callback settings."""
    callBackURI = unicode_property('51')
    enabled = bool_property('53')
    actions = unicode_list_property('54')
    callBackFromJid = unicode_property('55')
    needsTestCall = bool_property('56')
    callbacks = long_property('57')
    mobidickUrl = unicode_property('58')
    autoUpdating = bool_property('59')
    updatesPending = bool_property('60')
    regexCallbackConfigurations = typed_property('61', ServiceCallbackConfigurationRegexTO, True)
class ServiceCallbackConfigurationTO(object):
    """Callback endpoint URI and the functions routed to it."""
    uri = unicode_property('1')
    functions = unicode_list_property('3')
class ServiceLogTO(object):
    """Transfer object for one service API log entry."""
    timestamp = long_property('1')
    type = long_property('2')
    status = long_property('3')
    function = unicode_property('4')
    request = unicode_property('5')
    response = unicode_property('6')
    errorCode = long_property('7')
    errorMessage = unicode_property('8')
    @staticmethod
    def fromServiceLog(sl):
        """Build a ServiceLogTO from a ServiceLog model.

        Request/response payloads are pretty-printed as JSON; when a
        payload is missing or not valid JSON the field becomes None
        (best-effort behaviour, unchanged).
        """
        slt = ServiceLogTO()
        slt.timestamp = sl.timestamp
        slt.type = sl.type
        slt.status = sl.status
        slt.function = sl.function
        # "except Exception" instead of a bare "except:" so SystemExit and
        # KeyboardInterrupt are not swallowed; any parse/serialize error
        # still yields None, preserving the original best-effort contract.
        try:
            slt.request = json.dumps(json.loads(sl.request), indent=2, ensure_ascii=False)
        except Exception:
            slt.request = None
        try:
            slt.response = json.dumps(json.loads(sl.response), indent=2, ensure_ascii=False)
        except Exception:
            slt.response = None
        slt.errorCode = sl.error_code
        slt.errorMessage = sl.error_message
        return slt
class ServiceIdentitySummaryTO(TO):
    """Minimal summary of a service identity."""
    created = long_property('1')
    identifier = unicode_property('2')
    name = unicode_property('3')
    menu_branding = unicode_property('4')
    @staticmethod
    def fromServiceIdentity(service_identity, to=None):
        """Fill `to` (or a fresh summary TO) from a ServiceIdentity model."""
        if not to:
            to = ServiceIdentitySummaryTO()
        to.created = service_identity.creationTimestamp
        to.identifier = service_identity.identifier
        to.name = service_identity.name
        to.menu_branding = service_identity.menuBranding
        return to
class ServiceIdentityDetailsTO(ServiceIdentitySummaryTO):
    """Full service identity details; the *_use_default flags indicate which
    fields inherit their value from the default identity."""
    INHERITANCE_PROPERTIES = ('description_use_default', 'description_branding_use_default', 'phone_number_use_default',
                              'phone_call_popup_use_default', 'search_use_default', 'app_ids_use_default',
                              'home_branding_use_default')
    description = unicode_property('100')
    description_use_default = bool_property('101')
    description_branding = unicode_property('102')
    description_branding_use_default = bool_property('103')
    menu_branding_use_default = bool_property('105')
    phone_number = unicode_property('106')
    phone_number_use_default = bool_property('107')
    phone_call_popup = unicode_property('108')
    phone_call_popup_use_default = bool_property('109')
    recommend_enabled = bool_property('110')
    admin_emails = unicode_list_property('111')
    search_use_default = bool_property('112')
    search_config = typed_property('113', SearchConfigTO, False)
    qualified_identifier = unicode_property('114')
    app_data = unicode_property('115')
    email_statistics_use_default = bool_property('116')
    email_statistics = bool_property('117')
    app_ids_use_default = bool_property('119')
    app_ids = unicode_list_property('120')
    content_branding_hash = unicode_property('124')
    home_branding_hash = unicode_property('125')
    home_branding_use_default = bool_property('126')
    @staticmethod
    def fromServiceIdentity(service_identity, service_profile):
        # Build a details TO from a ServiceIdentity model.
        # NOTE(review): `service_profile` appears unused here; presumably
        # kept for interface compatibility — confirm against callers.
        identifier = get_identity_from_service_identity_user(service_identity.user)
        # The default identity must not inherit anything from itself.
        if identifier == ServiceIdentity.DEFAULT:
            azzert(service_identity.inheritanceFlags == 0,
                   "inheritanceFlags of default must be 0, not %s" % service_identity.inheritanceFlags)
        details = ServiceIdentitySummaryTO.fromServiceIdentity(service_identity, ServiceIdentityDetailsTO())
        details.description_use_default = is_flag_set(ServiceIdentity.FLAG_INHERIT_DESCRIPTION,
                                                     service_identity.inheritanceFlags)
        details.description = service_identity.description
        details.description_branding_use_default = is_flag_set(ServiceIdentity.FLAG_INHERIT_DESCRIPTION_BRANDING,
                                                               service_identity.inheritanceFlags)
        details.description_branding = service_identity.descriptionBranding
        details.menu_branding_use_default = is_flag_set(ServiceIdentity.FLAG_INHERIT_MENU_BRANDING,
                                                       service_identity.inheritanceFlags)
        details.phone_number_use_default = is_flag_set(ServiceIdentity.FLAG_INHERIT_PHONE_NUMBER,
                                                      service_identity.inheritanceFlags)
        details.phone_number = service_identity.mainPhoneNumber
        details.phone_call_popup_use_default = is_flag_set(ServiceIdentity.FLAG_INHERIT_PHONE_POPUP_TEXT,
                                                          service_identity.inheritanceFlags)
        details.phone_call_popup = service_identity.callMenuItemConfirmation
        details.recommend_enabled = bool(service_identity.shareEnabled)
        # metaData holds a comma-separated list of admin e-mail addresses.
        details.admin_emails = [] if service_identity.metaData is None else [e.strip() for e in
                                                                            service_identity.metaData.split(',') if
                                                                            e.strip()]
        # Prefer the legacy appData blob; fall back to serialized serviceData.
        if service_identity.appData:
            details.app_data = service_identity.appData
        elif service_identity.serviceData:
            service_data = service_identity.serviceData.to_json_dict()
            details.app_data = json.dumps(service_data).decode('utf-8') if service_data else None
        else:
            details.app_data = None
        details.search_use_default = is_flag_set(ServiceIdentity.FLAG_INHERIT_SEARCH_CONFIG,
                                                service_identity.inheritanceFlags)
        sc, locs = get_search_config(service_identity.user)
        details.search_config = SearchConfigTO.fromDBSearchConfig(sc, locs)
        details.qualified_identifier = service_identity.qualifiedIdentifier
        details.email_statistics_use_default = is_flag_set(ServiceIdentity.FLAG_INHERIT_EMAIL_STATISTICS,
                                                          service_identity.inheritanceFlags)
        details.email_statistics = service_identity.emailStatistics
        details.app_ids_use_default = is_flag_set(ServiceIdentity.FLAG_INHERIT_APP_IDS,
                                                 service_identity.inheritanceFlags)
        details.app_ids = service_identity.appIds
        details.content_branding_hash = service_identity.contentBrandingHash
        details.home_branding_use_default = is_flag_set(ServiceIdentity.FLAG_INHERIT_HOME_BRANDING,
                                                       service_identity.inheritanceFlags)
        details.home_branding_hash = service_identity.homeBrandingHash
        return details
class ServiceIdentityListResultTO(object):
    """One page of service identity details plus a paging cursor."""
    cursor = unicode_property('1')
    identities = typed_property('2', ServiceIdentityDetailsTO, True)
class ServiceUserTO(object):
    """A human user connected to a service identity."""
    name = unicode_property('1')
    email = unicode_property('2')
    avatarId = long_property('3')
    app_id = unicode_property('4')
    @staticmethod
    def fromFriendServiceIdentityConnection(fsic):
        """Build a ServiceUserTO from a friend/service-identity connection."""
        user = ServiceUserTO()
        user.app_id = fsic.app_id
        user.avatarId = fsic.friend_avatarId
        user.name = fsic.friend_name
        # fsic.friend is an app user; expose the human e-mail address.
        user.email = get_human_user_from_app_user(fsic.friend).email()
        return user
class GetServiceUsersResponseTO(object):
    """One page of users connected to a service identity."""
    users = typed_property('1', ServiceUserTO, True)
    cursor = unicode_property('2')
class GetServiceActionInfoRequestTO(GetUserInfoRequestTO):
    """User-info request extended with a service action."""
    action = unicode_property('100')
class GetServiceActionInfoResponseTO(GetUserInfoResponseTO):
    """User-info response extended with action and static-flow details."""
    actionDescription = unicode_property('100')
    staticFlowHash = unicode_property('101')
    staticFlow = unicode_property('102')
    staticFlowBrandings = unicode_list_property('103')
class StartServiceActionRequestTO(object):
    """Request from a mobile client to start a service action."""
    email = unicode_property('1')
    action = unicode_property('2')
    context = unicode_property('3')
    static_flow_hash = unicode_property('4')
    message_flow_run_id = unicode_property('5')
    timestamp = long_property('6')
class StartServiceActionResponseTO(object):
    """Empty response for StartServiceActionRequestTO."""
    pass
class PokeServiceRequestTO(object):
    """Request to poke a service identified by a hashed tag."""
    email = unicode_property('1')
    hashed_tag = unicode_property('2')
    context = unicode_property('3')
    timestamp = long_property('4')
class PokeServiceResponseTO(object):
    """Empty response for PokeServiceRequestTO."""
    pass
class GetUserLinkRequestTO(TO):
    """Request to resolve a user link."""
    link = unicode_property('1')
class GetUserLinkResponseTO(TO):
    """Resolved user link."""
    link = unicode_property('1')
class GetMenuIconRequestTO(object):
    """Request for a service menu icon at given coordinates and size."""
    service = unicode_property('1')
    coords = long_list_property('2')
    size = long_property('3')
class GetMenuIconResponseTO(object):
    """Menu icon payload and its hash."""
    icon = unicode_property('1')
    iconHash = unicode_property('2')
class GetStaticFlowRequestTO(object):
    """Request for a static message flow by hash."""
    service = unicode_property('1')
    coords = long_list_property('2')
    staticFlowHash = unicode_property('3')
class GetStaticFlowResponseTO(object):
    """Static message flow definition."""
    staticFlow = unicode_property('1')
class PressMenuIconRequestTO(object):
    """Notification that a service menu item was pressed."""
    service = unicode_property('1')
    coords = long_list_property('2')
    context = unicode_property('3')
    generation = long_property('4')
    message_flow_run_id = unicode_property('5')
    static_flow_hash = unicode_property('6')
    hashed_tag = unicode_property('7')
    timestamp = long_property('8')
class PressMenuIconResponseTO(object):
    """Empty response for PressMenuIconRequestTO."""
    pass
class ShareServiceRequestTO(object):
    """Request to recommend a service to another user."""
    service_email = unicode_property('1')
    recipient = unicode_property('2')
class ShareServiceResponseTO(object):
    """Empty response for ShareServiceRequestTO."""
    pass
class FindServiceRequestTO(object):
    """Service search request: text, location and organization type."""
    search_string = unicode_property('1')
    geo_point = typed_property('2', GeoPointWithTimestampTO, False)
    organization_type = long_property('3')
    cursor = unicode_property('4', default=None)
    avatar_size = long_property('5', default=50)
    hashed_tag = unicode_property('6')
class FindServiceItemTO(object):
    """One search-result entry describing a service identity."""
    email = unicode_property('1')
    name = unicode_property('2')
    description = unicode_property('3')
    avatar = unicode_property('4')
    description_branding = unicode_property('5')
    qualified_identifier = unicode_property('6')
    avatar_id = long_property('7')
    detail_text = unicode_property('8', default=None)
    @staticmethod
    @arguments(service_identity=ServiceIdentity, target_language=unicode, distance=int, avatar_size=int,
               actions=unicode)
    def fromServiceIdentity(service_identity, target_language, distance=-1, avatar_size=50, actions=None):
        """
        Args:
            service_identity (ServiceIdentity)
            target_language (unicode)
            distance (int)
            avatar_size (int)
        """
        # Imported here, presumably to avoid circular imports at module
        # load time — confirm before hoisting.
        from rogerthat.pages.profile import get_avatar_cached
        from rogerthat.bizz.i18n import get_translator
        translator = get_translator(service_identity.service_user, ServiceTranslation.IDENTITY_TYPES, target_language)
        entry = FindServiceItemTO()
        entry.email = remove_slash_default(service_identity.user).email()
        entry.avatar_id = service_identity.avatarId  # Web support
        # The avatar image is delivered inline, base64-encoded.
        entry.avatar = unicode(base64.b64encode(get_avatar_cached(service_identity.avatarId, avatar_size)))
        entry.description_branding = translator.translate(ServiceTranslation.IDENTITY_BRANDING,
                                                          service_identity.descriptionBranding, target_language)
        entry.description = translator.translate(ServiceTranslation.IDENTITY_TEXT, service_identity.description,
                                                 target_language)
        entry.name = translator.translate(ServiceTranslation.IDENTITY_TEXT, service_identity.name, target_language)
        entry.qualified_identifier = translator.translate(ServiceTranslation.IDENTITY_TEXT,
                                                          service_identity.qualifiedIdentifier, target_language)
        # detail_text shows either the available actions or the distance.
        if actions:
            entry.detail_text = actions
        elif distance >= 0:
            entry.detail_text = localize(target_language, 'Distance: %(distance)s km', distance=distance)
        else:
            entry.detail_text = None
        return entry
class FindServiceCategoryTO(object):
    """One category of service search results with its own cursor."""
    category = unicode_property('1')
    items = typed_property('2', FindServiceItemTO, True)
    cursor = unicode_property('3', default=None)
class FindServiceResponseTO(object):
    """Service search response: match categories or an error string."""
    error_string = unicode_property('1')
    matches = typed_property('2', FindServiceCategoryTO, True)
class SendApiCallRequestTO(object):
    """Request sent by mobile"""
    service = unicode_property('1')
    id = long_property('2')
    method = unicode_property('3')
    params = unicode_property('4')
    hashed_tag = unicode_property('5')
    synchronous = bool_property('synchronous', default=False)
class SendApiCallCallbackResultTO(TO):
    """Result sent by TPS or solution"""
    result = unicode_property('1')
    error = unicode_property('2')
    def __init__(self, result=None, error=None):
        # Either the result payload or an error message (both optional).
        self.error = error
        self.result = result
class ReceiveApiCallResultRequestTO(object):
    """Result sent to mobile"""
    id = long_property('1', default=-1)
    result = unicode_property('2')
    error = unicode_property('3')
class SendApiCallResponseTO(object):
    """Response of request sent by mobile.
    `result` is only set when SendApiCallRequestTO.synchronous was True, otherwise the result will be sent
    asynchronously via a separate backlog call.
    """
    result = typed_property('result', SendApiCallCallbackResultTO, False, default=None)
class ReceiveApiCallResultResponseTO(object):
    """Response of request sent to mobile"""
    pass
class UpdateUserDataRequestTO(object):
    """Request to update user/app data on a mobile client."""
    DATA_TYPE_USER = u"user"
    DATA_TYPE_APP = u"app"
    # deprecated since we use smart updates.
    user_data = unicode_property('2', default=None)  # deprecated
    app_data = unicode_property('3', default=None)  # deprecated
    # deprecated, now using the `data` property because `values` only supported strings.
    keys = unicode_list_property('5', default=[])  # deprecated
    values = unicode_list_property('6', default=[])  # deprecated
    service = unicode_property('1')
    type = unicode_property('4', default=None)  # DATA_TYPE_USER or DATA_TYPE_APP
    data = unicode_property('7', default=None)
class UpdateUserDataResponseTO(object):
    """Empty response for UpdateUserDataRequestTO."""
    pass
class ServiceInteractionDefTO(object):
    """TO for a service interaction definition (QR code) with scan stats."""
    url = unicode_property('1')
    description = unicode_property('2')
    tag = unicode_property('3')
    timestamp = long_property('4')
    id_ = long_property('5')
    embed_url = unicode_property('6')
    email_url = unicode_property('7')
    sms_url = unicode_property('8')
    total_scan_count = long_property('9')
    scanned_from_rogerthat_count = long_property('10')
    scanned_from_outside_rogerthat_on_supported_platform_count = long_property('11')
    scanned_from_outside_rogerthat_on_unsupported_platform_count = long_property('12')
    service_identifier = unicode_property('13')
    static_flow_name = unicode_property('14')
    branding = unicode_property('15')
    @staticmethod
    def urlFromServiceInteractionDef(sid):
        # Absolute interaction URL built from the server base URL,
        # the owning user's code and the definition's datastore id.
        from rogerthat.bizz.friends import userCode
        return u"%s/si/%s/%s" % (get_server_settings().baseUrl, userCode(sid.user), sid.key().id())
    @staticmethod
    def emailUrl(sid):
        # Short URL variant used when sharing via e-mail.
        from rogerthat.bizz.service import get_service_interact_short_url
        return get_service_interact_short_url(sid) + "?email"
    @staticmethod
    def smsUrl(sid):
        # Short URL variant used when sharing via SMS.
        from rogerthat.bizz.service import get_service_interact_short_url
        return get_service_interact_short_url(sid)
    @classmethod
    def fromServiceInteractionDef(cls, sid):
        # Build a TO (of type `cls`) from a ServiceInteractionDef model.
        from rogerthat.bizz.service import get_service_interact_qr_code_url
        to = cls()
        to.url = ServiceInteractionDefTO.urlFromServiceInteractionDef(sid)
        to.description = sid.description
        to.tag = sid.tag
        to.service_identifier = sid.service_identity
        to.timestamp = sid.timestamp
        to.id_ = sid.key().id()
        to.embed_url = get_service_interact_qr_code_url(sid)
        to.email_url = ServiceInteractionDefTO.emailUrl(sid)
        to.sms_url = ServiceInteractionDefTO.smsUrl(sid)
        to.total_scan_count = sid.totalScanCount
        to.scanned_from_rogerthat_count = sid.scannedFromRogerthatCount
        to.scanned_from_outside_rogerthat_on_supported_platform_count = sid.scannedFromOutsideRogerthatOnSupportedPlatformCount
        to.scanned_from_outside_rogerthat_on_unsupported_platform_count = sid.scannedFromOutsideRogerthatOnUnsupportedPlatformCount
        # Resolve the static flow's display name only when one is attached.
        to.static_flow_name = MessageFlowDesign.get(sid.staticFlowKey).name if sid.staticFlowKey else None
        to.branding = sid.branding
        return to
class GetServiceInteractionDefsResponseTO(object):
    """One page of service interaction definitions plus a paging cursor."""
    defs = typed_property('1', ServiceInteractionDefTO, True)
    cursor = unicode_property('2')
class | |
<reponame>revl/anglicize
#!/usr/bin/env python3
"""Perform anglicization of text in UTF-8 encoding.
The script works as a filter: it reads UTF-8 characters from its
standard input and writes the result to its standard output.
Alternatively, it can be used as a Python module:
from anglicize import Anglicize
print(Anglicize.anglicize(utf8_as_bytes))
See README.md for more details."""
from typing import Dict, Optional, Any
class Anglicize(object):
"""Convert a byte sequence of UTF-8 characters to their English
transcriptions."""
def __init__(self) -> None:
self.__state = Anglicize.XLAT_TREE
self.__finite_state: Optional[Dict[int, Any]] = None
self.__buf = bytearray()
self.__capitalization_mode = False
self.__first_capital_and_spaces = bytearray()
self.__output = bytearray()
@staticmethod
def anglicize(text: bytes) -> bytearray:
"""Process a whole string and return its anglicized version."""
anglicize = Anglicize()
return anglicize.process_buf(text) + anglicize.finalize()
def process_buf(self, buf: bytes) -> bytearray:
"""Anglicize a buffer. Expect more to come."""
self.__output = bytearray()
for byte in buf:
self.__push_byte(byte)
return self.__output
    def finalize(self) -> bytearray:
        """Process and return the remainder of the internal buffer."""
        self.__output = bytearray()
        # Drain the buffer: each call flushes the best match (or one raw
        # byte) and re-feeds the rest, until nothing is pending.
        while self.__buf or self.__finite_state:
            self.__skip_buf_byte()
        # A trailing capitalized word (plus held spaces) was never
        # confirmed as all-caps input; emit it unchanged.
        if self.__capitalization_mode:
            if self.__first_capital_and_spaces:
                self.__output += self.__first_capital_and_spaces
            self.__capitalization_mode = False
        return self.__output
    def __push_byte(self, byte: int) -> None:
        """Input another byte. Return the transliteration when it's ready."""
        # Check if there is no transition from the current state
        # for the given byte.
        if byte not in self.__state:
            if self.__state == Anglicize.XLAT_TREE:
                # We're at the start state, which means that
                # no bytes have been accumulated in the
                # buffer and the new byte also cannot be
                # converted.
                self.__hold_spaces_after_capital(byte)
            else:
                # Flush the buffered prefix, then retry this byte
                # from the start state.
                self.__skip_buf_byte()
                self.__push_byte(byte)
        else:
            new_state = self.__state[byte]
            # new_state is [translation, subtree]; a None subtree means
            # the match cannot be extended any further.
            if not new_state[1]:
                self.__state = Anglicize.XLAT_TREE
                self.__finite_state = None
                self.__buf = bytearray()
                self.__hold_first_capital(new_state[0])
            else:
                self.__state = new_state[1]
                if new_state[0]:
                    # Longest match so far; remember it but keep reading
                    # in case an even longer match follows.
                    self.__finite_state = new_state
                    self.__buf = bytearray()
                else:
                    self.__buf.append(byte)
    def __skip_buf_byte(self) -> None:
        """Restart character recognition in the internal buffer."""
        self.__state = Anglicize.XLAT_TREE
        if self.__finite_state:
            # A match was found earlier: emit its translation and
            # re-feed the bytes accumulated after it.
            self.__hold_first_capital(self.__finite_state[0])
            self.__finite_state = None
            buf = self.__buf
        else:
            # No match: pass the first buffered byte through
            # untranslated and re-feed the rest.
            self.__hold_spaces_after_capital(self.__buf[0])
            buf = self.__buf[1:]
        self.__buf = bytearray()
        for byte in buf:
            self.__push_byte(byte)
    def __hold_first_capital(self, xlat: bytes) -> None:
        """Check for capitalization mode."""
        # In capitalization mode a run of title-case transliterations
        # (typically from an all-caps source word) is upper-cased whole.
        if self.__capitalization_mode:
            if self.__first_capital_and_spaces:
                if xlat.istitle():
                    # A second capital confirms the mode: emit the held
                    # prefix and this transliteration in upper case.
                    xlat = self.__first_capital_and_spaces + xlat
                    self.__first_capital_and_spaces = bytearray()
                    self.__output += xlat.upper()
                    return
                # Not a capital: give up, prepend what was held.
                xlat = self.__first_capital_and_spaces + xlat
            elif xlat.istitle():
                # Mode already confirmed: keep upper-casing capitals.
                self.__output += xlat.upper()
                return
            self.__capitalization_mode = False
        elif xlat.istitle():
            # First title-case transliteration: hold it until we know
            # whether the following one is capitalized too.
            self.__capitalization_mode = True
            self.__first_capital_and_spaces = bytearray(xlat)
            return
        self.__output += xlat
    def __hold_spaces_after_capital(self, byte: int) -> None:
        """Buffer spaces after the first capital letter."""
        if self.__capitalization_mode:
            if self.__first_capital_and_spaces:
                if byte == 32:
                    # Keep holding spaces; the next word may still prove
                    # the input is an all-caps run.
                    self.__first_capital_and_spaces.append(byte)
                    return
                else:
                    # A non-space untranslatable byte cancels the mode;
                    # flush whatever was held.
                    self.__capitalization_mode = False
                    self.__output += self.__first_capital_and_spaces
            elif byte != 32:
                self.__capitalization_mode = False
        self.__output.append(byte)
# This variable is updated by make_xlat_tree.
XLAT_TREE: Dict[int, Any] = {
0xC2: [b"", {
0xAB: [b"\"", None],
0xBB: [b"\"", None]
}],
0xC3: [b"", {
0x80: [b"A", None],
0x81: [b"A", None],
0x82: [b"I", None],
0x83: [b"A", None],
0x84: [b"A", None],
0x85: [b"O", None],
0x86: [b"A", None],
0x87: [b"S", {
0xCC: [b"", {
0x87: [b"C", None]
}]
}],
0x88: [b"E", None],
0x89: [b"E", None],
0x8A: [b"E", None],
0x8B: [b"Yo", None],
0x8C: [b"I", None],
0x8D: [b"I", None],
0x8E: [b"I", None],
0x90: [b"D", None],
0x91: [b"Ny", {
0xC3: [b"", {
0xB3: [b"Nyo", None]
}]
}],
0x92: [b"O", None],
0x93: [b"O", None],
0x94: [b"O", None],
0x95: [b"O", None],
0x96: [b"O", None],
0x98: [b"O", None],
0x99: [b"U", None],
0x9A: [b"U", None],
0x9B: [b"U", None],
0x9C: [b"U", None],
0x9E: [b"Th", None],
0x9F: [b"ss", None],
0xA0: [b"a", None],
0xA1: [b"a", None],
0xA2: [b"i", None],
0xA3: [b"a", None],
0xA4: [b"a", None],
0xA5: [b"o", None],
0xA6: [b"a", None],
0xA7: [b"s", {
0xCC: [b"", {
0x87: [b"c", None]
}]
}],
0xA8: [b"e", None],
0xA9: [b"e", None],
0xAA: [b"e", None],
0xAB: [b"yo", None],
0xAC: [b"i", None],
0xAD: [b"i", None],
0xAE: [b"i", None],
0xB0: [b"d", None],
0xB1: [b"ny", {
0xC3: [b"", {
0xB3: [b"nyo", None]
}]
}],
0xB2: [b"o", None],
0xB3: [b"o", None],
0xB4: [b"o", None],
0xB5: [b"o", None],
0xB6: [b"o", None],
0xB8: [b"o", None],
0xB9: [b"u", None],
0xBA: [b"u", None],
0xBB: [b"u", None],
0xBC: [b"u", None],
0xBE: [b"th", None]
}],
0xC4: [b"", {
0x82: [b"A", None],
0x83: [b"a", None],
0x84: [b"O", None],
0x85: [b"o", None],
0x86: [b"Ch", None],
0x87: [b"ch", None],
0x98: [b"E", None],
0x99: [b"e", None],
0xA2: [b"G", None],
0xA3: [b"g", None],
0xB6: [b"K", None],
0xB7: [b"k", None],
0xBB: [b"L", None],
0xBC: [b"l", None]
}],
0xC5: [b"", {
0x81: [b"W", None],
0x82: [b"w", None],
0x83: [b"Ny", None],
0x84: [b"ny", None],
0x85: [b"N", None],
0x86: [b"n", None],
0x96: [b"R", None],
0x97: [b"r", None],
0x9A: [b"Sh", None],
0x9B: [b"sh", None],
0x9E: [b"Sh", None],
0x9F: [b"sh", None],
0xA0: [b"Sh", None],
0xA1: [b"sh", None],
0xA2: [b"T", None],
0xA3: [b"t", None],
0xB9: [b"Zh", None],
0xBA: [b"zh", None],
0xBB: [b"Zh", None],
0xBC: [b"zh", None],
0xBD: [b"S", None],
0xBE: [b"s", None]
}],
0xC6: [b"", {
0x86: [b"", {
0xCC: [b"", {
0xA7: [b"O", None]
}]
}],
0x8F: [b"", {
0xCC: [b"", {
0xA7: [b"E", None]
}]
}],
0x90: [b"", {
0xCC: [b"", {
0xA7: [b"E", None]
}]
}],
0x97: [b"", {
0xCC: [b"", {
0xA7: [b"I", None]
}]
}]
}],
0xC8: [b"", {
0x98: [b"Sh", None],
0x99: [b"sh", None],
0x9A: [b"Ts", None],
0x9B: [b"ts", None],
0xA8: [b"E", {
0xCC: [b"", {
0x87: [b"E", None]
}]
}],
0xA9: [b"e", {
0xCC: [b"", {
0x87: [b"e", None]
}]
}]
}],
0xC9: [b"", {
0x94: [b"", {
0xCC: [b"", {
0xA7: [b"o", None]
}]
}],
0x99: [b"", {
0xCC: [b"", {
0xA7: [b"e", None]
}]
}],
0x9B: [b"", {
0xCC: [b"", {
0xA7: [b"e", None]
}]
}],
0xA8: [b"", {
0xCC: [b"", {
0xA7: [b"i", None]
}]
}]
}],
0xCE: [b"", {
0x86: [b"A", None],
0x88: [b"E", None],
0x89: [b"I", None],
0x8A: [b"I", None],
0x8C: [b"O", None],
0x8E: [b"U", None],
0x8F: [b"O", None],
0x91: [b"A", {
0xCE: [b"", {
0x99: [b"E", None],
0xA5: [b"AF", None],
0xB9: [b"E", None]
}],
0xCF: [b"", {
0x85: [b"Af", None]
}]
}],
0x92: [b"V", None],
0x93: [b"G", {
0xCE: [b"", {
0x93: [b"NG", None],
0x9A: [b"G", None],
0x9E: [b"NX", None],
0xA7: [b"NKH", None],
0xB3: [b"Ng", None],
0xBA: [b"G", None],
0xBE: [b"Nx", None]
}],
0xCF: [b"", {
0x87: [b"Nkh", None]
}]
}],
0x94: [b"D", None],
0x95: [b"E", {
0xCE: [b"", {
0x99: [b"I", None],
0xA5: [b"EF", None],
0xB9: [b"I", None]
}],
0xCF: [b"", {
0x85: [b"Ef", None]
}]
}],
0x96: [b"Zd", None],
0x97: [b"I", {
0xCE: [b"", {
0x99: [b"I", None],
0xA5: [b"IF", None],
0xB9: [b"I", None]
}],
0xCF: [b"", {
0x85: [b"If", None]
}]
}],
0x98: [b"Th", None],
0x99: [b"I", None],
0x9A: [b"K", None],
0x9B: [b"L", None],
0x9C: [b"M", {
0xCE: [b"", {
0xA0: [b"MB", None]
}],
0xCF: [b"", {
0x80: [b"Mb", None]
}]
}],
0x9D: [b"N", {
0xCE: [b"", {
0xA4: [b"ND", None]
}],
0xCF: [b"", {
0x84: [b"Nd", None]
}]
}],
0x9E: [b"X", None],
0x9F: [b"O", {
0xCE: [b"", {
0x99: [b"I", None],
0xA5: [b"U", None],
0xB9: [b"I", None]
}],
0xCF: [b"", {
0x85: [b"U", None]
}]
}],
0xA0: [b"P", None],
0xA1: [b"R", None],
0xA3: [b"S", None],
0xA4: [b"T", {
0xCE: [b"", {
0x96: [b"TZ", None],
0xB6: [b"Tz", None]
}]
}],
0xA5: [b"U", {
0xCE: [b"", {
0x99: [b"I", None],
0xB9: [b"I", None]
}]
}],
0xA6: [b"F", None],
0xA7: [b"Kh", None],
0xA8: [b"Ps", None],
0xA9: [b"O", {
0xCE: [b"", {
0x99: [b"O", None],
0xA5: [b"OI", None],
0xB9: [b"O", None]
}],
0xCF: [b"", {
0x85: [b"Oi", None]
}]
}],
0xAC: [b"a", None],
0xAD: [b"e", None],
0xAE: [b"i", None],
0xAF: [b"i", None],
0xB1: [b"a", {
0xCE: [b"", {
0xB9: [b"e", None]
}],
0xCF: [b"", {
0x85: [b"af", None]
}]
}],
0xB2: [b"v", None],
0xB3: [b"g", {
0xCE: [b"", {
0xB3: [b"ng", None],
0xBA: [b"g", None],
0xBE: [b"nx", None]
}],
0xCF: [b"", {
0x87: [b"nkh", None]
}]
}],
0xB4: [b"d", None],
0xB5: [b"e", {
0xCE: [b"", {
0xB9: [b"i", None]
}],
0xCF: [b"", {
0x85: [b"ef", None]
}]
}],
0xB6: [b"zd", None],
0xB7: [b"i", {
0xCE: [b"", {
0xB9: [b"i", None]
}],
0xCF: [b"", {
0x85: [b"if", None]
}]
}],
0xB8: [b"th", None],
0xB9: [b"i", None],
0xBA: [b"k", None],
0xBB: [b"l", None],
0xBC: [b"m", {
0xCF: [b"", {
0x80: [b"mb", None]
| |
startZ+5, "bookshelf")
setBlock(startX+9, h+2, startZ+5, "bookshelf")
setBlock(startX+8, h+2, startZ+5, "bookshelf")
#EnchantingCorner
setBlock(startX+5, h+1, startZ+5, "enchanting_table")
setBlock(startX+5, h+3, startZ+5, "soul_lantern")
#Lights
#setBlock(startX+6, h+2, startZ+5, "wall_torch")
#setBlock(startX+9, h+2, startZ+5, "wall_torch")
setBlock(startX+10, h+1, startZ+10, "glowstone")
setBlock(startX+10, h+1, startZ+5, "glowstone")
#Crafting
setBlock(startX+8, h+1, startZ+10, "crafting_table")
setBlock(startX+7, h+1, startZ+10, "furnace")
#Garden
setBlock(startX+9, h, startZ+12, "farmland")
setBlock(startX+8, h, startZ+12, "farmland")
setBlock(startX+7, h, startZ+12, "farmland")
setBlock(startX+6, h, startZ+12, "farmland")
setBlock(startX+10, h+1, startZ+12, "dark_oak_slab")
setBlock(startX+5, h+1, startZ+12, "dark_oak_slab")
setBlock(startX+10, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+9, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+8, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+7, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+6, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+5, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+9, h+1, startZ+12, "red_tulip")
setBlock(startX+8, h+1, startZ+12, "red_tulip")
setBlock(startX+7, h+1, startZ+12, "red_tulip")
setBlock(startX+6, h+1, startZ+12, "red_tulip")
else:
#Doorway
setBlock(startX+11, h+1, startZ+8, "air")
setBlock(startX+11, h+2, startZ+8, "air")
setBlock(startX+11, h+2, startZ+7, "air")
setBlock(startX+11, h+1, startZ+7, "air")
#setBlock(startX+7, h+1, startZ+4, "jungle_door")
#setBlock(startX+8, h+1, startZ+4, "jungle_door")
#setBlock(startX+7, h+2, startZ+4, "jungle_door")
#setBlock(startX+8, h+2, startZ+4, "jungle_door")
#Window
setBlock(startX+8, h+2, startZ+11, "glass")
setBlock(startX+7, h+2, startZ+11, "glass")
#Bookcase
setBlock(startX+9, h+1, startZ+5, "bookshelf")
setBlock(startX+8, h+1, startZ+5, "bookshelf")
setBlock(startX+9, h+2, startZ+5, "bookshelf")
setBlock(startX+8, h+2, startZ+5, "bookshelf")
#EnchantingCorner
setBlock(startX+5, h+1, startZ+5, "enchanting_table")
setBlock(startX+5, h+3, startZ+5, "soul_lantern")
#Lights
#setBlock(startX+6, h+2, startZ+5, "wall_torch")
#setBlock(startX+9, h+2, startZ+5, "wall_torch")
setBlock(startX+10, h+1, startZ+10, "glowstone")
setBlock(startX+10, h+1, startZ+5, "glowstone")
#Crafting
setBlock(startX+8, h+1, startZ+10, "crafting_table")
setBlock(startX+7, h+1, startZ+10, "furnace")
#Garden
setBlock(startX+9, h, startZ+12, "farmland")
setBlock(startX+8, h, startZ+12, "farmland")
setBlock(startX+7, h, startZ+12, "farmland")
setBlock(startX+6, h, startZ+12, "farmland")
setBlock(startX+10, h+1, startZ+12, "dark_oak_slab")
setBlock(startX+5, h+1, startZ+12, "dark_oak_slab")
setBlock(startX+10, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+9, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+8, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+7, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+6, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+5, h+1, startZ+13, "dark_oak_slab")
setBlock(startX+9, h+1, startZ+12, "red_tulip")
setBlock(startX+8, h+1, startZ+12, "red_tulip")
setBlock(startX+7, h+1, startZ+12, "red_tulip")
setBlock(startX+6, h+1, startZ+12, "red_tulip")
if connectingCells[0]:
setBlock(startX+7, h+1, startZ, "air")
setBlock(startX+8, h+1, startZ, "air")
if connectingCells[1]:
setBlock(startX, h+1, startZ+7, "air")
setBlock(startX, h+1, startZ+8, "air")
if connectingCells[2]:
setBlock(startX+7, h+1, startZ+15, "air")
setBlock(startX+8, h+1, startZ+15, "air")
if connectingCells[3]:
setBlock(startX+15, h+1, startZ+7, "air")
setBlock(startX+15, h+1, startZ+8 , "air")
#Manor
elif self.grid[x][z].identifier == 4:
for i in range(startX, startX+16):
for j in range(startZ, startZ+16):
setBlock(i, h, j, "dirt")
for i in range(startX, startX+16):
setBlock(i, h+1, startZ, "oak_fence")
setBlock(i, h+1, startZ+15, "oak_fence")
for j in range(startZ, startZ+16):
setBlock(startX, h+1, j, "oak_fence")
setBlock(startX+15, h+1, j, "oak_fence")
for i in range(startX+3, startX+13):
for k in range(startZ+3, startZ+13):
setBlock(i, h, k, "oak_planks")
setBlock(i, h+4, k, "oak_planks")
setBlock(i, h+8, k, "oak_planks")
for j in range(h+1, h+8):
for i in range(startX+3, startX+13):
setBlock(i, j, startZ+3, "oak_planks")
setBlock(i, j, startZ+12, "oak_planks")
for k in range(startZ+3, startZ+13):
setBlock(startX+3, j, k, "oak_planks")
setBlock(startX+12, j, k, "oak_planks")
if connectingCells[0]:
#Doors
setBlock(startX+7, h+1, startZ+3, "air")
setBlock(startX+8, h+1, startZ+3, "air")
setBlock(startX+7, h+2, startZ+3, "air")
setBlock(startX+8, h+2, startZ+3, "air")
#Balcony
setBlock(startX+6, h+1, startZ+2, "oak_fence")
setBlock(startX+6, h+2, startZ+2, "oak_fence")
setBlock(startX+9, h+1, startZ+2, "oak_fence")
setBlock(startX+9, h+2, startZ+2, "oak_fence")
setBlock(startX+6, h+3, startZ+2, "oak_planks")
setBlock(startX+7, h+3, startZ+2, "oak_planks")
setBlock(startX+8, h+3, startZ+2, "oak_planks")
setBlock(startX+9, h+3, startZ+2, "oak_planks")
setBlock(startX+6, h+4, startZ+2, "oak_fence")
setBlock(startX+7, h+4, startZ+2, "oak_fence")
setBlock(startX+8, h+4, startZ+2, "oak_fence")
setBlock(startX+9, h+4, startZ+2, "oak_fence")
for j in range(h+4, h+8):
for i in range(startX+6, startX+10):
setBlock(i, j, startZ+3, "air")
setBlock(startX+6, h+4, startZ+4, "oak_planks")
setBlock(startX+6, h+5, startZ+4, "oak_planks")
setBlock(startX+6, h+6, startZ+4, "oak_planks")
setBlock(startX+6, h+7, startZ+4, "oak_planks")
setBlock(startX+7, h+4, startZ+4, "oak_slab")
setBlock(startX+8, h+4, startZ+4, "oak_slab")
setBlock(startX+7, h+7, startZ+4, "oak_planks")
setBlock(startX+8, h+7, startZ+4, "oak_planks")
setBlock(startX+9, h+4, startZ+4, "oak_planks")
setBlock(startX+9, h+5, startZ+4, "oak_planks")
setBlock(startX+9, h+6, startZ+4, "oak_planks")
setBlock(startX+9, h+7, startZ+4, "oak_planks")
#Stairs
setBlock(startX+5, h+1, startZ+10, "oak_planks")
setBlock(startX+5, h+1, startZ+11, "oak_planks")
setBlock(startX+4, h+1, startZ+9, "oak_planks")
setBlock(startX+4, h+1, startZ+8, "oak_planks")
setBlock(startX+4, h+2, startZ+8, "oak_planks")
setBlock(startX+4, h+2, startZ+9, "oak_planks")
setBlock(startX+4, h+2, startZ+10, "oak_planks")
setBlock(startX+4, h+2, startZ+11, "oak_planks")
setBlock(startX+4, h+3, startZ+8, "oak_planks")
setBlock(startX+4, h+3, startZ+9, "oak_planks")
setBlock(startX+5, h+1, startZ+9, "oak_slab")
setBlock(startX+5, h+2, startZ+11, "oak_slab")
setBlock(startX+4, h+3, startZ+10, "oak_slab")
setBlock(startX+4, h+4, startZ+8, "oak_slab")
setBlock(startX+4, h+4, startZ+9, "air")
setBlock(startX+4, h+4, startZ+10, "air")
setBlock(startX+4, h+4, startZ+11, "air")
setBlock(startX+5, h+4, startZ+9, "air")
setBlock(startX+5, h+4, startZ+10, "air")
setBlock(startX+5, h+4, startZ+11, "air")
#Decorations
setBlock(startX+10, h+5, startZ+4, "chest")
setBlock(startX+11, h+5, startZ+4, "chest")
setBlock(startX+4, h+7, startZ+4, "glowstone")
setBlock(startX+4, h+7, startZ+11, "glowstone")
setBlock(startX+11, h+7, startZ+4, "glowstone")
setBlock(startX+11, h+7, startZ+11, "glowstone")
setBlock(startX+11, h+5, startZ+11, "jukebox")
setBlock(startX+9, h+5, startZ+7, "light_gray_wool")
setBlock(startX+9, h+5, startZ+8, "light_gray_wool")
setBlock(startX+10, h+5, startZ+7, "light_gray_wool")
setBlock(startX+10, h+5, startZ+8, "light_gray_wool")
setBlock(startX+11, h+5, startZ+7, "dark_oak_planks")
setBlock(startX+11, h+5, startZ+8, "dark_oak_planks")
setBlock(startX+11, h+6, startZ+7, "dark_oak_planks")
setBlock(startX+11, h+6, startZ+8, "dark_oak_planks")
setBlock(startX+7, h+8, startZ+6, "glass")
setBlock(startX+8, h+8, startZ+6, "glass")
setBlock(startX+7, h+8, startZ+7, "glass")
setBlock(startX+8, h+8, startZ+7, "glass")
setBlock(startX+7, h+8, startZ+8, "glass")
setBlock(startX+8, h+8, startZ+8, "glass")
setBlock(startX+7, h+8, startZ+9, "glass")
setBlock(startX+8, h+8, startZ+9, "glass")
setBlock(startX+11, h+1, startZ+6, "crafting_table")
setBlock(startX+11, h+1, startZ+7, "furnace")
setBlock(startX+11, h+1, startZ+8, "fletching_table")
setBlock(startX+11, h+1, startZ+9, "smithing_table")
setBlock(startX+4, h+1, startZ+4, "cartography_table")
setBlock(startX+4, h+3, startZ+7, "glowstone")
setBlock(startX+11, h+3, startZ+4, "glowstone")
setBlock(startX+11, h+3, startZ+11, "glowstone")
setBlock(startX+12, h+2, startZ+7, "glass")
setBlock(startX+12, h+2, startZ+8, "glass")
elif connectingCells[1]:
#Doors
setBlock(startX+3, h+1, startZ+7, "air")
setBlock(startX+3, h+1, startZ+8, "air")
setBlock(startX+3, h+2, startZ+7, "air")
setBlock(startX+3, h+2, startZ+8, "air")
#Balcony
setBlock(startX+2, h+1, startZ+6, "oak_fence")
setBlock(startX+2, h+2, startZ+6, "oak_fence")
setBlock(startX+2, h+1, startZ+9, "oak_fence")
setBlock(startX+2, h+2, startZ+9, "oak_fence")
setBlock(startX+2, h+3, startZ+6, "oak_planks")
setBlock(startX+2, h+3, startZ+7, "oak_planks")
setBlock(startX+2, h+3, startZ+8, "oak_planks")
setBlock(startX+2, h+3, startZ+9, "oak_planks")
setBlock(startX+2, h+4, startZ+6, "oak_fence")
setBlock(startX+2, h+4, startZ+7, "oak_fence")
setBlock(startX+2, h+4, startZ+8, "oak_fence")
setBlock(startX+2, h+4, startZ+9, "oak_fence")
for j in range(h+4, h+8):
for k in range(startZ+6, startZ+10):
setBlock(startX+3, j, k, "air")
setBlock(startX+4, h+4, startZ+6, "oak_planks")
setBlock(startX+4, h+5, startZ+6, "oak_planks")
setBlock(startX+4, h+6, startZ+6, "oak_planks")
setBlock(startX+4, h+7, startZ+6, "oak_planks")
setBlock(startX+4, h+4, startZ+7, "oak_slab")
setBlock(startX+4, h+4, startZ+8, "oak_slab")
setBlock(startX+4, h+7, startZ+7, "oak_planks")
setBlock(startX+4, h+7, startZ+8, "oak_planks")
setBlock(startX+4, h+4, startZ+9, "oak_planks")
setBlock(startX+4, h+5, startZ+9, "oak_planks")
setBlock(startX+4, h+6, startZ+9, "oak_planks")
setBlock(startX+4, h+7, startZ+9, "oak_planks")
#Stairs
setBlock(startX+10, h+1, startZ+5, "oak_planks")
setBlock(startX+11, h+1, startZ+5, "oak_planks")
setBlock(startX+9, h+1, startZ+4, "oak_planks")
setBlock(startX+8, h+1, startZ+4, "oak_planks")
setBlock(startX+8, h+2, startZ+4, "oak_planks")
setBlock(startX+9, h+2, startZ+4, "oak_planks")
setBlock(startX+10, h+2, startZ+4, "oak_planks")
setBlock(startX+11, h+2, startZ+4, "oak_planks")
setBlock(startX+8, h+3, startZ+4, "oak_planks")
setBlock(startX+9, h+3, startZ+4, "oak_planks")
setBlock(startX+9, h+1, startZ+5, "oak_slab")
setBlock(startX+11, h+2, startZ+5, "oak_slab")
setBlock(startX+10, h+3, startZ+4, "oak_slab")
setBlock(startX+8, h+4, startZ+4, "oak_slab")
setBlock(startX+9, h+4, startZ+4, "air")
setBlock(startX+10, h+4, startZ+4, "air")
setBlock(startX+11, h+4, startZ+4, "air")
setBlock(startX+9, h+4, startZ+5, "air")
setBlock(startX+10, h+4, startZ+5, "air")
setBlock(startX+11, h+4, startZ+5, "air")
#Decorations
setBlock(startX+4, h+5, startZ+10, "chest")
setBlock(startX+4, h+5, startZ+11, "chest")
setBlock(startX+4, h+7, startZ+4, "glowstone")
setBlock(startX+11, h+7, startZ+4, "glowstone")
setBlock(startX+4, h+7, startZ+11, "glowstone")
setBlock(startX+11, h+7, startZ+11, "glowstone")
setBlock(startX+11, h+5, startZ+11, "jukebox")
setBlock(startX+7, h+5, startZ+9, "light_gray_wool")
setBlock(startX+8, h+5, startZ+9, "light_gray_wool")
setBlock(startX+7, h+5, startZ+10, "light_gray_wool")
setBlock(startX+8, h+5, startZ+10, "light_gray_wool")
setBlock(startX+7, h+5, startZ+11, "dark_oak_planks")
setBlock(startX+8, h+5, startZ+11, "dark_oak_planks")
setBlock(startX+7, h+6, startZ+11, "dark_oak_planks")
setBlock(startX+8, h+6, startZ+11, "dark_oak_planks")
setBlock(startX+6, h+8, startZ+7, "glass")
setBlock(startX+6, h+8, startZ+8, "glass")
setBlock(startX+7, h+8, startZ+7, "glass")
setBlock(startX+7, h+8, startZ+8, "glass")
setBlock(startX+8, h+8, startZ+7, "glass")
setBlock(startX+8, h+8, startZ+8, "glass")
setBlock(startX+9, h+8, startZ+7, "glass")
setBlock(startX+9, h+8, startZ+8, "glass")
setBlock(startX+6, h+1, startZ+11, "crafting_table")
setBlock(startX+7, h+1, startZ+11, "furnace")
setBlock(startX+8, h+1, startZ+11, "fletching_table")
setBlock(startX+9, h+1, startZ+11, "smithing_table")
setBlock(startX+4, h+1, startZ+4, "cartography_table")
setBlock(startX+7, h+3, startZ+4, "glowstone")
setBlock(startX+4, h+3, startZ+11, "glowstone")
setBlock(startX+11, h+3, startZ+11, "glowstone")
setBlock(startX+7, h+2, startZ+12, "glass")
setBlock(startX+8, h+2, startZ+12, "glass")
elif connectingCells[2]:
#Doors
setBlock(startX+7, h+1, startZ+12, "air")
setBlock(startX+8, h+1, startZ+12, "air")
setBlock(startX+7, h+2, startZ+12, "air")
setBlock(startX+8, h+2, startZ+12, "air")
#Balcony
setBlock(startX+9, h+1, startZ+13, "oak_fence")
setBlock(startX+9, h+2, startZ+13, "oak_fence")
setBlock(startX+6, h+1, startZ+13, "oak_fence")
setBlock(startX+6, h+2, startZ+13, "oak_fence")
setBlock(startX+9, h+3, startZ+13, "oak_planks")
setBlock(startX+8, h+3, startZ+13, "oak_planks")
setBlock(startX+7, h+3, startZ+13, "oak_planks")
setBlock(startX+6, h+3, startZ+13, "oak_planks")
setBlock(startX+9, h+4, startZ+13, "oak_fence")
setBlock(startX+8, h+4, startZ+13, "oak_fence")
setBlock(startX+7, h+4, startZ+13, "oak_fence")
setBlock(startX+6, h+4, startZ+13, "oak_fence")
for j in range(h+4, h+8):
for i in range(startX+6, startX+10):
setBlock(i, j, startZ+12, "air")
setBlock(startX+6, h+4, startZ+11, "oak_planks")
setBlock(startX+6, h+5, startZ+11, "oak_planks")
setBlock(startX+6, h+6, startZ+11, "oak_planks")
setBlock(startX+6, h+7, startZ+11, "oak_planks")
setBlock(startX+7, h+4, startZ+11, "oak_slab")
setBlock(startX+8, h+4, startZ+11, "oak_slab")
setBlock(startX+7, h+7, startZ+11, "oak_planks")
setBlock(startX+8, h+7, startZ+11, "oak_planks")
setBlock(startX+9, h+4, startZ+11, "oak_planks")
setBlock(startX+9, h+5, startZ+11, "oak_planks")
setBlock(startX+9, h+6, startZ+11, "oak_planks")
setBlock(startX+9, h+7, startZ+11, "oak_planks")
#Stairs
setBlock(startX+10, h+1, startZ+5, "oak_planks")
setBlock(startX+10, h+1, startZ+4, "oak_planks")
setBlock(startX+11, h+1, startZ+6, "oak_planks")
setBlock(startX+11, h+1, startZ+7, "oak_planks")
setBlock(startX+11, h+2, startZ+7, "oak_planks")
setBlock(startX+11, h+2, startZ+6, "oak_planks")
setBlock(startX+11, h+2, startZ+5, "oak_planks")
setBlock(startX+11, h+2, startZ+4, "oak_planks")
setBlock(startX+11, h+3, startZ+7, "oak_planks")
setBlock(startX+11, h+3, startZ+6, "oak_planks")
setBlock(startX+10, h+1, startZ+6, "oak_slab")
setBlock(startX+10, h+2, startZ+4, "oak_slab")
setBlock(startX+11, h+3, | |
<filename>osuAlternative/bot.py
import discord
import datetime
import threading
import csv
import time
import asyncio
import psycopg2
import formatter
import controller
import documentation
from discord.ext import commands
# Bot client; the stock help command is removed so the custom !help below
# can take its place.
client = commands.Bot(command_prefix='!')
client.remove_command('help')

# Star-rating bounds [low, high) for each named difficulty bucket.
diffs = {
    "easy": [0, 2],
    "normal": [2, 2.8],
    "hard": [2.8, 4],
    "insane": [4, 5.3],
    "extra": [5.3, 6.5],
    "extreme": [6.5, 20],
    "all": [0, 20],
}

# Start of each week of "project 2021".
# NOTE(review): the first eight entries carry an explicit midnight time while
# the rest are bare dates -- confirm downstream parsing treats both the same.
starts = ["2021-01-01 00:00:00", "2021-01-08 00:00:00", "2021-01-15 00:00:00", "2021-01-22 00:00:00", "2021-01-29 00:00:00", "2021-02-05 00:00:00", "2021-02-12 00:00:00", "2021-02-19 00:00:00", "2021-02-26", "2021-03-05", "2021-03-12", "2021-03-19", "2021-03-26", "2021-04-02", "2021-04-09", "2021-04-16", "2021-04-23", "2021-04-30", "2021-05-07", "2021-05-14", "2021-05-21", "2021-05-28", "2021-06-04"]

# Database connection; autocommit so writes are visible immediately.
conn = psycopg2.connect("dbname=osu user=bot password=<PASSWORD>")
conn.set_session(autocommit=True)
cur = conn.cursor()

# Rolling YYYY-MM-DD date strings, computed once at startup.
today = datetime.datetime.today().strftime('%Y-%m-%d')
lastmonth = (datetime.datetime.today() - datetime.timedelta(days=29)).strftime('%Y-%m-%d')
lastweek = (datetime.datetime.today() - datetime.timedelta(days=6)).strftime('%Y-%m-%d')
@client.event
async def on_ready():
    """Log a line once the bot has connected to Discord."""
    print('Ready')
@client.event
async def on_member_join(member):
    """Log every member that joins a server the bot is in."""
    print(f'{member} just joined the server.')
def getArgs(arg=None):
    """Parse a space-separated "key value key value ..." string into a dict.

    Consecutive tokens are paired up; a trailing token with no partner is
    silently ignored (matching the original loop's len//2 behavior).  Returns
    an empty dict when no argument string is given.
    """
    if arg is None:
        return {}
    tokens = arg.split()
    # zip() stops at the shorter slice, dropping any unpaired final token.
    return dict(zip(tokens[0::2], tokens[1::2]))
@client.command(pass_context=True)
async def help(ctx, arg=None):
    """Reply with usage text for `arg` (or the general help page).

    NOTE(review): every !help call also refreshes the first-FC/first-SS
    announcement channels via updatelists() -- looks like a cheap periodic
    trigger; confirm before removing.
    """
    await ctx.send(documentation.help(arg))
    await updatelists()
# All weekly leaderboards share the same update logic; each entry maps a
# Discord channel id to the csv filename prefix holding that board's data.
_WEEKLY_BOARDS = [
    (792818934743040052, "rankedscore"),
    (792819496829452290, "fccount"),
    (792819520581664768, "sscount"),
    (792819540567916574, "clearcount"),
]


async def updateweeklies(week):
    """Post or refresh the weekly leaderboard message in each board channel.

    If the latest message in a channel is already this week's board it is
    edited in place; otherwise a new message is sent.
    """
    header = "**Week " + str(week) + "/52 of project 2021" + "**\n"
    for channel_id, prefix in _WEEKLY_BOARDS:
        channel = client.get_channel(channel_id)
        path = "C:\\Users\\sensa\\Documents\\VSCode\\bot\\data\\" + prefix + "_" + str(week) + ".csv"
        # close the csv handle promptly (the original leaked one per board)
        with open(path, newline='') as f:
            table = formatter.formatcsv(f, 50)
        fetchMessage = await channel.history(limit = 1).flatten()
        # guard against an empty channel (the original indexed [0] blindly)
        if fetchMessage and (str(week) + "/52 of project 2021") in str(fetchMessage[0].content):
            await fetchMessage[0].edit(content = header + table)
        else:
            await channel.send(header + table)
# Year-to-date leaderboard channels and their csv files.  The final flag
# marks the boards whose refresh is logged to stdout (as in the original).
_YEARLY_BOARDS = [
    (792863236705746954, "2021_score_all.csv", False),
    (792863272860123156, "2021_fc_all.csv", False),
    (792863301184913469, "2021_ss_all.csv", False),
    (792863357563961364, "2021_clears_all.csv", False),
    (795159066133004308, "2021_pp_all.csv", True),
    (795159261755736104, "2021_top1_all.csv", True),
    (795159304024883230, "2021_top50_all.csv", True),
]


async def updateyearlies():
    """Refresh every year-to-date leaderboard message.

    Each channel keeps exactly one leaderboard message that is edited in
    place; an empty channel is first seeded with a placeholder message.
    """
    for channel_id, fname, log_done in _YEARLY_BOARDS:
        channel = client.get_channel(channel_id)
        # close the csv handle promptly (the original leaked one per board)
        with open("C:\\Users\\sensa\\Documents\\VSCode\\bot\\data\\" + fname, newline='') as f:
            table = formatter.formatcsv(f, 50)
        fetchMessage = await channel.history(limit = 1).flatten()
        if len(fetchMessage) == 0:
            # seed the channel with a message we can edit
            await channel.send('hello')
            fetchMessage = await channel.history(limit = 1).flatten()
        await fetchMessage[0].edit(content = formatter.formatcsv(open("C:\\Users\\sensa\\Documents\\VSCode\\bot\\data\\" + fname, newline=''), 50) if False else table)
        if log_done:
            print("update complete")
# Lifetime player-stat channels and their csv files.
_PLAYER_BOARDS = [
    (792875271782531102, "totalhits.csv"),
    (792883515565146112, "totalscore.csv"),
    (792883547559952415, "playcount.csv"),
    (792920423011844106, "playtime.csv"),
]


async def updateplayers():
    """Refresh the lifetime player-stat leaderboard messages.

    Each channel keeps exactly one leaderboard message that is edited in
    place; an empty channel is first seeded with a placeholder message.
    """
    for channel_id, fname in _PLAYER_BOARDS:
        channel = client.get_channel(channel_id)
        # close the csv handle promptly (the original leaked one per board)
        with open("C:\\Users\\sensa\\Documents\\VSCode\\bot\\data\\" + fname, newline='') as f:
            table = formatter.formatcsv(f, 50)
        fetchMessage = await channel.history(limit = 1).flatten()
        if len(fetchMessage) == 0:
            # seed the channel with a message we can edit
            await channel.send('hello')
            fetchMessage = await channel.history(limit = 1).flatten()
        await fetchMessage[0].edit(content = table)
async def updatelists():
    """Announce maps that just received their first FC / SS long after ranking.

    First-time FCs are announced when the map went at least 7 days without
    one; first-time SSes when it went at least 30 days.
    """
    await _announce_firsts(793570054008340511, "newfcs", 7, "FCed")
    await _announce_firsts(793594664262303814, "newSSs", 30, "SSed")


async def _announce_firsts(channel_id, table, min_days, verb):
    """Drain one queue table and post an embed per qualifying record.

    `table` is an internal constant (never user input), so interpolating it
    into the query text is safe; all values go through query parameters.
    """
    channel = client.get_channel(channel_id)
    cur.execute("select * from " + table)
    for entry in cur.fetchall():
        beatmap = entry[0]
        user = entry[1]
        date = entry[2]
        cur.execute(
            "select artist, title, diffname, approved_date, set_id, stars, "
            "length, maxcombo, cs, ar, od, hp from beatmaps where beatmap_id = %s",
            (beatmap,))
        b = cur.fetchone()
        approved_date = b[3]
        minutes = b[6] // 60
        seconds = b[6] % 60
        # crude day count (years*365 + months*31 + days), kept from original
        differential = ((date.year * 365) + (date.month*31) + (date.day)) - ((approved_date.year * 365) + (approved_date.month*31) + (approved_date.day))
        if differential < min_days:
            continue
        cur.execute("select username from users where user_id = %s", (user,))
        result = cur.fetchone()
        # fall back to the raw user id when the users table has no entry
        username = result[0] if result is not None else user
        embed = discord.Embed(title = 'A map has been ' + verb + ' for the first time after ' + str(differential) + ' days!', colour=discord.Colour(0xE5E242))
        s = ""
        s += "**Player: **" + str(username) + "\n"
        s += "**Map: **[" + b[0] + " - " + b[1] + " [" + b[2] + "]](https://osu.ppy.sh/beatmapsets/" + str(b[4]) + "#osu/" + str(beatmap) + ")\n"
        s += "**Time of play: **" + str(date) + "\n"
        s += "**Date ranked: **" + str(approved_date) + "\n\n"
        s += "**Beatmap information**\n"
        s += "CS **" + str(b[8])[0:3] + "** • AR **" + str(b[9])[0:3] + "** • OD **" + str(b[10])[0:3] + "** • HP **" + str(b[11])[0:3] + "** • **" + str(b[5])[0:4] + "★**" + "\n"
        s += "**" + str(minutes) + "m" + str(seconds) + "s** • **" + str(b[7]) + " combo**\n"
        embed.description = s
        await channel.send(embed=embed)
        cur.execute("delete from " + table + " where beatmap_id = %s", (beatmap,))
        # kept from original; redundant under autocommit but harmless
        cur.execute("COMMIT;")
@client.command(pass_context=True)
async def update(ctx):
    """Refresh every leaderboard message (weekly, yearly, player, lists)."""
    t = int(time.time())
    # 1609459200 is 2021-01-01 00:00:00 UTC.  A week counts as started once
    # its first full day has elapsed, so `week` ends up as the latest such
    # week number (1-52).
    for i in range(0, 52):
        if (t > (1609459200 + (i * 7 * 24 * 60 * 60) + (24*60*60))):
            week = i + 1
    # NOTE(review): `week` is never assigned (NameError below) if this runs
    # within the first day of 2021 -- harmless now, but worth a guard.
    # These assignments create locals that shadow the module-level
    # `today`/`lastmonth`; the globals are left untouched.
    today = datetime.datetime.today().strftime('%Y-%m-%d')
    lastmonth = (datetime.datetime.today() - datetime.timedelta(days=30)).strftime('%Y-%m-%d')
    print(lastmonth)
    await updateweeklies(week)
    await updateyearlies()
    await updateplayers()
    await updatelists()
@client.command(pass_context=True)
async def register(ctx, | |
<gh_stars>0
"""
dab-seq: single-cell dna genotyping and antibody sequencing
<NAME> 7.9.2019
functions required for the processing pipeline
two classes are defined:
* TapestriTube: variables and methods that operate on the level of a single Tapestri tube library
* SingleCell: variables and methods that operate on the level of one cell barcode
"""
import os
import os.path
import shutil
import csv
from itertools import product, combinations
from collections import Counter
import numpy as np
import subprocess
import sys
import copy
import allel
import pandas as pd
import h5py
import statsmodels.api as sm
from scipy.stats.mstats import gmean
# add the modified umi_tools directory to path
sys.path.append(os.path.join(sys.path[0], 'umi_tools'))
import Utilities as U
import network as network
# define classes
class TapestriTube(object):
# class for storing metadata for each tapestri tube
    def __init__(self,
                 tube_num,
                 panel_r1,
                 panel_r2,
                 panel_r1_temp,
                 panel_r2_temp,
                 ab_r1,
                 ab_r2,
                 ab_r1_temp,
                 ab_r2_temp,
                 ab_reads,
                 umi_counts):
        """Store per-tube metadata.

        All arguments are kept verbatim as attributes: `tube_num` identifies
        the tube; the `panel_*` / `ab_*` arguments are fastq paths for the
        DNA panel and antibody libraries (plus temp copies written during
        barcoding); `ab_reads` and `umi_counts` are output file paths.
        """
        self.tube_num = tube_num # number identifying tube
        self.panel_r1 = panel_r1 # panel R1 fastq path
        self.panel_r2 = panel_r2 # panel R2 fastq path
        self.panel_r1_temp = panel_r1_temp # temp panel R1 fastq path
        self.panel_r2_temp = panel_r2_temp # temp panel R2 fastq path
        self.ab_r1 = ab_r1 # ab R1 fastq path
        self.ab_r2 = ab_r2 # ab R2 fastq path
        self.ab_r1_temp = ab_r1_temp # temp ab R1 fastq path
        self.ab_r2_temp = ab_r2_temp # temp ab R2 fastq path
        self.ab_reads = ab_reads # file containing filtered ab reads
        self.umi_counts = umi_counts # file containing umi counts by barcode
    def barcode_reads(self,
                      r1_start,
                      r1_end,
                      r2_end,
                      mb_barcodes,
                      bar_ind_1,
                      bar_ind_2,
                      r1_min_len,
                      r2_min_len,
                      library_type,
                      chem):
        """Validate cell barcodes and write barcoded, trimmed fastq pairs.

        Runs two cutadapt subprocesses over the same input and iterates their
        stdout streams in lockstep: `bar_cmd` reports the trimmed 5' barcode
        region of each R1, while `trim_cmd` emits the hard-trimmed,
        interleaved R1/R2 pairs.  Reads whose barcode validates (via
        check_seq, defined elsewhere in this module) are written to the temp
        fastq files with the corrected barcode appended to the read id.

        NOTE(review): check_seq appears to return 'fail' or a 2-element
        barcode pair -- confirm against its definition.
        """
        # for valid reads, add barcode header to fastq file and trim
        assert library_type == 'ab' or library_type == 'panel', 'Library type must be panel or ab!'
        # set filenames according to library type (panel or ab)
        if library_type == 'panel':
            r1_in = self.panel_r1
            r2_in = self.panel_r2
            r1_out = open(self.panel_r1_temp, 'w')
            r2_out = open(self.panel_r2_temp, 'w')
            # for all chemistries except V2.1
            if chem != 'V2.1':
                trim_cmd = 'cutadapt -a %s -A %s --interleaved -j 8 -u 55 -U 5 -n 2 %s %s --quiet' % (r1_end,
                                                                                                      r2_end,
                                                                                                      r1_in,
                                                                                                      r2_in)
            # for V2.1 chemistry, extend the hard cut
            else:
                trim_cmd = 'cutadapt -a %s -A %s --interleaved -j 8 -u 72 -U 5 -n 2 %s %s --quiet' % (r1_end,
                                                                                                      r2_end,
                                                                                                      r1_in,
                                                                                                      r2_in)
        elif library_type == 'ab':
            r1_in = self.ab_r1
            r2_in = self.ab_r2
            r1_out = open(self.ab_r1_temp, 'w')
            r2_out = open(self.ab_r2_temp, 'w')
            # for all chemistries except V2.1
            if chem != 'V2.1':
                trim_cmd = 'cutadapt -a %s -A %s --interleaved -j 8 -u 55 -n 2 %s %s --quiet' % (r1_end,
                                                                                                 r2_end,
                                                                                                 r1_in,
                                                                                                 r2_in)
            # for V2.1 chemistry, extend the hard cut
            else:
                trim_cmd = 'cutadapt -a %s -A %s --interleaved -j 8 -u 72 -n 2 %s %s --quiet' % (r1_end,
                                                                                                 r2_end,
                                                                                                 r1_in,
                                                                                                 r2_in)
        # trim 5' end of read and check barcode
        bar_cmd = 'cutadapt -a %s -O 8 -e 0.2 %s -j 8 --quiet' % (r1_start,
                                                                  r1_in)
        bar_file = subprocess.Popen(bar_cmd, stdout=subprocess.PIPE, shell=True, universal_newlines=True, bufsize=1)
        # hard trimming (55 bp off R1, 5 bp off R2 for panel) is used to ensure entire cell barcode region is removed
        # barcode bases for V1 chemistry: 47 bp
        # max barcode bases for V2 chemistry: 50 bp
        trim_file = subprocess.Popen(trim_cmd, stdout=subprocess.PIPE, shell=True, universal_newlines=True, bufsize=1)
        total_reads = 0 # total count of all reads
        invalid_reads = 0 # no valid barcode found
        valid_reads = 0 # valid, barcoded read count
        too_short = 0 # reads that are too short count
        # iterate through info file (barcodes) and trim file (reads).
        # Each zip step consumes one header line from each stream; the
        # remaining fastq lines of the record are consumed explicitly below,
        # so the skip counts (7 for the interleaved pair, 2 for the barcode
        # record) must stay in sync with the fastq format.
        for bar_line, trim_line in zip(bar_file.stdout, trim_file.stdout):
            assert bar_line.strip() == trim_line.strip(), 'Cluster IDs do not match!'
            total_reads += 1
            # extract trimmed barcode region
            bar_seq = bar_file.stdout.readline().strip()
            # if trimmed adapter is too long - no barcode present
            if len(bar_seq) > 52:
                invalid_reads += 1
                # advance through files (rest of R1+R2 pair, rest of barcode record)
                for i in range(7):
                    next(trim_file.stdout)
                for i in range(2):
                    next(bar_file.stdout)
                continue
            # contains valid adapter
            else:
                # find barcodes and check that they are a valid MB barcode
                check = check_seq(bar_seq, bar_ind_1, bar_ind_2, mb_barcodes)
                # not a valid barcode
                if check == 'fail':
                    invalid_reads += 1
                    # advance through files
                    for i in range(7):
                        next(trim_file.stdout)
                    for i in range(2):
                        next(bar_file.stdout)
                    continue
                # valid barcode
                else:
                    # advance through barcode file
                    for i in range(2):
                        next(bar_file.stdout)
                    # corrected barcode plus tube suffix, e.g. AAAA...-1
                    barcode = check[0] + check[1] + '-' + str(self.tube_num)
                    # R1 from trimmed file
                    header_1 = trim_line.strip()
                    id_1 = header_1.split(' ')[0][1:]
                    seq_1 = next(trim_file.stdout).strip()
                    next(trim_file.stdout)
                    qual_1 = next(trim_file.stdout).strip()
                    # R2 from trimmed file
                    header_2 = next(trim_file.stdout).strip()
                    id_2 = header_2.split(' ')[0][1:]
                    assert id_1 == id_2, 'Cluster IDs in interleaved input do not match!'
                    seq_2 = next(trim_file.stdout).strip()
                    next(trim_file.stdout)
                    qual_2 = next(trim_file.stdout).strip()
                    # check reads for length
                    if len(seq_1) < r1_min_len or len(seq_2) < r2_min_len:
                        too_short += 1
                        continue
                    # add barcoded headers and reads to file
                    id = '@' + id_1 + '_' + barcode
                    # write to output fastq files
                    r1_out.write('%s\n%s\n+\n%s\n' % (id, seq_1, qual_1))
                    r2_out.write('%s\n%s\n+\n%s\n' % (id, seq_2, qual_2))
                    valid_reads += 1
        # TODO print barcode extraction stats to file
        # print('total: %d' % total_reads)
        # print('valid: %d' % valid_reads)
        # print('invalid: %d' % invalid_reads)
        # print('too short: %d' % too_short)
        print('Tube %d-%s: %d valid trimmed pairs saved to file.' % (self.tube_num,
                                                                     library_type,
                                                                     valid_reads))
    def process_abs(self,
                    ab_barcodes,
                    barcode_descriptions,
                    ab_handles,
                    ab_bar_coord,
                    ab_umi_coord,
                    min_umi_qual):
        """Extract antibody tag barcodes and UMIs from the barcoded ab reads.

        Uses cutadapt to keep only reads with the expected adapter structure,
        then filters each read on trimmed length, tag-barcode validity
        (via correct_barcode, defined elsewhere in this module) and UMI base
        quality.  Passing reads are written to self.ab_reads as
        cell_barcode <TAB> ab_description <TAB> umi.
        """
        # extract ab barcodes and umis from raw ab reads
        # write passed ab reads to file
        ab_reads_file = open(self.ab_reads, 'w')
        # use cutadapt to select reads with correct structure
        ab_cmd = 'cutadapt -j 8 %s -O 12 -e 0.2 -n 2 %s --quiet --discard-untrimmed' % (ab_handles,
                                                                                        self.ab_r2_temp)
        ab_process = subprocess.Popen(ab_cmd, stdout=subprocess.PIPE, shell=True, universal_newlines=True, bufsize=1)
        # count valid ab reads
        valid_ab_reads = 0
        invalid_ab_reads = 0
        # iterate through ab reads with correct adapters
        for line in ab_process.stdout:
            cell_barcode = line.strip().split('_')[1] # extract corrected barcode from header
            # extract sequences
            seq = next(ab_process.stdout).strip()
            next(ab_process.stdout)
            qual = next(ab_process.stdout).strip()
            # try ab matching to custom or totalseq tags using read length
            valid_length = False
            # check all valid trimmed ab tag lengths
            # TODO make this work even with two equal-sized tags
            for k in range(len(ab_bar_coord)):
                # check trimmed read length
                if len(seq) == len(ab_bar_coord[k] + ab_umi_coord[k]):
                    valid_length = True
                    break
            # if sequence length is not valid, continue to next read
            if not valid_length:
                invalid_ab_reads += 1
                continue
            # check ab barcode is valid
            # NOTE: `k` is deliberately reused here -- it is the index of the
            # tag layout whose length matched in the loop above.
            bar = ''.join([seq[i] for i in ab_bar_coord[k]])
            bar = correct_barcode(ab_barcodes, bar)
            if bar == 'invalid':
                invalid_ab_reads += 1
                continue
            # check umi quality (phred+33 offset)
            umi = ''.join([seq[i] for i in ab_umi_coord[k]])
            umi_qual = [ord(qual[i]) - 33 for i in ab_umi_coord[k]]
            if not all(q >= min_umi_qual for q in umi_qual):
                invalid_ab_reads += 1
                continue
            # if a read passes all filters, write it to file
            valid_ab_reads += 1
            ab_reads_file.write(cell_barcode + '\t')
            ab_reads_file.write(barcode_descriptions[bar] + '\t')
            ab_reads_file.write(umi + '\n')
        ab_reads_file.close()
        # print number of valid/invalid ab reads
        print('Tube ' + str(self.tube_num) + ': ' + str(valid_ab_reads) + ' VALID ab reads')
        print('Tube ' + str(self.tube_num) + ': ' + str(invalid_ab_reads) + ' INVALID ab reads')
    # TODO save ab and dna metrics to file
def count_umis(self, clustering_method):
    # Count UMIs per (cell barcode, ab description) group and write the
    # counts to self.umi_counts.
    # Assumes self.ab_reads is sorted so that all reads belonging to one
    # group are contiguous.
    with open(self.ab_reads, 'r') as ab_reads, open(self.umi_counts, 'w') as umi_counts:
        # header row; write_umis appears to emit the leading newline for
        # each data row, hence no trailing '\n' here
        umi_counts.write('cell_barcode\tab_description\traw\tunique')
        if clustering_method == 'all':
            umi_counts.write('\tadjacency\tdirectional\tpercentile\tcluster')
        # group id is [cell_barcode, ab_description]; None marks "no group seen yet"
        group_id_prev = None
        group_umis = []
        for line in ab_reads:
            # split the stripped line once so the group id never carries a
            # trailing newline when the umi column is missing
            fields = line.strip().split('\t')
            group_id_curr = fields[:2]
            # a read may lack a umi column; treat that as an empty umi
            umi = fields[2] if len(fields) > 2 else ''
            if group_id_prev is not None and group_id_curr != group_id_prev:
                # group boundary: cluster and flush the finished group
                counts = self.umi_tools_cluster(group_umis, clustering_method)
                self.write_umis(counts, group_id_prev, umi_counts, clustering_method)
                group_umis = []
            group_umis.append(umi)
            # BUGFIX: previously group_id_prev was only updated on a group
            # change, so the first group was flushed under an empty group id
            # and its first umi was split into a spurious group. Track the
            # current group on every line.
            group_id_prev = group_id_curr
        # flush the final group (only if the input had any reads)
        if group_id_prev is not None:
            counts = self.umi_tools_cluster(group_umis, clustering_method)
            self.write_umis(counts, group_id_prev, umi_counts, clustering_method)
        umi_counts.write('\n')
@staticmethod
def umi_tools_cluster(group_umis, clustering_method):
# cluster a umi group with umi-tools
# | |
description by adding new port classes. Without the ability to
# extend the port class list here we would have to enumerate all
# possible combinations of ports in the base interface classes.
#
def injectAfuIfcChanges(args, afu_ifc_db, afu_ifc_req):
    #
    # Merge the AFU's requested "module-ports" updates into the AFU
    # interface database: new port classes are added wholesale, existing
    # classes may only have a restricted set of fields overridden.
    #
    fname = afu_ifc_req['file_path']
    if 'module-ports' not in afu_ifc_req:
        return
    port_updates = afu_ifc_req['module-ports']
    if not isinstance(port_updates, list):
        errorExit("module-ports is not a list in {0}".format(
            fname))
    known_ports = afu_ifc_db['module-ports']
    # Walk all the updated classes
    for port in port_updates:
        # Every port descriptor must be a dictionary carrying a class name
        if not isinstance(port, dict):
            errorExit("module-ports in {0} must be dictionaries ({1})".format(
                fname, port))
        if 'class' not in port:
            errorExit("Each module-ports must have a class in {0}".format(
                fname))
        cls = port['class']
        if cls not in known_ports:
            # New addition to the base list of ports. It must name an
            # interface.
            if 'interface' not in port:
                errorExit("module port {0} is missing 'interface' in {1}".format(
                    port, fname))
            known_ports[cls] = port
            if args.verbose:
                print((" AFU {0} adds new module-port class {1}").format(
                    fname, cls))
        else:
            # Update of a port already defined: restrict which fields the
            # AFU's JSON database may modify.
            for field in list(port.keys()):
                if field == 'class':
                    continue
                if field not in legal_afu_ifc_update_classes:
                    errorExit(
                        ("AFU may not update module-port class '{0}', " +
                         "field '{1}' ({2})").format(
                             cls, field, fname))
                if args.verbose:
                    print((" AFU {0} overrides module-port class" +
                           " '{1}', field '{2}': {3}").format(
                               fname, cls, field, port[field]))
                # Do the update
                known_ports[cls][field] = port[field]
#
# Dump a database to debug_<name>.json for debugging.
#
def emitDebugJsonDb(args, name, db):
    # Emit next to args.tgt when a target directory is configured,
    # otherwise in the current working directory.
    out_dir = args.tgt if args.tgt else ""
    fn = os.path.join(out_dir, 'debug_' + name + '.json')
    print("Writing debug {0}".format(fn))
    db.dump(fn)
#
# Dump a data structure to debug_<name>.data for debugging.
#
def emitDebugData(args, name, data):
    # Path prefix for emitting configuration files
    f_prefix = ""
    if (args.tgt):
        f_prefix = args.tgt
    fn = os.path.join(f_prefix, 'debug_' + name + '.data')
    print("Writing debug {0}".format(fn))
    # Only the open() is guarded: the old code wrapped the pprint call in
    # the same try, so a failure while *writing* produced the misleading
    # "failed to open" message. Narrowed to OSError for the same reason.
    try:
        f = open(fn, "w")
    except OSError:
        errorExit("failed to open {0} for writing.".format(fn))
    with f:
        pprint.pprint(data, stream=f, indent=4)
#
# Return a sorted list of all platform names found on the search path.
#
def findPlatforms(db_path):
    platforms = set()
    # Walk all the directories
    for db_dir in db_path:
        # Look for JSON files in each directory
        for json_file in glob.glob(os.path.join(db_dir, "*.json")):
            # Best effort: skip unreadable or invalid files and files with
            # no platform name rather than failing the whole scan. (The old
            # code used a bare "None" expression statement as the no-op and
            # caught every Exception.)
            try:
                with open(json_file, 'r') as f:
                    # Does it have a platform name field?
                    db = json.load(f)
                platforms.add(db['platform-name'])
            except (OSError, ValueError, KeyError, TypeError):
                continue
    return sorted(platforms)
#
# Return a sorted list of all AFU top-level interface names found on the
# search path.
#
def findAfuIfcs(db_path):
    afus = set()
    # Walk all the directories
    for db_dir in db_path:
        # Look for JSON files in each directory
        for json_file in glob.glob(os.path.join(db_dir, "*.json")):
            # Best effort: give up on this file if there is any read/parse
            # error. (The old code used a bare "None" expression statement
            # as the no-op and caught every Exception.)
            try:
                with open(json_file, 'r') as f:
                    db = json.load(f)
                # If it has a module-ports entry assume the file is valid
                if 'module-ports' in db:
                    base = os.path.basename(json_file)
                    afus.add(os.path.splitext(base)[0])
            except (OSError, ValueError, TypeError):
                continue
    return sorted(afus)
#
# Compute a directory search path given an environment variable name.
# The final entry on the path is set to default_dir.
#
def getSearchPath(env_name, default_dir):
    path = []
    env_val = os.environ.get(env_name)
    if env_val is not None:
        # ':' separated, empty entries dropped
        path = [p for p in env_val.split(':') if p]
    # Append the database directory shipped with a release if the release
    # contains hw/lib/platform/<default_dir>.
    plat_root = os.environ.get('OPAE_PLATFORM_ROOT')
    if plat_root is not None:
        release_db_dir = os.path.join(plat_root, 'hw', 'lib', 'platform',
                                      default_dir)
        if os.path.isdir(release_db_dir):
            path.append(release_db_dir)
    # The default directory from the OPAE SDK is always last.
    path.append(os.path.join(getDBRootPath(), default_dir))
    return path
#
# Does the release define platform components? Return the ofs_plat_if
# directory if so, otherwise None.
#
def getOfsPlatIfPath():
    # Checked in priority order: the documented variable pointing to a
    # platform release first, then the legacy alternate.
    candidates = (
        ('OPAE_PLATFORM_ROOT', 'hw/lib/build/platform/ofs_plat_if'),
        ('BBS_LIB_PATH', 'build/platform/ofs_plat_if'),
    )
    for env_name, rel_path in candidates:
        root = os.environ.get(env_name)
        if root is None:
            continue
        plat_dir = os.path.join(root.rstrip('/'), rel_path)
        if os.path.isdir(plat_dir):
            return plat_dir
    return None
def main():
# Users can extend the AFU and platform database search paths beyond
# the OPAE SDK defaults using environment variables.
afu_top_ifc_db_path = getSearchPath(
'OPAE_AFU_TOP_IFC_DB_PATH', 'afu_top_ifc_db')
platform_db_path = getSearchPath('OPAE_PLATFORM_DB_PATH', 'platform_db')
msg = '''
Given a platform and an AFU, afu_platform_config attempts to map the top-level
interfaces offered by the platform to the requirements of the AFU. If the
AFU's requirements are satisfiable, afu_platform_config emits header files
that describe the interface.
Databases describe both top-level AFU and platform interfaces. The search
paths for database files are configurable with environment variables using
standard colon separation between paths:
Platform database directories (OPAE_PLATFORM_DB_PATH):
'''
for p in platform_db_path[:-1]:
msg += ' ' + p + '\n'
msg += ' ' + platform_db_path[-1] + ' [default]\n'
platform_names = findPlatforms(platform_db_path)
if (platform_names):
msg += "\n Platforms found:\n"
for p in platform_names:
msg += ' ' + p + '\n'
msg += "\nAFU database directories (OPAE_AFU_TOP_IFC_DB_PATH):\n"
for p in afu_top_ifc_db_path[:-1]:
msg += ' ' + p + '\n'
msg += ' ' + afu_top_ifc_db_path[-1] + ' [default]\n'
afu_names = findAfuIfcs(afu_top_ifc_db_path)
if (afu_names):
msg += "\n AFU top-level interfaces found:\n"
for a in afu_names:
msg += ' ' + a + '\n'
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Match AFU top-level interface's requirements and a " +
"specific platform.",
epilog=msg)
# Positional arguments
parser.add_argument(
"platform",
help="""Either the name of a platform or the name of a platform
JSON file. If the argument is a platform name, the
platform JSON file will be loaded from the platform
database directory search path (see below).""")
parser.add_argument(
"-t", "--tgt",
help="""Target directory to which configuration files will be written.
Defaults to current working directory.""")
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-i", "--ifc",
help="""The AFU's top-level interface class or the full pathname of a
JSON top-level interface descriptor. (E.g. ccip_std_afu)""")
group.add_argument(
"-s", "--src",
help="""The AFU sources, where a JSON file that specifies the AFU's
top-level interface is found. Use either the --ifc argument
or this one, but not both. The argument may either be the
full path of a JSON file describing the application or the
argument may be a directory in which the JSON file is found.
If the argument is a directory, there must be exactly one
JSON file in the directory.""")
parser.add_argument(
"--default-ifc",
help="""The default top-level interface class if no interface is
specified in the AFU's JSON descriptor.""")
# Pick a default platform interface RTL tree. Start by looking for
# the OFS platform tree in the currently configured release.
ofs_plat_if_default = getOfsPlatIfPath()
# If there is no current release or the release is old and does not
# provide an OFS platform interface tree then resort to the interface
# defined in the OPAE SDK.
if_default = ofs_plat_if_default
if not if_default:
if_default = os.path.join(getDBRootPath(), "platform_if")
parser.add_argument(
"--platform_if", default=if_default,
help="""The directory containing AFU top-level SystemVerilog
interfaces. (Default: """ + if_default + ")")
group = parser.add_mutually_exclusive_group()
group.add_argument("--sim",
action="store_true",
default=False,
help="""Emit a configuration for RTL simulation.""")
group.add_argument("--qsf",
action="store_true",
default=True,
help="""Emit a configuration for Quartus. (default)""")
parser.add_argument(
"--debug", action='store_true', default=False, help=argparse.SUPPRESS)
# Verbose/quiet
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-v", "--verbose", action="store_true", help="Verbose output")
group.add_argument(
"-q", "--quiet", action="store_true", help="Reduce output")
args = parser.parse_args()
if (args.sim):
args.qsf = False
# Get the AFU top-level interface request, either from the command
# line or from the AFU source's JSON descriptor.
afu_ifc_req = getAfuIfc(args)
# If the AFU interface is ofs_plat_if then there must be a platform
# defined and that platform must provide an ofs_plat_if tree.
if (afu_ifc_req['class'] == 'ofs_plat_afu' and not ofs_plat_if_default):
errorExit("AFU is type 'ofs_plat_afu' but the release is either " +
"not defined or does not provide\n" +
" an ofs_plat_if.\n\n" +
" *** Either OPAE_PLATFORM_ROOT is not set correctly " +
"or the release at\n" +
" *** $OPAE_PLATFORM_ROOT is missing the directory " +
"hw/lib/build/platform/ofs_plat_if.")
# Now that arguments are parsed, canonicalize the ofs_plat_if. If it
# hasn't been changed from the default then make the path relative
# to the platform directory. The tree will be copied into the AFU's build
# tree. Using | |
)
if size.width() >= 0: widget.setMinimumWidth( size.width() )
if size.height() >= 0: widget.setMinimumHeight( size.height() )
class StatusBar( QW.QStatusBar ):
    # A status bar made of one QLabel cell per entry in status_widths.
    # A negative width is interpreted as a stretch factor; a non-negative
    # width is a fixed pixel width for that cell.
    def __init__( self, status_widths ):
        QW.QStatusBar.__init__( self )
        self._labels = []
        for width in status_widths:
            cell = QW.QLabel()
            self._labels.append( cell )
            if width < 0:
                # negative widths are stretch factors
                self.addWidget( cell, width * -1 )
            else:
                cell.setFixedWidth( width )
                self.addWidget( cell )
    def SetStatusText( self, text, index, tooltip = None ):
        # Update the cell at index, skipping redundant Qt calls.
        if tooltip is None:
            tooltip = text
        cell = self._labels[ index ]
        if text != cell.text():
            cell.setText( text )
        if tooltip != cell.toolTip():
            cell.setToolTip( tooltip )
class AboutDialogInfo:
    # Plain value holder for the fields an AboutBox displays. All fields
    # start empty and are filled in through the Set* methods.
    def __init__( self ):
        self.name = ''
        self.version = ''
        self.description = ''
        self.license = ''
        self.developers = []
        self.website = ''
    def SetName( self, name ):
        # Display name of the program.
        self.name = name
    def SetVersion( self, version ):
        # Version string shown under the name.
        self.version = version
    def SetDescription( self, description ):
        # Free text for the Description tab.
        self.description = description
    def SetLicense( self, license ):
        # Full license text for the License tab.
        self.license = license
    def SetDevelopers( self, developers_list ):
        # List of developer names for the Credits tab.
        self.developers = developers_list
    def SetWebSite( self, url ):
        # Project homepage URL.
        self.website = url
class UIActionSimulator:
    # Posts synthetic keyboard events to widgets for UI automation.
    def __init__( self ):
        pass
    def Char( self, widget, key, text = None ):
        # Send a full key press/release pair to widget, or to the currently
        # focused widget when widget is None.
        if widget is None:
            widget = QW.QApplication.focusWidget()
        app = QW.QApplication.instance()
        for event_type in ( QC.QEvent.KeyPress, QC.QEvent.KeyRelease ):
            event = QG.QKeyEvent( event_type, key, QC.Qt.NoModifier, text = text )
            app.postEvent( widget, event )
class AboutBox( QW.QDialog ):
    # Modal 'About' dialog: program icon, name and version, plus a tabbed
    # description/credits/license area. It runs itself with exec_() at the
    # end of construction.
    def __init__( self, parent, about_info ):
        QW.QDialog.__init__( self, parent )
        self.setWindowFlag( QC.Qt.WindowContextHelpButtonHint, on = False )
        self.setAttribute( QC.Qt.WA_DeleteOnClose )
        self.setWindowIcon( QG.QIcon( HG.client_controller.frame_icon_pixmap ) )
        layout = QW.QVBoxLayout( self )
        self.setWindowTitle( 'About ' + about_info.name )
        icon_label = QW.QLabel( self )
        name_label = QW.QLabel( about_info.name, self )
        version_label = QW.QLabel( about_info.version, self )
        tabwidget = QW.QTabWidget( self )
        desc_panel = QW.QWidget( self )
        desc_label = QW.QLabel( about_info.description, self )
        url_label = QW.QLabel( '<a href="{0}">{0}</a>'.format( about_info.website ), self )
        # renamed from 'credits'/'license' so the locals no longer shadow
        # the Python builtins of the same name
        credits_edit = QW.QTextEdit( self )
        license_edit = QW.QTextEdit( self )
        close_button = QW.QPushButton( 'close', self )
        icon_label.setPixmap( HG.client_controller.frame_icon_pixmap )
        layout.addWidget( icon_label, alignment = QC.Qt.AlignHCenter )
        name_label_font = name_label.font()
        name_label_font.setBold( True )
        name_label.setFont( name_label_font )
        layout.addWidget( name_label, alignment = QC.Qt.AlignHCenter )
        layout.addWidget( version_label, alignment = QC.Qt.AlignHCenter )
        layout.addWidget( tabwidget, alignment = QC.Qt.AlignHCenter )
        tabwidget.addTab( desc_panel, 'Description' )
        tabwidget.addTab( credits_edit, 'Credits' )
        tabwidget.addTab( license_edit, 'License' )
        tabwidget.setCurrentIndex( 0 )
        credits_edit.setPlainText( 'Created by ' + ', '.join( about_info.developers ) )
        credits_edit.setReadOnly( True )
        credits_edit.setAlignment( QC.Qt.AlignHCenter )
        license_edit.setPlainText( about_info.license )
        license_edit.setReadOnly( True )
        desc_layout = QW.QVBoxLayout()
        desc_layout.addWidget( desc_label, alignment = QC.Qt.AlignHCenter )
        desc_label.setWordWrap( True )
        desc_label.setAlignment( QC.Qt.AlignHCenter | QC.Qt.AlignVCenter )
        desc_layout.addWidget( url_label, alignment = QC.Qt.AlignHCenter )
        url_label.setTextFormat( QC.Qt.RichText )
        url_label.setTextInteractionFlags( QC.Qt.TextBrowserInteraction )
        url_label.setOpenExternalLinks( True )
        desc_panel.setLayout( desc_layout )
        layout.addWidget( close_button, alignment = QC.Qt.AlignRight )
        close_button.clicked.connect( self.accept )
        self.setLayout( layout )
        self.exec_()
class RadioBox( QW.QFrame ):
    # A framed row (or column) of QRadioButtons, one per choice string.
    radioBoxChanged = QC.Signal()
    def __init__( self, parent = None, choices = None, vertical = False ):
        # BUGFIX: the default was the shared mutable 'choices = []';
        # None-sentinel keeps the same behaviour without the pitfall.
        QW.QFrame.__init__( self, parent )
        self.setFrameStyle( QW.QFrame.Box | QW.QFrame.Raised )
        if vertical:
            self.setLayout( VBoxLayout() )
        else:
            self.setLayout( HBoxLayout() )
        self._choices = []
        if choices is None:
            choices = ()
        for choice in choices:
            radiobutton = QW.QRadioButton( choice, self )
            self._choices.append( radiobutton )
            radiobutton.clicked.connect( self.radioBoxChanged )
            self.layout().addWidget( radiobutton )
        # default selection: first button when vertical, last when horizontal
        if vertical and len( self._choices ):
            self._choices[0].setChecked( True )
        elif len( self._choices ):
            self._choices[-1].setChecked( True )
    def GetCurrentIndex( self ):
        # Index of the checked button, or -1 if none is checked.
        for i, choice in enumerate( self._choices ):
            if choice.isChecked():
                return i
        return -1
    def SetStringSelection( self, str ):
        # Check the first button whose label equals the given string.
        # NOTE: the parameter name shadows the builtin 'str'; kept as-is
        # for compatibility with any keyword callers.
        for choice in self._choices:
            if choice.text() == str:
                choice.setChecked( True )
                return
    def GetStringSelection( self ):
        # Label of the checked button, or None if none is checked.
        for choice in self._choices:
            if choice.isChecked():
                return choice.text()
        return None
    def SetValue( self, data ):
        # Intentionally a no-op: RadioBox has no generic settable value.
        pass
    def Select( self, idx ):
        self._choices[ idx ].setChecked( True )
# Adapted from https://doc.qt.io/qt-5/qtwidgets-widgets-elidedlabel-example.html
class EllipsizedLabel( QW.QLabel ):
    # A QLabel that, when ellipsize_end is True, elides each line of its
    # text on the right ('...') instead of growing to fit long strings.
    def __init__( self, parent = None, ellipsize_end = False ):
        QW.QLabel.__init__( self, parent )
        self._ellipsize_end = ellipsize_end
    def minimumSizeHint( self ):
        # When eliding, the label may shrink down to the (small) sizeHint
        # below; otherwise defer to default QLabel behaviour.
        if self._ellipsize_end:
            return self.sizeHint()
        else:
            return QW.QLabel.minimumSizeHint( self )
    def setText( self, text ):
        # QLabel.setText can raise ValueError on some unusual strings;
        # fall back to the repr so something is always shown.
        try:
            QW.QLabel.setText( self, text )
        except ValueError:
            QW.QLabel.setText( self, repr( text ) )
        self.update()
    def sizeHint( self ):
        if self._ellipsize_end:
            num_lines = self.text().count( '\n' ) + 1
            # NOTE(review): QFontMetrics.lineWidth() is the underline/
            # strikeout line width, not a text width -- presumably used
            # here as a deliberately tiny width hint so the label can be
            # shrunk freely; confirm before changing.
            line_width = self.fontMetrics().lineWidth()
            line_height = self.fontMetrics().lineSpacing()
            size_hint = QC.QSize( 3 * line_width, num_lines * line_height )
        else:
            size_hint = QW.QLabel.sizeHint( self )
        return size_hint
    def paintEvent( self, event ):
        # Default painting unless we are eliding.
        if not self._ellipsize_end:
            QW.QLabel.paintEvent( self, event )
            return
        painter = QG.QPainter( self )
        fontMetrics = painter.fontMetrics()
        text_lines = self.text().split( '\n' )
        line_spacing = fontMetrics.lineSpacing()
        current_y = 0
        done = False  # only referenced by the retired wrap code kept below
        my_width = self.width()
        for text_line in text_lines:
            # elide each line independently to the label's current width
            elided_line = fontMetrics.elidedText( text_line, QC.Qt.ElideRight, my_width )
            x = 0
            width = my_width
            height = line_spacing
            flags = self.alignment()
            painter.drawText( x, current_y, width, height, flags, elided_line )
            # old hacky line that doesn't support alignment flags
            #painter.drawText( QC.QPoint( 0, current_y + fontMetrics.ascent() ), elided_line )
            current_y += line_spacing
        # old code that did multiline wrap width stuff
        '''
        text_layout = QG.QTextLayout( text_line, painter.font() )
        text_layout.beginLayout()
        while True:
            line = text_layout.createLine()
            if not line.isValid(): break
            line.setLineWidth( self.width() )
            next_line_y = y + line_spacing
            if self.height() >= next_line_y + line_spacing:
                line.draw( painter, QC.QPoint( 0, y ) )
                y = next_line_y
            else:
                last_line = text_line[ line.textStart(): ]
                elided_last_line = fontMetrics.elidedText( last_line, QC.Qt.ElideRight, self.width() )
                painter.drawText( QC.QPoint( 0, y + fontMetrics.ascent() ), elided_last_line )
                done = True
                break
        text_layout.endLayout()
        if done: break
        '''
class Dialog( QW.QDialog ):
    # QDialog subclass that remembers whether the user dismissed it via
    # the titlebar X, and cleans itself up when used as a context manager.
    def __init__( self, parent = None, **kwargs ):
        title = kwargs.pop( 'title', None )
        QW.QDialog.__init__( self, parent, **kwargs )
        self.setWindowFlag( QC.Qt.WindowContextHelpButtonHint, on = False )
        if title is not None:
            self.setWindowTitle( title )
        self._closed_by_user = False
    def closeEvent( self, event ):
        # A spontaneous close event comes from the window system, i.e. the
        # user clicked the titlebar X rather than code calling close().
        if event.spontaneous():
            self._closed_by_user = True
        QW.QDialog.closeEvent( self, event )
    # True if the dialog was closed by the user clicking the X on the
    # titlebar (so neither reject nor accept was chosen -- the dialog
    # result is still reject in this case though)
    def WasCancelled( self ):
        return self._closed_by_user
    def SetCancelled( self, closed ):
        self._closed_by_user = closed
    def __enter__( self ):
        return self
    def __exit__( self, exc_type, exc_val, exc_tb ):
        if isValid( self ):
            self.deleteLater()
class PasswordEntryDialog( Dialog ):
    # Small modal dialog asking the user for a password.
    def __init__( self, parent, message, caption ):
        Dialog.__init__( self, parent )
        self.setWindowTitle( caption )
        self._ok_button = QW.QPushButton( 'OK', self )
        self._ok_button.clicked.connect( self.accept )
        self._cancel_button = QW.QPushButton( 'Cancel', self )
        self._cancel_button.clicked.connect( self.reject )
        self._password = QW.QLineEdit( self )
        self._password.setEchoMode( QW.QLineEdit.Password )
        self.setLayout( QW.QVBoxLayout() )
        # prompt text next to the masked entry field
        prompt_row = QW.QHBoxLayout()
        prompt_row.addWidget( QW.QLabel( message, self ) )
        prompt_row.addWidget( self._password )
        # buttons pushed to the right edge
        buttons_row = QW.QHBoxLayout()
        buttons_row.addStretch( 1 )
        buttons_row.addWidget( self._cancel_button )
        buttons_row.addWidget( self._ok_button )
        self.layout().addLayout( prompt_row )
        self.layout().addLayout( buttons_row )
    def GetValue( self ):
        # The password as typed.
        return self._password.text()
class DirDialog( QW.QFileDialog ):
def __init__( self, parent = | |
<reponame>Julian/cardboard
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Words of Wind")
def words_of_wind(card, abilities):
def words_of_wind():
return AbilityNotImplemented
return words_of_wind,
@card("Discombobulate")
def discombobulate(card, abilities):
def discombobulate():
return AbilityNotImplemented
return discombobulate,
@card("Foothill Guide")
def foothill_guide(card, abilities):
def foothill_guide():
return AbilityNotImplemented
def foothill_guide():
return AbilityNotImplemented
return foothill_guide, foothill_guide,
@card("Spitfire Handler")
def spitfire_handler(card, abilities):
def spitfire_handler():
return AbilityNotImplemented
def spitfire_handler():
return AbilityNotImplemented
return spitfire_handler, spitfire_handler,
@card("Imagecrafter")
def imagecrafter(card, abilities):
def imagecrafter():
return AbilityNotImplemented
return imagecrafter,
@card("Airborne Aid")
def airborne_aid(card, abilities):
def airborne_aid():
return AbilityNotImplemented
return airborne_aid,
@card("Wirewood Herald")
def wirewood_herald(card, abilities):
def wirewood_herald():
return AbilityNotImplemented
return wirewood_herald,
@card("Krosan Colossus")
def krosan_colossus(card, abilities):
def krosan_colossus():
return AbilityNotImplemented
return krosan_colossus,
@card("Shepherd of Rot")
def shepherd_of_rot(card, abilities):
def shepherd_of_rot():
return AbilityNotImplemented
return shepherd_of_rot,
@card("Catapult Squad")
def catapult_squad(card, abilities):
def catapult_squad():
return AbilityNotImplemented
return catapult_squad,
@card("Blistering Firecat")
def blistering_firecat(card, abilities):
def blistering_firecat():
return AbilityNotImplemented
def blistering_firecat():
return AbilityNotImplemented
def blistering_firecat():
return AbilityNotImplemented
return blistering_firecat, blistering_firecat, blistering_firecat,
@card("Gravespawn Sovereign")
def gravespawn_sovereign(card, abilities):
def gravespawn_sovereign():
return AbilityNotImplemented
return gravespawn_sovereign,
@card("Frightshroud Courier")
def frightshroud_courier(card, abilities):
def frightshroud_courier():
return AbilityNotImplemented
def frightshroud_courier():
return AbilityNotImplemented
return frightshroud_courier, frightshroud_courier,
@card("Words of Wilding")
def words_of_wilding(card, abilities):
def words_of_wilding():
return AbilityNotImplemented
return words_of_wilding,
@card("Catapult Master")
def catapult_master(card, abilities):
def catapult_master():
return AbilityNotImplemented
return catapult_master,
@card("Break Open")
def break_open(card, abilities):
def break_open():
return AbilityNotImplemented
return break_open,
@card("Complicate")
def complicate(card, abilities):
def complicate():
return AbilityNotImplemented
def complicate():
return AbilityNotImplemented
def complicate():
return AbilityNotImplemented
return complicate, complicate, complicate,
@card("Goblin Sharpshooter")
def goblin_sharpshooter(card, abilities):
def goblin_sharpshooter():
return AbilityNotImplemented
def goblin_sharpshooter():
return AbilityNotImplemented
def goblin_sharpshooter():
return AbilityNotImplemented
return goblin_sharpshooter, goblin_sharpshooter, goblin_sharpshooter,
@card("Backslide")
def backslide(card, abilities):
def backslide():
return AbilityNotImplemented
def backslide():
return AbilityNotImplemented
return backslide, backslide,
@card("Pearlspear Courier")
def pearlspear_courier(card, abilities):
def pearlspear_courier():
return AbilityNotImplemented
def pearlspear_courier():
return AbilityNotImplemented
return pearlspear_courier, pearlspear_courier,
@card("Supreme Inquisitor")
def supreme_inquisitor(card, abilities):
def supreme_inquisitor():
return AbilityNotImplemented
return supreme_inquisitor,
@card("Wooded Foothills")
def wooded_foothills(card, abilities):
def wooded_foothills():
return AbilityNotImplemented
return wooded_foothills,
@card("Inspirit")
def inspirit(card, abilities):
def inspirit():
return AbilityNotImplemented
return inspirit,
@card("Mage's Guile")
def mages_guile(card, abilities):
def mages_guile():
return AbilityNotImplemented
def mages_guile():
return AbilityNotImplemented
return mages_guile, mages_guile,
@card("Essence Fracture")
def essence_fracture(card, abilities):
def essence_fracture():
return AbilityNotImplemented
def essence_fracture():
return AbilityNotImplemented
return essence_fracture, essence_fracture,
@card("Infest")
def infest(card, abilities):
def infest():
return AbilityNotImplemented
return infest,
@card("Bloodstained Mire")
def bloodstained_mire(card, abilities):
def bloodstained_mire():
return AbilityNotImplemented
return bloodstained_mire,
@card("Elvish Pathcutter")
def elvish_pathcutter(card, abilities):
def elvish_pathcutter():
return AbilityNotImplemented
return elvish_pathcutter,
@card("Rotlung Reanimator")
def rotlung_reanimator(card, abilities):
def rotlung_reanimator():
return AbilityNotImplemented
return rotlung_reanimator,
@card("Gluttonous Zombie")
def gluttonous_zombie(card, abilities):
def gluttonous_zombie():
return AbilityNotImplemented
return gluttonous_zombie,
@card("Crown of Ascension")
def crown_of_ascension(card, abilities):
def crown_of_ascension():
return AbilityNotImplemented
def crown_of_ascension():
return AbilityNotImplemented
def crown_of_ascension():
return AbilityNotImplemented
return crown_of_ascension, crown_of_ascension, crown_of_ascension,
@card("Birchlore Rangers")
def birchlore_rangers(card, abilities):
def birchlore_rangers():
return AbilityNotImplemented
def birchlore_rangers():
return AbilityNotImplemented
return birchlore_rangers, birchlore_rangers,
@card("Ravenous Baloth")
def ravenous_baloth(card, abilities):
def ravenous_baloth():
return AbilityNotImplemented
return ravenous_baloth,
@card("Thrashing Mudspawn")
def thrashing_mudspawn(card, abilities):
def thrashing_mudspawn():
return AbilityNotImplemented
def thrashing_mudspawn():
return AbilityNotImplemented
return thrashing_mudspawn, thrashing_mudspawn,
@card("Kaboom!")
def kaboom(card, abilities):
def kaboom():
return AbilityNotImplemented
return kaboom,
@card("Gustcloak Runner")
def gustcloak_runner(card, abilities):
def gustcloak_runner():
return AbilityNotImplemented
return gustcloak_runner,
@card("Mistform Wall")
def mistform_wall(card, abilities):
def mistform_wall():
return AbilityNotImplemented
def mistform_wall():
return AbilityNotImplemented
return mistform_wall, mistform_wall,
@card("Forgotten Cave")
def forgotten_cave(card, abilities):
def forgotten_cave():
return AbilityNotImplemented
def forgotten_cave():
return AbilityNotImplemented
def forgotten_cave():
return AbilityNotImplemented
return forgotten_cave, forgotten_cave, forgotten_cave,
@card("Silent Specter")
def silent_specter(card, abilities):
def silent_specter():
return AbilityNotImplemented
def silent_specter():
return AbilityNotImplemented
def silent_specter():
return AbilityNotImplemented
return silent_specter, silent_specter, silent_specter,
@card("Chain of Smog")
def chain_of_smog(card, abilities):
def chain_of_smog():
return AbilityNotImplemented
return chain_of_smog,
@card("Rummaging Wizard")
def rummaging_wizard(card, abilities):
def rummaging_wizard():
return AbilityNotImplemented
return rummaging_wizard,
@card("Slipstream Eel")
def slipstream_eel(card, abilities):
def slipstream_eel():
return AbilityNotImplemented
def slipstream_eel():
return AbilityNotImplemented
return slipstream_eel, slipstream_eel,
@card("Thoughtbound Primoc")
def thoughtbound_primoc(card, abilities):
def thoughtbound_primoc():
return AbilityNotImplemented
def thoughtbound_primoc():
return AbilityNotImplemented
return thoughtbound_primoc, thoughtbound_primoc,
@card("Goblin Sledder")
def goblin_sledder(card, abilities):
def goblin_sledder():
return AbilityNotImplemented
return goblin_sledder,
@card("Screeching Buzzard")
def screeching_buzzard(card, abilities):
def screeching_buzzard():
return AbilityNotImplemented
def screeching_buzzard():
return AbilityNotImplemented
return screeching_buzzard, screeching_buzzard,
@card("Biorhythm")
def biorhythm(card, abilities):
def biorhythm():
return AbilityNotImplemented
return biorhythm,
@card("Goblin Machinist")
def goblin_machinist(card, abilities):
def goblin_machinist():
return AbilityNotImplemented
return goblin_machinist,
@card("Dive Bomber")
def dive_bomber(card, abilities):
def dive_bomber():
return AbilityNotImplemented
def dive_bomber():
return AbilityNotImplemented
return dive_bomber, dive_bomber,
@card("Death Match")
def death_match(card, abilities):
def death_match():
return AbilityNotImplemented
return death_match,
@card("False Cure")
def false_cure(card, abilities):
def false_cure():
return AbilityNotImplemented
return false_cure,
@card("Harsh Mercy")
def harsh_mercy(card, abilities):
def harsh_mercy():
return AbilityNotImplemented
return harsh_mercy,
@card("Symbiotic Elf")
def symbiotic_elf(card, abilities):
def symbiotic_elf():
return AbilityNotImplemented
return symbiotic_elf,
@card("Nantuko Husk")
def nantuko_husk(card, abilities):
def nantuko_husk():
return AbilityNotImplemented
return nantuko_husk,
@card("Goblin Sky Raider")
def goblin_sky_raider(card, abilities):
def goblin_sky_raider():
return AbilityNotImplemented
return goblin_sky_raider,
@card("Spined Basher")
def spined_basher(card, abilities):
def spined_basher():
return AbilityNotImplemented
return spined_basher,
@card("Fleeting Aven")
def fleeting_aven(card, abilities):
def fleeting_aven():
return AbilityNotImplemented
def fleeting_aven():
return AbilityNotImplemented
return fleeting_aven, fleeting_aven,
@card("Accursed Centaur")
def accursed_centaur(card, abilities):
def accursed_centaur():
return AbilityNotImplemented
return accursed_centaur,
@card("Crown of Fury")
def crown_of_fury(card, abilities):
def crown_of_fury():
return AbilityNotImplemented
def crown_of_fury():
return AbilityNotImplemented
def crown_of_fury():
return AbilityNotImplemented
return crown_of_fury, crown_of_fury, crown_of_fury,
# NOTE(review): apparently generated card-ability stubs. Each @card-decorated
# function receives the card object and its parsed abilities and returns one
# callable per printed ability. The nested defs deliberately reuse the outer
# name, so the returned tuple repeats the same (last-defined) stub object --
# harmless while every stub is an identical AbilityNotImplemented placeholder.
@card("Flooded Strand")
def flooded_strand(card, abilities):
    def flooded_strand():
        return AbilityNotImplemented
    return flooded_strand,
@card("Mythic Proportions")
def mythic_proportions(card, abilities):
    def mythic_proportions():
        return AbilityNotImplemented
    def mythic_proportions():
        return AbilityNotImplemented
    return mythic_proportions, mythic_proportions,
@card("Psychic Trance")
def psychic_trance(card, abilities):
    def psychic_trance():
        return AbilityNotImplemented
    return psychic_trance,
@card("Callous Oppressor")
def callous_oppressor(card, abilities):
    def callous_oppressor():
        return AbilityNotImplemented
    def callous_oppressor():
        return AbilityNotImplemented
    def callous_oppressor():
        return AbilityNotImplemented
    return callous_oppressor, callous_oppressor, callous_oppressor,
@card("Doomed Necromancer")
def doomed_necromancer(card, abilities):
    def doomed_necromancer():
        return AbilityNotImplemented
    return doomed_necromancer,
@card("Convalescent Care")
def convalescent_care(card, abilities):
    def convalescent_care():
        return AbilityNotImplemented
    return convalescent_care,
@card("Broodhatch Nantuko")
def broodhatch_nantuko(card, abilities):
    def broodhatch_nantuko():
        return AbilityNotImplemented
    def broodhatch_nantuko():
        return AbilityNotImplemented
    return broodhatch_nantuko, broodhatch_nantuko,
@card("Gangrenous Goliath")
def gangrenous_goliath(card, abilities):
    def gangrenous_goliath():
        return AbilityNotImplemented
    return gangrenous_goliath,
@card("Mistform Stalker")
def mistform_stalker(card, abilities):
    def mistform_stalker():
        return AbilityNotImplemented
    def mistform_stalker():
        return AbilityNotImplemented
    return mistform_stalker, mistform_stalker,
@card("Contested Cliffs")
def contested_cliffs(card, abilities):
    def contested_cliffs():
        return AbilityNotImplemented
    def contested_cliffs():
        return AbilityNotImplemented
    return contested_cliffs, contested_cliffs,
@card("Skirk Prospector")
def skirk_prospector(card, abilities):
    def skirk_prospector():
        return AbilityNotImplemented
    return skirk_prospector,
# Generated card-ability stubs: one nested placeholder per printed ability,
# each returning AbilityNotImplemented until the ability is implemented.
@card("Elvish Guidance")
def elvish_guidance(card, abilities):
    def elvish_guidance():
        return AbilityNotImplemented
    def elvish_guidance():
        return AbilityNotImplemented
    return elvish_guidance, elvish_guidance,
@card("Wretched Anurid")
def wretched_anurid(card, abilities):
    def wretched_anurid():
        return AbilityNotImplemented
    return wretched_anurid,
@card("Trickery Charm")
def trickery_charm(card, abilities):
    def trickery_charm():
        return AbilityNotImplemented
    return trickery_charm,
@card("Snapping Thragg")
def snapping_thragg(card, abilities):
    def snapping_thragg():
        return AbilityNotImplemented
    def snapping_thragg():
        return AbilityNotImplemented
    return snapping_thragg, snapping_thragg,
@card("Nosy Goblin")
def nosy_goblin(card, abilities):
    def nosy_goblin():
        return AbilityNotImplemented
    return nosy_goblin,
@card("Fallen Cleric")
def fallen_cleric(card, abilities):
    def fallen_cleric():
        return AbilityNotImplemented
    def fallen_cleric():
        return AbilityNotImplemented
    return fallen_cleric, fallen_cleric,
@card("Demystify")
def demystify(card, abilities):
    def demystify():
        return AbilityNotImplemented
    return demystify,
@card("Thunder of Hooves")
def thunder_of_hooves(card, abilities):
    def thunder_of_hooves():
        return AbilityNotImplemented
    return thunder_of_hooves,
@card("Grand Melee")
def grand_melee(card, abilities):
    def grand_melee():
        return AbilityNotImplemented
    def grand_melee():
        return AbilityNotImplemented
    return grand_melee, grand_melee,
@card("Sea's Claim")
def seas_claim(card, abilities):
    def seas_claim():
        return AbilityNotImplemented
    def seas_claim():
        return AbilityNotImplemented
    return seas_claim, seas_claim,
@card("Kamahl's Summons")
def kamahls_summons(card, abilities):
    def kamahls_summons():
        return AbilityNotImplemented
    return kamahls_summons,
# Generated card-ability stubs: one nested placeholder per printed ability,
# each returning AbilityNotImplemented until the ability is implemented.
@card("Nameless One")
def nameless_one(card, abilities):
    def nameless_one():
        return AbilityNotImplemented
    def nameless_one():
        return AbilityNotImplemented
    return nameless_one, nameless_one,
@card("Mistform Skyreaver")
def mistform_skyreaver(card, abilities):
    def mistform_skyreaver():
        return AbilityNotImplemented
    def mistform_skyreaver():
        return AbilityNotImplemented
    return mistform_skyreaver, mistform_skyreaver,
@card("Wirewood Pride")
def wirewood_pride(card, abilities):
    def wirewood_pride():
        return AbilityNotImplemented
    return wirewood_pride,
@card("Elvish Pioneer")
def elvish_pioneer(card, abilities):
    def elvish_pioneer():
        return AbilityNotImplemented
    return elvish_pioneer,
@card("Read the Runes")
def read_the_runes(card, abilities):
    def read_the_runes():
        return AbilityNotImplemented
    return read_the_runes,
@card("Ebonblade Reaper")
def ebonblade_reaper(card, abilities):
    def ebonblade_reaper():
        return AbilityNotImplemented
    def ebonblade_reaper():
        return AbilityNotImplemented
    def ebonblade_reaper():
        return AbilityNotImplemented
    return ebonblade_reaper, ebonblade_reaper, ebonblade_reaper,
@card("Symbiotic Beast")
def symbiotic_beast(card, abilities):
    def symbiotic_beast():
        return AbilityNotImplemented
    return symbiotic_beast,
@card("Disciple of Malice")
def disciple_of_malice(card, abilities):
    def disciple_of_malice():
        return AbilityNotImplemented
    def disciple_of_malice():
        return AbilityNotImplemented
    return disciple_of_malice, disciple_of_malice,
@card("Misery Charm")
def misery_charm(card, abilities):
    def misery_charm():
        return AbilityNotImplemented
    return misery_charm,
@card("Cabal Archon")
def cabal_archon(card, abilities):
    def cabal_archon():
        return AbilityNotImplemented
    return cabal_archon,
@card("Nova Cleric")
def nova_cleric(card, abilities):
    def nova_cleric():
        return AbilityNotImplemented
    return nova_cleric,
# Generated card-ability stubs: one nested placeholder per printed ability,
# each returning AbilityNotImplemented until the ability is implemented.
@card("Silvos, Rogue Elemental")
def silvos_rogue_elemental(card, abilities):
    def silvos_rogue_elemental():
        return AbilityNotImplemented
    def silvos_rogue_elemental():
        return AbilityNotImplemented
    return silvos_rogue_elemental, silvos_rogue_elemental,
@card("Grand Coliseum")
def grand_coliseum(card, abilities):
    def grand_coliseum():
        return AbilityNotImplemented
    def grand_coliseum():
        return AbilityNotImplemented
    def grand_coliseum():
        return AbilityNotImplemented
    return grand_coliseum, grand_coliseum, grand_coliseum,
@card("Astral Slide")
def astral_slide(card, abilities):
    def astral_slide():
        return AbilityNotImplemented
    return astral_slide,
@card("Aurification")
def aurification(card, abilities):
    def aurification():
        return AbilityNotImplemented
    def aurification():
        return AbilityNotImplemented
    def aurification():
        return AbilityNotImplemented
    return aurification, aurification, aurification,
@card("Daunting Defender")
def daunting_defender(card, abilities):
    def daunting_defender():
        return AbilityNotImplemented
    return daunting_defender,
@card("Crown of Vigor")
def crown_of_vigor(card, abilities):
    def crown_of_vigor():
        return AbilityNotImplemented
    def crown_of_vigor():
        return AbilityNotImplemented
    def crown_of_vigor():
        return AbilityNotImplemented
    return crown_of_vigor, crown_of_vigor, crown_of_vigor,
@card("Grinning Demon")
def grinning_demon(card, abilities):
    def grinning_demon():
        return AbilityNotImplemented
    def grinning_demon():
        return AbilityNotImplemented
    return grinning_demon, grinning_demon,
@card("Spurred Wolverine")
def spurred_wolverine(card, abilities):
    def spurred_wolverine():
        return AbilityNotImplemented
    return spurred_wolverine,
@card("Weathered Wayfarer")
def weathered_wayfarer(card, abilities):
    def weathered_wayfarer():
        return AbilityNotImplemented
    return weathered_wayfarer,
@card("Aura Extraction")
def aura_extraction(card, abilities):
    def aura_extraction():
        return AbilityNotImplemented
    def aura_extraction():
        return AbilityNotImplemented
    return aura_extraction, aura_extraction,
# Generated card-ability stubs: one nested placeholder per printed ability,
# each returning AbilityNotImplemented until the ability is implemented.
@card("Aggravated Assault")
def aggravated_assault(card, abilities):
    def aggravated_assault():
        return AbilityNotImplemented
    return aggravated_assault,
@card("Goblin Pyromancer")
def goblin_pyromancer(card, abilities):
    def goblin_pyromancer():
        return AbilityNotImplemented
    def goblin_pyromancer():
        return AbilityNotImplemented
    return goblin_pyromancer, goblin_pyromancer,
@card("Chain of Acid")
def chain_of_acid(card, abilities):
    def chain_of_acid():
        return AbilityNotImplemented
    return chain_of_acid,
@card("Daru Lancer")
def daru_lancer(card, abilities):
    def daru_lancer():
        return AbilityNotImplemented
    def daru_lancer():
        return AbilityNotImplemented
    return daru_lancer, daru_lancer,
@card("Mistform Mask")
def mistform_mask(card, abilities):
    def mistform_mask():
        return AbilityNotImplemented
    def mistform_mask():
        return AbilityNotImplemented
    return mistform_mask, mistform_mask,
@card("Patriarch's Bidding")
def patriarchs_bidding(card, abilities):
    def patriarchs_bidding():
        return AbilityNotImplemented
    return patriarchs_bidding,
@card("Sage Aven")
def sage_aven(card, abilities):
def sage_aven():
return AbilityNotImplemented
def sage_aven():
return AbilityNotImplemented
| |
row = layout.row(align=True, heading="Evaluation Time")
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_eval_time", text="")
subsub = sub.row(align=True)
subsub.active = con.use_eval_time
subsub.prop(con, "eval_time", text="")
row.prop_decorator(con, "eval_time")
layout.prop(con, "mix_mode", text="Mix")
self.draw_influence(layout, con)
def draw_lock_track(self, context):
    """Draw UI for the Locked Track constraint: target plus track/lock axes."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    layout.prop(con, "track_axis", expand=True)
    layout.prop(con, "lock_axis", expand=True)
    self.draw_influence(layout, con)

def draw_dist_limit(self, context):
    """Draw UI for the Limit Distance constraint."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    row = layout.row()
    row.prop(con, "distance")
    # Reset button rendered inline next to the distance field.
    row.operator("constraint.limitdistance_reset", text="", icon="X")
    layout.prop(con, "limit_mode", text="Clamp Region")
    layout.prop(con, "use_transform_limit")
    self.space_template(layout, con)
    self.draw_influence(layout, con)
def draw_stretch_to(self, context):
    """Draw UI for the Stretch To constraint.

    The volume-min/max rows pair a checkbox with a value field that is
    greyed out until the checkbox is enabled.
    """
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    row = layout.row()
    row.prop(con, "rest_length")
    # Reset button rendered inline next to the rest-length field.
    row.operator("constraint.stretchto_reset", text="", icon="X")
    layout.separator()
    col = layout.column()
    col.prop(con, "bulge", text="Volume Variation")
    row = col.row(heading="Volume Min", align=True)
    row.use_property_decorate = False
    sub = row.row(align=True)
    sub.prop(con, "use_bulge_min", text="")
    subsub = sub.row(align=True)
    # Value field only active while its enable-toggle is on.
    subsub.active = con.use_bulge_min
    subsub.prop(con, "bulge_min", text="")
    row.prop_decorator(con, "bulge_min")
    row = col.row(heading="Max", align=True)
    row.use_property_decorate = False
    sub = row.row(align=True)
    sub.prop(con, "use_bulge_max", text="")
    subsub = sub.row(align=True)
    subsub.active = con.use_bulge_max
    subsub.prop(con, "bulge_max", text="")
    row.prop_decorator(con, "bulge_max")
    row = col.row()
    # Smoothing is meaningless unless at least one clamp is enabled.
    row.active = con.use_bulge_min or con.use_bulge_max
    row.prop(con, "bulge_smooth", text="Smooth")
    layout.prop(con, "volume", expand=True)
    layout.prop(con, "keep_axis", text="Rotation", expand=True)
    self.draw_influence(layout, con)
def draw_min_max(self, context):
    """Draw UI for the Floor (min/max) constraint."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    layout.prop(con, "offset")
    layout.prop(con, "floor_location", expand=True, text="Min/Max")
    layout.prop(con, "use_rotation")
    self.space_template(layout, con)
    self.draw_influence(layout, con)

def draw_clamp_to(self, context):
    """Draw UI for the Clamp To constraint."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    layout.prop(con, "main_axis", expand=True)
    layout.prop(con, "use_cyclic")
    self.draw_influence(layout, con)

def draw_transform(self, context):
    """Draw the top-level UI for the Transformation constraint."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    layout.prop(con, "use_motion_extrapolate", text="Extrapolate")
    self.space_template(layout, con)
    self.draw_influence(layout, con)
def draw_shrinkwrap(self, context):
    """Draw UI for the Shrinkwrap constraint.

    Project-specific options appear only in 'PROJECT' mode; snap-mode and
    normal-alignment options appear for the surface-tracking modes.
    """
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con, False)
    layout.prop(con, "distance")
    layout.prop(con, "shrinkwrap_type", text="Mode")
    layout.separator()
    if con.shrinkwrap_type == 'PROJECT':
        layout.prop(con, "project_axis", expand=True, text="Project Axis")
        layout.prop(con, "project_axis_space", text="Space")
        layout.prop(con, "project_limit", text="Distance")
        layout.prop(con, "use_project_opposite")
        layout.separator()
        col = layout.column()
        row = col.row()
        row.prop(con, "cull_face", expand=True)
        row = col.row()
        # Invert-cull only applies when projecting both ways with culling on.
        row.active = con.use_project_opposite and con.cull_face != 'OFF'
        row.prop(con, "use_invert_cull")
        layout.separator()
    if con.shrinkwrap_type in {'PROJECT', 'NEAREST_SURFACE', 'TARGET_PROJECT'}:
        layout.prop(con, "wrap_mode", text="Snap Mode")
        row = layout.row(heading="Align to Normal", align=True)
        row.use_property_decorate = False
        sub = row.row(align=True)
        sub.prop(con, "use_track_normal", text="")
        subsub = sub.row(align=True)
        # Axis picker greyed out unless normal tracking is enabled.
        subsub.active = con.use_track_normal
        subsub.prop(con, "track_axis", text="")
        row.prop_decorator(con, "track_axis")
    self.draw_influence(layout, con)
def draw_damp_track(self, context):
    """Draw UI for the Damped Track constraint."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    layout.prop(con, "track_axis", expand=True)
    self.draw_influence(layout, con)

def draw_spline_ik(self, context):
    """Draw the top-level UI for the Spline IK constraint.

    Fitting/chain-scaling details live in separate subpanels.
    """
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    self.draw_influence(layout, con)
def draw_pivot(self, context):
    """Draw UI for the Pivot constraint.

    With a target set, the offset is labelled "Pivot Offset"; without one,
    a relative-location toggle is shown and the offset is labelled
    "Pivot Point".
    """
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    if con.target:
        layout.prop(con, "offset", text="Pivot Offset")
    else:
        layout.prop(con, "use_relative_location")
        # Both branches of the original `use_relative_location` test emitted
        # the exact same widget, so the redundant if/else was collapsed.
        layout.prop(con, "offset", text="Pivot Point")
    col = layout.column()
    col.prop(con, "rotation_range", text="Rotation Range")
    self.draw_influence(layout, con)
def draw_follow_track(self, context):
    """Draw UI for the Follow Track constraint."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    # Resolve which movie clip the constraint reads from.
    clip = None
    if con.use_active_clip:
        clip = context.scene.active_clip
    else:
        clip = con.clip
    layout.prop(con, "use_active_clip")
    layout.prop(con, "use_3d_position")
    row = layout.row()
    # Undistorted-position only applies to 2D tracking.
    row.active = not con.use_3d_position
    row.prop(con, "use_undistorted_position")
    if not con.use_active_clip:
        layout.prop(con, "clip")
    layout.prop(con, "frame_method")
    if clip:
        tracking = clip.tracking
        layout.prop_search(con, "object", tracking, "objects", icon='OBJECT_DATA')
        # Falls back to the first tracking object when the name doesn't match.
        # NOTE(review): assumes the clip has at least one tracking object --
        # tracking.objects[0] would raise otherwise; confirm against callers.
        tracking_object = tracking.objects.get(con.object, tracking.objects[0])
        layout.prop_search(con, "track", tracking_object, "tracks", icon='ANIM_DATA')
    layout.prop(con, "camera")
    row = layout.row()
    row.active = not con.use_3d_position
    row.prop(con, "depth_object")
    layout.operator("clip.constraint_to_fcurve")
    self.draw_influence(layout, con)
def draw_camera_solver(self, context):
    """Draw UI for the Camera Solver constraint."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    layout.prop(con, "use_active_clip")
    if not con.use_active_clip:
        layout.prop(con, "clip")
    layout.operator("clip.constraint_to_fcurve")
    self.draw_influence(layout, con)

def draw_object_solver(self, context):
    """Draw UI for the Object Solver constraint."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    # Resolve which movie clip the constraint reads from.
    clip = None
    if con.use_active_clip:
        clip = context.scene.active_clip
    else:
        clip = con.clip
    layout.prop(con, "use_active_clip")
    if not con.use_active_clip:
        layout.prop(con, "clip")
    if clip:
        layout.prop_search(con, "object", clip.tracking, "objects", icon='OBJECT_DATA')
    layout.prop(con, "camera")
    row = layout.row()
    row.operator("constraint.objectsolver_set_inverse")
    row.operator("constraint.objectsolver_clear_inverse")
    layout.operator("clip.constraint_to_fcurve")
    self.draw_influence(layout, con)
def draw_transform_cache(self, context):
    """Draw UI for the Transform Cache constraint (Alembic/USD cache files)."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    layout.template_cache_file(con, "cache_file")
    cache_file = con.cache_file
    if cache_file is not None:
        layout.prop_search(con, "object_path", cache_file, "object_paths")
    self.draw_influence(layout, con)

def draw_python_constraint(self, context):
    """Placeholder panel: Python constraints are not supported."""
    layout = self.layout
    layout.label(text="Blender 2.6 doesn't support python constraints yet")
def draw_armature(self, context):
    """Draw UI for the Armature constraint (target list drawn elsewhere)."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    col = layout.column()
    col.prop(con, "use_deform_preserve_volume")
    col.prop(con, "use_bone_envelopes")
    # Current-location option only makes sense when editing a pose bone.
    if context.pose_bone:
        col.prop(con, "use_current_location")
    layout.operator("constraint.add_target", text="Add Target Bone")
    layout.operator("constraint.normalize_target_weights")
    self.draw_influence(layout, con)
    # Warn when the constraint has no effect because no targets exist.
    if not con.targets:
        layout.label(text="No target bones added", icon='ERROR')
def draw_kinematic(self, context):
    """Draw UI for the IK constraint.

    The ITASC solver branch exposes an ik_type selector with COPY_POSE and
    DISTANCE sub-layouts; the fallback branch is the standard IK solver.
    """
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    self.target_template(layout, con)
    if context.object.pose.ik_solver == 'ITASC':
        layout.prop(con, "ik_type")
        # This button gives itself too much padding, so put it in a column with the subtarget
        col = layout.column()
        col.prop(con, "pole_target")
        if con.pole_target and con.pole_target.type == 'ARMATURE':
            col.prop_search(con, "pole_subtarget", con.pole_target.data, "bones", text="Bone")
        col = layout.column()
        if con.pole_target:
            col.prop(con, "pole_angle")
        col.prop(con, "use_tail")
        col.prop(con, "use_stretch")
        col.prop(con, "chain_count")
        if con.ik_type == 'COPY_POSE':
            layout.prop(con, "reference_axis", expand=True)
            # Use separate rows and columns here to avoid an alignment issue with the lock buttons
            loc_col = layout.column()
            loc_col.prop(con, "use_location")
            row = loc_col.row()
            row.active = con.use_location
            row.prop(con, "weight", text="Weight", slider=True)
            row = loc_col.row(heading="Lock", align=True)
            row.use_property_decorate = False
            row.active = con.use_location
            sub = row.row(align=True)
            sub.prop(con, "lock_location_x", text="X", toggle=True)
            sub.prop(con, "lock_location_y", text="Y", toggle=True)
            sub.prop(con, "lock_location_z", text="Z", toggle=True)
            # Blank icon keeps the lock row aligned with decorated rows above.
            row.label(icon='BLANK1')
            rot_col = layout.column()
            rot_col.prop(con, "use_rotation")
            row = rot_col.row()
            row.active = con.use_rotation
            row.prop(con, "orient_weight", text="Weight", slider=True)
            row = rot_col.row(heading="Lock", align=True)
            row.use_property_decorate = False
            row.active = con.use_rotation
            sub = row.row(align=True)
            sub.prop(con, "lock_rotation_x", text="X", toggle=True)
            sub.prop(con, "lock_rotation_y", text="Y", toggle=True)
            sub.prop(con, "lock_rotation_z", text="Z", toggle=True)
            row.label(icon='BLANK1')
        elif con.ik_type == 'DISTANCE':
            layout.prop(con, "limit_mode")
            col = layout.column()
            col.prop(con, "weight", text="Weight", slider=True)
            col.prop(con, "distance", text="Distance", slider=True)
    else:
        # Standard IK constraint
        col = layout.column()
        col.prop(con, "pole_target")
        if con.pole_target and con.pole_target.type == 'ARMATURE':
            col.prop_search(con, "pole_subtarget", con.pole_target.data, "bones", text="Bone")
        col = layout.column()
        if con.pole_target:
            col.prop(con, "pole_angle")
        col.prop(con, "iterations")
        col.prop(con, "chain_count")
        col.prop(con, "use_tail")
        col.prop(con, "use_stretch")
        col = layout.column()
        row = col.row(align=True, heading="Weight Position")
        row.prop(con, "use_location", text="")
        sub = row.row(align=True)
        sub.active = con.use_location
        sub.prop(con, "weight", text="", slider=True)
        row = col.row(align=True, heading="Rotation")
        row.prop(con, "use_rotation", text="")
        sub = row.row(align=True)
        sub.active = con.use_rotation
        sub.prop(con, "orient_weight", text="", slider=True)
    self.draw_influence(layout, con)
# Parent class for constraint subpanels
class ConstraintButtonsSubPanel(Panel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_label = ""
bl_options = {'DRAW_BOX'}
def get_constraint(self, context):
    """Return the constraint this subpanel draws, registering it as the
    layout's 'constraint' context pointer so operators can find it."""
    con = self.custom_data
    self.layout.context_pointer_set("constraint", con)
    return con
def draw_transform_from(self, context):
    """Draw the 'map from' half of the Transformation constraint.

    One min/max column is emitted per source axis (X, Y, Z); a column is
    greyed out unless that axis is used as a source by some mapped channel.
    """
    layout = self.layout
    con = self.get_constraint(context)
    layout.prop(con, "map_from", expand=True)
    layout.use_property_split = True
    layout.use_property_decorate = True
    from_axes = [con.map_to_x_from, con.map_to_y_from, con.map_to_z_from]
    if con.map_from == 'ROTATION':
        layout.prop(con, "from_rotation_mode", text="Mode")
    # Property-name suffix depends on which channel kind is being mapped.
    ext = "" if con.map_from == 'LOCATION' else "_rot" if con.map_from == 'ROTATION' else "_scale"
    for axis in ("x", "y", "z"):
        label = axis.upper()
        col = layout.column(align=True)
        col.active = label in from_axes
        col.prop(con, "from_min_" + axis + ext, text=label + " Min")
        col.prop(con, "from_max_" + axis + ext, text="Max")
def draw_transform_to(self, context):
    """Draw the 'map to' half of the Transformation constraint.

    One source-axis/min/max column is emitted per destination axis.
    """
    layout = self.layout
    con = self.get_constraint(context)
    layout.prop(con, "map_to", expand=True)
    layout.use_property_split = True
    layout.use_property_decorate = True
    if con.map_to == 'ROTATION':
        layout.prop(con, "to_euler_order", text="Order")
    # Property-name suffix depends on which channel kind is being mapped.
    ext = "" if con.map_to == 'LOCATION' else "_rot" if con.map_to == 'ROTATION' else "_scale"
    for axis in ("x", "y", "z"):
        label = axis.upper()
        col = layout.column(align=True)
        col.prop(con, "map_to_" + axis + "_from", expand=False, text=label + " Source Axis")
        col.prop(con, "to_min_" + axis + ext, text="Min")
        col.prop(con, "to_max_" + axis + ext, text="Max")
    layout.prop(con, "mix_mode" + ext, text="Mix")
def draw_armature_bones(self, context):
    """Draw one boxed row per Armature-constraint target bone."""
    layout = self.layout
    con = self.get_constraint(context)
    layout.use_property_split = True
    layout.use_property_decorate = True
    for i, tgt in enumerate(con.targets):
        has_target = tgt.target is not None
        box = layout.box()
        header = box.row()
        header.use_property_split = False
        split = header.split(factor=0.45, align=True)
        split.prop(tgt, "target", text="")
        row = split.row(align=True)
        row.active = has_target
        if has_target:
            # Searchable bone picker once a target armature is chosen.
            row.prop_search(tgt, "subtarget", tgt.target.data, "bones", text="")
        else:
            row.prop(tgt, "subtarget", text="", icon='BONE_DATA')
        # Per-row remove button, keyed by this target's index.
        header.operator("constraint.remove_target", text="", icon='X').index = i
        row = box.row()
        # Weight is meaningless until both target and bone are set.
        row.active = has_target and tgt.subtarget != ""
        row.prop(tgt, "weight", slider=True, text="Weight")
def draw_spline_ik_fitting(self, context):
| |
== 'e_cblp':
e_cblp_ = child_.text
e_cblp_ = self.gds_validate_string(e_cblp_, node, 'e_cblp')
self.e_cblp = e_cblp_
elif nodeName_ == 'e_cp':
e_cp_ = child_.text
e_cp_ = self.gds_validate_string(e_cp_, node, 'e_cp')
self.e_cp = e_cp_
elif nodeName_ == 'e_crlc':
e_crlc_ = child_.text
e_crlc_ = self.gds_validate_string(e_crlc_, node, 'e_crlc')
self.e_crlc = e_crlc_
elif nodeName_ == 'e_cparhdr':
e_cparhdr_ = child_.text
e_cparhdr_ = self.gds_validate_string(e_cparhdr_, node, 'e_cparhdr')
self.e_cparhdr = e_cparhdr_
elif nodeName_ == 'e_minalloc':
e_minalloc_ = child_.text
e_minalloc_ = self.gds_validate_string(e_minalloc_, node, 'e_minalloc')
self.e_minalloc = e_minalloc_
elif nodeName_ == 'e_maxalloc':
e_maxalloc_ = child_.text
e_maxalloc_ = self.gds_validate_string(e_maxalloc_, node, 'e_maxalloc')
self.e_maxalloc = e_maxalloc_
elif nodeName_ == 'e_ss':
e_ss_ = child_.text
e_ss_ = self.gds_validate_string(e_ss_, node, 'e_ss')
self.e_ss = e_ss_
elif nodeName_ == 'e_sp':
e_sp_ = child_.text
e_sp_ = self.gds_validate_string(e_sp_, node, 'e_sp')
self.e_sp = e_sp_
elif nodeName_ == 'e_csum':
e_csum_ = child_.text
e_csum_ = self.gds_validate_string(e_csum_, node, 'e_csum')
self.e_csum = e_csum_
elif nodeName_ == 'e_ip':
e_ip_ = child_.text
e_ip_ = self.gds_validate_string(e_ip_, node, 'e_ip')
self.e_ip = e_ip_
elif nodeName_ == 'e_cs':
e_cs_ = child_.text
e_cs_ = self.gds_validate_string(e_cs_, node, 'e_cs')
self.e_cs = e_cs_
elif nodeName_ == 'e_lfarlc':
e_lfarlc_ = child_.text
e_lfarlc_ = self.gds_validate_string(e_lfarlc_, node, 'e_lfarlc')
self.e_lfarlc = e_lfarlc_
elif nodeName_ == 'e_ovro':
e_ovro_ = child_.text
e_ovro_ = self.gds_validate_string(e_ovro_, node, 'e_ovro')
self.e_ovro = e_ovro_
elif nodeName_ == 'reserved1':
reserved1_ = child_.text
reserved1_ = self.gds_validate_string(reserved1_, node, 'reserved1')
self.reserved1.append(reserved1_)
elif nodeName_ == 'e_oemid':
e_oemid_ = child_.text
e_oemid_ = self.gds_validate_string(e_oemid_, node, 'e_oemid')
self.e_oemid = e_oemid_
elif nodeName_ == 'e_oeminfo':
e_oeminfo_ = child_.text
e_oeminfo_ = self.gds_validate_string(e_oeminfo_, node, 'e_oeminfo')
self.e_oeminfo = e_oeminfo_
elif nodeName_ == 'reserved2':
reserved2_ = child_.text
reserved2_ = self.gds_validate_string(reserved2_, node, 'reserved2')
self.reserved2 = reserved2_
elif nodeName_ == 'e_lfanew':
e_lfanew_ = child_.text
e_lfanew_ = self.gds_validate_string(e_lfanew_, node, 'e_lfanew')
self.e_lfanew = e_lfanew_
elif nodeName_ == 'Hashes':
Hashes_ = child_.text
Hashes_ = self.gds_validate_string(Hashes_, node, 'Hashes')
self.Hashes = Hashes_
# end class DOSHeaderType
class PEHeadersType(GeneratedsSuper):
    """PEHeadersType specifies the headers found in PE and COFF files.

    Generated XML-binding class: holds the DOS header, PE signature, file
    header, optional header, entropy and hashes child elements, and provides
    export/build helpers for (de)serialising them to and from XML.
    """
    # Hooks for user-supplied class substitution via factory().
    subclass = None
    superclass = None
    def __init__(self, DOS_Header=None, Signature=None, File_Header=None, Optional_Header=None, Entropy=None, Hashes=None):
        self.DOS_Header = DOS_Header
        self.Signature = Signature
        self.File_Header = File_Header
        self.Optional_Header = Optional_Header
        self.Entropy = Entropy
        self.Hashes = Hashes
    def factory(*args_, **kwargs_):
        # No 'self' on purpose -- converted to a staticmethod below.
        # Instantiates the registered subclass when one is set.
        if PEHeadersType.subclass:
            return PEHeadersType.subclass(*args_, **kwargs_)
        else:
            return PEHeadersType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Java-style accessors kept for compatibility with generated callers.
    def get_DOS_Header(self): return self.DOS_Header
    def set_DOS_Header(self, DOS_Header): self.DOS_Header = DOS_Header
    def get_Signature(self): return self.Signature
    def set_Signature(self, Signature): self.Signature = Signature
    def get_File_Header(self): return self.File_Header
    def set_File_Header(self, File_Header): self.File_Header = File_Header
    def get_Optional_Header(self): return self.Optional_Header
    def set_Optional_Header(self, Optional_Header): self.Optional_Header = Optional_Header
    def get_Entropy(self): return self.Entropy
    def set_Entropy(self, Entropy): self.Entropy = Entropy
    def get_Hashes(self): return self.Hashes
    def set_Hashes(self, Hashes): self.Hashes = Hashes
    def export(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEHeadersType', namespacedef_=''):
        # Write this element as XML; self-closes when there are no children.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='PEHeadersType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEHeadersType'):
        # This type defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEHeadersType', fromsubclass_=False):
        # Children are written in fixed schema order; unset fields skipped.
        if self.DOS_Header is not None:
            self.DOS_Header.export(outfile, level, namespace_, name_='DOS_Header')
        if self.Signature is not None:
            self.Signature.export(outfile, level, namespace_, name_='Signature')
        if self.File_Header is not None:
            self.File_Header.export(outfile, level, namespace_, name_='File_Header')
        if self.Optional_Header is not None:
            self.Optional_Header.export(outfile, level, namespace_, name_='Optional_Header')
        if self.Entropy is not None:
            self.Entropy.export(outfile, level, namespace_, name_='Entropy')
        if self.Hashes is not None:
            self.Hashes.export(outfile, level, namespace_, name_='Hashes')
    def hasContent_(self):
        # True when at least one child element is set.
        if (
            self.DOS_Header is not None or
            self.Signature is not None or
            self.File_Header is not None or
            self.Optional_Header is not None or
            self.Entropy is not None or
            self.Hashes is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='PEHeadersType'):
        # Emit a Python-literal (constructor-style) representation.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.DOS_Header is not None:
            showIndent(outfile, level)
            outfile.write('DOS_Header=model_.DOSHeaderType(\n')
            self.DOS_Header.exportLiteral(outfile, level, name_='DOS_Header')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Signature is not None:
            showIndent(outfile, level)
            outfile.write('Signature=%s,\n' % quote_python(self.Signature).encode(ExternalEncoding))
        if self.File_Header is not None:
            showIndent(outfile, level)
            outfile.write('File_Header=model_.PEFileHeaderType(\n')
            self.File_Header.exportLiteral(outfile, level, name_='File_Header')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Optional_Header is not None:
            showIndent(outfile, level)
            outfile.write('Optional_Header=model_.PEOptionalHeaderType(\n')
            self.Optional_Header.exportLiteral(outfile, level, name_='Optional_Header')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Entropy is not None:
            showIndent(outfile, level)
            outfile.write('Entropy=%s,\n' % quote_python(self.Entropy).encode(ExternalEncoding))
        if self.Hashes is not None:
            showIndent(outfile, level)
            outfile.write('Hashes=%s,\n' % quote_python(self.Hashes).encode(ExternalEncoding))
    def build(self, node):
        # Populate this object from an ElementTree element.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Dispatch each child element to the matching field.
        if nodeName_ == 'DOS_Header':
            obj_ = DOSHeaderType.factory()
            obj_.build(child_)
            self.set_DOS_Header(obj_)
        elif nodeName_ == 'Signature':
            Signature_ = child_.text
            Signature_ = self.gds_validate_string(Signature_, node, 'Signature')
            self.Signature = Signature_
        elif nodeName_ == 'File_Header':
            obj_ = PEFileHeaderType.factory()
            obj_.build(child_)
            self.set_File_Header(obj_)
        elif nodeName_ == 'Optional_Header':
            obj_ = PEOptionalHeaderType.factory()
            obj_.build(child_)
            self.set_Optional_Header(obj_)
        elif nodeName_ == 'Entropy':
            Entropy_ = child_.text
            Entropy_ = self.gds_validate_string(Entropy_, node, 'Entropy')
            self.Entropy = Entropy_
        elif nodeName_ == 'Hashes':
            Hashes_ = child_.text
            Hashes_ = self.gds_validate_string(Hashes_, node, 'Hashes')
            self.Hashes = Hashes_
# end class PEHeadersType
class PEFileHeaderType(GeneratedsSuper):
"""The PEFileHeaderType type refers to the PE file header (somtimes
referred to as the COFF header) and its associated attributes."""
subclass = None
superclass = None
def __init__(self, Machine=None, Number_Of_Sections=None, Time_Date_Stamp=None, Pointer_To_Symbol_Table=None, Number_Of_Symbols=None, Size_Of_Optional_Header=None, Characteristics=None, Hashes=None):
    # Fields mirror the PE/COFF file-header layout described in the class
    # docstring; all default to None (absent) until set or built from XML.
    self.Machine = Machine
    self.Number_Of_Sections = Number_Of_Sections
    self.Time_Date_Stamp = Time_Date_Stamp
    self.Pointer_To_Symbol_Table = Pointer_To_Symbol_Table
    self.Number_Of_Symbols = Number_Of_Symbols
    self.Size_Of_Optional_Header = Size_Of_Optional_Header
    self.Characteristics = Characteristics
    self.Hashes = Hashes
def factory(*args_, **kwargs_):
    # No 'self' on purpose -- converted to a staticmethod below.
    # Instantiates the registered subclass when one is set.
    if PEFileHeaderType.subclass:
        return PEFileHeaderType.subclass(*args_, **kwargs_)
    else:
        return PEFileHeaderType(*args_, **kwargs_)
factory = staticmethod(factory)
# Java-style accessors kept for compatibility with generated callers.
def get_Machine(self): return self.Machine
def set_Machine(self, Machine): self.Machine = Machine
def get_Number_Of_Sections(self): return self.Number_Of_Sections
def set_Number_Of_Sections(self, Number_Of_Sections): self.Number_Of_Sections = Number_Of_Sections
def get_Time_Date_Stamp(self): return self.Time_Date_Stamp
def set_Time_Date_Stamp(self, Time_Date_Stamp): self.Time_Date_Stamp = Time_Date_Stamp
def get_Pointer_To_Symbol_Table(self): return self.Pointer_To_Symbol_Table
def set_Pointer_To_Symbol_Table(self, Pointer_To_Symbol_Table): self.Pointer_To_Symbol_Table = Pointer_To_Symbol_Table
def get_Number_Of_Symbols(self): return self.Number_Of_Symbols
def set_Number_Of_Symbols(self, Number_Of_Symbols): self.Number_Of_Symbols = Number_Of_Symbols
def get_Size_Of_Optional_Header(self): return self.Size_Of_Optional_Header
def set_Size_Of_Optional_Header(self, Size_Of_Optional_Header): self.Size_Of_Optional_Header = Size_Of_Optional_Header
def get_Characteristics(self): return self.Characteristics
def set_Characteristics(self, Characteristics): self.Characteristics = Characteristics
def get_Hashes(self): return self.Hashes
def set_Hashes(self, Hashes): self.Hashes = Hashes
    def export(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEFileHeaderType', namespacedef_=''):
        """Write this element as XML to ``outfile`` at indentation ``level``."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='PEFileHeaderType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No child element is set: emit a self-closing tag.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='WinExecutableFileObj:', name_='PEFileHeaderType'):
        # This element defines no XML attributes; hook kept for subclasses.
        pass
    def exportChildren(self, outfile, level, namespace_='WinExecutableFileObj:', name_='PEFileHeaderType', fromsubclass_=False):
        """Write each non-None field as its own XML child element.

        NOTE(review): buildChildren() stores plain strings in these fields,
        which have no .export() method; presumably these fields normally hold
        generateDS node objects -- confirm before relying on round-tripping.
        """
        if self.Machine is not None:
            self.Machine.export(outfile, level, namespace_, name_='Machine')
        if self.Number_Of_Sections is not None:
            self.Number_Of_Sections.export(outfile, level, namespace_, name_='Number_Of_Sections')
        if self.Time_Date_Stamp is not None:
            self.Time_Date_Stamp.export(outfile, level, namespace_, name_='Time_Date_Stamp')
        if self.Pointer_To_Symbol_Table is not None:
            self.Pointer_To_Symbol_Table.export(outfile, level, namespace_, name_='Pointer_To_Symbol_Table')
        if self.Number_Of_Symbols is not None:
            self.Number_Of_Symbols.export(outfile, level, namespace_, name_='Number_Of_Symbols')
        if self.Size_Of_Optional_Header is not None:
            self.Size_Of_Optional_Header.export(outfile, level, namespace_, name_='Size_Of_Optional_Header')
        if self.Characteristics is not None:
            self.Characteristics.export(outfile, level, namespace_, name_='Characteristics')
        if self.Hashes is not None:
            self.Hashes.export(outfile, level, namespace_, name_='Hashes')
def hasContent_(self):
if (
self.Machine is not None or
self.Number_Of_Sections is not None or
self.Time_Date_Stamp is not None or
self.Pointer_To_Symbol_Table is not None or
self.Number_Of_Symbols is not None or
self.Size_Of_Optional_Header is not None or
self.Characteristics is not None or
self.Hashes is not None
):
return True
else:
return False
    def exportLiteral(self, outfile, level, name_='PEFileHeaderType'):
        """Write this element as Python keyword-argument literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # No XML attributes on this element; hook kept for subclasses.
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        """Write each non-None field as a quoted ``name=value,`` literal line."""
        if self.Machine is not None:
            showIndent(outfile, level)
            outfile.write('Machine=%s,\n' % quote_python(self.Machine).encode(ExternalEncoding))
        if self.Number_Of_Sections is not None:
            showIndent(outfile, level)
            outfile.write('Number_Of_Sections=%s,\n' % quote_python(self.Number_Of_Sections).encode(ExternalEncoding))
        if self.Time_Date_Stamp is not None:
            showIndent(outfile, level)
            outfile.write('Time_Date_Stamp=%s,\n' % quote_python(self.Time_Date_Stamp).encode(ExternalEncoding))
        if self.Pointer_To_Symbol_Table is not None:
            showIndent(outfile, level)
            outfile.write('Pointer_To_Symbol_Table=%s,\n' % quote_python(self.Pointer_To_Symbol_Table).encode(ExternalEncoding))
        if self.Number_Of_Symbols is not None:
            showIndent(outfile, level)
            outfile.write('Number_Of_Symbols=%s,\n' % quote_python(self.Number_Of_Symbols).encode(ExternalEncoding))
        if self.Size_Of_Optional_Header is not None:
            showIndent(outfile, level)
            outfile.write('Size_Of_Optional_Header=%s,\n' % quote_python(self.Size_Of_Optional_Header).encode(ExternalEncoding))
        if self.Characteristics is not None:
            showIndent(outfile, level)
            outfile.write('Characteristics=%s,\n' % quote_python(self.Characteristics).encode(ExternalEncoding))
        if self.Hashes is not None:
            showIndent(outfile, level)
            outfile.write('Hashes=%s,\n' % quote_python(self.Hashes).encode(ExternalEncoding))
    def build(self, node):
        """Populate this object from an ElementTree ``node``."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any namespace prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # No XML attributes to parse for this element; hook kept for subclasses.
        pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Machine':
Machine_ = child_.text
Machine_ = self.gds_validate_string(Machine_, node, 'Machine')
self.Machine = Machine_
elif nodeName_ == 'Number_Of_Sections':
Number_Of_Sections_ = child_.text
Number_Of_Sections_ = self.gds_validate_string(Number_Of_Sections_, node, 'Number_Of_Sections')
self.Number_Of_Sections = Number_Of_Sections_
elif nodeName_ == 'Time_Date_Stamp':
Time_Date_Stamp_ = child_.text
Time_Date_Stamp_ = self.gds_validate_string(Time_Date_Stamp_, node, 'Time_Date_Stamp')
self.Time_Date_Stamp | |
[]
while True:
batch = self.c_queue.get()
if batch is None:
self.params.logger.info("queue sizes: %s"%str(qsizes))
for gate in self.gates:
self.params.logger.debug("begin gate.c_queue.put")
gate.c_queue.put(None)
self.params.logger.debug("end gate.c_queue.put")
break
qsizes = []
for gate in self.gates:
qsizes.append(gate.c_queue.qsize())
self.gates[np.argmin(qsizes)].c_queue.put(batch)
self.params.logger.info("#queue sizes: %s"%str(qsizes))
self.params.logger.info("end GateDispatcher")
class GateCaller(WorkNode):
    """Worker that owns one Gate (and its Meta model) pinned to a GPU.

    Consumes ``([batch_xs, batch_ys], uncs)`` items from ``c_queue``, trains
    the gate on the uncertainty vectors, and emits
    ``[[batch_xs, batch_ys], inds]`` on ``r_queue``.  A ``None`` item shuts
    the worker down.  If the gate process reports >= 3 failures it is torn
    down and respawned against the same Meta model.
    """
    def __init__(self, params, c_queue = None, r_queue = None, id = 0):
        # NOTE: `id` shadows the builtin but is kept for caller compatibility.
        super().__init__(params, c_queue, r_queue)
        self.id = id
    def _start_gate(self, meta):
        # Spawn a Gate on this worker's GPU and wait for its ready handshake.
        gate = Gate("/device:GPU:%d"%get_device_id(self.params.gate_device, self.id), self.params, meta)
        gate.start()
        self.params.logger.debug("begin GateCaller %d ready" % self.id)
        gate.r_queue.get()
        self.params.logger.debug("end GateCaller %d ready" % self.id)
        return gate
    def _stop(self, worker):
        # Ask a child worker to exit, wait for the ack, then reap the process.
        worker.c_queue.put(["exit"])
        worker.r_queue.get()
        worker.join()
    def run(self):
        self.params.logger.info("begin GateCaller %d" % self.id)
        meta = Meta("meta_%d" % self.id, "/device:GPU:%d"%get_device_id(self.params.gate_device, self.id), self.params)
        meta.start()
        gate = self._start_gate(meta)
        while True:
            self.params.logger.debug("begin GateCaller %d c_queue.get" % self.id)
            batch = self.c_queue.get()
            self.params.logger.debug("end GateCaller %d c_queue.get" % self.id)
            if batch is None:
                break
            [batch_xs, batch_ys], uncs = batch
            # Fix: report through the logger instead of a bare print(); the
            # redundant `gate is not None` checks are dropped (gate is always
            # bound by _start_gate above).
            self.params.logger.debug("GateCaller - gate failure: %d" % gate.failure.value)
            if gate.failure.value >= 3:
                # The gate process failed repeatedly: tear it down and respawn.
                self._stop(gate)
                gate = self._start_gate(meta)
            self.params.logger.debug("begin GateCaller %d train" % self.id)
            gate.c_queue.put(["train",uncs])
            inds = gate.r_queue.get()
            self.params.logger.debug("end GateCaller %d train" % self.id)
            self.params.logger.debug("begin GateCaller %d r_queue.put" % self.id)
            # Fix: Queue.put() returns None; the original uselessly rebound
            # `batch` to that result.
            self.r_queue.put([[batch_xs, batch_ys],inds])
            self.params.logger.debug("end GateCaller %d r_queue.put" % self.id)
        self._stop(gate)
        self._stop(meta)
        self.params.logger.info("end GateCaller %d" % self.id)
class StaleObserver(WorkNode):
    # Worker that evaluates incoming batches with periodically refreshed
    # ("stale") copies of the expert models and forwards the per-expert
    # uncertainties downstream.
    def run(self):
        """Consume batches from c_queue; emit [batch, uncertainties] on r_queue.

        Every 10*stale_interval batches the expert processes are rebuilt from
        scratch; every stale_interval batches (otherwise) they reload weights
        via a "restore" command.  A None batch shuts the worker down and
        propagates the None sentinel downstream.
        """
        self.params.logger.info("begin StaleObserver")
        model_name = self.params.model
        stale_experts = []
        #gate = None
        bi = 0  # batch counter; drives the refresh schedule below
        while True:
            batch = self.c_queue.get()
            if batch is None:
                # Propagate the shutdown sentinel to the downstream consumer.
                self.r_queue.put(None)
                break
            batch_xs, batch_ys = batch
            if bi % (10 *self.params.stale_interval) == 0:
                self.params.logger.info("creating stale experts")
                # Tear down the previous generation of expert processes.
                for stale_expert in stale_experts:
                    stale_expert.c_queue.put(["exit"])
                    stale_expert.r_queue.get()
                    stale_expert.join()
                stale_experts = []
                for i in range(self.params.num_experts):
                    stale_expert = Expert(model_name + "_e%d" % i, "/device:GPU:%d"%get_device_id(self.params.device, i), self.params)
                    stale_experts.append(stale_expert)
                    stale_expert.start()
                    # Wait for the expert's ready handshake.
                    stale_expert.r_queue.get()
            if bi % self.params.stale_interval == 0 and bi % (10 *self.params.stale_interval) != 0:
                # Refresh weights without recreating the processes.
                for stale_expert in stale_experts:
                    stale_expert.c_queue.put(["restore"])
                for stale_expert in stale_experts:
                    stale_expert.r_queue.get()
            for stale_expert in stale_experts:
                stale_expert.c_queue.put(["predict",[batch_xs, batch_ys]])
            accs = []
            uncs = []
            for expert in stale_experts:
                acc, unc,_ = expert.r_queue.get()
                accs.append(acc)
                uncs.append(unc)
            if bi % self.params.log_frequency == 0:
                self.params.logger.info("accuracy: %f" % np.mean(accs))
            # Presumably flips (expert, example) -> (example, expert) so each
            # row holds one example's uncertainties -- TODO confirm unc shape.
            uncs = np.transpose(uncs)
            self.r_queue.put([batch,uncs])
            bi += 1
        for stale_expert in stale_experts:
            stale_expert.c_queue.put(["exit"])
            stale_expert.r_queue.get()
            stale_expert.join()
        self.params.logger.info("end StaleObserver")
'''
num_experts must be 1
'''
def fast_train(params):
    """Train a single expert on batches streamed from the DataReader.

    Per the module note above, this path requires ``params.num_experts == 1``.
    """
    reader = DataReader("reader", None, params)
    reader.start()
    params.logger.info("creating model sessions")
    model_name = params.model
    expert = Expert(model_name + "_e%d" % 0, "/device:GPU:%d" % get_device_id(params.device, 0), params)
    expert.start()
    # Wait for the expert's ready handshake before feeding it work.
    expert.r_queue.get()
    # The reader signals end-of-data with a None sentinel.
    for batch in iter(reader.r_queue.get, None):
        batch_xs, batch_ys = batch
        expert.c_queue.put(["train",[batch_xs, batch_ys]])
        expert.r_queue.get()
    expert.c_queue.put(["exit"])
    expert.r_queue.get()
    expert.join()
def train(params):
    """Run the mixture-of-experts trainer process to completion."""
    trainer = MoETrainer(params)
    trainer.start()
    trainer.join()
'''
num_experts must be 1
'''
def fast_predict(params):
    """Evaluate a single expert over the dataset, logging timing per batch.

    Per the module note above, this path requires ``params.num_experts == 1``.
    """
    reader = DataReader("reader", None, params)
    reader.start()
    params.logger.info("creating model sessions")
    model_name = params.model
    expert = Expert(model_name + "_e%d" % 0, None, params)
    expert.start()
    # Wait for the expert's ready handshake.
    expert.r_queue.get()
    all_accs = []
    all_uncs = []
    all_ts = []
    # The reader signals end-of-data with a None sentinel.
    for batch_xs, batch_ys in iter(reader.r_queue.get, None):
        expert.c_queue.put(["predict",[batch_xs, batch_ys]])
        acc, unc, t = expert.r_queue.get()
        all_accs.append(acc)
        all_uncs.append(unc)
        all_ts.append(t)
        params.logger.info('elapsed time: %.3f ms' % t)
    expert.c_queue.put(["exit"])
    expert.r_queue.get()
    expert.join()
    all_accs = np.concatenate(all_accs, axis=0)
    all_uncs = np.concatenate(all_uncs, axis=0)
    params.logger.info('%s: precision: %.3f , elapsed time: %.3f ms' % (datetime.now(), np.mean(all_accs),
                       1e3*np.sum(all_ts)/len(all_accs)))
def predict(params):
    """Evaluate every expert over the dataset and log ensemble precision.

    Spawns ``params.num_experts`` Expert processes, runs each batch through
    all of them, then logs per-expert precision, combined precision via
    ``cal_accuracy`` (which, per the commented-out reference code below,
    selects the expert with minimal uncertainty per example), and "drop-k"
    precision over every proper subset of experts.
    """
    reader = DataReader("reader", None, params)
    reader.start()
    params.logger.info("creating model sessions")
    model_name = params.model
    experts = []
    for i in range(params.num_experts):
        expert = Expert(model_name + "_e%d" % i, None, params)
        experts.append(expert)
        expert.start()
        # Wait for the expert's ready handshake.
        expert.r_queue.get()
    all_accs = []
    all_uncs = []
    all_ts = []
    all_labels = []
    i = 0
    while True:
        values = reader.r_queue.get()
        if values is None:
            break
        batch_xs, batch_ys = values
        # warm up
        # First batch: run one throwaway predict per expert so the timings
        # collected below exclude session/initialization cost.
        if i == 0:
            for expert in experts:
                expert.c_queue.put(["predict",[batch_xs, batch_ys]])
                expert.r_queue.get()
        accs = []
        uncs = []
        ts = []
        labels = []
        for expert in experts:
            expert.c_queue.put(["predict",[batch_xs, batch_ys]])
            acc, unc, t = expert.r_queue.get()
            accs.append(acc)
            uncs.append(unc)
            ts.append(t)
            labels.append(np.reshape(batch_ys, -1))
        # Transpose so rows are per-example, columns per-expert.
        all_accs.append(np.transpose(accs))
        all_uncs.append(np.transpose(uncs))
        all_ts.append(ts)
        all_labels.append(np.transpose(labels))
        i += 1
    for expert in experts:
        expert.c_queue.put(["exit"])
        expert.r_queue.get()
        expert.join()
    all_accs = np.concatenate(all_accs, axis=0)
    all_uncs = np.concatenate(all_uncs, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)
    for i in range(params.num_experts):
        params.logger.info('%s: precision (expert %d): %.3f' % (datetime.now(), i+1, np.mean(all_accs, axis=0)[i]))
    '''
    final_accs = []
    for i in range(len(all_uncs)):
        j = np.argmin(all_uncs[i])
        final_accs.append(all_accs[i][j])
    '''
    final_accs = cal_accuracy(all_uncs,all_accs)
    # Experts run in parallel, so wall time per example is the slowest
    # expert's total time, not the sum over experts.
    params.logger.info('%s: precision: %.3f , elapsed time: %.3f ms' % (datetime.now(), np.mean(final_accs),
        1e3*np.max(np.sum(all_ts, axis=0))/len(final_accs)))
    # calculate drop one, two, ... accuracies
    import itertools
    for r in range(1,params.num_experts):
        drop_accs = []
        for t in itertools.combinations(range(params.num_experts), r):
            tmp_uncs = all_uncs[:,np.array(t)]
            tmp_accs = all_accs[:,np.array(t)]
            drop_accs.append(cal_accuracy(tmp_uncs,tmp_accs))
        drop_accs = np.transpose(drop_accs)
        mean_drop_acc = np.mean(drop_accs,axis=1)
        std_drop_acc = np.std(drop_accs, axis=1)
        params.logger.info('%s: drop %d precision: mean: %.3f std: %.3f' %(datetime.now(), params.num_experts - r, np.mean(mean_drop_acc), np.mean(std_drop_acc)) )
    if params.num_experts >= 2:
        analyze(all_accs, all_uncs, all_labels, params)
def split_and_send(conn, data, socket_buffer_size):
    """Pickle ``data`` and send it over ``conn`` with a 4-byte size prefix.

    A ``None`` payload is encoded as size 0 with no body.  The pickled bytes
    are sent in chunks of at most ``socket_buffer_size``.

    Fixes: use ``sendall()`` instead of ``send()`` -- ``send()`` may transmit
    only part of the buffer, silently corrupting the stream.  Also avoids the
    trailing empty packet the original emitted when the payload size was an
    exact multiple of the buffer size.
    """
    if data is None:
        conn.sendall(pack("I", 0))
        return
    raw_data = pickle.dumps(data)
    data_size = len(raw_data)
    conn.sendall(pack("I", data_size))
    for start in range(0, data_size, socket_buffer_size):
        conn.sendall(raw_data[start:start + socket_buffer_size])
def recv_and_concat(conn, socket_buffer_size):
    """Receive one size-prefixed pickled message from ``conn`` and decode it.

    Counterpart of ``split_and_send()``: reads a 4-byte length header, then
    exactly that many payload bytes.  Returns ``None`` for a zero-length
    message.  Raises ``ConnectionError`` if the peer closes mid-message.

    Fixes: the original ``recv(socket_buffer_size)`` could over-read into the
    next message when the remainder was smaller than the buffer, and a peer
    disconnect (``recv`` returning ``b''``) caused an infinite loop; the
    header read was also not guaranteed to be complete.
    """
    header = _recv_exactly(conn, calcsize("I"), calcsize("I"))
    data_size = unpack("I", header)[0]
    if data_size == 0:
        return None
    return pickle.loads(_recv_exactly(conn, data_size, socket_buffer_size))
def _recv_exactly(conn, num_bytes, chunk_size):
    """Read exactly ``num_bytes`` from ``conn`` in chunks of <= ``chunk_size``."""
    buf = b''
    while len(buf) < num_bytes:
        # Cap the read so we never consume bytes of the next message.
        chunk = conn.recv(min(chunk_size, num_bytes - len(buf)))
        if not chunk:
            raise ConnectionError("connection closed while receiving data")
        buf += chunk
    return buf
class ServerSocketHandler(WorkNode):
    """Worker that accepts one TCP connection and services queue commands.

    Commands arrive on ``c_queue`` as ``["send", data]`` or ``["recv"]``;
    results (``None`` for send, the received object for recv) are published
    on ``r_queue``.  A ``None`` command shuts the worker down.
    """
    def __init__(self, params, addr):
        super().__init__(params)
        self.addr = addr
    def run(self):
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.bind(self.addr)
        server_socket.listen()
        # Fix: `conn` was unbound in the finally block if accept() raised,
        # turning the original error into a NameError.
        conn = None
        try:
            conn, addr = server_socket.accept()
            print('Connection address:', addr)
            while True:
                Command = self.c_queue.get()
                if Command is None:
                    break
                if Command[0] == "send":
                    data = Command[1]
                    split_and_send(conn, data, self.params.socket_buffer_size)
                    self.r_queue.put(None)
                elif Command[0] == "recv":
                    data = recv_and_concat(conn, self.params.socket_buffer_size)
                    self.r_queue.put(data)
        finally:
            if conn is not None:
                conn.close()
            server_socket.close()
class ClientSocketHandler(WorkNode):
    """Client-side peer of ServerSocketHandler.

    Connects out to ``addr`` and then services ``["send", data]`` /
    ``["recv"]`` commands from ``c_queue``, publishing results on ``r_queue``,
    until a ``None`` command arrives.
    """
    def __init__(self, params, addr):
        super().__init__(params)
        self.addr = addr
    def run(self):
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            conn.connect(self.addr)
            # A None command is the shutdown sentinel.
            for Command in iter(self.c_queue.get, None):
                op = Command[0]
                if op == "send":
                    split_and_send(conn, Command[1], self.params.socket_buffer_size)
                    self.r_queue.put(None)
                elif op == "recv":
                    self.r_queue.put(recv_and_concat(conn, self.params.socket_buffer_size))
        finally:
            conn.close()
def co_predict(params):
'''
def split_and_broadcast(conns, raw_data):
data_size = len(raw_data)
packets = [raw_data[i * params.socket_buffer_size: (i + 1)* params.socket_buffer_size] for i in range(data_size // params.socket_buffer_size +1)]
for conn in conns:
conn.send(pickle.dumps(data_size))
for packet in packets:
conn.send(packet)
'''
params.logger.info("creating model sessions")
model_name = params.model
if params.socket_node_index == 0:
expert = Expert(model_name + "_e%d" % params.socket_node_index, None, params)
expert.start()
expert.r_queue.get()
try:
client_handlers = []
for k in range(params.num_experts):
if k == 0:
client_handlers.append(None)
else:
ip, port = params.socket_nodes[k].split(':')
addr = (ip, int(port))
client_handler = ClientSocketHandler(params, addr)
client_handler.start()
client_handlers.append(client_handler)
reader = DataReader("reader", None, params)
reader.start()
all_accs = []
all_uncs = []
all_tts = []
all_pts = []
all_cts = []
all_labels = []
i = 0
while True:
values = reader.r_queue.get()
if values is None:
break
batch_xs, batch_ys = values
# warm up
if i == 0:
expert.c_queue.put(["predict",[batch_xs, batch_ys]])
for client_handler in client_handlers[1:]:
client_handler.c_queue.put(["send",["predict",[batch_xs, batch_ys]]])
for client_handler in client_handlers[1:]:
client_handler.r_queue.get()
expert.r_queue.get()
for client_handler in client_handlers[1:]:
client_handler.c_queue.put(["recv"])
for client_handler in client_handlers[1:]:
client_handler.r_queue.get()
accs = []
uncs = []
tts = []
pts = []
cts = []
labels = []
start_time = time.time()
expert.c_queue.put(["predict",[batch_xs, batch_ys]])
for client_handler in client_handlers[1:]:
client_handler.c_queue.put(["send",["predict",[batch_xs, batch_ys]]])
for client_handler in client_handlers[1:]:
client_handler.r_queue.get()
acc, unc, t = expert.r_queue.get()
accs.append(acc)
uncs.append(unc)
pts.append(t)
for client_handler in client_handlers[1:]:
client_handler.c_queue.put(["recv"])
for client_handler in client_handlers[1:]:
acc, unc, t = client_handler.r_queue.get()
accs.append(acc)
uncs.append(unc)
pts.append(t)
for k in range(params.num_experts):
labels.append(np.reshape(batch_ys, -1))
end_time = time.time()
for k | |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""Tests for :module:`flocker.volume.service`."""
from __future__ import absolute_import
import json
import os
from unittest import skipIf
from uuid import uuid4
from zope.interface.verify import verifyObject
from twisted.application.service import IService
from twisted.internet.task import Clock
from twisted.python.filepath import FilePath, Permissions
from twisted.trial.unittest import TestCase
from ..service import (
VolumeService, CreateConfigurationError, Volume,
WAIT_FOR_VOLUME_INTERVAL
)
from ..filesystems.memory import FilesystemStoragePool
from .._ipc import RemoteVolumeManager, LocalVolumeManager
from ...common import FakeNode
from ...testtools import skip_on_broken_permissions
class VolumeServiceStartupTests(TestCase):
    """
    Tests for :class:`VolumeService` startup.
    """
    def test_interface(self):
        """:class:`VolumeService` implements :class:`IService`."""
        volume_service = VolumeService(FilePath(""), None, reactor=Clock())
        self.assertTrue(verifyObject(IService, volume_service))

    def test_no_config_UUID(self):
        """If no config file exists in the given path, a new UUID is chosen."""
        first = VolumeService(FilePath(self.mktemp()), None, reactor=Clock())
        first.startService()
        second = VolumeService(FilePath(self.mktemp()), None,
                               reactor=Clock())
        second.startService()
        self.assertNotEqual(first.uuid, second.uuid)

    def test_no_config_written(self):
        """If no config file exists, a new one is written with the UUID."""
        config_path = FilePath(self.mktemp())
        service = VolumeService(config_path, None, reactor=Clock())
        service.startService()
        self.assertEqual({u"uuid": service.uuid, u"version": 1},
                         json.loads(config_path.getContent()))

    def test_no_config_directory(self):
        """The config file's parent directory is created if it
        doesn't exist."""
        config_path = FilePath(self.mktemp()).child(b"config.json")
        service = VolumeService(config_path, None, reactor=Clock())
        service.startService()
        self.assertTrue(config_path.exists())

    @skipIf(os.getuid() == 0, "root doesn't get permission errors.")
    @skip_on_broken_permissions
    def test_config_makedirs_failed(self):
        """If creating the config directory fails then CreateConfigurationError
        is raised."""
        parent = FilePath(self.mktemp())
        parent.makedirs()
        # Make the directory unwritable so child creation must fail.
        parent.chmod(0)
        self.addCleanup(parent.chmod, 0o777)
        config_path = parent.child(b"dir").child(b"config.json")
        service = VolumeService(config_path, None, reactor=Clock())
        self.assertRaises(CreateConfigurationError, service.startService)

    @skipIf(os.getuid() == 0, "root doesn't get permission errors.")
    @skip_on_broken_permissions
    def test_config_write_failed(self):
        """If writing the config fails then CreateConfigurationError
        is raised."""
        parent = FilePath(self.mktemp())
        parent.makedirs()
        # Make the directory unwritable so the config write must fail.
        parent.chmod(0)
        self.addCleanup(parent.chmod, 0o777)
        config_path = parent.child(b"config.json")
        service = VolumeService(config_path, None, reactor=Clock())
        self.assertRaises(CreateConfigurationError, service.startService)

    def test_config(self):
        """If a config file exists, the UUID is loaded from it."""
        shared_path = self.mktemp()
        first = VolumeService(FilePath(shared_path), None, reactor=Clock())
        first.startService()
        second = VolumeService(FilePath(shared_path), None, reactor=Clock())
        second.startService()
        self.assertEqual(first.uuid, second.uuid)
class VolumeServiceAPITests(TestCase):
    """Tests for the ``VolumeService`` API."""
    def test_create_result(self):
        """``create()`` returns a ``Deferred`` that fires with a ``Volume``."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        d = service.create(u"myvolume")
        self.assertEqual(
            self.successResultOf(d),
            Volume(uuid=service.uuid, name=u"myvolume", _pool=pool))
    def test_create_filesystem(self):
        """``create()`` creates the volume's filesystem."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        volume = self.successResultOf(service.create(u"myvolume"))
        self.assertTrue(pool.get(volume).get_path().isdir())
    @skip_on_broken_permissions
    def test_create_mode(self):
        """The created filesystem is readable/writable/executable by anyone.
        A better alternative will be implemented in
        https://github.com/ClusterHQ/flocker/issues/34
        """
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        volume = self.successResultOf(service.create(u"myvolume"))
        # 0777 is a Python 2 octal literal (this module is Python 2 code).
        self.assertEqual(pool.get(volume).get_path().getPermissions(),
                         Permissions(0777))
    def test_push_different_uuid(self):
        """Pushing a remotely-owned volume results in a ``ValueError``."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        volume = Volume(uuid=u"wronguuid", name=u"blah", _pool=pool)
        self.assertRaises(ValueError, service.push, volume,
                          RemoteVolumeManager(FakeNode()))
    def test_push_writes_filesystem(self):
        """Pushing a locally-owned volume writes its filesystem to the remote
        process."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        volume = self.successResultOf(service.create(u"myvolume"))
        filesystem = volume.get_filesystem()
        filesystem.get_path().child(b"foo").setContent(b"blah")
        with filesystem.reader() as reader:
            data = reader.read()
        node = FakeNode()
        service.push(volume, RemoteVolumeManager(node))
        # Whatever push wrote to the fake node's stdin must be the snapshot.
        self.assertEqual(node.stdin.read(), data)
    def test_receive_local_uuid(self):
        """If a volume with same uuid as service is received, ``ValueError`` is
        raised."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        self.assertRaises(ValueError, service.receive,
                          service.uuid.encode("ascii"), b"lalala", None)
    def test_receive_creates_volume(self):
        """Receiving creates a volume with the given uuid and name."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        volume = self.successResultOf(service.create(u"myvolume"))
        filesystem = volume.get_filesystem()
        manager_uuid = unicode(uuid4())
        with filesystem.reader() as reader:
            service.receive(manager_uuid, u"newvolume", reader)
        new_volume = Volume(uuid=manager_uuid, name=u"newvolume", _pool=pool)
        d = service.enumerate()
        def got_volumes(volumes):
            # Consume the generator into a list. Using `assertIn` on a
            # generator produces bad failure messages.
            volumes = list(volumes)
            self.assertIn(new_volume, volumes)
        d.addCallback(got_volumes)
        return d
    def test_receive_creates_files(self):
        """Receiving creates filesystem with the given push data."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        volume = self.successResultOf(service.create(u"myvolume"))
        filesystem = volume.get_filesystem()
        filesystem.get_path().child(b"afile").setContent(b"lalala")
        manager_uuid = unicode(uuid4())
        with filesystem.reader() as reader:
            service.receive(manager_uuid, u"newvolume", reader)
        new_volume = Volume(uuid=manager_uuid, name=u"newvolume", _pool=pool)
        root = new_volume.get_filesystem().get_path()
        # NOTE(review): assertTrue only checks the first argument is truthy;
        # assertEqual was probably intended here -- confirm.
        self.assertTrue(root.child(b"afile").getContent(), b"lalala")
    def test_enumerate_no_volumes(self):
        """``enumerate()`` returns no volumes when there are no volumes."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        volumes = self.successResultOf(service.enumerate())
        self.assertEqual([], list(volumes))
    def test_enumerate_some_volumes(self):
        """``enumerate()`` returns all volumes previously ``create()``ed."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        names = {u"somevolume", u"anotherone", u"lastone"}
        expected = {
            self.successResultOf(service.create(name))
            for name in names}
        # A second service over the same pool must see the same volumes.
        service2 = VolumeService(FilePath(self.mktemp()), pool,
                                 reactor=Clock())
        service2.startService()
        actual = self.successResultOf(service2.enumerate())
        self.assertEqual(expected, set(actual))
    def test_enumerate_a_volume_with_period(self):
        """``enumerate()`` returns a volume previously ``create()``ed when its
        name includes a period."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        expected = self.successResultOf(service.create(u"some.volume"))
        actual = self.successResultOf(service.enumerate())
        self.assertEqual([expected], list(actual))
    def test_enumerate_skips_other_filesystems(self):
        """
        The result of ``enumerate()`` does not include any volumes representing
        filesystems named outside of the Flocker naming convention (which may
        have been created directly by the user).
        """
        path = FilePath(self.mktemp())
        path.child(b"arbitrary stuff").makedirs()
        path.child(b"stuff\tarbitrary").makedirs()
        path.child(b"non-uuid.stuff").makedirs()
        pool = FilesystemStoragePool(path)
        service = VolumeService(FilePath(self.mktemp()), pool, reactor=Clock())
        service.startService()
        name = u"good volume name"
        self.successResultOf(service.create(name))
        volumes = list(self.successResultOf(service.enumerate()))
        self.assertEqual(
            [Volume(uuid=service.uuid, name=name, _pool=pool)],
            volumes)
    def test_acquire_rejects_local_volume(self):
        """
        ``VolumeService.acquire()`` errbacks with a ``ValueError`` if given a
        locally-owned volume.
        """
        service = VolumeService(FilePath(self.mktemp()),
                                FilesystemStoragePool(FilePath(self.mktemp())),
                                reactor=Clock())
        service.startService()
        self.addCleanup(service.stopService)
        self.failureResultOf(service.acquire(service.uuid, u"blah"),
                             ValueError)
    # Further tests for acquire() are done in
    # test_ipc.make_iremote_volume_manager.
    def create_service(self):
        """
        Create a new ``VolumeService``.
        :return: The ``VolumeService`` created.
        """
        service = VolumeService(FilePath(self.mktemp()),
                                FilesystemStoragePool(FilePath(self.mktemp())),
                                reactor=Clock())
        service.startService()
        self.addCleanup(service.stopService)
        return service
    def test_handoff_rejects_remote_volume(self):
        """
        ``VolumeService.handoff()`` errbacks with a ``ValueError`` if given a
        remotely-owned volume.
        """
        service = self.create_service()
        remote_volume = Volume(uuid=u"remote", name=u"blah",
                               _pool=service._pool)
        self.failureResultOf(service.handoff(remote_volume, None),
                             ValueError)
    def test_handoff_destination_acquires(self):
        """
        ``VolumeService.handoff()`` makes the remote node owner of the volume
        previously owned by the original owner.
        """
        origin_service = self.create_service()
        destination_service = self.create_service()
        created = origin_service.create(u"avolume")
        def got_volume(volume):
            volume.get_filesystem().get_path().child(b"afile").setContent(
                b"exists")
            return origin_service.handoff(
                volume, LocalVolumeManager(destination_service))
        created.addCallback(got_volume)
        def handed_off(_):
            expected_volume = Volume(uuid=destination_service.uuid,
                                     name=u"avolume",
                                     _pool=destination_service._pool)
            root = expected_volume.get_filesystem().get_path()
            self.assertEqual(root.child(b"afile").getContent(), b"exists")
        created.addCallback(handed_off)
        return created
    def test_handoff_changes_uuid(self):
        """
        ```VolumeService.handoff()`` changes the owner UUID of the local
        volume to the new owner's UUID.
        """
        origin_service = self.create_service()
        destination_service = self.create_service()
        created = origin_service.create(u"avolume")
        def got_volume(volume):
            return origin_service.handoff(
                volume, LocalVolumeManager(destination_service))
        created.addCallback(got_volume)
        created.addCallback(lambda _: origin_service.enumerate())
        def got_origin_volumes(volumes):
            expected_volume = Volume(uuid=destination_service.uuid,
                                     name=u"avolume",
                                     _pool=origin_service._pool)
            self.assertEqual(list(volumes), [expected_volume])
        created.addCallback(got_origin_volumes)
        return created
    def test_handoff_preserves_data(self):
        """
        ``VolumeService.handoff()`` preserves the data from the relinquished
        volume in the newly owned resulting volume in the local volume manager.
        """
        origin_service = self.create_service()
        destination_service = self.create_service()
        created = origin_service.create(u"avolume")
        def got_volume(volume):
            volume.get_filesystem().get_path().child(b"afile").setContent(
                b"exists")
            return origin_service.handoff(
                volume, LocalVolumeManager(destination_service))
        created.addCallback(got_volume)
        # NOTE(review): the callback argument here is handoff()'s result, not
        # a volume listing -- the `volumes` parameter name is misleading.
        def handed_off(volumes):
            expected_volume = Volume(uuid=destination_service.uuid,
                                     name=u"avolume",
                                     _pool=origin_service._pool)
            root = expected_volume.get_filesystem().get_path()
            self.assertEqual(root.child(b"afile").getContent(), b"exists")
        created.addCallback(handed_off)
        return created
class VolumeTests(TestCase):
    """Tests for ``Volume``."""
    def test_equality(self):
        """Volumes are equal if they have the same name, uuid and pool."""
        shared_pool = object()
        first = Volume(uuid=u"123", name=u"456", _pool=shared_pool)
        second = Volume(uuid=u"123", name=u"456", _pool=shared_pool)
        self.assertTrue(first == second)
        self.assertFalse(first != second)

    def test_inequality_uuid(self):
        """Volumes are unequal if they have different uuids."""
        shared_pool = object()
        first = Volume(uuid=u"123", name=u"456", _pool=shared_pool)
        second = Volume(uuid=u"123zz", name=u"456", _pool=shared_pool)
        self.assertTrue(first != second)
        self.assertFalse(first == second)

    def test_inequality_name(self):
        """Volumes are unequal if they have different names."""
        shared_pool = object()
        first = Volume(uuid=u"123", name=u"456", _pool=shared_pool)
        second = Volume(uuid=u"123", name=u"456zz", _pool=shared_pool)
        self.assertTrue(first != second)
        self.assertFalse(first == second)

    def test_inequality_pool(self):
        """Volumes are unequal if they have different pools."""
        first = Volume(uuid=u"123", name=u"456", _pool=object())
        second = Volume(uuid=u"123", name=u"456", _pool=object())
        self.assertTrue(first != second)
        self.assertFalse(first == second)

    def test_get_filesystem(self):
        """``Volume.get_filesystem`` returns the filesystem for the volume."""
        pool = FilesystemStoragePool(FilePath(self.mktemp()))
        volume = Volume(uuid=u"123", name=u"456", _pool=pool)
        self.assertEqual(volume.get_filesystem(), pool.get(volume))

    def test_container_name(self):
        """
        The volume's container name adds ``"-data"`` suffix to the volume name.
        This ensures that geard will automatically mount it into a
        container whose name matches that of the volume.
        """
        volume = Volume(uuid=u"123", name=u"456", _pool=object())
        self.assertEqual(volume._container_name, b"456-data")
class VolumeOwnerChangeTests(TestCase):
    """
    Tests for ``Volume.change_owner``.
    """
    def setUp(self):
        """
        Create a ``VolumeService`` pointing at a new pool.
        """
        storage_pool = FilesystemStoragePool(FilePath(self.mktemp()))
        self.service = VolumeService(FilePath(self.mktemp()), storage_pool,
                                     reactor=Clock())
        self.service.startService()
        self.other_uuid = unicode(uuid4())

    def test_return(self):
        """
        ``Volume.change_owner`` returns a ``Deferred`` that fires with a new
        ``Volume`` with the new owner UUID and the same name.
        """
        original = self.successResultOf(self.service.create(u"myvolume"))
        changed = self.successResultOf(original.change_owner(self.other_uuid))
        self.assertEqual({'uuid': changed.uuid, 'name': changed.name},
                         {'uuid': self.other_uuid, 'name': u"myvolume"})

    def test_filesystem(self):
        """
        The filesystem for the new ``Volume`` preserves data from the old one.
        """
        original = self.successResultOf(self.service.create(u"myvolume"))
        original.get_filesystem().get_path().child(b'file').setContent(
            b'content')
        changed = self.successResultOf(original.change_owner(self.other_uuid))
        changed_mount = changed.get_filesystem().get_path()
        self.assertEqual(changed_mount.child(b'file').getContent(), b'content')

    def test_enumerate(self):
        """
        The volumes returned from ``VolumeService.enumerate`` replace the old
        volume with the one returned by ``Volume.change_owner``.
        """
        original = self.successResultOf(self.service.create(u"myvolume"))
        changed = self.successResultOf(original.change_owner(self.other_uuid))
        remaining = set(self.successResultOf(self.service.enumerate()))
        self.assertEqual({changed}, remaining)
class WaitForVolumeTests(TestCase):
""""
Tests for ``VolumeService.wait_for_volume``.
"""
| |
<filename>graphql_compiler/tests/schema_transformation_tests/test_rename_schema.py
# Copyright 2019-present Kensho Technologies, LLC.
from textwrap import dedent
from typing import Set
import unittest
from graphql import parse
from graphql.language.printer import print_ast
from graphql.language.visitor import QUERY_DOCUMENT_KEYS
from graphql.pyutils import snake_to_camel
from ...schema_transformation.rename_schema import RenameSchemaTypesVisitor, rename_schema
from ...schema_transformation.utils import (
CascadingSuppressionError,
InvalidNameError,
NoOpRenamingError,
SchemaRenameNameConflictError,
SchemaTransformError,
)
from ..test_helpers import compare_schema_texts_order_independently
from .input_schema_strings import InputSchemaStrings as ISS
class TestRenameSchema(unittest.TestCase):
def test_rename_visitor_type_coverage(self) -> None:
    """Check that all types are covered without overlap."""
    all_types = {snake_to_camel(node_type) + "Node" for node_type in QUERY_DOCUMENT_KEYS}
    covered: Set[str] = set()
    # Each visitor category must be disjoint from the ones seen so far.
    for category in (
        RenameSchemaTypesVisitor.noop_types,
        RenameSchemaTypesVisitor.rename_types,
    ):
        self.assertTrue(covered.isdisjoint(category))
        covered.update(category)
    # Together the categories must account for every AST node type exactly once.
    self.assertEqual(all_types, covered)
def test_no_rename(self) -> None:
    """Empty renaming maps leave the schema and both reverse maps untouched."""
    result = rename_schema(parse(ISS.basic_schema), {}, {})
    compare_schema_texts_order_independently(
        self, ISS.basic_schema, print_ast(result.schema_ast)
    )
    self.assertEqual({}, result.reverse_name_map)
    self.assertEqual({}, result.reverse_field_name_map)
def test_basic_rename(self) -> None:
    """Renaming one object type renames the type and its root query field."""
    renamed_schema = rename_schema(parse(ISS.basic_schema), {"Human": "NewHuman"}, {})
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        directive @stitch(source_field: String!, sink_field: String!) on FIELD_DEFINITION

        type NewHuman {
          id: String
        }

        type SchemaQuery {
          NewHuman: NewHuman
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    # reverse_name_map maps new type name -> original type name.
    self.assertEqual({"NewHuman": "Human"}, renamed_schema.reverse_name_map)
    self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_type_directive_same_name(self) -> None:
    """Renaming a type leaves a directive sharing that name untouched."""
    # Types, fields, and directives have different namespaces, so this schema and renaming are
    # both valid and the renaming only affects the object type.
    renamed_schema = rename_schema(
        parse(ISS.type_field_directive_same_name_schema), {"stitch": "NewStitch"}, {}
    )
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        directive @stitch(source_field: String!, sink_field: String!) on FIELD_DEFINITION

        type NewStitch {
          stitch: String
        }

        type SchemaQuery {
          NewStitch: NewStitch
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    self.assertEqual({"NewStitch": "stitch"}, renamed_schema.reverse_name_map)
    self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_original_unmodified_rename(self) -> None:
    """Renaming must not mutate the AST that was passed in."""
    tree = parse(ISS.basic_schema)
    rename_schema(tree, {"Human": "NewHuman"}, {})
    # A freshly parsed copy still equals the tree we handed to rename_schema.
    self.assertEqual(tree, parse(ISS.basic_schema))
def test_original_unmodified_suppress(self) -> None:
    """Suppressing a type must not mutate the AST that was passed in."""
    tree = parse(ISS.multiple_objects_schema)
    rename_schema(tree, {"Human": None}, {})
    # A freshly parsed copy still equals the tree we handed to rename_schema.
    self.assertEqual(tree, parse(ISS.multiple_objects_schema))
def test_rename_illegal_noop_unused_renaming(self) -> None:
    """Renaming a type that is absent from the schema is a forbidden no-op."""
    self.assertRaises(
        NoOpRenamingError,
        rename_schema,
        parse(ISS.basic_schema),
        {"Dinosaur": "NewDinosaur"},
        {},
    )
def test_rename_illegal_noop_renamed_to_self(self) -> None:
    """Renaming a type to its own name is a forbidden no-op."""
    self.assertRaises(
        NoOpRenamingError, rename_schema, parse(ISS.basic_schema), {"Human": "Human"}, {}
    )
def test_basic_suppress(self) -> None:
    """Suppressing one type removes it and its root query field from the schema."""
    renamed_schema = rename_schema(parse(ISS.multiple_objects_schema), {"Human": None}, {})
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        type Droid {
          id: String
        }

        type Dog {
          nickname: String
        }

        type SchemaQuery {
          Droid: Droid
          Dog: Dog
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    # Nothing was renamed (only suppressed), so both reverse maps are empty.
    self.assertEqual({}, renamed_schema.reverse_name_map)
    self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_multiple_type_suppress(self) -> None:
    """Several types may be suppressed in a single rename_schema call."""
    renamed_schema = rename_schema(
        parse(ISS.multiple_objects_schema), {"Human": None, "Droid": None}, {}
    )
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        type Dog {
          nickname: String
        }

        type SchemaQuery {
          Dog: Dog
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    # Nothing was renamed (only suppressed), so both reverse maps are empty.
    self.assertEqual({}, renamed_schema.reverse_name_map)
    self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_suppress_illegal_noop_unused_suppression(self) -> None:
    """Suppressing a type that is absent from the schema is a forbidden no-op."""
    self.assertRaises(
        NoOpRenamingError, rename_schema, parse(ISS.multiple_objects_schema), {"Dinosaur": None}, {}
    )
def test_various_illegal_noop_type_renamings_error_message(self) -> None:
    """The no-op error lists all offending type renamings, sorted by name."""
    with self.assertRaises(NoOpRenamingError) as e:
        rename_schema(
            parse(ISS.basic_schema), {"Dinosaur": None, "Human": "Human", "Bird": "Birdie"}, {}
        )
    # The message must enumerate every no-op entry, not just the first one found.
    self.assertEqual(
        "type_renamings cannot have no-op renamings. However, the following "
        "entries exist in the type_renamings argument, which either rename a type to itself or "
        "would rename a type that doesn't exist in the schema, both of which are invalid: "
        "['Bird', 'Dinosaur', 'Human']",
        str(e.exception),
    )
def test_swap_rename(self) -> None:
    """Two types may swap names in a single renaming without clashing."""
    renamed_schema = rename_schema(
        parse(ISS.multiple_objects_schema), {"Human": "Droid", "Droid": "Human"}, {}
    )
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        type Droid {
          name: String
        }

        type Human {
          id: String
        }

        type Dog {
          nickname: String
        }

        type SchemaQuery {
          Droid: Droid
          Human: Human
          Dog: Dog
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    # reverse_name_map maps new type name -> original type name.
    self.assertEqual({"Human": "Droid", "Droid": "Human"}, renamed_schema.reverse_name_map)
    self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_rename_into_suppressed(self) -> None:
    """A type may take the name of another type that is being suppressed."""
    renamed_schema = rename_schema(
        parse(ISS.multiple_objects_schema), {"Human": None, "Droid": "Human"}, {}
    )
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        type Human {
          id: String
        }

        type Dog {
          nickname: String
        }

        type SchemaQuery {
          Human: Human
          Dog: Dog
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    # "Human" in the new schema corresponds to the original "Droid" type.
    self.assertEqual({"Human": "Droid"}, renamed_schema.reverse_name_map)
    self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_cyclic_rename(self) -> None:
    """Three types may rotate names cyclically in a single renaming."""
    renamed_schema = rename_schema(
        parse(ISS.multiple_objects_schema),
        {"Human": "Droid", "Droid": "Dog", "Dog": "Human"},
        {},
    )
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        type Droid {
          name: String
        }

        type Dog {
          id: String
        }

        type Human {
          nickname: String
        }

        type SchemaQuery {
          Droid: Droid
          Dog: Dog
          Human: Human
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    # reverse_name_map maps new type name -> original type name.
    self.assertEqual(
        {"Dog": "Droid", "Human": "Dog", "Droid": "Human"}, renamed_schema.reverse_name_map
    )
    self.assertEqual({}, renamed_schema.reverse_field_name_map)
def test_field_rename(self) -> None:
    """A 1-1 field renaming replaces the field's name within its type."""
    renamed_schema = rename_schema(parse(ISS.basic_schema), {}, {"Human": {"id": {"new_id"}}})
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        directive @stitch(source_field: String!, sink_field: String!) on FIELD_DEFINITION

        type Human {
          new_id: String
        }

        type SchemaQuery {
          Human: Human
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    self.assertEqual({}, renamed_schema.reverse_name_map)
    # reverse_field_name_map maps type name -> {new field name: original field name}.
    self.assertEqual({"Human": {"new_id": "id"}}, renamed_schema.reverse_field_name_map)
def test_field_rename_includes_original_name(self) -> None:
    """A 1-many field renaming may keep the original name alongside new ones."""
    renamed_schema = rename_schema(
        parse(ISS.basic_schema), {}, {"Human": {"id": {"new_id", "id"}}}
    )
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        directive @stitch(source_field: String!, sink_field: String!) on FIELD_DEFINITION

        type Human {
          id: String
          new_id: String
        }

        type SchemaQuery {
          Human: Human
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    self.assertEqual({}, renamed_schema.reverse_name_map)
    # Only the genuinely-new name appears in the reverse map; "id" -> "id" is omitted.
    self.assertEqual({"Human": {"new_id": "id"}}, renamed_schema.reverse_field_name_map)
def test_field_rename_only_affects_names_in_original_schema(self) -> None:
    """Field renamings key off original names, not names produced by other renamings."""
    # "id" -> {"age", "id"} creates a field named "age", but the separate
    # "age" -> {"new_age"} renaming still applies to the ORIGINAL "age" field only.
    renamed_schema = rename_schema(
        parse(ISS.multiple_fields_schema),
        {},
        {"Human": {"id": {"age", "id"}, "age": {"new_age"}}},
    )
    renamed_schema_string = dedent(
        """\
        schema {
          query: SchemaQuery
        }

        directive @output(
          \"\"\"What to designate the output field generated from this property field.\"\"\"
          out_name: String!
        ) on FIELD

        type Human {
          id: String
          name: String
          age: String
          new_age: Int
          pet: Dog
          droid: Droid
        }

        type Dog {
          id: String
          nickname: String
        }

        type Droid {
          id: String
          model: String
        }

        type SchemaQuery {
          Human: Human
          Dog: Dog
          Droid: Droid
        }
        """
    )
    compare_schema_texts_order_independently(
        self, renamed_schema_string, print_ast(renamed_schema.schema_ast)
    )
    self.assertEqual({}, renamed_schema.reverse_name_map)
    self.assertEqual(
        {"Human": {"age": "id", "new_age": "age"}}, renamed_schema.reverse_field_name_map
    )
def test_field_renaming_illegal_noop_unused_renaming(self) -> None:
    """Renaming a field that doesn't exist on the type raises with a precise message."""
    expected_error_message = (
        "The field renamings for the following types would either rename a field to itself or "
        "would rename a field that doesn't exist in the schema, both of which are invalid. The "
        "following is a list of tuples that describes what needs to be fixed for field "
        "renamings. Each tuple is of the form (type_name, field_renamings) where type_name is "
        "the name of the type in the original schema and field_renamings is a list of the "
        "fields that would be no-op renamed: [('Human', ['nonexistent_field'])]"
    )
    with self.assertRaises(NoOpRenamingError) as e:
        rename_schema(
            parse(ISS.multiple_fields_schema), {}, {"Human": {"nonexistent_field": {"foo"}}}
        )
    self.assertEqual(
        expected_error_message,
        str(e.exception),
    )
def test_field_renaming_illegal_noop_renamed_to_self(self) -> None:
    """A 1-1 field renaming to the field's own name raises NoOpRenamingError."""
    # This would be legal if the field named "id" were 1-many renamed to something else as well
    # (e.g. renaming to "id" and "new_id" so that both fields in the renamed schema correspond
    # to the field named "id" in the original schema). However, since this is a 1-1 renaming,
    # this renaming would have no effect.
    expected_error_message = (
        "The field renamings for the following types would either rename a field to itself or "
        "would rename a field that doesn't exist in the schema, both of which are invalid. The "
        "following is a list of tuples that describes what needs to be fixed for field "
        "renamings. Each tuple is of the form (type_name, field_renamings) where type_name is "
        "the name of the type in the original schema and field_renamings is a list of the "
        "fields that would be no-op renamed: [('Human', ['id'])]"
    )
    with self.assertRaises(NoOpRenamingError) as e:
        rename_schema(parse(ISS.multiple_fields_schema), {}, {"Human": {"id": {"id"}}})
    self.assertEqual(
        expected_error_message,
        str(e.exception),
    )
def test_field_renaming_illegal_noop_rename_fields_of_nonexistent_type(self) -> None:
    """Field renamings keyed on a type absent from the schema raise NoOpRenamingError."""
    expected_error_message = (
        "The following entries exist in the field_renamings argument that correspond to names "
        "of object types that either don't exist in the original schema or would get "
        "suppressed. In other words, the field renamings for each of these types would be "
        "no-ops: ['Television']"
    )
    with self.assertRaises(NoOpRenamingError) as e:
        rename_schema(parse(ISS.basic_schema), {}, {"Television": {"id": {"new_id"}}})
    self.assertEqual(
        expected_error_message,
        str(e.exception),
    )
def test_field_renaming_illegal_noop_rename_fields_of_suppressed_type(self) -> None:
# Like field renamings for a type that doesn't exist in the schema, this is illegal because
# the field renamings will have no effect because the type itself gets suppressed.
expected_error_message = (
"The following entries exist in the field_renamings argument that correspond to names "
"of object types that either don't exist in the original schema or would get "
"suppressed. In other words, the field renamings for each of these types would be "
"no-ops: ['Human']"
)
with self.assertRaises(NoOpRenamingError) as e:
rename_schema(
parse(ISS.multiple_objects_schema), | |
<filename>api/registrations/views.py
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import ValidationError, NotFound, PermissionDenied
from framework.auth.oauth_scopes import CoreScopes
from osf.models import AbstractNode, Registration, OSFUser
from api.base import permissions as base_permissions
from api.base import generic_bulk_views as bulk_views
from api.base.filters import ListFilterMixin
from api.base.views import (
JSONAPIBaseView,
BaseChildrenList,
BaseContributorDetail,
BaseContributorList,
BaseNodeLinksDetail,
BaseNodeLinksList,
WaterButlerMixin,
)
from api.base.serializers import HideIfWithdrawal, LinkedRegistrationsRelationshipSerializer
from api.base.serializers import LinkedNodesRelationshipSerializer
from api.base.pagination import NodeContributorPagination
from api.base.parsers import JSONAPIRelationshipParser, JSONAPIMultipleRelationshipsParser
from api.base.parsers import JSONAPIRelationshipParserForRegularJSON, JSONAPIMultipleRelationshipsParserForRegularJSON
from api.base.utils import get_user_auth, default_node_list_permission_queryset, is_bulk_request, is_truthy
from api.comments.serializers import RegistrationCommentSerializer, CommentCreateSerializer
from api.identifiers.serializers import RegistrationIdentifierSerializer
from api.nodes.views import NodeIdentifierList, NodeBibliographicContributorsList
from api.users.views import UserMixin
from api.users.serializers import UserSerializer
from api.nodes.permissions import (
ReadOnlyIfRegistration,
ContributorDetailPermissions,
ContributorOrPublic,
ContributorOrPublicForRelationshipPointers,
AdminOrPublic,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
from api.registrations.serializers import (
RegistrationSerializer,
RegistrationDetailSerializer,
RegistrationContributorsSerializer,
RegistrationStorageProviderSerializer,
)
from api.nodes.filters import NodesFilterMixin
from api.nodes.views import (
NodeMixin, NodeRegistrationsList, NodeLogList,
NodeCommentsList, NodeStorageProvidersList, NodeFilesList, NodeFileDetail,
NodeInstitutionsList, NodeForksList, NodeWikiList, LinkedNodesList,
NodeViewOnlyLinksList, NodeViewOnlyLinkDetail, NodeCitationDetail, NodeCitationStyleDetail,
NodeLinkedRegistrationsList, NodeLinkedByNodesList, NodeLinkedByRegistrationsList, NodeInstitutionsRelationship,
)
from api.registrations.serializers import RegistrationNodeLinksSerializer, RegistrationFileSerializer
from api.wikis.serializers import RegistrationWikiSerializer
from api.base.utils import get_object_or_error
class RegistrationMixin(NodeMixin):
    """Mixin with convenience methods for retrieving the current registration based on the
    current URL. By default, fetches the current registration based on the node_id kwarg.
    """

    serializer_class = RegistrationSerializer
    node_lookup_url_kwarg = 'node_id'

    def get_node(self, check_object_permissions=True):
        """Return the registration identified by the ``node_id`` URL kwarg.

        :param bool check_object_permissions: when True, run DRF object-permission
            checks against the loaded node (may raise PermissionDenied).
        :raises NotFound: if the node is a collection or is not a registration.
        """
        node = get_object_or_error(
            AbstractNode,
            self.kwargs[self.node_lookup_url_kwarg],
            self.request,
            display_name='node',
        )
        # Nodes that are folders/collections are treated as a separate resource, so if the client
        # requests a collection through a node endpoint, we return a 404
        if node.is_collection or not node.is_registration:
            raise NotFound
        # May raise a permission denied
        if check_object_permissions:
            self.check_object_permissions(self.request, node)
        return node
class RegistrationList(JSONAPIBaseView, generics.ListAPIView, bulk_views.BulkUpdateJSONAPIView, NodesFilterMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_list).
    """

    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]

    serializer_class = RegistrationSerializer
    view_category = 'registrations'
    view_name = 'registration-list'

    ordering = ('-modified',)
    model_class = Registration

    parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)

    # overrides BulkUpdateJSONAPIView
    def get_serializer_class(self):
        """
        Use RegistrationDetailSerializer which requires 'id'
        """
        if self.request.method in ('PUT', 'PATCH'):
            return RegistrationDetailSerializer
        else:
            return RegistrationSerializer

    # overrides NodesFilterMixin
    def get_default_queryset(self):
        """Base queryset: registrations the requesting user is allowed to list."""
        return default_node_list_permission_queryset(user=self.request.user, model_cls=Registration)

    def is_blacklisted(self):
        """Return True if any requested filter field is hidden on withdrawn registrations.

        Simplified from a nested loop that carried unused ``key``/``data`` loop
        variables into a single ``any()`` over the parsed query params; behavior
        is unchanged.
        """
        query_params = self.parse_query_params(self.request.query_params)
        return any(
            isinstance(self.serializer_class._declared_fields.get(field_name), HideIfWithdrawal)
            for field_names in query_params.values()
            for field_name in field_names
        )

    # overrides ListAPIView, ListBulkCreateJSONAPIView
    def get_queryset(self):
        # For bulk requests, queryset is formed from request body.
        if is_bulk_request(self.request):
            auth = get_user_auth(self.request)
            registrations = Registration.objects.filter(guids___id__in=[registration['id'] for registration in self.request.data])

            # If skip_uneditable=True in query_params, skip nodes for which the user
            # does not have EDIT permissions.
            if is_truthy(self.request.query_params.get('skip_uneditable', False)):
                has_permission = registrations.filter(contributor__user_id=auth.user.id, contributor__write=True).values_list('guids___id', flat=True)
                return Registration.objects.filter(guids___id__in=has_permission)

            for registration in registrations:
                if not registration.can_edit(auth):
                    raise PermissionDenied
            return registrations

        blacklisted = self.is_blacklisted()
        registrations = self.get_queryset_from_request()

        # If attempting to filter on a blacklisted field, exclude withdrawals.
        if blacklisted:
            registrations = registrations.exclude(retraction__isnull=False)

        return registrations.select_related(
            'root',
            'root__embargo',
            'root__embargo_termination_approval',
            'root__retraction',
            'root__registration_approval',
        )
class RegistrationDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, RegistrationMixin, WaterButlerMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_read).
    """

    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        ContributorOrPublic,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]

    serializer_class = RegistrationDetailSerializer
    view_category = 'registrations'
    view_name = 'registration-detail'

    parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)

    # overrides RetrieveAPIView
    def get_object(self):
        """Return the current registration; reject non-registration nodes."""
        registration = self.get_node()
        if not registration.is_registration:
            raise ValidationError('This is not a registration.')
        return registration

    def get_renderer_context(self):
        """Optionally add a templated_by_count meta when ?related_counts= is truthy."""
        context = super(RegistrationDetail, self).get_renderer_context()
        show_counts = is_truthy(self.request.query_params.get('related_counts', False))
        if show_counts:
            registration = self.get_object()
            context['meta'] = {
                'templated_by_count': registration.templated_list.count(),
            }
        return context
class RegistrationContributorsList(BaseContributorList, RegistrationMixin, UserMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_contributors_list).
    """

    view_category = 'registrations'
    view_name = 'registration-contributors'
    pagination_class = NodeContributorPagination
    serializer_class = RegistrationContributorsSerializer

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]

    permission_classes = (
        ContributorDetailPermissions,
        drf_permissions.IsAuthenticatedOrReadOnly,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
    )

    def get_default_queryset(self):
        """All contributors of the registration, with user guids prefetched."""
        # Permissions are skipped here; list-level permission classes already apply.
        node = self.get_node(check_object_permissions=False)
        return node.contributor_set.all().include('user__guids')
class RegistrationContributorDetail(BaseContributorDetail, RegistrationMixin, UserMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_contributors_read).
    """

    view_category = 'registrations'
    view_name = 'registration-contributor-detail'
    serializer_class = RegistrationContributorsSerializer

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]

    permission_classes = (
        ContributorDetailPermissions,
        drf_permissions.IsAuthenticatedOrReadOnly,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
    )
class RegistrationBibliographicContributorsList(NodeBibliographicContributorsList, RegistrationMixin):
    """List of a registration's bibliographic (citable) contributors."""

    pagination_class = NodeContributorPagination
    serializer_class = RegistrationContributorsSerializer
    view_category = 'registrations'
    view_name = 'registration-bibliographic-contributors'
class RegistrationImplicitContributorsList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin, RegistrationMixin):
    """Users with implicit (inherited admin) access to the registration."""

    permission_classes = (
        AdminOrPublic,
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.NODE_CONTRIBUTORS_READ]
    required_write_scopes = [CoreScopes.NULL]  # read-only endpoint

    model_class = OSFUser
    serializer_class = UserSerializer

    view_category = 'registrations'
    view_name = 'registration-implicit-contributors'
    ordering = ('_order',)  # default ordering

    def get_default_queryset(self):
        """Admin contributors inherited from parent nodes."""
        node = self.get_node()
        return node.parent_admin_contributors

    def get_queryset(self):
        queryset = self.get_queryset_from_request()
        return queryset
class RegistrationChildrenList(BaseChildrenList, generics.ListAPIView, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_children_list).
    """

    view_category = 'registrations'
    view_name = 'registration-children'
    serializer_class = RegistrationSerializer

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NULL]  # read-only endpoint

    model_class = Registration
class RegistrationCitationDetail(NodeCitationDetail, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_citations_list).
    """

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]

    view_category = 'registrations'
    view_name = 'registration-citation'
class RegistrationCitationStyleDetail(NodeCitationStyleDetail, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_citation_read).
    """

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]

    view_category = 'registrations'
    view_name = 'registration-style-citation'
class RegistrationForksList(NodeForksList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_forks_list).
    """

    view_category = 'registrations'
    view_name = 'registration-forks'
class RegistrationCommentsList(NodeCommentsList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_comments_list).
    """

    serializer_class = RegistrationCommentSerializer
    view_category = 'registrations'
    view_name = 'registration-comments'

    def get_serializer_class(self):
        # POST creates a comment; all other methods read existing comments.
        if self.request.method != 'POST':
            return RegistrationCommentSerializer
        return CommentCreateSerializer
class RegistrationLogList(NodeLogList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_logs_list).
    """

    view_category = 'registrations'
    view_name = 'registration-logs'
class RegistrationStorageProvidersList(NodeStorageProvidersList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_providers_list).
    """

    serializer_class = RegistrationStorageProviderSerializer

    view_category = 'registrations'
    view_name = 'registration-storage-providers'
class RegistrationNodeLinksList(BaseNodeLinksList, RegistrationMixin):
    """Node Links to other nodes. *Writeable*.

    Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
    Node Links are a direct reference to the node that they point to.

    ##Node Link Attributes
    `type` is "node_links"

        None

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Relationships

    ### Target Node

    This endpoint shows the target node detail and is automatically embedded.

    ##Actions

    ###Adding Node Links
        Method:        POST
        URL:           /links/self
        Query Params:  <none>
        Body (JSON): {
                       "data": {
                          "type": "node_links",                  # required
                          "relationships": {
                            "nodes": {
                              "data": {
                                "type": "nodes",                 # required
                                "id": "{target_node_id}",        # required
                              }
                            }
                          }
                       }
                    }
        Success:       201 CREATED + node link representation

    To add a node link (a pointer to another node), issue a POST request to this endpoint.  This effectively creates a
    relationship between the node and the target node.  The target node must be described as a relationship object with
    a "data" member, containing the nodes `type` and the target node `id`.

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    #This Request/Response
    """

    view_category = 'registrations'
    view_name = 'registration-pointers'
    serializer_class = RegistrationNodeLinksSerializer

    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        ContributorOrPublic,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
        ExcludeWithdrawals,
        NodeLinksShowIfVersion,
    )

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NULL]

    # TODO: This class doesn't exist
    # model_class = Pointer
class RegistrationNodeLinksDetail(BaseNodeLinksDetail, RegistrationMixin):
    """Node Link details. *Writeable*.

    Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
    Node Links are a direct reference to the node that they point to.

    ##Attributes
    `type` is "node_links"

        None

    ##Links

    *None*

    ##Relationships

    ###Target node

    This endpoint shows the target node detail and is automatically embedded.

    ##Actions

    ###Remove Node Link

        Method:        DELETE
        URL:           /links/self
        Query Params:  <none>
        Success:       204 No Content

    To remove a node link from a node, issue a DELETE request to the `self` link.  This request will remove the
    relationship between the node and the target node, not the nodes themselves.

    ##Query Params

    *None*.

    #This Request/Response
    """

    view_category = 'registrations'
    view_name = 'registration-pointer-detail'
    serializer_class = RegistrationNodeLinksSerializer

    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ExcludeWithdrawals,
        NodeLinksShowIfVersion,
    )

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NULL]

    # TODO: this class doesn't exist
    # model_class = Pointer

    # overrides RetrieveAPIView
    def get_object(self):
        # NOTE(review): this returns the *registration* itself, not the node-link
        # pointer named by the URL — presumably serving only to validate/404 on
        # non-registrations. Confirm against BaseNodeLinksDetail before changing.
        registration = self.get_node()
        if not registration.is_registration:
            raise ValidationError('This is not a registration.')
        return registration
class RegistrationLinkedByNodesList(NodeLinkedByNodesList, RegistrationMixin):
    """List of nodes that link to this registration."""

    view_category = 'registrations'
    view_name = 'registration-linked-by-nodes'
class RegistrationLinkedByRegistrationsList(NodeLinkedByRegistrationsList, RegistrationMixin):
    """List of registrations that link to this registration."""

    view_category = 'registrations'
    view_name = 'registration-linked-by-registrations'
class RegistrationRegistrationsList(NodeRegistrationsList, RegistrationMixin):
    """List of registrations of a registration."""

    view_category = 'registrations'
    view_name = 'registration-registrations'
class RegistrationFilesList(NodeFilesList, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_files_list).
    """

    view_category = 'registrations'
    view_name = 'registration-files'

    serializer_class = RegistrationFileSerializer
class RegistrationFileDetail(NodeFileDetail, RegistrationMixin):
    """The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/registrations_files_read).
    """

    view_category = 'registrations'
    view_name = 'registration-file-detail'

    serializer_class = RegistrationFileSerializer
class RegistrationInstitutionsList(NodeInstitutionsList, RegistrationMixin):
"""The documentation for this endpoint can | |
bragger braggers braille brainchild
brainchildren brainteaser brainteasers brakeman brakemen bramble
brambles brashly brashness brattier brattiest bratty bravura bravuras
brawler brawlers brawniness brazenly brazenness breadbasket
breadbaskets breadfruit breadfruits breakage breakages breaker
breakers breakup breakups breastbone breastbones breastplate
breastplates breaststroke breaststrokes breastwork breastworks
breathable breathier breathiest breathlessly breathlessness
breathtakingly breathy breech breeches breezily breeziness breviaries
breviary brewer brewers brickbat brickbats bricklaying bridgehead
bridgeheads bridgework briefings briefness brier briers brig brigand
brigandage brigands brigantine brigantines brigs brilliancy brimful
brindled brinkmanship briquette briquettes brisket briskets briskness
bristlier bristliest bristly brittleness broadcaster broadcasters
broadcloth broadloom broadness broadsword broadswords brogan brogans
brogue brogues brokenhearted brokerage brokerages bromide bromides
bromine bronchi bronchial bronchus brontosaur brontosaurs brontosaurus
brontosauruses brooder brooders broomstick broomsticks brothel
brothels brotherliness brouhaha brouhahas brownish brownout brownouts
brownstone brownstones browser browsers brr bruin bruins bruiser
bruisers brunet brunets brushwood brusquely brusqueness brutishly
buccaneer buccaneered buccaneering buccaneers buckboard buckboards
bucketful bucketfuls buckeye buckeyes buckler bucklers buckram bucksaw
bucksaws buckshot buckskin buckskins buckteeth bucktooth bucktoothed
buckwheat bucolic bucolics buddings budgerigar budgerigars budgetary
budgie budgies buffoonery bugaboo bugaboos bugbear bugbears buildup
buildups bulgier bulgiest bulgy bulimia bulimic bulimics bulkhead
bulkheads bulkiness bulletproof bulletproofed bulletproofing
bulletproofs bullfighting bullfinch bullfinches bullheaded bullhorn
bullhorns bullish bullock bullocks bullpen bullpens bullring bullrings
bullshit bullshits bullshitted bullshitting bulrush bulrushes bulwark
bulwarks bumble bumbled bumbler bumblers bumbles bumbling bumblings
bummers bumpkin bumpkins bumptious bunged bunghole bungholes bunging
bungs bunkhouse bunkhouses bunkum bunt bunted bunting buntings bunts
buoyantly bur burdock bureaucratically burg burgeon burgeoned
burgeoning burgeons burgher burghers burgled burgles burgling burgs
burlesque burlesqued burlesques burlesquing burliness burnoose
burnooses burnout burnouts burrito burritos burs bursars bursitis
busbies busboy busboys busby bushiness bushings bushman bushmen
bushwhack bushwhacked bushwhacker bushwhackers bushwhacking bushwhacks
businesslike buster busters busyness busywork butane butch butches
butterfat butterfingers butterier butteries butteriest butternut
butternuts buttocked buttocking buyout buyouts buzzword buzzwords
bylaw bylaws byline bylines byplay byproduct byproducts byword bywords
c cabal cabals cabana cabanas cabinetmaker cabinetmakers cablecast
cablecasting cablecasts cablegram cablegrams caboodle cachet cacheted
cacheting cachets cacophonies cacophonous cacophony cadaver cadaverous
cadavers caddish cadenza cadenzas cadge cadged cadger cadgers cadges
cadging cadmium cadre cadres cads caducei caduceus caesura caesuras
caftan caftans cagily caginess cahoot cahoots cairn cairns caisson
caissons cajolery calabash calabashes calamine calamined calamines
calamining calamitous calcified calcifies calcify calcifying calcine
calcined calcines calcining calcite calculable calfskin calibrator
calibrators caliph caliphate caliphates caliphs calisthenic
calisthenics calligrapher calligraphers calliope calliopes callously
callousness callower callowest caloric calorific calumniate
calumniated calumniates calumniating calumnies calumny calved calving
calypso calypsos calyx calyxes camber cambered cambering cambers
cambium cambiums cambric camcorder camcorders camellia camellias
cameraman cameramen camerawoman camerawomen camisole camisoles
campanile campaniles campfire campfires campground campgrounds camphor
campier campiest campsite campsites campy cams camshaft camshafts
canape canapes canard canards canasta cancan cancans cancerous
candelabra candelabras candelabrum candidness candlelight cankerous
cannabis cannabises cannibalistic cannily canniness cannonade
cannonaded cannonades cannonading cannonball cannonballed
cannonballing cannonballs canoeist canoeists canonicals cantankerously
cantankerousness cantata cantatas canted canticle canticles cantilever
cantilevered cantilevering cantilevers canting canto canton cantons
cantor cantors cantos cants canvasback canvasbacks capacious
capaciously capaciousness caparison caparisoned caparisoning
caparisons capitalistic capitol capitols capitulation capitulations
caplet caplets capon capons cappuccino cappuccinos capriciousness
capstan capstans captaincies captaincy captious captivation carafe
carafes carapace carapaces caraway caraways carbide carbides carbine
carbines carbonate carbonated carbonates carbonating carbonation
carboy carboys carbuncle carbuncles carcase carcinogen carcinogens
carcinoma carcinomas cardiogram cardiograms cardiologist cardiologists
cardiology cardiopulmonary cardiovascular cardsharp cardsharps careen
careened careening careens caregiver caregivers caret carets careworn
carfare caricaturist caricaturists caries carillon carillonned
carillonning carillons carjack carjacked carjacker carjackers
carjacking carjackings carjacks carmine carmines carnally carnelian
carnelians carom caromed caroming caroms carotid carotids carousal
carousals carousel carousels carouser carousers carpal carpals carpel
carpels carpetbag carpetbagged carpetbagger carpetbaggers
carpetbagging carpetbags carpi carport carports carpus carrel carrels
carryall carryalls carryout carryouts carsick carsickness
cartilaginous carver carvers carvings caryatid caryatids casein
caseload caseloads casement casements casework caseworker caseworkers
cassava cassavas cassia cassias cassock cassocks castanet castanets
castigation castigator castigators castration castrations casualness
casuist casuistry casuists catacomb catacombs catafalque catafalques
catalepsy cataleptic cataleptics catalpa catalpas catalysis catalyst
catalysts catalytic catamaran catamarans catarrh catastrophically
catatonic catatonics catbird catbirds catboat catboats catchall
catchalls catcher catchers catchphrase catchword catchwords caterings
caterwaul caterwauled caterwauling caterwauls catgut catharses
catharsis cathartic cathartics catheter catheters cathode cathodes
catholicity cation cations catkin catkins cattail cattails cattier
cattiest cattily cattiness cattleman cattlemen catty caudal cauldron
cauldrons causalities causally causals causation causative causeless
caustically cautionary cautiousness cavalcade cavalcades cavalryman
cavalrymen caveatted caveatting caveman cavemen cavernous cavil cavils
cayenne cedilla cedillas celebrant celebrants celebratory celerity
celesta celestas cellulite celluloid cenotaph cenotaphs censer censers
censorious censoriously centaur centaurs centenarian centenarians
centenaries centenary centigrade centime centimes centrifugal
centrifuged centrifuges centrifuging centripetal centrist centrists
centurion centurions cephalic cephalics ceramics cerebellum
cerebellums cerebra cerebrum cerebrums ceremonially ceremoniously
cerise certifiable certification certifications certitude cerulean
cervices cervix cesarean cesareans cession cessions cesspool cesspools
cetacean cetaceans chaffinch chaffinches chainsawed chainsawing
chainsaws chairlift chairlifts chairmanship chairwoman chairwomen
chaise chaises chalkboard chalkboards chamberlain chamberlains
chambermaid chambermaids chambray chamois chamomile chamomiles chancel
chancelleries chancellery chancels chanceries chancery chancier
chanciest chancy chandler chandlers changeling changeovers chanter
chanters chantey chanteys chanticleer chanticleers chaotically
chaparral chaparrals chaplaincies chaplaincy chaplet chaplets charade
charades charbroil charbroiled charbroiling charbroils chargers
charier chariest charily charioteer charioteered charioteering
charioteers charmer charmers charmingly chartreuse charwoman charwomen
chary chaser chasers chastely chasuble chasubles chateau chateaus
chateaux chatelaine chatelaines chattel chattels chatterer chatterers
chattily chattiness chauvinism chauvinistic cheapskate cheapskates
cheater cheaters checklist checklists checkmate checkmated checkmates
checkmating checkouts checkpoints checkroom checkrooms cheddar
cheekbone cheekbones cheekier cheekiest cheekily cheekiness cheeky
cheerily cheeriness cheerleader cheerleaders cheerless cheerlessly
cheerlessness cheerses cheeseburger cheeseburgers cheesecake
cheesecakes cheesier cheesiest cheesy chemise chemises chemotherapy
chenille cheroot cheroots cherubic chervil chessboard chessboards
chessman chessmen chevron chevrons chewer chewers chiaroscuro
chicaneries chicanery chichi chichier chichiest chichis chickadee
chickadees chickenpox chickpea chickpeas chickweed chicle chicories
chicory chiffon chigger chiggers chignon chignons chilblain chilblains
childbearing childcare childishly childishness childless childlessness
childproof childproofed childproofing childproofs chillers chilliness
chillings chimera chimeras chimerical chinchilla chinchillas chino
chinos chinstrap chinstraps chintzier chintziest chintzy chiropodist
chiropodists chiropody chiropractic chiropractics chirrup chirruped
chirruping chirrups chiseler chiselers chit chitchat chitchats
chitchatted chitchatting chitin chits chitterlings chivalrously chive
chived chives chiving chloride chlorides chlorinate chlorinated
chlorinates chlorinating chlorination chlorofluorocarbon
chlorofluorocarbons chock chocked chocking chocks choker chokers
choler choleric chomp chomped chomping chomps choppily choppiness
chopstick chopsticks chorale chorales choreograph choreographed
choreographic choreographing choreographs chorister choristers
chromatic chromatics chronicler chroniclers chronometer chronometers
chrysalis chrysalises chubbiness chuckhole chuckholes chumminess chump
chumps chunkiness churchgoer churchgoers churchman churchmen
churchyard churchyards churl churlish churlishly churlishness churls
chutney chutzpah cicada cicadas cicatrice cicatrices cigarillo
cigarillos cilantro cilia cilium cinchona cinchonas cincture cinctures
cinematic cinematographer cinematographers cinematography cinnabar
circadian circlet circlets circuitously circularity circumflexes
circumlocution circumlocutions circumnavigate circumnavigated
circumnavigates circumnavigating circumnavigation circumnavigations
circumscribe circumscribed circumscribes circumscribing
circumscription circumscriptions circumspect circumspection
circumstantially cirrhosis cirrus citadel citadels citizenry citron
citronella citrons civet civets civilly civvies claimant claimants
clambake clambakes clamminess clamorous clampdown clampdowns
clandestinely clannish clapboard clapboarded clapboarding clapboards
clareted clareting clarets clarinetist clarinetists clarion clarioned
clarioning clarions classically classicism classicist classicists
classier classiest classifiable classifieds classiness classless
claustrophobic clavichord clavichords clavicle clavicles clayey
clayier clayiest cleanings cleanness cleanup cleanups clearinghouse
clearinghouses clematis clematises clement clerestories clerestory
clergywoman clergywomen clew clewed clewing clews cliched cliffhanger
cliffhangers climatic clincher clinchers clingier clingiest clingy
clinician clinicians clinker clinkers clipper clippers cliquish
clitoral clitorises cloakroom cloakrooms clobber clobbered clobbering
clobbers cloche cloches clodhopper clodhoppers clomp clomped clomping
clomps cloned cloning clop clopped clopping clops closefisted
closemouthed closeout closeouts clothesline clotheslined clotheslines
clotheslining clothier clothiers cloture clotures cloudiness cloudless
cloverleaf cloverleafs clownish clownishly clownishness cloy cloyed
cloying cloys clubfeet clubfoot clunk clunked clunker clunkers
clunkier clunkiest clunking clunks clunky coachman coachmen coagulant
coagulants coalescence coatings coauthor coauthored coauthoring
coauthors cobbled cobbles cobblestone cobblestones cobbling cocci
coccis coccus coccyges coccyx cochlea cochleae cochleas cockade
cockades cockamamie cockatoo cockatoos cockerel cockerels cockfight
cockfights cockily cockiness cockle cockles cockleshell cockleshells
cockney cockneys cockscomb cockscombs cocksucker cocksuckers cocksure
coda codas coddle coddled coddles coddling codeine codependency
codependent codependents codex codfish codfishes codger codgers
codices codicil codicils codification codifications codified codifies
codify codifying coed coeds coeducation coeducational coequal coequals
coercive coeval coevals coffeecake coffeecakes coffeehouse
coffeehouses coffeepot coffeepots cogently cogitate cogitated
cogitates cogitating cogitation cognate cognates cognition cognomen
cognomens cogwheel cogwheels cohabit cohabitation cohabited cohabiting
cohabits cohere cohered coheres cohering cohesion cohesive cohesively
cohesiveness cohort cohorts coif coiffed coiffing coiffure coiffured
coiffures coiffuring coifs coincident coital coitus cola colas coled
coleslaw colicky coliseum coliseums colitis collaboratives collations
collectible collectibles collectivism collectivist collectivists
colleen colleens collegian collegians collier collieries colliers
colliery colling collocate collocated collocates collocating
collocation collocations colloid colloids colloquially colloquies
colloquium colloquiums colloquy collude colluded colludes colluding
collusive cologne colognes colonialism colonialist colonialists
colonist colonists colonnade colonnades colossally colossi colossus
cols coltish columbine columbines columned columnist columnists
comatose combative combo combos combustibility comebacks comedic
comedienne comediennes comedown comedowns comeliness comer comers
comeuppance comeuppances comfier comfiest comforter comforters
comfortingly comfy comically comity commemorative commendably
commensurable commensurate commentate commentated commentates
commentating commingle commingled commingles commingling commissar
commissariat commissariats commissaries commissars commissary
committal committals commode commodes commodious commoners communally
communicant communicants communicators communistic commutation
commutations compactly compactness compactor compactors companionable
companionway companionways comparability comparably compassionately
compatibly compellingly compendium compendiums compensatory
competencies competency competitively competitiveness complacence
complacently complainant complainants complainer complainers
complaisance complaisant | |
<filename>misc/user_agents.py
USER_AGENTS = {
# Browser
'Chrome': [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"Mozilla/5.0 (iPhone; CPU iPhone OS 15_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/98.0.4758.97 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPad; CPU OS 15_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/98.0.4758.97 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPod; CPU iPhone OS 15_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/98.0.4758.97 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (Linux; Android 10) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 10; SM-A205U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 10; SM-A102U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 10; SM-G960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 10; SM-N960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 10; LM-Q720) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 10; LM-X420) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 10; LM-Q710(FGN)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36"
],
'Firefox': [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12.2; rv:97.0) Gecko/20100101 Firefox/97.0",
"Mozilla/5.0 (X11; Linux i686; rv:97.0) Gecko/20100101 Firefox/97.0",
"Mozilla/5.0 (Linux x86_64; rv:97.0) Gecko/20100101 Firefox/97.0",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:97.0) Gecko/20100101 Firefox/97.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:97.0) Gecko/20100101 Firefox/97.0",
"Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:97.0) Gecko/20100101 Firefox/97.0",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_2_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/97.0 Mobile/15E148 Safari/605.1.15",
"Mozilla/5.0 (iPad; CPU OS 12_2_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/97.0 Mobile/15E148 Safari/605.1.15",
"Mozilla/5.0 (iPod touch; CPU iPhone OS 12_2_1 like Mac OS X) AppleWebKit/604.5.6 (KHTML, like Gecko) FxiOS/97.0 Mobile/15E148 Safari/605.1.15",
"Mozilla/5.0 (Android 12; Mobile; rv:68.0) Gecko/68.0 Firefox/97.0",
"Mozilla/5.0 (Android 12; Mobile; LG-M255; rv:97.0) Gecko/97.0 Firefox/97.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12.2; rv:91.0) Gecko/20100101 Firefox/91.0",
"Mozilla/5.0 (X11; Linux i686; rv:91.0) Gecko/20100101 Firefox/91.0",
"Mozilla/5.0 (Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:91.0) Gecko/20100101 Firefox/91.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0",
"Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0"
],
'Safari': [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 Safari/605.1.15",
"Mozilla/5.0 (iPhone; CPU iPhone OS 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPad; CPU OS 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPod touch; CPU iPhone 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 Mobile/15E148 Safari/604.1"
],
'InternetExplorer': [
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)",
"Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 6.2; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 10.0; Trident/7.0; rv:11.0) like Gecko"
],
'Edge': [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/97.0.1072.69",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/97.0.1072.69",
"Mozilla/5.0 (Linux; Android 10; HD1913) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36 EdgA/97.0.1072.69",
"Mozilla/5.0 (Linux; Android 10; SM-G973F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36 EdgA/97.0.1072.69",
"Mozilla/5.0 (Linux; Android 10; Pixel 3 XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36 EdgA/97.0.1072.69",
"Mozilla/5.0 (Linux; Android 10; ONEPLUS A6003) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36 EdgA/97.0.1072.69",
"Mozilla/5.0 (iPhone; CPU iPhone OS 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 EdgiOS/97.1072.69 Mobile/15E148 Safari/605.1.15",
"Mozilla/5.0 (Windows Mobile 10; Android 10.0; Microsoft; Lumia 950XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Mobile Safari/537.36 Edge/40.15254.603",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; Xbox; Xbox One) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edge/44.18363.8131"
],
'Opera': [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 OPR/83.0.4254.27",
"Mozilla/5.0 (Windows NT 10.0; WOW64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 OPR/83.0.4254.27",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 OPR/83.0.4254.27",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 OPR/83.0.4254.27",
"Mozilla/5.0 (Linux; Android 10; VOG-L29) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36 OPR/63.3.3216.58675",
"Mozilla/5.0 (Linux; Android 10; SM-G970F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36 OPR/63.3.3216.58675",
"Mozilla/5.0 (Linux; Android 10; SM-N975F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36 OPR/63.3.3216.58675"
],
'Vivaldi': [
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Vivaldi/4.3",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Vivaldi/4.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Vivaldi/4.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Vivaldi/4.3",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Vivaldi/4.3"
],
'Yandex': [
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 YaBrowser/22.1.0 Yowser/2.5 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 YaBrowser/22.1.0 Yowser/2.5 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 YaBrowser/22.1.0 Yowser/2.5 Safari/537.36",
"Mozilla/5.0 (iPhone; CPU iPhone OS 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 YaBrowser/22.1.6.583 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPad; CPU OS 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 YaBrowser/22.1.6.583 Mobile/15E148 Safari/605.1",
"Mozilla/5.0 (iPod touch; CPU iPhone 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 YaBrowser/22.1.6.583 Mobile/15E148 Safari/605.1",
"Mozilla/5.0 (Linux; arm_64; Android 12; SM-G965F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 YaBrowser/21.3.4.59 Mobile Safari/537.36"
],
# OS
'ChromeOS': [
"Mozilla/5.0 (X11; CrOS x86_64 14388.61.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.107 Safari/537.36",
"Mozilla/5.0 (X11; CrOS armv7l 14388.61.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.107 Safari/537.36",
"Mozilla/5.0 (X11; CrOS aarch64 14388.61.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.107 Safari/537.36",
"Mozilla/5.0 (X11; CrOS x86_64 14388.61.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.107 Safari/537.36",
"Mozilla/5.0 (X11; CrOS armv7l 14388.61.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.107 Safari/537.36",
"Mozilla/5.0 (X11; CrOS aarch64 14388.61.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.107 Safari/537.36"
],
'macOS': [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12.2; rv:97.0) Gecko/20100101 Firefox/97.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Vivaldi/4.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 12_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/97.0.1072.69"
],
'iOS': [
"Mozilla/5.0 (iPhone; CPU iPhone OS 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/98.0.4758.97 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 15_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/97.0 Mobile/15E148 Safari/605.1.15"
],
'Windows': [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/97.0.1072.69",
"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Vivaldi/4.3",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Vivaldi/4.3"
],
'Android': [
"Mozilla/5.0 (Linux; Android 12) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 12; SM-A205U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 12; SM-A102U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; Android 12; SM-G960U) AppleWebKit/537.36 (KHTML, | |
<gh_stars>1-10
import unittest
import mahjongpy
class TestPlayer(unittest.TestCase):
HANDS1 = mahjongpy.MahjongTile.make_hands_set('22456', '333567', '234') #タンヤオ
HANDS2 = mahjongpy.MahjongTile.make_hands_set('888', '333678', '123','11') #役無し
HANDS3 = mahjongpy.MahjongTile.make_hands_set('22345', '567', '123567') #平和
HANDS4 = mahjongpy.MahjongTile.make_hands_set('345345', '123', '56788') #一盃口
HANDS5 = mahjongpy.MahjongTile.make_hands_set('1155', '77', '3399', '22', '33') #七対子
HANDS6 = mahjongpy.MahjongTile.make_hands_set('123', '77', '', '', '111222333') #大三元
HANDS7 = mahjongpy.MahjongTile.make_hands_set('19', '19', '19', '1234', '1233') #国士無双
HANDS8 = mahjongpy.MahjongTile.make_hands_set('1155', '77', '3399', '22', '32') #七対子(テンパイ)
HANDS9 = mahjongpy.MahjongTile.make_hands_set('1199', '77', '3399', '22', '32') #九種九牌
HANDS10 = mahjongpy.MahjongTile.make_hands_set('123456789', '', '', '222', '33') #一気通貫
HANDS11 = mahjongpy.MahjongTile.make_hands_set('123', '123', '123', '222', '33') #三色同順
HANDS12 = mahjongpy.MahjongTile.make_hands_set('222', '222', '222', '222', '33') #三色同刻
HANDS13 = mahjongpy.MahjongTile.make_hands_set('123', '123999', '', '222', '33') #チャンタ
HANDS14 = mahjongpy.MahjongTile.make_hands_set('345345', '345345', '', '', '33') #二盃口
HANDS15 = mahjongpy.MahjongTile.make_hands_set('123', '123999', '11789') #ジュンチャン
HANDS16 = mahjongpy.MahjongTile.make_hands_set('111456678', '', '', '333', '22') #混一色
HANDS17 = mahjongpy.MahjongTile.make_hands_set('11122245667899') #清一色
HANDS18 = mahjongpy.MahjongTile.make_hands_set('111666', '444', '11', '444') #四暗刻
HANDS19 = mahjongpy.MahjongTile.make_hands_set('11122345678999') #九蓮宝燈
HANDS20 = mahjongpy.MahjongTile.make_hands_set('1346', '36', '578', '14', '112') #何もなし
def test_make_player(self):
p = mahjongpy.MahjongPlayer(hands=self.HANDS1)
self.assertEqual(p.points, 25000)
self.assertEqual(p.turn, 0)
self.assertEqual(p.riichi_turn, None)
self.assertFalse(p.oya)
self.assertFalse(p.is_riichi)
self.assertFalse(p.is_tumo)
self.assertFalse(p.is_ron)
self.assertRaises(ValueError, mahjongpy.MahjongPlayer())
def test_shanten(self):
h = mahjongpy.MahjongTile.make_hands_set('11345', '267', '123567')
p = mahjongpy.MahjongPlayer(hands=h)
self.assertEqual(p.shanten(), 1)
h = mahjongpy.MahjongTile.make_hands_set('11345', '267', '123569')
p = mahjongpy.MahjongPlayer(hands=h)
self.assertEqual(p.shanten(), 2)
h = mahjongpy.MahjongTile.make_hands_set('11345', '266', '123569')
p = mahjongpy.MahjongPlayer(hands=h)
self.assertEqual(p.shanten(), 2)
h = mahjongpy.MahjongTile.make_hands_set('22345', '267', '123567')
p = mahjongpy.MahjongPlayer(hands=h)
self.assertEqual(p.shanten(), 1)
h = mahjongpy.MahjongTile.make_hands_set('223348', '567', '12366')
p = mahjongpy.MahjongPlayer(hands=h)
self.assertEqual(p.shanten(), 1)
h = mahjongpy.MahjongTile.make_hands_set('223348', '567', '12338')
p = mahjongpy.MahjongPlayer(hands=h)
self.assertEqual(p.shanten(), 2)
h= mahjongpy.MahjongTile.make_hands_set('19', '129', '19', '1234', '123')
p = mahjongpy.MahjongPlayer(hands=h)
self.assertEqual(p.shanten(), 1)
h= mahjongpy.MahjongTile.make_hands_set('123', '19', '19', '1234', '123')
p = mahjongpy.MahjongPlayer(hands=h)
self.assertEqual(p.shanten(), 2)
h= mahjongpy.MahjongTile.make_hands_set('19', '123', '19', '1234', '123')
p = mahjongpy.MahjongPlayer(hands=h)
self.assertEqual(p.shanten(), 2)
    def test_riichi(self):
        # Declaring riichi flips the is_riichi flag.
        p = mahjongpy.MahjongPlayer(hands=self.HANDS1)
        self.assertFalse(p.is_riichi)
        p.riichi()
        self.assertTrue(p.is_riichi)
        # NOTE(review): assumes riichi() declared on turn 0 also counts as
        # double riichi -- confirm against MahjongPlayer semantics.
        self.assertTrue(p.is_doubleriichi)
    def test_tenpai(self):
        # HANDS8 is one tile from a seven-pairs win, so it should be tenpai.
        p = mahjongpy.MahjongPlayer(hands=self.HANDS8)
        self.assertTrue(p.is_tenpai())
        # NOTE(review): HANDS20 is labelled "nothing" in the fixtures; verify
        # that asserting tenpai here is intentional and not a copy of the
        # assertion above.
        p = mahjongpy.MahjongPlayer(hands=self.HANDS20)
        self.assertTrue(p.is_tenpai())
    def test_furiten(self):
        # A tile already in the player's own discards makes a tenpai hand
        # furiten; presumably HANDS8 waits on the 'tyun' dragon -- confirm.
        p = mahjongpy.MahjongPlayer(hands=self.HANDS8, discards=[mahjongpy.MahjongTile('tyun')])
        self.assertTrue(p.is_furiten())
    def test_kyusyukyuhai(self):
        # HANDS9 is the nine-terminals/honors fixture, which should qualify
        # for the kyuushu kyuuhai abortive draw.
        p = mahjongpy.MahjongPlayer(hands=self.HANDS9)
        self.assertTrue(p.is_kyusyukyuhai())
    def test_hora(self):
        """is_hora() recognizes winning hands: closed fixtures, melded hands, and kans."""
        # Every pre-built fixture below is a complete (winning) hand.
        # NOTE(review): HANDS6 appears twice in this list while HANDS2/HANDS18
        # are absent - confirm the duplication is intentional.
        hands = [self.HANDS1, self.HANDS3, self.HANDS4, self.HANDS5, self.HANDS6, self.HANDS7, self.HANDS6, \
        self.HANDS10, self.HANDS11, self.HANDS12, self.HANDS13, self.HANDS14, self.HANDS15, self.HANDS16, \
        self.HANDS17, self.HANDS19]
        for i in hands:
            p = mahjongpy.MahjongPlayer(hands=i)
            self.assertTrue(p.is_hora())
        # Win completed with one called run meld (checkamount=False allows
        # partial tile sets when building hand/meld fragments).
        h = mahjongpy.MahjongTile.make_hands_set('123', '123', '', '222', '11',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '123',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m)
        self.assertTrue(p.is_hora())
        # All-triplet hand with one called pon (the meld doubles as a minko).
        h = mahjongpy.MahjongTile.make_hands_set('222', '222', '', '222', '11',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '222',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        # Triplets plus a called run.
        h = mahjongpy.MahjongTile.make_hands_set('222', '333', '', '222', '11',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '567',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m)
        self.assertTrue(p.is_hora())
        # Two called pons on top of concealed triplets.
        h = mahjongpy.MahjongTile.make_hands_set('111', '', '777', '', '11',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '777',checkamount=False)]
        m.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '333',checkamount=False))
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        # Hand with one open kan (minkan) and two closed kans (ankans).
        h = mahjongpy.MahjongTile.make_hands_set('111', '55',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('444', '', '', '', '',checkamount=False)]
        m.append(mahjongpy.MahjongTile.make_hands_set('', '', '999', '', '',checkamount=False))
        m.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '333',checkamount=False))
        minkans = [mahjongpy.MahjongTile.make_hands_set('4444', '', '', '', '',checkamount=False)]
        ankans = [mahjongpy.MahjongTile.make_hands_set('', '', '9999', '', '',checkamount=False)]
        ankans.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '3333',checkamount=False))
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkans=minkans, ankans=ankans)
        self.assertTrue(p.is_hora())
        # Honor-heavy hand with a called dragon pon.
        h = mahjongpy.MahjongTile.make_hands_set('222', '345', '', '', '11222',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '', '333',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        # Terminals/honors only; also expected to score sanankou and toitoi.
        h = mahjongpy.MahjongTile.make_hands_set('111999', '', '111', '', '11',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '444',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        self.assertIn('sanankou', p.yakus())
        self.assertIn('toitoi', p.yakus())
        # Same shape fully concealed and won by self-draw -> suankou.
        h = mahjongpy.MahjongTile.make_hands_set('111999', '', '111', '444', '11',checkamount=False)
        p = mahjongpy.MahjongPlayer(hands=h, is_tumo=True)
        self.assertTrue(p.is_hora())
        self.assertIn('suankou', p.yakus())
        # Big-three-dragons shape with one called dragon pon.
        h = mahjongpy.MahjongTile.make_hands_set('22', '345', '', '', '111222',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '', '333',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        # Little-four-winds shape.
        h = mahjongpy.MahjongTile.make_hands_set('', '345', '', '11222333',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '444',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        # Big-four-winds shape.
        h = mahjongpy.MahjongTile.make_hands_set('', '33', '', '111222333',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '444',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        # All-green (ryuisou) shape.
        h = mahjongpy.MahjongTile.make_hands_set('', '234444666', '', '', '22',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '888', '', '',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        # All-honors (tuisou) shape.
        h = mahjongpy.MahjongTile.make_hands_set('', '', '', '111333', '22233',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '444',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        # All-terminals (chinroutou) shape.
        h = mahjongpy.MahjongTile.make_hands_set('11999', '111', '999', '', '',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '999', '', '',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
        self.assertTrue(p.is_hora())
        # Four kans (two open, two closed).
        h = mahjongpy.MahjongTile.make_hands_set('', '55',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('666', '', '', '', '',checkamount=False)]
        m.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '222',checkamount=False))
        m.append(mahjongpy.MahjongTile.make_hands_set('', '111', '', '', '',checkamount=False))
        m.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '333',checkamount=False))
        minkans = [mahjongpy.MahjongTile.make_hands_set('6666', '', '', '', '',checkamount=False)]
        minkans.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '2222',checkamount=False))
        ankans = [mahjongpy.MahjongTile.make_hands_set('', '1111', '', '', '',checkamount=False)]
        ankans.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '3333',checkamount=False))
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkans=minkans, ankans=ankans)
        self.assertTrue(p.is_hora())
        # Run-based hand finished with a called chi.
        h = mahjongpy.MahjongTile.make_hands_set('123456', '234', '99', '', '',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('789', '', '', '',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m)
        self.assertTrue(p.is_hora())
        # Melds plus a closed kan taken from the meld list (m[-1]).
        h = mahjongpy.MahjongTile.make_hands_set('123', '', '', '', '22',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '123', '', '',checkamount=False)]
        m.append(mahjongpy.MahjongTile.make_hands_set('', '789', '', '',checkamount=False))
        m.append(mahjongpy.MahjongTile.make_hands_set('', '', '999', '',checkamount=False))
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, ankans=[m[-1]])
        self.assertTrue(p.is_hora())
        # Same idea with the closed kan in the middle of the meld list (m[-2]).
        h = mahjongpy.MahjongTile.make_hands_set('11123', '', '', '', '',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '123', '', '',checkamount=False)]
        m.append(mahjongpy.MahjongTile.make_hands_set('', '999', '', '',checkamount=False))
        m.append(mahjongpy.MahjongTile.make_hands_set('', '', '789', '',checkamount=False))
        p = mahjongpy.MahjongPlayer(hands=h, melds=m, ankans=[m[-2]])
        self.assertTrue(p.is_hora())
        # Mixed runs/triplets with a single called run.
        h = mahjongpy.MahjongTile.make_hands_set('', '111456', '', '222', '11',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '678', '', '',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m)
        self.assertTrue(p.is_hora())
        # Single-suit hand completed with a called run.
        h = mahjongpy.MahjongTile.make_hands_set('', '', '11122245699', '', '',checkamount=False)
        m = [mahjongpy.MahjongTile.make_hands_set('', '', '678', '',checkamount=False)]
        p = mahjongpy.MahjongPlayer(hands=h, melds=m)
        self.assertTrue(p.is_hora())
def test_displayed_doras(self):
t = mahjongpy.MahjongTable()
t.dora_tiles = [mahjongpy.MahjongTile('manzu', 1)]
p = mahjongpy.MahjongPlayer(hands=self.HANDS1, table=t)
self.assertEqual(p.displayed_doras(), 2)
def test_akadoras(self):
p = mahjongpy.MahjongPlayer(hands=self.HANDS1)
p.hands[0].akadora = True
p.hands[1].akadora = True
self.assertEqual(p.akadoras(), 2)
def test_shuntus(self):
p = mahjongpy.MahjongPlayer(hands=self.HANDS3)
self.assertEqual(len(p.shuntus()), 4)
def test_ankos(self):
p = mahjongpy.MahjongPlayer(hands=self.HANDS6)
self.assertEqual(len(p.ankos()), 3)
def test_minkos(self):
p = mahjongpy.MahjongPlayer(hands=self.HANDS1)
self.assertEqual(len(p.minkos), 9)
def test_ankans(self):
p = mahjongpy.MahjongPlayer(hands=self.HANDS1)
self.assertEqual(len(p.ankans), 9)
def test_minkans(self):
p = mahjongpy.MahjongPlayer(hands=self.HANDS1)
self.assertEqual(len(p.minkans), 9)
def test_kantus(self):
p = mahjongpy.MahjongPlayer(hands=self.HANDS1)
self.assertEqual(len(p.kantus()), 9)
def test_yakus(self):
p = mahjongpy.MahjongPlayer(hands=self.HANDS2, turn=5)
self.assertEqual(p.yakus(), [])
p = mahjongpy.MahjongPlayer(hands=self.HANDS18, is_tumo=True)
self.assertIn('suankou', p.yakus())
yakus = {0:'tanyao', 1:'pinfu', 2:'ipeikou', 3:'chitoitu', 4:'daisangen', 5:'kokushimusou', 6:'yakuhai', \
7:'ikkituukan', 8:'sansyokudouzyun', 9:'sansyokudoukou', 10:'chanta', 11:'ryanpeikou', \
12:'zyuntyan', 13:'honitu', 14:'chinitu', 15:'tyurenboutou'}
hands = [self.HANDS1, self.HANDS3, self.HANDS4, self.HANDS5, self.HANDS6, self.HANDS7, self.HANDS6, \
self.HANDS10, self.HANDS11, self.HANDS12, self.HANDS13, self.HANDS14, self.HANDS15, self.HANDS16, \
self.HANDS17, self.HANDS19]
for i in yakus:
p = mahjongpy.MahjongPlayer(hands=hands[i])
self.assertIn(yakus[i], p.yakus())
def test_yakus_with_melds(self):
h = mahjongpy.MahjongTile.make_hands_set('123', '123', '', '222', '11',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '', '123',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m)
self.assertIn('sansyokudouzyun', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('222', '222', '', '222', '11',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '', '222',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('sansyokudoukou', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('222', '333', '', '222', '11',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '', '567',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m)
self.assertIn('sanankou', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('111', '', '777', '', '11',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '777',checkamount=False)]
m.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '333',checkamount=False))
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('toitoi', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('111', '55',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('444', '', '', '', '',checkamount=False)]
m.append(mahjongpy.MahjongTile.make_hands_set('', '', '999', '', '',checkamount=False))
m.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '333',checkamount=False))
minkans = [mahjongpy.MahjongTile.make_hands_set('4444', '', '', '', '',checkamount=False)]
ankans = [mahjongpy.MahjongTile.make_hands_set('', '', '9999', '', '',checkamount=False)]
ankans.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '3333',checkamount=False))
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkans=minkans, ankans=ankans)
self.assertIn('sankantu', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('222', '345', '', '', '11222',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '', '333',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('syousangen', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('111999', '', '111', '', '11',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '444',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('honroutou', p.yakus())
self.assertIn('sanankou', p.yakus())
self.assertIn('toitoi', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('111999', '', '111', '444', '11',checkamount=False)
p = mahjongpy.MahjongPlayer(hands=h, is_tumo=True)
self.assertIn('honroutou', p.yakus())
self.assertIn('suankou', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('22', '345', '', '', '111222',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '', '333',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('daisangen', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('', '345', '', '11222333',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '444',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('syoususi', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('', '33', '', '111222333',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '444',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('daisusi', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('', '234444666', '', '', '22',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '888', '', '',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('ryuisou', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('', '', '', '111333', '22233',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '', '', '444',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('tuisou', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('11999', '111', '999', '', '',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('', '999', '', '',checkamount=False)]
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkos=m)
self.assertIn('chinroutou', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('', '55',checkamount=False)
m = [mahjongpy.MahjongTile.make_hands_set('666', '', '', '', '',checkamount=False)]
m.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '222',checkamount=False))
m.append(mahjongpy.MahjongTile.make_hands_set('', '111', '', '', '',checkamount=False))
m.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '333',checkamount=False))
minkans = [mahjongpy.MahjongTile.make_hands_set('6666', '', '', '', '',checkamount=False)]
minkans.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '2222',checkamount=False))
ankans = [mahjongpy.MahjongTile.make_hands_set('', '1111', '', '', '',checkamount=False)]
ankans.append(mahjongpy.MahjongTile.make_hands_set('', '', '', '', '3333',checkamount=False))
p = mahjongpy.MahjongPlayer(hands=h, melds=m, minkans=minkans, ankans=ankans)
self.assertIn('sukantu', p.yakus())
h = mahjongpy.MahjongTile.make_hands_set('123456', '234', '99', '', '',checkamount=False)
m = | |
"https://4new.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/4newNetwork",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/4newcoin",
"youtube": ""
}
},
"BTM": {
"symbol": "BTM",
"address": "0xcB97e65F07DA24D46BcDD078EBebd7C6E6E3d750",
"decimals": 8,
"name": "Bytom",
"ens_address": "",
"website": "https://bytom.io",
"logo": {
"src": "https://etherscan.io/token/images/bytom_28.png",
"width": 28,
"height": 28,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/bytom",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/Bytom_Official",
"youtube": ""
}
},
"PAX": {
"symbol": "PAX",
"name": "Paxos Standard (PAX)",
"type": "ERC20",
"address": "0x8E870D67F660D95d5be530380D0eC0bd388289E1",
"ens_address": "",
"decimals": 18,
"website": "https://www.paxos.com/standard",
"logo": {
"src": "https://static.standard.paxos.com/logos/square_120_120.png",
"width": "120",
"height": "120",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://www.paxos.com/contact/"
},
"social": {
"blog": "https://www.paxos.com/news-blogs/engineering/",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/paxosglobal",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/paxos",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/paxosglobal",
"youtube": ""
}
},
"PNK": {
"symbol": "PNK",
"name": "Pinakion",
"type": "ERC20",
"address": "0x93ED3FBe21207Ec2E8f2d3c3de6e058Cb73Bc04d",
"ens_address": "",
"decimals": 18,
"website": "https://kleros.io",
"logo": {
"src": "https://kleros.io/favicon-32x32.png",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://kleros.io"
},
"social": {
"blog": "https://blog.kleros.io/",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/kleros",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "http://slack.kleros.io/",
"telegram": "",
"twitter": "https://twitter.com/Kleros_io",
"youtube": ""
}
},
"HORSE": {
"symbol": "HORSE",
"address": "0x5B0751713b2527d7f002c0c4e2a37e1219610A6B",
"decimals": 18,
"name": "Ethorse",
"ens_address": "",
"website": "https://ethorse.com",
"logo": {
"src": "https://ethorse.com/images/ethorse-logo.png",
"width": "480",
"height": "695",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@ethorse",
"chat": "https://discordapp.com/invite/vdTXRmT",
"facebook": "",
"forum": "https://bitcointalk.org/index.php?topic=2573978.0",
"github": "https://github.com/ethorse",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/Ethorse/",
"slack": "",
"telegram": "https://telegram.me/ethorse",
"twitter": "https://twitter.com/EthorseTeam",
"youtube": "https://www.youtube.com/channel/UC2lOnpQUPVE13E_Mpp5TVsA"
}
},
"IOST": {
"symbol": "IOST",
"address": "0xFA1a856Cfa3409CFa145Fa4e20Eb270dF3EB21ab",
"decimals": 18,
"name": "IOSToken",
"ens_address": "",
"website": "https://iost.io/",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@iostoken",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/officialios",
"twitter": "https://twitter.com/iostoken",
"youtube": ""
}
},
"CRT": {
"symbol": "CRT",
"address": "0xF0da1186a4977226b9135d0613ee72e229EC3F4d",
"decimals": 18,
"name": "CreamtoeCoin",
"ens_address": "",
"website": "http://creamtoecoin.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"ENTRP": {
"symbol": "ENTRP",
"address": "0x5BC7e5f0Ab8b2E10D2D0a3F21739FCe62459aeF3",
"decimals": 18,
"name": "Hut34 Entropy Token",
"ens_address": "",
"website": "https://hut34.io/",
"logo": {
"src": "https://hut34.io/images/comms/Hut34-logo-orange.jpg",
"width": "300",
"height": "300"
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@hut34project",
"chat": "",
"facebook": "https://www.facebook.com/hut34project",
"forum": "",
"github": "https://github.com/hut34",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/18132913/",
"reddit": "",
"slack": "",
"telegram": "https://t.me/hut34",
"twitter": "https://twitter.com/hut34project",
"youtube": "https://www.youtube.com/channel/UCiemFFyT2Sv2ulrRQfNI89Q"
}
},
"KEY": {
"symbol": "KEY",
"address": "0x4CC19356f2D37338b9802aa8E8fc58B0373296E7",
"decimals": 18,
"name": "SelfKey",
"ens_address": "",
"website": "https://selfkey.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/selfkey",
"chat": "",
"facebook": "https://www.facebook.com/SelfKeyNetwork",
"forum": "https://bitcointalk.org/index.php?topic=2310691",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/18232422",
"reddit": "https://www.reddit.com/r/selfkey",
"slack": "",
"telegram": "https://t.me/selfkeyfoundation",
"twitter": "http://twitter.com/SelfKey",
"youtube": "https://www.youtube.com/channel/UCsilze3-MhbCY3_QkKul3PQ"
}
},
"RHOC": {
"symbol": "RHOC",
"name": "RChain",
"type": "ERC20",
"address": "0x168296bb09e24A88805CB9c33356536B980D3fC5",
"ens_address": "",
"decimals": 8,
"website": "https://www.rchain.coop",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/rchain-cooperative",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/RChain",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/rchain_coop",
"youtube": ""
}
},
"MAD": {
"symbol": "MAD",
"address": "0x5B09A0371C1DA44A8E24D36Bf5DEb1141a84d875",
"decimals": 18,
"name": "MAD",
"ens_address": "",
"website": "https://madnetwork.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"NCC": {
"symbol": "NCC",
"name": "NeedsCoin",
"type": "ERC20",
"address": "0x9344b383b1D59b5ce3468B234DAB43C7190ba735",
"ens_address": "",
"decimals": 18,
"website": "https://needscoin.info",
"logo": {
"src": "https://res.cloudinary.com/cloudimgstorage/image/upload/v1542398850/NeedsCoinLogo/ncc.png",
"width": "128",
"height": "128",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://needscoin.info/contact"
},
"social": {
"blog": "https://steemit.com/blockchain/@okane81/needscoin-token",
"chat": "",
"facebook": "https://facebook.com/needscoin",
"forum": "https://bitcointalk.org/index.php?topic=4862385",
"github": "https://github.com/NeedsCoinProject",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/NeedsCoin",
"slack": "",
"telegram": "https://t.me/ethernetcash",
"twitter": "https://twitter.com/NeedsCoin",
"youtube": "https://www.youtube.com/watch?v=H-svrLj7mPA"
}
},
"VERI": {
"symbol": "VERI",
"address": "0x8f3470A7388c05eE4e7AF3d01D8C722b0FF52374",
"decimals": 18,
"name": "Veritaseum",
"ens_address": "",
"website": "https://veritas.veritaseum.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/Veritaseuminc",
"youtube": ""
}
},
"BC": {
"symbol": "BC",
"name": "Block-Chain.com",
"type": "ERC20",
"address": "0x2ecB13A8c458c379c4d9a7259e202De03c8F3D19",
"ens_address": "",
"decimals": 18,
"website": "https://block-chain.com",
"logo": {
"src": "https://www.dropbox.com/s/quqlk3xywx6982r/BC.png",
"width": "128",
"height": "128",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://block-chain.com"
},
"social": {
"blog": "",
"chat": "https://t.me/BCtoken",
"facebook": "https://www.facebook.com/blockchaincom",
"forum": "https://bitcointalk.org/index.php?topic=4797839.new",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/block_chaincom",
"twitter": "https://twitter.com/Block_Chain_com",
"youtube": ""
}
},
"WOLK": {
"symbol": "WOLK",
"address": "0xF6B55acBBC49f4524Aa48D19281A9A77c54DE10f",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://www.wolk.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://blog.wolk.com",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/wolktoken",
"slack": "",
"telegram": "https://t.me/joinchat/GkePIg2-n4y5VQn4epAQOw",
"twitter": "https://twitter.com/wolkinc",
"youtube": ""
}
},
"CXO": {
"symbol": "CXO",
"address": "0xb6EE9668771a79be7967ee29a63D4184F8097143",
"decimals": 18,
"name": "CargoX",
"ens_address": "",
"website": "https://cargox.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/cargoxio",
"chat": "",
"facebook": "https://www.facebook.com/cargox.io",
"forum": "",
"github": "https://github.com/cargoxio",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://reddit.com/r/cargoxio",
"slack": "",
"telegram": "https://t.me/joinchat/GAKhBQ48675fRRMEd-kLcw",
"twitter": "https://twitter.com/cargoxio",
"youtube": ""
}
},
"CFI": {
"symbol": "CFI",
"address": "0x12FEF5e57bF45873Cd9B62E9DBd7BFb99e32D73e",
"decimals": 18,
"name": "Cofound.it",
"ens_address": "",
"website": "https://cofound.it/",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://support.cofound.it/"
},
"social": {
"blog": "https://blog.cofound.it/",
"chat": "https://t.me/cofoundit",
"facebook": "https://www.facebook.com/CofounditEcosystem/",
"forum": "",
"github": "",
"gitter": "",
"instagram": "https://www.instagram.com/cofound.it/",
"linkedin": "https://www.linkedin.com/company/cofound.it/",
"reddit": "https://www.reddit.com/r/cofoundit/",
"slack": "",
"telegram": "https://t.me/cofounditnews",
"twitter": "https://twitter.com/cofound_it",
"youtube": "https://www.youtube.com/c/Cofoundit"
}
},
"RED": {
"symbol": "RED",
"address": "0x76960Dccd5a1fe799F7c29bE9F19ceB4627aEb2f",
"decimals": 18,
"name": "Red Community Token",
"ens_address": "",
"website": "https://ico.red-lang.org",
"logo": {
"src": "https://static.red-lang.org/pr/logo/red-token-logo_sq_128x128.png",
"width": "128",
"height": "128",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://t.me/redofficial"
},
"social": {
"blog": "https://www.red-lang.org",
"chat": "",
"facebook": "https://www.facebook.com/groups/redlanguage/about",
"forum": "",
"github": "https://github.com/red",
"gitter": "https://gitter.im/red/red/welcome",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/redlang",
"slack": "",
"telegram": "https://t.me/redofficial",
"twitter": "https://twitter.com/red_lang",
"youtube": ""
}
},
"BCPT": {
"symbol": "BCPT",
"address": "0x1c4481750daa5Ff521A2a7490d9981eD46465Dbd",
"decimals": 18,
"name": "BlockMason Credit Protocol Token",
"ens_address": "",
"website": "https://blockmason.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/@blockmason",
"chat": "",
"facebook": "https://www.facebook.com/blockmasonio",
"forum": "https://bitcointalk.org/index.php?topic=2129993.0",
"github": "https://github.com/blockmason",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/blockmason",
"slack": "",
"telegram": "https://t.me/blockmason",
"twitter": "https://twitter.com/blockmasonio",
"youtube": "https://www.youtube.com/channel/UCqv0UBWjgjM5JZkxdQR7DYw"
}
},
"SNIP": {
"symbol": "SNIP",
"address": "0x44F588aEeB8C44471439D1270B3603c66a9262F1",
"decimals": 18,
"name": "SNIP",
"ens_address": "",
"website": "https://www.snip.network",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/sniptoday",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/sniptoday",
"twitter": "https://www.twitter.com/sniptoday",
"youtube": ""
}
},
"GROW": {
"symbol": "GROW",
"address": "0x0a9A9ce600D08BF9b76F49FA4e7b38A67EBEB1E6",
"decimals": 8,
"name": "Growchain",
"ens_address": "",
"website": "http://www.growchain.us",
"logo": {
"src": "http://www.growchain.us/data/logo.png",
"width": "300",
"height": "286",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>"
},
"social": {
"blog": "",
"chat": "",
| |
Whether to setup the flux ramp at the end.
"""
if show_plot:
plt.ion()
else:
plt.ioff()
if reset_rate_khz is None:
reset_rate_khz = self._reset_rate_khz
self.log('reset_rate_khz is None. ',
f'Using default: {reset_rate_khz}')
n_channels = self.get_number_channels(band)
old_fb = self.get_feedback_enable_array(band)
# Turn off feedback
self.set_feedback_enable_array(band, np.zeros_like(old_fb))
d, df, sync = self.tracking_setup(band,0, reset_rate_khz=reset_rate_khz,
fraction_full_scale=fraction_full_scale, make_plot=False,
save_plot=False, show_plot=False, lms_enable1=False,
lms_enable2=False, lms_enable3=False, flux_ramp=flux_ramp,
setup_flux_ramp=setup_flux_ramp)
nsamp, n_chan = np.shape(df)
dd = np.ravel(np.where(np.diff(sync[:,0]) !=0))
first_idx = dd[0]//n_channels
second_idx = dd[4]//n_channels
dt = int(second_idx-first_idx) # In slow samples
n_fr = int(len(sync[:,0])/n_channels/dt)
reset_idx = np.arange(first_idx, n_fr*dt + first_idx+1, dt)
# Reset to the previous FB state
self.set_feedback_enable_array(band, old_fb)
fs = self.get_digitizer_frequency_mhz(band) * 1.0E6 /2/n_channels
# Only plot channels that are on - group by subband
chan = self.which_on(band)
freq = np.zeros(len(chan), dtype=float)
subband = np.zeros(len(chan), dtype=int)
for i, c in enumerate(chan):
freq[i] = self.channel_to_freq(band, c)
(subband[i], _) = self.freq_to_subband(band, freq[i])
unique_subband = np.unique(subband)
cm = plt.get_cmap('viridis')
timestamp = self.get_timestamp()
self.log('Making plots...')
scale = 1.0E3
for sb in unique_subband:
idx = np.ravel(np.where(subband == sb))
chs = chan[idx]
# fig, ax = plt.subplots(1, 2, figsize=(8,4), sharey=True)
fig = plt.figure(figsize=(8,6))
gs = GridSpec(2,2)
ax0 = fig.add_subplot(gs[0,:])
ax1 = fig.add_subplot(gs[1,0])
ax2 = fig.add_subplot(gs[1,1])
for i, c in enumerate(chs):
color = cm(i/len(chs))
ax0.plot(np.arange(nsamp)/fs*scale,
df[:,c], label=f'ch {c}',
color=color)
holder = np.zeros((n_fr-1, dt))
for i in np.arange(n_fr-1):
holder[i] = df[first_idx+dt*i:first_idx+dt*(i+1),c]
ds = np.mean(holder, axis=0)
ax1.plot(np.arange(len(ds))/fs*scale, ds, color=color)
ff, pp = signal.welch(df[:,c], fs=fs)
ax2.semilogy(ff/1.0E3, pp, color=color)
for k in reset_idx:
ax0.axvline(k/fs*scale, color='k', alpha=.6, linestyle=':')
ax0.legend(loc='upper left')
ax1.set_xlabel('Time [ms]')
ax2.set_xlabel('Freq [kHz]')
fig.suptitle(f'Band {band} Subband {sb}')
if save_plot:
save_name = timestamp
if not flux_ramp:
save_name = save_name + '_no_FR'
save_name = (
save_name +
f'_b{band}_sb{sb:03}_flux_ramp_check.png')
path = os.path.join(self.plot_dir, save_name)
plt.savefig(path, bbox_inches='tight')
self.pub.register_file(path, 'flux_ramp', plot=True)
if not show_plot:
plt.close()
return d, df, sync
def _feedback_frac_to_feedback(self, band, frac):
"""
Convenience function for convervting from feedback_start/end_frac
to feedback_start/end.
Args
----
band : int
The 500 MHz band
frac : float
The fraction of the flux ramp to start/end
Ret
---
feedback : int
The feedback value to put into the PV
"""
channel_frequency_mhz = self.get_channel_frequency_mhz(band)
digitizer_frequency_mhz = self.get_digitizer_frequency_mhz(band)
return int(frac*(self.get_ramp_max_cnt()+1)/
(digitizer_frequency_mhz/channel_frequency_mhz/2. ) )
def _feedback_to_feedback_frac(self, band, feedback):
"""
Convenience function for converting from feedback_start/end to
feedback_start/end_frac.
Args
----
band : int
The 500 MHz band
feedback : int
The feedback value to put into the PV
Ret
---
frac : float
The fraction of the flux ramp to start/end
"""
channel_frequency_mhz = self.get_channel_frequency_mhz(band)
digitizer_frequency_mhz = self.get_digitizer_frequency_mhz(band)
return feedback * digitizer_frequency_mhz / (2 *
channel_frequency_mhz *(self.get_ramp_max_cnt()+1))
@set_action()
def tracking_setup(self, band, channel=None, reset_rate_khz=None,
write_log=False, make_plot=False, save_plot=True, show_plot=True,
nsamp=2**19, lms_freq_hz=None, meas_lms_freq=False,
meas_flux_ramp_amp=False, n_phi0=4, flux_ramp=True,
fraction_full_scale=None, lms_enable1=True, lms_enable2=True,
lms_enable3=True, feedback_gain=None, lms_gain=None, return_data=True,
new_epics_root=None, feedback_start_frac=None,
feedback_end_frac=None, setup_flux_ramp=True, plotname_append=''):
"""
The function to start tracking. Starts the flux ramp and if requested
attempts to measure the lms (demodulation) frequency. Otherwise this
just tracks at the input lms frequency. This will also make plots for
the channels listed in {channel} input.
Args
----
band : int
The band number.
channel : int or int array or None, optional, default None
The channels to plot.
reset_rate_khz : float or None, optional, default None
The flux ramp frequency.
write_log : bool, optional, default False
Whether to write output to the log.
make_plot : bool, optional, default False
Whether to make plots.
save_plot : bool, optional, default True
Whether to save plots.
show_plot : bool, optional, default True
Whether to display the plot.
nsamp : int, optional, default 2**19
The number of samples to take of the flux ramp.
lms_freq_hz : float or None, optional, default None
The frequency of the tracking algorithm.
meas_lms_freq : bool, optional, default False
Whether or not to try to estimate the carrier rate using
the flux_mod2 function. lms_freq_hz must be None.
meas_flux_ramp_amp : bool, optional, default False
Whether or not to adjust fraction_full_scale to get the number of
phi0 defined by n_phi0. lms_freq_hz must be None for this to work.
n_phi0 : float, optional, default 4
The number of phi0 to match using meas_flux_ramp_amp.
flux_ramp : bool, optional, default True
Whether to turn on flux ramp.
fraction_full_scale : float or None, optional, default None
The flux ramp amplitude, as a fraction of the maximum.
lms_enable1 : bool, optional, default True
Whether to use the first harmonic for tracking.
lms_enable2 : bool, optional, default True
Whether to use the second harmonic for tracking.
lms_enable3 : bool, optional, default True
Whether to use the third harmonic for tracking.
feedback_gain : Int16, optional, default None.
The tracking feedback gain parameter.
This is applied to all channels within a band.
1024 corresponds to approx 2kHz bandwidth.
2048 corresponds to approx 4kHz bandwidth.
Tune this parameter to track the demodulated band
of interest (0...2kHz for 4kHz flux ramp).
High gains may affect noise performance and will
eventually cause the tracking loop to go unstable.
lms_gain : int or None, optional, default None
** Internal register dynamic range adjustment **
** Use with caution - you probably want feedback_gain**
Select which bits to slice from accumulation over
a flux ramp period.
Tracking feedback parameters are integrated over a flux
ramp period at 2.4MHz. The internal register allows for up
to 9 bits of growth (from full scale).
lms_gain = 0 : select upper bits from accumulation register (9 bits growth)
lms_gain = 1 : select upper bits from accumulation register (8 bits growth)
lms_gain = 2 : select upper bits from accumulation register (7 bits growth)
lms_gain = 3 : select upper bits from accumulation register (6 bits growth)
lms_gain = 4 : select upper bits from accumulation register (5 bits growth)
lms_gain = 5 : select upper bits from accumulation register (4 bits growth)
lms_gain = 6 : select upper bits from accumulation register (3 bits growth)
lms_gain = 7 : select upper bits from accumulation register (2 bits growth)
The max bit gain is given by ceil(log2(2.4e6/FR_rate)).
For example a 4kHz FR can accumulate ceil(log2(2.4e6/4e3)) = 10 bits
if the I/Q tracking parameters are at full scale (+/- 2.4MHz)
Typical SQUID frequency throws of 100kHz have a bit growth of
ceil(log2( (100e3/2.4e6)*(2.4e6/FR_rate) ))
So 100kHz SQUID throw at 4kHz has bit growth ceil(log2(100e3/4e3)) = 5 bits.
Try lms_gain = 4.
This should be approx 9 - ceil(log2(100/reset_rate_khz)) for CMB applications.
Too low of lms_gain will use only a small dynamic range of the streaming
registers and contribute to increased noise.
Too high of lms_gain will overflow the register and greatly increase noise.
return_data : bool, optional, default True
Whether or not to return f, df, sync.
new_epics_root : str or None, optional, default None
Override the original epics root.
feedback_start_frac : float or None, optional, default None
The fraction of the full flux ramp at which to stop
applying feedback in each flux ramp cycle. Must be in
[0,1). Defaults to whatever's in the cfg file.
feedback_end_frac : float or None, optional, default None
The fraction of the full flux ramp at which to stop
applying feedback in each flux ramp cycle. Must be >0.
Defaults to whatever's in the cfg file.
setup_flux_ramp : bool, optional, default True
Whether to setup the flux ramp.
plotname_append : str, optional, default ''
Optional string to append plots with.
"""
if reset_rate_khz is None:
reset_rate_khz = self._reset_rate_khz
if lms_gain is None:
lms_gain = int(9 - np.ceil(np.log2(100/reset_rate_khz)))
if lms_gain > 7:
lms_gain = 7
else:
self.log("Using LMS gain is now an advanced feature.")
self.log("Unless you are an expert, you probably want feedback_gain.")
self.log("See tracking_setup docstring.")
if feedback_gain is None:
feedback_gain = self._feedback_gain[band]
##
## Load unprovided optional args from cfg
if feedback_start_frac is None:
feedback_start_frac = self._feedback_start_frac[band]
if feedback_end_frac is None:
feedback_end_frac = self._feedback_end_frac[band]
## End loading unprovided optional args from cfg
##
##
## Argument validation
# Validate feedback_start_frac and feedback_end_frac
if (feedback_start_frac < 0) or (feedback_start_frac >= 1):
raise ValueError(
f"feedback_start_frac = {feedback_start_frac} " +
| |
<filename>src/litprog/build.py<gh_stars>0
# This file is part of the litprog project
# https://github.com/litprog/litprog
#
# Copyright (c) 2018-2021 <NAME> (<EMAIL>) - MIT License
# SPDX-License-Identifier: MIT
import os
import re
import sys
import json
import time
import typing as typ
import fnmatch
import logging
import tempfile
import collections
from pathlib import Path
from concurrent.futures import Future
from concurrent.futures import ThreadPoolExecutor
from . import parse
from . import session
from . import common_types as ct
from . import capture_cache
# Module-level logger; callers configure handlers/levels externally.
logger = logging.getLogger(__name__)

# Matches a numeric ANSI SGR code fragment such as "31" or "38;5".
COLOR_CODE_RE = re.compile(r"\d+(;\d)?")

# ANSI foreground color codes, keyed by color name.
TERM_COLORS = {
    'black' : "30",
    'red'   : "31",
    'green' : "32",
    'yellow': "33",
    'blue'  : "34",
    'magenta': "35",
    'cyan'  : "36",
    'white' : "37",
}

# Default timeout in seconds.
# NOTE(review): name is misspelled ("DEFUALT" -> "DEFAULT"); left unchanged
# because other modules may already reference it by this name.
DEFUALT_TIMEOUT = 9.0

# An iterable of parsed chapters (input type for the expansion passes).
Chapters = typ.Iterable[parse.Chapter]

# The expanded files are no different in structure/datatype, it's just
# that directives such as dep and include are expanded.
ExpandedChapter = parse.Chapter
ExpandedChapters = typ.Iterator[ExpandedChapter]
class BlockError(Exception):
    """Error raised for an invalid or unresolvable literate-programming block.

    The message is prefixed with the block's source location; the block
    itself (and any partially expanded include contents) is kept so
    callers can report where the problem occurred.
    """

    block           : ct.Block
    include_contents: list[str]

    def __init__(self, msg: str, block: ct.Block, include_contents: typ.Optional[list[str]] = None) -> None:
        self.block            = block
        self.include_contents = include_contents or []
        location = parse.location(block).strip()
        super().__init__(f"{location} - " + msg)
def get_directive(
    block: ct.Block, name: str, missing_ok: bool = True, many_ok: bool = True
) -> typ.Optional[ct.Directive]:
    """Return the directive called `name` on `block`, if any.

    With many_ok (the default) the first match is returned immediately.
    With many_ok=False more than one match raises BlockError.  With
    missing_ok=False the absence of any match raises BlockError;
    otherwise None is returned.
    """
    matches: list[ct.Directive] = []
    for candidate in block.directives:
        if candidate.name != name:
            continue
        if many_ok:
            return candidate
        matches.append(candidate)

    if not matches:
        if missing_ok:
            return None
        raise BlockError(f"Could not find block with expected '{name}'", block)

    if len(matches) > 1 and not many_ok:
        raise BlockError(f"Block with multiple '{name}'", block)

    return matches[0]
def iter_directives(block: ct.Block, *directive_names) -> typ.Iterable[ct.Directive]:
    """Yield the block's directives whose name is one of `directive_names`."""
    wanted = set(directive_names)
    for candidate in block.directives:
        if candidate.name in wanted:
            yield candidate
def _match_indent(directive_text: str, include_val: str) -> str:
unindented = directive_text.lstrip()
indent = directive_text[: -len(unindented)].strip("\n")
if indent:
return "\n".join(indent + line for line in include_val.splitlines())
else:
return include_val
def _indented_include(
content : str,
directive_text: str,
include_val : str,
is_dep : bool = True,
) -> str:
if is_dep:
# dependencies are only included once (at the first occurance of a dep directive)
return content.replace(directive_text, include_val, 1).replace(directive_text, "")
else:
return content.replace(directive_text, include_val)
# TODO: Maybe use this to parse all directive values, and thereby enable quoting.
def _parse_directive_val(directive: ct.Directive) -> str:
    """Return the directive's value, stripped of surrounding matching quotes.

    Either single or double quotes are removed, but only when the value
    starts and ends with the same quote character.
    """
    val = directive.value.strip()
    # Require at least two characters so a lone quote character is not
    # collapsed to the empty string (previously "'" -> "").
    if len(val) >= 2 and val[0] == val[-1] and val[0] in ("'", '"'):
        return val[1:-1]
    return val
# A block id as written by the author (possibly unqualified).
BlockId = str
# A block id fully qualified with its namespace: "<namespace>.<id>".
ScopedBlockId = str

BlockIds = list[BlockId]
# All blocks that define a given scoped id (list to allow future multi-block defs).
BlockListBySid = dict[ScopedBlockId, list[ct.Block ]]
# Direct (non-recursive) dependencies of each scoped block id.
DependencyMap = dict[ScopedBlockId, list[ScopedBlockId]]
def _namespaced_lp_id(block: ct.Block, lp_id: BlockId) -> ScopedBlockId:
    """Fully qualify lp_id with the block's namespace unless already qualified.

    An id containing a "." is treated as fully qualified already.
    """
    cleaned = lp_id.strip()
    if "." in cleaned:
        return cleaned
    return f"{block.namespace}.{cleaned}"
def _iter_directive_sids(block: ct.Block, *directive_names) -> typ.Iterable[ScopedBlockId]:
    """Yield the scoped ids referenced by the named directives of block.

    Each directive value may be a comma-separated list of ids.
    """
    for found in iter_directives(block, *directive_names):
        for raw_id in found.value.split(","):
            yield _namespaced_lp_id(block, raw_id)
def _resolve_dep_sids(
    raw_dep_sid: ScopedBlockId, blocks_by_sid: BlockListBySid
) -> typ.Iterable[ScopedBlockId]:
    """Yield the known block ids matched by raw_dep_sid.

    An exact id yields only itself.  Otherwise, an id containing "*" is
    treated as a glob pattern and every matching known id is yielded.
    An unknown non-glob id yields nothing.
    """
    if raw_dep_sid in blocks_by_sid:
        yield raw_dep_sid
        return
    if "*" not in raw_dep_sid:
        return
    pattern = re.compile(fnmatch.translate(raw_dep_sid))
    for known_sid in blocks_by_sid:
        if pattern.match(known_sid):
            yield known_sid
def _build_dep_map(blocks_by_sid: BlockListBySid) -> DependencyMap:
    """Build a mapping of block_ids to the Blocks they depend on.

    The mapped block_ids (keys) are only the direct (non-recursive) dependencies.

    Raises BlockError when a 'dep' directive names an id that resolves to
    no known block.
    """
    dep_map: DependencyMap = {}
    for def_id, blocks in blocks_by_sid.items():
        for block in blocks:
            for raw_dep_sid in _iter_directive_sids(block, 'dep'):
                dep_sids = list(_resolve_dep_sids(raw_dep_sid, blocks_by_sid))
                # Emptiness check made consistent with _expand_block_content
                # (`not dep_sids` rather than `not any(dep_sids)`, which would
                # also trigger for a list of falsy values).
                if not dep_sids:
                    # TODO (mb 2021-07-18): pylev for better message:
                    #   "Maybe you meant {closest_sids}"
                    raise BlockError(f"Invalid block id: {raw_dep_sid}", block)

                # setdefault replaces the manual "key in dict" branching.
                dep_map.setdefault(def_id, []).extend(dep_sids)

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("include map")
        for lp_id, dep_ids in sorted(dep_map.items()):
            dep_ids_str = ", ".join(sorted(dep_ids))
            logger.debug(f"  deps {lp_id:<20}: {dep_ids_str}")
    return dep_map
def _get_dep_cycle(
    lp_id  : BlockId,
    dep_map: DependencyMap,
    root_id: BlockId,
    depth  : int = 0,
) -> list[str]:
    """Return the chain of ids leading from lp_id back to root_id, if any.

    An empty list means no cycle through root_id is reachable from lp_id.
    """
    if lp_id not in dep_map:
        return []

    dep_sids = dep_map[lp_id]
    if root_id in dep_sids:
        return [lp_id]

    for candidate in dep_sids:
        tail = _get_dep_cycle(candidate, dep_map, root_id, depth + 1)
        if tail:
            return [lp_id] + tail
    return []
def _err_on_include_cycle(
    block        : ct.Block,
    lp_id        : BlockId,
    blocks_by_sid: BlockListBySid,
    dep_map      : DependencyMap,
    root_id      : BlockId,
) -> None:
    """Raise a BlockError if lp_id participates in a dep/include cycle.

    Before raising, one warning per cycle member is logged so the user
    can trace the full include chain in the source files.
    """
    cycle_ids = _get_dep_cycle(lp_id, dep_map, root_id=root_id)
    if not cycle_ids:
        return

    cycle_ids = [root_id] + cycle_ids
    for cycle_id in cycle_ids:
        first_def_block = blocks_by_sid[cycle_id][0]
        loc = parse.location(first_def_block).strip()
        logger.warning(f"{loc} - {cycle_id} (trace for include cycle)")

    trace = " -> ".join(f"'{cycle_id}'" for cycle_id in cycle_ids)
    raise BlockError(f"dep/include cycle {trace}", block)
def _expand_block_content(
    blocks_by_sid: BlockListBySid,
    block        : ct.Block,
    added_deps   : set[str],
    dep_map      : DependencyMap,
    keep_fence   : bool,
    lvl          : int = 1,
) -> tuple[str, set[Path]]:
    """Recursively expand the dep/include directives of a single block.

    Returns a tuple of (expanded content, set of markdown paths that
    contributed to it).  `added_deps` is mutated in place across the whole
    recursion so that each dep is expanded at most once.  `keep_fence` is
    True only for the top-level call, preserving the block's code fence.
    """
    # NOTE (mb 2020-12-20): Depth first expansion of content.
    #   This ensures that the first occurance of an dep
    #   directive is expanded at the earliest possible point
    #   even if it is a recursive dep.
    def_ = get_directive(block, 'def', missing_ok=True, many_ok=False)

    new_md_paths = {block.md_path}
    if keep_fence:
        new_content = block.content
    else:
        new_content = block.includable_content + "\n"

    for directive in iter_directives(block, 'dep', 'include'):
        is_dep = directive.name == 'dep'
        dep_contents: list[str] = []
        # A directive value may reference several ids (comma-separated),
        # and each may be a glob resolving to multiple known blocks.
        for raw_dep_sid in directive.value.split(","):
            raw_dep_sid = _namespaced_lp_id(block, raw_dep_sid)
            dep_sids = list(_resolve_dep_sids(raw_dep_sid, blocks_by_sid))
            if not dep_sids:
                # TODO (mb 2021-07-18): pylev for better message:
                #   "Maybe you meant {closest_sids}"
                raise BlockError(f"Invalid block id: {raw_dep_sid}", block)

            for dep_sid in dep_sids:
                # Cycle detection only applies to blocks that define an id.
                if def_:
                    def_id = _namespaced_lp_id(block, def_.value)
                    _err_on_include_cycle(block, dep_sid, blocks_by_sid, dep_map, root_id=def_id)

                if is_dep:
                    if dep_sid in added_deps:
                        # skip already included dependencies
                        continue
                    else:
                        added_deps.add(dep_sid)

                for dep_block in blocks_by_sid[dep_sid]:
                    dep_content, dep_md_paths = _expand_block_content(
                        blocks_by_sid,
                        dep_block,
                        added_deps,
                        dep_map,
                        keep_fence=False,
                        lvl=lvl + 1,
                    )
                    # Re-indent the expansion to match the directive's own
                    # indentation in the including block.
                    dep_content = _match_indent(directive.raw_text, dep_content)
                    dep_contents.append(dep_content)
                    new_md_paths.update(dep_md_paths)

        include_content = "".join(dep_contents)
        dep_text = directive.raw_text.lstrip("\n")
        new_content = _indented_include(new_content, dep_text, include_content, is_dep=is_dep)

    return (new_content, new_md_paths)
def _expand_directives(blocks_by_sid: BlockListBySid, chapter: parse.Chapter) -> parse.Chapter:
    """Return a copy of `chapter` with all dep/include directives expanded.

    The input chapter is not modified; expanded blocks are written back
    as new MarkdownElement instances in the copied chapter.
    """
    new_chapter = chapter.copy()
    dep_map = _build_dep_map(blocks_by_sid)
    # Iterate over a snapshot: entries of new_chapter.elements are replaced
    # inside the loop.
    for block in list(new_chapter.iter_blocks()):
        # Fresh per-block dep tracking, so each top-level block gets every
        # dependency expanded once within its own expansion.
        added_deps: set[str] = set()
        new_content, new_md_paths = _expand_block_content(
            blocks_by_sid, block, added_deps, dep_map, keep_fence=True
        )
        if new_content != block.content:
            elements = new_chapter.elements[block.md_path]
            elem = elements[block.elem_index]
            elements[block.elem_index] = parse.MarkdownElement(
                elem.md_path,
                elem.first_line,
                elem.elem_index,
                elem.md_type,
                new_content,
                None,
                new_md_paths | {elem.md_path},
            )
    return new_chapter
def _get_blocks_by_id(chapters: Chapters) -> BlockListBySid:
    """Collect every block carrying a 'def' directive, keyed by scoped id.

    Raises BlockError when a qualified id does not belong to the block's
    own namespace, or when the same id is defined twice.
    """
    blocks_by_sid: BlockListBySid = {}
    for chapter in chapters:
        for block in chapter.iter_blocks():
            lp_def = get_directive(block, 'def', missing_ok=True, many_ok=False)
            if lp_def:
                lp_def_id = lp_def.value
                # A qualified id must live in the defining block's namespace.
                if "." in lp_def_id and not lp_def_id.startswith(block.namespace + "."):
                    errmsg = f"Invalid block id: {lp_def_id} for namespace {block.namespace}"
                    raise BlockError(errmsg, block)

                block_sid = _namespaced_lp_id(block, lp_def_id)
                # Duplicate definitions are an error; report the first location.
                if block_sid in blocks_by_sid:
                    prev_block = blocks_by_sid[block_sid][0]
                    prev_loc = parse.location(prev_block).strip()
                    errmsg = f"Block already defined: {lp_def_id} at {prev_loc}"
                    raise BlockError(errmsg, block)

                blocks_by_sid[block_sid] = [block]

    # NOTE (mb 2021-08-19): The amend directive has been removed as it
    #   it would lead to confusion. When a reader wants to understand a block,
    #   it will be more easy for them if they need not worry about any other
    #   block in the project. They need only consider the block they
    #   see before them and they can see all contents either directly or
    #   as explicitly named expansions in the form of a dep/include
    #   directive.
    #
    # for chapter in chapters:
    #     for block in chapter.iter_blocks():
    #         for lp_amend in iter_directives(block, 'amend'):
    #             lp_amend_sid = _namespaced_lp_id(block, lp_amend.value)
    #             if lp_amend_sid in blocks_by_sid:
    #                 blocks_by_sid[lp_amend_sid].append(block)
    #             else:
    #                 errmsg = f"Unknown block id: {lp_amend_sid}"
    #                 raise BlockError(errmsg, block)

    return blocks_by_sid
def _iter_expanded_chapters(chapters: Chapters) -> ExpandedChapters:
    """Yield a copy of each chapter with its dep/include directives expanded.

    Expansion requires a global view: first every block with a 'def'
    directive is collected into a graph (block ids are always absolute /
    fully qualified), then each chapter's directives are resolved against
    that collection.
    """
    blocks_by_sid = _get_blocks_by_id(chapters)
    for chapter in chapters:
        yield _expand_directives(blocks_by_sid, chapter)
def _iter_block_errors(parse_ctx: parse.Context, build_ctx: parse.Context) -> typ.Iterable[str]:
"""Validate that expansion worked correctly."""
# NOTE: the main purpose of the parse_ctx is to produce
# better error messages. It allows us to point at the
# original block, whereas the build_ctx has been
# modified and line numbers no longer correspond to
# the original file.
assert len(parse_ctx.chapters) == len(build_ctx.chapters)
for orig_chapter, chapter in zip(parse_ctx.chapters, build_ctx.chapters):
assert orig_chapter.md_paths == chapter.md_paths
for md_path in orig_chapter.md_paths:
assert len(orig_chapter.elements[md_path]) == len(chapter.elements[md_path])
for block in chapter.iter_blocks():
orig_elem = orig_chapter.elements[block.md_path][block.elem_index]
for directive in block.directives:
if directive.name in ('dep', 'include'):
elem = chapter.elements[block.md_path][block.elem_index]
rel_line_no = 0
for line in orig_elem.content.splitlines():
if directive.raw_text in line:
break
rel_line_no += 1
# TODO (mb 2020-12-30): These line numbers appear to be wrong,
# I think for recursive dep directives in particular.
line_no = | |
<gh_stars>0
"""
Given a rectangular 2D array, write a function that prints the elements in a clockwise
spiral order, starting at the upper left corner and ending at the center of the array.
For example:
1 2 3 4
5 6 7 8 --> 1 2 3 4 8 12 11 10 9 5 6 7
9 10 11 12
"""
import numpy as np
def array_spiral(arr):
    """
    Given a rectangular 2D array, yields the elements in a clockwise spiral
    order, starting at the upper left corner and ending at the center.

    For example:

    1  2  3  4
    5  6  7  8  --> 1 2 3 4 8 12 11 10 9 5 6 7
    9 10 11 12
    """
    # Matrix orientation: arr[i][j] is the element in row i, column j.
    # The four bounds below are INCLUSIVE indices of the outermost
    # unvisited ring; they shrink inward by one after each full lap.
    row_lo, row_hi = 0, len(arr) - 1
    col_lo, col_hi = 0, len(arr[0]) - 1

    # Walk full rings while at least two rows AND two columns remain.
    # Each side stops one element short of the next corner, so every
    # corner is yielded exactly once per lap.
    while row_lo < row_hi and col_lo < col_hi:
        # Top edge, left -> right (excluding the top-right corner).
        for entry in arr[row_lo][col_lo:col_hi]:
            yield entry
        # Right edge, top -> bottom (excluding the bottom-right corner).
        for i in range(row_lo, row_hi):
            yield arr[i][col_hi]
        # Bottom edge, right -> left (excluding the bottom-left corner).
        for j in range(col_hi, col_lo, -1):
            yield arr[row_hi][j]
        # Left edge, bottom -> top (excluding the top-left corner).
        for i in range(row_hi, row_lo, -1):
            yield arr[i][col_lo]
        row_lo += 1
        row_hi -= 1
        col_lo += 1
        col_hi -= 1

    # At most a single row or a single column remains (possibly a single
    # element, or nothing at all for degenerate inputs); emit it straight
    # through without spiraling.  `elif` prevents double-yielding the lone
    # center element when exactly one entry is left.
    if row_lo == row_hi:
        for entry in arr[row_lo][col_lo:col_hi + 1]:
            yield entry
    elif col_lo == col_hi:
        for row in arr[row_lo:row_hi + 1]:
            yield row[col_lo]
def array_spiral_orig(arr):
    """
    DOES NOT WORK!
    Original version I came up with (has a bug).
    Given a rectangular 2D array, yields the elements in a clockwise spiral order,
    starting at the upper left corner and ending at the center of the array.
    For example:
    1 2 3 4
    5 6 7 8 --> 1 2 3 4 8 12 11 10 9 5 6 7
    9 10 11 12
    """
    # Kept for reference only -- see array_spiral / array_spiral_orig_fixed
    # for working versions.
    # The bug: the ring loop also runs when only a single row or column
    # remains, so elements are re-yielded.  E.g. for [[1, 2, 3]] the top
    # pass yields 1, 2, 3 and then the bottom pass (range(right-2, left-1,
    # -1) over the SAME row) yields 2, 1 again.
    #
    # Orient the array like a matrix, so the 1st index is the row
    # and the 2nd index is the column.
    # That is, arr[i][j] is the element in the ith row and jth column.
    # Note: unlike array_spiral, `bottom` and `right` are EXCLUSIVE bounds.
    top = 0
    bottom =len(arr)
    left = 0
    right = len(arr[0])
    while top < bottom and left < right:
        for j in range(left, right):
            yield arr[top][j]
        for i in range(top+1, bottom):
            yield arr[i][right-1]
        for j in range(right-2, left-1, -1):
            yield arr[bottom-1][j]
        for i in range(bottom-2, top, -1):
            yield arr[i][left]
        top += 1
        bottom -= 1
        left += 1
        right -= 1
def array_spiral_orig_fixed(arr):
    """
    Given a rectangular 2D array, yields the elements in a clockwise spiral
    order, starting at the upper left corner and ending at the center.

    Repaired variant of array_spiral_orig: the ring loop stops while at
    least two rows AND two columns remain, and any leftover single row or
    single column is emitted separately afterwards.

    For example:

    1  2  3  4
    5  6  7  8  --> 1 2 3 4 8 12 11 10 9 5 6 7
    9 10 11 12
    """
    # arr[i][j] is the element in row i, column j.  Here `r_end`/`c_end`
    # are EXCLUSIVE bounds (one past the last row/column).
    r_start, r_end = 0, len(arr)
    c_start, c_end = 0, len(arr[0])

    while r_start < r_end - 1 and c_start < c_end - 1:
        for j in range(c_start, c_end):              # top row, full width
            yield arr[r_start][j]
        for i in range(r_start + 1, r_end):          # right column, below top
            yield arr[i][c_end - 1]
        for j in range(c_end - 2, c_start - 1, -1):  # bottom row, right -> left
            yield arr[r_end - 1][j]
        for i in range(r_end - 2, r_start, -1):      # left column, bottom -> top
            yield arr[i][c_start]
        r_start += 1
        r_end -= 1
        c_start += 1
        c_end -= 1

    if r_start == r_end - 1:
        # Exactly one row remains.
        for element in arr[r_start][c_start:c_end]:
            yield element
    elif c_start == c_end - 1:
        # Exactly one column remains.
        for row in arr[r_start:r_end]:
            yield row[c_start]
def test_spiral():
for spiral in [array_spiral, array_spiral_orig_fixed]:
a = np.arange(12).reshape(3,4) + 1
assert list(spiral(a)) == [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7]
a = np.arange(2*3).reshape(2,3)+1
assert list(spiral(a)) == [1, 2, 3, 6, 5, 4]
assert list(spiral([[]])) == []
assert list(spiral([[], [], []])) == []
assert list(spiral([[45]])) == [45]
assert list(spiral([[1,2]])) == [1,2]
assert list(spiral([[1,2,3]])) == [1,2,3]
assert list(spiral([[1],[2]])) == [1,2]
assert list(spiral([[1],[2],[3]])) == [1,2,3]
a = np.arange(5).reshape(1,5) + 1
assert list(spiral(a)) == [1,2,3,4,5]
a = np.arange(7).reshape(7,1) + 1
assert list(spiral(a)) == [1,2,3,4,5,6,7]
a = np.arange(9*6).reshape(9,6)
assert list(spiral(a)) == [0, 1, 2, 3, 4, 5, 11, 17, 23, 29, 35, 41, 47, 53, 52, 51, 50, 49, 48, 42, 36, 30, 24, 18, 12, 6, 7, 8, 9, 10, 16, 22, 28, 34, 40, 46, 45, 44, 43, 37, 31, 25, 19, 13, 14, 15, 21, 27, 33, 39, 38, 32, 26, 20]
a = [[1,2],[3,4]]
assert list(spiral(a)) == [1,2,4,3]
a = [[1,2,3], [4,5,6], [7,8,9]]
assert list(spiral(a)) == [1, 2, 3, 6, 9, 8, 7, 4, 5]
a = np.arange(4*4).reshape(4,4)+1
assert list(spiral(a)) == [1, 2, 3, 4, 8, 12, 16, 15, 14, 13, 9, 5, 6, 7, 11, 10]
a = np.arange(5*5).reshape(5,5)+1
assert list(spiral(a)) == [1, 2, 3, 4, 5, 10, 15, 20, 25, 24, 23, | |
<reponame>valentinalatorre/mocksurvey<gh_stars>1-10
"""
Classes
-------
SimBox: (and subclasses HaloBox and GalBox)
Contains information about the simulation box (e.g., the halo data), and populates galaxies given an HOD model available from `halotools`.
BoxField:
Basic class used to select a rectangular prism of galaxies (or all galaxies by default) populated by the SimBox. Data and randoms can be accessed via methods get_data and get_rands
MockField:
A more sophisticated version of BoxField, with identical data access methods, in which galaxies are selected by celestial coordinates by a given scheme (shape) on the sky. Data access methods work in the same way as BoxField.
MockSurvey:
A collection of MockFields, centered at nearby places on the sky. Data access methods work in the same way as BoxField.
"""
import gc
import warnings
# import functools
import math
from scipy import optimize
import numpy as np
# import pandas as pd
from inspect import getfullargspec
from halotools import sim_manager, empirical_models
from halotools.mock_observables import return_xyz_formatted_array
from astropy import cosmology, table as astropy_table
from .. import util
# from .. import mocksurvey as ms
from ..stats import cf
# Bolshoi-Planck cosmology, used as the module's reference cosmology.
bplcosmo = cosmology.FlatLambdaCDM(name="Bolshoi-Planck",
                                   H0=67.8,
                                   Om0=0.307,
                                   Ob0=0.048)
class RedshiftSelector:
    """Callable that selects objects inside the field's redshift slab.

    The slab is centered on the simbox redshift with full width delta_z;
    the equivalent comoving distance limits are precomputed so the
    selection can also be applied directly to distances.
    """

    def __init__(self, mockfield):
        simbox = mockfield.simbox
        self.mean_redshift = simbox.redshift
        self.cosmo = simbox.cosmo
        self.delta_z = mockfield.delta_z
        half_widths = np.array([-.5, .5]) * self.delta_z
        self.zlim = self.mean_redshift + half_widths
        self.dlim = util.comoving_disth(self.zlim, self.cosmo)

    def __call__(self, redshift, input_is_distance=False):
        lower, upper = self.dlim if input_is_distance else self.zlim
        # `&` (not `and`) keeps this working element-wise on numpy arrays.
        return (lower < redshift) & (redshift < upper)
class FieldSelector:
    """Base class for selecting galaxies within a field on the sky.

    The field geometry is chosen from mockfield.scheme ("circle",
    "square", "hexagon", or "<n>-..." for an n-gon); the concrete
    *_selector / *_fieldshape methods are provided by subclasses
    (e.g. CartesianSelector, CelestialSelector).  The *_sqdeg2* helpers
    convert the field's solid angle into the geometric size parameter of
    each shape, optionally as an angle instead of a comoving distance.
    """

    def __init__(self, mockfield):
        # Cache the field/simbox properties needed by the shape solvers.
        self.mean_redshift = mockfield.simbox.redshift
        self.sqdeg = mockfield.sqdeg
        self.center = mockfield.center
        self.center_rdz = mockfield.center_rdz
        self.cosmo = mockfield.simbox.cosmo
        self.delta_z = mockfield.delta_z
        self.scheme = mockfield.scheme
        # Bind the selection and shape functions for the chosen scheme.
        self.make_selection, self.get_fieldshape = self.choose_selector()

    def __call__(self, *args, **kwargs):
        # Calling the selector applies the scheme-specific selection.
        return self.make_selection(*args, **kwargs)

    def choose_selector(self):
        """Return the (selector, fieldshape) method pair for self.scheme."""
        scheme = self.scheme
        if scheme.lower().startswith("cir"):
            return self.circle_selector, self.circle_fieldshape
        elif scheme.lower().startswith("sq"):
            return self.square_selector, self.square_fieldshape
        elif scheme.lower().startswith("hex"):
            return self.hexagon_selector, self.hexagon_fieldshape
        elif scheme[0].isdigit():
            # Scheme like "6-..." selects an n-sided polygon.
            # NOTE(review): npoly_selector/npoly_fieldshape are not defined
            # in this class -- presumably provided by a subclass; confirm.
            self.n_vertices = int(scheme.split("-")[0])
            return self.npoly_selector, self.npoly_fieldshape
        else:
            raise ValueError("scheme = `%s` is invalid." % scheme)

    def npoly_sqdeg2radius(self, n, return_angle=False):
        """
        Input: solid angle of entire field (in sq degrees)
        Output: The radius (of circumscribed circle, in Mpc/h OR radians)
        """
        # Solve for the circumscribed-circle angle whose n-gon covers omega.
        omega = self.sqdeg * np.pi ** 2 / 180. ** 2
        f = lambda angle: util.make_npoly(angle, n).area() - omega
        angle = optimize.brentq(f, 0, np.pi / 2.)
        if return_angle:
            return angle
        else:
            return util.angle_to_dist(angle, self.mean_redshift, self.cosmo)

    def circle_sqdeg2radius(self, return_angle=False):
        """
        Input: solid angle of entire field (in sq degrees)
        Output: The radius of a circular field (in Mpc/h OR radians)
        """
        # Invert the spherical-cap area formula omega = 2*pi*(1 - cos(angle)).
        omega = self.sqdeg * np.pi ** 2 / 180. ** 2
        angle = math.acos(1 - omega / 2. / np.pi)
        if return_angle:
            return angle
        else:
            return util.angle_to_dist(angle, self.mean_redshift, self.cosmo)

    def square_sqdeg2apothem(self, return_angle=False):
        """
        Input: solid angle of entire field (in sq degrees)
        Output: The apothem of a square field (in Mpc/h OR radians)
        """
        # NOTE(review): angle0 = sqrt(sqdeg) is in DEGREES but is compared
        # against pi/6 (radians) and used as the Newton starting point for
        # an equation in radians -- looks like it should be sqrt(omega);
        # confirm intended behavior before relying on the Newton branch.
        angle0 = math.sqrt(self.sqdeg)
        omega = self.sqdeg * np.pi ** 2 / 180. ** 2
        f = lambda angle: 2 * angle * math.sin(angle / 2.) - omega
        fp = lambda angle: 2 * math.sin(angle / 2.
                                        ) + angle * math.cos(angle / 2.)
        if angle0 < np.pi / 6.:
            # Small fields: Newton's method converges quickly.
            angle = optimize.newton(f, fprime=fp, x0=angle0) / 2.
        else:
            # Large fields: fall back to robust bracketing.
            angle = optimize.brentq(f, 0, np.pi) / 2.
        if return_angle:
            return angle
        else:
            return util.angle_to_dist(angle, self.mean_redshift, self.cosmo)

    def hexagon_sqdeg2apothem(self, return_angle=False):
        """
        Input: solid angle of entire field (in sq degrees)
        Output: The apothem of a hexagonal field (in Mpc/h OR radians)
        """
        # Start from the equivalent circular field's radius (radians).
        angle0 = self.circle_sqdeg2radius(return_angle=True)
        omega = self.sqdeg * np.pi ** 2 / 180. ** 2
        cnst = 1 - math.sqrt(3) / 4 * omega
        f = lambda angle: angle * math.sin(angle) - math.cos(angle) + cnst
        fp = lambda angle: angle * math.cos(angle) + 2 * math.sin(angle)
        if angle0 < np.pi / 6.:
            angle = optimize.newton(f, fprime=fp, x0=angle0)
        else:
            angle = optimize.brentq(f, 0, np.pi / 2.)
        if return_angle:
            return angle
        else:
            return util.angle_to_dist(angle, self.mean_redshift, self.cosmo)

    def _z_length(self):
        # Comoving depth (Mpc/h) corresponding to the field's delta_z slab.
        return util.redshift_lim_to_dist(self.delta_z,
                                         self.mean_redshift, self.cosmo)
class CartesianSelector(FieldSelector):
    """FieldSelector that applies the field shape in Cartesian x,y coordinates.

    The field's solid angle is converted to a flat area chosen so that the
    comoving volume of the Celestial field is preserved.
    """

    def __init__(self, mockfield):
        FieldSelector.__init__(self, mockfield)
        zlim = (self.mean_redshift - self.delta_z / 2.,
                self.mean_redshift + self.delta_z / 2.)
        omega = self.sqdeg * (np.pi / 180.) ** 2
        d1, d2 = util.comoving_disth(zlim, self.cosmo)
        # Comoving volume of the (curved) Celestial field.
        volume = omega / 3. * (d2 ** 3 - d1 ** 3)
        depth = d2 - d1
        # Calculate area such that volume is preserved in
        # the conversion from Celestial -> Cartesian
        self.area = volume / depth

    def circle_selector(self, xyz):
        """Select galaxies in a circle centered at x,y = field.center"""
        field_radius = np.sqrt(self.area / np.pi)
        xy = xyz[:, :2] - self.center[np.newaxis, :2]
        # Compare squared radii to avoid a per-object sqrt.
        rad2 = np.sum(xy ** 2, axis=1)
        return rad2 < field_radius ** 2

    def square_selector(self, xyz):
        """Select galaxies in a square centered at x,y = field.center"""
        field_apothem = np.sqrt(self.area / 4.)
        xy = xyz[:, :2] - self.center[np.newaxis, :2]
        b1 = xy[:, 0] < field_apothem
        b2 = xy[:, 0] > -field_apothem
        b3 = xy[:, 1] < field_apothem
        b4 = xy[:, 1] > -field_apothem
        return b1 & b2 & b3 & b4

    def hexagon_selector(self, xyz):
        """Select galaxies in a hexagon centered at x,y = field.center"""
        # Apothem of a regular hexagon of the target area.
        field_apothem = np.sqrt(self.area / (2 * np.sqrt(3)))
        xy = xyz[:, :2] - self.center[np.newaxis, :2]
        # Intersect the six half-planes bounding the hexagon
        # (two horizontal edges and four sloped edges).
        diagonal = math.sqrt(3.) * xy[:, 0]
        b1 = xy[:, 1] < field_apothem
        b2 = xy[:, 1] > -field_apothem
        b3 = xy[:, 1] < 2 * field_apothem - diagonal
        b4 = xy[:, 1] < 2 * field_apothem + diagonal
        b5 = xy[:, 1] > -2 * field_apothem - diagonal
        b6 = xy[:, 1] > -2 * field_apothem + diagonal
        return b1 & b2 & b3 & b4 & b5 & b6

    def circle_fieldshape(self, rdz=False):
        # Returns (width, height, depth) of the field's bounding box in Mpc/h.
        if rdz: raise NotImplementedError(
            "Why would you need to know the Celestial "
            "field shape of a Cartesian field?...")
        field_radius = self.circle_sqdeg2radius()
        return np.array([2. * field_radius] * 2 + [self._z_length()],
                        dtype=np.float32)

    def square_fieldshape(self, rdz=False):
        if rdz: raise NotImplementedError(
            "Why would you need to know the Celestial "
            "field shape of a Cartesian field?...")
        field_apothem = self.square_sqdeg2apothem()
        return np.array([2. * field_apothem] * 2 + [self._z_length()],
                        dtype=np.float32)

    def hexagon_fieldshape(self, rdz=False):
        if rdz: raise NotImplementedError(
            "Why would you need to know the Celestial "
            "field shape of a Cartesian field?...")
        field_apothem = self.hexagon_sqdeg2apothem()
        # Hexagon bounding box: width is the corner-to-corner diameter.
        return np.array([4. / math.sqrt(3.) * field_apothem,
                         2. * field_apothem, self._z_length()],
                        dtype=np.float32)
class CelestialSelector(FieldSelector):
def __init__(self, mockfield):
    # No extra state beyond the base FieldSelector setup; selection is
    # done directly in Celestial (ra/dec) coordinates.
    FieldSelector.__init__(self, mockfield)
def circle_selector(self, rdz, deg=False):
"""Select galaxies in a circle centered at ra,dec = (0,0) radans"""
field_radius = self.circle_sqdeg2radius(return_angle=True)
if deg:
field_radius *= 180./np.pi
rd = rdz[:, :2] - self.center_rdz[np.newaxis, :2]
z = np.cos(rd[:, 0]) * np.cos(rd[:, 1])
y = np.sin(rd[:, 0]) * np.cos(rd[:, 1])
x = -np.sin(rd[:, 1])
theta = np.arctan2(np.sqrt(x ** 2 + y ** 2), z)
return theta < field_radius
def square_selector(self, rdz, deg=False):
"""Select galaxies in a square centered at ra,dec = (0,0) radians"""
field_apothem = self.square_sqdeg2apothem(return_angle=True)
if deg:
field_apothem *= 180./np.pi
rd = rdz[:, :2] - self.center_rdz[np.newaxis, :2]
b1 = rd[:, 0] < field_apothem
b2 = rd[:, 0] > -field_apothem
b3 = rd[:, 1] < field_apothem
b4 = rd[:, 1] > -field_apothem
return b1 & b2 & b3 & b4
def hexagon_selector(self, rdz, deg=False):
"""Select galaxies in a hexagon centered at ra,dec = (0,0) radians"""
field_apothem = self.hexagon_sqdeg2apothem(return_angle=True)
if deg:
field_apothem *= 180./np.pi
rd = rdz[:, :2] - self.center_rdz[np.newaxis, :2]
diagonal = np.sqrt(3.) * rd[:, 0]
b1 = rd[:, 1] < field_apothem
b2 = rd[:, 1] > -field_apothem
b3 = rd[:, 1] < 2 * field_apothem - diagonal
b4 = rd[:, 1] < 2 * field_apothem + diagonal
b5 = rd[:, 1] > -2 * field_apothem - diagonal
b6 = rd[:, 1] > -2 * field_apothem + diagonal
return b1 & b2 & b3 & b4 & b5 & b6
def circle_fieldshape(self, rdz=False):
field_radius = self.circle_sqdeg2radius(return_angle=True)
if rdz:
return np.array([2. * field_radius] * 2 + [self.delta_z], dtype=np.float32)
else:
field_radius = util.angle_to_dist(field_radius, self.mean_redshift + self.delta_z / 2., self.cosmo)
return np.array([2. * field_radius] * 2 + [self._z_length()], dtype=np.float32)
def square_fieldshape(self, rdz=False):
field_apothem = self.square_sqdeg2apothem(return_angle=True)
if rdz:
return np.array([2. * field_apothem] * 2 + [self.delta_z], dtype=np.float32)
else:
field_apothem = util.angle_to_dist(field_apothem, self.mean_redshift + self.delta_z / 2., self.cosmo)
return np.array([2. * field_apothem] * 2 + [self._z_length()], dtype=np.float32)
def hexagon_fieldshape(self, rdz=False):
field_apothem = self.hexagon_sqdeg2apothem(return_angle=True)
if rdz:
return np.array([4. / math.sqrt(3.) * field_apothem, 2. | |
provisioning_state: str
"""
    # Read-only fields the service populates; msrest rejects them if sent by the client.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        super(ApplicationTypeResource, self).__init__(location=location, tags=tags, **kwargs)
        # Server-populated; always None on the client side.
        self.provisioning_state = None
class ApplicationTypeResourceList(msrest.serialization.Model):
    """A page of application type name resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: The application type resources on this page.
    :type value:
     list[~service_fabric_managed_clusters_management_client.models.ApplicationTypeResource]
    :ivar next_link: URL to get the next set of application type list results if there are any.
    :vartype next_link: str
    """
    # next_link is read-only; the service fills it in when a further page exists.
    _validation = {
        'next_link': {'readonly': True},
    }
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationTypeResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(self, *, value: Optional[List["ApplicationTypeResource"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = None
class ApplicationTypeUpdateParameters(msrest.serialization.Model):
    """Request body for updating an application type.

    :param tags: A set of tags. Application type update parameters.
    :type tags: dict[str, str]
    """
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tags = tags
class ApplicationTypeVersionResource(ProxyResource):
    """A single version of an application type name resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource identifier.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: Resource location depends on the parent resource.
    :type location: str
    :param tags: A set of tags. Azure resource tags.
    :type tags: dict[str, str]
    :ivar system_data: Metadata pertaining to creation and last modification of the resource.
    :vartype system_data: ~service_fabric_managed_clusters_management_client.models.SystemData
    :ivar provisioning_state: The current deployment or provisioning state, which only appears in
     the response.
    :vartype provisioning_state: str
    :param app_package_url: The URL to the application package.
    :type app_package_url: str
    """
    # Read-only fields the service populates; rejected if sent by the client.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'app_package_url': {'key': 'properties.appPackageUrl', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        location: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        app_package_url: Optional[str] = None,
        **kwargs
    ):
        super().__init__(location=location, tags=tags, **kwargs)
        self.provisioning_state = None
        self.app_package_url = app_package_url
class ApplicationTypeVersionResourceList(msrest.serialization.Model):
    """A page of application type version resources for a given application type name.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: The application type version resources on this page.
    :type value:
     list[~service_fabric_managed_clusters_management_client.models.ApplicationTypeVersionResource]
    :ivar next_link: URL to get the next set of application type version list results if there are
     any.
    :vartype next_link: str
    """
    # next_link is read-only; the service fills it in when a further page exists.
    _validation = {
        'next_link': {'readonly': True},
    }
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationTypeVersionResource]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(self, *, value: Optional[List["ApplicationTypeVersionResource"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.value = value
        self.next_link = None
class ApplicationTypeVersionsCleanupPolicy(msrest.serialization.Model):
    """Policy controlling how many unused application type versions are retained.

    All required parameters must be populated in order to send to Azure.

    :param max_unused_versions_to_keep: Required. Number of unused versions per application type to
     keep.
    :type max_unused_versions_to_keep: int
    """
    # The count is mandatory and may not be negative.
    _validation = {
        'max_unused_versions_to_keep': {'required': True, 'minimum': 0},
    }
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'max_unused_versions_to_keep': {'key': 'maxUnusedVersionsToKeep', 'type': 'int'},
    }
    def __init__(self, *, max_unused_versions_to_keep: int, **kwargs):
        super().__init__(**kwargs)
        self.max_unused_versions_to_keep = max_unused_versions_to_keep
class ApplicationTypeVersionUpdateParameters(msrest.serialization.Model):
    """Request body for updating an application type version.

    :param tags: A set of tags. Application type version update parameters.
    :type tags: dict[str, str]
    """
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tags = tags
class ApplicationUpdateParameters(msrest.serialization.Model):
    """Request body for updating an application.

    :param tags: A set of tags. Application update parameters.
    :type tags: dict[str, str]
    """
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs):
        super().__init__(**kwargs)
        self.tags = tags
class ApplicationUpgradePolicy(msrest.serialization.Model):
    """Describes the policy for a monitored application upgrade.

    :param application_health_policy: Defines a health policy used to evaluate the health of an
     application or one of its children entities.
    :type application_health_policy:
     ~service_fabric_managed_clusters_management_client.models.ApplicationHealthPolicy
    :param force_restart: If true, then processes are forcefully restarted during upgrade even when
     the code version has not changed (the upgrade only changes configuration or data).
    :type force_restart: bool
    :param rolling_upgrade_monitoring_policy: The policy used for monitoring the application
     upgrade.
    :type rolling_upgrade_monitoring_policy:
     ~service_fabric_managed_clusters_management_client.models.RollingUpgradeMonitoringPolicy
    :param instance_close_delay_duration: Duration in seconds, to wait before a stateless instance
     is closed, to allow the active requests to drain gracefully. Effective while an instance is
     closing during an application/cluster upgrade, only for instances with a non-zero delay
     duration in the service description. See InstanceCloseDelayDurationSeconds in
     StatelessServiceDescription for details. The default 4294967295 means the behavior depends
     entirely on the delay configured in the stateless service description.
    :type instance_close_delay_duration: long
    :param upgrade_mode: The mode used to monitor health during a rolling upgrade. The values are
     Monitored, and UnmonitoredAuto. Possible values include: "Monitored", "UnmonitoredAuto".
    :type upgrade_mode: str or
     ~service_fabric_managed_clusters_management_client.models.RollingUpgradeMode
    :param upgrade_replica_set_check_timeout: The maximum amount of time to block processing of an
     upgrade domain and prevent loss of availability when there are unexpected issues. When the
     timeout expires, processing of the upgrade domain proceeds regardless of availability loss.
     Reset at the start of each upgrade domain. Valid values are 0 to 42949672925 inclusive
     (unsigned 32-bit integer).
    :type upgrade_replica_set_check_timeout: long
    :param recreate_application: Determines whether the application should be recreated on update.
     If value=true, the rest of the upgrade policy parameters are not allowed.
    :type recreate_application: bool
    """
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'application_health_policy': {'key': 'applicationHealthPolicy', 'type': 'ApplicationHealthPolicy'},
        'force_restart': {'key': 'forceRestart', 'type': 'bool'},
        'rolling_upgrade_monitoring_policy': {'key': 'rollingUpgradeMonitoringPolicy', 'type': 'RollingUpgradeMonitoringPolicy'},
        'instance_close_delay_duration': {'key': 'instanceCloseDelayDuration', 'type': 'long'},
        'upgrade_mode': {'key': 'upgradeMode', 'type': 'str'},
        'upgrade_replica_set_check_timeout': {'key': 'upgradeReplicaSetCheckTimeout', 'type': 'long'},
        'recreate_application': {'key': 'recreateApplication', 'type': 'bool'},
    }
    def __init__(
        self,
        *,
        application_health_policy: Optional["ApplicationHealthPolicy"] = None,
        force_restart: Optional[bool] = False,
        rolling_upgrade_monitoring_policy: Optional["RollingUpgradeMonitoringPolicy"] = None,
        instance_close_delay_duration: Optional[int] = None,
        upgrade_mode: Optional[Union[str, "RollingUpgradeMode"]] = None,
        upgrade_replica_set_check_timeout: Optional[int] = None,
        recreate_application: Optional[bool] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.application_health_policy = application_health_policy
        self.force_restart = force_restart
        self.rolling_upgrade_monitoring_policy = rolling_upgrade_monitoring_policy
        self.instance_close_delay_duration = instance_close_delay_duration
        self.upgrade_mode = upgrade_mode
        self.upgrade_replica_set_check_timeout = upgrade_replica_set_check_timeout
        self.recreate_application = recreate_application
class ApplicationUserAssignedIdentity(msrest.serialization.Model):
    """A user-assigned managed identity reference for an application.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The friendly name of user assigned identity.
    :type name: str
    :param principal_id: Required. The principal id of user assigned identity.
    :type principal_id: str
    """
    # Both fields are mandatory on the wire.
    _validation = {
        'name': {'required': True},
        'principal_id': {'required': True},
    }
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'principal_id': {'key': 'principalId', 'type': 'str'},
    }
    def __init__(self, *, name: str, principal_id: str, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.principal_id = principal_id
class AvailableOperationDisplay(msrest.serialization.Model):
    """Human-readable details of an operation supported by the Service Fabric resource provider.

    :param provider: The name of the provider.
    :type provider: str
    :param resource: The resource on which the operation is performed.
    :type resource: str
    :param operation: The operation that can be performed.
    :type operation: str
    :param description: Operation description.
    :type description: str
    """
    # Maps python attribute -> wire key and msrest type for (de)serialization.
    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        provider: Optional[str] = None,
        resource: Optional[str] = None,
        operation: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description
class ScalingTrigger(msrest.serialization.Model):
"""Describes the trigger for performing a scaling operation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AveragePartitionLoadScalingTrigger, AverageServiceLoadScalingTrigger.
All required parameters | |
<reponame>cartoon-raccoon/dotfiles<gh_stars>0
##### cartoon-raccoon's qtile config file #####
from typing import List # noqa: F401
from libqtile import bar, layout, widget
from libqtile.config import Click, Drag, Group, ScratchPad, DropDown, Key, KeyChord, Match, Screen, Rule
from libqtile.lazy import lazy
from libqtile.utils import guess_terminal
from libqtile import hook
import os
import subprocess
import datetime
import cgi
# Primary modifier key: "mod4" is the Super/Windows key.
mod = "mod4"
# Use whichever terminal emulator qtile can find on this system.
terminal = guess_terminal()
#####! KEYBINDS !#####
# Global keybindings. All bindings are rooted at mod (Super) unless noted;
# media keys (XF86*) are bound without a modifier.
keys = [
    # Switch between windows
    Key([mod], "h", lazy.layout.left(), desc="Move focus to left"),
    Key([mod], "l", lazy.layout.right(), desc="Move focus to right"),
    Key([mod], "j", lazy.layout.down(), desc="Move focus down"),
    Key([mod], "k", lazy.layout.up(), desc="Move focus up"),
    Key([mod], "space", lazy.layout.next(),
        desc="Move window focus to other window"),
    # Move windows between left/right columns or move up/down in current stack.
    # Moving out of range in Columns layout will create new column.
    Key([mod, "shift"], "h", lazy.layout.shuffle_left(),
        desc="Move window to the left"),
    Key([mod, "shift"], "l", lazy.layout.shuffle_right(),
        desc="Move window to the right"),
    Key([mod, "shift"], "j", lazy.layout.shuffle_down(),
        desc="Move window down"),
    Key([mod, "shift"], "k", lazy.layout.shuffle_up(), desc="Move window up"),
    # Grow windows. If current window is on the edge of screen and direction
    # will be to screen edge - window would shrink.
    Key([mod, "control"], "h", lazy.layout.decrease_ratio(),
        desc="Grow window to the left"),
    Key([mod, "control"], "l", lazy.layout.increase_ratio(),
        desc="Grow window to the right"),
    Key([mod, "control"], "j", lazy.layout.grow_down(),
        desc="Grow window down"),
    Key([mod, "control"], "k", lazy.layout.grow_up(), desc="Grow window up"),
    Key([mod], "n", lazy.layout.normalize(), desc="Reset all window sizes"),
    # Cycle through groups with the arrow keys.
    Key([mod], "Left", lazy.screen.prev_group()),
    Key([mod], "Right", lazy.screen.next_group()),
    # Toggle between split and unsplit sides of stack.
    # Split = all windows displayed
    # Unsplit = 1 window displayed, like Max layout, but still with
    # multiple stack panes
    Key([mod, "shift"], "Return", lazy.layout.toggle_split(),
        desc="Toggle between split and unsplit sides of stack"),
    Key([mod], "Return", lazy.spawn(terminal), desc="Launch terminal"),
    # Toggle between different layouts as defined below
    Key([mod], "Tab", lazy.next_layout(), desc="Toggle between layouts"),
    Key([mod], "q", lazy.window.kill(), desc="Kill focused window"),
    # Toggle fullscreen and floating
    Key([mod], "f", lazy.window.toggle_fullscreen(), desc="Toggle fullscreen."),
    Key([mod], "t", lazy.window.toggle_floating(), desc="Toggle floating,"),
    # Jump straight to a named layout (names match the layouts list below).
    Key([mod], "m", lazy.group.setlayout(" max ")),
    Key([mod], "w", lazy.group.setlayout("tabbed ")),
    # Basic QTile commands
    Key([mod, "control"], "r", lazy.restart(), desc="Restart Qtile"),
    Key([mod, "control"], "q", lazy.shutdown(), desc="Shutdown Qtile"),
    Key([mod], "r", lazy.spawncmd(),
        desc="Spawn a command using a prompt widget"),
    # dropdown commands
    #Key([], 'F11', lazy.group['dropdowns'].dropdown_toggle('term')),
    #Key([], 'F12', lazy.group['dropdowns'].dropdown_toggle('qshell')),
    # music control keys
    Key([mod], "grave", lazy.spawn("mpc toggle")),
    Key([mod], "period", lazy.spawn("mpc next")),
    Key([mod], "comma", lazy.spawn("mpc prev")),
    Key([], "XF86AudioPlay", lazy.spawn("/home/sammy/.config/spotify-dbus.sh -t")),
    Key([mod, "shift"], "period", lazy.spawn("/home/sammy/.config/spotify-dbus.sh -n")),
    Key([mod, "shift"], "comma", lazy.spawn("/home/sammy/.config/spotify-dbus.sh -p")),
    # volume and brightness control
    Key([], 'XF86AudioRaiseVolume', lazy.spawn("pactl set-sink-volume @DEFAULT_SINK@ +5%")),
    Key([], 'XF86AudioLowerVolume', lazy.spawn("pactl set-sink-volume @DEFAULT_SINK@ -5%")),
    Key([], 'XF86AudioMute', lazy.spawn("pactl set-sink-mute @DEFAULT_SINK@ toggle")),
    Key([], 'XF86MonBrightnessUp', lazy.spawn("brightnessctl set +10%")),
    Key([], 'XF86MonBrightnessDown', lazy.spawn("brightnessctl set 10%-")),
    # screenshot keys
    Key([mod],"Print", lazy.spawn("/home/sammy/.config/scrot/run.sh")),
    Key([mod, "shift"], "Print", lazy.spawn("/home/sammy/.config/scrot/run.sh -u")),
    Key([mod, "shift"], "f", lazy.spawn("flameshot")),
    # Launch mode: keyboard shortcuts to launch a bunch of programs.
    KeyChord([mod],"p", [
        Key([], "f", lazy.spawn("firefox")),
        Key([], "s", lazy.spawn("spotify")),
        Key([], "d", lazy.spawn("discord")),
        Key([], "c", lazy.spawn("code")),
        Key([], "r", lazy.spawn("alacritty -e ranger")),
        Key([], "t", lazy.spawn("thunar")),
        Key([], "m", lazy.spawn("multimc"))
    ], mode = "launch"),
    # Lock the screen (reuses the i3lock wrapper script).
    Key([mod], "g", lazy.spawn("/home/sammy/.config/i3/i3lock"))
]
# Mouse bindings: mod + left-drag moves a floating window, mod + right-drag
# resizes it, and mod + middle-click raises it.
mouse = [
    Drag([mod], "Button1", lazy.window.set_position_floating(),
         start=lazy.window.get_position()),
    Drag([mod], "Button3", lazy.window.set_size_floating(),
         start=lazy.window.get_size()),
    Click([mod], "Button2", lazy.window.bring_to_front())
]
# Workspace groups. Labels are Nerd Font icon glyphs (they may render as blank
# boxes without that font installed); some groups autostart an application.
groups = [
    # main
    Group(' ', spawn = ["firefox"], layout = " max "),
    # dev
    Group(' ', spawn = ["code"], layout = " max "),
    # files
    Group(' ', spawn = ["thunar"]),
    # social
    Group(' '),
    # music
    Group(' ', spawn = ["spotify"]),
    # misc
    Group(' '),
    # reading
    Group(' '),
    # dropdowns
    # ScratchPad("dropdowns",
    #     DropDown("term", "alacritty", opacity = 0.9),
    #     DropDown("qshell", "alacritty -e qtile shell", opacity = 0.9)
    # )
]
# Bind group to its index in the group list and define mappings for window management.
for idx, grp in enumerate(groups, start=1):
    num_key = str(idx)
    keys.extend([
        # mod + number: switch to the group
        Key([mod], num_key, lazy.group[grp.name].toscreen(),
            desc="Switch to group {}".format(grp.name)),
        # mod + shift + number: move the focused window there and follow it
        Key([mod, "shift"], num_key, lazy.window.togroup(grp.name, switch_group=True),
            desc="Switch to & move focused window to group {}".format(grp.name)),
        # mod + ctrl + number: move the focused window there without switching
        Key([mod, "control"], num_key, lazy.window.togroup(grp.name),
            desc="Move focused window to group {}".format(grp.name)),
    ])
#####! LAYOUTS !#####
# Active layouts; names are padded so they line up in the CurrentLayout widget
# and are referenced verbatim by the mod+m / mod+w setlayout keybinds.
layouts = [
    layout.Tile(
        add_after_last = True,
        add_on_top = False,
        border_focus = "#efefef",
        border_normal = "#5f676a",
        margin = 4,
        ratio = 0.55,
        ratio_increment = 0.05,
        name = " tile "
    ),
    layout.MonadTall(
        border_focus = "#efefef",
        border_normal = "#5f676a",
        margin = 4,
        ratio = 0.55,
        name = "monadt "
    ),
    layout.TreeTab(
        active_bg = "#efefef",
        active_fg = "#222222",
        bg_color = "#202020",
        border_width = 2,
        font = "FiraCode Nerd Font",
        fontsize = 12,
        inactive_bg = "#5f676a",
        inactive_fg = "#efefef",
        sections = ['Tabs'],
        name = "tabbed "
    ),
    layout.Max(
        name = " max "
    ),
    # Unused layouts kept for reference:
    #layout.Columns(
    #    border_focus_stack='#efefef',
    #    border_focus='#efefef',
    #    border_normal='#5f676a',
    #    margin = 4,
    #    name = "columns"
    #),
    #layout.Stack(
    #    border_focus = "#efefef",
    #    border_normal = "#5f676a",
    #    num_stacks=2,
    #    margin = 4,
    #    name = " stack "
    #),
    #layout.Bsp(
    #    border_focus = "#efefef",
    #    border_normal = "#5f676a",
    #    margin = 4,
    #    name = " bsp "
    #),
    #layout.MonadWide(
    #    border_focus = "#efefef",
    #    border_normal = "#5f676a",
    #    margin = 4,
    #    name = "monadw "
    #),
    # layout.Matrix(),
    # layout.RatioTile(),
    # layout.VerticalTile(),
    # layout.Zoomy(),
]
#####! SCREENS AND WIDGETS !#####
# Defaults applied to every bar widget unless overridden per-widget.
widget_defaults = dict(
    font='FiraCode Nerd Font',
    fontsize=14,
    padding=3,
    foreground="#efefef",
)
# Extensions (e.g. prompt completers) share the same defaults.
extension_defaults = widget_defaults.copy()
# Used in the MPD widget to truncate titles if they get too long
def title_truncate(s):
    """Return s unchanged when it is 30 chars or fewer, else its first 30 chars plus '...'."""
    if len(s) <= 30:
        return s
    return s[:30] + "..."
# Used in the MPD widget to truncate artist lists
def artist_truncate(s):
    """Keep at most two comma-separated artists, summarizing the rest as 'Various'."""
    names = s.split(",")
    if len(names) <= 2:
        return s
    return ",".join(names[:2]) + ", Various"
# the top bar. not currently in use.
# The top bar: MPD + volume on the left, powerline-style system monitors on the
# right. The '' glyphs plus negative padding draw the slanted segment joins.
# NOTE(review): several labels are Nerd Font glyphs — verify they render with
# the configured font.
top_bar = bar.Bar(
    [
        widget.Mpd2(
            status_format = "{play_status} {artist}: {title} ({elapsed}/{duration}) [ {repeat}{random}{single}{consume}]",
            idle_format = " {idle_message} ",
            idle_message = "Nothing playing",
            format_fns = dict(
                #all=lambda s: cgi.escape(s),
                artist=artist_truncate,
                title=title_truncate,
                # strip the leading "0:" hours field from H:MM:SS
                elapsed=lambda s: str(datetime.timedelta(seconds=int(float(s))))[2:],
                duration=lambda s: str(datetime.timedelta(seconds=int(float(s))))[2:],
            ),
            padding = 10,
            fontsize = 13,
            play_states = {'play': ' ', 'pause': ' ', 'stop' : ' '},
            prepare_status = {
                'consume': ' ',
                'random' : '咽 ',
                'repeat' : '凌 ',
                'single' : '綾 ',
                'updating_db': 'ﮮ ',
            },
            space = '- ',
            update_interval = 0.5,
            markup = False,
        ),
        widget.Volume(
            fmt = '墳 {}',
            fontsize = 13,
        ),
        widget.Spacer(length = bar.STRETCH),
        widget.TextBox(text = '',
            foreground = '#2d728f',
            fontsize = 60,
            padding = -9,
        ),
        widget.DF(
            fmt = '/ {}',
            fontsize = 13,
            partition = '/home',
            format = '{uf}{m} ({r:.0f}%)',
            visible_on_warn = False,
            background = '#2d728f',
            padding = 5,
        ),
        widget.TextBox(text = '',
            background = '#2d728f',
            foreground = '#659157',
            fontsize = 60,
            padding = -9,
        ),
        widget.Memory(
            fmt = " {}",
            format = '{MemUsed: .0f}M ({MemPercent: .1f}%)',
            fontsize = 13,
            background = '#659157',
            padding = 5,
        ),
        widget.TextBox(text = '',
            background = '#659157',
            foreground = '#932546',
            fontsize = 60,
            padding = -9,
        ),
        widget.CPU(
            fmt = " {}",
            format = "{freq_current}GHz ({load_percent}%)",
            fontsize = 13,
            background = '#932546',
            padding = 5,
        ),
        widget.TextBox(text = '',
            background = '#932546',
            foreground = '#4a314d',
            fontsize = 60,
            padding = -9,
        ),
        widget.Net(
            interface = "wlp6s0",
            format = " {down} {up} ",
            fontsize = 13,
            background = '#4a314d',
            padding = 5,
        ),
        widget.TextBox(text = '',
            background = '#4a314d',
            foreground = '#d79921',
            fontsize = 60,
            padding = -9,
        ),
        widget.Battery(
            fmt = "{}",
            format = "[{char}] {percent:2.0%} {hour:d}:{min:02d} ",
            charge_char = 'C',
            discharge_char = 'D',
            empty_char = 'E',
            fontsize = 13,
            background = '#d79921',
            padding = 5,
        ),
        widget.TextBox(text = '',
            background = '#d79921',
            foreground = '#d16014',
            fontsize = 60,
            padding = -9,
        ),
        widget.ThermalSensor(
            fmt = ' {}',
            fontsize = 13,
            background = '#d16014',
            padding = 5,
        )
    ],
    30,
    margin = [0, 0, 4, 0],
    background = "#202020",
)
# the bottom bar.
bottom_bar = bar.Bar(
[
widget.CurrentLayout(),
widget.GroupBox(
highlight_method = 'line',
highlight_color = ['#202020', '#343434'],
this_current_screen_border = '#fabd2f',
this_screen_border = '#fabd2f',
),
widget.Spacer(length = 15),
widget.Prompt(),
widget.WindowName(),
#widget.Mpris2(
# fmt = '{title}',
# name = 'spotify',
# objname = 'org.mpris.MediaPlayer2.spotify'
| |
iters = 0
while ((distSqP > 0.000001) and (iters < self.IKMaxIters)):
JPullPInv = np.linalg.pinv(self.reachBody.linear_jacobian(offset=self.reachBodyOffset))
delQ = JPullPInv.dot(delPt)
alpha = 5.0
#reuse same jacobian
while True :#apply current jacobian repeatedly until doesn't improve
newQ = self.skel.q + alpha*delQ
delPt, distSqP, effWorldPos = self._IK_setSkelAndCompare(newQ, pos)
#print('iters : {} | alpha : {} | distP (sqDist) : {} | old dist : {}'.format(iters, alpha, distP,oldDist))
if (distSqP > oldDistSq):#got worse, try again with alpha *= .5
#return to previous state
delPt, oldDistSq, effWorldPos = self._IK_setSkelAndCompare(oldQ, pos)
if(alpha > .00001): #alpha not too small
alpha *= .5
else :#alpha too small and getting too
distSqP = oldDistSq
break
else :#got better, continue
oldDistSq = distSqP
oldQ = newQ
#input()
self.trackBodyCurPos = pos
if(self.debug_IK):
print('iter:{} delPt : {} | delQ {} | eff world pos {}'.format(iters,delPt, delQ, effWorldPos))
iters +=1
if(self.debug_IK):
print('IK Done : final world position {}\n'.format(effWorldPos))
######################################################
# cost/reward methods
######################################################
#generic objective/cost function calculation
#returns .5 * diff^T * wtMat * diff and gradient (wtMat * diff)
#assumes d(newVal) == np.ones(newVal.shape) (all 1s for individual newVal gradient, so newval is not product of actual indep variable and some other object, like jacobian)
def genericObj(self, costwt, newVal, oldVal, wts):
diff = newVal-oldVal
wtDotDiff = wts.dot(diff)
y = costwt * (.5 * np.transpose(diff).dot(wtDotDiff))
dy = costwt * wtDotDiff
return y, dy
#calculate pose matching objective - performed in generalized coordinate space
#minimize difference between some preset pose config and the current velocity fwd integrated
#requires optPoseUseIDXs to be defined (idxs of dofs to match)
def pose_objFunc(self, poseWt, qdotPrime):
#pose matching via accelerations
poseAccel = np.zeros(self.ndofs)
desPoseAccel = np.zeros(self.ndofs)
poseAccel[(self.optPoseUseIDXs)] = (qdotPrime[(self.optPoseUseIDXs)] - self.curMatchPoseDot)/self.timestep
#desire pose acceleration TODO : this is in pose space, not world space, and only of dofs we are trying to match
desPoseAccel[(self.optPoseUseIDXs)] = (self.matchPose - self.curMatchPose) * self.kPose - self.tSqrtKpose * self.curMatchPoseDot
poseAccelDiff = poseAccel-desPoseAccel
# #Only Pose here
posePart = poseWt * .5 * (poseAccelDiff.dot(poseAccelDiff))
poseGrad = poseWt * poseAccelDiff
return posePart, poseGrad
# #minimize exerted force objective - requires JtPullPInv to be calculated, to find force
# def force_objFunc(self, frcWt, tau):
# #difference between pull force as determined by torques at point of application and actual desired force
# #JtPullPInv dot tau is actual force at application point
# #derivative is JtPullPInv
# #
# jtDotTau = self.JtPullPInv.dot(tau)
# frcDiff = jtDotTau - self.useForce
## #isn't this below torques of whole body, vs Torques responsible for external force
## #really is tau difference
# #frcDiff = tau - self.Tau_JtFpull
# #print('difference between actual and desired force {}'.format(frcDiff))
#
# frcPart = frcWt * (.5 * (frcDiff.dot(frcDiff)))
# #torque gradient here is derivative w/respect to tau
# #deriv of f(g(x)) is f'(g(x))*g'(x)
# #f(x)== frcWt * .5 * x^2 :: f'(x)= frcWt * x
# #g(x) == jtDotTau - self.useForce -> JtPullPInv.dot(tau) - self.useForce :: g'(x) == JtPullPInv
# frcGrad = frcWt * frcDiff.dot(self.JtPullPInv)
# return frcPart, frcGrad
#minimize end effector distance from sphere location - for tracking
#using future velocity to fwd integrate current position
def effPos_objFunc(self, locWt, qdotPrime):
curLoc = self.curEffPos
curWrldVel = self.JpullLin.dot(qdotPrime)
#new location in world will be current world location + timestep* (worldVel == J_eff * new qdot)
newLoc = curLoc + self.timestep * curWrldVel
#trackBody needs to be set for this objective
locDiff = newLoc - self.trackBodyCurPos
locPart = locWt * (.5 * (locDiff.dot(locDiff)))
#gradient of locPrt = locDiff * d(locDiff)
#d(locDiff) = timestep * self.JpullLin
locGrad = locWt * locDiff.dot(self.timestep*self.JpullLin)
return locPart, locGrad
#match a world-space velocity with end effector velocity
def effVel_objFunc(self, pVelWt, qdotPrime):
curWrldVel = self.JpullLin.dot(qdotPrime)
pVelDiff = curWrldVel - self.trackBodyVel
pVelPart = pVelWt *(.5*(pVelDiff.dot(pVelDiff)))
#gradient of vel part == pVelDiff * d(pVelDiff)
#d(pVelDiff) == self.JpullLin
pVelGrad = pVelWt * pVelDiff.dot(self.JpullLin)
return pVelPart,pVelGrad
    # Per-step sanity tests and torque assignment for the optimal controller;
    # implemented by concrete subclasses.
    @abstractmethod
    def doStepTestsAndSetTau_OptCntrl(self):
        pass
    # Subclass hook to refresh per-step simulation values.
    @abstractmethod
    def setSimValsPriv(self):
        pass
    #objective function referenced by optimization process
    @abstractmethod
    def objFunc(self, x, grad):
        pass
    #MA constraint equation
    @abstractmethod
    def MAcnstVec(self, result, x, grad):
        pass
    # Signed wrappers so the equality constraint can be posed as a pair of
    # inequality constraints (>= 0 and <= 0) for the optimizer.
    def MAconstVecPos(self, result, x, grad):
        return self.MAcnstVec(result, x, grad)
    def MAconstVecNeg(self, result, x, grad):
        return -self.MAcnstVec(result, x, grad)
    #set all instance-specific constraints
    @abstractmethod
    def setInstanceConstrnts(self):
        pass
#test a proposed optimization solution to see if it satisfies constraint
def dbg_testMAconstVec(self, x, debug=False):
result = np.zeros(np.size(self.cnstTolMA))
tar = np.zeros(np.size(self.cnstTolMA))
self.MAcnstVec(result, x, np.empty(shape=(0,0)))
passed = True
#result should be all zeros
if (not np.allclose(result, tar, self.cnstTolMA)):
print('!!!!!!!!!!!!!!!!! MAcnstVec constraint violated : ')
passed = False
if(not debug):
for x in result:
print('\t{}'.format(x))
else:
print('MAcnstVec satisfied by result')
if(debug):
for x in result:
print('\t{}'.format(x))
return passed
    #build force dictionary to be used to verify pull force - this is called after sim step
    def dbgBuildFrcDict(self):
        """Populate self.frcD with the post-step force decomposition used to
        verify the pull force, optionally tracking min/max/total generated force."""
        # dynamics terms from the simulated skeleton at the current state
        ma = self.skel.M.dot(self.skel.ddq )
        cg = self.skel.coriolis_and_gravity_forces()
        cnstrntFrc = self.skel.constraint_forces()
        #torque cntrol desired to provide pulling force at contact location on reaching hand
        Tau_JtFpull_new, JtPullPInv_new, Jpull_new, JpullLin_new = self.getPullTau(self.useLinJacob,debug=True)
        self.frcD = {}
        self.frcD['tau']=np.copy(self.tau)
        self.frcD['ma']=ma
        self.frcD['cg']=cg
        self.frcD['cnstf']=cnstrntFrc
        # Jacobian-transpose pseudo-inverse maps generalized forces to the pull
        # contact point; project each dynamics term through it.
        self.frcD['JtPullPInv_new'] = JtPullPInv_new
        self.frcD['jtDotCGrav'] = JtPullPInv_new.dot(cg)
        self.frcD['jtDotCnstFrc'] = JtPullPInv_new.dot(cnstrntFrc)
        self.frcD['jtDotMA'] = JtPullPInv_new.dot(ma)
        self.frcD['jtDotTau'] = JtPullPInv_new.dot(self.frcD['tau'])
        #total pull force of listed forces (doesn't include contact force calc if present)
        self.frcD['totPullFrc'] = self.frcD['jtDotTau'] - self.frcD['jtDotCGrav'] - self.frcD['jtDotMA']
        self.frcD['totPullFrcCnst'] = self.frcD['totPullFrc'] + self.frcD['jtDotCnstFrc']
        if(self.monitorGenForce):
            # track component-wise extremes and history of the generated force
            self._checkiMinMaxVals(self.frcD['totPullFrcCnst'], self.minMaxFrcDict)
            #self._checkiMinMaxVals(self.frcD['totPullFrcCnst'], self.minMaxFrcDict, self.maxGenFrc)
            self.totGenFrc.append(self.frcD['totPullFrcCnst'])
        #self.frcD['totPullFrcOld'] = self.frcD['jtDotTau'] - self.frcD['jtDotCGrvOld'] #- self.frcD['jtDotMAOld']
def dbg_dispMinMaxGuesses(self):
self.dbgDispGuess_priv(self.minMaxGuessDict['min'], 'Min Guess Vals Seen ')
print('\n')
self.dbgDispGuess_priv(self.minMaxGuessDict['max'], 'Max Guess Vals Seen ')
print('\n')
def dbg_dispMinMaxForce(self):
if(self.monitorGenForce):
print('Desired Force value : \t{}'.format(self.useForce) )
print('Min Force Value Seen : \t{}'.format(self.minMaxFrcDict['min']))
print('Max Force Value Seen : \t{}'.format(self.minMaxFrcDict['max']))
# print('Min Force Value Seen : \t{}'.format(self.minGenFrc))
# print('Max Force Value Seen : \t{}'.format(self.maxGenFrc))
mean = np.mean(self.totGenFrc, axis=0)
print('Mean Force Seen : \t{}'.format(mean))
stdVal = np.std(self.totGenFrc, axis=0)
print('Std of Force Seen : \t{}\n'.format(stdVal))
else:
print('Min/Max Force generated not monitored. Set self.monitorGenForce to true in constructor' )
    #display instance classes partition of guess values for debugging
    @abstractmethod
    def dbgDispGuess_priv(self, guess, name=' '):
        """Display a partition of optimization guess values for debugging.

        Abstract hook: each subclass knows how its guess vector is
        partitioned.  `guess` is the vector to display, `name` a label
        prefix for the printout.
        """
        pass
    #test results, display calculated vs simulated force results
    @abstractmethod
    def testFrcRes(self):
        """Compare and display calculated vs. simulated force results.

        Abstract hook implemented by concrete skeleton holders.
        """
        pass
    #perform post-step calculations for robot - no reward for Inv Dyn
    def calcRewardAndCheckDone(self, debug):
        """Post-step bookkeeping for the helper bot; inverse dynamics earns
        no reward, so this always returns rwd=0 and done=False.

        NOTE(review): the `debug` parameter is ignored — the body tests
        `self.debug` instead; confirm whether the argument should be used.
        """
        if (not self.isFrwrdSim):
            if (self.debug):
                print('helperBotSkelHolder::calcRewardAndCheckDone : No Optimization since {} set to not mobile'.format(self.skel.name))
        else:
            # forward-simulated bot: verify force results after the step
            self.testFrcRes()
        done=False
        rwd=0
        #preserve this format even though rllab uses different format for result dictionary
        dct = {'broke_sim': False, 'raiseVelScore': 0, 'height_rew':0,
               'actionPenalty':0, 'is_done': done}
        dbgDict = defaultdict(list)
        return rwd, done, dct, dbgDict
#class to hold assisting robot
class robotSkelHolder(helperBotSkelHolder):
    def __init__(self, env, skel, widx, stIdx, fTipOffset):
        """Set up the humanoid helper-bot skeleton holder.

        Delegates to helperBotSkelHolder and then configures the
        biped-specific optimization settings.
        """
        helperBotSkelHolder.__init__(self,env, skel,widx,stIdx, fTipOffset)
        self.name = 'Humanoid Helper Bot'
        # number of iterations allotted to the per-step optimization
        self.numOptIters = 1000
        #const bound magnitude
        self.bndCnst = 200
        #robot biped uses contacts in optimization process
        self.numCntctDims = 12
        #robot optimization attempts to match pose
        self.doMatchPose = True
        #self.nOptDims = self.getNumOptDims()
#called for each skeleton type, to configure multipliers for skeleton-specific action space
def _setupSkelSpecificActionSpace(self, action_scale):
action_scale[[1,2,8,9]] *= .75
#head
action_scale[[17,18]] *= .5
#2 of each bicep
action_scale[[20,22,26,28]]*= .5
# shoulders
action_scale[[19,25]]*= .75
#scale ankles actions less
action_scale[[4,5,11,12]]*= .5
#scale feet and forearms, hands much less
action_scale[[6,13, 23,24, 29,30]]*= .25
return action_scale
#build the configuration of the initial pose of the figure
def _makeInitPoseIndiv(self):
initPose = np.zeros(self.skel.ndofs)
#move to initial body position
initPose[1] = 3.14
initPose[3] = 0.98
initPose[4] = .85
#bend at waist
initPose[21] = -.4
#stretch out left arm at shoulder to align with hand
initPose[26] = .25
#stretch out left hand
initPose[27] = -1.2
#set reach hand to be left hand - name of body node
self.setReachHand('h_hand_left')
return initPose
def getNumOptDims(self):
# dim of optimization parameters/decision vars-> 2 * ndofs + 12 for full body bot : qdot, cntctFrcs, Tau
return 2*self.ndofs + 12
#return idx's of dofs that are to be matched if pose matching objective is used
def initOptPoseData_Indiv(self):
#dof idxs | |
# Source: KBIbiopharma/pybleau -- pybleau/app/plotting/plot_config.py
""" Module containing plot configurator objects. They are responsible for
holding/collecting all data (arrays and styling info) to make a Chaco plot.
They display data and styling choices using TraitsUI views, and use user
selections to collect all necessary data from a source DataFrame. This is where
the translation between dataFrame and numpy arrays consumed by Chaco is done.
"""
import logging
import pandas as pd
from traits.api import Any, Bool, cached_property, Constant, Dict, \
HasStrictTraits, Instance, Int, List, on_trait_change, Property, Str
from traitsui.api import CheckListEditor, EnumEditor, HGroup, InstanceEditor, \
Item, Label, ListStrEditor, OKCancelButtons, Spring, Tabbed, VGroup, View
from pybleau.app.model.dataframe_analyzer import CATEGORICAL_COL_TYPES
from pybleau.app.plotting.bar_plot_style import BarPlotStyle
from pybleau.app.plotting.heatmap_plot_style import HeatmapPlotStyle
from pybleau.app.plotting.histogram_plot_style import HistogramPlotStyle
from pybleau.app.plotting.plot_style import BaseColorXYPlotStyle, \
BaseXYPlotStyle, SingleLinePlotStyle, SingleScatterPlotStyle
from pybleau.app.plotting.renderer_style import BarRendererStyle, \
CmapScatterRendererStyle, LineRendererStyle, ScatterRendererStyle
from pybleau.app.utils.chaco_colors import assign_renderer_colors
from pybleau.app.utils.string_definitions import BAR_PLOT_TYPE, \
CMAP_SCATTER_PLOT_TYPE, HEATMAP_PLOT_TYPE, HIST_PLOT_TYPE, \
LINE_PLOT_TYPE, SCATTER_PLOT_TYPE
X_COL_NAME_LABEL = "Column to plot along X"
Y_COL_NAME_LABEL = "Column to plot along Y"
logger = logging.getLogger(__name__)
class BasePlotConfigurator(HasStrictTraits):
    """ Base class for configuring a plot or a group of plots.

    Sub-classes collect which columns of `data_source` to plot and how to
    style them; `to_dict()` exports the collected values so a PlotFactory
    can build the actual Chaco plot.
    """
    #: Source DataFrame to extract the data to plot from
    data_source = Instance(pd.DataFrame)

    #: Transformed DataFrame if data transformation are needed before plotting
    transformed_data = Property(depends_on="data_source, x_col_name, "
                                           "y_col_name, z_col_name")

    #: Grouped plot style information
    plot_style = Instance(BaseXYPlotStyle)

    #: Title of the future plot, or plot pattern for MultiConfigurators
    plot_title = Str

    #: Class to use to create TraitsUI window to open controls
    view_klass = Any(View)

    # TraitsUI item displaying the (read-only) plot type in the view
    _plot_type_item = Property

    # List of attributes to export to pass to the factory
    _dict_keys = List

    # List of columns in the data source DF columns
    _available_columns = Property(depends_on="data_source")

    # DF columns in the data source that are non-categorical
    _numerical_columns = Property(depends_on="data_source")

    # Traits methods ----------------------------------------------------------

    def traits_view(self):
        """ Break the view building in 2 sets to simplify sub-classing.
        """
        view = self.view_klass(
            Tabbed(
                VGroup(
                    self._plot_type_item,
                    Item("plot_title"),
                    *self._data_selection_items(),
                    show_border=True, label="Data Selection"
                ),
                VGroup(
                    Item("plot_style", editor=InstanceEditor(), style="custom",
                         show_label=False),
                    show_border=True, label="Plot Style"
                ),
            ),
            resizable=True,
            buttons=OKCancelButtons,
            title="Configure plot",
        )
        return view

    def _get__plot_type_item(self):
        # Centered, read-only display of the plot type.
        return HGroup(
            Spring(),
            Item('plot_type', style="readonly"),
            Spring(),
        )

    # Public interface --------------------------------------------------------

    def to_dict(self):
        """ Export self to a description dict, to be fed to a PlotFactory.

        Raises
        ------
        ValueError
            If no data source is available.
        KeyError
            If a column is requested, but not available.
        """
        if self.transformed_data is None:
            msg = "A configurator must be provided the dataframe the column" \
                  " names are referring to."
            logger.exception(msg)
            raise ValueError(msg)

        out = {}
        # Entries in _dict_keys are attribute names, or (attr, target_key)
        # pairs when the exported key differs from the attribute name.
        for key in self._dict_keys:
            if isinstance(key, str):
                out[key] = getattr(self, key)
            else:
                name, target_name = key
                out[target_name] = getattr(self, name)

        out["plot_style"] = self.plot_style
        return out

    def df_column2array(self, col_name, df=None):
        """ Collect a DF column and convert to numpy array, including index.

        Passing "index" (or the index's actual name) returns the index
        values; `df` defaults to `transformed_data`.
        """
        if df is None:
            df = self.transformed_data

        if col_name in ["index", df.index.name]:
            return df.index.values

        return df[col_name].values

    # Traits property getters/setters -----------------------------------------

    def _get_transformed_data(self):
        # Default: no transformation; sub-classes may override.
        return self.data_source

    def _get__available_columns(self):
        index_name = self.data_source.index.name
        if index_name is None:
            # Unnamed index is exposed under the generic name "index".
            index_name = "index"
        return list(self.data_source.columns) + [index_name]

    def _get__numerical_columns(self):
        # Exclude categorical-like dtypes; the index is always offered.
        cat_types = CATEGORICAL_COL_TYPES
        num_cols = self.data_source.select_dtypes(exclude=cat_types).columns
        index_name = self.data_source.index.name
        if index_name is None:
            index_name = "index"
        return list(num_cols) + [index_name]
class BaseSinglePlotConfigurator(BasePlotConfigurator):
    """ Configuration for a single plot.

    Holds the column selections (x/y/z, secondary y, hover columns) and the
    corresponding axis titles. Axis titles default to a beautified version
    of the selected column names and are refreshed when a column selection
    changes, but remain user-editable.
    """
    #: Type of plot generated
    plot_type = Str

    #: Template that the plot is made from (if exists)
    source_template = Str

    #: Column name to display along the x-axis
    x_col_name = Str

    #: Column name to display along the y-axis
    y_col_name = Str

    #: Column name to display along the z-axis
    z_col_name = Str

    #: Title to display along the x-axis
    x_axis_title = Str

    #: Title to display along the y-axis
    y_axis_title = Str

    #: Column name(s) to display along the secondary y-axis
    second_y_col_name = Str

    #: Title to display along the secondary y-axis
    second_y_axis_title = Str

    #: Title to display along the z-axis
    z_axis_title = Str

    #: List of columns to display in an overlay on hover
    hover_col_names = List(Str)

    #: Data to be displayed on hover
    hover_data = Property(Dict)

    # Traits listeners --------------------------------------------------------

    def _x_col_name_changed(self, new):
        self.x_axis_title = col_name_to_title(new)

    def _y_col_name_changed(self, new):
        self.y_axis_title = col_name_to_title(new)

    def _z_col_name_changed(self, new):
        self.z_axis_title = col_name_to_title(new)

    # Traits property getters/setters -----------------------------------------

    def _get_hover_data(self):
        # No hover support by default; sub-classes may override.
        return {}

    # Traits initializers -----------------------------------------------------

    def _x_axis_title_default(self):
        return col_name_to_title(self.x_col_name)

    def _y_axis_title_default(self):
        return col_name_to_title(self.y_col_name)

    def _z_axis_title_default(self):
        # BUG FIX: was derived from y_col_name (copy/paste error); the z-axis
        # title must default to the beautified z column name, matching the
        # _z_col_name_changed listener above.
        return col_name_to_title(self.z_col_name)
class BaseSingleXYPlotConfigurator(BaseSinglePlotConfigurator):
    """ GUI configurator to create a new Chaco Plot.

    Note: may contain multiple renderers.

    When a z (color) column is selected and it is string-like (or forced
    discrete), data arrays are exported as dictionaries mapping each z value
    to the sub-array for that group (one renderer per z value). Otherwise
    plain arrays are exported (single renderer).
    """
    #: X coordinates of the data points
    x_arr = Property

    #: Y coordinates of the data points
    y_arr = Property

    #: Z (color) coordinates of the data points to generate colored renderers
    z_arr = Property

    #: Force floating point or integer column to be treated as discrete values?
    force_discrete_colors = Bool

    #: Supports hovering to display more data?
    _support_hover = Bool

    #: Whether this configuration will lead to 1 or multiple renderers
    _single_renderer = Property(Bool)

    #: Grouped plot style information (re-declares the base class trait)
    plot_style = Instance(BaseXYPlotStyle)

    #: Whether the column selected to colorize the renderers contains floats
    colorize_by_float = Property(Bool,
                                 depends_on="transformed_data, z_col_name, "
                                            "force_discrete_colors")

    #: Flag setting whether to use only numerical columns in x/y selections
    _numerical_only = Bool

    @cached_property
    def _get_colorize_by_float(self):
        """ Colors are continuous iff z is numerical and not forced discrete.
        """
        if not self.z_col_name:
            return False

        if self.transformed_data is None:
            # Skip for now: can't compute that property
            return False

        df = self.transformed_data
        color_by_discrete = (self.force_discrete_colors or
                             df[self.z_col_name].dtype in [bool, object])
        return not color_by_discrete

    # Traits property getters/setters -----------------------------------------

    def _get_x_arr(self):
        """ Collect the x array from the dataframe and the column name for x.

        Returns
        -------
        np.array or dict
            Collect either an array to display along the x axis or a dictionary
            of arrays mapped to z-values if a coloring column was selected.
        """
        return self._collect_column_data(self.x_col_name)

    def _get_y_arr(self):
        """ Collect the y array from the dataframe and the column name for y.

        Returns
        -------
        np.array or dict
            Collect either an array to display along the y axis or a dictionary
            of arrays mapped to z-values if a coloring column was selected.
        """
        return self._collect_column_data(self.y_col_name)

    def _collect_column_data(self, col_name):
        """ Collect a column as one array, or as a dict mapping z values to
        sub-arrays when one renderer per z value is needed.

        (Dedup: the x and y getters previously duplicated this body.)
        """
        if not col_name:
            return None

        if self._single_renderer:
            return self.df_column2array(col_name)
        else:
            grpby = self.transformed_data.groupby(self.z_col_name)
            all_arr = {}
            for z_val, subdf in grpby:
                all_arr[z_val] = self.df_column2array(col_name, df=subdf)
            return all_arr

    def _get_hover_data(self):
        """ Collect additional arrays to store in the future ArrayPlotData to
        display on hover.

        Returns
        -------
        dict(str: np.array) or dict(str: dict)
            Map column names to arrays or to dictionaries mapping hue values to
            arrays if a coloring column was selected.
        """
        if not self.hover_col_names:
            return {}

        hover_data = {}
        if self._single_renderer:
            # No coloring of the scatter points: hover_data will contain arrays
            # for each of the properties to display.
            for col in self.hover_col_names:
                hover_data[col] = self.df_column2array(col)
        else:
            # PERF FIX: the groupby is identical for every hover column, so
            # compute it once instead of once per column.
            grpby = self.transformed_data.groupby(self.z_col_name)
            for col in self.hover_col_names:
                hover_data[col] = {}
                for z_val, subdf in grpby:
                    hover_data[col][z_val] = self.df_column2array(col,
                                                                  df=subdf)
        return hover_data

    def _get__single_renderer(self):
        if not self.z_col_name:
            # No coloring, so single renderer
            return True

        if self.transformed_data[self.z_col_name].dtype in [bool, object]:
            # Coloring by a string column so multiple renderers
            return False
        else:
            # Coloring by a numerical column so single renderer, unless
            # numerical values are forced to be treated as discrete values:
            return not self.force_discrete_colors

    def _get_z_arr(self):
        # No z data by default; colormapped sub-classes override this.
        return None

    # Traits private interface ------------------------------------------------

    def _data_selection_columns(self):
        # Restrict x/y choices to numerical columns when requested.
        return self._numerical_columns if self._numerical_only else \
            self._available_columns

    def _color_selection_columns(self):
        # Empty entry first so the color dimension can be de-selected.
        return [""] + self._available_columns

    def _data_selection_items(self):
        """ Build the default list of items to select data to plot in XY plots.
        """
        columns = self._data_selection_columns()
        enum_data_columns = EnumEditor(values=columns)
        col_list_empty_option = self._color_selection_columns()
        optional_enum_data_columns = EnumEditor(values=col_list_empty_option)
        items = [
            HGroup(
                Item("x_col_name", editor=enum_data_columns,
                     label=X_COL_NAME_LABEL),
                Item("x_axis_title")
            ),
            HGroup(
                Item("y_col_name", editor=enum_data_columns,
                     label=Y_COL_NAME_LABEL),
                Item("y_axis_title")
            ),
            VGroup(
                HGroup(
                    Item("z_col_name", editor=optional_enum_data_columns,
                         label="Color column"),
                    Item("z_axis_title", label="Legend title",
                         visible_when="z_col_name"),
                    Item("force_discrete_colors",
                         tooltip="Treat floats as unrelated discrete "
                                 "values?",
                         visible_when="z_col_name")
                ),
                Item("_available_columns", label="Display on hover",
                     editor=ListStrEditor(selected="hover_col_names",
                                          multi_select=True),
                     visible_when="_support_hover"),
                show_border=True, label="Optional Data Selection"
            )
        ]
        return items

    # Traits initialization methods -------------------------------------------

    def __dict_keys_default(self):
        return ["plot_title", "x_col_name", "y_col_name", "z_col_name",
                "x_axis_title", "y_axis_title", "z_axis_title", "x_arr",
                "y_arr", "z_arr", "hover_data", "hover_col_names",
                "second_y_col_name", "second_y_axis_title"]
class BarPlotConfigurator(BaseSingleXYPlotConfigurator):
""" Configuration object for building a bar | |
values can be empty '
'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
arg_group='Identity')
    # Arguments for `az healthcareapis workspace dicom-service delete`.
    with self.argument_context('healthcareapis workspace dicom-service delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')

    # Arguments for `az healthcareapis workspace dicom-service wait`.
    with self.argument_context('healthcareapis workspace dicom-service wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')
    # Arguments for `az healthcareapis workspace iot-connector list`.
    with self.argument_context('healthcareapis workspace iot-connector list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')

    # Arguments for `az healthcareapis workspace iot-connector show`.
    with self.argument_context('healthcareapis workspace iot-connector show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')

    # Arguments for `az healthcareapis workspace iot-connector create`.
    with self.argument_context('healthcareapis workspace iot-connector create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
        c.argument('ingestion_endpoint_configuration',
                   options_list=['--ingestion-endpoint-configuration', '-c'],
                   action=AddIngestionEndpointConfiguration, nargs='*', help='Source configuration.')
        c.argument('content', type=validate_file_or_dict, help='The mapping. Expected value: '
                   'json-string/json-file/@json-file.', arg_group='Device Mapping')

    # Arguments for `az healthcareapis workspace iot-connector update`.
    with self.argument_context('healthcareapis workspace iot-connector update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')

    # Arguments for `az healthcareapis workspace iot-connector delete`.
    with self.argument_context('healthcareapis workspace iot-connector delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')

    # Arguments for `az healthcareapis workspace iot-connector wait`.
    with self.argument_context('healthcareapis workspace iot-connector wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')
    # Arguments for `az healthcareapis workspace iot-connector fhir-destination list`.
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.')

    # Arguments for `az healthcareapis workspace iot-connector fhir-destination show`.
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')

    # Arguments for `az healthcareapis workspace iot-connector fhir-destination create`.
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.')
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('resource_identity_resolution_type',
                   options_list=['--resource-identity-resolution-type', '-t'],
                   arg_type=get_enum_type(['Create', 'Lookup']),
                   help='Determines how resource identity is resolved on the destination.')
        c.argument('fhir_service_resource_id',
                   options_list=['--fhir-service-resource-id', '-r'],
                   type=str, help='Fully qualified resource id of the FHIR service to connect to.')
        c.argument('content', type=validate_file_or_dict, help='The mapping. Expected value: '
                   'json-string/json-file/@json-file.', arg_group='Fhir Mapping')

    # Arguments for `az healthcareapis workspace iot-connector fhir-destination update`.
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('resource_identity_resolution_type',
                   options_list=['--resource-identity-resolution-type', '-t'],
                   arg_type=get_enum_type(['Create', 'Lookup']),
                   help='Determines how resource identity is resolved on the destination.')
        c.argument('fhir_service_resource_id',
                   options_list=['--fhir-service-resource-id', '-r'],
                   type=str, help='Fully qualified resource id of the FHIR service to connect to.')
        c.argument('content', type=validate_file_or_dict, help='The mapping. Expected value: '
                   'json-string/json-file/@json-file.', arg_group='Fhir Mapping')
        c.ignore('iot_fhir_destination')

    # Arguments for `az healthcareapis workspace iot-connector fhir-destination delete`.
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')

    # Arguments for `az healthcareapis workspace iot-connector fhir-destination wait`.
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')
    # Arguments for `az healthcareapis workspace fhir-service list`.
    with self.argument_context('healthcareapis workspace fhir-service list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')

    # Arguments for `az healthcareapis workspace fhir-service show`.
    with self.argument_context('healthcareapis workspace fhir-service show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')

    # Arguments for `az healthcareapis workspace fhir-service create`.
    with self.argument_context('healthcareapis workspace fhir-service create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
        c.argument('kind', arg_type=get_enum_type(['fhir-Stu3', 'fhir-R4']), help='The kind of the service.')
        c.argument('access_policies', action=AddFhirservicesAccessPolicies, nargs='*', help='Fhir Service access '
                   'policies.')
        c.argument('authentication_configuration', options_list=['--authentication-configuration', '-c'],
                   action=AddFhirservicesAuthenticationConfiguration, nargs='*',
                   help='Fhir Service authentication configuration.')
        c.argument('cors_configuration', action=AddFhirservicesCorsConfiguration, nargs='*', help='Fhir Service Cors '
                   'configuration.')
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')
        c.argument('default', arg_type=get_enum_type(['no-version', 'versioned', 'versioned-update']), help='The '
                   'default value for tracking history across all resources.', arg_group='Resource Version Policy '
                   'Configuration')
        c.argument('resource_type_overrides', options_list=['--resource-type-overrides', '-r'],
                   action=AddResourceTypeOverrides, nargs='*', help='A list of FHIR '
                   'Resources and their version policy overrides. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...',
                   arg_group='Resource Version Policy Configuration')
        c.argument('export_configuration_storage_account_name',
                   options_list=['--export-configuration-storage-account-name', '-s'],
                   type=str, help='The name of the default export storage account.',
                   arg_group='Export Configuration')
        c.argument('login_servers', nargs='*', help='The list of the Azure container registry login servers.',
                   arg_group='Acr Configuration')
        c.argument('oci_artifacts', action=AddFhirservicesOciArtifacts, nargs='*', help='The list of Open Container '
                   'Initiative (OCI) artifacts.', arg_group='Acr Configuration')

    # Arguments for `az healthcareapis workspace fhir-service update`.
    with self.argument_context('healthcareapis workspace fhir-service update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')

    # Arguments for `az healthcareapis workspace fhir-service delete`.
    with self.argument_context('healthcareapis workspace fhir-service delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')

    # Arguments for `az healthcareapis workspace fhir-service wait`.
    with self.argument_context('healthcareapis workspace fhir-service wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')
    # Arguments for `az healthcareapis workspace private-endpoint-connection list`.
    with self.argument_context('healthcareapis workspace private-endpoint-connection list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')

    # Arguments for `az healthcareapis workspace private-endpoint-connection show`.
    with self.argument_context('healthcareapis workspace private-endpoint-connection show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource', id_part='child_name_1')
with self.argument_context('healthcareapis workspace private-endpoint-connection create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('workspace_name', type=str, help='The name of workspace resource.')
c.argument('private_endpoint_connection_name',
options_list=['--name', '-n', '--private-endpoint-connection-name'],
type=str, help='The name of the private endpoint connection '
'associated with the Azure resource')
c.argument('private_link_service_connection_state',
options_list=['--private-link-service-connection-state', '-s'],
action=AddPrivateLinkServiceConnectionState, nargs='*',
help='A collection of information about the state | |
# Source: bots/invaders/agent.py
import os
import math
import sys
from typing import List, Tuple
# for kaggle-environments
from abn.game_ext import GameExtended
from abn.jobs import Task, Job, JobBoard
from abn.actions import Actions
from lux.game_map import Position, Cell, RESOURCE_TYPES
from lux.game_objects import City
from lux.game_constants import GAME_CONSTANTS
from lux import annotate
## DEBUG ENABLE
# Each flag gates a group of annotate.* debug-drawing calls in agent();
# they only affect replay visualization, never the chosen actions.
DEBUG_SHOW_TIME = False          # side-text with turn counter / daylight countdown
DEBUG_SHOW_CITY_JOBS = False     # per-city BUILD/ENERGIZE job counts
DEBUG_SHOW_CITY_FULLED = False   # per-tile text showing city.isFulled()
DEBUG_SHOW_EXPAND_MAP = True
DEBUG_SHOW_EXPAND_LIST = False   # side-text listing candidate expansion cells per city
DEBUG_SHOW_INPROGRESS = True     # side-text of each unit's current job
DEBUG_SHOW_TODO = True
DEBUG_SHOW_ENERGY_MAP = False
DEBUG_SHOW_ENEMY_CITIES = False
DEBUG_SHOW_INVASION_MAP = False
DEBUG_SHOW_EXPLORE_MAP = False
# Hard cap on a city's size; pending BUILD jobs count toward it (see city_can_expand).
MAX_CITY_SIZE = 10
# Minimum distance from an existing city when picking an EXPLORE target.
DISTANCE_BETWEEN_CITIES = 5
def find_closest_city_tile(pos, player):
    """Return the player's city tile nearest to *pos*.

    Distance is measured with ``tile.pos.distance_to(pos)``. Ties go to the
    first tile in iteration order (cities dict order, then tile order).
    Returns None when the player owns no city tiles at all.
    """
    all_tiles = [tile for city in player.cities.values() for tile in city.citytiles]
    if not all_tiles:
        return None
    return min(all_tiles, key=lambda tile: tile.pos.distance_to(pos))
def can_build_worker(player) -> int:
    """How many more workers the player may build.

    Worker capacity equals the number of owned city tiles; the result is the
    remaining headroom over the current unit count, floored at zero.
    """
    total_tiles = sum(len(city.citytiles) for city in player.cities.values())
    return max(0, total_tiles - len(player.units))
def city_can_expand(city: City, jobs: JobBoard) -> bool:
    """Whether *city* is allowed to grow by one tile.

    Requires both that the city has enough fuel banked to survive the night
    (``isFulled``) and that its size — counting BUILD jobs already queued for
    it — stays below MAX_CITY_SIZE.
    """
    # City can expand only if it has fuel to pass the night.
    has_energy = city.isFulled()
    # Pending BUILD jobs count as future tiles toward the size cap.
    pending_builds = jobs.count(Task.BUILD, city_id=city.cityid)
    can_expand = len(city.citytiles) + pending_builds < MAX_CITY_SIZE
    # Use logical 'and' (short-circuits) instead of the bitwise '&' the
    # original used; same truth table on bools, but idiomatic and lazy.
    return has_energy and can_expand
# Define global variables
# Module-level state survives across agent() calls within one episode;
# agent() re-binds several of these via 'global' statements.
game_state = GameExtended()   # extended game-state wrapper, refreshed each turn via _update()
actions = Actions(game_state)  # per-turn action collector (append/move/stay/build_*)
lets_build_city = False  # NOTE(review): declared global in agent() but not used in this chunk — confirm
build_pos = None  # NOTE(review): declared global in agent() but not used in this chunk — confirm
jobs = game_state.job_board  # shared job board; cities post jobs, units request them
completed_cities = []  # NOTE(review): declared global in agent() but not used in this chunk — confirm
def agent(observation, configuration, DEBUG=False):
global game_state
global actions
global lets_build_city
global build_pos
global completed_cities
### Do not edit ###
game_state._update(observation)
actions.update()
path: List[Tuple] = []
### AI Code goes down here! ###
player = game_state.player
opponent = game_state.opponent
# width, height = game_state.map.width, game_state.map.height
if DEBUG_SHOW_TIME:
actions.append(annotate.sidetext(f"Time : {game_state.time}"))
actions.append(annotate.sidetext(f" {game_state.lux_time}h till night"))
if game_state.isMorning() : dbg = "Morning"
elif game_state.isEvening() : dbg = "Evening"
elif game_state.isNight() : dbg = "Night"
else: dbg = "Daytime"
actions.append(annotate.sidetext(f"it is {dbg}"))
#---------------------------------------------------------------------------------------------------------
# Cities Management
#---------------------------------------------------------------------------------------------------------
for _, city in player.cities.items():
city_size = len(city.citytiles)
#--- EXPAND THE CITY ---
if DEBUG_SHOW_EXPAND_LIST:
exp_pos = game_state.expand_map.get(city.cityid)
actions.append(annotate.sidetext(f"{city.cityid} expand in "))
for x, y, v in exp_pos:
actions.append(annotate.sidetext(f" ({x}; {y}) {v}"))
if city_can_expand(city, jobs) and city.isFulled():
exp_pos = game_state.expand_map.get(city.cityid)
if exp_pos:
x, y, v = exp_pos[0]
#if v: # expand only if there is a resource nearby
jobs.addJob(Task.BUILD, Position(x, y), city_id=city.cityid)
#else:
# jobs.addJob(Task.INVASION, None, city_id=city.cityid)
#--- SPAWN WORKERS OR RESEARCH ---
for ct in city.citytiles:
pxy = ct.pos
if DEBUG_SHOW_CITY_FULLED:
actions.append(annotate.text(pxy.x, pxy.y, f"{city.isFulled()}"))
if ct.can_act():
if can_build_worker(player) - actions.new_workers > 0:
actions.build_worker(ct)
# actions.append(ct.build_worker())
elif not player.researched_uranium():
actions.append(ct.research())
if not city.isFulled(): # and not game_state.isNight():
if jobs.count(Task.ENERGIZE, city_id=city.cityid) < (city_size + 1) // 2:
dbg = jobs.count(Task.ENERGIZE, city_id=city.cityid)
dbg2 = (city_size + 1) // 2
if DEBUG_SHOW_CITY_JOBS:
actions.append(annotate.sidetext(f"{city.cityid}: NRG {dbg} < {dbg2}"))
jobs.addJob(Task.ENERGIZE, ct.pos, city_id = city.cityid)
# Debug jobs.count
if DEBUG_SHOW_CITY_JOBS:
dbg = jobs.count(Task.BUILD, city_id=city.cityid)
actions.append(annotate.sidetext(f"{city.cityid}: {dbg} BLD"))
dbg = jobs.count(Task.ENERGIZE, city_id=city.cityid)
actions.append(annotate.sidetext(f"{city.cityid}: {dbg} NRG"))
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
# Units Management
#---------------------------------------------------------------------------------------------------------
if DEBUG_SHOW_INPROGRESS:
actions.append(annotate.sidetext(f"[INPROGRESS]"))
sorted_units = sorted(player.units, key=lambda u: u.cooldown, reverse=True)
for unit in sorted_units:
# if the unit is a worker (can mine resources) and can perform an action this turn
if unit.is_worker():
my_job = jobs.jobRequest(unit)
if not unit.can_act():
actions.stay(unit)
if DEBUG_SHOW_INPROGRESS:
actions.append(annotate.sidetext(f"!{my_job}"))
continue
else:
if DEBUG_SHOW_INPROGRESS:
actions.append(annotate.sidetext(f">{my_job}"))
# Check if is evening time, if so, to survive, every
# job with risk of not having enough energy is dropped
# and a new HARVEST job is taken.
# if game_state.isNight():
# if (my_job.task == Task.BUILD and my_job.subtask > 0) or \
# (my_job.task == Task.EXPLORE and my_job.subtask > 0):
# actions.stay(unit)
# jobs.jobDrop(unit.id)
# continue
if my_job.task == Task.HARVEST:
# if not in a city and in a cell with energy available stay here to harvest
if game_state.getEnergy(unit.pos.x, unit.pos.y) != 0 and \
not game_state.map.get_cell_by_pos(unit.pos).citytile:
actions.stay(unit) # stay in the same position
else: # find a new resource position
if unit.pos == my_job.pos:
tile = game_state.find_closest_resources(unit.pos)
if not tile: # no more resources to harvest
actions.stay(unit) # stay in the same position
jobs.jobDrop(unit.id)
else: # move to resource
my_job.pos = tile.pos
if unit.pos != my_job.pos:
move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction): # cannot move to a resource tile
jobs.jobReject(unit.id)
if unit.get_cargo_space_left() == 0:
actions.stay(unit)
jobs.jobDone(unit.id)
elif my_job.task == Task.ENERGIZE:
if my_job.subtask == 0: # search for resource
if game_state.getEnergy(my_job.pos.x, my_job.pos.y) != 0:
# citytile is adiacent to a resource so go directly there
my_job.subtask = 1
# If unit is in the citytile and can grab energy then job is done (unit stay there)
elif unit.energy >= 10 * unit.light_upkeep:
# citytile is adiacent to a resource so go directly there
my_job.subtask = 1
elif unit.get_cargo_space_left() == 0:
my_job.subtask = 1
elif (game_state.map.get_cell_by_pos(unit.pos).citytile or
game_state.getEnergy(unit.pos.x, unit.pos.y) == 0 ):
tile = game_state.find_closest_resources(unit.pos)
if not tile:
actions.stay(unit) # stay in the same position
jobs.jobReject(unit.id)
else:
move = unit.pos.path_to(tile.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction): # cannot move to a resource tile
jobs.jobReject(unit.id)
if my_job.subtask == 1: # go to citytile
if unit.pos == my_job.pos:
actions.stay(unit) # stay in the same position
jobs.jobDone(unit.id)
else:
move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction):
jobs.jobReject(unit.id)
elif my_job.task == Task.BUILD:
if my_job.subtask == 0: # First need to full up unit
if unit.get_cargo_space_left() == 0:
my_job.subtask = 1
elif (game_state.map.get_cell_by_pos(unit.pos).citytile or
game_state.getEnergy(unit.pos.x, unit.pos.y) == 0 ):
tile = game_state.find_closest_resources(unit.pos)
if not tile: # no reacheable resource
actions.stay(unit) # stay in the same position
jobs.jobDrop(unit.id)
else:
move = unit.pos.path_to(tile.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction):
jobs.jobDrop(unit.id)
if my_job.subtask == 1: # Go to Build position
if unit.pos == my_job.pos:
if unit.get_cargo_space_left() > 0:
actions.stay(unit) # stay in the same position
jobs.jobDrop(unit.id)
else:
actions.build_city(unit)
my_job.subtask = 2
else:
move = unit.pos.path_to(my_job.pos, game_state.map, noCities=True)
if move.path:
if not actions.move(unit, move.direction):
jobs.jobDrop(unit.id)
# actions.append(unit.move(move_dir))
# Draw the path
actions.append(annotate.x(my_job.pos.x, my_job.pos.y))
for i in range(len(move.path)-1):
actions.append(annotate.line(
move.path[i][1], move.path[i][2],
move.path[i+1][1], move.path[i+1][2]))
else: # not path found
jobs.jobDone(unit.id)
elif my_job.subtask == 2:
# if city has adiacent energy then Unit Stay until new day
if game_state.getEnergy(unit.pos.x, unit.pos.y) > 0:
if game_state.time >= 39:
jobs.jobDone(unit.id)
else:
jobs.jobDone(unit.id)
elif my_job.task == Task.SLEEP:
if unit.pos == my_job.pos:
if game_state.time >= 39:
jobs.jobDone(unit.id)
else:
move_dir = unit.pos.direction_to(my_job.pos)
if not actions.move(unit, move_dir):
jobs.jobReject(unit.id)
elif my_job.task == Task.EXPLORE:
# this is a multistate task so my_job.subtask is the state
if my_job.subtask == 0: # find the position of resource (min 4 step from city)
# get position of city that emitted the job
if my_job.city_id in player.cities:
pos = player.cities[my_job.city_id].citytiles[0].pos
else:
pos = my_job.pos
explore_pos = game_state.getClosestExploreTarget(pos, min_distance=DISTANCE_BETWEEN_CITIES)
if explore_pos:
my_job.subtask = 1 # HARVEST resource from position
my_job.pos = explore_pos
else:
jobs.jobDone(unit.id)
if my_job.subtask == 1: # HARVEST resource from position
if unit.pos == my_job.pos:
if unit.get_cargo_space_left() > 0:
if not game_state.map.get_cell_by_pos(unit.pos).has_resource:
#jobs.jobReject(unit.id)
jobs.jobDrop(unit.id)
else: # next subtask
my_job.pos = game_state.find_closest_freespace(unit.pos)
my_job.subtask = 2 # BUILD A NEW CITY
else:
# move_dir = unit.pos.direction_to(my_job.pos)
move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction):
# jobs.jobReject(unit.id)
jobs.jobDrop(unit.id)
if my_job.subtask == 2: # BUILD A NEW CITY
if unit.pos == my_job.pos:
# TODO: need to wait until next day
actions.build_city(unit)
my_job.subtask = 3 # WAIT UNTIL NEXT DAY
else:
#move_dir = unit.pos.direction_to(my_job.pos)
move = unit.pos.path_to(my_job.pos, game_state.map, noCities=True, playerid=game_state.id)
if not actions.move(unit, move.direction):
action = unit.build_city()
# jobs.jobReject(unit.id)
jobs.jobDrop(unit.id)
if my_job.subtask == 3: # Now feed that city
my_job.task = Task.ENERGIZE
my_job.subtask = 0
actions.stay(unit)
elif my_job.task == Task.INVASION:
if my_job.subtask == 0:
# get an invasion target position
target_pos = game_state.getClosestInvasionTarget(unit.pos)
if not target_pos:
actions.stay(unit)
jobs.jobDone(unit.id)
continue
my_job.data["target"] = target_pos
if unit.get_cargo_space_left() == 0: # if unit is full
my_job.pos = target_pos
my_job.subtask = 2
else:
# find a resource in the halfway to the target
res_cell = game_state.find_closest_resources(unit.pos.halfway(target_pos))
if res_cell:
my_job.subtask = 1 # HARVEST resource from position
my_job.pos = res_cell.pos
else:
actions.stay(unit)
jobs.jobDone(unit.id)
continue
if my_job.subtask == 1: # HARVEST resource from position
if unit.pos == my_job.pos:
if unit.get_cargo_space_left() == 0:
my_job.pos = my_job.data["target"]
my_job.subtask | |
import requests
import os
import ast
import random
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the Alexa 'response' object: spoken text, a Simple card
    mirroring the same text, a reprompt, and the end-of-session flag."""
    speech = {'type': 'PlainText', 'text': output}
    card = {'type': 'Simple', 'title': title, 'content': output}
    reprompt = {'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': reprompt,
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet payload in the versioned envelope Alexa expects."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """Build the launch-intent welcome message; keeps the session open so
    the user can follow up with a command."""
    speech_output = "Welcome to WordBox. Ask for a synonym, an antonym, rhyme, definition, and more for a word by saying something like 'synonym for happy'. Hear all commands by saying 'all commands'."
    # Spoken again if the user stays silent or is not understood.
    reprompt_text = "Ask for a synonym, antonym, part of speech, rhyme, definition, syllables, frequency, or pronunciation!"
    speechlet = build_speechlet_response("Welcome", speech_output, reprompt_text, False)
    return build_response({}, speechlet)
def get_all_commands():
    """List every supported command for the user; keeps the session open."""
    speech_output = "You can ask for a synonym, antonym, rhyme, definition, part of speech, syllables, or frequency of a word by saying something like 'synonym for happy'. You can also ask for a random synonym, antonym, definition, or rhyme by saying something like 'random synonym for happy'. If you want all of them, say something like 'all synonyms for happy.'"
    # Spoken again if the user stays silent or is not understood.
    reprompt_text = "Ask for a synonym, antonym, part of speech, rhyme, definition, syllables, or frequency of a word! Or say 'all commands' to get hear all commands."
    speechlet = build_speechlet_response("All Commands", speech_output, reprompt_text, False)
    return build_response({}, speechlet)
def handle_session_end_request():
    """Say goodbye and end the session (shouldEndSession=True exits the skill)."""
    speechlet = build_speechlet_response("Session Ended", "Bye!", None, True)
    return build_response({}, speechlet)
def get_synonym(intent, session):
    """Speak the most common synonym for the word in the WORD slot.

    Ends the session after answering. Falls back to an apology when the
    slot has no value or the API returns no synonyms.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Synonym", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING SYNONYM OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/synonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # Parse the JSON body once with r.json(); ast.literal_eval cannot handle
    # JSON literals (true/false/null) and the old code parsed the body twice.
    synonyms = r.json().get("synonyms", [])
    if not synonyms:
        speech_output = "Sorry, I couldn't find any synonyms for " + word + "."
    else:
        speech_output = "A common synonym for " + word + " is " + synonyms[0] + "."
    response = build_speechlet_response("Synonym", speech_output, None, True)
    return build_response({}, response)
def get_random_synonym(intent, session):
    """Speak a randomly chosen synonym for the word in the WORD slot.

    Ends the session after answering. Falls back to an apology when the
    slot has no value or the API returns no synonyms.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Synonym", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING RANDOM SYNONYM OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/synonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # Parse the JSON body once with r.json(); ast.literal_eval cannot handle
    # JSON literals (true/false/null) and the old code parsed the body twice.
    synonyms = r.json().get("synonyms", [])
    if not synonyms:
        speech_output = "Sorry, I couldn't find any synonyms for " + word + "."
    else:
        speech_output = "A synonym for " + word + " is " + random.choice(synonyms) + "."
    response = build_speechlet_response("Synonym", speech_output, None, True)
    return build_response({}, response)
def get_all_synonyms(intent, session):
    """Speak every synonym for the word in the WORD slot as one list.

    Uses an Oxford-comma join for two or more results. Ends the session
    after answering.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Synonyms", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING ALL SYNONYMS OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/synonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # Parse the JSON body once with r.json(); ast.literal_eval cannot handle
    # JSON literals (true/false/null).
    synonyms_list = r.json().get("synonyms", [])
    if len(synonyms_list) == 0:
        speech_output = "Sorry, I couldn't find any synonyms for " + word + "."
    elif len(synonyms_list) == 1:
        speech_output = "The only synonym for " + word + " is " + synonyms_list[0] + "."
    else:
        # "a, b, and c" — join all but the last, then append it with "and".
        speech_output = "The synonyms for " + word + " are " + ", ".join(synonyms_list[:-1]) + ", and " + synonyms_list[-1] + "."
    response = build_speechlet_response("Synonyms", speech_output, None, True)
    return build_response({}, response)
def get_antonym(intent, session):
    """Speak the most common antonym for the word in the WORD slot.

    Ends the session after answering. Falls back to an apology when the
    slot has no value or the API returns no antonyms.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Antonym", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING ANTONYM OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/antonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # Parse the JSON body once with r.json(); ast.literal_eval cannot handle
    # JSON literals (true/false/null) and the old code parsed the body twice.
    antonyms = r.json().get("antonyms", [])
    if not antonyms:
        speech_output = "Sorry, I couldn't find any antonyms for " + word + "."
    else:
        speech_output = "A common antonym for " + word + " is " + antonyms[0] + "."
    response = build_speechlet_response("Antonym", speech_output, None, True)
    return build_response({}, response)
def get_random_antonym(intent, session):
    """Speak a randomly chosen antonym for the word in the WORD slot.

    Ends the session after answering. Falls back to an apology when the
    slot has no value or the API returns no antonyms.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Antonym", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING RANDOM ANTONYM OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/antonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # Parse the JSON body once with r.json(); ast.literal_eval cannot handle
    # JSON literals (true/false/null) and the old code parsed the body twice.
    antonyms = r.json().get("antonyms", [])
    if not antonyms:
        speech_output = "Sorry, I couldn't find any antonyms for " + word + "."
    else:
        speech_output = "An antonym for " + word + " is " + random.choice(antonyms) + "."
    response = build_speechlet_response("Antonym", speech_output, None, True)
    return build_response({}, response)
def get_all_antonyms(intent, session):
    """Speak every antonym for the word in the WORD slot as one list.

    Uses an Oxford-comma join for two or more results. Ends the session
    after answering.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Antonyms", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING ALL ANTONYMS OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/antonyms"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # Parse the JSON body once with r.json(); ast.literal_eval cannot handle
    # JSON literals (true/false/null).
    antonyms_list = r.json().get("antonyms", [])
    if len(antonyms_list) == 0:
        speech_output = "Sorry, I couldn't find any antonyms for " + word + "."
    elif len(antonyms_list) == 1:
        speech_output = "The only antonym for " + word + " is " + antonyms_list[0] + "."
    else:
        # "a, b, and c" — join all but the last, then append it with "and".
        speech_output = "The antonyms for " + word + " are " + ", ".join(antonyms_list[:-1]) + ", and " + antonyms_list[-1] + "."
    response = build_speechlet_response("Antonyms", speech_output, None, True)
    return build_response({}, response)
def get_pos(intent, session):
    """Speak the part of speech of the word in the WORD slot.

    Ends the session after answering. The old code indexed
    results[0]["partOfSpeech"] unguarded and crashed (IndexError/KeyError)
    for words the API has no entry for; this version apologizes instead.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Part of Speech", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING PART OF SPEECH OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # Parse the JSON body once with r.json(); ast.literal_eval cannot handle
    # JSON literals (true/false/null).
    results = r.json().get("results", [])
    if not results or not results[0].get("partOfSpeech"):
        speech_output = "Sorry, I couldn't find the part of speech for " + word + "."
    else:
        speech_output = word + " is a " + results[0]["partOfSpeech"] + "."
    response = build_speechlet_response("Part of Speech", speech_output, None, True)
    return build_response({}, response)
def get_rhyme(intent, session):
    """Speak the most common rhyme for the word in the WORD slot.

    Ends the session after answering. The API nests results under
    "rhymes" -> "all"; the old code checked only "rhymes" before indexing
    ["all"][0], which could raise KeyError when "all" was absent.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Rhyme", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING RHYME OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/rhymes"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # Parse the JSON body once with r.json(); ast.literal_eval cannot handle
    # JSON literals (true/false/null).
    all_rhymes = r.json().get("rhymes", {}).get("all", [])
    if not all_rhymes:
        speech_output = "Sorry, I couldn't find anything that rhymes with " + word + "."
    else:
        speech_output = "A common rhyme for " + word + " is " + all_rhymes[0] + "."
    response = build_speechlet_response("Rhyme", speech_output, None, True)
    return build_response({}, response)
def get_random_rhyme(intent, session):
    """Speak a randomly chosen rhyme for the word in the WORD slot.

    Ends the session after answering. The API nests results under
    "rhymes" -> "all"; the old code checked only "rhymes" before indexing
    ["all"], which could raise KeyError when "all" was absent.
    """
    if "value" not in intent["slots"]["WORD"]:
        response = build_speechlet_response("Rhyme", "Sorry, I couldn't recognize that word.", None, True)
        return build_response({}, response)
    word = intent["slots"]["WORD"]["value"]
    print("---GETTING RANDOM RHYME OF " + word)
    url = "https://wordsapiv1.p.mashape.com/words/" + word + "/rhymes"
    headers = {
        "X-Mashape-Key": os.environ["MASHAPE_KEY_PRODUCTION"],
        "Accept": "application/json"
    }
    r = requests.get(url, headers=headers)
    # Parse the JSON body once with r.json(); ast.literal_eval cannot handle
    # JSON literals (true/false/null).
    all_rhymes = r.json().get("rhymes", {}).get("all", [])
    if not all_rhymes:
        speech_output = "Sorry, I couldn't find anything that rhymes with " + word + "."
    else:
        speech_output = "A rhyme for " + word + " is " + random.choice(all_rhymes) + "."
    response = build_speechlet_response("Rhyme", speech_output, None, True)
    return build_response({}, response)
def get_definition(intent, session):
if "value" not in | |
129
fov_pix = 257
self._fov_pix = fov_pix
self._oversample = oversample
# Setting these to one choose default values at runtime
self._npsf = None
self._ndeg = None
# Legendre polynomials are more stable
self.use_legendre = True
# Turning on quick perform fits over filter bandpasses independently
# The smaller wavelength range requires fewer monochromaic wavelengths
# and lower order polynomial fits
self._quick = None
# Set up initial OPD file info
opd_name = f'OPD_RevW_ote_for_{self.name}_predicted.fits'
opd_name = _check_fitsgz(self, opd_name)
self._opd_default = (opd_name, 0)
self.pupilopd = self._opd_default
# Name to save array of oversampled coefficients
self._save_dir = None
self._save_name = None
# Max FoVs for calculating drift and field-dependent coefficient residuals
# Any pixels beyond this size will be considered to have 0 residual difference
self._fovmax_wfedrift = 256
self._fovmax_wfefield = 128
self._fovmax_wfemask = 256
self.psf_coeff = None
self.psf_coeff_header = None
self._psf_coeff_mod = {
'wfe_drift': None, 'wfe_drift_off': None, 'wfe_drift_lxmap': None,
'si_field': None, 'si_field_v2grid': None, 'si_field_v3grid': None, 'si_field_apname': None,
'si_mask': None, 'si_mask_xgrid': None, 'si_mask_ygrid': None, 'si_mask_apname': None,
'si_mask_large': False
}
if self.image_mask is not None:
self.options['coron_shift_x'] = 0
self.options['coron_shift_y'] = 0
def _gen_save_dir(self):
    """
    Return the directory used to store PSF coefficients, creating it
    (including intermediates) if missing, and cache it on self._save_dir.
    """
    save_dir = self._save_dir
    if save_dir is None:
        # Default location: <WEBBPSF_EXT_PATH>/psf_coeffs/<instrument>/
        save_dir = Path(conf.WEBBPSF_EXT_PATH) / 'psf_coeffs/' / f'{self.name}/'
    elif isinstance(save_dir, str):
        # Normalize a user-supplied string path to a Path object.
        save_dir = Path(save_dir)
    self._save_dir = save_dir
    if not os.path.isdir(save_dir):
        _log.info(f"Creating directory: {save_dir}")
        os.makedirs(save_dir, exist_ok=True)
    return save_dir
def _clear_coeffs_dir(self):
    """
    Interactively empty this instrument's coefficient directory.

    Prompts on stdin; only proceeds if the user types exactly 'Y'. The
    directory tree is deleted and then recreated empty.
    """
    import shutil

    # Normalize to a pathlib.Path object.
    save_dir = self.save_dir
    if isinstance(save_dir, str):
        save_dir = Path(save_dir)

    # Guard clause: nothing to do if the directory isn't there.
    if not (save_dir.exists() and save_dir.is_dir()):
        _log.warn(f"Directory '{save_dir}/' does not exist!")
        return

    _log.warn(f"Remove contents from '{save_dir}/'?")
    _log.warn("Type 'Y' to continue...")
    if input("") == "Y":
        # Delete directory and contents, then recreate it empty.
        shutil.rmtree(save_dir)
        os.makedirs(save_dir, exist_ok=True)
        _log.warn("Directory emptied.")
    else:
        _log.warn("Process aborted.")
def _gen_save_name(self, wfe_drift=0):
    """
    Create save name for polynomial coefficients output file.

    The name acts as a cache key: it encodes every setting that affects the
    coefficients (filter when `quick`, pupil/image mask, module/channel for
    NIRCam, FoV size and oversampling, jitter, source offset, mask shifts,
    bar offset, OPD slice, WFE drift, SI WFE, distortions, Legendre basis),
    so a cached file is only reused when all of them match.

    Parameters
    ----------
    wfe_drift : float
        WFE drift amount (nm); appended to the OPD part of the name when
        non-zero.
    """
    # Prepend filter name if using quick keyword (quick fits per-bandpass,
    # so coefficients become filter-specific).
    fstr = '{}_'.format(self.filter) if self.quick else ''
    # Mask and pupil names ('CLEAR'/'NONE' placeholders when unset)
    pstr = 'CLEAR' if self.pupil_mask is None else self.pupil_mask
    mstr = 'NONE' if self.image_mask is None else self.image_mask
    ## 9/14/2022 - PSF weighting for substrate and ND mask should not be necessary
    ##   since these are included in bandpass throughputs, which are then
    ##   applied to input spectrum to get flux-dependent PSFs. Therefore, the
    ##   saved PSF coefficients are similar for all three scenario:
    ##   1) coron_substrate=False; 2) coron_substrate=True; 3) ND_acq=True
    # Check for coron substrate if image mask is None
    # if (mstr == 'NONE') and self.coron_substrate:
    #     mstr = 'CORONSUB'
    # Only need coron substrate for PSF weighting
    # if (mstr == 'NONE'):
    #     if self.ND_acq:
    #         mstr = 'NDACQ'
    #     elif self.coron_substrate:
    #         mstr = 'CORONSUB'
    fmp_str = f'{fstr}{pstr}_{mstr}'
    # PSF image size and sampling
    fov_pix = self.fov_pix
    osamp = self.oversample
    if self.name=='NIRCam':
        # Prepend module and channel to filter/pupil/mask
        module = self.module
        chan_str = 'LW' if 'long' in self.channel else 'SW'
        fmp_str = f'{chan_str}{module}_{fmp_str}'
        # Set bar offset if specified (NIRCam-only wedge occulter offset)
        # bar_offset = self.options.get('bar_offset', None)
        bar_offset = self.get_bar_offset()
        bar_str = '' if bar_offset is None else '_bar{:.2f}'.format(bar_offset)
    else:
        bar_str = ''
    # Jitter settings; treat missing jitter model or sigma as zero jitter
    jitter = self.options.get('jitter')
    jitter_sigma = self.options.get('jitter_sigma', 0)
    if (jitter is None) or (jitter_sigma is None):
        jitter_sigma = 0
    # Jitter sigma in the name is x1000 (assumes arcsec -> mas — TODO confirm)
    jsig_mas = jitter_sigma*1000
    # Source positioning; None is normalized to 0 before formatting
    offset_r = self.options.get('source_offset_r', 0)
    offset_theta = self.options.get('source_offset_theta', 0)
    if offset_r is None:
        offset_r = 0
    if offset_theta is None:
        offset_theta = 0
    rth_str = f'r{offset_r:.2f}_th{offset_theta:+.1f}'
    # Mask offsetting; only non-zero shifts contribute to the name
    coron_shift_x = self.options.get('coron_shift_x', 0)
    coron_shift_y = self.options.get('coron_shift_y', 0)
    if coron_shift_x is None:
        coron_shift_x = 0
    if coron_shift_y is None:
        coron_shift_y = 0
    moff_str1 = '' if coron_shift_x==0 else f'_mx{coron_shift_x:.3f}'
    moff_str2 = '' if coron_shift_y==0 else f'_my{coron_shift_y:.3f}'
    moff_str = moff_str1 + moff_str2
    # OPD identifier (e.g. rev + slice); tag drift amount when requested
    opd_dict = self.get_opd_info()
    opd_str = opd_dict['opd_str']
    if wfe_drift!=0:
        opd_str = '{}-{:.0f}nm'.format(opd_str,wfe_drift)
    fname = f'{fmp_str}_pix{fov_pix}_os{osamp}_jsig{jsig_mas:.0f}_{rth_str}{moff_str}{bar_str}_{opd_str}'
    # Add SI WFE tag if included
    if self.include_si_wfe:
        fname = fname + '_siwfe'
    # Add distortions tag if included
    if self.include_distortions:
        fname = fname + '_distort'
    # Tag the polynomial basis (Legendre vs. standard)
    if self.use_legendre:
        fname = fname + '_legendre'
    fname = fname + '.fits'
    return fname
def _get_opd_info(self, opd=None, HDUL_to_OTELM=True):
    """
    Parse out OPD information for a given OPD, which can be a
    file name, tuple (file, slice), HDUList, or OTE Linear Model.
    Returns a dictionary of relevant information for logging purposes.
    The dictionary carries the OPD converted to an OTE LM (by default).

    In order to update the instrument class:
        >>> opd_dict = inst.get_opd_info()
        >>> opd_new = opd_dict['pupilopd']
        >>> inst.pupilopd = opd_new
        >>> inst.pupil = opd_new

    Parameters
    ----------
    opd : None, str, tuple, fits.HDUList, or poppy.OpticalElement
        OPD specification; defaults to self.pupilopd, then to the
        class default when that is also None.
    HDUL_to_OTELM : bool
        Convert an HDUList result into an OTE_Linear_Model_WSS.
    """
    # Pupil OPD file name; fall back to the instance setting
    if opd is None:
        opd = self.pupilopd
    # If OPD is None or a string, make into a (name, slice) tuple
    if opd is None:  # Default OPD
        opd = self._opd_default
    elif isinstance(opd, six.string_types):
        opd = (opd, 0)
    # Temporarily raise log level to WARNING while loading; restored below
    log_prev = conf.logging_level
    setup_logging('WARN', verbose=False)
    # Parse OPD info by input type
    if isinstance(opd, tuple):
        if not len(opd)==2:
            raise ValueError("opd passed as tuple must have length of 2.")
        # Filename info
        opd_name = opd[0]  # OPD file name
        opd_num = opd[1]   # OPD slice
        # Extract the "Rev*" token (if any) from the file name for the tag
        rev = [s for s in opd_name.split('_') if "Rev" in s]
        rev = '' if len(rev)==0 else rev[0]
        opd_str = '{}slice{:.0f}'.format(rev,opd_num)
        opd = OPDFile_to_HDUList(opd_name, opd_num)
    elif isinstance(opd, fits.HDUList):
        # A custom OPD is passed.
        opd_name = 'OPD from FITS HDUlist'
        opd_num = 0
        opd_str = 'OPDcustomFITS'
    elif isinstance(opd, poppy.OpticalElement):
        # OTE Linear Model
        opd_name = 'OPD from OTE LM'
        opd_num = 0
        opd_str = 'OPDcustomLM'
    else:
        raise ValueError("OPD must be a string, tuple, HDUList, or OTE LM.")
    # OPD should now be an HDUList or OTE LM.
    # Convert to OTE LM if HDUList, recording provenance in the header.
    if HDUL_to_OTELM and isinstance(opd, fits.HDUList):
        hdul = opd
        header = hdul[0].header
        header['ORIGINAL'] = (opd_name, "Original OPD source")
        header['SLICE'] = (opd_num, "Slice index of original OPD")
        #header['WFEDRIFT'] = (self.wfe_drift, "WFE drift amount [nm]")
        name = 'Modified from ' + opd_name
        opd = OTE_Linear_Model_WSS(name=name, opd=hdul, opd_index=opd_num, transmission=self.pupil)
    # Restore the caller's logging level
    setup_logging(log_prev, verbose=False)
    out_dict = {'opd_name':opd_name, 'opd_num':opd_num, 'opd_str':opd_str, 'pupilopd':opd}
    return out_dict
def _drift_opd(self, wfe_drift, opd=None,
wfe_therm=None, wfe_frill=None, wfe_iec=None):
"""
A quick method to drift the pupil OPD. This function applies
some WFE drift to input OPD file by breaking up the wfe_drift
parameter into thermal, frill, and IEC components. If we want
more realistic time evolution, then we should use the procedure
in dev_utils/WebbPSF_OTE_LM.ipynb to create a time series of OPD
maps, which can then be passed directly to create unique PSFs.
This outputs an OTE Linear Model. In order to update instrument class:
>>> opd_dict = inst.drift_opd()
>>> inst.pupilopd = opd_dict['opd']
>>> inst.pupil = opd_dict['opd']
Parameters
----------
wfe_drift : float
Desired WFE drift (delta RMS) in nm.
opd : Various
file name, tuple (file,slice), HDUList, or OTE Linear Model
of the OPD map.
wfe_therm : None or float
Option to specify thermal component of WFE drift (nm RMS).
`wfe_drift` is ignored.
wfe_frill : None or float
Option to specify frill component of WFE drift (nm RMS).
`wfe_drift` is ignored.
wfe_iec : None or float
Option to specify IEC component of WFE drift (nm RMS).
`wfe_drift` is ignored.
"""
# Get Pupil OPD info and convert to OTE LM
opd_dict = self.get_opd_info(opd)
opd_name = opd_dict['opd_name']
opd_num = opd_dict['opd_num']
opd_str = opd_dict['opd_str']
opd = deepcopy(opd_dict['pupilopd'])
# Apply drift components
wfe_dict = {'therm':0, 'frill':0, 'iec':0, 'opd':opd}
if (wfe_therm is not None) or (wfe_frill is not None) or (wfe_iec is not None):
wfe_therm = 0 if wfe_therm is None else wfe_therm
wfe_frill = 0 if wfe_frill is None | |
is not None:
self.ResourceTags = []
for item in params.get("ResourceTags"):
obj = ResourceTag()
obj._deserialize(item)
self.ResourceTags.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateInstancesResponse(AbstractModel):
    """CreateInstances response structure."""

    def __init__(self):
        """
        :param DealId: Transaction ID.
        :type DealId: str
        :param InstanceIds: Instance IDs.
        :type InstanceIds: list of str
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.DealId = None
        self.InstanceIds = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from the response dict *params* and warn
        about keys this model does not declare (they would be dropped
        silently otherwise)."""
        self.DealId = params.get("DealId")
        self.InstanceIds = params.get("InstanceIds")
        self.RequestId = params.get("RequestId")
        # Set difference replaces the original remove-in-a-loop bookkeeping;
        # also fixes the "fileds" typo in the warning message.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DelayDistribution(AbstractModel):
    """Latency distribution details."""

    def __init__(self):
        """
        :param Ladder: Distribution bucket; latency maps to a Ladder value as:
[0ms,1ms]: 1;
[1ms,5ms]: 5;
[5ms,10ms]: 10;
[10ms,50ms]: 50;
[50ms,200ms]: 200;
[200ms,∞]: -1.
        :type Ladder: int
        :param Size: Number of commands whose latency falls in this bucket.
        :type Size: int
        :param Updatetime: Modification time.
        :type Updatetime: int
        """
        self.Ladder = None
        self.Size = None
        self.Updatetime = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Ladder = params.get("Ladder")
        self.Size = params.get("Size")
        self.Updatetime = params.get("Updatetime")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DeleteInstanceAccountRequest(AbstractModel):
    """DeleteInstanceAccount request structure."""

    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param AccountName: Sub-account name.
        :type AccountName: str
        """
        self.InstanceId = None
        self.AccountName = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.AccountName = params.get("AccountName")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DeleteInstanceAccountResponse(AbstractModel):
    """DeleteInstanceAccount response structure."""

    def __init__(self):
        """
        :param TaskId: Task ID.
        :type TaskId: int
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.TaskId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.TaskId = params.get("TaskId")
        self.RequestId = params.get("RequestId")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeAutoBackupConfigRequest(AbstractModel):
    """DescribeAutoBackupConfig request structure."""

    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        """
        self.InstanceId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeAutoBackupConfigResponse(AbstractModel):
    """DescribeAutoBackupConfig response structure."""

    def __init__(self):
        """
        :param AutoBackupType: Backup type. Auto-backup type: 1 "scheduled rollback".
        :type AutoBackupType: int
        :param WeekDays: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday.
        :type WeekDays: list of str
        :param TimePeriod: Backup time window.
        :type TimePeriod: str
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.AutoBackupType = None
        self.WeekDays = None
        self.TimePeriod = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.AutoBackupType = params.get("AutoBackupType")
        self.WeekDays = params.get("WeekDays")
        self.TimePeriod = params.get("TimePeriod")
        self.RequestId = params.get("RequestId")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeBackupUrlRequest(AbstractModel):
    """DescribeBackupUrl request structure."""

    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param BackupId: Backup ID; can be obtained via the DescribeInstanceBackups API.
        :type BackupId: str
        """
        self.InstanceId = None
        self.BackupId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.BackupId = params.get("BackupId")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeBackupUrlResponse(AbstractModel):
    """DescribeBackupUrl response structure."""

    def __init__(self):
        """
        :param DownloadUrl: Public-network download address (valid for 6 hours).
        :type DownloadUrl: list of str
        :param InnerDownloadUrl: Private-network download address (valid for 6 hours).
        :type InnerDownloadUrl: list of str
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.DownloadUrl = None
        self.InnerDownloadUrl = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.DownloadUrl = params.get("DownloadUrl")
        self.InnerDownloadUrl = params.get("InnerDownloadUrl")
        self.RequestId = params.get("RequestId")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeCommonDBInstancesRequest(AbstractModel):
    """DescribeCommonDBInstances request structure."""

    def __init__(self):
        """
        :param VpcIds: VPC ID information list.
        :type VpcIds: list of int
        :param SubnetIds: Subnet ID information list.
        :type SubnetIds: list of int
        :param PayMode: Billing-type filter; 0 = monthly subscription, 1 = pay-as-you-go.
        :type PayMode: int
        :param InstanceIds: Instance ID filter list.
        :type InstanceIds: list of str
        :param InstanceNames: Instance name filter list.
        :type InstanceNames: list of str
        :param Status: Instance status filter list.
        :type Status: list of str
        :param OrderBy: Sort field.
        :type OrderBy: str
        :param OrderByType: Sort order.
        :type OrderByType: str
        :param Vips: Instance VIP list.
        :type Vips: list of str
        :param UniqVpcIds: Unified VPC ID list.
        :type UniqVpcIds: list of str
        :param UniqSubnetIds: Unified subnet ID list.
        :type UniqSubnetIds: list of str
        :param Limit: Result count limit; 100 is the recommended default.
        :type Limit: int
        :param Offset: Offset; defaults to 0.
        :type Offset: int
        """
        self.VpcIds = None
        self.SubnetIds = None
        self.PayMode = None
        self.InstanceIds = None
        self.InstanceNames = None
        self.Status = None
        self.OrderBy = None
        self.OrderByType = None
        self.Vips = None
        self.UniqVpcIds = None
        self.UniqSubnetIds = None
        self.Limit = None
        self.Offset = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.VpcIds = params.get("VpcIds")
        self.SubnetIds = params.get("SubnetIds")
        self.PayMode = params.get("PayMode")
        self.InstanceIds = params.get("InstanceIds")
        self.InstanceNames = params.get("InstanceNames")
        self.Status = params.get("Status")
        self.OrderBy = params.get("OrderBy")
        self.OrderByType = params.get("OrderByType")
        self.Vips = params.get("Vips")
        self.UniqVpcIds = params.get("UniqVpcIds")
        self.UniqSubnetIds = params.get("UniqSubnetIds")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeCommonDBInstancesResponse(AbstractModel):
    """DescribeCommonDBInstances response structure."""

    def __init__(self):
        """
        :param TotalCount: Number of instances.
        :type TotalCount: int
        :param InstanceDetails: Instance information.
        :type InstanceDetails: list of RedisCommonInstanceList
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.InstanceDetails = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        # Hoist the lookup: fetch the nested list once instead of twice.
        instance_details = params.get("InstanceDetails")
        if instance_details is not None:
            self.InstanceDetails = []
            for item in instance_details:
                obj = RedisCommonInstanceList()
                obj._deserialize(item)
                self.InstanceDetails.append(obj)
        self.RequestId = params.get("RequestId")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeDBSecurityGroupsRequest(AbstractModel):
    """DescribeDBSecurityGroups request structure."""

    def __init__(self):
        """
        :param Product: Database engine name; for this API the value is: redis.
        :type Product: str
        :param InstanceId: Instance ID in the format cdb-c1nl9rpv or cdbro-c1nl9rpv,
            identical to the instance ID shown in the cloud database console.
        :type InstanceId: str
        """
        self.Product = None
        self.InstanceId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Product = params.get("Product")
        self.InstanceId = params.get("InstanceId")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeDBSecurityGroupsResponse(AbstractModel):
    """DescribeDBSecurityGroups response structure."""

    def __init__(self):
        """
        :param Groups: Security group rules.
        :type Groups: list of SecurityGroup
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.Groups = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        # Hoist the lookup: fetch the nested list once instead of twice.
        groups = params.get("Groups")
        if groups is not None:
            self.Groups = []
            for item in groups:
                obj = SecurityGroup()
                obj._deserialize(item)
                self.Groups.append(obj)
        self.RequestId = params.get("RequestId")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeInstanceAccountRequest(AbstractModel):
    """DescribeInstanceAccount request structure."""

    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param Limit: Page size.
        :type Limit: int
        :param Offset: Page offset.
        :type Offset: int
        """
        self.InstanceId = None
        self.Limit = None
        self.Offset = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeInstanceAccountResponse(AbstractModel):
    """DescribeInstanceAccount response structure."""

    def __init__(self):
        """
        :param Accounts: Account details.
            Note: this field may return null, meaning no valid value was found.
        :type Accounts: list of Account
        :param TotalCount: Number of accounts.
            Note: this field may return null, meaning no valid value was found.
        :type TotalCount: int
        :param RequestId: Unique request ID, returned with every request; required when reporting an issue.
        :type RequestId: str
        """
        self.Accounts = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        # Hoist the lookup: fetch the nested list once instead of twice.
        accounts = params.get("Accounts")
        if accounts is not None:
            self.Accounts = []
            for item in accounts:
                obj = Account()
                obj._deserialize(item)
                self.Accounts.append(obj)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
        # Fixed typo in the warning text; set difference replaces manual loop.
        unknown = set(params) - set(vars(self))
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeInstanceBackupsRequest(AbstractModel):
"""DescribeInstanceBackups请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 待操作的实例ID,可通过 DescribeInstance 接口返回值中的 InstanceId 获取。
:type InstanceId: str
:param Limit: 实例列表大小,默认大小20
:type Limit: int
:param Offset: 偏移量,取Limit整数倍
:type Offset: int
:param BeginTime: 开始时间,格式如:2017-02-08 16:46:34。查询实例在 [beginTime, endTime] 时间段内开始备份的备份列表。
:type BeginTime: str
:param EndTime: 结束时间,格式如:2017-02-08 19:09:26。查询实例在 [beginTime, endTime] 时间段内开始备份的备份列表。
:type EndTime: str
:param Status: 1:备份在流程中,2:备份正常,3:备份转RDB文件处理中,4:已完成RDB转换,-1:备份已过期,-2:备份已删除。
:type Status: list of int
"""
self.InstanceId = None
self.Limit = None
self.Offset = None
self.BeginTime = None
self.EndTime = None
self.Status = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
self.BeginTime = params.get("BeginTime")
self.EndTime = params.get("EndTime")
self.Status = params.get("Status")
memeber_set = set(params.keys())
for name, value | |
None
# Overnight / interbank index models. Each is a tagged schema: `resource_name`
# is a Literal discriminator, `h` an optional forwarding term-structure handle.
# NOTE(review): these appear to mirror the QuantLib index constructors — confirm.
class GBPLiborON(BaseModel):
    resource_name: Optional[Literal["GBPLiborON"]] = "GBPLiborON"
    h: Optional[YieldTermStructureHandle] = None
class USDLiborON(BaseModel):
    resource_name: Optional[Literal["USDLiborON"]] = "USDLiborON"
    h: Optional[YieldTermStructureHandle] = None
class Aonia(BaseModel):
    resource_name: Optional[Literal["Aonia"]] = "Aonia"
    h: Optional[YieldTermStructureHandle] = None
class Eonia(BaseModel):
    resource_name: Optional[Literal["Eonia"]] = "Eonia"
    h: Optional[YieldTermStructureHandle] = None
class Sonia(BaseModel):
    resource_name: Optional[Literal["Sonia"]] = "Sonia"
    h: Optional[YieldTermStructureHandle] = None
class FedFunds(BaseModel):
    resource_name: Optional[Literal["FedFunds"]] = "FedFunds"
    h: Optional[YieldTermStructureHandle] = None
class Nzocr(BaseModel):
    resource_name: Optional[Literal["Nzocr"]] = "Nzocr"
    h: Optional[YieldTermStructureHandle] = None
class Sofr(BaseModel):
    resource_name: Optional[Literal["Sofr"]] = "Sofr"
    h: Optional[YieldTermStructureHandle] = None
# Bibor family: same shape, one class per tenor.
class BiborSW(BaseModel):
    resource_name: Optional[Literal["BiborSW"]] = "BiborSW"
    h: Optional[YieldTermStructureHandle] = None
class Bibor1M(BaseModel):
    resource_name: Optional[Literal["Bibor1M"]] = "Bibor1M"
    h: Optional[YieldTermStructureHandle] = None
class Bibor2M(BaseModel):
    resource_name: Optional[Literal["Bibor2M"]] = "Bibor2M"
    h: Optional[YieldTermStructureHandle] = None
class Bibor3M(BaseModel):
    resource_name: Optional[Literal["Bibor3M"]] = "Bibor3M"
    h: Optional[YieldTermStructureHandle] = None
class Bibor6M(BaseModel):
    resource_name: Optional[Literal["Bibor6M"]] = "Bibor6M"
    h: Optional[YieldTermStructureHandle] = None
class Bibor9M(BaseModel):
    resource_name: Optional[Literal["Bibor9M"]] = "Bibor9M"
    h: Optional[YieldTermStructureHandle] = None
class Bibor1Y(BaseModel):
    resource_name: Optional[Literal["Bibor1Y"]] = "Bibor1Y"
    h: Optional[YieldTermStructureHandle] = None
class Brent(BaseModel):
    resource_name: Optional[Literal["Brent"]] = "Brent"
# 1-D root-solver tags (no parameters — the resource_name alone selects them).
class Bisection(BaseModel):
    resource_name: Optional[Literal["Bisection"]] = "Bisection"
class FalsePosition(BaseModel):
    resource_name: Optional[Literal["FalsePosition"]] = "FalsePosition"
class Ridder(BaseModel):
    resource_name: Optional[Literal["Ridder"]] = "Ridder"
class Secant(BaseModel):
    resource_name: Optional[Literal["Secant"]] = "Secant"
class Newton(BaseModel):
    resource_name: Optional[Literal["Newton"]] = "Newton"
class NewtonSafe(BaseModel):
    resource_name: Optional[Literal["NewtonSafe"]] = "NewtonSafe"
# Optimization constraints.
class BoundaryConstraint(BaseModel):
    resource_name: Optional[Literal["BoundaryConstraint"]] = "BoundaryConstraint"
    lower: float
    upper: float
class NoConstraint(BaseModel):
    resource_name: Optional[Literal["NoConstraint"]] = "NoConstraint"
class PositiveConstraint(BaseModel):
    resource_name: Optional[Literal["PositiveConstraint"]] = "PositiveConstraint"
class CompositeConstraint(BaseModel):
    resource_name: Optional[Literal["CompositeConstraint"]] = "CompositeConstraint"
    c1: Constraint
    c2: Constraint
class NonhomogeneousBoundaryConstraint(BaseModel):
    resource_name: Optional[
        Literal["NonhomogeneousBoundaryConstraint"]
    ] = "NonhomogeneousBoundaryConstraint"
    l: Array
    u: Array
# Optimizer stopping rule: iteration caps plus convergence tolerances.
class EndCriteria(BaseModel):
    resource_name: Optional[Literal["EndCriteria"]] = "EndCriteria"
    maxIteration: int
    maxStationaryStateIterations: int
    rootEpsilon: float
    functionEpsilon: float
    gradientNormEpsilon: float
# Optimization-method tags and simulated-annealing helper schemas.
class ConjugateGradient(BaseModel):
    resource_name: Optional[Literal["ConjugateGradient"]] = "ConjugateGradient"
class Simplex(BaseModel):
    resource_name: Optional[Literal["Simplex"]] = "Simplex"
    # "lambda" is a Python keyword, hence the trailing-underscore name + alias.
    lambda_: float = Field(..., alias="lambda")
class SteepestDescent(BaseModel):
    resource_name: Optional[Literal["SteepestDescent"]] = "SteepestDescent"
class BFGS(BaseModel):
    resource_name: Optional[Literal["BFGS"]] = "BFGS"
class LevenbergMarquardt(BaseModel):
    resource_name: Optional[Literal["LevenbergMarquardt"]] = "LevenbergMarquardt"
    epsfcn: Optional[float] = None
    xtol: Optional[float] = None
    gtol: Optional[float] = None
    useCostFunctionsJacobian: Optional[bool] = None
class DifferentialEvolution(BaseModel):
    resource_name: Optional[Literal["DifferentialEvolution"]] = "DifferentialEvolution"
class SamplerGaussian(BaseModel):
    resource_name: Optional[Literal["SamplerGaussian"]] = "SamplerGaussian"
    seed: Optional[int] = None
class SamplerLogNormal(BaseModel):
    resource_name: Optional[Literal["SamplerLogNormal"]] = "SamplerLogNormal"
    seed: Optional[int] = None
class SamplerMirrorGaussian(BaseModel):
    resource_name: Optional[Literal["SamplerMirrorGaussian"]] = "SamplerMirrorGaussian"
    lower: Array
    upper: Array
    seed: Optional[int] = None
class ProbabilityBoltzmannDownhill(BaseModel):
    resource_name: Optional[
        Literal["ProbabilityBoltzmannDownhill"]
    ] = "ProbabilityBoltzmannDownhill"
    seed: Optional[int] = None
class TemperatureExponential(BaseModel):
    resource_name: Optional[
        Literal["TemperatureExponential"]
    ] = "TemperatureExponential"
    initialTemp: float
    dimension: int
    power: Optional[float] = None
class ReannealingTrivial(BaseModel):
    resource_name: Optional[Literal["ReannealingTrivial"]] = "ReannealingTrivial"
class Optimizer(BaseModel):
    resource_name: Optional[Literal["Optimizer"]] = "Optimizer"
# Volatility term-structure handle wrappers: each pairs a plain handle with a
# relinkable variant; `value` is the wrapped (optional) term structure.
class BlackVolTermStructureHandle(BaseModel):
    resource_name: Optional[
        Literal["BlackVolTermStructureHandle"]
    ] = "BlackVolTermStructureHandle"
    value: Optional[BlackVolTermStructure] = None
class RelinkableBlackVolTermStructureHandle(BaseModel):
    resource_name: Optional[
        Literal["RelinkableBlackVolTermStructureHandle"]
    ] = "RelinkableBlackVolTermStructureHandle"
    value: Optional[BlackVolTermStructure] = None
class LocalVolTermStructureHandle(BaseModel):
    resource_name: Optional[
        Literal["LocalVolTermStructureHandle"]
    ] = "LocalVolTermStructureHandle"
    value: Optional[LocalVolTermStructure] = None
class RelinkableLocalVolTermStructureHandle(BaseModel):
    resource_name: Optional[
        Literal["RelinkableLocalVolTermStructureHandle"]
    ] = "RelinkableLocalVolTermStructureHandle"
    value: Optional[LocalVolTermStructure] = None
class OptionletVolatilityStructureHandle(BaseModel):
    resource_name: Optional[
        Literal["OptionletVolatilityStructureHandle"]
    ] = "OptionletVolatilityStructureHandle"
    value: Optional[OptionletVolatilityStructure] = None
class RelinkableOptionletVolatilityStructureHandle(BaseModel):
    resource_name: Optional[
        Literal["RelinkableOptionletVolatilityStructureHandle"]
    ] = "RelinkableOptionletVolatilityStructureHandle"
    value: Optional[OptionletVolatilityStructure] = None
class SwaptionVolatilityStructureHandle(BaseModel):
    resource_name: Optional[
        Literal["SwaptionVolatilityStructureHandle"]
    ] = "SwaptionVolatilityStructureHandle"
    value: Optional[SwaptionVolatilityStructure] = None
class RelinkableSwaptionVolatilityStructureHandle(BaseModel):
    resource_name: Optional[
        Literal["RelinkableSwaptionVolatilityStructureHandle"]
    ] = "RelinkableSwaptionVolatilityStructureHandle"
    value: Optional[SwaptionVolatilityStructure] = None
# Volatility surfaces. Numeric suffixes (0/1) distinguish constructor
# overloads that share the same resource_name tag.
class BlackConstantVol0(BaseModel):
    resource_name: Optional[Literal["BlackConstantVol"]] = "BlackConstantVol"
    settlementDays: float
    calendar: Calendar
    volatility: Union[float, QuoteHandle]
    dayCounter: DayCounter
class BlackConstantVol1(BaseModel):
    resource_name: Optional[Literal["BlackConstantVol"]] = "BlackConstantVol"
    referenceDate: Date
    c: Calendar
    volatility: Union[float, QuoteHandle]
    dayCounter: DayCounter
class BlackVarianceCurve(BaseModel):
    resource_name: Optional[Literal["BlackVarianceCurve"]] = "BlackVarianceCurve"
    referenceDate: Date
    dates: List[Date]
    volatilities: List[float]
    dayCounter: DayCounter
    forceMonotoneVariance: Optional[bool] = None
class LocalConstantVol0(BaseModel):
    resource_name: Optional[Literal["LocalConstantVol"]] = "LocalConstantVol"
    settlementDays: int
    calendar: Calendar
    volatility: Union[float, QuoteHandle]
    dayCounter: DayCounter
class LocalConstantVol1(BaseModel):
    resource_name: Optional[Literal["LocalConstantVol"]] = "LocalConstantVol"
    referenceDate: Date
    volatility: Union[float, QuoteHandle]
    dayCounter: DayCounter
class LocalVolSurface(BaseModel):
    resource_name: Optional[Literal["LocalVolSurface"]] = "LocalVolSurface"
    blackTS: BlackVolTermStructureHandle
    riskFreeTS: YieldTermStructureHandle
    dividendTS: YieldTermStructureHandle
    underlying: Union[QuoteHandle, float]
# Smile-section models (SABR family) and ZABR evaluation-mode tags.
class SabrSmileSection0(BaseModel):
    resource_name: Optional[Literal["SabrSmileSection"]] = "SabrSmileSection"
    timeToExpiry: float
    forward: float
    sabrParameters: List[float]
    shift: Optional[float] = None
class SabrSmileSection1(BaseModel):
    resource_name: Optional[Literal["SabrSmileSection"]] = "SabrSmileSection"
    d: Date
    forward: float
    sabrParameters: List[float]
    dc: Optional[DayCounter] = None
    shift: Optional[float] = None
class KahaleSmileSection(BaseModel):
    resource_name: Optional[Literal["KahaleSmileSection"]] = "KahaleSmileSection"
    source: SmileSection
    atm: Optional[float] = None
    interpolate: Optional[bool] = None
    exponentialExtrapolation: Optional[bool] = None
    deleteArbitragePoints: Optional[bool] = None
    moneynessGrid: Optional[List[float]] = None
    gap: Optional[float] = None
    forcedLeftIndex: Optional[int] = None
    forcedRightIndex: Optional[int] = None
class ZabrShortMaturityLognormal(BaseModel):
    resource_name: Optional[
        Literal["ZabrShortMaturityLognormal"]
    ] = "ZabrShortMaturityLognormal"
class ZabrShortMaturityNormal(BaseModel):
    resource_name: Optional[
        Literal["ZabrShortMaturityNormal"]
    ] = "ZabrShortMaturityNormal"
class ZabrLocalVolatility(BaseModel):
    resource_name: Optional[Literal["ZabrLocalVolatility"]] = "ZabrLocalVolatility"
class ZabrFullFd(BaseModel):
    resource_name: Optional[Literal["ZabrFullFd"]] = "ZabrFullFd"
# No-arbitrage SABR smile sections; variant 1 of the interpolated section takes
# quote handles where variant 0 takes plain floats.
class NoArbSabrSmileSection0(BaseModel):
    resource_name: Optional[Literal["NoArbSabrSmileSection"]] = "NoArbSabrSmileSection"
    d: Date
    forward: float
    sabrParameters: List[float]
    dc: Optional[DayCounter] = None
    shift: Optional[float] = None
class NoArbSabrSmileSection1(BaseModel):
    resource_name: Optional[Literal["NoArbSabrSmileSection"]] = "NoArbSabrSmileSection"
    timeToExpiry: float
    forward: float
    sabrParameters: List[float]
    shift: Optional[float] = None
class NoArbSabrInterpolatedSmileSection0(BaseModel):
    resource_name: Optional[
        Literal["NoArbSabrInterpolatedSmileSection"]
    ] = "NoArbSabrInterpolatedSmileSection"
    optionDate: Date
    forward: float
    strikes: List[float]
    hasFloatingStrikes: bool
    atmVolatility: float
    vols: List[float]
    alpha: float
    beta: float
    nu: float
    rho: float
    isAlphaFixed: Optional[bool] = None
    isBetaFixed: Optional[bool] = None
    isNuFixed: Optional[bool] = None
    isRhoFixed: Optional[bool] = None
    vegaWeighted: Optional[bool] = None
    endCriteria: Optional[EndCriteria] = None
    method: Optional[OptimizationMethod] = None
    dc: Optional[DayCounter] = None
class NoArbSabrInterpolatedSmileSection1(BaseModel):
    resource_name: Optional[
        Literal["NoArbSabrInterpolatedSmileSection"]
    ] = "NoArbSabrInterpolatedSmileSection"
    optionDate: Date
    forward: QuoteHandle
    strikes: List[float]
    hasFloatingStrikes: bool
    atmVolatility: QuoteHandle
    volHandles: List[QuoteHandle]
    alpha: float
    beta: float
    nu: float
    rho: float
    isAlphaFixed: Optional[bool] = None
    isBetaFixed: Optional[bool] = None
    isNuFixed: Optional[bool] = None
    isRhoFixed: Optional[bool] = None
    vegaWeighted: Optional[bool] = None
    endCriteria: Optional[EndCriteria] = None
    method: Optional[OptimizationMethod] = None
    dc: Optional[DayCounter] = None
# Black-Scholes-family stochastic-process schemas.
class GeneralizedBlackScholesProcess0(BaseModel):
    resource_name: Optional[
        Literal["GeneralizedBlackScholesProcess"]
    ] = "GeneralizedBlackScholesProcess"
    s0: QuoteHandle
    dividendTS: YieldTermStructureHandle
    riskFreeTS: YieldTermStructureHandle
    volTS: BlackVolTermStructureHandle
class GeneralizedBlackScholesProcess1(BaseModel):
    resource_name: Optional[
        Literal["GeneralizedBlackScholesProcess"]
    ] = "GeneralizedBlackScholesProcess"
    x0: QuoteHandle
    dividendTS: YieldTermStructureHandle
    riskFreeTS: YieldTermStructureHandle
    blackVolTS: BlackVolTermStructureHandle
    localVolTS: LocalVolTermStructureHandle
class BlackScholesProcess(BaseModel):
    resource_name: Optional[Literal["BlackScholesProcess"]] = "BlackScholesProcess"
    s0: QuoteHandle
    riskFreeTS: YieldTermStructureHandle
    volTS: BlackVolTermStructureHandle
class BlackScholesMertonProcess(BaseModel):
    resource_name: Optional[
        Literal["BlackScholesMertonProcess"]
    ] = "BlackScholesMertonProcess"
    s0: QuoteHandle
    dividendTS: YieldTermStructureHandle
    riskFreeTS: YieldTermStructureHandle
    volTS: BlackVolTermStructureHandle
class BlackProcess(BaseModel):
    resource_name: Optional[Literal["BlackProcess"]] = "BlackProcess"
    s0: QuoteHandle
    riskFreeTS: YieldTermStructureHandle
    volTS: BlackVolTermStructureHandle
class GarmanKohlagenProcess(BaseModel):
    resource_name: Optional[Literal["GarmanKohlagenProcess"]] = "GarmanKohlagenProcess"
    s0: QuoteHandle
    foreignRiskFreeTS: YieldTermStructureHandle
    domesticRiskFreeTS: YieldTermStructureHandle
    volTS: BlackVolTermStructureHandle
class Merton76Process(BaseModel):
    resource_name: Optional[Literal["Merton76Process"]] = "Merton76Process"
    stateVariable: QuoteHandle
    dividendTS: YieldTermStructureHandle
    riskFreeTS: YieldTermStructureHandle
    volTS: BlackVolTermStructureHandle
    jumpIntensity: QuoteHandle
    meanLogJump: QuoteHandle
    jumpVolatility: QuoteHandle
class GeometricBrownianMotionProcess(BaseModel):
    resource_name: Optional[
        Literal["GeometricBrownianMotionProcess"]
    ] = "GeometricBrownianMotionProcess"
    initialValue: float
    mu: float
    sigma: float
class VarianceGammaProcess(BaseModel):
    resource_name: Optional[Literal["VarianceGammaProcess"]] = "VarianceGammaProcess"
    s0: QuoteHandle
    dividendYield: YieldTermStructureHandle
    riskFreeRate: YieldTermStructureHandle
    sigma: float
    nu: float
    theta: float
# Stochastic-volatility and short-rate process schemas.
class HestonProcessBase(BaseModel):
    resource_name: Optional[Literal["HestonProcess"]] = "HestonProcess"
    riskFreeTS: YieldTermStructureHandle
    dividendTS: YieldTermStructureHandle
    s0: QuoteHandle
    v0: float
    kappa: float
    theta: float
    sigma: float
    rho: float
class BatesProcess(BaseModel):
    resource_name: Optional[Literal["BatesProcess"]] = "BatesProcess"
    riskFreeRate: YieldTermStructureHandle
    dividendYield: YieldTermStructureHandle
    s0: QuoteHandle
    v0: float
    kappa: float
    theta: float
    sigma: float
    rho: float
    # "lambda" is a Python keyword, hence the trailing-underscore name + alias.
    lambda_: float = Field(..., alias="lambda")
    nu: float
    delta: float
class HullWhiteProcess(BaseModel):
    resource_name: Optional[Literal["HullWhiteProcess"]] = "HullWhiteProcess"
    riskFreeTS: YieldTermStructureHandle
    a: float
    sigma: float
class HullWhiteForwardProcess(BaseModel):
    resource_name: Optional[
        Literal["HullWhiteForwardProcess"]
    ] = "HullWhiteForwardProcess"
    riskFreeTS: YieldTermStructureHandle
    a: float
    sigma: float
class G2Process(BaseModel):
    resource_name: Optional[Literal["G2Process"]] = "G2Process"
    a: float
    sigma: float
    b: float
    eta: float
    rho: float
class G2ForwardProcess(BaseModel):
    resource_name: Optional[Literal["G2ForwardProcess"]] = "G2ForwardProcess"
    a: float
    sigma: float
    b: float
    eta: float
    rho: float
class GsrProcess(BaseModel):
    resource_name: Optional[Literal["GsrProcess"]] = "GsrProcess"
    times: Array
    vols: Array
    reversions: Array
    T: Optional[float] = None
class OrnsteinUhlenbeckProcess(BaseModel):
    resource_name: Optional[
        Literal["OrnsteinUhlenbeckProcess"]
    ] = "OrnsteinUhlenbeckProcess"
    speed: float
    vol: float
    x0: Optional[float] = None
    level: Optional[float] = None
# Instrument and cash-flow schemas.
class Stock(BaseModel):
    resource_name: Optional[Literal["Stock"]] = "Stock"
    quote: QuoteHandle
class CompositeInstrument(BaseModel):
    resource_name: Optional[Literal["CompositeInstrument"]] = "CompositeInstrument"
class MakeSchedule(BaseModel):
    resource_name: Optional[Literal["MakeSchedule"]] = "MakeSchedule"
class SimpleCashFlow(BaseModel):
    resource_name: Optional[Literal["SimpleCashFlow"]] = "SimpleCashFlow"
    amount: float
    date: Date
class Redemption(BaseModel):
    resource_name: Optional[Literal["Redemption"]] = "Redemption"
    amount: float
    date: Date
class AmortizingPayment(BaseModel):
    resource_name: Optional[Literal["AmortizingPayment"]] = "AmortizingPayment"
    amount: float
    date: Date
class FixedRateCoupon(BaseModel):
    resource_name: Optional[Literal["FixedRateCoupon"]] = "FixedRateCoupon"
    paymentDate: Date
    nominal: float
    rate: float
    dayCounter: DayCounter
    startDate: Date
    endDate: Date
    refPeriodStart: Optional[Date] = None
    refPeriodEnd: Optional[Date] = None
    exCouponDate: Optional[Date] = None
class OvernightIndexedCoupon(BaseModel):
    resource_name: Optional[
        Literal["OvernightIndexedCoupon"]
    ] = "OvernightIndexedCoupon"
    paymentDate: Date
    nominal: float
    startDate: Date
    endDate: Date
    overnightIndex: OvernightIndex
    gearing: Optional[float] = None
    spread: Optional[float] = None
    refPeriodStart: Optional[Date] = None
    refPeriodEnd: Optional[Date] = None
    dayCounter: Optional[DayCounter] = None
    telescopicValueDates: Optional[bool] = None
# Coupon pricers, calibrated-model handles, parameters, and misc tags.
class CappedFlooredCouponBase(BaseModel):
    resource_name: Optional[Literal["CappedFlooredCoupon"]] = "CappedFlooredCoupon"
    underlying: FloatingRateCoupon
    cap: Optional[float] = None
    floor: Optional[float] = None
class BlackIborCouponPricer(BaseModel):
    resource_name: Optional[Literal["BlackIborCouponPricer"]] = "BlackIborCouponPricer"
    v: Optional[OptionletVolatilityStructureHandle] = None
class GFunctionFactory(BaseModel):
    resource_name: Optional[Literal["GFunctionFactory"]] = "GFunctionFactory"
class LinearTsrPricerSettings(BaseModel):
    resource_name: Optional[
        Literal["LinearTsrPricerSettings"]
    ] = "LinearTsrPricerSettings"
class Duration(BaseModel):
    resource_name: Optional[Literal["Duration"]] = "Duration"
class CashFlows(BaseModel):
    resource_name: Optional[Literal["CashFlows"]] = "CashFlows"
    # NOTE(review): self-referential value type — looks intentional, confirm.
    value: Optional[CashFlows] = None
class TermStructureConsistentModel(BaseModel):
    resource_name: Optional[
        Literal["TermStructureConsistentModel"]
    ] = "TermStructureConsistentModel"
class CalibratedModelHandle(BaseModel):
    resource_name: Optional[Literal["CalibratedModelHandle"]] = "CalibratedModelHandle"
    value: Optional[CalibratedModel] = None
class RelinkableCalibratedModelHandle(BaseModel):
    resource_name: Optional[
        Literal["RelinkableCalibratedModelHandle"]
    ] = "RelinkableCalibratedModelHandle"
    value: Optional[CalibratedModel] = None
class TimeGrid(BaseModel):
    resource_name: Optional[Literal["TimeGrid"]] = "TimeGrid"
    end: Optional[float] = None
    steps: Optional[int] = None
class ConstantParameter0(BaseModel):
    resource_name: Optional[Literal["ConstantParameter"]] = "ConstantParameter"
    constraint: Constraint
class ConstantParameter1(BaseModel):
    resource_name: Optional[Literal["ConstantParameter"]] = "ConstantParameter"
    value: float
    constraint: Constraint
class NullParameter(BaseModel):
    resource_name: Optional[Literal["NullParameter"]] = "NullParameter"
class PiecewiseConstantParameter(BaseModel):
    resource_name: Optional[
        Literal["PiecewiseConstantParameter"]
    ] = "PiecewiseConstantParameter"
    times: List[float]
    constraint: Optional[Constraint] = None
class Barrier(BaseModel):
    resource_name: Optional[Literal["Barrier"]] = "Barrier"
class DoubleBarrier(BaseModel):
resource_name: | |
<filename>picarServer/picar.py<gh_stars>0
#!/usr/bin/python3
# File name : picar.py
# Description : encapsulate an Adeept Robotic PiCar
import time
import threading
from iotServerLib import piStripLed, piIotNode, piLineTracking
from iotServerLib.iotCommon import ON, OFF, RGB
from piServices.piUtils import timePrint, startThread
import picarDrive
import picarHead
import picarConst
class PiCar(piIotNode.PiIotNode):
""" encapsulate an Adeept Robotic PiCar """
    def __init__(self, name, parent, config):
        """Construct a PiCar.

        name -- node name registered with the IoT node tree
        parent -- parent PiIotNode (or None for the root)
        config -- configuration object; getOrAddInt() reads a key and
                  persists the given default when the key is absent
        """
        super(PiCar, self).__init__(name, parent)
        self.config = config
        # Sub-components: motors/steering, pan-tilt head, LED strip, IR line sensor.
        self.drive = picarDrive.PiCarDrive('drive', self, self.config)
        self.head = picarHead.PiCarHead('head', self, self.config)
        self.strip = piStripLed.PiStripLed('StripLed', self, ledCount=self.config.getOrAddInt('stripLed.ledCount', 12), ledPin=self.config.getOrAddInt('stripLed.ledPin', 12))
        self.lineTracking = piLineTracking.PiLineTracking('lineTracking', self, self.config.getOrAddInt('lineTracking.leftPin', 38),
                                                          self.config.getOrAddInt('lineTracking.middlePin', 36), self.config.getOrAddInt('lineTracking.rightPin', 35))
        self.httpVideoPort = self.config.getOrAddInt('picar.httpVideoPort', 8000)
        # initialize
        self._resetModes()
        self.distance = 0  # last measured ultrasonic distance, updated by the scan thread
    def startUp(self):
        """ start up all PiCar functions: reset modes, spawn the worker
        threads, stop/center the hardware and show the ready color """
        # modes initialization
        self._resetModes()
        # start threads for picar; each worker loops forever
        self.ws2812Thread=startThread('ws2812', target=self._ws2812Worker) # thread for stripled lights (ws_2812 leds)
        self.scanThread=startThread('DistanceScan', target=self._distanceScanWorker) # thread for distance scan (ultrasonic)
        self.httpvideoThread=startThread('HttpVideoStream', target=self._httpVideoWorker) # thread for http video streaming (flask)
        self.modeThread=startThread('ModeManager', target=self._modeWorker) # thread for managing PiCar operation modes
        # stop motor, move servos to center,
        self.stop()
        # turn on green lights
        self.drive.setLedsRGB(picarConst.GREEN)
    def shutDown(self):
        """ stop all components and restore default modes """
        # todo: stop all threads (the workers started in startUp keep running)
        self._resetModes()
        self.stop()
    def stop(self):
        """ stop PiCar: halt the drive, stop the head, clear the LED strip """
        self.drive.stop()
        self.head.stop()
        self.stopStrip()
def setOperationMode(self, mode):
""" set PiCar's operation mode """
if mode >= picarConst.ManualMode and mode <= picarConst.FaceTrackingMode:
self.mode = mode
return True
else:
timePrint('Invalid operation mode: %i' %mode)
return False
def setStrip(self, valueStr):
""" set value for strip valid values (in string)
- off - stop lightshow (StripModeManual)
- cycle - rainbowCycle lightshow (StripModeRainbowCycle)
- chase - theaterChaseRainbow lightshow (StripModeChaseRainbow)
- auto - auto mode (StripModeAuto)
- color value in the format of "rrr, ggg, bbb" separated by comma
"""
if 'off' in valueStr:
self.stripMode = picarConst.StripModeManual
elif 'cycle' in valueStr:
self.stripMode = picarConst.StripModeRainbowCycle
elif 'chase' in valueStr:
self.stripMode = picarConst.StripModeChaseRainbow
elif 'auto' in valueStr:
self.stripMode = picarConst.StripModeAuto
else:
try:
self.stripMode = picarConst.StripModeManual
self.strip.setAllPixelsRGBStr(valueStr, delay_ms=0)
except:
print('Invalid RGB value: ' + valueStr)
    def stopStrip(self):
        """ stop strip and turn off all LEDs """
        # NOTE(review): the LEDs are only cleared when the strip is ALREADY in
        # manual mode, and the assignment below is then a no-op. The intent was
        # probably `!=` (stop an active lightshow) or an unconditional clear --
        # confirm before changing.
        if self.stripMode == picarConst.StripModeManual:
            # todo: handling strip to complete its cycle
            self.stripMode = picarConst.StripModeManual
            self.strip.setAllPixels(0, 0, 0, delay_ms=0)
    def _resetModes(self):
        """ initialize PiCar's modes to their defaults """
        self.mode = picarConst.ManualMode  # operation mode
        self.stripMode = picarConst.StripModeManual  # LED strip mode
        self.distanceScan = True  # allow the ultrasonic worker to measure
    def _stopAuto(self):
        """ internal method to stop auto modes: revert to manual and halt """
        self.mode = picarConst.ManualMode
        self.stop()
    def _ws2812Worker(self, interval=0.1):
        """ internal thread for handling strip led operations

        interval -- seconds between refreshes of the strip state
        """
        oldMode = self.stripMode
        while True:
            if self.stripMode == picarConst.StripModeManual:
                if oldMode != picarConst.StripModeManual:
                    # just left a lightshow mode: blank the strip once
                    self.strip.setAllPixels(0, 0, 0, delay_ms=0)
            elif self.stripMode == picarConst.StripModeRainbowCycle:
                self.strip.rainbowCycle(delay_ms=50, iterations=1)
            elif self.stripMode == picarConst.StripModeChaseRainbow:
                self.strip.theaterChaseRainbow(delay_ms=50)
            elif self.stripMode == picarConst.StripModeAuto:
                # auto mode mirrors the drive state onto the strip
                if self.drive.motor.speed > 10: # moving forward
                    self.strip.rainbowCycle(delay_ms=50, iterations=1)
                elif self.drive.motor.speed < -10: # moving backward
                    self.strip.setAllPixels(255, 0, 0, delay_ms=0)
                else:
                    # NOTE(review): `Color` is never imported in this module
                    # (only RGB comes from iotCommon), so the turn-signal
                    # branches below raise NameError when first reached.
                    # Likely needs `from rpi_ws281x import Color` -- confirm.
                    if self.drive.steering.angle < -10: # turn left
                        self.strip.setPixelColor(0, Color(255,255,0))
                        self.strip.setPixelColor(1, Color(255,255,0))
                        self.strip.setPixelColor(2, Color(255,255,0))
                    elif self.drive.steering.angle > 10: # turn right
                        self.strip.setPixelColor(3, Color(255,255,0))
                        self.strip.setPixelColor(4, Color(255,255,0))
                        self.strip.setPixelColor(5, Color(255,255,0))
                    else:
                        self.strip.setAllPixels(0, 0, 0, delay_ms=0)
            oldMode = self.stripMode
            time.sleep(interval)
    def _httpVideoWorker(self): # video via http
        """ internal thread for video streaming; blocks serving HTTP video
        on self.httpVideoPort """
        self.head.camera.httpVideoStreaming(self.httpVideoPort)
    def _distanceScanWorker(self):
        """ internal thread for measuring distance at specified interval and
        slowing/stopping forward motion as an obstacle gets close """
        interval = self.config.getOrAddFloat('distanceScan.scanCycleInSecond', 0.2) # delay interval for the worker
        stopDistance = self.config.getOrAddFloat('distanceScan.stopDistanceInMeter', 0.2) # the distance to stop forward movement
        slowDistance = self.config.getOrAddFloat('distanceScan.slowDistanceInMeter', 1.0) # the distance to start slowing down
        headingAngleLimit = self.config.getOrAddInt('distanceScan.headingAngleLimit', 20) # the angle limit considered as measuring straight ahead
        while True:
            slowdown = 0
            # skip measuring while the head is sweeping (the scan owns the sensor)
            if self.distanceScan and not self.head.scanning:
                distance = self.head.ultrasonic.pingDistance()
                if distance < 0:
                    # a negative reading means the ping failed; give it one more try
                    distance = self.head.ultrasonic.pingDistance()
                self.distance = distance
                # check distance to stop drive
                if stopDistance > 0 and distance > 0:
                    # check heading and forward (speed > 0) before stopping
                    hAngle, vAngle = self.head.heading
                    if abs(hAngle) < headingAngleLimit and abs(vAngle) < headingAngleLimit and self.drive.motor.speed > 0:
                        if distance < stopDistance:
                            timePrint('Stopping drive at distance: %f' %distance)
                            self.drive.stop()
                            slowdown=0
                        elif distance < slowDistance:
                            # proportional slow-down: 0 at slowDistance, -30 at stopDistance
                            slowdown = -int(30 * (slowDistance - distance) / (slowDistance - stopDistance))
                            #timePrint('Slowing down %i at distance: %f' %(slowdown, distance))
            # NOTE(review): extraSpeed is applied every cycle, so any slowdown
            # is also reset to 0 whenever scanning is off -- confirm intended.
            self.drive.extraSpeed(slowdown)
            time.sleep(interval)
    def _initMode(self, mode):
        """ initialization of the mode - called only when first switching to the mode """
        if mode == picarConst.ManualMode:
            # stop auto mode when switching from auto to manual mode
            self._stopAuto()
        elif mode == picarConst.FollowDistanceMode:
            self.head.lookStraight()
            self.drive.turnStraight()
            self.distanceScan = True # make sure distance scan worker thread to stop before hitting obstacle
        elif mode == picarConst.FollowLineMode:
            pass  # no setup needed; _followLine reads the sensor each cycle
        elif mode == picarConst.AutoWanderMode:
            self._wanderState = picarConst.WanderStateInit # wander states: 0-init, 1-move, 2-stop, 3-scan, 4-turn, 5-back
            self._wanderOldState = picarConst.WanderStateInit
            self._wanderCounter = 0
            self.distanceScan = True # make sure distance scan worker thread to stop before hitting obstacle
        elif mode == picarConst.FaceTrackingMode:
            self._faceId = -1 # valid face ID should be >= 0
def _stopMode(self, mode):
""" initialization of the mode - will be called only when first time swith to the mode """
if mode == picarConst.ManualMode:
pass
elif mode == picarConst.FollowDistanceMode:
self.stop()
elif mode == picarConst.FollowLineMode:
self.stop()
elif mode == picarConst.AutoWanderMode:
self.stop()
elif mode == picarConst.FaceTrackingMode:
pass
    def _modeWorker(self, interval=0.2):
        """ internal thread for handling PiCar's operation modes

        interval -- seconds to sleep between iterations; also forwarded to
                    _wander as its time step
        """
        oldMode = self.mode
        while True:
            if oldMode != self.mode:
                # mode change - stop old mode and init new mode
                self._stopMode(oldMode)
                self._initMode(self.mode)
            # run one step of the active mode's behavior
            if self.mode == picarConst.ManualMode:
                pass
            elif self.mode == picarConst.FollowDistanceMode:
                self._followByDistance()
            elif self.mode == picarConst.FollowLineMode:
                self._followLine()
            elif self.mode == picarConst.AutoWanderMode:
                self._wander(interval)
            elif self.mode == picarConst.FaceTrackingMode:
                self._faceTracking()
            else:
                # unknown mode value: fall back to manual and stop the car
                self._stopAuto()
            oldMode = self.mode
            time.sleep(interval)
    def _followByDistance(self):
        """ internal function for followDistance mode
        follow with Ultrasonic by keeping the same distance to target
        this function leverage _distanceScanWorker to stop
        """
        maxDistance = self.config.getOrAddFloat('follow.maxFollowDistance', 2.0)
        dis = self.distance
        # NOTE(review): dis can be negative when the ultrasonic ping failed
        # (see _distanceScanWorker); a negative value passes this check and
        # drives the backward branch below -- confirm that is intended.
        if dis < maxDistance: #Check if the target is in diatance range
            distanceToFollow = self.config.getOrAddFloat('distanceScan.stopDistanceInMeter', 0.2) # keep the distance to the target
            distanceOffset = self.config.getOrAddFloat('follow.distanceOffset', 0.1) # controls the sensitivity
            if dis > (distanceToFollow + distanceOffset) : #If the target is in distance range and out of distanceToFollow, then move forward
                if self.drive.motor.speed > 0:
                    pass  # already moving forward
                else:
                    self.drive.setLedsRGB(picarConst.CYAN)
                    self.drive.forward(self.config.getOrAddInt('auto.forwardSpeed', 60))
                    timePrint('followByDistance - move forward. distance: %s' %dis)
            elif dis < (distanceToFollow - distanceOffset) : #Check if the target is too close, if so, the car move back to keep distance at distance
                if self.drive.motor.speed < 0:
                    pass  # already moving backward
                else:
                    self.drive.setLedsRGB(picarConst.PINK)
                    self.drive.backward(self.config.getOrAddInt('auto.backwardSpeed', 60))
                    timePrint('followByDistance - move backward. distance: %s' %dis)
            else: #If the target is at distance, then the car stay still
                self.drive.setLedsRGB(picarConst.GREEN)
                if abs(self.drive.motor.speed) > 5:
                    self.drive.stopMotor()
        else:
            # target out of follow range: make sure the car is stopped
            if abs(self.drive.motor.speed) > 5:
                self.drive.stopMotor()
def _followLine(self):
left, middle, right = self.lineTracking.status()
if middle:
self.drive.run(speed=self.config.getOrAddInt('auto.forwardSpeed', 60), steeringAngle=0)
self.drive.setLedsRGB(picarConst.YELLOW)
elif left:
self.drive.forward(speed=self.config.getOrAddInt('auto.forwardSpeed', 60))
self.drive.turnLeft(angle=45, turnSignal=True)
elif right:
self.drive.forward(speed=self.config.getOrAddInt('auto.forwardSpeed', 60))
self.drive.turnRight(angle=45, turnSignal=True)
else:
self.drive.backward(speed=self.config.getOrAddInt('auto.backwardSpeed', 60))
self.drive.setLedsRGB(picarConst.CYAN)
def _wander(self, interval):
""" autonomous wander around mindlessly
wander states:
0 - WanderStateInit - this is the initial state.
1 - WanderStateMove - the piCar is moving forward.
2 - WanderStateStop - the picar is stopped due to obstacle
3 - WanderStateScan - scan distance for surroundings and pick the direction with farest distance
4 - WanderStateTurn - turning to the direction with the best distance. then go to init state.
5 - WanderStateBack - move the car backward if failed to find the best distance from the scan then repeat scan
"""
if self._wanderState == picarConst.WanderStateInit:
# start move forward
self.head.lookStraight()
self.drive.forward(speed=self.config.getOrAddInt('auto.forwardSpeed', 60))
self._wanderState = picarConst.WanderStateMove
timePrint('Wander state %i' %self._wanderState)
elif self._wanderState == picarConst.WanderStateMove:
# check whether the drive stopped
if abs(self.drive.motor.speed) < 5:
self._wanderState = picarConst.WanderStateStop
timePrint('Wander state %i' %self._wanderState)
elif self._wanderState == picarConst.WanderStateStop:
# for now, just move to scan
self._wanderState = picarConst.WanderStateScan
timePrint('Wander state %i' %self._wanderState)
elif self._wanderState == picarConst.WanderStateScan:
# scan distance
value = []
posh = []
posv = []
starth = self.config.getOrAddInt('wander.scan.starth', -90)
startv = self.head.servoV.angle # use currently vertical angle
endh = self.config.getOrAddInt('wander.scan.endh', 90)
endv = startv
inch = self.config.getOrAddInt('wander.scan.inc', 10)
incv = inch
value, posh, posv = self.head.scan(starth, startv, endh, endv, inch, incv)
timePrint('Scan: %s' %(str(value)))
# find max
max = 0
maxindex = -1
| |
if len(seq) > seqLength:
seq = seq[:seqLength]
elif len(seq) < seqLength:
seq = string.ljust(seq, seqLength, '.')
mers = []
inds = []
for n in nmer:
mers.extend([seq[i:i+n] for i in range(len(seq)-n+1)])
inds.extend([np.arange(n)+i for i in range(len(seq)-n+1)])
return mers, inds
def itermer(seq, k=9, gapped=True, yield_inds=False):
    """Generator over all k-mers in seq.

    There are len(seq) - k + 1 k-mers in seq.

    Parameters
    ----------
    seq : str
        Sequence which will be broken into kmers.
    k : int
        Length of peptides to return.
    gapped : bool
        If True (default), yield the k-mer including gaps.
        If False, yield the "non-gapped" k-mer from grabKmer.
    yield_inds : bool
        If True, also yield an array of indices from grabKmerInds.

    Yields
    ------
    mer : str
        If gapped, a k-length peptide starting at starti from seq
        (None when seq[starti] is a gap). If not gapped, the peptide after
        gap removal (None when fewer than k AAs remain).
    inds : ndarray (only when yield_inds)
        An array of indices for the mer."""
    for starti in range(len(seq) - k + 1):
        gap_mer, nogap_mer = grabKmer(seq, starti, k=k)
        mer = gap_mer if gapped else nogap_mer
        if yield_inds:
            gap_inds, nogap_inds = grabKmerInds(seq, starti, k=k)
            yield (mer, gap_inds if gapped else nogap_inds)
        else:
            # keep the 1-tuple shape of the original interface
            yield (mer,)
def grabKmer(seq, starti, k=9):
    """Grab the kmer from seq starting at position starti with length k.

    Returns the gapped and the non-gapped kmer. A kmer may not begin on a
    gap: if seq[starti] is '-' both results are None. The non-gapped kmer is
    None when fewer than k non-gap AAs remain after starti. Out-of-range
    requests return (None, None).

    Parameters
    ----------
    seq : str
        Sequence from which the peptide will be grabbed.
    starti : int
        Starting position of the kmer (zero-based indexing).
    k : int
        Length of the peptide to return.

    Returns
    -------
    gapped : str or None
    nonGapped : str or None
    """
    starti = int(starti)
    # reject requests that run off either end of seq
    if starti < 0 or starti + k > len(seq):
        return None, None
    tail = seq[starti:]
    gapped = tail[:k]
    if gapped.startswith('-'):
        # a kmer may not begin on a gap (matches grabKmerInds)
        return None, None
    if '-' not in gapped:
        return gapped, gapped
    # degap the remainder and re-take k residues if enough remain
    degapped = tail.replace('-', '')
    nongapped = degapped[:k] if len(degapped) >= k else None
    return gapped, nongapped
def grabKmerInds(seq, starti, k=9):
    """Indices of the gapped and non-gapped kmer starting at starti.

    The arrays index into seq such that seq[ind] spells the kmer grabbed by
    grabKmer. If seq[starti] is a gap, or fewer than k non-gap AAs remain
    after a gap is found, or the request is out of range, the corresponding
    array is empty.

    Parameters
    ----------
    seq : str
        Sequence from which the peptide indices will be derived.
    starti : int
        Starting position of the kmer (zero-based indexing).
    k : int
        Length of the peptide.

    Returns
    -------
    gapped : ndarray
        Indices starti .. starti+k-1, or empty for invalid requests.
    nonGapped : ndarray
        Indices of the first k non-gap residues at/after starti, or empty.
    """
    starti = int(starti)
    # invalid request: runs off either end of seq
    if starti < 0 or starti + k > len(seq):
        return np.empty(0), np.empty(0)
    # a kmer may not begin on a gap (arbitrary rule, matches grabKmer)
    if seq[starti] == '-':
        return np.empty(0), np.empty(0)
    gapped = np.arange(starti, starti + k)
    if '-' not in seq[starti:starti + k]:
        # no gaps: both index sets are identical
        return gapped, gapped
    # walk right collecting non-gap positions until k are found
    nongap = [i for i in range(starti, len(seq)) if seq[i] != '-']
    if len(nongap) >= k:
        return gapped, np.array(nongap[:k])
    # ran out of sequence before finding k non-gap residues
    return gapped, np.empty(0)
def findpeptide(pep, seq, returnEnd = False):
    """Find pep in seq ignoring gaps, returning a start position that counts gaps.

    pep must match seq exactly once gaps ('-') are removed (otherwise use
    pairwise alignment).

    Parameters
    ----------
    pep : str
        Peptide to be found in seq.
    seq : str
        Sequence to be searched (may contain '-' gaps).
    returnEnd : bool
        Flag to also return the end position such that
        seq[startPos:endPos] spells pep with its gaps interleaved.

    Returns
    -------
    startPos : int
        Start position (zero-indexed) of pep in seq or -1 if not found.
    endPos : int (only when returnEnd)
        End position, or -1 if not found."""
    ng = seq.replace('-', '')
    ngInd = ng.find(pep)
    if ngInd == -1:
        # BUGFIX: bail out on a miss. Previously the gap-skipping loop below
        # still advanced past leading gaps, so a not-found peptide in a
        # gap-leading seq returned a bogus non-negative start position.
        return (-1, -1) if returnEnd else -1
    """Count the number of gaps prior to the non-gapped position. Add them to it to get the gapped position"""
    ngCount = 0
    pos = 0
    while ngCount < ngInd or seq[pos] == '-':
        if not seq[pos] == '-':
            ngCount += 1
        pos += 1
    startPos = ngInd + (pos - ngCount)
    if not returnEnd:
        return startPos
    # advance endPos until len(pep) non-gap residues have been consumed
    count = 0
    endPos = startPos
    while count < len(pep):
        if not seq[endPos] == '-':
            count += 1
        endPos += 1
    return startPos, endPos
def grabOverlappingKmer(seq, sitei, pos=0, k=9):
    """Grab the kmer from seq for which it is in the pos position at sitei
    Return the gapped and non-gapped kmer

    This is a generalization of grabKmer for pos = 0

    If seq[sitei] is a gap then the non-gapped kmer is None.
    If there are not enough non-gap AA to return before/after sitei then it returns None

    Parameters
    ----------
    seq : str
        Sequence from which peptide will be grabbed.
    sitei : int
        Key position of the kmer (zero-based indexing)
    pos : int
        The position of the key sitei in the kmer.
    k : int
        Length of the peptide to return.

    Returns
    -------
    gapped : str
        A k-length peptide that overlaps sitei
    nonGapped : str
        A k-length peptide that overlaps sitei
        If seq[sitei] is a gap then returns None.
        If not then all gaps are removed before taking the k-length peptide
        (if there aren't k AAs then return is None)"""
    # split the kmer around sitei: aaLeft residues before it, aaRight
    # residues from sitei onward (sitei itself lands at offset `pos`)
    aaRight = k - pos
    aaLeft = pos
    if seq[sitei] == '-':
        return None, None
    if (sitei + aaRight) <= len(seq) and (sitei - aaLeft) >= 0:
        if pos<k:
            # right half: from sitei onward
            rh = seq[sitei:]
            fullRH = rh[:aaRight]
            if '-' in fullRH:
                # degap the remainder, then re-take aaRight residues if enough remain
                ngRH = rh.replace('-', '')
                if len(ngRH) >= aaRight:
                    ngRH = ngRH[:aaRight]
                else:
                    ngRH = None
            else:
                ngRH = fullRH
        else:
            fullRH = ''
            ngRH = ''
        if pos>0:
            # left half: the aaLeft characters immediately before sitei
            # (guarded by pos>0 since lh[-0:] would take the whole string)
            lh = seq[:sitei]
            fullLH = lh[-aaLeft:]
            if '-' in fullLH:
                # degap, then re-take the LAST aaLeft residues if enough remain
                ngLH = lh.replace('-', '')
                if len(ngLH) >= aaLeft:
                    ngLH = ngLH[-aaLeft:]
                else:
                    ngLH = None
            else:
                ngLH = fullLH
        else:
            fullLH = ''
            ngLH = ''
        full = fullLH + fullRH
        #print aaLeft,fullLH,",", aaRight,fullRH
        # either half failing to supply enough residues invalidates ng
        if ngLH is None or ngRH is None:
            ng = None
        else:
            ng = ngLH + ngRH
        return full, ng
    else:
        return None, None
def overlappingMers(seq, sitei, nmer = [8, 9, 10, 11], padding = 0):
"""Create a list of kmers that overlap sitei in seq
Returns parallel lists of the mers, start positions and lengths
Parameters
----------
seq : str
sitei : int
Zero-based index into seq
nmer : list
Lengths of kmers to consider
padding : int
Allow kmer to be within padding.
Defalut is no padding (must overlap)
Returns
-------
mers : list
List of overlapping peptides
starti : list
List of start positions"""
def _overlappingMersNoPadding(seq, sitei, nmer):
mers = []
starti = []
for k in nmer:
for posi in range(k):
ng = grabOverlappingKmer(seq, sitei, pos=posi, k=k)[1]
if not ng is None:
mers.append(ng)
starti.append(sitei-posi)
#print sitei, posi, k, ng
mers, uniqi = np.unique(mers, return_index = True)
starti = np.array(starti)[uniqi]
return mers, | |
pb = probe_designer.ProbeDesigner(genomes_grouped, filters,
probe_length=args.probe_length,
probe_stride=args.probe_stride,
allow_small_seqs=args.small_seq_min,
seq_length_to_skip=args.small_seq_skip,
cluster_threshold=cluster_threshold,
cluster_merge_after=cluster_merge_after,
cluster_fragment_length=cluster_fragment_length)
pb.design()
# Write the final probes to the file args.output_probes
seq_io.write_probe_fasta(pb.final_probes, args.output_probes)
if (args.print_analysis or args.write_analysis_to_tsv or
args.write_sliding_window_coverage or
args.write_probe_map_counts_to_tsv):
analyzer = coverage_analysis.Analyzer(
pb.final_probes,
args.mismatches,
args.lcf_thres,
genomes_grouped,
genomes_grouped_names,
island_of_exact_match=args.island_of_exact_match,
custom_cover_range_fn=custom_cover_range_fn,
cover_extension=args.cover_extension,
kmer_probe_map_k=kmer_probe_map_k_analyzer,
rc_too=args.add_reverse_complements)
analyzer.run()
if args.write_analysis_to_tsv:
analyzer.write_data_matrix_as_tsv(
args.write_analysis_to_tsv)
if args.write_sliding_window_coverage:
analyzer.write_sliding_window_coverage(
args.write_sliding_window_coverage)
if args.write_probe_map_counts_to_tsv:
analyzer.write_probe_map_counts(
args.write_probe_map_counts_to_tsv)
if args.print_analysis:
analyzer.print_analysis()
else:
# Just print the number of probes
print(len(pb.final_probes))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Input data
parser.add_argument('dataset',
nargs='+',
help=("One or more target datasets (e.g., one per species). Each "
"dataset can be specified in one of multiple ways. (a) If "
"dataset is in the format 'download:TAXID', then CATCH downloads "
"from NCBI all whole genomes for the NCBI taxonomy with id "
"TAXID, and uses these sequences as input. (b) If dataset is "
"a path to a FASTA file, then its sequences are read and used "
"as input. (c) Otherwise, it is assumed that this is a label "
"for a dataset included in this package (e.g., 'zika'). If "
"the label starts with 'collection:' (e.g., 'collection:viruses"
"_with_human_host'), then this reads from an available "
"collection of datasets. For segmented viruses, the format "
"for NCBI downloads can also be 'download:TAXID-SEGMENT'."))
# Outputting probes
parser.add_argument('-o', '--output-probes',
required=True,
help=("The file to which all final probes should be "
"written; they are written in FASTA format"))
# Outputting downloaed data
parser.add_argument('--write-taxid-acc',
help=("If 'download:' labels are used in datasets, write downloaded "
"accessions to a file in this directory. Accessions are written "
"to WRITE_TAXID_ACC/TAXID.txt"))
# Parameters on probe length and stride
parser.add_argument('-pl', '--probe-length',
type=int,
default=100,
help=("(Optional) Make probes be PROBE_LENGTH nt long"))
parser.add_argument('-ps', '--probe-stride',
type=int,
default=50,
help=("(Optional) Generate candidate probes from the input "
"that are separated by PROBE_STRIDE nt"))
# Parameters governing probe hybridization
parser.add_argument('-m', '--mismatches',
type=int,
default=0,
help=("(Optional) Allow for MISMATCHES mismatches when determining "
"whether a probe covers a sequence"))
parser.add_argument('-l', '--lcf-thres',
type=int,
help=("(Optional) Say that a portion of a probe covers a portion "
"of a sequence if the two share a substring with at most "
"MISMATCHES mismatches that has length >= LCF_THRES "
"nt; if unspecified, this is set to PROBE_LENGTH"))
parser.add_argument('--island-of-exact-match',
type=int,
default=0,
help=("(Optional) When determining whether a probe covers a "
"sequence, require that there be an exact match (i.e., "
"no mismatches) of length at least ISLAND_OF_EXACT_"
"MATCH nt between a portion of the probe and a portion "
"of the sequence"))
# Custom function (dynamically loaded) to determine probe hybridization
# When set, this makes values of the above arguments (--mismatches,
# --lcf-thres, and --island-of-exact-match) meaningless
parser.add_argument('--custom-hybridization-fn',
nargs=2,
help=("(Optional) Args: <PATH> <FUNC>; PATH is a path to a Python "
"module (.py file) and FUNC is a string giving the name of "
"a function in that module. FUNC provides a custom model of "
"hybridization between a probe and target sequence to use in "
"the probe set design. If this is set, the arguments "
"--mismatches, --lcf-thres, and --island-of-exact-match are "
"not used because these are meant for the default model of "
"hybridization. The function FUNC in PATH is dynamically "
"loaded to use when determining whether a probe hybridizes to "
"a target sequence (and, if so, what portion). FUNC must "
"accept the following arguments in order, though it "
"may choose to ignore some values: (1) array giving sequence "
"of a probe; (2) str giving subsequence of target sequence to "
"which the probe may hybridize, of the same length as the "
"given probe sequence; (3) int giving the position in the "
"probe (equivalently, the target subsequence) of the start "
"of a k-mer around which the probe and target subsequence "
"are anchored (the probe and target subsequence are aligned "
"using this k-mer as an anchor); (4) int giving the end "
"position (exclusive) of the anchor k-mer; (5) int giving the "
"full length of the probe (the probe provided in (1) may be "
"cutoff on an end if it extends further than where the "
"target sequence ends); (6) int giving the full length of the "
"target sequence of which the subsequence in (2) is part. "
"FUNC must return None if it deems that the probe does not "
"hybridize to the target subsequence; otherwise, it must "
"return a tuple (start, end) where start is an int giving "
"the start position in the probe (equivalently, in the "
"target subsequence) at which the probe will hybridize to "
"the target subsequence, and end is an int (exclusive) giving "
"the end position of the hybridization."))
# Desired coverage of target genomes
def check_coverage(val):
fval = float(val)
ival = int(fval)
if fval >= 0 and fval <= 1:
# a float in [0,1] giving fractional coverage
return fval
elif fval > 1 and fval == ival:
# an int > 1 giving number of bp to cover
return ival
else:
raise argparse.ArgumentTypeError(("%s is an invalid coverage "
"value") % val)
parser.add_argument('-c', '--coverage',
type=check_coverage,
default=1.0,
help=("If this is a float in [0,1], it gives the fraction of "
"each target genome that must be covered by the selected "
"probes; if this is an int > 1, it gives the number of "
"bp of each target genome that must be covered by the "
"selected probes"))
# Amount of cover extension to assume
parser.add_argument('-e', '--cover-extension',
type=int,
default=0,
help=("Extend the coverage of each side of a probe by COVER_EXTENSION "
"nt. That is, a probe covers a region that consists of the "
"portion of a sequence it hybridizes to, as well as this "
"number of nt on each side of that portion. This is useful "
"in modeling hybrid selection, where a probe hybridizes to"
"a fragment that includes the region targeted by the probe, "
"along with surrounding portions of the sequence. Increasing "
"its value should reduce the number of probes required to "
"achieve the desired coverage."))
# Differential identification and blacklisting
parser.add_argument('-i', '--identify',
dest="identify",
action="store_true",
help=("Design probes meant to make it possible to identify "
"nucleic acid from a particular input dataset against "
"the other datasets; when set, the coverage should "
"generally be small"))
parser.add_argument('--blacklist-genomes',
nargs='+',
help=("One or more blacklisted genomes; penalize probes based "
"on how much of each of these genomes they cover. If "
"the value is a path to a file, then that file is treated "
"as a FASTA file and its sequences are read. Otherwise, "
"it is assumed that this is a label for a dataset included "
"in this package (e.g., 'zika')."))
parser.add_argument('-mt', '--mismatches-tolerant',
type=int,
help=("(Optional) A more tolerant value for 'mismatches'; "
"this should be greater than the value of MISMATCHES. "
"Allows for capturing more possible hybridizations "
"(i.e., more sensitivity) when designing probes for "
"identification or when genomes are blacklisted."))
parser.add_argument('-lt', '--lcf-thres-tolerant',
type=int,
help=("(Optional) A more tolerant value for 'lcf_thres'; "
"this should be less than LCF_THRES. "
"Allows for capturing more possible hybridizations "
"(i.e., more sensitivity) when designing probes for "
"identification or when genomes are blacklisted."))
parser.add_argument('--island-of-exact-match-tolerant',
type=int,
default=0,
help=("(Optional) A more tolerant value for 'island_of_"
"exact_match'; this should be less than ISLAND_OF_ "
"EXACT_MATCH. Allows for capturing more "
"possible hybridizations (i.e., more sensitivity) "
"when designing probes for identification or when "
"genomes are blacklisted."))
parser.add_argument('--custom-hybridization-fn-tolerant',
nargs=2,
help=("(Optional) A more tolerant model than the one "
"implemented in custom_hybridization_fn. This should capture "
"more possible hybridizations (i.e., be more sensitive) "
"when designing probes for identification or when genomes "
"are blacklisted. See --custom-hybridization-fn for details "
"of how this function should be implemented and provided."))
# Outputting coverage analyses
parser.add_argument('--print-analysis',
dest="print_analysis",
action="store_true",
help="Print analysis of | |
data associated with them.
Returns
-------
list of tuples of int
list of unique antpair + pol tuples (ant1, ant2, pol) with data
associated with them.
"""
pols = self.get_pols()
bls = self.get_antpairs()
return [(bl) + (pol,) for bl in bls for pol in pols]
def get_feedpols(self):
"""
Get the unique antenna feed polarizations in the data.
Returns
-------
list of str
list of antenna feed polarizations (e.g. ['X', 'Y']) in the data.
Raises
------
ValueError
If any pseudo-Stokes visibilities are present
"""
if np.any(self.polarization_array > 0):
raise ValueError(
"Pseudo-Stokes visibilities cannot be interpreted as feed polarizations"
)
else:
return list(set("".join(self.get_pols())))
def get_data(self, key1, key2=None, key3=None, squeeze="default", force_copy=False):
"""
Get the data corresonding to a baseline and/or polarization.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all data for
that pol.
else:
interpreted as a baseline number, get all data for that baseline.
if key is length 2: interpreted as an antenna pair, get all data
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all data for that baseline, pol. pol may be a string or int.
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of data.
If data exists conjugate to requested antenna pair, it will be conjugated
before returning.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
out = self._smart_slicing(
self.data_array, ind1, ind2, indp, squeeze=squeeze, force_copy=force_copy
)
return out
    def get_flags(
        self, key1, key2=None, key3=None, squeeze="default", force_copy=False
    ):
        """
        Get the flags corresponding to a baseline and/or polarization.

        Parameters
        ----------
        key1, key2, key3 : int or tuple of ints
            Identifier of which data to get, can be passed as 1, 2, or 3 arguments
            or as a single tuple of length 1, 2, or 3. These are collectively
            called the key.
            If key is length 1:
                if (key < 5) or (type(key) is str):
                    interpreted as a polarization number/name, get all flags for
                    that pol.
                else:
                    interpreted as a baseline number, get all flags for that baseline.
            if key is length 2: interpreted as an antenna pair, get all flags
            for that baseline.
            if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
            get all flags for that baseline, pol. pol may be a string or int.
        squeeze : str
            string specifying how to squeeze the returned array. Options are:
            'default': squeeze pol and spw dimensions if possible;
            'none': no squeezing of resulting numpy array;
            'full': squeeze all length 1 dimensions.
        force_copy : bool
            Option to explicitly make a copy of the data.

        Returns
        -------
        ndarray
            copy (or if possible, a read-only view) of relevant section of flags.
        """
        # flatten the 1-3 key arguments into a single flat key list
        key = []
        for val in [key1, key2, key3]:
            if isinstance(val, str):
                key.append(val)
            elif val is not None:
                key += list(uvutils._get_iterable(val))
        if len(key) > 3:
            raise ValueError("no more than 3 key values can be passed")
        ind1, ind2, indp = self._key2inds(key)
        # When we select conjugated baselines, there is a call to np.conj()
        # inside of _smart_slicing to correct the data array. This has the
        # unintended consequence of promoting the dtype of an array of np.bool_
        # to np.int8. Rather than having a bunch of special handling for this
        # ~corner case, we instead explicitly cast back to np.bool_ before we
        # hand back to the user.
        out = self._smart_slicing(
            self.flag_array, ind1, ind2, indp, squeeze=squeeze, force_copy=force_copy
        ).astype(np.bool_)
        return out
def get_nsamples(
self, key1, key2=None, key3=None, squeeze="default", force_copy=False
):
"""
Get the nsamples corresonding to a baseline and/or polarization.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all nsamples for
that pol.
else:
interpreted as a baseline number, get all nsamples for that
baseline.
if key is length 2: interpreted as an antenna pair, get all nsamples
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all nsamples for that baseline, pol. pol may be a string or int.
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of
nsample_array.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
ind1, ind2, indp = self._key2inds(key)
out = self._smart_slicing(
self.nsample_array, ind1, ind2, indp, squeeze=squeeze, force_copy=force_copy
)
return out
def get_times(self, key1, key2=None, key3=None):
"""
Get the times for a given antpair or baseline number.
Meant to be used in conjunction with get_data function.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all times.
else:
interpreted as a baseline number, get all times for that baseline.
if key is length 2: interpreted as an antenna pair, get all times
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all times for that baseline.
Returns
-------
ndarray
times from the time_array for the given antpair or baseline.
"""
key = []
for val in [key1, key2, key3]:
if isinstance(val, str):
key.append(val)
elif val is not None:
key += list(uvutils._get_iterable(val))
if len(key) > 3:
raise ValueError("no more than 3 key values can be passed")
inds1, inds2, indp = self._key2inds(key)
return self.time_array[np.append(inds1, inds2)]
def get_lsts(self, key1, key2=None, key3=None):
"""
Get the LSTs for a given antpair or baseline number.
Meant to be used in conjunction with get_data function.
Parameters
----------
key1, key2, key3 : int or tuple of ints
Identifier of which data to get, can be passed as 1, 2, or 3 arguments
or as a single tuple of length 1, 2, or 3. These are collectively
called the key.
If key is length 1:
if (key < 5) or (type(key) is str):
interpreted as a polarization number/name, get all times.
else:
interpreted as a baseline number, get all times for that baseline.
if key is length 2: interpreted as an antenna pair, get all times
for that baseline.
if key is length 3: interpreted as antenna pair and pol (ant1, ant2, pol),
get all times for that baseline.
Returns
| |
<reponame>akki2825/CorpusTools
import os
import codecs
from corpustools.corpus.io.textgrid11_pct import TextGrid, IntervalTier, readFile, Interval, Point, PointTier, _getMark
#from corpustools.corpus.io.textgrid import Interval, Point, PointTier , _getMark
from corpustools.corpus.classes import SpontaneousSpeechCorpus, Attribute, Corpus
from corpustools.corpus.classes.spontaneous import Discourse
from corpustools.exceptions import PCTError
from corpustools.corpus.io.binary import load_binary
import corpustools.gui.modernize as modernize
from .helper import (parse_transcription, DiscourseData,
AnnotationType, data_to_discourse2, find_wav_path,
Annotation,)
class PCTTextGrid(TextGrid):
    """TextGrid subclass for PCT that normalizes tier names and rounds the
    times read from Praat-formatted TextGrid files.
    """
    def __init__(self):
        super().__init__()
    def name_filter(self, name):
        """
        Capitalize the initial letter to match the specifications in PCT.

        Names that are entirely uppercase (e.g. acronyms) are returned
        unchanged.
        """
        return name.capitalize() if not all([x.isupper() for x in name]) else name
    def read(self, f, round_digits=15):
        """
        Read the tiers contained in the Praat-formated TextGrid file
        indicated by string f.

        Each parsed IntervalTier/PointTier is appended to this TextGrid.
        Interval/point times are rounded to ``round_digits`` decimal places;
        the grid's global min/max times are rounded to 5 places.
        """
        source = readFile(f)
        # Header: global min/max time, a junk line, the tier count, and
        # one more junk line before the first tier.
        self.minTime = round(float(source.readline().split()[2]), 5)
        self.maxTime = round(float(source.readline().split()[2]), 5)
        source.readline() # more header junk
        m = int(source.readline().rstrip().split()[2]) # will be self.n
        source.readline()
        for i in range(m): # loop over grids
            source.readline()
            # The tier's class line decides interval vs. point parsing.
            if source.readline().rstrip().split()[2] == '"IntervalTier"':
                # Tier name, then tier min/max time, then the intervals.
                inam = source.readline().rstrip().split(' = ')[1].strip('"')
                inam = self.name_filter(inam)
                imin = round(float(source.readline().rstrip().split()[2]),
                             round_digits)
                imax = round(float(source.readline().rstrip().split()[2]),
                             round_digits)
                itie = IntervalTier(inam)
                for j in range(int(source.readline().rstrip().split()[3])):
                    source.readline().rstrip().split() # header junk
                    jmin = round(float(source.readline().rstrip().split()[2]),
                                 round_digits)
                    jmax = round(float(source.readline().rstrip().split()[2]),
                                 round_digits)
                    jmrk = _getMark(source)
                    # Zero-length (null) intervals are dropped.
                    if jmin < jmax: # non-null
                        itie.addInterval(Interval(jmin, jmax, jmrk))
                self.append(itie)
            else: # pointTier
                # Tier name, then tier min/max time, then the points.
                inam = source.readline().rstrip().split(' = ')[1].strip('"')
                inam = self.name_filter(inam)
                imin = round(float(source.readline().rstrip().split()[2]),
                             round_digits)
                imax = round(float(source.readline().rstrip().split()[2]),
                             round_digits)
                itie = PointTier(inam)
                n = int(source.readline().rstrip().split()[3])
                for j in range(n):
                    source.readline().rstrip() # header junk
                    jtim = round(float(source.readline().rstrip().split()[2]),
                                 round_digits)
                    jmrk = _getMark(source)
                    itie.addPoint(Point(jtim, jmrk))
                self.append(itie)
        source.close()
def uniqueLabels(tier):
    """Return the set of distinct interval marks found on *tier*."""
    return {interval.mark for interval in tier.intervals}
def averageLabelLen(tier):
    """Return the mean character length of the distinct interval marks on
    *tier*.

    Duplicate marks are counted once (same distinct-label set as
    ``uniqueLabels``). Returns 0 for a tier with no intervals instead of
    raising ZeroDivisionError.
    """
    labels = {interval.mark for interval in tier.intervals}
    if not labels:
        # An empty tier previously crashed with ZeroDivisionError.
        return 0
    return sum(len(lab) for lab in labels) / len(labels)
def inspect_discourse_textgrid(path):
    """
    Generate a list of AnnotationTypes for a specified TextGrid file.

    Parameters
    ----------
    path : str
        Full path to TextGrid file, or to a directory that is searched
        recursively for ``.TextGrid`` files

    Returns
    -------
    list of AnnotationTypes
        Autodetected AnnotationTypes for the TextGrid file

    Raises
    ------
    PCTError
        If multiple TextGrids are found and they do not all have the same
        number of interval tiers
    """
    # Candidate delimiters used to detect transcription-style labels.
    trans_delimiters = ['.',' ', ';', ',']
    textgrids = []
    if os.path.isdir(path):
        # Collect every TextGrid file under the directory tree.
        for root, subdirs, files in os.walk(path):
            for filename in files:
                if not filename.lower().endswith('.textgrid'):
                    continue
                textgrids.append(os.path.join(root,filename))
    else:
        textgrids.append(path)
    anno_types = []
    for t in textgrids:
        tg = load_textgrid(t)
        spellings, segments, attributes = guess_tiers(tg)
        if len(segments) == 0:
            base = None
        else:
            base = segments[0]
        if len(spellings) == 0:
            anchor = None
        else:
            anchor = spellings[0]
        interval_tiers = [x for x in tg.tiers if isinstance(x, IntervalTier)]
        if len(anno_types) == 0:
            # First TextGrid: construct one AnnotationType per interval tier.
            for ti in interval_tiers:
                if ti.name in spellings:
                    a = AnnotationType(ti.name, base, None, anchor = True, token = False)
                elif ti.name in segments:
                    a = AnnotationType(ti.name, None, anchor, base = True, token = True)
                else:
                    # Neither spelling nor segment tier: treat as an attribute
                    # tier and guess its category from the labels.
                    labels = uniqueLabels(ti)
                    cat = Attribute.guess_type(labels, trans_delimiters)
                    att = Attribute(Attribute.sanitize_name(ti.name), cat, ti.name)
                    a = AnnotationType(ti.name, None, anchor, token = False, attribute = att)
                    if cat == 'tier':
                        # Use the first delimiter that occurs in any label.
                        for l in labels:
                            for delim in trans_delimiters:
                                if delim in l:
                                    a.trans_delimiter = delim
                                    break
                            if a.trans_delimiter is not None:
                                break
                a.add((x.mark for x in ti), save = False)
                anno_types.append(a)
        else:
            # Subsequent TextGrids must line up tier-for-tier with the first;
            # their marks are folded into the existing AnnotationTypes.
            if len(anno_types) != len(interval_tiers):
                raise(PCTError("The TextGrids must have the same number of tiers."))
            for i, ti in enumerate(interval_tiers):
                anno_types[i].add((x.mark for x in ti), save = False)
    return anno_types
def load_textgrid(path):
    """Read the TextGrid file at *path* into a new PCTTextGrid."""
    grid = PCTTextGrid()
    grid.read(path)
    return grid
def guess_tiers(tg):
    """Heuristically classify the interval tiers of a TextGrid.

    The tier whose distinct labels are longest on average is taken to be the
    segment (transcription) tier; of the remaining tiers, the first one in
    file order is taken to be the spelling tier; every non-segment tier is
    also reported as an attribute tier.

    Parameters
    ----------
    tg : TextGrid
        TextGrid whose ``tiers`` are inspected (only IntervalTiers count)

    Returns
    -------
    tuple of (list, list, list)
        (spelling_tiers, segment_tiers, attribute_tiers) — lists of tier
        *names* (strings)
    """
    segment_tiers = []
    spelling_tiers = []
    attribute_tiers = []
    tier_properties = {}
    interval_tiers = [x for x in tg.tiers if isinstance(x, IntervalTier)]
    for i,t in enumerate(interval_tiers):
        # (file order, interval count, avg distinct-label length, label count)
        tier_properties[t.name] = (i, len(t), averageLabelLen(t), len(uniqueLabels(t)))
    max_labels = max(tier_properties.values(), key = lambda x: x[2])
    likely_segment = [k for k,v in tier_properties.items() if v == max_labels]
    if len(likely_segment) == 1:
        # BUGFIX: append the tier *name*, not the one-element list itself,
        # so the string membership tests below (and in callers such as
        # inspect_discourse_textgrid) can actually match.
        segment_tiers.append(likely_segment[0])
    likely_spelling = min((x for x in tier_properties.keys() if x not in segment_tiers),
                        key = lambda x: tier_properties[x][0])
    spelling_tiers.append(likely_spelling)
    for k in tier_properties.keys():
        if k in segment_tiers:
            continue
        attribute_tiers.append(k)
    return spelling_tiers, segment_tiers, attribute_tiers
def textgrid_to_data(corpus_name, path, annotation_types, stop_check = None,
                     call_back = None):
    """
    Convert one TextGrid file into a DiscourseData object.

    For every interval on each spelling (word-level) tier, collects the
    overlapping intervals from the transcription (base-level) tiers and the
    values of the attribute tiers at the word's temporal midpoint.

    Parameters
    ----------
    corpus_name : str
        Name to give the resulting DiscourseData
    path : str
        Full path to the TextGrid file
    annotation_types : list of AnnotationType
        Parsing specification for the tiers; each one is reset before use
        (NOTE: this mutates the caller's annotation_types)
    stop_check : callable or None
        Optional function to check whether to gracefully terminate early
    call_back : callable or None
        Optional function to supply progress information during loading

    Returns
    -------
    DiscourseData or None
        Parsed annotation data, or None if stop_check requested termination
    """
    tg = load_textgrid(path)
    name = corpus_name
    for a in annotation_types:
        a.reset()
    data = DiscourseData(name, annotation_types)
    if call_back is not None:
        call_back('Loading...')
    cur = 0
    for word_name in data.word_levels:
        #data.word_levels = [k for k,v in data.data.items() if not v.token and v.anchor]
        #this should return the names of just the spelling tiers, and in most cases len(word_levels)==1
        if stop_check is not None and stop_check():
            return
        if call_back is not None:
            cur += 1
            call_back(cur)
        spelling_tier = tg.getFirst(word_name)
        for si in spelling_tier:
            # Skip empty intervals (silence/unlabeled stretches).
            if not si.mark:# is None:
                continue
            annotations = dict()
            word = Annotation(si.mark)
            # si.mark is the actual text, e.g the spelling of a word
            for n in data.base_levels:
                #data.base_levels should return a list of names of transcription-type tiers
                #compare with data.word_levels a few lines back in the nesting loop
                if data[word_name].speaker != data[n].speaker and data[n].speaker is not None:
                    continue
                t = tg.getFirst(n)
                # t is a list of Intervals
                tier_elements = list()
                for ti in t:
                    # Keep only intervals overlapping the word's time span,
                    # clipping their boundaries to the word interval.
                    if ti.maxTime <= si.minTime:
                        continue
                    if ti.minTime >= si.maxTime:
                        break
                    phoneBegin = ti.minTime
                    phoneEnd = ti.maxTime
                    if phoneBegin < si.minTime:
                        phoneBegin = si.minTime
                    if phoneEnd > si.maxTime:
                        phoneEnd = si.maxTime
                    parsed = parse_transcription(ti.mark, data[n])
                    if parsed:
                        parsed[0].begin = phoneBegin
                        parsed[-1].end = phoneEnd
                        tier_elements.extend(parsed)
                # if not tier_elements:
                #     continue
                if len(tier_elements) > 1:
                    # Only the first element keeps its begin time and only the
                    # last keeps its end time; interior boundaries are cleared.
                    for j,_ in enumerate(tier_elements):
                        if j == 0:
                            tier_elements[j].end = None
                        elif j == len(tier_elements)-1:
                            tier_elements[j].begin = None
                        else:
                            tier_elements[j].begin = None
                            tier_elements[j].end = None
                level_count = data.level_length(n)
                word.references.append(n)
                word.begins.append(level_count)
                word.ends.append(level_count + len(tier_elements))
                annotations[n] = tier_elements
            #mid_point = si.minTime + (si.maxTime - si.minTime)
            mid_point = (si.maxTime + si.minTime)/2
            for at in annotation_types:
                #this catches only things marked as "Other (character)"
                if at.ignored:
                    continue
                if at.base:
                    continue
                if at.anchor:
                    continue
                # Attribute tiers: sample the interval at the word's midpoint.
                t = tg.getFirst(at.attribute.name)
                ti = t.intervalContaining(mid_point)
                if ti is None:
                    #value = None
                    continue
                else:
                    value = ti.mark
                if not value:
                    continue
                value = [Annotation(value)]
                if at.delimited:
                    value = parse_transcription(ti.mark, at)
                # elif at.ignored: #this block will never be reached because at.ignored is checked above already
                #     value = ''.join(x for x in value if x not in at.ignored)
                if at.token:
                    word.token[at.attribute.name] = value
                else:
                    word.additional[at.attribute.name] = value
                annotations[at.attribute.name] = value
            annotations[word_name] = [word]
            data.add_annotations(**annotations)
            #the add_annotations function appears to do nothing
            #it is supposed to update the dictionary data.data but the contents of the dictionary remain the
            #same after the function call
            #the annotations dictionary seems to contain useful information about words, but none of it is ever used
    return data
def load_discourse_textgrid(corpus_name, path, annotation_types,
                            feature_system_path = None, support_corpus_path = None,
                            stop_check = None, call_back = None):
    """
    Load a discourse from a TextGrid file.

    Parameters
    ----------
    corpus_name : str
        Informative identifier to refer to corpus
    path : str
        Full path to TextGrid file
    annotation_types : list of AnnotationType
        List of AnnotationType specifying how to parse the TextGrids.
        Can be generated through ``inspect_discourse_textgrid``.
        (NOTE: mutated by ``textgrid_to_data`` as a side effect.)
    feature_system_path : str, optional
        Full path to pickled FeatureMatrix to use with the Corpus
    support_corpus_path : str or Corpus, optional
        Supporting corpus used when building the discourse; either a path
        to a pickled corpus, or an already-loaded Corpus instance (the
        latter avoids re-loading the corpus once per file when called from
        ``load_directory_textgrid``)
    stop_check : callable or None
        Optional function to check whether to gracefully terminate early
    call_back : callable or None
        Optional function to supply progress information during the loading

    Returns
    -------
    Discourse
        Discourse object generated from the TextGrid file
    """
    data = textgrid_to_data(corpus_name, path, annotation_types, call_back=call_back, stop_check=stop_check)
    #textgrid_to_data has side-effects that change annotation_types
    wav_path = find_wav_path(path)
    if support_corpus_path is not None:
        if isinstance(support_corpus_path, Corpus):
            #the corpus is 'preloaded' if this function is called by load_directory_textgrid
            #otherwise the corpus has to be loaded once per file in a directory, which could be slow
            support = support_corpus_path
        else:
            #otherwise, it's a string representing a path to the corpus
            support = load_binary(support_corpus_path)
    else:
        support = None
    discourse = data_to_discourse2(corpus_name, wav_path,
                                   annotation_types=annotation_types, support_corpus = support,
                                   stop_check=stop_check, call_back=call_back)
    if feature_system_path is not None:
        # Attach the pickled feature system, upgrading older specifiers.
        feature_matrix = load_binary(feature_system_path)
        discourse.lexicon.set_feature_matrix(feature_matrix)
        discourse.lexicon.specifier = modernize.modernize_specifier(discourse.lexicon.specifier)
    return discourse
def load_directory_textgrid(corpus_name, path, annotation_types,
feature_system_path = None, support_corpus_path = None,
stop_check = None, call_back = None):
"""
Loads a directory of TextGrid files
Parameters
----------
corpus_name : str
Name of corpus
path : str
Path to directory of TextGrid files
annotation_types : list of AnnotationType
List of AnnotationType specifying how to parse the TextGrids.
Can be generated through ``inspect_discourse_textgrid``.
feature_system_path : str, optional
File path of FeatureMatrix binary to specify segments
stop_check : callable or None
Optional function to check whether to | |
PyenvError as e:
click.echo(u"Something went wrong…")
click.echo(crayons.blue(e.err), err=True)
# Print the results, in a beautiful blue…
click.echo(crayons.blue(c.out), err=True)
# Find the newly installed Python, hopefully.
version = str(version)
path_to_python = find_a_system_python(version)
try:
assert python_version(path_to_python) == version
except AssertionError:
click.echo(
"{0}: The Python you just installed is not available on your {1}, apparently."
"".format(
crayons.red("Warning", bold=True),
crayons.normal("PATH", bold=True),
),
err=True,
)
sys.exit(1)
return path_to_python
def ensure_virtualenv(three=None, python=None, site_packages=False, pypi_mirror=None):
    """Creates a virtualenv, if one doesn't exist.

    If one already exists but the caller requested a specific Python
    (``three``/``python``) or ``site_packages``, the existing virtualenv is
    removed (after confirmation when VIRTUAL_ENV is active) and this
    function recurses to create a fresh one.
    """
    from .environments import PIPENV_USE_SYSTEM
    def abort():
        # Local helper: bail out without touching the existing virtualenv.
        sys.exit(1)
    global USING_DEFAULT_PYTHON
    if not project.virtualenv_exists:
        try:
            # Ensure environment variables are set properly.
            ensure_environment()
            # Ensure Python is available.
            python = ensure_python(three=three, python=python)
            # Create the virtualenv.
            # Abort if --system (or running in a virtualenv).
            if PIPENV_USE_SYSTEM:
                click.echo(
                    crayons.red(
                        "You are attempting to re–create a virtualenv that "
                        "Pipenv did not create. Aborting."
                    )
                )
                sys.exit(1)
            do_create_virtualenv(
                python=python, site_packages=site_packages, pypi_mirror=pypi_mirror
            )
        except KeyboardInterrupt:
            # If interrupted, cleanup the virtualenv.
            cleanup_virtualenv(bare=False)
            sys.exit(1)
    # If --three, --two, or --python were passed…
    elif (python) or (three is not None) or (site_packages is not False):
        USING_DEFAULT_PYTHON = False
        # Ensure python is installed before deleting existing virtual env
        ensure_python(three=three, python=python)
        click.echo(crayons.red("Virtualenv already exists!"), err=True)
        # If VIRTUAL_ENV is set, there is a possibility that we are
        # going to remove the active virtualenv that the user cares
        # about, so confirm first.
        if "VIRTUAL_ENV" in os.environ:
            if not (
                PIPENV_YES or click.confirm("Remove existing virtualenv?", default=True)
            ):
                abort()
        click.echo(
            crayons.normal(u"Removing existing virtualenv…", bold=True), err=True
        )
        # Remove the virtualenv.
        cleanup_virtualenv(bare=True)
        # Call this function again (recursion now takes the creation branch
        # because the virtualenv no longer exists).
        ensure_virtualenv(
            three=three,
            python=python,
            site_packages=site_packages,
            pypi_mirror=pypi_mirror,
        )
def ensure_project(
    three=None,
    python=None,
    validate=True,
    system=False,
    warn=True,
    site_packages=False,
    deploy=False,
    skip_requirements=False,
    pypi_mirror=None,
    clear=False,
):
    """Ensures both Pipfile and virtualenv exist for the project.

    Creates the Pipfile if missing (unless deploying), delegates virtualenv
    creation to ``ensure_virtualenv`` (unless ``system``), and optionally
    warns — or aborts, when ``deploy`` — if the active Python does not match
    the Pipfile's ``python_version`` requirement.
    """
    from .environments import PIPENV_USE_SYSTEM
    # Clear the caches, if appropriate.
    if clear:
        print("clearing")
        sys.exit(1)
    # Automatically use an activated virtualenv.
    if PIPENV_USE_SYSTEM:
        system = True
    if not project.pipfile_exists and not deploy:
        project.touch_pipfile()
    # Skip virtualenv creation when --system was used.
    if not system:
        ensure_virtualenv(
            three=three,
            python=python,
            site_packages=site_packages,
            pypi_mirror=pypi_mirror,
        )
        if warn:
            # Warn users if they are using the wrong version of Python.
            if project.required_python_version:
                path_to_python = which("python") or which("py")
                if path_to_python and project.required_python_version not in (
                    python_version(path_to_python) or ""
                ):
                    click.echo(
                        "{0}: Your Pipfile requires {1} {2}, "
                        "but you are using {3} ({4}).".format(
                            crayons.red("Warning", bold=True),
                            crayons.normal("python_version", bold=True),
                            crayons.blue(project.required_python_version),
                            crayons.blue(python_version(path_to_python)),
                            crayons.green(shorten_path(path_to_python)),
                        ),
                        err=True,
                    )
                    click.echo(
                        " {0} and rebuilding the virtual environment "
                        "may resolve the issue.".format(crayons.green("$ pipenv --rm")),
                        err=True,
                    )
                    if not deploy:
                        click.echo(
                            " {0} will surely fail."
                            "".format(crayons.red("$ pipenv check")),
                            err=True,
                        )
                    else:
                        # In deploy mode a version mismatch is fatal.
                        click.echo(crayons.red("Deploy aborted."), err=True)
                        sys.exit(1)
    # Ensure the Pipfile exists.
    ensure_pipfile(
        validate=validate, skip_requirements=skip_requirements, system=system
    )
def shorten_path(location, bold=False):
    """Return a visually shorter representation of a given system path.

    Every path component longer than six characters is abbreviated to its
    first character; the final component is always kept intact (and is
    optionally rendered bold for terminal display).
    """
    parts = location.split(os.sep)
    # len("2long4") == 6: components longer than that collapse to one letter.
    shortened = [part[0] if len(part) > 6 else part for part in parts]
    shortened[-1] = parts[-1]
    if bold:
        shortened[-1] = str(crayons.normal(shortened[-1], bold=True))
    return os.sep.join(shortened)
def do_where(virtualenv=False, bare=True):
    """Executes the where functionality.

    Prints the location of the Pipfile (or, with ``virtualenv=True``, the
    virtualenv); ``bare`` selects machine-readable output.
    """
    if virtualenv:
        location = project.virtualenv_location
        if bare:
            click.echo(location)
        else:
            click.echo(
                "Virtualenv location: {0}".format(crayons.green(location)), err=True
            )
        return
    location = project.pipfile_location
    # Shorten the virtual display of the path to the virtualenv.
    if not bare:
        location = shorten_path(location)
    if not location:
        click.echo(
            "No Pipfile present at project home. Consider running "
            "{0} first to automatically generate a Pipfile for you."
            "".format(crayons.green("`pipenv install`")),
            err=True,
        )
    elif bare:
        click.echo(project.project_directory)
    else:
        click.echo(
            "Pipfile found at {0}.\n Considering this to be the project home."
            "".format(crayons.green(location)),
            err=True,
        )
def do_install_dependencies(
    dev=False,
    only=False,
    bare=False,
    requirements=False,
    allow_global=False,
    ignore_hashes=False,
    skip_lock=False,
    concurrent=True,
    requirements_dir=None,
    pypi_mirror=False,
):
    """Executes the install functionality.

    If requirements is True, simply spits out a requirements format to stdout
    and exits.

    Installs from Pipfile.lock by default, or from the Pipfile directly when
    ``skip_lock``/``only`` is set or no lockfile exists. Installs run through
    pip subprocesses (up to PIPENV_MAX_SUBPROCESS at a time when
    ``concurrent``); packages that fail on the first pass are retried
    sequentially, and a second failure aborts with pip's return code.
    """
    from .vendor.requirementslib.models.requirements import Requirement
    def cleanup_procs(procs, concurrent):
        # Wait for each pip subprocess and record failures for a retry pass.
        for c in procs:
            if concurrent:
                c.block()
            if "Ignoring" in c.out:
                click.echo(crayons.yellow(c.out.strip()))
            elif environments.is_verbose():
                click.echo(crayons.blue(c.out or c.err))
            # The Installation failed…
            if c.return_code != 0:
                # Save the Failed Dependency for later.
                failed_deps_list.append((c.dep, c.ignore_hash))
                # Alert the user.
                click.echo(
                    "{0} {1}! Will try again.".format(
                        crayons.red("An error occurred while installing"),
                        crayons.green(c.dep.as_line()),
                    )
                )
    if requirements:
        bare = True
    blocking = not concurrent
    # Load the lockfile if it exists, or if only is being used (e.g. lock is being used).
    if skip_lock or only or not project.lockfile_exists:
        if not bare:
            click.echo(
                crayons.normal(u"Installing dependencies from Pipfile…", bold=True)
            )
        lockfile = split_file(project._lockfile)
    else:
        with open(project.lockfile_location) as f:
            lockfile = split_file(simplejson.load(f))
        if not bare:
            click.echo(
                crayons.normal(
                    u"Installing dependencies from Pipfile.lock ({0})…".format(
                        lockfile["_meta"].get("hash", {}).get("sha256")[-6:]
                    ),
                    bold=True,
                )
            )
    # Allow pip to resolve dependencies when in skip-lock mode.
    no_deps = not skip_lock
    deps_list, dev_deps_list = merge_deps(
        lockfile,
        project,
        dev=dev,
        requirements=requirements,
        ignore_hashes=ignore_hashes,
        blocking=blocking,
        only=only,
    )
    failed_deps_list = []
    if requirements:
        # Requirements mode: print an index header plus the dependency lines
        # (without hashes) and exit without installing anything.
        index_args = prepare_pip_source_args(project.sources)
        index_args = " ".join(index_args).replace(" -", "\n-")
        deps_list = [dep for dep, ignore_hash, block in deps_list]
        dev_deps_list = [dep for dep, ignore_hash, block in dev_deps_list]
        # Output only default dependencies
        click.echo(index_args)
        if not dev:
            click.echo(
                "\n".join(d.partition("--hash")[0].strip() for d in sorted(deps_list))
            )
            sys.exit(0)
        # Output only dev dependencies
        if dev:
            click.echo(
                "\n".join(
                    d.partition("--hash")[0].strip() for d in sorted(dev_deps_list)
                )
            )
            sys.exit(0)
    procs = []
    deps_list_bar = progress.bar(
        deps_list, label=INSTALL_LABEL if os.name != "nt" else ""
    )
    for dep, ignore_hash, block in deps_list_bar:
        if len(procs) < PIPENV_MAX_SUBPROCESS:
            # Use a specific index, if specified.
            indexes, trusted_hosts, dep = parse_indexes(dep)
            index = None
            extra_indexes = []
            if indexes:
                # First index is primary; any remainder become extras.
                index = indexes[0]
                if len(indexes) > 0:
                    extra_indexes = indexes[1:]
            dep = Requirement.from_line(" ".join(dep))
            if index:
                _index = None
                try:
                    _index = project.find_source(index).get("name")
                except SourceNotFound:
                    _index = None
                dep.index = _index
                dep._index = index
            dep.extra_indexes = extra_indexes
            # Install the module.
            prev_no_deps_setting = no_deps
            # Archive installs need pip to resolve their dependencies.
            if dep.is_file_or_url and any(
                dep.req.uri.endswith(ext) for ext in ["zip", "tar.gz"]
            ):
                no_deps = False
            c = pip_install(
                dep,
                ignore_hashes=ignore_hash,
                allow_global=allow_global,
                no_deps=no_deps,
                block=block,
                index=index,
                requirements_dir=requirements_dir,
                extra_indexes=extra_indexes,
                pypi_mirror=pypi_mirror,
                trusted_hosts=trusted_hosts
            )
            # Stash metadata on the subprocess handle so cleanup_procs can
            # report and queue retries.
            c.dep = dep
            c.ignore_hash = ignore_hash
            c.index = index
            c.extra_indexes = extra_indexes
            procs.append(c)
            no_deps = prev_no_deps_setting
        if len(procs) >= PIPENV_MAX_SUBPROCESS or len(procs) == len(deps_list):
            cleanup_procs(procs, concurrent)
            procs = []
    cleanup_procs(procs, concurrent)
    # Iterate over the hopefully-poorly-packaged dependencies…
    if failed_deps_list:
        click.echo(
            crayons.normal(u"Installing initially failed dependencies…", bold=True)
        )
        for dep, ignore_hash in progress.bar(failed_deps_list, label=INSTALL_LABEL2):
            # Use a specific index, if specified.
            # Install the module.
            prev_no_deps_setting = no_deps
            if dep.is_file_or_url and any(
                dep.req.uri.endswith(ext) for ext in ["zip", "tar.gz"]
            ):
                no_deps = False
            c = pip_install(
                dep,
                ignore_hashes=ignore_hash,
                allow_global=allow_global,
                no_deps=no_deps,
                index=getattr(dep, "_index", None),
                requirements_dir=requirements_dir,
                extra_indexes=getattr(dep, "extra_indexes", None),
            )
            no_deps = prev_no_deps_setting
            # The Installation failed…
            if c.return_code != 0:
                # We echo both c.out and c.err because pip returns error details on out.
                click.echo(crayons.blue(format_pip_output(c.out)))
                click.echo(crayons.blue(format_pip_error(c.err)), err=True)
                # Return the subprocess' return code.
                sys.exit(c.return_code)
            else:
                click.echo(
                    "{0} {1}{2}".format(
                        crayons.green("Success installing"),
                        crayons.green(dep.name),
                        crayons.green("!"),
                    )
                )
def convert_three_to_python(three, python):
    """Converts a Three flag into a Python flag, and raises customer warnings
    in the process, if needed.

    An explicit *python* always wins; otherwise ``three`` maps True to "3"
    and False to "2", and anything else yields None.
    """
    if python:
        return python
    if three is True:
        return "3"
    if three is False:
        return "2"
    return None
def do_create_virtualenv(python=None, site_packages=False, pypi_mirror=None):
    """Creates a virtualenv.

    Runs ``python -m virtualenv`` as a subprocess (defaulting to
    ``sys.executable`` when *python* is not given), optionally enabling
    system site-packages and a PyPI mirror, then writes a ``.project`` file
    associating the new environment with the project directory. Exits the
    process on failure.
    """
    click.echo(
        crayons.normal(u"Creating a virtualenv for this project…", bold=True), err=True
    )
    click.echo(
        u"Pipfile: {0}".format(crayons.red(project.pipfile_location, bold=True)),
        err=True,
    )
    # Default to using sys.executable, if Python wasn't provided.
    if not python:
        python = sys.executable
    # Note the positional indices: the template renders as
    # "Using <python> (<version>) to create virtualenv…".
    click.echo(
        u"{0} {1} {3} {2}".format(
            crayons.normal("Using", bold=True),
            crayons.red(python, bold=True),
            crayons.normal(u"to create virtualenv…", bold=True),
            crayons.green("({0})".format(python_version(python))),
        ),
        err=True,
    )
    cmd = [
        sys.executable,
        "-m",
        "virtualenv",
        "--prompt=({0})".format(project.name),
        "--python={0}".format(python),
        project.get_location_for_virtualenv(),
    ]
    # Pass site-packages flag to virtualenv, if desired…
    if site_packages:
        click.echo(
            crayons.normal(u"Making site-packages available…", bold=True), err=True
        )
        cmd.append("--system-site-packages")
    if pypi_mirror:
        pip_config = {"PIP_INDEX_URL": vistir.misc.fs_str(pypi_mirror)}
    else:
        pip_config = {}
    # Actually create the virtualenv.
    with spinner():
        c = delegator.run(cmd, block=False, timeout=PIPENV_TIMEOUT, env=pip_config)
    c.block()
    click.echo(crayons.blue("{0}".format(c.out)), err=True)
    if c.return_code != 0:
        click.echo(crayons.blue("{0}".format(c.err)), err=True)
        click.echo(
            u"{0}: Failed to create virtual environment.".format(
                crayons.red("Warning", bold=True)
            ),
            err=True,
        )
        sys.exit(1)
    # Associate project directory with the environment.
    # This mimics Pew's "setproject".
    project_file_name = os.path.join(project.virtualenv_location, ".project")
    with open(project_file_name, "w") as f:
        f.write(vistir.misc.fs_str(project.project_directory))
    # Say where the virtualenv is.
    do_where(virtualenv=True, bare=False)
def parse_download_fname(fname, name):
fname, fextension = os.path.splitext(fname)
if fextension == ".whl":
fname = | |
from django import forms
from elegislative_app.models import User
from elegislative_app import models
class RegisterForm(forms.ModelForm):
    """User registration form with password confirmation.

    Collects the profile fields declared in ``Meta.fields`` plus two
    password entries, validates that the e-mail is unused and the passwords
    are long enough and match, and saves the user with a hashed password.
    """
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
    dob = forms.DateField(
        widget=forms.DateInput(
            format='%m/%d/%Y',
            attrs={
                'id': 'dob',
                'type': 'text',
                'class': 'form-control pull-right',
                'placeholder': 'Date of birth',
                'readonly':'readonly',
            }
        ),
        input_formats=('%m/%d/%Y', )
    )
    class Meta:
        model = User
        fields = ("email", "f_name", "m_name", "l_name", "gender","dob" ,"age", "address", "image",)
    def __init__(self, *args, **kwargs):
        super(RegisterForm, self).__init__(*args, **kwargs)
        # All plain text inputs share the same Bootstrap styling and differ
        # only in their placeholder text.
        placeholders = {
            'email': 'E-Mail',
            'f_name': 'First Name',
            'm_name': 'Middle Name',
            'l_name': 'Last Name',
            'age': 'Age',
            'address': 'Address',
            'password1': 'Password',
            'password2': 'Confirm Password',
        }
        for field_name, placeholder in placeholders.items():
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': placeholder,
            }
        # The gender field additionally uses the select2 widget class.
        self.fields['gender'].widget.attrs = {
            'type': 'text',
            'class': 'form-control select2',
            'placeholder': 'Gender',
        }
    def clean_email(self):
        """Reject e-mail addresses that are already registered."""
        email = self.cleaned_data.get('email')
        qs = User.objects.filter(email=email)
        if qs.exists():
            raise forms.ValidationError("E-mail is taken")
        return email
    def clean_password2(self):
        """Validate the password pair: minimum length and matching entries.

        Raises forms.ValidationError when the password is shorter than 8
        characters or the two entries differ.
        """
        # BUGFIX: this previously read the corrupted key "<PASSWORD>" for the
        # confirmation value, so password2 was always None (crashing the
        # length check and returning None as the cleaned value).
        # .get() may also return None when a field failed earlier validation;
        # fall back to '' so the length checks below cannot crash.
        password1 = self.cleaned_data.get("password1") or ""
        password2 = self.cleaned_data.get("password2") or ""
        if len(password1) < 8 and len(password2) < 8:
            raise forms.ValidationError("Password is too short!")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords don't match")
        return password2
    def save(self, commit=True):
        """Save the user, storing the provided password in hashed format."""
        user = super(RegisterForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class AgendaForm(forms.ModelForm):
    """Form for composing a new agenda; filing/approval fields are excluded
    because they are managed elsewhere."""
    class Meta:
        model = models.AgendaModel
        exclude = ("date_filed", "status", "is_signed", "hard_copy")
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The short text inputs share identical styling and differ only in
        # their placeholder text.
        for field_name, placeholder in (('no', 'Agenda No'),
                                        ('title', 'Title'),
                                        ('version', 'Version')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': placeholder,
                'required': 'required',
            }
        # Large WYSIWYG-style textarea for the agenda body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
class EditAgendaForm(forms.ModelForm):
    """Form for editing an existing agenda, including its status."""
    class Meta:
        model = models.AgendaModel
        exclude = ("date_filed",)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The short text inputs share identical styling and differ only in
        # their placeholder text.
        for field_name, placeholder in (('no', 'Agenda No'),
                                        ('title', 'Title'),
                                        ('version', 'Version')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': placeholder,
                'required': 'required',
            }
        # Large WYSIWYG-style textarea for the agenda body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
        # Status is a select2 dropdown.
        self.fields['status'].widget.attrs = {
            'class': 'form-control select2',
            'style': 'width: 100%',
        }
class ResolutionForm(forms.ModelForm):
    """Form for composing a new resolution; linkage/filing/approval fields
    are excluded because they are managed elsewhere."""
    class Meta:
        model = models.ResolutionModel
        exclude = ("agenda_fk", "date_filed", "status", "is_signed", "hard_copy")
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The short text inputs share identical styling and differ only in
        # their placeholder text.
        for field_name, placeholder in (('no', 'Resolution No'),
                                        ('title', 'Title'),
                                        ('version', 'Version')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': placeholder,
                'required': 'required',
            }
        # Large WYSIWYG-style textarea for the resolution body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
class EditResolutionForm(forms.ModelForm):
    """ModelForm for editing an existing ResolutionModel; exposes the status
    field (select2) in addition to the create-form fields."""
    class Meta:
        model = models.ResolutionModel
        exclude = ("agenda_fk", "date_filed",)
    def __init__(self, *args, **kwargs):
        super(EditResolutionForm, self).__init__(*args, **kwargs)
        # Plain text inputs: identical Bootstrap attrs, per-field placeholder.
        for field_name, hint in (('no', 'Resolution No'),
                                 ('title', 'Title'),
                                 ('version', 'Version')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': hint,
                'required': 'required',
            }
        # Tall textarea for the document body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
        # Status is editable on the edit form, rendered via select2.
        self.fields['status'].widget.attrs = {
            'class': 'form-control select2',
            'style': 'width: 100%',
        }
class OrdinanceForm(forms.ModelForm):
    """ModelForm for creating an OrdinanceModel; workflow fields, the agenda
    foreign key and the veto message are excluded."""
    class Meta:
        model = models.OrdinanceModel
        exclude = ("agenda_fk", "date_filed", "status", "is_signed", "hard_copy", "veto_message")
    def __init__(self, *args, **kwargs):
        super(OrdinanceForm, self).__init__(*args, **kwargs)
        # Plain text inputs: identical Bootstrap attrs, per-field placeholder.
        for field_name, hint in (('no', 'Ordinance No'),
                                 ('title', 'Title'),
                                 ('version', 'Version')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': hint,
                'required': 'required',
            }
        # Tall textarea for the document body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
class EditOrdinanceForm(forms.ModelForm):
    """ModelForm for editing an existing OrdinanceModel; additionally exposes
    the veto message and the status (select2) fields.

    Fixes a copy/paste bug: the 'no' placeholder previously said
    "Resolution No" on this ordinance form; it now matches OrdinanceForm's
    "Ordinance No".
    """
    class Meta:
        model = models.OrdinanceModel
        exclude = ("agenda_fk", "date_filed",)
    def __init__(self, *args, **kwargs):
        super(EditOrdinanceForm, self).__init__(*args, **kwargs)
        # Plain text inputs: identical Bootstrap attrs, per-field placeholder.
        for field_name, hint in (('no', 'Ordinance No'),
                                 ('title', 'Title'),
                                 ('version', 'Version'),
                                 ('veto_message', 'veto message')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': hint,
                'required': 'required',
            }
        # Tall textarea for the document body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
        # Status is editable on the edit form, rendered via select2.
        self.fields['status'].widget.attrs = {
            'class': 'form-control select2',
            'style': 'width: 100%',
        }
class CommitteeReportResolutionForm(forms.ModelForm):
    """ModelForm for creating a CommitteeReportResolutionModel; workflow
    fields and the parent-report foreign key are excluded."""
    class Meta:
        model = models.CommitteeReportResolutionModel
        exclude = ("resolution_committee_report_fk", "date_filed", "status", "is_signed", "hard_copy")
    def __init__(self, *args, **kwargs):
        super(CommitteeReportResolutionForm, self).__init__(*args, **kwargs)
        # Plain text inputs: identical Bootstrap attrs, per-field placeholder.
        for field_name, hint in (('no', 'Committee No'),
                                 ('title', 'Title'),
                                 ('version', 'Version')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': hint,
                'required': 'required',
            }
        # Tall textarea for the document body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
class EditCommitteeReportResolutionForm(forms.ModelForm):
    """ModelForm for editing a CommitteeReportResolutionModel; exposes the
    status field (select2) in addition to the create-form fields.

    Fixes a copy/paste inconsistency: the 'no' placeholder previously said
    "Resolution No"; it now matches CommitteeReportResolutionForm's
    "Committee No".
    """
    class Meta:
        model = models.CommitteeReportResolutionModel
        exclude = ("resolution_committee_report_fk", "date_filed",)
    def __init__(self, *args, **kwargs):
        super(EditCommitteeReportResolutionForm, self).__init__(*args, **kwargs)
        # Plain text inputs: identical Bootstrap attrs, per-field placeholder.
        for field_name, hint in (('no', 'Committee No'),
                                 ('title', 'Title'),
                                 ('version', 'Version')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': hint,
                'required': 'required',
            }
        # Tall textarea for the document body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
        # Status is editable on the edit form, rendered via select2.
        self.fields['status'].widget.attrs = {
            'class': 'form-control select2',
            'style': 'width: 100%',
        }
class CommitteeReportOrdinanceForm(forms.ModelForm):
    """ModelForm for creating a CommitteeReportOrdinanceModel; workflow
    fields and the parent-report foreign key are excluded."""
    class Meta:
        model = models.CommitteeReportOrdinanceModel
        exclude = ("ordinance_committee_report_fk", "date_filed", "status", "is_signed", "hard_copy")
    def __init__(self, *args, **kwargs):
        super(CommitteeReportOrdinanceForm, self).__init__(*args, **kwargs)
        # Plain text inputs: identical Bootstrap attrs, per-field placeholder.
        for field_name, hint in (('no', 'Committee No'),
                                 ('title', 'Title'),
                                 ('version', 'Version')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': hint,
                'required': 'required',
            }
        # Tall textarea for the document body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
class EditCommitteeReportOrdinanceForm(forms.ModelForm):
    """ModelForm for editing a CommitteeReportOrdinanceModel; exposes the
    status field (select2) in addition to the create-form fields.

    Fixes a copy/paste inconsistency: the 'no' placeholder previously said
    "Resolution No"; it now matches CommitteeReportOrdinanceForm's
    "Committee No".
    """
    class Meta:
        model = models.CommitteeReportOrdinanceModel
        exclude = ("ordinance_committee_report_fk", "date_filed",)
    def __init__(self, *args, **kwargs):
        super(EditCommitteeReportOrdinanceForm, self).__init__(*args, **kwargs)
        # Plain text inputs: identical Bootstrap attrs, per-field placeholder.
        for field_name, hint in (('no', 'Committee No'),
                                 ('title', 'Title'),
                                 ('version', 'Version')):
            self.fields[field_name].widget.attrs = {
                'type': 'text',
                'class': 'form-control',
                'placeholder': hint,
                'required': 'required',
            }
        # Tall textarea for the document body.
        self.fields['content'].widget.attrs = {
            'id': 'compose_textarea',
            'class': 'form-control',
            'style': 'height: 300px',
            'required': 'required',
        }
        # Status is editable on the edit form, rendered via select2.
        self.fields['status'].widget.attrs = {
            'class': 'form-control select2',
            'style': 'width: 100%',
        }
class MOMForm(forms.ModelForm):
class Meta:
model = models.MOMModel
exclude = ("date_filed","status","is_signed","hard_copy")
def __init__(self, *args, **kwargs):
super(MOMForm, self).__init__(*args, **kwargs)
self.fields['no'].widget.attrs = {
'type': 'text',
'class': 'form-control',
'placeholder': 'Minutes of the Meeting No',
'required': 'required',
}
self.fields['title'].widget.attrs = {
'type': | |
from pathlib import Path
import argparse
# https://matplotlib.org/stable/api/tri_api.html
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import pandas as pd
import seaborn as sns
from matplotlib.tri import Triangulation, UniformTriRefiner, LinearTriInterpolator, \
CubicTriInterpolator
from plotly.figure_factory import create_trisurf
import plotly.graph_objects as go
import plotly.express as px
def plot_time(input_path):
    """Plot the distribution of per-stage wall-clock times over all trials.

    Reads the trials CSV via get_data(), melts the timing columns into long
    form and writes a seaborn boxen plot ('time.png') plus an interactive
    plotly box plot ('time.html') next to *input_path*.
    """
    p = Path(input_path)
    df = get_data(input_path)
    # Total trial duration in seconds; FEniA time is whatever part of the
    # total is not accounted to gmsh.
    df['total'] = df['datetime_complete'] - df['datetime_start']
    df = df.astype({'total': 'timedelta64[s]'})
    df['fenia.output.time.total'] = df['total'] - df['gmsh.output.time.total']
    # Timing columns to melt into rows (one box per column).
    ts = ['total',
          'fenia.output.time.total',
          'gmsh.output.time.total',
          'gmsh.output.time.generate',
          'gmsh.output.time.register',
          'gmsh.output.time.boolean',
          'gmsh.output.time.optimize',
          'gmsh.output.time.write',
          'gmsh.output.time.transform',
          'gmsh.output.time.zone',
          'gmsh.output.time.synchronize',
          'gmsh.output.time.structure',
          'gmsh.output.time.pre_unregister',
          'gmsh.output.time.quadrate',
          'gmsh.output.time.refine',
          'gmsh.output.time.size',
          'gmsh.output.time.smooth']
    # Columns kept as identifiers; also shown as plotly hover data below.
    ids = ['number',
           'gmsh.output.mesh.elements',
           'gmsh.output.mesh.nodes',
           'gmsh.output.mesh.blocks',
           'gmsh.output.mesh.volumes',
           'gmsh.input.algorithm.2d',
           'gmsh.input.algorithm.3d',
           'gmsh.output.mesh.metric.icn.min',
           'gmsh.output.mesh.metric.ige.min'
           ]
    # Wide -> long: one (time, value) row per timing column per trial.
    df = df.melt(var_name='time',
                 id_vars=ids,
                 value_vars=ts,
                 value_name='value')
    # Seconds -> hours for plotting.
    df['value'] /= 3600
    sns.set(style="ticks")
    g = sns.catplot(x="value", y="time",
                    kind="boxen", data=df,
                    # scale='width',
                    height=8, aspect=1.777)
    g.set(xscale="log")
    g.set_xlabels(label='hours')
    # Strip the common '.output.time' infix from the tick labels.
    g.set_yticklabels(labels=[x.replace('.output.time', '') for x in ts])
    plt.grid() # just add this
    g.savefig(p.with_name('time').with_suffix('.png'))
    hover = ids + ['time', 'value']
    fig = px.box(df, x="value", y="time", log_x=True,
                 hover_data=hover, color="time",
                 labels={
                     "value": "hours",
                 }
                 # , box=True
                 )
    # Box colors already encode the stage, so hide the redundant y axis.
    fig.update_layout(yaxis_visible=False, yaxis_showticklabels=False)
    fig.write_html(p.with_name(f'time').with_suffix('.html'))
def plot_tri(input_path):
    """Triangulated-surface plots of result columns over two geometry axes.

    For each combination of containers-per-borehole and EBS thickness,
    plots every column in *zs* over (borehole spacing, container spacing)
    as a 3D trisurf HTML, a 2D tricontour PNG, and linearly/cubically
    interpolated variants of both. Output files are written next to
    *input_path*.
    """
    def tri_by_xyz(p, df, x, y, z, x_label, y_label, title, cb_label='', suffix=''):
        """Triangulate df[x], df[y] and plot df[z] over the triangulation.

        Writes 'tri_3d_{z}{suffix}.html', 'tri_2d_{z}{suffix}.png' and the
        interpolated 'tri_{2d,3d}_{linear,cubic}_...' files into p.parent.
        Silently returns if the points cannot be triangulated.
        """
        xs = df[x].to_numpy()
        ys = df[y].to_numpy()
        zs = df[z].to_numpy()
        plt.figure(figsize=(8, 6))
        # Triangulate
        try:
            tri = Triangulation(xs, ys)
        except ValueError as e:
            # Not enough / degenerate points: report and skip this column.
            print(e)
            print(suffix, z)
            return
        # Write
        fig = create_trisurf(x=tri.x, y=tri.y, z=zs,
                             colormap='Jet',
                             show_colorbar=True,
                             plot_edges=False,
                             # title=title,
                             simplices=tri.triangles)
        fig.update_layout(scene={
            "xaxis_title": x_label,
            "yaxis_title": y_label,
            "zaxis_title": cb_label})
        fig.write_html(p.parent / f'tri_3d_{z}{suffix}.html')
        # 2D filled contours over the same triangulation, with the raw
        # sample points overlaid as small black dots.
        plt.tricontourf(tri, zs, levels=10,
                        cmap=cm.jet, alpha=1,
                        # norm=plt.Normalize(vmax=zs.max(), vmin=zs.min())
                        )
        plt.plot(xs, ys, 'ko', ms=1)
        plt.colorbar(label=cb_label)
        plt.title(title)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        plt.savefig(p.parent / f'tri_2d_{z}{suffix}.png', bbox_inches='tight')
        # Interpolate onto a regular grid (default 50x50 from linspace).
        xx = np.linspace(min(xs), max(xs))
        yy = np.linspace(min(ys), max(ys))
        X, Y = np.meshgrid(xx, yy)
        interpolators = {'linear': LinearTriInterpolator(tri, zs, trifinder=None),
                         'cubic': CubicTriInterpolator(tri, zs, kind='min_E',
                                                       trifinder=None, dz=None)}
        for k, v in interpolators.items():
            Z = v(X, Y)
            fig = go.Figure(data=[go.Surface(x=xx, y=yy, z=Z, colorscale='Jet')])
            fig.update_layout(
                autosize=True,
                # title=title.replace('\n', '<br>'),
                width=500, height=500, scene={
                    "xaxis_title": x_label,
                    "yaxis_title": y_label,
                    "zaxis_title": cb_label})
            fig.write_html(p.parent / f'tri_3d_{k}_{z}{suffix}.html')
            # Reuse the current pyplot figure for the gridded contours.
            plt.clf()
            plt.contourf(X, Y, Z, levels=10,
                         cmap=cm.jet, alpha=1,
                         norm=plt.Normalize(vmax=Z.max(), vmin=Z.min()))
            plt.plot(xs, ys, 'ko', ms=1)
            plt.colorbar(label=cb_label)
            plt.title(title)
            plt.xlabel(x_label)
            plt.ylabel(y_label)
            plt.savefig(p.parent / f'tri_2d_{k}_{z}{suffix}.png', bbox_inches='tight')
        # Refine
        # refiner = UniformTriRefiner(tri)
        # tri_rf, zs_rf = refiner.refine_field(zs, subdiv=3)
        # fig = create_trisurf(x=tri_rf.x, y=tri_rf.y, z=zs_rf,
        #                      colormap='Jet',
        #                      show_colorbar=True,
        #                      plot_edges=False,
        #                      # title=title,
        #                      simplices=tri_rf.triangles)
        # fig.update_layout(scene={
        #     "xaxis_title": x_label,
        #     "yaxis_title": y_label,
        #     "zaxis_title": cb_label})
        # fig.write_html(p.with_name(f'tri_3d_ref_{z}').with_suffix('.html'))
    def plot_reg(xs, ys, zs, q1, q2, c='k', ls='--', label=None):
        """Linear fit of ys on xs restricted to points whose zs value lies
        between the q1-th and q2-th percentile; draws the fitted line on
        the current pyplot axes. (Currently unused; see calls below.)"""
        p1, p2 = np.percentile(zs, q1), np.percentile(zs, q2)
        ids = [i for i, x in enumerate(zs) if p1 < x <= p2]
        coef = np.polyfit(xs[ids], ys[ids], 1)
        poly1d_fn = np.poly1d(coef)
        plt.plot(xs[ids], poly1d_fn(xs[ids]), color=c, linestyle=ls, label=label)
    # plot_reg(xs, ys, ts, 0, 100, c='b')
    # # plot_reg(xs, ys, 0, 25, c='g')
    # # plot_reg(xs, ys, 95, 100, c='r')
    # plot_reg(xs, ys, ts, 0, 50, c='g')
    # plot_reg(xs, ys, ys, 0, 50, c='g')
    # plot_reg(xs, ys, ts, 50, 100, c='r')
    # plot_reg(xs, ys, ts, 0, 100, c='b')
    # plot_reg(xs, ys, ts, 0, 100, c='k')
    # plot_reg(xs, ys, ts, 0, 5, c='g')
    # plot_reg(xs, ys, ys, 0, 5, c='g')
    # plot_reg(xs, ys, ys, 95, 100, c='r')
    # plot_reg(xs, ys, ts, 95, 100, c='r')
    # lgd = plt.legend(loc='center left', bbox_to_anchor=(1.2, 0.5))
    p = Path(input_path)
    # fixed = {
    #     # 'gmsh.input.geometry.rock.dz': 50.0,
    #     'gmsh.input.geometry.container.per_borehole': 2,
    #     'gmsh.input.geometry.ebs.dr': 1.0
    # }
    # No extra filters: slicing is done manually in the loops below.
    fixed = {}
    df = get_data(input_path, fixed)
    # Result columns to plot as surfaces.
    zs = ['fenia.output.heat.temperature.max',
          'fenia.output.heat.temperature.filling.max.value',
          'fenia.output.heat.temperature.ebs.max.value',
          'fenia.output.heat.temperature.rock.max.value',
          'fenia.output.heat.temperature.cast_iron.max.value',
          'fenia.input.heat.property.filling.source.limit',
          'fenia.input.heat.property.filling.source.limit.per_container',
          'fenia.input.heat.property.filling.source.limit.150',
          'fenia.input.heat.property.filling.source.limit.per_container.150'
          ]
    # Surface axes: borehole spacing (x) vs container spacing (y).
    x = 'gmsh.input.geometry.borehole.dx'
    y = 'gmsh.input.geometry.ebs.dh'
    # Column name -> human-readable (Russian) axis/title label.
    c2l = {
        'fenia.output.heat.temperature.max': 'Максимальная температура',
        'fenia.output.heat.temperature.filling.max.value': 'Максимальная температура внутри контейнера',
        'fenia.output.heat.temperature.ebs.max.value': 'Максимальная температура ИББ',
        'fenia.output.heat.temperature.rock.max.value': 'Максимальная температура вмещающей породы',
        'fenia.output.heat.temperature.cast_iron.max.value': 'Максимальная температура контейнера',
        'gmsh.input.geometry.borehole.dx': 'Расстояние между скважинами, м',
        'gmsh.input.geometry.ebs.dh': 'Расстояние между контейнерами, м',
        'fenia.input.heat.property.filling.source.limit': 'Максимально допустимое тепловыделение, Вт/м3',
        'fenia.input.heat.property.filling.source.limit.per_container': 'Максимально допустимое тепловыделение на контейнер, Вт',
        'fenia.input.heat.property.filling.source.limit.150': 'Максимально допустимое тепловыделение (150°C), Вт/м3',
        'fenia.input.heat.property.filling.source.limit.per_container.150': 'Максимально допустимое тепловыделение на контейнер (150°C), Вт'
    }
    # One plot set per (containers per borehole, EBS thickness) slice.
    n_containers = [2, 5, 10]
    drs = [0.5, 1.0]
    for n in n_containers:
        df2 = df[df["gmsh.input.geometry.container.per_borehole"] == n]
        for dr in drs:
            df3 = df2[df2["gmsh.input.geometry.ebs.dr"] == dr]
            for z in zs:
                tri_by_xyz(p, df3, x, y, z,
                           x_label=c2l[x],
                           y_label=c2l[y],
                           cb_label='',
                           suffix=f'_nc-{n}_dr-{dr}',
                           title=f'{c2l[z]}\n'
                                 f'после {len(df3)} расчетов'
                                 f'\nпри 40 контейнерах, {n} на скважину, '
                                 # f'\nглубине {fixed.get("gmsh.input.geometry.rock.dz", "все")} м, '
                                 f'толщине ИББ {dr} м'
                                 # f'\nтепловыделении 1000 Вт/м³'
                           )
def plot_slice(input_path):
    """Plot max EBS temperature against one spacing parameter at a time.

    For a fixed slice (rock depth 50 m, EBS thickness 0.5 m) produces two
    views: temperature vs borehole spacing with container spacing as hue
    ('tx_h.png' / 'tx_h.html'), and temperature vs container spacing with
    borehole spacing as hue ('th_x.png' / 'th_x.html'). PNGs are seaborn
    quadratic regression plots; HTMLs are plotly LOWESS scatter plots.
    """
    p = Path(input_path)
    # Parameter-space slice forwarded to get_data() as equality filters.
    fixed = {
        'gmsh.input.geometry.rock.dz': 50.0,
        'gmsh.input.geometry.ebs.dr': 0.5
    }
    # 'gmsh.input.geometry.ebs.dh',
    # 'gmsh.input.geometry.ebs.dr',
    df = get_data(input_path, fixed)
    # Cast the hue column to str so each value gets a discrete color.
    df = df.astype({'gmsh.input.geometry.ebs.dh': 'str'})
    fig = sns.lmplot(data=df,
                     x='gmsh.input.geometry.borehole.dx',
                     y='fenia.output.heat.temperature.ebs.max.value',
                     hue='gmsh.input.geometry.ebs.dh',
                     ci=None,
                     order=2
                     )
    fig.set(xlabel='Расстояние между скважинами, м',
            ylabel='Максимальная температура ИББ, °C')
    # NOTE(review): _legend is a private FacetGrid attribute; there is no
    # public setter for the legend title in this seaborn API.
    fig._legend.set_title('Расстояние между\nконтейнерами, м')
    fig.savefig(p.with_name('tx_h').with_suffix('.png'))
    fig = px.scatter(df,
                     x="gmsh.input.geometry.borehole.dx",
                     y='fenia.output.heat.temperature.ebs.max.value',
                     color='gmsh.input.geometry.ebs.dh',
                     trendline="lowess",
                     trendline_options=dict(frac=0.1),
                     labels={
                         "gmsh.input.geometry.borehole.dx": "Расстояние между скважинами, м",
                         "fenia.output.heat.temperature.ebs.max.value": "Максимальная температура ИББ, °C",
                         "gmsh.input.geometry.ebs.dh": "Расстояние между контейнерами, м"
                     })
    fig.write_html(p.with_name('tx_h').with_suffix('.html'))
    # Second view: swap the roles of the two spacing parameters.
    p = Path(input_path)
    fixed = {
        'gmsh.input.geometry.rock.dz': 50.0,
        'gmsh.input.geometry.ebs.dr': 0.5
    }
    df = get_data(input_path, fixed)
    df = df.astype({'gmsh.input.geometry.borehole.dx': 'str'})
    fig = sns.lmplot(data=df,
                     x='gmsh.input.geometry.ebs.dh',
                     y='fenia.output.heat.temperature.ebs.max.value',
                     hue='gmsh.input.geometry.borehole.dx',
                     ci=None,
                     order=2)
    fig.set(xlabel='Расстояние между контейнерами, м',
            ylabel='Максимальная температура ИББ, °C')
    fig._legend.set_title('Расстояние между\nскважинами, м')
    fig.savefig(p.with_name('th_x').with_suffix('.png'))
    fig = px.scatter(df,
                     x='gmsh.input.geometry.ebs.dh',
                     y='fenia.output.heat.temperature.ebs.max.value',
                     color='gmsh.input.geometry.borehole.dx',
                     trendline="lowess",
                     trendline_options=dict(frac=0.1),
                     labels={
                         "gmsh.input.geometry.ebs.dh": "Расстояние между контейнерами, м",
                         "fenia.output.heat.temperature.ebs.max.value": "Максимальная температура ИББ, °C",
                         "gmsh.input.geometry.borehole.dx": "Расстояние между скважинами, м"
                     })
    fig.write_html(p.with_name('th_x').with_suffix('.html'))
def plot_tsne(input_path):
    """Project trial parameters/results to 2D with t-SNE and save scatters.

    Writes two interactive HTML scatter plots next to *input_path*:
    't_tsne.html' colored by maximum temperature and 'ev_tsne.html'
    colored by excavated volume.

    Fix: the frame returned by a column selection is a view, so adding the
    embedding columns to it triggered pandas' chained-assignment warning;
    an explicit .copy() makes the write unambiguous.
    """
    from collections import OrderedDict
    from sklearn import manifold
    from sklearn.preprocessing import StandardScaler
    import plotly.express as px
    p = Path(input_path)
    df = get_data(input_path)
    # Feature columns embedded: geometry inputs plus two key outputs.
    cs = ['gmsh.input.geometry.ebs.dh',
          'gmsh.input.geometry.ebs.dr',
          'gmsh.input.geometry.borehole.dx',
          'gmsh.input.geometry.rock.dz',
          'gmsh.input.geometry.rock.dnz',
          'gmsh.input.geometry.rock.dx',
          'gmsh.input.geometry.rock.dy',
          'volume.excavated',
          'fenia.output.heat.temperature.max']
    # Standardize so no single column dominates the pairwise distances.
    X = StandardScaler().fit_transform(df[cs].to_numpy())
    n_components = 2
    # Manifold methods; only t-SNE is used below, MDS is kept around for
    # quick experimentation.
    methods = OrderedDict()
    methods["MDS"] = manifold.MDS(n_components, max_iter=100, n_init=1)
    methods["t-SNE"] = manifold.TSNE(n_components=n_components, init="pca", random_state=0)
    Y = methods['t-SNE'].fit_transform(X)
    # Hover columns shown in the plots; 'number' identifies the trial.
    hover = ['number'] + cs
    # .copy() avoids SettingWithCopy when adding the embedding columns.
    df = df[hover].copy()
    df['x'] = Y[:, 0]
    df['y'] = Y[:, 1]
    # (filename stem, color column, hover columns) per output plot.
    plots = (('t_tsne', 'fenia.output.heat.temperature.max', hover),
             ('ev_tsne', 'volume.excavated', cs))
    for stem, color, hover_cols in plots:
        fig = px.scatter(
            df,
            x='x',
            y='y',
            color=color,
            hover_name="number",
            hover_data=hover_cols,
        )
        # t-SNE axes carry no physical meaning; hide them.
        fig.update_yaxes(visible=False, showticklabels=False)
        fig.update_xaxes(visible=False, showticklabels=False)
        fig.write_html(p.with_name(stem).with_suffix('.html'))
def plot_tv(input_path):
    """Write an interactive temperature-vs-excavated-volume scatter plot.

    Reads trial data via get_data() and saves 'tv.html' next to
    *input_path*; each point's hover shows the geometry inputs.
    """
    import plotly.express as px
    source = Path(input_path)
    # Columns kept for plotting and hover display.
    columns = ['number',
               'gmsh.input.geometry.ebs.dh',
               'gmsh.input.geometry.ebs.dr',
               'gmsh.input.geometry.borehole.dx',
               'gmsh.input.geometry.rock.dz',
               'gmsh.input.geometry.rock.dnz',
               'gmsh.input.geometry.rock.dx',
               'gmsh.input.geometry.rock.dy',
               'volume.excavated',
               'fenia.output.heat.temperature.max']
    frame = get_data(input_path)[columns]
    scatter = px.scatter(
        frame,
        x='volume.excavated',
        y='fenia.output.heat.temperature.max',
        hover_name="number",
        hover_data=columns,
    )
    scatter.write_html(source.with_name('tv').with_suffix('.html'))
def get_data(input_path, fixed=None):
fixed = {} if fixed is None else fixed
p = Path(input_path)
df = pd.read_csv(p,
parse_dates=['datetime_start', 'datetime_complete', 'duration'],
# date_parser=lambda x: pd.datetime.strptime(x, '%Y%m%d:%H:%M:%S.%f')
)
df = df.rename(columns={x: x[23:] if x.startswith('user_attrs_features_~~.') else x for x in df.columns})
# Filter
df = df[df["state"] == "COMPLETE"]
print(f'{len(df)} completed trials')
df = df[df["gmsh.input.geometry.container.number"] == 40]
# df = df[df["gmsh.input.geometry.container.per_borehole"] == 5]
df = df[df["values_2"] > 110]
df = df[df["values_2"] < 2000]
df = df[df['fenia.input.heat.property.filling.source.value'] == 1000]
| |
<reponame>jkleczar/ttslab<gh_stars>0
# -*- coding: utf-8 -*-
""" Initial phoneset implementation for the Yoruba voice...
"""
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
import os
import re
import codecs
import unicodedata
from tempfile import mkstemp
import numpy as np
from .. phoneset import Phoneset
from .. g2p import G2P_Rewrites_Semicolon, GraphemeNotDefined, NoRuleFound
from .. defaultvoice import LwaziMultiHTSVoice
import ttslab.hts_labels_tone as hts_labels_tone
from .. synthesizer_htsme import SynthesizerHTSME
from . yoruba_orth2tones import word2tones
from ttslab.waveform import Waveform
from ttslab.trackfile import Track
from .. pronundict import PronunLookupError
## SOME UPDATES REVERTED IN ORDER TO PROCESS LEGACY DATA
class YorubaPhoneset(Phoneset):
    """ Developed for PhD studies, based on Yoruba data received from
        Etienne Barnard...
        DEMITASSE: check again later when the phoneset/language is more familiar!

        Defines the Yoruba phone inventory (IPA keys with articulatory
        feature sets), an ASCII-safe phone-name mapping, and a basic
        syllabification routine.
    """
    def __init__(self):
        Phoneset.__init__(self)
        # General phoneset metadata used by the rest of ttslab.
        self.features = {"name": "Yoruba Phoneset",
                         "silence_phone": "pau",
                         "closure_phone": "pau_cl"
                         }
        # Phone -> set of articulatory features. Keys are IPA strings
        # (some with combining diacritics, e.g. nasalized vowels).
        self.phones = {"pau" : set(["pause"]),
                       "pau_cl" : set(["closure"]),
                       "ʔ" : set(["glottal-stop"]),
                       #vowels
                       "a" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
                       "ã" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front", "articulation_nasalized"]),
                       "e" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
                       "ɛ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
                       "ɛ̃" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front", "articulation_nasalized"]),
                       "i" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
                       "ĩ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front", "articulation_nasalized"]),
                       "o" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
                       "õ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded", "articulation_nasalized"]),
                       "ɔ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
                       "ɔ̃" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded", "articulation_nasalized"]),
                       "u" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back"]),
                       "ũ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back", "articulation_nasalized"]),
                       #consonants
                       "b" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial", "voiced"]),
                       "d" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar", "voiced"]),
                       "f" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental"]),
                       "g" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "voiced"]),
                       "gb" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "place_bilabial", "voiced"]),
                       "h" : set(["consonant", "manner_fricative", "place_glottal"]),
                       "j" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_palatal", "voiced"]),
                       "dʒ" : set(["class_consonantal", "consonant", "manner_affricate", "place_alveolar", "place_post-alveolar", "voiced"]),
                       "k" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar"]),
                       "l" : set(["class_sonorant", "class_consonantal", "consonant", "manner_approximant", "manner_liquid", "manner_lateral", "place_alveolar", "voiced"]),
                       "m" : set(["class_sonorant", "class_syllabic", "class_consonantal", "consonant", "manner_nasal", "place_bilabial", "voiced"]),
                       "n" : set(["class_sonorant", "class_syllabic", "class_consonantal", "consonant", "manner_nasal", "place_alveolar", "voiced"]),
                       "kp" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "place_bilabial"]),
                       "r" : set(["class_sonorant", "class_consonantal", "consonant", "manner_trill", "place_alveolar", "voiced"]),
                       "s" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar"]),
                       "ʃ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar"]),
                       "t" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar"]),
                       "w" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_labial", "place_velar", "voiced"])
                       }
        # IPA phone -> ASCII-safe name (e.g. for HTK/HTS label files).
        self.map = {"pau" : "pau",
                    "pau_cl" : "pau_cl",
                    "ʔ" : "pau_gs",
                    "a" : "a",
                    "ã" : "an",
                    "e" : "e",
                    "ɛ" : "E",
                    "ɛ̃" : "En",
                    "i" : "i",
                    "ĩ" : "in",
                    "o" : "o",
                    "õ" : "on",
                    "ɔ" : "O",
                    "ɔ̃" : "On",
                    "u" : "u",
                    "ũ" : "un",
                    "b" : "b",
                    "d" : "d",
                    "dʒ" : "dZ",
                    "f" : "f",
                    "g" : "g",
                    "gb" : "gb",
                    "h" : "h",
                    "j" : "j",
                    "k" : "k",
                    "kp" : "kp",
                    "l" : "l",
                    "m" : "m",
                    "n" : "n",
                    "r" : "r",
                    "s" : "s",
                    "t" : "t",
                    "ʃ" : "S",
                    "w" : "w"
                    }
    def is_vowel(self, phonename):
        """True if the phone carries the 'vowel' feature."""
        return "vowel" in self.phones[phonename]
    def is_consonant(self, phonename):
        """True if the phone carries the 'consonant' feature."""
        return "consonant" in self.phones[phonename]
    def is_syllabicconsonant(self, phonename):
        """True for syllabic consonants (e.g. the syllabic nasals m, n)."""
        return "class_syllabic" in self.phones[phonename] and "consonant" in self.phones[phonename]
    def syllabify(self, phonelist):
        """ Basic syllabification, based on the syllabification scheme
            devised by <NAME> for isiZulu (Nguni language).

            Returns a list of syllables, each a list of phone names;
            syllable breaks are placed after vowels and after syllabic
            consonants that precede another consonant.
        """
        sylls = [[]]
        phlist = list(phonelist)
        while phlist:
            phone = phlist[0]
            # Look ahead up to two phones; IndexError near the end of the
            # list simply skips the lookahead rules.
            try:
                nphone = phlist[1]
                nnphone = phlist[2]
                #Syllabic consonant followed by C:
                if (self.is_syllabicconsonant(phone) and
                    self.is_consonant(nphone)):
                    #sC.C
                    sylls[-1].append(phlist.pop(0))
                    if phlist: sylls.append([])
                    continue
                ##DEMITASSE: Yoruba doesn't seem to have these:
                ##########
                # #If there is a three phone cluster:
                if (self.is_vowel(phone) and
                    not self.is_vowel(nphone) and
                    not self.is_vowel(nnphone)):
                    #VC.C
                    sylls[-1].append(phlist.pop(0))#phone
                    sylls[-1].append(phlist.pop(0))#nphone
                    if phlist: sylls.append([])
                    continue
            except IndexError:
                pass
            if self.is_vowel(phone):
                #V.Any
                sylls[-1].append(phlist.pop(0))
                if phlist: sylls.append([])
                continue
            #anything not caught above is added to current syl...
            sylls[-1].append(phlist.pop(0))
        return sylls
class SynthesizerHTSME_Tone(SynthesizerHTSME):
    """HTS-ME synthesizer that emits tone-aware full-context labels
    (features p..m from hts_labels_tone) and keeps the engine's generated
    F0 track on the utterance."""
    def hts_label(self, utt, processname):
        """Build one HTS label line per item in the Segment relation and
        store the list on utt["hts_label"]; returns utt."""
        lab = []
        starttime = 0
        for phone_item in utt.get_relation("Segment"):
            # End times are optional (absent before alignment). When
            # present they are converted to integer HTK time units.
            if "end" in phone_item:
                endtime = hts_labels_tone.float_to_htk_int(phone_item["end"])
            else:
                endtime = None
            # Context features p..m, joined with '/' per HTS convention.
            phlabel = [hts_labels_tone.p(phone_item),
                       hts_labels_tone.a(phone_item),
                       hts_labels_tone.b(phone_item),
                       hts_labels_tone.c(phone_item),
                       hts_labels_tone.d(phone_item),
                       hts_labels_tone.e(phone_item),
                       hts_labels_tone.f(phone_item),
                       hts_labels_tone.g(phone_item),
                       hts_labels_tone.h(phone_item),
                       hts_labels_tone.i(phone_item),
                       hts_labels_tone.j(phone_item),
                       hts_labels_tone.k(phone_item),
                       hts_labels_tone.l(phone_item),
                       hts_labels_tone.m(phone_item),]
            if endtime is not None:
                lab.append("%s %s " % (str(starttime).rjust(10), str(endtime).rjust(10)) + "/".join(phlabel))
            else:
                lab.append("/".join(phlabel))
            # Next segment starts where this one ended. NOTE(review): if a
            # segment lacks "end" this becomes None for the next line --
            # presumably all segments either have or lack end times.
            starttime = endtime
        utt["hts_label"] = lab
        return utt
    def hts_synth(self, utt, processname):
        """Run the HTS engine binary on utt["hts_label"] and load results.

        Writes the labels to a temp file, invokes self.hts_bin with
        self.engine_parms (plus "-of" to dump generated log-F0), then loads
        segment end times, the synthesized waveform and the F0 track back
        onto the utterance. Temp files are removed afterwards.
        """
        htsparms = self.engine_parms.copy()
        # Ask the engine to write generated log-F0 to our temp file.
        htsparms["-of"] = "%(tempolf0_file)s"
        if "htsparms" in utt:
            htsparms.update(utt["htsparms"]) #parm overrides for this utt...
        #build command string and execute:
        cmds = self.hts_bin
        for k in htsparms:
            if htsparms[k]:
                # True means a bare flag; anything else is "-opt value".
                if htsparms[k] is True:
                    cmds += " " + k
                else:
                    cmds += " " + k + " " + str(htsparms[k])
        cmds += " %(tempilab_file)s"
        # Temp files: input labels, output wav/labels/log-F0.
        fd1, tempwav_file = mkstemp(prefix="ttslab_", suffix=".wav")
        fd2, tempilab_file = mkstemp(prefix="ttslab_")
        fd3, tempolab_file = mkstemp(prefix="ttslab_")
        fd4, tempolf0_file = mkstemp(prefix="ttslab_")
        cmds = cmds % {'models_dir': self.models_dir,
                       'tempwav_file': tempwav_file,
                       'tempilab_file': tempilab_file,
                       'tempolab_file': tempolab_file,
                       'tempolf0_file': tempolf0_file}
        #print(cmds)
        with codecs.open(tempilab_file, "w", encoding="utf-8") as outfh:
            outfh.write("\n".join(utt["hts_label"]))
        os.system(cmds)
        #load seg endtimes into utt:
        with open(tempolab_file) as infh:
            lines = infh.readlines()
            segs = utt.get_relation("Segment").as_list()
            # One aligned output label line per input segment expected.
            assert len(segs) == len(lines)
            for line, seg in zip(lines, segs):
                seg["end"] = hts_labels_tone.htk_int_to_float(line.split()[1])
        #load audio:
        utt["waveform"] = Waveform(tempwav_file)
        #load lf0:
        f0 = np.exp(np.fromfile(tempolf0_file, "float32")) #load and lf0 to hertz
        #to semitones relative to 1Hz:
        f0[f0.nonzero()] = 12.0 * np.log2(f0[f0.nonzero()]) # 12 * log2 (F0 / F0reference) where F0reference = 1
        f0t = Track()
        f0t.values = f0
        # 5 ms frame shift for the F0 time axis.
        f0t.times = np.arange(len(f0), dtype=np.float64) * 0.005
        utt["f0"] = f0t
        #cleanup tempfiles:
        os.close(fd1)
        os.close(fd2)
        os.close(fd3)
        os.close(fd4)
        os.remove(tempwav_file)
        os.remove(tempolab_file)
        os.remove(tempilab_file)
        os.remove(tempolf0_file)
        return utt
class SynthesizerHTSME_Tone2(SynthesizerHTSME_Tone):
    def hts_label(self, utt, processname):
        """Attach HTS full-context label strings (contexts p..n) to the utt.

        Each segment yields one label line; when the segment has an "end"
        time the line is prefixed with right-justified HTK start/end times.
        """
        label_funcs = [hts_labels_tone.p, hts_labels_tone.a,
                       hts_labels_tone.b, hts_labels_tone.c,
                       hts_labels_tone.d, hts_labels_tone.e,
                       hts_labels_tone.f, hts_labels_tone.g,
                       hts_labels_tone.h, hts_labels_tone.i,
                       hts_labels_tone.j, hts_labels_tone.k,
                       hts_labels_tone.l, hts_labels_tone.m,
                       hts_labels_tone.n]
        lab = []
        starttime = 0
        for seg in utt.get_relation("Segment"):
            if "end" in seg:
                endtime = hts_labels_tone.float_to_htk_int(seg["end"])
            else:
                endtime = None
            contexts = "/".join(func(seg) for func in label_funcs)
            if endtime is None:
                lab.append(contexts)
            else:
                lab.append("%s %s " % (str(starttime).rjust(10), str(endtime).rjust(10)) + contexts)
            starttime = endtime
        utt["hts_label"] = lab
        return utt
class SynthesizerHTSME_Tone_NoTone(SynthesizerHTSME_Tone): #no tone labels but loads generated f0
    def hts_label(self, utt, processname):
        """Attach HTS label strings without tone contexts (p..j only).

        Format matches SynthesizerHTSME_Tone.hts_label: optional HTK
        start/end time prefix followed by "/"-joined context strings.
        """
        label_funcs = [hts_labels_tone.p, hts_labels_tone.a,
                       hts_labels_tone.b, hts_labels_tone.c,
                       hts_labels_tone.d, hts_labels_tone.e,
                       hts_labels_tone.f, hts_labels_tone.g,
                       hts_labels_tone.h, hts_labels_tone.i,
                       hts_labels_tone.j]
        lab = []
        starttime = 0
        for seg in utt.get_relation("Segment"):
            if "end" in seg:
                endtime = hts_labels_tone.float_to_htk_int(seg["end"])
            else:
                endtime = None
            contexts = "/".join(func(seg) for func in label_funcs)
            if endtime is None:
                lab.append(contexts)
            else:
                lab.append("%s %s " % (str(starttime).rjust(10), str(endtime).rjust(10)) + contexts)
            starttime = endtime
        utt["hts_label"] = lab
        return utt
class LwaziYorubaMultiHTSVoice(LwaziMultiHTSVoice):
CONJUNCTIONS = ["ẹyin", "ati", # both,and
"sibẹ-sibẹ", "sibẹsibẹ", "afi", "ṣugbọn", #but
"fun", "nitori", "ni", "to", "ri", #for,because
"boya", "tabi", "yala", #either/or/nor
"pẹlu", "jubẹlọ", "bi", "o", "ti", "lẹ", "jẹ", "pe", #yet,although
"lati", "lẹhin", "igbati", # since
"titi", #until
"akoko" #while
] #Unicode NFC form
CGRAVE = "\u0300"
CACUTE = "\u0301"
CUNDOT = "\u0323"
SMALLGRAPHSET = "abdeẹfghijklmnoọprsṣtuwy"
ENGWORD_CHARTHRESHOLD = 4 #Only prefer entry in English lexicon for words longer (num chars) than this
    def __init__(self, phoneset, g2p, pronundict, pronunaddendum,
                 engphoneset, engg2p, engpronundict, engpronunaddendum,
                 synthesizer):
        """Delegate construction to LwaziMultiHTSVoice, passing through the
        Yoruba ("def") and English ("eng") phonesets, G2P modules and
        pronunciation resources unchanged.
        """
        LwaziMultiHTSVoice.__init__(self, phoneset=phoneset, g2p=g2p,
                                    pronundict=pronundict,
                                    pronunaddendum=pronunaddendum,
                                    engphoneset=engphoneset, engg2p=engg2p,
                                    engpronundict=engpronundict,
                                    engpronunaddendum=engpronunaddendum,
                                    synthesizer=synthesizer)
def normalizer(self, utt, processname):
""" words marked with a prepended pipe character "|" and words
in the English pronunciation dictionary or addendum will
be marked as English...
"""
token_rel = utt.get_relation("Token")
word_rel = utt.new_relation("Word")
for token_item in token_rel:
tokentext = token_item["name"]
tokentext = tokentext.lower()
tokentextlist = tokentext.split("-") #split tokens on dashes to create multiple words...revisit
for wordname in tokentextlist:
pronunform = unicodedata.normalize("NFC", re.sub(u"[%s%s]" % (self.CGRAVE, self.CACUTE), "", wordname))
word_item = word_rel.append_item()
#try to determine language:
if wordname.startswith("|"):
word_item["lang"] = "eng"
wordname = wordname[1:]
pronunform = pronunform[1:]
elif (((wordname in self.engpronunaddendum or
wordname in self.engpronundict) and
len(pronunform) > self.ENGWORD_CHARTHRESHOLD and
pronunform not in self.pronunaddendum) or
not all([c in self.SMALLGRAPHSET for c in pronunform.lower()])):
word_item["lang"] = "eng"
else:
word_item["lang"] = "def" #default language...
#determine type:
if re.search("[\d]+", wordname):
#TODO: normalisation of digits... at the moment
#insert string to make phonetizer fail:
pronunform = "1234567890"
word_item["type"] = "num"
word_item["lang"] = "eng" #will pronounce digits in English...
else:
word_item["type"] = "norm"
#tokenizer does NFKD... for Yoruba pronunciation
#resources are in NFC without ACUTE and GRAVE
#ACCENTS. But we need the ACCENTS to determine tone
| |
<reponame>mkulikf5/bigiq-cloudinit
#!/usr/bin/env python
# coding=utf-8
# pylint: disable=broad-except,unused-argument,line-too-long
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module reads OpenStack metadata from an attached configdrive,
creates TMSH and f5-declarative-onboarding artifacts from the
metadata, and then onboards the TMOS device.
The module must be enabled in the userdata YAML to perform any
onboarding artifact generation or onboarding.
#cloud-config
tmos_configdrive_openstack:
enabled: True
This module will also look for YAML metadata which typically
is handled by the cc_ssh.py module:
#cloud-config
ssh_authorized_keys:
- ssh-rsa [key]
- ssh-rsa [key]
The standard cloud-init cc_ssh.py module alters SELinux
configurations which may not be compatible with TMOS
Additional attributes supported in the YAML declaration
include
rd_enabled - Route domain decoration support, defaults to True.
device_discovery_interface - implicitly define the TMOS configsync interface.
default_route_interface - implicitly define the TMOS default route interface.
license_key - optional license key for AUTOMATIC license registration for the BIG-IQ
node_type - optional BIG-IQ type, options are cm or dcd, default is cm
#cloud-config
bigiq_configdrive_openstack:
enabled: True
rd_enabled: False
do_enabled: True
device_discovery_interface: 1.1
default_route_interface: 1.3
  license_key: <KEY>
  node_type: cm
"""
import json
import logging
import os
import subprocess
import time
from cloudinit import bigiq_onboard_utils
# constants
MODULE_NAME = 'bigiq_configdrive_openstack'
OUT_DIR = '/var/lib/cloud/' + MODULE_NAME
EXEC_DIR = '/opt/cloud/' + MODULE_NAME
TMSH_CMD_FILE_DIR = EXEC_DIR + '/initscripts/all'
POST_ONBOARD_CMD_FILE_DIR = EXEC_DIR + '/initscripts/post'
METADATA_FILE_DIR = '/opt/cloud/instance'
ANSIBLE_VAR_FILE = '/var/lib/cloud/ansible/onboard/onboard_vars.yml'
ANSIBLE_PLAYBOOK_DIR = '/var/lib/cloud/ansible/onboard'
ANSIBLE_PLAYBOOK = 'onboard.yml'
NETWORK_DATA_FILE = METADATA_FILE_DIR + '/network_data.json'
META_DATA_FILE = METADATA_FILE_DIR + '/meta_data.json'
LOG_FILE = '/var/log/f5-cloudinit.log'
ERROR = 'ERROR'
SUCCESS = 'SUCCESS'
# Module-level logger writing DEBUG and above to the f5-cloudinit log file.
bigiq_onboard_utils.touch_file(LOG_FILE)
LOG = logging.getLogger(MODULE_NAME)
LOG.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
LOGFILE = logging.FileHandler(LOG_FILE)
LOGFILE.setLevel(logging.DEBUG)
LOGFILE.setFormatter(FORMATTER)
LOG.addHandler(LOGFILE)
ONBOARD_COMPLETE_FLAG_FILE = OUT_DIR + '/ONBOARD_COMPLETE'
POST_ONBOARD_FLAG_FILE = OUT_DIR + '/POST_ONBOARD_COMPLETE'
DEFAULT_DNS_SERVERS = ['8.8.8.8', '8.8.4.4']
# Second pool host restored ('1.pool.<EMAIL>' was a redaction artifact).
DEFAULT_NTP_SERVERS = ['0.pool.ntp.org', '1.pool.ntp.org']
DEFAULT_TIMEZONE = 'UTC'
DEFAULT_DEVICE_DISCOVERY_INTERFACE = 'mgmt'
REMOVE_METADATA_FILES = False
ONBOARD_TIMEOUT = 120  # seconds
SYSCMDS = bigiq_onboard_utils.SYSCMDS
SYSDEVPREFIX = 'eth'   # Linux interface name prefix for non-mgmt NICs
def metadata_dir_exists():
    """Ensure the instance metadata copy directory exists on disk."""
    target = METADATA_FILE_DIR
    if not os.path.isdir(target):
        os.makedirs(target)
def tmsh_cmd_dir_exists():
    """Ensure the TMSH init-script, post-onboard and output dirs exist."""
    for dir_path in (TMSH_CMD_FILE_DIR, POST_ONBOARD_CMD_FILE_DIR, OUT_DIR):
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)
def get_metadata():
    """Read OpenStack configdrive metadata files into dictionaries.

    Returns:
        tuple: (network_data dict, meta_data dict); either is empty when
        its JSON file is absent.
    """
    LOG.debug('parsing metadata files')
    os_nmd, os_md = {}, {}
    # Use context managers so the file handles are closed promptly
    # (the original json.load(open(...)) leaked the descriptors).
    if os.path.isfile(NETWORK_DATA_FILE):
        with open(NETWORK_DATA_FILE) as nmd_file:
            os_nmd = json.load(nmd_file)
    if os.path.isfile(META_DATA_FILE):
        with open(META_DATA_FILE) as md_file:
            os_md = json.load(md_file)
    return (os_nmd, os_md)
def resolve_resources(rd_enabled=True,
default_route_interface=None,
device_discovery_interface=None,
inject_routes=True,
dhcp_timeout=120):
"""Resolve the resource provisioning dataset from metadata"""
(os_nmd, os_md) = get_metadata()
if not (bool(os_nmd) or bool(os_md)):
LOG.error('OpenStack metadata unavailable.. halting processing')
return ({}, {}, {}, {})
if default_route_interface:
LOG.debug('default_route_interface %s requested',
default_route_interface)
else:
LOG.debug(
'no default_route_interface designated, will attempt to select dynamically'
)
if not device_discovery_interface:
device_discovery_interface = DEFAULT_DEVICE_DISCOVERY_INTERFACE
LOG.debug('setting device_discovery_interface to %s',
device_discovery_interface)
device_discovery_address = None
# supported TMOS network types
ln_types = ['phy', 'bridge', 'ovs', 'vif', 'tap']
n_types = ['ipv4', 'ipv6']
m_l_id = m_ip = m_nm = m_gw = None
m_l_mtu = 1500
# resolved resources
links, selfips, routes, services = {}, {}, {}, {}
number_of_default_routes = 0
forced_tmm_down = False
# resolve L2 interfaces
n_idx = 0
for link in os_nmd['links']:
if not link['mtu']:
link['mtu'] = 1500
if n_idx == 0:
m_l_id = link['id']
m_l_mtu = link['mtu']
bigiq_onboard_utils.force_mgmt_mtu(m_l_mtu)
n_idx += 1
continue
if link['type'] in ln_types:
net_name = 'net_%s' % link['id']
net_name = net_name.replace('-', '_')
links[link['id']] = {
'net_name': net_name,
'mtu': link['mtu'],
'interface': '1.%s' % n_idx,
'route_domain': 0,
'interface_index': n_idx,
'segmentation_id': 4094 - n_idx,
'tagged': False
}
n_idx += 1
# resolve L2 virtual networks
for link in os_nmd['links']:
if link['type'] == 'vlan':
if link['vlan_link'] not in links:
LOG.warning("VLAN %s defined for unsupported link %s",
link['vlan_id'], link['vlan_link'])
else:
if links[link['vlan_link']]['interface_index'] == 0:
LOG.warning(
"VLAN tagging is not supported on management interface"
)
else:
links[link['id']] = {
'net_name':
'vlan_%s' % link['vlan_id'],
'mtu':
links[link['vlan_link']]['mtu'],
'interface':
links[link['vlan_link']]['interface'],
'interface_index':
links[link['vlan_link']]['interface_index'],
'segmentation_id':
link['vlan_id'],
'tagged':
True
}
for net in os_nmd['networks']:
# resolve DHCP discovered attributes
if net['type'] == 'ipv4_dhcp':
if net['link'] == m_l_id:
try:
bigiq_onboard_utils.wait_for_mgmt_dhcp()
mgmt_data = bigiq_onboard_utils.process_dhcp4_lease(
bigiq_onboard_utils.MGMT_DHCP_LEASE_FILE)
if 'fixed-address' in mgmt_data:
m_ip = mgmt_data['fixed-address']
if 'subnet-mask' in mgmt_data:
m_nm = mgmt_data['subnet-mask']
if 'routers' in mgmt_data:
m_gw = mgmt_data['routers']
if 'host-name' in mgmt_data:
services['hostname'] = mgmt_data['host-name']
if 'domain-name' in mgmt_data:
services['domainname'] = mgmt_data['domain-name']
if device_discovery_interface == DEFAULT_DEVICE_DISCOVERY_INTERFACE:
device_discovery_address = m_ip
except Exception as err:
LOG.error(
'exception in processing mgmt DHCP lease file: %s',
err)
else:
if not forced_tmm_down:
bigiq_onboard_utils.force_tmm_down()
forced_tmm_down = True
interface = "%s%d" % (SYSDEVPREFIX,
links[net['link']]['interface_index'])
s_ip = s_nm = s_gw = s_routes = None
try:
if bigiq_onboard_utils.make_dhcp4_request(
interface, dhcp_timeout):
interface_data = bigiq_onboard_utils.process_dhcp4_lease(
interface)
if 'fixed-address' in interface_data:
s_ip = interface_data['fixed-address']
if 'subnet-mask' in interface_data:
s_nm = interface_data['subnet-mask']
if 'routers' in interface_data:
s_gw = interface_data['routers']
if 'classless-static-routes' in interface_data:
s_routes = interface_data[
'classless-static-routes']
use_rd_on_this_link = True
if net['link'] not in routes:
routes[net['link']] = []
# default gateway from DHCPv4
if s_gw and s_gw != m_gw:
route = {}
inject_route = True
route['use_rd'] = True
route['exclude'] = False
if default_route_interface == links[
net['link']]['interface']:
LOG.debug(
'default_route_interface discovered with DHCPv4 as interface: %s gw: %s',
links[net['link']]['interface'], s_gw)
route['use_rd'] = False
number_of_default_routes += 1
if device_discovery_interface == links[
net['link']]['interface']:
LOG.debug(
'device_discovery_interface discovered with DHCPv4 as link: %s',
net['link'])
device_discovery_address = s_ip
route['use_rd'] = False
if not default_route_interface == device_discovery_interface:
inject_route = False
if (not default_route_interface) and (
number_of_default_routes == 0
and inject_route):
LOG.debug(
'dynamically setting default_route_interface with DHCPv4 interface: %s gw: %s',
links[net['link']]['interface'], s_gw)
route['use_rd'] = False
use_rd_on_this_link = False
number_of_default_routes += 1
route['route_name'] = net['link'] + '_default_gw'
route['network'] = '0.0.0.0'
route['netmask'] = '0.0.0.0'
route['gateway'] = s_gw
if inject_route and inject_routes:
routes[net['link']].append(route)
for route in bigiq_onboard_utils.process_dhcp4_routes(
s_routes):
route['use_rd'] = True
route['exclude'] = False
if default_route_interface == links[
net['link']]['interface']:
route['use_rd'] = False
if device_discovery_interface == links[
net['link']]['interface']:
route['use_rd'] = False
if not use_rd_on_this_link:
route['use_rd'] = False
if inject_routes:
routes[net['link']].append(route)
selfip_name = 'selfip_%s' % net['link']
selfip_name = selfip_name.replace('-', '_')
selfips[net['link']] = {
'selfip_name': selfip_name,
'net_name': links[net['link']]['net_name'],
'ip_address': s_ip,
'netmask': s_nm
}
else:
if default_route_interface == links[
net['link']]['interface']:
LOG.warning(
'the requested default_route_interface self IP could not be resolved'
)
if device_discovery_interface == links[
net['link']]['interface']:
LOG.warning(
'the requested device_discovery_interface self IP could not be resolved'
)
except Exception as err:
LOG.error(
'exception processing DHCPv4 for interface %s - %s',
interface, err)
elif net['type'] == 'ipv6_dhcp':
LOG.warning(
'Found IPv6_DHCP network. Since interface-mtu and route prefix delegation are not supported with DHCPv6 and TMOS, skipping.'
)
else:
if net['link'] == m_l_id:
m_ip = net['ip_address']
m_nm = net['netmask']
for route in net['routes']:
if route['network'] == '0.0.0.0' or route[
'network'] == '::':
m_gw = route['gateway']
if device_discovery_interface == DEFAULT_DEVICE_DISCOVERY_INTERFACE:
device_discovery_address = m_ip
else:
# resolve statically configured attributes
if net['type'] in n_types:
# resolve L3 routing information
for route in net['routes']:
if net['link'] in links:
if net['link'] not in routes:
routes[net['link']] = []
inject_route = True
route['use_rd'] = True
route['exclude'] = False
if route['network'] == '0.0.0.0' or route[
'network'] == '::':
if route['gateway'] != m_gw:
route['route_name'] = net['link'] + \
'_default_gw'
if default_route_interface == links[
net['link']]['interface']:
LOG.debug(
'default_route_interface discovered as interface: %s gw: %s',
links[net['link']]['interface'],
route['gateway'])
route['use_rd'] = False
number_of_default_routes += 1
if device_discovery_interface == links[
net['link']]['interface']:
LOG.debug(
'device_discovery_interface discovered as interface: %s',
links[net['link']]['interface'])
route['use_rd'] = False
if not default_route_interface == device_discovery_interface:
inject_route = False
if (not default_route_interface) and (
number_of_default_routes == 0
and inject_route):
LOG.debug(
'dynamically setting default_route_interface as interface: %s gw: %s',
links[net['link']]['interface'],
route['gateway'])
route['use_rd'] = False
number_of_default_routes += 1
route['route_name'] = "route_%s_%s" % (
route['network'], route['netmask'])
route['route_name'] = route['route_name'].replace(
'.', '_').replace(':', '_').replace('/', '_')
if inject_route and inject_routes:
routes[net['link']].append(route)
# resolve interface L3 information
if net['link'] in links:
selfip_name = 'selfip_%s' % net['link']
selfip_name = selfip_name.replace('-', '_')
selfips[net['link']] = {
'selfip_name': selfip_name,
'net_name': links[net['link']]['net_name'],
'ip_address': net['ip_address'],
'netmask': net['netmask']
}
if device_discovery_interface == links[
net['link']]['interface']:
device_discovery_address = net['ip_address']
if rd_enabled:
LOG.debug('decorating route domains')
for n_link in routes:
for route in routes[n_link]:
if route['use_rd']:
r_dom = str(links[n_link]['segmentation_id'])
links[n_link]['route_domain'] = r_dom
if selfips[n_link]['ip_address'].find('%') < 0:
selfips[n_link]['ip_address'] = selfips[n_link][
'ip_address'] + '%' + r_dom
route['route_name'] = route['route_name'] + '_' + r_dom
route['network'] = | |
# -*- coding: utf-8 -*-
# (c) 2009-2021 <NAME> and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 <NAME>.
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Implementation of a DAV provider that serves resource from a file system.
:class:`~wsgidav.fs_dav_provider.FilesystemProvider` implements a DAV resource
provider that publishes a file system.
If ``readonly=True`` is passed, write attempts will raise HTTP_FORBIDDEN.
This provider creates instances of :class:`~wsgidav.fs_dav_provider.FileResource`
and :class:`~wsgidav.fs_dav_provider.FolderResource` to represent files and
directories respectively.
"""
import os
import shutil
import stat
import sys
from wsgidav import util
from wsgidav.dav_error import HTTP_FORBIDDEN, DAVError
from wsgidav.dav_provider import DAVCollection, DAVNonCollection, DAVProvider
__docformat__ = "reStructuredText"
_logger = util.get_module_logger(__name__)
BUFFER_SIZE = 8192
# ========================================================================
# FileResource
# ========================================================================
class FileResource(DAVNonCollection):
    """Represents a single existing DAV resource instance.
    See also _DAVResource, DAVNonCollection, and FilesystemProvider.
    """
    def __init__(self, path, environ, file_path):
        """Initialize from resource *path* and the backing *file_path* on disk."""
        super().__init__(path, environ)
        self._file_path = file_path
        # Cache the stat result; live properties below read from it.
        self.file_stat = os.stat(self._file_path)
        # Setting the name from the file path should fix the case on Windows
        self.name = os.path.basename(self._file_path)
        self.name = util.to_str(self.name)
    # Getter methods for standard live properties
    def get_content_length(self):
        """Return the file size in bytes (from the cached stat)."""
        return self.file_stat[stat.ST_SIZE]
    def get_content_type(self):
        """Return the MIME type guessed from the resource path."""
        return util.guess_mime_type(self.path)
    def get_creation_date(self):
        """Return st_ctime from the cached stat."""
        return self.file_stat[stat.ST_CTIME]
    def get_display_name(self):
        """Return the display name (basename of the file path)."""
        return self.name
    def get_etag(self):
        """Return an ETag derived from the backing file."""
        return util.get_file_etag(self._file_path)
    def get_last_modified(self):
        """Return st_mtime from the cached stat."""
        return self.file_stat[stat.ST_MTIME]
    def support_etag(self):
        """ETags are supported for files."""
        return True
    def support_ranges(self):
        """Byte-range requests are supported for files."""
        return True
    def get_content(self):
        """Open content as a stream for reading.
        See DAVResource.get_content()
        """
        assert not self.is_collection
        # GC issue 28, 57: if we open in text mode, \r\n is converted to one byte.
        # So the file size reported by Windows differs from len(..), thus
        # content-length will be wrong.
        return open(self._file_path, "rb", BUFFER_SIZE)
    def begin_write(self, *, content_type=None):
        """Open content as a stream for writing.
        See DAVResource.begin_write()
        """
        assert not self.is_collection
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)
        # _logger.debug("begin_write: {}, {}".format(self._file_path, "wb"))
        # GC issue 57: always store as binary
        return open(self._file_path, "wb", BUFFER_SIZE)
    def delete(self):
        """Remove this resource or collection (recursive).
        See DAVResource.delete()
        """
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)
        os.unlink(self._file_path)
        self.remove_all_properties(recursive=True)
        self.remove_all_locks(recursive=True)
    def copy_move_single(self, dest_path, *, is_move):
        """See DAVResource.copy_move_single()"""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)
        fpDest = self.provider._loc_to_file_path(dest_path, self.environ)
        assert not util.is_equal_or_child_uri(self.path, dest_path)
        # Copy file (overwrite, if exists)
        shutil.copy2(self._file_path, fpDest)
        # (Live properties are copied by copy2 or copystat)
        # Copy dead properties
        propMan = self.provider.prop_manager
        if propMan:
            destRes = self.provider.get_resource_inst(dest_path, self.environ)
            if is_move:
                propMan.move_properties(
                    self.get_ref_url(),
                    destRes.get_ref_url(),
                    with_children=False,
                    environ=self.environ,
                )
            else:
                propMan.copy_properties(
                    self.get_ref_url(), destRes.get_ref_url(), self.environ
                )
    def support_recursive_move(self, dest_path):
        """Return True, if move_recursive() is available (see comments there)."""
        return True
    def move_recursive(self, dest_path):
        """See DAVResource.move_recursive()"""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)
        fpDest = self.provider._loc_to_file_path(dest_path, self.environ)
        assert not util.is_equal_or_child_uri(self.path, dest_path)
        assert not os.path.exists(fpDest)
        _logger.debug("move_recursive({}, {})".format(self._file_path, fpDest))
        shutil.move(self._file_path, fpDest)
        # (Live properties are copied by copy2 or copystat)
        # Move dead properties
        if self.provider.prop_manager:
            destRes = self.provider.get_resource_inst(dest_path, self.environ)
            self.provider.prop_manager.move_properties(
                self.get_ref_url(),
                destRes.get_ref_url(),
                with_children=True,
                environ=self.environ,
            )
    def set_last_modified(self, dest_path, time_stamp, *, dry_run):
        """Set last modified time for destPath to timeStamp on epoch-format"""
        # Translate time from RFC 1123 to seconds since epoch format
        secs = util.parse_time_string(time_stamp)
        if not dry_run:
            os.utime(self._file_path, (secs, secs))
        return True
# ========================================================================
# FolderResource
# ========================================================================
class FolderResource(DAVCollection):
    """Represents a single existing file system folder DAV resource.
    See also _DAVResource, DAVCollection, and FilesystemProvider.
    """
    def __init__(self, path, environ, file_path):
        """Initialize from resource *path* and the backing folder *file_path*."""
        super().__init__(path, environ)
        self._file_path = file_path
        # Cache the stat result; live properties below read from it.
        self.file_stat = os.stat(self._file_path)
        # Setting the name from the file path should fix the case on Windows
        self.name = os.path.basename(self._file_path)
        self.name = util.to_str(self.name)  # .encode("utf8")
    # Getter methods for standard live properties
    def get_creation_date(self):
        """Return st_ctime from the cached stat."""
        return self.file_stat[stat.ST_CTIME]
    def get_display_name(self):
        """Return the display name (basename of the folder path)."""
        return self.name
    def get_directory_info(self):
        """No extra directory info is provided for plain folders."""
        return None
    def get_etag(self):
        """Collections do not expose an ETag."""
        return None
    def get_last_modified(self):
        """Return st_mtime from the cached stat."""
        return self.file_stat[stat.ST_MTIME]
    def get_member_names(self):
        """Return list of direct collection member names (utf-8 encoded).
        See DAVCollection.get_member_names()
        """
        # On Windows NT/2k/XP and Unix, if path is a Unicode object, the result
        # will be a list of Unicode objects.
        # Undecodable filenames will still be returned as string objects
        # If we don't request unicode, for example Vista may return a '?'
        # instead of a special character. The name would then be unusable to
        # build a distinct URL that references this resource.
        nameList = []
        # self._file_path is unicode, so os.listdir returns unicode as well
        assert util.is_str(self._file_path)
        # if "temp" in self._file_path:
        #     raise RuntimeError("Oops")
        for name in os.listdir(self._file_path):
            if not util.is_str(name):
                name = name.decode(sys.getfilesystemencoding())
            assert util.is_str(name)
            # Skip non files (links and mount points)
            fp = os.path.join(self._file_path, name)
            if not os.path.isdir(fp) and not os.path.isfile(fp):
                _logger.debug("Skipping non-file {!r}".format(fp))
                continue
            # name = name.encode("utf8")
            name = util.to_str(name)
            nameList.append(name)
        return nameList
    def get_member(self, name):
        """Return direct collection member (DAVResource or derived).
        See DAVCollection.get_member()
        """
        assert util.is_str(name), "{!r}".format(name)
        fp = os.path.join(self._file_path, util.to_str(name))
        # name = name.encode("utf8")
        path = util.join_uri(self.path, name)
        if os.path.isdir(fp):
            res = FolderResource(path, self.environ, fp)
        elif os.path.isfile(fp):
            res = FileResource(path, self.environ, fp)
        else:
            # Neither file nor folder (e.g. broken link): no member
            _logger.debug("Skipping non-file {}".format(path))
            res = None
        return res
    # --- Read / write -------------------------------------------------------
    def create_empty_resource(self, name):
        """Create an empty (length-0) resource.
        See DAVResource.create_empty_resource()
        """
        assert "/" not in name
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)
        path = util.join_uri(self.path, name)
        fp = self.provider._loc_to_file_path(path, self.environ)
        f = open(fp, "wb")
        f.close()
        return self.provider.get_resource_inst(path, self.environ)
    def create_collection(self, name):
        """Create a new collection as member of self.
        See DAVResource.create_collection()
        """
        assert "/" not in name
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)
        path = util.join_uri(self.path, name)
        fp = self.provider._loc_to_file_path(path, self.environ)
        os.mkdir(fp)
    def delete(self):
        """Remove this resource or collection (recursive).
        See DAVResource.delete()
        """
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)
        shutil.rmtree(self._file_path, ignore_errors=False)
        self.remove_all_properties(recursive=True)
        self.remove_all_locks(recursive=True)
    def copy_move_single(self, dest_path, *, is_move):
        """See DAVResource.copy_move_single()"""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)
        fpDest = self.provider._loc_to_file_path(dest_path, self.environ)
        assert not util.is_equal_or_child_uri(self.path, dest_path)
        # Create destination collection, if not exists
        if not os.path.exists(fpDest):
            os.mkdir(fpDest)
        try:
            # may raise: [Error 5] Permission denied:
            # u'C:\\temp\\litmus\\ccdest'
            shutil.copystat(self._file_path, fpDest)
        except Exception:
            _logger.exception("Could not copy folder stats: {}".format(self._file_path))
        # (Live properties are copied by copy2 or copystat)
        # Copy dead properties
        propMan = self.provider.prop_manager
        if propMan:
            destRes = self.provider.get_resource_inst(dest_path, self.environ)
            if is_move:
                propMan.move_properties(
                    self.get_ref_url(),
                    destRes.get_ref_url(),
                    with_children=False,
                    environ=self.environ,
                )
            else:
                propMan.copy_properties(
                    self.get_ref_url(), destRes.get_ref_url(), self.environ
                )
    def support_recursive_move(self, dest_path):
        """Return True, if move_recursive() is available (see comments there)."""
        return True
    def move_recursive(self, dest_path):
        """See DAVResource.move_recursive()"""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)
        fpDest = self.provider._loc_to_file_path(dest_path, self.environ)
        assert not util.is_equal_or_child_uri(self.path, dest_path)
        assert not os.path.exists(fpDest)
        _logger.debug("move_recursive({}, {})".format(self._file_path, fpDest))
        shutil.move(self._file_path, fpDest)
        # (Live properties are copied by copy2 or copystat)
        # Move dead properties
        if self.provider.prop_manager:
            destRes = self.provider.get_resource_inst(dest_path, self.environ)
            self.provider.prop_manager.move_properties(
                self.get_ref_url(),
                destRes.get_ref_url(),
                with_children=True,
                environ=self.environ,
            )
    def set_last_modified(self, dest_path, time_stamp, *, dry_run):
        """Set last modified time for destPath to timeStamp on epoch-format"""
        # Translate time from RFC 1123 to seconds since epoch format
        secs = util.parse_time_string(time_stamp)
        if not dry_run:
            os.utime(self._file_path, (secs, secs))
        return True
# ========================================================================
# FilesystemProvider
# ========================================================================
class FilesystemProvider(DAVProvider):
    def __init__(self, root_folder, *, readonly=False, shadow=None):
        """Create a provider that serves the file system below *root_folder*.

        readonly: if True, write attempts raise HTTP_FORBIDDEN.
        shadow: optional mapping of resource path -> file path, served in
            place of missing resources (lookup is case-insensitive).
        """
        # and resolve relative to config file
        # root_folder = os.path.expandvars(os.path.expanduser(root_folder))
        root_folder = os.path.abspath(root_folder)
        if not root_folder or not os.path.exists(root_folder):
            raise ValueError("Invalid root path: {}".format(root_folder))
        super().__init__()
        self.root_folder_path = root_folder
        self.readonly = readonly
        if shadow:
            # Store keys lowercased: shadow lookup is case-insensitive.
            self.shadow = {k.lower(): v for k, v in shadow.items()}
        else:
            self.shadow = {}
def __repr__(self):
rw = "Read-Write"
if self.readonly:
rw = "Read-Only"
return "{} for path '{}' ({})".format(
self.__class__.__name__, self.root_folder_path, rw
)
def _resolve_shadow_path(self, path, environ, file_path):
"""File not found: See if there is a shadow configured."""
shadow = self.shadow.get(path.lower())
# _logger.info(f"Shadow {path} -> {shadow} {self.shadow}")
if not shadow:
return False, file_path
err = None
method = environ["REQUEST_METHOD"].upper()
if method not in ("GET", "HEAD", "OPTIONS"):
err = f"Shadow {path} -> {shadow}: ignored for method {method!r}."
elif os.path.exists(file_path):
err = f"Shadow {path} -> {shadow}: ignored for existing resource {file_path!r}."
elif not os.path.exists(shadow):
err = f"Shadow {path} -> {shadow}: does not exist."
if err:
_logger.warning(err)
return False, file_path
_logger.info(f"Shadow {path} -> {shadow}")
return True, shadow
def _loc_to_file_path(self, path, environ=None):
"""Convert resource path to a unicode absolute file path.
Optional environ argument may be useful e.g. in relation to per-user
sub-folder chrooting inside root_folder_path.
"""
root_path = self.root_folder_path
assert root_path is not None
assert util.is_str(root_path)
assert util.is_str(path)
path_parts = path.strip("/").split("/")
file_path = os.path.abspath(os.path.join(root_path, *path_parts))
is_shadow, file_path = self._resolve_shadow_path(path, environ, file_path)
if not file_path.startswith(root_path) and not is_shadow:
raise RuntimeError(
"Security exception: tried to access file outside root: {}".format(
file_path
)
)
# Convert to unicode
file_path = util.to_unicode_safe(file_path)
return file_path
    def is_readonly(self):
        """Return True if this provider rejects write operations."""
        return self.readonly
def get_resource_inst(self, path, environ):
"""Return info dictionary for path.
See DAVProvider.get_resource_inst()
"""
self._count_get_resource_inst += | |
of bits and m.bits to a list of bits at
each position"""
bits = []
totbits = 0
bgbits = 0
bg = self.background
UNCERT = lambda x: x*math.log(x)/math.log(2.0)
for letter in ACGT:
bgbits = bgbits + UNCERT(bg[letter])
for i in range(self.width):
tot = 0
for letter in ACGT:
Pij = pow(2.0, self.logP[i][letter])
tot = tot + UNCERT(Pij)
#bit = Pij * self.ll[i][letter]
#if bit > 0:
# tot = tot + bit
#print tot, bgbits, tot-bgbits
bits.append(max(0,tot-bgbits))
totbits = totbits + max(0,tot-bgbits)
self.bits = bits
self.totalbits = totbits
def denoise(self,bitthresh=0.5):
"""set low-information positions (below bitthresh) to Ns"""
for i in range(self.width):
tot = 0
for letter in ACGT:
if self.logP:
Pij = pow(2.0, self.logP[i][letter])
else:
Pij = pow(2.0, self.ll[i][letter]) * self.background[letter]
if Pij > 0.01:
bit = Pij * self.ll[i][letter]
tot = tot + bit
if tot < bitthresh: #Zero Column
for letter in ACGT:
self.ll[i][letter] = 0.0
self.compute_from_ll(self.ll)
    def giflogo(self,id,title=None,scale=0.8,info_str=''):
        """make a gif sequence logo"""
        # Delegates to a giflogo() helper resolved at module level (the
        # method name does not shadow it inside the body) — presumably
        # defined elsewhere in this file; TODO confirm.
        # NOTE(review): info_str is accepted but not forwarded.
        return giflogo(self,id,title,scale)
def printlogo(self, norm=2.3, height=10.0):
    """Print a text rendering of the motif logo.

    norm   -- maximum number of bits shown (vertical scale)
    height -- number of text lines used for the rendering
    """
    self._print_bits(norm, height)
def print_textlogo(self, norm=2.3, height=8.0):
    """Print a text rendering of the motif logo (alias of printlogo with a
    shorter default height).

    norm   -- maximum number of bits shown (vertical scale)
    height -- number of text lines used for the rendering
    """
    self._print_bits(norm, height)
def _print_bits(self, norm=2.3, height=8.0):
    """Print a text rendering of the motif logo.

    norm   -- maximum number of bits to show (top of the scale)
    height -- number of text lines used to render the logo

    Fixes over the previous version: no longer shadows the builtin `str`;
    the Python-2-only `cmp`/`list.sort(cmp=...)` idiom and `print`
    statements are replaced with `sorted(..., key=..., reverse=True)` and
    `sys.stdout.write` (already used below), so the method now runs under
    both Python 2 and 3 with identical output; the unused `tots`
    accumulator is dropped.
    """
    # Per-column information contribution of each base (bits).
    bits = []
    for i in range(self.width):
        D = {}
        for letter in ['A', 'C', 'T', 'G']:
            if self.logP:
                Pij = pow(2.0, self.logP[i][letter])
            else:
                Pij = pow(2.0, self.ll[i][letter]) * self.background[letter]
            if Pij > 0.01:
                D[letter] = Pij * self.ll[i][letter]
        bits.append(D)
    # Build one vertical string per column: letters repeated in proportion
    # to their bit contribution, strongest letter first. sorted() is stable,
    # matching the tie behaviour of the old cmp-based sort.
    columns = []
    for i in range(self.width):
        parts = []
        for key in sorted(bits[i].keys(), key=lambda k: bits[i][k], reverse=True):
            parts.append(key * int(bits[i][key] / norm * height))
        columns.append(''.join(parts))
    fmt = '%%%ds' % height  # right-justify each column string to the logo height
    sys.stdout.write('# %s\n' % ('-' * self.width))
    for h in range(int(height)):
        sys.stdout.write("# ")
        for i in range(self.width):
            sys.stdout.write((fmt % columns[i])[h])
        if h == 0:
            sys.stdout.write(' -- %4.2f bits\n' % norm)
        elif h == height - 1:
            sys.stdout.write(' -- %4.2f bits\n' % (norm / height))
        else:
            sys.stdout.write('\n')
    sys.stdout.write('# %s\n' % ('-' * self.width))
    sys.stdout.write('# %s\n' % self.oneletter)
def _compute_ambig_ll(self):
    """Extend every log-likelihood column with IUPAC two-letter ambiguity
    codes, scored as the max of the two constituent bases (e.g. 'S' gets
    max(C, G)); 'N' and 'B' score 0."""
    for column in self.ll:
        for code, pair in one2two.items():
            column[code] = max(column[pair[0]], column[pair[1]])
        column['N'] = 0.0
        column['B'] = 0.0
def compute_from_nmer(self, nmer, beta=0.001):
    """Backward-compatible alias: delegate to compute_from_text()."""
    self.compute_from_text(nmer, beta)
def compute_from_text(self, text, beta=0.001):
    """Build the motif's count matrix from a string of IUPAC ambiguity codes
    and delegate to compute_from_counts().

    text -- motif text; '.'/'-' are treated as 'N'; WSMYRK split 0.5/0.5
            between their two bases; BDVH give 0.3333 to the three allowed
            bases; 'N' uses the background; '@' is background with a tiny
            bias toward 'A'; plain ACGT letters count 1.0
    beta -- pseudocount fraction forwarded to compute_from_counts()

    Fixes: `one2two.has_key(letter)` (removed in Python 3) is replaced with
    `letter in one2two`, and the regex is a raw string; both forms behave
    identically under Python 2.
    """
    prevlett = {'B': 'A', 'D': 'C', 'V': 'T', 'H': 'G'}
    countmat = []
    text = re.sub(r'[\.\-]', 'N', text.upper())
    for i in range(len(text)):
        D = {'A': 0, 'C': 0, 'T': 0, 'G': 0}
        letter = text[i]
        if letter in ['B', 'D', 'V', 'H']:  # B == no "A", etc...
            _omit = prevlett[letter]
            for L in ACGT:
                if L != _omit:
                    D[L] = 0.3333
        elif letter in one2two:  # Covers WSMYRK
            for L in list(one2two[letter]):
                D[L] = 0.5
        elif letter == 'N':
            for L in D.keys():
                D[L] = self.background[L]
        elif letter == '@':
            for L in D.keys():
                D[L] = self.background[L] - (0.0001)
            D['A'] = D['A'] + 0.0004
        else:
            D[letter] = 1.0
        countmat.append(D)
    self.compute_from_counts(countmat, beta)
def new_bg(self, bg):
    """Swap in a new ACGT background-frequency dictionary and recompute the
    log-likelihood (and related) matrices against it.

    The current probabilities (2**logP) are used as counts so the motif's
    shape is preserved under the new background."""
    freqs = []
    for column in self.logP:
        freqs.append(dict((letter, math.pow(2.0, lp))
                          for letter, lp in column.items()))
    self.background = bg
    self.compute_from_counts(freqs, 0)
def addpseudocounts(self, beta=0):
    """Re-derive the motif from its own count matrix, mixing in a uniform
    pseudocount fraction `beta`."""
    self.compute_from_counts(self.counts, beta)
def compute_from_counts(self,countmat,beta=0):
    """Build/refresh the motif from a matrix of per-position letter counts.

    countmat -- list with one dict per position mapping letter -> count
                (mutated in place by padding/pseudocounting below)
    beta     -- pseudocount fraction; 0 adds no pseudocounts

    Side effects: sets self.counts, self.width, self.nseqs and recomputes
    the log-likelihood matrix, one-letter consensus and max/min scores.
    """
    self.counts = countmat
    self.width = len(countmat)
    self.bgscale = 0
    maxcount = 0
    #Determine Biggest column
    for col in countmat:
        tot = pysum(col.values())
        if tot > maxcount :
            maxcount = tot
    #Pad counts of remaining columns
    # Lighter columns are topped up to the heaviest one, spreading the
    # deficit according to the background frequencies.
    for col in countmat:
        tot = pysum(col.values())
        pad = maxcount - tot
        for L in col.keys():
            col[L] = col[L] + pad * self.background.get(L,0.)
    self.nseqs = maxcount
    nseqs = maxcount
    #Add pseudocounts
    if beta > 0:
        multfactor = {}  # NOTE(review): unused -- looks like leftover code
        bgprob = self.background
        pcounts= {}
        for L in bgprob.keys():
            # beta fraction of the sequences, split per background frequency
            pcounts[L] = beta*bgprob[L]*nseqs
        for i in range(self.width):
            for L in countmat[i].keys():
                _t = (countmat[i][L] + pcounts[L]) #Add pseudo
                _t = _t / (1.0 + beta) #Renormalize
                countmat[i][L] = _t
    #Build Motif
    self.counts = countmat
    self._compute_ll()
    self._compute_oneletter()
    self._maxscore()
def _compute_bg_from_ll(self):
    """compute background model from log-likelihood matrix
    by noting that: pA + pT + pC + pG = 1
    and bgA + bgT + bgC + bgG = 1
    and bgA = bgT, bgC = bgG      (strand-symmetry assumption)
    and so bgA = 0.5 - bgC
    and pA = lA * bgA, etc for T, C, G
    so...
    (lA + lT)bgA + (lC + lG)bgC = 1
    (lA + lT)bgA + (lC + lG)(0.5 - bgA) = 1
    (lA + lT - lC - lG)bgA +(lC +lG)*0.5 = 1
    bgA = {1 - 0.5(lC + lG)} / (lA + lT - lC - lG)
    + Gain accuracy by taking average of bgA over all positions of PSSM
    """
    pow = math.pow  # local alias; shadows the builtin only inside this method
    bgATtot = 0
    nocount = 0   # columns skipped as uninformative/indeterminate
    near0 = lambda x:(-0.01 < x and x < 0.01)
    for i in range(self.width):
        _D = self.ll[i]
        ATtot = pow(2,_D['A']) + pow(2,_D['T'])
        GCtot = pow(2,_D['C']) + pow(2,_D['G'])
        # Skip all-zero (blanked 'N') columns.
        if near0(_D['A']) and near0(_D['T']) and near0(_D['G']) and near0(_D['C']):
            nocount = nocount + 1
            continue
        if near0(ATtot-GCtot): #Kludge to deal with indeterminate case
            nocount = nocount + 1
            continue
        bgAT = (1.0 - 0.5*GCtot)/(ATtot - GCtot)
        # Discard clearly out-of-range estimates.
        if (bgAT < 0.1) or (bgAT > 1.1):
            nocount = nocount + 1
            continue
        bgATtot = bgATtot + bgAT
    if nocount == self.width: #Kludge to deal with different indeterminate case
        self.background = {'A':0.25, 'C':0.25, 'G':0.25, 'T':0.25}
        return
    # Average the per-column estimates over the usable columns.
    bgAT = bgATtot / (self.width - nocount)
    bgGC = 0.5 - bgAT
    self.background = {'A':bgAT, 'C':bgGC, 'G':bgGC, 'T':bgAT}
def _compute_logP_from_ll(self):
    """Derive the log-probability matrix from the log-likelihood matrix.

    Since ll = log2(p / bg), it follows that log2(p) = ll + log2(bg)."""
    log = math.log
    table = []
    for i in range(self.width):
        row = {}
        for base in ACGT:
            row[base] = self.ll[i][base] + log(self.background[base]) / log(2.)
        table.append(row)
    self.logP = table
def _print_ll(self):
    """Print the log-likelihood (scoring) matrix, one '#'-prefixed row per
    base with a column-index header row (Python 2 print syntax)."""
    print "# ",
    for i in range(self.width):
        print " %4d "%i,
    print
    for L in ['A', 'C', 'T', 'G']:
        print "#%s "%L,
        for i in range(self.width):
            print "%8.3f "%self.ll[i][L],
        print
def _print_p(self):
    """Print the probability (frequency) matrix, recovered as 2**logP, one
    '#'-prefixed row per base (Python 2 print syntax)."""
    print "# ",
    for i in range(self.width):
        print " %4d "%i,
    print
    for L in ['A', 'C', 'T', 'G']:
        print "#%s "%L,
        for i in range(self.width):
            print "%8.3f "%math.pow(2,self.logP[i][L]),
        print
def _print_counts(self):
    """Print the raw count matrix, one '#'-prefixed row per base with a
    column-index header row (Python 2 print syntax)."""
    print "# ",
    for i in range(self.width):
        print " %4d "%i,
    print
    for L in ['A', 'C', 'T', 'G']:
        print "#%s "%L,
        for i in range(self.width):
            print "%8.3f "%self.counts[i][L],
        print
def _maxscore(self):
    """Cache the best and worst achievable scan scores: the sums of the
    per-column maximum and minimum log-likelihood values."""
    best = 0
    worst = 0
    for column in self.ll:
        best += max(column.values())
        worst += min(column.values())
    self.maxscore = best
    self.minscore = worst
def _compute_threshold(self, z=2.0):
    """Set self.threshold from the motif's source sequences.

    Scans each original sequence, collects the best score per sequence, and
    places the threshold z standard deviations below the mean of that
    distribution."""
    best_scores = []
    for seq in self.seqs:
        matches, endpoints, scores = self.scan(seq, -100)
        best_scores.append(scores[0])
    mean, std = avestd(best_scores)
    self.threshold = mean - z * std
def bestscanseq(self,seq):
"""return score,sequence of the best match to the motif in the supplied | |
GRAY = (0.5019607843137255, 0.5019607843137255, 0.5019607843137255)
GREY = (0.5019607843137255, 0.5019607843137255, 0.5019607843137255)
DARKGRAY = (0.6627450980392157, 0.6627450980392157, 0.6627450980392157)
DARKGREY = (0.6627450980392157, 0.6627450980392157, 0.6627450980392157)
SILVER = (0.7529411764705882, 0.7529411764705882, 0.7529411764705882)
LIGHTGRAY = (0.8274509803921568, 0.8274509803921568, 0.8274509803921568)
LIGHTGREY = (0.8274509803921568, 0.8274509803921568, 0.8274509803921568)
GAINSBORO = (0.8627450980392157, 0.8627450980392157, 0.8627450980392157)
WHITESMOKE = (0.9607843137254902, 0.9607843137254902, 0.9607843137254902)
WHITE = (1.0, 1.0, 1.0)
SNOW = (1.0, 0.9803921568627451, 0.9803921568627451)
ROSYBROWN = (0.7372549019607844, 0.5607843137254902, 0.5607843137254902)
LIGHTCORAL = (0.9411764705882353, 0.5019607843137255, 0.5019607843137255)
INDIANRED = (0.803921568627451, 0.3607843137254902, 0.3607843137254902)
BROWN = (0.6470588235294118, 0.16470588235294117, 0.16470588235294117)
FIREBRICK = (0.6980392156862745, 0.13333333333333333, 0.13333333333333333)
MAROON = (0.5019607843137255, 0.0, 0.0)
DARKRED = (0.5450980392156862, 0.0, 0.0)
RED = (1.0, 0.0, 0.0)
MISTYROSE = (1.0, 0.8941176470588236, 0.8823529411764706)
SALMON = (0.9803921568627451, 0.5019607843137255, 0.4470588235294118)
TOMATO = (1.0, 0.38823529411764707, 0.2784313725490196)
DARKSALMON = (0.9137254901960784, 0.5882352941176471, 0.47843137254901963)
CORAL = (1.0, 0.4980392156862745, 0.3137254901960784)
ORANGERED = (1.0, 0.27058823529411763, 0.0)
LIGHTSALMON = (1.0, 0.6274509803921569, 0.47843137254901963)
SIENNA = (0.6274509803921569, 0.3215686274509804, 0.17647058823529413)
SEASHELL = (1.0, 0.9607843137254902, 0.9333333333333333)
CHOCOLATE = (0.8235294117647058, 0.4117647058823529, 0.11764705882352941)
SADDLEBROWN = (0.5450980392156862, 0.27058823529411763, 0.07450980392156863)
SANDYBROWN = (0.9568627450980393, 0.6431372549019608, 0.3764705882352941)
PEACHPUFF = (1.0, 0.8549019607843137, 0.7254901960784313)
PERU = (0.803921568627451, 0.5215686274509804, 0.24705882352941178)
LINEN = (0.9803921568627451, 0.9411764705882353, 0.9019607843137255)
BISQUE = (1.0, 0.8941176470588236, 0.7686274509803922)
DARKORANGE = (1.0, 0.5490196078431373, 0.0)
BURLYWOOD = (0.8705882352941177, 0.7215686274509804, 0.5294117647058824)
ANTIQUEWHITE = (0.9803921568627451, 0.9215686274509803, 0.8431372549019608)
TAN = (0.8235294117647058, 0.7058823529411765, 0.5490196078431373)
NAVAJOWHITE = (1.0, 0.8705882352941177, 0.6784313725490196)
BLANCHEDALMOND = (1.0, 0.9215686274509803, 0.803921568627451)
PAPAYAWHIP = (1.0, 0.9372549019607843, 0.8352941176470589)
MOCCASIN = (1.0, 0.8941176470588236, 0.7098039215686275)
ORANGE = (1.0, 0.6470588235294118, 0.0)
WHEAT = (0.9607843137254902, 0.8705882352941177, 0.7019607843137254)
OLDLACE = (0.9921568627450981, 0.9607843137254902, 0.9019607843137255)
FLORALWHITE = (1.0, 0.9803921568627451, 0.9411764705882353)
DARKGOLDENROD = (0.7215686274509804, 0.5254901960784314, 0.043137254901960784)
GOLDENROD = (0.8549019607843137, 0.6470588235294118, 0.12549019607843137)
CORNSILK = (1.0, 0.9725490196078431, 0.8627450980392157)
GOLD = (1.0, 0.8431372549019608, 0.0)
LEMONCHIFFON = (1.0, 0.9803921568627451, 0.803921568627451)
KHAKI = (0.9411764705882353, 0.9019607843137255, 0.5490196078431373)
PALEGOLDENROD = (0.9333333333333333, 0.9098039215686274, 0.6666666666666666)
DARKKHAKI = (0.7411764705882353, 0.7176470588235294, 0.4196078431372549)
IVORY = (1.0, 1.0, 0.9411764705882353)
BEIGE = (0.9607843137254902, 0.9607843137254902, 0.8627450980392157)
LIGHTYELLOW = (1.0, 1.0, 0.8784313725490196)
LIGHTGOLDENRODYELLOW = (0.9803921568627451, 0.9803921568627451, 0.8235294117647058)
OLIVE = (0.5019607843137255, 0.5019607843137255, 0.0)
YELLOW = (1.0, 1.0, 0.0)
OLIVEDRAB = (0.4196078431372549, 0.5568627450980392, 0.13725490196078433)
YELLOWGREEN = (0.6039215686274509, 0.803921568627451, 0.19607843137254902)
DARKOLIVEGREEN = (0.3333333333333333, 0.4196078431372549, 0.1843137254901961)
GREENYELLOW = (0.6784313725490196, 1.0, 0.1843137254901961)
CHARTREUSE = (0.4980392156862745, 1.0, 0.0)
LAWNGREEN = (0.48627450980392156, 0.9882352941176471, 0.0)
HONEYDEW = (0.9411764705882353, 1.0, 0.9411764705882353)
DARKSEAGREEN = (0.5607843137254902, 0.7372549019607844, 0.5607843137254902)
PALEGREEN = (0.596078431372549, 0.984313725490196, 0.596078431372549)
LIGHTGREEN = (0.5647058823529412, 0.9333333333333333, 0.5647058823529412)
FORESTGREEN = (0.13333333333333333, 0.5450980392156862, 0.13333333333333333)
LIMEGREEN = (0.19607843137254902, 0.803921568627451, 0.19607843137254902)
DARKGREEN = (0.0, 0.39215686274509803, 0.0)
GREEN = (0.0, 0.5019607843137255, 0.0)
LIME = (0.0, 1.0, 0.0)
SEAGREEN = (0.1803921568627451, 0.5450980392156862, 0.3411764705882353)
MEDIUMSEAGREEN = (0.23529411764705882, 0.7019607843137254, 0.44313725490196076)
SPRINGGREEN = (0.0, 1.0, 0.4980392156862745)
MINTCREAM = (0.9607843137254902, 1.0, 0.9803921568627451)
MEDIUMSPRINGGREEN = (0.0, 0.9803921568627451, 0.6039215686274509)
MEDIUMAQUAMARINE = (0.4, 0.803921568627451, 0.6666666666666666)
AQUAMARINE = (0.4980392156862745, 1.0, 0.8313725490196079)
TURQUOISE = (0.25098039215686274, 0.8784313725490196, 0.8156862745098039)
LIGHTSEAGREEN = (0.12549019607843137, 0.6980392156862745, 0.6666666666666666)
MEDIUMTURQUOISE = (0.2823529411764706, 0.8196078431372549, 0.8)
AZURE = (0.9411764705882353, 1.0, 1.0)
LIGHTCYAN = (0.8784313725490196, 1.0, 1.0)
PALETURQUOISE = (0.6862745098039216, 0.9333333333333333, 0.9333333333333333)
DARKSLATEGRAY = (0.1843137254901961, 0.30980392156862746, 0.30980392156862746)
DARKSLATEGREY = (0.1843137254901961, 0.30980392156862746, 0.30980392156862746)
TEAL = (0.0, 0.5019607843137255, 0.5019607843137255)
DARKCYAN = (0.0, 0.5450980392156862, 0.5450980392156862)
AQUA = (0.0, 1.0, 1.0)
CYAN = (0.0, 1.0, 1.0)
DARKTURQUOISE = (0.0, 0.807843137254902, 0.8196078431372549)
CADETBLUE = (0.37254901960784315, 0.6196078431372549, 0.6274509803921569)
POWDERBLUE = (0.6901960784313725, 0.8784313725490196, 0.9019607843137255)
LIGHTBLUE = (0.6784313725490196, 0.8470588235294118, 0.9019607843137255)
DEEPSKYBLUE = (0.0, 0.7490196078431373, 1.0)
SKYBLUE = (0.5294117647058824, 0.807843137254902, 0.9215686274509803)
LIGHTSKYBLUE = (0.5294117647058824, 0.807843137254902, 0.9803921568627451)
STEELBLUE = (0.27450980392156865, 0.5098039215686274, 0.7058823529411765)
ALICEBLUE = (0.9411764705882353, 0.9725490196078431, 1.0)
DODGERBLUE = (0.11764705882352941, 0.5647058823529412, 1.0)
LIGHTSLATEGRAY = (0.4666666666666667, 0.5333333333333333, 0.6)
LIGHTSLATEGREY = (0.4666666666666667, 0.5333333333333333, 0.6)
SLATEGRAY = (0.4392156862745098, 0.5019607843137255, 0.5647058823529412)
SLATEGREY = (0.4392156862745098, 0.5019607843137255, 0.5647058823529412)
LIGHTSTEELBLUE = (0.6901960784313725, 0.7686274509803922, 0.8705882352941177)
CORNFLOWERBLUE = (0.39215686274509803, 0.5843137254901961, 0.9294117647058824)
ROYALBLUE = (0.2549019607843137, 0.4117647058823529, 0.8823529411764706)
GHOSTWHITE = (0.9725490196078431, 0.9725490196078431, 1.0)
LAVENDER = (0.9019607843137255, 0.9019607843137255, 0.9803921568627451)
MIDNIGHTBLUE = (0.09803921568627451, 0.09803921568627451, 0.4392156862745098)
NAVY = (0.0, 0.0, 0.5019607843137255)
DARKBLUE = (0.0, 0.0, 0.5450980392156862)
MEDIUMBLUE = (0.0, 0.0, 0.803921568627451)
BLUE = (0.0, 0.0, 1.0)
SLATEBLUE = (0.41568627450980394, 0.35294117647058826, 0.803921568627451)
DARKSLATEBLUE = (0.2823529411764706, 0.23921568627450981, 0.5450980392156862)
MEDIUMSLATEBLUE = (0.4823529411764706, 0.40784313725490196, 0.9333333333333333)
MEDIUMPURPLE = (0.5764705882352941, 0.4392156862745098, 0.8588235294117647)
REBECCAPURPLE = (0.4, 0.2, 0.6)
BLUEVIOLET = (0.5411764705882353, 0.16862745098039217, 0.8862745098039215)
INDIGO = (0.29411764705882354, 0.0, 0.5098039215686274)
DARKORCHID = (0.6, 0.19607843137254902, 0.8)
DARKVIOLET = (0.5803921568627451, 0.0, 0.8274509803921568)
MEDIUMORCHID = (0.7294117647058823, 0.3333333333333333, 0.8274509803921568)
THISTLE = (0.8470588235294118, 0.7490196078431373, 0.8470588235294118)
PLUM = (0.8666666666666667, 0.6274509803921569, 0.8666666666666667)
VIOLET = (0.9333333333333333, 0.5098039215686274, 0.9333333333333333)
PURPLE = (0.5019607843137255, 0.0, 0.5019607843137255)
DARKMAGENTA = (0.5450980392156862, 0.0, 0.5450980392156862)
FUCHSIA = (1.0, 0.0, 1.0)
MAGENTA = (1.0, 0.0, 1.0)
ORCHID = (0.8549019607843137, 0.4392156862745098, 0.8392156862745098)
MEDIUMVIOLETRED = (0.7803921568627451, 0.08235294117647059, 0.5215686274509804)
DEEPPINK = (1.0, 0.0784313725490196, 0.5764705882352941)
HOTPINK = (1.0, 0.4117647058823529, 0.7058823529411765)
LAVENDERBLUSH = (1.0, 0.9411764705882353, 0.9607843137254902)
PALEVIOLETRED = (0.8588235294117647, 0.4392156862745098, 0.5764705882352941)
CRIMSON = (0.8627450980392157, 0.0784313725490196, 0.23529411764705882)
PINK = (1.0, 0.7529411764705882, 0.796078431372549)
LIGHTPINK = (1.0, 0.7137254901960784, 0.7568627450980392)
class Color(RgbTuple, Enum):
BLACK = (0.0, 0.0, 0.0)
K = (0.0, 0.0, 0.0)
XKCD_BLACK = (0.0, 0.0, 0.0)
DIMGRAY = (0.4117647058823529, 0.4117647058823529, 0.4117647058823529)
DIMGREY = (0.4117647058823529, 0.4117647058823529, 0.4117647058823529)
TAB_GRAY = (0.4980392156862745, 0.4980392156862745, 0.4980392156862745)
TAB_GREY = (0.4980392156862745, 0.4980392156862745, 0.4980392156862745)
GRAY = (0.5019607843137255, 0.5019607843137255, 0.5019607843137255)
GREY = (0.5019607843137255, 0.5019607843137255, 0.5019607843137255)
DARKGRAY = (0.6627450980392157, 0.6627450980392157, 0.6627450980392157)
DARKGREY = (0.6627450980392157, 0.6627450980392157, 0.6627450980392157)
SILVER = (0.7529411764705882, 0.7529411764705882, 0.7529411764705882)
LIGHTGRAY = (0.8274509803921568, 0.8274509803921568, 0.8274509803921568)
LIGHTGREY = (0.8274509803921568, 0.8274509803921568, 0.8274509803921568)
GAINSBORO = (0.8627450980392157, 0.8627450980392157, 0.8627450980392157)
WHITESMOKE = (0.9607843137254902, 0.9607843137254902, 0.9607843137254902)
W = (1.0, 1.0, 1.0)
WHITE = (1.0, 1.0, 1.0)
XKCD_WHITE = (1.0, 1.0, 1.0)
SNOW = (1.0, 0.9803921568627451, 0.9803921568627451)
ROSYBROWN = (0.7372549019607844, 0.5607843137254902, 0.5607843137254902)
LIGHTCORAL = (0.9411764705882353, 0.5019607843137255, 0.5019607843137255)
INDIANRED = (0.803921568627451, 0.3607843137254902, 0.3607843137254902)
XKCD_DULL_RED = (0.7333333333333333, 0.24705882352941178, 0.24705882352941178)
BROWN = (0.6470588235294118, 0.16470588235294117, 0.16470588235294117)
FIREBRICK = (0.6980392156862745, 0.13333333333333333, 0.13333333333333333)
XKCD_DRIED_BLOOD = (0.29411764705882354, 0.00392156862745098, 0.00392156862745098)
MAROON = (0.5019607843137255, 0.0, 0.0)
XKCD_DARK_RED = (0.5176470588235295, 0.0, 0.0)
DARKRED = (0.5450980392156862, 0.0, 0.0)
XKCD_RED = (0.8980392156862745, 0.0, 0.0)
R = (1.0, 0.0, 0.0)
RED = (1.0, 0.0, 0.0)
XKCD_DEEP_RED = (0.6039215686274509, 0.00784313725490196, 0.0)
XKCD_MAHOGANY = (0.2901960784313726, 0.00392156862745098, 0.0)
XKCD_PASTEL_RED = (0.8588235294117647, 0.34509803921568627, 0.33725490196078434)
XKCD_REDDISH = (0.7686274509803922, 0.25882352941176473, 0.25098039215686274)
XKCD_GRAPEFRUIT = (0.9921568627450981, 0.34901960784313724, 0.33725490196078434)
XKCD_DEEP_BROWN = (0.2549019607843137, 0.00784313725490196, 0.0)
XKCD_DARK_CORAL = (0.8117647058823529, 0.3215686274509804, 0.3058823529411765)
XKCD_PALE_RED = (0.8509803921568627, 0.32941176470588235, 0.30196078431372547)
XKCD_CORAL = (0.9882352941176471, 0.35294117647058826, 0.3137254901960784)
XKCD_DARK_SALMON = (0.7843137254901961, 0.35294117647058826, 0.3254901960784314)
XKCD_BROWNISH_PINK = (0.7607843137254902, 0.49411764705882355, 0.4745098039215686)
XKCD_VERY_DARK_BROWN = (0.11372549019607843, 0.00784313725490196, 0.0)
XKCD_INDIAN_RED = (0.5215686274509804, 0.054901960784313725, 0.01568627450980392)
XKCD_SALMON = (1.0, 0.4745098039215686, 0.4235294117647059)
XKCD_PINKISH_GRAY = (0.7843137254901961, 0.6745098039215687, 0.6627450980392157)
XKCD_PINKISH_GREY = (0.7843137254901961, 0.6745098039215687, 0.6627450980392157)
MISTYROSE = (1.0, 0.8941176470588236, 0.8823529411764706)
SALMON = (0.9803921568627451, 0.5019607843137255, 0.4470588235294118)
XKCD_REDDY_BROWN = (0.43137254901960786, 0.06274509803921569, 0.0196078431372549)
XKCD_REDDISH_GRAY = (0.6, 0.4588235294117647, 0.4392156862745098)
XKCD_REDDISH_GREY = (0.6, 0.4588235294117647, 0.4392156862745098)
XKCD_BRICK_RED = (0.5607843137254902, 0.0784313725490196, 0.00784313725490196)
XKCD_TOMATO = (0.9372549019607843, 0.25098039215686274, 0.14901960784313725)
XKCD_PEACHY_PINK = (1.0, 0.6039215686274509, 0.5411764705882353)
XKCD_ORANGEY_RED = (0.9803921568627451, 0.25882352941176473, 0.1411764705882353)
XKCD_BRICK = (0.6274509803921569, 0.21176470588235294, 0.13725490196078433)
TOMATO = (1.0, 0.38823529411764707, 0.2784313725490196)
XKCD_VERY_LIGHT_PINK = (1.0, 0.9568627450980393, 0.9490196078431372)
XKCD_BROWNISH_RED = (0.6196078431372549, 0.21176470588235294, 0.13725490196078433)
XKCD_ORANGE_RED = (0.9921568627450981, 0.2549019607843137, 0.11764705882352941)
XKCD_BLUSH = (0.9490196078431372, 0.6196078431372549, 0.5568627450980392)
XKCD_VERMILLION = (0.9568627450980393, 0.19607843137254902, 0.047058823529411764)
XKCD_ORANGE_PINK = (1.0, 0.43529411764705883, 0.3215686274509804)
TAB_BROWN = (0.5490196078431373, 0.33725490196078434, 0.29411764705882354)
XKCD_TOMATO_RED = (0.9254901960784314, 0.17647058823529413, 0.00392156862745098)
XKCD_BURNT_RED = (0.6235294117647059, 0.13725490196078433, 0.0196078431372549)
XKCD_REDDISH_ORANGE = (0.9725490196078431, 0.2823529411764706, 0.10980392156862745)
XKCD_ORANGISH_RED = (0.9568627450980393, 0.21176470588235294, 0.0196078431372549)
XKCD_RED_BROWN = (0.5450980392156862, 0.1803921568627451, 0.08627450980392157)
XKCD_LIGHT_SALMON = (0.996078431372549, 0.6627450980392157, 0.5764705882352941)
XKCD_MELON = (1.0, 0.47058823529411764, 0.3333333333333333)
XKCD_RUSTY_RED = (0.6862745098039216, 0.1843137254901961, 0.050980392156862744)
XKCD_RUST_RED = (0.6666666666666666, 0.15294117647058825, 0.01568627450980392)
XKCD_PINKISH_ORANGE = (1.0, 0.4470588235294118, 0.2980392156862745)
XKCD_PINKISH_BROWN = (0.6941176470588235, 0.4470588235294118, 0.3803921568627451)
XKCD_ORANGERED = (0.996078431372549, 0.25882352941176473, 0.058823529411764705)
XKCD_RED_ORANGE = (0.9921568627450981, 0.23529411764705882, 0.023529411764705882)
XKCD_PALE_SALMON = (1.0, 0.6941176470588235, 0.6039215686274509)
DARKSALMON = (0.9137254901960784, 0.5882352941176471, 0.47843137254901963)
XKCD_CLAY = (0.7137254901960784, 0.41568627450980394, 0.3137254901960784)
XKCD_DARK_PEACH = (0.8705882352941177, 0.49411764705882355, 0.36470588235294116)
CORAL = (1.0, 0.4980392156862745, 0.3137254901960784)
XKCD_BROWN_RED = (0.5725490196078431, 0.16862745098039217, 0.0196078431372549)
XKCD_TERRACOTTA = (0.792156862745098, 0.4, 0.2549019607843137)
ORANGERED = (1.0, 0.27058823529411763, 0.0)
XKCD_TERRACOTA = (0.796078431372549, 0.40784313725490196, 0.2627450980392157)
XKCD_REDDISH_BROWN = (0.4980392156862745, 0.16862745098039217, 0.0392156862745098)
LIGHTSALMON = (1.0, 0.6274509803921569, 0.47843137254901963)
XKCD_BLOOD_ORANGE = (0.996078431372549, 0.29411764705882354, 0.011764705882352941)
XKCD_PINKISH_TAN = (0.8509803921568627, 0.6078431372549019, 0.5098039215686274)
XKCD_TERRA_COTTA = (0.788235294117647, 0.39215686274509803, 0.23137254901960785)
XKCD_AUBURN = (0.6039215686274509, 0.18823529411764706, 0.00392156862745098)
XKCD_ADOBE = (0.7411764705882353, 0.4235294117647059, 0.2823529411764706)
XKCD_ORANGISH = (0.9882352941176471, 0.5098039215686274, 0.2901960784313726)
XKCD_WARM_GRAY = (0.592156862745098, 0.5411764705882353, 0.5176470588235295)
XKCD_WARM_GREY = (0.592156862745098, 0.5411764705882353, 0.5176470588235295)
XKCD_BROWNISH = (0.611764705882353, 0.42745098039215684, 0.3411764705882353)
XKCD_RUST = (0.6588235294117647, 0.23529411764705882, 0.03529411764705882)
SIENNA = (0.6274509803921569, 0.3215686274509804, 0.17647058823529413)
XKCD_RUSSET = (0.6313725490196078, 0.2235294117647059, 0.0196078431372549)
XKCD_CHESTNUT = (0.4549019607843137, 0.1568627450980392, 0.00784313725490196)
XKCD_RUST_BROWN = (0.5450980392156862, 0.19215686274509805, 0.011764705882352941)
XKCD_DEEP_ORANGE = (0.8627450980392157, 0.30196078431372547, 0.00392156862745098)
XKCD_BRICK_ORANGE = (0.7568627450980392, 0.2901960784313726, 0.03529411764705882)
XKCD_BRIGHT_ORANGE = (1.0, 0.3568627450980392, 0.0)
XKCD_BURNT_UMBER = (0.6274509803921569, 0.27058823529411763, 0.054901960784313725)
XKCD_ORANGEISH = (0.9921568627450981, 0.5529411764705883, 0.28627450980392155)
XKCD_CHOCOLATE_BROWN = (0.2549019607843137, | |
# <gh_stars>0  (repository-scaffolding artifact, kept as a comment)
import os
import sys
import logging
import copy
import math
from abc import ABCMeta, abstractmethod
import mne
import numpy as np
# from scipy.fftpack import rfft, rfftfreq
from scipy.signal import welch, decimate
from scipy.signal import decimate, welch
from cycler import cycler
import matplotlib
from matplotlib import pyplot
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
from PySide6.QtCore import Qt
from PySide6 import QtWidgets
from PySide6.QtUiTools import QUiLoader
from PySide6.QtWidgets import QSpacerItem, QSizePolicy
from gcpds.filters import frequency as flt
from gcpds.filters import frequency as flt
from bci_framework.framework.dialogs import Dialogs
# from bci_framework.extensions.data_analysis.utils import thread_this, subprocess_this
from PySide6.QtGui import QCursor
from PySide6.QtWidgets import QApplication
from PySide6.QtCore import Qt
# Set logger
# Silence mne's chatty logger and matplotlib's font-manager scan; keep the
# root logger at WARNING and label it for this analysis module.
logger = logging.getLogger("mne")
logger.setLevel(logging.CRITICAL)
logging.getLogger('matplotlib.font_manager').disabled = True
logging.getLogger().setLevel(logging.WARNING)
logging.root.name = "TimelockAnalysis"
# Use a dark matplotlib style unless a light Qt-Material theme was requested
# on the command line or via the environment.
if ('light' in sys.argv) or ('light' in os.environ.get('QTMATERIAL_THEME', '')):
    pass
else:
    pyplot.style.use('dark_background')
try:
    # 16-step 'cool' colour cycle plus dpi/font defaults for embedded figures.
    q = matplotlib.cm.get_cmap('cool')
    matplotlib.rcParams['axes.prop_cycle'] = cycler(
        color=[q(m) for m in np.linspace(0, 1, 16)])
    matplotlib.rcParams['figure.dpi'] = 70
    matplotlib.rcParams['font.family'] = 'monospace'
    matplotlib.rcParams['font.size'] = 15
    matplotlib.rcParams['axes.titlecolor'] = '#000000'
    matplotlib.rcParams['xtick.color'] = '#000000'
    matplotlib.rcParams['ytick.color'] = '#000000'
    # matplotlib.rcParams['legend.facecolor'] = 'red'
except:
    # 'rcParams' object does not support item assignment
    pass
# Default kwargs applied to every figure legend created by this module.
LEGEND_KWARGS = {'labelcolor': '#000000',
                 'fontsize': 12,
                 }
# ----------------------------------------------------------------------
def wait_for_it(fn):
    """Decorator: show a busy (wait) cursor while `fn` runs.

    Fixes over the previous version: the cursor restore now sits in a
    `finally` so it cannot be skipped; `fn`'s return value is propagated
    instead of discarded (callers previously always got None, so this is
    backward-compatible); `functools.wraps` preserves the wrapped
    function's metadata. Exceptions are still logged rather than re-raised
    so a failing handler cannot crash the GUI.
    """
    import functools

    @functools.wraps(fn)
    def wrap(*args, **kwargs):
        QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            logging.warning(e)
        finally:
            QApplication.restoreOverrideCursor()
    return wrap
########################################################################
class Canvas(FigureCanvasQTAgg):
    """Matplotlib Qt canvas that applies this module's axis styling."""

    # ------------------------------------------------------------------
    def __init__(self, *args, **kwargs):
        """Create the backing Figure (forwarding all arguments to it),
        style it, and hand it to the Qt canvas base class."""
        self.figure = Figure(*args, **kwargs)
        self.configure()
        super().__init__(self.figure)

    # ------------------------------------------------------------------
    def configure(self):
        """Apply the standard tick and axis-label sizes to every axes
        currently on the figure."""
        for axes in self.figure.axes:
            axes.tick_params(axis='x', labelsize=12)
            axes.tick_params(axis='y', labelsize=12)
            axes.xaxis.label.set_size(14)
            axes.yaxis.label.set_size(14)
########################################################################
class TimelockWidget(metaclass=ABCMeta):
""""""
# ----------------------------------------------------------------------
def __init__(self, height, *args, **kwargs):
    """Constructor.

    height -- minimum widget height in pixels; falsy leaves it unset
    *args/**kwargs -- forwarded to the matplotlib Figure via Canvas
    """
    self.title = ''
    # Per-area stretch factors; filled by the add_* helpers and applied
    # later by _add_spacers().
    self.bottom_stretch = []
    self.bottom2_stretch = []
    self.top_stretch = []
    self.top2_stretch = []
    self.right_stretch = []
    self.left_stretch = []
    self._pipeline_output = None
    # Load the shared Qt Designer layout for timelock widgets.
    ui = os.path.realpath(os.path.join(
        os.environ['BCISTREAM_ROOT'], 'framework', 'qtgui', 'locktime_widget.ui'))
    self.widget = QUiLoader().load(ui)
    if height:
        self.widget.setMinimumHeight(height)
    # Embed the matplotlib canvas in the widget's grid.
    self.canvas = Canvas(*args, **kwargs)
    self.figure = self.canvas.figure
    self.widget.gridLayout.addWidget(self.canvas)
# ----------------------------------------------------------------------
def draw(self):
    """Re-apply the canvas styling, then repaint it."""
    self.canvas.configure()
    self.canvas.draw()
# ----------------------------------------------------------------------
def _add_spacers(self):
    """Apply the recorded stretch factors to every layout area.

    Replaces six copy-pasted loops with one data-driven loop; the order of
    setStretch calls within each area is preserved.
    """
    for area in ('bottom', 'top', 'bottom2', 'top2', 'right', 'left'):
        layout = getattr(self.widget, f'{area}Layout')
        for index, factor in enumerate(getattr(self, f'{area}_stretch')):
            layout.setStretch(index, factor)
# ----------------------------------------------------------------------
def add_spacer(self, area='top', fixed=None, stretch=0):
    """Append a spacer to one layout area.

    area    -- 'left'/'right' (vertical spacers) or
               'top'/'bottom'/'top2'/'bottom2' (horizontal spacers)
    fixed   -- fixed spacer length in pixels; falsy means an expanding
               spacer of length 20000
    stretch -- optional stretch factor recorded for _add_spacers()
    """
    spacer = None
    if area in ['left', 'right']:
        # Vertical: width fixed at 20, height is the spacer length.
        if fixed:
            spacer = QSpacerItem(20, fixed,
                                 QSizePolicy.Minimum, QSizePolicy.Minimum)
        else:
            spacer = QSpacerItem(20, 20000,
                                 QSizePolicy.Minimum, QSizePolicy.Expanding)
    elif area in ['top', 'bottom', 'top2', 'bottom2']:
        # Horizontal: height fixed at 20, width is the spacer length.
        if fixed:
            spacer = QSpacerItem(fixed, 20,
                                 QSizePolicy.Minimum, QSizePolicy.Minimum)
        else:
            spacer = QSpacerItem(20000, 20,
                                 QSizePolicy.Expanding, QSizePolicy.Minimum)
    if spacer is not None:
        getattr(self.widget, f'{area}Layout').addItem(spacer)
    if stretch:
        getattr(self, f'{area}_stretch').append(stretch)
# ----------------------------------------------------------------------
def clear_layout(self, layout):
    """Recursively delete every widget, spacer and sub-layout in `layout`.

    The manual index bookkeeping matters: removing a spacer shifts the
    remaining items left, so `i` is decremented to revisit that slot on
    the next pass.
    """
    i = -1
    for _ in range(layout.count()):
        i = i + 1
        b = layout.itemAt(i)
        if b is None:
            continue
        if w := b.widget():  # widget: schedule Qt-side deletion
            w.deleteLater()
        if b.spacerItem():  # spacer: remove and re-check this index
            layout.removeItem(b)
            i = i - 1
        if l := b.layout():  # nested layout: clear it recursively
            self.clear_layout(l)
# ----------------------------------------------------------------------
def clear_widgets(self, areas=None):
    """Remove all widgets and spacers from the given layout areas.

    areas -- iterable of area names; defaults to all six areas.

    Fix: the previous signature used a mutable list as the default
    argument (shared across all calls); a None sentinel avoids that
    pitfall while keeping every call site working unchanged.
    """
    if areas is None:
        areas = ('left', 'right', 'top', 'bottom', 'top2', 'bottom2')
    for area in areas:
        layout = getattr(self.widget, f'{area}Layout')
        self.clear_layout(layout)
# ----------------------------------------------------------------------
def add_textarea(self, content='', area='top', stretch=0):
    """Add a read-only text area to a layout area and return it.

    content -- initial text
    area    -- target layout area name
    stretch -- stretch factor recorded for _add_spacers()
    """
    box = QtWidgets.QTextEdit(content)
    box.setProperty('class', 'clear')
    box.setMinimumWidth(500)
    box.setReadOnly(True)
    getattr(self.widget, f'{area}Layout').addWidget(box)
    getattr(self, f'{area}_stretch').append(stretch)
    return box
# ----------------------------------------------------------------------
def add_button(self, label, callback=None, area='top', stretch=0):
    """Add a push button to a layout area and return it.

    label    -- button text
    callback -- optional slot wired to the button's clicked signal
    area     -- target layout area name
    stretch  -- stretch factor recorded for _add_spacers()
    """
    btn = QtWidgets.QPushButton(label)
    if callback:
        btn.clicked.connect(callback)
    getattr(self.widget, f'{area}Layout').addWidget(btn)
    getattr(self, f'{area}_stretch').append(stretch)
    return btn
# ----------------------------------------------------------------------
def add_radios(self, group_name, radios, cols=None, rows=None, callback=None, area='top', stretch=1):
    """Add a titled group of radio buttons, `cols` per row (or computed
    from `rows`). The first radio starts checked. `callback`, when given,
    is invoked as callback(group_name, radio_label) on click.

    NOTE(review): unlike add_checkbox()/add_channels(), this method does
    not return the created buttons -- confirm whether it should.
    """
    group = QtWidgets.QGroupBox(group_name)
    group.setProperty('class', 'fill_background')
    vbox = QtWidgets.QVBoxLayout()
    group.setLayout(vbox)
    if cols is None:
        cols = len(radios)
    if rows:
        cols = math.ceil(len(radios) / rows)
    for i, radio in enumerate(radios):
        if (i % cols) == 0:
            # Start a new row every `cols` buttons.
            hbox = QtWidgets.QHBoxLayout()
            vbox.addLayout(hbox)
        r = QtWidgets.QRadioButton()
        r.setText(radio)
        r.setChecked(i == 0)  # first option selected by default

        def dec(*args):
            # Freeze (group_name, radio) at connect time; the returned wrap
            # receives Qt's clicked(bool) argument as `fn` and ignores it.
            def wrap(fn):
                return callback(*args)
            return wrap
        if callback:
            r.clicked.connect(dec(group_name, radio))
        hbox.addWidget(r)
    getattr(self.widget, f'{area}Layout').addWidget(group)
    getattr(self, f'{area}_stretch').append(stretch)
# ----------------------------------------------------------------------
    def add_checkbox(self, group_name, checkboxes, cols=None, rows=None, callback=None, area='top', stretch=1):
        """Add a titled group of check boxes and return them as a list.

        Lays out `cols` check boxes per row (defaults to a single row);
        when `rows` is given the column count is derived from it instead.
        `callback`, if given, is invoked with (group_name, checkbox_label).
        """
        group = QtWidgets.QGroupBox(group_name)
        group.setProperty('class', 'fill_background')
        vbox = QtWidgets.QVBoxLayout()
        group.setLayout(vbox)
        if cols is None:
            cols = len(checkboxes)
        if rows:
            cols = math.ceil(len(checkboxes) / rows)
        list_radios = []
        for i, checkbox in enumerate(checkboxes):
            if (i % cols) == 0:
                # Start a new visual row every `cols` boxes.
                hbox = QtWidgets.QHBoxLayout()
                vbox.addLayout(hbox)
            r = QtWidgets.QCheckBox()
            r.setText(checkbox)
            # NOTE(review): only the first box starts checked, mirroring the
            # radio-button variant — confirm this is intended for independent
            # check boxes.
            r.setChecked(i == 0)
            list_radios.append(r)
            # Factory that freezes the current (group, label) values; the
            # inner function receives (and ignores) the signal's argument.
            def dec(*args):
                def wrap(fn):
                    return callback(*args)
                return wrap
            if callback:
                r.clicked.connect(dec(group_name, checkbox))
            hbox.addWidget(r)
        getattr(self.widget, f'{area}Layout').addWidget(group)
        getattr(self, f'{area}_stretch').append(stretch)
        return list_radios
# ----------------------------------------------------------------------
    def add_channels(self, group_name, channels, callback=None, area='top', stretch=1):
        """Add a titled group of per-channel check boxes, split into columns.

        Channels whose name ends in an odd digit, an even digit, or no digit
        are routed to three separate columns. All boxes start checked.
        Returns the created check boxes as a list.
        """
        group = QtWidgets.QGroupBox(group_name)
        group.setProperty('class', 'fill_background')
        vbox = QtWidgets.QHBoxLayout()
        group.setLayout(vbox)
        # Three columns, added left to right: vbox_even, vbox_z, vbox_odd.
        vbox_odd = QtWidgets.QVBoxLayout()
        vbox_z = QtWidgets.QVBoxLayout()
        vbox_even = QtWidgets.QVBoxLayout()
        vbox.addLayout(vbox_even)
        vbox.addLayout(vbox_z)
        vbox.addLayout(vbox_odd)
        list_radios = []
        for channel in channels:
            r = QtWidgets.QCheckBox()
            r.setText(channel)
            r.setChecked(True)
            list_radios.append(r)
            # NOTE(review): odd-numbered channels go into `vbox_even` and
            # even-numbered ones into `vbox_odd` — the variable names appear
            # swapped relative to the inline comments; confirm which column
            # each side is meant to occupy.
            if channel[-1].isnumeric() and int(channel[-1]) % 2 != 0:  # odd
                vbox_even.addWidget(r)
            elif channel[-1].isnumeric() and int(channel[-1]) % 2 == 0:  # even
                vbox_odd.addWidget(r)
            else:
                vbox_z.addWidget(r)
            # Factory freezing the current (group, channel) values; the inner
            # function receives (and ignores) the signal's argument.
            def dec(*args):
                def wrap(fn):
                    return callback(*args)
                return wrap
            if callback:
                r.clicked.connect(dec(group_name, channel))
        getattr(self.widget, f'{area}Layout').addWidget(group)
        getattr(self, f'{area}_stretch').append(stretch)
        return list_radios
# ----------------------------------------------------------------------
def add_scroll(self, callback=None, area='bottom', stretch=0):
""""""
scroll = QtWidgets.QScrollBar()
scroll.setOrientation(Qt.Horizontal)
# scroll.setMaximum(255)
scroll.sliderMoved.connect(callback)
scroll.setProperty('class', 'big')
# scroll.setPageStep(1000)
getattr(self.widget, f'{area}Layout').addWidget(scroll)
getattr(self, f'{area}_stretch').append(stretch)
return scroll
# ----------------------------------------------------------------------
def add_slider(self, callback=None, area='bottom', stretch=0):
""""""
slider = QtWidgets.QSlider()
slider.setOrientation(Qt.Horizontal)
slider.setMaximum(0)
slider.setMaximum(500)
slider.setValue(500)
slider.valueChanged.connect(callback)
getattr(self.widget, f'{area}Layout').addWidget(slider)
getattr(self, f'{area}_stretch').append(stretch)
return slider
# ----------------------------------------------------------------------
def add_spin(self, label, value, decimals=1, step=0.1, prefix='', suffix='', min_=0, max_=999, callback=None, area='top', stretch=0):
""""""
spin = QtWidgets.QDoubleSpinBox()
spin.setDecimals(decimals)
spin.setSingleStep(step)
spin.setMinimum(min_)
spin.setMaximum(max_)
spin.setValue(value)
if callback:
spin.valueChanged.connect(callback)
if prefix:
spin.setPrefix(f' {prefix}')
if suffix:
spin.setSuffix(f' {suffix}')
layout = QtWidgets.QHBoxLayout()
widget = QtWidgets.QWidget()
widget.setLayout(layout)
if label:
layout.addWidget(QtWidgets.QLabel(label))
layout.addWidget(spin)
getattr(self.widget, f'{area}Layout').addWidget(widget)
getattr(self, f'{area}_stretch').append(stretch)
layout.setStretch(0, 0)
layout.setStretch(1, 1)
return spin
# ----------------------------------------------------------------------
def add_combobox(self, label, items, editable=False, callback=None, area='top', stretch=0):
""""""
combo = QtWidgets.QComboBox()
combo.addItems(items)
combo.activated.connect(callback)
combo.setEditable(editable)
combo.setMinimumWidth(200)
layout = QtWidgets.QHBoxLayout()
widget = QtWidgets.QWidget()
widget.setLayout(layout)
if label:
layout.addWidget(QtWidgets.QLabel(label))
layout.addWidget(combo)
getattr(self.widget, f'{area}Layout').addWidget(widget)
getattr(self, f'{area}_stretch').append(stretch)
layout.setStretch(0, 0)
layout.setStretch(1, 1)
return combo
# ----------------------------------------------------------------------
# @abstractmethod
@property
def pipeline_input(self):
""""""
if hasattr(self, '_previous_pipeline'):
return self._previous_pipeline.pipeline_output
elif hasattr(self, '_pipeline_input'):
return self._pipeline_input
else:
logging.warning("'pipeline_input' does not exist yet.")
# ----------------------------------------------------------------------
# @abstractmethod
    @pipeline_input.setter
    def pipeline_input(self, input_):
        """Explicitly assign the stage input (used when no upstream stage is chained)."""
        self._pipeline_input = input_
# ----------------------------------------------------------------------
# @abstractmethod
@property
def pipeline_output(self):
""""""
if hasattr(self, '_pipeline_output'):
return self._pipeline_output
# ----------------------------------------------------------------------
# @abstractmethod
@pipeline_output.setter
def pipeline_output(self, output_):
""""""
self._pipeline_output = output_
try:
self.pipeline_output._original_markers = self.pipeline_output.markers
except:
pass
self._pipeline_propagate()
# ----------------------------------------------------------------------
# @abstractmethod
@property
def pipeline_tunned(self):
""""""
return getattr(self, '_pipeline_tunned', False)
# ----------------------------------------------------------------------
# @abstractmethod
    @pipeline_tunned.setter
    def pipeline_tunned(self, value):
        """Mark this stage as tuned (or not); gates downstream propagation."""
        self._pipeline_tunned = value
# ----------------------------------------------------------------------
    def next_pipeline(self, pipe):
        """Register the downstream stage that consumes this stage's output."""
        self._next_pipeline = pipe
        # self._next_pipeline._pipeline_input = self._pipeline_output
# ----------------------------------------------------------------------
    def previous_pipeline(self, pipe):
        """Register the upstream stage whose output feeds this stage."""
        self._previous_pipeline = pipe
# ----------------------------------------------------------------------
    def set_pipeline_input(self, in_):
        """Method form of the `pipeline_input` setter; assigns the stage input."""
        self._pipeline_input = in_
# ----------------------------------------------------------------------
# @abstractmethod
def _pipeline_propagate(self):
""""""
if hasattr(self, '_next_pipeline'):
if not self._next_pipeline.pipeline_tunned:
return
if next_pipeline := getattr(self, '_next_pipeline', False):
next_pipeline.fit()
# ----------------------------------------------------------------------
    @abstractmethod
    def fit(self):
        """Process ``pipeline_input`` and set ``pipeline_output``; subclasses must implement."""
########################################################################
class TimelockSeries(TimelockWidget):
    """Scrollable time-series (EEG) viewer.

    ``ax1`` shows a zoomed window of the signal; ``ax2`` shows the whole
    recording with the currently visible window highlighted by a shaded span.
    """
    # ----------------------------------------------------------------------
    def __init__(self, height, *args, **kwargs):
        """Constructor"""
        super().__init__(height, *args, **kwargs)
        # Appearance of the shaded "current window" span drawn on ax2.
        self.fill_opacity = 0.2
        self.fill_color = os.environ.get(
            'QTMATERIAL_PRIMARYCOLOR', '#ff0000')
    # ----------------------------------------------------------------------
    def move_plot(self, value):
        """Scroll-bar handler: shift the zoom window; `value` is in milliseconds."""
        self.ax1.set_xlim(value / 1000, (value / 1000 + self.window_value))
        # Redraw the window highlight on the overview axis.
        self.ax2.collections.clear()
        self.ax2.fill_between([value / 1000, (value / 1000 + self.window_value)],
                              *self.ax1.get_ylim(), color=self.fill_color, alpha=self.fill_opacity)
        self.draw()
    # ----------------------------------------------------------------------
    def change_window(self):
        """Combo-box handler: resize the zoom window and rescale the scroll bar."""
        self.window_value = self._get_seconds_from_human(
            self.combobox.currentText())
        eeg = self.pipeline_output.eeg
        timestamp = self.pipeline_output.timestamp
        # Rebuild a uniform time axis (seconds) spanning the whole recording.
        timestamp = np.linspace(
            0, timestamp[0][-1], eeg.shape[1], endpoint=True) / 1000
        # The scroll bar operates in milliseconds.
        self.scroll.setMaximum((timestamp[-1] - self.window_value) * 1000)
        self.scroll.setMinimum(0)
        self.scroll.setPageStep(self.window_value * 1000)
        self.ax1.set_xlim(self.scroll.value() / 1000,
                          (self.scroll.value() / 1000 + self.window_value))
        self.ax2.collections.clear()
        # NOTE(review): here the span end is (value + window_value) / 1000,
        # i.e. window_value is treated as milliseconds, while move_plot and
        # the set_xlim above treat it as seconds — confirm which is intended.
        self.ax2.fill_between([self.scroll.value() / 1000, (self.scroll.value() + self.window_value) / 1000],
                              *self.ax1.get_ylim(),
                              color=self.fill_color,
                              alpha=self.fill_opacity)
        self.draw()
    # ----------------------------------------------------------------------
    def _get_seconds_from_human(self, human):
        """Convert a human-readable duration (e.g. '2 minute') to seconds.

        Each unit word is replaced by its factor in seconds and the numeric
        tokens are multiplied together.

        NOTE(review): plural units other than 'milliseconds' (e.g. 'seconds')
        would leave a trailing 's' and make float() fail — confirm that the
        combo-box options use singular unit names.
        """
        value = human.replace('milliseconds', '0.001')
        value = value.replace('second', '1')
        value = value.replace('minute', '60')
        value = value.replace('hour', '60 60')
        return np.prod(list(map(float, value.split())))
    # ----------------------------------------------------------------------
    def set_data(self, timestamp, eeg, labels, ylabel='', xlabel='', legend=True):
        """Plot every channel of `eeg` against `timestamp` on both axes."""
        self.ax1.clear()
        self.ax2.clear()
        for i, ch in enumerate(eeg):
            self.ax1.plot(timestamp, eeg[i], label=labels[i])
            # Overview axis gets a faded copy of each channel.
            self.ax2.plot(timestamp, eeg[i], alpha=0.5)
        self.ax1.grid(True, axis='x')
        if legend:
            self.ax1.legend(loc='upper center', ncol=8,
                            bbox_to_anchor=(0.5, 1.4), **LEGEND_KWARGS)
        self.ax1.set_xlim(0, self.window_value)
        self.ax2.grid(True, axis='x')
        self.ax2.set_xlim(0, timestamp[-1])
        # Highlight the initial window on the overview axis.
        self.ax2.fill_between([0, self.window_value], *self.ax1.get_ylim(),
                              color=self.fill_color, alpha=self.fill_opacity)
        self.scroll.setMaximum((timestamp[-1] - self.window_value) * 1000)
        self.scroll.setMinimum(0)
        self.ax1.set_ylabel(ylabel)
        self.ax2.set_xlabel(xlabel)
        self.draw()
    # ----------------------------------------------------------------------
    def set_window_width_options(self, options):
        """Create the scroll bar and window-width combo box.

        `options` are human-readable durations; the first one becomes the
        initial window width.
        """
        self.scroll = self.add_scroll(
            callback=self.move_plot, area='bottom', stretch=1)
        self.combobox = self.add_combobox('', options,
                                          callback=self.change_window,
                                          area='bottom',
                                          stretch=0)
        self.window_value = self._get_seconds_from_human(options[0])
########################################################################
class Filters(TimelockWidget):
""""""
# ----------------------------------------------------------------------
def __init__(self, height, *args, **kwargs):
"""Constructor"""
super().__init__(height, *args, **kwargs)
self.title = 'Filter EEG'
gs = self.figure.add_gridspec(1, 2)
| |
######################################################
#
# The Time Series Model based on SVD
#
######################################################
import copy
import numpy as np
import pandas as pd
from mssa.src.algorithms.svdWrapper import SVDWrapper as SVD
from mssa.src import tsUtils
from sklearn.metrics import r2_score
class SVDModel(object):
# seriesToPredictKey: (string) the time series of interest (key)
# kSingularValuesToKeep: (int) the number of singular values to retain
# N: (int) the number of rows of the matrix for each series
# M: (int) the number of columns for the matrix for each series
# probObservation: (float) the independent probability of observation of each entry in the matrix
# svdMethod: (string) the SVD method to use (optional)
# otherSeriesKeysArray: (array) an array of keys for other series which will be used to predict
# includePastDataOnly: (Boolean) defaults to True. If this is set to False,
# the time series in 'otherSeriesKeysArray' will include the latest data point.
# Note: the time series of interest (seriesToPredictKey) will never include
# the latest data-points for prediction
    def __init__(self, seriesToPredictKey, kSingularValuesToKeep, N, M,updated = True, probObservation=1.0, svdMethod='numpy', otherSeriesKeysArray=[],\
                 includePastDataOnly=True, start = 0, TimesUpdated = 0, TimesReconstructed =0, SSVT = False , no_ts = 1, forecast_model_score = None,forecast_model_score_test = None,\
                 imputation_model_score = None, norm_mean = [], norm_std = [], fill_in_missing = True):
        """Configure the model; see the class-level comment for parameter meanings."""
        self.seriesToPredictKey = seriesToPredictKey
        self.otherSeriesKeysArray = otherSeriesKeysArray
        self.includePastDataOnly = includePastDataOnly
        self.fill_in_missing = fill_in_missing
        # Matrix geometry: N rows x M columns per series.
        self.N = N
        self.M = M
        self.start = start
        self.TimesUpdated = TimesUpdated
        self.TimesReconstructed = TimesReconstructed
        # Cap the retained rank at what an (N-1) x M matrix can support.
        self.kSingularValues = kSingularValuesToKeep
        if kSingularValuesToKeep is not None:
            if self.kSingularValues> min(M,N-1):
                self.kSingularValues = min(M,N-1)
        self.svdMethod = svdMethod
        self.norm_mean = norm_mean
        self.norm_std = norm_std
        # SVD factors of the full data matrix (populated by fit()).
        self.Uk = None
        self.Vk = None
        self.sk = None
        self.matrix = None
        self.lastRowObservations = None
        # SVD factors of the matrix without its last row (for the weights).
        self.Ukw = None
        self.Vkw = None
        self.skw = None
        self.p = probObservation
        if self.fill_in_missing:
            # With explicit imputation every entry counts as observed.
            self.p = 1.0
        self.weights = None
        self.SSVT = SSVT
        self.soft_threshold = 0
        self.updated = updated
        self.no_ts = no_ts
        # Default per-series scores: zeros (train/imputation), NaN (test).
        if forecast_model_score is None:
            forecast_model_score = np.zeros(no_ts)
        if imputation_model_score is None:
            imputation_model_score = np.zeros(no_ts)
        if forecast_model_score_test is None:
            forecast_model_score_test = np.full(no_ts,np.nan)
        self.forecast_model_score = forecast_model_score
        self.forecast_model_score_test = forecast_model_score_test
        self.imputation_model_score = imputation_model_score
        # Every per-series array must match the number of time series.
        # NOTE(review): norm_mean/norm_std default to [] which fails these
        # asserts for no_ts >= 1 — confirm callers always supply them.
        assert len(self.imputation_model_score) == no_ts
        assert len(self.forecast_model_score) == no_ts
        assert len(self.forecast_model_score_test) == no_ts
        assert len(self.norm_std) == no_ts
        assert len(self.norm_mean) == no_ts
# run a least-squares regression of the last row of self.matrix and all other rows of self.matrix
# sets and returns the weights
# DO NOT call directly
    def _computeWeights(self):
        """Recompute the regression weights predicting the last row.

        Builds the feature matrix (all rows except the prediction target,
        optionally dropping the newest point of each series), takes its
        truncated SVD, and solves least-squares via the pseudo-inverse.
        DO NOT call directly; used by fit() and updateSVD().
        """
        ### This is now the same as ALS
        ## this is an expensive step because we are computing the SVD all over again
        ## however, currently, there is no way around it since this is NOT the same matrix as the full
        ## self.matrix, i.e. we have fewer (or just one less) rows
        if (self.lastRowObservations is None):
            raise Exception('Do not call _computeWeights() directly. It should only be accessed via class methods.')
        # need to decide how to produce weights based on whether the N'th data points are to be included for the other time series or not
        # for the seriesToPredictKey we only look at the past. For others, we could be looking at the current data point in time as well.
        matrixDim1 = (self.N * len(self.otherSeriesKeysArray)) + self.N-1
        matrixDim2 = np.shape(self.matrix)[1]
        eachTSRows = self.N
        if (self.includePastDataOnly == False):
            # Keep the current point of the other series: simply slice off
            # the last (target) row.
            newMatrix = self.matrix[0:matrixDim1, :]
        else:
            # Past-only: drop the newest row from every series block.
            matrixDim1 = ((self.N - 1) * len(self.otherSeriesKeysArray)) + self.N-1
            eachTSRows = self.N - 1
            newMatrix = np.zeros([matrixDim1, matrixDim2])
            rowIndex = 0
            matrixInd = 0
            while (rowIndex < matrixDim1):
                newMatrix[rowIndex: rowIndex + eachTSRows] = self.matrix[matrixInd: matrixInd +eachTSRows]
                rowIndex += eachTSRows
                matrixInd += self.N
        svdMod = SVD(newMatrix, method='numpy')
        (self.skw, self.Ukw, self.Vkw) = svdMod.reconstructMatrix(self.kSingularValues, returnMatrix=False)
        soft_threshold = 0
        if self.SSVT: soft_threshold = svdMod.next_sigma
        matrix = tsUtils.matrixFromSVD(self.skw, self.Ukw, self.Vkw, soft_threshold=soft_threshold, probability = self.p)
        newMatrixPInv = tsUtils.pInverseMatrixFromSVD(self.skw, self.Ukw, self.Vkw,soft_threshold=soft_threshold, probability = self.p)
        # Least-squares solution: weights = pinv(features)^T . target row.
        self.weights = np.dot(newMatrixPInv.T, self.lastRowObservations)
        # only compute r2 score if there are enough samples
        if len(self.lastRowObservations) >= 2*self.no_ts:
            for i in range(self.no_ts):
                self.forecast_model_score[i] = r2_score(self.lastRowObservations[i::self.no_ts]/self.p, np.dot(matrix[:,i::self.no_ts].T,self.weights))
# return the imputed matrix
def denoisedDF(self):
setAllKeys = set(self.otherSeriesKeysArray)
setAllKeys.add(self.seriesToPredictKey)
single_ts_rows = self.N
dataDict = {}
rowIndex = 0
for key in self.otherSeriesKeysArray:
dataDict.update({key: self.matrix[rowIndex*single_ts_rows: (rowIndex+1)*single_ts_rows, :].flatten('F')})
rowIndex += 1
dataDict.update({self.seriesToPredictKey: self.matrix[rowIndex*single_ts_rows: (rowIndex+1)*single_ts_rows, :].flatten('F')})
return pd.DataFrame(data=dataDict)
def denoisedTS(self, ind = None, range = True,return_ = True, ts = None):
if self.matrix is None:
self.matrix = tsUtils.matrixFromSVD(self.sk, self.Uk, self.Vk, self.soft_threshold,probability=self.p)
if not return_:
return
if ts is None:
NewColsDenoised = self.matrix.flatten('F')
else:
NewColsDenoised = self.matrix[:,ts::self.no_ts].flatten('F')
if ind is None:
return NewColsDenoised
if range:
assert len(ind) == 2
return NewColsDenoised[ind[0]:ind[1]]
else:
return NewColsDenoised[ind]
def _assignData(self, keyToSeriesDF):
setAllKeys = set(self.otherSeriesKeysArray)
setAllKeys.add(self.seriesToPredictKey)
if (len(set(keyToSeriesDF.columns.values).intersection(setAllKeys)) != len(setAllKeys)):
raise Exception('keyToSeriesDF does not contain ALL keys provided in the constructor.')
if (self.fill_in_missing == True):
keyToSeriesDF = keyToSeriesDF.fillna(method = 'ffill')
keyToSeriesDF = keyToSeriesDF.fillna(method = 'bfill')
else:
keyToSeriesDF = keyToSeriesDF.fillna(value = 0)
T = self.N * self.M
for key in setAllKeys:
if (len(keyToSeriesDF[key]) < T):
raise Exception('All series (columns) provided must have length >= %d' %T)
# initialize the matrix of interest
single_ts_rows = self.N
matrix_cols = self.M
matrix_rows = int(len(setAllKeys) * single_ts_rows)
self.matrix = np.zeros([matrix_rows, matrix_cols])
seriesIndex = 0
for key in self.otherSeriesKeysArray: # it is important to use the order of keys set in the model
self.matrix[seriesIndex*single_ts_rows: (seriesIndex+1)*single_ts_rows, :] = tsUtils.arrayToMatrix(keyToSeriesDF[key][-1*T:].values, single_ts_rows, matrix_cols)
seriesIndex += 1
# finally add the series of interest at the bottom
# tempMatrix = tsUtils.arrayToMatrix(keyToSeriesDF[self.seriesToPredictKey][-1*T:].values, self.N, matrix_cols)
self.matrix[seriesIndex*single_ts_rows: (seriesIndex+1)*single_ts_rows, :] = tsUtils.arrayToMatrix(keyToSeriesDF[self.seriesToPredictKey][-1*T:].values, single_ts_rows, matrix_cols)
# set the last row of observations
self.lastRowObservations = copy.deepcopy(self.matrix[-1, :])
# keyToSeriesDictionary: (Pandas dataframe) a key-value Series (time series)
# Note that the keys provided in the constructor MUST all be present
# The values must be all numpy arrays of floats.
# This function sets the "de-noised" and imputed data matrix which can be accessed by the .matrix property
    def fit(self, keyToSeriesDF):
        """Fit the model: impute/denoise the data and compute forecast weights.

        keyToSeriesDF: DataFrame containing every key given to the
        constructor (float arrays). Overwrites self.matrix with the denoised
        data, fills the per-series imputation R^2 scores, and refreshes the
        regression weights.
        """
        # assign data to class variables
        self._assignData(keyToSeriesDF)
        obs = self.matrix.flatten('F')
        obs_matrix = self.matrix.copy()
        # now produce a thresholded/de-noised matrix. this will over-write the original data matrix
        svdMod = SVD(self.matrix, method='numpy')
        (self.sk, self.Uk, self.Vk) = svdMod.reconstructMatrix(self.kSingularValues, returnMatrix=False)
        if self.kSingularValues is None:
            # No cap requested: keep every singular value returned.
            self.kSingularValues= len(self.sk)
        if self.SSVT: self.soft_threshold = svdMod.next_sigma
        # set weights
        self.matrix = tsUtils.matrixFromSVD(self.sk, self.Uk, self.Vk, self.soft_threshold,probability=self.p)
        # Per-series imputation quality: R^2 between observed and denoised.
        for i in range(self.no_ts):
            obs = obs_matrix[:,i::self.no_ts].flatten('F')
            self.imputation_model_score[i] = r2_score(obs,self.denoisedTS(ts = i))
        self._computeWeights()
def updateSVD(self,D, method = 'UP'):
assert (len(D) % self.N == 0)
if (self.fill_in_missing == True):
# impute with the least informative value (middle)
D = pd.DataFrame(D).fillna(method = 'ffill').values
D = pd.DataFrame(D).fillna(method = 'ffill').values
else: D[np.isnan(D)] = 0
D = D.reshape([self.N,int(len(D)/self.N)], order = 'F')
assert D.shape[0] == self.N
assert D.shape[1] <= D.shape[0]
if method == 'UP':
self.Uk, self.sk, self.Vk = tsUtils.updateSVD2(D, self.Uk, self.sk, self.Vk)
self.M = self.Vk.shape[0]
self.Ukw, self.skw, self.Vkw = tsUtils.updateSVD2(D[:-1,:], self.Ukw, self.skw, self.Vkw)
elif method == 'folding-in':
self.Uk, self.sk, self.Vk = tsUtils.updateSVD(D, self.Uk, self.sk ,self.Vk )
self.M = self.Vk.shape[0]
self.Ukw, self.skw, self.Vkw = tsUtils.updateSVD(D[:-1, :], self.Ukw, self.skw, self.Vkw)
# elif method == 'Full':
# raise ValueError
# self.matrix = np.concatenate((self.matrix,D),1)
# U, S, V = np.linalg.svd(self.matrix, full_matrices=False)
# self.sk = S[0:self.kSingularValues]
# self.Uk = U[:, 0:self.kSingularValues]
# self.Vk = V[0:self.kSingularValues,:]
# self.Vk = self.Vk.T
# self.M = self.Vk.shape[0]
else:
raise ValueError
self.matrix = tsUtils.matrixFromSVD(self.sk, self.Uk, self.Vk, self.soft_threshold,probability=self.p)
self.lastRowObservations = self.matrix[-1,:]
self.TimesUpdated +=1
newMatrixPInv = tsUtils.pInverseMatrixFromSVD(self.skw, self.Ukw, self.Vkw,soft_threshold=self.soft_threshold, probability=self.p)
self.weights = np.dot(newMatrixPInv.T, self.lastRowObservations.T)
# otherKeysToSeriesDFNew: (Pandas dataframe) needs to contain all keys provided in the model;
# If includePastDataOnly was set to True (default) in the model, then:
# each series/array MUST be of length >= self.N - 1
# If longer than self.N - 1, then the most recent self.N - 1 points will be used
# If includePastDataOnly was set to False in the model, then:
# all series/array except seriesToPredictKey MUST be of length >= self.N (i.e. includes the current),
# If longer than self.N, then the most recent self.N points will be used
#
# predictKeyToSeriesDFNew: (Pandas dataframe) needs to contain the seriesToPredictKey and self.N - 1 points past | |
and os.path.isdir(pose_dir):
shutil.rmtree(pose_dir)
os.makedirs(pose_dir, exist_ok=True)
return pose_dir
def parse_args():
    """Parse command-line options for the keypoint-inference script."""
    parser = argparse.ArgumentParser(description='Train keypoints network')
    # general
    parser.add_argument('--cfg', type=str, required=True)
    parser.add_argument('--videoFile', type=str, required=True)
    parser.add_argument('--outputDir', type=str, default='/output/')
    parser.add_argument('--inferenceFps', type=int, default=1)
    parser.add_argument('--visthre', type=float, default=0)
    parser.add_argument('opts',
                        help='Modify config options using the command-line',
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # The supporting codebase expects these attributes to exist (empty).
    for attr in ('modelDir', 'logDir', 'dataDir', 'prevModelDir'):
        setattr(args, attr, '')
    return args
def main():
# transformation
pose_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
args = parse_args()
update_config(cfg, args)
pose_dir = prepare_output_dirs(args.outputDir)
csv_output_rows = []
# import model architecture
pose_model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(
cfg, is_train=False
)
# import weights
if cfg.TEST.MODEL_FILE:
print('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
pose_model.load_state_dict(torch.load(
cfg.TEST.MODEL_FILE), strict=False)
else:
raise ValueError('expected model defined in config at TEST.MODEL_FILE')
pose_model.to(CTX)
pose_model.eval()
# Loading an video
vidcap = cv2.VideoCapture(args.videoFile)
fps = vidcap.get(cv2.CAP_PROP_FPS)
length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
if fps < args.inferenceFps:
raise ValueError('Video file not found!')
skip_frame_cnt = round(fps / args.inferenceFps)
frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
###### PARAMS
# select keypoint joint to display in heatmap, view COCO INDEXES to choose
selected_keypoint = 0
# tag and side are examples by now
# tag:
tag = 'EPT' # EPT para demos de uso de brazo y piernas
# side: True: der, False: izq
side = True
# adjust dimensions if rotation is needed
rotate = False
if rotate:
frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# define writers to save videos
video_dets_name = '{}/{}_basico.mp4'.format(args.outputDir, os.path.splitext(os.path.basename(args.videoFile))[0])
video_heatmaps_name = '{}/{}_pose_heatmap.mp4'.format(args.outputDir, os.path.splitext(os.path.basename(args.videoFile))[0])
video_ept_name = '{}/{}_medio.mp4'.format(args.outputDir, os.path.splitext(os.path.basename(args.videoFile))[0])
outcap = cv2.VideoWriter(video_dets_name,
cv2.VideoWriter_fourcc(*'MP4V'), int(skip_frame_cnt), (frame_width, frame_height))
# outcap_heatmap = cv2.VideoWriter(video_heatmaps_name,
# cv2.VideoWriter_fourcc(*'MP4V'), int(skip_frame_cnt), (frame_width, frame_height))
outcap_ept = cv2.VideoWriter(video_ept_name,
cv2.VideoWriter_fourcc(*'MP4V'), int(skip_frame_cnt), (frame_width, frame_height))
video_graph_name = '{}/{}_avanzado.mp4'.format(args.outputDir, os.path.splitext(os.path.basename(args.videoFile))[0])
outcap_graph = cv2.VideoWriter(video_graph_name,
cv2.VideoWriter_fourcc(*'MP4V'), int(skip_frame_cnt), (frame_width+(2*frame_height), frame_height))
count = 0
now_full= time.time()
data = []
# deque: store angle values over frames
buffer_maxlen = 600
angles_buffer={
'Left Elbow':deque([], maxlen=buffer_maxlen),
'Right Elbow':deque([], maxlen=buffer_maxlen),
'Left Shoulder':deque([], maxlen=buffer_maxlen),
'Right Shoulder':deque([], maxlen=buffer_maxlen),
'Left Hip':deque([], maxlen=buffer_maxlen),
'Right Hip':deque([], maxlen=buffer_maxlen),
'Left Knee':deque([], maxlen=buffer_maxlen),
'Right Knee':deque([], maxlen=buffer_maxlen)
}
coords_buffer = deque([],maxlen=30)
while vidcap.isOpened():
total_now = time.time()
ret, image_bgr = vidcap.read()
count += 1
if rotate:
image_bgr = cv2.rotate(image_bgr, cv2.cv2.ROTATE_90_CLOCKWISE)
# image_bgr = cv2.rotate(image_bgr, cv2.cv2.ROTATE_90_COUNTERCLOCKWISE)
# image_bgr = cv2.flip(image_bgr, 0)
# image_bgr = cv2.flip(image_bgr, 1)
if not ret:
break
# if count % skip_frame_cnt != 0:
# continue
print('Processing frame {} out of {}'.format(str(count),str(length)))
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
image_pose = image_rgb.copy()
# Clone 1 image for debugging purpose
image_debug = image_bgr.copy()
now = time.time()
# added return heatmap_slice
pose_preds, heatmap_slice = get_pose_estimation_prediction(cfg,
pose_model,
image_pose,
args.visthre,
selected_keypoint,
transforms=pose_transform)
## OPTIONAL: keep only the most confident detection
if pose_preds:
pose_preds = [pose_preds[0]]
then = time.time()
# save heatmap_slice as image over original image
# print(heatmap_slice.shape)
# print(np.max(heatmap_slice))
# print(np.min(heatmap_slice))
# plt.imshow(heatmap_slice, cmap='hot', interpolation='nearest')
# plt.show()
# plt.savefig(os.path.join(pose_dir, 'heatmap_{:08d}.jpg'.format(count)))
# generate 3 chann Gray image
image_gray = np.asarray(cv2.cvtColor(image_debug, cv2.COLOR_BGR2GRAY), np.float32)
image_gray_3chan=cv2.cvtColor(image_gray, cv2.COLOR_GRAY2BGR)
# case where person is detected
if pose_preds:
heatmap_slice_image = (heatmap_slice/np.max(heatmap_slice))*255.0
heatmap_slice_image = cv2.resize(heatmap_slice_image,(frame_width,frame_height))
heatmap_slice_image_3chan=np.zeros((frame_height,frame_width,3), np.float32)
heatmap_slice_image_3chan[:, :, 2] = heatmap_slice_image
image_w_heatmap = cv2.addWeighted(image_gray_3chan,0.5,heatmap_slice_image_3chan,0.5,0)
# write heatmap image
cv2.imwrite(os.path.join(pose_dir, 'heatmap_{:08d}.jpg'.format(count)), image_w_heatmap)
print("Found person pose at {:03.2f} fps".format(1/(then - now)))
# stop processing if too slow (stuck)
if 1/(then - now) < 0.5:
break
new_csv_row = []
for coords in pose_preds:
# Draw each point on image
for coord in coords:
x_coord, y_coord = int(coord[0]), int(coord[1])
cv2.circle(image_debug, (x_coord, y_coord), 4, (255, 0, 0), 2)
new_csv_row.extend([x_coord, y_coord])
# draw skeleton
draw_skeleton(image_debug, coords, cfg.DATASET.DATASET_TEST)
csv_output_rows.append(new_csv_row)
#################
# format detections as Aictive server mediapipe_test for ex. eval.
#################
# pose_pred[persona][punto][x:0 o y:1]
# ver si estan normalizados
# config depends on train used: COCO or CROWDPOSE
if cfg.DATASET.DATASET_TEST == 'coco':
array_x = [
abs((pose_preds[0][6][0]+pose_preds[0][5][0])/2), # chest mid (artificial)
pose_preds[0][0][0], # nose
0, #
pose_preds[0][5][0], # left_shoulder
pose_preds[0][7][0], # left_elbow
pose_preds[0][9][0], # left_wrist
pose_preds[0][11][0], # left_hip
pose_preds[0][13][0], # left_knee
pose_preds[0][15][0], # left_ankle
pose_preds[0][6][0], # right_shoulder
pose_preds[0][8][0], # right_elbow
pose_preds[0][10][0], # right_wrist
pose_preds[0][12][0], # right_hip
pose_preds[0][14][0], # right_knee
pose_preds[0][16][0], # right_ankle
pose_preds[0][2][0], # right_eye
pose_preds[0][1][0], # left_eye
pose_preds[0][4][0], # right_ear
pose_preds[0][3][0], # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
array_y = [
abs((pose_preds[0][6][1]+pose_preds[0][5][1])/2), # chest mid (artificial)
pose_preds[0][0][1], # nose
0, #
pose_preds[0][5][1], # left_shoulder
pose_preds[0][7][1], # left_elbow
pose_preds[0][9][1], # left_wrist
pose_preds[0][11][1], # left_hip
pose_preds[0][13][1], # left_knee
pose_preds[0][15][1], # left_ankle
pose_preds[0][6][1], # right_shoulder
pose_preds[0][8][1], # right_elbow
pose_preds[0][10][1], # right_wrist
pose_preds[0][12][1], # right_hip
pose_preds[0][14][1], # right_knee
pose_preds[0][16][1], # right_ankle
pose_preds[0][2][1], # right_eye
pose_preds[0][1][1], # left_eye
pose_preds[0][4][1], # right_ear
pose_preds[0][3][1], # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
# CROWDPOSE CASE
else:
array_x = [
pose_preds[0][13][1], # chest mid (neck) 0
pose_preds[0][12][0], # nose 1
0, # 2
pose_preds[0][0][0], # left_shoulder 3
pose_preds[0][2][0], # left_elbow 4
pose_preds[0][4][0], # left_wrist 5
pose_preds[0][6][0], # left_hip 6
pose_preds[0][8][0], # left_knee 7
pose_preds[0][10][0], # left_ankle 8
pose_preds[0][1][0], # right_shoulder 9
pose_preds[0][3][0], # right_elbow 10
pose_preds[0][5][0], # right_wrist 11
pose_preds[0][7][0], # right_hip 12
pose_preds[0][9][0], # right_knee 13
pose_preds[0][11][0], # right_ankle 14
0, # right_eye
0, # left_eye
0, # right_ear
0, # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
array_y = [
pose_preds[0][13][1], # chest mid (neck)
pose_preds[0][12][1], # nose
0, #
pose_preds[0][0][1], # left_shoulder
pose_preds[0][2][1], # left_elbow
pose_preds[0][4][1], # left_wrist
pose_preds[0][6][1], # left_hip
pose_preds[0][8][1], # left_knee
pose_preds[0][10][1], # left_ankle
pose_preds[0][1][1], # right_shoulder
pose_preds[0][3][1], # right_elbow
pose_preds[0][5][1], # right_wrist
pose_preds[0][7][1], # right_hip
pose_preds[0][9][1], # right_knee
pose_preds[0][11][1], # right_ankle
0, # right_eye
0, # left_eye
0, # right_ear
0, # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
# visibility, NOT AVAILABLE BUT CAN BE INFERRED WITH NOSE AND EARS KPs
array_v = [
0, # chest mid (artificial)
0, # nose
0, #
0, # left_shoulder
0, # left_elbow
0, # left_wrist
0, # left_hip
0, # left_knee
0, # left_ankle
0, # right_shoulder
0, # right_elbow
0, # right_wrist
0, # right_hip
0, # right_knee
0, # right_ankle
0, # right_eye
0, # left_eye
0, # right_ear
0, # left_ear
0 #
# pose_preds[0][][] # right_heel # only in mp
# pose_preds[0][][] # right_foot_index # only in mp
# pose_preds[0][][] # left_heel # only in mp
# pose_preds[0][][] # left_foot_index # only in mp
]
# case no person detected in frame
else:
image_w_heatmap = image_gray_3chan
cv2.imwrite(os.path.join(pose_dir, 'heatmap_{:08d}.jpg'.format(count)), image_w_heatmap)
print("No person pose found at {:03.2f} fps".format(1/(then - now)))
# append empty row on csv
new_csv_row = []
csv_output_rows.append(new_csv_row)
# define detections as empty for ex eval.
array_x=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
array_y=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
array_v=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# write fps in image
total_then = time.time()
text = "{:03.2f} fps".format(1/(total_then - total_now))
cv2.putText(image_debug, text, (100, 50), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 0, 255), 2, cv2.LINE_AA)
# write detections image
img_file = os.path.join(pose_dir, 'pose_{:08d}.jpg'.format(count))
cv2.imwrite(img_file, image_debug)
# write detections and heatmap video
outcap.write(np.uint8(image_debug))
# outcap_heatmap.write(np.uint8(image_w_heatmap))
# after writing both dets and heatmaps videos, calculate angles
poseEstimate = [array_x, array_y, array_v]
poseEstimate = np.array(poseEstimate)
exercise = dataScience.Exercises(tag, poseEstimate, side)
angles = exercise.calculate()
# print(angles)
# case angles are detected
if angles != None:
teta = []
for i in range(0, len(angles)):
teta.append(round(angles[i]['value'], 2))
# time corresponding to the frame [in secs] (-1 to start at 0:00)
frame_time = round((count-1)/fps,3)
| |
"""
Tests for xhr_handlers.py.
"""
import json
import os
import re
import StringIO
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.files.uploadedfile import UploadedFile
from django.core.urlresolvers import reverse
from django.http.request import HttpRequest
from django.test import Client
from django.test import TestCase
from main.models import AlignmentGroup
from main.models import Chromosome
from main.models import Dataset
from main.models import ExperimentSample
from main.models import Project
from main.models import ReferenceGenome
from main.models import Variant
from main.models import VariantSet
from main.models import VariantAlternate
from main.models import VariantCallerCommonData
from main.models import VariantEvidence
from main.testing_util import create_common_entities
from main.testing_util import TEST_EMAIL
from main.testing_util import TEST_PASSWORD
from main.testing_util import TEST_USERNAME
from main.xhr_handlers import create_ref_genome_from_browser_upload
from main.xhr_handlers import create_variant_set
from main.xhr_handlers import ref_genomes_concatenate
from main.xhr_handlers import samples_upload_through_browser_sample_data
from main.xhr_handlers import upload_single_sample
from main.xhr_handlers import VARIANT_LIST_REQUEST_KEY__FILTER_STRING
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__ERROR
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__LIST
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__TOTAL
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__TIME
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__SET_LIST
from main.xhr_handlers import VARIANT_LIST_RESPONSE_KEY__KEY_MAP
from pipeline.pipeline_runner import run_pipeline
from variants.dynamic_snp_filter_key_map import update_filter_key_map
from utils.import_util import _create_sample_and_placeholder_dataset
from utils.import_util import add_dataset_to_entity
from utils.import_util import import_reference_genome_from_local_file
from utils.import_util import SAMPLE_BROWSER_UPLOAD_KEY__READ_1
from utils.import_util import SAMPLE_BROWSER_UPLOAD_KEY__READ_2
from utils.import_util import SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME
from settings import PWD as GD_ROOT
from variants.melted_variant_schema import MELTED_SCHEMA_KEY__POSITION
# Directory holding genbank-aligned test fixtures used across these tests.
TEST_DIR = os.path.join(GD_ROOT, 'test_data', 'genbank_aligned')
# Annotated vcf produced by a bwa alignment; used to seed filter key maps
# and as a Dataset filesystem_location.
TEST_ANNOTATED_VCF = os.path.join(TEST_DIR, 'bwa_align_annotated.vcf')
# Small E. coli MG1655 genbank slice (tolC through zupT region).
TEST_MG1655_GENBANK = os.path.join(TEST_DIR, 'mg1655_tolC_through_zupT.gb')
# HTTP status codes asserted by the tests below.
STATUS_CODE__NOT_FOUND = 404
STATUS_CODE__NOT_LOGGED_IN = 302  # logged-out requests redirect to login
STATUS_CODE__SUCCESS = 200
# Paired-end fastq fixtures for a fake genome.
TEST_FQ_DIR = os.path.join(
        GD_ROOT,
        'test_data',
        'fake_genome_and_reads',
        '9b19e708')
TEST_FQ1_FILE = os.path.join(TEST_FQ_DIR,
        'test_genome_2.snps.simLibrary.1.fq')
TEST_FQ2_FILE = os.path.join(TEST_FQ_DIR,
        'test_genome_2.snps.simLibrary.2.fq')
# Fasta fixtures used by the concatenation tests.
TEST_FA_DIR = os.path.join(
        GD_ROOT,
        'test_data',
        'genome_finish_test')
TEST_FASTA_1_PATH = os.path.join(TEST_FA_DIR, 'random_fasta_1.fa')
TEST_FASTA_2_PATH = os.path.join(TEST_FA_DIR, 'random_fasta_2.fa')
# Fasta containing two chromosomes, for multi-chromosome concatenation.
TEST_2_CHROM_FASTA_PATH = os.path.join(GD_ROOT, 'test_data', 'two_chromosome.fa')
# Genbank whose record ids exceed normal length limits.
LONG_ID_GENBANK = os.path.join(
        GD_ROOT,
        'test_data',
        'long_id_genbank.gb')
TEST_GENBANK = os.path.join(
        GD_ROOT,
        'test_data',
        'test_genbank.gb')
# Reads corresponding to a "dirty" genbank reference.
TEST_DIRTY_FQ_1 = os.path.join(
        GD_ROOT,
        'test_data',
        'dirty_genbank_reads.1.fq')
TEST_DIRTY_FQ_2 = os.path.join(
        GD_ROOT,
        'test_data',
        'dirty_genbank_reads.2.fq')
class TestGetVariantList(TestCase):
    """Tests for the get_variant_list() xhr endpoint."""

    url = reverse('main.xhr_handlers.get_variant_list')

    # Keys that every successful response must contain.
    EXPECTED_RESPONSE_KEYS = set([
        VARIANT_LIST_RESPONSE_KEY__LIST,
        VARIANT_LIST_RESPONSE_KEY__TOTAL,
        VARIANT_LIST_RESPONSE_KEY__SET_LIST,
        VARIANT_LIST_RESPONSE_KEY__TIME,
        VARIANT_LIST_RESPONSE_KEY__KEY_MAP,
    ])

    def setUp(self):
        """Create the base models and a logged-in browser client."""
        # Useful models.
        user = User.objects.create_user(TEST_USERNAME, password=TEST_PASSWORD,
                email=TEST_EMAIL)
        self.project = Project.objects.create(owner=user.get_profile(),
                title='Test Project')
        self.ref_genome = ReferenceGenome.objects.create(project=self.project,
                label='refgenome')
        self.chromosome = Chromosome.objects.create(
                reference_genome=self.ref_genome,
                label='Chromosome',
                num_bases=9001)
        self.sample_obj_1 = ExperimentSample.objects.create(
                project=self.project, label='fake sample')
        # Make sure the reference genome has the required vcf keys.
        update_filter_key_map(self.ref_genome, TEST_ANNOTATED_VCF)
        self.vcf_dataset = Dataset.objects.create(
                label='test_data_set',
                type=Dataset.TYPE.VCF_FREEBAYES,
                filesystem_location=TEST_ANNOTATED_VCF)
        # Fake web browser client used to make requests.
        self.client = Client()
        self.client.login(username=TEST_USERNAME, password=TEST_PASSWORD)

    def _create_variants(self, num_variants, common_data=None):
        """Create num_variants Variants at positions 0..num_variants-1.

        Each Variant gets the full chain of related models
        (VariantAlternate, VariantCallerCommonData, VariantEvidence)
        because this is what the materialized view create requires to
        return non-null results.

        Args:
            num_variants: Number of Variants to create.
            common_data: Optional dict stored as the `data` field of each
                VariantCallerCommonData.
        """
        alignment_group = AlignmentGroup.objects.create(
                label='Alignment 1',
                reference_genome=self.ref_genome,
                aligner=AlignmentGroup.ALIGNER.BWA)
        for pos in range(num_variants):
            variant = Variant.objects.create(
                    type=Variant.TYPE.TRANSITION,
                    reference_genome=self.ref_genome,
                    chromosome=self.chromosome,
                    position=pos,
                    ref_value='A')
            VariantAlternate.objects.create(
                    variant=variant,
                    alt_value='G')
            common_data_kwargs = {}
            if common_data is not None:
                common_data_kwargs['data'] = common_data
            common_data_obj = VariantCallerCommonData.objects.create(
                    variant=variant,
                    source_dataset=self.vcf_dataset,
                    alignment_group=alignment_group,
                    **common_data_kwargs)
            VariantEvidence.objects.create(
                    experiment_sample=self.sample_obj_1,
                    variant_caller_common_data=common_data_obj)
        # Sanity check that the Variants were actually created.
        self.assertEqual(num_variants, Variant.objects.filter(
                reference_genome=self.ref_genome).count())

    def _assert_variant_list_response(self, response, total_num_variants):
        """Assert a successful response carrying the expected keys, total
        count, and variant positions 0..total_num_variants-1.
        """
        self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
        response_data = json.loads(response.content)
        # Make sure expected keys in response.
        self.assertEqual(self.EXPECTED_RESPONSE_KEYS,
                set(response_data.keys()),
                "Missing keys %s\nGot keys %s" % (
                        str(self.EXPECTED_RESPONSE_KEYS -
                                set(response_data.keys())),
                        str(set(response_data.keys()))))
        self.assertEqual(total_num_variants,
                response_data[VARIANT_LIST_RESPONSE_KEY__TOTAL])
        # Check total variants returned is correct.
        variant_data_obj = json.loads(response_data[
                VARIANT_LIST_RESPONSE_KEY__LIST])
        variant_obj_list = variant_data_obj['obj_list']
        self.assertTrue(total_num_variants, len(variant_obj_list))
        # Check positions are correct.
        def _get_position_from_frontend_object(fe_obj):
            return int(re.match('([0-9]+)', str(fe_obj[
                    MELTED_SCHEMA_KEY__POSITION])).group(1))
        variant_position_set = set([_get_position_from_frontend_object(obj)
                for obj in variant_obj_list])
        self.assertEqual(set(range(total_num_variants)),
                variant_position_set)

    def test__logged_out(self):
        """Test that logged out fails.
        """
        self.client.logout()
        response = self.client.get(self.url)
        self.assertEqual(STATUS_CODE__NOT_LOGGED_IN, response.status_code)

    def test__missing_params(self):
        """Requests without required params should 404."""
        response = self.client.get(self.url)
        self.assertEqual(STATUS_CODE__NOT_FOUND, response.status_code)

    def test__basic_function(self):
        """Basic test.
        """
        TOTAL_NUM_VARIANTS = 10
        self._create_variants(TOTAL_NUM_VARIANTS)
        request_data = {
            'refGenomeUid': self.ref_genome.uid,
            'projectUid': self.project.uid
        }
        response = self.client.get(self.url, request_data)
        self._assert_variant_list_response(response, TOTAL_NUM_VARIANTS)

    def test_melted(self):
        """Test melted view.
        """
        TOTAL_NUM_VARIANTS = 10
        self._create_variants(TOTAL_NUM_VARIANTS,
                common_data={u'INFO_DP': 20, u'INFO_PQR': 0.0})
        request_data = {
            'refGenomeUid': self.ref_genome.uid,
            'projectUid': self.project.uid,
            'melt': '1'
        }
        response = self.client.get(self.url, request_data)
        self._assert_variant_list_response(response, TOTAL_NUM_VARIANTS)

    def test_does_not_throw_500_on_server_error(self):
        """For user input errors, get_variant_list should not throw a 500 error.

        This test might fail if the dev leaves the debugging clause
        "except FakeException" in the code.
        """
        request_data = {
            'refGenomeUid': self.ref_genome.uid,
            'projectUid': self.project.uid,
            VARIANT_LIST_REQUEST_KEY__FILTER_STRING: 'nonesense'
        }
        response = self.client.get(self.url, request_data)
        self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
        response_data = json.loads(response.content)
        self.assertTrue(VARIANT_LIST_RESPONSE_KEY__ERROR in response_data)
        # Make sure FakeException is not imported
        with self.assertRaises(ImportError):
            # Don't leave FakeException as import.
            from main.xhr_handlers import FakeException
class TestModifyVariantInSetMembership(TestCase):
    """Tests for the modify_variant_in_set_membership() xhr endpoint."""

    def test_add__variants_specified(self):
        """Adding an explicitly specified list of variants to a set."""
        # TODO: Implement.
        pass

    def test_add__all_matching_filter(self):
        """Adding every variant that matches a filter to a set."""
        # TODO: Implement.
        pass
class TestUploadSingleSample(TestCase):
    """Tests for the upload_single_sample() xhr endpoint."""

    def setUp(self):
        """Override.
        """
        self.common_entities = create_common_entities()

    def _upload_and_verify(self, fastq_files, expected_num_datasets):
        """Upload the given fastq files through a fake POST request and
        verify the resulting sample and its datasets.

        Args:
            fastq_files: List of one (unpaired) or two (paired) fastq
                file paths to attach as 'fastq1'/'fastq2'.
            expected_num_datasets: Expected dataset count on the sample
                (each fastq also yields a fastqc dataset).
        """
        project = self.common_entities['project']
        request = HttpRequest()
        request.POST = {
            'projectUid': project.uid
        }
        request.method = 'POST'
        request.user = self.common_entities['user']
        authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
        self.assertTrue(request.user.is_authenticated())
        EXPERIMENT_SAMPLE_LABEL = 'my sample'
        request.POST['sampleLabel'] = EXPERIMENT_SAMPLE_LABEL
        for i, fq_path in enumerate(fastq_files):
            request.FILES['fastq%d' % (i + 1)] = UploadedFile(
                    file=open(fq_path),
                    name='read%d.fq' % (i + 1))
        response = upload_single_sample(request)
        self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
        self.assertFalse('error' in json.loads(response.content))
        sample = ExperimentSample.objects.get(label=EXPERIMENT_SAMPLE_LABEL)
        self.assertTrue(sample)
        datasets = sample.dataset_set.all()
        self.assertEqual(expected_num_datasets, len(datasets))
        for dataset in datasets:
            self.assertEqual(Dataset.STATUS.READY, dataset.status)

    def test_upload_single_sample(self):
        """Paired-end upload: 2 fastq + 2 fastqc = 4 datasets."""
        self._upload_and_verify([TEST_FQ1_FILE, TEST_FQ2_FILE], 4)

    def test_upload_single_sample__unpaired(self):
        """Unpaired upload: 1 fastq + 1 fastqc = 2 datasets."""
        self._upload_and_verify([TEST_FQ1_FILE], 2)
class TestSamplesUploadThroughBrowserSampleData(TestCase):
    """Tests uploading sample read data through the browser endpoint."""

    def setUp(self):
        """Override.
        """
        self.common_entities = create_common_entities()

    def test_upload_file(self):
        """Register placeholder datasets from a template row, upload each
        fastq, and verify all datasets reach READY with expected types.
        """
        project = self.common_entities['project']
        request = HttpRequest()
        request.POST = {
            'projectUid': project.uid
        }
        request.method = 'POST'
        request.user = self.common_entities['user']
        authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
        self.assertTrue(request.user.is_authenticated())
        # Fake having uploaded a template.
        row_data = {
            SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME: 'red',
            SAMPLE_BROWSER_UPLOAD_KEY__READ_1: TEST_FQ1_FILE,
            SAMPLE_BROWSER_UPLOAD_KEY__READ_2: TEST_FQ2_FILE
        }
        _create_sample_and_placeholder_dataset(project, row_data)
        # Placeholders exist but no data has been uploaded yet.
        datasets = Dataset.objects.all()
        self.assertEqual(2, len(datasets))
        for dataset in datasets:
            self.assertEqual(Dataset.STATUS.AWAITING_UPLOAD, dataset.status)

        def _upload_file_and_check_response(full_path):
            # Only the uploaded filename is used to match the placeholder;
            # the content is mocked with TEST_FQ1_FILE in both calls.
            name = os.path.split(full_path)[1]
            mock_uploaded_file = UploadedFile(
                    file=open(TEST_FQ1_FILE),
                    name=name)
            request.FILES = {
                'file': mock_uploaded_file
            }
            response = samples_upload_through_browser_sample_data(request)
            self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
            self.assertFalse('error' in json.loads(response.content))

        _upload_file_and_check_response(TEST_FQ1_FILE)
        _upload_file_and_check_response(TEST_FQ2_FILE)
        datasets = Dataset.objects.all()
        # 2 fastq, 2 fastqc
        self.assertEqual(4, len(datasets))
        EXPECTED_DATASET_TYPES_SET = set([
                Dataset.TYPE.FASTQ1, Dataset.TYPE.FASTQ2,
                Dataset.TYPE.FASTQC1_HTML, Dataset.TYPE.FASTQC2_HTML])
        self.assertEqual(
                EXPECTED_DATASET_TYPES_SET,
                set([ds.type for ds in datasets]))
        for dataset in datasets:
            self.assertEqual(Dataset.STATUS.READY, dataset.status)
class TestVariantSetUploadThroughFile(TestCase):
    """Tests creating a VariantSet from an uploaded vcf file."""

    def setUp(self):
        """Override.
        """
        self.common_entities = create_common_entities()

    def test_upload_file(self):
        """Create a variant set via the 'from-file' path and verify the
        VariantSet is created against the right reference genome.
        """
        VARIANT_SET_NAME = 'newVariant'
        self.assertEqual(0, VariantSet.objects.count())
        refGenome = self.common_entities['reference_genome']
        request = HttpRequest()
        request.POST = {
            'refGenomeUid': refGenome.uid,
            'variantSetName': VARIANT_SET_NAME,
            'createSetType': 'from-file'
        }
        request.method = 'POST'
        request.user = self.common_entities['user']
        authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
        self.assertTrue(request.user.is_authenticated())
        # random test file selected
        variant_set_file = os.path.join(GD_ROOT, 'test_data',
                'recoli_321UAG_variant_set_upload.vcf')
        # Mock upload: empty content; the file name carries the path.
        mock_uploaded_file = UploadedFile(
                file=StringIO.StringIO(),
                name=variant_set_file)
        request.FILES['vcfFile'] = mock_uploaded_file
        response = create_variant_set(request)
        self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
        variantsets = VariantSet.objects.all()
        self.assertEqual(1, len(variantsets))
        self.assertEqual(VARIANT_SET_NAME, VariantSet.objects.get().label)
        self.assertEqual(refGenome, VariantSet.objects.get().reference_genome)
class TestReferenceGenomeConcatenation(TestCase):
    """Tests for the ref_genomes_concatenate() xhr endpoint."""

    def setUp(self):
        """Override.
        """
        self.common_entities = create_common_entities()

    def _generate_test_instance(self, rg_files, rg_names=None):
        """Import the given reference genomes, concatenate them through
        the endpoint, and assert chromosome and base counts add up.

        Args:
            rg_files: List of fasta (.fa) or genbank file paths.
            rg_names: Optional genome labels; defaults to list indices
                rendered as strings.
        """
        if rg_names is None:
            rg_names = [str(i) for i in range(len(rg_files))]
        project = self.common_entities['project']
        ref_genomes = []
        for i, rg_file in enumerate(rg_files):
            # File extension determines the import format.
            file_type = 'fasta' if rg_file.endswith('.fa') else 'genbank'
            ref_genomes.append(import_reference_genome_from_local_file(
                    project, rg_names[i], rg_file, file_type, move=False))
        test_label = 'concat_test'
        request_data = {
            'newGenomeLabel': test_label,
            'refGenomeUidList': [rg.uid for rg in ref_genomes]
        }
        request = HttpRequest()
        request.POST = {'data': json.dumps(request_data)}
        request.method = 'POST'
        request.user = self.common_entities['user']
        authenticate(username=TEST_USERNAME, password=TEST_PASSWORD)
        self.assertTrue(request.user.is_authenticated())
        ref_genomes_concatenate(request)
        concat_ref = ReferenceGenome.objects.get(label=test_label)
        # Assert correct number of chromosomes
        self.assertEqual(
                concat_ref.num_chromosomes,
                sum([rg.num_chromosomes for rg in ref_genomes]))
        # Assert correct number of bases
        self.assertEqual(
                concat_ref.num_bases,
                sum([rg.num_bases for rg in ref_genomes]))

    def test_fasta_concatenation(self):
        """ Basic test of concatenating two short fastas
        """
        self._generate_test_instance([TEST_FASTA_1_PATH, TEST_FASTA_2_PATH])

    def test_identical_fasta_concatenation(self):
        """ Test concatenating two identical fastas
        """
        self._generate_test_instance([TEST_FASTA_1_PATH, TEST_FASTA_1_PATH])

    def test_fasta_genbank_concatenation(self):
        """ Test concatenating a fasta with a genbank
        """
        self._generate_test_instance([TEST_FASTA_1_PATH, TEST_MG1655_GENBANK])

    def test_multichromosome_concatenation(self):
        """ Test concatenating a fasta containing a single chromosome with
        a fasta containing two chromosomes
        """
        self._generate_test_instance([TEST_FASTA_1_PATH,
                TEST_2_CHROM_FASTA_PATH])
class TestUploadReferenceGenome(TestCase):
def setUp(self):
"""Override.
"""
self.common_entities = create_common_entities()
def test_upload_long_id_genbank(self):
project = self.common_entities['project']
ref_genome_label = 'dirty_upload'
request = HttpRequest()
request.POST = {
'projectUid': project.uid,
'refGenomeLabel': ref_genome_label,
'importFileFormat': 'genbank'
}
request.method = 'POST'
request.user = self.common_entities['user']
authenticate(username=TEST_USERNAME, password=<PASSWORD>)
self.assertTrue(request.user.is_authenticated())
request.FILES['refGenomeFile'] = UploadedFile(
file=open(LONG_ID_GENBANK),
name='dirty_genbank.gb')
response = create_ref_genome_from_browser_upload(request)
self.assertEqual(STATUS_CODE__SUCCESS, response.status_code)
self.assertFalse(json.loads(response.content).get('error', False))
def test_upload_non_ascii_name(self):
"""For now, it's easier | |
<filename>ocs_ci/deployment/baremetal.py
import json
import os
import logging
from time import sleep
import yaml
import requests
from .flexy import FlexyBaremetalPSI
from ocs_ci.utility import psiutils
from ocs_ci.deployment.deployment import Deployment
from ocs_ci.framework import config
from ocs_ci.deployment.ocp import OCPDeployment as BaseOCPDeployment
from ocs_ci.ocs import constants, ocp, exceptions
from ocs_ci.ocs.exceptions import CommandFailed, RhcosImageNotFound
from ocs_ci.ocs.node import get_typed_nodes
from ocs_ci.ocs.openshift_ops import OCP
from ocs_ci.utility.bootstrap import gather_bootstrap
from ocs_ci.utility.connection import Connection
from ocs_ci.utility.csr import wait_for_all_nodes_csr_and_approve, approve_pending_csr
from ocs_ci.utility.templating import Templating
from ocs_ci.utility.utils import (
run_cmd, upload_file, get_ocp_version, load_auth_config,
wait_for_co, configure_chrony_and_wait_for_machineconfig_status, check_for_rhcos_images,
get_infra_id, TimeoutSampler
)
logger = logging.getLogger(__name__)
class BAREMETALUPI(Deployment):
    """
    A class to handle Bare metal UPI specific deployment
    """

    def __init__(self):
        logger.info("BAREMETAL UPI")
        super().__init__()

    class OCPDeployment(BaseOCPDeployment):
        """OCP deployment flow specific to Bare Metal UPI."""

        def __init__(self):
            super().__init__()
            # Helper (httpd/tftp) node details and IPMI management
            # console credentials come from the auth config.
            self.helper_node_details = load_auth_config()['baremetal']
            self.mgmt_details = load_auth_config()['ipmi']

        def deploy_prereq(self):
            """
            Pre-Requisites for Bare Metal UPI Deployment
            """
            super(BAREMETALUPI.OCPDeployment, self).deploy_prereq()
            # check for BM status
            logger.info("Checking BM Status")
            status = self.check_bm_status_exist()
            assert status == constants.BM_STATUS_ABSENT, "BM Cluster still present"
            # update BM status
            logger.info("Updating BM Status")
            result = self.update_bm_status(constants.BM_STATUS_PRESENT)
            assert result == constants.BM_STATUS_RESPONSE_UPDATED, "Failed to update request"
            # create manifest
            self.create_manifest()
            # create ignitions
            self.create_ignitions()
            self.kubeconfig = os.path.join(self.cluster_path, config.RUN.get('kubeconfig_location'))
            bootstrap_path = os.path.join(config.ENV_DATA.get('cluster_path'), constants.BOOTSTRAP_IGN)
            master_path = os.path.join(config.ENV_DATA.get('cluster_path'), constants.MASTER_IGN)
            worker_path = os.path.join(config.ENV_DATA.get('cluster_path'), constants.WORKER_IGN)
            self.host = self.helper_node_details['bm_httpd_server']
            self.user = self.helper_node_details['bm_httpd_server_user']
            self.private_key = os.path.expanduser(
                config.DEPLOYMENT['ssh_key_private']
            )
            self.helper_node_handler = Connection(self.host, self.user, self.private_key)
            cmd = f"rm -rf {self.helper_node_details['bm_path_to_upload']}"
            logger.info(self.helper_node_handler.exec_cmd(cmd=cmd))
            cmd = f"mkdir -m 755 {self.helper_node_details['bm_path_to_upload']}"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to create required folder"
            # Upload ignition to public access server
            upload_dict = {
                bootstrap_path: constants.BOOTSTRAP_IGN,
                master_path: constants.MASTER_IGN,
                worker_path: constants.WORKER_IGN
            }
            for key, val in upload_dict.items():
                upload_file(
                    self.host,
                    key,
                    os.path.join(
                        self.helper_node_details['bm_path_to_upload'], f"{val}"
                    ),
                    self.user,
                    key_file=self.private_key
                )
            # Perform Cleanup for stale entry's
            cmd = f"rm -rf {self.helper_node_details['bm_tftp_base_dir']}"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to Delete folder"
            # Installing Required packages
            cmd = "yum install dnsmasq -y"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to install required package"
            # Enable dnsmasq service on boot
            cmd = "systemctl enable dnsmasq"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to Enable dnsmasq service"
            # Starting dnsmasq service
            cmd = "systemctl start dnsmasq"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to Start dnsmasq service"
            cmd = f"mkdir -m 755 -p {self.helper_node_details['bm_tftp_base_dir']}"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to create required folder"
            cmd = f"mkdir -m 755 -p {self.helper_node_details['bm_tftp_base_dir']}ocs4qe"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to create required folder"
            cmd = f"mkdir -m 755 -p {self.helper_node_details['bm_tftp_base_dir']}ocs4qe/baremetal"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to create required folder"
            cmd = f"rm -rf {self.helper_node_details['bm_dnsmasq_dir']}*"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to Delete dir"
            # Install syslinux
            cmd = "yum install syslinux -y"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to install required package"
            # Copy syslinux files to the tftp path
            cmd = f"cp -ar /usr/share/syslinux/* {self.helper_node_details['bm_tftp_dir']}"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to Copy required files"
            upload_dict = {
                constants.PXE_CONF_FILE: "dnsmasq.pxe.conf",
                constants.COMMON_CONF_FILE: "dnsmasq.common.conf"
            }
            for key, val in upload_dict.items():
                upload_file(
                    self.host,
                    key,
                    os.path.join(
                        self.helper_node_details['bm_dnsmasq_dir'],
                        val
                    ),
                    self.user,
                    key_file=self.private_key
                )
            # Restarting dnsmasq service
            cmd = "systemctl restart dnsmasq"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to restart dnsmasq service"
            with open(constants.RHCOS_IMAGES_FILE) as file_stream:
                rhcos_images_file = yaml.safe_load(file_stream)
            ocp_version = get_ocp_version()
            logger.info(rhcos_images_file)
            image_data = rhcos_images_file[ocp_version]
            # Download installer_initramfs
            initramfs_image_path = constants.coreos_url_prefix + image_data['installer_initramfs_url']
            if check_for_rhcos_images(initramfs_image_path):
                cmd = (
                    "wget -O "
                    f"{self.helper_node_details['bm_tftp_dir']}"
                    "/rhcos-installer-initramfs.x86_64.img "
                    f"{initramfs_image_path}"
                )
                assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to Download required File"
            else:
                raise RhcosImageNotFound
            # Download installer_kernel
            kernel_image_path = constants.coreos_url_prefix + image_data['installer_kernel_url']
            if check_for_rhcos_images(kernel_image_path):
                cmd = (
                    "wget -O "
                    f"{self.helper_node_details['bm_tftp_dir']}"
                    "/rhcos-installer-kernel-x86_64 "
                    f"{kernel_image_path}"
                )
                assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to Download required File"
            else:
                raise RhcosImageNotFound
            # Download metal_bios
            metal_image_path = constants.coreos_url_prefix + image_data['metal_bios_url']
            if check_for_rhcos_images(metal_image_path):
                cmd = (
                    "wget -O "
                    f"{self.helper_node_details['bm_path_to_upload']}"
                    "/rhcos-metal.x86_64.raw.gz "
                    f"{metal_image_path}"
                )
                assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to Download required File"
            else:
                raise RhcosImageNotFound
            # Create pxelinux.cfg directory
            cmd = f"mkdir -m 755 {self.helper_node_details['bm_tftp_dir']}/pxelinux.cfg"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to create required folder"

        def deploy(self, log_cli_level='DEBUG'):
            """
            Deploy

            Args:
                log_cli_level (str): log level passed to openshift-installer
            """
            # Uploading pxe files
            logger.info("Deploying OCP cluster for Bare Metal platform")
            logger.info(
                f"Openshift-installer will be using log level:{log_cli_level}"
            )
            upload_file(
                self.host,
                constants.COMMON_CONF_FILE,
                os.path.join(
                    self.helper_node_details['bm_dnsmasq_dir'],
                    "dnsmasq.common.conf"
                ),
                self.user,
                key_file=self.private_key
            )
            logger.info("Uploading PXE files")
            # One PXE config per machine, selected by role and named after
            # the machine's MAC address.
            for machine in self.mgmt_details:
                if self.mgmt_details[machine].get('role') == "bootstrap":
                    upload_file(
                        server=self.host,
                        localpath=constants.BOOTSTRAP_PXE_FILE,
                        remotepath=f"{self.helper_node_details['bm_tftp_dir']}"
                        f"/pxelinux.cfg/01-{self.mgmt_details[machine]['mac'].replace(':', '-')}",
                        user=self.user,
                        key_file=self.private_key
                    )
                elif self.mgmt_details[machine].get('role') == "master":
                    upload_file(
                        server=self.host,
                        localpath=constants.MASTER_PXE_FILE,
                        remotepath=f"{self.helper_node_details['bm_tftp_dir']}"
                        f"/pxelinux.cfg/01-{self.mgmt_details[machine]['mac'].replace(':', '-')}",
                        user=self.user,
                        key_file=self.private_key
                    )
                elif self.mgmt_details[machine].get('role') == "worker":
                    upload_file(
                        server=self.host,
                        localpath=constants.WORKER_PXE_FILE,
                        remotepath=f"{self.helper_node_details['bm_tftp_dir']}"
                        f"/pxelinux.cfg/01-{self.mgmt_details[machine]['mac'].replace(':', '-')}",
                        user=self.user,
                        key_file=self.private_key
                    )
            # Applying Permission
            cmd = f"chmod 755 -R {self.helper_node_details['bm_tftp_dir']}"
            self.helper_node_handler.exec_cmd(cmd=cmd)
            # Applying Permission
            cmd = f"chmod 755 -R {self.helper_node_details['bm_path_to_upload']}"
            self.helper_node_handler.exec_cmd(cmd=cmd)
            # Restarting dnsmasq service
            cmd = "systemctl restart dnsmasq"
            assert self.helper_node_handler.exec_cmd(cmd=cmd), "Failed to restart dnsmasq service"
            # Rebooting Machine with pxe boot
            for machine in self.mgmt_details:
                if self.mgmt_details[machine].get('cluster_name') == constants.BM_DEFAULT_CLUSTER_NAME:
                    secrets = [
                        self.mgmt_details[machine]['mgmt_username'],
                        self.mgmt_details[machine]['mgmt_password']
                    ]
                    # Changes boot prioriy to pxe
                    cmd = (
                        f"ipmitool -I lanplus -U {self.mgmt_details[machine]['mgmt_username']} "
                        f"-P {self.mgmt_details[machine]['mgmt_password']} "
                        f"-H {self.mgmt_details[machine]['mgmt_console']} chassis bootdev pxe"
                    )
                    run_cmd(cmd=cmd, secrets=secrets)
                    sleep(2)
                    # Power On Machine (power cycle, falling back to power
                    # on if the machine was off)
                    cmd = (
                        f"ipmitool -I lanplus -U {self.mgmt_details[machine]['mgmt_username']} "
                        f"-P {self.mgmt_details[machine]['mgmt_password']} "
                        f"-H {self.mgmt_details[machine]['mgmt_console']} chassis power cycle || "
                        f"ipmitool -I lanplus -U {self.mgmt_details[machine]['mgmt_username']} "
                        f"-P {self.mgmt_details[machine]['mgmt_password']} "
                        f"-H {self.mgmt_details[machine]['mgmt_console']} chassis power on"
                    )
                    run_cmd(cmd=cmd, secrets=secrets)
            logger.info("waiting for bootstrap to complete")
            try:
                run_cmd(
                    f"{self.installer} wait-for bootstrap-complete "
                    f"--dir {self.cluster_path} "
                    f"--log-level {log_cli_level}",
                    timeout=3600
                )
            except CommandFailed as e:
                if constants.GATHER_BOOTSTRAP_PATTERN in str(e):
                    try:
                        gather_bootstrap()
                    except Exception as ex:
                        logger.error(ex)
                raise e
            OCP.set_kubeconfig(self.kubeconfig)
            wait_for_all_nodes_csr_and_approve()
            # wait for image registry to show-up
            co = "image-registry"
            wait_for_co(co)
            # patch image registry to null
            self.configure_storage_for_image_registry(self.kubeconfig)
            # wait for install to complete
            logger.info("waiting for install to complete")
            run_cmd(
                f"{self.installer} wait-for install-complete "
                f"--dir {self.cluster_path} "
                f"--log-level {log_cli_level}",
                timeout=1800
            )
            # Approving CSRs here in-case if any exists
            approve_pending_csr()
            self.test_cluster()
            logger.info("Performing Disk cleanup")
            clean_disk()
            # We need NTP for OCS cluster to become clean
            configure_chrony_and_wait_for_machineconfig_status(node_type="all")

        def create_config(self):
            """
            Creates the OCP deploy config for the Bare Metal
            """
            # Generate install-config from template
            _templating = Templating()
            ocp_install_template = (
                f"install-config-{self.deployment_platform}-"
                f"{self.deployment_type}.yaml.j2"
            )
            ocp_install_template_path = os.path.join(
                "ocp-deployment", ocp_install_template
            )
            install_config_str = _templating.render_template(
                ocp_install_template_path, config.ENV_DATA
            )
            install_config_obj = yaml.safe_load(install_config_str)
            install_config_obj['pullSecret'] = self.get_pull_secret()
            install_config_obj['sshKey'] = self.get_ssh_key()
            install_config_obj['metadata']['name'] = constants.BM_DEFAULT_CLUSTER_NAME
            install_config_str = yaml.safe_dump(install_config_obj)
            install_config = os.path.join(self.cluster_path, "install-config.yaml")
            install_config_backup = os.path.join(self.cluster_path, "install-config.yaml.backup")
            # The installer consumes install-config.yaml; keep a backup
            # since the installer deletes the original.
            with open(install_config, "w") as f:
                f.write(install_config_str)
            with open(install_config_backup, "w") as f:
                f.write(install_config_str)

        def create_manifest(self):
            """
            Creates the Manifest files
            """
            logger.info("creating manifest files for the cluster")
            run_cmd(
                f"{self.installer} create manifests "
                f"--dir {self.cluster_path} "
            )

        def create_ignitions(self):
            """
            Creates the ignition files
            """
            logger.info("creating ignition files for the cluster")
            run_cmd(
                f"{self.installer} create ignition-configs "
                f"--dir {self.cluster_path} "
            )

        def configure_storage_for_image_registry(self, kubeconfig):
            """
            Configures storage for the image registry

            Args:
                kubeconfig (str): path to the cluster kubeconfig
            """
            logger.info("configuring storage for image registry")
            patch = " '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}' "
            run_cmd(
                f"oc --kubeconfig {kubeconfig} patch "
                f"configs.imageregistry.operator.openshift.io "
                f"cluster --type merge --patch {patch}"
            )

        def destroy(self, log_level=''):
            """
            Destroy OCP cluster specific to BM UPI

            Args:
                log_level (str): unused; kept for interface compatibility
            """
            logger.info("Updating BM status")
            result = self.update_bm_status(constants.BM_STATUS_ABSENT)
            assert result == constants.BM_STATUS_RESPONSE_UPDATED, "Failed to update request"

        def check_bm_status_exist(self):
            """
            Check if BM Cluster already exist

            Returns:
                str: response status
            """
            headers = {'content-type': "application/json"}
            response = requests.get(
                url=self.helper_node_details['bm_status_check'],
                headers=headers
            )
            return response.json()[0]['status']

        def update_bm_status(self, bm_status):
            """
            Update BM status when cluster is deployed/teardown

            Args:
                bm_status (str): Status to be updated

            Returns:
                str: response message
            """
            payload = {'status': bm_status}
            headers = {'content-type': "application/json"}
            response = requests.put(
                url=self.helper_node_details['bm_status_check'],
                json=payload,
                headers=headers
            )
            return response.json()['message']
def clean_disk():
"""
Perform disk cleanup
"""
lvm_to_clean = []
workers = get_typed_nodes(node_type='worker')
ocp_obj = ocp.OCP()
for worker in workers:
out = ocp_obj.exec_oc_debug_cmd(
node=worker.name, cmd_list=["lsblk -nd -e252,7 --output NAME --json"]
)
logger.info(out)
lsblk_output = json.loads(str(out))
lsblk_devices = lsblk_output['blockdevices']
for lsblk_device in lsblk_devices:
base_cmd = (
"""pvs --config "devices{filter = [ 'a|/dev/%s.*|', 'r|.*|' ] }" --reportformat json"""
% lsblk_device['name']
)
cmd = (
f"debug nodes/{worker.name} "
f"-- chroot /host {base_cmd}"
)
out = ocp_obj.exec_oc_cmd(
command=cmd, out_yaml_format=False,
)
logger.info(out)
pvs_output = json.loads(str(out))
pvs_list = pvs_output['report']
for pvs in pvs_list:
pv_list = pvs['pv']
for pv in pv_list:
logger.debug(pv)
device_dict = {
'hostname': f"{worker.name}", 'pv_name': f"{pv['pv_name']}"
}
lvm_to_clean.append(device_dict)
base_cmd = (
"""vgs --config "devices{filter = [ 'a|/dev/%s.*|', 'r|.*|' ] }" --reportformat json"""
% lsblk_device['name']
)
cmd = (
f"debug nodes/{worker.name} "
f"-- chroot /host {base_cmd}"
)
out = ocp_obj.exec_oc_cmd(
command=cmd, out_yaml_format=False,
)
logger.info(out)
vgs_output = json.loads(str(out))
vgs_list = vgs_output['report']
for vgs in vgs_list:
vg_list = vgs['vg']
for vg in vg_list:
logger.debug(vg)
device_dict = {
'hostname': f"{worker.name}", 'vg_name': f"{vg['vg_name']}"
}
lvm_to_clean.append(device_dict)
for devices | |
+ 'DATA' + str(idx) + '_FC_SPLIT_FACTOR - 1);\n\n')
buf_size = desp['DFC_BUF_SIZE'][idx] * desp['RES_ENGINE_NUM'][idx - len(desp['OP_NAME'])] * desp['FC_GROUP_FACTOR'][idx]
width = cal_width(buf_size)
code.append(indent(1) + 'bool more_to_read_from_buffer = true;\n')
code.append(indent(1) + 'bool more_to_collect_from_sys_arr = true;\n');
code.append(indent(1) + 'bool data_is_from_local_buffer;\n')
code.append(indent(1) + 'bool data_is_from_external_buffer;\n')
val = desp['PARAMETERS']['IN_NUM'] / desp['FC_SIMD_FACTOR'][2]
w = cal_width(val)
code.append(indent(1) + 'ap_uint<%d> oo = 0;\n' % (w))
val = desp['PARAMETERS']['OUT_IMG_H_T']
w = cal_width(val)
code.append(indent(1) + 'ap_uint<%d> h = 0;\n' %(w))
code.append(indent(1) + 'ap_uint<%d> h_bound = LAYER_IN_IMG_H_T / LAYER_STRIDE;\n' % (w))
val = desp['PARAMETERS']['OUT_IMG_W_T']
w = cal_width(val)
code.append(indent(1) + 'ap_uint<%d> w = 0;\n' % (w))
code.append(indent(1) + 'ap_uint<%d> w_bound = LAYER_IN_IMG_W_T / LAYER_STRIDE;\n' % (w))
code.append(indent(1) + 'bool done = 0;\n\n')
code.append(indent(1) + 'while(!done){\n')
code.append('#pragma HLS PIPELINE II=1\n')
indent_level = 2
buf_size = desp['PARAMETERS']['OUT_NUM'] * desp['PARAMETERS']['OUT_IMG_H_T'] * desp['COL_IL_FACTOR']
w = cal_width(buf_size)
code.append(indent(indent_level) + 'ap_uint<%d> local_buf_idx = h * LAYER_COL_IL_FACTOR * LAYER_OUT_NUM_T + (w %% LAYER_COL_IL_FACTOR) * LAYER_OUT_NUM_T + oo * %sDATA%d_FC_SIMD_FACTOR;\n' % (w, var_prefix, idx))
code.append(indent(indent_level) + 'if (w >= engine_id * LAYER_COL_IL_FACTOR){\n')
val = desp['PARAMETERS']['OUT_IMG_W_T']
w = cal_width(val)
code.append(indent(indent_level + 1) + 'ap_uint<%d> collector_id = w / LAYER_COL_IL_FACTOR;\n' % (w))
code.append(indent(indent_level + 1) + 'data_is_from_local_buffer = (collector_id == engine_id);\n')
code.append(indent(indent_level + 1) + 'data_is_from_external_buffer = !data_is_from_local_buffer;\n\n' )
code.append(indent(indent_level + 1) + var_prefix + 'Data' + str(idx) + 'TransferChannelType data_write_to_fifo;\n\n')
code.append(indent(indent_level + 1) + 'if (data_is_from_external_buffer){\n')
# code.append(indent(indent_level + 2) + 'data_write_to_fifo = fifo_transfer_in.read();\n')
code.append(indent(indent_level + 1) + '} else {\n')
for dup in range(desp['FC_SIMD_FACTOR'][idx]):
code.append(indent(indent_level + 2) + var_prefix + 'data_t' + str(idx) + ' data' + str(dup) + ' = buffer[0][local_buf_idx / %sDATA%d_FC_SIMD_FACTOR][%d];\n' % (var_prefix, idx, dup))
code.append(indent(indent_level + 2) + 'ap_uint<' + var_prefix + 'DATA' + str(idx) + '_WIDTH> data' + str(dup) + '_cast = Reinterpret<ap_uint<' + var_prefix + 'DATA' + str(idx) + '_WIDTH> >(data' + str(dup) + ');\n')
code.append(indent(indent_level + 2) + 'ap_uint<' + var_prefix + 'DATA' + str(idx) + '_WIDTH * ' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR> pack_data = (\n')
for dup in range(desp['FC_SIMD_FACTOR'][idx] - 1, 0, -1):
code.append(indent(indent_level + 3) + 'data' + str(dup) + '_cast,\n')
code.append(indent(indent_level + 3) + 'data0' + '_cast\n')
code.append(indent(indent_level + 2) + ');\n')
code.append(indent(indent_level + 2) + 'data_write_to_fifo.data = pack_data;\n')
code.append(indent(indent_level + 1) + '}\n\n')
code.append(indent(indent_level + 1) + 'fifo_transfer_out.write(data_write_to_fifo);\n')
code.append(indent(indent_level) + '}\n')
code.append(indent(2) + 'w++;\n')
# code.append(indent(2) + 'if (w == LAYER_IN_IMG_W_T / LAYER_STRIDE){\n')
code.append(indent(2) + 'if (w == w_bound){\n')
code.append(indent(3) + 'w = 0;\n')
code.append(indent(3) + 'h++;\n')
# code.append(indent(3) + 'if (h == LAYER_IN_IMG_H_T / LAYER_STRIDE){\n')
code.append(indent(3) + 'if (h == h_bound){\n')
code.append(indent(4) + 'h = 0;\n')
code.append(indent(4) + 'oo++;\n')
code.append(indent(4) + 'if (oo == LAYER_OUT_NUM_T / %sDATA%d_FC_SIMD_FACTOR){\n' % (var_prefix, idx))
code.append(indent(5) + 'oo = 0;\n')
code.append(indent(5) + 'done = 1;\n')
code.append(indent(4) + '}\n')
code.append(indent(3) + '}\n')
code.append(indent(2) + '}\n')
code.append(indent(1) + '}\n\n')
code.append('}\n\n')
idx += 1
return code
def dc(desp, config):
code = []
var_prefix = 'U%s' %(desp['KERNEL_ID']) + '_'
code.extend(dc_write(desp, config))
code.extend(dc_read(desp, config))
idx = len(desp['OP_NAME'])
for res_name in desp['RES_NAME']:
# Engine0
code.append('void ' + var_prefix + 'DataCollect' + str(idx) + 'Engine0(\n')
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'TransferChannelType> &fifo_transfer_in,\n')
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'TransferChannelType> &fifo_transfer_out,\n')
for gs in range(desp['FC_GROUP_FACTOR'][idx]):
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'PEChannelType> &fifo_collect_' + str(gs) + ',\n')
code.append(indent(1) + 'unsigned int engine_id,\n')
code.append(indent(1) + 'stream<uint> &fifo_config_in0, // from PE\n')
code.append(indent(1) + 'stream<uint> &fifo_config_in1, // from other engines\n')
code.append(indent(1) + 'stream<uint> &fifo_config_out\n')
code.append('){\n')
code.append('#pragma HLS DATA_PACK variable=fifo_transfer_in\n')
code.append('#pragma HLS DATA_PACK variable=fifo_transfer_out\n')
for gs in range(desp['FC_GROUP_FACTOR'][idx]):
code.append('#pragma HLS DATA_PACK variable=fifo_collect_' + str(gs) + '\n')
code.append('#pragma HLS INLINE off\n\n')
code.append(indent(1) + 'uint LAYER_OUT_NUM_T_prev;\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_H_T_prev;\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_W_T_prev;\n')
code.append(indent(1) + 'uint LAYER_COL_IL_FACTOR_prev;\n')
code.append(indent(1) + 'uint LAYER_STRIDE_prev;\n')
code.append(indent(1) + 'uint task_iter = 0;\n')
code.append(indent(1) + '// read in configurations\n')
code.append(indent(1) + 'uint LAYER_IN_NUM_T = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_OUT_NUM_T = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_H_T = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_W_T = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_FILTER_S = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_TASK_NUM1 = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_TASK_NUM2 = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_LOCAL_ACCUM_NUM = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_LOCAL_REG_NUM = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_ROW_IL_FACTOR = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_COL_IL_FACTOR = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_STRIDE = fifo_config_in0.read();\n')
code.append(indent(1) + 'uint LAYER_BATCH = fifo_config_in0.read();\n\n')
code.append(indent(1) + '// dummpy read\n')
code.append(indent(1) + 'LAYER_IN_NUM_T = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_OUT_NUM_T = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_IN_IMG_H_T = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_IN_IMG_W_T = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_FILTER_S = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_TASK_NUM1 = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_TASK_NUM2 = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_LOCAL_ACCUM_NUM = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_LOCAL_REG_NUM = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_ROW_IL_FACTOR = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_COL_IL_FACTOR = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_STRIDE = fifo_config_in1.read();\n')
code.append(indent(1) + 'LAYER_BATCH = fifo_config_in1.read();\n\n')
code.append(indent(1) + '// write out configurations\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_IN_NUM_T);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_OUT_NUM_T);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_IN_IMG_H_T);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_IN_IMG_W_T);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_FILTER_S);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_TASK_NUM1);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_TASK_NUM2);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_LOCAL_ACCUM_NUM);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_LOCAL_REG_NUM);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_ROW_IL_FACTOR);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_COL_IL_FACTOR);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_STRIDE);\n')
code.append(indent(1) + 'fifo_config_out.write(LAYER_BATCH);\n\n')
code.append(indent(1) + var_prefix + 'data_t' + str(idx) + ' ping_buffer[' + var_prefix + 'DATA' + str(idx) + \
'_FC_GROUP_FACTOR][' + var_prefix + 'DATA' + str(idx) + '_BUF_SIZE / %sDATA%d_FC_SIMD_FACTOR][%sDATA%d_FC_SIMD_FACTOR];\n' % (var_prefix, idx, var_prefix, idx))
code.append(indent(1) + var_prefix + 'data_t' + str(idx) + ' pong_buffer[' + var_prefix + 'DATA' + str(idx) + \
'_FC_GROUP_FACTOR][' + var_prefix + 'DATA' + str(idx) + '_BUF_SIZE / %sDATA%d_FC_SIMD_FACTOR][%sDATA%d_FC_SIMD_FACTOR];\n' % (var_prefix, idx, var_prefix, idx))
code.append('#pragma HLS ARRAY_PARTITION variable=ping_buffer dim=3 complete\n')
code.append('#pragma HLS ARRAY_PARTITION variable=pong_buffer dim=3 complete\n')
code.append('#pragma HLS DATA_PACK variable=ping_buffer\n')
code.append('#pragma HLS DATA_PACK variable=pong_buffer\n')
code.append('\n')
code.append(indent(1) + 'unsigned int initial_round = 0;\n')
code.append(indent(1) + 'bool done = 0;\n')
w = cal_width(desp['PARAMETERS']['LAYER_BATCH'])
code.append(indent(1) + 'ap_uint<%d> layer_iter = 0;\n' % (w))
code.append(indent(1) + 'bool layer_start = 0;\n')
code.append(indent(1) + 'while(!done){\n')
code.append(indent(2) + 'if (layer_start){\n')
code.append(indent(3) + '// read in configurations\n')
code.append(indent(3) + 'LAYER_IN_NUM_T = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_OUT_NUM_T = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_IN_IMG_H_T = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_IN_IMG_W_T = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_FILTER_S = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_TASK_NUM1 = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_TASK_NUM2 = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_LOCAL_ACCUM_NUM = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_LOCAL_REG_NUM = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_ROW_IL_FACTOR = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_COL_IL_FACTOR = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_STRIDE = fifo_config_in0.read();\n')
code.append(indent(3) + 'LAYER_BATCH = fifo_config_in0.read();\n\n')
code.append(indent(3) + '// dummpy read\n')
# code.append(indent(3) + 'LAYER_IN_NUM = fifo_config_in1.read();\n')
# code.append(indent(3) + 'LAYER_OUT_NUM = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_IN_NUM_T = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_OUT_NUM_T = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_IN_IMG_H_T = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_IN_IMG_W_T = fifo_config_in1.read();\n')
# code.append(indent(3) + 'LAYER_OUT_IMG_H = fifo_config_in1.read();\n')
# code.append(indent(3) + 'LAYER_OUT_IMG_W = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_FILTER_S = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_TASK_NUM1 = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_TASK_NUM2 = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_LOCAL_ACCUM_NUM = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_LOCAL_REG_NUM = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_ROW_IL_FACTOR = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_COL_IL_FACTOR = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_STRIDE = fifo_config_in1.read();\n')
code.append(indent(3) + 'LAYER_BATCH = fifo_config_in1.read();\n\n')
code.append(indent(3) + '// write out configurations\n')
# code.append(indent(3) + 'fifo_config_out.write(LAYER_IN_NUM);\n')
# code.append(indent(3) + 'fifo_config_out.write(LAYER_OUT_NUM);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_IN_NUM_T);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_OUT_NUM_T);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_IN_IMG_H_T);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_IN_IMG_W_T);\n')
# code.append(indent(3) + 'fifo_config_out.write(LAYER_OUT_IMG_H);\n')
# code.append(indent(3) + 'fifo_config_out.write(LAYER_OUT_IMG_W);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_FILTER_S);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_TASK_NUM1);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_TASK_NUM2);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_LOCAL_ACCUM_NUM);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_LOCAL_REG_NUM);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_ROW_IL_FACTOR);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_COL_IL_FACTOR);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_STRIDE);\n')
code.append(indent(3) + 'fifo_config_out.write(LAYER_BATCH);\n\n')
code.append(indent(3) + 'layer_start = 0;\n')
code.append(indent(2) + '}\n\n')
code.append(indent(2) + 'if (initial_round == 0){\n')
code.append(indent(3) + var_prefix + 'Data' + str(idx) + 'ReadData0(\n')
code.append(indent(3 + 1) + 'ping_buffer,\n')
for gs in range(desp['FC_GROUP_FACTOR'][idx]):
if gs < desp['FC_GROUP_FACTOR'][idx] - 1:
code.append(indent(3 + 1) + 'fifo_collect_' + str(gs) + ',\n')
else:
code.append(indent(3 + 1) + 'fifo_collect_' + str(gs) + ',\n')
code.append(indent(3 + 1) + 'LAYER_IN_IMG_H_T,\n')
code.append(indent(3 + 1) + 'LAYER_ROW_IL_FACTOR,\n')
code.append(indent(3 + 1) + 'LAYER_COL_IL_FACTOR,\n')
code.append(indent(3 + 1) + 'LAYER_STRIDE\n')
code.append(indent(3) + ');\n')
code.append(indent(2) + '} else {\n')
code.append(indent(3) + 'if (initial_round % 2 == 1){\n')
code.append(indent(4) + var_prefix + 'Data' + str(idx) + 'ReadData0(\n')
code.append(indent(4 + 1) + 'pong_buffer,\n')
for gs in range(desp['FC_GROUP_FACTOR'][idx]):
if gs < desp['FC_GROUP_FACTOR'][idx] - 1:
code.append(indent(4 + 1) + 'fifo_collect_' + str(gs) + ',\n')
else:
code.append(indent(4 + 1) + 'fifo_collect_' + str(gs) | |
#!/usr/bin/python3
from __future__ import absolute_import
from builtins import range
from past.builtins import basestring
from builtins import object
from .interface import tss_lib, ffi
from pytss import tspi_exceptions
import hashlib
def uuid_to_tss_uuid(uuid):
    """Convert a Python uuid.UUID into a TSS TSS_UUID structure."""
    tss_uuid = ffi.new('struct tdTSS_UUID *')[0]
    tss_uuid.ulTimeLow = uuid.time_low
    tss_uuid.usTimeMid = uuid.time_mid
    tss_uuid.usTimeHigh = uuid.time_hi_version
    tss_uuid.bClockSeqHigh = uuid.clock_seq_hi_variant
    tss_uuid.bClockSeqLow = uuid.clock_seq_low
    # Unpack the 48-bit node value big-endian into the six node bytes.
    for i, shift in enumerate(range(40, -8, -8)):
        tss_uuid.rgbNode[i] = (uuid.node >> shift) & 0xff
    return tss_uuid
class TspiObject(object):
    """Common wrapper around a TSPI object handle.

    Provides handle management plus the attribute get/set helpers shared by
    all concrete TSS object types in this module.
    """

    def __init__(self, context, ctype, tss_type, flags, handle=None):
        """
        Init a TSPI object.

        :param context: The TSS context to use
        :param ctype: The C pointer type backing this TSS object
        :param tss_type: The TSS object type constant
        :param flags: The default attributes of the object
        :param handle: Reuse an existing handle rather than creating a new
            object
        """
        self.context = context
        if handle is None:
            self.handle = ffi.new(ctype)
            tss_lib.Tspi_Context_CreateObject(context, tss_type, flags,
                                              self.handle)
        else:
            self.handle = handle

    def get_handle(self):
        """Return the raw TSS handle for the object."""
        return self.handle[0]

    def set_attribute_uint32(self, attrib, sub, val):
        """
        Set a 32 bit attribute associated with a given object.

        :param attrib: The attribute to modify
        :param sub: The subattribute to modify
        :param val: The value to assign
        """
        tss_lib.Tspi_SetAttribUint32(self.get_handle(), attrib, sub, val)

    def set_attribute_data(self, attrib, sub, data):
        """
        Set an arbitrary datatype attribute associated with the object.

        :param attrib: The attribute to modify
        :param sub: The subattribute to modify
        :param data: The data to assign
        """
        cdata = ffi.new('BYTE[]', len(data))
        for i, byte in enumerate(data):
            cdata[i] = byte
        tss_lib.Tspi_SetAttribData(self.get_handle(), attrib, sub,
                                   len(data), cdata)

    def get_attribute_data(self, attrib, sub):
        """
        Get an arbitrary datatype attribute associated with the object.

        :param attrib: The attribute to read
        :param sub: The subattribute to read
        :returns: a bytearray containing the data
        """
        out_len = ffi.new('UINT32 *')
        out_buf = ffi.new('BYTE **')
        tss_lib.Tspi_GetAttribData(self.handle[0], attrib, sub,
                                   out_len, out_buf)
        result = bytearray(out_buf[0][0:out_len[0]])
        # The TSS allocated the buffer; hand it back.
        tss_lib.Tspi_Context_FreeMemory(self.context, out_buf[0])
        return result

    def get_policy_object(self, poltype):
        """
        Get a policy object assigned to this object.

        :param poltype: The policy object type
        :returns: A TspiPolicy
        """
        policy_handle = ffi.new('TSS_HPOLICY *')
        tss_lib.Tspi_GetPolicyObject(self.get_handle(), poltype,
                                     policy_handle)
        return TspiPolicy(self.context, None, handle=policy_handle)
class TspiNV(TspiObject):
    """Wrapper for a TSS NVRAM storage object."""

    def __init__(self, context, flags):
        super(TspiNV, self).__init__(context, 'TSS_HNVSTORE *',
                                     tss_lib.TSS_OBJECT_TYPE_NV, flags)

    def read_value(self, offset, length):
        """
        Read a value from TPM NVRAM.

        :param offset: The offset in NVRAM to start reading
        :param length: The number of bytes of NVRAM to read
        :returns: A bytearray containing the requested data
        """
        lenval = ffi.new('UINT32 *')
        data = ffi.new('BYTE **')
        lenval[0] = length
        tss_lib.Tspi_NV_ReadValue(self.handle[0], offset, lenval, data)
        ret = bytearray(data[0][0:lenval[0]])
        # Fix: the TSS allocates the output buffer; release it so repeated
        # reads do not leak (matches get_attribute_data/unseal elsewhere in
        # this module).
        tss_lib.Tspi_Context_FreeMemory(self.context, data[0])
        return ret

    def set_index(self, index):
        """
        Select the requested NVRAM storage area index.

        :param index: The storage area index to select
        """
        tss_lib.Tspi_SetAttribUint32(self.handle[0],
                                     tss_lib.TSS_TSPATTRIB_NV_INDEX,
                                     0, index)
class TspiPolicy(TspiObject):
    """Wrapper for a TSS policy object."""

    def __init__(self, context, flags, handle=None):
        super(TspiPolicy, self).__init__(context, 'TSS_HPOLICY *',
                                         tss_lib.TSS_OBJECT_TYPE_POLICY,
                                         flags, handle)

    def set_secret(self, sectype, secret):
        """
        Set the authorisation data of the policy object.

        :param sectype: The type of the secret, any of the constants
            prefixed TSS_SECRET_MODE_ in tspi_defines
        :param secret: The secret data blob as either a string or array of
            integers in the range 0..255
        """
        tss_lib.Tspi_Policy_SetSecret(self.handle[0], sectype,
                                      len(secret), _c_byte_array(secret))

    def assign(self, target):
        """
        Assign this policy to an object.

        :param target: The object to which the policy will be assigned
        """
        tss_lib.Tspi_Policy_AssignToObject(self.handle[0],
                                           target.get_handle())
class TspiPCRs(TspiObject):
    """Wrapper for a TSS PCR-composite object."""

    def __init__(self, context, flags):
        # Map of selected PCR index -> digest (filled in by get_pcrs()).
        self.pcrs = {}
        super(TspiPCRs, self).__init__(context, 'TSS_HPCRS *',
                                       tss_lib.TSS_OBJECT_TYPE_PCRS, flags)

    def set_pcrs(self, pcrs):
        """
        Select the PCR indices referred to by this object.

        :param pcrs: A list of integer PCR indices
        """
        for index in pcrs:
            tss_lib.Tspi_PcrComposite_SelectPcrIndex(self.handle[0], index)
            self.pcrs[index] = ""

    def get_pcrs(self):
        """
        Read the digest value of every selected PCR.

        :returns: a dictionary of PCR/value pairs
        """
        for index in self.pcrs:
            value = ffi.new('BYTE **')
            value_len = ffi.new('UINT32 *')
            tss_lib.Tspi_PcrComposite_GetPcrValue(self.handle[0], index,
                                                  value_len, value)
            self.pcrs[index] = bytearray(value[0][0:value_len[0]])
            tss_lib.Tspi_Context_FreeMemory(self.context, value[0])
        return self.pcrs
class TspiHash(TspiObject):
    """Wrapper for a TSS hash object."""

    def __init__(self, context, flags):
        super(TspiHash, self).__init__(context, 'TSS_HHASH *',
                                       tss_lib.TSS_OBJECT_TYPE_HASH, flags)

    def update(self, data):
        """
        Feed new data into the hash object.

        :param data: The data to hash
        """
        tss_lib.Tspi_Hash_UpdateHashValue(self.get_handle(), len(data),
                                          _c_byte_array(data))

    def verify(self, key, signature):
        """
        Verify that the hash matches a given signature.

        :param key: A TspiObject representing the key to use
        :param signature: The signature to compare against
        """
        tss_lib.Tspi_Hash_VerifySignature(self.get_handle(),
                                          key.get_handle(),
                                          len(signature),
                                          _c_byte_array(signature))

    def sign(self, key):
        """
        Sign this hash with the specified key.

        :param key: a TspiKey instance corresponding to a loaded key
        :return: a buffer of bytes containing the signature
        """
        sig_len = ffi.new("UINT32*")
        sig = ffi.new("BYTE**")
        tss_lib.Tspi_Hash_Sign(self.get_handle(), key.get_handle(),
                               sig_len, sig)
        return ffi.buffer(sig[0], sig_len[0])
class TspiKey(TspiObject):
    """Wrapper for a TSS RSA key object."""

    def __init__(self, context, flags, handle=None):
        self.context = context
        super(TspiKey, self).__init__(context, 'TSS_HKEY *',
                                      tss_lib.TSS_OBJECT_TYPE_RSAKEY,
                                      flags, handle)

    def __del__(self):
        try:
            tss_lib.Tspi_Key_UnloadKey(self.get_handle())
        except tspi_exceptions.TSS_E_INVALID_HANDLE:
            # The key may already have been unloaded implicitly by a
            # previous operation.
            pass

    def set_modulus(self, n):
        """
        Set the key modulus.

        :param n: The key modulus
        """
        self.set_attribute_data(tss_lib.TSS_TSPATTRIB_RSAKEY_INFO,
                                tss_lib.TSS_TSPATTRIB_KEYINFO_RSA_MODULUS,
                                n)

    def get_keyblob(self):
        """
        Obtain the full TSS blob corresponding to the key.

        :returns: a bytearray containing the TSS key blob
        """
        return self.get_attribute_data(tss_lib.TSS_TSPATTRIB_KEY_BLOB,
                                       tss_lib.TSS_TSPATTRIB_KEYBLOB_BLOB)

    def get_pubkeyblob(self):
        """
        Obtain the TSS blob for the public portion of the key.

        :returns: a bytearray containing the TSS key blob
        """
        return self.get_attribute_data(
            tss_lib.TSS_TSPATTRIB_KEY_BLOB,
            tss_lib.TSS_TSPATTRIB_KEYBLOB_PUBLIC_KEY)

    def get_pubkey(self):
        """
        Obtain the public part of the key.

        :returns: a bytearray containing the public portion of the key
        """
        return self.get_attribute_data(
            tss_lib.TSS_TSPATTRIB_RSAKEY_INFO,
            tss_lib.TSS_TSPATTRIB_KEYINFO_RSA_MODULUS)

    def seal(self, data, pcrs=None):
        """
        Seal data to the local TPM using this key.

        :param data: The data to seal
        :param pcrs: Optional list of PCR indices to bind the data to
        :returns: a bytearray of the encrypted data
        """
        encdata = TspiObject(self.context, 'TSS_HENCDATA *',
                             tss_lib.TSS_OBJECT_TYPE_ENCDATA,
                             tss_lib.TSS_ENCDATA_SEAL)
        if pcrs is None:
            pcr_composite = 0
        else:
            # Keep pcr_obj referenced until the seal call completes so the
            # composite handle stays valid.
            pcr_obj = TspiPCRs(self.context, tss_lib.TSS_PCRS_STRUCT_INFO)
            pcr_obj.set_pcrs(pcrs)
            pcr_composite = pcr_obj.get_handle()
        cdata = ffi.new('BYTE[]', len(data))
        for i, byte in enumerate(data):
            cdata[i] = byte
        tss_lib.Tspi_Data_Seal(encdata.get_handle(), self.get_handle(),
                               len(data), cdata, pcr_composite)
        blob = encdata.get_attribute_data(
            tss_lib.TSS_TSPATTRIB_ENCDATA_BLOB,
            tss_lib.TSS_TSPATTRIB_ENCDATABLOB_BLOB)
        return bytearray(blob)

    def unseal(self, data):
        """
        Unseal data from the local TPM using this key.

        :param data: The data to unseal
        :returns: a bytearray of the unencrypted data
        """
        encdata = TspiObject(self.context, 'TSS_HENCDATA *',
                             tss_lib.TSS_OBJECT_TYPE_ENCDATA,
                             tss_lib.TSS_ENCDATA_SEAL)
        encdata.set_attribute_data(tss_lib.TSS_TSPATTRIB_ENCDATA_BLOB,
                                   tss_lib.TSS_TSPATTRIB_ENCDATABLOB_BLOB,
                                   data)
        out_len = ffi.new('UINT32 *')
        out = ffi.new('BYTE **')
        tss_lib.Tspi_Data_Unseal(encdata.get_handle(), self.get_handle(),
                                 out_len, out)
        result = bytearray(out[0][0:out_len[0]])
        tss_lib.Tspi_Context_FreeMemory(self.context, out[0])
        return result
class TspiTPM(TspiObject):
def __init__(self, context):
    """Bind to the TPM object owned by *context*.

    Unlike other TspiObject subclasses, no new TSS object is created --
    the context's existing TPM object handle is fetched instead.
    """
    tpm_handle = ffi.new('TSS_HTPM *')
    tss_lib.Tspi_Context_GetTpmObject(context, tpm_handle)
    self.handle = tpm_handle
    self.context = context
def collate_identity_request(self, srk, pubkey, aik):
    """
    Generate everything required to authenticate the TPM to a third party.

    :param srk: The storage root key to use
    :param pubkey: The key to use for signing the output key
    :param aik: The key to use as the identity key
    :returns: A bytearray containing a certificate request
    """
    req_len = ffi.new('UINT32 *')
    req = ffi.new('BYTE **')
    tss_lib.Tspi_TPM_CollateIdentityRequest(self.get_handle(),
                                            srk.get_handle(),
                                            pubkey.get_handle(), 0, b"",
                                            aik.get_handle(),
                                            tss_lib.TSS_ALG_AES,
                                            req_len, req)
    result = bytearray(req[0][0:req_len[0]])
    tss_lib.Tspi_Context_FreeMemory(self.context, req[0])
    return result
def get_capability(self, cap, sub):
    """
    Get information on the capabilities of the TPM.

    :param cap: The capability to query
    :param sub: The subcapability to query (bytes/bytearray)
    :returns: A bytearray containing the capability data
    """
    resp = ffi.new('BYTE **')
    resplen = ffi.new('UINT32 *')
    csub = ffi.new('BYTE []', len(sub))
    for i in range(len(sub)):
        csub[i] = sub[i]
    # Fix: the TSS 1.2 entry point is spelled Tspi_TPM_GetCapability;
    # the previous 'Tspi_TPM_Getcapability' would fail attribute lookup
    # on the binding generated from the TSS headers.
    tss_lib.Tspi_TPM_GetCapability(self.handle[0], cap, len(sub), csub,
                                   resplen, resp)
    ret = bytearray(resp[0][0:resplen[0]])
    tss_lib.Tspi_Context_FreeMemory(self.context, resp[0])
    return ret
def get_quote(self, aik, pcrs, challenge):
    """
    Retrieve a signed set of PCR values.

    :param aik: A TspiObject representing the Attestation Identity Key
    :param pcrs: A TspiPCRs representing the PCRs to be quoted
    :param challenge: The challenge (nonce) to use; if falsy, an all-zero
        nonce is sent
    :returns: A tuple containing the quote data and the validation block
    """
    valid = ffi.new('TSS_VALIDATION *')
    # 20-byte buffer for the SHA-1 digest of the challenge. ffi.new zeroes
    # the allocation, so with no challenge the external data is all zeros.
    chalmd = ffi.new('BYTE[]', 20)
    if challenge:
        m = hashlib.sha1()
        m.update(challenge)
        sha1 = bytearray(m.digest())
        for i in range(len(sha1)):
            chalmd[i] = sha1[i]
    valid[0].ulExternalDataLength = ffi.sizeof(chalmd)
    valid[0].rgbExternalData = chalmd
    tss_lib.Tspi_TPM_Quote(self.handle[0], aik.get_handle(), pcrs.get_handle(),
                           valid)
    data = bytearray(valid[0].rgbData[0:valid[0].ulDataLength])
    validation = bytearray(valid[0].rgbValidationData
                           [0:valid[0].ulValidationDataLength])
    # Both output buffers were allocated by the TSS; release them.
    tss_lib.Tspi_Context_FreeMemory(self.context, valid[0].rgbData)
    tss_lib.Tspi_Context_FreeMemory(self.context, valid[0].rgbValidationData)
    return (data, validation)
def activate_identity(self, aik, asymblob, symblob):
"""
Decrypt the challenge provided by the | |
list contained in hpn.
The supplied search_dict has all options, which are culled by the supplied
hpn and exact_match flag.
Parameters
----------
hpn : list
List of HERA part numbers being checked as returned from self._proc_hpnlist
search_dict : dict
Contains information about all parts possible to search, keyed on the "standard"
cm_utils.make_part_key
exact_match : bool
If False, will only check the first characters in each hpn entry. E.g. 'HH1'
would allow 'HH1', 'HH10', 'HH123', etc
Returns
-------
dict
Contains the found entries within search_dict
"""
hpn_upper = [x.upper() for x in hpn]
found_dict = {}
for key in search_dict.keys():
hpn, rev = cm_utils.split_part_key(key.upper())
use_this_one = False
if exact_match:
if hpn in hpn_upper:
use_this_one = True
else:
for hlu in hpn_upper:
if hpn.startswith(hlu):
use_this_one = True
break
if use_this_one:
found_dict[key] = copy.copy(search_dict[key])
return(found_dict)
def _proc_hpnlist(self, hpn_request, exact_match):
    """
    Normalize the hpn request into a list plus an exact_match flag.

    Parameters
    ----------
    hpn_request : str, list
        List/string of input hera part number(s) (whole or 'startswith').
        The string 'default' selects the default station prefixes from
        cm_sysdef; any other string is treated as a csv-list.
    exact_match : bool
        If False, only the leading characters of each hpn entry are
        checked (e.g. 'HH1' matches 'HH1', 'HH10', 'HH123', ...).

    Returns
    -------
    list
        Updated hpn request list.
    bool
        Updated exact_match setting.
    """
    wants_default = (isinstance(hpn_request, str)
                     and hpn_request.lower() == 'default')
    if wants_default:
        # Default prefixes are never exact matches.
        return cm_sysdef.hera_zone_prefixes, False
    return cm_utils.listify(hpn_request), exact_match
def _get_part_types_found(self, hookup_connections):
    """
    Collect the part types on both ends of a list of connections.

    Also populates self.part_type_cache with part-name -> part-type.

    Parameters
    ----------
    hookup_connections : list
        List of Connection objects.

    Returns
    -------
    list
        List of part_types found.
    """
    if not hookup_connections:
        return []
    types_found = set()
    for conn in hookup_connections:
        # Both the upstream and downstream ends contribute a part type.
        for part, rev in ((conn.upstream_part, conn.up_part_rev),
                          (conn.downstream_part, conn.down_part_rev)):
            key = cm_utils.make_part_key(part, rev)
            hptype = self.active.parts[key].hptype
            types_found.add(hptype)
            self.part_type_cache[part] = hptype
    return list(types_found)
def _follow_hookup_stream(self, part, rev, port_pol):
    """
    Follow the signal chain upstream and downstream from a part.

    Parameters
    ----------
    part : str
        HERA part number.
    rev : str
        HERA part revision.
    port_pol : str
        Port polarization to follow, formatted as '<pol><<port>'
        (e.g. 'E<port').

    Returns
    -------
    list
        Connections for the full hookup, ordered upstream-most first.
    """
    key = cm_utils.make_part_key(part, rev)
    hptype = self.active.parts[key].hptype
    pol, port = port_pol.split('<')
    allowed_ports = cm_utils.to_upper(self.sysdef.get_ports(pol, hptype))
    self.upstream = []
    self.downstream = []
    # Walk each direction with a fresh tracker; _recursive_connect
    # mutates the tracker as it advances along the chain.
    for direction in ('up', 'down'):
        tracker = Namespace(direction=direction, part=part.upper(),
                            rev=rev.upper(), key=key, pol=pol.upper(),
                            hptype=hptype, port=port.upper(),
                            allowed_ports=allowed_ports)
        self._recursive_connect(tracker)
    # Upstream connections were appended outward-first; reverse them so
    # the returned hookup reads source-to-sink.
    return list(reversed(self.upstream)) + list(self.downstream)
def _recursive_connect(self, current):
    """
    Walk the signal chain one hop at a time, recording each connection.

    Recurses until _get_connection finds no further hop. Note that
    _get_connection advances `current` in place.

    Parameters
    ----------
    current : Namespace object
        Namespace containing the current traversal state.
    """
    conn = self._get_connection(current)
    if conn is None:
        return None
    bucket = {'up': self.upstream,
              'down': self.downstream}.get(current.direction)
    if bucket is not None:
        bucket.append(conn)
    self._recursive_connect(current)
def _get_connection(self, current):
    """
    Get the next connected part going the given direction.

    Side effect: advances `current` in place (part/rev/port/key/type/
    allowed_ports) to the far side of the returned connection; the
    recursion in _recursive_connect depends on this.

    Parameters
    ----------
    current : Namespace object
        Namespace containing current traversal state.

    Returns
    -------
    Connection or None
        The connection traversed, or None when the chain ends.
    """
    # Connections are keyed by the part on the *other* end of the link,
    # so look the current part up in the opposite-direction table.
    odir = self.sysdef.opposite_direction[current.direction]
    try:
        options = list(self.active.connections[odir][current.key].keys())
    except KeyError:
        # No connections at all for this part in this direction.
        return None
    this_port = self._get_port(current, options)
    if this_port is None:
        return None
    this_conn = self.active.connections[odir][current.key][this_port]
    # Step `current` to the part on the far side of this connection.
    if current.direction == 'up':
        current.part = this_conn.upstream_part.upper()
        current.rev = this_conn.up_part_rev.upper()
        current.port = this_conn.upstream_output_port.upper()
    elif current.direction == 'down':
        current.part = this_conn.downstream_part.upper()
        current.rev = this_conn.down_part_rev.upper()
        current.port = this_conn.downstream_input_port.upper()
    current.key = cm_utils.make_part_key(current.part, current.rev)
    # Ports available on the new part, used to pick the follow-on port.
    options = list(self.active.connections[current.direction][current.key].keys())
    try:
        current.type = self.active.parts[current.key].hptype
    except KeyError:  # pragma: no cover
        return None
    current.allowed_ports = cm_utils.to_upper(self.sysdef.get_ports(current.pol, current.type))
    current.port = self._get_port(current, options)
    return this_conn
def _get_port(self, current, options):
if current.port is None:
return None
sysdef_options = []
for p in options:
if p in current.allowed_ports:
sysdef_options.append(p)
if current.hptype in self.sysdef.single_pol_labeled_parts[self.hookup_type]:
if current.part[-1].upper() == current.pol[0]:
return sysdef_options[0]
if len(sysdef_options) == 1:
return sysdef_options[0]
for p in sysdef_options:
if p == current.port:
return p
for p in sysdef_options:
if p[0] == current.pol[0]:
return p
    def _sort_hookup_display(self, sortby, hookup_dict, def_sort_order='NRP'):
        """
        Sort hookup entries for display and return their keys in order.

        Parameters
        ----------
        sortby : None, str or list
            Columns to sort by.  A str is split on ','.  Each element may be
            'column' or 'column:order', where order is a permutation of the
            characters understood by cm_utils.peel_key.  When None, keys are
            simply returned in 'NPR' order.
        hookup_dict : dict
            Hookup entries keyed by part key; values must provide .hookup
            and .table_entry_row (presumably cm_dossier.HookupEntry).
        def_sort_order : str
            Order applied to any sortby column given without ':order'.
            NOTE(review): this defaults to 'NRP', yet the sortby-is-None path
            and the implicit 'station' column use 'NPR' -- confirm the
            asymmetry is intentional.

        Returns
        -------
        list
            hookup_dict keys sorted for display.
        """
        if sortby is None:
            return cm_utils.put_keys_in_order(hookup_dict.keys(), sort_order='NPR')
        if isinstance(sortby, str):
            sortby = sortby.split(',')
        # Map each requested (and known) column name to its sort order.
        sort_order_dict = {}
        for stmp in sortby:
            ss = stmp.split(':')
            if ss[0] in self.col_list:
                if len(ss) == 1:
                    ss.append(def_sort_order)
                sort_order_dict[ss[0]] = ss[1]
        # Always include 'station' as a final tie-breaker.
        # NOTE(review): when sortby was passed as a list this appends to the
        # caller's list (in-place mutation) -- confirm acceptable.
        if 'station' not in sort_order_dict.keys():
            sortby.append('station')
            sort_order_dict['station'] = 'NPR'
        # Build a sortable tuple key for every hookup entry, then sort on it.
        key_bucket = {}
        show = {'revs': True, 'ports': False}
        for this_key, this_hu in hookup_dict.items():
            pk = list(this_hu.hookup.keys())[0]
            this_entry = this_hu.table_entry_row(pk, sortby, self.part_type_cache, show)
            ekey = []
            for eee in [cm_utils.peel_key(x, sort_order_dict[sortby[i]])
                        for i, x in enumerate(this_entry)]:
                ekey += eee
            key_bucket[tuple(ekey)] = this_key
        sorted_keys = []
        for _k, _v in sorted(key_bucket.items()):
            sorted_keys.append(_v)
        return sorted_keys
def _make_header_row(self, hookup_dict, cols_to_show):
"""
Generate the appropriate header row for the displayed hookup.
Parameters
----------
hookup_dict : dict
Hookup dictionary generated in self.get_hookup
cols_to_show : list, str
list of columns to include in hookup listing
Returns
-------
list
List of header titles.
"""
self.col_list = []
for h in hookup_dict.values():
for cols in h.columns.values():
if len(cols) > len(self.col_list):
self.col_list = copy.copy(cols)
if isinstance(cols_to_show, str):
cols_to_show = cols_to_show.split(',')
cols_to_show = [x.lower() for x in cols_to_show]
if 'all' in cols_to_show:
return self.col_list
headers = []
for col in self.col_list:
if col.lower() in cols_to_show:
headers.append(col)
return headers
# ############################### Cache file methods #####################################
    def write_hookup_cache_to_file(self, log_msg='Write.'):
        """
        Write the current hookup to the cache file.

        Regenerates the full hookup from the database for
        self.hookup_list_to_cache at 'now', serializes it to JSON in
        self.hookup_cache_file, and records the action in the cm log.

        Parameters
        ----------
        log_msg : str
            String containing any desired messages for the cm log.
            This should be a short description of why a new cache file is being written.
            E.g. "Found new antenna." or "Cronjob to ensure cache file up to date."
        """
        self.at_date = cm_utils.get_astropytime('now')
        self.hookup_type = 'parts_hera'
        self.cached_hookup_dict = self.get_hookup_from_db(
            self.hookup_list_to_cache, pol='all', at_date=self.at_date,
            exact_match=False, hookup_type=self.hookup_type)
        # HookupEntry objects are not JSON-serializable; convert each to a
        # plain dict for the payload (the deepcopy keeps the in-memory
        # cached_hookup_dict untouched).
        hookup_dict_for_json = copy.deepcopy(self.cached_hookup_dict)
        for key, value in self.cached_hookup_dict.items():
            if isinstance(value, cm_dossier.HookupEntry):
                hookup_dict_for_json[key] = value._to_dict()
        save_dict = {'at_date_gps': self.at_date.gps,
                     'hookup_type': self.hookup_type,
                     'hookup_list': self.hookup_list_to_cache,
                     'hookup_dict': hookup_dict_for_json,
                     'part_type_cache': self.part_type_cache}
        with open(self.hookup_cache_file, 'w') as outfile:
            json.dump(save_dict, outfile)
        # Log what was written so cache regeneration can be audited.
        cf_info = self.hookup_cache_file_info()
        log_dict = {'hu-list': cm_utils.stringify(self.hookup_list_to_cache),
                    'log_msg': log_msg, 'cache_file_info': cf_info}
        cm_utils.log('update_cache', log_dict=log_dict)
def read_hookup_cache_from_file(self):
"""Read the current cache file into memory."""
with open(self.hookup_cache_file, 'r') as outfile:
cache_dict = json.load(outfile)
if self.hookup_cache_file_OK(cache_dict):
print("<<<Cache IS current with database>>>")
else:
print("<<<Cache is NOT current with database>>>")
self.cached_at_date = Time(cache_dict['at_date_gps'], format='gps')
self.cached_hookup_type = cache_dict['hookup_type']
self.cached_hookup_list = cache_dict['hookup_list']
hookup_dict = {}
for key, value in cache_dict['hookup_dict'].items():
# this should only contain dicts made from HookupEntry
# add asserts to make sure
assert(isinstance(value, dict))
assert(sorted(value.keys()) == sorted(['entry_key', 'hookup', 'fully_connected',
'hookup_type', 'columns', 'timing', 'sysdef']))
hookup_dict[key] = cm_dossier.HookupEntry(input_dict=value)
self.cached_hookup_dict = hookup_dict
self.part_type_cache = cache_dict['part_type_cache']
self.hookup_type = self.cached_hookup_type
    def hookup_cache_file_OK(self, cache_dict=None):
        """
        Determine if the cache file is up-to-date with the cm db and if hookup_type is correct.

        There are 4 relevant dates:
            cm_hash_time: last time the database was updated per CMVersion
            file_mod_time: when the cache file was last changed (ie written)
            at_date: the date of the get hookup request (self.at_date)
            cached_at_date: the date in the cache file for which it was written.

        If the cache_file was written before the latest cm_version, it fails because
        anything could have changed within the database.

        Parameters
        ----------
        cache_dict : dict or None
            Cache contents as read from the cache file (see
            read_hookup_cache_from_file).  None means "no cache" and
            always fails.

        Returns
        -------
        bool
            True if the cache file is current.
        """
        # Get the relevant dates (checking the cache_file/cm_version up front)
        if cache_dict is None:
            return False
        stats = os.stat(self.hookup_cache_file)
        result = self.session.query(cm_transfer.CMVersion).order_by(
            cm_transfer.CMVersion.update_time).all()
        # Latest database change vs. when the cache file was written.
        cm_hash_time = Time(result[-1].update_time, format='gps')
        file_mod_time = Time(stats.st_mtime, format='unix')
        # If CMVersion changed since file was written, don't know so fail...
        if file_mod_time < cm_hash_time:  # pragma: no cover
            log_dict = {'file_mod_time': cm_utils.get_time_for_display(file_mod_time),
                        'cm_hash_time': cm_utils.get_time_for_display(cm_hash_time)}
            cm_utils.log('__hookup_cache_file_date_OK: out of date.', log_dict=log_dict)
            return False
        cached_at_date = Time(cache_dict['at_date_gps'], format='gps')
        cached_hookup_type = cache_dict['hookup_type']
        # Adopt the cached hookup_type if none has been requested yet.
        if self.hookup_type is None:
            self.hookup_type = cached_hookup_type
        if self.hookup_type != cached_hookup_type:  # pragma: no cover
            return False
        # If the cached and query dates are after the last hash time it's ok
        if cached_at_date > cm_hash_time and self.at_date > cm_hash_time:
            return True
        # If not returned above, return False to regenerate
        return False
def hookup_cache_file_info(self):
"""
Read in information about the current cache file.
Returns
-------
str
String containing the information.
"""
if not os.path.exists(self.hookup_cache_file): # pragma: no cover
s = "{} does not exist.\n".format(self.hookup_cache_file)
else:
self.read_hookup_cache_from_file()
s = 'Cache file: {}\n'.format(self.hookup_cache_file)
s += 'Cache hookup type: {}\n'.format(self.cached_hookup_type)
s += 'Cached_at_date: {}\n'.format(cm_utils.get_time_for_display(self.cached_at_date))
stats = os.stat(self.hookup_cache_file)
file_mod_time = Time(stats.st_mtime, format='unix')
s += | |
newmatrix = np.dot(T, newmatrix)
elif latticeparameters_has_elements:
B0matrix = CP.calc_B_RR(latticeparameters, directspace=1, setvolume=False)
# if verbose:
# print("newmatrix", newmatrix)
# print("B0matrix", B0matrix)
Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
Miller_indices,
absolutespotsindices,
UBmatrix=newmatrix,
B0matrix=B0matrix,
offset=sourcedepth,
pureRotation=0,
labXMAS=0,
verbose=0,
pixelsize=pixelsize,
dim=dim,
kf_direction=kf_direction)
distanceterm = np.sqrt((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)
if weights is not None:
allweights = np.sum(weights)
distanceterm = distanceterm * weights / allweights
# if verbose:
# # print "** distance residues = " , distanceterm, " ********"
# print("** mean distance residue = ", np.mean(distanceterm), " ********")
# print "twthe, chi", twthe, chi
alldistances_array = distanceterm
if verbose:
# print "varying_parameters_values in error_function_on_demand_strain",varying_parameters_values
# print "arr_indexvaryingparameters",arr_indexvaryingparameters
# print "Xmodel",Xmodel
# print "pixX",pixX
# print "Ymodel",Ymodel
# print "pixY",pixY
# print "newmatrix",newmatrix
# print "B0matrix",B0matrix
# print "deltamat",deltamat
# print "initrot",initrot
# print "param_orient",param_calib
# print "distanceterm",distanceterm
pass
# if weights is not None:
# print("***********mean weighted pixel deviation ",
# np.mean(alldistances_array), " ********")
# else:
# print("***********mean pixel deviation ", np.mean(alldistances_array), " ********")
# print "newmatrix", newmatrix
if returnalldata:
# concatenated all pairs distances, all UB matrices, all UB.B0matrix matrices
return alldistances_array, Uxyz, newmatrix, Tc, T, Ts
else:
return alldistances_array
def fit_function_general(varying_parameters_values_array,
                        varying_parameters_keys,
                        Miller_indices,
                        allparameters,
                        absolutespotsindices,
                        Xexp,
                        Yexp,
                        UBmatrix_start=IDENTITYMATRIX,
                        B0matrix=IDENTITYMATRIX,
                        nb_grains=1,
                        pureRotation=0,
                        verbose=0,
                        pixelsize=165.0 / 2048,
                        dim=(2048, 2048),
                        weights=None,
                        kf_direction="Z>0",
                        **kwd):
    """
    Refine a general set of model parameters by least squares so that
    computed Laue spot positions match the experimental (Xexp, Yexp).

    The parameters named in varying_parameters_keys (starting values in
    varying_parameters_values_array) are refined with leastsq using
    error_function_general as the residual function.

    Returns
    -------
    refined_values or None
        Refined values in the order of varying_parameters_keys, or None
        when leastsq reports a failure status.
    """
    if verbose:
        # Diagnostic only: evaluate the residuals at the starting values
        # with verbose output; the result is discarded.
        error_function_general(varying_parameters_values_array,
                                varying_parameters_keys,
                                Miller_indices,
                                allparameters,
                                absolutespotsindices,
                                Xexp,
                                Yexp,
                                initrot=UBmatrix_start,
                                B0matrix=B0matrix,
                                pureRotation=pureRotation,
                                verbose=1,
                                pixelsize=pixelsize,
                                dim=dim,
                                weights=weights,
                                kf_direction=kf_direction)
    # setting keywords of _error_function_on_demand_strain during the fitting because leastsq handle only *args but not **kwds
    # NOTE(review): rewriting __defaults__ rebinds the error function's
    # keyword defaults process-wide; not thread-safe and it affects any
    # other caller of error_function_general -- confirm acceptable.
    error_function_general.__defaults__ = (UBmatrix_start,
                                            B0matrix,
                                            pureRotation,
                                            0,
                                            pixelsize,
                                            dim,
                                            weights,
                                            kf_direction,
                                            False)
    # LEASTSQUARE
    res = leastsq(error_function_general,
                    varying_parameters_values_array,
                    args=(
                        varying_parameters_keys,
                        Miller_indices,
                        allparameters,
                        absolutespotsindices,
                        Xexp,
                        Yexp,
                    ),  # args=(rre,ertetr,) last , is important!
                    maxfev=5000,
                    full_output=1,
                    xtol=1.0e-11,
                    epsfcn=0.0,
                    **kwd)
    refined_values = res[0]
    # leastsq status flags 1-4 indicate success; 5 (maxfev reached) is also
    # accepted here.  Anything else means the fit failed.
    if res[-1] not in (1, 2, 3, 4, 5):
        return None
    else:
        if verbose:
            # Recompute the residuals at the refined values, asking for the
            # full data set (distances and refined matrices) for inspection.
            alldata = error_function_general(refined_values,
                                            varying_parameters_keys,
                                            Miller_indices,
                                            allparameters,
                                            absolutespotsindices,
                                            Xexp,
                                            Yexp,
                                            initrot=UBmatrix_start,
                                            B0matrix=B0matrix,
                                            pureRotation=pureRotation,
                                            verbose=1,
                                            pixelsize=pixelsize,
                                            dim=dim,
                                            weights=weights,
                                            kf_direction=kf_direction,
                                            returnalldata=True)
            # alldistances_array, Uxyz, newmatrix, Tc, T, Ts
            alldistances_array, Uxyz, refinedUB, refinedTc, refinedT, refinedTs = alldata
        return refined_values
# Maps a direct-space lattice parameter name to its index in a 6-element
# (a, b, c, alpha, beta, gamma) lattice-parameters array.
dict_lattice_parameters = {"a": 0, "b": 1, "c": 2, "alpha": 3, "beta": 4, "gamma": 5}
def fit_function_latticeparameters(varying_parameters_values_array,
                                varying_parameters_keys,
                                Miller_indices,
                                allparameters,
                                absolutespotsindices,
                                Xexp,
                                Yexp,
                                UBmatrix_start=IDENTITYMATRIX,
                                nb_grains=1,
                                pureRotation=0,
                                verbose=0,
                                pixelsize=165.0 / 2048,
                                dim=(2048, 2048),
                                weights=None,
                                kf_direction="Z>0",
                                additional_expression="none",
                                **kwd):
    """
    Fit direct (real) unit cell lattice parameters (in refinedB0)
    and orientation:

        q = refinedUzUyUz Ustart refinedB0 G*

    using an error function that returns the array of pair (exp. - model)
    distances

        Sum_i [weights_i((Xmodel_i-Xexp_i)**2+(Ymodel_i-Yexp_i)**2)]

    where Xmodel, Ymodel come from G* = ha* + kb* + lc*.

    Returns
    -------
    refined_values or None
        Refined values in the order of varying_parameters_keys, or None
        when leastsq reports a failure status.
    """
    if verbose:
        # Diagnostic only: evaluate the residuals at the starting values
        # with verbose output; the result is discarded.
        error_function_latticeparameters(varying_parameters_values_array,
                                        varying_parameters_keys,
                                        Miller_indices,
                                        allparameters,
                                        absolutespotsindices,
                                        Xexp,
                                        Yexp,
                                        initrot=UBmatrix_start,
                                        pureRotation=pureRotation,
                                        verbose=1,
                                        pixelsize=pixelsize,
                                        dim=dim,
                                        weights=weights,
                                        kf_direction=kf_direction,
                                        additional_expression=additional_expression)
    # setting keywords of _error_function_on_demand_strain during the fitting because leastsq handle only *args but not **kwds
    # NOTE(review): rewriting __defaults__ rebinds the error function's
    # keyword defaults process-wide; not thread-safe -- confirm acceptable.
    error_function_latticeparameters.__defaults__ = (UBmatrix_start,
                                                    pureRotation,
                                                    0,
                                                    pixelsize,
                                                    dim,
                                                    weights,
                                                    kf_direction,
                                                    False,
                                                    additional_expression)
    # LEASTSQUARE
    res = leastsq(error_function_latticeparameters,
                    varying_parameters_values_array,
                    args=(
                        varying_parameters_keys,
                        Miller_indices,
                        allparameters,
                        absolutespotsindices,
                        Xexp,
                        Yexp,
                    ),  # args=(rre,ertetr,) last , is important!
                    maxfev=5000,
                    full_output=1,
                    xtol=1.0e-11,
                    epsfcn=0.0,
                    **kwd)
    refined_values = res[0]
    # leastsq status flags 1-4 indicate success; 5 (maxfev reached) is also
    # accepted here.  Anything else means the fit failed.
    if res[-1] not in (1, 2, 3, 4, 5):
        return None
    else:
        # NOTE(review): this final evaluation always runs ('if 1:'), whereas
        # the sibling fit_function_general gates it on `verbose` -- confirm
        # the unconditional recomputation is intentional.
        if 1:
            alldata = error_function_latticeparameters(refined_values,
                                                    varying_parameters_keys,
                                                    Miller_indices,
                                                    allparameters,
                                                    absolutespotsindices,
                                                    Xexp,
                                                    Yexp,
                                                    initrot=UBmatrix_start,
                                                    pureRotation=pureRotation,
                                                    verbose=1,
                                                    pixelsize=pixelsize,
                                                    dim=dim,
                                                    weights=weights,
                                                    kf_direction=kf_direction,
                                                    returnalldata=True,
                                                    additional_expression=additional_expression)
            # alldistances_array, Uxyz, newmatrix, Tc, T, Ts
            alldistances_array, Uxyz, refinedUB, refinedB0matrix, refinedLatticeparameters = (
                alldata)
        return refined_values
def error_function_latticeparameters(varying_parameters_values_array,
varying_parameters_keys,
Miller_indices,
allparameters,
absolutespotsindices,
Xexp,
Yexp,
initrot=IDENTITYMATRIX,
pureRotation=0,
verbose=0,
pixelsize=165.0 / 2048,
dim=(2048, 2048),
weights=None,
kf_direction="Z>0",
returnalldata=False,
additional_expression="none"):
"""
q = UzUyUz Ustart B0 G*
Interface error function to return array of pair (exp. - model) distances
Sum_i [weights_i((Xmodel_i-Xexp_i)**2+(Ymodel_i-Yexp_i)**2) ]
Xmodel,Ymodel comes from G*=ha*+kb*+lc*
q = refinedUzUyUz Ustart refinedB0 G*
B0 reference structure reciprocal space frame (a*,b*,c*) a* // ki b* perp to a* and perp to z (z belongs to the plane of ki and detector normal vector n)
i.e. columns of B0 are components of a*,b* and c* expressed in x,y,z LT frame
refinedB0 is obtained by refining the 5 /6 lattice parameters
possible keys for parameters to be refined are:
five detector frame calibration parameters:
det_distance,det_xcen,det_ycen,det_beta, det_gamma
three misorientation angles with respect to LT orthonormal frame (x, y, z) matrices Ux, Uy,Uz:
anglex,angley,anglez
5 lattice parameters among 6 (a,b,c,alpha, beta,gamma)
"""
# reading default parameters
# CCD plane calibration parameters
if isinstance(allparameters, np.ndarray):
calibrationparameters = (allparameters.tolist())[:5]
else:
calibrationparameters = allparameters[:5]
# allparameters[5:8] = 0,0,0
Uy, Ux, Uz = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
latticeparameters = np.array(allparameters[8:14])
nb_varying_parameters = len(varying_parameters_keys)
# factorscale = 1.
for varying_parameter_index, parameter_name in enumerate(varying_parameters_keys):
# print "varying_parameter_index,parameter_name", varying_parameter_index, parameter_name
if parameter_name in ("anglex", "angley", "anglez"):
# print "got angles!"
if nb_varying_parameters > 1:
anglevalue = varying_parameters_values_array[varying_parameter_index] * DEG
else:
anglevalue = varying_parameters_values_array[0] * DEG
# print "anglevalue (rad)= ",anglevalue
ca = np.cos(anglevalue)
sa = np.sin(anglevalue)
if parameter_name is "angley":
Uy = np.array([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])
elif parameter_name is "anglex":
Ux = np.array([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])
elif parameter_name is "anglez":
Uz = np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])
elif parameter_name in ("alpha", "beta", "gamma"):
# print 'got Tc elements: ', parameter_name
indparam = dict_lattice_parameters[parameter_name]
# if nb_varying_parameters > 1:
# latticeparameters[indparam] = latticeparameters[3] * np.exp(varying_parameters_values_array[varying_parameter_index] / factorscale)
# else:
# latticeparameters[indparam] = latticeparameters[3] * np.exp(varying_parameters_values_array[0] / factorscale)
if nb_varying_parameters > 1:
latticeparameters[indparam] = varying_parameters_values_array[varying_parameter_index]
else:
latticeparameters[indparam] = varying_parameters_values_array[0]
elif parameter_name in ("a", "b", "c"):
# print 'got Tc elements: ', parameter_name
indparam = dict_lattice_parameters[parameter_name]
# if nb_varying_parameters > 1:
# latticeparameters[indparam] = latticeparameters[0] * np.exp(varying_parameters_values_array[varying_parameter_index] / factorscale)
# else:
# latticeparameters[indparam] = latticeparameters[0] * np.exp(varying_parameters_values_array[0] / factorscale)
if nb_varying_parameters > 1:
latticeparameters[indparam] = varying_parameters_values_array[varying_parameter_index]
else:
latticeparameters[indparam] = varying_parameters_values_array[0]
Uxyz = np.dot(Uz, np.dot(Ux, Uy))
if additional_expression == "a==b":
indparam = dict_lattice_parameters["b"]
indparam1 = dict_lattice_parameters["a"]
latticeparameters[indparam] = latticeparameters[indparam1]
newB0matrix = CP.calc_B_RR(latticeparameters, directspace=1, setvolume=False)
# if verbose:
# print("\n-------\nvarying_parameters_keys", varying_parameters_keys)
# print("varying_parameters_values_array", varying_parameters_values_array)
# print("Uxyz", Uxyz)
# print("latticeparameters", latticeparameters)
# print("newB0matrix", newB0matrix)
# DictLT.RotY40 such as X=DictLT.RotY40 Xsample (xs,ys,zs =columns expressed in x,y,z frame)
# transform in sample frame Ts
# same transform in x,y,z LT frame T
# Ts = DictLT.RotY40-1 T DictLT.RotY40
# T = DictLT.RotY40 Ts DictLT.RotY40-1
newmatrix = np.dot(Uxyz, initrot)
# if 0: # verbose:
# print("initrot", initrot)
# print("newmatrix", newmatrix)
Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
Miller_indices,
absolutespotsindices,
UBmatrix=newmatrix,
| |
# BSD 2-Clause License
#
# Copyright (c) 2021, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ..config import CONFIG
from ..entity import DBNode
from ..error import SmartSimError, SSUnsupportedError
from ..settings import MpirunSettings, SbatchSettings, SrunSettings
from ..utils import get_logger
from .orchestrator import Orchestrator
logger = get_logger(__name__)
class SlurmOrchestrator(Orchestrator):
    def __init__(
        self,
        port=6379,
        db_nodes=1,
        batch=True,
        hosts=None,
        run_command="srun",
        account=None,
        time=None,
        alloc=None,
        db_per_host=1,
        interface="ipogif0",
        **kwargs,
    ):
        """Initialize an Orchestrator reference for Slurm based systems

        The orchestrator launches as a batch by default. The Slurm orchestrator
        can also be given an allocation to run on. If no allocation is provided,
        and batch=False, at launch, the orchestrator will look for an interactive
        allocation to launch on.

        The SlurmOrchestrator port provided will be incremented if multiple
        databases per node are launched.

        SlurmOrchestrator supports launching with both ``srun`` and ``mpirun``
        as launch binaries. If mpirun is used, the hosts parameter should be
        populated with length equal to that of the ``db_nodes`` argument.

        :param port: TCP/IP port
        :type port: int
        :param db_nodes: number of database shards, defaults to 1
        :type db_nodes: int, optional
        :param batch: Run as a batch workload, defaults to True
        :type batch: bool, optional
        :param hosts: specify hosts to launch on
        :type hosts: list[str]
        :param run_command: specify launch binary. Options are "mpirun" and "srun", defaults to "srun"
        :type run_command: str, optional
        :param account: account to run batch on
        :type account: str, optional
        :param time: walltime for batch 'HH:MM:SS' format
        :type time: str, optional
        :param alloc: allocation to launch on, defaults to None
        :type alloc: str, optional
        :param db_per_host: number of database shards per system host (MPMD), defaults to 1
        :type db_per_host: int, optional
        :param interface: network interface the database binds to, defaults to "ipogif0"
        :type interface: str, optional
        """
        super().__init__(
            port,
            interface,
            db_nodes=db_nodes,
            batch=batch,
            run_command=run_command,
            alloc=alloc,
            db_per_host=db_per_host,
            **kwargs,
        )
        self.batch_settings = self._build_batch_settings(
            db_nodes, alloc, batch, account, time, **kwargs
        )
        if hosts:
            self.set_hosts(hosts)
        elif not hosts and run_command == "mpirun":
            # mpirun cannot discover hosts on its own; they must be given.
            raise SmartSimError(
                "hosts argument is required when launching SlurmOrchestrator with mpirun"
            )
        # Filled by _fill_reserved with argument names SmartSim manages
        # itself (consulted by set_run_arg / set_batch_arg).
        self._reserved_run_args = {}
        self._reserved_batch_args = {}
        self._fill_reserved()
def set_cpus(self, num_cpus):
"""Set the number of CPUs available to each database shard
This effectively will determine how many cpus can be used for
compute threads, background threads, and network I/O.
:param num_cpus: number of cpus to set
:type num_cpus: int
"""
if self.batch:
self.batch_settings.batch_args["cpus-per-task"] = num_cpus
for db in self:
db.run_settings.set_cpus_per_task(num_cpus)
def set_walltime(self, walltime):
"""Set the batch walltime of the orchestrator
Note: This will only effect orchestrators launched as a batch
:param walltime: amount of time e.g. 10 hours is 10:00:00
:type walltime: str
:raises SmartSimError: if orchestrator isn't launching as batch
"""
if not self.batch:
raise SmartSimError("Not running as batch, cannot set walltime")
self.batch_settings.set_walltime(walltime)
def set_hosts(self, host_list):
"""Specify the hosts for the ``SlurmOrchestrator`` to launch on
:param host_list: list of host (compute node names)
:type host_list: str, list[str]
:raises TypeError: if wrong type
"""
if isinstance(host_list, str):
host_list = [host_list.strip()]
if not isinstance(host_list, list):
raise TypeError("host_list argument must be a list of strings")
if not all([isinstance(host, str) for host in host_list]):
raise TypeError("host_list argument must be list of strings")
# TODO check length
if self.batch:
self.batch_settings.set_hostlist(host_list)
for host, db in zip(host_list, self.entities):
db.set_host(host)
db.run_settings.set_hostlist([host])
def set_batch_arg(self, arg, value):
"""Set a Sbatch argument the orchestrator should launch with
Some commonly used arguments such as --job-name are used
by SmartSim and will not be allowed to be set.
:param arg: batch argument to set e.g. "exclusive"
:type arg: str
:param value: batch param - set to None if no param value
:type value: str | None
:raises SmartSimError: if orchestrator not launching as batch
"""
if not self.batch:
raise SmartSimError("Not running as batch, cannot set batch_arg")
if arg in self._reserved_batch_args:
logger.warning(
f"Can not set batch argument {arg}: it is a reserved keyword in SlurmOrchestrator"
)
else:
self.batch_settings.batch_args[arg] = value
    def set_run_arg(self, arg, value):
        """Set a run argument the orchestrator should launch
        each node with (it will be passed to ``srun`` or ``mpirun``)

        Some commonly used arguments are used
        by SmartSim and will not be allowed to be set.
        For example, "n", "N", etc.

        :param arg: run argument to set
        :type arg: str
        :param value: run parameter - set to None if no parameter value
        :type value: str | None
        """
        # Reserved arguments are tracked per run-settings class; look up the
        # set matching the settings type of the first database node.
        if arg in self._reserved_run_args[type(self.entities[0].run_settings)]:
            logger.warning(
                f"Can not set run argument {arg}: it is a reserved keyword in SlurmOrchestrator"
            )
        else:
            for db in self.entities:
                db.run_settings.run_args[arg] = value
def _build_batch_settings(self, db_nodes, alloc, batch, account, time, **kwargs):
batch_settings = None
db_per_host = kwargs.get("db_per_host", 1)
# enter this conditional if user has not specified an allocation to run
# on or if user specified batch=False (alloc will be found through env)
if not alloc and batch:
batch_args = {"ntasks-per-node": db_per_host}
batch_settings = SbatchSettings(
nodes=db_nodes, time=time, account=account, batch_args=batch_args
)
return batch_settings
def _build_run_settings(self, exe, exe_args, **kwargs):
run_command = kwargs.get("run_command", "srun")
if run_command == "srun":
return self._build_srun_settings(exe, exe_args, **kwargs)
if run_command == "mpirun":
return self._build_mpirun_settings(exe, exe_args, **kwargs)
raise SSUnsupportedError(
f"SlurmOrchestrator does not support {run_command} as a launch binary"
)
def _build_srun_settings(self, exe, exe_args, **kwargs):
alloc = kwargs.get("alloc", None)
db_per_host = kwargs.get("db_per_host", 1)
run_args = kwargs.get("run_args", {})
# if user specified batch=False
# also handles batch=False and alloc=False (alloc will be found by launcher)
run_args["nodes"] = 1
run_args["ntasks"] = db_per_host
run_args["ntasks-per-node"] = db_per_host
run_settings = SrunSettings(exe, exe_args, run_args=run_args, alloc=alloc)
if db_per_host > 1:
# tell step to create a mpmd executable
run_settings.mpmd = True
return run_settings
def _build_mpirun_settings(self, exe, exe_args, **kwargs):
alloc = kwargs.get("alloc", None)
db_per_host = kwargs.get("db_per_host", 1)
if alloc:
msg = (
"SlurmOrchestrator using OpenMPI cannot specify allocation to launch in"
)
msg += "\n User must launch in interactive allocation or as batch."
logger.warning(msg)
if db_per_host > 1:
msg = "SlurmOrchestrator does not support multiple databases per node when launching with mpirun"
raise SmartSimError(msg)
run_args = kwargs.get("run_args", {})
run_settings = MpirunSettings(exe, exe_args, run_args=run_args)
run_settings.set_tasks(1)
return run_settings
def _initialize_entities(self, **kwargs):
"""Initialize DBNode instances for the orchestrator."""
db_nodes = kwargs.get("db_nodes", 1)
cluster = not bool(db_nodes < 3)
if int(db_nodes) == 2:
raise SSUnsupportedError("Orchestrator does not support clusters of size 2")
db_per_host = kwargs.get("db_per_host", 1)
port = kwargs.get("port", 6379)
db_conf = CONFIG.redis_conf
redis_exe = CONFIG.redis_exe
ai_module = self._get_AI_module()
start_script = self._find_redis_start_script()
for db_id in range(db_nodes):
db_node_name = "_".join((self.name, str(db_id)))
# create the exe_args list for launching multiple databases
# per node. also collect port range for dbnode
ports = []
exe_args = []
for port_offset in range(db_per_host):
next_port = int(port) + port_offset
start_script_args = [
start_script, # redis_starter.py
f"+ifname={self._interface}", # pass interface to start script
"+command", # command flag for argparser
redis_exe, # redis-server
db_conf, # redis6.conf file
ai_module, # redisai.so
"--port", # redis port
str(next_port), # port number
]
if cluster:
start_script_args += self._get_cluster_args(db_node_name, next_port)
exe_args.append(" ".join(start_script_args))
ports.append(next_port)
# if only launching 1 db_per_host, we don't need a list of exe args lists
if db_per_host == 1:
exe_args = exe_args[0]
run_settings = self._build_run_settings("python", exe_args, **kwargs)
| |
<gh_stars>1-10
"""
This script creates figures for all ligands which parented a given ligand.
All compounds for the entire AutoGrow run will be compiled into a dictionary \
which is used to search when tracing lineages. We pickle these dictionaries so \
that if this script is run multiple times these dictionaries do not need to be \
recreated. For this reason the 1st time running this script on a data set will \
take longer than future runs.
"""
import os
import sys
import argparse
import json
import copy
import pickle
import matplotlib.pyplot as plt
import rdkit
import rdkit.Chem as Chem
from rdkit.Chem import Draw, AllChem
from PIL import Image
#Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog('rdApp.*')
##################################################################
##################################################################
########### BASIC OPERATIONS #####################################
##################################################################
##################################################################
def get_obj_from_pickle_file(file_path):
    """
    Retrieve the object(s) stored in a pickle file.

    Inputs:
    :param str file_path: path to pickle file

    Returns:
    :returns: unknown objects: object(s) recovered from the pickle file
    """
    with open(file_path, 'rb') as pickle_handle:
        return pickle.load(pickle_handle)
def write_pickle_to_file(file_path, obj):
    """
    Serialize an object to disk as a pickle file.

    Inputs:
    :param str file_path: path to output pickle File
    :param unknown obj: object(s) to pickle
    """
    with open(file_path, 'wb') as pickle_handle:
        pickle.dump(obj, pickle_handle, protocol=pickle.HIGHEST_PROTOCOL)
def get_usable_format(infile):
    """
    Read a formatted .smi file into a list of per-line field lists.

    The .smi must follow the following format for each line:
        MANDATORY INFO
            part 1 is the SMILES string
            part 2 is the SMILES name/ID
        Optional info
            part -1 (the last piece of info) is the SMILES diversity score
                relative to its population
            part -2 (the second to last piece of info) is the fitness metric
                for evaluating
                - For default setting this is the Docking score
                - If you add a unique scoring function Docking score should be
                    -3 and that score function should be -2
            Any other information MUST be between part 2 and part -2 (this
            allows for the expansion of features without disrupting the rest
            of the code)

    Inputs:
    :param str infile: the string of the PATHname of a formatted .smi file to
        be read into the program
    Returns:
    :returns: list usable_list_of_smiles: list of SMILES and their associated
        information formatted into a list which is usable by the rest of
        Autogrow
    :raises Exception: if infile does not exist
    """
    # Guard clause: fail loudly before trying to open a missing file.
    if not os.path.exists(infile):
        print("\nFile of Source compounds does not exist: {}\n".format(infile))
        raise Exception("File of Source compounds does not exist")

    usable_list_of_smiles = []
    with open(infile) as smiles_file:
        for line in smiles_file:
            line = line.replace("\n", "")
            # Fields are normally tab-delimited; fall back to 4-space
            # delimiters for files written with spaces instead of tabs.
            parts = line.split("\t")
            if len(parts) == 1:
                parts = line.split("    ")
            # A copy of the split fields is one entry per input line
            # (replaces the original manual index-by-index copy loop).
            usable_list_of_smiles.append(list(parts))
    return usable_list_of_smiles
#####################################################################
# Make images
#####################################################################
def get_image_dimensions(imagefile):
    """
    Helper function that returns the image dimensions.

    :param: imagefile str (path to image)
    :return dict (of the form: {width: <int>, height: <int>,
        size_bytes: <int>})
    """
    # PIL's Image is imported at module level; the context manager ensures
    # the file handle is closed once the header has been read.
    with Image.open(imagefile) as img:
        # Width and height of the image in pixels
        width, height = img.size
        # File size on disk, in bytes
        size_bytes = os.path.getsize(imagefile)
    return dict(width=width, height=height, size_bytes=size_bytes)
def get_grid_img(img_files_list, list_printout_info, result_grid_filename):
    """
    This will plot a row of imgs and save them to a file.
    Inputs:
    :param list img_files_list: list of paths to img file for each subplot
    :param list list_printout_info: list of info to add as
        caption to each subplot in order
    :param str result_grid_filename: path to outfile
    """
    images_count = len(img_files_list)
    # All tiles share the dimensions of the first image (they are produced
    # at a uniform size by make_single_image_files).
    dimmension_dict = get_image_dimensions(img_files_list[0])
    # /30 scales pixel dimensions down to a matplotlib figsize.
    width = dimmension_dict["width"] / 30
    height = dimmension_dict["height"]/ 30
    # size_bytes = dimmension_dict["size_bytes"]
    fig, axs_list = plt.subplots(1, images_count, figsize=(width*images_count, height))
    # With a single subplot, plt.subplots returns a bare Axes (not an
    # array), so the one-image case must be handled separately.
    if len(img_files_list) == 1:
        sub_ax = axs_list
        image_filename = img_files_list[0]
        printout = list_printout_info[0]
        # NOTE(review): imread's 2nd positional arg is the image *format*;
        # passing printout here looks unintended -- confirm.
        plt_image = plt.imread(os.path.abspath(image_filename), printout)
        sub_ax.imshow(plt_image)
        sub_ax.set_title(printout, fontsize=40, fontweight="bold")
        sub_ax.grid(False)
        sub_ax.axis(False)
        sub_ax.autoscale_view('tight')
    else:
        for sub_ax, image_filename, printout in zip(axs_list, img_files_list, list_printout_info):
            plt_image = plt.imread(os.path.abspath(image_filename), printout)
            sub_ax.imshow(plt_image)
            sub_ax.set_title(printout, fontsize=40)
            sub_ax.grid(False)
            sub_ax.axis(False)
            sub_ax.autoscale_view('tight')
        del plt_image
    plt.savefig(result_grid_filename)
    # NOTE(review): del drops the reference but does not close the figure;
    # plt.close(fig) would release the memory -- left as-is.
    del fig
def make_single_image_files(vars, lineage_dict, mol_dict):
    """
    Create an individual .png image file for every ligand in an ancestry,
    plus a blank "None.png" image used later as a grid spacer.

    Inputs:
    :param dict vars: dictionary of variables to use; must provide the
        "single_image_folder" output path
    :param dict lineage_dict: dict keyed by generation number whose values
        are lists of parent ligand full-length names (or None); only its
        number of keys is used here, to pick the image resolution
    :param dict mol_dict: dict keyed by ligand full-length name; the last
        item of each value is the RDKit mol object to draw
    """
    # Use smaller tiles for deep lineages so the final grids stay a
    # manageable size.
    if len(list(lineage_dict.keys())) <= 6:
        img_size = 500
    else:
        img_size = 250
    # make single img files for each ligand
    # make a blank None image used later for spacers
    mol_none = Chem.MolFromSmiles("")
    img = Draw.MolsToGridImage([mol_none], molsPerRow=1, subImgSize=(img_size, img_size))
    img_file_name = vars["single_image_folder"]+ "None.png"
    img.save(img_file_name)
    for mol_name in mol_dict.keys():
        # Deep copy so Compute2DCoords does not mutate the shared mol object.
        mol = copy.deepcopy(mol_dict[mol_name][-1])
        tmp = AllChem.Compute2DCoords(mol)
        img = Draw.MolsToGridImage([mol], molsPerRow=1,
                                   subImgSize=(img_size, img_size))
        img_file_name = vars["single_image_folder"]+ mol_name + ".png"
        img.save(img_file_name)
        del tmp
#
def make_image_files(vars, lineage_dict, mol_dict):
    """
    Create a per-ligand image for every molecule in the lineage, then one
    grid image per generation showing the full ancestry row.

    Inputs:
    :param dict vars: dictionary of variables to use; must provide the
        "single_image_folder" and "ancestry_image_folder" paths
    :param dict lineage_dict: dict keyed by generation number (relative to
        the child ligand) whose values are lists of parent ligand
        full-length names, with None used as a spacer when a parent is
        missing
    :param dict mol_dict: dict keyed by ligand full-length name; item
        index 2 of each value is used as the subplot caption
    """
    # Render the individual ligand images first; the per-generation grids
    # are stitched together from those files.
    make_single_image_files(vars, lineage_dict, mol_dict)

    if not os.path.exists(vars["ancestry_image_folder"]):
        os.mkdir(vars["ancestry_image_folder"])

    for gen_num, lineage_name_list in lineage_dict.items():
        result_grid_filename = "{}{}.png".format(
            vars["ancestry_image_folder"], gen_num)
        img_files_list = []
        list_printout_info = []
        for mol_name in lineage_name_list:
            if mol_name is None:
                # Blank spacer image keeps grid columns aligned.
                img_files_list.append(
                    vars["single_image_folder"] + "None.png")
                list_printout_info.append("")
            else:
                img_files_list.append(
                    vars["single_image_folder"] + mol_name + ".png")
                # Caption with the ligand's stored property (index 2).
                list_printout_info.append(str(mol_dict[mol_name][2]))
        get_grid_img(img_files_list, list_printout_info, result_grid_filename)
#####################################################################
# get parents for a ligand
#####################################################################
def get_parents_full_names(child_name, master_shortname_mol_dict):
"""
Get full-length names for each parent for a given child ligand.
Will return as list of names for parents. This will always be a list of 2.
There are three options of what is returned:
1) child ligand has no parents: ie) source/complementary ligand)
will return [None, None]
2) child ligand has 1 parent: ie) single reactant mutant
will return ["parent_1_name", None]
3) child ligand has 2 parent: ie) crossover or two reactant mutation
will return ["parent_1_name", "parent_2_name"]
Inputs:
:param str child_name: full-length name of child ligand to find parents.
ligand from the AutoGrow run. keys are full-length name of the ligands.
:param dict master_shortname_mol_dict: dictionary where keys are
shorthand names and the items the full-length name.
Returns:
:returns: list parent_list: a list of string or Nones for each parent.
1) child ligand has no parents: ie) source/complementary ligand)
will return [None, None]
2) child ligand has 1 parent: ie) single reactant mutant
will return ["parent_1_name", None]
3) child ligand has 2 parent: ie) crossover or two reactant mutation
will return ["parent_1_name", "parent_2_name"]
"""
# Handle if no parents
if "(" not in child_name and ")" not in child_name:
return [None, None]
parents_info = child_name.split(")")[0].replace("(", "")
# Handle single parent cases
if "+" not in parents_info:
parent_1_short = parents_info
if parent_1_short not in master_shortname_mol_dict.keys():
printout = "a parent is not in master_shortname_mol_dict " \
+ "this means that the dictionary is missing information on" \
+ " a ligand. missing parrent is: {}".format(parent_1_short)
raise Exception(printout)
parent_1_name = master_shortname_mol_dict[parent_1_short]
return [parent_1_name, None]
parent_1_short = parents_info.split("+")[0]
parent_2_short = parents_info.split("+")[1]
if parent_1_short not in master_shortname_mol_dict.keys():
print(master_shortname_mol_dict.keys())
printout = "a parent is not in master_shortname_mol_dict " \
| |
<reponame>robertsmoto/sodavault
from django.db import models
from itemsapp.models import Item, Part, ProductPartJoin
from itemsapp.models import Product, SimpleProduct, DigitalProduct, BundleProduct, VariableProduct
from contactapp.models import Company, Supplier, Location
from django.db.models import Sum, Avg
from django.db import models
from contactapp.models import Person
from django.forms import ValidationError
from ledgerapp.models import Entry, Lot, Batch
import datetime
from django.db.models import Prefetch
from decimal import Decimal
class Bid(models.Model):
    """A supplier's quoted price for an item (a part or any product flavour).

    All item FKs are optional; in practice a bid points at one of
    parts/products/... (``__str__`` below assumes parts-or-products).
    """
    # --- the item being bid on: one optional FK per item flavour ---
    parts = models.ForeignKey(
        Part,
        related_name = "bid_parts",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    products = models.ForeignKey(
        Product,
        related_name = "bid_products",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    simple_products = models.ForeignKey(
        SimpleProduct,
        related_name = "bid_simple_products",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    digital_products = models.ForeignKey(
        DigitalProduct,
        related_name = "bid_digital_products",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    bundle_products = models.ForeignKey(
        BundleProduct,
        related_name = "bid_bundle_products",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    variable_products = models.ForeignKey(
        VariableProduct,
        related_name = "bid_variable_products",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    # --- who supplied the quote ---
    suppliers = models.ForeignKey(
        Supplier,
        related_name = "bid_suppliers",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    # --- quote lifecycle dates ---
    date_requested = models.DateField(
        blank=True,
        null=True)
    date_submitted = models.DateField(
        blank=True,
        null=True)
    # --- quoted amounts ---
    cost = models.DecimalField(
        decimal_places=2,
        max_digits=11,
        blank=True,
        null=True)
    shipping = models.DecimalField(
        decimal_places=2,
        max_digits=11,
        blank=True,
        null=True)
    # Quantity the quoted cost covers; cost_per_unit divides by this.
    quantity = models.IntegerField(
        blank=True,
        null=True,
        default=1,
        help_text="Divides by this number. 1 box if used by box, or 24 pcs per box if used by piece")
    units = models.CharField(
        max_length = 100,
        blank=True)
    # Marks the accepted quote among competing bids for the same item.
    is_winning_bid = models.BooleanField(default=False)
@property
def cost_per_unit(self):
cost = self.cost if self.cost is not None else 0
shipping = self.shipping if self.shipping is not None else 0
quantity = self.quantity if self.quantity is not None else 1
return round((cost + shipping) / quantity, 4)
def __str__(self):
if self.parts:
return "{} {}".format(self.suppliers, self.parts)
else:
return "{} {}".format(self.suppliers, self.products)
    def useall_check(self, detail=None, *args, **kwargs):
        """For a shipped product, compute the remaining "use_all" part stock.

        Returns (quantity, add_cogm): the net IRAW ledger quantity for a
        "use_all" part of the shipped product and its value (quantity *
        average cost per unit).

        NOTE(review): only the figures for the *last* use_all part survive
        the loop -- confirm whether multiple use_all parts per product are
        expected.
        """
        _products_shipped = 0
        _parts_per_product = 0
        _parts_needed = 0
        _quantity = 0
        _add_cogm = 0
        if detail.products:
            _products_shipped = detail.quantity_shipped
            # All part->product join rows for this product, parts prefetched.
            ppj_q = detail.products.ppj_products.all().prefetch_related(
                Prefetch("parts"))
            for ppj in ppj_q:
                if ppj.use_all:
                    _parts_per_product = ppj.quantity
                    # NOTE(review): _parts_needed is computed but never used.
                    _parts_needed = _products_shipped * _parts_per_product
                    # query ledger sum debit_quantity and credit quantity
                    # then _remaining_parts = d_qnty - c_qnty
                    """ use a function here """
                    le_q = Entry.objects.filter(parts=ppj.parts, account='IRAW')
                    # and reused here
                    agg_stats = le_q.aggregate(
                        Sum('debit_amount'),
                        Sum('credit_amount'),
                        Sum('debit_quantity'),
                        Sum('credit_quantity')
                    )
                    # Local helper: treat missing aggregate sums as zero
                    # (aggregate() returns None when there are no rows).
                    def check_for_zero(self, myvariable):
                        return myvariable if myvariable is not None else 0
                    _d_amount = check_for_zero(self, agg_stats['debit_amount__sum'])
                    _c_amount = check_for_zero(self, agg_stats['credit_amount__sum'])
                    _d_quantity = check_for_zero(self, agg_stats['debit_quantity__sum'])
                    _c_quantity = check_for_zero(self, agg_stats['credit_quantity__sum'])
                    # Net value / quantity currently in raw inventory.
                    _cost = _d_amount - _c_amount
                    _quantity = _d_quantity - _c_quantity
                    _ave_cpu = _cost / _quantity if _quantity > 0 else 0
                    _add_cogm = _quantity * _ave_cpu
        return _quantity, _add_cogm
    def adj_qnty_descrepancies(self, le_list, today, product, part, lot, batch,
            location, qnty_shipped, qnty_received, shipping_cpu, *args, **kwargs):
        """Append ledger entries correcting a shipped vs. received quantity gap.

        Short shipments are written off to ILOS (expense) against inventory;
        overages are debited to inventory against APAY (assumed paid for).
        Discrepancies are valued at the item's estimated cost per unit plus
        the per-piece shipping cost.  Returns le_list with the two balancing
        entries appended.

        NOTE(review): when qnty_shipped == qnty_received neither branch sets
        _d_acct/_d_qnty/etc., yet append_le_list is still called below --
        that path would raise NameError.  Confirm callers only invoke this
        when a discrepancy actually exists.
        """
        _le_list = le_list
        _today = today
        _product = product
        _part = part
        _lot = lot
        _batch = batch
        # The same location is used on both sides of the correcting entry.
        _d_lctn = location
        _c_lctn = location
        _qnty_shipped = qnty_shipped
        _qnty_received = qnty_received
        _shipping_cpu = shipping_cpu
        # Exactly one of part/product is expected to be set.
        _item = _part if _part else _product
        _ecpu_plus_shipping = _item.ecpu + _shipping_cpu
        _qnty_discrepancy = 0
        if qnty_shipped > qnty_received:
            # Short shipment: write the missing pieces off as a loss.
            _qnty_discrepancy = qnty_shipped - qnty_received
            _d_acct = "ILOS" # ILOS is an expense accnt for inventory write-off
            _d_qnty = _qnty_discrepancy
            _d_amnt = _qnty_discrepancy * _ecpu_plus_shipping
            _c_qnty = _qnty_discrepancy
            _c_amnt = _qnty_discrepancy * _ecpu_plus_shipping
            if _part:
                _c_acct = "IRAW"
                _note = "ASN: {} inventory loss".format(_part.sku)
            else: # is product
                _c_acct = "IMER"
                _note = "ASN: {} inventory loss".format(_product.sku)
        if qnty_received > qnty_shipped:
            # Overage: extra pieces received; book them into inventory.
            _qnty_discrepancy = qnty_received - qnty_shipped
            _c_acct = "APAY" # making the assumption that extra parts and products were paid for
            _d_qnty = _qnty_discrepancy
            _d_amnt = _qnty_discrepancy * _ecpu_plus_shipping
            _c_qnty = _qnty_discrepancy
            _c_amnt = _qnty_discrepancy * _ecpu_plus_shipping
            if _part:
                _d_acct = "IRAW"
                _note = "ASN: {} inventory loss".format(_part.sku)
            else: # is prooduct
                _d_acct = "IMER"
                _note = "ASN: {} inventory loss".format(_product.sku)
        # NOTE(review): append_le_list is called as a bare name although it
        # is defined with a self parameter -- confirm the intended scoping.
        _le_list = append_le_list(
            self, le_list = _le_list, entry_type="debit", date = _today,
            acct = _d_acct, lctn = _d_lctn, product = _product,
            part = _part, lot = _lot, batch = _batch, qnty = _d_qnty,
            amnt = _d_amnt, note = _note, *args, **kwargs)
        _le_list = append_le_list(
            self, le_list = _le_list, entry_type="credit", date = _today,
            acct = _c_acct, lctn = _c_lctn,
            product = _product, part = _part, lot = _lot, batch = _batch,
            qnty = _c_qnty, amnt = _c_amnt, note = _note,
            *args, **kwargs)
        return _le_list
def append_le_list(self, le_list, entry_type="", date=None, acct="", lctn=None,
product=None, part=None, lot=None, batch=None, qnty=0, amnt=0, note="", *args, **kwargs):
_le_dict = {}
_le_dict['date'] = date
_le_dict['acct'] = acct
_le_dict['product'] = product
_le_dict['part'] = part
_le_dict['lot'] = lot
_le_dict['batch'] = batch
_le_dict['lctn'] = lctn
_le_dict['d_qnty'] = qnty if entry_type == "debit" else None
_le_dict['d_amnt'] = amnt if entry_type == "debit" else None
_le_dict['c_qnty'] = qnty if entry_type == "credit" else None
_le_dict['c_amnt'] = amnt if entry_type == "credit" else None
_le_dict['note'] = note
le_list.append(_le_dict)
return le_list
    def qnty_by_batch(self, lid=None, part=None, product=None, total_qnty=0, *args, **kwargs):
        """
        Returns a list of dictionaries, prioritizes by batch.
        Filter by location by specifiying lid (location id)

        Walks the per-batch ledger aggregates for the given part or product,
        consuming whole batches until total_qnty is satisfied; the last
        batch used may be consumed partially (amount prorated by cost per
        unit).  Each dict describes one batch draw-down: part, product,
        lctn, lot, batch, qnty, amnt.

        NOTE(review): if total_qnty exceeds the total stock the loop is
        exhausted and the function implicitly returns None (not a list) --
        confirm callers handle that case.
        """
        _total_qnty = total_qnty
        _batch_list = []
        # Nothing requested -> nothing to allocate.
        if _total_qnty == 0:
            return _batch_list
        _part = part
        _product = product
        _q_type = "part" if _part else "product"
        _lid = lid
        if _q_type == "part":
            if _lid:
                batch_agg_q = Entry.inventory.parts(pid=_part.id).by_location(lid=_lid).with_batch_agg()
            else:
                batch_agg_q = Entry.inventory.parts(pid=_part.id).with_batch_agg()
            # _le_q = Entry.objects.filter(parts__id=_part.id)
        if _q_type == "product":
            if _lid:
                batch_agg_q = Entry.inventory.products(pid=_product.id).by_location(lid=_lid).with_batch_agg()
            else:
                batch_agg_q = Entry.inventory.products(pid=_product.id).with_batch_agg()
        for batch in batch_agg_q:
            _batch_dict = {}
            # Aggregate sums may be None/0; coerce to 0 before arithmetic.
            _b_d_qnty = batch['d_quantity'] if batch['d_quantity'] else 0
            _b_d_amnt = batch['d_amount'] if batch['d_amount'] else 0
            _b_c_qnty = batch['c_quantity'] if batch['c_quantity'] else 0
            _b_c_amnt = batch['c_amount'] if batch['c_amount'] else 0
            # Net quantity / value remaining in this batch.
            _batch_qnty = _b_d_qnty - _b_c_qnty
            _batch_amnt = _b_d_amnt - _b_c_amnt
            _lctn = Location.objects.get(id=batch['locations'])
            _lot = Lot.objects.get(id=batch['lots'])
            _batch = Batch.objects.get(id=batch['batches'])
            if _total_qnty - _batch_qnty >= 0:
                # Demand covers the whole batch: consume it entirely.
                _qnty = _batch_qnty
                _amnt = _batch_amnt
            else:
                _qnty = _total_qnty
                # this calculates the amount based on cpu if not consuming the entire batch
                _amnt = (_batch_amnt / _batch_qnty) * _total_qnty
            # add data to dict and append _le_list
            _batch_dict['part'] = _part
            _batch_dict['product'] = _product
            _batch_dict['lctn'] = _lctn
            _batch_dict['lot'] = _lot
            _batch_dict['batch'] = _batch
            _batch_dict['qnty'] = _qnty
            _batch_dict['amnt'] = round(_amnt, 4)
            # only append dict if _qnty is > 0
            if _qnty > 0:
                _batch_list.append(_batch_dict)
            _total_qnty -= _batch_qnty
            if _total_qnty <= 0:
                # print("return _batch_list", _batch_list)
                return _batch_list
def additional_shipping(
self,
le_list=[],
qnty_shipped=0,
total_pcs_shipped=0,
part=None,
product=None,
today = None,
d_lctn = None,
c_lctn = None,
lot = None,
batch = None,
):
_le_list = le_list
_qnty_shipped = qnty_shipped
_total_pcs_shipped = total_pcs_shipped
_part = part
_product = product
_shipping = self.shipping
_today = today
_d_lctn = d_lctn
_c_lctn = c_lctn
_lot = lot
_batch = batch
_proportional_shipping = Decimal(_qnty_shipped / _total_pcs_shipped) if _total_pcs_shipped > 0 else Decimal(0)
_d_acct = "IMER" if _product else "IRAW"
_d_qnty = None
_d_amnt = round(_shipping * _proportional_shipping, 4)
_c_acct = "APAY"
_c_qnty = None
_c_amnt = round(_shipping * _proportional_shipping, 4)
_note = "ASN: Additional shipping cost."
_le_list = append_le_list(
self, le_list = _le_list, entry_type="debit", date = _today,
acct = _d_acct, lctn = _d_lctn, product = _product,
part = _part, lot = _lot, batch = _batch, qnty = _d_qnty,
amnt = _d_amnt, note = _note)
_le_list = append_le_list(
self, le_list = _le_list, entry_type="credit", date = _today,
acct = _c_acct, lctn = _d_lctn,
product = _product, part = _part, lot = _lot, batch = _batch,
qnty = _c_qnty, amnt = _c_amnt, note = _note)
_shipping_cpu = (_shipping * _proportional_shipping) / _qnty_shipped if _qnty_shipped > 0 else 0
return _le_list, _shipping_cpu
def create_le_list(self, ttype=None, *args, **kwargs):
# need to consider that this function can be for existing inventory
# as well as bringing in new inventory
_ttype = ttype
_error = 0
_le_list = []
_detail_q = self.TransactionDetails.all().prefetch_related(
Prefetch('parts'),
Prefetch('products')
)
_pcs_shipped_q = _detail_q.aggregate(Sum('quantity_shipped'))
_total_pcs_shipped = _pcs_shipped_q['quantity_shipped__sum']
for detail in _detail_q:
_part = None
_part_id = None
_product = None
_product_id = None
_sku = ""
_ecpu = 0
# these are temporary
_lot = None
_batch = None
if detail.parts:
# _item = detail.parts
_part = detail.parts
_part_id = detail.parts.id
_sku = detail.parts.sku
_ecpu = detail.parts.ecpu
# # probleem retrieving lots and batches
# _lot = detail.parts.le_parts.lots if detail.parts.le_parts.lots else None
# _batch = detail.parts.le_parts.batches if detail.parts.le_parts.batches else None
_part.save() # | |
<reponame>meddhafer97/Risk-management-khnowledge-based-system<filename>djangoProject1/venv/Lib/site-packages/owlready2/pymedtermino2/model.py
# -*- coding: utf-8 -*-
# Owlready2
# Copyright (C) 2019 <NAME>
# LIMICS (Laboratoire d'informatique médicale et d'ingénierie des connaissances en santé), UMR_S 1142
# University Paris 13, Sorbonne paris-Cité, Bobigny, France
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import operator
from functools import reduce
from owlready2 import *
from owlready2.triplelite import _SearchList
class Concepts(set):
    """A set of concepts. The set can contain each concept only once, and it
inherits from Python's :class:`set` the methods for computing intersection, union, difference, ..., of two sets.
.. automethod:: __rshift__
"""
    def __repr__ (self): return u"%s([\n %s])" % (self.__class__.__name__, ", ".join([repr(t) for t in self]))
    def __rshift__(self, destination):
        """Maps the set of concepts to the destination_terminology. See :doc:`tuto_en` for more info."""
        #terminology_2_concepts = defaultdict(list)
        #for concept in self: terminology_2_concepts[concept.terminology].append(concept)
        #r = Concepts()
        #for terminology, concepts in terminology_2_concepts.items():
        #  r.update((terminology >> destination).map_concepts(concepts))
        #return r
        # Map each member concept individually and union the results.
        r = Concepts( j for i in self for j in i >> destination)
        return r
    def find(self, parent_concept):
        """returns the first concept of the set that is a descendant of parent_concept (including parent_concept itself)."""
        # Implicitly returns None when no member matches.
        for c in self:
            if issubclass(c, parent_concept): return c
    #def find_graphically(self, concept):
    #  for c in self:
    #    if hasattr(c, "is_graphically_a"):
    #      if c.is_graphically_a(concept): return c
    #    else:
    #      if c.is_a(concept): return c
    def imply(self, other):
        """returns true if all concepts in the OTHER set are descendants of (at least) one of the concepts in this set."""
        # for..else: the else runs only when no ancestor was found for cb.
        for cb in other:
            for ca in self:
                if issubclass(ca, cb): break
            else:
                return False
        return True
    def is_semantic_subset(self, other):
        """returns true if all concepts in this set are descendants of (at least) one of the concept in the OTHER set."""
        for c1 in self:
            for c2 in other:
                if issubclass(c1, c2): break
            else:
                return False
        return True
    def is_semantic_superset(self, other):
        """returns true if all concepts in this set are ancestors of (at least) one of the concept in the OTHER set."""
        for c1 in self:
            for c2 in other:
                if issubclass(c2, c1): break
            else:
                return False
        return True
    def is_semantic_disjoint(self, other):
        """returns true if all concepts in this set are semantically disjoint from all concepts in the OTHER set."""
        # Disjoint = no subclass relation in either direction for any pair.
        for c1 in self:
            for c2 in other:
                if issubclass(c1, c2) or issubclass(c2, c1): return False
        return True
    def semantic_intersection(self, other):
        """returns the set of concepts present (directly or via a descendant) in both sets; for each related pair, the more specific concept is kept."""
        r = Concepts()
        for c1 in self:
            for c2 in other:
                if issubclass(c1, c2): r.add(c1)
                elif issubclass(c2, c1): r.add(c2)
        return r
    def keep_most_specific(self, more_specific_than = None):
        """keeps only the most specific concepts, i.e. remove all concepts that are more general that another concept in the set."""
        clone = self.copy()
        for t1 in clone:
            for t2 in more_specific_than or clone:
                if (not t1 is t2) and issubclass(t1, t2): # t2 is more generic than t1 => we keep t1
                    self.discard(t2)
    def keep_most_generic(self, more_generic_than = None):
        """keeps only the most general concepts, i.e. remove all concepts that are more specific that another concept in the set."""
        clone = self.copy()
        clone2 = (more_generic_than or self).copy()
        for t1 in clone:
            for t2 in clone2:
                if (not t1 is t2) and issubclass(t1, t2): # t2 is more generic than t1 => we keep t2
                    self .discard(t1)
                    clone2.discard(t1)
                    break
    def extract(self, parent_concept):
        """returns all concepts of the set that are descendant of parent_concept (including parent_concept itself)."""
        return Concepts([c for c in self if issubclass(c, parent_concept)])
    def subtract(self, parent_concept):
        """returns a new set after removing all concepts that are descendant of parent_concept (including parent_concept itself)."""
        return Concepts([c for c in self if not issubclass(c, parent_concept)])
    def subtract_update(self, parent_concept):
        """same as `func`:subtract, but modify the set *in place*."""
        for c in set(self):
            if issubclass(c, parent_concept): self.discard(c)
    def remove_entire_families(self, only_family_with_more_than_one_child = True):
        """collapses complete sibling families into their parent concept, repeating until a fixed point is reached."""
        modified = 1
        while modified:
            modified = 0
            clone = self.copy()
            if only_family_with_more_than_one_child:
                parents = set([p for i in self for p in i.parents if len(p.children) > 1])
            else:
                parents = set([p for i in self for p in i.parents])
            while parents:
                t = parents.pop()
                children = set(t.children)
                # If every child of t is present, replace the family by t.
                if children.issubset(clone):
                    modified = 1
                    for i in self.copy():
                        if issubclass(i, t): self.remove(i)
                    for i in parents.copy():
                        if issubclass(i, t): parents.remove(i)
                    self.add(t)
    def lowest_common_ancestors(self):
        """returns the lowest common ancestors between this set of concepts."""
        if len(self) == 0: return None
        if len(self) == 1: return Concepts(self)
        # Intersect the ancestor sets of every member, then keep only the
        # most specific of the shared ancestors.
        ancestors = [set(concept.ancestor_concepts()) for concept in self]
        common_ancestors = Concepts(reduce(operator.and_, ancestors))
        r = Concepts()  # NOTE(review): unused local -- left in place
        common_ancestors.keep_most_specific()
        return common_ancestors
    def all_subsets(self):
        """returns all the subsets included in this set."""
        # Power set: doubles the list for each member (2**len(self) results).
        l = [Concepts()]
        for concept in self:
            for concepts in l[:]:
                l.append(concepts | set([concept]))
        return l
    # Re-wrap the plain-set operators so results stay Concepts instances.
    def __and__ (s1, s2): return s1.__class__(set.__and__(s1, s2))
    def __or__ (s1, s2): return s1.__class__(set.__or__(s1, s2))
    def __sub__ (s1, s2): return s1.__class__(set.__sub__(s1, s2))
    def __xor__ (s1, s2): return s1.__class__(set.__xor__(s1, s2))
    def difference (s1, s2): return s1.__class__(set.difference(s1, s2))
    def intersection (s1, s2): return s1.__class__(set.intersection(s1, s2))
    def symmetric_difference(s1, s2): return s1.__class__(set.symmetric_difference(s1, s2))
    def union (s1, s2): return s1.__class__(set.union(s1, s2))
    def copy (s1): return s1.__class__(s1)
class MetaConcept(ThingClass):
    """Metaclass for PyMedTermino concepts (an owlready2 ThingClass)."""
    def __new__(MetaClass, name, superclasses, obj_dict):
        #if superclasses == (Thing,): return ThingClass.__new__(MetaClass, name, superclasses, obj_dict)
        #else: return type.__new__(MetaClass, name, superclasses, obj_dict)
        # While LOADING is truthy (presumably a bulk-load flag -- TODO
        # confirm), build plain types; otherwise defer to ThingClass so
        # the usual OWL bookkeeping happens.
        if LOADING:
            return type.__new__(MetaClass, name, superclasses, obj_dict)
        else:
            return ThingClass.__new__(MetaClass, name, superclasses, obj_dict)
    def __iter__(Class): raise ValueError # Avoid some suprizing behavior when calling list(concept)
    def __repr__(Class):
        terminology = Class.terminology
        if not terminology: return ThingClass.__repr__(Class)
        # "PYM" labels the root SRC terminology; others use their own name.
        return """%s["%s"] # %s\n""" % ("PYM" if terminology.name == "SRC" else terminology.name, Class.name, Class.label.first())
    def __getattr__(Class, attr):
        """Lazy attributes: parents and terminology are computed on first
        access and cached on the class under a "__<attr>" key; children is
        recomputed on every access."""
        attr2 = "__%s" % attr
        # Ellipsis is the "not cached yet" sentinel (None is a valid value).
        r = Class.__dict__.get(attr2, Ellipsis)
        if not r is Ellipsis: return r
        if attr == "children":
            return sorted(Class.subclasses(), key = _sort_by_name)
        elif attr == "parents":
            # Only superclasses from the same terminology count as parents.
            terminology = Class.terminology
            r = [i for i in Class.is_a if isinstance(i, ThingClass) and i.terminology is terminology]
            r.sort(key = _sort_by_name)
            type.__setattr__(Class, "__parents", r)
            return r
        elif attr == "terminology":
            # Fetch the terminology object via the http://PYM/terminology
            # triple in the quadstore, then cache it.
            r = Class.namespace.world._get_obj_triple_sp_o(Class.storid, Class.namespace.world["http://PYM/terminology"].storid)
            r = Class.namespace.world._get_by_storid(r)
            type.__setattr__(Class, "__terminology", r)
            return r
        return ThingClass.__getattr__(Class, attr)
    def __getitem__(Class, code):
        """Look a concept up by its code, e.g. Terminology["12345"]."""
        if Class.terminology.name == "SRC":
            # SRC is the terminology root: Class is itself a terminology.
            return Class.namespace.world["http://PYM/%s/%s" % (Class.name, code)]
        else:
            return Class.namespace.world["http://PYM/%s/%s" % (Class.terminology.name, code)]
    def imply(Class, other): return issubclass(Class, other)
    def search(Class, keywords, **kargs):
        """Full-text search over labels and synonyms within this terminology."""
        return Class.namespace.world.search(label = FTS(keywords), terminology = Class, **kargs) | Class.namespace.world.search(synonyms = FTS(keywords), terminology = Class, **kargs)
    def full_code(Class):
        """Return the "<terminology>:<code>" identifier string."""
        return u"%s:%s" % (Class.terminology.name, Class.name)
    def has_concept(Class, code):
        """True if the given code exists in this terminology."""
        return not Class[code] is None
    def ancestor_concepts(Class, include_self = True, no_double = True):
        """Return the list of ancestor concepts within the same terminology."""
        l = []
        # The PYM root "Concept" is pre-seeded as visited so it is excluded.
        Class._fill_ancestor_concepts(l, { Class.namespace.world["http://PYM/Concept"] }, include_self, no_double)
        return l
    def _fill_ancestor_concepts(Class, l, s, include_self, no_double):
        # Depth-first accumulation into l; s is the visited set shared
        # across the recursion.
        if include_self and (not Class in s):
            l.append(Class)
            if no_double: s.add(Class)
            # Equivalent concepts are folded in alongside the class itself.
            for equivalent in Class.equivalent_to.indirect():
                if isinstance(equivalent, MetaConcept) and not equivalent in s:
                    equivalent._fill_ancestor_concepts(l, s, True, no_double)
        # Recurse upward, staying within the same terminology.
        for parent in Class.parents:
            if parent.terminology is Class.terminology:
                parent._fill_ancestor_concepts(l, s, True, no_double)
    def descendant_concepts(Class, include_self = True, no_double = True):
        """Return a lazy list-like view over the descendant concepts."""
        return _DescendantList(Class, include_self, no_double)
    def _generate_descendant_concepts(Class, s, include_self, no_double):
        # Depth-first generator; s is the visited set shared across the
        # recursion, children visited in name order.
        if include_self:
            yield Class
            if no_double: s.add(Class)
            for equivalent in Class.equivalent_to.indirect():
                if isinstance(equivalent, Class.__class__) and not equivalent in s:
                    yield from equivalent._generate_descendant_concepts(s, True, no_double)
        for child in sorted(Class.subclasses(), key = _sort_by_name):
            if not child in s: yield from child._generate_descendant_concepts(s, True, no_double)
    def __rshift__(Class, destination_terminology):
        """concept >> terminology: map this concept into another terminology."""
        if Class.terminology.name == "SRC": # Property creation
            return ThingClass.__rshift__(Class, destination_terminology)
        return Class._map(_get_mapper(Class.terminology, destination_terminology))
def _map(Class, mapper):
r = Concepts(mapper(Class))
if r: return r
return Concepts( i for parent in Class.parents for i in | |
<filename>portal/models/qb_status.py
""" Questionnaire Bank Status Module
API to lookup user's status with respect to assigned questionnaire banks.
"""
from .overall_status import OverallStatus
from .qb_timeline import ordered_qbs, QBT, update_users_QBT
from .questionnaire_response import qnr_document_id, QNR_results
from ..trace import trace
class NoCurrentQB(Exception):
    """Exception to raise when no current QB is available yet required"""
    pass
class QB_Status(object):
    def __init__(self, user, as_of_date):
        """Capture the user's questionnaire-bank status as of a datetime.

        :param user: the user whose QB timeline is evaluated
        :param as_of_date: the point in time the status is computed for
        """
        self.user = user
        self.as_of_date = as_of_date
        # Create a "_<state>_date" attribute (initially None) for every
        # overall status, e.g. self._due_date, self._completed_date, ...
        for state in OverallStatus:
            setattr(self, "_{}_date".format(state.name), None)
        self._overall_status = None
        self._sync_timeline()
        self._indef_stats()
def _sync_timeline(self):
"""Sync QB timeline and obtain status"""
self.prev_qbd, self.next_qbd = None, None
# Update QB_Timeline for user, if necessary
update_users_QBT(self.user.id)
# Every QB should have "due" - filter by to get one per QB
users_qbs = QBT.query.filter(QBT.user_id == self.user.id).filter(
QBT.status == OverallStatus.due).order_by(QBT.at.asc())
# Obtain withdrawal date if applicable
withdrawn = QBT.query.filter(QBT.user_id == self.user.id).filter(
QBT.status == OverallStatus.withdrawn).first()
self._withdrawal_date = withdrawn.at if withdrawn else None
# convert query to list of tuples for easier manipulation
self.__ordered_qbs = [qbt.qbd() for qbt in users_qbs]
if not self.__ordered_qbs:
# May have withdrawn prior to first qb
if self._withdrawal_date:
self._overall_status = OverallStatus.withdrawn
trace("found user withdrawn; no valid qbs")
else:
self._overall_status = OverallStatus.expired
trace("no qb timeline data for {}".format(self.user))
self._enrolled_in_common = False
self._current = None
return
self._enrolled_in_common = True
# locate current qb - last found with start <= self.as_of_date
cur_index, cur_qbd = None, None
for i, qbd in zip(range(len(self.__ordered_qbs)), self.__ordered_qbs):
if qbd.relative_start <= self.as_of_date:
cur_index = i
cur_qbd = qbd
if qbd.relative_start > self.as_of_date:
break
# w/o a cur, probably hasn't started, set expired and leave
if not cur_qbd and (
self.__ordered_qbs[0].relative_start > self.as_of_date):
if self.withdrawn_by(self.as_of_date):
trace("user withdrawn prior to first qb start")
self._overall_status = OverallStatus.withdrawn
else:
trace(
"no current QBD (too early); first qb doesn't start till"
" {} vs as_of {}".format(
self.__ordered_qbs[0].relative_start, self.as_of_date))
self._overall_status = OverallStatus.expired
self.next_qbd = self.__ordered_qbs[0]
self._current = None
return
if cur_index > 0:
self.prev_qbd = self.__ordered_qbs[cur_index-1]
else:
self.prev_qbd = None
if cur_index < len(self.__ordered_qbs) - 1:
self.next_qbd = self.__ordered_qbs[cur_index+1]
else:
self.next_qbd = None
self._status_from_current(cur_qbd)
def _status_from_current(self, cur_qbd):
    """Obtain status from QB timeline given current QBD

    Walks the user's QBT rows for the given questionnaire bank /
    recurrence / iteration in chronological order, capturing each
    status transition seen at or before ``self.as_of_date``.

    Side effects only: sets ``_overall_status``, the various
    ``_*_date`` attributes, ``_current`` and possibly ``prev_qbd``.

    :param cur_qbd: the QBD believed current for ``as_of_date``
    :raises RuntimeError: if the user is withdrawn by ``as_of_date``
      yet the rows didn't yield a withdrawn status
    """
    # We order by at (to get the latest status for a given QB) and
    # secondly by id, as on rare occasions, the time (`at`) of
    # `due` == `completed`, but the row insertion defines priority
    cur_rows = QBT.query.filter(QBT.user_id == self.user.id).filter(
        QBT.qb_id == cur_qbd.qb_id).filter(
        QBT.qb_recur_id == cur_qbd.recur_id).filter(
        QBT.qb_iteration == cur_qbd.iteration).order_by(
        QBT.at, QBT.id)

    # whip through ordered rows picking up available status;
    # later qualifying rows overwrite earlier ones (last wins)
    for row in cur_rows:
        if row.at <= self.as_of_date:
            self._overall_status = row.status
            if row.status == OverallStatus.due:
                self._due_date = row.at
            if row.status == OverallStatus.overdue:
                self._overdue_date = row.at
            if row.status == OverallStatus.completed:
                self._completed_date = row.at
            if row.status == OverallStatus.in_progress:
                self._in_progress_date = row.at
                # If we didn't already pass the overdue date, obtain now
                if not self._overdue_date and self._due_date:
                    self._overdue_date = (
                        cur_qbd.questionnaire_bank.calculated_overdue(
                            self._due_date))
            if row.status in (
                    OverallStatus.expired,
                    OverallStatus.partially_completed):
                self._expired_date = row.at

    # If the current is already expired, then no current was found,
    # as current is actually the previous
    if self._expired_date and self._expired_date < self.as_of_date:
        self.prev_qbd = cur_qbd
        self._current = None
    else:
        self._current = cur_qbd

    # Withdrawn sanity check
    if self.withdrawn_by(self.as_of_date) and (
            self.overall_status != OverallStatus.withdrawn):
        raise RuntimeError(
            "Unexpected state {}, should be withdrawn".format(
                self.overall_status))
def older_qbds(self, last_known):
    """Generator to return QBDs and status prior to last known

    Expected use in reporting scenarios, where full history is needed,
    this generator will continue to return previous QBDs (from last_known)
    until exhausted.

    :param last_known: typically a valid QBD for the user, typically the
      ``current_qbd()`` or possibly the previous.  None safe
    :returns: (QBD, status) until exhausted
    """
    if last_known is None:
        return

    # walk backwards through the ordered timeline from the known position
    index = self.__ordered_qbs.index(last_known)
    while index > 0:
        index -= 1
        cur_qbd = self.__ordered_qbs[index]
        # We order by at (to get the latest status for a given QB) and
        # secondly by id, as on rare occasions, the time (`at`) of
        # `due` == `completed`, but the row insertion defines priority
        status = QBT.query.filter(QBT.user_id == self.user.id).filter(
            QBT.qb_id == cur_qbd.qb_id).filter(
            QBT.qb_recur_id == cur_qbd.recur_id).filter(
            QBT.qb_iteration == cur_qbd.iteration).order_by(
            QBT.at.desc(), QBT.id.desc()).with_entities(
            QBT.status).first()
        # NOTE(review): assumes at least one QBT row exists for every
        # ordered qbd - ``status[0]`` would raise on None; confirm invariant
        yield self.__ordered_qbs[index], str(status[0])
def _indef_stats(self):
    """Lookup stats for indefinite case - requires special handling

    A user is expected to have at most one indefinite questionnaire
    bank; record it (or leave None) on ``self._current_indef``.

    :raises RuntimeError: if more than one indefinite qb turns up
    """
    indef_qbs = ordered_qbs(self.user, classification='indefinite')
    self._current_indef = None
    for candidate in indef_qbs:
        if self._current_indef is not None:
            raise RuntimeError("unexpected second indef qb")
        self._current_indef = candidate
def _response_lookup(self):
    """Lazy init - only lookup associated QNRs if needed

    Populates ``_required``/``_partial``/``_completed`` (and the
    ``*_indef`` variants) exactly once; subsequent calls are no-ops.
    """
    if hasattr(self, '_responses_looked_up'):
        # already resolved - idempotent guard
        return

    # As order counts, required is a list; partial and completed are sets
    current = self._current
    if current:
        qnrs = QNR_results(
            self.user, qb_id=current.qb_id,
            qb_iteration=current.iteration)
        self._required = qnrs.required_qs(current.qb_id)
        self._partial = qnrs.partial_qs(
            qb_id=current.qb_id, iteration=current.iteration)
        self._completed = qnrs.completed_qs(
            qb_id=current.qb_id, iteration=current.iteration)

    # Indefinite is similar, but *special* - no iteration applies
    indef = self._current_indef
    if indef:
        indef_qnrs = QNR_results(self.user, qb_id=indef.qb_id)
        self._required_indef = indef_qnrs.required_qs(
            qb_id=indef.qb_id)
        self._partial_indef = indef_qnrs.partial_qs(
            qb_id=indef.qb_id, iteration=None)
        self._completed_indef = indef_qnrs.completed_qs(
            qb_id=indef.qb_id, iteration=None)

    self._responses_looked_up = True
@property
def assigning_authority(self):
    """Returns the best string available for the assigning authority

    Typically, the top-level organization used to associate the user
    with the questionnaire bank.  For organizations that have moved
    to a newer research protocol, we no longer have this lookup
    available - the data model doesn't capture the authority (say
    organization or intervention) behind the assignment.  Given the
    typical scenario, the user will have one organization, and the top
    level of that is the best guess.

    If nothing is available, an empty string is returned so the value
    can safely be used in string formatting.

    :returns: string for assigning authority or empty string
    """
    return getattr(self.user.first_top_organization(), 'name', '')
def current_qbd(self, classification=None):
    """Looks for current QBD for given parameters

    If the user has a valid questionnaire bank for ``self.as_of_date``
    and classification, return the matching QuestionnaireBankDetails
    (QBD), which fully defines the questionnaire bank, iteration,
    recur and start date.

    :param classification: None defaults to all, special case for
      ``indefinite``
    :return: QBD for best match, or None
    """
    # guard clauses: withdrawn users have no current qb
    if self.withdrawn_by(self.as_of_date):
        return None
    if classification == 'indefinite':
        return self._current_indef
    if not self._current:
        return None
    self._current.relative_start = self._due_date
    return self._current
@property
def overall_status(self):
    """Most recent ``OverallStatus`` seen at or before ``as_of_date``"""
    return self._overall_status

@property
def completed_date(self):
    """``QBT.at`` timestamp captured from the ``completed`` row, if seen"""
    return self._completed_date

@property
def due_date(self):
    """``QBT.at`` timestamp captured from the ``due`` row, if seen"""
    return self._due_date

@property
def expired_date(self):
    """``QBT.at`` from the ``expired``/``partially_completed`` row, if seen"""
    return self._expired_date

@property
def overdue_date(self):
    """``QBT.at`` from the ``overdue`` row (or calculated overdue), if seen"""
    return self._overdue_date
def __instruments_by_strategy(self, classification, strategy):
    """Common logic for differing strategy to obtain instrument lists

    Given a strategy function, returns the appropriate list for the
    requested classification ('all'/None covers current, 'indefinite'
    or 'all' covers the indefinite qb).

    :raises ValueError: on an unrecognized classification
    """
    if classification not in (None, 'all', 'indefinite'):
        raise ValueError("can't handle classification {}".format(
            classification))

    self._response_lookup()  # force lazy load if not done

    collected = []
    if classification in ('all', None) and self._current:
        collected = strategy(
            required_list=self._required,
            completed_set=self._completed,
            partial_set=self._partial)
    if classification in ('indefinite', 'all') and self._current_indef:
        collected += strategy(
            required_list=self._required_indef,
            completed_set=self._completed_indef,
            partial_set=self._partial_indef)
    return collected
def instruments_needing_full_assessment(self, classification=None):
    """Return required instruments neither started nor completed

    Order follows the required list for the matching classification.
    """
    def needing_full(required_list, completed_set, partial_set):
        # keep required order; drop anything already started or finished
        return [
            instrument for instrument in required_list
            if instrument not in completed_set
            and instrument not in partial_set]
    return self.__instruments_by_strategy(classification, needing_full)
def instruments_completed(self, classification=None):
    """Return required instruments already completed, in required order"""
    def completed(required_list, completed_set, partial_set):
        # keep required order; retain only the finished instruments
        return [
            instrument for instrument in required_list
            if instrument in completed_set]
    return self.__instruments_by_strategy(classification, completed)
def instruments_in_progress(self, classification=None):
"""Return list of questionnaire ids in-progress for classification
NB - if the questionnaire is outside the valid date range, such as in
an expired state, it will not be included in the list regardless of
its in-progress status.
:param classification: set to 'indefinite' to consider that
classification, or 'all' for both. Default None uses current.
:returns: list of external questionnaire identifiers, that is, the
id needed to resume work on the same questionnaire that was
in progress. The `document['identifier']` from the previously
submitted QuestionnaireResponse.
"""
def need_completion(required_list, completed_set, partial_set):
# maintain order from required list, include if started (partial)
# and not completed
return [
i for i in required_list if i not in completed_set
and i in partial_set]
in_progress = self.__instruments_by_strategy(
classification, need_completion)
def doc_id_lookup(instrument):
"""Obtain lookup keys from appropriate internals"""
# don't have instrument to qb association | |
data
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_with_http_info(self, base_space_id, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces  # noqa: E501

    Updates the variables associated with the tenant.  NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_with_http_info(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['base_space_id', 'id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() deliberately snapshots this frame (self,
    # base_space_id, id, kwargs, all_params); validated kwargs are
    # merged in and the raw 'kwargs' entry removed below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces`")  # noqa: E501
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces`")  # noqa: E501

    collection_formats = {}

    # both ids are substituted into the path template below
    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501

    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/{id}/variables', 'PUT',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantVariableResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0(self, base_space_id, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0  # noqa: E501

    Updates the variables associated with the tenant.  NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always collapse to the payload alone (or the
    # async thread) rather than the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0_with_http_info(base_space_id, id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0_with_http_info(self, base_space_id, id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0  # noqa: E501

    Updates the variables associated with the tenant.  NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0_with_http_info(base_space_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :param str id: ID of the resource (required)
    :return: TenantVariableResource
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['base_space_id', 'id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() deliberately snapshots this frame; validated
    # kwargs are merged in and the raw 'kwargs' entry removed below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'base_space_id' is set
    if ('base_space_id' not in params or
            params['base_space_id'] is None):
        raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0`")  # noqa: E501
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenant_variables_put_responder_spaces_0`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'base_space_id' in params:
        path_params['baseSpaceId'] = params['base_space_id']  # noqa: E501
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501

    # NOTE(review): the sibling *_put_responder_spaces method issues
    # 'PUT' for this same path; confirm 'POST' here matches the
    # generator's source spec before relying on this variant.
    return self.api_client.call_api(
        '/api/{baseSpaceId}/tenants/{id}/variables', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='TenantVariableResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action(self, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action  # noqa: E501

    Reports back the status of multi-tenancy  NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: MultiTenancyStatusResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always collapse to the payload alone (or the
    # async thread) rather than the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_with_http_info(**kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_with_http_info(self, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action  # noqa: E501

    Reports back the status of multi-tenancy  NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: MultiTenancyStatusResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # No endpoint-specific parameters; only client plumbing kwargs allowed.
    all_params = []  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() deliberately snapshots this frame; validated
    # kwargs are merged in and the raw 'kwargs' entry removed below.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader']  # noqa: E501

    return self.api_client.call_api(
        '/api/tenants/status', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='MultiTenancyStatusResource',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces(self, base_space_id, **kwargs):  # noqa: E501
    """custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces  # noqa: E501

    Reports back the status of multi-tenancy  NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces(base_space_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str base_space_id: ID of the space (required)
    :return: MultiTenancyStatusResource
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: always collapse to the payload alone (or the
    # async thread) rather than the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    return self.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces_with_http_info(base_space_id, **kwargs)  # noqa: E501
def custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces_with_http_info(self, base_space_id, **kwargs): # noqa: E501
"""custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces # noqa: E501
Reports back the status of multi-tenancy NOTE: This definition is not complete. We will be adding more detail in future releases of Octopus. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces_with_http_info(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:return: MultiTenancyStatusResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `custom_action_response_descriptor_octopus_server_web_api_actions_tenants_configuration_get_action_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] | |
cidr_ipv6)
if description is not None:
pulumi.set(__self__, "description", description)
if from_port is not None:
pulumi.set(__self__, "from_port", from_port)
if source_prefix_list_id is not None:
pulumi.set(__self__, "source_prefix_list_id", source_prefix_list_id)
if source_security_group_id is not None:
pulumi.set(__self__, "source_security_group_id", source_security_group_id)
if source_security_group_name is not None:
pulumi.set(__self__, "source_security_group_name", source_security_group_name)
if source_security_group_owner_id is not None:
pulumi.set(__self__, "source_security_group_owner_id", source_security_group_owner_id)
if to_port is not None:
pulumi.set(__self__, "to_port", to_port)
# Generated accessor pairs: each @pulumi.getter maps the snake_case
# Python attribute to its camelCase wire name (where given); setters
# mirror the getters and write through pulumi.set.
@property
@pulumi.getter(name="ipProtocol")
def ip_protocol(self) -> pulumi.Input[str]:
    return pulumi.get(self, "ip_protocol")

@ip_protocol.setter
def ip_protocol(self, value: pulumi.Input[str]):
    pulumi.set(self, "ip_protocol", value)

@property
@pulumi.getter(name="cidrIp")
def cidr_ip(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "cidr_ip")

@cidr_ip.setter
def cidr_ip(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "cidr_ip", value)

@property
@pulumi.getter(name="cidrIpv6")
def cidr_ipv6(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "cidr_ipv6")

@cidr_ipv6.setter
def cidr_ipv6(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "cidr_ipv6", value)

@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "description")

@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "description", value)

@property
@pulumi.getter(name="fromPort")
def from_port(self) -> Optional[pulumi.Input[int]]:
    return pulumi.get(self, "from_port")

@from_port.setter
def from_port(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "from_port", value)

@property
@pulumi.getter(name="sourcePrefixListId")
def source_prefix_list_id(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "source_prefix_list_id")

@source_prefix_list_id.setter
def source_prefix_list_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "source_prefix_list_id", value)

@property
@pulumi.getter(name="sourceSecurityGroupId")
def source_security_group_id(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "source_security_group_id")

@source_security_group_id.setter
def source_security_group_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "source_security_group_id", value)

@property
@pulumi.getter(name="sourceSecurityGroupName")
def source_security_group_name(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "source_security_group_name")

@source_security_group_name.setter
def source_security_group_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "source_security_group_name", value)

@property
@pulumi.getter(name="sourceSecurityGroupOwnerId")
def source_security_group_owner_id(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "source_security_group_owner_id")

@source_security_group_owner_id.setter
def source_security_group_owner_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "source_security_group_owner_id", value)

@property
@pulumi.getter(name="toPort")
def to_port(self) -> Optional[pulumi.Input[int]]:
    return pulumi.get(self, "to_port")

@to_port.setter
def to_port(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "to_port", value)
@pulumi.input_type
class SecurityGroupTagArgs:
    """A single required key/value tag entry for a security group."""

    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str]):
        # both halves of the tag are required inputs
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class SpotFleetAcceleratorCountRequestArgs:
    """Optional min/max bounds for a Spot Fleet accelerator-count request."""

    def __init__(__self__, *,
                 max: Optional[pulumi.Input[int]] = None,
                 min: Optional[pulumi.Input[int]] = None):
        # only register values the caller actually supplied
        if max is not None:
            pulumi.set(__self__, "max", max)
        if min is not None:
            pulumi.set(__self__, "min", min)

    @property
    @pulumi.getter
    def max(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max")

    @max.setter
    def max(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max", value)

    @property
    @pulumi.getter
    def min(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min")

    @min.setter
    def min(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min", value)
@pulumi.input_type
class SpotFleetAcceleratorTotalMemoryMiBRequestArgs:
    """Optional min/max bounds (MiB) for total accelerator memory."""

    def __init__(__self__, *,
                 max: Optional[pulumi.Input[int]] = None,
                 min: Optional[pulumi.Input[int]] = None):
        # only register values the caller actually supplied
        if max is not None:
            pulumi.set(__self__, "max", max)
        if min is not None:
            pulumi.set(__self__, "min", min)

    @property
    @pulumi.getter
    def max(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max")

    @max.setter
    def max(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max", value)

    @property
    @pulumi.getter
    def min(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min")

    @min.setter
    def min(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min", value)
@pulumi.input_type
class SpotFleetBaselineEbsBandwidthMbpsRequestArgs:
    """Optional min/max bounds (Mbps) for baseline EBS bandwidth."""

    def __init__(__self__, *,
                 max: Optional[pulumi.Input[int]] = None,
                 min: Optional[pulumi.Input[int]] = None):
        # only register values the caller actually supplied
        if max is not None:
            pulumi.set(__self__, "max", max)
        if min is not None:
            pulumi.set(__self__, "min", min)

    @property
    @pulumi.getter
    def max(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "max")

    @max.setter
    def max(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max", value)

    @property
    @pulumi.getter
    def min(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "min")

    @min.setter
    def min(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min", value)
@pulumi.input_type
class SpotFleetBlockDeviceMappingArgs:
    """Block device mapping entry: required device name plus optional
    ebs settings, no_device marker or virtual (ephemeral) name."""

    def __init__(__self__, *,
                 device_name: pulumi.Input[str],
                 ebs: Optional[pulumi.Input['SpotFleetEbsBlockDeviceArgs']] = None,
                 no_device: Optional[pulumi.Input[str]] = None,
                 virtual_name: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "device_name", device_name)
        # optional members are only registered when supplied
        if ebs is not None:
            pulumi.set(__self__, "ebs", ebs)
        if no_device is not None:
            pulumi.set(__self__, "no_device", no_device)
        if virtual_name is not None:
            pulumi.set(__self__, "virtual_name", virtual_name)

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "device_name")

    @device_name.setter
    def device_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "device_name", value)

    @property
    @pulumi.getter
    def ebs(self) -> Optional[pulumi.Input['SpotFleetEbsBlockDeviceArgs']]:
        return pulumi.get(self, "ebs")

    @ebs.setter
    def ebs(self, value: Optional[pulumi.Input['SpotFleetEbsBlockDeviceArgs']]):
        pulumi.set(self, "ebs", value)

    @property
    @pulumi.getter(name="noDevice")
    def no_device(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "no_device")

    @no_device.setter
    def no_device(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "no_device", value)

    @property
    @pulumi.getter(name="virtualName")
    def virtual_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "virtual_name")

    @virtual_name.setter
    def virtual_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_name", value)
@pulumi.input_type
class SpotFleetClassicLoadBalancersConfigArgs:
    """Wrapper holding the required list of classic load balancer args."""

    def __init__(__self__, *,
                 classic_load_balancers: pulumi.Input[Sequence[pulumi.Input['SpotFleetClassicLoadBalancerArgs']]]):
        pulumi.set(__self__, "classic_load_balancers", classic_load_balancers)

    @property
    @pulumi.getter(name="classicLoadBalancers")
    def classic_load_balancers(self) -> pulumi.Input[Sequence[pulumi.Input['SpotFleetClassicLoadBalancerArgs']]]:
        return pulumi.get(self, "classic_load_balancers")

    @classic_load_balancers.setter
    def classic_load_balancers(self, value: pulumi.Input[Sequence[pulumi.Input['SpotFleetClassicLoadBalancerArgs']]]):
        pulumi.set(self, "classic_load_balancers", value)
@pulumi.input_type
class SpotFleetClassicLoadBalancerArgs:
    """Classic load balancer reference - just the required name."""

    def __init__(__self__, *,
                 name: pulumi.Input[str]):
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class SpotFleetEbsBlockDeviceArgs:
    """EBS block device settings for a Spot Fleet mapping; every field
    is optional and only registered when supplied."""

    def __init__(__self__, *,
                 delete_on_termination: Optional[pulumi.Input[bool]] = None,
                 encrypted: Optional[pulumi.Input[bool]] = None,
                 iops: Optional[pulumi.Input[int]] = None,
                 snapshot_id: Optional[pulumi.Input[str]] = None,
                 volume_size: Optional[pulumi.Input[int]] = None,
                 volume_type: Optional[pulumi.Input['SpotFleetEbsBlockDeviceVolumeType']] = None):
        if delete_on_termination is not None:
            pulumi.set(__self__, "delete_on_termination", delete_on_termination)
        if encrypted is not None:
            pulumi.set(__self__, "encrypted", encrypted)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)
        if snapshot_id is not None:
            pulumi.set(__self__, "snapshot_id", snapshot_id)
        if volume_size is not None:
            pulumi.set(__self__, "volume_size", volume_size)
        if volume_type is not None:
            pulumi.set(__self__, "volume_type", volume_type)

    @property
    @pulumi.getter(name="deleteOnTermination")
    def delete_on_termination(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "delete_on_termination")

    @delete_on_termination.setter
    def delete_on_termination(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_on_termination", value)

    @property
    @pulumi.getter
    def encrypted(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "encrypted")

    @encrypted.setter
    def encrypted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "encrypted", value)

    @property
    @pulumi.getter
    def iops(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "iops")

    @iops.setter
    def iops(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "iops", value)

    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "snapshot_id")

    @snapshot_id.setter
    def snapshot_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot_id", value)

    @property
    @pulumi.getter(name="volumeSize")
    def volume_size(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "volume_size")

    @volume_size.setter
    def volume_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "volume_size", value)

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> Optional[pulumi.Input['SpotFleetEbsBlockDeviceVolumeType']]:
        return pulumi.get(self, "volume_type")

    @volume_type.setter
    def volume_type(self, value: Optional[pulumi.Input['SpotFleetEbsBlockDeviceVolumeType']]):
        pulumi.set(self, "volume_type", value)
@pulumi.input_type
class SpotFleetFleetLaunchTemplateSpecificationArgs:
    """Input args referencing a launch template by id or name plus a version."""
    def __init__(__self__, *,
                 version: pulumi.Input[str],
                 launch_template_id: Optional[pulumi.Input[str]] = None,
                 launch_template_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] version: Required template version.
        :param launch_template_id: optional; surfaced as `launchTemplateId`.
        :param launch_template_name: optional; surfaced as `launchTemplateName`.
        """
        pulumi.set(__self__, "version", version)
        if launch_template_id is not None:
            pulumi.set(__self__, "launch_template_id", launch_template_id)
        if launch_template_name is not None:
            pulumi.set(__self__, "launch_template_name", launch_template_name)
    @property
    @pulumi.getter
    def version(self) -> pulumi.Input[str]:
        return pulumi.get(self, "version")
    @version.setter
    def version(self, value: pulumi.Input[str]):
        pulumi.set(self, "version", value)
    @property
    @pulumi.getter(name="launchTemplateId")
    def launch_template_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "launch_template_id")
    @launch_template_id.setter
    def launch_template_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "launch_template_id", value)
    @property
    @pulumi.getter(name="launchTemplateName")
    def launch_template_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "launch_template_name")
    @launch_template_name.setter
    def launch_template_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "launch_template_name", value)
@pulumi.input_type
class SpotFleetGroupIdentifierArgs:
    """Input args identifying a security group by id."""
    def __init__(__self__, *,
                 group_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] group_id: Required; surfaced as `groupId`.
        """
        pulumi.set(__self__, "group_id", group_id)
    @property
    @pulumi.getter(name="groupId")
    def group_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "group_id")
    @group_id.setter
    def group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "group_id", value)
@pulumi.input_type
class SpotFleetIamInstanceProfileSpecificationArgs:
    """Input args referencing an IAM instance profile by ARN (optional)."""
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None):
        """
        :param arn: optional instance profile ARN; omitted entirely when None.
        """
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
@pulumi.input_type
class SpotFleetInstanceIpv6AddressArgs:
    """Input args carrying a single IPv6 address string."""
    def __init__(__self__, *,
                 ipv6_address: pulumi.Input[str]):
        """
        :param pulumi.Input[str] ipv6_address: Required; surfaced as `ipv6Address`.
        """
        pulumi.set(__self__, "ipv6_address", ipv6_address)
    @property
    @pulumi.getter(name="ipv6Address")
    def ipv6_address(self) -> pulumi.Input[str]:
        return pulumi.get(self, "ipv6_address")
    @ipv6_address.setter
    def ipv6_address(self, value: pulumi.Input[str]):
        pulumi.set(self, "ipv6_address", value)
@pulumi.input_type
class SpotFleetInstanceNetworkInterfaceSpecificationArgs:
    """Input args for a network interface in a Spot Fleet launch specification.

    All fields are optional; only explicitly provided values are stored
    (unset fields are omitted rather than stored as None).
    """
    def __init__(__self__, *,
                 associate_public_ip_address: Optional[pulumi.Input[bool]] = None,
                 delete_on_termination: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 device_index: Optional[pulumi.Input[int]] = None,
                 groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 ipv6_address_count: Optional[pulumi.Input[int]] = None,
                 ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['SpotFleetInstanceIpv6AddressArgs']]]] = None,
                 network_interface_id: Optional[pulumi.Input[str]] = None,
                 private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['SpotFleetPrivateIpAddressSpecificationArgs']]]] = None,
                 secondary_private_ip_address_count: Optional[pulumi.Input[int]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None):
        """
        :param associate_public_ip_address: optional; surfaced as `associatePublicIpAddress`.
        :param delete_on_termination: optional; surfaced as `deleteOnTermination`.
        :param description: optional.
        :param device_index: optional; surfaced as `deviceIndex`.
        :param groups: optional security group ids.
        :param ipv6_address_count: optional; surfaced as `ipv6AddressCount`.
        :param ipv6_addresses: optional; surfaced as `ipv6Addresses`.
        :param network_interface_id: optional; surfaced as `networkInterfaceId`.
        :param private_ip_addresses: optional; surfaced as `privateIpAddresses`.
        :param secondary_private_ip_address_count: optional; surfaced as `secondaryPrivateIpAddressCount`.
        :param subnet_id: optional; surfaced as `subnetId`.
        """
        if associate_public_ip_address is not None:
            pulumi.set(__self__, "associate_public_ip_address", associate_public_ip_address)
        if delete_on_termination is not None:
            pulumi.set(__self__, "delete_on_termination", delete_on_termination)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if device_index is not None:
            pulumi.set(__self__, "device_index", device_index)
        if groups is not None:
            pulumi.set(__self__, "groups", groups)
        if ipv6_address_count is not None:
            pulumi.set(__self__, "ipv6_address_count", ipv6_address_count)
        if ipv6_addresses is not None:
            pulumi.set(__self__, "ipv6_addresses", ipv6_addresses)
        if network_interface_id is not None:
            pulumi.set(__self__, "network_interface_id", network_interface_id)
        if private_ip_addresses is not None:
            pulumi.set(__self__, "private_ip_addresses", private_ip_addresses)
        if secondary_private_ip_address_count is not None:
            pulumi.set(__self__, "secondary_private_ip_address_count", secondary_private_ip_address_count)
        if subnet_id is not None:
            pulumi.set(__self__, "subnet_id", subnet_id)
    @property
    @pulumi.getter(name="associatePublicIpAddress")
    def associate_public_ip_address(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "associate_public_ip_address")
    @associate_public_ip_address.setter
    def associate_public_ip_address(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "associate_public_ip_address", value)
    @property
    @pulumi.getter(name="deleteOnTermination")
    def delete_on_termination(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "delete_on_termination")
    @delete_on_termination.setter
    def delete_on_termination(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "delete_on_termination", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="deviceIndex")
    def device_index(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "device_index")
    @device_index.setter
    def device_index(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "device_index", value)
    @property
    @pulumi.getter
    def groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "groups")
    @groups.setter
    def groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "groups", value)
    @property
    @pulumi.getter(name="ipv6AddressCount")
    def ipv6_address_count(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "ipv6_address_count")
    @ipv6_address_count.setter
    def ipv6_address_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ipv6_address_count", value)
    @property
    @pulumi.getter(name="ipv6Addresses")
    def ipv6_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SpotFleetInstanceIpv6AddressArgs']]]]:
        return pulumi.get(self, "ipv6_addresses")
    @ipv6_addresses.setter
    def ipv6_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SpotFleetInstanceIpv6AddressArgs']]]]):
        pulumi.set(self, "ipv6_addresses", value)
    @property
    @pulumi.getter(name="networkInterfaceId")
    def network_interface_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "network_interface_id")
    @network_interface_id.setter
    def network_interface_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "network_interface_id", value)
    @property
    @pulumi.getter(name="privateIpAddresses")
    def private_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SpotFleetPrivateIpAddressSpecificationArgs']]]]:
        return pulumi.get(self, "private_ip_addresses")
    @private_ip_addresses.setter
    def private_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SpotFleetPrivateIpAddressSpecificationArgs']]]]):
        pulumi.set(self, "private_ip_addresses", value)
    @property
    @pulumi.getter(name="secondaryPrivateIpAddressCount")
    def secondary_private_ip_address_count(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "secondary_private_ip_address_count")
    @secondary_private_ip_address_count.setter
    def secondary_private_ip_address_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "secondary_private_ip_address_count", value)
@property
| |
<reponame>Bhaskers-Blu-Org2/petridishnn
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import scipy.io.arff as arff
import bisect
import json
import os, sys
import subprocess
import tensorflow as tf
from tensorpack.dataflow import RNGDataFlow, BatchData, PrefetchData
from tensorpack.callbacks import Inferencer
from tensorpack.dataflow import DataFlow, PrefetchDataZMQ, \
PrefetchData, \
MapDataComponent, AugmentImageComponent, BatchData
from tensorpack.dataflow import imgaug
from tensorpack.utils import logger
def maybe_download_dataset(dataset_idx, json_dir=None, data_dir=None,
        force_download=False, disable_download=True):
    """Ensure the OpenML dataset's JSON metadata and ARFF data exist locally.

    Looks for <dataset_idx>.json in json_dir and <dataset_idx>.arff in
    data_dir; downloads each one (via wget) if missing or force_download is
    set, unless downloads are disabled.

    :param dataset_idx: numeric OpenML dataset id.
    :param json_dir: directory holding the per-dataset JSON metadata files.
    :param data_dir: directory holding the per-dataset ARFF files.
    :param force_download: re-download even if the files already exist.
    :param disable_download: if True, raise ValueError instead of downloading
        when a required file is missing.
    :return: (data_fn, target_attr) -- path to the ARFF file and the name of
        the single feature flagged as the prediction target in the metadata.
    :raises ValueError: if a required file is missing and downloads are disabled.
    :raises Exception: if the metadata does not flag exactly one target feature.
    """
    json_fn = os.path.join(json_dir, str(dataset_idx) + '.json')
    data_fn = os.path.join(data_dir, str(dataset_idx) + '.arff')
    if os.path.exists(json_fn) and not force_download:
        print("Json info and data already exists.")
    else:
        if disable_download:
            raise ValueError("{} should exist but not".format(json_fn))
        import wget, glob
        url = "https://www.openml.org/d/{dataset_idx}/json".format(dataset_idx=dataset_idx)
        print("Downloading JSON file from url {}".format(url))
        json_fn = wget.download(url, json_fn)
        # wget may leave partial "*tmp" files behind; remove them.
        fns = glob.glob('{}*tmp'.format(json_fn))
        for fn in fns:
            cmd = 'rm {}'.format(fn)
            print("remove tmp file with cmd : {}".format(cmd))
            subprocess.call(cmd, shell=True)
    with open(json_fn, 'rt') as json_in:
        lines = []
        for line in json_in:
            lines.append(line.strip())
        ss = ''.join(lines)
        data_info = json.loads(ss)
    #target_attr = data_info.get('default_target_attribute', None)
    # The JSON's default_target_attribute is deliberately ignored; instead the
    # target is the single feature whose 'target' flag is > 0.
    target_attr = None
    if target_attr is None:
        n_targets = 0
        for feat_info in data_info['features']:
            if int(feat_info.get('target', 0)) > 0:
                target_attr = feat_info['name']
                n_targets += 1
        if n_targets != 1:
            raise Exception("current logic only support 1d prediction at dataset_idx {}".format(dataset_idx))
    if os.path.exists(data_fn) and not force_download:
        print("data arff already exists")
    else:
        if disable_download:
            raise ValueError("{} should exist but not".format(data_fn))
        import wget
        import glob
        # dataset url
        url = data_info['url']
        print("Downloading dataset {} from url {}".format(dataset_idx, url))
        data_fn = wget.download(url, out=data_fn)
        fns = glob.glob('{}*tmp'.format(data_fn))
        for fn in fns:
            cmd = 'rm {}'.format(fn)
            print("remove tmp file with cmd : {}".format(cmd))
            subprocess.call(cmd, shell=True)
    return data_fn, target_attr
def get_arff_data(fn, target_attr='class', check_size_only=False):
    """Load an ARFF file and convert it into per-feature data columns.

    Numeric features are kept as floats (with per-feature mean/std computed
    in a single pass); nominal features and nominal targets are mapped to
    integer indices based on their declared value order.

    :param fn: path to the .arff file.
    :param target_attr: name of the attribute to predict.
    :param check_size_only: if True, only print the file size and return None.
    :return: (dps, feat_types, feat_dims, n_data, num_classes, feat_means,
        feat_stds). dps has n_feats + 1 columns, the last holding the targets;
        num_classes is 0 for regression targets.
    :raises Exception: if target_attr is not among the ARFF attributes.
    """
    file_stat = os.stat(fn)
    if check_size_only:
        print("{} has size {}MB".format(fn, file_stat.st_size * 1e-6))
        return None
    data, meta = arff.loadarff(fn)
    # Idiom fix: was `not target_attr in meta.names()`.
    if target_attr not in meta.names():
        raise Exception("Dataset {} is broken: target_attr {} not in meta".format(fn, target_attr))
    # problem type regression/classification
    if meta[target_attr][0] == 'numeric':
        num_classes = 0
        pred_type = tf.float32
    else:
        num_classes = len(meta[target_attr][1])
        pred_type = tf.int32
        # Map each nominal target value to its index in the declared order.
        pred_val2idx = {val: vi for vi, val in enumerate(meta[target_attr][1])}
    # feature names, types and ranges (target excluded from the features)
    feat_names = [name for name in meta.names() if name != target_attr]
    n_feats = len(feat_names)
    feat_types = [tf.float32 for _ in range(n_feats)]
    feat_dims = [None for _ in range(n_feats)]
    feat_val2idx = [None for _ in range(n_feats)]
    for i, name in enumerate(feat_names):
        if meta[name][0] == 'numeric':
            continue
        # Nominal feature: record its cardinality and value->index map.
        feat_types[i] = tf.int32
        feat_dims[i] = len(meta[name][1])
        feat_val2idx[i] = {val: vi for vi, val in enumerate(meta[name][1])}
    n_data = len(data)
    dps = [[None] * n_data for _ in range(n_feats + 1)]
    # Running sums for mean/std; stay 0 for nominal features.
    feat_means = [0. for _ in range(n_feats)]
    feat_vars = [0. for _ in range(n_feats)]
    for xi, x in enumerate(data):
        for di, dname in enumerate(feat_names):
            val = x[dname]
            if feat_types[di] == tf.float32:
                val = float(val)
                dps[di][xi] = val
                feat_means[di] += val
                feat_vars[di] += val * val
            else:
                # Nominal values come back as bytes from scipy's loadarff.
                val = val.decode("utf-8")
                dps[di][xi] = int(feat_val2idx[di][val])
        if num_classes == 0:
            dps[-1][xi] = float(x[target_attr])
        else:
            val = x[target_attr].decode("utf-8")
            dps[-1][xi] = int(pred_val2idx[val])
    feat_types.append(pred_type)
    feat_dims.append(None)
    # Finalize first/second moments: mean and std = sqrt(E[x^2] - mean^2).
    feat_means = [z / float(n_data) for z in feat_means]
    feat_stds = [np.sqrt((sq / float(n_data) - m * m)) for sq, m in zip(feat_vars, feat_means)]
    return dps, feat_types, feat_dims, n_data, num_classes, feat_means, feat_stds
class LoadedArffDataFlow(RNGDataFlow):
    """DataFlow over pre-loaded ARFF columns with a reproducible random split.

    The sample order is shuffled once with a fixed seed so train/val/test
    slices are disjoint and stable across runs. NOTE: seeding/shuffling uses
    numpy's *global* RNG, which affects other code using np.random.
    """

    def __init__(self, dps_ys, split, shuffle=True, do_validation=False):
        """
        :param dps_ys: list of columns; the last entry holds the labels.
        :param split: one of 'all', 'train', 'val'/'validation', 'test'.
        :param shuffle: reshuffle the slice's indices on every epoch.
        :param do_validation: carve out a separate validation slice
            (train 80% / val 10% / test 10%); otherwise train 90% and
            val/test share the last 10%.
        :raises ValueError: on an unrecognized split name.
        """
        super(LoadedArffDataFlow, self).__init__()
        self.shuffle = shuffle
        self.dps = dps_ys  # list of n x d_i columns; last one is pred ys
        n_samples = len(dps_ys[-1])
        self.init_indices = list(range(n_samples))
        np.random.seed(180451613)
        np.random.shuffle(self.init_indices)
        if split == 'all':
            self._offset = 0
            self._size = n_samples
        elif split == 'train':
            self._offset = 0
            if do_validation:
                self._size = n_samples * 8 // 10
            else:
                self._size = n_samples * 9 // 10
        elif split == 'val' or split == 'validation':
            if do_validation:
                self._offset = n_samples * 8 // 10
                self._size = n_samples * 9 // 10 - self._offset
            else:
                self._offset = n_samples * 9 // 10
                self._size = n_samples - self._offset
        elif split == 'test':
            # Bug fix: the original branch was `elif do_validation and
            # split == 'test'`, so split='test' with do_validation=False left
            # _offset/_size unset and size() raised AttributeError later.
            # The test slice is always the final 10%.
            self._offset = n_samples * 9 // 10
            self._size = n_samples - self._offset
        else:
            # Fail fast on typos instead of deferring to an AttributeError.
            raise ValueError("Unknown split: {}".format(split))

    def size(self):
        """Number of samples in this split."""
        return self._size

    def get_data(self):
        """Yield one datapoint (one value per column) per sample in the split."""
        idxs = [i for i in self.init_indices[self._offset:(self._offset + self._size)]]
        if self.shuffle:
            np.random.shuffle(idxs)
        for k in idxs:
            yield [dp[k] for dp in self.dps]
def get_dataset_by_id(idx, data_dir_root, check_size_only=False, disable_download=True):
    """Locate (downloading if permitted) OpenML dataset `idx` under
    <data_dir_root>/openml and parse its ARFF contents."""
    openml_dir = os.path.join(data_dir_root, 'openml')
    info_dir = os.path.join(data_dir_root, 'openml', 'json_dir')
    arff_fn, target = maybe_download_dataset(
        idx, json_dir=info_dir, data_dir=openml_dir,
        disable_download=disable_download)
    return get_arff_data(arff_fn, target, check_size_only)
def get_openml_dataflow(idx, data_root, splits=None, do_validation=False):
    """Build LoadedArffDataFlow objects for the requested splits of dataset `idx`.

    :param idx: numeric OpenML dataset id.
    :param data_root: root data directory (dataset lives under <root>/openml).
    :param splits: split names to build dataflows for (default: none).
    :param do_validation: forwarded to LoadedArffDataFlow's split layout.
    :return: (dict of split -> dataflow, feat_types, feat_dims, n_data,
        num_classes, feat_means, feat_stds).
    """
    # Bug fix: the default was a mutable `splits=[]` (shared across calls).
    if splits is None:
        splits = []
    (dps_ys, types, dims, n_data,
     num_classes, feat_means, feat_stds) = get_dataset_by_id(idx, data_root)
    l_ds = {
        split: LoadedArffDataFlow(
            dps_ys, split, shuffle=True, do_validation=do_validation)
        for split in splits
    }
    return l_ds, types, dims, n_data, num_classes, feat_means, feat_stds
# copy paste from the paper: https://arxiv.org/pdf/1802.04064.pdf
# OpenML dataset ids used as the benchmark suite; reproduced verbatim from
# the paper linked above.
cbb_openml_indices = [
    3, 6, 8, 10, 11, 12, 14, 16, 18, 20, 21, 22, 23, 26, 28, 30, 31, 32,
    36, 37, 39, 40, 41, 43, 44, 46, 48, 50, 53, 54, 59, 60, 61, 62, 150,
    151, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 180, 181, 182, 183,
    184, 187, 189, 197, 209, 223, 227, 273, 275, 276, 277, 278, 279, 285, 287,
    292, 293, 294, 298, 300, 307, 310, 312, 313, 329, 333, 334, 335, 336, 337,
    338, 339, 343, 346, 351, 354, 357, 375, 377, 383, 384, 385, 386, 387, 388,
    389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 444, 446,
    448, 450, 457, 458, 459, 461, 462, 463, 464, 465, 467, 468, 469, 472, 475,
    476, 477, 478, 479, 480, 554, 679, 682, 683, 685, 694, 713, 714, 715, 716,
    717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731,
    732, 733, 734, 735, 736, 737, 740, 741, 742, 743, 744, 745, 746, 747, 748,
    749, 750, 751, 752, 753, 754, 755, 756, 758, 759, 761, 762, 763, 764, 765,
    766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780,
    782, 783, 784, 785, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797,
    799, 800, 801, 803, 804, 805, 806, 807, 808, 811, 812, 813, 814, 815, 816,
    817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 832,
    833, 834, 835, 836, 837, 838, 841, 843, 845, 846, 847, 848, 849, 850, 851,
    853, 855, 857, 859, 860, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871,
    872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 884, 885, 886, 888,
    891, 892, 893, 894, 895, 896, 900, 901, 902, 903, 904, 905, 906, 907, 908,
    909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923,
    924, 925, 926, 927, 928, 929, 931, 932, 933, 934, 935, 936, 937, 938, 941,
    942, 943, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 958,
    959, 962, 964, 965, 969, 970, 971, 973, 974, 976, 977, 978, 979, 980, 983,
    987, 988, 991, 994, 995, 996, 997, 1004, 1005, 1006, 1009, 1011, 1012, 1013,
    1014, 1015, 1016, 1019, 1020, 1021, 1022, 1025, 1026, 1036, 1038, 1040,
    1041, 1043, 1044, 1045, 1046, 1048, 1049, 1050, 1054, 1055, 1056, 1059,
    1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1071, 1073,
    1075, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087,
    1088, 1100, 1104, 1106, 1107, 1110, 1113, 1115, 1116, 1117, 1120, 1121,
    1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133,
    1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146,
    1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158,
    1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1169, 1216, 1217, 1218,
    1233, 1235, 1236, 1237, 1238, 1241, 1242, 1412, 1413, 1441, 1442, 1443,
    1444, 1449, 1451, 1453, 1454, 1455, 1457, 1459, 1460, 1464, 1467, 1470,
    1471, 1472, 1473, 1475, 1481, 1482, 1483, 1486, 1487, 1488, 1489, 1496, 1498
]
# 21 could not convert sparse str to float; 6 cannot convert sparse nominal of 1/-1
# 2 could not find nominal field ; 1 exception due to target_attr not | |
import json
from app.tests import BaseTestClass
class TestParcelView(BaseTestClass):
"""This class contains all tests regarding parcels"""
def test__create_order(self):
"""This will test POST /parcels"""
res = self.client.post(
"api/v2/users/parcels", data=json.dumps(self.generic_parcel),
content_type="application/json", headers=self.headers)
result = json.loads(res.data)
self.assertEqual(res.status_code, 201)
self.assertEqual(result["Success"], "Your parcel order has been saved")
def test_invalid_parcel_name(self):
"""Parcels must have valid names in order to be sent"""
fake_parcel = {"parcel_name": " ",
"recipient_name": "Generic Recipient",
"pickup_location": "Generic Pickup",
"destination": "Generic Destination",
"weight": "420"
}
res = self.client.post("/api/v2/users/parcels",
data=json.dumps(fake_parcel),
content_type="application/json", headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Error"], "Please enter valid parcel name")
self.assertEqual(res.status_code, 400)
def test_invalid_pickup_location(self):
"""Parcels must have valid pickup location"""
fake_parcel = {"parcel_name": "fake",
"recipient_name": "Generic Recipient",
"pickup_location": " ",
"destination": "Generic Destination",
"weight": "420"
}
res = self.client.post("/api/v2/users/parcels",
data=json.dumps(fake_parcel),
content_type="application/json", headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Error"], "Please enter valid pickup location")
self.assertEqual(res.status_code, 400)
def test_invalid_destination(self):
"""Parcels must have valid destination"""
fake_parcel = {"parcel_name": "fake",
"recipient_name": "<NAME>",
"pickup_location": "Over here",
"destination": " ",
"weight": "420"
}
res = self.client.post("/api/v2/users/parcels",
data=json.dumps(fake_parcel),
content_type="application/json", headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Error"], "Please enter valid destination")
self.assertEqual(res.status_code, 400)
def test_valid_weight(self):
"""Parcels must have valid weight"""
fake_parcel = {"parcel_name": "fake",
"recipient_name": "<NAME>",
"pickup_location": "Over here",
"destination": "Over there",
"weight": "so fake"
}
res = self.client.post("/api/v2/users/parcels",
data=json.dumps(fake_parcel),
content_type="application/json", headers=self.headers)
result = json.loads(res.data)
self.assertEqual(
result["Error"], "Please enter postive weight in integers")
self.assertEqual(res.status_code, 400)
def test_admin_can_create_parcel(self):
"""Admins should not be able to create parcels"""
res = self.client.post("/api/v2/users/parcels", data=json.dumps(
self.generic_parcel), content_type="application/json", headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Error"], "Admins cannot create parcels. Forbidden!")
self.assertEqual(res.status_code, 403)
def test_user_change_destination(self):
"""User should be able to change destination of parcels
that are pending"""
self.client.post("/api/v2/users/parcels", data=json.dumps(self.generic_parcel),
content_type="application/json", headers=self.headers)
update_destination = {"destination": "Malibu"}
res = self.client.put("/api/v2/parcels/1/destination", data=json.dumps(
update_destination), content_type="application/json", headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Success"],
"Destination for parcel 1 succesfully changed")
self.assertEqual(res.status_code, 200)
def test_user_enters_numbers_as_destination(self):
"""User should not be able to add numbers as destination"""
parcel = {"parcel_name": "Contracts",
"recipient_name": "Irelia",
"pickup_location": "Mount DOOM",
"destination": "1234123452",
"weight": "323"}
res = self.client.post("api/v2/users/parcels", data=json.dumps(parcel),
content_type="application/json", headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Error"], "Please enter valid destination")
self.assertEqual(res.status_code, 400)
def test_nonexistent_parcel_destination(self):
"""User should not be able to change destination of parcels
that don't exist"""
des = {"destination": "Nairoberry"}
res = self.client.put("/api/v2/parcels/5/destination",
data=json.dumps(des), content_type="application/json",
headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Error"], "Parcel not found")
self.assertEqual(res.status_code, 404)
def test_admin_change_destination(self):
"""Admin should not be able to change the destination of parcels"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
des = {"destination": "Nairoberry"}
res = self.client.put("/api/v2/parcels/1/destination",
data=json.dumps(des), content_type="application/json",
headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"Admins cannot change destinaion of parcels. Forbidden!")
self.assertEqual(res.status_code, 403)
def test_user_cannot_change_destination_of_parcel_they_did_not_create(self):
"""Users should not be able to change destination of parcels that are
not theirs"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
self.client.post("/api/v2/auth/signup",
data=json.dumps(self.generic_user),
content_type="application/json")
log = self.client.post("/api/v2/auth/login",
data=json.dumps(self.generic_user_details),
content_type="application/json")
logs = json.loads(log.get_data(as_text=True))
log_token = logs["token"]
temp_headers = {"AUTHORIZATION": "Bearer " + log_token}
update_destination = {"destination": "Nairoberry"}
res = self.client.put("api/v2/parcels/1/destination", data=json.dumps(
update_destination), content_type="application/json", headers=temp_headers)
result = json.loads(res.data)
self.assertEqual(
result["Error"], "You can only update destination of your own parcels. Unauthorized!")
self.assertEqual(res.status_code, 401)
def test_user_can_get_their_parcel(self):
"""Users can only see parcels if they made one"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
res = self.client.get("/api/v2/parcels", headers=self.headers)
self.assertEqual(res.status_code, 200)
def test_user_cannot_see_parcels_not_theirs(self):
""""""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
self.client.post("/api/v2/auth/signup",
data=json.dumps(self.generic_user),
content_type="application/json")
log = self.client.post("/api/v2/auth/login",
data=json.dumps(self.generic_user_details),
content_type="application/json")
logs = json.loads(log.get_data(as_text=True))
log_token = logs["token"]
temp_headers = {"AUTHORIZATION": "Bearer " + log_token}
res = self.client.get("/api/v2/parcels", headers=temp_headers)
self.assertEqual(res.status_code, 404)
def test_user_change_status(self):
"""User should not be able to change the status of deliveries"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
status = {"status": "transit"}
res = self.client.put("/api/v2/parcels/1/status", data=json.dumps(
status), content_type="application/json", headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"Only admins can change status of parcels. Forbidden!")
self.assertEqual(res.status_code, 403)
def test_admin_change_status(self):
"""Admins should be able to change status of parcels that are not delivered or cancelled"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
status = {"status": "transit"}
res = self.client.put("/api/v2/parcels/1/status", data=json.dumps(
status), content_type="application/json", headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Success"],
"The status for parcel number 1 was successfully changed")
self.assertEqual(res.status_code, 200)
def test_admin_change_invalid_status(self):
"""Admin should only be able to change status to being on transit or delivered"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
status = {"status": "invalid"}
res = self.client.put("/api/v2/parcels/1/status", data=json.dumps(
status), content_type="application/json", headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"Status can only be changed to 'transit' or 'delivered'.")
self.assertEqual(res.status_code, 400)
def test_admin_change_status_of_nonexistent_parcel(self):
"""Admin should only change status of parcels that exist"""
status = {"status": "delivered"}
res = self.client.put("/api/v2/parcels/5/status", data=json.dumps(
status), content_type="application/json", headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"Parcel not found.")
self.assertEqual(res.status_code, 404)
    def test_admin_change_status_of_delivered_parcels(self):
        """Admin should not be able to change status of parcels that have been cancelled
        or delivered"""
        # NOTE(review): unlike the sibling tests, no parcel is created here
        # first. This only works if parcel 1 already exists when this method
        # runs (i.e. the database persists across test methods) -- confirm
        # against BaseTestClass setup/teardown; otherwise both PUTs hit a
        # nonexistent parcel and the assertions below would not match.
        status = {"status": "delivered"}
        # First mark parcel 1 as delivered...
        self.client.put("/api/v2/parcels/1/status", data=json.dumps(
            status), content_type="application/json", headers=self.admin_header)
        new_status = {"status": "transit"}
        # ...then verify a further status change is rejected.
        res = self.client.put("/api/v2/parcels/1/status", data=json.dumps(
            new_status), content_type="application/json", headers=self.admin_header)
        result = json.loads(res.data)
        self.assertEqual(result["Error"],
                         "Status cannot be changed for delivered or cancelled parcels")
        self.assertEqual(res.status_code, 400)
def test_user_can_change_location(self):
"""Users should not be able to change location of parcels"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
location = {"current_location": "invalid"}
res = self.client.put("/api/v2/parcels/1/presentLocation", data=json.dumps(
location), content_type="application/json", headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"Only admins can update the present location of a parcel. Forbidden!")
self.assertEqual(res.status_code, 403)
def test_admin_can_change_current_location(self):
"""Admins should be able to update current location of parcels in transit"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
status = {"status": "transit"}
res = self.client.put("/api/v2/parcels/1/status", data=json.dumps(
status), content_type="application/json", headers=self.admin_header)
location = {"current_location": "Nairoberry"}
res = self.client.put("/api/v2/parcels/1/presentLocation", data=json.dumps(
location), content_type="application/json", headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Success"],
"Successfully updated current location")
self.assertEqual(res.status_code, 200)
def test_admin_can_change_location_of_nonexistent_parcel(self):
"""Admin should not be able to change location of parcels that don't exist"""
location = {"current_location": "Nairoberry"}
res = self.client.put("/api/v2/parcels/5/presentLocation", data=json.dumps(
location), content_type="application/json", headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"Parcel not found")
self.assertEqual(res.status_code, 404)
def test_admin_can_change_location_of_pending_parcels(self):
"""Admins should not be able to change current location of parcels not in transit"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
location = {"current_location": "Nairoberry"}
res = self.client.put("/api/v2/parcels/1/presentLocation", data=json.dumps(
location), content_type="application/json", headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"You can only change location of parcels in transit")
self.assertEqual(res.status_code, 400)
def test_admin_can_add_invalid_current_location(self):
"""Admins should not be able to add current locations that are not valid"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
status = {"status": "transit"}
res = self.client.put("/api/v2/parcels/1/status", data=json.dumps(
status), content_type="application/json", headers=self.admin_header)
location = {"current_location": " "}
res = self.client.put("/api/v2/parcels/1/presentLocation", data=json.dumps(
location), content_type="application/json", headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"Please enter a valid location")
self.assertEqual(res.status_code, 400)
def test_admin_can_cancel_parcel(self):
"""Admin should not be able to cancel parcels"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
res = self.client.put("/api/v2/parcels/1/cancel",
headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"Admins cannot cancel parcels. Forbidden!")
self.assertEqual(res.status_code, 403)
def test_user_can_cancel_pending_parcel(self):
"""User should be able to cancel pending or parcels in transit"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
res = self.client.put("/api/v2/parcels/1/cancel",
headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Success"],
"Successfully cancelled your parcel")
self.assertEqual(res.status_code, 200)
def test_user_can_cancel_cancelled_parcel(self):
"""User should not be able to cancel cancelled or delivered parcels"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
self.client.put("/api/v2/parcels/1/cancel",
headers=self.headers)
res = self.client.put("/api/v2/parcels/1/cancel",
headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"You can only cancel parcels in transit")
self.assertEqual(res.status_code, 400)
def test_user_can_cancel_nonexistent_parcels(self):
"""User should not be able to cancel parcels that don't exist"""
res = self.client.put("/api/v2/parcels/5/cancel",
headers=self.headers)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"Parcel not found")
self.assertEqual(res.status_code, 404)
def test_user_can_cancel_parcel_by_another_user(self):
"""User should not be able to cancel parcels they did no create"""
self.client.post(
"api/v2/users/parcels", data=(json.dumps(self.generic_parcel)),
content_type="application/json", headers=self.headers)
self.client.post("/api/v2/auth/signup", data=json.dumps(self.generic_user),
content_type="application/json")
log = self.client.post("/api/v2/auth/login", data=json.dumps(self.generic_user_details),
content_type="application/json")
logs = json.loads(log.get_data(as_text=True))
log_token = logs["token"]
temp_headers = {"AUTHORIZATION": "Bearer " + log_token}
res = self.client.put("/api/v2/parcels/1/cancel",
headers=temp_headers)
result = json.loads(res.data)
self.assertEqual(result["Error"],
"You can only cancel parcels you created")
self.assertEqual(res.status_code, 401)
def test_admin_can_get_parcel_by_id(self):
"""Admins should be able to get any parcel by their ID if the parcels exist"""
res = self.client.get("api/v2/parcels/1", headers=self.admin_header)
# result = json.loads(res.json)
# self.assertEqual(result["Parcel 1"], )
self.assertEqual(res.status_code, 200)
def test_get_nonexistent_parcel_by_id(self):
"""Admins and users should be notified when they try to get non-existent parcels"""
res = self.client.get(
"api/v2/parcels/44", content_type="application/json", headers=self.admin_header)
result = json.loads(res.data)
self.assertEqual(result["Error"], "Parcel 44 does not exist")
self.assertEqual(res.status_code, 404)
def test_user_get_parcel_that_is_not_theirs_by_id(self):
"""Users should not be able to get parcels that are not theirs by ID."""
self.client.post("/api/v2/auth/signup", data=json.dumps(self.generic_user),
content_type="application/json")
log = self.client.post("/api/v2/auth/login", data=json.dumps(self.generic_user_details),
content_type="application/json")
logs = json.loads(log.get_data(as_text=True))
log_token = logs["token"]
temp_headers = {"AUTHORIZATION": "Bearer " + log_token}
res = self.client.get(
"api/v2/parcels/1", content_type="application/json", headers=temp_headers)
result = json.loads(res.data)
self.assertEqual(
result["Error"], "You can only view your own parcels. To view them, search for all your parcels | |
<reponame>davidleonfdez/face2anime
from abc import ABC, abstractmethod
from fastai.vision.all import *
from fastai.vision.gan import *
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import spectral_norm
from face2anime.layers import (ConcatPoolHalfDownsamplingOp2d, CondConvX2UpsamplingOp2d, CondResBlockUp,
ConvHalfDownsamplingOp2d, ConvX2UpsamplingOp2d, DownsamplingOperation2d,
FeaturesStats, FeatureStatType, FeaturesStatsSource, InterpConvUpsamplingOp2d,
MiniBatchStdDev, ParamRemoverUpsamplingOp2d, ResBlockDown, ResBlockUp,
UpsamplingOperation2d, ZeroDownsamplingOp2d)
from face2anime.torch_utils import add_sn
__all__ = ['custom_generator', 'res_generator', 'NoiseSplitStrategy', 'NoiseSplitEqualLeave1stOutStrategy',
'NoiseSplitDontSplitStrategy', 'CondResGenerator', 'SkipGenerator', 'CycleGenerator', 'res_critic',
'patch_res_critic', 'PatchResCritic', 'CycleCritic', 'default_encoder', 'basic_encoder',
'default_decoder', 'Img2ImgGenerator']
def custom_generator(out_size, n_channels, up_op:UpsamplingOperation2d, in_sz=100,
                     n_features=64, n_extra_layers=0, sn=True, **kwargs):
    "A basic generator from `in_sz` to images `n_channels` x `out_size` x `out_size`."
    # Work out the feature count of the first (4x4) stage: each x2 upsample
    # halves the features, so walk the sizes up from (4, n_features//2).
    size, ftrs = 4, n_features // 2
    while size < out_size:
        size *= 2
        ftrs *= 2
    # Project the flat noise vector into a (ftrs, 4, 4) feature map.
    layers = [AddChannels(2), ConvLayer(in_sz, ftrs, 4, 1, transpose=True, **kwargs)]
    size = 4
    # Stack x2 upsampling layers until one doubling below the target size.
    while size < out_size // 2:
        layers.append(up_op.get_layer(ftrs, ftrs // 2, **kwargs))
        ftrs //= 2
        size *= 2
    for _ in range(n_extra_layers):
        layers.append(ConvLayer(ftrs, ftrs, 3, 1, 1, transpose=True, **kwargs))
    # Final upsample to `n_channels`, squashed to [-1, 1] by tanh.
    layers.append(up_op.get_layer(ftrs, n_channels, norm_type=None, act_cls=None))
    layers.append(nn.Tanh())
    net = nn.Sequential(*layers)
    if sn:
        add_sn(net)
    return net
def res_generator(out_sz, n_ch, up_op:UpsamplingOperation2d, id_up_op:UpsamplingOperation2d,
                  in_sz=100, n_features=64, n_extra_res_blocks=1, n_extra_convs_by_res_block=1,
                  sn=True, bn_1st=True, upblock_cls=ResBlockUp, hooks_by_sz=None, **kwargs):
    """Builds a residual generator from `in_sz` to images `n_ch` x `out_sz` x `out_sz`.
    kwargs are forwarded to `ConvLayer,` `ResBlock` and `upblock_cls`.
    Args:
        out_sz: size of each one of the two spatial dimensions of the output (squared images
            are assumed).
        n_ch: number of channels of the output images.
        up_op: upsampling operation to include everywhere but in the identity connections of the
            upsampling residual blocks.
        id_up_op: upsampling operation used in the identity connections of the upsampling residual
            blocks.
        n_features: number of input features of the last upsampling layer.
        n_extra_res_blocks: number of additional residual blocks included right before the last
            upsampling layer.
        n_extra_convs_by_res_block: number of additional convolutional layers included in the main
            path of upsampling residual blocks.
        sn: whether to perform spectral normalization on convolutional layers.
        bn_1st: if True, BN/IN layers are placed before the activation, so that the order would be
            conv-BN-act, instead of conv-act-BN.
        upblock_cls: class of the upsampling residual blocks. It should be a subclass of ResBlockUp
            or, at least, share the same __init__ signature.
        hooks_by_sz (dict): dictionary with integers as keys and `Hook` as values. The tensor stored
            in hook `hooks_by_sz[x]` will be concatenated with the output of the upsampling residual
            block whose spatial size is `(x, x)`.
    """
    # Feature count of the first (4x4) stage: doubling spatial size halves
    # features, so walk the sizes up from (4, n_features//2).
    cur_sz, cur_ftrs = 4, n_features//2
    while cur_sz < out_sz: cur_sz *= 2; cur_ftrs *= 2
    # Project the flat noise vector into a (cur_ftrs, 4, 4) feature map.
    layers = [AddChannels(2),
              ConvLayer(in_sz, cur_ftrs, 4, 1, transpose=True, bn_1st=bn_1st, **kwargs)]
    cur_sz = 4
    # Stack upsampling residual blocks until one x2 step below the target size.
    while cur_sz < out_sz // 2:
        # The hook, keyed by the block's *output* size (cur_sz*2), is passed to
        # the block so its stored tensor can be concatenated with the output.
        hook = hooks_by_sz.get(cur_sz*2) if hooks_by_sz is not None else None
        layers.append(upblock_cls(cur_ftrs, cur_ftrs//2, up_op, id_up_op,
                                  n_extra_convs=n_extra_convs_by_res_block,
                                  bn_1st=bn_1st, hook=hook, **kwargs))
        cur_ftrs //= 2; cur_sz *= 2
    layers += [ResBlock(1, cur_ftrs, cur_ftrs, bn_1st=bn_1st, **kwargs)
               for _ in range(n_extra_res_blocks)]
    # Final x2 upsample to `n_ch` channels, squashed to [-1, 1] by tanh.
    layers += [up_op.get_layer(cur_ftrs, n_ch, norm_type=None, act_cls=None), nn.Tanh()]
    generator = nn.Sequential(*layers)
    if sn: add_sn(generator)
    return generator
class NoiseSplitStrategy(ABC):
    """Strategy interface for deriving conditioning chunks from a noise tensor.

    Child classes must implement a method of obtaining 'n' segments of
    whatever size from a given tensor.
    """
    @abstractmethod
    def calc_cond_sz(self, noise_sz, n_splits):
        "Return the size of each chunk for a noise vector of size `noise_sz`."
    @abstractmethod
    def split_noise(self, noise, n_splits):
        "Return `n_splits` chunks obtained from `noise`."
class NoiseSplitEqualLeave1stOutStrategy(NoiseSplitStrategy):
    "Splits a noise vector into chunks of equal size and discards the first one."
    def calc_cond_sz(self, noise_sz, n_splits):
        # One extra chunk is reserved (and later dropped), hence `n_splits + 1`.
        chunk_count = n_splits + 1
        return noise_sz // chunk_count
    def split_noise(self, noise, n_splits):
        # Split along dim 1 into equal chunks and drop the first one.
        chunk_sz = self.calc_cond_sz(noise.shape[1], n_splits)
        all_chunks = noise.split(chunk_sz, 1)
        return all_chunks[1:]
class NoiseSplitDontSplitStrategy(NoiseSplitStrategy):
    "Doesn't split the noise vector; i.e., returns the whole vector for every required split."
    def calc_cond_sz(self, noise_sz, n_splits):
        # Every "chunk" is the full vector, so its size is the full size.
        return noise_sz
    def split_noise(self, noise, n_splits):
        # Hand back the same tensor object once per requested split.
        return [noise for _ in range(n_splits)]
class CondResGenerator(nn.Module):
    """Residual generator with conditional BN/IN layers.
    The input tensor is split, according to `noise_split_strategy`, in as many
    chunks as residual upsampling blocks there are, so that each chunk is fed
    into a block to condition its CBN/CIN layers.
    Args:
        out_sz: size of each one of the two spatial dimensions of the output (squared
            images are assumed).
        n_ch: number of channels of the output images.
        up_op: upsampling operation to include everywhere but in the identity connections of the
            upsampling residual blocks.
        id_up_op: upsampling operation used in the identity connections of the upsampling residual
            blocks.
        noise_split_strategy: defines how to divide the input into chunks, so that each one can
            be used as the conditional input of a different upsampling residual block.
        in_sz: size of the input tensor, batch size excluded.
        n_features: number of input features of the last upsampling layer.
        n_extra_res_blocks: number of additional residual blocks included right before the last
            upsampling layer.
        n_extra_convs_by_res_block: number of additional convolutional layers included in the main
            path of upsampling residual blocks.
        sn: whether to perform spectral normalization on convolutional layers.
        bn_1st: if True, BN/IN layers are placed before the activations, so that the order would be
            conv-BN-act, instead of conv-act-BN.
        upblock_cls: class of the conditional upsampling residual blocks. It should be a subclass of
            CondResBlockUp or, at least, share the same __init__ signature.
    """
    # Spatial size of the first feature map produced from the noise vector.
    init_sz = 4
    def __init__(self, out_sz:int, n_ch:int, up_op:UpsamplingOperation2d, id_up_op:UpsamplingOperation2d,
                 noise_split_strategy:NoiseSplitStrategy, in_sz=100, n_features=64,
                 n_extra_res_blocks=1, n_extra_convs_by_res_block=1, sn=True, bn_1st=True,
                 upblock_cls=CondResBlockUp, **kwargs):
        super().__init__()
        self.noise_split_strategy = noise_split_strategy
        # Feature count of the first stage: each x2 upsample halves features.
        cur_sz, cur_ftrs = self.init_sz, n_features//2
        while cur_sz < out_sz: cur_sz *= 2; cur_ftrs *= 2
        # Project the flat noise vector into a (cur_ftrs, init_sz, init_sz) map.
        self.initial_layers = nn.Sequential(
            AddChannels(2),
            ConvLayer(in_sz, cur_ftrs, 4, 1, transpose=True, bn_1st=bn_1st, **kwargs))
        cur_sz = self.init_sz
        # Size of each conditioning chunk fed to the CBN/CIN layers of a block.
        n_splits = self.calc_n_upblocks(out_sz)
        self.cond_sz = noise_split_strategy.calc_cond_sz(in_sz, n_splits)
        self.up_layers = nn.ModuleList([])
        while cur_sz < out_sz // 2:
            self.up_layers.append(upblock_cls(cur_ftrs, cur_ftrs//2, self.cond_sz,
                                              up_op, id_up_op, bn_1st=bn_1st,
                                              n_extra_convs=n_extra_convs_by_res_block,
                                              **kwargs))
            cur_ftrs //= 2; cur_sz *= 2
        self.extra_blocks = nn.Sequential(
            *[ResBlock(1, cur_ftrs, cur_ftrs, bn_1st=bn_1st, **kwargs)
              for _ in range(n_extra_res_blocks)])
        self.final_conv = up_op.get_layer(cur_ftrs, n_ch, norm_type=None, act_cls=None)
        self.act = nn.Tanh()
        if sn:
            # Spectral norm is applied per-component because up_layers is a
            # ModuleList, which add_sn is applied to element by element here.
            add_sn(self.initial_layers)
            for up_l in self.up_layers: add_sn(up_l)
            add_sn(self.extra_blocks)
            add_sn(self.final_conv)
    @classmethod
    def calc_n_upblocks(cls, out_sz):
        # Number of x2 upsampling blocks between init_sz and out_sz // 2.
        return int(math.log2(out_sz // (2 * cls.init_sz)))
    def forward(self, z):
        # One conditioning chunk per upsampling block.
        n_splits = len(self.up_layers)
        z_splits = self.noise_split_strategy.split_noise(z, n_splits)
        x = self.initial_layers(z)
        for zi, up_layer in zip(z_splits, self.up_layers):
            x = up_layer(x, zi)
        x = self.extra_blocks(x)
        # NOTE(review): final_conv is called with an extra None argument —
        # presumably the layer built by up_op.get_layer accepts an optional
        # condition; confirm against the UpsamplingOperation2d implementations.
        return self.act(self.final_conv(x, None))
class SkipGenerator(nn.Module):
    """Residual generator with skip connections that follow StyleGAN structure.

    Every upsampling block's output is projected to RGB by a small conv; the
    projected maps are summed (each upsampled x2 before the next addition) and
    finally added to the main path output before the tanh.
    """
    def __init__(self, out_sz, n_ch, up_op:UpsamplingOperation2d, id_up_op:UpsamplingOperation2d,
                 in_sz=100, n_features=64, n_extra_res_blocks=1, n_extra_convs_by_res_block=1,
                 sn=True, bn_1st=True, upblock_cls=ResBlockUp, upsample_skips_mode='nearest',
                 skip2rgb_ks=3, skip_act_cls=nn.Tanh, **kwargs):
        super().__init__()
        # Feature count of the first (4x4) stage: each x2 upsample halves features.
        cur_sz, cur_ftrs = 4, n_features//2
        while cur_sz < out_sz: cur_sz *= 2; cur_ftrs *= 2
        # Project the flat noise vector into a (cur_ftrs, 4, 4) feature map.
        self.initial_layers = nn.Sequential(
            AddChannels(2),
            ConvLayer(in_sz, cur_ftrs, 4, 1, transpose=True, bn_1st=bn_1st, **kwargs))
        cur_sz = 4
        self.up_layers = nn.ModuleList([])
        # One "to RGB" projection per upsampling block (StyleGAN-style skips).
        self.skips_torgb = nn.ModuleList([])
        self.upsample_skips_mode = upsample_skips_mode
        while cur_sz < out_sz // 2:
            self.up_layers.append(upblock_cls(cur_ftrs, cur_ftrs//2, up_op, id_up_op,
                                              n_extra_convs=n_extra_convs_by_res_block,
                                              bn_1st=bn_1st, **kwargs))
            self.skips_torgb.append(ConvLayer(cur_ftrs//2, n_ch, ks=skip2rgb_ks, norm_type=None,
                                              act_cls=skip_act_cls, bias=False))
            cur_ftrs //= 2; cur_sz *= 2
        self.extra_blocks = nn.Sequential(
            *[ResBlock(1, cur_ftrs, cur_ftrs, bn_1st=bn_1st, **kwargs)
              for _ in range(n_extra_res_blocks)])
        self.last_up = up_op.get_layer(cur_ftrs, n_ch, norm_type=None, act_cls=None)
        self.act = nn.Tanh()
        if sn:
            # NOTE(review): the ModuleLists are wrapped in a throwaway Sequential
            # so that add_sn can reach every submodule — assumes add_sn works by
            # traversing children; confirm in face2anime.torch_utils.add_sn.
            add_sn(nn.Sequential(self.initial_layers, self.up_layers, self.skips_torgb,
                                 self.extra_blocks, self.last_up))
    def forward(self, x):
        x = self.initial_layers(x)
        out = None  # running sum of the RGB skip projections
        for up_layer, skip_torgb in zip(self.up_layers, self.skips_torgb):
            x = up_layer(x)
            skip_x = skip_torgb(x)
            out = skip_x if out is None else out + skip_x
            # Upsample the accumulated skips to match the next block's size;
            # after the last iteration this matches last_up's x2 output.
            out = F.interpolate(out, scale_factor=2, mode=self.upsample_skips_mode)
        x = self.extra_blocks(x)
        x = self.last_up(x)
        return self.act(out + x)
class CycleGenerator(nn.Module):
    """Double generator wrapper, suitable for bidirectional image to image translation"""
    def __init__(self, g_a2b, g_b2a):
        super().__init__()
        # Assigning the submodules as attributes registers their parameters.
        self.g_a2b = g_a2b
        self.g_b2a = g_b2a
    def forward(self, x_a, x_b):
        # Translate each input with the generator for its direction and
        # return both results as an (a->b, b->a) pair.
        return self.g_a2b(x_a), self.g_b2a(x_b)
def res_critic(in_size, n_channels, down_op:DownsamplingOperation2d, id_down_op:DownsamplingOperation2d,
n_features=64, n_extra_res_blocks=1, norm_type=NormType.Batch, n_extra_convs_by_res_block=0,
sn=True, bn_1st=True, downblock_cls=ResBlockDown, flatten_full=False,
include_minibatch_std=False, **kwargs):
"A residual critic for images `n_channels` x `in_size` x `in_size`."
layers = [down_op.get_layer(n_channels, n_features, norm_type=None, **kwargs)]
cur_size, cur_ftrs = in_size//2, n_features
layers += [ResBlock(1, cur_ftrs, cur_ftrs, norm_type=norm_type, bn_1st=bn_1st, **kwargs)
for _ in range(n_extra_res_blocks)]
while cur_size > 4:
layers.append(downblock_cls(cur_ftrs, cur_ftrs*2, down_op, id_down_op,
n_extra_convs=n_extra_convs_by_res_block,
norm_type=norm_type, bn_1st=bn_1st, **kwargs))
cur_ftrs *= 2 ; cur_size //= 2
init = kwargs.get('init', nn.init.kaiming_normal_)
if include_minibatch_std:
# it may not make sense when using BN, although it, unlike BN, calculates a different
# stdev for any spatial position.
layers.append(MiniBatchStdDev())
cur_ftrs += 1
layers += [init_default(nn.Conv2d(cur_ftrs, | |
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AddLiveDomainRequest()
model.from_json_string(json.dumps(args))
rsp = client.AddLiveDomain(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLiveCallbackTemplate(args, parsed_globals):
    """CLI action handler: call the Live `DescribeLiveCallbackTemplate` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeLiveCallbackTemplateRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeLiveCallbackTemplate(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLiveDomainPlayInfoList(args, parsed_globals):
    """CLI action handler: call the Live `DescribeLiveDomainPlayInfoList` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeLiveDomainPlayInfoListRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeLiveDomainPlayInfoList(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCallbackRecordsList(args, parsed_globals):
    """CLI action handler: call the Live `DescribeCallbackRecordsList` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeCallbackRecordsListRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeCallbackRecordsList(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateLiveRecordRule(args, parsed_globals):
    """CLI action handler: call the Live `CreateLiveRecordRule` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateLiveRecordRuleRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateLiveRecordRule(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateLivePullStreamTask(args, parsed_globals):
    """CLI action handler: call the Live `CreateLivePullStreamTask` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateLivePullStreamTaskRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateLivePullStreamTask(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLiveTranscodeTemplates(args, parsed_globals):
    """CLI action handler: call the Live `DescribeLiveTranscodeTemplates` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeLiveTranscodeTemplatesRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeLiveTranscodeTemplates(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateLiveRecordTemplate(args, parsed_globals):
    """CLI action handler: call the Live `CreateLiveRecordTemplate` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateLiveRecordTemplateRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateLiveRecordTemplate(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeBillBandwidthAndFluxList(args, parsed_globals):
    """CLI action handler: call the Live `DescribeBillBandwidthAndFluxList` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeBillBandwidthAndFluxListRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeBillBandwidthAndFluxList(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doForbidLiveDomain(args, parsed_globals):
    """CLI action handler: call the Live `ForbidLiveDomain` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ForbidLiveDomainRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.ForbidLiveDomain(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateLiveTranscodeRule(args, parsed_globals):
    """CLI action handler: call the Live `CreateLiveTranscodeRule` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateLiveTranscodeRuleRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateLiveTranscodeRule(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLiveWatermarkRules(args, parsed_globals):
    """CLI action handler: call the Live `DescribeLiveWatermarkRules` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeLiveWatermarkRulesRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeLiveWatermarkRules(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteLiveRecord(args, parsed_globals):
    """CLI action handler: call the Live `DeleteLiveRecord` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteLiveRecordRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DeleteLiveRecord(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateLiveSnapshotRule(args, parsed_globals):
    """CLI action handler: call the Live `CreateLiveSnapshotRule` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateLiveSnapshotRuleRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.CreateLiveSnapshotRule(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeGroupProIspPlayInfoList(args, parsed_globals):
    """CLI action handler: call the Live `DescribeGroupProIspPlayInfoList` API with `args` and print the JSON response."""
    g_param = parse_global_arg(parsed_globals)
    # Build credentials and HTTP/client profiles from the parsed global CLI options.
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeGroupProIspPlayInfoListRequest()
    model.from_json_string(json.dumps(args))
    rsp = client.DescribeGroupProIspPlayInfoList(model)
    result = rsp.to_json_string()
    # to_json_string may return bytes on some runtimes; decode before parsing.
    try:
        json_obj = json.loads(result)
    except TypeError as e:
        json_obj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAllStreamPlayInfoList(args, parsed_globals):
    """CLI entry point for the Live DescribeAllStreamPlayInfoList API.

    Builds credentials and an HTTP/client profile from the parsed global
    options, issues the request with ``args`` as the JSON request body, and
    prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)
    version = g_param[OptionsDefine.Version]
    timeout = g_param[OptionsDefine.Timeout]
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].LiveClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DescribeAllStreamPlayInfoListRequest()
    request.from_json_string(json.dumps(args))
    response = client.DescribeAllStreamPlayInfoList(request)
    payload = response.to_json_string()
    try:
        json_obj = json.loads(payload)
    except TypeError:
        # On some Python 3 builds to_json_string() returns bytes (python3.3).
        json_obj = json.loads(payload.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLivePlayAuthKey(args, parsed_globals):
    """CLI entry point for the Live DescribeLivePlayAuthKey API.

    Builds credentials and an HTTP/client profile from the parsed global
    options, issues the request with ``args`` as the JSON request body, and
    prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)
    version = g_param[OptionsDefine.Version]
    timeout = g_param[OptionsDefine.Timeout]
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId],
        g_param[OptionsDefine.SecretKey],
        g_param[OptionsDefine.Token]
    )
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy]
    )
    client_profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].LiveClient(cred, g_param[OptionsDefine.Region], client_profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DescribeLivePlayAuthKeyRequest()
    request.from_json_string(json.dumps(args))
    response = client.DescribeLivePlayAuthKey(request)
    payload = response.to_json_string()
    try:
        json_obj = json.loads(payload)
    except TypeError:
        # On some Python 3 builds to_json_string() returns bytes (python3.3).
        json_obj = json.loads(payload.decode('utf-8'))
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLiveStreamState(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.LiveClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model | |
ics[0], ics[1+i])
self.eval(ic)
return self('desolve(%s, %s(%s))'%(de, vars[1], vars[0]))
def solve_linear(self, eqns, vars):
    """
    Wraps maxima's linsolve.

    INPUT:

    - ``eqns`` - a list of m strings; each representing a linear
      equation in m = n variables

    - ``vars`` - a list of n strings; each representing a variable

    EXAMPLES::

        sage: eqns = ["x + z = y","2*a*x - y = 2*a^2","y - 2*z = 2"]
        sage: vars = ["x","y","z"]
        sage: maxima.solve_linear(eqns, vars)
        [x=a+1,y=2*a,z=a-1]
    """
    # Build the Maxima list literals in a single join.  The previous
    # index loop left the brackets unterminated ("[") for empty input;
    # this form correctly yields "[]" in that case.
    eqs = "[" + ",".join(eqns) + "]"
    vrs = "[" + ",".join(vars) + "]"
    return self('linsolve(%s, %s)' % (eqs, vrs))
def unit_quadratic_integer(self, n):
    r"""
    Finds a unit of the ring of integers of the quadratic number field
    `\QQ(\sqrt{n})`, `n>1`, using the qunit maxima command.

    INPUT:

    - ``n`` - an integer

    EXAMPLES::

        sage: u = maxima.unit_quadratic_integer(101); u
        a + 10
        sage: u.parent()
        Number Field in a with defining polynomial x^2 - 101
        sage: u = maxima.unit_quadratic_integer(13)
        sage: u
        5*a + 18
        sage: u.parent()
        Number Field in a with defining polynomial x^2 - 13
    """
    from sage.rings.all import Integer
    from sage.rings.number_field.number_field import QuadraticField
    # Take square-free part so sqrt(n) doesn't get simplified
    # further by maxima
    # (The original version of this function would yield wrong answers if
    # n is not squarefree.)
    n = Integer(n).squarefree_part()
    if n < 1:
        raise ValueError("n (=%s) must be >= 1" % n)
    s = repr(self('qunit(%s)' % n)).lower()
    # Raw string: '\(' is an invalid escape sequence in a plain literal
    # (SyntaxWarning on modern Python, eventually an error).
    r = re.compile(r'sqrt\(.*\)')
    a = QuadraticField(n, 'a').gen()
    # Substitute the generator name for the sqrt(...) text, then evaluate
    # the resulting expression.  NOTE(review): eval() assumes the Maxima
    # interface output is trusted.
    s = r.sub('a', s)
    return eval(s)
def plot_list(self, ptsx, ptsy, options=None):
r"""
Plots a curve determined by a sequence of points.
INPUT:
- ``ptsx`` - [x1,...,xn], where the xi and yi are
real,
- ``ptsy`` - [y1,...,yn]
- ``options`` - a string representing maxima plot2d
options.
The points are (x1,y1), (x2,y2), etc.
This function requires maxima 5.9.2 or newer.
.. note::
More that 150 points can sometimes lead to the program
hanging. Why?
EXAMPLES::
sage: zeta_ptsx = [ (pari(1/2 + i*I/10).zeta().real()).precision(1) for i in range (70,150)]
sage: zeta_ptsy = [ (pari(1/2 + i*I/10).zeta().imag()).precision(1) for i in range (70,150)]
sage: maxima.plot_list(zeta_ptsx, zeta_ptsy) # not tested
sage: opts='[gnuplot_preamble, "set nokey"], [gnuplot_term, ps], [gnuplot_out_file, "zeta.eps"]'
sage: maxima.plot_list(zeta_ptsx, zeta_ptsy, opts) # not tested
"""
cmd = 'plot2d([discrete,%s, %s]'%(ptsx, ptsy)
if options is None:
cmd += ')'
else:
cmd += ', %s)'%options
self(cmd)
def plot_multilist(self, pts_list, options=None):
    r"""
    Plots a list of list of points pts_list=[pts1,pts2,...,ptsn],
    where each ptsi is of the form [[x1,y1],...,[xn,yn]] x's must be
    integers and y's reals options is a string representing maxima
    plot2d options.

    INPUT:

    - ``pts_lst`` - list of points; each point must be of the form [x,y]
      where ``x`` is an integer and ``y`` is a real

    - ``var`` - string; representing Maxima's plot2d options

    Requires maxima 5.9.2 at least.

    .. note::

       More that 150 points can sometimes lead to the program
       hanging.

    EXAMPLES::

        sage: xx = [ i/10.0 for i in range (-10,10)]
        sage: yy = [ i/10.0 for i in range (-10,10)]
        sage: x0 = [ 0 for i in range (-10,10)]
        sage: y0 = [ 0 for i in range (-10,10)]
        sage: zeta_ptsx1 = [ (pari(1/2+i*I/10).zeta().real()).precision(1) for i in range (10)]
        sage: zeta_ptsy1 = [ (pari(1/2+i*I/10).zeta().imag()).precision(1) for i in range (10)]
        sage: maxima.plot_multilist([[zeta_ptsx1,zeta_ptsy1],[xx,y0],[x0,yy]]) # not tested
        sage: opts='[gnuplot_preamble, "set nokey"]'
        sage: maxima.plot_multilist([[zeta_ptsx1,zeta_ptsy1],[xx,y0],[x0,yy]],opts) # not tested
    """
    # Build every "[discrete,xs,ys]" chunk and join them once.  The old
    # index loop produced an unterminated "[" for an empty pts_list; this
    # form yields a well-formed "[]".
    chunks = ['[discrete,%s,%s]' % (str(pts[0]), str(pts[1])) for pts in pts_list]
    cmd = '[' + ','.join(chunks) + ']'
    if options is None:
        self('plot2d(' + cmd + ')')
    else:
        self('plot2d(' + cmd + ',' + options + ')')
# Abstract base for elements living in a Maxima session; concrete
# subclasses (MaximaElement, MaximaLibElement) bind it to a specific
# transport (pexpect vs. library interface).
class MaximaAbstractElement(InterfaceElement):
    r"""
    Element of Maxima through an abstract interface.

    EXAMPLES:

    Elements of this class should not be created directly.
    The targeted parent of a concrete inherited class should be used instead::

        sage: from sage.interfaces.maxima_lib import maxima_lib
        sage: xp = maxima(x)
        sage: type(xp)
        <class 'sage.interfaces.maxima.MaximaElement'>
        sage: xl = maxima_lib(x)
        sage: type(xl)
        <class 'sage.interfaces.maxima_lib.MaximaLibElement'>
    """
def __str__(self):
"""
Printing an object explicitly gives ASCII art.
INPUT: none
OUTPUT: string
EXAMPLES::
sage: f = maxima('1/(x-1)^3'); f
1/(x-1)^3
sage: print f
1
--------
3
(x - 1)
"""
return self.display2d(onscreen=False)
def bool(self):
"""
Convert ``self`` into a boolean.
INPUT: none
OUTPUT: boolean
EXAMPLES::
sage: maxima(0).bool()
False
sage: maxima(1).bool()
True
"""
P = self._check_valid()
return P.eval('is(%s = 0);'%self.name()) == P._false_symbol() # but be careful, since for relations things like is(equal(a,b)) are what Maxima needs
    def __cmp__(self, other):
        """
        Compare this Maxima object with ``other``.

        INPUT:

        - ``other`` - an object to compare to

        OUTPUT: integer

        EXAMPLES::

            sage: a = maxima(1); b = maxima(2)
            sage: a == b
            False
            sage: a < b
            True
            sage: a > b
            False
            sage: b < a
            False
            sage: b > a
            True

        We can also compare more complicated object such as functions::

            sage: f = maxima('sin(x)'); g = maxima('cos(x)')
            sage: -f == g.diff('x')
            True
        """
        # NOTE: Python 2 only -- relies on the __cmp__ protocol and the
        # builtin cmp(); Python 3 would need rich comparison methods.
        # Uses Maxima's "is" predicate for the comparison.
        # but be careful, since for relations things like is(equal(a,b))
        # are what Maxima needs
        P = self.parent()
        try:
            # Each branch round-trips a query through the Maxima session;
            # the <, >, = order is significant only for efficiency.
            if P.eval("is (%s < %s)"%(self.name(), other.name())) == P._true_symbol():
                return -1
            elif P.eval("is (%s > %s)"%(self.name(), other.name())) == P._true_symbol():
                return 1
            elif P.eval("is (%s = %s)"%(self.name(), other.name())) == P._true_symbol():
                return 0
        except TypeError:
            pass
        return cmp(repr(self),repr(other))
        # everything is supposed to be comparable in Python,
        # so we define the comparison thus when no comparable
        # in interfaced system.
def _sage_(self):
"""
Attempt to make a native Sage object out of this Maxima object.
This is useful for automatic coercions in addition to other
things.
INPUT: none
OUTPUT: Sage object
EXAMPLES::
sage: a = maxima('sqrt(2) + 2.5'); a
sqrt(2)+2.5
sage: b = a._sage_(); b
sqrt(2) + 2.5
sage: type(b)
<type 'sage.symbolic.expression.Expression'>
We illustrate an automatic coercion::
sage: c = b + sqrt(3); c
sqrt(3) + sqrt(2) + 2.5
sage: type(c)
<type 'sage.symbolic.expression.Expression'>
sage: d = sqrt(3) + b; d
sqrt(3) + sqrt(2) + 2.5
sage: type(d)
<type 'sage.symbolic.expression.Expression'>
sage: a = sage.calculus.calculus.maxima('x^(sqrt(y)+%pi) + sin(%e + %pi)')
sage: a._sage_()
x^(pi + sqrt(y)) - sin(e)
sage: var('x, y')
(x, y)
sage: v = sage.calculus.calculus.maxima.vandermonde_matrix([x, y, 1/2])
sage: v._sage_()
[ 1 x x^2]
[ 1 y y^2]
[ 1 1/2 1/4]
Check if :trac:`7661` is fixed::
sage: var('delta')
delta
sage: (2*delta).simplify()
2*delta
"""
import sage.calculus.calculus as calculus
return calculus.symbolic_expression_from_maxima_string(self.name(),
maxima=self.parent())
def _symbolic_(self, R):
"""
Return a symbolic expression equivalent to this Maxima object.
INPUT:
- ``R`` - symbolic ring to convert into
OUTPUT: symbolic expression
EXAMPLES::
sage: t = sqrt(2)._maxima_()
sage: u = t._symbolic_(SR); u
sqrt(2)
sage: u.parent()
Symbolic Ring
This is used when converting Maxima objects to the Symbolic Ring::
sage: SR(t)
sqrt(2)
"""
return R(self._sage_())
def __complex__(self):
"""
Return a complex number equivalent to this Maxima object.
INPUT: none
OUTPUT: complex
EXAMPLES::
sage: complex(maxima('sqrt(-2)+1'))
(1+1.4142135623730951j)
"""
return complex(self._sage_())
def _complex_mpfr_field_(self, C):
"""
Return a mpfr complex number equivalent to this Maxima object.
INPUT:
- ``C`` - complex numbers field to convert into
OUTPUT: complex
EXAMPLES::
sage: CC(maxima('1+%i'))
1.00000000000000 + 1.00000000000000*I
sage: CC(maxima('2342.23482943872+234*%i'))
2342.23482943872 + 234.000000000000*I
sage: ComplexField(10)(maxima('2342.23482943872+234*%i'))
2300. + 230.*I
sage: ComplexField(200)(maxima('1+%i'))
1.0000000000000000000000000000000000000000000000000000000000 + 1.0000000000000000000000000000000000000000000000000000000000*I
sage: ComplexField(200)(maxima('sqrt(-2)'))
1.4142135623730950488016887242096980785696718753769480731767*I
sage: N(sqrt(-2), 200)
8.0751148893563733350506651837615871941533119425962889089783e-62 + 1.4142135623730950488016887242096980785696718753769480731767*I
"""
return C(self._sage_())
def _mpfr_(self, R):
"""
Return a mpfr real number equivalent to this Maxima object.
INPUT:
- ``R`` - real numbers field to convert into
OUTPUT: real
EXAMPLES::
sage: RealField(100)(maxima('sqrt(2)+1'))
2.4142135623730950488016887242
"""
return R(self._sage_())
def _complex_double_(self, C):
"""
Return a double precision complex number equivalent to this Maxima object.
INPUT:
- ``C`` - double precision complex numbers field to convert into
OUTPUT: complex
EXAMPLES::
sage: CDF(maxima('sqrt(2)+1'))
2.414213562373095
"""
return C(self._sage_())
def _real_double_(self, R):
"""
Return a double precision real number equivalent to this Maxima object.
INPUT:
- | |
= lambda n, N, M, x: 1. - scipy.stats.hypergeom.cdf(x-1, M, n, N),
HypothesisFunction = lambda data, SignificanceLevel: BenjaminiHochbergFDR(data, SignificanceLevel=SignificanceLevel)["Results"],
FilterSignificant = True, KEGGDictionaryVariable = None, KEGGDictionaryOptions = {}, MultipleListCorrection = None, MultipleList = False,
GeneDictionary = None, Species = "human", MolecularSpecies = "compound", NonUCSC = False, PyIOmicaDataDirectory = None):
"""Calculate input data over-representation analysis for KEGG: Kyoto Encyclopedia of Genes and Genomes pathways.
Input can be a list, a dictionary of lists or a clustering object.
Parameters:
data: pandas.DetaFrame or list
Data to analyze
AnalysisType: str, Default "Genomic"
Analysis methods that may be used, "Genomic", "Molecular" or "All"
GetGeneDictionaryOptions: dictionary, Default {}
A list of options that will be passed to this internal GetGeneDictionary function
AugmentDictionary: boolean, Default True
A choice whether or not to augment the current ConstantGeneDictionary global variable or create a new one
InputID: list, Default ["UniProt ID", "Gene Symbol"]
The kind of identifiers/accessions used as input
OutputID: str, Default "KEGG Gene ID"
A string value that specifies what kind of IDs/accessions to convert the input IDs/accession
numbers in the function's analysis
MolecularInputID: list, Default ["cpd"]
A string list to indicate the kind of ID to use for the input molecule entries
MolecularOutputID: str, Default "cpd"
A string list to indicate the kind of ID to use for the input molecule entries
KEGGAnalysisAssignerOptions: dictionary, Default {}
A list of options that will be passed to this internal KEGGAnalysisAssigner function
BackgroundSet: list, Default []
A list of IDs (e.g. gene accessions) that should be considered as the background for the calculation
KEGGOrganism: str, Default "hsa"
Indicates which organism (org) to use for \"Genomic\" type of analysis (default is human analysis: org=\"hsa\")
KEGGMolecular: str, Default "cpd"
Which database to use for molecular analysis (default is the compound database: cpd)
KEGGDatabase: str, Default "pathway"
KEGG database to use as the target database
PathwayLengthFilter: int, Default 2
Pathways to consider in the computation, by excluding pathways that have fewer items
compared to the PathwayLengthFilter value
ReportFilter: int, Default 1
Provides a cutoff for membership in ontologies/pathways/groups in selecting which terms/categories
to return. It is typically used in conjunction with ReportFilterFunction
ReportFilterFunction: function, Default np.greater_equal
Operator form will be used to compare against ReportFilter option value in selecting
which terms/categories to return
pValueCutoff: float, Default 0.05
A cutoff p-value for (adjusted) p-values to assess statistical significance
TestFunction: function, Default lambda n, N, M, x: 1. - scipy.stats.hypergeom.cdf(x-1, M, n, N)
A function used to calculate p-values
HypothesisFunction: function, Default lambda data, SignificanceLevel: BenjaminiHochbergFDR(data, SignificanceLevel=SignificanceLevel)["Results"]
Allows the choice of function for implementing multiple hypothesis testing considerations
FilterSignificant: boolean, Default True
Can be set to True to filter data based on whether the analysis result is statistically significant,
or if set to False to return all membership computations
KEGGDictionaryVariable: str, Default None
KEGG dictionary, and provides a KEGG annotation variable. If set to None, KEGGDictionary
will be used internally to automatically generate the default KEGG annotation
KEGGDictionaryOptions: dictionary, Default {}
A list of options to be passed to the internal KEGGDictionary function that provides the KEGG annotations
MultipleListCorrection: boolean, Default None
Specifies whether or not to correct for multi-omics analysis.
The choices are None, Automatic, or a custom number
MultipleList: boolean, Default False
Whether the input accessions list constituted a multi-omics list input that is annotated so
GeneDictionary: str, Default None
Existing variable to use as a gene dictionary in annotations. If set to None the default ConstantGeneDictionary will be used
Species: str, Default "human"
The species considered in the calculation, by default corresponding to human
MolecularSpecies: str, Default "compound"
The kind of molecular input
NonUCSC: , Default
If UCSC browser was used in determining an internal GeneDictionary used in ID translations,
where the KEGG identifiers for genes are number strings (e.g. 4790).The NonUCSC option can be set to True
if standard KEGG accessions are used in a user provided GeneDictionary variable,
in the form OptionValue[KEGGOrganism] <>:<>numberString, e.g. hsa:4790
PyIOmicaDataDirectory: str, Default None
Directory where the default package data is stored
Returns:
dictionary
Enrichment dictionary
Usage:
keggExample1 = KEGGAnalysis(["TAB1", "TNFSF13B", "MALT1", "TIRAP", "CHUK", "TNFRSF13C", "PARP1", "CSNK2A1", "CSNK2A2", "CSNK2B", "LTBR", "LYN", "MYD88",
"GADD45B", "ATM", "NFKB1", "NFKB2", "NFKBIA", "IRAK4", "PIAS4", "PLAU", "POLR3B", "NME1", "CTPS1", "POLR3A"])
"""
argsLocal = locals().copy()
global ConstantPyIOmicaDataDirectory
obtainConstantGeneDictionary(None, {}, True)
PyIOmicaDataDirectory = ConstantPyIOmicaDataDirectory if PyIOmicaDataDirectory==None else PyIOmicaDataDirectory
#Gene Identifier based analysis
if AnalysisType=="Genomic":
#Obtain OBO dictionary. If externally defined use user definition for OBODict Var
keggDict = KEGGDictionary(**KEGGDictionaryOptions) if KEGGDictionaryVariable==None else KEGGDictionaryVariable
#Obtain gene dictionary - if it exists can either augment with new information or Species or create new, if not exist then create variable
obtainConstantGeneDictionary(GeneDictionary, GetGeneDictionaryOptions, AugmentDictionary)
#get the right KEGG terms for the BackgroundSet requested and correct Species
Assignment = KEGGAnalysisAssigner(BackgroundSet=BackgroundSet, KEGGQuery1=KEGGDatabase, KEGGQuery2=KEGGOrganism, LengthFilter=PathwayLengthFilter) if KEGGAnalysisAssignerOptions=={} else KEGGAnalysisAssigner(**KEGGAnalysisAssignerOptions)
#Molecular based analysis
elif AnalysisType=="Molecular":
InputID = MolecularInputID
OutputID = MolecularOutputID
Species = MolecularSpecies
NonUCSC = True
KEGGOrganism = KEGGMolecular
MultipleListCorrection = None
keggDict = KEGGDictionary(**({"KEGGQuery1": "pathway", "KEGGQuery2": ""} if KEGGDictionaryOptions=={} else KEGGDictionaryOptions)) if KEGGDictionaryVariable==None else KEGGDictionaryVariable
#Obtain gene dictionary - if it exists can either augment with new information or Species or create new, if not exist then create variable
fileMolDict = os.path.join(PyIOmicaDataDirectory, "PyIOmicaMolecularDictionary.json.gz")
if os.path.isfile(fileMolDict):
GeneDictionary = dataStorage.read(fileMolDict, jsonFormat=True)[1]
else:
fileCSV = os.path.join(PackageDirectory, "data", "MathIOmicaMolecularDictionary.csv")
print('Attempting to read:', fileCSV)
if os.path.isfile(fileCSV):
with open(fileCSV, 'r') as tempFile:
tempLines = tempFile.readlines()
tempData = np.array([line.strip('\n').replace('"', '').split(',') for line in tempLines]).T
tempData = {'compound': {'pumchem': tempData[0].tolist(), 'cpd': tempData[1].tolist()}}
dataStorage.write((datetime.datetime.now().isoformat(), tempData), fileMolDict, jsonFormat=True)
else:
print("Could not find annotation file at " + fileMolDict + " Please either obtain an annotation file from mathiomica.org or provide a GeneDictionary option variable.")
return
GeneDictionary = dataStorage.read(fileMolDict, jsonFormat=True)[1]
obtainConstantGeneDictionary(GeneDictionary, {}, AugmentDictionary)
#Get the right KEGG terms for the BackgroundSet requested and correct Species
#If no specific options for function use BackgroundSet, Species request, length request
Assignment = KEGGAnalysisAssigner(BackgroundSet=BackgroundSet, KEGGQuery1=KEGGDatabase, KEGGQuery2=KEGGOrganism , LengthFilter=PathwayLengthFilter) if KEGGAnalysisAssignerOptions=={} else KEGGAnalysisAssigner(**KEGGAnalysisAssignerOptions)
#Gene Identifier and Molecular based analysis done concurrently
elif AnalysisType=='All':
argsMolecular = argsLocal.copy()
argsMolecular['AnalysisType'] = 'Molecular'
argsGenomic = argsLocal.copy()
argsGenomic['AnalysisType'] = 'Genomic'
return {"Molecular": KEGGAnalysis(**argsMolecular), "Genomic": KEGGAnalysis(**argsGenomic)}
#Abort
else:
print("AnalysisType %s is not a valid choice."%AnalysisType)
return
listToggle = False
#If the input is simply a list
if type(data) is list:
data = {'dummy': data}
listToggle = True
#The data may be a subgroup from a clustering object, i.e. a pd.DataFrame
if type(data) is pd.DataFrame:
id = list(data.index.get_level_values('id'))
source = list(data.index.get_level_values('source'))
data = [[id[i], source[i]] for i in range(len(data))]
data = {'dummy': data}
listToggle = True
returning = {}
#Check if a clustering object
if "linkage" in data.keys():
if MultipleListCorrection==None:
multiCorr = 1
elif MultipleListCorrection=='Automatic':
multiCorr = 1
for keyGroup in sorted([item for item in list(data.keys()) if not item=='linkage']):
for keySubGroup in sorted([item for item in list(data[keyGroup].keys()) if not item=='linkage']):
multiCorr = max(max(np.unique(data[keyGroup][keySubGroup]['data'].index.get_level_values('id'), return_counts=True)[1]), multiCorr)
else:
multiCorr = MultipleListCorrection
#Loop through the clustering object, calculate GO for each SubGroup
for keyGroup in sorted([item for item in list(data.keys()) if not item=='linkage']):
returning[keyGroup] = {}
for keySubGroup in sorted([item for item in list(data[keyGroup].keys()) if not item=='linkage']):
SubGroupMultiIndex = data[keyGroup][keySubGroup]['data'].index
SubGroupGenes = list(SubGroupMultiIndex.get_level_values('id'))
SubGroupMeta = list(SubGroupMultiIndex.get_level_values('source'))
SubGroupList = [[SubGroupGenes[i], SubGroupMeta[i]] for i in range(len(SubGroupMultiIndex))]
returning[keyGroup][keySubGroup] = internalAnalysisFunction({keySubGroup:SubGroupList},
multiCorr, MultipleList, OutputID, InputID, Species, len(Assignment[KEGGOrganism]["IDToPath"]),
pValueCutoff, ReportFilterFunction, ReportFilter, TestFunction, HypothesisFunction, FilterSignificant,
AssignmentForwardDictionary=Assignment[KEGGOrganism]['IDToPath'],
AssignmentReverseDictionary=Assignment[KEGGOrganism]['PathToID'],
prefix='hsa:' if AnalysisType=='Genomic' else '', infoDict=keggDict)[keySubGroup]
#The data is a dictionary of type {'Name1': [data1], 'Name2': [data2], ...}
else:
for key in list(data.keys()):
if MultipleListCorrection==None:
multiCorr = 1
elif MultipleList and MultipleListCorrection=='Automatic':
multiCorr = max(np.unique([item[0] for item in data[key]], return_counts=True)[1])
else:
multiCorr = MultipleListCorrection
returning.update(internalAnalysisFunction({key:data[key]}, multiCorr, MultipleList, OutputID, InputID, Species, len(Assignment[KEGGOrganism]["IDToPath"]),
pValueCutoff, ReportFilterFunction, ReportFilter, TestFunction, HypothesisFunction, FilterSignificant,
AssignmentForwardDictionary=Assignment[KEGGOrganism]['IDToPath'],
AssignmentReverseDictionary=Assignment[KEGGOrganism]['PathToID'],
prefix='hsa:' if AnalysisType=='Genomic' else '', infoDict=keggDict))
#If a single list was provided
returning = returning['dummy'] if listToggle else returning
return returning
def MassMatcher(data, accuracy, MassDictionaryVariable = None, MolecularSpecies | |
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/cloud/dialogflow/v2beta1/agent.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.cloud.dialogflow.v2beta1 Agents API."""
import functools
import pkg_resources
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import google.api_core.protobuf_helpers
from dialogflow_v2beta1.gapic import agents_client_config
from dialogflow_v2beta1.gapic import enums
from dialogflow_v2beta1.proto import agent_pb2
from google.protobuf import empty_pb2
from google.protobuf import struct_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution('dialogflow').version
# NOTE: generated GAPIC client -- see the EDITING INSTRUCTIONS comment at
# the top of this file; only documentation edits survive regeneration.
class AgentsClient(object):
    """
    Manages conversational agents.

    Refer to `agents documentation <https://dialogflow.com/docs/agents>`_ for
    more details about agents.

    Standard methods.
    """

    SERVICE_ADDRESS = 'dialogflow.googleapis.com:443'
    """The default address of the service."""

    # The scopes needed to make gRPC calls to all of the methods defined in
    # this service
    _DEFAULT_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )

    # The name of the interface for this client. This is the key used to find
    # method configuration in the client_config dictionary
    _INTERFACE_NAME = ('google.cloud.dialogflow.v2beta1.Agents')
@classmethod
def project_path(cls, project):
"""Returns a fully-qualified project resource name string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project, )
def __init__(self,
channel=None,
credentials=None,
client_config=agents_client_config.config,
client_info=None):
"""Constructor.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. If specified, then the ``credentials``
argument is ignored.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_config (dict):
A dictionary of call options for each method. If not specified
the default configuration is used. Generally, you only need
to set this if you're developing your own client library.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
if channel is not None and credentials is not None:
raise ValueError(
'channel and credentials arguments to {} are mutually '
'exclusive.'.format(self.__class__.__name__))
if channel is None:
channel = google.api_core.grpc_helpers.create_channel(
self.SERVICE_ADDRESS,
credentials=credentials,
scopes=self._DEFAULT_SCOPES)
self.agents_stub = (agent_pb2.AgentsStub(channel))
# Operations client for methods that return long-running operations
# futures.
self.operations_client = (
google.api_core.operations_v1.OperationsClient(channel))
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
interface_config = client_config['interfaces'][self._INTERFACE_NAME]
method_configs = google.api_core.gapic_v1.config.parse_method_configs(
interface_config)
self._get_agent = google.api_core.gapic_v1.method.wrap_method(
self.agents_stub.GetAgent,
default_retry=method_configs['GetAgent'].retry,
default_timeout=method_configs['GetAgent'].timeout,
client_info=client_info)
self._search_agents = google.api_core.gapic_v1.method.wrap_method(
self.agents_stub.SearchAgents,
default_retry=method_configs['SearchAgents'].retry,
default_timeout=method_configs['SearchAgents'].timeout,
client_info=client_info)
self._train_agent = google.api_core.gapic_v1.method.wrap_method(
self.agents_stub.TrainAgent,
default_retry=method_configs['TrainAgent'].retry,
default_timeout=method_configs['TrainAgent'].timeout,
client_info=client_info)
self._export_agent = google.api_core.gapic_v1.method.wrap_method(
self.agents_stub.ExportAgent,
default_retry=method_configs['ExportAgent'].retry,
default_timeout=method_configs['ExportAgent'].timeout,
client_info=client_info)
self._import_agent = google.api_core.gapic_v1.method.wrap_method(
self.agents_stub.ImportAgent,
default_retry=method_configs['ImportAgent'].retry,
default_timeout=method_configs['ImportAgent'].timeout,
client_info=client_info)
self._restore_agent = google.api_core.gapic_v1.method.wrap_method(
self.agents_stub.RestoreAgent,
default_retry=method_configs['RestoreAgent'].retry,
default_timeout=method_configs['RestoreAgent'].timeout,
client_info=client_info)
# Service calls
def get_agent(self,
parent,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT):
"""
Retrieves the specified agent.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.get_agent(parent)
Args:
parent (str): Required. The project that the agent to fetch is associated with.
Format: ``projects/<Project ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
Returns:
A :class:`~dialogflow_v2beta1.types.Agent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
request = agent_pb2.GetAgentRequest(parent=parent)
return self._get_agent(request, retry=retry, timeout=timeout)
def search_agents(self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT):
"""
Returns the list of agents.
Since there is at most one conversational agent per project, this method is
useful primarily for listing all agents across projects the caller has
access to. One can achieve that with a wildcard project collection id \"-\".
Refer to [List
Sub-Collections](https://cloud.google.com/apis/design/design_patterns#list_sub-collections).
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>>
>>> # Iterate over all results
>>> for element in client.search_agents(parent):
... # process element
... pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in client.search_agents(parent, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The project to list agents from.
Format: ``projects/<Project ID or '-'>``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~dialogflow_v2beta1.types.Agent` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
request = agent_pb2.SearchAgentsRequest(
parent=parent, page_size=page_size)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._search_agents, retry=retry, timeout=timeout),
request=request,
items_field='agents',
request_token_field='page_token',
response_token_field='next_page_token')
return iterator
def train_agent(self,
parent,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT):
"""
Trains the specified agent.
Operation<response: google.protobuf.Empty,
metadata: google.protobuf.Struct>
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.train_agent(parent)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The project that the agent to train is associated with.
Format: ``projects/<Project ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
Returns:
A :class:`~dialogflow_v2beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
request = agent_pb2.TrainAgentRequest(parent=parent)
operation = self._train_agent(request, retry=retry, timeout=timeout)
return google.api_core.operation.from_gapic(
operation,
self.operations_client,
empty_pb2.Empty,
metadata_type=struct_pb2.Struct)
def export_agent(self,
parent,
agent_uri=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT):
"""
Exports the specified agent to a ZIP file.
Operation<response: ExportAgentResponse,
metadata: google.protobuf.Struct>
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.export_agent(parent)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. The project that the agent to export is associated with.
Format: ``projects/<Project ID>``.
agent_uri (str): Optional. The URI to export the agent to. Note: The URI must start with
\"gs://\". If left unspecified, the serialized agent is returned inline.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
Returns:
A :class:`~dialogflow_v2beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and | |
<reponame>0u812/roadrunner<gh_stars>1-10
#
# @file TestXMLToken_newSetters.py
# @brief XMLToken_newSetters unit tests
#
# @author <NAME> (Python conversion)
# @author <NAME>
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/xml/test/TestXMLToken_newSetters.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestXMLToken_newSetters(unittest.TestCase):
  def test_XMLToken_newSetters_addAttributes1(self):
    """All three addAttr overloads succeed on a token created with attributes."""
    triple = libsbml.XMLTriple("test","","")
    attr = libsbml.XMLAttributes()
    token = libsbml.XMLToken(triple,attr)
    xt2 = libsbml.XMLTriple("name3", "http://name3.org/", "p3")
    # addAttr(name, value)
    i = token.addAttr( "name1", "val1")
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getAttributesLength() == 1 )
    self.assert_( token.isAttributesEmpty() == False )
    self.assert_( ( "name1" != token.getAttrName(0) ) == False )
    self.assert_( ( "val1" != token.getAttrValue(0) ) == False )
    # addAttr(name, value, namespace URI, prefix)
    i = token.addAttr( "name2", "val2", "http://name1.org/", "p1")
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getAttributesLength() == 2 )
    self.assert_( token.isAttributesEmpty() == False )
    self.assert_( ( "name2" != token.getAttrName(1) ) == False )
    self.assert_( ( "val2" != token.getAttrValue(1) ) == False )
    self.assert_( ( "http://name1.org/" != token.getAttrURI(1) ) == False )
    self.assert_( ( "p1" != token.getAttrPrefix(1) ) == False )
    # addAttr(XMLTriple, value)
    i = token.addAttr(xt2, "val2")
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getAttributesLength() == 3 )
    self.assert_( token.isAttributesEmpty() == False )
    self.assert_( ( "name3" != token.getAttrName(2) ) == False )
    self.assert_( ( "val2" != token.getAttrValue(2) ) == False )
    self.assert_( ( "http://name3.org/" != token.getAttrURI(2) ) == False )
    self.assert_( ( "p3" != token.getAttrPrefix(2) ) == False )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ xt2 ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    pass
  def test_XMLToken_newSetters_addAttributes2(self):
    """addAttr returns LIBSBML_INVALID_XML_OPERATION on a token created without attributes."""
    triple = libsbml.XMLTriple("test","","")
    token = libsbml.XMLToken(triple)
    xt2 = libsbml.XMLTriple("name3", "http://name3.org/", "p3")
    i = token.addAttr( "name1", "val1")
    self.assert_( i == libsbml.LIBSBML_INVALID_XML_OPERATION )
    self.assert_( token.getAttributesLength() == 0 )
    self.assert_( token.isAttributesEmpty() == True )
    i = token.addAttr( "name2", "val2", "http://name1.org/", "p1")
    self.assert_( i == libsbml.LIBSBML_INVALID_XML_OPERATION )
    self.assert_( token.getAttributesLength() == 0 )
    self.assert_( token.isAttributesEmpty() == True )
    i = token.addAttr(xt2, "val2")
    self.assert_( i == libsbml.LIBSBML_INVALID_XML_OPERATION )
    self.assert_( token.getAttributesLength() == 0 )
    self.assert_( token.isAttributesEmpty() == True )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ xt2 ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    pass
  def test_XMLToken_newSetters_addNamespaces1(self):
    """addNamespace succeeds on a token created with attributes."""
    triple = libsbml.XMLTriple("test","","")
    attr = libsbml.XMLAttributes()
    token = libsbml.XMLToken(triple,attr)
    self.assert_( token.getNamespacesLength() == 0 )
    self.assert_( token.isNamespacesEmpty() == True )
    i = token.addNamespace( "http://test1.org/", "test1")
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getNamespacesLength() == 1 )
    self.assert_( token.isNamespacesEmpty() == False )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    pass
  def test_XMLToken_newSetters_addNamespaces2(self):
    """addNamespace returns LIBSBML_INVALID_XML_OPERATION on a token created without attributes."""
    triple = libsbml.XMLTriple("test","","")
    token = libsbml.XMLToken(triple)
    self.assert_( token.getNamespacesLength() == 0 )
    self.assert_( token.isNamespacesEmpty() == True )
    i = token.addNamespace( "http://test1.org/", "test1")
    self.assert_( i == libsbml.LIBSBML_INVALID_XML_OPERATION )
    self.assert_( token.getNamespacesLength() == 0 )
    self.assert_( token.isNamespacesEmpty() == True )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    pass
  def test_XMLToken_newSetters_clearAttributes1(self):
    """clearAttributes empties a token whose attributes were set via setAttributes."""
    triple = libsbml.XMLTriple("test","","")
    attr = libsbml.XMLAttributes()
    token = libsbml.XMLToken(triple,attr)
    nattr = libsbml.XMLAttributes()
    xt1 = libsbml.XMLTriple("name1", "http://name1.org/", "p1")
    nattr.add(xt1, "val1")
    i = token.setAttributes(nattr)
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.isAttributesEmpty() == False )
    i = token.clearAttributes()
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.isAttributesEmpty() == True )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ nattr ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ xt1 ]; _dummyList[:] = []; del _dummyList
    pass
  def test_XMLToken_newSetters_clearNamespaces1(self):
    """clearNamespaces removes namespaces that were set via setNamespaces."""
    triple = libsbml.XMLTriple("test","","")
    attr = libsbml.XMLAttributes()
    token = libsbml.XMLToken(triple,attr)
    ns = libsbml.XMLNamespaces()
    self.assert_( token.getNamespacesLength() == 0 )
    self.assert_( token.isNamespacesEmpty() == True )
    ns.add( "http://test1.org/", "test1")
    i = token.setNamespaces(ns)
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getNamespacesLength() == 1 )
    self.assert_( token.isNamespacesEmpty() == False )
    i = token.clearNamespaces()
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getNamespacesLength() == 0 )
    self.assert_( token.isNamespacesEmpty() == True )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
    pass
  def test_XMLToken_newSetters_removeAttributes1(self):
    """removeAttr fails with LIBSBML_INDEX_EXCEEDS_SIZE for unknown attributes and succeeds via all overloads for present ones."""
    triple = libsbml.XMLTriple("test","","")
    attr = libsbml.XMLAttributes()
    token = libsbml.XMLToken(triple,attr)
    xt2 = libsbml.XMLTriple("name3", "http://name3.org/", "p3")
    xt1 = libsbml.XMLTriple("name5", "http://name5.org/", "p5")
    i = token.addAttr( "name1", "val1")
    i = token.addAttr( "name2", "val2", "http://name1.org/", "p1")
    i = token.addAttr(xt2, "val2")
    i = token.addAttr( "name4", "val4")
    self.assert_( token.getAttributes().getLength() == 4 )
    # Removals that reference attributes which are not present must fail
    # and leave the attribute set untouched.
    i = token.removeAttr(7)
    self.assert_( i == libsbml.LIBSBML_INDEX_EXCEEDS_SIZE )
    i = token.removeAttr( "name7")
    self.assert_( i == libsbml.LIBSBML_INDEX_EXCEEDS_SIZE )
    i = token.removeAttr( "name7", "namespaces7")
    self.assert_( i == libsbml.LIBSBML_INDEX_EXCEEDS_SIZE )
    i = token.removeAttr(xt1)
    self.assert_( i == libsbml.LIBSBML_INDEX_EXCEEDS_SIZE )
    self.assert_( token.getAttributes().getLength() == 4 )
    # Remove by index, by name, by name + URI, and by XMLTriple.
    i = token.removeAttr(3)
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getAttributes().getLength() == 3 )
    i = token.removeAttr( "name1")
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getAttributes().getLength() == 2 )
    i = token.removeAttr( "name2", "http://name1.org/")
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getAttributes().getLength() == 1 )
    i = token.removeAttr(xt2)
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getAttributes().getLength() == 0 )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ xt1 ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ xt2 ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    pass
  def test_XMLToken_newSetters_removeNamespaces(self):
    """removeNamespace by index fails for an out-of-range index and succeeds for a valid one."""
    triple = libsbml.XMLTriple("test","","")
    attr = libsbml.XMLAttributes()
    token = libsbml.XMLToken(triple,attr)
    token.addNamespace( "http://test1.org/", "test1")
    self.assert_( token.getNamespacesLength() == 1 )
    i = token.removeNamespace(4)
    self.assert_( i == libsbml.LIBSBML_INDEX_EXCEEDS_SIZE )
    self.assert_( token.getNamespacesLength() == 1 )
    i = token.removeNamespace(0)
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getNamespacesLength() == 0 )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
    pass
  def test_XMLToken_newSetters_removeNamespaces1(self):
    """removeNamespace by prefix fails for an unknown prefix and succeeds for a known one."""
    triple = libsbml.XMLTriple("test","","")
    attr = libsbml.XMLAttributes()
    token = libsbml.XMLToken(triple,attr)
    token.addNamespace( "http://test1.org/", "test1")
    self.assert_( token.getNamespacesLength() == 1 )
    i = token.removeNamespace( "test2")
    self.assert_( i == libsbml.LIBSBML_INDEX_EXCEEDS_SIZE )
    self.assert_( token.getNamespacesLength() == 1 )
    i = token.removeNamespace( "test1")
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.getNamespacesLength() == 0 )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
    pass
  def test_XMLToken_newSetters_setAttributes1(self):
    """setAttributes succeeds on a token created with attributes."""
    triple = libsbml.XMLTriple("test","","")
    attr = libsbml.XMLAttributes()
    token = libsbml.XMLToken(triple,attr)
    nattr = libsbml.XMLAttributes()
    xt1 = libsbml.XMLTriple("name1", "http://name1.org/", "p1")
    nattr.add(xt1, "val1")
    i = token.setAttributes(nattr)
    self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
    self.assert_( token.isAttributesEmpty() == False )
    # Mimics the explicit object destruction of the original C test.
    _dummyList = [ nattr ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ xt1 ]; _dummyList[:] = []; del _dummyList
    pass
def test_XMLToken_newSetters_setAttributes2(self):
triple = libsbml.XMLTriple("test","","")
token = libsbml.XMLToken(triple)
nattr = libsbml.XMLAttributes()
xt1 = libsbml.XMLTriple("name1", "http://name1.org/", "p1")
nattr.add(xt1, "val1")
i = token.setAttributes(nattr)
self.assert_( i == libsbml.LIBSBML_INVALID_XML_OPERATION )
self.assert_( token.isAttributesEmpty() == True )
_dummyList | |
)
query_filter_string, query_params = self._build_query(
searchable_keywords=searchable_keywords,
fuzzymatching=fuzzymatching,
limit=limit,
offset=offset,
fields=fields,
search_filters=search_params
)
query_filter_string = re.sub(
r'StudyInstanceUID =',
'series.StudyInstanceUID =',
query_filter_string
)
if all_series:
query_string = ' '.join([
'SELECT * FROM series',
'INNER JOIN studies',
'ON series.StudyInstanceUID = studies.StudyInstanceUID',
query_filter_string
])
else:
includefields = [
'Modality',
'SeriesInstanceUID',
'SeriesNumber',
]
if fields is not None:
includefields += [
f
for f in fields
if f in {
'StudyInstanceUID',
'StudyID',
'StudyDate',
'StudyTime',
'PatientName',
'PatientID',
'PatientSex',
'PatientBirthDate',
}
]
includefields_string = ', '.join(includefields)
includefields_string = includefields_string.replace(
'StudyInstanceUID',
'studies.StudyInstanceUID'
)
query_string = ' '.join([
f'SELECT {includefields_string} FROM series',
'INNER JOIN studies',
'ON series.StudyInstanceUID = studies.StudyInstanceUID',
query_filter_string
])
self._cursor.execute(query_string, query_params)
results = self._cursor.fetchall()
collection = []
for row in results:
dataset = Dataset()
for key in row.keys():
if not key.startswith('_'):
setattr(dataset, key, row[key])
if all_series:
n_series_in_study = self._count_series_in_study(
study_instance_uid=dataset.StudyInstanceUID
)
dataset.NumberOfStudyRelatedSeries = n_series_in_study
n_instances_in_study = self._count_instances_in_study(
study_instance_uid=dataset.StudyInstanceUID
)
dataset.NumberOfStudyRelatedInstances = n_instances_in_study
modalities_in_study = self._get_modalities_in_study(
study_instance_uid=dataset.StudyInstanceUID
)
dataset.ModalitiesInStudy = modalities_in_study
n_instances_in_series = self._count_instances_in_series(
series_instance_uid=dataset.SeriesInstanceUID,
)
dataset.NumberOfSeriesRelatedInstances = n_instances_in_series
collection.append(dataset.to_json_dict())
return collection
    def search_for_instances(
        self,
        study_instance_uid: Optional[str] = None,
        series_instance_uid: Optional[str] = None,
        fuzzymatching: Optional[bool] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
        fields: Optional[Sequence[str]] = None,
        search_filters: Optional[Dict[str, Any]] = None,
        get_remaining: bool = False
    ) -> List[Dict[str, dict]]:
        """Search for instances.
        Parameters
        ----------
        study_instance_uid: Union[str, None], optional
            Study Instance UID
        series_instance_uid: Union[str, None], optional
            Series Instance UID
        fuzzymatching: Union[bool, None], optional
            Whether fuzzy semantic matching should be performed
        limit: Union[int, None], optional
            Maximum number of results that should be returned
        offset: Union[int, None], optional
            Number of results that should be skipped
        fields: Union[Sequence[str], None], optional
            Names of fields (attributes) that should be included in results
        search_filters: Union[dict, None], optional
            Search filter criteria as key-value pairs, where *key* is a keyword
            or a tag of the attribute and *value* is the expected value that
            should match
        get_remaining: bool, optional
            Whether remaining results should be included
        Returns
        -------
        List[Dict[str, dict]]
            Instances
            (see `Instance Result Attributes <http://dicom.nema.org/medical/dicom/current/output/chtml/part18/sect_6.7.html#table_6.7.1-2b>`_)
        Note
        ----
        No additional `fields` are currently supported.
        """ # noqa: E501
        # NOTE(review): ``get_remaining`` is accepted and documented but never
        # referenced below — confirm whether it should affect the query.
        # Copy the filters so the caller's dict is never mutated.
        if search_filters is None:
            search_params = {}
        else:
            search_params = dict(search_filters)
        # all_instances: no UID scope was given at all.
        # study_instances: scoped to a study but not to a series.
        all_instances = True
        study_instances = True
        if study_instance_uid is None and series_instance_uid is None:
            logger.info('search for instances')
        else:
            if study_instance_uid is None:
                raise TypeError(
                    'Study Instance UID must be specified if '
                    'Series Instance UID is specified.'
                )
            if series_instance_uid is None:
                all_instances = False
                search_params['StudyInstanceUID'] = study_instance_uid
                logger.info(
                    f'search for instances of study "{study_instance_uid}"'
                )
            else:
                all_instances = False
                study_instances = False
                search_params['StudyInstanceUID'] = study_instance_uid
                search_params['SeriesInstanceUID'] = series_instance_uid
                logger.info(
                    f'search for instances of series "{series_instance_uid}" '
                    f'of study "{study_instance_uid}"'
                )
        # Instance-level queries may filter on study-, series-, and
        # instance-level attributes.
        searchable_keywords = list(self._attributes[_QueryResourceType.STUDIES])
        searchable_keywords.extend(
            self._attributes[_QueryResourceType.SERIES]
        )
        searchable_keywords.extend(
            self._attributes[_QueryResourceType.INSTANCES]
        )
        query_filter_string, query_params = self._build_query(
            searchable_keywords=searchable_keywords,
            fuzzymatching=fuzzymatching,
            limit=limit,
            offset=offset,
            fields=fields,
            search_filters=search_params
        )
        # Qualify UID columns that exist in more than one of the joined
        # tables, otherwise the generated SQL would be ambiguous.
        query_filter_string = re.sub(
            r'StudyInstanceUID =',
            'instances.StudyInstanceUID =',
            query_filter_string
        )
        query_filter_string = re.sub(
            r'SeriesInstanceUID =',
            'instances.SeriesInstanceUID =',
            query_filter_string
        )
        if all_instances:
            # Unscoped query: select all columns of the three-table join.
            query_string = ' '.join([
                'SELECT * FROM instances',
                'INNER JOIN series',
                'ON instances.SeriesInstanceUID = series.SeriesInstanceUID',
                'INNER JOIN studies',
                'ON instances.StudyInstanceUID = studies.StudyInstanceUID',
                query_filter_string
            ])
        else:
            # Scoped query: select only the default instance-level attributes
            # plus any requested fields that belong to the parent resources.
            includefields = [
                'SOPClassUID',
                'SOPInstanceUID',
                'InstanceNumber',
                'Rows',
                'Columns',
                'BitsAllocated',
                'NumberOfFrames',
                'TransferSyntaxUID',
            ]
            if study_instances:
                # Study-scoped: series-level attributes are returned by
                # default; study/patient attributes only on request.
                includefields += [
                    'Modality',
                    'SeriesInstanceUID',
                    'SeriesNumber',
                ]
                if fields is not None:
                    includefields += [
                        f
                        for f in fields
                        if f in {
                            'StudyInstanceUID',
                            'StudyID',
                            'StudyDate',
                            'StudyTime',
                            'PatientName',
                            'PatientID',
                            'PatientSex',
                            'PatientBirthDate',
                        }
                    ]
            else:
                # Series-scoped: both study- and series-level attributes are
                # optional and only included on request.
                if fields is not None:
                    includefields += [
                        f
                        for f in fields
                        if f in {
                            'StudyInstanceUID',
                            'StudyID',
                            'StudyDate',
                            'StudyTime',
                            'PatientName',
                            'PatientID',
                            'PatientSex',
                            'PatientBirthDate',
                            'Modality',
                            'SeriesInstanceUID',
                            'SeriesNumber',
                        }
                    ]
            includefields_string = ', '.join(includefields)
            # Qualify the UID columns in the SELECT list as well.
            includefields_string = includefields_string.replace(
                'SeriesInstanceUID',
                'series.SeriesInstanceUID'
            )
            includefields_string = includefields_string.replace(
                'StudyInstanceUID',
                'studies.StudyInstanceUID'
            )
            query_string = ' '.join([
                f'SELECT {includefields_string} FROM instances',
                'INNER JOIN series',
                'ON instances.SeriesInstanceUID = series.SeriesInstanceUID',
                'INNER JOIN studies',
                'ON instances.StudyInstanceUID = studies.StudyInstanceUID',
                query_filter_string
            ])
        self._cursor.execute(query_string, query_params)
        results = self._cursor.fetchall()
        collection = []
        for row in results:
            # Copy each public column of the row into a pydicom Dataset;
            # columns starting with '_' are internal and skipped.
            dataset = Dataset()
            for key in row.keys():
                if not key.startswith('_'):
                    setattr(dataset, key, row[key])
            if all_instances:
                # Unscoped results also carry computed study-level counts.
                n_series_in_study = self._count_series_in_study(
                    study_instance_uid=dataset.StudyInstanceUID
                )
                dataset.NumberOfStudyRelatedSeries = n_series_in_study
                n_instances_in_study = self._count_instances_in_study(
                    study_instance_uid=dataset.StudyInstanceUID
                )
                dataset.NumberOfStudyRelatedInstances = n_instances_in_study
                modalities_in_study = self._get_modalities_in_study(
                    study_instance_uid=dataset.StudyInstanceUID
                )
                dataset.ModalitiesInStudy = modalities_in_study
            if all_instances or study_instances:
                # Series-level count is available whenever the result rows
                # include a SeriesInstanceUID column.
                n_instances_in_series = self._count_instances_in_series(
                    series_instance_uid=dataset.SeriesInstanceUID,
                )
                dataset.NumberOfSeriesRelatedInstances = n_instances_in_series
            collection.append(dataset.to_json_dict())
        return collection
def retrieve_bulkdata(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None,
byte_range: Optional[Tuple[int, int]] = None
) -> List[bytes]:
"""Retrieve bulk data at a given location.
Parameters
----------
url: str
Location of the bulk data
media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional
Acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
byte_range: Union[Tuple[int, int], None], optional
Start and end of byte range
Returns
-------
Iterator[bytes]
Bulk data items
Raises
------
IOError
When requested resource is not found at `url`
""" # noqa: E501
iterator = self.iter_bulkdata(
url=url,
media_types=media_types,
byte_range=byte_range
)
return list(iterator)
def iter_bulkdata(
self,
url: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None,
byte_range: Optional[Tuple[int, int]] = None
) -> Iterator[bytes]:
"""Iterate over bulk data items at a given location.
Parameters
----------
url: str
Location of the bulk data
media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional
Acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
byte_range: Union[Tuple[int, int], None], optional
Start and end of byte range
Returns
-------
Iterator[bytes]
Bulk data items
Raises
------
IOError
When requested resource is not found at `url`
""" # noqa: E501
# The retrieve_study_metadata, retrieve_series_metadata, and
# retrieve_instance_metadata methods currently include all bulkdata
# into metadata resources by value rather than by reference, i.e.,
# using the "InlineBinary" rather than the "BulkdataURI" key.
# Therefore, no valid URL should exist for any bulkdata at this point.
# If that behavior gets changed, i.e., if bulkdata gets included into
# metadata using "BulkdataURI", then the implementation of this method
# will need to change as well.
raise IOError(f'Resource does not exist: "{url}".')
    def retrieve_study_metadata(
        self,
        study_instance_uid: str,
    ) -> List[Dict[str, dict]]:
        """Retrieve metadata of instances in a study.
        Parameters
        ----------
        study_instance_uid: str
            Study Instance UID
        Returns
        -------
        List[Dict[str, Any]]
            Metadata of each instance in study
        """
        logger.info(
            'retrieve metadata of all instances '
            f'of study "{study_instance_uid}"'
        )
        series_index = self._get_series(study_instance_uid)
        collection = []
        # NOTE(review): rows are unpacked here as (series_uid, study_uid),
        # while iter_study unpacks the same _get_series result as
        # (study_uid, series_uid). The two orders contradict each other, so
        # one of these call sites is likely swapping the UIDs — confirm the
        # column order returned by _get_series.
        for series_instance_uid, study_instance_uid in series_index:
            collection.extend(
                self.retrieve_series_metadata(
                    study_instance_uid=study_instance_uid,
                    series_instance_uid=series_instance_uid,
                )
            )
        return collection
    def iter_study(
        self,
        study_instance_uid: str,
        media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None
    ) -> Iterator[Dataset]:
        """Iterate over all instances of a study.
        Parameters
        ----------
        study_instance_uid: str
            Study Instance UID
        media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional
            Acceptable media types and optionally the UIDs of the
            corresponding transfer syntaxes
        Returns
        -------
        Iterator[pydicom.dataset.Dataset]
            Instances
        """ # noqa: E501
        logger.info(
            f'iterate over all instances of study "{study_instance_uid}"'
        )
        series_index = self._get_series(study_instance_uid)
        # NOTE(review): rows are unpacked here as (study_uid, series_uid) —
        # rebinding the 'study_instance_uid' parameter — while
        # retrieve_study_metadata unpacks the same _get_series result as
        # (series_uid, study_uid). The two orders contradict each other;
        # confirm the column order returned by _get_series.
        for study_instance_uid, series_instance_uid in series_index:
            uids = self._get_instances(
                study_instance_uid=study_instance_uid,
                series_instance_uid=series_instance_uid,
            )
            # Instance rows are (study UID, series UID, SOP Instance UID),
            # matching the unpacking used in iter_series.
            for study_instance_uid, series_instance_uid, sop_instance_uid in uids: # noqa
                yield self.retrieve_instance(
                    study_instance_uid=study_instance_uid,
                    series_instance_uid=series_instance_uid,
                    sop_instance_uid=sop_instance_uid,
                    media_types=media_types
                )
def retrieve_study(
self,
study_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None
) -> List[Dataset]:
"""Retrieve all instances of a study.
Parameters
----------
study_instance_uid: str
Study Instance UID
media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional
Acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
Returns
-------
Sequence[pydicom.dataset.Dataset]
Instances
""" # noqa: E501
logger.info(f'retrieve all instances of study "{study_instance_uid}"')
iterator = self.iter_study(
study_instance_uid=study_instance_uid,
media_types=media_types,
)
return list(iterator)
def iter_series(
self,
study_instance_uid: str,
series_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None
) -> Iterator[Dataset]:
"""Iterate over all instances of a series.
Parameters
----------
study_instance_uid: str
Study Instance UID
series_instance_uid: str
Series Instance UID
media_types: Union[Tuple[Union[str, Tuple[str, str]], ...], None], optional
Acceptable media types and optionally the UIDs of the
corresponding transfer syntaxes
Returns
-------
Iterator[pydicom.dataset.Dataset]
Instances
""" # noqa: E501
logger.info(
f'iterate over all instances of series "{series_instance_uid}" '
f'of study "{study_instance_uid}"'
)
instance_index = self._get_instances(
study_instance_uid=study_instance_uid,
series_instance_uid=series_instance_uid,
)
for i in instance_index:
study_instance_uid, series_instance_uid, sop_instance_uid = i
yield self.retrieve_instance(
study_instance_uid=study_instance_uid,
series_instance_uid=series_instance_uid,
sop_instance_uid=sop_instance_uid,
media_types=media_types
)
def retrieve_series(
self,
study_instance_uid: str,
series_instance_uid: str,
media_types: Optional[Tuple[Union[str, Tuple[str, str]], ...]] = None
) -> | |
<reponame>Defense-Cyber-Crime-Center/ViperMonkey
"""
ViperMonkey - Strip useless lines from Visual Basic code.
Author: <NAME> - http://www.decalage.info
License: BSD, see source code or documentation
Project Repository:
https://github.com/decalage2/ViperMonkey
"""
#=== LICENSE ==================================================================
# ViperMonkey is copyright (c) 2015-2018 <NAME> (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
# CHANGELOG:
# 2015-02-12 v0.01 PL: - first prototype
# 2015-2016 PL: - many changes
# 2016-10-06 v0.03 PL: - fixed vipermonkey.core import
# 2016-12-11 v0.04 PL: - fixed relative import for core package (issue #17)
# 2018-01-12 v0.05 KS: - lots of bug fixes and additions by <NAME> (PR #23)
# 2018-06-20 v0.06 PL: - fixed issue #28, import prettytable
# 2018-08-17 v0.07 KS: - lots of bug fixes and additions by <NAME> (PR #34)
# PL: - added ASCII art banner
#------------------------------------------------------------------------------
# TODO:
# TODO: detect subs/functions with same name (in different modules)
# TODO: can several projects call each other?
# TODO: Word XML with several projects?
# - cleanup main, use optionparser
# - option -e to extract and evaluate constant expressions
# - option -t to trace execution
# - option --entrypoint to specify the Sub name to use as entry point
# - use olevba to get all modules from a file
# Environ => VBA object
# vbCRLF, etc => Const (parse to string)
# py2vba: convert python string to VBA string, e.g. \" => "" (for olevba to scan expressions) - same thing for ints, etc?
#TODO: expr_int / expr_str
#TODO: eval(parent) => for statements to set local variables into parent functions/procedures + main VBA module
#TODO: __repr__ for printing
#TODO: Environ('str') => '%str%'
#TODO: determine the order of Auto subs for Word, Excel
# TODO later:
# - add VBS support (two modes?)
import sys
import re
from logger import log
import vba_context
from random import randint
def is_useless_dim(line):
    """
    Decide whether a Dim statement can be dropped without affecting emulation.

    Only Byte/Long/Integer type information matters when emulating, and Dim
    statements that set an initial value, hold multiple statements, continue
    onto the next line, or shadow a builtin VBA function name must be kept.
    """
    stripped = line.strip()
    # Non-Dim lines are never candidates for removal.
    if not stripped.startswith("Dim "):
        return False
    # Keep the Dim if it carries type info we use, assigns a value, or
    # contains multiple statements.
    keep_markers = ("Byte", "Long", "Integer", ":", "=")
    useless = not any(marker in stripped for marker in keep_markers)
    # A trailing '_' means the statement continues on the next line; keep it.
    if stripped.endswith("_"):
        useless = False
    # Keep the Dim when the declared name collides with a builtin VBA
    # function name.
    lowered = stripped.lower()
    for builtin_name in vba_context.VBA_LIBRARY.keys():
        if builtin_name in lowered:
            useless = False
    return useless
def is_interesting_call(line, external_funcs, local_funcs):
    """
    Return True if the line calls a function we want to track: a known
    suspicious builtin, a locally defined function, or a declared external
    (DLL) function.
    """
    # Builtin calls of interest, plus every locally defined function.
    interesting = ["CreateProcessA", "CreateProcessW", ".run", "CreateObject",
                   "Open", "CreateMutex", "CreateRemoteThread", "InternetOpen",
                   ".Open", "GetObject", "Create", ".Create", "Environ",
                   "CreateTextFile", ".CreateTextFile", "Eval", ".Eval", "Run",
                   "SetExpandedStringValue", "WinExec", "URLDownloadToFile", "Print",
                   "Split"] + list(local_funcs)
    if any(name in line for name in interesting):
        return True
    # Check calls to declared external (Declare Function ... Lib ...) functions.
    for decl in external_funcs:
        if "Function" not in decl or "Lib" not in decl:
            continue
        # The function name sits between the 'Function' and 'Lib' keywords.
        name_start = decl.index("Function") + len("Function")
        name_end = decl.index("Lib")
        if decl[name_start:name_end].strip() in line:
            return True
    # Not a call we are tracking.
    return False
def is_useless_call(line):
    """
    See if the given line contains a useless do-nothing function call.

    A call is useless when a side-effect-free function (Cos, Log, ...) is
    invoked as a standalone statement, so its return value is discarded.
    Fix vs. original: removed the duplicate "Cos" from the set literal and
    replaced the linear scan with a set membership test.
    """
    # These are the functions that do nothing if they appear on a line by themselves.
    # TODO: Add more functions as needed.
    useless_funcs = frozenset(["Cos", "Log", "Exp", "Sin", "Tan", "DoEvents"])
    # Assignment lines use the return value, so they are never useless.
    if "=" in line:
        return False
    # Nothing is being assigned. Pull out the name of the called function
    # and see if it is one of the do-nothing functions.
    compact = line.replace(" ", "")
    called_func = compact[:compact.index("(")] if "(" in compact else compact
    return called_func.strip() in useless_funcs
def collapse_macro_if_blocks(vba_code):
    """
    When emulating we only pick a single block from a #if statement. Speed up parsing
    by picking the largest block and strip out the rest.
    """
    log.debug("Collapsing macro blocks...")
    finished_blocks = None   # all completed branches of the current #If
    branch_lines = None      # lines of the branch currently being read
    out = ""
    for raw_line in vba_code.split("\n"):
        stripped = raw_line.strip()
        # Not inside a #If: either start tracking one or pass the line through.
        if finished_blocks is None:
            if stripped.startswith("#If"):
                log.debug("Start block " + stripped)
                finished_blocks = []
                branch_lines = []
                out += "' STRIPPED LINE\n"
            else:
                out += raw_line + "\n"
            continue
        # Inside a #If: a new branch begins at #Else/Else.
        if stripped.startswith("#Else") or stripped.startswith("Else"):
            finished_blocks.append(branch_lines)
            log.debug("Else if " + stripped)
            log.debug("Save block " + str(branch_lines))
            branch_lines = []
            out += "' STRIPPED LINE\n"
            continue
        # #End closes the #If: keep only the largest branch.
        if stripped.startswith("#End"):
            log.debug("End if " + stripped)
            finished_blocks.append(branch_lines)
            # max() with key=len returns the first of equal-length branches,
            # matching the original strictly-greater comparison.
            biggest_block = max(finished_blocks, key=len)
            for kept_line in biggest_block:
                out += kept_line + "\n"
            log.debug("Pick block " + str(biggest_block))
            finished_blocks = None
            branch_lines = None
            continue
        # Ordinary line inside the current branch.
        branch_lines.append(raw_line)
    # Return the stripped VBA.
    return out
def fix_unbalanced_quotes(vba_code):
    """
    Fix lines with missing double quotes.
    """
    # Repair common invalid string assignment patterns first.
    vba_code = re.sub(r"(\w+)\s+=\s+\"\r?\n", r'\1 = ""\n', vba_code)
    vba_code = re.sub(r"(\w+\s+=\s+\")(:[^\"]+)\r?\n", r'\1"\2\n', vba_code)
    vba_code = re.sub(r"([=>])\s*\"\s+[Tt][Hh][Ee][Nn]", r'\1 "" Then', vba_code)
    # Balance any line that still has an odd number of double quotes by
    # doubling the last quote on the line.
    fixed = ""
    for line in vba_code.split("\n"):
        if line.count('"') % 2 != 0:
            pos = line.rindex('"')
            line = line[:pos] + '"' + line[pos:]
        fixed += line + "\n"
    # Return the balanced code.
    return fixed
def fix_multiple_assignments(line):
    """
    Rewrite chained assignments like 'a = b = 1' as one assignment per line.
    Lines that are not multiple assignments are returned unchanged.
    """
    # Pull out the chain of assignment targets and the final value.
    matches = re.findall(r"((?:\w+\s*=\s*){2,})(.+)", line)
    if not matches:
        return line
    chain, value = matches[0]
    # Emit one 'name = value' line per target in the chain.
    result = ""
    for name in chain.replace(" ", "").split("="):
        name = name.strip()
        if name:
            result += name + " = " + value + "\n"
    return result
def fix_skipped_1st_arg(vba_code):
"""
Replace calls like foo(, 1, ...) with foo(SKIPPED_ARG, 1, ...).
"""
# We don't want to replace things like this in string literals. Temporarily
# pull out the string literals from the line.
# Find all the string literals and make up replacement names.
strings = {}
in_str = False
curr_str = None
for c in vba_code:
# Start/end of string?
if (c == '"'):
# Start of string?
if (not in_str):
curr_str = ""
in_str = True
# End of string.
else:
# Map a temporary name to the current string.
str_name = "A_STRING_LITERAL_" + str(randint(0, | |
distance between two points.
- The minimum distance between a point and an edge.
- The minimum distance between two edges.
Parameters
----------
entity1
A ConstrainedSketchVertex, Datum point, MeshNode, or Edge specifying the first entity from which to
measure.
entity2
A ConstrainedSketchVertex, Datum point, MeshNode, or Edge specifying the second entity to which to
measure.
Returns
-------
distance: float
A Float specifying the distance between *entity1* and *entity2*.
"""
pass
    def getLength(self, edges: tuple[Edge]):
        """This method returns the length of a given edge or group of edges.

        Parameters
        ----------
        edges
            A sequence of Edge objects whose total length the method will calculate.

        Returns
        -------
        length: float
            A Float specifying the total length
        """
        # Stub: the actual implementation is supplied by the Abaqus kernel.
        pass
    def getPerimeter(self, faces: tuple[Face]):
        """This method returns the total perimeter of a given face or group of faces.

        All faces need to be on the same part. If the specified faces have shared
        edges, these edges are excluded from the computation, thus providing the
        length of the outer perimeter of the specified faces.

        Parameters
        ----------
        faces
            A sequence of Face objects whose perimeter the method will calculate.

        Returns
        -------
        perimeter: float
            A Float specifying the perimeter
        """
        # Stub: the actual implementation is supplied by the Abaqus kernel.
        pass
    def getVolume(self, cells: tuple[Cell], relativeAccuracy: float = 0):
        """This method returns the volume of a given cell or group of cells.

        Parameters
        ----------
        cells
            A sequence of Cell objects whose volume the method will calculate.
        relativeAccuracy
            A Float specifying the relative accuracy of the computation. The default value is
            0.000001 (0.0001%).
            NOTE(review): the signature default is 0, which disagrees with the
            documented 0.000001 -- confirm which value the kernel actually uses.

        Returns
        -------
        volume: float
            A Float specifying the total volume of the given cells
        """
        # Stub: the actual implementation is supplied by the Abaqus kernel.
        # (Original docstring said "volume area" and described an area sum;
        # corrected to describe the volume this method returns.)
        pass
    def getMassProperties(self, regions: str = '', relativeAccuracy: SymbolicConstant = LOW, useMesh: Boolean = False,
                          specifyDensity: Boolean = False, density: str = '', specifyThickness: Boolean = False,
                          thickness: str = '', miAboutCenterOfMass: Boolean = True, miAboutPoint: tuple = ()):
        """This method returns the mass properties of a part or region.

        Only beams, trusses, shells, solids, point, nonstructural mass, and
        rotary inertia elements are supported.

        Parameters
        ----------
        regions
            A MeshElementArray, CellArray, FaceArray, or EdgeArray specifying the regions whose mass
            properties are to be queried. The whole part is queried by default.
        relativeAccuracy
            A SymbolicConstant specifying the relative accuracy for geometry computation. Possible
            values are LOW, MEDIUM and HIGH. The default value is LOW.
        useMesh
            A Boolean specifying whether the mesh should be used in the computation if the geometry
            is meshed. The default value is False.
        specifyDensity
            A Boolean specifying whether a user-specified density should be used in regions with
            density errors such as undefined material density. The default value is False.
        density
            A double value specifying the user-specified density value to be used in regions with
            density errors. The user-specified density should be greater than 0.
        specifyThickness
            A Boolean specifying whether a user-specified thickness should be used in regions with
            thickness errors such as undefined thickness. The default value is False.
        thickness
            A double value specifying the user-specified thickness value to be used in regions with
            thickness errors. The user-specified thickness should be greater than 0.
        miAboutCenterOfMass
            A Boolean specifying if the moments of inertia should be evaluated about the center of
            mass. The default value is True.
        miAboutPoint
            A tuple of three floats specifying the coordinates of the point about which to evaluate
            the moment of inertia. By default if the moments of inertia are not being evaluated
            about the center of mass, they will be evaluated about the origin.

        Returns
        -------
        properties: dict
            A Dictionary object with the following items:
            *area*: None or a Float specifying the sum of the area of the specified faces. The area
            is computed only for one side for shells.
            *areaCentroid*: None or a tuple of three Floats representing the coordinates of the area
            centroid.
            *volume*: None or a Float specifying the volume of the specified regions.
            *volumeCentroid*: None or a tuple of three Floats representing the coordinates of the
            volume centroid.
            *massFromMassPerUnitSurfaceArea*: None or a Float specifying the mass due to mass per
            unit surface area.
            *mass*: None or a Float specifying the mass of the specified regions. It is the total
            mass and includes mass from quantities such as mass per unit surface area.
            *centerOfMass*: None or a tuple of three Floats representing the coordinates of the
            center of mass.
            *momentOfInertia*: None or a tuple of six Floats representing the moments of inertia
            about the center of mass or about the point specified.
            *warnings*: A tuple of SymbolicConstants representing the problems encountered while
            computing the mass properties. Possible SymbolicConstants are:
            UNSUPPORTED_ENTITIES: Some unsupported entities exist in the specified region. The mass
            properties are computed only for beams, trusses, shells, solids, point and
            non-structural mass elements and rotary inertia elements. The mass properties are not
            computed for axisymmetric elements, springs, connectors, gaskets or any other elements.
            MISSING_THICKNESS: For some regions, the section definitions are missing thickness
            values.
            ZERO_THICKNESS: For some regions, the section definitions have a zero thickness value.
            VARIABLE_THICKNESS: The nodal thickness or field thickness specified for some regions
            has been ignored.
            NON_APPLICABLE_THICKNESS: For some regions, the thickness value is not applicable to the
            corresponding sections specified on the regions.
            MISSING_DENSITY: For some regions, the section definitions are missing material density
            values.
            MISSING_MATERIAL_DEFINITION: For some regions, the material definition is missing.
            ZERO_DENSITY: For some regions, the section definitions have a zero material density
            value.
            UNSUPPORTED_DENSITY: For some regions, either a negative material density or a
            temperature dependent density has been specified, or the material value is missing for
            one or more plies in the composite section.
            SHELL_OFFSETS: For shells, this method does not account for any offsets specified.
            MISSING_SECTION_DEFINITION: For some regions, the section definition is missing.
            UNSUPPORTED_SECTION_DEFINITION: The section definition provided for some regions is not
            supported.
            REINFORCEMENTS: This method does not account for any reinforcements specified on the
            model.
            SMEARED_PROPERTIES: For regions with composite section assignments, the density is
            smeared across the thickness. The volume centroid and center of mass computations for a
            composite shell use a lumped mass approach where the volume and mass is assumed to be
            lumped in the plane of the shell. As a result of these approximations the volume
            centroid, center of mass and moments of inertia may be slightly inaccurate for regions
            with composite section assignments.
            UNSUPPORTED_NON_STRUCTURAL_MASS_ENTITIES: This method does not account for any
            non-structural mass on wires.
            INCORRECT_MOMENT_OF_INERTIA: For geometry regions with non-structural mass per volume,
            the non-structural mass is assumed to be a point mass at the centroid of the regions.
            Thus, the moments of inertia may be inaccurate as the distribution of the non-structural
            mass is not accounted for. Use the mesh for accurately computing the moments of inertia.
            MISSING_BEAM_ORIENTATIONS: For some regions with beam section assignments, the beam
            section orientations are missing.
            UNSUPPORTED_BEAM_PROFILES: This method supports the Box, Pipe, Circular, Rectangular,
            Hexagonal, Trapezoidal, I, L, T, Arbitrary, and Tapered beam profiles. Any other beam
            profile is not supported.
            TAPERED_BEAM_MI: Moment of inertia calculations for tapered beams are not accurate.
            SUBSTRUCTURE_INCORRECT_PROPERTIES: The user assigned density and thickness is not
            considered for substructures.
            UNSUPPORTED_NON_STRUCTURAL_MASS_PROPORTIONAL: Non-structural mass with Mass Proportional
            distribution is not supported. Results are computed using Volume Proportional
            distribution.
        """
        # Stub: the actual implementation is supplied by the Abaqus kernel.
        pass
def getFeatureFaces(self, name: str):
"""This method returns a sequence of Face objects | |
<gh_stars>10-100
from collections import defaultdict
from itertools import chain
from enum import Enum
from core.simulator import JIT, iter_simulation_topology
from PySide6.QtWidgets import *
from PySide6.QtCore import *
from PySide6.QtGui import *
from diagram import DIRS, Schematic, Element
from core.descriptors import ExposedPin, Gate, Not, Composite
from editors import *
import networkx as nx
from version import format_version
import pickle
class Mode(Enum):
    """Top-level interaction mode of the diagram editor."""
    EDIT = 0
    VIEW = 1
class EditState(Enum):
    """Sub-state of the editor while in EDIT mode."""
    NONE = 0
    ELEMENT_CLICK = 1
    EMPTY_CLICK = 2
    WIRE = 3
    DRAG = 4
    SELECT = 5
    PLACE = 6
class ViewState(Enum):
    """Sub-state of the editor while in VIEW mode."""
    NONE = 0
    CLICK = 1
    MOVE = 2
class DiagramEditor(QWidget):
element_selected = Signal(Element)
    def __init__(self, schematic: Schematic, grid_size=16):
        """Create an editor widget for *schematic*.

        grid_size is the pixel pitch of the snapping grid.
        """
        super().__init__()
        self.schematic = schematic
        self.grid_size = grid_size
        self.executor = None  # simulation executor; None until a simulation is attached
        self._grid = self._make_grid()  # cached background grid tile pixmap
        self._translation = QPoint()  # canvas pan offset, in pixels
        self._mode = Mode.EDIT
        self._state = EditState.NONE
        self._cursor_pos = QPoint()  # cursor snapped to the grid, in pixels
        self._selected_element = None
        self._placing_element = None
        self._start = None  # interaction start point (pixel or grid coords, per state)
        self._end = None  # interaction end point
        # Keyboard shortcuts: E toggles mode, Delete removes selection, Esc cancels.
        mode_action = QShortcut(QKeySequence(Qt.Key_E), self)
        delete_action = QShortcut(QKeySequence(Qt.Key_Delete), self)
        cancel_action = QShortcut(QKeySequence(Qt.Key_Escape), self)
        delete_action.activated.connect(self.delete_selected_element)
        cancel_action.activated.connect(self.cancel_interaction)
        mode_action.activated.connect(self.toggle_interaction_mode)
        # Periodic simulation step + repaint (4 Hz) while the timer runs.
        self.redraw_timer = QTimer()
        self.redraw_timer.setInterval(250)
        def do_stuff():
            # NOTE(review): assumes self.executor is set before the timer is
            # started; would raise AttributeError on None otherwise -- confirm
            # the timer is only started once an executor exists.
            self.executor.step()
            self.update()
        self.redraw_timer.timeout.connect(do_stuff)
        self.setMouseTracking(True)
def toggle_interaction_mode(self):
self._mode = Mode.EDIT if self._mode == Mode.VIEW else Mode.VIEW
self._state = EditState.NONE if self._mode == Mode.EDIT else ViewState.NONE
if self._mode == Mode.VIEW:
self.setCursor(Qt.PointingHandCursor)
else:
self.setCursor(Qt.ArrowCursor)
self.update()
def cancel_interaction(self):
if self._mode == Mode.VIEW:
self._state = ViewState.NONE
self.setCursor(Qt.PointingHandCursor)
else:
self._state = EditState.NONE
self.setCursor(Qt.ArrowCursor)
self.update()
def select_element(self, element):
self._state = EditState.SELECT
curr = self._selected_element
self._selected_element = element
if curr is not element:
self.element_selected.emit(element)
self.update()
def unselect(self):
curr = self._selected_element
self._selected_element = None
self._state = EditState.NONE
if curr is not None:
self.element_selected.emit(None)
self.update()
def delete_element(self, element):
self.schematic.remove_element(element)
if self._state == EditState.SELECT and self._selected_element is element:
self.unselect()
def delete_selected_element(self):
self.delete_element(self._selected_element)
def _get_wire(self):
if self._state != EditState.WIRE:
return None
ws, we = self._start, self._end
delta = we - ws
if delta == QPoint():
return None
if abs(delta.x()) > abs(delta.y()):
yield (ws.x(), ws.y(), we.x(), ws.y())
yield (we.x(), ws.y(), we.x(), we.y())
else:
yield (ws.x(), ws.y(), ws.x(), we.y())
yield (ws.x(), we.y(), we.x(), we.y())
def start_placing(self, element):
if self._mode != Mode.EDIT:
self.toggle_interaction_mode()
self._state = EditState.PLACE
self._placing_element = element
self.update()
def stop_placing(self):
self._mode = Mode.EDIT
self._state = EditState.NONE
self.update()
    def element_at_position(self, pos):
        """Return the schematic element under pixel position *pos*, or None.

        Pin hot-zones take priority: if *pos* lies within half a grid cell of
        any input/output pin, None is returned (presumably so clicks near pins
        start wires instead of selecting the element -- confirm with callers).
        """
        gs = self.grid_size
        for element in self.schematic.elements:
            facing = element.facing
            p = element.position
            bb = element.get_bounding_rect()
            # Map the element's local bounding box into pixel space
            # (translate to its grid position, then rotate by facing).
            transform = QTransform()
            #transform.scale(gs, gs)
            transform.translate(p.x() * gs, p.y() * gs)
            transform.rotate(facing * -90)
            r = transform.mapRect(
                QRect(bb.x() * gs, bb.y() * gs, bb.width() * gs, bb.height() * gs))
            # NOTE(review): this loop rebinds p (the element position above)
            # to each pin's position -- looks intentional but worth confirming.
            for p, _ in chain(element.all_inputs(),
                              element.all_outputs()):
                pt = (p + DIRS[facing]) * gs
                # Within half a grid cell of a pin: report no element.
                if QVector2D(pt - pos).length() <= self.grid_size / 2:
                    return None
            if r.contains(pos):
                return element
        return None
    def mousePressEvent(self, event: QMouseEvent):
        """Begin an interaction: pan (VIEW) or click/select/wire start (EDIT)."""
        gs = self.grid_size
        # d: canvas-space pixel position; p: snapped grid coordinate.
        d = event.pos() - self._translation
        p = QPoint(round(d.x() / gs), round(d.y() / gs))
        if self._mode == Mode.VIEW:
            # VIEW mode: a press starts a potential pan/click.
            self._state = ViewState.CLICK
            self._start = d
            self._end = d
            self.update()
            return
        if self._state == EditState.NONE:
            # Idle: press on an element starts a click/drag; empty space
            # starts a click that may become a wire.
            selected_element = self.element_at_position(d)
            if selected_element is not None:
                self._state = EditState.ELEMENT_CLICK
                self._selected_element = selected_element
                self.element_selected.emit(selected_element)
            else:
                self._state = EditState.EMPTY_CLICK
                self._start = p
                self._end = p
            self.update()
        elif self._state == EditState.SELECT:
            # Something is already selected: re-resolve what was pressed.
            selected_element = self.element_at_position(d)
            if selected_element is None:
                self._state = EditState.EMPTY_CLICK
                self.element_selected.emit(selected_element)
                self.update()
            else:
                if self._selected_element is not selected_element:
                    self.element_selected.emit(selected_element)
                self._selected_element = selected_element
                self._state = EditState.ELEMENT_CLICK
                self.update()
            self._start = p
            self._end = p
        elif self._state != EditState.PLACE:
            # Any other non-PLACE state: a press resets to idle.
            self._state = EditState.NONE
            self.update()
    def mouseMoveEvent(self, event: QMouseEvent):
        """Advance the current interaction as the mouse moves (pan, drag, wire, place)."""
        ep = event.pos()
        # d: canvas-space pixel position; p: snapped grid coordinate.
        d = ep - self._translation
        gs = self.grid_size
        p = QPoint(round(d.x() / gs), round(d.y() / gs))
        # Track the grid-snapped cursor position (in widget pixels).
        self._cursor_pos = QPoint(round(ep.x() / gs), round(ep.y() / gs)) * gs
        self.update()
        if self._mode == Mode.VIEW:
            # VIEW mode: a moved click becomes a pan.
            if self._state == ViewState.CLICK:
                self._state = ViewState.MOVE
                self.setCursor(Qt.ClosedHandCursor)
                self._end = d
                self.update()
            elif self._state == ViewState.MOVE:
                self.setCursor(Qt.ClosedHandCursor)
                self._end = d
                self.update()
            return
        if self._state == EditState.PLACE:
            # The element being placed follows the grid-snapped cursor.
            self._placing_element.position = p
            self.update()
        elif self._state == EditState.ELEMENT_CLICK:
            # Moving after pressing an element promotes the click to a drag.
            self._state = EditState.DRAG
            self._end = p
            self.update()
        elif self._state == EditState.DRAG:
            self._end = p
            self.update()
        elif self._state == EditState.EMPTY_CLICK:
            # Moving after pressing empty space starts drawing a wire.
            self._state = EditState.WIRE
            self._end = p
            self.update()
        elif self._state == EditState.WIRE:
            self._end = p
            self.update()
    def mouseReleaseEvent(self, event: QMouseEvent):
        """Finish the current interaction: commit pans, pin toggles, placements,
        drags, and wires, then return to the appropriate idle state."""
        ep = event.pos()
        # d: canvas-space pixel position; p: snapped grid coordinate.
        d = ep - self._translation
        gs = self.grid_size
        p = QPoint(round(d.x() / gs), round(d.y() / gs))
        if self._mode == Mode.VIEW:
            if self._state == ViewState.MOVE:
                # Commit the pan into the persistent canvas translation.
                self.setCursor(Qt.PointingHandCursor)
                self._state = ViewState.NONE
                self._translation += self._end - self._start
                self.update()
            elif self._state == ViewState.CLICK:
                # A plain click in VIEW mode toggles bits of an input pin
                # when a simulation executor is attached.
                element = self.element_at_position(d)
                if self.executor is not None and element is not None:
                    if isinstance(element.descriptor, ExposedPin):
                        desc = element.descriptor
                        if desc.direction == ExposedPin.IN:
                            p = element.position
                            r = element.get_bounding_rect()
                            transform = QTransform()
                            transform.translate(p.x() * gs, p.y() * gs)
                            transform.rotate(element.facing * -90)
                            state = self.executor.get_pin_state(
                                '/' + element.name + '/pin')
                            # Hit-test each bit cell of the pin; XOR the bit
                            # that was clicked.
                            # NOTE(review): r is reassigned inside the loop,
                            # so iterations after the first compute the cell
                            # from the previous mapped rect rather than the
                            # bounding rect -- looks unintended; confirm.
                            for i in range(desc.width):
                                r = QRect(r.x() * gs + gs / 8 + i * gs, r.y() * gs + r.height() / 8 * gs + r.height() * gs / 8 * 6 / 8,
                                          gs / 8 * 6, r.height() * gs / 8 * 6 / 8 * 6)
                                r = transform.mapRect(r)
                                if r.contains(d):
                                    state ^= 1 << i
                                    self.executor.set_pin_state(
                                        '/' + element.name + '/pin', state)
                                    break
                self._state = ViewState.NONE
                self.update()
            return
        if self._state == EditState.PLACE:
            # Drop the placed element and select it.
            self._state = EditState.SELECT
            self.schematic.add_element(self._placing_element)
            self._selected_element = self._placing_element
            self.element_selected.emit(self._selected_element)
            self.update()
        elif self._state == EditState.DRAG:
            # Commit the drag offset to the element position.
            # NOTE(review): relies on QPoint.__iadd__ mutating el.position in
            # place (pos is a reference to it) -- confirm PySide6 semantics.
            el = self._selected_element
            pos = el.position
            pos += self._end - self._start
            self._state = EditState.NONE
            self.update()
        elif self._state == EditState.WIRE:
            # Commit the drawn wire segments, if any.
            wires = self._get_wire()
            if wires is not None:
                self.schematic.change_wires(wires)
            self._state = EditState.NONE
            self.update()
        elif self._state == EditState.ELEMENT_CLICK:
            # A press-release on an element without movement selects it.
            self._state = EditState.SELECT
            self.update()
        elif self._state == EditState.EMPTY_CLICK:
            # A press-release on empty space without movement toggles overlap.
            self.schematic.overlap(p)
            self._state = EditState.NONE
            self.update()
def _make_grid(self):
pixmap = QPixmap(self.grid_size * 16, self.grid_size * 16)
painter = QPainter(pixmap)
painter.setRenderHint(QPainter.Antialiasing)
back_col = QApplication.palette().color(QPalette.Base)
grid_col = QApplication.palette().color(QPalette.WindowText)
painter.fillRect(pixmap.rect(), back_col)
painter.setPen(QPen(grid_col))
for x in range(0, pixmap.width(), self.grid_size):
for y in range(0, pixmap.height(), self.grid_size):
painter.drawPoint(x, y)
return pixmap
def paint_element(self, painter: QPainter, element: Element, position, ghost, selected):
gs = self.grid_size
facing = element.facing
x, y = position.x(), position.y()
bb = element.get_bounding_rect()
xb, yb, w, h = bb.topLeft().x(), bb.topLeft().y(), bb.width(), bb.height()
if ghost:
black = QColor.fromRgbF(0.0, 0.0, 0.0, 0.5)
white = QColor.fromRgbF(1.0, 1.0, 1.0, 0.5)
else:
black = Qt.black
white = Qt.white
painter.save()
painter.translate(x * gs, y * gs)
painter.rotate(facing * -90)
desc = element.descriptor
if isinstance(desc, Not):
path = QPainterPath()
path.moveTo(xb * gs, yb * gs)
path.lineTo(xb * gs + w * gs, yb * gs + h / 2 * gs)
path.lineTo(xb * gs, yb * gs + h * gs)
path.closeSubpath()
painter.fillPath(path, white)
painter.setPen(QPen(black, 2.0))
painter.drawPath(path)
path = QPainterPath()
path.addEllipse(xb * gs + w * gs - gs/2.5, yb * gs +
h * gs / 2 - gs / 2.5, gs * 4 / 5, gs * 4 / 5)
painter.fillPath(path, white)
painter.drawPath(path)
elif isinstance(desc, ExposedPin):
path = QPainterPath()
if desc.direction == ExposedPin.IN:
path.addRect(xb * gs, yb * gs + h / 8 * gs, w * gs,
h * gs / 8 * 6)
else:
path.addRoundedRect(xb * gs, yb * gs + h / 8 * gs, w * gs,
h * gs / 8 * 6, gs / 4, gs / 4)
painter.fillPath(path, white)
painter.setPen(QPen(black, 2.0))
painter.drawPath(path)
if self.executor is None:
state = None
else:
state = self.executor.get_pin_state(
'/' + element.name + '/pin')
painter.setPen(QPen(Qt.black))
for i in range(desc.width):
r = QRect(xb * gs + gs / 8 + i * gs, yb * gs + h / 8 * gs + h * gs / 8 * 6 / 8,
gs / 8 * 6, h * gs / 8 * 6 / 8 * 6)
if state is not None:
painter.drawText(r, Qt.AlignCenter, str(
1 if state & (1 << i) else 0))
elif isinstance(desc, Gate):
op = desc.op
path = QPainterPath()
if op == Gate.OR:
path.moveTo(xb * gs, yb * gs)
path.quadTo(xb * gs + w * gs / 2, yb * gs, xb *
gs + w * gs, yb * gs + h * gs / 2)
path.quadTo(xb * gs + w * gs / 2, yb * gs + h *
gs, xb * gs, yb * gs + h * gs)
path.quadTo(xb * gs + w * gs / 4, yb * gs +
h * gs / 2, xb * gs, yb * gs)
elif op == Gate.AND:
path.moveTo(xb * gs, yb * gs)
path.lineTo(xb | |
rel_absence_ahu = property(__rel_absence_ahu.value, __rel_absence_ahu.set, None, None)
# Element part_load_factor_ahu uses Python identifier part_load_factor_ahu
__part_load_factor_ahu = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'part_load_factor_ahu'), 'part_load_factor_ahu', '__AbsentNamespace0_RoomClimateType_part_load_factor_ahu', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 41, 6), )
part_load_factor_ahu = property(__part_load_factor_ahu.value, __part_load_factor_ahu.set, None, None)
_ElementMap.update({
__set_temp_heat.name() : __set_temp_heat,
__set_temp_cool.name() : __set_temp_cool,
__temp_set_back.name() : __temp_set_back,
__min_temp_heat.name() : __min_temp_heat,
__max_temp_cool.name() : __max_temp_cool,
__rel_humidity.name() : __rel_humidity,
__cooling_time.name() : __cooling_time,
__heating_time.name() : __heating_time,
__min_air_exchange.name() : __min_air_exchange,
__rel_absence_ahu.name() : __rel_absence_ahu,
__part_load_factor_ahu.name() : __part_load_factor_ahu
})
_AttributeMap.update({
})
_module_typeBindings.RoomClimateType = RoomClimateType
Namespace.addCategoryObject('typeBinding', 'RoomClimateType', RoomClimateType)
# Complex type InternalGainsType with content type ELEMENT_ONLY
# NOTE: pyxb-generated binding code. Do not hand-edit; regenerate from
# BoundaryConditions.xsd if the schema changes.
# Complex type InternalGainsType with content type ELEMENT_ONLY
class InternalGainsType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type InternalGainsType with content type ELEMENT_ONLY.

    Binding for the internal-gains boundary condition: person counts,
    machine loads, lighting power, and their usage profiles.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'InternalGainsType')
    _XSDLocation = pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 44, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element persons uses Python identifier persons
    __persons = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'persons'), 'persons', '__AbsentNamespace0_InternalGainsType_persons', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 46, 6), )
    persons = property(__persons.value, __persons.set, None, None)
    # Element profile_persons uses Python identifier profile_persons
    __profile_persons = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'profile_persons'), 'profile_persons', '__AbsentNamespace0_InternalGainsType_profile_persons', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 47, 6), )
    profile_persons = property(__profile_persons.value, __profile_persons.set, None, None)
    # Element machines uses Python identifier machines
    __machines = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'machines'), 'machines', '__AbsentNamespace0_InternalGainsType_machines', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 48, 6), )
    machines = property(__machines.value, __machines.set, None, None)
    # Element profile_machines uses Python identifier profile_machines
    __profile_machines = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'profile_machines'), 'profile_machines', '__AbsentNamespace0_InternalGainsType_profile_machines', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 49, 6), )
    profile_machines = property(__profile_machines.value, __profile_machines.set, None, None)
    # Element lighting_power uses Python identifier lighting_power
    __lighting_power = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'lighting_power'), 'lighting_power', '__AbsentNamespace0_InternalGainsType_lighting_power', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 50, 3), )
    lighting_power = property(__lighting_power.value, __lighting_power.set, None, None)
    # Element profile_lighting uses Python identifier profile_lighting
    __profile_lighting = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'profile_lighting'), 'profile_lighting', '__AbsentNamespace0_InternalGainsType_profile_lighting', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 51, 3), )
    profile_lighting = property(__profile_lighting.value, __profile_lighting.set, None, None)
    _ElementMap.update({
        __persons.name() : __persons,
        __profile_persons.name() : __profile_persons,
        __machines.name() : __machines,
        __profile_machines.name() : __profile_machines,
        __lighting_power.name() : __lighting_power,
        __profile_lighting.name() : __profile_lighting
    })
    _AttributeMap.update({
    })
# Register the binding under its schema type name.
_module_typeBindings.InternalGainsType = InternalGainsType
Namespace.addCategoryObject('typeBinding', 'InternalGainsType', InternalGainsType)
# Complex type AHUType with content type ELEMENT_ONLY
class AHUType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type AHUType with content type ELEMENT_ONLY"""
    # NOTE: auto-generated PyXB binding class for the ``AHUType`` complex
    # type of BoundaryConditions.xsd.  Do not edit by hand -- regenerate
    # from the schema instead.  Each XSD child element gets a name-mangled
    # ElementDeclaration plus a plain property exposing get/set access.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'AHUType')
    # Location records where this type is declared in the source schema
    # (absolute path baked in by the generator on the original machine).
    _XSDLocation = pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 54, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element min_ahu uses Python identifier min_ahu
    __min_ahu = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'min_ahu'), 'min_ahu', '__AbsentNamespace0_AHUType_min_ahu', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 56, 6), )

    min_ahu = property(__min_ahu.value, __min_ahu.set, None, None)

    # Element max_ahu uses Python identifier max_ahu
    __max_ahu = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'max_ahu'), 'max_ahu', '__AbsentNamespace0_AHUType_max_ahu', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 57, 6), )

    max_ahu = property(__max_ahu.value, __max_ahu.set, None, None)

    # Element with_ahu uses Python identifier with_ahu
    __with_ahu = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'with_ahu'), 'with_ahu', '__AbsentNamespace0_AHUType_with_ahu', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 58, 6), )

    with_ahu = property(__with_ahu.value, __with_ahu.set, None, None)

    # Element use_constant_ach_rate uses Python identifier use_constant_ach_rate
    __use_constant_ach_rate = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'use_constant_ach_rate'), 'use_constant_ach_rate', '__AbsentNamespace0_AHUType_use_constant_ach_rate', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 59, 6), )

    use_constant_ach_rate = property(__use_constant_ach_rate.value, __use_constant_ach_rate.set, None, None)

    # Element base_ach uses Python identifier base_ach
    __base_ach = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'base_ach'), 'base_ach', '__AbsentNamespace0_AHUType_base_ach', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 60, 6), )

    base_ach = property(__base_ach.value, __base_ach.set, None, None)

    # Element max_user_ach uses Python identifier max_user_ach
    __max_user_ach = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'max_user_ach'), 'max_user_ach', '__AbsentNamespace0_AHUType_max_user_ach', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 61, 6), )

    max_user_ach = property(__max_user_ach.value, __max_user_ach.set, None, None)

    # Element max_overheating_ach uses Python identifier max_overheating_ach
    __max_overheating_ach = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'max_overheating_ach'), 'max_overheating_ach', '__AbsentNamespace0_AHUType_max_overheating_ach', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 62, 6), )

    max_overheating_ach = property(__max_overheating_ach.value, __max_overheating_ach.set, None, None)

    # Element max_summer_ach uses Python identifier max_summer_ach
    __max_summer_ach = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'max_summer_ach'), 'max_summer_ach', '__AbsentNamespace0_AHUType_max_summer_ach', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 63, 6), )

    max_summer_ach = property(__max_summer_ach.value, __max_summer_ach.set, None, None)

    # Element winter_reduction uses Python identifier winter_reduction
    __winter_reduction = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'winter_reduction'), 'winter_reduction', '__AbsentNamespace0_AHUType_winter_reduction', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 64, 6), )

    winter_reduction = property(__winter_reduction.value, __winter_reduction.set, None, None)

    # Map from XSD element names to their declarations; consumed by the
    # PyXB runtime when (de)serializing documents of this type.
    _ElementMap.update({
        __min_ahu.name() : __min_ahu,
        __max_ahu.name() : __max_ahu,
        __with_ahu.name() : __with_ahu,
        __use_constant_ach_rate.name() : __use_constant_ach_rate,
        __base_ach.name() : __base_ach,
        __max_user_ach.name() : __max_user_ach,
        __max_overheating_ach.name() : __max_overheating_ach,
        __max_summer_ach.name() : __max_summer_ach,
        __winter_reduction.name() : __winter_reduction
    })
    # AHUType declares no XML attributes.
    _AttributeMap.update({
    })
# Register the binding so generated code elsewhere can resolve it by name.
_module_typeBindings.AHUType = AHUType
Namespace.addCategoryObject('typeBinding', 'AHUType', AHUType)
# Complex type BoundaryConditionsType with content type ELEMENT_ONLY
class BoundaryConditionsType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type BoundaryConditionsType with content type ELEMENT_ONLY"""
    # NOTE: auto-generated PyXB binding class for ``BoundaryConditionsType``
    # of BoundaryConditions.xsd.  Do not edit by hand -- regenerate from
    # the schema instead.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'BoundaryConditionsType')
    _XSDLocation = pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 67, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element usage uses Python identifier usage
    __usage = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'usage'), 'usage', '__AbsentNamespace0_BoundaryConditionsType_usage', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 69, 6), )

    usage = property(__usage.value, __usage.set, None, None)

    # Element typical_length uses Python identifier typical_length
    __typical_length = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'typical_length'), 'typical_length', '__AbsentNamespace0_BoundaryConditionsType_typical_length', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 70, 6), )

    typical_length = property(__typical_length.value, __typical_length.set, None, None)

    # Element typical_width uses Python identifier typical_width
    __typical_width = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'typical_width'), 'typical_width', '__AbsentNamespace0_BoundaryConditionsType_typical_width', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 71, 6), )

    typical_width = property(__typical_width.value, __typical_width.set, None, None)

    # Element UsageOperationTime uses Python identifier UsageOperationTime
    __UsageOperationTime = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'UsageOperationTime'), 'UsageOperationTime', '__AbsentNamespace0_BoundaryConditionsType_UsageOperationTime', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 72, 6), )

    UsageOperationTime = property(__UsageOperationTime.value, __UsageOperationTime.set, None, None)

    # Element Lighting uses Python identifier Lighting
    __Lighting = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'Lighting'), 'Lighting', '__AbsentNamespace0_BoundaryConditionsType_Lighting', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 73, 6), )

    Lighting = property(__Lighting.value, __Lighting.set, None, None)

    # Element RoomClimate uses Python identifier RoomClimate
    __RoomClimate = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'RoomClimate'), 'RoomClimate', '__AbsentNamespace0_BoundaryConditionsType_RoomClimate', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 74, 6), )

    RoomClimate = property(__RoomClimate.value, __RoomClimate.set, None, None)

    # Element InternalGains uses Python identifier InternalGains
    __InternalGains = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'InternalGains'), 'InternalGains', '__AbsentNamespace0_BoundaryConditionsType_InternalGains', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 75, 6), )

    InternalGains = property(__InternalGains.value, __InternalGains.set, None, None)

    # Element AHU uses Python identifier AHU
    __AHU = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'AHU'), 'AHU', '__AbsentNamespace0_BoundaryConditionsType_AHU', False, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 76, 6), )

    AHU = property(__AHU.value, __AHU.set, None, None)

    # Map from XSD element names to their declarations; consumed by the
    # PyXB runtime when (de)serializing documents of this type.
    _ElementMap.update({
        __usage.name() : __usage,
        __typical_length.name() : __typical_length,
        __typical_width.name() : __typical_width,
        __UsageOperationTime.name() : __UsageOperationTime,
        __Lighting.name() : __Lighting,
        __RoomClimate.name() : __RoomClimate,
        __InternalGains.name() : __InternalGains,
        __AHU.name() : __AHU
    })
    # BoundaryConditionsType declares no XML attributes.
    _AttributeMap.update({
    })
# Register the binding so generated code elsewhere can resolve it by name.
_module_typeBindings.BoundaryConditionsType = BoundaryConditionsType
Namespace.addCategoryObject('typeBinding', 'BoundaryConditionsType', BoundaryConditionsType)
# Complex type UseConditionsType with content type ELEMENT_ONLY
class UseConditionsType (pyxb.binding.basis.complexTypeDefinition):
    """Complex type UseConditionsType with content type ELEMENT_ONLY"""
    # NOTE: auto-generated PyXB binding class for ``UseConditionsType`` of
    # BoundaryConditions.xsd.  Do not edit by hand -- regenerate from the
    # schema instead.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'UseConditionsType')
    _XSDLocation = pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 79, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element BoundaryConditions uses Python identifier BoundaryConditions
    # (the ``True`` multiplicity flag marks it as potentially repeating).
    __BoundaryConditions = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'BoundaryConditions'), 'BoundaryConditions', '__AbsentNamespace0_UseConditionsType_BoundaryConditions', True, pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 81, 6), )

    BoundaryConditions = property(__BoundaryConditions.value, __BoundaryConditions.set, None, None)

    _ElementMap.update({
        __BoundaryConditions.name() : __BoundaryConditions
    })
    # UseConditionsType declares no XML attributes.
    _AttributeMap.update({
    })
# Register the binding so generated code elsewhere can resolve it by name.
_module_typeBindings.UseConditionsType = UseConditionsType
Namespace.addCategoryObject('typeBinding', 'UseConditionsType', UseConditionsType)
# Auto-generated module-level bindings: the document-root ``UseConditions``
# element plus the child-element declarations of UsageOperationTimeType.
# Do not edit by hand -- regenerate from BoundaryConditions.xsd instead.
UseConditions = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'UseConditions'), UseConditionsType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 3, 2))
Namespace.addCategoryObject('elementBinding', UseConditions.name().localName(), UseConditions)

# Attach each schema-declared child element to UsageOperationTimeType.
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'usage_time'), integerList, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 6, 6)))
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'daily_usage_hours'), pyxb.binding.datatypes.integer, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 7, 6)))
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'yearly_usage_days'), pyxb.binding.datatypes.integer, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 8, 6)))
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'yearly_usage_hours_day'), pyxb.binding.datatypes.integer, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 9, 6)))
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'yearly_usage_hours_night'), pyxb.binding.datatypes.integer, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 10, 6)))
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'daily_operation_ahu_cooling'), pyxb.binding.datatypes.integer, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 11, 6)))
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'yearly_heating_days'), pyxb.binding.datatypes.integer, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 12, 6)))
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'yearly_ahu_days'), pyxb.binding.datatypes.integer, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 13, 6)))
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'yearly_cooling_days'), pyxb.binding.datatypes.integer, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 14, 6)))
UsageOperationTimeType._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'daily_operation_heating'), pyxb.binding.datatypes.integer, scope=UsageOperationTimeType, location=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 15, 6)))
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 6, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 7, 6))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 8, 6))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 9, 6))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 10, 6))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 11, 6))
counters.add(cc_5)
cc_6 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 12, 6))
counters.add(cc_6)
cc_7 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 13, 6))
counters.add(cc_7)
cc_8 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 14, 6))
counters.add(cc_8)
cc_9 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 15, 6))
counters.add(cc_9)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(UsageOperationTimeType._UseForTag(pyxb.namespace.ExpandedName(None, 'usage_time')), pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 6, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(UsageOperationTimeType._UseForTag(pyxb.namespace.ExpandedName(None, 'daily_usage_hours')), pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 7, 6))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(UsageOperationTimeType._UseForTag(pyxb.namespace.ExpandedName(None, 'yearly_usage_days')), pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 8, 6))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(UsageOperationTimeType._UseForTag(pyxb.namespace.ExpandedName(None, 'yearly_usage_hours_day')), pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 9, 6))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(UsageOperationTimeType._UseForTag(pyxb.namespace.ExpandedName(None, 'yearly_usage_hours_night')), pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 10, 6))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(UsageOperationTimeType._UseForTag(pyxb.namespace.ExpandedName(None, 'daily_operation_ahu_cooling')), pyxb.utils.utility.Location('D:\\GIT\\TEASER\\teaser\\data\\bindings\\schemas\\BoundaryConditions.xsd', 11, 6))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = | |
the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
Returns
-------
list
The ``problems`` list extended as follows.
Record the indices of ``df`` where the given column has
duplicated entry or an invalid strings.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
in the checking.
"""
f = df.copy()
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
cond = ~f[column].map(valid_str)
problems = check_table(
problems,
table,
f,
cond,
f"Invalid {column}; maybe has extra space characters",
)
cond = f[column].duplicated()
problems = check_table(problems, table, f, cond, f"Repeated {column}")
return problems
def check_column_linked_id(
    problems: List,
    table: str,
    df: DataFrame,
    column: str,
    target_df: DataFrame,
    target_column: Optional[str] = None,
    *,
    column_required: bool = True,
) -> List:
    """
    A modified version of :func:`check_column_id` that checks IDs against
    a second table.

    Record the indices of ``df`` where ``column`` contains an ID that is
    not present in ``target_df`` under the ``target_column`` name
    (``target_column`` defaults to ``column``).  If any such indices
    exist, append ``[type_, problem, table, indices]`` to ``problems``.
    When ``column_required`` is false, NaN entries in either table are
    ignored.

    Parameters
    ----------
    problems : list
        List of four-tuples ``(type, message, table, rows)`` as described
        in :func:`check_table`.
    table : string
        Name of a GTFS table.
    df : DataFrame
        The GTFS table corresponding to ``table``.
    column : string
        A column of ``df``.
    target_df : DataFrame
        A GTFS table holding the defined IDs; may be ``None``.
    target_column : string
        A column of ``target_df``; defaults to ``column``.
    column_required : boolean
        ``True`` if and only if ``column`` is required by the GTFS.

    Returns
    -------
    list
        The extended ``problems`` list.
    """
    if target_column is None:
        target_column = column

    frame = df.copy()

    # Build the table of defined IDs.  A missing target table or missing
    # target column behaves like an empty set of defined IDs.
    if target_df is None:
        targets = pd.DataFrame()
        targets[target_column] = np.nan
    else:
        targets = target_df.copy()
        if target_column not in targets.columns:
            targets[target_column] = np.nan

    if not column_required:
        # Optional column: tolerate its absence and skip NaN entries on
        # both sides of the lookup.
        if column not in frame.columns:
            frame[column] = np.nan
        frame = frame.dropna(subset=[column])
        targets = targets.dropna(subset=[target_column])

    undefined = ~frame[column].isin(targets[target_column])
    return check_table(problems, table, frame, undefined, f"Undefined {column}")
def format_problems(
    problems: List, *, as_df: bool = False
) -> Union[List, DataFrame]:
    """
    Optionally format the given problems list as a DataFrame.

    Parameters
    ----------
    problems : list
        List of four-tuples ``(type, message, table, rows)`` as described
        in :func:`check_table`.
    as_df : boolean
        If ``True``, convert the list into a DataFrame.

    Returns
    -------
    list or DataFrame
        ``problems`` unchanged when not ``as_df``; otherwise a DataFrame
        with one problem per row and the columns
        ``['type', 'message', 'table', 'rows']``, sorted by type and
        table.
    """
    if not as_df:
        return problems

    columns = ["type", "message", "table", "rows"]
    return pd.DataFrame(problems, columns=columns).sort_values(["type", "table"])
def check_agency(
    feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
    """
    Check that ``feed.agency`` follows the GTFS.

    Return a list of problems of the form described in
    :func:`check_table`; the list will be empty if no problems are found.
    """
    table = "agency"

    # The table is required: its absence is itself an error.
    if feed.agency is None:
        return format_problems(
            [["error", "Missing table", table, []]], as_df=as_df
        )

    f = feed.agency.copy()
    problems = check_for_required_columns([], table, f)
    if problems:
        # Missing required columns make further checks meaningless.
        return format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # agency_id is optional, but must be a valid, unique ID when present.
    problems = check_column_id(
        problems, table, f, "agency_id", column_required=False
    )

    # Column-by-column validators: (column, validator, required?).
    column_checks = [
        ("agency_name", valid_str, True),
        ("agency_url", valid_url, True),
        ("agency_timezone", valid_timezone, True),
        ("agency_fare_url", valid_url, False),
        ("agency_lang", valid_lang, False),
        ("agency_phone", valid_str, False),
        ("agency_email", valid_email, False),
    ]
    for col, checker, required in column_checks:
        problems = check_column(
            problems, table, f, col, checker, column_required=required
        )

    return format_problems(problems, as_df=as_df)
def check_calendar(
    feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
    """
    Analog of :func:`check_agency` for ``feed.calendar``.
    """
    table = "calendar"

    # The calendar table is optional; nothing to check when absent.
    if feed.calendar is None:
        return []

    f = feed.calendar.copy()
    problems = check_for_required_columns([], table, f)
    if problems:
        return format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    problems = check_column_id(problems, table, f, "service_id")

    # Each weekday flag must be 0 or 1.
    for day in (
        "monday",
        "tuesday",
        "wednesday",
        "thursday",
        "friday",
        "saturday",
        "sunday",
    ):
        problems = check_column(problems, table, f, day, lambda x: x in (0, 1))

    for date_col in ("start_date", "end_date"):
        problems = check_column(problems, table, f, date_col, valid_date)

    if include_warnings:
        # Warn when the feed's service window is entirely in the past,
        # taking calendar_dates into account when it is present.
        last_date = f["end_date"].max()
        if feed.calendar_dates is not None and not feed.calendar_dates.empty:
            table += "/calendar_dates"
            last_date = max(last_date, feed.calendar_dates["date"].max())
        if last_date < dt.datetime.today().strftime(DATE_FORMAT):
            problems.append(["warning", "Feed expired", table, []])

    return format_problems(problems, as_df=as_df)
def check_calendar_dates(
    feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
    """
    Analog of :func:`check_agency` for ``feed.calendar_dates``.
    """
    table = "calendar_dates"

    # The calendar_dates table is optional; nothing to check when absent.
    if feed.calendar_dates is None:
        return []

    f = feed.calendar_dates.copy()
    problems = check_for_required_columns([], table, f)
    if problems:
        return format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    problems = check_column(problems, table, f, "service_id", valid_str)
    problems = check_column(problems, table, f, "date", valid_date)

    # A (service_id, date) pair may appear at most once.
    duplicated_pairs = f[["service_id", "date"]].duplicated()
    problems = check_table(
        problems, table, f, duplicated_pairs, "Repeated pair (service_id, date)"
    )

    # exception_type: 1 = service added, 2 = service removed (GTFS spec).
    problems = check_column(
        problems, table, f, "exception_type", lambda x: x in (1, 2)
    )

    return format_problems(problems, as_df=as_df)
def check_fare_attributes(
    feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
    """
    Analog of :func:`check_agency` for ``feed.fare_attributes``.
    """
    table = "fare_attributes"

    # The fare_attributes table is optional; nothing to check when absent.
    if feed.fare_attributes is None:
        return []

    f = feed.fare_attributes.copy()
    problems = check_for_required_columns([], table, f)
    if problems:
        return format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    problems = check_column_id(problems, table, f, "fare_id")
    problems = check_column(problems, table, f, "currency_type", valid_currency)
    # payment_method: 0 or 1 per the GTFS.
    problems = check_column(
        problems, table, f, "payment_method", lambda x: x in (0, 1)
    )
    # transfers: empty (NaN) or 0-2 per the GTFS.
    problems = check_column(
        problems, table, f, "transfers", lambda x: pd.isna(x) or x in (0, 1, 2)
    )
    # transfer_duration: optional non-negative number.
    problems = check_column(
        problems,
        table,
        f,
        "transfer_duration",
        lambda x: x >= 0,
        column_required=False,
    )

    return format_problems(problems, as_df=as_df)
def check_fare_rules(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.calendar_dates``.
"""
table = "fare_rules"
problems = []
# Preliminary checks
if feed.fare_rules is None:
return problems
f = feed.fare_rules.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check fare_id
problems = check_column_linked_id(
problems, table, f, "fare_id", feed.fare_attributes
)
# Check route_id
problems = check_column_linked_id(
problems, table, f, "route_id", feed.routes, column_required=False
)
# Check origin_id, destination_id, contains_id
for col in ["origin_id", "destination_id", "contains_id"]:
problems = check_column_linked_id(
problems,
table,
f,
col,
feed.stops,
"zone_id",
column_required=False,
)
return | |
"""Class implementation for dictionary.
"""
from typing import Any
from typing import Dict
from typing import Generic
from typing import TypeVar
from typing import Union
from apysc._event.custom_event_interface import CustomEventInterface
from apysc._type.any_value import AnyValue
from apysc._type.boolean import Boolean
from apysc._type.copy_interface import CopyInterface
from apysc._type.dictionary_structure import DictionaryStructure
from apysc._type.expression_string import ExpressionString
from apysc._type.int import Int
from apysc._type.number import Number
from apysc._type.revert_interface import RevertInterface
from apysc._type.string import String
from apysc._type.variable_name_interface import VariableNameInterface
# Generic type of the default value (presumably used by ``Dictionary.get``
# and similar helpers -- confirm against the methods that reference it).
DefaultType = TypeVar('DefaultType')
# NOTE(review): looks like the set of built-in key types permitted at
# runtime -- verify against the key-validation code that uses this alias.
_BuiltinKeys = Union[str, int, float]
# Generic key and value type parameters for ``Dictionary[_K, _V]``.
_K = TypeVar('_K')
_V = TypeVar('_V')
class Dictionary(
Generic[_K, _V],
CopyInterface, RevertInterface, DictionaryStructure,
CustomEventInterface):
"""
Dictionary class for the apysc library.
References
----------
- Dictionary document
- https://simon-ritchie.github.io/apysc/dictionary.html
- Dictionary class generic type settings document
- https://bit.ly/3HL8HaF
Examples
--------
>>> import apysc as ap
>>> dictionary: ap.Dictionary = ap.Dictionary({'a': 10})
>>> dictionary
Dictionary({'a': 10})
>>> dictionary['a']
10
>>> dictionary['b'] = 20
>>> dictionary['b']
20
>>> dictionary.length
Int(2)
>>> value_1: int = dictionary.get('c', default=0)
>>> value_1
0
"""
_initial_value: Union[Dict[_K, _V], 'Dictionary']
_value: Dict[_K, _V]
    def __init__(self, value: Union[Dict[_K, _V], 'Dictionary']) -> None:
        """
        Dictionary class for the apysc library.

        Parameters
        ----------
        value : dict or Dictionary
            Initial dictionary value.

        References
        ----------
        - Dictionary document
            - https://simon-ritchie.github.io/apysc/dictionary.html
        - Dictionary class generic type settings document
            - https://bit.ly/3HL8HaF

        Examples
        --------
        >>> import apysc as ap
        >>> dictionary: ap.Dictionary = ap.Dictionary({'a': 10})
        >>> dictionary
        Dictionary({'a': 10})
        >>> dictionary['a']
        10
        >>> dictionary['b'] = 20
        >>> dictionary['b']
        20
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_='__init__', locals_=locals(),
                module_name=__name__, class_=Dictionary):
            from apysc._expression import expression_variables_util
            from apysc._expression import var_names
            from apysc._expression.event_handler_scope import \
                TemporaryNotHandlerScope
            # NOTE(review): TemporaryNotHandlerScope presumably keeps the
            # construction out of any active handler scope while the JS
            # variable is allocated -- confirm against apysc internals.
            with TemporaryNotHandlerScope():
                TYPE_NAME: str = var_names.DICTIONARY
                self._validate_acceptable_value_type(value=value)
                self._initial_value = value
                self._type_name = TYPE_NAME
                # Unwrap a Dictionary argument into its underlying dict.
                self._value = self._get_dict_value(value=value)
                # Allocate the next JS variable name for this instance.
                self.variable_name = expression_variables_util.\
                    get_next_variable_name(type_name=TYPE_NAME)
                self._append_constructor_expression()
    def _append_constructor_expression(self) -> None:
        """
        Append this instance's constructor expression to the
        JavaScript expression output.
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_=self._append_constructor_expression,
                locals_=locals(),
                module_name=__name__, class_=Dictionary):
            from apysc._type import value_util
            # Serialize the initial value into its expression string form.
            value_str: str = value_util.get_value_str_for_expression(
                value=self._initial_value)
            expression: str = f'var {self.variable_name} = {value_str};'
            ap.append_js_expression(expression=expression)
def _get_dict_value(
self, *,
value: Union[Dict[_K, _V], 'Dictionary']) -> Dict[_K, _V]:
"""
Get a dict value from specified value.
Parameters
----------
value : dict or Dictionary
Specified dictionary value.
Returns
-------
dict_val : dict
Converted dict value.
"""
if isinstance(value, Dictionary):
return value._value
return value
def _validate_acceptable_value_type(
self, *, value: Union[Dict[_K, _V], 'Dictionary']) -> None:
"""
Validate that specified value is acceptable type or not.
Parameters
----------
value : dict or Dictionary
Dictionary value to check.
Raises
------
TypeError
If specified value's type is not dict or Dictionary.
"""
if isinstance(value, (dict, Dictionary)):
return
raise TypeError(
'Not acceptable value type is specified.'
f'\nSpecified valkue type is: {type(value)}'
'\nAcceptable types are: dict and Dictionary'
)
@property
def value(self) -> Union[Dict[_K, _V], 'Dictionary']:
"""
Get a current dict value.
Returns
-------
value : dict
Current dict value.
References
----------
- apysc basic data classes common value interface
- https://bit.ly/3Be1aij
Examples
--------
>>> import apysc as ap
>>> dictionary: ap.Dictionary = ap.Dictionary({})
>>> dictionary.value = {'a': 10}
>>> dictionary.value
{'a': 10}
"""
return self._value
@value.setter
def value(self, value: Union[Dict[_K, _V], 'Dictionary']) -> None:
"""
Set dictionary value.
Parameters
----------
value : dict or Dictionary.
Dictionary value to set.
References
----------
apysc basic data classes common value interface
https://bit.ly/3Be1aij
"""
import apysc as ap
with ap.DebugInfo(
callable_='value', locals_=locals(),
module_name=__name__, class_=Dictionary):
self._validate_acceptable_value_type(value=value)
self._value = self._get_dict_value(value=value)
self._append_value_setter_expression(value=value)
def _append_value_setter_expression(
self, *, value: Union[Dict[_K, _V], 'Dictionary']) -> None:
"""
Append value's setter expression.
Parameters
----------
value : dict or Dictionary.
Dictionary value to set.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_value_setter_expression,
locals_=locals(),
module_name=__name__, class_=Dictionary):
from apysc._type import value_util
value_str: str = value_util.get_value_str_for_expression(
value=value)
expression: str = f'{self.variable_name} = {value_str};'
ap.append_js_expression(expression=expression)
_value_snapshot: Dict[str, Dict[_K, _V]]
def _make_snapshot(self, *, snapshot_name: str) -> None:
"""
Make values' snapthot.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
self._set_single_snapshot_val_to_dict(
dict_name='_value_snapshot',
value={**self._value}, snapshot_name=snapshot_name)
def _revert(self, *, snapshot_name: str) -> None:
"""
Revert values if snapshot exists.
Parameters
----------
snapshot_name : str
Target snapshot name.
"""
if not self._snapshot_exists(snapshot_name=snapshot_name):
return
self._value = self._value_snapshot[snapshot_name]
def __str__(self) -> str:
"""
String conversion method.
Returns
-------
string : str
Converted value string.
"""
if not hasattr(self, '_value'):
return '{}'
return str(self._value)
def __repr__(self) -> str:
"""
Get a representation string of this instance.
Returns
-------
repr_str : str
Representation string of this instance.
"""
if not hasattr(self, '_value'):
repr_str: str = 'Dictionary({})'
else:
repr_str = f'Dictionary({self._value})'
return repr_str
@property
def length(self) -> Int:
"""
Get length of this dictionary values.
Returns
-------
length : Int
This dictionary value's length.
References
----------
- Dictionary length interface document
- https://simon-ritchie.github.io/apysc/dictionary_length.html
Examples
--------
>>> import apysc as ap
>>> dictionary: ap.Dictionary = ap.Dictionary({'a': 1, 'b': 2})
>>> dictionary.length
Int(2)
"""
import apysc as ap
with ap.DebugInfo(
callable_='length', locals_=locals(),
module_name=__name__, class_=Dictionary):
length: ap.Int = ap.Int(len(self._value))
self._append_length_expression(length=length)
return length
def _append_length_expression(self, *, length: Int) -> None:
"""
Append length method expression.
Parameters
----------
length : Int
Created length Int variable.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_length_expression, locals_=locals(),
module_name=__name__, class_=Dictionary):
expression: str = (
f'{length.variable_name} = '
f'Object.keys({self.variable_name}).length;'
)
ap.append_js_expression(expression=expression)
def __len__(self) -> None:
"""
This method is disabled and can't use from Dictionary instance.
"""
raise Exception(
'Dictionary instance can\'t apply len function.'
' Please use length property instead.')
def __getitem__(self, key: Union[_K, ExpressionString]) -> _V:
"""
Get a specified key's single value.
Parameters
----------
key : _K
Dictionary key.
Returns
-------
value : *
Specified key's value.
"""
import apysc as ap
with ap.DebugInfo(
callable_='__getitem__', locals_=locals(),
module_name=__name__, class_=Dictionary):
self._validate_key_type_is_str_or_numeric(key=key)
key_: _BuiltinKeys = self._get_builtin_type_key(key=key)
has_key: bool = key_ in self._value
if has_key:
value: Any = self._value[key_] # type: ignore
else:
value = ap.AnyValue(None)
self._append_getitem_expression(key=key, value=value)
return value
def _get_builtin_type_key(
self, *, key: Union[_K, ExpressionString]) -> _BuiltinKeys:
"""
Get a built-in type's key (str, int, or float) from
a specified key.
Parameters
----------
key : _K
Target key value (including String, Int, and Number).
Returns
-------
key : str or int or float
Built-in type's key.
"""
if isinstance(key, ExpressionString):
return key.value
if isinstance(key, (String, Int, Number)):
return key._value
key_: _BuiltinKeys = key # type: ignore
return key_
def _append_getitem_expression(
self, *, key: Union[_K, ExpressionString], value: Any) -> None:
"""
Append __getitem__ expression.
Parameters
----------
key : _K
Dictionary key.
value : *
Specified key's value.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_getitem_expression, locals_=locals(),
module_name=__name__, class_=Dictionary):
from apysc._type import value_util
if not isinstance(value, VariableNameInterface):
value = ap.AnyValue(None)
key_str: str = value_util.get_value_str_for_expression(value=key)
expression: str = (
f'var {value.variable_name} = '
f'{self.variable_name}[{key_str}];'
)
ap.append_js_expression(expression=expression)
def _validate_key_type_is_str_or_numeric(
self, *, key: Union[_K, ExpressionString]) -> None:
"""
Validate whether key value type is acceptable (str or int or
flaot) or not.
Parameters
----------
key : _K
Dictionary key to check.
Raises
------
ValueError
If key type is not str, String, int, Int, float, or Number.
"""
if isinstance(
key,
(str, String, int, Int, float, Number,
ExpressionString)):
return
raise ValueError(
f'Unsupported key type is specified: {type(key)}, {key}'
f'\nSuppoting types are: str, String, int, Int')
def __setitem__(
self, key: Union[_K, ExpressionString], value: _V) -> None:
"""
Set value to a specified key position.
Parameters
----------
key : _K
Dictionary key to set value.
value : *
Any value to set.
"""
import apysc as ap
with ap.DebugInfo(
callable_='__setitem__', locals_=locals(),
module_name=__name__, class_=Dictionary):
key_: _BuiltinKeys = self._get_builtin_type_key(key=key)
self._value[key_] = value # type: ignore
self._append_setitem_expression(key=key, value=value)
def _append_setitem_expression(
self, *, key: Union[_K, ExpressionString], value: _V) -> None:
"""
Append __setitem__ method expression.
Parameters
----------
key : _K
Dictionary key to check.
value : *
Any value to set.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self._append_setitem_expression, locals_=locals(),
module_name=__name__, class_=Dictionary):
from apysc._type import value_util
key_str: str = value_util.get_value_str_for_expression(value=key)
value_str: str = value_util.get_value_str_for_expression(
value=value)
expression: str = (
f'{self.variable_name}[{key_str}] = {value_str};'
)
ap.append_js_expression(expression=expression)
def __delitem__(self, key: Union[_K, ExpressionString]) -> None:
"""
Delete specified key's value from dictionary.
Parameters
----------
key : _K
Dictionary key to delete.
"""
import apysc as ap
with ap.DebugInfo(
callable_='__delitem__', locals_=locals(),
module_name=__name__, class_=Dictionary):
key_: _BuiltinKeys = self._get_builtin_type_key(key=key)
if key_ in self._value:
del self._value[key_] # type: ignore
self._append_delitem_expression(key=key)
def _append_delitem_expression(
self, *, key: Union[_K, ExpressionString]) -> None:
"""
Append | |
<reponame>awhite862/kelvin
import unittest
import numpy
from pyscf import gto, scf
from cqcpy import ft_utils
from kelvin.scf_system import SCFSystem
from kelvin import cc_utils
class CCUtilsTest(unittest.TestCase):
    def setUp(self):
        """No per-test fixtures are required for these tests."""
        pass
def _test_fd(self, F, B, D, delta, name, thresh):
fd = (F - B)/(2.0*delta)
diff = numpy.linalg.norm(fd - D)
self.assertTrue(
diff < thresh, "Difference in " + name + ": {}".format(diff))
def test_Be_active(self):
mol = gto.M(
verbose=0,
atom='Be 0 0 0',
basis='sto-3G')
m = scf.RHF(mol)
T = 0.02
mu = 0.0
beta = 1/T
delta = 1e-4
thresh = 1e-10
athresh = 1e-40
m.conv_tol = 1e-12
m.scf()
sys = SCFSystem(m, T, mu+delta, orbtype='u')
ea, eb = sys.u_energies_tot()
foa = ft_utils.ff(beta, ea, mu)
fva = ft_utils.ffv(beta, ea, mu)
fob = ft_utils.ff(beta, eb, mu)
fvb = ft_utils.ffv(beta, eb, mu)
focca = [x for x in foa if x > athresh]
fvira = [x for x in fva if x > athresh]
foccb = [x for x in fob if x > athresh]
fvirb = [x for x in fvb if x > athresh]
iocca = [i for i, x in enumerate(foa) if x > athresh]
ivira = [i for i, x in enumerate(fva) if x > athresh]
ioccb = [i for i, x in enumerate(fob) if x > athresh]
ivirb = [i for i, x in enumerate(fvb) if x > athresh]
Fa, Fb, Ia, Ib, Iabab = cc_utils.uft_active_integrals(
sys, ea, eb, focca, fvira,
foccb, fvirb, iocca, ivira, ioccb, ivirb)
Fga, Fgb, Iga, Igb, Igabab = cc_utils.uft_integrals(
sys, ea, eb, beta, mu)
# test Fock matrix
Foo = Fga.oo[numpy.ix_(iocca, iocca)]
Fov = Fga.oo[numpy.ix_(iocca, ivira)]
Fvo = Fga.oo[numpy.ix_(ivira, iocca)]
Fvv = Fga.oo[numpy.ix_(ivira, ivira)]
doo = numpy.linalg.norm(Fa.oo - Foo)/numpy.sqrt(Foo.size)
dov = numpy.linalg.norm(Fa.ov - Fov)/numpy.sqrt(Fov.size)
dvo = numpy.linalg.norm(Fa.vo - Fvo)/numpy.sqrt(Fvo.size)
dvv = numpy.linalg.norm(Fa.vv - Fvv)/numpy.sqrt(Fvv.size)
self.assertTrue(doo < thresh, "Error in Fooa: {}".format(doo))
self.assertTrue(doo < thresh, "Error in Fova: {}".format(dov))
self.assertTrue(doo < thresh, "Error in Fvoa: {}".format(dvo))
self.assertTrue(doo < thresh, "Error in Fvva: {}".format(dvv))
Foo = Fgb.oo[numpy.ix_(ioccb, ioccb)]
Fov = Fgb.oo[numpy.ix_(ioccb, ivirb)]
Fvo = Fgb.oo[numpy.ix_(ivirb, ioccb)]
Fvv = Fgb.oo[numpy.ix_(ivirb, ivirb)]
doo = numpy.linalg.norm(Fa.oo - Foo)/numpy.sqrt(Foo.size)
dov = numpy.linalg.norm(Fa.ov - Fov)/numpy.sqrt(Fov.size)
dvo = numpy.linalg.norm(Fa.vo - Fvo)/numpy.sqrt(Fvo.size)
dvv = numpy.linalg.norm(Fa.vv - Fvv)/numpy.sqrt(Fvv.size)
self.assertTrue(doo < thresh, "Error in Foob: {}".format(doo))
self.assertTrue(doo < thresh, "Error in Fovb: {}".format(dov))
self.assertTrue(doo < thresh, "Error in Fvob: {}".format(dvo))
self.assertTrue(doo < thresh, "Error in Fvvb: {}".format(dvv))
# test ERIs
Ivvvv = Iga.vvvv[numpy.ix_(ivira, ivira, ivira, ivira)]
Ivvvo = Iga.vvvo[numpy.ix_(ivira, ivira, ivira, iocca)]
Ivovv = Iga.vovv[numpy.ix_(ivira, iocca, ivira, ivira)]
Ivvoo = Iga.vvoo[numpy.ix_(ivira, ivira, iocca, iocca)]
Ioovv = Iga.oovv[numpy.ix_(iocca, iocca, ivira, ivira)]
Ivovo = Iga.vovo[numpy.ix_(ivira, iocca, ivira, iocca)]
Ivooo = Iga.vooo[numpy.ix_(ivira, iocca, iocca, iocca)]
Iooov = Iga.ooov[numpy.ix_(iocca, iocca, iocca, ivira)]
Ioooo = Iga.oooo[numpy.ix_(iocca, iocca, iocca, iocca)]
Dvvvv = numpy.linalg.norm(Ia.vvvv - Ivvvv)/numpy.sqrt(Ivvvv.size)
Dvvvo = numpy.linalg.norm(Ia.vvvo - Ivvvo)/numpy.sqrt(Ivvvo.size)
Dvovv = numpy.linalg.norm(Ia.vovv - Ivovv)/numpy.sqrt(Ivovv.size)
Dvvoo = numpy.linalg.norm(Ia.vvoo - Ivvoo)/numpy.sqrt(Ivvoo.size)
Doovv = numpy.linalg.norm(Ia.oovv - Ioovv)/numpy.sqrt(Ioovv.size)
Dvovo = numpy.linalg.norm(Ia.vovo - Ivovo)/numpy.sqrt(Ivovo.size)
Dvooo = numpy.linalg.norm(Ia.vooo - Ivooo)/numpy.sqrt(Ivooo.size)
Dooov = numpy.linalg.norm(Ia.ooov - Iooov)/numpy.sqrt(Iooov.size)
Doooo = numpy.linalg.norm(Ia.oooo - Ioooo)/numpy.sqrt(Ioooo.size)
self.assertTrue(Dvvvv < thresh, "Error in Ivvvva: {}".format(Dvvvv))
self.assertTrue(Dvvvo < thresh, "Error in Ivvvoa: {}".format(Dvvvo))
self.assertTrue(Dvovv < thresh, "Error in Ivovva: {}".format(Dvovv))
self.assertTrue(Dvvoo < thresh, "Error in Ivvooa: {}".format(Dvvoo))
self.assertTrue(Doovv < thresh, "Error in Ioovva: {}".format(Doovv))
self.assertTrue(Dvovo < thresh, "Error in Ivovoa: {}".format(Dvovo))
self.assertTrue(Dvooo < thresh, "Error in Ivoooa: {}".format(Dvooo))
self.assertTrue(Dooov < thresh, "Error in Iooova: {}".format(Dooov))
Ivvvv = Igb.vvvv[numpy.ix_(ivirb, ivirb, ivirb, ivirb)]
Ivvvo = Igb.vvvo[numpy.ix_(ivirb, ivirb, ivirb, ioccb)]
Ivovv = Igb.vovv[numpy.ix_(ivirb, ioccb, ivirb, ivirb)]
Ivvoo = Igb.vvoo[numpy.ix_(ivirb, ivirb, ioccb, ioccb)]
Ioovv = Igb.oovv[numpy.ix_(ioccb, ioccb, ivirb, ivirb)]
Ivovo = Igb.vovo[numpy.ix_(ivirb, ioccb, ivirb, ioccb)]
Ivooo = Igb.vooo[numpy.ix_(ivirb, ioccb, ioccb, ioccb)]
Iooov = Igb.ooov[numpy.ix_(ioccb, ioccb, ioccb, ivirb)]
Ioooo = Igb.oooo[numpy.ix_(ioccb, ioccb, ioccb, ioccb)]
Dvvvv = numpy.linalg.norm(Ib.vvvv - Ivvvv)/numpy.sqrt(Ivvvv.size)
Dvvvo = numpy.linalg.norm(Ib.vvvo - Ivvvo)/numpy.sqrt(Ivvvo.size)
Dvovv = numpy.linalg.norm(Ib.vovv - Ivovv)/numpy.sqrt(Ivovv.size)
Dvvoo = numpy.linalg.norm(Ib.vvoo - Ivvoo)/numpy.sqrt(Ivvoo.size)
Doovv = numpy.linalg.norm(Ib.oovv - Ioovv)/numpy.sqrt(Ioovv.size)
Dvovo = numpy.linalg.norm(Ib.vovo - Ivovo)/numpy.sqrt(Ivovo.size)
Dvooo = numpy.linalg.norm(Ib.vooo - Ivooo)/numpy.sqrt(Ivooo.size)
Dooov = numpy.linalg.norm(Ib.ooov - Iooov)/numpy.sqrt(Iooov.size)
Doooo = numpy.linalg.norm(Ib.oooo - Ioooo)/numpy.sqrt(Ioooo.size)
self.assertTrue(Dvvvv < thresh, "Error in Ivvvvb: {}".format(Dvvvv))
self.assertTrue(Dvvvo < thresh, "Error in Ivvvob: {}".format(Dvvvo))
self.assertTrue(Dvovv < thresh, "Error in Ivovvb: {}".format(Dvovv))
self.assertTrue(Dvvoo < thresh, "Error in Ivvoob: {}".format(Dvvoo))
self.assertTrue(Doovv < thresh, "Error in Ioovvb: {}".format(Doovv))
self.assertTrue(Dvovo < thresh, "Error in Ivovob: {}".format(Dvovo))
self.assertTrue(Dvooo < thresh, "Error in Ivooob: {}".format(Dvooo))
self.assertTrue(Dooov < thresh, "Error in Iooovb: {}".format(Dooov))
self.assertTrue(Doooo < thresh, "Error in Ioooob: {}".format(Doooo))
Ivvvv = Igabab.vvvv[numpy.ix_(ivira, ivirb, ivira, ivirb)]
Ivvvo = Igabab.vvvo[numpy.ix_(ivira, ivirb, ivira, ioccb)]
Ivvov = Igabab.vvov[numpy.ix_(ivira, ivirb, iocca, ivirb)]
Ivovv = Igabab.vovv[numpy.ix_(ivira, ioccb, ivira, ivirb)]
Iovvv = Igabab.ovvv[numpy.ix_(iocca, ivirb, ivira, ivirb)]
Ivvoo = Igabab.vvoo[numpy.ix_(ivira, ivirb, iocca, ioccb)]
Ioovv = Igabab.oovv[numpy.ix_(iocca, ioccb, ivira, ivirb)]
Ivovo = Igabab.vovo[numpy.ix_(ivira, ioccb, ivira, ioccb)]
Ivoov = Igabab.voov[numpy.ix_(ivira, ioccb, iocca, ivirb)]
Iovvo = Igabab.ovvo[numpy.ix_(iocca, ivirb, ivira, ioccb)]
Iovov = Igabab.ovov[numpy.ix_(iocca, ivirb, iocca, ivirb)]
Ivooo = Igabab.vooo[numpy.ix_(ivira, ioccb, iocca, ioccb)]
Iovoo = Igabab.ovoo[numpy.ix_(iocca, ivirb, iocca, ioccb)]
Ioovo = Igabab.oovo[numpy.ix_(iocca, ioccb, ivira, ioccb)]
Iooov = Igabab.ooov[numpy.ix_(iocca, ioccb, iocca, ivirb)]
Ioooo = Igabab.oooo[numpy.ix_(iocca, ioccb, iocca, ioccb)]
Dvvvv = numpy.linalg.norm(Iabab.vvvv - Ivvvv)/numpy.sqrt(Ivvvv.size)
Dvvvo = numpy.linalg.norm(Iabab.vvvo - Ivvvo)/numpy.sqrt(Ivvvo.size)
Dvvov = numpy.linalg.norm(Iabab.vvov - Ivvov)/numpy.sqrt(Ivvov.size)
Dvovv = numpy.linalg.norm(Iabab.vovv - Ivovv)/numpy.sqrt(Ivovv.size)
Dovvv = numpy.linalg.norm(Iabab.ovvv - Iovvv)/numpy.sqrt(Iovvv.size)
Dvvoo = numpy.linalg.norm(Iabab.vvoo - Ivvoo)/numpy.sqrt(Ivvoo.size)
Doovv = numpy.linalg.norm(Iabab.oovv - Ioovv)/numpy.sqrt(Ioovv.size)
Dvovo = numpy.linalg.norm(Iabab.vovo - Ivovo)/numpy.sqrt(Ivovo.size)
Dovvo = numpy.linalg.norm(Iabab.ovvo - Iovvo)/numpy.sqrt(Iovvo.size)
Dvoov = numpy.linalg.norm(Iabab.voov - Ivoov)/numpy.sqrt(Ivoov.size)
Dovov = numpy.linalg.norm(Iabab.ovov - Iovov)/numpy.sqrt(Iovov.size)
Dvooo = numpy.linalg.norm(Iabab.vooo - Ivooo)/numpy.sqrt(Ivooo.size)
Dovoo = numpy.linalg.norm(Iabab.ovoo - Iovoo)/numpy.sqrt(Iovoo.size)
Doovo = numpy.linalg.norm(Iabab.oovo - Ioovo)/numpy.sqrt(Ioovo.size)
Dooov = numpy.linalg.norm(Iabab.ooov - Iooov)/numpy.sqrt(Iooov.size)
Doooo = numpy.linalg.norm(Iabab.oooo - Ioooo)/numpy.sqrt(Ioooo.size)
self.assertTrue(Dvvvv < thresh, "Error in Ivvvvab: {}".format(Dvvvv))
self.assertTrue(Dvvvo < thresh, "Error in Ivvvoab: {}".format(Dvvvo))
self.assertTrue(Dvvov < thresh, "Error in Ivvovab: {}".format(Dvvov))
self.assertTrue(Dvovv < thresh, "Error in Ivovvab: {}".format(Dvovv))
self.assertTrue(Dovvv < thresh, "Error in Iovvvab: {}".format(Dovvv))
self.assertTrue(Dvvoo < thresh, "Error in Ivvooab: {}".format(Dvvoo))
self.assertTrue(Doovv < thresh, "Error in Ioovvab: {}".format(Doovv))
self.assertTrue(Dvovo < thresh, "Error in Ivovoab: {}".format(Dvovo))
self.assertTrue(Dovvo < thresh, "Error in Iovvoab: {}".format(Dovvo))
self.assertTrue(Dvoov < thresh, "Error in Ivoovab: {}".format(Dvoov))
self.assertTrue(Dovov < thresh, "Error in Iovovab: {}".format(Dovov))
self.assertTrue(Dvooo < thresh, "Error in Ivoooab: {}".format(Dvooo))
self.assertTrue(Dovoo < thresh, "Error in Iovooab: {}".format(Dovoo))
self.assertTrue(Doovo < thresh, "Error in Iooovab: {}".format(Doovo))
self.assertTrue(Dooov < thresh, "Error in Iooovab: {}".format(Dooov))
self.assertTrue(Doooo < thresh, "Error in Iooooab: {}".format(Doooo))
def test_Be_gen_deriv(self):
mol = gto.M(
verbose=0,
atom='Be 0 0 0',
basis='sto-3G')
m = scf.RHF(mol)
T = 1.0
mu = 0.0
beta = 1/T
delta = 1e-4
thresh = 1e-8
m.conv_tol = 1e-12
m.scf()
sys = SCFSystem(m, T, mu+delta, orbtype='g')
en = sys.g_energies_tot()
Ff, If = cc_utils.ft_integrals(sys, en, beta, mu + delta)
sys = SCFSystem(m, T, mu-delta, orbtype='g')
en = sys.g_energies_tot()
Fb, Ib = cc_utils.ft_integrals(sys, en, beta, mu - delta)
sys = SCFSystem(m, T, mu, orbtype='g')
en = sys.g_energies_tot()
fo = ft_utils.ff(beta, en, mu)
fv = ft_utils.ffv(beta, en, mu)
dvec = -beta*numpy.ones(en.shape)
dF, dI = cc_utils.ft_d_integrals(sys, en, fo, fv, dvec)
self._test_fd(Ff.oo, Fb.oo, dF.oo, delta, "Foo", thresh)
self._test_fd(Ff.ov, Fb.ov, dF.ov, delta, "Fov", thresh)
self._test_fd(Ff.vo, Fb.vo, dF.vo, delta, "Fvo", thresh)
self._test_fd(Ff.vv, Fb.vv, dF.vv, delta, "Fvv", thresh)
self._test_fd(If.vvvv, Ib.vvvv, dI.vvvv, delta, "Ivvvv", thresh)
self._test_fd(If.vvvo, Ib.vvvo, dI.vvvo, delta, "Ivvvo", thresh)
self._test_fd(If.vovv, Ib.vovv, dI.vovv, delta, "Ivovv", thresh)
self._test_fd(If.vvoo, Ib.vvoo, dI.vvoo, delta, "Ivvoo", thresh)
self._test_fd(If.vovo, Ib.vovo, dI.vovo, delta, "Ivovo", thresh)
self._test_fd(If.oovv, Ib.oovv, dI.oovv, delta, "Ioovv", thresh)
self._test_fd(If.vooo, Ib.vooo, dI.vooo, delta, "Ivooo", thresh)
self._test_fd(If.ooov, Ib.ooov, dI.ooov, delta, "Iooov", thresh)
self._test_fd(If.oooo, Ib.oooo, dI.oooo, delta, "Ioooo", thresh)
def test_Be_deriv(self):
mol = gto.M(
verbose=0,
atom='Be 0 0 0',
basis='sto-3G')
m = scf.RHF(mol)
T = 1.0
mu = 0.0
beta = 1/T
delta = 1e-4
thresh = 1e-8
m.conv_tol = 1e-12
m.scf()
sys = SCFSystem(m, T, mu+delta, orbtype='u')
ea, eb = sys.u_energies_tot()
Faf, Fbf, Iaf, Ibf, Iababf = cc_utils.uft_integrals(
sys, ea, eb, beta, mu + delta)
sys = SCFSystem(m, T, mu-delta, orbtype='u')
ea, eb = sys.u_energies_tot()
Fab, Fbb, Iab, Ibb, Iababb = cc_utils.uft_integrals(
sys, ea, eb, beta, mu - delta)
sys = SCFSystem(m, T, mu, orbtype='u')
ea, eb = sys.u_energies_tot()
foa = ft_utils.ff(beta, ea, mu)
fva = ft_utils.ffv(beta, ea, mu)
fob = ft_utils.ff(beta, eb, mu)
fvb = ft_utils.ffv(beta, eb, mu)
dveca = -beta*numpy.ones(ea.shape)
dvecb = -beta*numpy.ones(eb.shape)
dFa, dFb, dIa, dIb, dIabab = cc_utils.u_ft_d_integrals(
sys, | |
### Import required python modules
import logging
from gevent import monkey; monkey.patch_all()
import platform
import os
from os import listdir, stat, makedirs, mkdir, walk, remove, pardir
from os.path import isdir, isfile, join, splitext, getmtime, basename, normpath, exists, expanduser, split, dirname, getsize, abspath
import pandas as pd
import time
from time import strftime, localtime
import shutil
from shutil import copy2
from configparser import ConfigParser
import numpy as np
from collections import defaultdict
import subprocess
from websocket import create_connection
import socket
import errno
import re
import gevent
from blackfynn import Blackfynn
from blackfynn.log import get_logger
from blackfynn.api.agent import agent_cmd
from blackfynn.api.agent import AgentError, check_port, socket_address
from urllib.request import urlopen
import json
import collections
from threading import Thread
import pathlib
from openpyxl import load_workbook
from openpyxl import Workbook
from openpyxl.styles import PatternFill, Font
from docx import Document
from datetime import datetime, timezone
### Global variables
# --- curation progress state (polled by the front-end) ---
curateprogress = ' '        # human-readable progress message
curatestatus = ' '          # 'Done' when finished or on error
curateprintstatus = ' '     # 'Curating' while progress should be shown
total_dataset_size = 1      # bytes to generate (1 avoids divide-by-zero)
curated_dataset_size = 0    # bytes generated so far
start_time = 0
# --- Blackfynn configuration paths ---
userpath = expanduser("~")
configpath = join(userpath, '.blackfynn', 'config.ini')
# --- upload/submission progress state ---
submitdataprogress = ' '
submitdatastatus = ' '
submitprintstatus = ' '
total_file_size = 1
uploaded_file_size = 0
start_time_bf_upload = 0
start_submit = 0
# temporary folder where generated manifest files are staged
metadatapath = join(userpath, 'SODA', 'SODA_metadata')
bf = ""     # Blackfynn session handle (set on connect)
myds = ""   # selected Blackfynn dataset (set on selection)
initial_bfdataset_size = 0
upload_directly_to_bf = 0
initial_bfdataset_size_submit = 0
# characters not allowed in local folder names / Blackfynn dataset names
forbidden_characters = '<>:"/\|?*'
forbidden_characters_bf = '\/:*?"<>'
## these subsequent CheckLeafValue and traverseForLeafNodes functions check for the validity of file paths,
## and folder and file size
def checkLeafValue(leafName, leafNodeValue):
    """
    Validate a single leaf node of the dataset JSON structure.

    Args:
        leafName: name of the leaf shown in error messages (string)
        leafNodeValue: list [filepath] for a file leaf, or a dict
            (an empty dict denotes an empty folder)
    Returns:
        [True, size]: when the leaf is a valid, non-empty file; size is
            the file size in bytes (integer)
    Raises:
        Exception: when the file does not exist, is 0 KB, or the folder
            is empty; curatestatus is set to 'Done' so the front-end
            stops progress tracking
    """
    # Bugfix: curatestatus was previously assigned as a local, so the
    # 'Done' signal never reached the front-end polling the global.
    global curatestatus
    error, c = '', 0
    total_dataset_size = 1
    curatestatus = ''
    if isinstance(leafNodeValue, list):
        filePath = leafNodeValue[0]
        if exists(filePath):
            filePathSize = getsize(filePath)
            if filePathSize == 0:
                c += 1
                error = error + filePath + ' is 0 KB <br>'
            else:
                total_dataset_size += filePathSize
        else:
            c += 1
            # Bugfix: fixed typo 'doesn not' -> 'does not'.
            error = error + filePath + ' does not exist! <br>'
    elif isinstance(leafNodeValue, dict):
        c += 1
        error = error + leafName + ' is empty <br>'
    if c > 0:
        error = error + '<br>Please remove invalid files/folders from your dataset and try again'
        curatestatus = 'Done'
        raise Exception(error)
    else:
        return [True, total_dataset_size-1]
def traverseForLeafNodes(jsonStructure):
    """
    Walk the dataset JSON structure, validating every leaf (file) and
    accumulating the total dataset size.

    Args:
        jsonStructure: nested dict; a list value is a file leaf, a dict
            value is a (sub)folder
    Returns:
        total_dataset_size: accumulated size in bytes plus 1 (callers
            subtract 1, matching checkLeafValue's convention)
    Raises:
        Exception: propagated from checkLeafValue for invalid leaves
    """
    total_dataset_size = 1
    for key in jsonStructure:
        if isinstance(jsonStructure[key], list):
            returnedOutput = checkLeafValue(key, jsonStructure[key])
            if returnedOutput[0]:
                total_dataset_size += returnedOutput[1]
        else:
            if len(jsonStructure[key]) == 0:
                # empty folder: checkLeafValue raises with an error message
                returnedOutput = checkLeafValue(key, jsonStructure[key])
            else:
                # Bugfix: the recursive call's return value was previously
                # discarded, so files inside nested folders were never
                # counted toward the total size. Subtract the nested
                # call's +1 offset before accumulating.
                total_dataset_size += \
                    traverseForLeafNodes(jsonStructure[key]) - 1
    return total_dataset_size
######## CREATE FILES FOR CURATE_DATASET FUNCTION
def createFiles(jsonpath, fileKey, distdir, listallfiles):
    """
    Record a single (source, destination) copy operation for a file leaf.

    Args:
        jsonpath: dict whose fileKey entry is a list [source_path]
        fileKey: key naming the file in the JSON structure; this may be
            the user-chosen new name in SODA or the original basename
        distdir: destination path for the file
        listallfiles: list collecting [src, dst] pairs (mutated in place)
    """
    source_path = jsonpath[fileKey][0]
    listallfiles.append([source_path, distdir])
def ignore_empty_high_level_folders(jsonObject):
    """
    Drop top-level (SPARC) folders that contain no entries.

    Args:
        jsonObject: dict mapping folder name -> contents dict
            (mutated in place)
    Returns:
        jsonObject: the same dict with empty top-level folders removed
    """
    # Collect first, then delete: a dict cannot shrink while iterating.
    empty_folders = [name for name, contents in jsonObject.items()
                     if len(contents.keys()) == 0]
    for name in empty_folders:
        del jsonObject[name]
    return jsonObject
def generate_dataset_locally(destinationdataset, pathdataset, newdatasetname, jsonpath):
    """
    Associated with 'Generate' button in the 'Generate dataset' section of SODA interface
    Checks validity of files / paths / folders and then generates the files and folders
    as requested along with progress status

    Args:
        destinationdataset: type of destination dataset ('modify existing', 'create new', or 'upload to blackfynn')
        pathdataset: destination path of new dataset if created locally or name of blackfynn account (string)
        newdatasetname: name of the local dataset or name of the dataset on blackfynn (string)
        jsonpath: path of the files to be included in the dataset (dictionary)
    Raises:
        Exception: on an invalid destination folder/name or any
            missing/empty file in jsonpath; curatestatus is set to
            'Done' before raising so front-end polling stops
    """
    # These module-level globals form the progress protocol polled by
    # the front-end; they must be reset before each run.
    global curatestatus #set to 'Done' when completed or error to stop progress tracking from front-end
    global curateprogress #GUI messages shown to user to provide update on progress
    global curateprintstatus # If = "Curating" Progress messages are shown to user
    global total_dataset_size # total size of the dataset to be generated
    global curated_dataset_size # total size of the dataset generated (locally or on blackfynn) at a given time
    global start_time
    global bf
    global myds
    global upload_directly_to_bf
    global start_submit
    global initial_bfdataset_size
    curateprogress = ' '
    curatestatus = ''
    curateprintstatus = ' '
    # NOTE(review): 'error' and 'c' are initialized but never used in
    # this function — confirm they can be removed.
    error, c = '', 0
    curated_dataset_size = 0
    start_time = 0
    upload_directly_to_bf = 0
    start_submit = 0
    initial_bfdataset_size = 0
    # Drop empty top-level SPARC folders before validation/generation.
    jsonstructure_non_empty = ignore_empty_high_level_folders(jsonpath)
    # Validate the destination before doing any expensive work.
    if destinationdataset == 'create new':
        if not isdir(pathdataset):
            curatestatus = 'Done'
            raise Exception('Error: Please select a valid folder for new dataset')
        if not newdatasetname:
            curatestatus = 'Done'
            raise Exception('Error: Please enter a valid name for new dataset folder')
        if check_forbidden_characters(newdatasetname):
            curatestatus = 'Done'
            raise Exception('Error: A folder name cannot contain any of the following characters ' + forbidden_characters)
    total_dataset_size = 1
    # check if path in jsonpath are valid and calculate total dataset size
    total_dataset_size = traverseForLeafNodes(jsonstructure_non_empty)
    total_dataset_size = total_dataset_size - 1
    # CREATE NEW
    if destinationdataset == 'create new':
        try:
            listallfiles = []
            pathnewdatasetfolder = join(pathdataset, newdatasetname)
            # return_new_path / open_file / create_dataset are defined
            # elsewhere in this module.
            pathnewdatasetfolder = return_new_path(pathnewdatasetfolder)
            open_file(pathnewdatasetfolder)
            curateprogress = 'Started'
            curateprintstatus = 'Curating'
            start_time = time.time()
            start_submit = 1
            pathdataset = pathnewdatasetfolder
            mkdir(pathdataset)
            create_dataset(pathdataset, jsonstructure_non_empty, listallfiles)
            curateprogress = 'New dataset created'
            curateprogress = 'Success: COMPLETED!'
            curatestatus = 'Done'
        except Exception as e:
            # Always signal completion so the front-end stops polling.
            curatestatus = 'Done'
            raise e
def create_folder_level_manifest(jsonpath, jsondescription):
    """
    Function to create manifest files for each SPARC folder.
    Files are created in a temporary folder (the module-level
    metadatapath, which is wiped and recreated on each call).

    Args:
        jsonpath: all paths in json format with key being SPARC folder names (dictionary)
        jsondescription: description associated with each path (dictionary,
            keyed by '<folder>_description')
    Returns:
        jsonpath: the input dictionary with each folder's generated
            manifest.xlsx path appended
    Action:
        Creates manifest files in xlsx format for each SPARC folder
    """
    global total_dataset_size
    # NOTE(review): TZLOCAL is not defined in this file — presumably
    # imported/defined elsewhere in the module; confirm.
    local_timezone = TZLOCAL()
    try:
        datasetpath = metadatapath
        # Start from a clean staging folder.
        shutil.rmtree(datasetpath) if isdir(datasetpath) else 0
        makedirs(datasetpath)
        folders = list(jsonpath.keys())
        if 'main' in folders:
            folders.remove('main')
        # In each SPARC folder, generate a manifest file
        for folder in folders:
            if (jsonpath[folder] != []):
                # Initialize dataframe where manifest info will be stored
                df = pd.DataFrame(columns=['filename', 'timestamp', 'description',
                                  'file type', 'Additional Metadata'])
                # Get list of files/folders in the the folder
                # Remove manifest file from the list if already exists
                folderpath = join(datasetpath, folder)
                allfiles = jsonpath[folder]
                alldescription = jsondescription[folder + '_description']
                # NOTE(review): manifestexists is computed but never used.
                manifestexists = join(folderpath, 'manifest.xlsx')
                countpath = -1
                # NOTE(review): popping from allfiles while iterating it can
                # skip the element after a removed one — confirm inputs can
                # never contain two consecutive manifest entries.
                for pathname in allfiles:
                    countpath += 1
                    if basename(pathname) == 'manifest.csv' or basename(pathname) == 'manifest.xlsx':
                        allfiles.pop(countpath)
                        alldescription.pop(countpath)
                # Populate manifest dataframe
                filename, timestamp, filetype, filedescription = [], [], [], []
                countpath = -1
                for paths in allfiles:
                    if isdir(paths):
                        # Folder entry: walk it and record every contained file.
                        key = basename(paths)
                        # Folders consume one description entry without using it.
                        alldescription.pop(0)
                        for subdir, dirs, files in os.walk(paths):
                            for file in files:
                                # Yield to the gevent loop so the UI stays responsive.
                                gevent.sleep(0)
                                filepath = pathlib.Path(paths) / subdir / file
                                mtime = filepath.stat().st_mtime
                                lastmodtime = datetime.fromtimestamp(mtime).astimezone(local_timezone)
                                timestamp.append(lastmodtime.isoformat().replace('.', ',').replace('+00:00', 'Z'))
                                fullfilename = filepath.name
                                if folder == 'main': # if file in main folder
                                    # NOTE(review): folder == '' can never hold inside
                                    # this branch, so the 'main/' prefix is always
                                    # applied — confirm intended.
                                    filename.append(fullfilename) if folder == '' else filename.append(join(folder, fullfilename))
                                else:
                                    subdirname = os.path.relpath(subdir, paths) # gives relative path of the directory of the file w.r.t paths
                                    if subdirname == '.':
                                        filename.append(join(key, fullfilename))
                                    else:
                                        filename.append(join(key, subdirname, fullfilename))
                                fileextension = splitext(fullfilename)[1]
                                if not fileextension: # if empty (happens e.g. with Readme files)
                                    fileextension = 'None'
                                filetype.append(fileextension)
                                filedescription.append('')
                    else:
                        # Single-file entry.
                        gevent.sleep(0)
                        countpath += 1
                        filepath = pathlib.Path(paths)
                        file = filepath.name
                        filename.append(file)
                        mtime = filepath.stat().st_mtime
                        lastmodtime = datetime.fromtimestamp(mtime).astimezone(local_timezone)
                        timestamp.append(lastmodtime.isoformat().replace('.', ',').replace('+00:00', 'Z'))
                        filedescription.append(alldescription[countpath])
                        # NOTE(review): isdir(paths) is always False here (we are
                        # in the else of the isdir check above), so the 'folder'
                        # branch is dead code.
                        if isdir(paths):
                            filetype.append('folder')
                        else:
                            fileextension = splitext(file)[1]
                            if not fileextension: #if empty (happens e.g. with Readme files)
                                fileextension = 'None'
                            filetype.append(fileextension)
                df['filename'] = filename
                df['timestamp'] = timestamp
                df['file type'] = filetype
                df['description'] = filedescription
                makedirs(folderpath)
                # Save manifest as Excel sheet
                manifestfile = join(folderpath, 'manifest.xlsx')
                df.to_excel(manifestfile, index=None, header=True)
                # NOTE(review): path_size is not defined in this file —
                # presumably a module-level helper; confirm.
                total_dataset_size += path_size(manifestfile)
                jsonpath[folder].append(manifestfile)
        return jsonpath
    except Exception as e:
        raise e
def check_forbidden_characters(my_string):
    """
    Check for forbidden characters in a file/folder name.

    Args:
        my_string: string with characters (string)
    Returns:
        False: no forbidden character
        True: presence of forbidden character(s) or a backslash
    """
    pattern = re.compile('[' + forbidden_characters + ']')
    has_forbidden = pattern.search(my_string) is not None
    # The repr-formatting trick exposes backslashes as literal '\\'.
    has_backslash = "\\" in r"%r" % my_string
    return has_forbidden or has_backslash
def folder_size(path):
    """
    Provide the total size of the folder indicated by path.

    Args:
        path: path of the folder (string)
    Returns:
        total_size: total size of the folder in bytes (integer),
            including files in all nested subfolders
    """
    # Fixes: removed the unused 'start_path' local and stopped the walk
    # loop variable from shadowing the 'path' parameter.
    total_size = 0
    for dirpath, dirs, files in walk(path):
        for f in files:
            total_size += getsize(join(dirpath, f))
    return total_size
def open_file(file_path):
"""
Opening folder on all platforms
https://stackoverflow.com/questions/6631299/python-opening-a-folder-in-explorer-nautilus-mac-thingie
Args:
file_path: path of the folder (string)
Action:
Opens file explorer window to the given path
"""
try:
if | |
len(line.split()) == 1:
res = [str(i)+';' for i in sec_ids]
else:
if not (';' in line):
res = [str(i)+';'
for i in sec_ids
if (str(i)+';').startswith(text)]
return res
elif len(tokens) == 2:
first_tokens = tokens[0].split(' ') # 'vf 1' => ['vf', '1']
if len(first_tokens) == 2:
idx = int(first_tokens[1])
# Add SppVf of sec_id if it is not exist
if self.secondaries['vf'][idx] is None:
self.secondaries['vf'][idx] = vf.SppVf(
self.spp_ctl_cli, idx)
return self.secondaries['vf'][idx].complete(
self.spp_ctl_cli.get_sec_ids('vf'),
text, line, begidx, endidx)
def do_mirror(self, cmd):
"""Send a command to spp_mirror."""
# remove unwanted spaces to avoid invalid command error
tmparg = self.clean_cmd(cmd)
cmds = tmparg.split(';')
if len(cmds) < 2:
print("Required an ID and ';' before the command.")
elif str.isdigit(cmds[0]):
if self._is_sec_registered('mirror', int(cmds[0])):
self.secondaries['mirror'][int(cmds[0])].run(cmds[1])
else:
print('Invalid command: {}'.format(tmparg))
def help_mirror(self):
"""Print help message of mirror command."""
mirror.SppMirror.help()
    def complete_mirror(self, text, line, begidx, endidx):
        """Completion for mirror command.

        Before ';' is typed, offers "ID;" candidates (lazily creating the
        SppMirror wrapper for each known sec_id); once "mirror ID; ..." is
        typed, delegates to that secondary's own completer.  Implicitly
        returns None for malformed input or more than one ';'.
        """
        if self.use_cache is False:
            # Cache disabled: re-discover running SPP processes every time.
            self.init_spp_procs()
        line = self.clean_cmd(line)
        tokens = line.split(';')
        if len(tokens) == 1:
            # Add SppMirror of sec_id if it is not exist
            sec_ids = self.spp_ctl_cli.get_sec_ids('mirror')
            for idx in sec_ids:
                if self.secondaries['mirror'][idx] is None:
                    self.secondaries['mirror'][idx] = mirror.SppMirror(
                        self.spp_ctl_cli, idx)
            if len(line.split()) == 1:
                res = [str(i)+';' for i in sec_ids]
            else:
                # len(tokens) == 1 implies there is no ';' in line, so this
                # branch always assigns res before the return below.
                if not (';' in line):
                    res = [str(i)+';'
                           for i in sec_ids
                           if (str(i)+';').startswith(text)]
            return res
        elif len(tokens) == 2:
            # Split tokens like as from 'mirror 1' to ['mirror', '1']
            first_tokens = tokens[0].split(' ')
            if len(first_tokens) == 2:
                idx = int(first_tokens[1])
                # Add SppMirror of sec_id if it is not exist
                if self.secondaries['mirror'][idx] is None:
                    self.secondaries['mirror'][idx] = mirror.SppMirror(
                        self.spp_ctl_cli, idx)
                return self.secondaries['mirror'][idx].complete(
                    self.spp_ctl_cli.get_sec_ids('mirror'),
                    text, line, begidx, endidx)
def do_pcap(self, cmd):
"""Send a command to spp_pcap."""
# remove unwanted spaces to avoid invalid command error
tmparg = self.clean_cmd(cmd)
cmds = tmparg.split(';')
if len(cmds) < 2:
print("Required an ID and ';' before the command.")
elif str.isdigit(cmds[0]):
if self._is_sec_registered('pcap', int(cmds[0])):
self.secondaries['pcap'][int(cmds[0])].run(cmds[1])
else:
print('Invalid command: {}'.format(tmparg))
def help_pcap(self):
"""Print help message of pcap command."""
pcap.SppPcap.help()
    def complete_pcap(self, text, line, begidx, endidx):
        """Completion for pcap command.

        Before ';' is typed, offers "ID;" candidates (lazily creating the
        SppPcap wrapper for each known sec_id); once "pcap ID; ..." is
        typed, delegates to that secondary's own completer.  Implicitly
        returns None for malformed input or more than one ';'.
        """
        if self.use_cache is False:
            # Cache disabled: re-discover running SPP processes every time.
            self.init_spp_procs()
        line = self.clean_cmd(line)
        tokens = line.split(';')
        if len(tokens) == 1:
            # Add SppPcap of sec_id if it is not exist
            sec_ids = self.spp_ctl_cli.get_sec_ids('pcap')
            for idx in sec_ids:
                if self.secondaries['pcap'][idx] is None:
                    self.secondaries['pcap'][idx] = pcap.SppPcap(
                        self.spp_ctl_cli, idx)
            if len(line.split()) == 1:
                res = [str(i)+';' for i in sec_ids]
            else:
                # len(tokens) == 1 implies there is no ';' in line, so this
                # branch always assigns res before the return below.
                if not (';' in line):
                    res = [str(i)+';'
                           for i in sec_ids
                           if (str(i)+';').startswith(text)]
            return res
        elif len(tokens) == 2:
            # Split tokens like as from 'pcap 1' to ['pcap', '1']
            first_tokens = tokens[0].split(' ')
            if len(first_tokens) == 2:
                idx = int(first_tokens[1])
                # Add SppPcap of sec_id if it is not exist
                if self.secondaries['pcap'][idx] is None:
                    self.secondaries['pcap'][idx] = pcap.SppPcap(
                        self.spp_ctl_cli, idx)
                return self.secondaries['pcap'][idx].complete(
                    self.spp_ctl_cli.get_sec_ids('pcap'),
                    text, line, begidx, endidx)
def do_record(self, fname):
"""Save commands as a recipe file."""
if fname == '':
print("Record file is required!")
else:
self.recorded_file = open(fname, 'w')
def help_record(self):
"""Print help message of record command."""
print(help_msg.cmds['record'])
def complete_record(self, text, line, begidx, endidx):
return common.compl_common(text, line)
def do_playback(self, fname):
"""Setup a network configuration from recipe file."""
if fname == '':
print("Record file is required!")
else:
self.close()
try:
with open(fname) as recorded_file:
lines = []
for line in recorded_file:
if not common.is_comment_line(line):
lines.append("# %s" % line)
lines.append(line)
self.cmdqueue.extend(lines)
except IOError:
message = "Error: File does not exist."
print(message)
def help_playback(self):
"""Print help message of playback command."""
print(help_msg.cmds['playback'])
def complete_playback(self, text, line, begidx, endidx):
return common.compl_common(text, line)
    def do_config(self, args):
        """Show or update config.

        With no argument, dumps every entry; with a key, shows that entry
        (or all entries sharing the prefix); with "key value", validates
        and stores the value, then applies prompt/topo_size immediately.
        """
        tokens = args.strip().split(' ')
        if len(tokens) == 1:
            key = tokens[0]
            if key == '':
                # No argument: list every config entry.
                for k, v in self.cli_config.items():
                    print('- {}: "{}"\t# {}'.format(k, v['val'], v['desc']))
            elif key in self.cli_config.keys():
                print('- {}: "{}"\t# {}'.format(
                    key, self.cli_config[key]['val'],
                    self.cli_config[key]['desc']))
            else:
                # Not an exact key: treat it as a prefix filter.
                res = {}
                for k, v in self.cli_config.items():
                    if k.startswith(key):
                        res[k] = {'val': v['val'], 'desc': v['desc']}
                for k, v in res.items():
                    print('- {}: "{}"\t# {}'.format(k, v['val'], v['desc']))
        elif len(tokens) > 1:
            key = tokens[0]
            if key in self.cli_config.keys():
                # Strip quoting so `config prompt "spp > "` works.
                for s in ['"', "'"]:
                    args = args.replace(s, '')
                # Everything after "key " is the value (may contain spaces).
                val = args[(len(key) + 1):]
                if common.validate_config_val(key, val):
                    self.cli_config[key]['val'] = val
                    print('Set {k}: "{v}"'.format(k=key, v=val))
                else:
                    print('Invalid value "{v}" for "{k}"'.format(
                        k=key, v=val))
                # Command prompt should be updated immediately
                if key == 'prompt':
                    self.prompt = self.cli_config['prompt']['val']
                elif key == 'topo_size':
                    self.spp_topo.resize(
                        self.cli_config['topo_size']['val'])
def help_config(self):
"""Print help message of config command."""
print(help_msg.cmds['config'])
def complete_config(self, text, line, begidx, endidx):
candidates = []
tokens = line.strip().split(' ')
if len(tokens) == 1:
candidates = self.cli_config.keys()
elif len(tokens) == 2:
if text:
candidates = self.cli_config.keys()
if not text:
completions = candidates
else:
logger.debug(candidates)
completions = [p for p in candidates
if p.startswith(text)
]
return completions
def do_pwd(self, args):
"""Show corrent directory."""
print(os.getcwd())
def help_pwd(self):
"""Print help message of pwd command."""
print(help_msg.cmds['pwd'])
def do_ls(self, args):
"""Show a list of specified directory."""
if args == '' or os.path.isdir(args):
c = 'ls -F %s' % args
subprocess.call(c, shell=True)
else:
print("No such a directory.")
def help_ls(self):
"""Print help message of ls command."""
print(help_msg.cmds['ls'])
def complete_ls(self, text, line, begidx, endidx):
return common.compl_common(text, line)
def do_cd(self, args):
"""Change current directory."""
if os.path.isdir(args):
os.chdir(args)
print(os.getcwd())
else:
print("No such a directory.")
def help_cd(self):
"""Print help message of cd command."""
print(help_msg.cmds['cd'])
def complete_cd(self, text, line, begidx, endidx):
return common.compl_common(text, line, 'directory')
def do_mkdir(self, args):
"""Create a new directory."""
c = 'mkdir -p %s' % args
subprocess.call(c, shell=True)
def help_mkdir(self):
"""Print help message of mkdir command."""
print(help_msg.cmds['mkdir'])
def complete_mkdir(self, text, line, begidx, endidx):
return common.compl_common(text, line)
def do_bye(self, args):
"""Terminate SPP processes and controller."""
cmds = args.split(' ')
if cmds[0] == '': # terminate SPP CLI itself
self.do_exit('')
return True
else: # terminate other SPP processes
spp_bye = bye.SppBye()
spp_bye.run(args, self.primary, self.secondaries)
def help_bye(self):
"""Print help message of bye command."""
bye.SppBye.help()
def complete_bye(self, text, line, begidx, endidx):
"""Completion for bye commands"""
spp_bye = bye.SppBye()
return spp_bye.complete(text, line, begidx, endidx)
def do_cat(self, arg):
"""View contents of a file."""
if os.path.isfile(arg):
c = 'cat %s' % arg
subprocess.call(c, shell=True)
else:
print("No such a directory.")
def help_cat(self):
"""Print help message of cat command."""
print(help_msg.cmds['cat'])
def do_redo(self, args):
"""Execute command of index of history."""
idx = int(args)
cmdline = None
cnt = 1
try:
for line in open(self.hist_file):
if cnt == idx:
cmdline = line.strip()
break
cnt += 1
if cmdline.find('pri;') > -1:
cmdline = cmdline.replace(';', ' ;')
print(cmdline)
cmd_ary = cmdline.split(' ')
cmd = cmd_ary.pop(0)
cmd_options = ' '.join(cmd_ary)
eval('self.do_%s(cmd_options)' % cmd)
except IOError:
print('Error: Cannot open history file "%s"' % self.hist_file)
def help_redo(self):
"""Print help message of redo command."""
print(help_msg.cmds['redo'])
def do_history(self, arg):
"""Show command history."""
try:
f = open(self.hist_file)
# setup output format
nof_lines = len(f.readlines())
f.seek(0)
lines_digit = len(str(nof_lines))
hist_format = ' %' + str(lines_digit) + 'd %s'
cnt = 1
for line in f:
line_s = line.strip()
print(hist_format % (cnt, line_s))
cnt += 1
f.close()
except IOError:
print('Error: Cannot open history file "%s"' % self.hist_file)
def help_history(self):
"""Print help message of history command."""
print(help_msg.cmds['history'])
def complete_cat(self, text, line, begidx, endidx):
return common.compl_common(text, line)
def do_less(self, arg):
"""View contents of a file."""
if os.path.isfile(arg):
c = 'less %s' % arg
subprocess.call(c, shell=True)
else:
print("No such a directory.")
def help_less(self):
"""Print help message of less command."""
print(help_msg.cmds['less'])
def complete_less(self, text, line, begidx, endidx):
return common.compl_common(text, line)
    def do_exit(self, args):
        """Terminate SPP controller process.

        Releases resources via self.close() and returns True so that
        cmd.Cmd stops its command loop.
        """
        self.close()
        print('Thank you for using Soft Patch Panel')
        return True
def help_exit(self):
"""Print help message of exit command."""
print(help_msg.cmds['exit'])
def do_inspect(self, args):
"""Print attributes of Shell for debugging."""
from pprint import pprint
if args == '':
pprint(vars(self))
def help_inspect(self):
"""Print help message of inspect command."""
print(help_msg.cmds['inspect'])
def terms_topo_subgraph(self):
"""Define terms of topo_subgraph command."""
return ['add', 'del']
def do_topo_subgraph(self, args):
"""Edit subgarph for topo command."""
# logger.info("Topo initialized with sec IDs %s" % sec_ids)
# delimiter of node in dot file
delim_node = '_'
args_cleaned = re.sub(r"\s+", ' ', args).strip()
# Show subgraphs if given no argments
if (args_cleaned == ''):
if len(self.spp_topo.subgraphs) == 0:
print("No subgraph.")
else:
| |
<reponame>fronzbot/AI-Challenge
#!/usr/bin/env python
import math
import random
from collections import deque
import sys
from optparse import OptionParser
#direction information
cdirections = ['N', 'E', 'S', 'W']
directions = {'N': (-1,0), 'S': (1,0), 'E': (0,1), 'W': (0,-1)}
#game parameters
min_players = 2
max_players = 8
#functions
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple; defined as 0 when both inputs are 0."""
    if a == 0 and b == 0:
        return 0
    # NOTE: keeps '/' (not '//') -- this script is Python 2, where integer
    # division truncates; changing it would alter the result type.
    return abs(a*b)/gcd(a,b)
#map class
class Grid():
    #sets up a grid with valid parameters for tile symmetry
    def tile_symmetric_grid(self, no_players,
                            min_dimensions, max_dimensions,
                            min_starting_distance,
                            min_block_size, max_block_size):
        """Initialise an all-water grid with tile-symmetric hills.

        Stores the constraints, searches for compatible dimensions, then
        places the starting hills and opens a land block around h_loc.
        Returns True on success, False if no dimensions were found.
        """
        self.no_players = no_players
        self.min_dimensions = min_dimensions
        self.max_dimensions = max_dimensions
        self.min_starting_distance = min_starting_distance
        self.min_block_size = min_block_size
        self.max_block_size = max_block_size
        if not self.pick_tile_dimensions():
            return False
        # '%' is water; the whole map starts as water.
        self.squares = [ ['%' for c in range(self.cols)] for r in range(self.rows) ]
        self.add_starting_hills()
        a_block = self.make_block(self.h_loc, self.block_size)
        self.add_block_land(a_block)
        return True
    #sets up a grid with valid parameters for rotational symmetry
    def rotationally_symmetric_grid(self, no_players,
                                    min_dimensions, max_dimensions,
                                    min_starting_distance,
                                    min_block_size, max_block_size,
                                    r_sym_type):
        """Initialise an all-water grid with rotationally symmetric hills.

        Same flow as tile_symmetric_grid but additionally records the
        rotational symmetry type (r_sym_type) used by get_symmetric_loc.
        Returns True on success, False if no dimensions were found.
        """
        self.no_players = no_players
        self.min_dimensions = min_dimensions
        self.max_dimensions = max_dimensions
        self.r_sym_type = r_sym_type
        self.min_starting_distance = min_starting_distance
        self.min_block_size = min_block_size
        self.max_block_size = max_block_size
        if not self.pick_rotational_dimensions():
            return False
        # '%' is water; the whole map starts as water.
        self.squares = [ ['%' for c in range(self.cols)] for r in range(self.rows) ]
        self.add_starting_hills()
        a_block = self.make_block(self.h_loc, self.block_size)
        self.add_block_land(a_block)
        return True
    #picks valid dimensions for a tile symmetric grid
    def pick_tile_dimensions(self):
        """Randomly sample dimensions until a tile-symmetric layout fits.

        On success sets block_size, rows, cols, row_t/col_t (translation),
        no_players and h_loc, and returns True; returns False after
        200000 failed attempts.
        NOTE(review): relies on Python 2 integer '/' division (this script
        also uses Python 2 print statements in print_grid).
        """
        original_no_players = self.no_players
        for d_attempt in range(200000):
            self.block_size = random.randint(self.min_block_size, self.max_block_size)
            self.rows = random.randint(self.min_dimensions, self.max_dimensions)
            self.cols = random.randint(self.rows, self.max_dimensions)
            # Round rows/cols up to multiples of 2*block_size.
            self.rows += 2*self.block_size - self.rows%(2*self.block_size)
            self.cols += 2*self.block_size - self.cols%(2*self.block_size)
            self.row_t = random.randint(3, self.rows-3)
            self.col_t = random.randint(3, self.cols-3)
            if original_no_players == -1:
                # Player count not given: derive it from the translation.
                self.no_players = lcm(self.rows/gcd(self.row_t, self.rows),
                                      self.cols/gcd(self.col_t, self.cols))
            self.h_loc = self.random_loc()
            if self.rows <= self.max_dimensions and \
                self.cols <= self.max_dimensions and \
                self.no_players == lcm(self.rows/gcd(self.row_t, self.rows),
                self.cols/gcd(self.col_t, self.cols) ) and \
                self.no_players >= min_players and \
                self.no_players <= max_players and\
                self.rows/gcd(self.row_t, self.rows) == \
                self.cols/gcd(self.col_t, self.cols) and \
                self.row_t%(2*self.block_size) == 0 and \
                self.col_t%(2*self.block_size) == 0 and \
                self.is_valid_start():
                return True
        return False
    #picks valid dimensions for a rotationally symmetric grid
    def pick_rotational_dimensions(self):
        """Randomly sample dimensions for a rotationally symmetric map.

        Fills in no_players and r_sym_type when either was given as -1,
        then tries up to 100 dimension samples x 2*rows hill locations;
        returns True once a valid start is found, else False.
        """
        original_no_players = self.no_players
        original_r_sym_type = self.r_sym_type
        for d_attempt in range(100):
            #picks number of players if it is not given
            if original_no_players == -1:
                if original_r_sym_type > 3:
                    self.no_players = 2
                elif original_r_sym_type > 1:
                    self.no_players = 2**random.randint(1,2)
                else:
                    self.no_players = 2**random.randint(1,3)
            #picks a symmetry type if one is not given
            if original_r_sym_type == -1:
                if self.no_players == 2:
                    self.r_sym_type = random.randint(1, 5)
                elif self.no_players == 4:
                    self.r_sym_type = random.randint(1, 3)
                elif self.no_players == 8:
                    self.r_sym_type = 1;
            self.block_size = random.randint(self.min_block_size, self.max_block_size)
            self.rows = random.randint(self.min_dimensions, self.max_dimensions)
            self.cols = random.randint(self.rows, self.max_dimensions)
            # Round rows/cols up to multiples of 2*block_size.
            self.rows += 2*self.block_size - self.rows%(2*self.block_size)
            self.cols += 2*self.block_size - self.cols%(2*self.block_size)
            if (self.no_players == 2 and self.r_sym_type > 3) or \
               (self.no_players == 4 and self.r_sym_type > 1) or \
               self.no_players == 8:
                # Symmetries involving the transpose require a square map.
                self.cols = self.rows
            visited = [ [False for c in range(self.cols)] for r in range(self.rows)]
            for a_attempt in range(2*self.rows):
                # Sample an unvisited hill location for this dimension set.
                while True:
                    self.h_loc = self.random_loc()
                    if not visited[self.h_loc[0]][self.h_loc[1]]:
                        break
                visited[self.h_loc[0]][self.h_loc[1]] = True
                if self.rows <= self.max_dimensions and \
                   self.cols <= self.max_dimensions and \
                   self.is_valid_start():
                    return True
        return False
    #works out a list of locations that generates the set of locations under the given symmetry
    def generate_basis_information(self):
        """BFS over blocks to compute a symmetry basis of the map.

        Fills self.basis_locs (one representative per symmetry orbit) and
        the boolean masks self.is_basis_block / self.is_basis_loc; `visited`
        marks every symmetric image so each orbit is claimed exactly once.
        """
        self.basis_locs = []
        self.is_basis_block = [ [False for c in range(self.cols)] for r in range(self.rows)]
        self.is_basis_loc = [ [False for c in range(self.cols)] for r in range(self.rows)]
        visited = [ [False for c in range(self.cols)] for r in range(self.rows)]
        # Seed the search with the block anchored at the first hill.
        a_block = self.make_block(self.h_loc, self.block_size)
        queue = deque([a_block[0]])
        self.is_basis_block[a_block[0][0]][a_block[0][1]] = True
        for loc in a_block:
            self.is_basis_loc[loc[0]][loc[1]] = True
            self.basis_locs.append(loc)
            s_locs = self.get_symmetric_locs(loc)
            for s_loc in s_locs:
                visited[s_loc[0]][s_loc[1]] = True
        while queue:
            c_loc = queue.popleft()
            c_block = self.make_block(c_loc, self.block_size)
            for d in directions:
                n_block = self.get_adjacent_block(c_block, d)
                n_loc = n_block[0]
                if not visited[n_loc[0]][n_loc[1]]:
                    # Unclaimed orbit: this neighbouring block joins the basis.
                    queue.append(n_loc)
                    self.is_basis_block[n_loc[0]][n_loc[1]] = True
                    for loc in n_block:
                        self.is_basis_loc[loc[0]][loc[1]] = True
                        self.basis_locs.append(loc)
                        s_locs = self.get_symmetric_locs(loc)
                        for s_loc in s_locs:
                            visited[s_loc[0]][s_loc[1]] = True
#returns a list of directions in random order
def random_directions(self):
r_directions = []
t = random.randint(0, 3)
for i in range(len(directions)):
r_directions.append(cdirections[(i+t)%4])
return r_directions
#randomly picks a location inside the map
def random_loc(self):
return [random.randint(0, self.rows-1), random.randint(0, self.cols-1)]
#returns the new location after moving in a particular direction
def get_loc(self, loc, direction):
dr, dc = directions[direction]
return [(loc[0]+dr)%self.rows, (loc[1]+dc)%self.cols ]
#returns the new location after translating it by t_amount = [rt, ct]
def get_translate_loc(self, loc, t_amount):
return [(loc[0]+t_amount[0])%self.rows,
(loc[1]+t_amount[1])%self.cols ]
#returns a symmetrically equivalent location as specified by num
def get_symmetric_loc(self, loc, num):
if num == 1: #horizontal
return [loc[0], self.cols - loc[1]-1]
elif num == 2: #vertical
return [self.rows - loc[0]-1, loc[1]]
elif num == 3: #horizontal and vertial
return [self.rows - loc[0]-1, self.cols - loc[1]-1]
elif num == 4: #diagonal/transpose
return [loc[1], loc[0]]
elif num == 5: # horizontal then vertical then diagonal
return [self.rows - loc[1]-1, self.cols - loc[0]-1]
elif num == 6: # horizontal then diagonal
return [self.rows - loc[1]-1, loc[0]]
elif num == 7: # vertical then diagonal
return [loc[1], self.cols-loc[0]-1]
#returns a list of the symmetric locations for all players
def get_symmetric_locs(self, loc):
locs = [loc]
if self.symmetry == "tile":
n_loc = loc
for n in range(self.no_players-1):
n_loc = self.get_translate_loc(n_loc, [self.row_t, self.col_t])
locs.append(n_loc)
elif self.symmetry == "rotational":
if self.no_players == 2:
locs.append(self.get_symmetric_loc(loc, self.r_sym_type))
elif self.no_players == 4:
if self.r_sym_type == 1:
locs.append(self.get_symmetric_loc(loc, 1))
locs.append(self.get_symmetric_loc(loc, 2))
locs.append(self.get_symmetric_loc(loc, 3))
elif self.r_sym_type == 2:
locs.append(self.get_symmetric_loc(loc, 3))
locs.append(self.get_symmetric_loc(loc, 4))
locs.append(self.get_symmetric_loc(loc, 5))
elif self.r_sym_type == 3:
locs.append(self.get_symmetric_loc(loc, 3))
locs.append(self.get_symmetric_loc(loc, 6))
locs.append(self.get_symmetric_loc(loc, 7))
elif self.no_players == 8:
for n in range(self.no_players-1):
locs.append(self.get_symmetric_loc(loc, n+1))
return locs
#makes a block inside the map
def make_block(self, loc, block_size):
block = []
for row_t in range(block_size):
for col_t in range(block_size):
block.append(self.get_translate_loc(loc, [row_t, col_t]))
return block
#returns the new block after moving in a particular direction
def get_block(self, block, direction):
n_block = []
for loc in block:
n_block.append(self.get_loc(loc, direction))
return n_block
#returns the adjacent block in a given direction
def get_adjacent_block(self, block, direction):
for n in range(int(math.sqrt(len(block)))):
block = self.get_block(block, direction)
return block
#returns the euclidean distance (squared) between two squares
def dist(self, loc1, loc2):
d1 = abs(loc1[0] - loc2[0])
d2 = abs(loc1[1] - loc2[1])
dr = min(d1, self.rows - d1)
dc = min(d2, self.cols - d2)
return dr*dr + dc*dc
#checks whether the players start far enough apart
def is_valid_start(self):
h_locs = self.get_symmetric_locs(self.h_loc)
for n in range(self.no_players-1):
if self.dist(h_locs[0], h_locs[n+1]) < self.min_starting_distance:
return False
return True
#checks whether the hills start far enough apart
def is_valid_hill_loc(self, h_loc):
if self.squares[h_loc[0]][h_loc[1]] != '.':
return False
h_locs = self.get_symmetric_locs(h_loc)
for n in range(len(h_locs)-1):
if self.dist(h_locs[0], h_locs[n+1]) < self.min_starting_distance:
return False
for c_loc in self.h_locs:
if self.dist(c_loc, h_loc) < self.min_starting_distance:
return False
return True
#adds land information to the grid
def add_land(self, loc):
if self.squares[loc[0]][loc[1]] == '%':
self.squares[loc[0]][loc[1]] = '.'
#add land information for a block
def add_block_land(self, block):
for loc in block:
self.add_land(loc)
#adds ants to the map
def add_starting_hills(self):
h_locs = self.get_symmetric_locs(self.h_loc)
player = '0'
for n in range(self.no_players):
self.squares[h_locs[n][0]][h_locs[n][1]] = player
player = chr(ord(player)+1)
    #adds extra hills to the map
    def add_extra_hills(self):
        """Place self.no_hills-1 additional hills per player, symmetrically.

        Each extra hill is retried up to 100 times; if no valid location
        is found the method silently gives up (placing fewer hills).
        """
        self.h_locs = self.get_symmetric_locs(self.h_loc)
        for h in range(self.no_hills-1):
            for d_attempt in range(100):
                h_loc = self.random_loc()
                if self.is_valid_hill_loc(h_loc):
                    break
            # Ran out of attempts without a valid spot: stop adding hills.
            if not self.is_valid_hill_loc(h_loc):
                return
            player = '0'
            h_locs = self.get_symmetric_locs(h_loc)
            for n in range(self.no_players):
                self.squares[h_locs[n][0]][h_locs[n][1]] = player
                # Remember every placed hill for later distance checks.
                self.h_locs.append(h_locs[n])
                player = chr(ord(player)+1)
    #outputs the grid in the expected format
    def print_grid(self):
        """Write the map in the Ants engine format (rows/cols/players + 'm' lines)."""
        # NOTE: Python 2 print statements -- this script targets Python 2.
        print "rows", self.rows
        print "cols", self.cols
        print "players", self.no_players
        #self.print_food_spawn_info()
        for row in self.squares:
            print 'm', ''.join(row)
#adds land to a water map using backtracking "recursively"
def add_land_with_recursive_backtracking(self):
stack = []
c_loc = self.h_loc
c_block = self.make_block(c_loc, self.block_size)
visited = [ [False for c in range(self.cols)] for r in range(self.rows)]
while True:
visited[c_loc[0]][c_loc[1]] = True
neighbour_found = False
r_directions = self.random_directions()
for d in r_directions:
n_block = self.get_adjacent_block(c_block, d)
n_loc = n_block[0]
if not self.is_basis_block[n_loc[0]][n_loc[1]]: #can't carve here
continue
t_block = self.get_adjacent_block(n_block, d)
t_loc = t_block[0]
f_loc = t_block[0]
f_block = t_block
if not self.is_basis_block[t_loc[0]][t_loc[1]]:
f_loc = c_loc
f_block = self.make_block(c_loc, self.block_size)
if not visited[t_loc[0]][t_loc[1]]:
if self.is_basis_block[t_loc[0]][t_loc[1]]:
stack.append(c_loc)
self.add_block_land(n_block)
self.add_block_land(f_block)
elif random.randint(1,3) == 1:
self.add_block_land(n_block)
c_loc = f_loc
c_block | |
'\n') + '\n'
d['Editor-Extensions'] = [line.split('@')[0]
for line in output]
with open(f'{filepath}', 'w') as f:
f.writelines(lines)
if 'Pip-Packages' in d:
with open(f'{filepath}', 'r') as f:
lines = f.readlines()
for line in lines:
if '<pip:name>' in line or '<pip>' in line or '<python>' in line or '<python:name>' in line:
idx = lines.index(line)
proc = Popen('pip list'.split(),
stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, _ = proc.communicate()
output = output.decode().splitlines()[2:]
pip_packages = []
pip_packages.append(
[line.lower().split()[0] for line in output])
pip_packages = pip_packages[0]
lines[idx] = Config.get_repr_packages(
pip_packages, False) + '\n'
d['Pip-Packages'] = lines[idx].split('\n')
with open(f'{filepath}', 'w') as f:
f.writelines(lines)
if '<pip:name,version>' in line or '<python:name,version>' in line:
idx = lines.index(line)
proc = Popen('pip list'.split(),
stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, _ = proc.communicate()
output = output.decode().splitlines()[2:]
pip_packages = []
pip_packages.append(
[{line.lower().split()[0]: line.lower().split()[1]} for line in output])
pip_packages = pip_packages[0]
d['Pip-Packages'] = [line.lower().split()[0]
for line in output]
lines[idx] = Config.get_repr_packages(
pip_packages, True).replace('\n ', '\n') + '\n'
with open(f'{filepath}', 'w') as f:
f.writelines(lines)
if 'Node-Packages' in d:
with open(f'{filepath}', 'r') as f:
lines = f.readlines()
for line in lines:
if '<npm:name>' in line or '<npm>' in line or '<node:name>' in line or '<node>' in line:
idx = lines.index(line)
proc = Popen('npm list --global --depth=0'.split(),
stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
output, _ = proc.communicate()
output = output.decode().splitlines()[1:]
refined_output = []
for val in output:
if val:
refined_output.append(val.replace(
'+--', '').replace('`--', '').strip())
npm_packages = []
npm_packages.append(
[line.split('@')[0] for line in refined_output])
npm_packages = npm_packages[0]
new_packages = []
for package in npm_packages:
package = package.replace(
'UNMET PEER DEPENDENCY ', '')
new_packages.append(package)
lines[idx] = Config.get_repr_packages(
new_packages, False) + '\n'
d['Node-Packages'] = lines[idx].split('\n')
with open(f'{filepath}', 'w') as f:
f.writelines(lines)
if '<npm:name,version>' in line or '<node:name,version>' in line:
idx = lines.index(line)
proc = Popen('npm list --global --depth=0'.split(),
stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
output, _ = proc.communicate()
if not 'empty' in output.decode():
output = output.decode().splitlines()[1:]
refined_output = []
for val in output:
if val:
refined_output.append(val.replace(
'+--', '').replace('`--', '').strip())
npm_packages = []
npm_packages.append(
[{line.split('@')[0]: line.split('@')[1]} for line in refined_output])
npm_packages = npm_packages[0]
lines[idx] = Config.get_repr_packages(
npm_packages, True).replace('\n ', '\n') + '\n'
d['Node-Packages'] = [line.split('@')[0]
for line in refined_output]
with open(f'{filepath}', 'w') as f:
f.writelines(lines)
if signed:
with open(f'{filepath}', 'r') as f:
lines = f.readlines()
l = [line.strip() for line in lines]
if not '# --------------------Checksum Start-------------------------- #' in l or not '# --------------------Checksum End--------------------------- #' in l:
click.echo(click.style(
f'File Checksum Not Found! Run `electric sign {filepath}` ( Copied To Clipboard ) to sign your .electric configuration.', fg='red'))
copy_to_clipboard(f'electric sign {filepath}')
sys.exit()
if lines[-1] != '# --------------------Checksum End--------------------------- #':
click.echo(click.style(
'DataAfterChecksumError : Comments, Code And New lines Are Not Allowed After The Checksum End Header.', 'red'))
sys.exit()
if '# --------------------Checksum Start-------------------------- #' in l and '# --------------------Checksum End--------------------------- #' in l:
idx = 0
for item in l:
if item == '# --------------------Checksum Start-------------------------- #':
idx = list.index(l, item)
md5 = l[idx + 1].replace('#', '').strip()
sha256 = l[idx + 2].replace('#', '').strip()
# Generate Temporary Configuration File
with open(rf'{gettempdir()}\electric\configuration.electric', 'w+') as f:
f.writelines(lines[:-5])
md5_checksum = hashlib.md5(open(
rf'{gettempdir()}\electric\configuration.electric', 'rb').read()).hexdigest()
sha256_hash_checksum = hashlib.sha256()
with open(rf'{gettempdir()}\electric\configuration.electric', 'rb') as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b''):
sha256_hash_checksum.update(byte_block)
sha256_checksum = sha256_hash_checksum.hexdigest()
if md5 == md5_checksum and sha256 == sha256_checksum:
click.echo(click.style(
'Hashes Match!', 'bright_green'))
else:
click.echo(click.style(
'Hashes Don\'t Match!', 'red'))
os.remove(
rf'{gettempdir()}\electric\configuration.electric')
exit(1)
os.remove(
rf'{gettempdir()}\electric\configuration.electric')
except FileNotFoundError:
click.echo(click.style(
f'Could Not Find {Fore.LIGHTCYAN_EX}{filepath}{Fore.RESET}.', fg='red'), err=True)
time.sleep(2)
sys.exit()
d.pop('')
return Config(d)
    def verify(self): # sourcery no-metrics
        """Validate every package/extension named in the configuration.

        Checks electric packages (via `electric show`), npm/node modules,
        pip modules and editor extensions; prints a red error and exits
        the process on the first entry that cannot be resolved.
        """
        config = self.dictionary
        # Each section may be absent; None disables its validation pass.
        python_packages = config['Pip-Packages'] if 'Pip-Packages' in self.headers else None
        node_packages = config['Node-Packages'] if 'Node-Packages' in self.headers else None
        editor_extensions = config['Editor-Extensions'] if 'Editor-Extensions' in self.headers else None
        packages = config['Packages'] if 'Packages' in self.headers else None
        editor_type = config['Editor-Configuration'][0]['Editor'] if 'Editor-Configuration' in self.headers else None
        if packages:
            click.echo(click.style(
                '↓ Validating Electric Packages ↓', 'cyan'))
            for package in packages:
                # Entries are either {name: version} dicts or bare names.
                if isinstance(package, dict):
                    if package:
                        proc = Popen(
                            rf'electric show {list(package.keys())[0]}', stdin=PIPE, stdout=PIPE, stderr=PIPE)
                        output, err = proc.communicate()
                        err = 'UnicodeEncodeError' in err.decode()
                        if 'Could Not Find Any Packages' in output.decode() or 'Not Found.' in output.decode() or err:
                            click.echo(click.style(
                                f'`{list(package.keys())[0]}` does not exist or has been removed.', 'red'))
                            sys.exit()
                else:
                    if package:
                        proc = Popen(
                            rf'electric show {package}', stdin=PIPE, stdout=PIPE, stderr=PIPE)
                        output, err = proc.communicate()
                        err = 'UnicodeEncodeError' in err.decode()
                        if 'Could Not Find Any Packages' in output.decode() or 'Not Found.' in output.decode() or err:
                            click.echo(click.style(
                                f'`{package}` does not exist or has been removed.', 'red'))
                            sys.exit()
        if node_packages:
            click.echo(click.style(
                '↓ Validating Node or Npm Modules ↓', 'cyan'))
            for package_name in node_packages:
                if isinstance(package_name, dict):
                    if package_name:
                        if not Config.check_node_name(list(package_name.keys())[0]):
                            click.echo(click.style(
                                f'The ( npm | node ) module => `{list(package_name.keys())[0]}` does not exist or has been removed.', 'red'))
                            sys.exit()
                else:
                    if package_name:
                        if not Config.check_node_name(package_name):
                            click.echo(click.style(
                                f'The ( npm | node ) module => `{package_name}` does not exist or has been removed.', 'red'))
                            sys.exit()
        # NOTE(review): this header is printed even when there are no pip
        # packages, unlike the other sections -- confirm if intentional.
        click.echo(click.style(
            '↓ Validating Python or Pip Modules ↓', 'cyan'))
        if python_packages:
            for package in python_packages:
                if isinstance(package, dict):
                    if package:
                        if not Config.check_pypi_name(list(package.keys())[0]):
                            # NOTE(review): message says "( npm | node )" in the
                            # pip section, and `package.keys()[0]` raises
                            # TypeError on Python 3 (dict_keys is not
                            # subscriptable) when this branch fires.
                            click.echo(click.style(
                                f'The ( npm | node ) module => `{list(package.keys()[0])}` does not exist or has been removed.', 'red'))
                            sys.exit()
                else:
                    if package:
                        if not Config.check_pypi_name(package):
                            click.echo(click.style(
                                f'The ( python | pip ) module => `{package}` does not exist or has been removed.', 'red'))
                            sys.exit()
        if editor_type:
            if not editor_type in ['Visual Studio Code', 'Visual Studio Code Insiders', 'Atom', 'Sublime Text 3']:
                click.echo(click.style(
                    f'{editor_type} is not supported by electric yet!', 'red'))
            else:
                if editor_extensions:
                    click.echo(click.style(
                        '↓ Validating Editor Extensions ↓', 'cyan'))
                    if editor_type == 'Visual Studio Code' or editor_type == 'Visual Studio Code Insiders':
                        for package_name in editor_extensions:
                            if isinstance(package_name, dict):
                                if package_name:
                                    if not Config.check_vscode_name(list(package_name.keys())[0]):
                                        click.echo(click.style(
                                            f'Invalid Extension Name => {list(package_name.keys())[0]}', 'red'))
                                        sys.exit()
                            else:
                                if package_name:
                                    if not Config.check_vscode_name(package_name):
                                        click.echo(click.style(
                                            f'Invalid Extension Name => {package_name}', 'red'))
                                        sys.exit()
                    if editor_type == 'Sublime Text 3':
                        for package_name in editor_extensions:
                            if isinstance(package_name, dict):
                                if package_name:
                                    if not Config.check_sublime_name(list(package_name.keys())[0]):
                                        click.echo(click.style(
                                            f'Invalid Extension Name => {list(package_name.keys())[0]}', 'red'))
                                        sys.exit()
                            else:
                                if package_name:
                                    if not Config.check_sublime_name(package_name):
                                        click.echo(click.style(
                                            f'Invalid Extension Name => {package_name}', 'red'))
                                        # NOTE(review): unlike every sibling
                                        # branch there is no sys.exit() here --
                                        # verification continues after an
                                        # invalid Sublime extension name.
                    if editor_type == 'Atom':
                        for package_name in editor_extensions:
                            if isinstance(package_name, dict):
                                if package_name:
                                    if not Config.check_atom_name(list(package_name.keys())[0]):
                                        click.echo(click.style(
                                            f'Invalid Extension Name => {list(package_name.keys())[0]}', 'red'))
                                        sys.exit()
                            else:
                                if package_name:
                                    if not Config.check_atom_name(package_name):
                                        click.echo(click.style(
                                            f'Invalid Extension Name => {package_name}', 'red'))
def install(self, include_versions: bool, install_directory: str, metadata: Metadata):
    """Install every package listed in the loaded configuration.

    Shells out to the ``electric install`` CLI (via ``os.system``) for
    regular packages, pip packages and editor extensions.  Does nothing
    unless running with admin rights.

    Args:
        include_versions: If True, pass each package's pinned version along.
        install_directory: Target directory, forwarded via install flags.
        metadata: CLI metadata forwarded to ``get_install_flags``.
    """
    if is_admin():
        flags = get_install_flags(install_directory, metadata)
        config = self.dictionary
        # Each configuration section is optional; missing headers yield None.
        python_packages = config['Pip-Packages'] if 'Pip-Packages' in self.headers else None
        # NOTE(review): node_packages is read but never used in the visible code.
        node_packages = config['Node-Packages'] if 'Node-Packages' in self.headers else None
        editor_extensions = config['Editor-Extensions'] if 'Editor-Extensions' in self.headers else None
        packages = config['Packages'] if 'Packages' in self.headers else None
        editor_type = config['Editor-Configuration'][0]['Editor'] if 'Editor-Configuration' in self.headers else None
        command = ''
        pip_command = ''
        idx = 1
        if not include_versions:
            # Build a comma-separated list of package names; the sentinel
            # (idx == len(packages)) skips the trailing comma on the last one.
            for package in packages:
                if idx == len(packages):
                    command += list(package.keys())[0]
                    idx += 1
                    continue
                command += list(package.keys())[0] + ','
                idx += 1
            # NOTE(review): flags are appended after the final package name, so
            # after split(',') they only travel with the LAST install command.
            for flag in flags:
                command += ' ' + flag
            for pkg in command.split(','):
                if pkg:
                    os.system(f'electric install {pkg}')
        else:
            # Versioned installs: one command per package; None/'latest'
            # fall back to an unpinned install.
            for package in packages:
                if package:
                    if list(package.values())[0] is None or list(package.values())[0] == 'latest':
                        os.system(f'electric install {list(package.keys())[0]}')
                    else:
                        os.system(f'electric install {list(package.keys())[0]} --version {list(package.values())[0]}')
        if python_packages:
            package_versions = []
            package_names = []
            # NOTE(review): ``idx`` is NOT reset after the loop above, and the
            # sentinel compares against len(packages) instead of
            # len(python_packages) — the trailing-comma detection here is broken.
            for package in python_packages:
                if idx == len(packages):
                    package_versions.append(package[list(package.keys())[0]])
                    package_names.append(list(package.keys())[0])
                    pip_command += list(package.keys())[0]
                    idx += 1
                    continue
                package_versions.append(package[list(package.keys())[0]])
                package_names.append(list(package.keys())[0])
                pip_command += list(package.keys())[0] + ','
                idx += 1
            os.system('refreshenv')
            idx = 0
            if include_versions and package_versions:
                for package_name in package_names:
                    os.system(
                        f'electric install --python {package_name} --version {package_versions[idx]}')
                    idx += 1
            else:
                if include_versions:
                    print('No Versions Specified With This Configuration!')
                    sys.exit()
                for package_name in package_names:
                    os.system(f'electric install --python {package_name}')
                    idx += 1
        # NOTE(review): due to precedence this parses as
        # (VS Code) or ((Insiders) and (extensions != [])) — likely intended
        # (VS Code or Insiders) and extensions != [].
        if editor_type == 'Visual Studio Code' or editor_type == 'Visual Studio Code Insiders' and editor_extensions != []:
            editor_extensions = config['Editor-Extensions'] if 'Editor-Extensions' in self.headers else None
            package_versions = []
            if editor_extensions:
                for ext in editor_extensions:
                    if not isinstance(ext, list):
                        extension = list(ext.keys())[0]
                        version = list(ext.values())[0]
                        # NOTE(review): block is truncated here in this view;
                        # ``version`` is assigned but its use is not visible.
                        command = f'electric install --vscode {extension}'
| |
1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1],
[1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, | |
n_group = R_intra_unique.size()[0]
# R_intra_unique.unsqueeze_(1)
# R_intra = []
# for i in range(n_group-1, -1, -1):
# R_intra.append(R_intra_unique[i])
# R_intra = torch.cat(R_intra, dim=0)
# R_intra = self.normalize(R_intra, dim=1).cuda()
# curr_seq_graph_state_in = torch.matmul(R_intra, torch.squeeze(curr_seq_graph_intra, dim=0))
# curr_seq_graph_state_in = torch.unsqueeze(curr_seq_graph_state_in, 0)
# M_inter = torch.ones((n_group, n_group), device=end_group.device).bool()
# A_inter = self.normalize(M_inter, dim=1).cuda()
# curr_seq_graph_out = self.gat_inter(curr_seq_graph_state_in, A_inter)
# curr_seq_graph_inter = torch.matmul(R_intra.T, torch.squeeze(curr_seq_graph_out, dim=0))
# curr_seq_graph_inter = torch.unsqueeze(curr_seq_graph_inter, 0)
# curr_gat_state = torch.cat([curr_seq_graph_intra, curr_seq_graph_inter],dim=2)
# curr_gat_state = torch.squeeze(curr_gat_state, dim=0)
# curr_gat_state = self.out_embedding(curr_gat_state)
# curr_gat_state = torch.unsqueeze(curr_gat_state, 0)
# graph_embeded_data.append(curr_gat_state)
# graph_embeded_data = torch.cat(graph_embeded_data, dim=1)
# return graph_embeded_data
class PoolHiddenNet(nn.Module):
    """Pooling module as proposed in our paper.

    For each pedestrian in a scene, pools the encoder hidden states of all
    pedestrians in that scene — keyed by relative end positions — through an
    MLP followed by a max over neighbours, yielding one fixed-size social
    feature per pedestrian.
    """

    def __init__(
        self, embedding_dim=64, h_dim=64, mlp_dim=1024, bottleneck_dim=1024,
        activation='relu', batch_norm=True, dropout=0.0
    ):
        super(PoolHiddenNet, self).__init__()

        # BUGFIX: the original hard-coded ``self.mlp_dim = 1024`` and silently
        # ignored the ``mlp_dim`` argument; store the argument instead.
        self.mlp_dim = mlp_dim
        self.h_dim = h_dim
        self.bottleneck_dim = bottleneck_dim
        self.embedding_dim = embedding_dim

        mlp_pre_dim = embedding_dim + h_dim
        mlp_pre_pool_dims = [mlp_pre_dim, 512, bottleneck_dim]

        # Embeds a relative (x, y) displacement into ``embedding_dim`` features.
        self.spatial_embedding = nn.Linear(2, embedding_dim)
        # MLP applied to each (relative-position embedding, hidden state) pair
        # before the max-pool over neighbours.
        self.mlp_pre_pool = make_mlp(
            mlp_pre_pool_dims,
            activation=activation,
            batch_norm=batch_norm,
            dropout=dropout)

    def repeat(self, tensor, num_reps):
        """
        Inputs:
        -tensor: 2D tensor of any shape
        -num_reps: Number of times to repeat each row
        Outputs:
        -repeat_tensor: Repeat each row such that: R1, R1, R2, R2
        """
        col_len = tensor.size(1)
        tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
        tensor = tensor.view(-1, col_len)
        return tensor

    def forward(self, h_states, seq_start_end, end_pos):
        """
        Inputs:
        - h_states: Tensor of shape (num_layers, batch, h_dim), i.e. the
          encoder's final hidden state.
        - seq_start_end: A list of tuples which delimit sequences within batch
        - end_pos: Tensor of shape (batch, 2)
        Output:
        - pool_h: Tensor of shape (batch, bottleneck_dim)
        """
        pool_h = []
        for _, (start, end) in enumerate(seq_start_end):
            start = start.item()
            end = end.item()
            num_ped = end - start
            # Hidden states for this sequence's pedestrians: (N, h_dim).
            curr_hidden = h_states.view(-1, self.h_dim)[start:end]
            # Repeat hidden states -> H1, H2, H1, H2: (N*N, h_dim).
            curr_hidden_1 = curr_hidden.repeat(num_ped, 1)
            # Repeat positions -> P1, P2, P1, P2.
            curr_end_pos = end_pos[start:end]
            curr_end_pos_1 = curr_end_pos.repeat(num_ped, 1)
            # Repeat positions -> P1, P1, P2, P2.
            curr_end_pos_2 = self.repeat(curr_end_pos, num_ped)
            # Pairwise relative positions: (N*N, 2).
            curr_rel_pos = curr_end_pos_1 - curr_end_pos_2
            # Embed displacements: (N*N, embedding_dim).
            curr_rel_embedding = self.spatial_embedding(curr_rel_pos)
            # Concatenate and run the pre-pool MLP: (N*N, bottleneck_dim).
            mlp_h_input = torch.cat([curr_rel_embedding, curr_hidden_1], dim=1)
            curr_pool_h = self.mlp_pre_pool(mlp_h_input)
            # Max over each pedestrian's N neighbours: (N, bottleneck_dim).
            curr_pool_h = curr_pool_h.view(num_ped, num_ped, -1).max(1)[0]
            pool_h.append(curr_pool_h)
        # One pooled tensor Pi per person: (batch, bottleneck_dim).
        pool_h = torch.cat(pool_h, dim=0)
        return pool_h
class GCN(nn.Module):
    """GCN module.

    A stack of ``gcn_layers`` graph-convolution layers, each computing
    ``H' = relu(A @ H @ W_i)`` for a (normalized) adjacency matrix ``A``.
    Layer shapes run input_dim -> hidden_dim -> ... -> out_dim.
    """

    def __init__(self, input_dim=48, hidden_dim=72, out_dim=8, gcn_layers=2):
        super(GCN, self).__init__()
        self.X_dim = input_dim
        self.hidden_dim = hidden_dim
        self.out_dim = out_dim
        self.gcn_layers = gcn_layers
        # One randomly-initialized weight matrix per layer.
        self.W = torch.nn.ParameterList()
        for layer_idx in range(self.gcn_layers):
            if layer_idx == 0:
                shape = (self.X_dim, self.hidden_dim)
            elif layer_idx == self.gcn_layers - 1:
                shape = (self.hidden_dim, self.out_dim)
            else:
                shape = (self.hidden_dim, self.hidden_dim)
            self.W.append(nn.Parameter(torch.randn(*shape)))

    def forward(self, A, X):
        """Propagate node features ``X`` through the stack given adjacency ``A``."""
        H = X
        for weight in self.W:
            H = F.relu(A.matmul(H).matmul(weight))
        return H
class GCNModule(nn.Module):
    """Group information aggregation with GCN layers.

    Runs an intra-group GCN over pedestrians, pools pedestrians into groups,
    runs an inter-group GCN over groups, un-pools back to pedestrians, and
    projects the concatenated intra/inter features to ``final_dim``.
    """
    def __init__(
        self, input_dim=40, hidden_dim=72, out_dim=16, gcn_layers=2, final_dim=24
    ):
        super(GCNModule, self).__init__()
        # Intra-group GCN: input_dim -> hidden_dim -> out_dim.
        self.gcn_intra = GCN(
            input_dim=input_dim,
            hidden_dim=hidden_dim,
            out_dim=out_dim,
            gcn_layers=gcn_layers)
        # Inter-group GCN: 16 -> hidden_dim -> out_dim.
        # NOTE(review): input_dim is hard-coded to 16 here; presumably it must
        # equal ``out_dim`` of gcn_intra — confirm before changing out_dim.
        self.gcn_inter = GCN(
            input_dim=16,
            hidden_dim=hidden_dim,
            out_dim=out_dim,
            gcn_layers=gcn_layers)
        # Final linear projection: (out_dim * 2) -> final_dim.
        self.out_embedding = nn.Linear(out_dim*2, final_dim)
    def normalize(self, adj, dim):
        """Row-normalize ``adj`` so each slice along ``dim`` sums to 1."""
        # NOTE(review): N is computed but never used.
        N = adj.size()
        adj2 = torch.sum(adj, dim)  # sum along ``dim`` (per row for dim=1)
        norm = adj2.unsqueeze(1).float()  # expand to a column for broadcasting
        norm = norm.pow(-1)  # reciprocal; rows summing to 0 would yield inf
        norm_adj = adj.mul(norm)  # element-wise scale each row
        return norm_adj
    def repeat(self, tensor, num_reps):
        """
        Inputs:
        -tensor: 2D tensor of any shape
        -num_reps: Number of times to repeat each row
        Outputs:
        -repeat_tensor: Repeat each row such that: R1, R1, R2, R2
        """
        col_len = tensor.size(1)
        tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
        tensor = tensor.view(-1, col_len)
        return tensor
    def forward(self, h_states, seq_start_end, end_pos, end_group):
        """
        Inputs:
        - h_states: Tensor of shape (batch, h_dim), i.e. the output of the
          encoder plus pooling net.
        - seq_start_end: A list of tuples which delimit sequences within batch
        - end_pos: Tensor of shape (batch, 2)
        - end_group: group labels at the last time step (t_obs); shape: (batch, 1)
        Output:
        - gcn_aggre: Tensor of shape (batch, bottleneck_dim)
        """
        gcn_aggre = []
        for _, (start, end) in enumerate(seq_start_end):
            start = start.item()
            end = end.item()
            num_ped = end - start  # num_ped: number of pedestrians in the scene
            # Per-pedestrian features for this scene: (N, input_dim).
            curr_state = h_states[start:end]
            # Group labels for this scene only; labels distinguish groups
            # within a single observation-prediction window.
            curr_end_group = end_group[start:end]
            # Build the intra-group (coherency) adjacency, shape (N, N):
            # pedestrian i and j are connected when they share a non-zero
            # group label; the identity ensures self-connections.
            eye_mtx = torch.eye(num_ped, device=end_group.device).bool()
            A_g = curr_end_group.repeat(1, num_ped)
            B_g = curr_end_group.transpose(1, 0).repeat(num_ped, 1)
            # M_intra: (N, N) boolean adjacency.
            M_intra = (A_g == B_g) & (A_g != 0) | eye_mtx
            # Row-normalized adjacency.
            # NOTE(review): .cuda() hard-codes GPU execution — confirm CPU-only
            # runs are not required.
            A_intra = self.normalize(M_intra, dim=1).cuda()
            """gcn_intra"""
            # Intra-group message passing: (N, out_dim).
            curr_gcn_state_intra = self.gcn_intra(A_intra, curr_state)
            """GPool =================================================================="""
            # Unique rows of M_intra act as group membership indicators:
            # R_intra_unique has one row per group, shape (M, N).
            R_intra_unique = torch.unique(M_intra, sorted=False, dim=0)
            # Number of groups.
            n_group = R_intra_unique.size()[0]
            R_intra_unique.unsqueeze_(1)  # add a middle dimension, in place
            # Reverse the row order of R_intra_unique (bottom-to-top).
            R_intra = []
            for i in range(n_group-1, -1, -1):
                R_intra.append(R_intra_unique[i])
            R_intra = torch.cat(R_intra, dim=0)
            # Normalize so each group row averages over its members.
            R_intra = self.normalize(R_intra, dim=1).cuda()
            # Pool pedestrians into groups: (M, N) @ (N, out_dim) -> (M, out_dim).
            curr_gcn_group_state_in = torch.matmul(R_intra, curr_gcn_state_intra)
            """=========================================================================="""
            """gcn_inter"""
            # Fully-connected group-level adjacency: (M, M).
            M_inter = torch.ones((n_group, n_group), device=end_group.device).bool()
            # Row-normalize.
            A_inter = self.normalize(M_inter, dim=1).cuda()
            # Inter-group message passing: (M, out_dim).
            curr_gcn_group_state_out = self.gcn_inter(A_inter, curr_gcn_group_state_in)
            """GUnpool================================================================="""
            # Un-pool back to pedestrians: (N, M) @ (M, out_dim) -> (N, out_dim).
            curr_gcn_state_inter = torch.matmul(R_intra.T, curr_gcn_group_state_out)
            """========================================================================="""
            # Concatenate intra and inter features: (N, out_dim * 2).
            curr_gcn_state = torch.cat([curr_gcn_state_intra, curr_gcn_state_inter], dim=1)
            # Project to (N, final_dim).
            curr_gcn_state = self.out_embedding(curr_gcn_state)
            gcn_aggre.append(curr_gcn_state)
        # Stack all scenes: (batch, final_dim).
        gcn_aggre = torch.cat(gcn_aggre, dim=0)
        return gcn_aggre
class TrajectoryGenerator(nn.Module):
def __init__(
self, obs_len, pred_len, embedding_dim=64, encoder_h_dim=64,
decoder_h_dim=128, mlp_dim=1024, num_layers=1, noise_dim=(0, ),
noise_type='gaussian', noise_mix_type='ped', pooling_type=None,
pool_every_timestep=True, dropout=0.0, bottleneck_dim=1024,
activation='relu', batch_norm=True, neighborhood_size=2.0, grid_size=8,
n_units=[32,16,32], n_heads=4, dropout1=0, alpha=0.2,
):
super(TrajectoryGenerator, self).__init__()
if pooling_type and pooling_type.lower() == 'none':
pooling_type = None
self.obs_len = obs_len
self.pred_len = pred_len
self.mlp_dim = mlp_dim
self.encoder_h_dim = encoder_h_dim
self.decoder_h_dim = decoder_h_dim
self.embedding_dim = embedding_dim
self.noise_dim = noise_dim
self.num_layers = num_layers
self.noise_type = noise_type
self.noise_mix_type = noise_mix_type
self.pooling_type = pooling_type
self.noise_first_dim = 0
self.pool_every_timestep = pool_every_timestep
self.bottleneck_dim = 1024
self.encoder = Encoder(
embedding_dim=embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
dropout=dropout
)
self.decoder = Decoder(
pred_len,
embedding_dim=embedding_dim,
h_dim=decoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
pool_every_timestep=pool_every_timestep,
dropout=dropout,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm,
pooling_type=pooling_type,
grid_size=grid_size,
neighborhood_size=neighborhood_size
)
if pooling_type == 'pool_net':
self.pool_net = PoolHiddenNet(
embedding_dim=self.embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm
)
if self.noise_dim is None:
self.noise_dim = None
elif self.noise_dim[0] == 0:
self.noise_dim = None
else:
self.noise_first_dim = noise_dim[0]
# gatencoder
self.gatencoder = GATEncoder(
n_units=n_units, n_heads=n_heads, dropout=dropout1, alpha=alpha
)
# Decoder Hidden
if pooling_type:
input_dim = encoder_h_dim + bottleneck_dim
else:
input_dim = encoder_h_dim
# if self.mlp_decoder_needed():
# mlp_decoder_context_dims = [input_dim, mlp_dim, decoder_h_dim - self.noise_first_dim]
# self.mlp_decoder_context = make_mlp(
# mlp_decoder_context_dims,
# activation=activation,
# batch_norm=batch_norm,
# dropout=dropout
# )
self.gcn_module = GCNModule(
input_dim=input_dim,
hidden_dim=72,
out_dim=16,
gcn_layers=2,
final_dim=decoder_h_dim - self.noise_first_dim
)
def add_noise(self, _input, seq_start_end, user_noise=None):
"""
Inputs:
- _input: Tensor of shape (_, decoder_h_dim - noise_first_dim)
- seq_start_end: A list of tuples which delimit sequences within batch.
- user_noise: Generally used for inference when you want to see
relation between different types of noise and outputs.
Outputs:
- decoder_h: Tensor of shape (_, decoder_h_dim)
"""
if not self.noise_dim:
return _input
if self.noise_mix_type == 'global':
noise_shape = (seq_start_end.size(0), ) + self.noise_dim
else:
noise_shape = (_input.size(0), ) + self.noise_dim
if user_noise is not None:
z_decoder = user_noise
else:
z_decoder = get_noise(noise_shape, self.noise_type)
if self.noise_mix_type == 'global':
_list = []
for idx, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
_vec = z_decoder[idx].view(1, -1)
_to_cat = _vec.repeat(end - start, 1)
_list.append(torch.cat([_input[start:end], _to_cat], dim=1))
decoder_h = torch.cat(_list, dim=0)
return decoder_h
decoder_h = torch.cat([_input, z_decoder], dim=1)
return decoder_h
def mlp_decoder_needed(self):
if (
self.noise_dim or self.pooling_type or
self.encoder_h_dim != self.decoder_h_dim
):
return True
else:
return False
# modified by zyl 2021/1/12
def forward(self, obs_traj, obs_traj_rel, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.