signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def attention_bias_ignore_padding(memory_padding):
    """Create a bias tensor to be added to attention logits.

    Padded positions get a very large negative bias so that, after the
    softmax, they receive (numerically) zero attention weight.

    Args:
        memory_padding: a float `Tensor` with shape [batch, memory_length]
            (presumably 1.0 at padded positions and 0.0 elsewhere — confirm
            against callers).

    Returns:
        a `Tensor` with shape [batch, 1, 1, memory_length].
    """
    bias = large_compatible_negative(memory_padding.dtype) * memory_padding
    # Insert two singleton axes so the bias broadcasts over heads and queries.
    return tf.expand_dims(tf.expand_dims(bias, axis=1), axis=1)
|
def callsigns(self) -> Set[str]:
    """Return only the most relevant callsigns.

    Rows with a missing callsign are dropped, as are callsigns of three
    characters or fewer and callsigns containing a space.
    """
    # "callsign == callsign" is False for NaN, so this filters out missing values.
    with_callsign = self.data.query("callsign == callsign")
    return {
        callsign
        for callsign in with_callsign.callsign
        if len(callsign) > 3 and " " not in callsign
    }
|
def _get(self, node, key):
    """Get the value stored under ``key`` in the subtrie rooted at ``node``.

    :param node: node in form of list, or BLANK_NODE
    :param key: nibble list without terminator
    :return: BLANK_NODE if does not exist, otherwise value or hash
    """
    node_type = self._get_node_type(node)
    if node_type == NODE_TYPE_BLANK:
        return BLANK_NODE
    if node_type == NODE_TYPE_BRANCH:
        # already reach the expected node
        if not key:
            # An exhausted key addresses the value slot (last element) of the branch.
            return node[-1]
        # Descend into the child selected by the first nibble of the key.
        sub_node = self._decode_to_node(node[key[0]])
        return self._get(sub_node, key[1:])
    # key value node (leaf or extension): node[0] holds the packed key.
    curr_key = without_terminator(unpack_to_nibbles(node[0]))
    if node_type == NODE_TYPE_LEAF:
        # A leaf matches only when the remaining key equals its key exactly.
        return node[1] if key == curr_key else BLANK_NODE
    if node_type == NODE_TYPE_EXTENSION:
        # traverse child nodes: the extension key must be a prefix of ``key``
        if starts_with(key, curr_key):
            sub_node = self._decode_to_node(node[1])
            return self._get(sub_node, key[len(curr_key):])
        else:
            return BLANK_NODE
|
def codes_write(handle, outfile):  # type: (cffi.FFI.CData, T.BinaryIO) -> None
    """Write the coded message held by ``handle`` to ``outfile``.

    The raw encoded message bytes are extracted via ``codes_get_message``
    and appended to the already-open binary file object.

    :param handle: handle of the coded message to serialize
    :param outfile: open binary file object to write the message to
    """
    # Out-parameters for codes_get_message: pointer to the message buffer and its length.
    mess = ffi.new('const void **')
    mess_len = ffi.new('size_t*')
    codes_get_message = check_return(lib.codes_get_message)
    codes_get_message(handle, mess, mess_len)
    # Zero-copy view over the library-owned buffer; written out immediately.
    message = ffi.buffer(mess[0], size=mess_len[0])
    outfile.write(message)
|
def decode_int(tag, bits_per_char=6):
    """Decode a string into an int, assuming encoding with `encode_int()`.

    The encoding uses 2, 4 or 6 bits per coding character (default 6).

    Parameters:
        tag: str            Encoded integer.
        bits_per_char: int  The number of bits per coding character.

    Returns:
        int: the decoded integer

    Raises:
        ValueError: if ``bits_per_char`` is not one of 6, 4 or 2.
    """
    if bits_per_char not in (6, 4, 2):
        raise ValueError('`bits_per_char` must be in {6, 4, 2}')
    if bits_per_char == 2:
        return _decode_int4(tag)
    if bits_per_char == 4:
        return _decode_int16(tag)
    return _decode_int64(tag)
|
def cget(self, key):
    """Query widget option.

    :param key: option name
    :type key: str
    :return: value of the option

    To get the list of options for this widget, call the method
    :meth:`~Balloon.keys`.
    """
    # Options stored on this widget itself; anything else is delegated to ttk.Frame.
    getters = {
        "headertext": lambda: self.__headertext,
        "text": lambda: self.__text,
        "width": lambda: self.__width,
        "timeout": lambda: self._timeout,
        "background": lambda: self.__background,
    }
    getter = getters.get(key)
    if getter is not None:
        return getter()
    return ttk.Frame.cget(self, key)
|
def get_homogenous_list_type(list_):
    """Return the best matching python type for the items of ``list_``.

    Works for plain lists and (when numpy is available) ndarrays. Assumes
    all items in the list are of the same type; this is not checked.
    """
    # TODO Expand and make work correctly
    if HAVE_NUMPY and isinstance(list_, np.ndarray):
        sample = list_
    elif isinstance(list_, list) and len(list_) > 0:
        sample = list_[0]
    else:
        return None
    if is_float(sample):
        return float
    if is_int(sample):
        return int
    if is_bool(sample):
        return bool
    if is_str(sample):
        return str
    return get_type(sample)
|
def handle_input(self):
    """Sends differences in the device state to the MicroBitPad as events.

    If the device state has not changed since the last poll, nothing is
    emitted; otherwise the newly generated events plus a trailing sync
    marker are written to the pipe.
    """
    difference = self.check_state()
    if not difference:
        # No state change: nothing to send.
        return
    # Start a fresh batch; handle_new_events() appends into self.events.
    self.events = []
    self.handle_new_events(difference)
    self.update_timeval()
    # Terminate the batch with a sync event carrying the current timestamp.
    self.events.append(self.sync_marker(self.timeval))
    self.write_to_pipe(self.events)
|
def get_bundles():
    """Return the bundle definitions, caching them so the configuration
    is only loaded the first time they're used."""
    global _cached_bundles
    if _cached_bundles:
        return _cached_bundles
    # First use: build the manager from the configured bundle definitions.
    _cached_bundles = BundleManager()
    for bundle_conf in bundles_settings.BUNDLES:
        _cached_bundles[bundle_conf[0]] = Bundle(bundle_conf)
    return _cached_bundles
|
def project(X, Z, use_jit=False, debug=False):
    """Project tensor Z on the tangent space of tensor X.

    X is a tensor in the TT format.
    Z can be a tensor in the TT format or a list of tensors (in this case
    the function computes the projection of the sum of all tensors in the
    list: project(X, Z) = P_X(sum_i Z_i)).

    This function implements an algorithm from the paper [1], theorem 3.1.
    The jit version of the code is much faster when projecting a lot of
    tensors simultaneously (in other words, when Z is a list with many
    tensors).

    Returns a tensor in the TT format with the TT-ranks equal 2 * rank(X).
    """
    zArr = None
    if isinstance(Z, tt.vector):
        zArr = [Z]
    else:
        zArr = Z
    # Get rid of redundant ranks (they cause technical difficulties).
    X = X.round(eps=0)
    numDims, modeSize = X.d, X.n
    coresX = tt.tensor.to_list(X)
    coresZ = [None] * len(zArr)
    for idx in xrange(len(zArr)):
        assert (modeSize == zArr[idx].n).all()
        coresZ[idx] = tt.tensor.to_list(zArr[idx])
    if not use_jit and len(zArr) > 10:
        print('Consider using use_jit=True option to speed up the projection ' 'process.')
    if use_jit:
        # The jit path stacks all Z cores into one ndarray per dimension, so
        # every Z must share the same TT-ranks; otherwise fall back.
        for dim in xrange(numDims):
            r1, n, r2 = coresZ[0][dim].shape
            for idx in xrange(len(zArr)):
                if (r1, n, r2) != coresZ[idx][dim].shape:
                    print('Warning: cannot use the jit version when not all ' 'the ranks in the Z array are equal each other. ' 'Switching to the non-jit version.')
                    use_jit = False
    if use_jit:
        # zCoresDim[dim] stacks the dim-th core of every Z along axis 0.
        zCoresDim = [None] * numDims
        for dim in xrange(numDims):
            r1, n, r2 = coresZ[0][dim].shape
            zCoresDim[dim] = np.zeros([len(zArr), r1, n, r2])
            for idx in xrange(len(zArr)):
                if (r1, n, r2) != coresZ[idx][dim].shape:
                    print('Warning: cannot use the jit version when not all ' 'the ranks in the Z array are equal each other. ' 'Switching to the non-jit version.')
                    use_jit = False
                zCoresDim[dim][idx, :, :, :] = coresZ[idx][dim]
        # Initialize the cores of the projection _X(sum z[i]).
        coresP = []
        for dim in xrange(numDims):
            r1 = 2 * X.r[dim]
            r2 = 2 * X.r[dim + 1]
            if dim == 0:
                r1 = 1
            if dim == numDims - 1:
                r2 = 1
            coresP.append(np.zeros((r1, modeSize[dim], r2)))
        # rhs[dim] is a len(zArr) x zArr[idx].rank_dim x X.rank_dim ndarray.
        # Right to left orthogonalization of X and preparation of the rhs vectors.
        for dim in xrange(numDims - 1, 0, -1):
            # Right to left orthogonalization of the X cores.
            coresX = cores_orthogonalization_step(coresX, dim, left_to_right=False)
            r1, n, r2 = coresX[dim].shape
            # Fill the right orthogonal part of the projection.
            for value in xrange(modeSize[dim]):
                coresP[dim][0:r1, value, 0:r2] = coresX[dim][:, value, :]
        rhs = [None] * (numDims + 1)
        for dim in xrange(numDims):
            # NOTE(review): zArr[idx] relies on idx left over from the loop
            # above; in the jit path all Z ranks are equal, so any index
            # gives the same rank — confirm.
            rhs[dim] = np.zeros([len(zArr), zArr[idx].r[dim], coresX[dim].shape[0]])
        rhs[numDims] = np.ones([len(zArr), 1, 1])
        for dim in xrange(numDims - 1, 0, -1):
            _update_rhs(rhs[dim + 1], coresX[dim], zCoresDim[dim], rhs[dim])
        if debug:
            assert (np.allclose(X.full(), tt.tensor.from_list(coresX).full()))
        # lhs is a len(zArr) x X.rank_dim x zArr[idx].rank_dim ndarray.
        lhs = np.ones([len(zArr), 1, 1])
        # Left to right sweep.
        for dim in xrange(numDims):
            cc = coresX[dim].copy()
            r1, n, r2 = cc.shape
            if dim < numDims - 1:
                # Left to right orthogonalization.
                cc = reshape(cc, (-1, r2))
                cc, rr = np.linalg.qr(cc)
                r2 = cc.shape[1]
                # Warning: since ranks can change here, do not use X.r!
                # Use coresX[dim].shape instead.
                if debug:
                    # Need to do it before the move of the non-orthogonal
                    # part rr to the coresX[dim + 1].
                    rightQ = right(tt.tensor.from_list(coresX), dim + 1)
                coresX[dim] = reshape(cc, (r1, n, r2)).copy()
                coresX[dim + 1] = np.tensordot(rr, coresX[dim + 1], 1)
                # NOTE(review): zArr[idx] again uses the leftover idx; ranks
                # are equal here — confirm.
                new_lhs = np.zeros([len(zArr), r2, zArr[idx].r[dim + 1]])
                _update_lhs(lhs, coresX[dim], zCoresDim[dim], new_lhs)
                # See the corresponding section in the non-jit version of this
                # code for a less confusing implementation of
                # the transformation below.
                currPCore = np.einsum('ijk,iklm->ijlm', lhs, zCoresDim[dim])
                currPCore = reshape(currPCore, (len(zArr), r1 * n, -1))
                currPCore -= np.einsum('ij,kjl->kil', cc, new_lhs)
                currPCore = np.einsum('ijk,ikl', currPCore, rhs[dim + 1])
                currPCore = reshape(currPCore, (r1, modeSize[dim], r2))
                if dim == 0:
                    coresP[dim][0:r1, :, 0:r2] += currPCore
                else:
                    coresP[dim][r1:, :, 0:r2] += currPCore
                if debug:
                    # Compare against an explicitly computed projection core.
                    explicit_sum = np.zeros((r1, modeSize[dim], r2))
                    for idx in xrange(len(zArr)):
                        leftQm1 = left(tt.tensor.from_list(coresX), dim - 1)
                        leftQ = left(tt.tensor.from_list(coresX), dim)
                        first = np.tensordot(leftQm1.T, unfolding(zArr[idx], dim - 1), 1)
                        second = reshape(first, (-1, np.prod(modeSize[dim + 1:])))
                        if dim < numDims - 1:
                            explicit = second.dot(rightQ)
                            orth_cc = reshape(coresX[dim], (-1, coresX[dim].shape[2]))
                            explicit -= orth_cc.dot(leftQ.T.dot(unfolding(zArr[idx], dim)).dot(rightQ))
                        else:
                            explicit = second
                        explicit_sum += reshape(explicit, currPCore.shape)
                    assert (np.allclose(explicit_sum, currPCore))
                lhs = new_lhs
                if dim == 0:
                    coresP[dim][0:r1, :, r2:] = coresX[dim]
                else:
                    coresP[dim][r1:, :, r2:] = coresX[dim]
            if dim == numDims - 1:
                coresP[dim][r1:, :, 0:r2] += np.einsum('ijk,iklm->jlm', lhs, zCoresDim[dim])
        if debug:
            assert (np.allclose(X.full(), tt.tensor.from_list(coresX).full()))
        return tt.tensor.from_list(coresP)
    else:
        # Non-jit version of the code.
        # Initialize the cores of the projection _X(sum z[i]).
        coresP = []
        for dim in xrange(numDims):
            r1 = 2 * X.r[dim]
            r2 = 2 * X.r[dim + 1]
            if dim == 0:
                r1 = 1
            if dim == numDims - 1:
                r2 = 1
            coresP.append(np.zeros((r1, modeSize[dim], r2)))
        # rhs[idx][dim] is an (Z.rank_dim * X.rank_dim) x 1 vector
        rhs = [[0] * (numDims + 1) for _ in xrange(len(zArr))]
        for idx in xrange(len(zArr)):
            rhs[idx][numDims] = np.ones([1, 1])
        # Right to left sweep to orthogonalize the cores and prepare rhs.
        for dim in xrange(numDims - 1, 0, -1):
            # Right to left orthogonalization of the X cores.
            coresX = cores_orthogonalization_step(coresX, dim, left_to_right=False)
            r1, n, r2 = coresX[dim].shape
            # Fill the right orthogonal part of the projection.
            coresP[dim][0:r1, :, 0:r2] = coresX[dim]
            # Compute rhs.
            for idx in xrange(len(zArr)):
                coreProd = np.tensordot(coresZ[idx][dim], coresX[dim], axes=(1, 1))
                coreProd = np.transpose(coreProd, (0, 2, 1, 3))
                coreProd = reshape(coreProd, (zArr[idx].r[dim] * r1, zArr[idx].r[dim + 1] * r2))
                rhs[idx][dim] = np.dot(coreProd, rhs[idx][dim + 1])
        if debug:
            assert (np.allclose(X.full(), tt.tensor.from_list(coresX).full()))
        # lhs[idx] is an X.rank_dim x zArr[idx].rank_dim matrix.
        lhs = [np.ones([1, 1]) for _ in xrange(len(zArr))]
        # Left to right sweep.
        for dim in xrange(numDims - 1):
            if debug:
                rightQ = right(tt.tensor.from_list(coresX), dim + 1)
            # Left to right orthogonalization of the X cores.
            coresX = cores_orthogonalization_step(coresX, dim, left_to_right=True)
            r1, n, r2 = coresX[dim].shape
            cc = reshape(coresX[dim], (-1, r2))
            for idx in xrange(len(zArr)):
                currZCore = reshape(coresZ[idx][dim], (zArr[idx].r[dim], -1))
                currPCore = np.dot(lhs[idx], currZCore)
                # TODO: consider using np.einsum.
                coreProd = np.tensordot(coresX[dim], coresZ[idx][dim], axes=(1, 1))
                coreProd = np.transpose(coreProd, (0, 2, 1, 3))
                coreProd = reshape(coreProd, (r1 * zArr[idx].r[dim], r2 * zArr[idx].r[dim + 1]))
                lhs[idx] = reshape(lhs[idx], (1, -1))
                lhs[idx] = np.dot(lhs[idx], coreProd)
                lhs[idx] = reshape(lhs[idx], (r2, zArr[idx].r[dim + 1]))
                currPCore = reshape(currPCore, (-1, zArr[idx].r[dim + 1]))
                currPCore -= np.dot(cc, lhs[idx])
                rhs[idx][dim + 1] = reshape(rhs[idx][dim + 1], (zArr[idx].r[dim + 1], r2))
                currPCore = np.dot(currPCore, rhs[idx][dim + 1])
                currPCore = reshape(currPCore, (r1, modeSize[dim], r2))
                if dim == 0:
                    coresP[dim][0:r1, :, 0:r2] += currPCore
                else:
                    coresP[dim][r1:, :, 0:r2] += currPCore
                if debug:
                    # Compare against an explicitly computed projection core.
                    leftQm1 = left(tt.tensor.from_list(coresX), dim - 1)
                    leftQ = left(tt.tensor.from_list(coresX), dim)
                    first = np.tensordot(leftQm1.T, unfolding(zArr[idx], dim - 1), 1)
                    second = reshape(first, (-1, np.prod(modeSize[dim + 1:])))
                    if dim < numDims - 1:
                        explicit = second.dot(rightQ)
                        orth_cc = reshape(coresX[dim], (-1, coresX[dim].shape[2]))
                        explicit -= orth_cc.dot(leftQ.T.dot(unfolding(zArr[idx], dim)).dot(rightQ))
                    else:
                        explicit = second
                    explicit = reshape(explicit, currPCore.shape)
                    assert (np.allclose(explicit, currPCore))
            if dim == 0:
                coresP[dim][0:r1, :, r2:] = coresX[dim]
            else:
                coresP[dim][r1:, :, r2:] = coresX[dim]
        # Last dimension: no orthogonalization step, only the lhs product.
        for idx in xrange(len(zArr)):
            r1, n, r2 = coresX[numDims - 1].shape
            currZCore = reshape(coresZ[idx][numDims - 1], (zArr[idx].r[numDims - 1], -1))
            currPCore = np.dot(lhs[idx], currZCore)
            currPCore = reshape(currPCore, (r1, n, r2))
            coresP[numDims - 1][r1:, :, 0:r2] += currPCore
        if debug:
            assert (np.allclose(X.full(), tt.tensor.from_list(coresX).full()))
        return tt.tensor.from_list(coresP)
|
def manage(settingspath, root_dir, argv):
    """Manage all processes.

    :param settingspath: path to the settings JSON file; exported to child
        processes via the ENV_VAR_SETTINGS environment variable
    :param root_dir: project root directory; exported via ENV_VAR_ROOT_DIR
    :param argv: command-line arguments; argv[1] selects the sub-command
    """
    # add settings.json to environment variables
    os.environ[ENV_VAR_SETTINGS] = settingspath
    # add root_dir
    os.environ[ENV_VAR_ROOT_DIR] = root_dir
    # get datasets list
    with open(settingspath) as settings_file:
        settings = json.load(settings_file)
    # manage args
    datasets_list = generate_datasets_list(settings, argv)
    if len(argv) < 2:
        # No sub-command given: show help instead of raising IndexError.
        print_help()
        return
    if "make-data-file" == argv[1]:
        make_data_file(datasets_list, argv)
    elif "parse-data" == argv[1]:
        parse_data(datasets_list, argv)
    elif "do-operations" == argv[1]:
        do_operations(datasets_list, argv)
    else:
        print_help()
|
def _simple_response_to_error_adapter(self, status, original_body):
    """Convert a single error response.

    Pops 'code' and 'message' out of a copy of the body; whatever remains
    is returned as metadata alongside a one-element error list.
    """
    remainder = original_body.copy()
    code = remainder.pop('code')
    title = remainder.pop('message')
    errors = [ErrorDetails(status, code, title)]
    # save whatever is left in the response as metadata
    return errors, remainder
|
def visit_shapes(self, expr: ShExJ.shapeExpr, f: Callable[[Any, ShExJ.shapeExpr, "Context"], None], arg_cntxt: Any,
                 visit_center: _VisitorCenter = None, follow_inner_shapes: bool = True) -> None:
    """Visit expr and all of its "descendant" shapes.

    :param expr: root shape expression
    :param f: visitor function
    :param arg_cntxt: accompanying context for the visitor function
    :param visit_center: Recursive visit context. (Not normally supplied on an external call)
    :param follow_inner_shapes: Follow nested shapes or just visit on outer level
    """
    if visit_center is None:
        visit_center = _VisitorCenter(f, arg_cntxt)
    has_id = getattr(expr, 'id', None) is not None
    # Skip shapes that were already visited or are currently being visited
    # (prevents infinite recursion on cyclic shape references).
    if not has_id or not (visit_center.already_seen_shape(expr.id) or visit_center.actively_visiting_shape(expr.id)):
        # Visit the root expression
        if has_id:
            visit_center.start_visiting_shape(expr.id)
        f(arg_cntxt, expr, self)
        # Traverse the expression and visit its components
        if isinstance(expr, (ShExJ.ShapeOr, ShExJ.ShapeAnd)):
            # Composite shapes: recurse into each operand.
            for expr2 in expr.shapeExprs:
                self.visit_shapes(expr2, f, arg_cntxt, visit_center, follow_inner_shapes=follow_inner_shapes)
        elif isinstance(expr, ShExJ.ShapeNot):
            self.visit_shapes(expr.shapeExpr, f, arg_cntxt, visit_center, follow_inner_shapes=follow_inner_shapes)
        elif isinstance(expr, ShExJ.Shape):
            # Shapes can have triple expressions inside of them; visit any
            # shapes nested inside those triple expressions.
            if expr.expression is not None and follow_inner_shapes:
                self.visit_triple_expressions(expr.expression, lambda ac, te, cntxt: self._visit_shape_te(te, visit_center), arg_cntxt, visit_center)
        elif isinstance_(expr, ShExJ.shapeExprLabel):
            # A label references another shape: resolve and visit it unless
            # it is already on the active visiting stack.
            if not visit_center.actively_visiting_shape(str(expr)) and follow_inner_shapes:
                visit_center.start_visiting_shape(str(expr))
                self.visit_shapes(self.shapeExprFor(expr), f, arg_cntxt, visit_center)
                visit_center.done_visiting_shape(str(expr))
        if has_id:
            visit_center.done_visiting_shape(expr.id)
|
def convert(self, expr):
    """Override Backend.convert() to add fast paths for BVVs and BoolVs."""
    # Fast path 1: concrete bitvector values, memoized by the AST cache key.
    if type(expr) is BV and expr.op == "BVV":
        cache_key = expr._cache_key
        cached = self._object_cache.get(cache_key, None)
        if cached is None:
            cached = self.BVV(*expr.args)
            self._object_cache[cache_key] = cached
        return cached
    # Fast path 2: concrete booleans convert to their raw Python value.
    if type(expr) is Bool and expr.op == "BoolV":
        return expr.args[0]
    # Everything else goes through the generic conversion machinery.
    return super().convert(expr)
|
def formfield_for_dbfield(self, db_field, **kwargs):
    """Override the default widget for ForeignKey fields if they are
    specified in the related_search_fields class attribute."""
    is_searchable_fk = (
        isinstance(db_field, models.ForeignKey)
        and db_field.name in self.related_search_fields
    )
    if is_searchable_fk:
        model_name = db_field.remote_field.model._meta.object_name
        help_text = self.get_help_text(db_field.name, model_name)
        # Preserve any caller-supplied help text by prepending it.
        if kwargs.get('help_text'):
            help_text = six.u('%s %s' % (kwargs['help_text'], help_text))
        kwargs['widget'] = ForeignKeySearchInput(db_field.remote_field, self.related_search_fields[db_field.name])
        kwargs['help_text'] = help_text
    return super(ForeignKeyAutocompleteAdminMixin, self).formfield_for_dbfield(db_field, **kwargs)
|
def build(self, start, end, symbols=None):
    """Return the list of basic blocks.

    :int start: Start address of the disassembling process.
    :int end: End address of the disassembling process.
    """
    if not symbols:
        symbols = {}
    # First pass: recover basic blocks.
    basic_blocks = self._recover_bbs(start, end, symbols)
    # Second pass: split overlapping basic blocks introduced by back edges.
    basic_blocks = self._split_bbs(basic_blocks, symbols)
    # Third pass: extract call targets for further analysis.
    return basic_blocks, self._extract_call_targets(basic_blocks)
|
def leaves_are_consistent(self):
    """Return ``True`` if the sync map fragments which are the leaves
    of the sync map tree (except for HEAD and TAIL leaves) are all
    consistent, that is, their intervals do not overlap in forbidden ways.

    Consistency is checked by sorting a SyncMapFragmentList; a
    ``ValueError`` raised by the sort signals an inconsistency.

    :rtype: bool

    .. versionadded:: 1.7.0
    """
    self.log(u"Checking if leaves are consistent")
    leaves = self.leaves()
    if len(leaves) < 1:
        self.log(u"Empty leaves => return True")
        return True
    min_time = min(leaf.interval.begin for leaf in leaves)
    self.log([u" Min time: %.3f", min_time])
    max_time = max(leaf.interval.end for leaf in leaves)
    self.log([u" Max time: %.3f", max_time])
    self.log(u" Creating SyncMapFragmentList...")
    smf = SyncMapFragmentList(begin=min_time, end=max_time, rconf=self.rconf, logger=self.logger)
    self.log(u" Creating SyncMapFragmentList... done")
    self.log(u" Sorting SyncMapFragmentList...")
    for leaf in leaves:
        if not leaf.is_head_or_tail:
            smf.add(leaf, sort=False)
    try:
        smf.sort()
    except ValueError:
        self.log(u" Exception while sorting => return False")
        consistent = False
    else:
        self.log(u" Sorting completed => return True")
        consistent = True
    self.log(u" Sorting SyncMapFragmentList... done")
    return consistent
|
def pip_upgrade_all(line):
    """Attempt to upgrade all installed packages.

    Packages installed with --user are upgraded with --user; everything
    else is upgraded site-wide.
    """
    from pip import get_installed_distributions
    user_pkgs = {d.project_name for d in get_installed_distributions(user_only=True)}
    installed = {d.project_name for d in get_installed_distributions()}
    for dist in installed - user_pkgs:
        do_pip(["install", "--upgrade", dist])
    for dist in user_pkgs:
        do_pip(["install", "--upgrade", "--user", dist])
|
def _receive_message(self):
    """Internal coroutine for receiving messages.

    Runs forever: ROUTER sockets yield (identity, message) pairs and
    empty messages are treated as connection probes that are ACKed back;
    other socket types yield bare messages. Received messages are put on
    the dispatcher queue.
    """
    while True:
        try:
            if self._socket.getsockopt(zmq.TYPE) == zmq.ROUTER:
                zmq_identity, msg_bytes = yield from self._socket.recv_multipart()
                if msg_bytes == b'':
                    # send ACK for connection probes
                    LOGGER.debug("ROUTER PROBE FROM %s", zmq_identity)
                    self._socket.send_multipart([bytes(zmq_identity), msg_bytes])
                else:
                    self._received_from_identity(zmq_identity)
                    self._dispatcher_queue.put_nowait((zmq_identity, msg_bytes))
            else:
                msg_bytes = yield from self._socket.recv()
                self._last_message_time = time.time()
                self._dispatcher_queue.put_nowait((None, msg_bytes))
            # Report the dispatcher backlog.
            # NOTE(review): placement reconstructed — assumed to run after
            # either receive branch; confirm against upstream source.
            self._get_queue_size_gauge(self.connection).set_value(self._dispatcher_queue.qsize())
        except CancelledError:  # pylint: disable=try-except-raise
            # The concurrent.futures.CancelledError is caught by asyncio
            # when the Task associated with the coroutine is cancelled.
            # The raise is required to stop this component.
            raise
        except Exception as e:  # pylint: disable=broad-except
            LOGGER.exception("Received a message on address %s that " "caused an error: %s", self._address, e)
|
def print_multi_line(content, force_single_line, sort_key):
    """Print ``content`` (a list or a dict) as an in-place, multi-line display.

    'sort_key' parameter only available in 'dict' mode.
    """
    global last_output_lines
    global overflow_flag
    global is_atty
    if not is_atty:
        # Not attached to a terminal: fall back to plain sequential printing.
        if isinstance(content, list):
            for line in content:
                print(line)
        elif isinstance(content, dict):
            for k, v in sorted(content.items(), key=sort_key):
                print("{}: {}".format(k, v))
        else:
            raise TypeError("Excepting types: list, dict. Got: {}".format(type(content)))
        return
    columns, rows = get_terminal_size()
    lines = lines_of_content(content, columns)
    # Flag when the content will not fit in the visible terminal area.
    if force_single_line is False and lines > rows:
        overflow_flag = True
    elif force_single_line is True and len(content) > rows:
        overflow_flag = True
    # Make sure the cursor starts at the leftmost column.
    print("\b" * columns, end="")
    if isinstance(content, list):
        for line in content:
            _line = preprocess(line)
            print_line(_line, columns, force_single_line)
    elif isinstance(content, dict):
        for k, v in sorted(content.items(), key=sort_key):
            _k, _v = map(preprocess, (k, v))
            print_line("{}: {}".format(_k, _v), columns, force_single_line)
    else:
        raise TypeError("Excepting types: list, dict. Got: {}".format(type(content)))
    # Print extra blank lines to wipe the remainder of the previous output.
    print(" " * columns * (last_output_lines - lines), end="")
    # Move the cursor back to the original output position.
    print(magic_char * (max(last_output_lines, lines) - 1), end="")
    sys.stdout.flush()
    last_output_lines = lines
|
def get_document_field_display(self, field_name, field):
    """Render a link to a document, or the admin's empty-value display
    when the instance has no document for this field."""
    document = getattr(self.instance, field_name)
    if not document:
        return self.model_admin.get_empty_value_display()
    return mark_safe('<a href="%s">%s <span class="meta">(%s, %s)</span></a>' % (document.url, document.title, document.file_extension.upper(), filesizeformat(document.file.size),))
|
def check_roles(self, account, aws_policies, aws_roles):
    """Iterate through the roles of a specific account and create or update the roles if they're missing or
    do not match the roles from Git.

    Args:
        account (:obj:`Account`): The account to check roles on
        aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific
            account
        aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account

    Returns:
        `None`
    """
    self.log.debug('Checking roles for {}'.format(account.account_name))
    # Session duration is configured in hours; IAM expects seconds.
    max_session_duration = self.dbconfig.get('role_timeout_in_hours', self.ns, 8) * 60 * 60
    sess = get_aws_session(account)
    iam = sess.client('iam')
    # Build a list of default role policies and extra account specific role policies
    account_roles = copy.deepcopy(self.cfg_roles)
    if account.account_name in self.git_policies:
        for role in self.git_policies[account.account_name]:
            if role in account_roles:
                account_roles[role]['policies'] += list(self.git_policies[account.account_name][role].keys())
    for role_name, data in list(account_roles.items()):
        if role_name not in aws_roles:
            # Role is missing entirely: create it with the configured trust document.
            iam.create_role(Path='/', RoleName=role_name, AssumeRolePolicyDocument=json.dumps(data['trust'], indent=4), MaxSessionDuration=max_session_duration)
            self.log.info('Created role {}/{}'.format(account.account_name, role_name))
        else:
            # Role exists: bring its MaxSessionDuration in line if needed.
            try:
                if aws_roles[role_name]['MaxSessionDuration'] != max_session_duration:
                    iam.update_role(RoleName=aws_roles[role_name]['RoleName'], MaxSessionDuration=max_session_duration)
                    self.log.info('Adjusted MaxSessionDuration for role {} in account {} to {} seconds'.format(role_name, account.account_name, max_session_duration))
            except ClientError:
                self.log.exception('Unable to adjust MaxSessionDuration for role {} in account {}'.format(role_name, account.account_name))
        # Diff the attached policies against the configured ones.
        aws_role_policies = [x['PolicyName'] for x in iam.list_attached_role_policies(RoleName=role_name)['AttachedPolicies']]
        aws_role_inline_policies = iam.list_role_policies(RoleName=role_name)['PolicyNames']
        cfg_role_policies = data['policies']
        missing_policies = list(set(cfg_role_policies) - set(aws_role_policies))
        extra_policies = list(set(aws_role_policies) - set(cfg_role_policies))
        if aws_role_inline_policies:
            # Inline policies are not managed via config; optionally purge them.
            self.log.info('IAM Role {} on {} has the following inline policies: {}'.format(role_name, account.account_name, ', '.join(aws_role_inline_policies)))
            if self.dbconfig.get('delete_inline_policies', self.ns, False) and self.manage_roles:
                for policy in aws_role_inline_policies:
                    iam.delete_role_policy(RoleName=role_name, PolicyName=policy)
                    auditlog(event='iam.check_roles.delete_inline_role_policy', actor=self.ns, data={'account': account.account_name, 'roleName': role_name, 'policy': policy})
        if missing_policies:
            self.log.info('IAM Role {} on {} is missing the following policies: {}'.format(role_name, account.account_name, ', '.join(missing_policies)))
            if self.manage_roles:
                for policy in missing_policies:
                    iam.attach_role_policy(RoleName=role_name, PolicyArn=aws_policies[policy]['Arn'])
                    auditlog(event='iam.check_roles.attach_role_policy', actor=self.ns, data={'account': account.account_name, 'roleName': role_name, 'policyArn': aws_policies[policy]['Arn']})
        if extra_policies:
            self.log.info('IAM Role {} on {} has the following extra policies applied: {}'.format(role_name, account.account_name, ', '.join(extra_policies)))
            for policy in extra_policies:
                # Resolve the policy ARN from account policies first, then AWS-managed ones.
                if policy in aws_policies:
                    polArn = aws_policies[policy]['Arn']
                elif policy in self.aws_managed_policies:
                    polArn = self.aws_managed_policies[policy]['Arn']
                else:
                    polArn = None
                    self.log.info('IAM Role {} on {} has an unknown policy attached: {}'.format(role_name, account.account_name, policy))
                if self.manage_roles and polArn:
                    iam.detach_role_policy(RoleName=role_name, PolicyArn=polArn)
                    auditlog(event='iam.check_roles.detach_role_policy', actor=self.ns, data={'account': account.account_name, 'roleName': role_name, 'policyArn': polArn})
|
def reduce_by_device(parallelism, data, reduce_fn):
    """Reduces data per device.

    This can be useful, for example, if we want to all-reduce n tensors on
    k < n devices (like during eval when we have only one device). We call
    reduce_by_device() to first sum the tensors per device, then call our
    usual all-reduce operation to create one sum per device, followed by
    expand_by_device, to create the appropriate number of pointers to these
    results. See all_reduce_ring() below for an example of how this is used.

    Args:
        parallelism: a expert_utils.Parallelism object
        data: a list of Tensors with length parallelism.n
        reduce_fn: a function taking a list of Tensors.  e.g. tf.add_n

    Returns:
        device_parallelism: a Parallelism object with each device listed only once.
        reduced_data: A list of Tensors, one per device.
    """
    unique_devices = []
    device_to_data = {}
    # Group the tensors by device, remembering first-seen device order.
    for dev, datum in zip(parallelism.devices, data):
        if dev not in device_to_data:
            unique_devices.append(dev)
        device_to_data.setdefault(dev, []).append(datum)
    device_parallelism = Parallelism(unique_devices)
    grouped_data = [device_to_data[dev] for dev in unique_devices]
    return device_parallelism, device_parallelism(reduce_fn, grouped_data)
|
def line_intersection_2D(abarg, cdarg):
    '''line_intersection((a, b), (c, d)) yields the intersection point between the lines that pass
    through the given pairs of points. If any lines are parallel, (numpy.nan, numpy.nan) is
    returned; note that a, b, c, and d can all be 2 x n matrices of x and y coordinate row-vectors.'''
    ((x1, y1), (x2, y2)) = abarg
    ((x3, y3), (x4, y4)) = cdarg
    dx12 = (x1 - x2)
    dx34 = (x3 - x4)
    dy12 = (y1 - y2)
    dy34 = (y3 - y4)
    denom = dx12 * dy34 - dy12 * dx34
    # Mask of (near-)parallel line pairs, where the denominator is ~0.
    parallel = np.isclose(denom, 0)
    # BUG FIX: the previous implementation tested `unit is True` / `unit is
    # False`, but np.isclose returns a numpy bool_ whose identity never
    # matches the Python singletons, so the scalar fast paths never ran.
    if np.ndim(parallel) == 0:
        # Scalar case.
        if bool(parallel):
            return (np.nan, np.nan)
        q12 = (x1 * y2 - y1 * x2) / denom
        q34 = (x3 * y4 - y3 * x4) / denom
        return (q12 * dx34 - q34 * dx12, q12 * dy34 - q34 * dy12)
    # Vectorized case: nudge parallel denominators away from zero to avoid
    # divide-by-zero warnings, then patch those entries with NaN afterwards.
    denom = parallel + denom
    q12 = (x1 * y2 - y1 * x2) / denom
    q34 = (x3 * y4 - y3 * x4) / denom
    xi = np.asarray(q12 * dx34 - q34 * dx12, dtype=float)
    yi = np.asarray(q12 * dy34 - q34 * dy12, dtype=float)
    xi[parallel] = np.nan
    yi[parallel] = np.nan
    return (xi, yi)
|
def use_federated_book_view(self):
    """Pass through to provider CommentLookupSession.use_federated_book_view"""
    self._book_view = FEDERATED
    # self._get_provider_session('comment_lookup_session')  # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_federated_book_view()
        except AttributeError:
            # Not every provider session supports book views; skip those.
            pass
|
def _get_site_term ( self , C , vs30 ) :
"""Returns only a linear site amplification term"""
|
dg1 , dg2 = self . _get_regional_site_term ( C )
return ( C [ "g1" ] + dg1 ) + ( C [ "g2" ] + dg2 ) * np . log ( vs30 )
|
def _get_ensemble_bed_files(items):
    """get all ensemble structural BED file calls, skipping any normal samples from
    tumor/normal calls"""
    out = []
    for data in items:
        for call in data.get("sv", []):
            if call["variantcaller"] != "sv-ensemble":
                continue
            # Skip the normal sample of a tumor/normal pair.
            is_normal = vcfutils.get_paired_phenotype(data) == "normal"
            if "vrn_file" in call and not is_normal and file_exists(call["vrn_file"]):
                out.append(call["vrn_file"])
    return out
|
def draw_uppercase_key(self, surface, key):
    """Default drawing method for uppercase key. Drawn as character key.

    :param surface: Surface background should be drawn in.
    :param key: Target key to be drawn.
    """
    # Locked-shift glyph while active, plain shift glyph otherwise.
    key.value = u'\u21ea' if key.is_activated() else u'\u21e7'
    self.draw_character_key(surface, key, True)
|
def rename(self, channel_name, new_name):
    """https://api.slack.com/methods/channels.rename"""
    # Resolve the human-readable name to the channel id Slack expects.
    self.params.update({
        'channel': self.get_channel_id(channel_name),
        'name': new_name,
    })
    endpoint = FromUrl('https://slack.com/api/channels.rename', self._requests)
    return endpoint(data=self.params).post()
|
def fixChromName(name, orgn="medicago"):
    """Convert quirky chromosome names encountered in different
    release files, which are very project specific, into a more
    general format.

    For example, in Medicago
    Convert a seqid like
    `Mt3.5.1_Chr1` to `chr1`
    `Mt3.5_Chr3` to `chr3`
    `chr01_pseudomolecule_IMGAG` to `chr1`

    Some examples from Maize
    Convert a seqid like
    `chromosome:AGPv2:2:1:237068873:1` to `2`
    Special cases
    `chromosome:AGPv2:mitochondrion:1:569630:1` to `Mt`
    `chromosome:AGPv2:chloroplast:1:140384:1` to `Pt`
    """
    import re
    # `Mt<major>.<minor>[.<patch>...]_Chr<N>`.
    # BUGFIX: the original pattern used the malformed character class
    # `[\.[0-9]+]{0,}` and lowercase-only `[a-z]+`, which could never match
    # the uppercase `Chr` in names such as `Mt3.5.1_Chr1`.
    mtr_pat1 = re.compile(r"Mt[0-9]+\.[0-9]+(?:\.[0-9]+)*_([A-Za-z]+[0-9]+)")
    mtr_pat2 = re.compile(r"([A-z0-9]+)_[A-z]+_[A-z]+")
    zmays_pat = re.compile(r"[a-z]+:[A-z0-9]+:([A-z0-9]+):[0-9]+:[0-9]+:[0-9]+")
    zmays_sub = {'mitochondrion': 'Mt', 'chloroplast': 'Pt'}
    if orgn == "medicago":
        for mtr_pat in (mtr_pat1, mtr_pat2):
            match = re.search(mtr_pat, name)
            if match:
                n = match.group(1)
                # Drop zero padding (chr01 -> chr1).  NOTE(review): this strips
                # every '0', which is fine for Medicago's 8 chromosomes but
                # would mangle e.g. chr10 -- confirm before reuse elsewhere.
                n = n.replace("0", "")
                # Normalize to the lowercase `chrN` form documented above.
                name = re.sub(mtr_pat, n, name).lower()
    elif orgn == "maize":
        match = re.search(zmays_pat, name)
        if match:
            n = match.group(1)
            name = re.sub(zmays_pat, n, name)
            # Organelle names map to their conventional two-letter labels.
            if name in zmays_sub:
                name = zmays_sub[name]
    return name
|
def _dump_to_pages ( dump ) :
"""Extract pages from an xml dump .
Args :
dump : a unicode string
Returns :
a list of unicode strings"""
|
pos = 0
ret = [ ]
start_tag = u"<page>\n"
end_tag = u"</page>\n"
while True :
start_pos = dump . find ( start_tag , pos )
if start_pos == - 1 :
break
start_pos += len ( start_tag )
end_pos = dump . find ( end_tag , start_pos )
if end_pos == - 1 :
break
ret . append ( dump [ start_pos : end_pos ] )
pos = end_pos + len ( end_tag )
return ret
|
def goto_line(self, line, column=0, move=True):
    """Moves the text cursor to the specified position..

    :param line: Number of the line to go to (0 based)
    :param column: Optional column number. Default is 0 (start of line).
    :param move: True to move the cursor. False will return the cursor
                 without setting it on the editor.
    :return: The new text cursor
    :rtype: QtGui.QTextCursor
    """
    # Position the cursor at the start of the requested line first.
    text_cursor = self.move_cursor_to(line)
    if column:
        # Advance within the line without extending a selection.
        text_cursor.movePosition(text_cursor.Right, text_cursor.MoveAnchor, column)
    if move:
        block = text_cursor.block()
        # unfold parent fold trigger if the block is collapsed
        try:
            # Raises KeyError when no FoldingPanel is installed on the editor.
            folding_panel = self._editor.panels.get('FoldingPanel')
        except KeyError:
            pass
        else:
            from pyqode.core.api.folding import FoldScope
            if not block.isVisible():
                # An invisible block is hidden inside a collapsed scope;
                # expand its enclosing fold trigger so the cursor is shown.
                block = FoldScope.find_parent_scope(block)
                if TextBlockHelper.is_collapsed(block):
                    folding_panel.toggle_fold_trigger(block)
        self._editor.setTextCursor(text_cursor)
    return text_cursor
|
def findzc(x, thresh, t_max=None):
    '''Find cues to each zero-crossing in vector x.

    To be accepted as a zero-crossing, the signal must pass from below
    -thresh to above thresh, or vice versa, in no more than t_max samples.

    Args
    ----
    thresh: (float)
        magnitude threshold for detecting a zero-crossing.
    t_max: (int)
        maximum duration in samples between threshold crossings.

    Returns
    -------
    zc: ndarray
        Array containing the start **zc_s**, finish **zc_f** and direction **S**
        of zero crossings

        where:

        * zc_s: the cue of the first threshold-crossing in samples
        * zc_f: the cue of the second threshold-crossing in samples
        * S: the sign of each zero-crossing (1 = positive-going,
          -1 = negative-going).

    Notes
    -----
    This routine is a reimplementation of Mark Johnson's Dtag toolbox method
    and tested against the Matlab version to be sure it has the same result.
    '''
    import numpy

    # positive threshold: p (over) n (under)
    pt_p = x > thresh
    pt_n = ~pt_p

    # negative threshold: p (over) n (under)
    nt_n = x < -thresh
    nt_p = ~nt_n

    # Candidate crossings of the positive threshold +thresh
    # neg to pos
    pt_np = (pt_p[:-1] & pt_n[1:]).nonzero()[0]
    # pos to neg
    pt_pn = (pt_n[:-1] & pt_p[1:]).nonzero()[0] + 1

    # Candidate crossings of the negative threshold -thresh
    # neg to pos
    nt_np = (nt_p[:-1] & nt_n[1:]).nonzero()[0] + 1
    # pos to neg
    nt_pn = (nt_n[:-1] & nt_p[1:]).nonzero()[0]

    # Concat indices, order sequentially
    ind_all = numpy.hstack((pt_np, nt_np, pt_pn, nt_pn))
    ind_all.sort()

    # Omit rows where just touching but not crossing (no sign change
    # between consecutive candidate cues).
    crossing_mask = ~(numpy.diff(numpy.sign(x[ind_all])) == 0)
    # Append a False to make the same length as ind_all
    crossing_mask = numpy.hstack((crossing_mask, False))

    # Get 1st and 2nd crossings
    ind_1stx = ind_all[crossing_mask]
    ind_2ndx = ind_all[numpy.where(crossing_mask)[0] + 1]

    # Drop crossings whose second cue would fall outside the signal.
    # BUGFIX: the original looped over indices deleting one element at a
    # time (which shifts the remaining indices after each delete) and also
    # deleted the second entry from ind_1stx instead of ind_2ndx.  Apply a
    # single boolean mask to both arrays instead.
    keep = ind_2ndx <= len(x) - 1
    ind_1stx = ind_1stx[keep]
    ind_2ndx = ind_2ndx[keep]

    # Get direction/sign of crossing
    signs = numpy.sign(x[ind_1stx]) * -1

    # Add column of direction and transpose
    zc = numpy.vstack((ind_1stx, ind_2ndx, signs)).T

    # Enforce the maximum inter-crossing duration when requested.
    if t_max:
        zc = zc[zc[:, 1] - zc[:, 0] <= t_max, :]

    return zc.astype(int)
|
def get_cot_artifacts(context):
    """Generate the artifact relative paths and shas for the chain of trust.

    Args:
        context (scriptworker.context.Context): the scriptworker context.

    Returns:
        dict: a dictionary of {"path/to/artifact": {"hash_alg": "..."}, ...}
    """
    artifact_dir = context.config['artifact_dir']
    hash_alg = context.config['chain_of_trust_hash_algorithm']
    # Sorted for a deterministic, reproducible artifact listing.
    return {
        relpath: {hash_alg: get_hash(os.path.join(artifact_dir, relpath), hash_alg=hash_alg)}
        for relpath in sorted(filepaths_in_dir(artifact_dir))
    }
|
def IntegerDifference(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str] = None) -> Vertex:
    """Subtracts one vertex from another

    :param left: the vertex to be subtracted from
    :param right: the vertex to subtract
    """
    minuend = cast_to_integer_vertex(left)
    subtrahend = cast_to_integer_vertex(right)
    return Integer(context.jvm_view().IntegerDifferenceVertex, label, minuend, subtrahend)
|
def clean_recipe_build(self, args):
    """Deletes the build files of the given recipe.

    This is intended for debug purposes. You may experience
    strange behaviour or problems with some recipes if their
    build has made unexpected state changes. If this happens, run
    clean_builds, or attempt to clean other recipes until things
    work again.
    """
    target = Recipe.get_recipe(args.recipe, self.ctx)
    info('Cleaning build for {} recipe.'.format(target.name))
    target.clean_build()
    # Dists built on top of the stale build are cleaned too unless suppressed.
    if not args.no_clean_dists:
        self.clean_dists(args)
|
def execute_command(self, parts, dry_run):
    """Execute a command.

    Parameters
    ----------
    parts : list
        Sequence of strings constituting a command.
    dry_run : bool
        Whether to just log the command instead of executing it.

    Returns
    -------
    status : int
        Status code of the executed command or 0 if `dry_run` is `True`.
    """
    # Render once for all log messages.
    rendered = " ".join(map(str, parts))
    if dry_run:
        self.logger.info("dry-run command '%s'", rendered)
        return 0
    else:  # pragma: no cover
        self.logger.debug("executing command '%s'", rendered)
        status_code = os.spawnvpe(os.P_WAIT, parts[0], parts, os.environ)
        if status_code:
            self.logger.warning("command '%s' returned status code %d", rendered, status_code)
        return status_code
|
def MAKE_WPARAM(wParam):
    """Convert arguments to the WPARAM type.
    Used automatically by SendMessage, PostMessage, etc.
    You shouldn't need to call this function.
    """
    value = ctypes.cast(wParam, LPVOID).value
    # A NULL pointer casts to None; WPARAM represents it as 0.
    return 0 if value is None else value
|
def hours_estimate(self, branch='master', grouping_window=0.5, single_commit_hours=0.5, limit=None, days=None, committer=True, by=None, ignore_globs=None, include_globs=None):
    """
    inspired by: https://github.com/kimmobrunfeldt/git-hours/blob/8aaeee237cb9d9028e7a2592a25ad8468b1f45e4/index.js#L114-L143

    Iterates through the commit history of repo to estimate the time commitement of each author or committer over
    the course of time indicated by limit/extensions/days/etc.

    :param branch: the branch to return commits for
    :param limit: (optional, default=None) a maximum number of commits to return, None for no limit
    :param grouping_window: (optional, default=0.5 hours) the threhold for how close two commits need to be to consider them part of one coding session
    :param single_commit_hours: (optional, default 0.5 hours) the time range to associate with one single commit
    :param days: (optional, default=None) number of days to return, if limit is None
    :param committer: (optional, default=True) whether to use committer vs. author
    :param by: (optional, default=None) aggregate by 'committer'/'author' or by 'repository'
    :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
    :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.
    :return: DataFrame
    """
    if limit is not None:
        # Spread the commit budget evenly across the repositories.
        limit = int(limit / len(self.repo_dirs))

    com = 'committer' if committer else 'author'

    df = pd.DataFrame(columns=[com, 'hours', 'repository'])
    for repo in self.repos:
        try:
            ch = repo.hours_estimate(branch, grouping_window=grouping_window, single_commit_hours=single_commit_hours, limit=limit, days=days, committer=committer, ignore_globs=ignore_globs, include_globs=include_globs)
            ch['repository'] = repo.repo_name
            # BUGFIX: DataFrame.append is deprecated (removed in pandas 2.0);
            # pd.concat is the supported equivalent.
            df = pd.concat([df, ch])
        except GitCommandError:
            print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))

    # BUGFIX: reset_index() returns a new frame; the original discarded the
    # result, leaving the per-repo indices in place. drop=True keeps the
    # column schema unchanged.
    df = df.reset_index(drop=True)

    if by == 'committer' or by == 'author':
        df = df.groupby(com).agg({'hours': sum})
        df = df.reset_index()
    elif by == 'repository':
        df = df.groupby('repository').agg({'hours': sum})
        df = df.reset_index()

    return df
|
def get_fc2(supercell, symmetry, dataset, atom_list=None, decimals=None):
    """Force constants are computed.

    Force constants, Phi, are calculated from sets for forces, F, and
    atomic displacement, d:

        Phi = -F / d

    This is solved by matrix pseudo-inversion.
    Crystal symmetry is included when creating F and d matrices.

    Parameters
    ----------
    supercell : object
        Supercell; used for atom count and cell vectors.
    symmetry : object
        Symmetry information (rotations and atomic permutations).
    dataset : dict
        Displacement dataset -- presumably forces per finite displacement;
        consumed by _get_force_constants_disps (TODO confirm exact schema).
    atom_list : sequence of int, optional
        Indices of displaced atoms to compute rows for; None means all atoms.
    decimals : int, optional
        If given (and non-zero), round the result to this many decimals.

    Returns
    -------
    ndarray
        Force constants [i, j, a, b]
        i: Atom index of finitely displaced atom.
        j: Atom index at which force on the atom is measured.
        a, b: Cartesian direction indices = (0, 1, 2) for i and j, respectively
        dtype=double
        shape=(len(atom_list), n_satom, 3, 3),
    """
    # One output row per displaced atom (all atoms when atom_list is None).
    if atom_list is None:
        fc_dim0 = supercell.get_number_of_atoms()
    else:
        fc_dim0 = len(atom_list)
    force_constants = np.zeros((fc_dim0, supercell.get_number_of_atoms(), 3, 3), dtype='double', order='C')

    # Fill force_constants[displaced_atoms, all_atoms_in_supercell] from the
    # displacement dataset; returns the atoms actually computed directly.
    atom_list_done = _get_force_constants_disps(force_constants, supercell, dataset, symmetry, atom_list=atom_list)

    # Distribute the directly-computed rows using the crystal symmetry
    # operations (rotations in the supercell lattice frame plus the matching
    # atomic permutations).
    rotations = symmetry.get_symmetry_operations()['rotations']
    lattice = np.array(supercell.get_cell().T, dtype='double', order='C')
    permutations = symmetry.get_atomic_permutations()
    distribute_force_constants(force_constants, atom_list_done, lattice, rotations, permutations, atom_list=atom_list)

    if decimals:
        force_constants = force_constants.round(decimals=decimals)

    return force_constants
|
def start_standing_subprocess(cmd, shell=False, env=None):
    """Starts a long-running subprocess.

    This is not a blocking call and the subprocess started by it should be
    explicitly terminated with stop_standing_subprocess.

    For short-running commands, you should use subprocess.check_call, which
    blocks.

    Args:
        cmd: string, the command to start the subprocess with.
        shell: bool, True to run this command through the system shell,
            False to invoke it directly. See subprocess.Proc() docs.
        env: dict, a custom environment to run the standing subprocess. If not
            specified, inherits the current environment. See subprocess.Popen()
            docs.

    Returns:
        The subprocess that was started.
    """
    logging.debug('Starting standing subprocess with: %s', cmd)
    process = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=shell,
        env=env)
    # An open stdin pipe breaks interactive consumers such as the
    # code.interact() shell (http://stackoverflow.com/a/25512460/1612937),
    # and a standing subprocess never needs it, so close and clear it now.
    process.stdin.close()
    process.stdin = None
    logging.debug('Started standing subprocess %d', process.pid)
    return process
|
def p_enum_constant(t):
    """enum_constant : ID EQUALS value"""
    # NOTE: the docstring above is the ply grammar rule -- do not reword it.
    global name_dict, error_occurred
    id = t[1]
    value = t[3]
    lineno = t.lineno(1)
    if id_unique(id, 'enum', lineno):
        info = name_dict[id] = const_info(id, value, lineno, enum=True)
        if not (value[0].isdigit() or value[0] == '-'):
            # We have a name instead of a constant, make sure it is defined
            if value not in name_dict:
                error_occurred = True
                # BUGFIX: lineno is an int; the original '{1:s}' format spec
                # raised ValueError while trying to report the error.  Also
                # fixes the 'derefence' typo in the message.
                print("ERROR - can't dereference {0:s} at line {1:d}".format(value, lineno))
            elif not isinstance(name_dict[value], const_info):
                error_occurred = True
                print("ERROR - reference to {0:s} at line {1:d} is not a constant".format(value, lineno))
            else:
                # Propagate signedness from the referenced constant.
                info.positive = name_dict[value].positive
        t[0] = [info]
    else:
        t[0] = []
|
def send_durable_exchange_message(self, exchange_name, body):
    """Send a message with the specified body to an exchange.

    :param exchange_name: str: name of the exchange to send the message into
    :param body: str: contents of the message
    :return Bool: True when delivery confirmed
    """
    self.connect()
    channel = self.connection.channel()
    # Fanout will send message to multiple subscribers
    channel.exchange_declare(exchange=exchange_name, type='fanout')
    # delivery_mode=2 marks the message persistent.
    persistent = pika.BasicProperties(delivery_mode=2)
    confirmed = channel.basic_publish(exchange=exchange_name, routing_key='', body=body, properties=persistent)
    self.close()
    return confirmed
|
def value_validate(self, value):
    """Converts the input single value into the expected Python data type,
    raising django.core.exceptions.ValidationError if the data can't be
    converted. Returns the converted value. Subclasses should override
    this.
    """
    # Integers are stored as their string representation (LDAP attribute
    # values are strings).
    if isinstance(value, six.integer_types):
        try:
            return str(value)
        except (TypeError, ValueError):
            raise tldap.exceptions.ValidationError("is invalid integer")
    raise tldap.exceptions.ValidationError("should be a integer")
|
def _catalog_check ( self , cat_name , append = False ) :
"""Check to see if the name of the ingested catalog is valid
Parameters
cat _ name : str
The name of the catalog in the Catalog object
append : bool
Append the catalog rather than replace
Returns
bool
True if good catalog name else False"""
|
good = True
# Make sure the attribute name is good
if cat_name [ 0 ] . isdigit ( ) :
print ( "No names beginning with numbers please!" )
good = False
# Make sure catalog is unique
if not append and cat_name in self . catalogs :
print ( "Catalog {} already ingested. Set 'append=True' to add more records." . format ( cat_name ) )
good = False
return good
|
def compile_mof_string(self, mof_str, namespace=None, search_paths=None, verbose=None):
    """Compile the MOF definitions in the specified string and add the
    resulting CIM objects to the specified CIM namespace of the mock
    repository.

    If the namespace does not exist, :exc:`~pywbem.CIMError` with status
    CIM_ERR_INVALID_NAMESPACE is raised.

    This method supports all MOF pragmas, and specifically the include
    pragma.

    If a CIM class or CIM qualifier type to be added already exists in the
    target namespace with the same name (comparing case insensitively),
    this method raises :exc:`~pywbem.CIMError`.

    If a CIM instance to be added already exists in the target namespace
    with the same keybinding values, this method raises
    :exc:`~pywbem.CIMError`.

    In all cases where this method raises an exception, the mock repository
    remains unchanged.

    Parameters:

      mof_str (:term:`string`):
        A string with the MOF definitions to be compiled.

      namespace (:term:`string`):
        The name of the target CIM namespace in the mock repository. This
        namespace is also used for lookup of any existing or dependent
        CIM objects. If `None`, the default namespace of the connection is
        used.

      search_paths (:term:`py:iterable` of :term:`string`):
        An iterable of directory path names where MOF dependent files will
        be looked up.
        See the description of the `search_path` init parameter of the
        :class:`~pywbem.MOFCompiler` class for more information on MOF
        dependent files.

      verbose (:class:`py:bool`):
        Controls whether to issue more detailed compiler messages.

    Raises:

      IOError: MOF file not found.
      :exc:`~pywbem.MOFParseError`: Compile error in the MOF.
      :exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
        not exist.
      :exc:`~pywbem.CIMError`: Failure related to the CIM objects in the
        mock repository.
    """
    # Fall back to the connection's default namespace.
    namespace = namespace or self.default_namespace
    # TODO: consider auto-creating missing namespaces instead of failing:
    # if not self._validate_namespace(namespace):
    #     self.add_namespace(namespace)
    self._validate_namespace(namespace)
    # Compile against a mock connection so results land in this repository.
    mofcomp = MOFCompiler(_MockMOFWBEMConnection(self), search_paths=search_paths, verbose=verbose)
    mofcomp.compile_string(mof_str, namespace)
|
def get_fragment_language() -> ParserElement:
    """Build a protein fragment parser."""
    # A fragment value is either a residue range or the "missing" marker...
    bare_value = fragment_range | missing_fragment(FRAGMENT_MISSING)
    # ...optionally wrapped in double quotes.
    value = bare_value | And([Suppress('"'), bare_value, Suppress('"')])
    # Fragments may carry an optional trailing description.
    return fragment_tag + nest(value + Optional(WCW + quote(FRAGMENT_DESCRIPTION)))
|
def _set_logging ( logger_name = "colin" , level = logging . INFO , handler_class = logging . StreamHandler , handler_kwargs = None , format = '%(asctime)s.%(msecs).03d %(filename)-17s %(levelname)-6s %(message)s' , date_format = '%H:%M:%S' ) :
"""Set personal logger for this library .
: param logger _ name : str , name of the logger
: param level : int , see logging . { DEBUG , INFO , ERROR , . . . } : level of logger and handler
: param handler _ class : logging . Handler instance , default is StreamHandler ( / dev / stderr )
: param handler _ kwargs : dict , keyword arguments to handler ' s constructor
: param format : str , formatting style
: param date _ format : str , date style in the logs"""
|
if level != logging . NOTSET :
logger = logging . getLogger ( logger_name )
logger . setLevel ( level )
# do not readd handlers if they are already present
if not [ x for x in logger . handlers if isinstance ( x , handler_class ) ] :
handler_kwargs = handler_kwargs or { }
handler = handler_class ( ** handler_kwargs )
handler . setLevel ( level )
formatter = logging . Formatter ( format , date_format )
handler . setFormatter ( formatter )
logger . addHandler ( handler )
|
def checkPerformance(self) -> Optional[bool]:
    """Check if master instance is slow and send an instance change request.

    :returns: True if master performance is OK, False if performance
        degraded, None if the check was not applicable (node not
        participating, a view change in progress, or no new requests
        ordered since the last check)
    """
    logger.trace("{} checking its performance".format(self))

    # Move ahead only if the node has synchronized its state with other
    # nodes
    if not self.isParticipating:
        return
    # Performance numbers are unreliable while a view change is running.
    if self.view_change_in_progress:
        return
    # Nothing new ordered -> nothing to measure.
    if not self._update_new_ordered_reqs_count():
        logger.trace("{} ordered no new requests".format(self))
        return

    if self.instances.masterId is not None:
        self.sendNodeRequestSpike()

    # Record throughput metrics when the monitor has values for them.
    master_throughput, backup_throughput = self.monitor.getThroughputs(0)
    if master_throughput is not None:
        self.metrics.add_event(MetricsName.MONITOR_AVG_THROUGHPUT, master_throughput)
    if backup_throughput is not None:
        self.metrics.add_event(MetricsName.BACKUP_MONITOR_AVG_THROUGHPUT, backup_throughput)

    # Likewise for average latencies (falsy values are skipped).
    avg_lat_master, avg_lat_backup = self.monitor.getLatencies()
    if avg_lat_master:
        self.metrics.add_event(MetricsName.MONITOR_AVG_LATENCY, avg_lat_master)
    if avg_lat_backup:
        self.metrics.add_event(MetricsName.BACKUP_MONITOR_AVG_LATENCY, avg_lat_backup)

    # Degraded backup instances are handed off to the faulty-backup processor.
    degraded_backups = self.monitor.areBackupsDegraded()
    if degraded_backups:
        logger.display('{} backup instances performance degraded'.format(degraded_backups))
        self.backup_instance_faulty_processor.on_backup_degradation(degraded_backups)

    # A degraded master asks the view changer for a view change.
    if self.monitor.isMasterDegraded():
        logger.display('{} master instance performance degraded'.format(self))
        self.view_changer.on_master_degradation()
        return False
    else:
        logger.trace("{}'s master has higher performance than backups".format(self))
        return True
|
def _tofloat ( obj ) :
"""Convert to float if object is a float string ."""
|
if "inf" in obj . lower ( ) . strip ( ) :
return obj
try :
return int ( obj )
except ValueError :
try :
return float ( obj )
except ValueError :
return obj
|
async def update(self, fields=''):
    '''reload object info from emby

    |coro|

    Parameters
    ----------
    fields : str
        additional fields to request when updating

    See Also
    --------
    refresh : same thing
    send :
    post :
    '''
    endpoint = 'Users/{{UserId}}/Items/{}'.format(self.id)
    fresh = await self.connector.getJson(endpoint, remote=False, Fields='Path,Overview,' + fields)
    self.object_dict.update(fresh)
    # Cached extras are stale after a reload; drop them.
    self.extras = {}
    return self
|
def open_shot_path(self, *args, **kwargs):
    """Open the currently selected shot in the filebrowser

    :returns: None
    :rtype: None
    :raises: None
    """
    # Open the directory containing the shot file, not the file itself.
    shot_file = self.shot_path_le.text()
    shot_dir = os.path.dirname(shot_file)
    get_interface().open_path(shot_dir)
|
def find_minimum_spanning_forest_as_subgraphs(graph):
    """Calculates the minimum spanning forest and returns a list of trees as subgraphs."""
    # One subgraph per tree (edge list) in the forest.
    return [
        get_subgraph_from_edge_list(graph, tree_edges)
        for tree_edges in find_minimum_spanning_forest(graph)
    ]
|
def add_action(self, actor, action, date, type=None, committees=None, legislators=None, **kwargs):
    """Add an action that was performed on this bill.

    :param actor: a string representing who performed the action.
        If the action is associated with one of the chambers this
        should be 'upper' or 'lower'. Alternatively, this could be
        the name of a committee, a specific legislator, or an outside
        actor such as 'Governor'.
    :param action: a string representing the action performed, e.g.
        'Introduced', 'Signed by the Governor', 'Amended'
    :param date: the date/time this action was performed.
    :param type: a type classification for this action
    :param committees: a committee or list of committees to associate with
        this action
    :param legislators: a legislator or list of legislators to associate with
        this action
    """
    def _cleanup_list(obj, default):
        # Normalize: falsy -> default, single string -> one-element list,
        # any other iterable -> list.  Always returns a list.
        if not obj:
            obj = default
        elif isinstance(obj, string_types):
            obj = [obj]
        elif not isinstance(obj, list):
            obj = list(obj)
        return obj

    type = _cleanup_list(type, ['other'])
    committees = _cleanup_list(committees, [])
    legislators = _cleanup_list(legislators, [])

    if 'committee' in kwargs:
        raise ValueError("invalid param 'committee' passed to add_action, "
                         "must use committees")

    # NOTE: the original re-checked isinstance(committees, string_types)
    # here, but _cleanup_list always returns a list, so that branch was
    # dead code and has been removed.
    related_entities = []
    for committee in committees:
        related_entities.append({"type": "committee", "name": committee})
    for legislator in legislators:
        related_entities.append({"type": "legislator", "name": legislator})

    self['actions'].append(dict(actor=actor, action=action, date=date, type=type, related_entities=related_entities, **kwargs))
|
def _linearize(interface):
    """Return a list of all the bases of a given interface in depth-first order.

    @param interface: an Interface object.

    @return: a L{list} of Interface objects, the input and all its bases, in
        subclass-to-base-class, depth-first order.
    """
    # Iterative depth-first pre-order traversal; duplicates are kept, and
    # the root Interface marker itself is excluded from the bases.
    result = []
    stack = [interface]
    while stack:
        current = stack.pop()
        result.append(current)
        # Push bases reversed so they pop off in declaration order.
        stack.extend(base for base in reversed(current.__bases__)
                     if base is not Interface)
    return result
|
def oauth_client_create(self, name, redirect_uri, **kwargs):
    """Make a new OAuth Client and return it"""
    # Extra kwargs may override the base fields, matching dict.update order.
    payload = {"label": name, "redirect_uri": redirect_uri}
    payload.update(kwargs)
    result = self.client.post('/account/oauth-clients', data=payload)
    if 'id' not in result:
        raise UnexpectedResponseError('Unexpected response when creating OAuth Client!', json=result)
    return OAuthClient(self.client, result['id'], result)
|
def tokenize(self, string):
    '''Maps a string to an iterator over tokens. In other words: [char] -> [token]'''
    lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
    lexer.latest_newline = 0
    lexer.string_value = None
    lexer.input(string)
    # iter(callable, sentinel) pulls tokens until the lexer returns None.
    for token in iter(lexer.token, None):
        # Column is measured from the last newline the lexer saw.
        token.col = token.lexpos - lexer.latest_newline
        yield token
    # A leftover partial string value means the input ended mid-literal.
    if lexer.string_value is not None:
        raise JsonPathLexerError('Unexpected EOF in string literal or identifier')
|
def getConfigRoot(cls, create=False):
    """Return the mapped configuration root node"""
    # Classes without a 'configkey' attribute (and lookups that fail with
    # AttributeError inside gettree) map to None.
    try:
        return manager.gettree(cls.configkey, create)
    except AttributeError:
        return None
|
def import_table_in_db(self, file_path, use_columns_with_index, column_names_in_db, table):
    """Imports data from CTD file into database

    :param str file_path: path to file
    :param list[int] use_columns_with_index: list of column indices in file
    :param list[str] column_names_in_db: list of column names (have to fit to models except domain_id column name)
    :param table: `manager.table.Table` object
    """
    # Stream the (potentially huge) tab-separated file in 1M-row chunks;
    # '#' lines are CTD comment headers.
    chunks = pd.read_table(file_path, usecols=use_columns_with_index, names=column_names_in_db, header=None, comment='#', index_col=False, chunksize=1000000, dtype=self.get_dtypes(table.model))
    for chunk in chunks:
        # this is an evil hack because CTD is not using the MESH prefix in this table
        if table.name == 'exposure_event':
            chunk.disease_id = 'MESH:' + chunk.disease_id
        # Synthesize a 1-based surrogate primary key from the row index.
        chunk['id'] = chunk.index + 1
        if table.model not in table_conf.models_to_map:
            # Replace each mapped domain's natural '<domain>_id' column with
            # the foreign key via the preloaded mapper frames.
            for model in table_conf.models_to_map:
                domain = model.table_suffix
                domain_id = domain + "_id"
                if domain_id in column_names_in_db:
                    chunk = pd.merge(chunk, self.mapper[domain], on=domain_id, how='left')
                    del chunk[domain_id]
        chunk.set_index('id', inplace=True)
        table_with_prefix = defaults.TABLE_PREFIX + table.name
        # Append the chunk to the (prefixed) destination table.
        chunk.to_sql(name=table_with_prefix, if_exists='append', con=self.engine)
    del chunks
|
def query_pager_by_slug(slug, current_page_num=1, tag='', order=False):
    '''Query pager via category slug.

    :param slug: category slug used to look up the category record.
    :param current_page_num: 1-based page number (ignored when `order` is True).
    :param tag: if given, restrict posts to those whose extinfo
        'def_tag_arr' contains this tag.
    :param order: if True, sort by the posts' explicit `order` column
        (ascending, unpaginated) instead of by update time.
    :return: a peewee select query of matching posts, or None if the slug
        does not resolve to a category.
    '''
    cat_rec = MCategory.get_by_slug(slug)
    if cat_rec:
        cat_id = cat_rec.uid
    else:
        return None
    # Category uids ending in '00' are first-level categories and are
    # matched via the parent id; all others match as tags.
    if cat_id.endswith('00'):
        cat_con = TabPost2Tag.par_id == cat_id
    else:
        cat_con = TabPost2Tag.tag_id == cat_id
    if tag:
        # Filter additionally on posts tagged via the JSON extinfo field.
        condition = {'def_tag_arr': [tag]}
        recs = TabPost.select().join(TabPost2Tag, on=((TabPost.uid == TabPost2Tag.post_id) & (TabPost.valid == 1))).where(cat_con & TabPost.extinfo.contains(condition)).order_by(TabPost.time_update.desc()).paginate(current_page_num, CMS_CFG['list_num'])
    elif order:
        recs = TabPost.select().join(TabPost2Tag, on=((TabPost.uid == TabPost2Tag.post_id) & (TabPost.valid == 1))).where(cat_con).order_by(TabPost.order.asc())
    else:
        recs = TabPost.select().join(TabPost2Tag, on=((TabPost.uid == TabPost2Tag.post_id) & (TabPost.valid == 1))).where(cat_con).order_by(TabPost.time_update.desc()).paginate(current_page_num, CMS_CFG['list_num'])
    return recs
|
def calc_core_bytes(self):
    """Convert all used annotation fields into bytes to write.

    Returns a 1d uint8 ndarray encoding, per annotation, the
    (sample-difference, symbol) pair followed by any optional fields
    ('num', 'subtype', 'chan', 'aux_note') that are not blank.
    """
    # The difference sample to write (the first sample stays absolute).
    if len(self.sample) == 1:
        sampdiff = np.array([self.sample[0]])
    else:
        sampdiff = np.concatenate(([self.sample[0]], np.diff(self.sample)))

    # Create a copy of the annotation object with a
    # compact version of fields to write
    compact_annotation = copy.deepcopy(self)
    compact_annotation.compact_fields()

    # The optional fields to be written. Write if they are not None or all empty
    extra_write_fields = []

    for field in ['num', 'subtype', 'chan', 'aux_note']:
        if not isblank(getattr(compact_annotation, field)):
            extra_write_fields.append(field)

    data_bytes = []

    # Iterate across all fields one index at a time
    for i in range(len(sampdiff)):
        # Process the samp (difference) and sym items
        data_bytes.append(field2bytes('samptype', [sampdiff[i], self.symbol[i]]))

        # Process the extra optional fields (None entries are skipped)
        for field in extra_write_fields:
            value = getattr(compact_annotation, field)[i]
            if value is not None:
                data_bytes.append(field2bytes(field, value))

    # Flatten and convert to correct format
    data_bytes = np.array([item for sublist in data_bytes for item in sublist]).astype('u1')

    return data_bytes
|
def dispersion(words, corpus, y=None, ax=None, colors=None, colormap=None, labels=None, annotate_docs=False, ignore_case=False, **kwargs):
    """Display a lexical dispersion plot for words in a corpus.

    Quick one-off wrapper around the :class:`DispersionPlot` visualizer.

    Parameters
    ----------
    words : list
        Words whose dispersion will be examined within the corpus.
    corpus : list
        List of documents, each a list of words in document order.
    y : ndarray or Series of length n, optional
        Target or class values; when given, points are colored by class.
    ax : matplotlib axes, default: None
        The axes to plot the figure on.
    colors : list or tuple of colors, optional
        Colors for each individual class.
    colormap : string or matplotlib cmap, optional
        Qualitative colormap for discrete targets.
    labels : list of strings, optional
        Class names for the legend, in sorted class order.
    annotate_docs : boolean, default: False
        Draw vertical lines at the end of each document.
    ignore_case : boolean, default: False
        Whether matching is case-insensitive.
    kwargs : dict
        Additional keyword arguments passed through to the visualizer.

    Returns
    -------
    ax : matplotlib axes
        The axes that the plot was drawn on.
    """
    viz = DispersionPlot(
        words,
        ax=ax,
        colors=colors,
        colormap=colormap,
        ignore_case=ignore_case,
        labels=labels,
        annotate_docs=annotate_docs,
        **kwargs
    )
    # fit() performs the transform and draws onto the axes.
    viz.fit(corpus, y, **kwargs)
    return viz.ax
|
def ReadAllClientActionRequests(self, client_id):
    """Reads all client action requests available for a given client_id.

    Returns copies of the stored requests, annotated with lease state and
    a remaining TTL (decremented once per lease taken).
    """
    requests = []
    for key, stored in iteritems(self.client_action_requests):
        # Keys are (client_id, ...) triples; skip other clients.
        if key[0] != client_id:
            continue
        req = stored.Copy()
        req.ttl = db.Database.CLIENT_MESSAGES_TTL
        lease = self.client_action_request_leases.get(key)
        if lease is None:
            req.leased_until = None
            req.leased_by = None
        else:
            req.leased_until, req.leased_by, lease_count = lease
            # Each lease consumed reduces the remaining TTL.
            req.ttl -= lease_count
        requests.append(req)
    return requests
|
def ismatch(a, b):
    """Return True if *a* and *b* plausibly denote the same unit.

    Allows smart comparisons between classes, instances, and string
    representations of units.  Strategies are tried in order:

    1. direct equality (``a == b``)
    2. ``isinstance`` in both orders (class vs. instance)
    3. ``isinstance`` against ``type()`` of the other operand
    4. case-insensitive comparison of the string representations

    For internal use only.

    :param a: unit, unit class, or string representation of a unit
    :param b: unit, unit class, or string representation of a unit
    :return: bool
    """
    # Try the easy case first.
    if a == b:
        return True
    # isinstance raises TypeError when the second argument is not a
    # class; treat that as "no match" and move on to the next strategy.
    # (The original nested try/except pyramid silently returned None --
    # skipping the remaining strategies -- whenever an isinstance check
    # evaluated to False without raising.)
    for obj, cls in ((a, b), (b, a), (a, type(b)), (b, type(a))):
        try:
            if isinstance(obj, cls):
                return True
        except TypeError:
            pass
    # Fall back to comparing string representations case-insensitively.
    return str(a).lower() == str(b).lower()
|
def _write_utf8(write, value):
    """Writes a length-prefixed UTF-8 string.

    The prefix must be the number of *encoded bytes*, not characters:
    for non-ASCII text the two differ, and prefixing the character count
    (as the previous version did) corrupts the stream for any reader
    that trusts the prefix.

    :param write: writer callable taking a struct format code and a
        value, with the underlying stream exposed as ``write.io``
    :param value: text to serialize
    """
    data = value.encode('utf-8')
    # 'h' = signed 16-bit length prefix, followed by the raw bytes.
    write('h', len(data))
    write.io.write(data)
|
def run_changed_file_cmd(cmd, fp, pretty):
    """Run a user-supplied shell command when a monitored file changed.

    Placeholder tokens in *cmd* are substituted before execution: the
    changed file's path, its raw content, and the prettified content
    (the two content placeholders are passed base64-encoded).

    NOTE: Python 2 code (``print`` statement, ``str.encode('base64')``).

    :param cmd: shell command template containing placeholder tokens
    :param fp: path of the file that changed
    :param pretty: the parsed (prettified) file content
    """
    with open(fp) as f:
        raw = f.read()
    # Quote each placeholder if the user did not, so substituted values
    # containing spaces survive shell word-splitting.
    for ph in (dir_mon_filepath_ph, dir_mon_content_raw, dir_mon_content_pretty):
        if ph in cmd and not ('"%s"' % ph) in cmd and not ("'%s'" % ph) in cmd:
            cmd = cmd.replace(ph, '"%s"' % ph)
    cmd = cmd.replace(dir_mon_filepath_ph, fp)
    print col('Running %s' % cmd, H1)
    # Substitute the content placeholders base64-encoded (py2 codec).
    for r, what in ((dir_mon_content_raw, raw), (dir_mon_content_pretty, pretty)):
        cmd = cmd.replace(r, what.encode('base64'))
    # yeah, i know, sub bla bla...
    # os.system returns a non-zero status on failure.
    if os.system(cmd):
        print col('(the command failed)', R)
|
def validate_collections(self, model, context=None):
    """Validate all collection properties of a model.

    Runs the direct validators and the nested schema validation for
    every registered collection property and aggregates the errors.

    :param model: object or dict
    :param context: object, dict or None
    :return: shiftschema.result.Result
    """
    result = Result()
    for name, prop in self.collections.items():
        value = self.get(model, name)
        # Direct (whole-collection) validators first.
        direct = prop.validate(value=value, model=model, context=context)
        if len(direct):
            result.add_collection_errors(property_name=name, direct_errors=direct)
        # Then per-item validation through the nested schema.
        nested = prop.validate_with_schema(collection=value, context=context)
        result.add_collection_errors(property_name=name, collection_errors=nested)
    return result
|
def curve_specialize(curve, new_curve):
    """Image for :meth:`.Curve.specialize` docstring.

    Plots *curve* and its specialization *new_curve* on a shared axis,
    marks the first and last control point of each, and saves the result
    as ``curve_specialize.png``.

    :param curve: the original curve, labeled with the interval [0, 1]
    :param new_curve: the specialized curve, labeled [-1/4, 3/4]
    """
    # Image generation can be disabled globally (e.g. on CI).
    if NO_IMAGES:
        return
    ax = curve.plot(256)
    interval = r"$\left[0, 1\right]$"
    # The most recently added line on the axes is the curve just plotted.
    line = ax.lines[-1]
    line.set_label(interval)
    color1 = line.get_color()
    new_curve.plot(256, ax=ax)
    interval = r"$\left[-\frac{1}{4}, \frac{3}{4}\right]$"
    line = ax.lines[-1]
    line.set_label(interval)
    # Mark the first and last control points of each curve using that
    # curve's line color.
    ax.plot(curve._nodes[0, (0, -1)], curve._nodes[1, (0, -1)], color=color1, linestyle="None", marker="o",)
    ax.plot(new_curve._nodes[0, (0, -1)], new_curve._nodes[1, (0, -1)], color=line.get_color(), linestyle="None", marker="o",)
    ax.legend(loc="lower right", fontsize=12)
    ax.axis("scaled")
    ax.set_xlim(-0.375, 1.125)
    ax.set_ylim(-0.75, 0.625)
    save_image(ax.figure, "curve_specialize.png")
|
def insrtc(item, inset):
    """Insert an item (or each item of a list) into a character set.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/insrtc_c.html

    :param item: Item to be inserted.
    :type item: str or list of str
    :param inset: Insertion set.
    :type inset: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(inset, stypes.SpiceCell)
    # Normalize to a list so single strings and string lists share one
    # code path.
    entries = item if isinstance(item, list) else [item]
    for entry in entries:
        libspice.insrtc_c(stypes.stringToCharP(entry), ctypes.byref(inset))
|
def forbid_multi_line_headers(name, val):
    """Forbid multi-line headers, to prevent header injection.

    Returns the (name, value) pair with the value ASCII-encoded when
    possible.  Non-ASCII values are MIME-encoded; address headers are
    encoded per-address so the addresses themselves stay intact.
    """
    val = smart_text(val)
    if "\n" in val or "\r" in val:
        raise BadHeaderError(
            "Header values can't contain newlines " "(got %r for header %r)" % (val, name)
        )
    try:
        val = val.encode("ascii")
    except UnicodeEncodeError:
        if name.lower() in ("to", "from", "cc"):
            # MIME-encode only the display names; the addresses must
            # remain plain ASCII.
            encoded = []
            for addr_item in val.split(", "):
                display, address = parseaddr(addr_item)
                display = str(Header(display, DEFAULT_CHARSET))
                encoded.append(formataddr((display, str(address))))
            val = ", ".join(encoded)
        else:
            val = Header(val, DEFAULT_CHARSET)
    else:
        # Pure-ASCII subjects are still wrapped in a Header object.
        if name.lower() == "subject":
            val = Header(val)
    return name, val
|
def selected_item(self):
    """:obj:`consolemenu.items.MenuItem`: The item in :attr:`items` that the user most recently selected, or None."""
    # NOTE(review): the guard checks ``selected_option`` but the lookup
    # indexes with ``current_option`` -- presumably both track the same
    # index here; confirm this is intentional and not a stale-index bug.
    if self.items and self.selected_option != -1:
        return self.items[self.current_option]
    else:
        return None
|
def setup_pilotpoints_grid(ml=None, sr=None, ibound=None, prefix_dict=None, every_n_cell=4, use_ibound_zones=False, pp_dir='.', tpl_dir='.', shapename="pp.shp"):
    """Setup regularly-spaced (gridded) pilot point parameterization.

    Deprecated shim: the implementation has moved to
    ``pp_utils.setup_pilotpoints_grid``; this wrapper warns and delegates.

    Parameters
    ----------
    ml : flopy.mbase
        a flopy mbase derived type.  If None, sr must not be None.
    sr : flopy.utils.reference.SpatialReference
        spatial reference locating the model grid.  If None, ml must not
        be None.  Default is None
    ibound : numpy.ndarray
        modflow ibound integer array used to restrict pilot points to
        active areas.  Default is None
    prefix_dict : dict
        {prefix: [layers]} pairs, e.g. {"hk": [0, 1, 2, 3]}.  If None, a
        generic "pp"-prefixed set is created.  Default is None
    every_n_cell : int
        pilot point spacing in cells.  Default is 4
    use_ibound_zones : bool
        treat positive ibound values as distinct zones (True) or a
        single zone (False).  Default is False
    pp_dir : str
        directory for pilot point files.  Default is '.'
    tpl_dir : str
        directory for pilot point template files.  Default is '.'
    shapename : str
        name of the summary shapefile to write.  Default is "pp.shp"

    Returns
    -------
    pp_df : pandas.DataFrame
        dataframe summarizing pilot point information (same information
        written to shapename)
    """
    from . import pp_utils
    warnings.warn("setup_pilotpoint_grid has moved to pp_utils...", PyemuWarning)
    delegated = dict(
        ml=ml,
        sr=sr,
        ibound=ibound,
        prefix_dict=prefix_dict,
        every_n_cell=every_n_cell,
        use_ibound_zones=use_ibound_zones,
        pp_dir=pp_dir,
        tpl_dir=tpl_dir,
        shapename=shapename,
    )
    return pp_utils.setup_pilotpoints_grid(**delegated)
|
def _admx_policy_parent_walk(path, policy_namespace, parent_category, policy_nsmap, return_full_policy_names, adml_language):
    '''helper function to recursively walk up the ADMX namespaces and build the
    hierarchy for the policy

    path: list -- accumulator of category display names, appended in
        child-to-parent order and returned
    policy_namespace: str -- namespace prefix of the current category
    parent_category: str -- category name, optionally "ns:name" qualified
    policy_nsmap: dict -- prefix -> namespace URI map for xpath lookups
    return_full_policy_names: bool -- resolve display names via ADML
    adml_language: str -- ADML language to resolve names against
    '''
    admx_policy_definitions = _get_policy_definitions(language=adml_language)
    category_xpath_string = '/policyDefinitions/categories/{0}:category[@name="{1}"]'
    using_xpath_string = '/policyDefinitions/policyNamespaces/{0}:using'
    if parent_category.find(':') >= 0:
        # the parent is in another namespace: switch namespace and merge
        # that namespace's prefix map into ours before resolving xpaths
        policy_namespace = parent_category.split(':')[0]
        parent_category = parent_category.split(':')[1]
        using_xpath_string = using_xpath_string.format(policy_namespace)
        policy_nsmap = dictupdate.update(policy_nsmap, _buildElementNsmap(admx_policy_definitions.xpath(using_xpath_string, namespaces=policy_nsmap)))
    category_xpath_string = category_xpath_string.format(policy_namespace, parent_category)
    if admx_policy_definitions.xpath(category_xpath_string, namespaces=policy_nsmap):
        tparent_category = admx_policy_definitions.xpath(category_xpath_string, namespaces=policy_nsmap)[0]
        this_parent_name = _getFullPolicyName(policy_item=tparent_category, policy_name=tparent_category.attrib['name'], return_full_policy_names=return_full_policy_names, adml_language=adml_language)
        path.append(this_parent_name)
        if tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap):
            # parent has a parent: recurse upward until the root category
            path = _admx_policy_parent_walk(path=path, policy_namespace=policy_namespace, parent_category=tparent_category.xpath('{0}:parentCategory/@ref'.format(policy_namespace), namespaces=policy_nsmap)[0], policy_nsmap=policy_nsmap, return_full_policy_names=return_full_policy_names, adml_language=adml_language)
    return path
|
def compute_ng_stat(gene_graph, pos_ct, alpha=.5):
    """Compute the clustering score for the gene on its neighbor graph.

    Parameters
    ----------
    gene_graph : dict
        Graph of spatially near codons. keys=nodes, edges=key->value.
    pos_ct : dict
        missense mutation count for each codon
    alpha : float
        smoothing factor; each neighbor of a mutated codon receives
        alpha * count while the codon itself keeps (1 - alpha) * count

    Returns
    -------
    graph_score : float
        entropy of the smoothed mutation distribution, measuring the
        clustering of missense mutations in the graph
    coverage : int
        number of nodes that received non-zero weight
    """
    # skip if there are no missense mutations
    if not len(pos_ct):
        return 1.0, 0
    max_pos = max(gene_graph)
    codon_vals = np.zeros(max_pos + 1)
    # smooth out mutation counts over the neighbor graph
    for pos, mut_count in pos_ct.items():
        # each neighbor receives alpha * mut_count
        neighbors = list(gene_graph[pos])
        codon_vals[neighbors] += alpha * mut_count
        # the codon itself keeps the remaining (1 - alpha) * mut_count
        codon_vals[pos] += (1 - alpha) * mut_count
    # compute regular (Shannon) entropy of the normalized weights
    p = codon_vals / np.sum(codon_vals)
    graph_score = mymath.shannon_entropy(p)
    # coverage: number of codons carrying any weight
    coverage = np.count_nonzero(p)
    return graph_score, coverage
|
def from_date(cls, date):
    """Returns a Month instance from the given datetime.date or
    datetime.datetime object."""
    # datetime.datetime exposes .date(); plain dates do not and are used
    # as-is.
    normalized = date.date() if hasattr(date, 'date') else date
    return cls(normalized.year, normalized.month)
|
def xml(self, attribs=None, elements=None, skipchildren=False):
    """See :meth:`AbstractElement.xml`.

    Serializes this span annotation; word/morpheme/phoneme children are
    emitted as ``wref`` reference elements instead of inline elements.
    """
    if not attribs:
        attribs = {}
    E = ElementMaker(namespace="http://ilk.uvt.nl/folia", nsmap={None: "http://ilk.uvt.nl/folia", 'xml': "http://www.w3.org/XML/1998/namespace"})
    # Serialize the element itself but skip children (third arg True);
    # children are appended explicitly below.
    e = super(AbstractSpanAnnotation, self).xml(attribs, elements, True)
    for child in self:
        if isinstance(child, (Word, Morpheme, Phoneme)):
            # Include REFERENCES to word items instead of word items themselves
            attribs['{' + NSFOLIA + '}id'] = child.id
            if child.PRINTABLE and child.hastext(self.textclass):
                attribs['{' + NSFOLIA + '}t'] = child.text(self.textclass)
            # NOTE(review): ``attribs`` is reused across iterations, so a
            # ``t`` entry set for an earlier child carries over to later
            # children that have no text -- confirm this is intended.
            e.append(E.wref(**attribs))
        elif not (isinstance(child, Feature) and child.SUBSET):
            # Don't add pre-defined features, they are already added as attributes
            e.append(child.xml())
    return e
|
def make_fig(self):
    """Figure constructor, called before `self.plot()`.

    Creates an 8x4 inch figure and records it in ``_all_figures`` so
    every created figure can be tracked later.
    """
    fig = plt.figure(figsize=(8, 4))
    self.fig = fig
    self._all_figures.append(fig)
|
def Many2ManyThroughModel(field):
    '''Create a Many2Many through model with two foreign key fields and a
    CompositeFieldId depending on the two foreign keys.

    The through model is created on demand when ``field.through`` is None
    and is registered with one ForeignKey per side of the relation plus a
    composite primary key built from the two foreign keys.
    '''
    from stdnet.odm import ModelType, StdModel, ForeignKey, CompositeIdField
    name_model = field.model._meta.name
    name_relmodel = field.relmodel._meta.name
    # The two models are the same: disambiguate the second field name.
    if name_model == name_relmodel:
        name_relmodel += '2'
    through = field.through
    # Create the through model
    if through is None:
        name = '{0}_{1}'.format(name_model, name_relmodel)
        class Meta:
            app_label = field.model._meta.app_label
        through = ModelType(name, (StdModel,), {'Meta': Meta})
        field.through = through
    # The first field: points back at field.model, exposing the relation
    # through a dedicated many-to-many related manager.
    field1 = ForeignKey(field.model, related_name=field.name, related_manager_class=makeMany2ManyRelatedManager(field.relmodel, name_model, name_relmodel))
    field1.register_with_model(name_model, through)
    # The second field: the reverse side of the relation.
    field2 = ForeignKey(field.relmodel, related_name=field.related_name, related_manager_class=makeMany2ManyRelatedManager(field.model, name_relmodel, name_model))
    field2.register_with_model(name_relmodel, through)
    # Composite primary key over both foreign keys.
    pk = CompositeIdField(name_model, name_relmodel)
    pk.register_with_model('id', through)
|
def updateVocalAuto(self, component, files):
    """Updates the auto-parameter with selected *component* to have *files*.

    Adds the auto-parameter if not already present.  The auto-parameter
    is expected to have only one selected component (the one given).
    If fewer than two files are given, any existing auto-parameter is
    removed from the model.

    :param component: Component that the auto-parameter is modifying
    :type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
    :param files: list of file names to act as the auto-parameter list
    :type files: list<str>
    """
    auto_model = self.model().autoParams()
    row = auto_model.fileParameter(component)
    # NOTE(review): the original docstring said "len(files) < 1" removes
    # the parameter, but the code keeps it only for len(files) > 1, so a
    # single-file list also removes it -- confirm which is intended.
    if len(files) > 1:
        clean_component = self.model().data(self.model().indexByComponent(component), AbstractDragView.DragRole)
        p = {'parameter': 'filename', 'names': files, 'selection': [clean_component]}
        if row is None:
            auto_model.insertItem(auto_model.index(0, 0), p)
        else:
            auto_model.setData(auto_model.index(row, 0), p)
    elif row is not None:
        # remove the autoparameter
        auto_model.removeRow(row)
    # if row is none and len(files) == 1 then we don't need to do anything
    self.countChanged.emit()
|
def _flatten(self, iterator, **filter_options):
    '''Flatten lists of tuples into a single list of tuples.

    *iterator* yields lists of tuples; tuples that do not satisfy
    ``_matches_filter`` with the given options are dropped.
    '''
    return [
        tup
        for entry in iterator
        for tup in entry
        if self._matches_filter(tup, **filter_options)
    ]
|
def get_catalog_nodes(self, catalog_id, ancestor_levels, descendant_levels, include_siblings):
    """Gets a portion of the hierarchy for the given catalog.

    arg:    catalog_id (osid.id.Id): the ``Id`` to query
    arg:    ancestor_levels (cardinal): the maximum number of ancestor
            levels to include. A value of 0 returns no parents in the
            node.
    arg:    descendant_levels (cardinal): the maximum number of
            descendant levels to include. A value of 0 returns no
            children in the node.
    arg:    include_siblings (boolean): ``true`` to include the
            siblings of the given node, ``false`` to omit the siblings
    return: (osid.cataloging.CatalogNode) - a catalog node
    raise:  NotFound - a ``Catalog`` identified by ``Id is`` not found
    raise:  NullArgument - ``catalog_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_bin_nodes
    node_ids = self.get_catalog_node_ids(
        catalog_id=catalog_id,
        ancestor_levels=ancestor_levels,
        descendant_levels=descendant_levels,
        include_siblings=include_siblings,
    )
    return objects.CatalogNode(node_ids._my_map, runtime=self._runtime, proxy=self._proxy)
|
def update(self, adgroup_id, catmatch_id, max_price, is_default_price, online_status, nick=None):
    '''xxxxx.xxxxx.adgroup.catmatch.update

    Update an ad-group's category bid: sets the category bid price,
    whether the default price is used, and whether category bidding is
    enabled.
    '''
    request = TOPRequest('xxxxx.xxxxx.adgroup.catmatch.update')
    # Required parameters, set in one pass.
    params = {
        'adgroup_id': adgroup_id,
        'catmatch_id': catmatch_id,
        'max_price': max_price,
        'is_default_price': is_default_price,
        'online_status': online_status,
    }
    for key, value in params.items():
        request[key] = value
    # The caller's nick is optional.
    if nick is not None:
        request['nick'] = nick
    self.create(self.execute(request), fields=['success', 'result', 'success', 'result_code', 'result_message'], models={'result': ADGroupCatmatch})
    return self.result
|
def receive(self):
    """Receives incoming websocket messages, and puts them on the Client
    queue for processing.

    Runs until ``self.running`` is falsified.  A non-blocking lock guards
    access to the connection so other threads can pause reception.

    :return:
    """
    while self.running:
        # Non-blocking acquire: if another thread holds the lock, idle
        # briefly instead of stalling on it.
        if not self._receiver_lock.acquire(blocking=False):
            time.sleep(0.5)
            continue
        try:
            raw = self.conn.recv()
        except WebSocketTimeoutException:
            self._receiver_lock.release()
            continue
        except WebSocketConnectionClosedException:
            # this needs to restart the client, while keeping track
            # of the currently subscribed channels!
            self.conn = None
            self._controller_q.put('restart')
            # BUGFIX: previously this branch fell through to
            # json.loads(raw) with `raw` unbound (raising NameError)
            # and never released the lock; release and resume.
            self._receiver_lock.release()
            continue
        except AttributeError:
            # self.conn is None, idle loop until shutdown of thread
            self._receiver_lock.release()
            continue
        msg = time.time(), json.loads(raw)
        log.debug("receiver Thread: Data Received: %s", msg)
        self.receiver_q.put(msg)
        self._receiver_lock.release()
|
def get_team_push_restrictions(self):
    """:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`"""
    restrictions = self._team_push_restrictions
    # NotSet means no team push restriction data is available.
    if restrictions is github.GithubObject.NotSet:
        return None
    return github.PaginatedList.PaginatedList(github.Team.Team, self._requester, restrictions, None)
|
def complete_path(curr_dir, last_dir):
    """Return the path to complete that matches the last entered component.

    If the last entered component is ``~``, the expanded path would not
    match, so the available path is returned joined under ``~``.
    Candidates that do not match the typed prefix yield ``None``.

    :param curr_dir: str
    :param last_dir: str
    :return: str or None
    """
    # Nothing typed yet: every candidate matches.
    if not last_dir:
        return curr_dir
    # Candidate matches the typed prefix.
    if curr_dir.startswith(last_dir):
        return curr_dir
    # Bare tilde: expansion would never prefix-match, so join under it.
    if last_dir == '~':
        return os.path.join(last_dir, curr_dir)
    return None
|
def patch_splitMax(self, patches):
    """Look through the patches and break up any which are longer than the
    maximum limit of the match algorithm.

    Intended to be called only from within patch_apply.  Operates on the
    list in place: an oversized patch is removed and replaced by a run of
    smaller patches covering the same span, each carrying up to
    Patch_Margin characters of shared context on both sides.

    Args:
      patches: Array of Patch objects.
    """
    patch_size = self.Match_MaxBits
    if patch_size == 0:
        # Python has the option of not splitting strings due to its ability
        # to handle integers of arbitrary precision.
        return
    for x in range(len(patches)):
        if patches[x].length1 <= patch_size:
            continue
        bigpatch = patches[x]
        # Remove the big old patch.
        del patches[x]
        # Step back so replacement patches are inserted at the removed
        # patch's position (x is re-incremented per insertion below).
        x -= 1
        start1 = bigpatch.start1
        start2 = bigpatch.start2
        precontext = ''
        while len(bigpatch.diffs) != 0:
            # Create one of several smaller patches.
            patch = patch_obj()
            empty = True
            patch.start1 = start1 - len(precontext)
            patch.start2 = start2 - len(precontext)
            if precontext:
                patch.length1 = patch.length2 = len(precontext)
                patch.diffs.append((self.DIFF_EQUAL, precontext))
            # Consume diffs until this sub-patch reaches its size budget.
            while (len(bigpatch.diffs) != 0 and patch.length1 < patch_size - self.Patch_Margin):
                (diff_type, diff_text) = bigpatch.diffs[0]
                if diff_type == self.DIFF_INSERT:
                    # Insertions are harmless.
                    patch.length2 += len(diff_text)
                    start2 += len(diff_text)
                    patch.diffs.append(bigpatch.diffs.pop(0))
                    empty = False
                elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and patch.diffs[0][0] == self.DIFF_EQUAL and len(diff_text) > 2 * patch_size):
                    # This is a large deletion.  Let it pass in one chunk.
                    patch.length1 += len(diff_text)
                    start1 += len(diff_text)
                    empty = False
                    patch.diffs.append((diff_type, diff_text))
                    del bigpatch.diffs[0]
                else:
                    # Deletion or equality.  Only take as much as we can stomach.
                    diff_text = diff_text[:patch_size - patch.length1 - self.Patch_Margin]
                    patch.length1 += len(diff_text)
                    start1 += len(diff_text)
                    if diff_type == self.DIFF_EQUAL:
                        patch.length2 += len(diff_text)
                        start2 += len(diff_text)
                    else:
                        empty = False
                    patch.diffs.append((diff_type, diff_text))
                    # If the whole diff was consumed drop it; otherwise
                    # keep the unconsumed remainder at the head.
                    if diff_text == bigpatch.diffs[0][1]:
                        del bigpatch.diffs[0]
                    else:
                        bigpatch.diffs[0] = (bigpatch.diffs[0][0], bigpatch.diffs[0][1][len(diff_text):])
            # Compute the head context for the next patch.
            precontext = self.diff_text2(patch.diffs)
            precontext = precontext[-self.Patch_Margin:]
            # Append the end context for this patch.
            postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]
            if postcontext:
                patch.length1 += len(postcontext)
                patch.length2 += len(postcontext)
                if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
                    patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] + postcontext)
                else:
                    patch.diffs.append((self.DIFF_EQUAL, postcontext))
            # Only keep sub-patches that actually change something.
            if not empty:
                x += 1
                patches.insert(x, patch)
|
def impute_and_confidence(self, M_c, X_L, X_D, Y, Q, seed, n):
    """Impute values and confidence of the value from the predictive
    distribution of the given latent state.

    :param M_c: column metadata for the model
    :param X_L: a single latent state, or a list/tuple of latent states
    :param X_D: latent data assignment(s), matching X_L in structure
    :param Y: A list of constraints to apply when sampling. Each constraint
        is a triplet of (r, d, v): r is the row index, d is the column
        index and v is the value of the constraint
    :type Y: list of lists
    :param Q: A list of values to sample. Each value is doublet of (r, d):
        r is the row index, d is the column index
    :type Q: list of lists
    :param seed: seed used to derive per-sample PRNG seeds
    :param n: the number of samples to use in the imputation
    :type n: int
    :returns: list of lists -- list of (value, confidence) tuples in the
        same order as specified by Q
    """
    get_next_seed = make_get_next_seed(seed)
    if isinstance(X_L, (list, tuple)):
        assert isinstance(X_D, (list, tuple))
        # TODO: multistate impute doesn't exist yet
        # e, confidence = su.impute_and_confidence_multistate(
        #     M_c, X_L, X_D, Y, Q, n, self.get_next_seed)
        # NOTE(review): until the multistate variant exists, both branches
        # perform the identical single-state call.
        e, confidence = su.impute_and_confidence(M_c, X_L, X_D, Y, Q, n, get_next_seed)
    else:
        e, confidence = su.impute_and_confidence(M_c, X_L, X_D, Y, Q, n, get_next_seed)
    return (e, confidence)
|
def VerifyStructure(self, parser_mediator, line):
    """Verify that this file is a SkyDrive old log file.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      line (str): line from a text file.

    Returns:
      bool: True if the line is in the expected format, False if not.
    """
    try:
        parsed = self._LINE.parseString(line)
    except pyparsing.ParseException:
        logger.debug('Not a SkyDrive old log file')
        return False
    # The log stores day-first; the time-elements tuple is year-first.
    day, month, year, hours, minutes, seconds, millis = parsed.date_time
    try:
        dfdatetime_time_elements.TimeElementsInMilliseconds(
            time_elements_tuple=(year, month, day, hours, minutes, seconds, millis))
    except ValueError:
        logger.debug('Not a SkyDrive old log file, invalid date and time: {0!s}'.format(parsed.date_time))
        return False
    return True
|
def init_app(self, app, context=DEFAULT_DICT):
    """Lazy constructor for the :class:`Component` class.

    This method will allow the component to be used like a Flask
    extension/singleton.

    Args:
        app (flask.Flask): The Application to base this Component upon.
            Useful for app wide singletons.

    Keyword Args:
        context (dict, optional): The contextual information to supply to
            this component.
    """
    # NOTE(review): the default is DEFAULT_DICT but the sentinel tested
    # below is _CONTEXT_MISSING -- the default therefore always passes
    # these checks; confirm callers are expected to pass _CONTEXT_MISSING
    # explicitly to opt out.
    if context is not _CONTEXT_MISSING:
        self.update_context(context, app=app)
    # do not readd callbacks if already present; and if there's no context
    # present, there's no real need to add callbacks
    if (app not in _CONTEXT_CALLBACK_MAP and context is not _CONTEXT_MISSING):
        key = self._get_context_name(app=app)
        self._context_callbacks(app, key, original_context=context)
|
def _make_dav_request(self, method, path, **kwargs):
    """Makes a WebDAV request.

    :param method: HTTP method
    :param path: remote path of the targetted file
    :param \\*\\*kwargs: optional arguments that ``requests.Request.request`` accepts
    :returns: array of :class:`FileInfo` if the response contains it,
        True if the operation succeeded, False if it didn't
    :raises HTTPResponseError: for any other HTTP status
    """
    if self._debug:
        print('DAV request: %s %s' % (method, path))
        if kwargs.get('headers'):
            print('Headers: ', kwargs.get('headers'))
    path = self._normalize_path(path)
    url = self._webdav_url + parse.quote(self._encode_string(path))
    res = self._session.request(method, url, **kwargs)
    if self._debug:
        print('DAV status: %i' % res.status_code)
    # 200/207 carry a DAV body worth parsing; 201/204 are bare successes.
    if res.status_code in (200, 207):
        return self._parse_dav_response(res)
    if res.status_code in (204, 201):
        return True
    raise HTTPResponseError(res)
|
def _GenerateInitConfigs(self, template_dir, rpm_build_dir):
    """Generates init-system configs (SysV init.d and, if supported, systemd)."""
    client_name = config.CONFIG.Get("Client.name", context=self.context)
    # SysV init script.
    initd_path = os.path.join(rpm_build_dir, "etc/init.d", client_name)
    utils.EnsureDirExists(os.path.dirname(initd_path))
    self.GenerateFile(os.path.join(template_dir, "rpmbuild/grr-client.initd.in"), initd_path)
    # systemd unit, only for template versions that ship one.
    if config.CONFIG["Template.version_numeric"] >= 3125:
        unit_path = os.path.join(rpm_build_dir, "usr/lib/systemd/system/", "%s.service" % client_name)
        utils.EnsureDirExists(os.path.dirname(unit_path))
        self.GenerateFile(os.path.join(template_dir, "rpmbuild/grr-client.service.in"), unit_path)
|
def get_tree(self):
    """Fetch the full container (layout) tree from the window manager.

    Returns a :class:`Con` instance with all kinds of methods and
    selectors -- start exploration here.

    :rtype: Con
    """
    payload = self.message(MessageType.GET_TREE, '')
    return Con(json.loads(payload), None, self)
|
def raw_cube_array(self):
    """Return read-only ndarray of measure values from cube-response.

    The shape mirrors the (raw) cube response: it includes values for
    missing elements, any MR_CAT dimensions, and any prunable rows and
    columns.
    """
    values = np.array(self._flat_values)
    array = values.reshape(self._all_dimensions.shape)
    # Read-only to avoid hard-to-find downstream mutation bugs.
    array.flags.writeable = False
    return array
|
def flip_for(self, twig=None, expression=None, **kwargs):
    """Flip the constraint to solve for any of the parameters in the expression.

    After flipping, this constraint's qualifier/component/kind, value and
    default unit are rewritten so that the requested parameter becomes the
    constrained (solved-for) one.

    :param twig: twig of the parameter to solve for
    :param expression: the new right-hand-side expression
        (optional if sympy available, required if not)
    :param kwargs: additional filter criteria used to locate the
        parameter to solve for
    :raises ValueError: if the requested parameter is already constrained,
        or if no expression can be derived (no sympy and none provided)
    """
    _orig_expression = self.get_value()
    # try to get the parameter from the bundle
    kwargs['twig'] = twig
    newly_constrained_var = self._get_var(**kwargs)
    newly_constrained_param = self.get_parameter(**kwargs)
    # A parameter may only be constrained by one constraint at a time.
    check_kwargs = {k: v for k, v in newly_constrained_param.meta.items() if k not in ['context', 'twig', 'uniquetwig']}
    check_kwargs['context'] = 'constraint'
    if len(self._bundle.filter(**check_kwargs)):
        raise ValueError("'{}' is already constrained".format(newly_constrained_param.twig))
    currently_constrained_var = self._get_var(qualifier=self.qualifier, component=self.component)
    currently_constrained_param = currently_constrained_var.get_parameter()
    # or self.constrained_parameter
    import constraint
    # Preferred path: a built-in constraint function can re-derive its own
    # expression for a different solve_for target.
    if self.constraint_func is not None and hasattr(constraint, self.constraint_func):
        # then let's see if the method is capable of resolving for use
        # try:
        if True:
            # TODO: this is not nearly general enough, each method takes different arguments
            # and getting solve_for as newly_constrained_param.qualifier
            lhs, rhs, constraint_kwargs = getattr(constraint, self.constraint_func)(self._bundle, solve_for=newly_constrained_param, **self.constraint_kwargs)
        # except NotImplementedError:
        #     pass
        # else:
        # TODO: this needs to be smarter and match to self._get_var().user_label instead of the current uniquetwig
        expression = rhs._value
        # safe expression
        # ~ print "*** flip by recalling method success!", expression
        # print "***", lhs._value, rhs._value
    if expression is not None:
        expression = expression
    elif _use_sympy:
        # Fallback: solve the current expression algebraically for the new
        # target using collision-safe labels.
        eq_safe = "({}) - {}".format(self._value, currently_constrained_var.safe_label)
        # ~ print "*** solving {} for {}".format(eq_safe, newly_constrained_var.safe_label)
        expression = sympy.solve(eq_safe, newly_constrained_var.safe_label)[0]
        # ~ print "*** solution: {}".format(expression)
    else:
        # TODO: ability for built-in constraints to flip themselves
        # we could access self.kind and re-call that with a new solve_for option?
        raise ValueError("must either have sympy installed or provide a new expression")
    # Rebind this constraint to the newly constrained parameter.
    self._qualifier = newly_constrained_param.qualifier
    self._component = newly_constrained_param.component
    self._kind = newly_constrained_param.kind
    self._value = str(expression)
    # reset the default_unit so that set_default_unit doesn't complain
    # about incompatible units
    self._default_unit = None
    self.set_default_unit(newly_constrained_param.default_unit)
    self._update_bookkeeping()
    self._add_history(redo_func='flip_constraint', redo_kwargs={'expression': expression, 'uniqueid': newly_constrained_param.uniqueid}, undo_func='flip_constraint', undo_kwargs={'expression': _orig_expression, 'uniqueid': currently_constrained_param.uniqueid})
|
def upgrade_code(self):
    '''For installers which follow the Microsoft Installer standard, returns
    the ``Upgrade code``.

    Returns:
        value (str): ``Upgrade code`` GUID for installed software, or ``''``
        when the software has no squid/upgrade code.
    '''
    if not self.__squid:
        # Must have a valid squid for an upgrade code to exist
        return ''

    # GUID/SQUID are unique, so it does not matter if they are 32bit or
    # 64bit or user install so all items are cached into a single dict
    have_scan_key = '{0}\\{1}\\{2}'.format(self.__reg_hive,
                                           self.__reg_upgradecode_path,
                                           self.__reg_32bit)

    if not self.__upgrade_codes or self.__reg_key_guid not in self.__upgrade_codes:
        # Read in the upgrade codes in this section of the registry.
        try:
            uc_handle = win32api.RegOpenKeyEx(getattr(win32con, self.__reg_hive),  # pylint: disable=no-member
                                              self.__reg_upgradecode_path,
                                              0,
                                              win32con.KEY_READ | self.__reg_32bit_access)
        except pywintypes.error as exc:  # pylint: disable=no-member
            if exc.winerror == winerror.ERROR_FILE_NOT_FOUND:
                # Not Found
                log.warning('Not Found %s\\%s 32bit %s',
                            self.__reg_hive,
                            self.__reg_upgradecode_path,
                            self.__reg_32bit)
                return ''
            raise
        # BUG FIX: the open registry handles previously leaked on the early
        # "scan skipped" return and on any exception raised while
        # enumerating; try/finally now guarantees RegCloseKey runs.
        try:
            squid_upgrade_code_all, _, _, suc_pytime = zip(*win32api.RegEnumKeyEx(uc_handle))  # pylint: disable=no-member

            # Check if we have already scanned these upgrade codes before, and also
            # check if they have been updated in the registry since last time we scanned.
            if (have_scan_key in self.__upgrade_code_have_scan and
                    self.__upgrade_code_have_scan[have_scan_key] == (squid_upgrade_code_all, suc_pytime)):
                # We have scanned this before and there are no new changes, so
                # the requested GUID is known to have no upgrade code.
                log.debug('Scan skipped for upgrade codes, no changes (%s)', have_scan_key)
                return ''

            # Go into each squid upgrade code and find all the related product codes.
            log.debug('Scan for upgrade codes (%s) for product codes', have_scan_key)
            for upgrade_code_squid in squid_upgrade_code_all:
                upgrade_code_guid = self.__squid_to_guid(upgrade_code_squid)
                pc_handle = win32api.RegOpenKeyEx(uc_handle,  # pylint: disable=no-member
                                                  upgrade_code_squid,
                                                  0,
                                                  win32con.KEY_READ | self.__reg_32bit_access)
                try:
                    _, pc_val_count, _ = win32api.RegQueryInfoKey(pc_handle)  # pylint: disable=no-member
                    for item_index in range(pc_val_count):
                        product_code_guid = self.__squid_to_guid(
                            win32api.RegEnumValue(pc_handle, item_index)[0])  # pylint: disable=no-member
                        if product_code_guid:
                            self.__upgrade_codes[product_code_guid] = upgrade_code_guid
                finally:
                    win32api.RegCloseKey(pc_handle)  # pylint: disable=no-member
        finally:
            win32api.RegCloseKey(uc_handle)  # pylint: disable=no-member

        # Remember what we scanned so unchanged registry data is not rescanned.
        self.__upgrade_code_have_scan[have_scan_key] = (squid_upgrade_code_all, suc_pytime)

    return self.__upgrade_codes.get(self.__reg_key_guid, '')
|
def get_instance(self, payload):
    """Build an instance of PhoneNumberInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance
    :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberInstance
    """
    # Resolve the trunk context from the stored solution before constructing.
    trunk_sid = self._solution['trunk_sid']
    return PhoneNumberInstance(self._version, payload, trunk_sid=trunk_sid)
|
def get_distance(self, node):
    """Return the Euclidean distance between this node and another.

    Args:
        node (object): The other node; must expose a 2-tuple ``pos``.
    """
    dx = node.pos[0] - self.pos[0]
    dy = node.pos[1] - self.pos[1]
    return sqrt(dx * dx + dy * dy)
|
def read_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_horizontal_pod_autoscaler_status  # noqa: E501

    read status of the specified HorizontalPodAutoscaler  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_horizontal_pod_autoscaler_status(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the HorizontalPodAutoscaler (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V2beta1HorizontalPodAutoscaler
             If the method is called asynchronously,
             returns the request thread.
    """
    # Bind the delegate once; both the async and sync paths use it.
    delegate = self.read_namespaced_horizontal_pod_autoscaler_status_with_http_info
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: the delegate returns the request thread directly.
        return delegate(name, namespace, **kwargs)  # noqa: E501
    # Synchronous: unwrap and return the response data.
    data = delegate(name, namespace, **kwargs)  # noqa: E501
    return data
|
def optimization_loop ( self , timeSeries , forecastingMethod , remainingParameters , currentParameterValues = None ) :
"""The optimization loop .
This function is called recursively , until all parameter values were evaluated .
: param TimeSeries timeSeries : TimeSeries instance that requires an optimized forecast .
: param BaseForecastingMethod forecastingMethod : ForecastingMethod that is used to optimize the parameters .
: param list remainingParameters : List containing all parameters with their corresponding values that still
need to be evaluated .
When this list is empty , the most inner optimization loop is reached .
: param dictionary currentParameterValues : The currently evaluated forecast parameter combination .
: return : Returns a list containing a BaseErrorMeasure instance as defined in
: py : meth : ` BaseOptimizationMethod . _ _ init _ _ ` and the forecastingMethods parameter .
: rtype : list"""
|
if currentParameterValues is None :
currentParameterValues = { }
# The most inner loop is reached
if 0 == len ( remainingParameters ) : # set the forecasting parameters
for parameter in currentParameterValues :
forecastingMethod . set_parameter ( parameter , currentParameterValues [ parameter ] )
# calculate the forecast
forecast = timeSeries . apply ( forecastingMethod )
# create and initialize the ErrorMeasure
error = self . _errorClass ( ** self . _errorMeasureKWArgs )
# when the error could not be calculated , return an empty result
if not error . initialize ( timeSeries , forecast ) :
return [ ]
# Debugging GridSearchTest . inner _ optimization _ result _ test
# print " Instance / SMAPE / Alpha : % s / % s / % s " % (
# str ( error ) [ - 12 : - 1 ] ,
# str ( error . get _ error ( self . _ startingPercentage , self . _ endPercentage ) ) [ : 8 ] ,
# currentParameterValues [ " smoothingFactor " ]
# return the result
return [ [ error , dict ( currentParameterValues ) ] ]
# If this is not the most inner loop than extract an additional parameter
localParameter = remainingParameters [ - 1 ]
localParameterName = localParameter [ 0 ]
localParameterValues = localParameter [ 1 ]
# initialize the result
results = [ ]
# check the next level for each existing parameter
for value in localParameterValues :
currentParameterValues [ localParameterName ] = value
remainingParameters = remainingParameters [ : - 1 ]
results += self . optimization_loop ( timeSeries , forecastingMethod , remainingParameters , currentParameterValues )
return results
|
def build_all_keys_dict(self):
    """Merge the per-protocol key dicts into ``self.all_keys`` and append
    the unique key names to ``self.all_keys_list`` (the columns for the csv).

    First occurrence of a key wins; groups are processed in the fixed
    protocol order below so column ordering stays deterministic.
    """
    log.info("finding keys")

    # One loop replaces ten copy-pasted per-protocol loops; order matches
    # the original eth -> ip -> ipv6 -> icmp -> arp -> tcp -> udp -> dns
    # -> raw -> pad sequence.
    key_groups = (
        self.eth_keys,
        self.ip_keys,
        self.ipvsix_keys,
        self.icmp_keys,
        self.arp_keys,
        self.tcp_keys,
        self.udp_keys,
        self.dns_keys,
        self.raw_keys,
        self.pad_keys,
    )
    for group in key_groups:
        for k in group:
            ak = "{}".format(k)
            if ak not in self.all_keys:
                self.all_keys[ak] = k

    # this will be the columns for the csv
    for k in self.all_keys:
        self.all_keys_list.append(k)

    log.debug(("unique all_keys keys={} values={}").format(len(self.all_keys_list), self.all_keys))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.