signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def load_params(self, fname):
    """Load model parameters from a file saved by ``save_params``.

    Parameters
    ----------
    fname : str
        Path to input param file.

    Examples
    --------
    >>> # An example of loading module parameters.
    >>> mod.load_params('myfile')
    """
    loaded = ndarray.load(fname)
    # Bucket each entry by its "arg:"/"aux:" prefix.
    params = {'arg': {}, 'aux': {}}
    for key, value in loaded.items():
        prefix, name = key.split(':', 1)
        if prefix not in params:
            raise ValueError("Invalid param file " + fname)
        params[prefix][name] = value
    self.set_params(params['arg'], params['aux'])
def run_and_exit(command_class):
    '''A shortcut for reading from sys.argv and exiting the interpreter'''
    cmd = command_class(sys.argv[1:])
    if not cmd.error:
        # Exit with whatever the command's run() reports.
        sys.exit(cmd.run())
    print('error: {0}'.format(cmd.error))
    sys.exit(1)
def delete_contacts(self, uid, **kwargs):
    """Unassign contacts from the specified list.

    If contacts are assigned only to the specified list, delete them
    permanently. Returns True on success.

    :Example:
        client.lists.delete_contacts(uid=1901010, contacts="1723812,1239912")

    :param int uid: The unique id of the List. Required.
    :param str contacts: Contact ID(s), separated by comma. Required.
    """
    endpoint = "%s/%s/contacts" % (self.uri, uid)
    response, _ = self.request("DELETE", endpoint, data=kwargs)
    # 204 No Content is the API's success signal for a delete.
    return response.status == 204
def node(self, rows, ind, dep, depth=0, parent=None, parent_decisions=None):
    """internal method to create a node in the tree

    Recursively grows the tree: picks the best split for (ind, dep), records
    the node in self._tree_store, then recurses into each split partition.
    Returns self._tree_store (the accumulated node list).
    """
    depth += 1
    if self.max_depth < depth:
        # Depth budget exhausted: record a terminal node and stop recursing.
        terminal_node = Node(choices=parent_decisions, node_id=self.node_count,
                             parent=parent, indices=rows, dep_v=dep)
        self._tree_store.append(terminal_node)
        self.node_count += 1
        terminal_node.split.invalid_reason = InvalidSplitReason.MAX_DEPTH
        return self._tree_store
    split = self._stats.best_split(ind, dep)
    node = Node(choices=parent_decisions, node_id=self.node_count, indices=rows,
                dep_v=dep, parent=parent, split=split)
    self._tree_store.append(node)
    # This node's id becomes the parent id for all children created below.
    parent = self.node_count
    self.node_count += 1
    if not split.valid():
        # No usable split: the node just appended acts as a leaf.
        return self._tree_store
    for index, choices in enumerate(split.splits):
        # Boolean mask of the rows whose split-column value is in this branch.
        correct_rows = np.in1d(ind[split.column_id].arr, choices)
        dep_slice = dep[correct_rows]
        ind_slice = [vect[correct_rows] for vect in ind]
        row_slice = rows[correct_rows]
        if self.min_parent_node_size < len(dep_slice.arr):
            self.node(row_slice, ind_slice, dep_slice, depth=depth, parent=parent,
                      parent_decisions=split.split_map[index])
        else:
            # Partition too small to split further: record it as a terminal node.
            terminal_node = Node(choices=split.split_map[index],
                                 node_id=self.node_count, parent=parent,
                                 indices=row_slice, dep_v=dep_slice)
            terminal_node.split.invalid_reason = InvalidSplitReason.MIN_PARENT_NODE_SIZE
            self._tree_store.append(terminal_node)
            self.node_count += 1
    return self._tree_store
def check_license(package_info, *args):
    """Does the package have a license classifier?

    :param package_info: package_info dictionary
    :return: Tuple (is the condition True or False?, reason if it is False
        else None, score to be applied)
    """
    # Guard: 'classifiers' may be absent or explicitly None; the original
    # crashed with a TypeError iterating None in that case.
    classifiers = package_info.get('classifiers') or []
    if any(c.startswith('License ::') for c in classifiers):
        # Per the documented contract, reason is None when the check passes.
        return True, None, HAS_LICENSE
    return False, "No License", HAS_LICENSE
def gen_modules(self, initial_load=False):
    '''Tell the minion to reload the execution modules

    CLI Example:

    .. code-block:: bash

        salt '*' sys.reload_modules
    '''
    # Utility functions load first; the execution modules below depend on them.
    self.utils = salt.loader.utils(self.opts)
    self.functions = salt.loader.minion_mods(
        self.opts, utils=self.utils, whitelist=self.whitelist,
        initial_load=initial_load)
    self.serializers = salt.loader.serializers(self.opts)
    # Optional subsystems are only (re)loaded when the corresponding mk_*
    # flag was set on this instance.
    if self.mk_returners:
        self.returners = salt.loader.returners(self.opts, self.functions)
    if self.mk_states:
        self.states = salt.loader.states(
            self.opts, self.functions, self.utils, self.serializers)
    if self.mk_rend:
        self.rend = salt.loader.render(self.opts, self.functions)
    if self.mk_matcher:
        self.matchers = salt.loader.matchers(self.opts)
    # Expose this method itself so 'sys.reload_modules' triggers a reload.
    self.functions['sys.reload_modules'] = self.gen_modules
def config():
    '''Shows the current configuration.'''
    cfg = get_config()

    def bold(text):
        return click.style(text, bold=True)

    print('Client version: {0}'.format(bold(__version__)))
    print('API endpoint: {0}'.format(bold(str(cfg.endpoint))))
    print('API version: {0}'.format(bold(cfg.version)))
    print('Access key: "{0}"'.format(bold(cfg.access_key)))
    # Show only the first 6 and last 10 characters of the secret key.
    masked = cfg.secret_key[:6] + ('*' * 24) + cfg.secret_key[-10:]
    print('Secret key: "{0}"'.format(bold(masked)))
    print('Signature hash type: {0}'.format(bold(cfg.hash_type)))
    print('Skip SSL certificate validation? {0}'.format(
        bold(str(cfg.skip_sslcert_validation))))
def calculate_payload_hash(payload, algorithm, content_type):
    """Calculates a hash for a given payload."""
    digest = hashlib.new(algorithm)
    parts = [
        'hawk.' + str(HAWK_VER) + '.payload\n',
        parse_content_type(content_type) + '\n',
        payload or '',
        '\n',
    ]
    for index, part in enumerate(parts):
        # Make sure we are about to hash binary strings.
        if not isinstance(part, six.binary_type):
            part = part.encode('utf8')
        digest.update(part)
        # Keep the encoded bytes so the debug log shows what was hashed.
        parts[index] = part
    log.debug('calculating payload hash from:\n{parts}'.format(
        parts=pprint.pformat(parts)))
    return b64encode(digest.digest())
def assert_greater_equal(first, second, msg_fmt="{msg}"):
    """Fail if first is not greater than or equal to second.

    >>> assert_greater_equal('foo', 'bar')
    >>> assert_greater_equal(5, 5)
    >>> assert_greater_equal(5, 6)
    Traceback (most recent call last):
        ...
    AssertionError: 5 is not greater than or equal to 6

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * first - the first argument
    * second - the second argument
    """
    if first >= second:
        return
    msg = "{!r} is not greater than or equal to {!r}".format(first, second)
    fail(msg_fmt.format(msg=msg, first=first, second=second))
def wait_for_unit(service_name, timeout=480):
    """Wait `timeout` seconds for a given service name to come up."""
    wait_for_machine(num_machines=1)
    deadline = time.time() + timeout
    while True:
        state = unit_info(service_name, 'agent-state')
        # An error state or 'started' ends the polling loop either way.
        if state == 'started' or 'error' in state:
            break
        if time.time() >= deadline:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
    if state != 'started':
        raise RuntimeError('unit did not start, agent-state: ' + state)
def set_widgets(self):
    """Set widgets on the LayerMode tab."""
    self.clear_further_steps()
    # Set widgets
    purpose = self.parent.step_kw_purpose.selected_purpose()
    subcategory = self.parent.step_kw_subcategory.selected_subcategory()
    # Initial question text; refined below once layer_modes is known.
    layer_mode_question = (
        layer_mode_raster_question
        if is_raster_layer(self.parent.layer)
        else layer_mode_vector_question)
    self.lblDescribeLayerMode.setText('')
    self.lstLayerModes.clear()
    layer_modes = get_layer_modes(subcategory['key'])
    if is_raster_layer(self.parent.layer):
        layer_mode_question = layer_mode_raster_question
    else:
        if len(layer_modes) == 2:
            layer_mode_question = layer_mode_vector_question
        elif len(layer_modes) == 1:
            # Single available mode: ask for confirmation of that mode.
            if layer_modes[0]['key'] == 'classified':
                layer_mode_question = layer_mode_vector_classified_confirm
            elif layer_modes[0]['key'] == 'continuous':
                layer_mode_question = layer_mode_vector_continuous_confirm
        else:
            layer_mode_question = layer_mode_vector_question
    self.lblSelectLayerMode.setText(
        layer_mode_question % (subcategory['name'], purpose['name']))
    # Populate the list widget; the mode key rides along as item user data.
    for layer_mode in layer_modes:
        item = QListWidgetItem(layer_mode['name'], self.lstLayerModes)
        item.setData(QtCore.Qt.UserRole, layer_mode['key'])
        self.lstLayerModes.addItem(item)
    # Set value to existing keyword or default value
    layer_mode_keys = [m['key'] for m in layer_modes]
    layer_mode_keyword = self.parent.get_existing_keyword('layer_mode')
    if layer_mode_keyword in layer_mode_keys:
        index = layer_mode_keys.index(layer_mode_keyword)
    elif layer_mode_continuous['key'] in layer_mode_keys:
        # Set default value
        index = layer_mode_keys.index(layer_mode_continuous['key'])
    else:
        # -1 clears the selection in the Qt list widget.
        index = -1
    self.lstLayerModes.setCurrentRow(index)
    self.auto_select_one_item(self.lstLayerModes)
def get_first_category(app_uid):
    '''Get the first, as the unique category of post.'''
    records = MPost2Catalog.query_by_entity_uid(app_uid).objects()
    if records.count():
        return records.get()
    return None
def _build_command ( self , python_executable , lib_dir_fq , proxy_enabled ) :
"""Build the pip command for installing dependencies .
Args :
python _ executable ( str ) : The fully qualified path of the Python executable .
lib _ dir _ fq ( str ) : The fully qualified path of the lib directory .
Returns :
list : The Python pip command with all required args .""" | exe_command = [ os . path . expanduser ( python_executable ) , '-m' , 'pip' , 'install' , '-r' , self . requirements_file , '--ignore-installed' , '--quiet' , '--target' , lib_dir_fq , ]
if self . args . no_cache_dir :
exe_command . append ( '--no-cache-dir' )
if proxy_enabled : # trust the pypi hosts to avoid ssl errors
trusted_hosts = [ 'pypi.org' , 'pypi.python.org' , 'files.pythonhosted.org' ]
for host in trusted_hosts :
exe_command . append ( '--trusted-host' )
exe_command . append ( host )
return exe_command |
def add_menu(self, name):
    """Add a menu with name `name` to the global menu bar.

    Returns a menu widget.
    """
    menubar = self.menubar
    if menubar is None:
        raise ValueError("No menu bar configured")
    return menubar.add_name(name)
def N(self, ID, asp=0):
    """Returns the conjunction or opposition aspect of an object."""
    # Work on a copy so the chart's own object is not relocated.
    shifted = self.chart.get(ID).copy()
    shifted.relocate(shifted.lon + asp)
    return self.G('N_%s_%s' % (ID, asp), shifted.lat, shifted.lon)
def arm_and_takeoff(aTargetAltitude):
    """Arms vehicle and fly to aTargetAltitude.

    Blocks until the module-level ``vehicle`` reports an altitude of at
    least 95% of ``aTargetAltitude``. Each wait loop polls once a second
    (mode loop polls every 0.1s).
    """
    # Don't try to arm until autopilot is ready
    while not vehicle.is_armable:
        print(" Waiting for vehicle to initialise...")
        time.sleep(1)
    # Set mode to GUIDED for arming and takeoff:
    while (vehicle.mode.name != "GUIDED"):
        vehicle.mode = VehicleMode("GUIDED")
        time.sleep(0.1)
    # Confirm vehicle armed before attempting to take off
    while not vehicle.armed:
        # Re-request arming each pass until the autopilot accepts it.
        vehicle.armed = True
        print(" Waiting for arming...")
        time.sleep(1)
    print(" Taking off!")
    vehicle.simple_takeoff(aTargetAltitude)
    # Take off to target altitude
    # Wait until the vehicle reaches a safe height
    # before allowing next command to process.
    while True:
        requiredAlt = aTargetAltitude * 0.95
        # Break and return from function just below target altitude.
        if vehicle.location.global_relative_frame.alt >= requiredAlt:
            print(" Reached target altitude of ~%f" % (aTargetAltitude))
            break
        print(" Altitude: %f < %f" % (
            vehicle.location.global_relative_frame.alt, requiredAlt))
        time.sleep(1)
def get_vocabularies(self):
    """Get the vocabularies to pull the qualifiers from.

    :return: dict of vocabularies fetched from the vocabulary service.
    :raises UNTLStructureException: if the service cannot be reached or the
        response cannot be evaluated.
    """
    # Timeout in seconds.
    timeout = 15
    socket.setdefaulttimeout(timeout)
    # Create the ordered vocabulary URL.
    vocab_url = VOCABULARIES_URL.replace('all', 'all-verbose')
    # Request the vocabularies dictionary.
    try:
        # SECURITY: eval() of a network response executes arbitrary code if
        # the endpoint is compromised; prefer ast.literal_eval or a JSON
        # endpoint when the service supports it.
        vocab_dict = eval(urllib2.urlopen(vocab_url).read())
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        raise UNTLStructureException('Could not retrieve the vocabularies')
    return vocab_dict
def create_card(self, title=None, subtitle=None, content=None, card_type="Simple"):
    """card_obj = JSON card object to substitute the 'card' field in the raw_response

    format:
        "type": "Simple",      # COMPULSORY
        "title": "string",     # OPTIONAL
        "subtitle": "string",  # OPTIONAL
        "content": "string"    # OPTIONAL
    """
    card = {"type": card_type}
    # Only truthy optional fields make it into the card payload.
    for key, value in (("title", title), ("subtitle", subtitle), ("content", content)):
        if value:
            card[key] = value
    return card
def check_imports(self):
    """Check the project's top level directory for missing imports.

    This method will check only files ending in **.py** and does not handle
    imports validation for sub-directories.

    Appends an entry per discovered import to
    ``self.validation_data['moduleImports']`` and an error message to
    ``self.validation_data['errors']`` for each module that could not be
    imported.
    """
    modules = []
    for filename in sorted(os.listdir(self.app_path)):
        if not filename.endswith('.py'):
            continue
        fq_path = os.path.join(self.app_path, filename)
        with open(fq_path, 'rb') as f:
            code = f.read()
        try:
            parsed_code = ast.parse(code)
        except SyntaxError:
            # Unparsable files are skipped rather than failing validation
            # (same behavior as the original's silent SyntaxError pass).
            continue
        for node in ast.walk(parsed_code):
            if isinstance(node, ast.Import):
                names = [n.name for n in node.names]
            elif isinstance(node, ast.ImportFrom):
                # BUG FIX: node.module is None for relative imports such as
                # ``from . import x``; those are local and need no check.
                if node.module is None:
                    continue
                names = [node.module]
            else:
                continue
            for full_name in names:
                m = full_name.split('.')[0]
                if self.check_import_stdlib(m):
                    # stdlib module, no need to proceed
                    continue
                m_status = self.check_imported(m)
                modules.append({'file': filename, 'module': m, 'status': m_status})
    for module_data in modules:
        status = True
        if not module_data.get('status'):
            status = False
            # update validation data errors
            self.validation_data['errors'].append(
                'Module validation failed for {} (module "{}" could not be imported).'.format(
                    module_data.get('file'), module_data.get('module')))
        # update validation data for module
        self.validation_data['moduleImports'].append({
            'filename': module_data.get('file'),
            'module': module_data.get('module'),
            'status': status,
        })
def get(self, key, default=None):
    """Gets config from dynaconf variables

    if the variable does not exist in dynaconf, fall back to `app.config`
    to support runtime settings.
    """
    fallback = Config.get(self, key, default)
    return self._settings.get(key, fallback)
def ingest(self, co, classname=None, code_objects={}, show_asm=None):
    """Pick out tokens from an uncompyle6 code object, and transform them,
    returning a list of uncompyle6 'Token's.

    The transformations are made to assist the deparsing grammar.
    Specifically:
       - various types of LOAD_CONST's are categorized in terms of what they load
       - COME_FROM instructions are added to assist parsing control structures
       - MAKE_FUNCTION and FUNCTION_CALLS append the number of positional arguments

    Also, when we encounter certain tokens, we add them to a set which will cause
    custom grammar rules. Specifically, variable arg tokens like MAKE_FUNCTION or
    BUILD_LIST cause specific rules for the specific number of arguments they take.
    """
    if not show_asm:
        show_asm = self.show_asm
    bytecode = self.build_instructions(co)
    # show_asm = 'after'
    if show_asm in ('both', 'before'):
        for instr in bytecode.get_instructions(co):
            print(instr.disassemble())
    # Container for tokens
    tokens = []
    customize = {}
    if self.is_pypy:
        customize['PyPy'] = 1
    codelen = len(self.code)
    free, names, varnames = self.unmangle_code_names(co, classname)
    self.names = names
    # Scan for assertions. Later we will turn 'LOAD_GLOBAL' to 'LOAD_ASSERT'.
    # 'LOAD_ASSERT' is used in assert statements.
    self.load_asserts = set()
    for i in self.op_range(0, codelen):
        # We need to detect the difference between:
        #     raise AssertionError
        # and
        #     assert ...
        if (self.code[i] == self.opc.JUMP_IF_TRUE and
                i + 4 < codelen and
                self.code[i + 3] == self.opc.POP_TOP and
                self.code[i + 4] == self.opc.LOAD_GLOBAL):
            if names[self.get_argument(i + 4)] == 'AssertionError':
                self.load_asserts.add(i + 4)
    jump_targets = self.find_jump_targets(show_asm)
    # contains (code, [addrRefToCode])
    # Pre-scan statement boundaries to mark PRINT_ITEM/PRINT_NEWLINE ops that
    # continue a previous print on the same source line.
    last_stmt = self.next_stmt[0]
    i = self.next_stmt[last_stmt]
    replace = {}
    while i < codelen - 1:
        if self.lines[last_stmt].next > i:
            # Distinguish "print ..." from "print ...,"
            if self.code[last_stmt] == self.opc.PRINT_ITEM:
                if self.code[i] == self.opc.PRINT_ITEM:
                    replace[i] = 'PRINT_ITEM_CONT'
                elif self.code[i] == self.opc.PRINT_NEWLINE:
                    replace[i] = 'PRINT_NEWLINE_CONT'
        last_stmt = i
        i = self.next_stmt[i]
    extended_arg = 0
    for offset in self.op_range(0, codelen):
        op = self.code[offset]
        op_name = self.opname[op]
        oparg = None
        pattr = None
        if offset in jump_targets:
            jump_idx = 0
            # We want to process COME_FROMs to the same offset to be in *descending*
            # offset order so we have the larger range or biggest instruction interval
            # last. (I think they are sorted in increasing order, but for safety
            # we sort them). That way, specific COME_FROM tags will match up
            # properly. For example, a "loop" with an "if" nested in it should have the
            # "loop" tag last so the grammar rule matches that properly.
            last_jump_offset = -1
            for jump_offset in sorted(jump_targets[offset], reverse=True):
                if jump_offset != last_jump_offset:
                    tokens.append(Token(
                        'COME_FROM', jump_offset, repr(jump_offset),
                        offset="%s_%d" % (offset, jump_idx), has_arg=True))
                    jump_idx += 1
                    last_jump_offset = jump_offset
        elif offset in self.thens:
            tokens.append(Token(
                'THEN', None, self.thens[offset],
                offset="%s_0" % offset, has_arg=True))
        has_arg = (op >= self.opc.HAVE_ARGUMENT)
        if has_arg:
            oparg = self.get_argument(offset) + extended_arg
            extended_arg = 0
            if op == self.opc.EXTENDED_ARG:
                # L65536 is presumably the module-level constant 2**16 used to
                # fold EXTENDED_ARG into the next instruction's argument —
                # TODO confirm against the module header.
                extended_arg = oparg * L65536
                continue
            if op in self.opc.CONST_OPS:
                const = co.co_consts[oparg]
                # We can't use inspect.iscode() because we may be
                # using a different version of Python than the
                # one that this was byte-compiled on. So the code
                # types may mismatch.
                if hasattr(const, 'co_name'):
                    oparg = const
                    if const.co_name == '<lambda>':
                        assert op_name == 'LOAD_CONST'
                        op_name = 'LOAD_LAMBDA'
                    elif const.co_name == self.genexpr_name:
                        op_name = 'LOAD_GENEXPR'
                    elif const.co_name == '<dictcomp>':
                        op_name = 'LOAD_DICTCOMP'
                    elif const.co_name == '<setcomp>':
                        op_name = 'LOAD_SETCOMP'
                    # verify uses 'pattr' for comparison, since 'attr'
                    # now holds Code(const) and thus can not be used
                    # for comparison (todo: think about changing this)
                    # pattr = 'code_object @ 0x%x %s->%s' % \
                    #     (id(const), const.co_filename, const.co_name)
                    pattr = '<code_object ' + const.co_name + '>'
                else:
                    if oparg < len(co.co_consts):
                        argval, _ = _get_const_info(oparg, co.co_consts)
                    # Why don't we use _ above for "pattr" rather than "const"?
                    # This *is* a little hoaky, but we have to coordinate with
                    # other parts like n_LOAD_CONST in pysource.py for example.
                    pattr = const
                    pass
            elif op in self.opc.NAME_OPS:
                pattr = names[oparg]
            elif op in self.opc.JREL_OPS:
                pattr = repr(offset + 3 + oparg)
                if op == self.opc.JUMP_FORWARD:
                    target = self.get_target(offset)
                    # FIXME: this is a hack to catch stuff like:
                    #     if x: continue
                    # the "continue" is not on a new line.
                    if len(tokens) and tokens[-1].kind == 'JUMP_BACK':
                        tokens[-1].kind = intern('CONTINUE')
            elif op in self.opc.JABS_OPS:
                pattr = repr(oparg)
            elif op in self.opc.LOCAL_OPS:
                pattr = varnames[oparg]
            elif op in self.opc.COMPARE_OPS:
                pattr = self.opc.cmp_op[oparg]
            elif op in self.opc.FREE_OPS:
                pattr = free[oparg]
        if op in self.varargs_ops:
            # CE - Hack for >= 2.5
            # Now all values loaded via LOAD_CLOSURE are packed into
            # a tuple before calling MAKE_CLOSURE.
            if (self.version >= 2.5 and op == self.opc.BUILD_TUPLE and
                    self.code[self.prev[offset]] == self.opc.LOAD_CLOSURE):
                continue
            else:
                op_name = '%s_%d' % (op_name, oparg)
                customize[op_name] = oparg
        elif self.version > 2.0 and op == self.opc.CONTINUE_LOOP:
            customize[op_name] = 0
        elif op_name in """
         CONTINUE_LOOP EXEC_STMT LOAD_LISTCOMP LOAD_SETCOMP
        """.split():
            customize[op_name] = 0
        elif op == self.opc.JUMP_ABSOLUTE:
            # Further classify JUMP_ABSOLUTE into backward jumps
            # which are used in loops, and "CONTINUE" jumps which
            # may appear in a "continue" statement. The loop-type
            # and continue-type jumps will help us classify loop
            # boundaries. The continue-type jumps help us get
            # "continue" statements with would otherwise be turned
            # into a "pass" statement because JUMPs are sometimes
            # ignored in rules as just boundary overhead. In
            # comprehensions we might sometimes classify JUMP_BACK
            # as CONTINUE, but that's okay since we add a grammar
            # rule for that.
            target = self.get_target(offset)
            if target <= offset:
                op_name = 'JUMP_BACK'
                if (offset in self.stmts and
                        self.code[offset + 3] not in (self.opc.END_FINALLY,
                                                      self.opc.POP_BLOCK)):
                    if ((offset in self.linestarts and
                         tokens[-1].kind == 'JUMP_BACK') or
                            offset not in self.not_continue):
                        op_name = 'CONTINUE'
                else:
                    # FIXME: this is a hack to catch stuff like:
                    #     if x: continue
                    # the "continue" is not on a new line.
                    if tokens[-1].kind == 'JUMP_BACK':
                        # We need 'intern' since we have
                        # already have processed the previous
                        # token.
                        tokens[-1].kind = intern('CONTINUE')
        elif op == self.opc.LOAD_GLOBAL:
            if offset in self.load_asserts:
                op_name = 'LOAD_ASSERT'
        elif op == self.opc.RETURN_VALUE:
            if offset in self.return_end_ifs:
                op_name = 'RETURN_END_IF'
        linestart = self.linestarts.get(offset, None)
        if offset not in replace:
            tokens.append(Token(op_name, oparg, pattr, offset, linestart,
                                op, has_arg, self.opc))
        else:
            tokens.append(Token(replace[offset], oparg, pattr, offset,
                                linestart, op, has_arg, self.opc))
            pass
        pass
    if show_asm in ('both', 'after'):
        for t in tokens:
            print(t.format(line_prefix='L.'))
        print()
    return tokens, customize
def best_motif_in_cluster(single_pwm, clus_pwm, clusters, fg_fa, background,
                          stats=None, metrics=("roc_auc", "recall_at_fdr")):
    """Return the best motif per cluster for a clustering result.

    The motif can be either the average motif or one of the clustered motifs.

    Parameters
    ----------
    single_pwm : str
        Filename of motifs.
    clus_pwm : str
        Filename of motifs.
    clusters :
        Motif clustering result.
    fg_fa : str
        Filename of FASTA file.
    background : dict
        Dictionary for background file names.
    stats : dict, optional
        If statistics are not supplied they will be computed.
    metrics : sequence, optional
        Metrics to use for motif evaluation. Default are "roc_auc" and
        "recall_at_fdr".

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    # BUG FIX: the docstring says stats is optional, but the original
    # crashed on the membership test below when it was omitted.
    if stats is None:
        stats = {}
    # combine original and clustered motifs
    motifs = read_motifs(single_pwm) + read_motifs(clus_pwm)
    motifs = dict([(str(m), m) for m in motifs])
    # get the statistics for those motifs that were not yet checked
    clustered_motifs = []
    for clus, singles in clusters:
        for motif in set([clus] + singles):
            if str(motif) not in stats:
                clustered_motifs.append(motifs[str(motif)])
    new_stats = {}
    for bg, bg_fa in background.items():
        for m, s in calc_stats(clustered_motifs, fg_fa, bg_fa).items():
            new_stats.setdefault(m, {})[bg] = s
    stats.update(new_stats)
    rank = rank_motifs(stats, metrics)
    # rank the motifs
    best_motifs = []
    for clus, singles in clusters:
        if len(singles) > 1:
            eval_motifs = singles
            # NOTE(review): motifs is keyed by str(m); should this test
            # str(clus) instead of clus? — confirm with callers.
            if clus not in motifs:
                eval_motifs.append(clus)
            eval_motifs = [motifs[str(e)] for e in eval_motifs]
            best_motif = sorted(eval_motifs, key=lambda x: rank[str(x)])[-1]
            best_motifs.append(best_motif)
        else:
            best_motifs.append(clus)
        for bg in background:
            stats[str(best_motifs[-1])][bg]["num_cluster"] = len(singles)
    best_motifs = sorted(best_motifs, key=lambda x: rank[str(x)], reverse=True)
    return best_motifs
def AddRunsFromDirectory(self, path, name=None):
    """Load runs from a directory; recursively walks subdirectories.

    If path doesn't exist, no-op. This ensures that it is safe to call
    `AddRunsFromDirectory` multiple times, even before the directory is made.

    Args:
        path: A string path to a directory to load runs from.
        name: Optional, specifies a name for the experiment under which the
            runs from this directory hierarchy will be imported. If omitted,
            the path will be used as the name.

    Raises:
        ValueError: If the path exists and isn't a directory.
    """
    logger.info('Starting AddRunsFromDirectory: %s (as %s)', path, name)
    for subdir in io_wrapper.GetLogdirSubdirectories(path):
        logger.info('Processing directory %s', subdir)
        # Subdirectories already seen keep their existing loader.
        if subdir in self._run_loaders:
            continue
        logger.info('Creating DB loader for directory %s', subdir)
        experiment_name, run_name = self._get_exp_and_run_names(path, subdir, name)
        self._run_loaders[subdir] = _RunLoader(
            subdir=subdir, experiment_name=experiment_name, run_name=run_name)
    logger.info('Done with AddRunsFromDirectory: %s', path)
def use_args(self, argmap, req=None, locations=None, as_kwargs=False,
             validate=None, error_status_code=None, error_headers=None):
    """Decorator that injects parsed arguments into a view function or method.

    Example usage with Flask::

        @app.route('/echo', methods=['get', 'post'])
        @parser.use_args({'name': fields.Str()})
        def greet(args):
            return 'Hello ' + args['name']

    :param argmap: Either a `marshmallow.Schema`, a `dict`
        of argname -> `marshmallow.fields.Field` pairs, or a callable
        which accepts a request and returns a `marshmallow.Schema`.
    :param tuple locations: Where on the request to search for values.
    :param bool as_kwargs: Whether to insert arguments as keyword arguments.
    :param callable validate: Validation function that receives the dictionary
        of parsed arguments. If the function returns ``False``, the parser
        will raise a :exc:`ValidationError`.
    :param int error_status_code: Status code passed to error handler functions
        when a `ValidationError` is raised.
    :param dict error_headers: Headers passed to error handler functions when
        a `ValidationError` is raised.
    """
    locations = locations or self.locations
    request_obj = req
    # Optimization: If argmap is passed as a dictionary, we only need
    # to generate a Schema once
    if isinstance(argmap, Mapping):
        argmap = dict2schema(argmap, self.schema_class)()

    def decorator(func):
        # Capture the explicitly supplied request (may be None) per decoration.
        req_ = request_obj

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            req_obj = req_
            # No explicit request given: derive it from the view's arguments.
            if not req_obj:
                req_obj = self.get_request_from_view_args(func, args, kwargs)
            # NOTE: At this point, argmap may be a Schema, or a callable
            parsed_args = self.parse(
                argmap, req=req_obj, locations=locations, validate=validate,
                error_status_code=error_status_code, error_headers=error_headers)
            if as_kwargs:
                kwargs.update(parsed_args)
                return func(*args, **kwargs)
            else:
                # Add parsed_args after other positional arguments
                new_args = args + (parsed_args,)
                return func(*new_args, **kwargs)
        wrapper.__wrapped__ = func
        return wrapper
    return decorator
def get_slack_users(self, token):
    '''Get all users from Slack'''
    ret = salt.utils.slack.query(function='users', api_key=token, opts=__opts__)
    users = {}
    if 'message' in ret:
        for item in ret['message']:
            # Only non-bot entries that carry the is_bot flag are indexed,
            # both name->id and id->name.
            if 'is_bot' in item and not item['is_bot']:
                users[item['name']] = item['id']
                users[item['id']] = item['name']
    return users
def setup_tasks(self, tasks):
    """Find task classes from category.namespace.name strings.

    :param tasks: list of "category.namespace.name" strings.
    :raises TypeError: if any task cannot be found in the registry.
    """
    task_classes = []
    for task in tasks:
        category, namespace, name = task.split(".")
        try:
            cls = find_in_registry(category=category, namespace=namespace,
                                   name=name)[0]
        except TypeError:
            log.error("Could not find the task with category.namespace.name"
                      " {0}".format(task))
            # IMPROVEMENT: the original re-raised a bare TypeError with no
            # message, discarding which task failed.
            raise TypeError(
                "Could not find the task with category.namespace.name"
                " {0}".format(task))
        task_classes.append(cls)
    self.tasks = task_classes
def start(self, segment):
    """Begin transfer for an indicated wal segment."""
    if self.closed:
        raise UserCritical(msg='attempt to transfer wal after closing',
                           hint='report a bug')
    greenlet = gevent.Greenlet(self.transferer, segment)
    greenlet.link(self._complete_execution)
    self.greenlets.add(greenlet)
    # Increment .expect before starting the greenlet, or else a
    # very unlucky .join could be fooled as to when pool is
    # complete.
    self.expect += 1
    greenlet.start()
def _create_driver ( self , ** kwargs ) :
"""Create webdriver , assign it to ` ` self . driver ` ` , and run webdriver
initiation process , which is usually used for manual login .""" | if self . driver is None :
self . driver = self . create_driver ( ** kwargs )
self . init_driver_func ( self . driver ) |
def evaluate(self, x, *args):
    """One dimensional constant flux model function.

    Parameters
    ----------
    x : number or ndarray
        Wavelengths in Angstrom.

    Returns
    -------
    y : number or ndarray
        Flux in PHOTLAM.
    """
    # Broadcast the constant amplitude over the input wavelengths.
    amplitude = (self.amplitude * np.ones_like(x)) * self._flux_unit
    converted = units.convert_flux(x, amplitude, units.PHOTLAM)
    return converted.value
def execute(self, input_data):
    '''Execute the Strings worker'''
    sample_bytes = input_data['sample']['raw_bytes']
    return {'string_list': self.find_strings.findall(sample_bytes)}
def get_bits(block_representation, coin_symbol='btc', api_key=None):
    '''Takes a block_representation and returns the number of bits'''
    overview = get_block_overview(
        block_representation=block_representation,
        coin_symbol=coin_symbol,
        txn_limit=1,
        api_key=api_key,
    )
    return overview['bits']
def this_week():
    """Return start and end date of the current week."""
    # Snap TODAY back to the most recent Monday, then span seven days.
    monday = TODAY + delta(weekday=MONDAY(-1))
    return Date(monday), Date(monday + delta(weeks=1))
def _map_unity_proxy_to_object ( value ) :
"""Map returning value , if it is unity SFrame , SArray , map it""" | vtype = type ( value )
if vtype in _proxy_map :
return _proxy_map [ vtype ] ( value )
elif vtype == list :
return [ _map_unity_proxy_to_object ( v ) for v in value ]
elif vtype == dict :
return { k : _map_unity_proxy_to_object ( v ) for k , v in value . items ( ) }
else :
return value |
def send_contact(self, chat_id, phone_number, first_name, last_name=None, vcard=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
    """Use this method to send phone contacts. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendcontact

    Parameters:

    :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
    :type  chat_id: int | str|unicode

    :param phone_number: Contact's phone number
    :type  phone_number: str|unicode

    :param first_name: Contact's first name
    :type  first_name: str|unicode

    Optional keyword parameters:

    :param last_name: Contact's last name
    :type  last_name: str|unicode

    :param vcard: Additional data about the contact in the form of a vCard, 0-2048 bytes
    :type  vcard: str|unicode

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param reply_to_message_id: If the message is a reply, ID of the original message
    :type  reply_to_message_id: int

    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove keyboard or to force a reply from the user.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply

    Returns:

    :return: On success, the sent Message is returned
    :rtype:  pytgbot.api_types.receivable.updates.Message
    """
    # NOTE(review): imported inside the method, presumably to avoid import
    # cycles between the sendable types and the bot module — confirm.
    from pytgbot.api_types.sendable.reply_markup import ForceReply
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove

    # Validate argument types up front; None is accepted for every optional
    # parameter (the extra `None` positional marks the type as optional).
    assert_type_or_raise(chat_id, (int, unicode_type), parameter_name="chat_id")
    assert_type_or_raise(phone_number, unicode_type, parameter_name="phone_number")
    assert_type_or_raise(first_name, unicode_type, parameter_name="first_name")
    assert_type_or_raise(last_name, None, unicode_type, parameter_name="last_name")
    assert_type_or_raise(vcard, None, unicode_type, parameter_name="vcard")
    assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
    assert_type_or_raise(reply_to_message_id, None, int, parameter_name="reply_to_message_id")
    assert_type_or_raise(reply_markup, None, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply), parameter_name="reply_markup")
    # Perform the actual Bot API call.
    result = self.do("sendContact", chat_id=chat_id, phone_number=phone_number, first_name=first_name, last_name=last_name, vcard=vcard, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup)
    if self.return_python_objects:
        logger.debug("Trying to parse {data}".format(data=repr(result)))
        from pytgbot.api_types.receivable.updates import Message
        try:
            return Message.from_array(result)
        except TgApiParseException:
            logger.debug("Failed parsing as api_type Message", exc_info=True)
        # end try
        # no valid parsing so far
        raise TgApiParseException("Could not parse result.")
        # See debug log for details!
    # end if return_python_objects
    return result
def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):
    """Sign an Ed25519Fulfillment.

    Args:
        input_ (:class:`~bigchaindb.common.transaction.Input`): The input
            to be signed.
        message (str): The message to be signed.
        key_pairs (dict): The keys to sign the Transaction with, keyed by
            public key.

    Returns:
        The signed input (a deep copy of the argument).

    Raises:
        KeypairMismatchException: If ``key_pairs`` has no private key for
            the input's first owner-before public key.
    """
    # NOTE: To eliminate the dangers of accidentally signing a condition by
    #       reference, we remove the reference of input_ here
    #       intentionally. If the user of this class knows how to use it,
    #       this should never happen, but then again, never say never.
    input_ = deepcopy(input_)
    public_key = input_.owners_before[0]
    message = sha3_256(message.encode())
    if input_.fulfills:
        # Fold the spent transaction id and output index into the digest so
        # the signature is bound to the specific output being fulfilled.
        message.update('{}{}'.format(input_.fulfills.txid, input_.fulfills.output).encode())
    try:
        # cryptoconditions makes no assumptions of the encoding of the
        # message to sign or verify. It only accepts bytestrings
        input_.fulfillment.sign(message.digest(), base58.b58decode(key_pairs[public_key].encode()))
    except KeyError:
        raise KeypairMismatchException('Public key {} is not a pair to '
                                       'any of the private keys'.format(public_key))
    return input_
def decode_produce_response(cls, response):
    """Decode a ProduceResponse into ProduceResponsePayload objects.

    Arguments:
        response: ProduceResponse

    Return: list of ProduceResponsePayload
    """
    payloads = []
    for topic, partitions in response.topics:
        for partition, error, offset in partitions:
            payloads.append(
                kafka.structs.ProduceResponsePayload(topic, partition, error, offset))
    return payloads
def ensure_arg(args, arg, param=None):
    """Make sure *arg* is present in the list of args.

    If *arg* is not present, adds *arg* and the optional *param*.
    If present and *param* is not None, sets the element following *arg*
    to *param*, appending it when *arg* is the final element (previously
    this case raised IndexError).

    :param list args: strings representing an argument list.
    :param string arg: argument to make sure is present in the list.
    :param string param: parameter to add or update after arg in the list.
    :return: possibly modified list of args.
    """
    for idx, found_arg in enumerate(args):
        if found_arg == arg:
            if param is not None:
                if idx + 1 < len(args):
                    args[idx + 1] = param
                else:
                    # Fix: arg was the last element; append instead of
                    # indexing one past the end.
                    args.append(param)
            return args
    args.append(arg)
    if param is not None:
        args.append(param)
    return args
def parse(self, instr):
    # type: (bytes) -> bool
    '''A method to parse ISO hybridization info out of an existing ISO.

    Parameters:
     instr - The data for the ISO hybridization; must be exactly 512 bytes.
    Returns:
     True when the data describes an IsoHybrid ISO, False otherwise.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This IsoHybrid object is already initialized')
    if len(instr) != 512:
        raise pycdlibexception.PyCdlibInvalidISO('Invalid size of the instr')
    # The first 32 bytes identify the isohybrid flavor.
    if instr[0:32] == self.ORIG_HEADER:
        self.header = self.ORIG_HEADER
    elif instr[0:32] == self.MAC_AFP:
        self.header = self.MAC_AFP
    else:
        # If we didn't see anything that we expected, then this is not an
        # IsoHybrid ISO, so just quietly return False
        return False
    (self.mbr, self.rba, unused1, self.mbr_id, unused2) = struct.unpack_from(self.FMT, instr[:32 + struct.calcsize(self.FMT)], 32)
    if unused1 != 0:
        raise pycdlibexception.PyCdlibInvalidISO('Invalid IsoHybrid section')
    if unused2 != 0:
        raise pycdlibexception.PyCdlibInvalidISO('Invalid IsoHybrid section')
    offset = 32 + struct.calcsize(self.FMT)
    # Scan the four 16-byte MBR partition entries for the one flagged
    # bootable (first byte 0x80).
    for i in range(1, 5):
        if bytes(bytearray([instr[offset]])) == b'\x80':
            self.part_entry = i
            (const_unused, self.bhead, self.bsect, self.bcyle, self.ptype, self.ehead, self.esect, self.ecyle, self.part_offset, self.psize) = struct.unpack_from('=BBBBBBBBLL', instr[:offset + 16], offset)
            break
        offset += 16
    else:
        raise pycdlibexception.PyCdlibInvalidISO('No valid partition found in IsoHybrid!')
    # The sector must end with the 0x55AA MBR boot signature.
    if bytes(bytearray([instr[-2]])) != b'\x55' or bytes(bytearray([instr[-1]])) != b'\xaa':
        raise pycdlibexception.PyCdlibInvalidISO('Invalid tail on isohybrid section')
    self.geometry_heads = self.ehead + 1
    # FIXME: I can't see any way to compute the number of sectors from the
    # available information.  For now, we just hard-code this at 32 and
    # hope for the best.
    self.geometry_sectors = 32
    self._initialized = True
    return True
def populate_schema_objects(self, schema, obj_type):
    """Return the tables or functions for an (optional) schema.

    Falls back to the current database name when *schema* is falsy;
    an unknown schema yields an empty list.
    """
    lookup = self.dbmetadata[obj_type]
    target_schema = schema or self.dbname
    try:
        return lookup[target_schema].keys()
    except KeyError:
        # Schema doesn't exist.
        return []
def clean_fails(self):
    """Check if there are any fails that were not subsequently retried.

    :return: Boolean
    """
    return any(item.failure and not item.retries_left > 0 for item in self.data)
def _get_set ( self , key , operation , create = False ) :
"""Get ( and maybe create ) a set by name .""" | return self . _get_by_type ( key , operation , create , b'set' , set ( ) ) |
def clean_up_tabs(self):
    """Remove state-tabs whose state machine no longer exists."""
    alive = self.model.state_machine_manager.state_machines
    stale = [
        identifier
        for tab_registry in (self.tabs, self.closed_tabs)
        for identifier, info in list(tab_registry.items())
        if info['sm_id'] not in alive
    ]
    for identifier in stale:
        self.close_page(identifier, delete=True)
def _raise_error_from_response(data):
    """Process response data, raising the mapped error when the request failed."""
    # Check the meta-data for why this request failed.
    meta = data.get('meta')
    if meta:
        # Account for foursquare conflicts: 409 is treated like success.
        # see: https://developer.foursquare.com/overview/responses
        if meta.get('code') in (200, 409):
            return data
        exc = error_types.get(meta.get('errorType'))
        if exc:
            raise exc(meta.get('errorDetail'))
        _log_and_raise_exception('Unknown error. meta', meta)
    else:
        _log_and_raise_exception('Response format invalid, missing meta property. data', data)
def get_host_cache(service_instance=None):
    '''Returns the host cache configuration on the proxy host.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter/ESXi host.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.get_host_cache
    '''
    host_ref = _get_proxy_target(service_instance)
    hostname = __proxy__['esxi.get_details']()['esxi_host']
    cache_info = salt.utils.vmware.get_host_cache(host_ref)
    if not cache_info:
        log.debug('Host cache not configured on host \'%s\'', hostname)
        return {'enabled': False}
    # TODO Support multiple host cache info objects (on multiple datastores)
    return {
        'enabled': True,
        'datastore': {'name': cache_info.key.name},
        'swap_size': '{}MiB'.format(cache_info.swapSize),
    }
def RemoveUser(self, user):
    """Remove a Linux user account.

    Args:
        user: string, the Linux user account to remove.
    """
    self.logger.info('Removing user %s.', user)
    if self.remove:
        command = self.userdel_cmd.format(user=user)
        try:
            subprocess.check_call(command.split(' '))
        except subprocess.CalledProcessError as e:
            self.logger.warning('Could not remove user %s. %s.', user, str(e))
        else:
            self.logger.info('Removed user account %s.', user)
    # Always clean up keys and sudoer membership, even when the account
    # itself is kept.
    self._RemoveAuthorizedKeys(user)
    self._UpdateSudoer(user, sudoer=False)
def run_task_json(task_cls, task_data):
    """Instantiate *task_cls* and run its perform callable on the given data.

    :param task_cls: task class
    :param task_data: task data
    :type task_data: TaskData
    :return: task's result
    """
    # TODO what does set_skipping_json do?
    task_callable = get_callable(instantiate(task_cls))
    data = TaskData(task_data)
    data.set_skipping_json(True)
    return task_callable(data)
def add_live_points(self):
    """Add the remaining set of live points to the current set of dead
    points. Instantiates a generator that will be called by
    the user. Returns the same outputs as :meth:`sample`.
    """
    # Check if the remaining live points have already been added
    # to the output set of samples.
    if self.added_live:
        raise ValueError("The remaining live points have already "
                         "been added to the list of samples!")
    else:
        self.added_live = True
    # After N samples have been taken out, the remaining volume is
    # `e^(-N / nlive)`. The remaining points are distributed uniformly
    # within the remaining volume so that the expected volume enclosed
    # by the `i`-th worst likelihood is
    # `e^(-N / nlive) * (nlive + 1 - i) / (nlive + 1)`.
    logvols = self.saved_logvol[-1]
    logvols += np.log(1. - (np.arange(self.nlive) + 1.) / (self.nlive + 1.))
    logvols_pad = np.concatenate(([self.saved_logvol[-1]], logvols))
    # Differences of exponentials computed stably in log space
    # (b holds the +1/-1 signs for the pairwise subtraction).
    logdvols = logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]],
                         axis=1,
                         b=np.c_[np.ones(self.nlive), -np.ones(self.nlive)])
    logdvols += math.log(0.5)
    # Defining change in `logvol` used in `logzvar` approximation.
    dlvs = logvols_pad[:-1] - logvols_pad[1:]
    # Sorting remaining live points.
    lsort_idx = np.argsort(self.live_logl)
    loglmax = max(self.live_logl)
    # Grabbing relevant values from the last dead point.
    logz = self.saved_logz[-1]
    logzvar = self.saved_logzvar[-1]
    h = self.saved_h[-1]
    loglstar = self.saved_logl[-1]
    if self._beyond_unit_bound(loglstar):
        bounditer = self.nbound - 1
    else:
        bounditer = 0
    # Add contributions from the remaining live points in order
    # from the lowest to the highest log-likelihoods.
    for i in range(self.nlive):
        # Grab live point with `i`-th lowest log-likelihood along with
        # ancillary quantities.
        idx = lsort_idx[i]
        logvol, logdvol, dlv = logvols[i], logdvols[i], dlvs[i]
        ustar = np.array(self.live_u[idx])
        vstar = np.array(self.live_v[idx])
        loglstar_new = self.live_logl[idx]
        boundidx = self.live_bound[idx]
        point_it = self.live_it[idx]
        # Compute relative contribution to results.
        logwt = np.logaddexp(loglstar_new, loglstar) + logdvol  # weight
        logz_new = np.logaddexp(logz, logwt)  # ln(evidence)
        lzterm = (math.exp(loglstar - logz_new) * loglstar +
                  math.exp(loglstar_new - logz_new) * loglstar_new)
        h_new = (math.exp(logdvol) * lzterm +
                 math.exp(logz - logz_new) * (h + logz) -
                 logz_new)  # information
        dh = h_new - h
        h = h_new
        logz = logz_new
        logzvar += dh * dlv  # var[ln(evidence)] estimate
        loglstar = loglstar_new
        logz_remain = loglmax + logvol  # remaining ln(evidence)
        delta_logz = np.logaddexp(logz, logz_remain) - logz  # dlogz
        # Save results.
        if self.save_samples:
            self.saved_id.append(idx)
            self.saved_u.append(ustar)
            self.saved_v.append(vstar)
            self.saved_logl.append(loglstar)
            self.saved_logvol.append(logvol)
            self.saved_logwt.append(logwt)
            self.saved_logz.append(logz)
            self.saved_logzvar.append(logzvar)
            self.saved_h.append(h)
            self.saved_nc.append(1)
            self.saved_boundidx.append(boundidx)
            self.saved_it.append(point_it)
            self.saved_bounditer.append(bounditer)
            self.saved_scale.append(self.scale)
        self.eff = 100. * (self.it + i) / self.ncall  # efficiency
        # Return our new "dead" point and ancillary quantities.
        yield (idx, ustar, vstar, loglstar, logvol, logwt,
               logz, logzvar, h, 1, point_it, boundidx, bounditer,
               self.eff, delta_logz)
def daily(self, symbol=None):
    """Read daily (day-bar) market data.

    :return: pd.DataFrame or None
    """
    reader = TdxExHqDailyBarReader()
    resolved = self.find_path(symbol)
    if resolved is None:
        return None
    return reader.get_df(resolved)
def file_list_projects(object_id, input_params={}, always_retry=True, **kwargs):
    """Invokes the /file-xxxx/listProjects API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Cloning#API-method%3A-%2Fclass-xxxx%2FlistProjects
    """
    route = '/%s/listProjects' % object_id
    return DXHTTPRequest(route, input_params, always_retry=always_retry, **kwargs)
def account_block_count(self, account):
    """Get number of blocks for a specific **account**.

    :param account: Account to get number of blocks for
    :type account: str
    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.account_block_count(
    ...     account="xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3")
    19
    """
    validated = self._process_value(account, 'account')
    response = self.call('account_block_count', {"account": validated})
    return int(response['block_count'])
def read_file(file_path):
    """Read yaml head and raw body content from a file.

    :param file_path: file path
    :return: tuple (meta, raw_content)
    """
    with open(file_path, 'r', encoding='utf-8') as fp:
        content = fp.read().strip()
    if content.startswith('---'):
        # May have yaml meta info, so we try to split it out on the
        # closing dash fence.
        parts = re.split(r'-{3,}', content.lstrip('-'), maxsplit=1)
        if len(parts) == 2:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input; behavior kept as-is.
            return yaml.load(parts[0]), parts[1].lstrip()
    return {}, content
def getConfigPath(configFileName=None):
    """Auxiliary function to get the configuration path depending on the system.

    On Windows the base folder is ``~\\Deepify``; on other platforms it is
    ``~/.config/Deepify``.

    :param configFileName: optional configuration file name to append;
        when None, the path of the configuration folder is returned.
    :return: expanded path of the folder (or file inside it).
    """
    # Fix: replaced "!= None" with "is not None" and folded the four
    # duplicated platform/file branches into one base-path computation.
    if sys.platform == 'win32':
        base = os.path.join('~\\', 'Deepify')
    else:
        base = os.path.join('~/', '.config', 'Deepify')
    if configFileName is not None:
        base = os.path.join(base, configFileName)
    return os.path.expanduser(base)
def plot_entropy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
    """Plots the vibrational entropy in a temperature range.

    Args:
        tmin: minimum temperature
        tmax: maximum temperature
        ntemp: number of steps
        ylim: tuple specifying the y-axis limits.
        kwargs: kwargs passed to the matplotlib function 'plot'.

    Returns:
        matplotlib figure
    """
    temps = np.linspace(tmin, tmax, ntemp)
    # Label per mole when a structure is attached, per unit cell otherwise.
    ylabel = r"$S$ (J/K/mol)" if self.structure else r"$S$ (J/K/mol-c)"
    return self._plot_thermo(self.dos.entropy, temps, ylabel=ylabel, ylim=ylim, **kwargs)
def entities(self, subject_id):
    """Returns all the entities of assertions for a subject, disregarding
    whether the assertion still is valid or not.

    :param subject_id: The identifier of the subject
    :return: A possibly empty list of entity identifiers
    """
    try:
        hits = self._cache.find({"subject_id": subject_id})
        return [hit["entity_id"] for hit in hits]
    except ValueError:
        return []
def _create_font_size_combo(self):
    """Creates the font size combo box and wires its events.

    Populates the choices from the configured default sizes, preselects
    the system default font size, and binds both dropdown selection and
    Enter-key entry to ``self.OnTextSize``.
    """
    self.std_font_sizes = config["font_default_sizes"]
    font_size = str(get_default_font().GetPointSize())
    self.font_size_combo = wx.ComboBox(self, -1, value=font_size, size=(60, -1), choices=map(unicode, self.std_font_sizes), style=wx.CB_DROPDOWN | wx.TE_PROCESS_ENTER)
    self.font_size_combo.SetToolTipString(_(u"Text size\n(points)"))
    self.AddControl(self.font_size_combo)
    # Both picking from the dropdown and typing a size then pressing
    # Enter trigger the same handler.
    self.Bind(wx.EVT_COMBOBOX, self.OnTextSize, self.font_size_combo)
    self.Bind(wx.EVT_TEXT_ENTER, self.OnTextSize, self.font_size_combo)
def PROFILE_VOIGT(sg0, GamD, Gam0, sg):
    """Voigt profile based on HTP.

    Delegates to PROFILE_HTP with all speed-dependence, correlation and
    frequency-of-velocity-changing-collision parameters set to cZero.

    # Input parameters:
    #   sg0  : Unperturbed line position in cm-1 (Input).
    #   GamD : Doppler HWHM in cm-1 (Input)
    #   Gam0 : Speed-averaged line-width in cm-1 (Input).
    #   sg   : Current WaveNumber of the Computation in cm-1 (Input).
    """
    return PROFILE_HTP(sg0, GamD, Gam0, cZero, cZero, cZero, cZero, cZero, sg)
def macro_tpm_sbs(self, state_by_state_micro_tpm):
    """Create a state-by-state coarse-grained macro TPM.

    Args:
        state_by_state_micro_tpm (np.ndarray): The state-by-state TPM of
            the micro-system.

    Returns:
        np.ndarray: The state-by-state TPM of the macro-system.
    """
    validate.tpm(state_by_state_micro_tpm, check_independence=False)
    mapping = self.make_mapping()
    num_macro_states = 2 ** len(self.macro_indices)
    macro_tpm = np.zeros((num_macro_states, num_macro_states))
    micro_states = range(2 ** len(self.micro_indices))
    micro_state_transitions = itertools.product(micro_states, repeat=2)
    # For every possible micro-state transition, get the corresponding
    # previous and next macro-state using the mapping and add that
    # probability to the state-by-state macro TPM.
    for previous_state, current_state in micro_state_transitions:
        macro_tpm[mapping[previous_state], mapping[current_state]] += (
            state_by_state_micro_tpm[previous_state, current_state])
    # Re-normalize each row because we're going from larger to smaller TPM
    return np.array([distribution.normalize(row) for row in macro_tpm])
def authenticate_with_email_and_pwd(user_email, user_password):
    '''Authenticate the user by passing the email and password.

    This function avoids prompting the command line for user credentials
    and is useful for calling tools programmatically.

    :return: dict with the user's settings keys and tokens on success;
        exits the process on authentication failure.
    '''
    if user_email is None or user_password is None:
        raise ValueError('Could not authenticate user. Missing username or password')
    upload_token = uploader.get_upload_token(user_email, user_password)
    if not upload_token:
        # Fix: the original referenced an undefined name "user_name" here,
        # which raised NameError instead of reporting the failure.
        print("Authentication failed for user name " + user_email + ", please try again.")
        sys.exit(1)
    user_key = get_user_key(user_email)
    if not user_key:
        print("User name {} does not exist, please try again or contact Mapillary user support.".format(user_email))
        sys.exit(1)
    user_permission_hash, user_signature_hash = get_user_hashes(user_key, upload_token)
    # Fix: "user_items" and "section" were undefined in the original;
    # build the result dict here, keyed by the authenticated email.
    user_items = {}
    user_items["MAPSettingsUsername"] = user_email
    user_items["MAPSettingsUserKey"] = user_key
    user_items["user_upload_token"] = upload_token
    user_items["user_permission_hash"] = user_permission_hash
    user_items["user_signature_hash"] = user_signature_hash
    return user_items
def update(self):
    """Determine all AR coefficients.

    >>> from hydpy.models.arma import *
    >>> parameterstep('1d')
    >>> responses(((1., 2.), (1.,)), th_3=((1.,), (1., 2., 3.)))
    >>> derived.ar_coefs.update()
    >>> derived.ar_coefs
    ar_coefs([[1.0, 2.0],
              [1.0, nan]])

    Note that updating parameter `ar_coefs` sets the shape of the log
    sequence |LogOut| automatically.

    >>> logs.logout
    logout([[nan, nan],
            [nan, nan]])
    """
    pars = self.subpars.pars
    coefs = pars.control.responses.ar_coefs
    # Adopt the coefficient array's shape before assigning its values.
    self.shape = coefs.shape
    self(coefs)
    # Resize the log sequence to match the new coefficient shape.
    pars.model.sequences.logs.logout.shape = self.shape
def get_queryset(self, request):
    """Limit Pages to those that belong to the request's user."""
    base_qs = super(VISADeviceAdmin, self).get_queryset(request)
    return base_qs.filter(protocol_id=PROTOCOL_ID)
def fetch_lists(keyword, max_results=20):
    """Fetch the urls of up to max_results Twitter lists that match the
    provided keyword.

    >>> len(fetch_lists('politics', max_results=4))
    """
    # CONFIG FILE READ
    api_key = config.get('GOOGLE_CSE_KEYS', 'API_KEY')
    cse_id = config.get('GOOGLE_CSE_KEYS', 'CSE_ID')
    search_term = "inurl:lists + " + keyword
    collected = []
    start_index = 1
    while len(collected) < max_results:
        batch = google_search(search_term, api_key, cse_id, num=10, start=start_index)
        if len(batch) == 0:
            print("Google API Error, returning retrieved results")
            return collected
        collected.extend(batch)
        start_index += 10
    return collected[:max_results]
def _check_heartbeats(self, ts, *args, **kwargs):
    """Checks if the heartbeats are on-time.

    If not, the channel id is escalated to ``self._late_heartbeats`` and a
    warning is issued; once a heartbeat is received again from this
    channel, it'll be removed from this dict, and an Info message logged.

    :param ts: timestamp, declares when data was received by the client
    :return:
    """
    for chan_id in self._heartbeats:
        # 10 seconds without a heartbeat counts as late.
        if ts - self._heartbeats[chan_id] >= 10:
            if chan_id not in self._late_heartbeats:
                try:
                    # This is newly late; escalate
                    log.warning("BitfinexWSS.heartbeats: Channel %s hasn't "
                                "sent a heartbeat in %s seconds!",
                                self.channel_labels[chan_id],
                                ts - self._heartbeats[chan_id])
                    self._late_heartbeats[chan_id] = ts
                except KeyError:
                    # This channel ID Is not known to us - log and raise
                    log.error("BitfinexWSS.heartbeats: Channel %s is not "
                              "registered in the connector's registry! "
                              "Restarting Connection to avoid errors..",
                              chan_id)
                    raise UnknownChannelError
            else:
                # We know of this already
                continue
        else:
            # its not late
            try:
                self._late_heartbeats.pop(chan_id)
            except KeyError:
                # wasn't late before, check next channel
                continue
            log.info("BitfinexWSS.heartbeats: Channel %s has sent a "
                     "heartbeat again!", self.channel_labels[chan_id])
    # NOTE(review): ping() placed after the scan loop, matching the
    # flattened source ordering — confirm against upstream.
    self.ping()
def current_version_string():
    """Current system python version as string major.minor.micro."""
    info = sys.version_info
    return "{}.{}.{}".format(info.major, info.minor, info.micro)
def find_page_of_state_m(self, state_m):
    """Return the page and identifier of a given state model.

    :param state_m: The state model to be searched
    :return: (page containing the state, state_identifier), or
        (None, None) when no page holds the model
    """
    for identifier, info in list(self.tabs.items()):
        if info['state_m'] is state_m:
            return info['page'], identifier
    return None, None
def delFromTimeInv(self, *params):
    '''Removes any number of parameters from time_inv for this instance.

    Parameters
    ----------
    params : string
        Any number of strings naming attributes to be removed from time_inv

    Returns
    -------
    None
    '''
    for name in params:
        if name in self.time_inv:
            self.time_inv.remove(name)
def get_consumed_write_units_percent(table_name, gsi_name, lookback_window_start=15, lookback_period=5):
    """Returns the number of consumed write units in percent.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type lookback_window_start: int
    :param lookback_window_start: Relative start time for the CloudWatch metric
    :type lookback_period: int
    :param lookback_period: Number of minutes to look at
    :returns: float -- Number of consumed writes as a
        percentage of provisioned writes
    :raises: BotoServerError, JSONResponseError -- propagated unchanged
        from the underlying AWS calls
    """
    # Fix: removed the original's two no-op "try/except X: raise" wrappers;
    # letting the exceptions propagate is behaviorally identical.
    metrics = __get_aws_metric(table_name, gsi_name, lookback_window_start, lookback_period, 'ConsumedWriteCapacityUnits')
    if metrics:
        # CloudWatch reports a Sum over the window; convert to per-second.
        lookback_seconds = lookback_period * 60
        consumed_write_units = float(metrics[0]['Sum']) / float(lookback_seconds)
    else:
        consumed_write_units = 0
    gsi_write_units = dynamodb.get_provisioned_gsi_write_units(table_name, gsi_name)
    consumed_write_units_percent = float(consumed_write_units) / float(gsi_write_units) * 100
    logger.info('{0} - GSI: {1} - Consumed write units: {2:.2f}%'.format(table_name, gsi_name, consumed_write_units_percent))
    return consumed_write_units_percent
def sqlvm_group_list(client, resource_group_name=None):
    '''Lists all SQL virtual machine groups in a resource group or subscription.'''
    if resource_group_name:
        # List all sql vm groups in the resource group.
        return client.list_by_resource_group(resource_group_name=resource_group_name)
    # List all sql vm groups in the subscription.
    return client.list()
def _create_adapter_type(network_adapter, adapter_type, network_adapter_label=''):
    '''Returns a vim.vm.device.VirtualEthernetCard object specifying a virtual
    ethernet card information.

    network_adapter
        None or VirtualEthernet object

    adapter_type
        String, type of adapter

    network_adapter_label
        string, network adapter name
    '''
    log.trace('Configuring virtual machine network '
              'adapter adapter_type=%s', adapter_type)
    if adapter_type in ['vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e']:
        edited_network_adapter = salt.utils.vmware.get_network_adapter_type(adapter_type)
        if isinstance(network_adapter, type(edited_network_adapter)):
            # The existing adapter already has the requested type; reuse it.
            edited_network_adapter = network_adapter
        else:
            if network_adapter:
                log.trace('Changing type of \'%s\' from \'%s\' to \'%s\'',
                          network_adapter.deviceInfo.label,
                          type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(),
                          adapter_type)
    else:
        # If device is edited and type not specified or does not match,
        # don't change adapter type
        if network_adapter:
            if adapter_type:
                log.error('Cannot change type of \'%s\' to \'%s\'. Not changing type',
                          network_adapter.deviceInfo.label, adapter_type)
            edited_network_adapter = network_adapter
        else:
            if not adapter_type:
                log.trace('The type of \'%s\' has not been specified. '
                          'Creating of default type \'vmxnet3\'',
                          network_adapter_label)
            # No existing device: fall back to a vmxnet3 adapter.
            edited_network_adapter = vim.vm.device.VirtualVmxnet3()
    return edited_network_adapter
def save_rnn_checkpoint(cells, prefix, epoch, symbol, arg_params, aux_params):
    """Save checkpoint for a model using RNN cells, unpacking weights first.

    Parameters
    ----------
    cells : mxnet.rnn.RNNCell or list of RNNCells
        The RNN cells used by this symbol.
    prefix : str
        Prefix of model name.
    epoch : int
        The epoch number of the model.
    symbol : Symbol
        The input symbol
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.

    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    cell_list = [cells] if isinstance(cells, BaseRNNCell) else cells
    for cell in cell_list:
        arg_params = cell.unpack_weights(arg_params)
    save_checkpoint(prefix, epoch, symbol, arg_params, aux_params)
def set_irreps(self, q, is_little_cogroup=False, nac_q_direction=None, degeneracy_tolerance=1e-4):
    """Identify ir-reps of phonon modes.

    The design of this API is not very satisfactory and is expected to be
    redesigned in the next major versions once the use case of the API for
    ir-reps feature becomes clearer.
    """
    if self._dynamical_matrix is None:
        raise RuntimeError("Dynamical matrix has not yet built.")
    self._irreps = IrReps(
        self._dynamical_matrix,
        q,
        is_little_cogroup=is_little_cogroup,
        nac_q_direction=nac_q_direction,
        factor=self._factor,
        symprec=self._symprec,
        degeneracy_tolerance=degeneracy_tolerance,
        log_level=self._log_level,
    )
    return self._irreps.run()
def grad_desc_update(x, a, c, step=0.01):
    """Given a value of x, return a better x using one gradient-descent step."""
    descent = step * gradient(x, a, c)
    return x - descent
def history_backward(self, count=1):
    """Move backwards through history."""
    self._set_history_search()
    moved = False
    # Walk back through the entries preceding the current one.
    index = self.working_index - 1
    while index >= 0:
        if self._history_matches(index):
            self.working_index = index
            count -= 1
            moved = True
            if count == 0:
                break
        index -= 1
    # If we move to another entry, move cursor to the end of the line.
    if moved:
        self.cursor_position = len(self.text)
def lookupjoin(left, right, key=None, lkey=None, rkey=None, missing=None,
               presorted=False, buffersize=None, tempdir=None, cache=True,
               lprefix=None, rprefix=None):
    """Perform a left join, but where the key is not unique in the
    right-hand table, arbitrarily choose the first matching row and
    ignore the others.

    See also :func:`petl.transform.joins.leftjoin`.
    """
    # Normalize the key arguments into left/right keys.
    lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
    view = LookupJoinView(left, right, lkey, rkey,
                          presorted=presorted, missing=missing,
                          buffersize=buffersize, tempdir=tempdir,
                          cache=cache, lprefix=lprefix, rprefix=rprefix)
    return view
def find_domain(session, name):
    """Find a domain by its domain name using the given ``session``.

    :param session: database session
    :param name: name of the domain to find
    :returns: the matching domain object, or ``None`` when the domain
        does not exist
    """
    query = session.query(Domain).filter(Domain.domain == name)
    return query.first()
def scanJoiner(self, xEUI='*', strPSKd='threadjpaketest'):
    """scan Joiner

    Adds the joiner's steering data to the commissioner, starting a
    collapsed commissioner first when none is active.
    (Python 2 module: uses print statements.)

    Args:
        xEUI: Joiner's EUI-64 (string, or an integer to be converted)
        strPSKd: Joiner's PSKd for commissioning

    Returns:
        True: successful to add Joiner's steering data
        False: fail to add Joiner's steering data
    """
    print '%s call scanJoiner' % self.port
    if not isinstance(xEUI, str):
        # non-string EUI-64: convert the integer to its hex-string form
        eui64 = self.__convertLongToString(xEUI)
        # prepend 0 at the beginning so the EUI-64 has 16 hex digits
        if len(eui64) < 16:
            eui64 = eui64.zfill(16)
        print eui64
    else:
        eui64 = xEUI
    # long timeout value to avoid automatic joiner removal (in seconds)
    timeout = 500
    cmd = WPANCTL_CMD + 'commissioner joiner-add %s %s %s' % (eui64, str(timeout), strPSKd)
    print cmd
    # a commissioner must be active before joiner steering data can be added
    if not self.isActiveCommissioner:
        self.startCollapsedCommissioner()
    if self.__sendCommand(cmd)[0] != 'Fail':
        return True
    else:
        return False
def glBufferData(target, data, usage):
    """Upload data to the currently bound buffer object.

    ``data`` may be a numpy array, or an int giving the number of
    bytes to allocate (contents uninitialized).
    """
    if isinstance(data, int):
        # Allocation-only: pass a NULL pointer with the requested size.
        size = data
        data = ctypes.c_voidp(0)
    else:
        # Ensure the array memory is contiguous and aligned before
        # handing its address to GL.
        if not data.flags['C_CONTIGUOUS'] or not data.flags['ALIGNED']:
            data = data.copy('C')
        arr = data
        size = arr.nbytes
        data = arr.ctypes.data
    # Resolve and cache the native entry point on first use.
    try:
        nativefunc = glBufferData._native
    except AttributeError:
        nativefunc = glBufferData._native = _get_gl_func(
            "glBufferData", None,
            (ctypes.c_uint, ctypes.c_int, ctypes.c_void_p, ctypes.c_uint,))
    res = nativefunc(target, size, data, usage)
def get_visible_profiles(self, user=None):
    """Return all the profiles visible to this user.

    Keeps it simple for now: only profiles of active users are
    included, and anonymous visitors get an empty result.

    :param user: a Django ``User`` instance.
    :return: all profiles that are visible to this user.
    """
    visible = self.select_related().all().filter(is_active=True)
    # NOTE(review): AnonymousUser is falsy in Django, so this branch is
    # preserved exactly as written -- confirm the intended semantics.
    if user and isinstance(user, AnonymousUser):
        visible = []
    return visible
def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
    """Calculate the length of the path up to a certain position.

    The full-path result (t0=0, t1=1) is cached together with the
    control points and tolerances that produced it.
    """
    if t0 == 0 and t1 == 1:
        # Full-path request: reuse the cached length only if it was
        # computed for the same control points at an equal-or-stricter
        # tolerance than what is being asked for now.
        if self._length_info['bpoints'] == self.bpoints() and self._length_info['error'] >= error and self._length_info['min_depth'] >= min_depth:
            return self._length_info['length']
    # using scipy.integrate.quad is quick
    if _quad_available:
        s = quad(lambda tau: abs(self.derivative(tau)), t0, t1, epsabs=error, limit=1000)[0]
    else:
        # Fall back to recursive bisection of the segment.
        s = segment_length(self, t0, t1, self.point(t0), self.point(t1), error, min_depth, 0)
    if t0 == 0 and t1 == 1:
        # Cache the full-path result with the inputs that produced it.
        self._length_info['length'] = s
        self._length_info['bpoints'] = self.bpoints()
        self._length_info['error'] = error
        self._length_info['min_depth'] = min_depth
        return self._length_info['length']
    else:
        return s
def getProjectAreas(self, archived=False, returned_properties=None):
    """Get all :class:`rtcclient.project_area.ProjectArea` objects.

    If no ProjectArea objects are retrieved, ``None`` is returned.

    :param archived: (default is False) whether the project area
        is archived
    :param returned_properties: the returned properties that you want.
        Refer to :class:`rtcclient.client.RTCClient` for more explanations
    :return: a :class:`list` that contains all the ProjectArea objects
    :rtype: list
    """
    return self._getProjectAreas(
        archived=archived,
        returned_properties=returned_properties,
    )
def update_or_create(cls, filter_key=None, with_status=False, **kwargs):
    """Update or create the element.

    If the element exists, update it using the provided kwargs after
    resolving differences from existing values. When comparing values,
    strings and ints are compared directly. If a list of strings is
    provided it is compared and updated if different; lists containing
    unhashable elements are skipped. To handle complex comparisons,
    override this method on the subclass and process the comparison
    separately.

    If an element does not have a `create` classmethod it is considered
    read-only and the request is redirected to :meth:`~get`. Provide a
    ``filter_key`` dict key/value to match the element by a specific
    attribute and value; otherwise the name field is used.

    :param dict filter_key: filter key represents the data attribute and
        value to use to find the element. If none is provided, the name
        field will be used.
    :param kwargs: keyword arguments mapping to the element's ``create``
        method.
    :param bool with_status: if set to True, a 3-tuple is returned with
        (Element, modified, created), where the second and third tuple
        items are booleans indicating the status
    :raises CreateElementFailed: could not create element with reason
    :raises ElementNotFound: if read-only element does not exist
    :return: element instance by type
    :rtype: Element
    """
    updated = False
    # Providing this flag will return before updating and require the calling
    # class to call update if changes were made.
    defer_update = kwargs.pop('defer_update', False)
    if defer_update:
        with_status = True
    element, created = cls.get_or_create(filter_key=filter_key, with_status=True, **kwargs)
    if not created:
        # Existing element: diff each provided kwarg against the cached data.
        for key, value in kwargs.items():
            # Callable, Element or string
            if callable(value):
                value = value()
            elif isinstance(value, Element):
                value = value.href
            # Retrieve the 'type' of instance attribute. This is used to
            # serialize attributes that resolve href's to elements. It
            # provides a common structure but also prevents the fetching
            # of the href to element when doing an equality comparison
            attr_type = getattr(type(element), key, None)
            if isinstance(attr_type, ElementRef):
                # Single element reference: compare/store by href.
                attr_name = getattr(attr_type, 'attr', None)
                if element.data.get(attr_name) != value:
                    element.data[attr_name] = value
                    updated = True
                continue
            elif isinstance(attr_type, ElementList):
                value_hrefs = element_resolver(value)
                # Resolve the elements to href
                attr_name = getattr(attr_type, 'attr', None)
                if set(element.data.get(attr_name, [])) != set(value_hrefs):
                    element.data[attr_name] = value_hrefs
                    updated = True
                continue
            # Type is not 'special', therefore we are expecting only strings,
            # integer types or list of strings. Complex data structures
            # will be handled later through encapsulation and __eq__, __hash__
            # for comparison. The keys value type here is going to assume the
            # provided value is of the right type as the key may not necessarily
            # exist in the cached json.
            if isinstance(value, (string_types, int)):  # also covers bool
                val = getattr(element, key, None)
                if val != value:
                    element.data[key] = value
                    updated = True
            elif isinstance(value, list) and all(isinstance(s, string_types) for s in value):
                # List of simple strings (assuming the attribute is also!)
                if set(value) ^ set(element.data.get(key, [])):
                    element.data[key] = value
                    updated = True
            # Complex lists, objects, etc will fall down here and be skipped.
            # To process these, provide defer_update=True, override update_or_create,
            # process the complex object deltas and call update()
    if updated and not defer_update:
        element.update()
    if with_status:
        return element, updated, created
    return element
def log_message(self, msg, *args):
    """Hook to log a message.

    When ``args`` are supplied they are interpolated into ``msg``
    with the ``%`` operator before logging at INFO level.
    """
    formatted = msg % args if args else msg
    self.logger.info(formatted)
def autoinit(fn):
    """Automates initialization so things are more composable.

    * All specified kwargs in the class and all autoinit classes in the
      inheritance hierarchy will be setattr'd at the end of initialization,
      with defaults derived from the inheritance hierarchy as well.
    * If **kwargs is explicitly specified, the __init__ method will be called.
    * If a default is specified as a new[class name] then a default instance
      of that class will be initialized as the value.

    Example::

        class Base(object):
            @autoinit
            def __init__(self, a="A", **kwargs):
                print "In Base."

        class T(Base):
            @autoinit
            def __init__(self, b=new[list], **kwargs):
                print "In T."

    Note: this module targets Python 2 (the generated code uses
    ``iteritems``) and builds the replacement ``__init__`` via ``exec``
    so the generated signature matches the wrapped function's.
    """
    if fn is None:
        fn = _empty_init
    if fn_has_args(fn):
        raise Error("*args support is not available in autoinit.")
        # its pretty hard to support this, though doable if really needed...
    # Defaults declared on the wrapped initializer, keyed by name.
    __defaults = fn_kwargs(fn)
    avail_ac = fn_available_argcount(fn)
    avail_args = list(fn.__code__.co_varnames[1:avail_ac])
    # Textual parameter list of the generated __init__; defaults are
    # looked up from the closed-over __defaults dict at call time.
    signature = fn_signature(fn, argument_transform=(lambda name: name), default_transform=(lambda name, _: "%s=__defaults['%s']" % (name, name)), vararg_transform=None, kwargs_transform=(lambda _: "**__kwargs"))
    signature[0] = "self"
    # Textual argument list used to forward the call up the MRO.
    call_signature = fn_signature(fn, argument_transform=(lambda name: "%s=%s" % (name, name)), default_transform=(lambda name, _: "%s=%s" % (name, name)), vararg_transform=None, kwargs_transform=(lambda _: "**__kwargs"))
    call_signature[0] = "self"
    if not fn_has_kwargs(fn):
        signature.append("**__kwargs")
        call_signature.append("**__kwargs")
    signature = ", ".join(signature)
    call_signature = ", ".join(call_signature)
    avail_args = repr(tuple(avail_args))
    code = '''def __init__(%(signature)s):
    __cls = self.__class__
    __mro = tuple(__cls.mro())
    # call up the mro
    for __base in __mro:
        if __base is object: continue
        try:
            __wrapped_init = __base.__init__.__wrapped_init
        except AttributeError:
            # not an autoinit class
            pass
        else:
            # **kwargs signals that the initializer wants to be called
            if __wrapped_init and fn_has_kwargs(__wrapped_init):
                __wrapped_init(%(call_signature)s)
    # get defaults from hierarchy
    __update_kwargs = { }
    for __base in reversed(__mro):
        if __base is __cls or __base is object: continue
        try:
            __defaults = __base.__init__.__defaults
        except AttributeError:
            # not an autoinit class
            pass
        else:
            for __name, __val in __defaults.iteritems():
                if __val is not Default:
                    __update_kwargs[__name] = __val
    # get locally passed arguments into __update_kwargs
    __locals = locals()
    for __name in %(avail_args)s:
        __val = __locals[__name]
        if __val is Default:
            if __name not in __update_kwargs:
                raise Error("Must specify argument " + __name)
        else:
            __update_kwargs[__name] = __val
    for __name, __val in __kwargs.iteritems():
        if __val is Default:
            if __name not in __update_kwargs:
                raise Error("Must specify argument " + __name)
        else:
            __update_kwargs[__name] = __val
    # set attributes according to kwargs
    for __name, __val in __update_kwargs.iteritems():
        if isinstance(__val, _new_initializer):
            setattr(self, __name, __val())
        else:
            setattr(self, __name, __val)
''' % locals()
    exec(code, globals(), locals())
    # i know, exec -- no other way to get the signature to match it seems
    # unless i build it out of an abstract syntax tree or something, which
    # seems excessive. or i could inspect the signature and do stuff dynamically
    # but that is troublesome and the documentation generators won't like it
    # if you want to try to fix it to not use exec but retain the semantics
    # please do.
    # - cyrus
    init = eval('__init__')
    init.__wrapped_init = fn
    # @UndefinedVariable
    init.__defaults = __defaults
    # @UndefinedVariable
    _functools.update_wrapper(init, fn)
    # @UndefinedVariable
    return init
    # @UndefinedVariable
def __one_equals_true(value):
    '''Test for ``1`` as a number or a string and return ``True`` if it is.

    Args:
        value: string or number or None.

    Returns:
        bool: ``True`` if 1 otherwise ``False``.
    '''
    if isinstance(value, six.integer_types):
        return value == 1
    if isinstance(value, six.string_types):
        # Only purely numeric strings are considered, then compared as text.
        if re.match(r'\d+', value, flags=re.IGNORECASE + re.UNICODE) is not None:
            return six.text_type(value) == '1'
    return False
def batch_move_file(path, dest, in_name=None, copy=True):
    """Transfer files from ``path`` to ``dest``.

    Only files whose names contain ``in_name`` are transferred; when
    ``in_name`` is None, every file in ``path`` is transferred.  Files
    of the same name already in ``dest`` are overwritten.

    parameters
        path: source directory
        dest: destination directory
        in_name: str, default None; substring filter on file names
        copy: when True copy the files, otherwise move them

    example
        # copy every .txt file from path into dest
        batch_move_file(path, dest, in_name='.txt')
    """
    assert os.path.exists(path), '%s 文件夹不存在' % path
    assert os.path.exists(dest), '%s 文件夹不存在' % dest
    transfer = shutil.copy if copy else shutil.move
    if in_name is None:
        names = os.listdir(path)
    else:
        names = [entry for entry in os.listdir(path) if in_name in entry]
    if names:
        for entry in names:
            source = os.path.join(path, entry)
            transfer(source, os.path.join(dest, entry))
            print('已转移:%s' % source)
    elif in_name is None:
        print('%s 中没有文件' % path)
    else:
        print('%s 中没有包含【 %s 】的文件' % (path, in_name))
def wrap_call(self, call_cmd):
    """"wraps" the call_cmd so it can be executed by subprocess.call
    (and related flavors) as the "args" argument.

    :param call_cmd: original args-like argument (string or sequence)
    :return: a sequence with the original command "executed" under trickle
    """
    # Fix for the old FIXME: `basestring` does not exist on Python 3.
    try:
        string_types = basestring  # Python 2: covers str and unicode
    except NameError:
        string_types = str  # Python 3
    if isinstance(call_cmd, string_types):
        call_cmd = [call_cmd]
    return [self._trickle_cmd, "-s"] + self._settings.to_argument_list() + list(call_cmd)
def put_object(self, parent_object, connection_name, **data):
    """Write the given object to the graph, connected to the given parent.

    For example::

        graph.put_object("me", "feed", message="Hello, world")

    writes "Hello, world" to the active user's wall.  Certain
    operations require extended permissions; see
    https://developers.facebook.com/docs/facebook-login/permissions
    for details about permissions.
    """
    assert self.access_token, "Write operations require an access token"
    path = "{0}/{1}/{2}".format(self.version, parent_object, connection_name)
    return self.request(path, post_args=data, method="POST")
def registerAccount(self, person, vendorSpecific=None):
    """Register an account for ``person``.

    See Also: registerAccountResponse()

    Args:
        person: the subject to register
        vendorSpecific: optional vendor-specific extensions
    Returns:
        the boolean interpretation of the service response
    """
    resp = self.registerAccountResponse(person, vendorSpecific)
    return self._read_boolean_response(resp)
def make_id():
    '''Return a new unique ID for a Bokeh object.

    Normally this returns simple monotonically increasing integer IDs
    (as strings) for identifying Bokeh objects within a Document.
    Setting the environment variable ``BOKEH_SIMPLE_IDS=no`` switches
    to globally unique IDs instead.

    Returns:
        str
    '''
    global _simple_id
    if not settings.simple_ids(True):
        return make_globally_unique_id()
    # Serialize the counter bump so concurrent callers get distinct IDs.
    with _simple_id_lock:
        _simple_id += 1
        return str(_simple_id)
def finalize(self):
    """Finalize the pull request processing by updating the wiki page
    with details and posting success/failure status to the github pull
    request's commit.
    """
    # Determine the fraction of unit tests that succeeded and the total
    # time taken by all of them.
    stotal = 0
    ttotal = 0
    for test in self.repo.testing.tests:
        stotal += (1 if test["success"] == True else 0)
        ttotal += (test["end"] - test["start"]).seconds
    self.percent = stotal / float(len(self.repo.testing.tests))
    self.message = "Results: {0:.2%} in {1:d}s.".format(self.percent, ttotal)
    if not self.testmode:
        # BUG FIX: the original referenced a bare `percent` here, which is
        # a NameError -- the value is stored on the instance as self.percent.
        if self.percent < 1:
            self.commit.create_status("failure", self.url, self.message)
        elif any([test["code"] == 1 for test in self.repo.testing.tests]):
            self.commit.create_status("pending", self.url, self.message + " Slowdown reported.")
        else:
            self.commit.create_status("success", self.url, self.message)
        self.server.wiki.update(self)
def get_list_class(context, list):
    """Return the CSS class to use for the passed-in list.

    The class name is built from the list model's app label and
    model name.
    """
    meta = list.model._meta
    return "list_%s_%s" % (meta.app_label, meta.model_name)
def __prepare_gprest_call(self, requestURL, params=None, headers=None, restType='GET', body=None):
    """Return the (auth, headers) pair for a Globalization Pipeline REST call.

    Chooses between IAM API-key, HTTP basic and HMAC authentication
    based on the service account configuration and the selected auth
    mode.  ``headers`` is mutated in place when provided.
    """
    if self.__serviceAccount.is_iam_enabled():
        # IAM mode: no requests-level auth; the API key travels in a header.
        auth = None
        iam_api_key_header = {self.__AUTHORIZATION_HEADER_KEY: str('API-KEY ' + self.__serviceAccount.get_api_key())}
        if not headers is None:
            headers.update(iam_api_key_header)
        else:
            headers = iam_api_key_header
    elif self.__auth == self.BASIC_AUTH:
        # Basic auth: requests handles the Authorization header itself.
        auth = (self.__serviceAccount.get_user_id(), self.__serviceAccount.get_password())
    elif self.__auth == self.HMAC_AUTH:
        auth = None
        # need to prepare url by appending params to the end
        # before creating the hmac headers
        fakeRequest = requests.PreparedRequest()
        fakeRequest.prepare_url(requestURL, params=params)
        preparedUrl = fakeRequest.url
        hmacHeaders = self.__get_gaas_hmac_headers(method=restType, url=preparedUrl, body=body)
        if not headers is None:
            headers.update(hmacHeaders)
        else:
            headers = hmacHeaders
    return auth, headers
def present(name, ip, clean=False):  # pylint: disable=C0103
    '''Ensures that the named host is present with the given ip

    name
        The host to assign an ip to

    ip
        The ip addr(s) to apply to the host. Can be a single IP or a list of IP
        addresses.

    clean : False
        Remove any entries which don't match those configured in the ``ip``
        option.

        .. versionadded:: 2018.3.4
    '''
    ret = {'name': name, 'changes': {}, 'result': None if __opts__['test'] else True, 'comment': ''}
    if not isinstance(ip, list):
        ip = [ip]
    all_hosts = __salt__['hosts.list_hosts']()
    comments = []
    to_add = set()
    to_remove = set()
    # First check for IPs not currently in the hosts file
    to_add.update([(addr, name) for addr in ip if addr not in all_hosts])
    # Now sweep through the hosts file and look for entries matching either the
    # IP address(es) or hostname.
    for addr, aliases in six.iteritems(all_hosts):
        if addr not in ip:
            if name in aliases:
                # Found match for hostname, but the corresponding IP is not in
                # our list, so we need to remove it.
                if clean:
                    to_remove.add((addr, name))
                else:
                    ret.setdefault('warnings', []).append(
                        'Host {0} present for IP address {1}. To get rid of '
                        'this warning, either run this state with \'clean\' '
                        'set to True to remove {0} from {1}, or add {1} to '
                        'the \'ip\' argument.'.format(name, addr)
                    )
        else:
            if name in aliases:
                # No changes needed for this IP address and hostname
                comments.append('Host {0} ({1}) already present'.format(name, addr))
            else:
                # IP address listed in hosts file, but hostname is not present.
                # We will need to add it.
                if salt.utils.validate.net.ip_addr(addr):
                    to_add.add((addr, name))
                else:
                    ret['result'] = False
                    comments.append('Invalid IP Address for {0} ({1})'.format(name, addr))
    # Apply the queued additions (or report what would happen in test mode).
    for addr, name in to_add:
        if __opts__['test']:
            comments.append('Host {0} ({1}) would be added'.format(name, addr))
        else:
            if __salt__['hosts.add_host'](addr, name):
                comments.append('Added host {0} ({1})'.format(name, addr))
            else:
                ret['result'] = False
                comments.append('Failed to add host {0} ({1})'.format(name, addr))
                continue
        ret['changes'].setdefault('added', {}).setdefault(addr, []).append(name)
    # Apply the queued removals (only populated when clean=True).
    for addr, name in to_remove:
        if __opts__['test']:
            comments.append('Host {0} ({1}) would be removed'.format(name, addr))
        else:
            if __salt__['hosts.rm_host'](addr, name):
                comments.append('Removed host {0} ({1})'.format(name, addr))
            else:
                ret['result'] = False
                comments.append('Failed to remove host {0} ({1})'.format(name, addr))
                continue
        ret['changes'].setdefault('removed', {}).setdefault(addr, []).append(name)
    ret['comment'] = '\n'.join(comments)
    return ret
def destroy_ebs_volume(connection, region, volume_id, log=False):
    """Destroy an EBS volume.

    :param connection: EC2 connection object
    :param region: AWS region, passed through to ebs_volume_exists
    :param volume_id: id of the volume to delete
    :param log: when True, emit progress messages
    :raises Exception: if the volume still exists after the delete attempt
    """
    if ebs_volume_exists(connection, region, volume_id):
        if log:
            log_yellow('destroying EBS volume ...')
        try:
            connection.delete_volume(volume_id)
        except Exception:
            # our EBS volume may be gone, but AWS info tables are stale:
            # wait a bit and ask again
            sleep(5)
            if not ebs_volume_exists(connection, region, volume_id):
                pass
            else:
                # BUG FIX: the original did `raise ("...")`, which raises a
                # str and is a TypeError on Python 3; raise a real exception.
                raise Exception("Couldn't delete EBS volume")
def _parse_irc(self):
    """Parse intrinsic reaction coordinate calculation.

    Returns a dictionary containing:
        geometries: a list of Molecule instances representing each point in the IRC
        energies: a list of total energies (Hartree)
        distances: distance from the starting point in mass-weighted coords (bohr*sqrt(amu))
    """
    # Split the log text into one section per IRC point.
    irc_geoms = sections(re.escape("***** NEXT POINT ON IRC FOUND *****"), re.escape("INTERNUCLEAR DISTANCES (ANGS.)"), self.text)
    # get and store the energy
    energies = [entry.splitlines()[5] for entry in irc_geoms]
    # The total energy line
    energies = [float(entry.split()[3]) for entry in energies]
    # get and store the distance
    distances = [entry.splitlines()[4] for entry in irc_geoms]
    # The path distance line
    distances = [float(entry.split()[5]) for entry in distances]
    # strip the garbage: drop the header/footer lines, keeping coordinates
    # (line indices assume the fixed GAMESS IRC section layout)
    irc_geoms = ['\n'.join(i.splitlines()[11:-1]) for i in irc_geoms]
    irc_geoms = [self._parse_geometry(i) for i in irc_geoms]
    return {"geometries": irc_geoms, "energies": energies, "distances": distances}
def process(self, sessionRecord, message):
    """Process an incoming PreKeyWhisperMessage against a session record.

    :param sessionRecord: the session record to update
    :param message: the incoming message
    :type message: PreKeyWhisperMessage
    :return: the unsigned pre-key id consumed by the protocol version
    :raises UntrustedIdentityException: when the sender's identity key
        is not trusted for this recipient
    """
    version = message.getMessageVersion()
    theirIdentityKey = message.getIdentityKey()
    if not self.identityKeyStore.isTrustedIdentity(self.recipientId, theirIdentityKey):
        raise UntrustedIdentityException(self.recipientId, theirIdentityKey)
    # Dispatch on the protocol version of the message.
    if version == 2:
        unsignedPreKeyId = self.processV2(sessionRecord, message)
    elif version == 3:
        unsignedPreKeyId = self.processV3(sessionRecord, message)
    else:
        raise AssertionError("Unkown version %s" % version)
    # Only persist the identity once processing succeeded.
    self.identityKeyStore.saveIdentity(self.recipientId, theirIdentityKey)
    return unsignedPreKeyId
def _handle_parentof ( self , node , scope , ctxt , stream ) :
"""Handle the parentof unary operator
: node : TODO
: scope : TODO
: ctxt : TODO
: stream : TODO
: returns : TODO""" | # if someone does something like parentof ( this ) . blah ,
# we ' ll end up with a StructRef instead of an ID ref
# for node . expr , but we ' ll also end up with a structref
# if the user does parentof ( a . b . c ) . . .
# TODO how to differentiate between the two ? ?
# the proper way would be to do ( parentof ( a . b . c ) ) . a or
# ( parentof a . b . c ) . a
field = self . _handle_node ( node . expr , scope , ctxt , stream )
parent = field . _pfp__parent
return parent |
def replace(self, text, to_template='{name} ({url})', from_template=None, name_matcher=Matcher(looks_like_name), url_matcher=Matcher(r'.*[^:]+$')):
    """Replace all occurrences of rendered from_template in text with
    `to_template` rendered from each match.groupdict().

    TODO: from_template

    >>> translator = HyperlinkStyleCorrector()
    >>> adoc = "Two http://what.com[WAT] with https://another.com/api?q=1&a=2[longer url]."
    >>> translator.translate(adoc)
    'Two WAT (http://what.com) with longer url (https://another.com/api?q=1&a=2).'
    """
    # NOTE(review): the Matcher(...) defaults are evaluated once at import
    # time and shared across calls -- confirm Matcher instances are stateless.
    self.name_matcher = name_matcher or Matcher()
    self.url_matcher = url_matcher or Matcher()
    matches = self.finditer(text)
    newdoc = copy(text)
    logger.debug('before translate: {}'.format(newdoc))
    for m in matches:
        # this outer m.captures() loop is overkill:
        # overlapping pattern matches probably won't match after the first replace
        logger.debug('match: {}'.format(m))
        logger.debug('match.captures(): {}'.format(m.captures()))
        for i, captured_str in enumerate(m.captures()):
            captureddict = {'name': None, 'scheme': None, 'url': None}
            # Align the i-th capture of every named group; groups with
            # fewer captures than i get None and a warning.
            for k, v in m.capturesdict().items():
                if len(v) > i:
                    captureddict[k] = v[i]
                else:
                    captureddict[k] = None
                    logger.warning('Overlapping captured matches were mishandled: {}'.format(m.capturesdict()))
            # need to check for optional args:
            name = captureddict.get('name', None)
            url = captureddict.get('url', None)
            scheme = captureddict.get('scheme', None)
            # Skip captures that lack any required component or fail the matchers.
            if (not scheme or not name or not self.name_matcher.ismatch(name) or not url or not self.url_matcher.ismatch(url)):
                continue
            if from_template:
                rendered_from_template = from_template.format(**captureddict)
            else:
                rendered_from_template = captured_str
            # TODO: render numbered references like r'\1' before rendering named references
            # or do them together in one `.format(**kwargs)` after translating \1 to {1} and groupsdict().update({1: ...})
            rendered_to_template = to_template.format(**m.groupdict())
            newdoc = newdoc.replace(rendered_from_template, rendered_to_template)
    return newdoc
def bad_syntax(cls, syntax, resource_name, ex=None):
    """Build an exception for a resource name that cannot be parsed.

    :param syntax: description of the expected syntax
    :param resource_name: the name that failed to parse
    :param ex: optional underlying exception to include in the message
    :return: an instance of ``cls`` describing the parse failure
    """
    if ex:
        detail = "The syntax is '%s' (%s)." % (syntax, ex)
    else:
        detail = "The syntax is '%s'." % syntax
    return cls("Could not parse '%s'. %s" % (resource_name, detail))
def CreateServer(frontend=None):
    """Start frontend http server.

    Tries each port from Frontend.bind_port up to Frontend.port_max
    until one can be bound; any bind error other than address-in-use
    (or running out of ports) is re-raised.
    """
    max_port = config.CONFIG.Get("Frontend.port_max", config.CONFIG["Frontend.bind_port"])
    for port in range(config.CONFIG["Frontend.bind_port"], max_port + 1):
        server_address = (config.CONFIG["Frontend.bind_address"], port)
        try:
            httpd = GRRHTTPServer(server_address, GRRHTTPServerHandler, frontend=frontend)
            break
        except socket.error as e:
            # Port taken: log and try the next one, unless we ran out.
            if e.errno == socket.errno.EADDRINUSE and port < max_port:
                logging.info("Port %s in use, trying %s", port, port + 1)
            else:
                raise
    sa = httpd.socket.getsockname()
    logging.info("Serving HTTP on %s port %d ...", sa[0], sa[1])
    return httpd
def and_filter_from_opts(opts):
    '''Build an AND filter from the provided opts dict as passed to a
    command from the filter_options decorator.  Assumes all dict values
    are lists of filter dict constructs.'''
    # Drop empty option lists, then flatten the rest into one sequence.
    nonempty = [option for option in opts.values() if option]
    return filters.and_filter(*list(chain.from_iterable(nonempty)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.