signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def unique_ordered(list_):
    """Return the unique items of ``list_`` in the order they were first seen.

    Args:
        list_ (iterable): items to deduplicate; items must be hashable.

    Returns:
        list: unique items, preserving first-seen order.

    Example:
        >>> unique_ordered([4, 6, 6, 0, 6, 1, 0, 2, 2, 1])
        [4, 6, 0, 1, 2]
    """
    # dicts preserve insertion order (Python 3.7+), so fromkeys gives a
    # single-pass ordered dedup without the flag/compress helper round-trip.
    return list(dict.fromkeys(list_))
|
def add(self, ipaddr=None, proto=None, port=None, fields=None):
    """Add a service record.

    :param ipaddr: IP Address
    :param proto: Protocol (tcp, udp, info)
    :param port: Port (0-65535)
    :param fields: Extra fields
    :return: (True/False, t_services.id or response message)
    """
    # Pure delegation to the transport layer; no local validation is done.
    response = self.send.service_add(ipaddr, proto, port, fields)
    return response
|
def add_node(cls, cluster_id_label, parameters=None):
    """Add a node to an existing cluster.

    :param cluster_id_label: id or label identifying the target cluster
    :param parameters: optional extra parameters for the new node
    :return: the response of the POST request to the cluster nodes endpoint
    """
    if not parameters:
        parameters = {}
    connection = Qubole.agent(version=Cluster.api_version)
    url = cls.element_path(cluster_id_label) + "/nodes"
    return connection.post(url, data={"parameters": parameters})
|
def _startup(cls):
    """Create Endpoint instances and manage automatic flags.

    For every relation type attached to this charm, instantiate the matching
    Endpoint subclass, register it on the class, and let it set up its
    triggers, departed handling, and flags. Relation data is flushed at hook
    exit for every attached relation.
    """
    # populate context based on attached relations
    for endpoint_name in sorted(hookenv.relation_types()):
        relf = relation_factory(endpoint_name)
        # Skip relation types with no factory, or ones not handled by this class.
        if not relf or not issubclass(relf, cls):
            continue
        rids = sorted(hookenv.relation_ids(endpoint_name))
        # ensure that relation IDs have the endpoint name prefix, in case
        # juju decides to drop it at some point
        rids = ['{}:{}'.format(endpoint_name, rid) if ':' not in rid else rid for rid in rids]
        endpoint = relf(endpoint_name, rids)
        # Cache the endpoint on the class so later lookups reuse it.
        cls._endpoints[endpoint_name] = endpoint
        endpoint.register_triggers()
        endpoint._manage_departed()
        endpoint._manage_flags()
        for relation in endpoint.relations:
            # Flush any buffered relation data when the hook exits.
            hookenv.atexit(relation._flush_data)
|
def _default_request_kwargs(self):
    """The default request keyword arguments to be passed to the requests library.

    Extends the parent class defaults with this client's auth token header.
    """
    kwargs = copy.deepcopy(super(Acls, self)._default_request_kwargs)
    headers = kwargs.setdefault('headers', {})
    headers['X-Auth-Token'] = self._client.auth._token
    return kwargs
|
def split_namespace(self, tag):
    r"""Split an XML tag into its namespace and local name.

    :param tag: tag name, possibly namespace-qualified
    :return: a pair of (namespace, tag); namespace is '' when absent
    :rtype: tuple
    """
    match = self.__regex['xml_ns'].search(tag)
    if match:
        return match.groups()
    return ('', tag)
|
def _is_statement_in_list(new_stmt, old_stmt_list):
    """Return True if the given statement is equivalent to one in a list.

    Determines whether the statement is equivalent to any statement in the
    given list of statements, with equivalency determined by Statement's
    equals method. NOTE: as a side effect, near-duplicate statements may have
    their agents' db_refs enriched in place (see inline comments).

    Parameters
    ----------
    new_stmt : indra.statements.Statement
        The statement to compare with
    old_stmt_list : list[indra.statements.Statement]
        The statement list whose entries we compare with statement

    Returns
    -------
    in_list : bool
        True if statement is equivalent to any statements in the list
    """
    for old_stmt in old_stmt_list:
        # Exact equivalence: definitely in the list.
        if old_stmt.equals(new_stmt):
            return True
        elif old_stmt.evidence_equals(new_stmt) and old_stmt.matches(new_stmt):
            # Same evidence and matching structure, but not equal: the
            # difference must be in the agents' db_refs.
            # If we're comparing a complex, make sure the agents are sorted.
            if isinstance(new_stmt, Complex):
                agent_pairs = zip(old_stmt.sorted_members(), new_stmt.sorted_members())
            else:
                agent_pairs = zip(old_stmt.agent_list(), new_stmt.agent_list())
            # Compare agent-by-agent.
            for ag_old, ag_new in agent_pairs:
                s_old = set(ag_old.db_refs.items())
                s_new = set(ag_new.db_refs.items())
                # If they're equal this isn't the one we're interested in.
                if s_old == s_new:
                    continue
                # If the new statement has nothing new to offer, just ignore it
                if s_old > s_new:
                    return True
                # If the new statement does have something new, add it to the
                # existing statement. And then ignore it.
                if s_new > s_old:
                    ag_old.db_refs.update(ag_new.db_refs)
                    return True
                # If this is a case where different CHEBI ids were mapped to
                # the same entity, set the agent name to the CHEBI id.
                if _fix_different_refs(ag_old, ag_new, 'CHEBI'):
                    # Check to make sure the newly described statement does
                    # not match anything (re-scan after the in-place fix).
                    return _is_statement_in_list(new_stmt, old_stmt_list)
                # If this is a case, like above, but with UMLS IDs, do the same
                # thing as above. This will likely never be improved.
                if _fix_different_refs(ag_old, ag_new, 'UMLS'):
                    # Check to make sure the newly described statement does
                    # not match anything.
                    return _is_statement_in_list(new_stmt, old_stmt_list)
                logger.warning("Found an unexpected kind of duplicate. " "Ignoring it.")
                return True
            # This means all the agents matched, which can happen if the
            # original issue was the ordering of agents in a Complex.
            return True
        elif old_stmt.get_hash(True, True) == new_stmt.get_hash(True, True):
            # Check to see if we can improve the annotation of the existing
            # statement.
            e_old = old_stmt.evidence[0]
            e_new = new_stmt.evidence[0]
            if e_old.annotations['last_verb'] is None:
                e_old.annotations['last_verb'] = e_new.annotations['last_verb']
            # If the evidence is "the same", modulo annotations, just ignore it
            if e_old.get_source_hash(True) == e_new.get_source_hash(True):
                return True
    return False
|
def policy_definition_absent(name, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a policy definition does not exist in the current subscription.

    :param name:
        Name of the policy definition.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    policy = __salt__['azurearm_resource.policy_definition_get'](name, azurearm_log_level='info', **connection_auth)

    # Already gone -- nothing to do.
    if 'error' in policy:
        ret['result'] = True
        ret['comment'] = 'Policy definition {0} is already absent.'.format(name)
        return ret

    # Test mode: report what would change without deleting anything.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Policy definition {0} would be deleted.'.format(name)
        ret['changes'] = {'old': policy, 'new': {}}
        return ret

    if __salt__['azurearm_resource.policy_definition_delete'](name, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Policy definition {0} has been deleted.'.format(name)
        ret['changes'] = {'old': policy, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete policy definition {0}!'.format(name)
    return ret
|
def get_throttled_by_provisioned_read_event_percent(table_name, gsi_name, lookback_window_start=15, lookback_period=5):
    """Return the percentage of throttled read events relative to provisioning.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :type gsi_name: str
    :param gsi_name: Name of the GSI
    :type lookback_window_start: int
    :param lookback_window_start: Relative start time for the CloudWatch metric
    :type lookback_period: int
    :param lookback_period: Number of minutes to look at
    :returns: float -- Percent of throttled read events by provisioning
    :raises: BotoServerError, JSONResponseError -- propagated unchanged from
        the underlying AWS calls (the previous try/except blocks only
        re-raised, so they have been removed as no-ops).
    """
    metrics = __get_aws_metric(table_name, gsi_name, lookback_window_start, lookback_period, 'ReadThrottleEvents')
    if metrics:
        # CloudWatch returns a Sum over the period; normalize to events/second.
        lookback_seconds = lookback_period * 60
        throttled_read_events = float(metrics[0]['Sum']) / float(lookback_seconds)
    else:
        throttled_read_events = 0
    gsi_read_units = dynamodb.get_provisioned_gsi_read_units(table_name, gsi_name)
    throttled_by_provisioned_read_percent = float(throttled_read_events) / float(gsi_read_units) * 100
    logger.info(
        '{0} - GSI: {1} - Throttled read percent '
        'by provision: {2:.2f}%'.format(table_name, gsi_name, throttled_by_provisioned_read_percent))
    return throttled_by_provisioned_read_percent
|
def _unpack(formatstring, packed):
    """Unpack a bytestring into a value.

    Uses the built-in :mod:`struct` Python module.

    Args:
        * formatstring (str): String for the packing. See the :mod:`struct` module for details.
        * packed (str): The bytestring to be unpacked.

    Returns:
        A value. The type depends on the formatstring.

    Raises:
        ValueError

    Note that the :mod:`struct` module wants byte buffers for Python3,
    but bytestrings for Python2. This is compensated for automatically.
    """
    _checkString(formatstring, description='formatstring', minlength=1)
    _checkString(packed, description='packed string', minlength=1)
    if sys.version_info[0] > 2:
        # Convert types to make it Python3 compatible
        packed = bytes(packed, encoding='latin1')
    try:
        value = struct.unpack(formatstring, packed)[0]
    except struct.error:
        # Only struct's own error indicates a bad bytestring; the previous
        # bare "except:" also swallowed unrelated errors (KeyboardInterrupt,
        # programming mistakes) and masked their tracebacks.
        errortext = 'The received bytestring is probably wrong, as the bytestring-to-num conversion failed.'
        errortext += ' Bytestring: {0!r} Struct format code is: {1}'
        raise ValueError(errortext.format(packed, formatstring))
    return value
|
def user_info(self, username):
    """Get info of a specific user.

    :param username: the username of the user to get info about
    :return: the API response for that user
    """
    endpoint = "{}/api/0/user/{}".format(self.instance, username)
    return self._call_api(endpoint)
|
def _outliers ( self , x ) :
"""Compute number of outliers"""
|
outliers = self . _tukey ( x , threshold = 1.5 )
return np . size ( outliers )
|
def read_cz_sem(fh, byteorder, dtype, count, offsetsize):
    """Read Zeiss SEM tag and return as dict.

    The tag payload is a null-stripped text blob of sections: an
    all-uppercase line starts a new key, and the following line holds a
    ``name = value`` (or ``name: value``) pair with an optional unit suffix.
    Lines outside any section are collected under the '' key.

    See https://sourceforge.net/p/gwyddion/mailman/message/29275000/ for
    unnamed values.
    """
    # byteorder, dtype and offsetsize are unused here; presumably kept for a
    # common tag-reader signature -- TODO confirm against the caller.
    result = {'': ()}
    key = None
    data = bytes2str(stripnull(fh.read(count)))
    for line in data.splitlines():
        if line.isupper():
            # Section header: becomes the (lowercased) key for the next line.
            key = line.lower()
        elif key:
            try:
                name, value = line.split('=')
            except ValueError:
                try:
                    # Fall back to colon-separated pairs.
                    name, value = line.split(':', 1)
                except Exception:
                    # Unparseable line: skip it, keep the pending key.
                    continue
            value = value.strip()
            unit = ''
            try:
                # Try "number unit" pairs, e.g. "1.0 mm".
                v, u = value.split()
                number = astype(v, (int, float))
                if number != v:
                    value = number
                    unit = u
            except Exception:
                # Single token: try a plain numeric conversion.
                number = astype(value, (int, float))
                if number != value:
                    value = number
            # Map boolean-ish strings to real booleans.
            if value in ('No', 'Off'):
                value = False
            elif value in ('Yes', 'On'):
                value = True
            result[key] = (name.strip(), value)
            if unit:
                result[key] += (unit,)
            # One value line per section header.
            key = None
        else:
            # Unnamed value outside any section.
            result[''] += (astype(line, (int, float)),)
    return result
|
def state(self):
    """Persona state (e.g. Online, Offline, Away, Busy, etc)

    :rtype: :class:`.EPersonaState`
    """
    current = self.get_ps('persona_state', False)
    if not current:
        return EPersonaState.Offline
    return EPersonaState(current)
|
def _get_constrs_load_memory(self, gadget):
    """Generate constraints for the LoadMemory gadgets: dst_reg <- mem[src_reg + offset]"""
    # Post-state expression for the destination register and its size in bits.
    dst = self.analyzer.get_register_expr(gadget.destination[0].name, mode="post")
    size = gadget.destination[0].size
    if isinstance(gadget.sources[0], ReilRegisterOperand):
        # Address is a register base (pre-state) plus an immediate offset.
        base_addr = self.analyzer.get_register_expr(gadget.sources[0].name, mode="pre")
        offset = gadget.sources[1].immediate
        addr = base_addr + offset
    else:
        # Absolute address given directly as an immediate.
        addr = gadget.sources[1].immediate
    constrs = []
    # Byte-wise mismatch constraints between the loaded memory and dst;
    # NOTE(review): these look like negations used to search for a
    # counterexample to the gadget's semantics -- confirm against the caller.
    for i in reversed(range(0, size, 8)):
        bytes_exprs_1 = self.analyzer.get_memory_expr(addr + i // 8, 8 // 8)
        bytes_exprs_2 = smtfunction.extract(dst, i, 8)
        constrs += [bytes_exprs_1 != bytes_exprs_2]
    # Check all non-modified registers don't change.
    constrs_mod = []
    for name in self._arch_info.registers_gp_base:
        if name not in [r.name for r in gadget.modified_registers]:
            var_initial = self.analyzer.get_register_expr(name, mode="pre")
            var_final = self.analyzer.get_register_expr(name, mode="post")
            constrs_mod += [var_initial != var_final]
    if constrs_mod:
        # OR-fold into a single constraint: some supposedly-unmodified
        # register changed.
        constrs_mod = [reduce(lambda c, acc: acc | c, constrs_mod[1:], constrs_mod[0])]
    return constrs + constrs_mod
|
def QA_util_get_real_datelist(start, end):
    """Resolve (start, end) to actual trading dates.

    Snaps ``start`` forward and ``end`` backward to real trading days on the
    SSE calendar. When no trading day lies between start and end, returns
    (None, None). Usage: ``start, end = QA_util_get_real_datelist(s, e)``.

    @yutiansut 2017/8/10, 2017-12-19
    """
    real_start = QA_util_get_real_date(start, trade_date_sse, 1)
    real_end = QA_util_get_real_date(end, trade_date_sse, -1)
    if trade_date_sse.index(real_start) > trade_date_sse.index(real_end):
        return None, None
    return real_start, real_end
|
def nx_transitive_reduction(G, mode=1):
    """Compute the transitive reduction of a directed graph.

    Cyclic graphs are handled by operating on the condensation (each
    strongly-connected component collapsed to a node) and expanding the
    result back afterwards.

    Args:
        G (nx.DiGraph): input graph.
        mode (int): 1 uses a DFS-based edge-removal algorithm; any other
            value uses the adjacency-matrix / transitive-closure method.

    Returns:
        nx.DiGraph: the transitive reduction of G.

    References:
        https://en.wikipedia.org/wiki/Transitive_reduction#Computing_the_reduction_using_the_closure
        http://dept-info.labri.fr/~thibault/tmp/0201008.pdf
        http://stackoverflow.com/questions/17078696/transitive-reduction-of-directed-graph-in-python

    CommandLine:
        python -m utool.util_graph nx_transitive_reduction --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> G = testdata_graph()[1]
        >>> G_tr = nx_transitive_reduction(G, mode=1)
    """
    import utool as ut

    def _uncondense(G_cond, G_tr):
        # Expand the condensation's SCC nodes back into their original
        # members, re-adding a directed cycle for every collapsed SCC.
        # (This block was previously duplicated verbatim in both modes.)
        uncondensed = G_cond.__class__()
        mapping = G_cond.graph['mapping']
        uncondensed.add_nodes_from(mapping.keys())
        inv_mapping = ut.invert_dict(mapping, unique_vals=False)
        for u, v in G_tr.edges():
            # Use a representative member of each SCC for the reduced edge.
            uncondensed.add_edge(inv_mapping[u][0], inv_mapping[v][0])
        for key, path in inv_mapping.items():
            if len(path) > 1:
                directed_cycle = list(ut.itertwo(path, wrap=True))
                uncondensed.add_edges_from(directed_cycle)
        return uncondensed

    has_cycles = not nx.is_directed_acyclic_graph(G)
    if has_cycles:
        # The reduction algorithms assume a DAG, so work on the condensation.
        G_orig = G
        G = nx.condensation(G_orig)

    nodes = list(G.nodes())
    node2_idx = ut.make_index_lookup(nodes)

    if mode == 1:
        # For each node u, perform DFS consider its set of (non-self) children C.
        # For each descendant v, of a node in C, remove any edge from u to v.
        G_tr = G.copy()
        for parent in G_tr.nodes():
            # Remove self loops
            if G_tr.has_edge(parent, parent):
                G_tr.remove_edge(parent, parent)
            # For each child of the parent
            for child in list(G_tr.successors(parent)):
                # Preorder nodes includes its argument (no added complexity)
                for gchild in list(G_tr.successors(child)):
                    # Remove all edges from parent to non-child descendants
                    for descendant in nx.dfs_preorder_nodes(G_tr, gchild):
                        if G_tr.has_edge(parent, descendant):
                            G_tr.remove_edge(parent, descendant)
    else:
        # Matrix method: keep edge (u, v) iff it is not implied by a
        # two-step path through the transitive closure (A and not A.B).
        def make_adj_matrix(G):
            edges = list(G.edges())
            edge2_idx = ut.partial(ut.dict_take, node2_idx)
            uv_list = ut.lmap(edge2_idx, edges)
            A = np.zeros((len(nodes), len(nodes)))
            A[tuple(np.array(uv_list).T)] = 1
            return A

        G_ = nx.dag.transitive_closure(G)
        A = make_adj_matrix(G)
        B = make_adj_matrix(G_)
        AB = A.dot(B)
        A_and_notAB = np.logical_and(A, np.logical_not(AB))
        tr_uvs = np.where(A_and_notAB)
        edges = list(zip(*ut.unflat_take(nodes, tr_uvs)))
        G_tr = G.__class__()
        G_tr.add_nodes_from(nodes)
        G_tr.add_edges_from(edges)

    if has_cycles:
        G_tr = _uncondense(G, G_tr)
    return G_tr
|
def check_environment(provider_conf):
    """Check all resources needed by Enos.

    Validates the image, flavors and network against the current session and
    returns everything the caller needs as a single dict.
    """
    session = get_session()
    image_id = check_glance(session, provider_conf.image)
    flavor_to_id, id_to_flavor = check_flavors(session)
    ext_net, network, subnet = check_network(
        session,
        provider_conf.configure_network,
        provider_conf.network,
        subnet=provider_conf.subnet,
        dns_nameservers=provider_conf.dns_nameservers,
        allocation_pool=provider_conf.allocation_pool,
    )
    return {
        'session': session,
        'image_id': image_id,
        'flavor_to_id': flavor_to_id,
        'id_to_flavor': id_to_flavor,
        'ext_net': ext_net,
        'network': network,
        'subnet': subnet,
    }
|
def parse_requirements(file_):
    """Parse a requirements-formatted file.

    Traverse a string until a delimiter is detected, then split at said
    delimiter, get module name by element index, create a dict consisting of
    module:version, and add dict to list of parsed modules.

    Args:
        file_: File to parse.

    Raises:
        OSError: If there's any issue accessing the file.

    Returns:
        list: dicts of {"name", "version"} for each requirement, excluding
        comments.
    """
    modules = []
    # https://www.python.org/dev/peps/pep-0508/#complete-grammar
    delim = ["<", ">", "=", "!", "~"]
    try:
        f = open_func(file_, "r")
    except OSError:
        logging.error("Failed on file: {}".format(file_))
        raise
    # BUG FIX: the previous try/except/finally called f.close() in `finally`
    # even when open() itself failed, raising NameError on the unbound `f`.
    # `with` closes the handle only after a successful open.
    with f:
        data = [x.strip() for x in f.readlines() if x != "\n"]
    # Keep only lines starting with a module name (drops comments and flags).
    data = [x for x in data if x[0].isalpha()]
    for x in data:
        if not any(y in x for y in delim):
            # Check for modules w/o a specifier.
            modules.append({"name": x, "version": None})
        for y in x:
            if y in delim:
                # Split at the first specifier char; strip the remaining '='
                # so "name==1.0" yields version "1.0".
                module = x.split(y)
                module_name = module[0]
                module_version = module[-1].replace("=", "")
                module = {"name": module_name, "version": module_version}
                if module not in modules:
                    modules.append(module)
                break
    return modules
|
def set_initial_state(self, initial_state, initial_frames):
    """Set the state that will be used on the next reset.

    Delegates to the wrapped environment, then caches the frames locally.
    """
    env = self.env
    env.set_initial_state(initial_state, initial_frames)
    self._initial_frames = initial_frames
|
async def acquire_lease_async(self, lease):
    """Acquire the lease on the desired partition for this EventProcessorHost.

    Note that it is legal to acquire a lease that is already owned by another host.
    Lease-stealing is how partitions are redistributed when additional hosts are started.

    :param lease: The stored lease to be acquired.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    :return: `True` if the lease was acquired successfully, `False` if not.
    :rtype: bool
    """
    retval = True
    new_lease_id = str(uuid.uuid4())
    partition_id = lease.partition_id
    try:
        # lease.state may be either a plain method or a coroutine function
        # depending on the lease implementation.
        if asyncio.iscoroutinefunction(lease.state):
            state = await lease.state()
        else:
            state = lease.state()
        if state == "leased":
            if not lease.token:
                # We reach here in a race condition: when this instance of EventProcessorHost
                # scanned the lease blobs, this partition was unowned (token is empty) but
                # between then and now, another instance of EPH has established a lease
                # (getLeaseState() is LEASED). We normally enforce that we only steal the lease
                # if it is still owned by the instance which owned it when we scanned, but we
                # can't do that when we don't know who owns it. The safest thing to do is just
                # fail the acquisition. If that means that one EPH instance gets more partitions
                # than it should, rebalancing will take care of that quickly enough.
                retval = False
            else:
                _logger.info("ChangingLease %r %r", self.host.guid, lease.partition_id)
                # Blob lease operations are blocking, so run them in the executor.
                await self.host.loop.run_in_executor(
                    self.executor,
                    functools.partial(
                        self.storage_client.change_blob_lease,
                        self.lease_container_name,
                        partition_id,
                        lease.token,
                        new_lease_id))
                lease.token = new_lease_id
        else:
            _logger.info("AcquiringLease %r %r", self.host.guid, lease.partition_id)
            lease.token = await self.host.loop.run_in_executor(
                self.executor,
                functools.partial(
                    self.storage_client.acquire_blob_lease,
                    self.lease_container_name,
                    partition_id,
                    self.lease_duration,
                    new_lease_id))
        lease.owner = self.host.host_name
        lease.increment_epoch()
        # check if this solves the issue
        retval = await self.update_lease_async(lease)
    except Exception as err:  # pylint: disable=broad-except
        # Any storage failure means the lease was not acquired.
        _logger.error("Failed to acquire lease %r %r %r", err, partition_id, lease.token)
        return False
    return retval
|
def is_escalable(self, notification, escalations, timeperiods):
    """Check if a notification can be escalated.

    Basically calls is_eligible for each escalation.

    :param notification: notification we would like to escalate
    :type notification: alignak.objects.notification.Notification
    :param escalations: Esclations objects, used to get escalation objects (period)
    :type escalations: alignak.objects.escalation.Escalations
    :param timeperiods: Timeperiods objects, used to get escalation period
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :return: True if notification can be escalated, otherwise False
    :rtype: bool
    """
    cls = self.__class__
    # How long we have been notifying; used by time-based escalations.
    elapsed = time.time() - notification.creation_time
    # True as soon as any escalation matches the current notification number.
    return any(
        escalations[esc_id].is_eligible(
            notification.t_to_go, self.state, notification.notif_nb,
            elapsed, cls.interval_length,
            timeperiods[escalations[esc_id].escalation_period])
        for esc_id in self.escalations
    )
|
def is_purine(nucleotide, allow_extended_nucleotides=False):
    """Return True when ``nucleotide`` is a purine.

    Raises ValueError for non-standard nucleotides unless
    ``allow_extended_nucleotides`` is set.
    """
    standard = nucleotide in STANDARD_NUCLEOTIDES
    if not (allow_extended_nucleotides or standard):
        raise ValueError(
            "{} is a non-standard nucleotide, neither purine or pyrimidine".format(nucleotide))
    return nucleotide in PURINE_NUCLEOTIDES
|
def triplifyGML(fname="foo.gml", fpath="./fb/", scriptpath=None, uid=None, sid=None, extra_info=None):
    """Produce a linked data publication tree from a standard GML file.

    INPUTS:
    => the file name (fname, with path) where the GML file
       of the friendship network is.
    => the final path (fpath) for the tree of files to be created.
    => a path to the script that is calling this function (scriptpath).
    => the numeric id (uid) of the facebook user of which fname holds a friendship network
    => the numeric id (sid) of the facebook user of which fname holds a friendship network
    => optional extra info (extra_info) recorded on the snapshot.

    OUTPUTS:
    the tree in the directory fpath.
    """
    # Snapshot name defaults to the file's basename without extension.
    aname = fname.split("/")[-1].split(".")[0]
    # Special-cased filename patterns for known donors; each branch extracts
    # the donor name and the snapshot date from the filename.
    if "RonaldCosta" in fname:
        aname = fname.split("/")[-1].split(".")[0]
        name, day, month, year = re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).gml", fname)[0]
        datetime_snapshot = datetime.datetime(*[int(i) for i in (year, month, day)]).isoformat().split("T")[0]
        name_ = "Ronald Scherolt Costa"
    elif "AntonioAnzoategui" in fname:
        aname = re.findall(".*/([a-zA-Z]*\d*)", fname)[0]
        name, year, month, day, hour, minute = re.findall(r".*/([a-zA-Z]*).*_(\d+)_(\d*)_(\d*)_(\d*)_(\d*)_.*", fname)[0]
        # isoformat()[:-3] drops seconds, keeping YYYY-MM-DDTHH:MM.
        datetime_snapshot = datetime.datetime(*[int(i) for i in (year, month, day, hour, minute)]).isoformat()[:-3]
        name_ = "Antônio Anzoategui Fabbri"
    elif re.findall(".*/[a-zA-Z]*(\d)", fname):
        # Generic "NameDDMMYYYY.gml" pattern.
        name, day, month, year = re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gml", fname)[0]
        datetime_snapshot = datetime.datetime(*[int(i) for i in (year, month, day)]).isoformat().split("T")[0]
        name_ = " ".join(re.findall("[A-Z][^A-Z]*", name))
    elif re.findall("[a-zA-Z]*_", fname):
        # Generic "Name_YYYY_MM_DD_HH_MM" pattern.
        name, year, month, day, hour, minute = re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gml", fname)[0]
        datetime_snapshot = datetime.datetime(*[int(i) for i in (year, month, day, hour, minute)]).isoformat().split("T")[0]
        name_ = " ".join(re.findall("[A-Z][^A-Z]*", name))
    else:
        # NOTE(review): `name` (and `datetime_snapshot`) are only bound in the
        # branches above; if none matched, this line raises NameError -- it
        # looks like inputs are assumed to always match a pattern. Confirm.
        name_ = " ".join(re.findall("[A-Z][^A-Z]*", name))
    aname += "_fb"
    name += "_fb"
    c("started snapshot", aname)
    # Base RDF graphs: tg holds the network triples, tg2 the metadata.
    tg = P.rdf.makeBasicGraph([["po", "fb"], [P.rdf.ns.per, P.rdf.ns.fb]], "the {} facebook ego friendship network")
    tg2 = P.rdf.makeBasicGraph([["po", "fb"], [P.rdf.ns.per, P.rdf.ns.fb]], "RDF metadata for the facebook friendship network of my son")
    snapshot = P.rdf.IC([tg2], P.rdf.ns.po.FacebookSnapshot, aname, "Snapshot {}".format(aname))
    extra_uri = extra_val = []
    if extra_info:
        extra_uri = [NS.po.extraInfo]
        extra_val = [extra_info]
    # Attach all snapshot-level metadata (provenance, file URLs, donor info).
    P.rdf.link([tg2], snapshot, "Snapshot {}".format(aname),
               [P.rdf.ns.po.createdAt, P.rdf.ns.po.triplifiedIn, P.rdf.ns.po.donatedBy,
                P.rdf.ns.po.availableAt, P.rdf.ns.po.originalFile,
                P.rdf.ns.po.onlineTranslateXMLFile, P.rdf.ns.po.onlineTranslateTTLFile,
                P.rdf.ns.po.translateXMLFile, P.rdf.ns.po.translateTTLFile,
                P.rdf.ns.po.onlineMetaXMLFile, P.rdf.ns.po.onlineMetaTTLFile,
                P.rdf.ns.po.metaXMLFilename, P.rdf.ns.po.metaTTLFilename,
                P.rdf.ns.po.acquiredThrough, P.rdf.ns.rdfs.comment,
                P.rdf.ns.fb.uid, P.rdf.ns.fb.sid] + extra_uri,
               [datetime_snapshot, datetime.datetime.now(), name,
                "https://github.com/ttm/{}".format(aname),
                "https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname, fname.split("/")[-1]),
                "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.rdf".format(aname, aname),
                "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname, aname),
                "{}Translate.rdf".format(aname),
                "{}Translate.ttl".format(aname),
                "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.rdf".format(aname, aname),
                "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname, aname),
                "{}Meta.owl".format(aname),
                "{}Meta.ttl".format(aname),
                "Netvizz",
                "The facebook friendship network from {}".format(name_),
                uid, sid] + extra_val)
    # for friend_attr in fg2["friends"]:
    c((aname, name_, datetime_snapshot))
    fg2 = x.read_gml(fname)
    c("read gml")
    # NOTE(review): this loop variable shadows the `uid` parameter used above.
    for uid in fg2:
        c(uid)
        ind = P.rdf.IC([tg], P.rdf.ns.fb.Participant, "{}-{}".format(aname, uid))
        # Some exports carry a locale attribute, some don't.
        if "locale" in fg2.node[uid].keys():
            data = [fg2.node[uid][attr] for attr in ("id", "label", "locale", "sex", "agerank", "wallcount")]
            uris = [NS.fb.gid, NS.fb.name, NS.fb.locale, NS.fb.sex, NS.fb.agerank, NS.fb.wallcount]
        else:
            data = [fg2.node[uid][attr] for attr in ("id", "label", "sex", "agerank", "wallcount")]
            uris = [NS.fb.gid, NS.fb.name, NS.fb.sex, NS.fb.agerank, NS.fb.wallcount]
        P.rdf.link([tg], ind, None, uris, data, draw=False)
        P.rdf.link_([tg], ind, None, [NS.po.snapshot], [snapshot], draw=False)
    # friends_ = [fg2["friends"][i] for i in ("name", "label", "locale", "sex", "agerank")]
    # for name, label, locale, sex, agerank in zip(*friends_):
    #     ind = P.rdf.IC([tg], P.rdf.ns.fb.Participant, name, label)
    #     P.rdf.link([tg], ind, label, [P.rdf.ns.fb.uid, P.rdf.ns.fb.name,
    #                P.rdf.ns.fb.locale, P.rdf.ns.fb.sex,
    #                P.rdf.ns.fb.agerank],
    #                [name, label, locale, sex, agerank])
    c("escritos participantes")
    # friendships_ = [fg2["friendships"][i] for i in ("node1", "node2")]
    # Write one Friendship individual per edge, plus a symmetric friend link.
    i = 1
    for uid1, uid2 in fg2.edges():
        flabel = "{}-{}-{}".format(aname, uid1, uid2)
        ind = P.rdf.IC([tg], P.rdf.ns.fb.Friendship, flabel)
        uids = [P.rdf.IC(None, P.rdf.ns.fb.Participant, "{}-{}".format(aname, i)) for i in (uid1, uid2)]
        P.rdf.link_([tg], ind, flabel, [NS.po.snapshot] + [NS.fb.member] * 2, [snapshot] + uids, draw=False)
        P.rdf.L_([tg], uids[0], P.rdf.ns.fb.friend, uids[1])
        # Progress marker every 1000 edges.
        if (i % 1000) == 0:
            c(i)
        i += 1
    c("escritas amizades")
    # Merge the network and metadata graphs and write everything to disk.
    tg_ = [tg[0] + tg2[0], tg[1]]
    fpath_ = "{}/{}/".format(fpath, aname)
    P.rdf.writeAll(tg_, aname + "Translate", fpath_, False, 1)
    # copy the script that generated this code
    if not os.path.isdir(fpath_ + "scripts"):
        os.mkdir(fpath_ + "scripts")
    # shutil.copy(this_dir + "/../tests/rdfMyFNetwork2.py", fpath + "scripts/")
    shutil.copy(scriptpath, fpath_ + "scripts/")
    # copy the base data
    if not os.path.isdir(fpath_ + "base"):
        os.mkdir(fpath_ + "base")
    shutil.copy(fname, fpath_ + "base/")
    P.rdf.writeAll(tg2, aname + "Meta", fpath_, False)
    # write a README
    with open(fpath_ + "README", "w") as f:
        f.write("""This repo delivers RDF data from the facebook
friendship network of {} ({}) collected at {}.
It has {} friends with metadata {};
and {} friendships.
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Netvizz in data/\n""".format(name_, aname, datetime_snapshot, fg2.number_of_nodes(), "name, locale (maybe), sex, agerank and wallcount", fg2.number_of_edges()))
|
def venv_resolve_deps ( deps , which , project , pre = False , clear = False , allow_global = False , pypi_mirror = None , dev = False , pipfile = None , lockfile = None , keep_outdated = False ) :
    """Resolve dependencies for a pipenv project, acts as a portal to the target environment.

    Regardless of whether a virtual environment is present or not, this will spawn
    a subprocess which is isolated to the target environment and which will perform
    dependency resolution. This function reads the output of that call and mutates
    the provided lockfile accordingly, returning nothing.

    :param List[:class:`~requirementslib.Requirement`] deps: A list of dependencies to resolve.
    :param Callable which: Resolver for executable names (e.g. locates "python").
    :param project: The pipenv Project instance to use during resolution
    :param Optional[bool] pre: Whether to resolve pre-release candidates, defaults to False
    :param Optional[bool] clear: Whether to clear the cache during resolution, defaults to False
    :param Optional[bool] allow_global: Whether to use *sys.executable* as the python binary, defaults to False
    :param Optional[str] pypi_mirror: A URL to substitute any time *pypi.org* is encountered, defaults to None
    :param Optional[bool] dev: Whether to target *dev-packages* or not, defaults to False
    :param pipfile: A Pipfile section to operate on, defaults to None
    :type pipfile: Optional[Dict[str, Union[str, Dict[str, bool, List[str]]]]]
    :param Dict[str, Any] lockfile: A project lockfile to mutate, defaults to None
    :param bool keep_outdated: Whether to retain outdated dependencies and resolve with them in mind, defaults to False
    :raises RuntimeError: Raised on resolution failure
    :return: Nothing
    :rtype: None
    """
    from . vendor . vistir . misc import fs_str
    from . vendor . vistir . compat import Path , JSONDecodeError , NamedTemporaryFile
    from . vendor . vistir . path import create_tracked_tempdir
    from . import resolver
    from . _compat import decode_for_output
    import json
    results = [ ]
    # Select the Pipfile/lockfile sections to resolve against.
    pipfile_section = "dev-packages" if dev else "packages"
    lockfile_section = "develop" if dev else "default"
    if not deps :
        if not project . pipfile_exists :
            return None
        deps = project . parsed_pipfile . get ( pipfile_section , { } )
    if not deps :
        # Nothing to resolve.
        return None
    if not pipfile :
        pipfile = getattr ( project , pipfile_section , { } )
    if not lockfile :
        lockfile = project . _lockfile
    req_dir = create_tracked_tempdir ( prefix = "pipenv" , suffix = "requirements" )
    # Invoke the resolver module as a script; rstrip("co") maps .pyc/.pyo -> .py.
    cmd = [ which ( "python" , allow_global = allow_global ) , Path ( resolver . __file__ . rstrip ( "co" ) ) . as_posix ( ) ]
    if pre :
        cmd . append ( "--pre" )
    if clear :
        cmd . append ( "--clear" )
    if allow_global :
        cmd . append ( "--system" )
    if dev :
        cmd . append ( "--dev" )
    # The subprocess writes its JSON results to this temporary file.
    target_file = NamedTemporaryFile ( prefix = "resolver" , suffix = ".json" , delete = False )
    target_file . close ( )
    cmd . extend ( [ "--write" , make_posix ( target_file . name ) ] )
    with temp_environ ( ) :
        # Pass the full (fs-encoded) environment plus resolver configuration
        # to the subprocess via environment variables.
        os . environ . update ( { fs_str ( k ) : fs_str ( val ) for k , val in os . environ . items ( ) } )
        if pypi_mirror :
            os . environ [ "PIPENV_PYPI_MIRROR" ] = str ( pypi_mirror )
        os . environ [ "PIPENV_VERBOSITY" ] = str ( environments . PIPENV_VERBOSITY )
        os . environ [ "PIPENV_REQ_DIR" ] = fs_str ( req_dir )
        os . environ [ "PIP_NO_INPUT" ] = fs_str ( "1" )
        os . environ [ "PIPENV_SITE_DIR" ] = get_pipenv_sitedir ( )
        if keep_outdated :
            os . environ [ "PIPENV_KEEP_OUTDATED" ] = fs_str ( "1" )
        with create_spinner ( text = decode_for_output ( "Locking..." ) ) as sp :
            # This conversion is somewhat slow on local and file-type requirements since
            # we now download those requirements / make temporary folders to perform
            # dependency resolution on them, so we are including this step inside the
            # spinner context manager for the UX improvement
            sp . write ( decode_for_output ( "Building requirements..." ) )
            deps = convert_deps_to_pip ( deps , project , r = False , include_index = True )
            constraints = set ( deps )
            os . environ [ "PIPENV_PACKAGES" ] = str ( "\n" . join ( constraints ) )
            sp . write ( decode_for_output ( "Resolving dependencies..." ) )
            c = resolve ( cmd , sp )
            results = c . out . strip ( )
            sp . green . ok ( environments . PIPENV_SPINNER_OK_TEXT . format ( "Success!" ) )
    try :
        # The authoritative results come from the JSON file written by the
        # subprocess, not from its stdout.
        with open ( target_file . name , "r" ) as fh :
            results = json . load ( fh )
    except ( IndexError , JSONDecodeError ) :
        # Resolution failed; surface the subprocess output for debugging.
        click_echo ( c . out . strip ( ) , err = True )
        click_echo ( c . err . strip ( ) , err = True )
        if os . path . exists ( target_file . name ) :
            os . unlink ( target_file . name )
        raise RuntimeError ( "There was a problem with locking." )
    if os . path . exists ( target_file . name ) :
        os . unlink ( target_file . name )
    if environments . is_verbose ( ) :
        click_echo ( results , err = True )
    if lockfile_section not in lockfile :
        lockfile [ lockfile_section ] = { }
    # Mutate the caller-supplied lockfile in place with the resolved set.
    prepare_lockfile ( results , pipfile , lockfile [ lockfile_section ] )
|
def _generate_field_with_default ( ** kwargs ) :
"""Only called if field . default ! = NOT _ PROVIDED"""
|
field = kwargs [ 'field' ]
if callable ( field . default ) :
return field . default ( )
return field . default
|
def _populate_profile_flags_from_dn_regex ( self , profile ) :
"""Populate the given profile object flags from AUTH _ LDAP _ PROFILE _ FLAGS _ BY _ DN _ REGEX .
Returns True if the profile was modified"""
|
save_profile = True
for field , regex in self . settings . PROFILE_FLAGS_BY_DN_REGEX . items ( ) :
field_value = False
if re . search ( regex , self . _get_user_dn ( ) , re . IGNORECASE ) :
field_value = True
setattr ( profile , field , field_value )
save_profile = True
return save_profile
|
def replace_body_vars ( self , body ) :
    """Substitute backend-specific values for every placeholder in *body*.

    The mapping is taken from the ``job_vars`` attribute; each key found in
    the multiline job-script body is replaced by its value. Returns the
    substituted body.
    """
    result = body
    for placeholder , replacement in self . job_vars . items ( ) :
        result = result . replace ( placeholder , replacement )
    return result
|
def compare_ecp_pots ( potential1 , potential2 , compare_meta = False , rel_tol = 0.0 ) :
    '''Compare two ECP potentials for approximate equality.

    Angular momentum and (integer) r-exponents are compared exactly;
    gaussian exponents and coefficients must agree within ``rel_tol``.
    When ``compare_meta`` is True, the ``ecp_type`` metadata must also
    match exactly.
    '''
    # Cheap exact comparisons first.
    if potential1 [ 'angular_momentum' ] != potential2 [ 'angular_momentum' ] :
        return False
    if potential1 [ 'r_exponents' ] != potential2 [ 'r_exponents' ] :
        return False
    # Tolerance-based comparison of the floating-point data.
    if not _compare_vector ( potential1 [ 'gaussian_exponents' ] , potential2 [ 'gaussian_exponents' ] , rel_tol ) :
        return False
    if not _compare_matrix ( potential1 [ 'coefficients' ] , potential2 [ 'coefficients' ] , rel_tol ) :
        return False
    # Optionally require matching metadata as well.
    if compare_meta and potential1 [ 'ecp_type' ] != potential2 [ 'ecp_type' ] :
        return False
    return True
|
def main ( ) :
    """
    NAME
        di_eq.py

    DESCRIPTION
        converts dec, inc pairs to x,y pairs using equal area projection
        NB: do only upper or lower hemisphere at a time: does not distinguish between up and down.

    SYNTAX
        di_eq.py [command line options] [< filename]

    OPTIONS
        -h prints help message and quits
        -f FILE, input file
    """
    if '-h' in sys . argv :
        print ( main . __doc__ )
        sys . exit ( )
    if '-f' in sys . argv :
        ind = sys . argv . index ( '-f' )
        file = sys . argv [ ind + 1 ]
        # FIX: numpy.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        DI = numpy . loadtxt ( file , dtype = float )
    else :
        # read from standard input
        DI = numpy . loadtxt ( sys . stdin , dtype = float )
    Ds = DI . transpose ( ) [ 0 ]
    Is = DI . transpose ( ) [ 1 ]
    if len ( DI ) > 1 :  # array of data
        XY = pmag . dimap_V ( Ds , Is )
        for xy in XY :
            print ( '%f %f' % ( xy [ 0 ] , xy [ 1 ] ) )
    else :  # single data point
        XY = pmag . dimap ( Ds , Is )
        print ( '%f %f' % ( XY [ 0 ] , XY [ 1 ] ) )
|
def _import ( self ) :
    """Lazily import the optional dependencies and log in.

    Stashes ``os.path`` and ``gspread`` on the instance, then performs the
    initial login via ``self._login()``.

    :return: None
    """
    import os . path
    import gspread
    self . path , self . gspread = os . path , gspread
    self . _login ( )
|
def reduce ( self , values , inplace = True ) :
    """Reduce the factor to the context of the given variable values.

    Parameters
    ----------
    values : list of (variable_name, variable_value) tuples
        Assignments to condition the factor on.
    inplace : bool
        If True (default), modify this factor in place and return None;
        otherwise return a reduced copy of this ContinuousFactor.

    Returns
    -------
    ContinuousFactor or None
        A new reduced factor when ``inplace=False``, otherwise None.
    """
    target = self if inplace else self . copy ( )
    # Delegate the actual conditioning to the underlying distribution.
    target . distribution = target . distribution . reduce ( values , inplace = False )
    if not inplace :
        return target
|
def hidden_from ( self , a , b ) :
    """Return True if ``a`` is hidden in a different box than ``b``."""
    if a not in self . hidden_indices :
        return False
    return not self . in_same_box ( a , b )
|
def get_sensor_data ( ** kwargs ) :
    '''Get sensor readings

    Iterates sensor reading objects

    :param kwargs:
        - api_host=127.0.0.1
        - api_user=admin
        - api_pass=example
        - api_port=623
        - api_kg=None

    CLI Example:

    .. code-block:: bash

        salt-call ipmi.get_sensor_data api_host=127.0.0.1 api_user=admin api_pass=pass
    '''
    import ast
    readings = { }
    with _IpmiCommand ( ** kwargs ) as session :
        for sensor_reading in session . get_sensor_data ( ) :
            if not sensor_reading :
                continue
            # Round-trip through repr/literal_eval to turn the reading
            # object into a plain dict of its fields.
            entry = ast . literal_eval ( repr ( sensor_reading ) )
            readings [ entry . pop ( 'name' ) ] = entry
    return readings
|
def calculate_bucket_level ( k , b ) :
    """Calculate the level at which a 0-based bucket ``b`` lives
    inside of a k-ary heap."""
    assert k >= 2
    if k == 2 :
        # Binary heaps have a closed-form answer.
        return log2floor ( b + 1 )
    # Smallest h with k**(h+1) >= (k-1)*(b+1)+1 is the bucket's level.
    node_count = ( k - 1 ) * ( b + 1 ) + 1
    level = 0
    while k ** ( level + 1 ) < node_count :
        level += 1
    return level
|
def reciprocal_remove ( self ) :
    """Remove result rows whose n-gram is not present in at least one
    text in each labelled set of texts."""
    self . _logger . info ( 'Removing n-grams that are not attested in all labels' )
    filtered = self . _reciprocal_remove ( self . _matches )
    self . _matches = filtered
|
def virtual ( opts , virtualname , filename ) :
    '''Returns the __virtual__ value for NAPALM-based modules.

    Loads the module (returning ``virtualname``) only when a supported
    NAPALM version is importable and we are running as a (proxy) minion;
    otherwise returns the standard Salt ``(False, reason)`` tuple.
    '''
    if ( ( HAS_NAPALM and NAPALM_MAJOR >= 2 ) or HAS_NAPALM_BASE ) and ( is_proxy ( opts ) or is_minion ( opts ) ) :
        return virtualname
    else :
        # FIX: the message previously contained a doubled quote and formatted
        # the literal '((unknown))' instead of the actual filename.
        return ( False , ( '"{vname}" ({filename}) cannot be loaded: ' 'NAPALM is not installed: ``pip install napalm``' ) . format ( vname = virtualname , filename = filename ) )
|
def get_storage_pools ( self , id_or_uri ) :
    """Get the list of storage pools belonging to the storage system
    referred to by ``id_or_uri``.

    Args:
        id_or_uri: Either the storage system ID (serial number) or the
            storage system URI.

    Returns:
        dict: Storage pools of the storage system.
    """
    storage_pools_uri = "{}{}" . format ( self . _client . build_uri ( id_or_uri ) , "/storage-pools" )
    return self . _client . get ( storage_pools_uri )
|
def check_covariance_Kgrad_x ( covar , relchange = 1E-5 , threshold = 1E-2 , check_diag = True ) :
    """Numerically verify the analytic gradient of a covariance function.

    Thin wrapper around the SWIG-exported
    ``ACovarianceFunction_check_covariance_Kgrad_x``.

    Parameters
    ----------
    covar : limix ACovarianceFunction
        Covariance function to check.
    relchange : float
        Relative step used for the finite-difference comparison.
    threshold : float
        Maximum tolerated deviation between analytic and numeric gradients.
    check_diag : bool
        Whether to also check the diagonal terms.

    Returns
    -------
    bool
        True when the gradients agree within ``threshold``.
    """
    return _core . ACovarianceFunction_check_covariance_Kgrad_x ( covar , relchange , threshold , check_diag )
|
def _py2round ( x ) :
"""This function returns a rounded up value of the argument , similar
to Python 2."""
|
if hasattr ( x , '__iter__' ) :
rx = np . empty_like ( x )
m = x >= 0.0
rx [ m ] = np . floor ( x [ m ] + 0.5 )
m = np . logical_not ( m )
rx [ m ] = np . ceil ( x [ m ] - 0.5 )
return rx
else :
if x >= 0.0 :
return np . floor ( x + 0.5 )
else :
return np . ceil ( x - 0.5 )
|
def _check_spades_log_file ( logfile ) :
    '''SPAdes can fail with a strange error. Stop everything if this happens.'''
    log = pyfastaq . utils . open_file_read ( logfile )
    for line in log :
        # The signature of the known SPAdes crash (err code -7).
        crashed = line . startswith ( '== Error == system call for:' ) and line . rstrip ( ) . endswith ( 'finished abnormally, err code: -7' )
        if crashed :
            pyfastaq . utils . close ( log )
            print ( 'Error running SPAdes. Cannot continue. This is the error from the log file' , logfile , '...' , file = sys . stderr )
            print ( line , file = sys . stderr )
            raise Error ( 'Fatal error ("err code: -7") running spades. Cannot continue' )
    pyfastaq . utils . close ( log )
    return True
|
def fill_subparser ( subparser ) :
    """Sets up a subparser to convert the ILSVRC2012 dataset files.

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `ilsvrc2012` command.
    """
    subparser . add_argument (
        "--shuffle-seed" ,
        help = "Seed to use for randomizing order of the " "training set on disk." ,
        default = config . default_seed ,
        type = int ,
        required = False ,
    )
    return convert_ilsvrc2012
|
def set_value ( self , value : str ) :
    """Sets the displayed digits based on the value string.

    :param value: a string containing an integer or float value
    :return: None
    :raises ValueError: when the value has more digits than the display
    """
    # Blank every digit widget before writing the new value.
    # FIX: was a list comprehension used purely for side effects.
    for digit in self . _digits :
        digit . clear ( )
    grouped = self . _group ( value )  # assumes per-digit tokens, right-to-left — TODO confirm
    # Fill from right to left, so walk the digit widgets in reverse.
    digits = self . _digits [ : : - 1 ]
    has_period = False
    for i , digit_value in enumerate ( grouped ) :
        try :
            if has_period :
                # Attach the pending decimal point to this digit's value.
                digits [ i ] . set_value ( digit_value + '.' )
                has_period = False
            elif grouped [ i ] == '.' :
                # The period occupies no digit of its own; remember it for
                # the next token. NOTE(review): the index still advances for
                # this token — confirm _group's output accounts for that.
                has_period = True
            else :
                digits [ i ] . set_value ( digit_value )
        except IndexError :
            raise ValueError ( 'the value "{}" contains too ' 'many digits' . format ( value ) )
|
def resolve ( self , uri = None , ** parts ) :
    """Attempt to resolve a new URI given an updated URI, partial or complete."""
    if uri :
        # Join the new URI against this one per RFC 3986 resolution rules.
        resolved = self . __class__ ( urljoin ( str ( self ) , str ( uri ) ) )
    else :
        resolved = self . __class__ ( self )
    for component , value in parts . items ( ) :
        if component not in self . __all_parts__ :
            raise TypeError ( "Unknown URI component: " + component )
        setattr ( resolved , component , value )
    return resolved
|
def disable ( cls , user_id , github_id , name ) :
    """Disable webhooks for a repository.

    Clears the hook and owner of the repository row if it exists in the DB.

    :param user_id: User identifier.
    :param github_id: GitHub id of the repository.
    :param name: Fully qualified name of the repository.
    """
    repository = cls . get ( user_id , github_id = github_id , name = name )
    repository . hook = None
    repository . user_id = None
    return repository
|
def select_default ( self ) :
    """Reset the combo box to the original "selected" value from the
    constructor, or to the first value if no selected value was given."""
    if self . _default is None :
        # No explicit default: fall back to the first option.
        selected = self . _set_option_by_index ( 0 )
        if not selected :
            utils . error_format ( self . description + "\n" + "Unable to select default option as the Combo is empty" )
        return
    if not self . _set_option ( self . _default ) :
        utils . error_format ( self . description + "\n" + "Unable to select default option as it doesnt exist in the Combo" )
|
def _GetUncompressedStreamSize ( self ) :
    """Retrieves the uncompressed stream size.

    Rewinds the file object and decompresses the entire stream once,
    accumulating the number of uncompressed bytes produced.

    Returns:
      int: uncompressed stream size.
    """
    # Start from the beginning of the compressed stream with a fresh
    # decompressor; discard any previously buffered data.
    self . _file_object . seek ( 0 , os . SEEK_SET )
    self . _decompressor = self . _GetDecompressor ( )
    self . _uncompressed_data = b''
    compressed_data_offset = 0
    compressed_data_size = self . _file_object . get_size ( )
    uncompressed_stream_size = 0
    while compressed_data_offset < compressed_data_size :
        # _ReadCompressedData decompresses one chunk and updates
        # self._uncompressed_data_size as a side effect.
        read_count = self . _ReadCompressedData ( self . _COMPRESSED_DATA_BUFFER_SIZE )
        if read_count == 0 :
            # No more compressed data could be read.
            break
        compressed_data_offset += read_count
        uncompressed_stream_size += self . _uncompressed_data_size
    return uncompressed_stream_size
|
def mod_liu ( q , w ) :
    r"""Joint significance of statistics derived from chi2-squared distributions.

    Moment-matching (Liu-style) approximation of a weighted sum of
    chi-squared variables by a single chi-squared distribution.

    Parameters
    ----------
    q : float
        Test statistics.
    w : array_like
        Weights of the linear combination.

    Returns
    -------
    tuple
        (estimated p-value, mean, standard deviation, degrees of freedom),
        mean and standard deviation rescaled to the original weight scale.
    """
    q = asarray ( q , float )
    if not all ( isfinite ( atleast_1d ( q ) ) ) :
        raise ValueError ( "There are non-finite values in `q`." )
    w = asarray ( w , float )
    if not all ( isfinite ( atleast_1d ( w ) ) ) :
        raise ValueError ( "There are non-finite values in `w`." )
    # Normalise the weights in place; d rescales the outputs at the end.
    d = sum ( w )
    w /= d
    # Cumulant-like sums of powers of the normalised weights.
    c1 , c2 , c3 , c4 = ( sum ( w ** k ) for k in ( 1 , 2 , 3 , 4 ) )
    s1 = c3 / c2 ** 1.5
    s2 = c4 / c2 ** 2
    muQ = c1
    sigmaQ = sqrt ( 2 * c2 )
    if s1 ** 2 > s2 :
        a = 1 / ( s1 - sqrt ( s1 ** 2 - s2 ) )
        delta = s1 * a ** 3 - a ** 2
        l = a ** 2 - 2 * delta
        if l < 0 :
            raise RuntimeError ( "This term cannot be negative." )
    else :
        delta = 0
        l = 1 / s2
        a = sqrt ( l )
    Q_norm = ( q / d - muQ ) / sigmaQ * sqrt ( 2 * l ) + l
    Qq = atleast_1d ( chi2 ( df = l ) . sf ( Q_norm ) ) [ 0 ]
    return ( Qq , muQ * d , sigmaQ * d , l )
|
def mk_definition ( defn ) :
    """Instantiate a struct or SBP message specification from a parsed
    "AST" of a struct or message.

    Parameters
    ----------
    defn : dict
        Single-entry mapping of identifier -> contents.

    Returns
    -------
    A Definition or a specialization of a definition, like a Struct.
    """
    assert len ( defn ) == 1
    identifier , contents = next ( iter ( defn . items ( ) ) )
    fields = [ mk_field ( field_spec ) for field_spec in contents . get ( 'fields' , [ ] ) ]
    definition = sbp . Definition (
        identifier = identifier ,
        sbp_id = contents . get ( 'id' , None ) ,
        short_desc = contents . get ( 'short_desc' , None ) ,
        desc = contents . get ( 'desc' , None ) ,
        type_id = contents . get ( 'type' ) ,
        fields = fields ,
        public = contents . get ( 'public' , True ) ,
    )
    return sbp . resolve_type ( definition )
|
def validate_depth ( self , depth : DepthDefinitionType ) -> Optional [ int ] :
    """Convert ``depth`` to int and validate that the value can be used.

    A ``None`` depth is passed through unchanged.

    :raise ValueError: If the provided depth is not a positive integer.
    """
    if depth is None :
        return None
    try :
        depth = int ( depth )
    except ValueError :
        raise ValueError ( f"Depth '{depth}' can't be converted to int." )
    if depth < 1 :
        raise ValueError ( f"Depth '{depth}' isn't a positive number" )
    return depth
|
def get_data_by_time ( path , columns , dates , start_time = '00:00' , end_time = '23:59' ) :
    """Extract columns of data from a ProCoDA datalog based on date(s) and time(s).

    Note: Column 0 is time. The first data column is column 1.

    :param path: The path to the folder containing the ProCoDA data file(s)
    :type path: string
    :param columns: A single column index OR a list of column indices to extract
    :type columns: int or int list
    :param dates: A single date or list of dates, formatted "M-D-YYYY"
    :type dates: string or string list
    :param start_time: Starting time of data to extract, 'HH:MM' (24-hour time)
    :type start_time: string, optional
    :param end_time: Ending time of data to extract, 'HH:MM' (24-hour time)
    :type end_time: string, optional
    :return: the single extracted column, OR a list of extracted columns in the
        order given in ``columns``
    :rtype: list or list list
    """
    day_data = data_from_dates ( path , dates )
    # Clamp the requested start to the first recorded timestamp of day one.
    first_day_times = pd . to_numeric ( day_data [ 0 ] . iloc [ : , 0 ] )
    start_frac = max ( day_fraction ( start_time ) , first_day_times [ 0 ] )
    start_idx = time_column_index ( start_frac , first_day_times )
    # The end index is located in the last day's time column (inclusive).
    last_day_times = pd . to_numeric ( day_data [ - 1 ] . iloc [ : , 0 ] )
    end_idx = time_column_index ( day_fraction ( end_time ) , last_day_times ) + 1
    if isinstance ( columns , int ) :
        return column_start_to_end ( day_data , columns , start_idx , end_idx )
    return [ column_start_to_end ( day_data , c , start_idx , end_idx ) for c in columns ]
|
def get_link ( self , peer ) :
    """Retrieve the MQTT link to the given peer, creating it on first use.

    Returns None when the peer advertises no MQTT access.
    """
    mqtt_access = None
    for access in peer . accesses :
        if access . type == 'mqtt' :
            mqtt_access = access
            break
    if mqtt_access is None :
        # No MQTT access found
        return None
    # Key links by (host, port) so peers sharing a broker share a link.
    server = ( mqtt_access . server . host , mqtt_access . server . port )
    with self . __lock :
        try :
            return self . _links [ server ]
        except KeyError :
            new_link = MQTTLink ( mqtt_access )
            self . _links [ server ] = new_link
            return new_link
|
def QueryService ( svc_name ) :
    """Query a Windows service and return its configuration."""
    scm_handle = win32service . OpenSCManager ( None , None , win32service . SC_MANAGER_ALL_ACCESS )
    config = None
    try :
        svc_handle = win32serviceutil . SmartOpenService ( scm_handle , svc_name , win32service . SERVICE_ALL_ACCESS )
        config = win32service . QueryServiceConfig ( svc_handle )
        win32service . CloseServiceHandle ( svc_handle )
    finally :
        # Always release the service-control-manager handle.
        win32service . CloseServiceHandle ( scm_handle )
    return config
|
def default_callback ( self , text ) :
    """Normalize CRLF line endings in *text*, print it, and return it."""
    normalized = text . replace ( "\r\n" , "\n" )
    normalized = "%s\n" % normalized
    flush_print ( normalized , sep = "" , end = "" )
    return normalized
|
def splitpath ( path ) :
    """Split a filepath into a tuple of all its directory and file components."""
    parts = [ ]
    remaining = path
    while True :
        head , tail = os . path . split ( remaining )
        if tail == '' :
            # Nothing left to peel off (root, empty, or trailing separator).
            parts . append ( head )
            break
        if head == '' :
            parts . append ( tail )
            break
        parts . append ( tail )
        remaining = head
    return tuple ( reversed ( parts ) )
|
def sample_indexes_by_distribution ( indexes , distributions , nsample ) :
    """Sample trajectory/time indexes according to the given probability distributions.

    Parameters
    ----------
    indexes : list of ndarray((N_i, 2))
        For each state, all (trajectory, time) index pairs where it occurs.
    distributions : list or array of ndarray((n))
        m distributions over states; each of length n and summing to 1.0.
    nsample : int
        Number of samples per distribution.

    Returns
    -------
    indexes : length-m list of ndarray((nsample, 2))
        The sampled (trajectory, time) pairs for each distribution.
    """
    num_states = len ( indexes )
    # Validate every distribution before sampling anything.
    for distribution in distributions :
        if len ( distribution ) != num_states :
            raise ValueError ( 'Size error: Distributions must all be of length n (number of states).' )
    sampled = np . ndarray ( ( len ( distributions ) ) , dtype = object )
    for pos , distribution in enumerate ( distributions ) :
        # Draw a state sequence, then map it to concrete (traj, time) pairs.
        state_sequence = np . random . choice ( num_states , size = nsample , p = distribution )
        sampled [ pos ] = sample_indexes_by_sequence ( indexes , state_sequence )
    return sampled
|
def _rm_is_alignment_line ( parts , s1_name , s2_name ) :
"""return true if the tokenized line is a repeatmasker alignment line .
: param parts : the line , already split into tokens around whitespace
: param s1 _ name : the name of the first sequence , as extracted from the header
of the element this line is in
: param s2 _ name : the name of the second sequence , as extracted from the header
of the element this line is in"""
|
if len ( parts ) < 2 :
return False
if _rm_name_match ( parts [ 0 ] , s1_name ) :
return True
if ( _rm_name_match ( parts [ 0 ] , s2_name ) or ( parts [ 0 ] == "C" and _rm_name_match ( parts [ 1 ] , s2_name ) ) ) :
return True
return False
|
def rank ( self ) :
    """Return the item's rank (if it has one) as a dict with the
    required score, name, and level; None when the item has no kill eater."""
    if self . _rank != { } :
        # Cached from a previous call; skip the lookups.
        return self . _rank
    try :
        levelkey , typename , count = self . kill_eaters [ 0 ]
    except IndexError :
        # Apparently no eater available
        self . _rank = None
        return None
    fallback = [ { "level" : 0 , "required_score" : 0 , "name" : "Strange" } ]
    for candidate in self . _ranks . get ( levelkey , fallback ) :
        self . _rank = candidate
        if count < candidate [ "required_score" ] :
            break
    return self . _rank
|
def parse_angular_length_quantity ( string_rep ) :
    """Given a string that is a number and a unit, return a Quantity of
    that string. Raise an error if there is no unit, e.g.:

        50" -> 50*u.arcsec
        50  -> CRTFRegionParserError: Units must be specified for 50
    """
    unit_mapping = { 'deg' : u . deg , 'rad' : u . rad , 'arcmin' : u . arcmin , 'arcsec' : u . arcsec , 'pix' : u . dimensionless_unscaled , '"' : u . arcsec , "'" : u . arcmin , }
    regex_str = re . compile ( r'([0-9+,-.]*)(.*)' )
    # FIX: the match object was previously bound to a local named `str`,
    # shadowing the builtin.
    match = regex_str . search ( string_rep )
    unit = match . group ( 2 )
    if not unit :
        raise CRTFRegionParserError ( 'Units must be specified for {0} ' . format ( string_rep ) )
    if unit in unit_mapping :
        return u . Quantity ( match . group ( 1 ) , unit = unit_mapping [ unit ] )
    # Fall back to astropy's own unit parsing for unknown unit strings.
    return u . Quantity ( match . group ( 1 ) )
|
def doesnt_have ( self , relation , boolean = 'and' , extra = None ) :
    """Add a relationship count to the query, requiring zero related rows.

    :param relation: The relation to count
    :type relation: str
    :param boolean: The boolean value
    :type boolean: str
    :param extra: The extra query
    :type extra: Builder or callable

    :rtype: Builder
    """
    # "Doesn't have" is simply "has fewer than one".
    return self . has ( relation , '<' , 1 , boolean , extra )
|
def cmd_certclone ( hostname , port , keyfile , certfile , copy_extensions , expired , verbose ) :
    """Connect to an SSL/TLS server, get the certificate and generate
    a certificate with the same options and field values.

    Note: The generated certificate is invalid, but can be used for social engineering attacks

    Example:

    $ habu.certclone www.google.com 443 /tmp/key.pem /tmp/cert.pem
    """
    tls_context = ssl . create_default_context ( )
    with socket . create_connection ( ( hostname , port ) , timeout = 3 ) as raw_socket :
        with tls_context . wrap_socket ( raw_socket , server_hostname = hostname ) as tls_socket :
            # Capture the peer certificate in DER form before closing.
            original = tls_socket . getpeercert ( binary_form = True )
    # Build a lookalike key/cert pair from the captured certificate.
    key , cert = certclone ( original , copy_extensions = copy_extensions , expired = expired )
    keyfile . write ( key )
    certfile . write ( cert )
|
def escape_filename_sh_ansic ( name ) :
    """Return an ansi-c shell-escaped version of a filename.

    Control characters become ``\\xNN`` escapes; backslashes and single
    quotes are escaped so the result is safe inside a bash ``$'...'``
    (ANSI-C) quoted string.
    """
    out = [ ]
    # gather the escaped characters into a list
    for ch in name :
        if ord ( ch ) < 32 :
            out . append ( "\\x%02x" % ord ( ch ) )
        elif ch == '\\' :
            out . append ( '\\\\' )
        elif ch == "'" :
            # FIX: an unescaped quote would terminate the $'...' string early.
            out . append ( "\\'" )
        else :
            out . append ( ch )
    # slap them back together in an ansi-c quote $'...'
    return "$'" + "" . join ( out ) + "'"
|
def hex_to_rgb ( color ) :
    """Convert from hex to an ``rgb(...)`` string.

    Parameters:
        color : string
            Color representation as hex (or rgb, which `normalize` expands).

    Example:
        hex_to_rgb('#E1E5ED')
        hex_to_rgb('#f03')
    """
    # normalize() expands shorthand (#f03) to the full 6-digit form.
    hex_digits = normalize ( color ) [ 1 : ]
    channels = tuple ( int ( hex_digits [ i : i + 2 ] , base = 16 ) for i in ( 0 , 2 , 4 ) )
    return 'rgb' + str ( channels )
|
def sg_sum ( tensor , opt ) :
    r"""Computes the sum of elements across axis of a tensor.

    See `tf.reduce_sum()` in tensorflow.

    Args:
      tensor: A `Tensor` with zero-padding (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    summed = tf . reduce_sum ( tensor , axis = opt . axis , keep_dims = opt . keep_dims , name = opt . name )
    return summed
|
def create_plot_option_dicts ( info , marker_types = None , colors = None , line_dash = None , size = None ) :
    """Create two dictionaries with plot-options (bokeh only).

    The first maps each group to a colour-based style; the second maps each
    sub-group to a marker-based style.

    Returns: group_styles (dict), sub_group_styles (dict)
    """
    logging . debug ( " - creating plot-options-dict (for bokeh)" )
    if marker_types is None :
        marker_types = [ "circle" , "square" , "triangle" , "invertedtriangle" , "diamond" , "cross" , "asterix" ]
    if line_dash is None :
        line_dash = [ 0 , 0 ]
    if size is None :
        size = 10
    groups = info . groups . unique ( )
    if colors is None :
        # Fall back to a brewer palette sized to the number of groups
        # (palette sizes range from 3 to 9).
        number_of_groups = len ( groups )
        palette_size = 3 if number_of_groups < 4 else min ( 9 , number_of_groups )
        colors = bokeh . palettes . brewer [ 'YlGnBu' ] [ palette_size ]
    color_cycle = itertools . cycle ( colors )
    group_styles = { }
    for group in groups :
        group_color = next ( color_cycle )
        group_styles [ group ] = {
            "marker" : { "line_color" : group_color , "fill_color" : group_color } ,
            "line" : { "line_color" : group_color } ,
        }
    marker_cycle = itertools . cycle ( marker_types )
    sub_group_styles = { }
    for sub_group in info . sub_groups . unique ( ) :
        sub_group_styles [ sub_group ] = {
            "marker" : { "marker" : next ( marker_cycle ) , "size" : size } ,
            "line" : { "line_dash" : line_dash } ,
        }
    return group_styles , sub_group_styles
|
def identifiers ( dataset_uri ) :
    """List the item identifiers in the dataset."""
    dataset = dtoolcore . DataSet . from_uri ( dataset_uri )
    for item_identifier in dataset . identifiers :
        click . secho ( item_identifier )
|
def get_module ( path ) :
    """Import and return an attribute given its dotted path.

    A modified duplicate of Django's built-in backend retriever, e.g.::

        slugify = get_module('django.template.defaultfilters.slugify')
    """
    try :
        from importlib import import_module
    except ImportError :
        # Very old Django fallback.
        from django . utils . importlib import import_module
    try :
        mod_name , func_name = path . rsplit ( '.' , 1 )
        mod = import_module ( mod_name )
    except ImportError as e :
        raise ImportError ( 'Error importing alert function {0}: "{1}"' . format ( mod_name , e ) )
    try :
        return getattr ( mod , func_name )
    except AttributeError :
        raise ImportError ( ( 'Module "{0}" does not define a "{1}" function' ) . format ( mod_name , func_name ) )
|
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Bug fix: the original message referenced 'show_instance' — a
        # copy/paste slip; this function is show_pricing.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound('The requested profile was not found.')

    # Make sure the profile belongs to Linode ('<name>:linode').
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException('The requested profile does not belong to Linode.')

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive longer periods from the hourly rate; monthly comes straight
    # from the unofficial price listing.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
|
def serial_udb_extra_f13_encode(self, sue_week_no, sue_lat_origin, sue_lon_origin, sue_alt_origin):
    '''
    Backwards compatible version of SERIAL_UDB_EXTRA F13: format

    sue_week_no    : Serial UDB Extra GPS Week Number (int16_t)
    sue_lat_origin : Serial UDB Extra MP Origin Latitude (int32_t)
    sue_lon_origin : Serial UDB Extra MP Origin Longitude (int32_t)
    sue_alt_origin : Serial UDB Extra MP Origin Altitude Above Sea Level (int32_t)
    '''
    msg = MAVLink_serial_udb_extra_f13_message(
        sue_week_no, sue_lat_origin, sue_lon_origin, sue_alt_origin)
    return msg
|
def default_pixmap(self, value):
    """
    Setter for the ``__default_pixmap`` attribute.

    :param value: Attribute value.
    :type value: QPixmap
    """
    if value is not None:
        assert type(value) is QPixmap, \
            "'{0}' attribute: '{1}' type is not 'QPixmap'!".format("default_pixmap", value)
    self.__default_pixmap = value
|
def register_account(self, account, portfolio_cookie=None):
    '''
    Register an account with a portfolio in this collection.

    ``account`` may also be a strategy object that implements ``on_bar``.

    :param account: the account (or strategy) to register
    :param portfolio_cookie: optional cookie identifying the target portfolio
    :return: tuple of (portfolio, account)
    '''
    # Pick the target portfolio: create one if none exist yet, otherwise
    # honour an explicit cookie, else fall back to the first portfolio.
    if not self.portfolio_list:
        portfolio = self.new_portfolio()
    elif portfolio_cookie is not None:
        portfolio = self.portfolio_list[portfolio_cookie]
    else:
        portfolio = next(iter(self.portfolio_list.values()))
    # Attach the account to the chosen portfolio.
    portfolio.add_account(account)
    return (portfolio, account)
|
def get_event(self, client, check):
    """Return the event recorded for the given client and check name."""
    response = self._request('GET', '/events/{}/{}'.format(client, check))
    return response.json()
|
def complex_check(*args, func=None):
    """Raise ``ComplexError`` unless every argument is a complex number."""
    # Default the reported function name to the caller's caller.
    caller = func or inspect.stack()[2][3]
    for value in args:
        if not isinstance(value, numbers.Complex):
            type_name = type(value).__name__
            raise ComplexError(
                f'Function {caller} expected complex number, {type_name} got instead.')
|
def unpack_float16(src):
    """Read and unpack a 16-bit float from *src*.

    The structure is:

    - 1 bit for the sign
    - 5 bits for the exponent, with an exponent bias of 16
    - 10 bits for the mantissa
    """
    bc = BitConsumer(src)
    sign = bc.u_get(1)
    exponent = bc.u_get(5)
    mantissa = bc.u_get(10)
    exponent -= 16
    mantissa /= 2 ** 10
    # Bug fix: the original wrote ``-1 ** sign`` which parses as
    # ``-(1 ** sign)`` and is therefore always -1, making every decoded
    # value negative regardless of the sign bit.
    sign_factor = (-1) ** sign
    # NOTE(review): a binary float16 would normally scale by 2 ** exponent;
    # the original scales by base 10, kept here — confirm against the
    # format specification this parser targets.
    return sign_factor * mantissa * (10 ** exponent)
|
def data_iterator_csv_dataset(uri, batch_size, shuffle=False, rng=None, normalize=True, with_memory_cache=True, with_file_cache=True, cache_dir=None, epoch_begin_callbacks=[], epoch_end_callbacks=[]):
    '''Get a data iterator that reads directly from a dataset provided as a CSV file.

    Files may live on the local file system, on http(s) servers, or in
    Amazon AWS S3 storage. For example::

        batch = data_iterator_csv_dataset('CSV_FILE.csv', batch_size, shuffle=True)

    Args:
        uri (str): Location of the dataset CSV file.
        batch_size (int): Size of a data unit.
        shuffle (bool): Whether the dataset is shuffled. Default False.
        rng (None or :obj:`numpy.random.RandomState`): Numpy random number
            generator.
        normalize (bool): If True, each sample gets normalized by a factor
            of 255. Default True.
        with_memory_cache (bool): If True, wrap the source in
            :py:class:`.data_source.DataSourceWithMemoryCache`; a good idea
            unless the source already provides on-memory data. Default True.
        with_file_cache (bool): If True, wrap the source in
            :py:class:`.data_source.DataSourceWithFileCache`; useful when the
            source is slow.
        cache_dir (str): Location of the file cache. If None, caches are
            created implicitly in a temporary directory and erased when the
            iterator finishes; otherwise the created cache is kept.
        epoch_begin_callbacks (list of functions): Each is called with the
            epoch index at the beginning of an epoch.
        epoch_end_callbacks (list of functions): Each is called with the
            epoch index at the end of an epoch.

    Returns:
        :py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
            Instance of DataIterator
    '''
    source = CsvDataSource(uri, shuffle=shuffle, rng=rng, normalize=normalize)
    return data_iterator(source,
                         batch_size=batch_size,
                         with_memory_cache=with_memory_cache,
                         with_file_cache=with_file_cache,
                         cache_dir=cache_dir,
                         epoch_begin_callbacks=epoch_begin_callbacks,
                         epoch_end_callbacks=epoch_end_callbacks)
|
def execute(self, conn, daoinput, transaction=False):
    """
    Update a migration request's status.

    Required keys in *daoinput*: ``migration_status``, ``migration_request_id``.
    """
    if not conn:
        dbsExceptionHandler(
            "dbsException-failed-connect2host",
            "Oracle/MigrationRequests/UpdateRequestStatus. Expects db connection from upper layer.",
            self.logger.exception)
    # Choose the statement by the requested status; 2 and 3 additionally
    # require the row still to be in status 1.
    status = daoinput['migration_status']
    if status == 1:
        sql = self.sql2
    elif status == 2:
        sql = self.sql + " and MIGRATION_STATUS = 1 "
    elif status == 3:
        sql = self.sql3 + " and MIGRATION_STATUS = 1 "
    else:
        dbsExceptionHandler(
            "dbsException-conflict-data",
            "Oracle/MigrationRequest/UpdateRequestStatus. Expected migration status to be 1, 2 or 3",
            self.logger.exception)
    # NOTE(review): the result of processData is discarded, as in the
    # original — confirm callers do not need a return value.
    result = self.dbi.processData(sql, daoinput, conn, transaction)
|
def render_to_string(template_name, context=None, request=None, using=None):
    """
    Load a template and render it with a context; return a string.

    *template_name* may be a single template name or a list/tuple of names.
    """
    # A sequence of names selects the first existing template.
    loader = select_template if isinstance(template_name, (list, tuple)) else get_template
    template = loader(template_name, using=using)
    return template.render(context, request)
|
def update_namelist_file(self, rapid_namelist_file, new_namelist_file=None):
    """Update an existing namelist file with the manager's parameters.

    Parameters
    ----------
    rapid_namelist_file: str
        Path of namelist file to use in the simulation. It will be
        updated with any parameters added to the RAPID manager.
    new_namelist_file: str, optional
        Path to output the updated namelist file. Defaults to
        overwriting *rapid_namelist_file* in place.
    """
    # Bug fix: test the path for truthiness BEFORE calling
    # os.path.exists(); the original order raised TypeError for None.
    if rapid_namelist_file and os.path.exists(rapid_namelist_file):
        log("Adding missing inputs from RAPID input file ...", "INFO")
        with open(rapid_namelist_file, 'r') as old_file:
            for line in old_file:
                line = line.strip()
                # Skip blank lines and lines not starting with a letter.
                if not line[:1].isalpha() or not line:
                    continue
                line_split = line.split("=")
                attr = line_split[0].strip()
                value = None
                if len(line_split) > 1:
                    value = line_split[1].strip().replace("'", "").replace('"', "")
                    # convert integers to integers
                    try:
                        value = int(value)
                    except ValueError:
                        pass
                # remove dots from beginning & end of value
                # NOTE(review): assumes BS* values are strings; an integer
                # here would raise AttributeError, as in the original.
                if attr.startswith('BS'):
                    value = value.replace(".", "")
                # add attribute if exists
                if attr in dir(self) and not attr.startswith('_'):
                    # set attribute if not set already
                    if not getattr(self, attr):
                        setattr(self, attr, value)
                else:
                    log("Invalid argument {0}. Skipping ...".format(attr), "INFO")
        if new_namelist_file is None:
            new_namelist_file = rapid_namelist_file
        self.generate_namelist_file(new_namelist_file)
    else:
        log("RAPID namelist file to update not found.", "ERROR")
|
def get_pmt(self, dom_id, channel_id):
    """Return the PMT identified by a DOM ID and a DAQ channel ID."""
    du, floor, _ = self.doms[dom_id]
    omkey = (du, floor, channel_id)
    return self.pmts[self._pmt_index_by_omkey[omkey]]
|
def copy_from(self, other):
    """Copy *other*'s state attributes onto *self*."""
    self.verbose = other.verbose
    self.quiet = other.quiet
    self.path = other.path
    self.name = other.name
    self.tracing = other.tracing
|
def form_fields(self):
    """Return fields of default form.

    Fill some fields with reasonable values.
    """
    # Start from the raw lxml form-field mapping.
    fields = dict(self.form.fields)
    # pylint: disable=no-member
    fields_to_remove = set()
    # Normalize multi-value wrappers (checkbox groups and multi-selects):
    # drop empty ones, unwrap singletons, listify the rest.
    for key, val in list(fields.items()):
        if isinstance(val, CheckboxValues):
            if not len(val):  # pylint: disable=len-as-condition
                del fields[key]
            elif len(val) == 1:
                fields[key] = val.pop()
            else:
                fields[key] = list(val)
        if isinstance(val, MultipleSelectOptions):
            if not len(val):  # pylint: disable=len-as-condition
                del fields[key]
            elif len(val) == 1:
                fields[key] = val.pop()
            else:
                fields[key] = list(val)
    for elem in self.form.inputs:  # pylint: disable=no-member
        # Ignore elements without name
        if not elem.get('name'):
            continue
        # Do not submit disabled fields
        # http://www.w3.org/TR/html4/interact/forms.html#h-17.12
        if elem.get('disabled'):
            if elem.name in fields:
                fields_to_remove.add(elem.name)
        elif getattr(elem, 'type', None) == 'checkbox':
            # Unchecked, valueless checkboxes are not submitted.
            if not elem.checked:
                if elem.name is not None:
                    if elem.name in fields and fields[elem.name] is None:
                        fields_to_remove.add(elem.name)
        else:
            # Any other kind of input rescues the name from removal.
            if elem.name in fields_to_remove:
                fields_to_remove.remove(elem.name)
            if elem.tag == 'select':
                # Give a valueless select its first option so it submits.
                if elem.name in fields and fields[elem.name] is None:
                    if elem.value_options:
                        fields[elem.name] = elem.value_options[0]
            elif getattr(elem, 'type', None) == 'radio':
                # Default an unselected radio group to this button's value.
                if fields[elem.name] is None:
                    fields[elem.name] = elem.get('value')
    for fname in fields_to_remove:
        del fields[fname]
    return fields
|
def put(self, request, bot_id, id, format=None):
    """
    Update an existing handler.

    serializer: HandlerUpdateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
        - code: 400
          message: Not valid request
    """
    # Delegate entirely to the generic detail-view implementation.
    response = super(HandlerDetail, self).put(request, bot_id, id, format)
    return response
|
def disable_reporting(self):
    """Explicitly disable reporting.

    The current report is discarded along with previously recorded but
    not-yet-uploaded ones, and the configuration is updated so that
    future runs neither record nor upload reports.
    """
    if self.status == Stats.DISABLED:
        return
    if not self.disableable:
        logger.critical("Can't disable reporting")
        return
    self.status = Stats.DISABLED
    self.write_config(self.status)
    # Drop any reports recorded earlier but not yet uploaded.
    if os.path.exists(self.location):
        pending = [name for name in os.listdir(self.location)
                   if name.startswith('report_')]
        for name in pending:
            os.remove(os.path.join(self.location, name))
        logger.info("Deleted %d pending reports", len(pending))
|
async def encrypt(self, message: bytes, authn: bool = False, recip: str = None) -> bytes:
    """
    Encrypt plaintext for owner of DID or verification key, anonymously or via
    authenticated encryption scheme. If given DID, first check wallet and then pool
    for corresponding verification key.

    Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
    raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.

    :param message: plaintext, as bytes
    :param authn: whether to use authenticated encryption scheme
    :param recip: DID or verification key of recipient, None for anchor's own
    :return: ciphertext, as bytes
    """
    LOGGER.debug('BaseAnchor.encrypt >>> message: %s, authn: %s, recip: %s', message, authn, recip)

    if not self.wallet.handle:
        LOGGER.debug('BaseAnchor.encrypt <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    rv = await self.wallet.encrypt(message, authn, await self._verkey_for(recip))
    # Bug fix: the exit trace previously read 'BaseAnchor.auth_encrypt',
    # a copy/paste slip; this method is BaseAnchor.encrypt.
    LOGGER.debug('BaseAnchor.encrypt <<< %s', rv)
    return rv
|
def _send_post ( self , blogname , params ) :
"""Formats parameters and sends the API request off . Validates
common and per - post - type parameters and formats your tags for you .
: param blogname : a string , the blogname of the blog you are posting to
: param params : a dict , the key - value of the parameters for the api request
: param valid _ options : a list of valid options that the request allows
: returns : a dict parsed from the JSON response"""
|
url = "/v2/blog/{}/post" . format ( blogname )
valid_options = self . _post_valid_options ( params . get ( 'type' , None ) )
if len ( params . get ( "tags" , [ ] ) ) > 0 : # Take a list of tags and make them acceptable for upload
params [ 'tags' ] = "," . join ( params [ 'tags' ] )
return self . send_api_request ( "post" , url , params , valid_options )
|
def findNestedDirectories(self, lst):
    '''Recursive helper for collecting nested directories. If this node is a
    directory node, it is appended to ``lst``; each child node is then asked
    to do the same with the very same list.

    :Parameters:
        ``lst`` (list)
            The list each directory node is to be appended to.
    '''
    if self.kind == "dir":
        lst.append(self)
    for child in self.children:
        child.findNestedDirectories(lst)
|
def get_all(self, page=None, per_page=None, include_totals=False):
    """Retrieve all resource servers.

    Args:
        page (int, optional): The result's page number (zero based).
        per_page (int, optional): The amount of entries per page.
        include_totals (bool, optional): True if the query summary is to be
            included in the result, False otherwise.

    See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/get_resource_servers
    """
    # The API expects include_totals as a lowercase string flag.
    query = {
        'page': page,
        'per_page': per_page,
        'include_totals': str(include_totals).lower(),
    }
    return self.client.get(self._url(), params=query)
|
def ssml_phoneme(self, words, alphabet=None, ph=None, **kwargs):
    """Create a <Phoneme> element.

    :param words: Words to speak
    :param alphabet: Specify the phonetic alphabet
    :param ph: Specify the phonetic symbols for pronunciation
    :param kwargs: additional attributes
    :returns: <Phoneme> element
    """
    element = SsmlPhoneme(words, alphabet=alphabet, ph=ph, **kwargs)
    return self.nest(element)
|
def train_model(self, dataset_id, model_name, token=None, url=API_TRAIN_MODEL):
    """Train a model on a previously created dataset.

    :param dataset_id: string, the id of a previously created dataset
    :param model_name: string, what you will call this model

    Attention: training may take a while and a response is returned before
    the model has finished being trained. See the docs and
    ``get_training_status``.

    :returns: requests response object
    """
    bearer = 'Bearer ' + self.check_for_token(token)
    payload = MultipartEncoder(fields={'name': model_name, 'datasetId': dataset_id})
    headers = {
        'Authorization': bearer,
        'Cache-Control': 'no-cache',
        'Content-Type': payload.content_type,
    }
    return requests.post(url, headers=headers, data=payload)
|
def namespace(self):
    """Return a dictionary representing the namespace which should be
    available to the user."""
    # NOTE(review): 'db' exposes self.store while 'store' references the
    # module-level ``store`` name — confirm this asymmetry is intentional.
    self._ns = {
        'db': self.store,
        'store': store,
        'autocommit': False,
    }
    return self._ns
|
def load(self, path=None, ignore_errors=True, block_user_signals=False):
    """Load all the parameters from a databox text file.

    Parameters
    ----------
    path=None
        Path to load the settings from. If None, loads from the
        specified autosettings_path (provided this is not None).
    ignore_errors=True
        Whether we should raise a stink when a setting doesn't exist.
        When settings do not exist, they are stuffed into the dictionary
        self._lazy_load.
    block_user_signals=False
        If True, the load will not trigger any signals.
    """
    # Idiom fix: compare to None with `is`, not `==`.
    if path is None:
        # Bail if there is no autosettings path either.
        if self._autosettings_path is None:
            return self
        # Resolve the file inside the gui settings directory.
        gui_settings_dir = _os.path.join(_cwd, 'egg_settings')
        path = _os.path.join(gui_settings_dir, self._autosettings_path)

    # Make the databox object; only load if the file exists.
    d = _d.databox()
    if _os.path.exists(path):
        d.load_file(path, header_only=True)
    else:
        return None

    # Update the settings from the databox header.
    self.update(d, ignore_errors=ignore_errors, block_user_signals=block_user_signals)
    return self
|
def copy_content(origin, dstPath, blockSize, mode):
    '''Copy the content of `origin` to `dstPath` in a safe manner.

    The content is first copied to a temporary file in the destination
    directory and then moved atomically to the requested destination.
    If an error occurs during the copy or the move, the temporary file
    is deleted.

    :param origin: file-like object open for binary reading
    :param dstPath: destination path
    :param blockSize: chunk size, in bytes, used while copying
    :param mode: permission bits applied to the destination file
    '''
    tmpFD, tmpPath = tempfile.mkstemp(
        prefix=os.path.basename(dstPath) + "_",
        suffix='.tmp',
        dir=os.path.dirname(dstPath))
    try:
        try:
            # Apply `mode` verbatim: temporarily clear the umask.
            oldmask = os.umask(0)
            try:
                os.chmod(tmpPath, mode)
            finally:
                os.umask(oldmask)
            # Copy content to the temporary file in blockSize chunks.
            while True:
                chunk = origin.read(blockSize)
                if not chunk:
                    break
                os.write(tmpFD, chunk)
        finally:
            os.close(tmpFD)
        # Move the temporary file to the actual requested destination.
        try:
            os.rename(tmpPath, dstPath)
        except OSError as e:
            # On Windows, renaming onto an existing dstPath raises OSError.
            # Bug fix: the original compared with `is` (identity), which is
            # not guaranteed to hold for strings or ints; use `==`.
            if platform.system() == 'Windows' and e.errno == errno.EEXIST:
                pass
            else:
                raise
    except:
        # Deliberate bare except: clean up the temp file, then re-raise.
        os.remove(tmpPath)
        raise
|
def terminate_manager(self):
    """**Purpose**: Terminate the tmgr process.

    This method is blocking: it waits for the tmgr process to terminate
    (aka join).
    """
    try:
        if self._tmgr_process:
            # Signal the process to stop, if not already signalled.
            if not self._tmgr_terminate.is_set():
                self._tmgr_terminate.set()
            # Join only while the process is still alive.
            if self.check_manager():
                self._tmgr_process.join()
            self._tmgr_process = None
            self._logger.info('Task manager process closed')
            self._prof.prof('tmgr process terminated', uid=self._uid)
    except Exception:
        # Bug fix: the original used Python-2-only `except Exception, ex`
        # syntax, which is a SyntaxError on Python 3.
        self._logger.exception('Could not terminate task manager process')
        raise
|
def run(self, scale_out, sectors='all'):
    """Evolve the Wilson coefficients to the scale `scale_out`.

    Parameters:

    - scale_out: output scale
    - sectors: optional. If provided, must be a tuple of strings
      corresponding to WCxf sector names. Only Wilson coefficients
      belonging to these sectors will be present in the output.

    Returns an instance of `wcxf.WC`.
    """
    evolved = self._run_dict(scale_out, sectors=sectors)
    # Build a set once so membership tests are O(1).
    known = set(wcxf.Basis[self.eft, 'JMS'].all_wcs)
    # Keep only non-vanishing, recognized coefficients.
    kept = {name: value for name, value in evolved.items()
            if value != 0 and name in known}
    return wcxf.WC(eft=self.eft, basis='JMS', scale=scale_out,
                   values=wcxf.WC.dict2values(kept))
|
def prefix_lines(lines, prefix):
    """Add the prefix to each of the lines.

    >>> prefix_lines(['foo', 'bar'], '  ')
    ['  foo', '  bar']
    >>> prefix_lines('foo\\nbar', '  ')
    ['  foo', '  bar']

    :param list or str lines: A string or a list of strings. If a string is
        passed, the string is split using splitlines().
    :param str prefix: Prefix to add to the lines. Usually an indent.
    :returns: list
    """
    text = lines
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    items = text.splitlines() if isinstance(text, str) else text
    return [prefix + item for item in items]
|
def maintenance(self, on=True):
    """Toggle maintenance mode for this app; return True on success."""
    response = self._h._http_resource(
        method='POST',
        resource=('apps', self.name, 'server', 'maintenance'),
        data={'maintenance_mode': int(on)},
    )
    return response.ok
|
def round_sig(x, n, scien_notation=False):
    '''Round floating point *x* to *n* significant figures.

    :param x: value to round (coerced to float)
    :param n: number of significant figures; must be an int
    :param scien_notation: if True, format small values in scientific notation
    :returns: the rounded value, formatted as a string
    :raises TypeError: if *n* is not an int or *x* is not float-convertible
    '''
    # Work on the absolute value; re-attach the sign symbol at the end.
    if x < 0:
        x = x * -1
        symbol = '-'
    else:
        symbol = ''
    # Bug fix: the original used Python-2-only constructs
    # (types.IntType, `raise TypeError, "..."`, string.split) which are
    # syntax/attribute errors on Python 3; ported mechanically.
    if type(n) is not int:
        raise TypeError("n must be an integer")
    try:
        x = float(x)
    except Exception:
        raise TypeError("x must be a floating point object")
    # Format in exponential notation with n significant digits, then parse
    # mantissa and exponent back out with the module-level regex `epat`.
    form = "%0." + str(n - 1) + "e"
    st = form % x
    num, expo = epat.findall(st)[0]
    expo = int(expo)
    fs = num.split('.')
    if len(fs) < 2:
        fs = [fs[0], ""]
    if expo == 0:
        return symbol + num
    elif expo > 0:
        # Shift the decimal point right by `expo` places, zero-padding.
        if len(fs[1]) < expo:
            fs[1] += "0" * (expo - len(fs[1]))
        st = fs[0] + fs[1][0:expo]
        if len(fs[1][expo:]) > 0:
            st += '.' + fs[1][expo:]
        return symbol + st
    else:
        # Negative exponent: build a 0.000... prefixed representation.
        expo = -expo
        if fs[0][0] == '-':
            fs[0] = fs[0][1:]
            sign = "-"
        else:
            sign = ""
        if scien_notation:
            coso = float(sign + "0." + "0" * (expo - 1) + fs[0] + fs[1])
            scient_format = '{:.' + str(n) + 'e}'
            return symbol + scient_format.format(coso)
        else:
            formated = sign + "0." + "0" * (expo - 1) + fs[0] + fs[1]
            if len(formated) > (n + 5):
                formated = '0.' + '0' * n
            return symbol + formated
|
def create_qualification_type(Name=None, Keywords=None, Description=None, QualificationTypeStatus=None, RetryDelayInSeconds=None, Test=None, AnswerKey=None, TestDurationInSeconds=None, AutoGranted=None, AutoGrantedValue=None):
    """Create a new Qualification type (a QualificationType data structure).

    See also: AWS API Documentation.

    :type Name: string
    :param Name: [REQUIRED] Name given to the Qualification type; shown to
        Workers and used when searching for the type. Must be unique across
        all of your Qualification types.
    :type Keywords: string
    :param Keywords: Comma-separated words or phrases describing the type,
        making it easier to find during a search.
    :type Description: string
    :param Description: [REQUIRED] Long description displayed when a Worker
        examines the Qualification type.
    :type QualificationTypeStatus: string
    :param QualificationTypeStatus: [REQUIRED] Initial status of the type.
        Constraints: valid values are 'Active' | 'Inactive'.
    :type RetryDelayInSeconds: integer
    :param RetryDelayInSeconds: Seconds a Worker must wait after requesting a
        Qualification of this type before retrying. If not specified, retries
        are disabled permanently for the type: to enable them later you must
        delete the type and create a new one with retries enabled.
    :type Test: string
    :param Test: Questions (a QuestionForm structure, max 65535 bytes) the
        Worker must answer correctly to obtain the Qualification. Requires
        TestDurationInSeconds; cannot be specified if AutoGranted is true.
        If not specified, the Worker may request the Qualification without
        answering any questions.
    :type AnswerKey: string
    :param AnswerKey: Answers to the test, as an AnswerKey data structure
        (max 65535 bytes). If not specified, you must process Qualification
        requests manually.
    :type TestDurationInSeconds: integer
    :param TestDurationInSeconds: Seconds the Worker has to complete the
        test, starting from the time the Qualification is requested.
    :type AutoGranted: boolean
    :param AutoGranted: Whether requests are granted immediately without a
        test. Cannot be true if Test is specified.
    :type AutoGrantedValue: integer
    :param AutoGrantedValue: Qualification value used for automatically
        granted Qualifications; only used when AutoGranted is true.
    :rtype: dict
    :return: {'QualificationType': {'QualificationTypeId': 'string',
        'CreationTime': datetime(2015, 1, 1), 'Name': 'string',
        'Description': 'string', 'Keywords': 'string',
        'QualificationTypeStatus': 'Active'|'Inactive', 'Test': 'string',
        'TestDurationInSeconds': 123, 'AnswerKey': 'string',
        'RetryDelayInSeconds': 123, 'IsRequestable': True|False,
        'AutoGranted': True|False, 'AutoGrantedValue': 123}}
    """
    pass
|
def spawn_server_api(api_name, app, api_spec, error_callback, decorator):
    """Populate a Flask *app* with routes handling all the paths and methods
    declared in a swagger file (YAML) describing a REST API.

    Also handle marshaling and unmarshaling between JSON and object
    instances representing the definitions from the swagger file.
    """
    def bind_endpoint(endpoint):
        handler_func = get_function(endpoint.handler_server)
        # Wrap the raw handler with (un)marshaling and error handling.
        wrapped = _generate_handler_wrapper(
            api_name, api_spec, endpoint, handler_func, error_callback, decorator)
        # Bind handler to the API path.
        log.info("Binding %s %s ==> %s" % (
            endpoint.method, endpoint.path, endpoint.handler_server))
        rule_name = '_'.join([endpoint.method, endpoint.path]).replace('/', '_')
        app.add_url_rule(endpoint.path, rule_name, wrapped, methods=[endpoint.method])

    api_spec.call_on_each_endpoint(bind_endpoint)
    # Add custom error handlers to the app
    add_error_handlers(app)
|
def drop_database(self, name, force=False):
    """Drop a MapD database.

    Parameters
    ----------
    name : string
        Database name
    force : boolean, default False
        If False and there are any tables in this database, raises an
        IntegrityError
    """
    # NOTE(review): `self.database(name)` is evaluated for truthiness here,
    # so tables are also listed when force=True and that call is truthy —
    # confirm this guard is intentional.
    tables = []
    if not force or self.database(name):
        tables = self.list_tables(database=name)
    if not force and len(tables):
        raise com.IntegrityError(
            'Database {0} must be empty before being dropped, or set '
            'force=True'.format(name))
    self._execute(ddl.DropDatabase(name))
|
# NOTE: scraping residue from a dataset-viewer UI, not part of the source —
# kept as comments so the file remains syntactically valid:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.