signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def linear(self, fnct, x, y, sd=None, wt=1.0, fid=0):
    """Compute a linear least squares solution.

    Fits ``fnct`` through the observations ``y`` at the abscissa values
    ``x`` by delegating to the generic ``_fit`` driver with
    ``fitfunc="linear"``. ``x`` may hold several arguments per
    observation: with n observations of m arguments each it is a flat
    vector of length m * n, ordered x0[1], x0[2], ..., x1[1], ..., xn[m];
    ``y`` is a vector of length n.

    :param fnct: the functional to fit
    :param x: the abscissa values
    :param y: the ordinate values
    :param sd: standard deviation of equations (one or more values used
        cyclically)
    :param wt: an optional alternate for ``sd``
    :param fid: the id of the sub-fitter (numerical)
    """
    self._fit(fitfunc="linear", fnct=fnct, x=x, y=y, sd=sd, wt=wt, fid=fid)
|
def search(self, name: str = None, acc_type: str = None):
    """Return all accounts matching the given filters.

    :param name: exact account name to match (no name filter when None)
    :param acc_type: account type; upper-cased before comparison because
        stored account types are capitalized
    :return: list of matching ``Account`` rows
    """
    query = self.query
    if name is not None:
        query = query.filter(Account.name == name)
    if acc_type is not None:
        # Account types are stored capitalized, so normalise the input.
        query = query.filter(Account.type == acc_type.upper())
    return query.all()
|
def list_insert(lst, new_elements, index_or_name=None, after=True):
    """Return a copy of the list with the new element(s) inserted.

    Args:
        lst (list): The original list.
        new_elements (any or list of any): The element(s) to insert in the
            list. A single non-list value is wrapped into a one-element
            list (the old code crashed or char-split strings in that case).
        index_or_name (int or str): The value of the reference element, or
            directly its numeric index. Default: None (= append).
        after (bool): Whether to insert the new elements after (True) or
            before (False) the reference element. Default: True.

    Returns:
        (list) A copy of the original list containing the new element(s).
    """
    # Accept a bare element as documented: normalise to a list so the
    # concatenations below behave identically for both call styles.
    if not isinstance(new_elements, list):
        new_elements = [new_elements]
    if index_or_name is None:
        index = None
    else:
        try:
            index = get_list_index(lst, index_or_name)
        except ValueError:
            # Unknown reference element: fall back to appending.
            index = None
    to_return = lst[:]
    if index is None:
        # Append.
        to_return += new_elements
    else:
        # Honour `after` even when the reference element sits at index 0;
        # the old code unconditionally prepended in that case.
        if after:
            index += 1
        to_return = to_return[:index] + new_elements + to_return[index:]
    return to_return
|
def on_backward_begin(self, last_loss, last_output, **kwargs):
    """Record `last_loss` in the proper list (generator or critic)."""
    loss = last_loss.detach().cpu()
    if self.gen_mode:
        # Generator step: smooth the loss and keep the produced output.
        self.smoothenerG.add_value(loss)
        self.glosses.append(self.smoothenerG.smooth)
        self.last_gen = last_output.detach().cpu()
    else:
        # Critic step: only the smoothed loss is tracked.
        self.smoothenerC.add_value(loss)
        self.closses.append(self.smoothenerC.smooth)
|
def file_needs_update(target_file, source_file):
    """Check whether ``target_file`` is missing or differs from ``source_file``.

    :param target_file: File target for a copy action
    :param source_file: File to be copied
    :return: True if ``target_file`` does not exist or its MD5 hash differs
        from that of ``source_file``, else False
    :rtype: bool
    """
    # A missing target always needs the copy; only hash when it exists.
    if not os.path.isfile(target_file):
        return True
    return get_md5_file_hash(target_file) != get_md5_file_hash(source_file)
|
def put(self, item, *args, **kwargs):
    """Store ``item`` in the cache for this combination of args and kwargs.

    Args:
        item: the value to cache.
        *args: any positional arguments; part of the cache key.
        **kwargs: any keyword arguments; part of the cache key. A
            ``timeout`` keyword, if present, is consumed here and gives
            the number of seconds the item remains retrievable; ``None``
            (or omitting it) selects ``default_timeout``. Use a timeout
            of 0 (or a default_timeout of 0) to effectively not cache.
    """
    if not self.enabled:
        return
    # `timeout` is a control knob, not part of the key: pop it first.
    expiry_seconds = kwargs.pop('timeout', None)
    if expiry_seconds is None:
        expiry_seconds = self.default_timeout
    key = self.make_key(args, kwargs)
    # Store the absolute expiry timestamp alongside the item, under the lock.
    with self._cache_lock:
        self._cache[key] = (time() + expiry_seconds, item)
|
def inasafe_place_value_coefficient(number, feature, parent):
    """Return the coefficient of the place value name for ``number``.

    For instance:
    * inasafe_place_value_coefficient(10) -> 1
    * inasafe_place_value_coefficient(1700) -> 1.7
    It needs to be used with inasafe_number_denomination_unit.
    Returns None (as a string is not produced) for negative numbers.
    """
    # Expression-function signature: feature/parent are unused here.
    _ = feature, parent
    if number < 0:
        return None
    rounded_number = round_affected_number(
        number, use_rounding=True, use_population_rounding=True)
    min_number = 1000
    value, unit = denomination(rounded_number, min_number)
    # Below the smallest denomination the coefficient is a whole number.
    if number < min_number:
        coefficient = int(round(value, 1))
    else:
        coefficient = round(value, 1)
    return str(coefficient)
|
def _init_properties(self):
    """Initialise the CRUD view's configuration properties.

    Normalises all optional class-level configuration attributes to
    concrete containers, then derives the list/show/add/edit column sets
    from the datamodel for anything not configured explicitly.
    """
    super(BaseCRUDView, self)._init_properties()
    # Reset init props: replace None with empty containers so later code
    # can iterate/update them unconditionally.
    self.related_views = self.related_views or []
    self._related_views = self._related_views or []
    self.description_columns = self.description_columns or {}
    self.validators_columns = self.validators_columns or {}
    self.formatters_columns = self.formatters_columns or {}
    self.add_form_extra_fields = self.add_form_extra_fields or {}
    self.edit_form_extra_fields = self.edit_form_extra_fields or {}
    self.show_exclude_columns = self.show_exclude_columns or []
    self.add_exclude_columns = self.add_exclude_columns or []
    self.edit_exclude_columns = self.edit_exclude_columns or []
    # Generate base props from the datamodel's user-visible columns.
    list_cols = self.datamodel.get_user_columns_list()
    # Default list view shows only the first user column.
    self.list_columns = self.list_columns or [list_cols[0]]
    self._gen_labels_columns(self.list_columns)
    self.order_columns = (
        self.order_columns
        or self.datamodel.get_order_columns_list(list_columns=self.list_columns)
    )
    # Fieldsets, when defined, take precedence over explicit *_columns /
    # *_exclude_columns configuration: columns are collected from the
    # "fields" entry of every fieldset.
    if self.show_fieldsets:
        self.show_columns = []
        for fieldset_item in self.show_fieldsets:
            self.show_columns = self.show_columns + list(fieldset_item[1].get("fields"))
    else:
        if not self.show_columns:
            self.show_columns = [x for x in list_cols if x not in self.show_exclude_columns]
    if self.add_fieldsets:
        self.add_columns = []
        for fieldset_item in self.add_fieldsets:
            self.add_columns = self.add_columns + list(fieldset_item[1].get("fields"))
    else:
        if not self.add_columns:
            self.add_columns = [x for x in list_cols if x not in self.add_exclude_columns]
    if self.edit_fieldsets:
        self.edit_columns = []
        for fieldset_item in self.edit_fieldsets:
            self.edit_columns = self.edit_columns + list(fieldset_item[1].get("fields"))
    else:
        if not self.edit_columns:
            self.edit_columns = [x for x in list_cols if x not in self.edit_exclude_columns]
|
def select_where_like(self, table, cols, where_col, start=None, end=None, anywhere=None, index=(None, None), length=None):
    """Query rows from a table where a specific pattern matches a column.

    MySQL syntax assumptions:
    (%) The percent sign represents zero, one, or multiple characters.
    (_) The underscore represents a single character.

    :param table: Name of the table
    :param cols: List, tuple or set of columns, or a single column name
    :param where_col: Column to check the pattern against
    :param start: Value to be found at the start
    :param end: Value to be found at the end
    :param anywhere: Value to be found anywhere
    :param index: Value to be found at a certain index
    :param length: Minimum character length
    :return: Queried rows
    """
    # Build the LIKE pattern from the individual constraints.
    pattern = self._like_pattern(start, end, anywhere, index, length)
    # NOTE(review): the statement is assembled via string interpolation,
    # so table/column/pattern values must come from trusted input
    # (SQL-injection risk if they do not).
    statement = "SELECT {0} FROM {1} WHERE {2} LIKE '{3}'".format(
        join_cols(cols), wrap(table), where_col, pattern)
    return self.fetch(statement)
|
def is_int_type(val):
    """Return True if `val` is of integer type (int, or long on Python 2)."""
    try:
        # Python 2: `long` exists and also counts as an integer type.
        integer_types = (int, long)  # noqa: F821
    except NameError:
        # Python 3: `long` was merged into `int`.
        integer_types = (int,)
    return isinstance(val, integer_types)
|
def mouseDoubleClickEvent(self, event):
    """Launch an editor for the component under the mouse cursor."""
    # Editing is only available in build mode and on a left double-click.
    if self.mode == BuildMode and event.button() == QtCore.Qt.LeftButton:
        self.edit(self.indexAt(event.pos()))
|
def __advice_stack_frame_protection(self, frame):
    """Walk the call stack and forbid indirect invocation through `handle`.

    Overriding of this is only permitted if and only if your name is
    Megumin and you have a pet/familiar named Chomusuke.
    """
    if frame is None:
        # currentframe() may be unavailable on some interpreters.
        logger.debug('currentframe() returned None; frame protection disabled')
        return
    caller = frame.f_back
    while caller is not None:
        # Identity check on the code object: any ancestor frame running
        # `handle` means we were invoked indirectly through it.
        if caller.f_code is self.handle.__code__:
            raise RuntimeError(
                "indirect invocation of '%s' by 'handle' is forbidden"
                % frame.f_code.co_name,
            )
        caller = caller.f_back
|
def _attach_to_model(self, model):
    """Register this related field's relation on the model's database.

    When we have a (concrete) model, save the relation in the database,
    to later create RelatedCollection objects in the related model.
    """
    super(RelatedFieldMixin, self)._attach_to_model(model)
    if model.abstract:
        # Do not manage the relation if it's an abstract model.
        return
    # Resolve the two identifiers of the relation: the reverse accessor
    # name and the name of the model being pointed at.
    self.related_name = self._get_related_name()
    self.related_to = self._get_related_model_name()
    # Lazily create the per-database relation registry, plus the entry
    # for the target model.
    if not hasattr(self.database, '_relations'):
        self.database._relations = {}
    self.database._relations.setdefault(self.related_to, [])
    # A related name must be unique per related model; raises otherwise.
    self._assert_relation_does_not_exists()
    # The relation didn't exist, so it is safe to record it.
    relation = (self._model._name, self.name, self.related_name)
    self.database._relations[self.related_to].append(relation)
|
def get_default_datatable_kwargs(self, **kwargs):
    """Build the default kwargs for initializing a Datatable class.

    Note that by default the MultipleDatatableMixin does not support any
    configuration via the view's class attributes, and instead relies
    completely on the Datatable class itself to declare its configuration
    details.
    """
    kwargs['view'] = self
    # The request is optional: when the view is instantiated outside of
    # the request cycle (e.g. to embed its datatable elsewhere), there may
    # be no `self.request`, so fall back to an empty query configuration.
    if hasattr(self, 'request'):
        request = self.request
        kwargs['url'] = request.path
        kwargs['query_config'] = getattr(request, request.method)
    else:
        kwargs['query_config'] = {}
    return kwargs
|
def other_set_producer(socket, which_set, image_archive, patch_archive, groundtruth):
    """Push image files read from the valid/test set TAR to a socket.

    Parameters
    ----------
    socket : :class:`zmq.Socket`
        PUSH socket on which to send images.
    which_set : str
        Which set of images is being processed. One of 'train', 'valid',
        'test'. Used for extracting the appropriate images from the patch
        archive.
    image_archive : str or file-like object
        The filename or file-handle for the TAR archive containing images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    groundtruth : iterable
        Iterable container containing scalar 0-based class index for each
        image, sorted by filename.
    """
    patch_images = extract_patch_images(patch_archive, which_set)
    num_patched = 0
    with tar_open(image_archive) as tar:
        # Sort by archive filename so images line up with the
        # filename-sorted groundtruth labels.
        filenames = sorted(info.name for info in tar if info.isfile())
        # Lazy generators: images are loaded/patched one at a time.
        images = (load_from_tar_or_patch(tar, filename, patch_images)
                  for filename in filenames)
        pathless_filenames = (os.path.split(fn)[-1] for fn in filenames)
        image_iterator = equizip(images, pathless_filenames, groundtruth)
        for (image_data, patched), filename, class_index in image_iterator:
            if patched:
                num_patched += 1
            # Metadata first with SNDMORE so (filename, class_index) and
            # the raw image bytes arrive as one multipart message.
            socket.send_pyobj((filename, class_index), zmq.SNDMORE)
            socket.send(image_data, copy=False)
    # Sanity check: every patch image should have replaced exactly one
    # archive image.
    if num_patched != len(patch_images):
        raise Exception
|
def is_B_hypergraph(self):
    """Indicate whether the hypergraph is a B-hypergraph.

    In a B-hypergraph, all hyperedges are B-hyperedges -- that is, every
    hyperedge has exactly one node in the head. (Note: this check only
    rejects heads with *more* than one node; empty heads pass.)

    :returns: bool -- True iff no hyperedge has more than one head node.
    """
    return all(
        len(self.get_hyperedge_head(hyperedge_id)) <= 1
        for hyperedge_id in self._hyperedge_attributes
    )
|
def get_belapi_handle(client, username=None, password=None):
    """Get (creating it if necessary) the BEL API arango db handle."""
    (username, password) = get_user_creds(username, password)
    sys_db = client.db("_system", username=username, password=password)
    # Try to create the "belapi" database; when it already exists, fall
    # back to opening it.
    try:
        if username and password:
            belapi_db = sys_db.create_database(
                name=belapi_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            belapi_db = sys_db.create_database(name=belapi_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            belapi_db = client.db(belapi_db_name, username=username, password=password)
        else:
            belapi_db = client.db(belapi_db_name)
    # Best-effort creation of the two collections; ignore failures such as
    # "collection already exists".
    for collection_name in (belapi_settings_name, belapi_statemgmt_name):
        try:
            belapi_db.create_collection(collection_name)
        except Exception:
            pass
    return belapi_db
|
def generic_converter_cli(docgraph_class, file_descriptor=''):
    """Generic command line interface for importers.

    Converts the file specified on the command line into a dot
    representation of the corresponding DiscourseDocumentGraph and writes
    the output to stdout or to a file specified on the command line.

    Parameters
    ----------
    docgraph_class : class
        a DiscourseDocumentGraph (or a class derived from it), not an
        instance of it!
    file_descriptor : str
        string describing the input format, e.g. 'TigerXML (syntax)'
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'input_file',
        help='{} file to be converted'.format(file_descriptor))
    # Default to stdout when no output file is given.
    parser.add_argument('output_file', nargs='?', default=sys.stdout)
    args = parser.parse_args(sys.argv[1:])
    assert os.path.isfile(args.input_file), \
        "'{}' isn't a file".format(args.input_file)
    docgraph = docgraph_class(args.input_file)
    write_dot(docgraph, args.output_file)
|
def start(self):
    """Start a node (generator-style coroutine).

    For IOU nodes the licence must be sent with every start request.
    Raises HTTPConflict when no IOU licence is configured and
    HTTPRequestTimeout when the start does not complete within 240s.
    """
    try:
        # For IOU we need to send the licence everytime.
        if self.node_type == "iou":
            try:
                licence = self._project.controller.settings["IOU"]["iourc_content"]
            except KeyError:
                raise aiohttp.web.HTTPConflict(text="IOU licence is not configured")
            yield from self.post("/start", timeout=240, data={"iourc_content": licence})
        else:
            yield from self.post("/start", timeout=240)
    except asyncio.TimeoutError:
        # Surface compute timeouts as an HTTP-level error for API clients.
        raise aiohttp.web.HTTPRequestTimeout(text="Timeout when starting {}".format(self._name))
|
def insert_element_to_dict_of_dicts_of_list(dict_of_dict_of_list, first_key, second_key, parser):
    """Insert ``parser`` into a two-level dict of lists, in place.

    :param dict_of_dict_of_list: mapping first_key -> (second_key -> list)
    :param first_key: outer dictionary key
    :param second_key: inner dictionary key
    :param parser: element (or list of elements) to add
    :return: None; ``dict_of_dict_of_list`` is mutated
    """
    items = parser if isinstance(parser, list) else [parser]
    # Create the inner mapping on first use of `first_key`.
    inner = dict_of_dict_of_list.setdefault(first_key, {})
    if second_key in inner:
        # Extend the existing bucket in place.
        inner[second_key] += items
    else:
        # First entry for this key pair: store the list itself (this
        # aliases `parser` when a list was passed, matching the original
        # behavior).
        inner[second_key] = items
|
def _load_feed(path: str, view: View, config: nx.DiGraph) -> Feed:
    """Multi-file feed filtering: build a chain of filtered Feeds.

    Each (filename, column_filters) entry in the view wraps the previous
    feed with a single-file view; the final Feed re-applies the original
    config.
    """
    # Strip converters/transformations so the intermediate feeds stay raw.
    stripped = remove_node_attributes(config, ["converters", "transformations"])
    feed = Feed(path, view={}, config=stripped)
    for filename, column_filters in view.items():
        # Re-root the dependency graph on the file being filtered.
        stripped = reroot_graph(stripped, filename)
        feed = Feed(feed, view={filename: column_filters}, config=stripped)
    return Feed(feed, config=config)
|
def _ctypes_ex_variables(executable):
    """Return the list of local variable definitions required to construct
    the ctypes interop wrapper."""
    result = []
    # Every parameter contributes three kinds of local definitions.
    for parameter in executable.ordered_parameters:
        for role in ("indices", "variable", "out"):
            _ctypes_code_parameter(result, parameter, role)
    if type(executable).__name__ == "Function":
        # For functions, we still create a subroutine-type interface and
        # then just add an extra output-type parameter for the function's
        # return type.
        for role in ("indices", "variable", "out"):
            _ctypes_code_parameter(result, executable, role)
    return result
|
def setdoc(self, newdoc):
    """Associate a (different) document with this element and its subtree.

    Usually there is no need to call this directly; it is invoked
    implicitly by :meth:`copy`.
    """
    self.doc = newdoc
    # Re-register this element in the new document's id index.
    if self.doc and self.id:
        self.doc.index[self.id] = self
    # Recurse into child elements so the whole subtree switches document.
    for child in self:
        if isinstance(child, AbstractElement):
            child.setdoc(newdoc)
|
def title_prefix_json(soup):
    """Return titlePrefix with capitalisation changed by the JSON rewriter."""
    return elifetools.json_rewrite.rewrite_json(
        "title_prefix_json", soup, title_prefix(soup))
|
def getUnitCost(self, CorpNum):
    """Return the fax unit cost for a member.

    (The original docstring was mojibake-encoded Korean; per its structure
    it documented the member corporate number argument, the unit cost
    return value, and the PopbillException raise clause.)

    args
        CorpNum : member corporate registration number
    return
        unit cost, as int
    raise
        PopbillException
    """
    response = self._httpget('/FAX/UnitCost', CorpNum)
    return int(response.unitCost)
|
def else_(self, result_expr):
    """Set the default (ELSE) result expression.

    Returns
    -------
    builder : CaseBuilder
        a new builder of the same concrete type; immutability is
        maintained by never mutating ``self``.
    """
    # Copy every slot except 'default', which is replaced below.
    new_kwargs = {
        slot: getattr(self, slot)
        for slot in self.__slots__
        if slot != 'default'
    }
    new_kwargs['default'] = ir.as_value_expr(result_expr)
    # Return a fresh instance instead of mutating self.
    return type(self)(**new_kwargs)
|
def add_metadata(self, observation_n, info_n, available_at=None):
    """Mutate the `info_n` dictionaries with diagnostics metadata.

    Fans the per-instance work out to the worker pool and drains the
    result iterator so all tasks complete before returning. No-op when
    diagnostics are inactive (`instance_n` is None).
    """
    if self.instance_n is None:
        return
    with pyprofile.push('vnc_env.diagnostics.Diagnostics.add_metadata'):
        # Bug fix: the variable was named `async`, which became a reserved
        # keyword in Python 3.7 and made this a SyntaxError.
        results = self.pool.imap_unordered(
            self._add_metadata_i,
            zip(self.instance_n, observation_n, info_n,
                [available_at] * len(observation_n)))
        # Drain the lazy iterator to force completion of every task.
        list(results)
|
def require_backup_exists(func):
    """Decorator requiring that the file referred to by the ``backup_file``
    keyword argument exists before running the decorated function.

    :raises RestoreError: when the file does not exist.
    :raises KeyError: when ``backup_file`` is not passed as a keyword
        argument (pre-existing behavior, kept as-is).
    """
    from functools import wraps  # local import keeps the file's deps unchanged

    @wraps(func)  # preserve the wrapped function's name/docstring
    def new_func(*args, **kwargs):
        backup_file = kwargs['backup_file']
        if not os.path.exists(backup_file):
            raise RestoreError("Could not find file '{0}'".format(backup_file))
        return func(*args, **kwargs)
    return new_func
|
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    # NOTE(review): `triples` is accepted but unused in this body;
    # presumably consumed by the codec elsewhere — confirm.
    decode = cls(**kwargs).iterdecode
    # File-like objects are read directly; strings are treated as paths.
    if hasattr(source, 'read'):
        text = source.read()
    else:
        with open(source) as fh:
            text = fh.read()
    return list(decode(text))
|
def set_end_date(self, lifetime):
    """Compute and store the absolute session ``end_date`` from its
    ``lifetime`` (in seconds)."""
    self.end_date = datetime.datetime.now() + datetime.timedelta(seconds=lifetime)
|
def _create(cls, name, node_type, physical_interfaces, nodes=1, loopback_ndi=None,
            log_server_ref=None, domain_server_address=None, enable_antivirus=False,
            enable_gti=False, sidewinder_proxy_enabled=False, default_nat=False,
            location_ref=None, enable_ospf=None, ospf_profile=None, snmp_agent=None,
            comment=None):
    """Return the engine configuration as a dict representing the engine.

    The creating class will also add engine specific requirements before
    constructing the request and sending to SMC (which will serialize the
    dict to json).

    :param name: name of engine
    :param str node_type: comes from class attribute of engine type
    :param dict physical_interfaces: physical interface list of dict
    :param int nodes: number of nodes for engine
    :param str log_server_ref: href of log server
    :param list domain_server_address: dns addresses
    """
    node_list = []
    for nodeid in range(1, nodes + 1):  # nodeid starts at 1
        node_list.append(Node._create(name, node_type, nodeid, loopback_ndi))
    domain_server_list = []
    if domain_server_address:
        for num, server in enumerate(domain_server_address):
            # DNS entries may be SMC elements (carrying an href) or plain
            # address strings.
            try:
                domain_server = {'rank': num, 'ne_ref': server.href}
            except AttributeError:
                domain_server = {'rank': num, 'value': server}
            domain_server_list.append(domain_server)
    # Set log server reference if not explicitly provided. Bug fix: the
    # original used `node_type is not 'virtual_fw_node'`, an identity test
    # against a string literal whose outcome is implementation-dependent
    # (and a SyntaxWarning on Python 3.8+); use value comparison.
    if not log_server_ref and node_type != 'virtual_fw_node':
        log_server_ref = LogServer.objects.first().href
    base_cfg = {
        'name': name,
        'nodes': node_list,
        'domain_server_address': domain_server_list,
        'log_server_ref': log_server_ref,
        'physicalInterfaces': physical_interfaces,
    }
    if enable_antivirus:
        base_cfg.update(antivirus={
            'antivirus_enabled': True,
            'antivirus_update': 'daily',
            'virus_log_level': 'stored',
            'virus_mirror': 'update.nai.com/Products/CommonUpdater',
        })
    if enable_gti:
        base_cfg.update(gti_settings={'file_reputation_context': 'gti_cloud_only'})
    if sidewinder_proxy_enabled:
        base_cfg.update(sidewinder_proxy_enabled=True)
    if default_nat:
        base_cfg.update(default_nat=True)
    if location_ref:
        # The original `... if location_ref else None` was redundant here
        # (we are already inside `if location_ref:`) and has been removed.
        base_cfg.update(location_ref=location_helper(location_ref))
    if snmp_agent:
        # NOTE: pops from the caller's snmp_agent dict (mutates the argument).
        snmp_agent_ref = SNMPAgent(snmp_agent.pop('snmp_agent_ref')).href
        base_cfg.update(snmp_agent_ref=snmp_agent_ref, **snmp_agent)
    if enable_ospf:
        if not ospf_profile:
            # Fall back to the default OSPF profile.
            ospf_profile = OSPFProfile('Default OSPFv2 Profile').href
        base_cfg.update(dynamic_routing={
            'ospfv2': {'enabled': True, 'ospfv2_profile_ref': ospf_profile}})
    base_cfg.update(comment=comment)
    return base_cfg
|
def Serialize(self, writer):
    """Serialize full object.

    Args:
        writer (neo.IO.BinaryWriter): target binary writer.
    """
    super(Header, self).Serialize(writer)
    # NOTE(review): the trailing zero byte presumably marks an empty
    # payload (e.g. no transactions in a bare header) — confirm against
    # the matching Deserialize implementation.
    writer.WriteByte(0)
|
def from_forums(cls, forums):
    """Initialize a ``ForumVisibilityContentTree`` instance from a list of forums.

    The forums are expected in tree order (parents before children, with
    ``forum.level`` giving the depth). One ``ForumVisibilityContentNode``
    is built per forum; parent/child links and per-node ``visible`` flags
    are computed along the way.
    """
    root_level = None
    current_path = []
    nodes = []
    # Ensures forums last posts and related poster relations are "followed"
    # for better performance (only if we're considering a queryset).
    forums = (
        forums.select_related('last_post', 'last_post__poster')
        if isinstance(forums, QuerySet) else forums
    )
    for forum in forums:
        level = forum.level
        # Set the root level to the top node level at the first iteration.
        if root_level is None:
            root_level = level
        # Initializes a visibility forum node associated with the current
        # forum instance.
        vcontent_node = ForumVisibilityContentNode(forum)
        # Computes a relative level associated to the node.
        relative_level = level - root_level
        vcontent_node.relative_level = relative_level
        # All children nodes will be stored in an array attached to the
        # current node.
        vcontent_node.children = []
        # Removes the forums that are not in the current branch.
        while len(current_path) > relative_level:
            current_path.pop(-1)
        if level != root_level:
            # Update the parent of the current forum.
            parent_node = current_path[-1]
            vcontent_node.parent = parent_node
            parent_node.children.append(vcontent_node)
        # Sets visible flag if applicable. The visible flag is used to
        # determine whether a forum can be seen in a forum list or not.
        # A forum can be seen if one of the following statements is true:
        # * the forum is a direct child of the starting forum for the
        #   considered level
        # * the forum has a parent which is a category and this category
        #   is a direct child of the starting forum
        # * the forum has its 'display_sub_forum_list' option set to True
        #   and has a parent which is another forum; the latter is a
        #   direct child of the starting forum
        # * the forum has its 'display_sub_forum_list' option set to True
        #   and has a parent which is another forum; the latter has a
        #   parent which is a category, itself a direct child of the
        #   starting forum
        # If forums at the root level don't have parents, the visible
        # forums are those that can be seen from the root of the forums
        # tree.
        vcontent_node.visible = (
            (relative_level == 0)
            or (forum.display_sub_forum_list and relative_level == 1)
            or (forum.is_category and relative_level == 1)
            or (relative_level == 2 and vcontent_node.parent.parent.obj.is_category
                and vcontent_node.parent.obj.is_forum)
        )
        # Add the current forum to the end of the current branch and
        # inserts the node inside the final node list.
        current_path.append(vcontent_node)
        nodes.append(vcontent_node)
    tree = cls(nodes=nodes)
    # Back-reference: every node knows the tree it belongs to.
    for node in tree.nodes:
        node.tree = tree
    return tree
|
def medlineRecordParser(record):
    """The parser used by `MedlineRecord`.

    Takes an entry from `medlineParser()` and parses it as part of the
    creation of a `MedlineRecord`.

    # Parameters
    _record_ : `enumerate object`
        a file wrapped by `enumerate()`
    # Returns
    `collections.OrderedDict`
        An ordered dictionary of the key-value pairs in the entry
    """
    tagDict = collections.OrderedDict()
    # A MEDLINE record starts with its PMID line.
    tag = 'PMID'
    mostRecentAuthor = None
    for lineNum, line in record:
        # MEDLINE layout: 4-char tag field, '-' in column 5, value from
        # column 7 onward.
        tmptag = line[:4].rstrip()
        contents = line[6:-1]  # drop the trailing newline
        if tmptag.isalpha() and line[4] == '-':
            tag = tmptag
            if tag == 'AU':
                mostRecentAuthor = contents
            # Author-scoped tags get prefixed with the most recent author.
            if tag in authorBasedTags:
                contents = "{} : {}".format(mostRecentAuthor, contents)
            # Tags may repeat: collect all values per tag, in order.
            try:
                tagDict[tag].append(contents)
            except KeyError:
                tagDict[tag] = [contents]
        elif line[:6] == '      ':
            # Continuation line (six leading spaces): extend the last value
            # of the current tag.
            tagDict[tag][-1] += '\n' + line[6:-1]
        elif line == '\n':
            # A blank line terminates the record.
            break
        else:
            raise BadPubmedRecord(
                "Tag not formed correctly on line {}: '{}'".format(lineNum, line))
    return tagDict
|
def distorted_bounding_box_crop(image, bbox, min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0), max_attempts=100,
                                scope=None):
    """Generate cropped_image using one of the bboxes randomly distorted.

    See `tf.image.sample_distorted_bounding_box` for more documentation.

    Args:
        image: `Tensor` of image (it will be converted to floats in [0, 1]).
        bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
            where each coordinate is [0, 1) and the coordinates are
            arranged as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then
            use the whole image.
        min_object_covered: An optional `float`. Defaults to `0.1`. The
            cropped area of the image must contain at least this fraction
            of any bounding box supplied.
        aspect_ratio_range: An optional list of `float`s. The cropped area
            of the image must have an aspect ratio = width / height within
            this range.
        area_range: An optional list of `float`s. The cropped area of the
            image must contain a fraction of the supplied image within
            this range.
        max_attempts: An optional `int`. Number of attempts at generating
            a cropped region of the image of the specified constraints.
            After `max_attempts` failures, return the entire image.
        scope: Optional `str` for name scope.
    Returns:
        (cropped image `Tensor`, distorted bbox `Tensor`).
    """
    with tf.name_scope(scope, default_name="distorted_bounding_box_crop",
                       values=[image, bbox]):
        # Sample a new bounding box that is a randomly distorted version of
        # the supplied (typically human-annotated) box, obeying the allowed
        # ranges of aspect ratio, area and overlap. With no boxes supplied,
        # the whole image is assumed to be the bounding box.
        bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(
            tf.shape(image),
            bounding_boxes=bbox,
            min_object_covered=min_object_covered,
            aspect_ratio_range=aspect_ratio_range,
            area_range=area_range,
            max_attempts=max_attempts,
            use_image_if_no_bounding_boxes=True)
        # Crop the image to the sampled bounding box.
        cropped_image = tf.slice(image, bbox_begin, bbox_size)
        return cropped_image, distort_bbox
|
def flow_coef_bd(CIJ):
    '''Computes the flow coefficient for each node and averaged over the
    network, as described in Honey et al. (2007) PNAS. The flow coefficient
    is similar to betweenness centrality, but works on a local
    neighborhood. It is mathematically related to the clustering
    coefficient (cc) at each node as, fc + cc <= 1.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed connection matrix

    Returns
    -------
    fc : Nx1 np.ndarray
        flow coefficient for each node
    FC : float
        average flow coefficient over the network
    total_flo : Nx1 np.ndarray
        number of paths that "flow" across each central node
    '''
    n_nodes = len(CIJ)
    fc = np.zeros((n_nodes,))
    total_flo = np.zeros((n_nodes,))
    max_flo = np.zeros((n_nodes,))
    for v in range(n_nodes):
        # Neighbors of v via incoming OR outgoing connections.
        neighbors, = np.where(CIJ[v, :] + CIJ[:, v].T)
        fc[v] = 0
        # NOTE: `np.where(neighbors)` counts the *nonzero* neighbor
        # indices, so a lone neighbor at index 0 is skipped; preserved
        # from the original (the result is identical since a single
        # neighbor yields max_flo == 0 and thus fc == 0 anyway).
        if np.where(neighbors)[0].size:
            k = len(neighbors)
            flo_mat = -CIJ[np.ix_(neighbors, neighbors)]
            for a in range(k):
                for b in range(k):
                    # A path a -> v -> b "flows" across the central node v.
                    if CIJ[neighbors[a], v] and CIJ[v, neighbors[b]]:
                        flo_mat[a, b] += 1
            # Count off-diagonal entries that became exactly 1: flow paths
            # with no direct a -> b shortcut.
            total_flo[v] = np.sum((flo_mat == 1) * np.logical_not(np.eye(k)))
            max_flo[v] = k * k - k
            fc[v] = total_flo[v] / max_flo[v]
    # Isolated or degenerate neighborhoods produce nan (0/0): zero them.
    fc[np.isnan(fc)] = 0
    FC = np.mean(fc)
    return fc, FC, total_flo
|
def betting_market_group_update(self, betting_market_group_id, description=None, event_id=None, rules_id=None, status=None, account=None, **kwargs):
    """Update a betting market group. This needs to be **proposed**.

    :param str betting_market_group_id: Id of the betting market group
        to update
    :param list description: Internationalized list of descriptions
    :param str event_id: Event ID to associate this group with
    :param str rules_id: Rule ID to associate this group with
    :param str status: New status
    :param str account: (optional) the account to allow access
        to (defaults to ``default_account``)
    :param kwargs: forwarded to ``finalizeOp`` (e.g. ``append_to`` for
        proposal buffers)
    """
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)
    bmg = BettingMarketGroup(betting_market_group_id)
    # Do not try to update the status if it wouldn't change it on the chain
    if bmg["status"] == status:
        status = None
    op_data = {
        "fee": {"amount": 0, "asset_id": "1.3.0"},
        "betting_market_group_id": bmg["id"],
        "prefix": self.prefix,
    }
    if event_id:
        if event_id[0] == "1":
            # Object id on-chain: test that the object actually exists
            Event(event_id)
        else:
            # Relative id: test that the object is proposed in the buffer
            test_proposal_in_buffer(kwargs.get("append_to", self.propbuffer), "event_create", event_id)
        op_data.update({"new_event_id": event_id})
    if rules_id:
        if rules_id[0] == "1":
            # Object id on-chain: test that the object actually exists
            Rule(rules_id)
        else:
            # Relative id: test that the object is proposed in the buffer
            test_proposal_in_buffer(kwargs.get("append_to", self.propbuffer), "betting_market_rules_create", rules_id, )
        op_data.update({"new_rules_id": rules_id})
    if description:
        op_data.update({"new_description": description})
    if status:
        op_data.update({"status": status})
    op = operations.Betting_market_group_update(**op_data)
    return self.finalizeOp(op, account["name"], "active", **kwargs)
|
def rotate(image, angle, interpolation=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REFLECT, borderValue=0):
    '''Rotate a 2d image array by *angle* degrees about its center.

    :param image: 2d array (rows, cols); color images are not supported
        because the shape is unpacked into exactly two values
    :param angle: rotation angle in degrees (counter-clockwise)
    :return: rotated array of the same shape as *image*
    '''
    s0, s1 = image.shape
    # cv2.getRotationMatrix2D expects the center as (x, y) == (col, row),
    # and cv2.warpAffine expects dsize as (width, height) == (cols, rows).
    # The original passed (row, col) / image.shape, which is only correct
    # for square images.
    image_center = (s1 - 1) / 2., (s0 - 1) / 2.
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, (s1, s0), flags=interpolation, borderMode=borderMode, borderValue=borderValue)
    return result
|
def is_valid_rgb_color(value):
    """Return True when *value* is a valid rgb or rgba color string.

    Valid colors look like:
      - rgb(255, 255, 255)
      - rgba(23, 34, 45, .5)
    """
    # Empty / None values can never be a color
    if not value:
        return False
    return re.match(RGB_COLOR_REGEX, value) is not None
|
def batch_delete_intents(self, parent, intents, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
    """Deletes intents in the specified agent.

    Operation <response: ``google.protobuf.Empty``>

    Example:
        >>> import dialogflow_v2
        >>> client = dialogflow_v2.IntentsClient()
        >>> parent = client.project_agent_path('[PROJECT]')
        >>> intents = []
        >>> response = client.batch_delete_intents(parent, intents)

    Args:
        parent (str): Required. The name of the agent to delete all entities
            types for. Format: ``projects/<Project ID>/agent``.
        intents (list[Union[dict, ~google.cloud.dialogflow_v2.types.Intent]]):
            Required. The collection of intents to delete. Only intent
            ``name`` must be filled in. A dict must have the same form as the
            :class:`~google.cloud.dialogflow_v2.types.Intent` protobuf
            message.
        retry (Optional[google.api_core.retry.Retry]): A retry object used to
            retry requests. If ``None`` is specified, requests will not be
            retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait for
            the request to complete. Applies per attempt when ``retry`` is
            set.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.dialogflow_v2.types._OperationFuture`
        instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed
            for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to a
            retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the raw transport method with retry/timeout handling the
    # first time this RPC is used, then cache the wrapper.
    api_call = self._inner_api_calls.get('batch_delete_intents')
    if api_call is None:
        api_call = google.api_core.gapic_v1.method.wrap_method(self.transport.batch_delete_intents, default_retry=self._method_configs['BatchDeleteIntents'].retry, default_timeout=self._method_configs['BatchDeleteIntents'].timeout, client_info=self._client_info, )
        self._inner_api_calls['batch_delete_intents'] = api_call
    request = intent_pb2.BatchDeleteIntentsRequest(parent=parent, intents=intents, )
    operation = api_call(request, retry=retry, timeout=timeout, metadata=metadata)
    # Wrap the raw long-running operation in a future that resolves to Empty.
    return google.api_core.operation.from_gapic(operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=struct_pb2.Struct, )
|
def watch():
    """Regenerate documentation when it changes."""
    # Start with a clean build (plumbum-style command; -E forces a full
    # rebuild ignoring the saved environment)
    sphinx_build['-b', 'html', '-E', 'docs', 'docs/_build/html'] & FG
    # Rebuild whenever a .rst or .py file changes; skip the build output and
    # tox directories, and drop events that arrive while a build is running
    handler = ShellCommandTrick(shell_command='sphinx-build -b html docs docs/_build/html', patterns=['*.rst', '*.py'], ignore_patterns=['_build/*'], ignore_directories=['.tox'], drop_during_process=True)
    observer = Observer()
    # Watch the current directory recursively and dispatch to the handler
    observe_with(observer, handler, pathnames=['.'], recursive=True)
|
def check_resource(resource):
    '''Check a resource availability against a linkchecker backend.

    The linkchecker used can be configured on a resource basis by setting
    the `resource.extras['check:checker']` attribute with a key that points
    to a valid `udata.linkcheckers` entrypoint. If not set, it will
    fall back on the default linkchecker defined by the configuration
    variable `LINKCHECKING_DEFAULT_LINKCHECKER`.

    Returns
    -------
    dict or (dict, int)
        Check results dict and status code (if error).
    '''
    checker_key = resource.extras.get('check:checker')
    LinkChecker = get_linkchecker(checker_key)
    if not LinkChecker:
        return {'error': 'No linkchecker configured.'}, 503
    if is_ignored(resource):
        return dummy_check_response()

    result = LinkChecker().check(resource)
    if not result:
        return {'error': 'No response from linkchecker'}, 503
    if result.get('check:error'):
        return {'error': result['check:error']}, 500
    if not result.get('check:status'):
        return {'error': 'No status in response from linkchecker'}, 503

    # Store the check result in the resource's extras.
    # XXX maybe this logic should be in the `Resource` model?
    previous_status = resource.extras.get('check:available')
    resource.extras.update(_get_check_keys(result, resource, previous_status))
    # Prevent the post_save signal from triggering on the dataset
    resource.save(signal_kwargs={'ignores': ['post_save']})
    return result
|
def head(self, path, query=None, data=None, redirects=True):
    """HEAD request wrapper for :func:`request()`.

    The *data* argument is accepted for signature parity with the other
    verb wrappers but is never forwarded: HEAD requests carry no body.
    """
    return self.request('HEAD', path, query, None, redirects)
|
def create_access_key(self, name, is_active=True, permitted=None, options=None):
    """Creates a new access key. A master key must be set first.

    :param name: the name of the access key to create
    :param is_active: Boolean value dictating whether this key is currently
        active (default True)
    :param permitted: list of strings describing which operation types this
        key will permit. Legal values include "writes", "queries",
        "saved_queries", "cached_queries", "datasets", and "schema".
        Defaults to an empty list.
    :param options: dictionary containing more details about the key's
        permitted and restricted functionality. Defaults to an empty dict.
    """
    # FIX: the original used mutable default arguments ([] and {}), which
    # are shared across calls; use None sentinels and normalize back to the
    # empty containers the underlying API expects.
    permitted = [] if permitted is None else permitted
    options = {} if options is None else options
    return self.api.create_access_key(name=name, is_active=is_active, permitted=permitted, options=options)
|
def get_default_config(self):
    """Return the default collector settings, with the collection path
    pinned to 'vmsfs'."""
    config = super(VMSFSCollector, self).get_default_config()
    # Override only the path; everything else comes from the base collector.
    config['path'] = 'vmsfs'
    return config
|
def process_request(self, unused_request):
    """Called by Django before deciding which view to execute.

    Installs a fresh tasklet context for the incoming request (presumably
    the GAE ndb request-local context -- confirm against the surrounding
    middleware).
    """
    # Compare to the first half of toplevel() in context.py.
    tasklets._state.clear_all_pending()
    # Create and install a new context.
    ctx = tasklets.make_default_context()
    tasklets.set_context(ctx)
|
def notch(ts, freq_hz, bandwidth_hz=1.0):
    """Notch filter to remove a particular frequency.

    Adapted from code by Sturla Molden.

    :param ts: timeseries array (1d or 2d, samples x channels); must expose
        ``tspan`` and ``labels`` attributes (project ``Timeseries`` type)
    :param freq_hz: center frequency to remove, in Hz
    :param bandwidth_hz: width of the notch, in Hz (default 1.0)
    :return: filtered ``Timeseries`` with the same shape as the input
    :raises ValueError: if the resulting filter would be unstable
    """
    orig_ndim = ts.ndim
    # FIX: the original used `ts.ndim is 1`, comparing ints by identity
    # (a CPython accident and a SyntaxWarning on 3.8+); use == instead.
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
    channels = ts.shape[1]
    # sample rate recovered from the time span of the series
    fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
    nyq = 0.5 * fs
    # normalize frequencies by the Nyquist rate
    freq = freq_hz / nyq
    bandwidth = bandwidth_hz / nyq
    R = 1.0 - 3.0 * (bandwidth / 2.0)
    K = ((1.0 - 2.0 * R * np.cos(np.pi * freq) + R ** 2) /
         (2.0 - 2.0 * np.cos(np.pi * freq)))
    # second-order IIR notch coefficients
    b, a = np.zeros(3), np.zeros(3)
    a[0] = 1.0
    a[1] = -2.0 * R * np.cos(np.pi * freq)
    a[2] = R ** 2
    b[0] = K
    b[1] = -2 * K * np.cos(np.pi * freq)
    b[2] = K
    # all poles must lie inside the unit circle for stability
    if not np.all(np.abs(np.roots(a)) < 1.0):
        raise ValueError('Filter will not be stable with these values.')
    dtype = ts.dtype
    output = np.zeros((len(ts), channels), dtype)
    # zero-phase filtering, channel by channel
    for i in range(channels):
        output[:, i] = signal.filtfilt(b, a, ts[:, i])
    if orig_ndim == 1:
        output = output[:, 0]
    return Timeseries(output, ts.tspan, labels=ts.labels)
|
def grid_widgets(self):
    """Configure all widgets using the grid geometry manager.

    Automatically called by the :meth:`__init__` method. Does not have to
    be called by the user except in extraordinary cases.
    """
    # Categories: one label per row down the left-hand column; only the
    # first row gets a 1px top pad so labels align with the canvas rows
    for index, label in enumerate(self._category_labels.values()):
        label.grid(column=0, row=index, padx=5, sticky="nw", pady=(1, 0) if index == 0 else 0)
    # Canvas widgets: scrolling canvas, tick bar and horizontal scrollbar
    # stacked vertically in the central column
    self._canvas_scroll.grid(column=1, row=0, padx=(0, 5), pady=5, sticky="nswe")
    self._canvas_ticks.grid(column=1, row=1, padx=(0, 5), pady=(0, 5), sticky="nswe")
    self._scrollbar_timeline.grid(column=1, row=2, padx=(0, 5), pady=(0, 5), sticky="we")
    # Zoom widgets: three buttons stacked inside the zoom frame
    self._button_zoom_in.grid(row=0, column=0, pady=5, sticky="nswe")
    self._button_zoom_out.grid(row=1, column=0, pady=(0, 5), sticky="nswe")
    self._button_zoom_reset.grid(row=2, column=0, pady=(0, 5), sticky="nswe")
    # Frames: categories canvas on the left, vertical scrollbar and the
    # zoom frame on the right-hand side
    self._canvas_categories.grid(column=0, row=0, padx=5, pady=5, sticky="nswe")
    self._scrollbar_vertical.grid(column=2, row=0, pady=5, padx=(0, 5), sticky="ns")
    self._frame_zoom.grid(column=3, row=0, rowspan=2, padx=(0, 5), pady=5, sticky="nswe")
|
def count_features_of_type(self, featuretype=None):
    """Simple count of features.

    Can be faster than "grep", and is faster than checking the length of
    results from :meth:`gffutils.FeatureDB.features_of_type`.

    Parameters
    ----------
    featuretype : string
        Feature type (e.g., "gene") to count. If None, then count *all*
        features in the database.

    Returns
    -------
    The number of features of this type, as an integer
    """
    cursor = self.conn.cursor()
    if featuretype is None:
        # No filter: count every row in the features table
        cursor.execute('''
            SELECT count() FROM features
            ''')
    else:
        cursor.execute('''
            SELECT count() FROM features
            WHERE featuretype = ?
            ''', (featuretype,))
    row = cursor.fetchone()
    return row[0] if row is not None else None
|
def complete_opt_display(self, text, *_):
    """Autocomplete for the display option.

    Returns every known display name starting with *text*, each with a
    trailing space appended so the shell moves on to the next token.
    """
    matches = [candidate for candidate in DISPLAYS if candidate.startswith(text)]
    return [candidate + " " for candidate in matches]
|
def stop(self):
    """Stop the sensor.

    Unregisters the image subscribers and disconnects from the camera.
    Returns False (after a warning) when the sensor was not running,
    True otherwise.
    """
    # Nothing to do when the sensor was never started
    if not self._running:
        logging.warning('PhoXi not running. Aborting stop')
        return False
    # Stop all image-topic subscribers
    for subscriber in (self._color_im_sub, self._depth_im_sub, self._normal_map_sub):
        subscriber.unregister()
    # Disconnect from the camera via the ROS service
    rospy.ServiceProxy('phoxi_camera/disconnect_camera', Empty)()
    self._running = False
    return True
|
def get_timezones():
    """Get the supported timezones.

    Walks the zoneinfo directory and collects every file whose first four
    bytes are the ``TZif`` magic, skipping known-invalid zone names.
    (The original docstring advertised a ``fresh`` caching parameter that
    never existed; no caching is performed.)

    :rtype: tuple
    """
    base_dir = _DIRECTORY
    # build a list: the original grew a tuple one element at a time, which
    # is O(n^2) in the number of zones
    zones = []
    for root, dirs, files in os.walk(base_dir):
        for basename in files:
            path = os.path.join(root, basename)
            # defensive: skip anything that resolves to a directory
            # (e.g. a symlinked directory reported among files)
            if os.path.isdir(path):
                continue
            zone = os.path.relpath(path, base_dir)
            if zone in INVALID_ZONES:
                continue
            with open(path, 'rb') as fd:
                # real timezone files start with the "TZif" magic bytes
                if fd.read(4) == b'TZif':
                    zones.append(zone)
    return tuple(sorted(zones))
|
def add_string(self, string):
    """Append *string* to the working string, grow the stored length
    accordingly, and reset the end-of-string flag."""
    self.string = self.string + string
    self.length = self.length + len(string)
    self.eos = 0
|
def get(self, singleSnapshot=False):
    """*generate the pyephem positions*

    **Key Arguments:**
        - ``singleSnapshot`` -- just extract positions for a single pyephem
          snapshot (used for unit testing)

    **Return:**
        - ``None``
    """
    self.log.info('starting the ``get`` method')
    global xephemOE
    global tileSide
    global magLimit
    # GRAB PARAMETERS FROM SETTINGS FILE
    tileSide = float(self.settings["pyephem"]["atlas exposure match side"])
    magLimit = float(self.settings["pyephem"]["magnitude limit"])

    snapshotsRequired = 1
    while snapshotsRequired > 0:
        nextMjds, exposures, snapshotsRequired = self._get_exposures_requiring_pyephem_positions(concurrentSnapshots=int(self.settings["pyephem"]["batch size"]))
        # FIX: parenthesized so this parses on both Python 2 and 3
        # (the original used a Python-2-only print statement)
        print("There are currently %(snapshotsRequired)s more pyephem snapshots required " % locals())
        if snapshotsRequired == 0:
            return
        # lazily load the orbital elements once, into the module global
        if len(xephemOE) == 0:
            xephemOE = self._get_xephem_orbital_elements()
        # DEFINE AN INPUT ARRAY
        # NOTE(review): magLimit is reassigned here *without* float(),
        # unlike the initial assignment above -- confirm intentional
        magLimit = self.settings["pyephem"]["magnitude limit"]
        pyephemDB = fmultiprocess(log=self.log, function=_generate_pyephem_snapshot, timeout=300, inputArray=nextMjds, magLimit=magLimit)
        matchedObjects = []
        for p, e, m in zip(pyephemDB, exposures, nextMjds):
            matchedObjects.append(self._match_pyephem_snapshot_to_atlas_exposures(p, e, m))
        self._add_matched_objects_to_database(matchedObjects)
        self._update_database_flag(exposures)
        # in test mode, bail out after a single batch
        if singleSnapshot:
            snapshotsRequired = 0

    self.log.info('completed the ``get`` method')
    return None
|
def create(cls, name, ipv4_network=None, ipv6_network=None, comment=None):
    """Create the network element.

    :param str name: Name of element
    :param str ipv4_network: network cidr (optional if ipv6)
    :param str ipv6_network: network cidr (optional if ipv4)
    :param str comment: comment (optional)
    :raises CreateElementFailed: element creation failed with reason
    :return: instance with meta
    :rtype: Network

    .. note:: Either an ipv4_network or ipv6_network must be specified
    """
    # Normalize falsy values (empty string, etc.) to None for the API;
    # local renamed from `json` to avoid shadowing the stdlib module name.
    payload = {
        'name': name,
        'ipv4_network': ipv4_network or None,
        'ipv6_network': ipv6_network or None,
        'comment': comment,
    }
    return ElementCreator(cls, payload)
|
def add_view_menu(self, name):
    """Adds a view or menu to the backend model view_menu.

    :param name: name of the view menu to add
    :return: the existing or newly created view-menu record
    """
    view_menu = self.find_view_menu(name)
    if view_menu is None:
        try:
            view_menu = self.viewmenu_model(name=name)
            view_menu.save()
            return view_menu
        except Exception as e:
            # Broad catch: creation failures are logged, not raised.
            log.error(c.LOGMSG_ERR_SEC_ADD_VIEWMENU.format(str(e)))
    # NOTE(review): if save() raised above, this returns the
    # constructed-but-unsaved model instance (or None if construction
    # itself failed) -- confirm callers expect that.
    return view_menu
|
def simple_spool_transaction(self, from_address, to, op_return, min_confirmations=6):
    """Utility function to create the spool transactions. Selects the inputs,
    encodes the op_return and constructs the transaction.

    Args:
        from_address (str): Address originating the transaction
        to (str): list of addresses to receive tokens (file_hash,
            file_hash_metadata, ...)
        op_return (str): String representation of the spoolverb, as returned
            by the properties of Spoolverb
        min_confirmations (int): Number of confirmations when choosing the
            inputs of the transaction. Defaults to 6

    Returns:
        str: unsigned transaction
    """
    # one token output per destination address
    ntokens = len(to)
    # number of fee-sized inputs needed to cover the estimated fee
    # (old_div is presumably the Python-2-style floor division helper --
    # confirm against the file's imports)
    nfees = old_div(self._t.estimate_fee(ntokens, 2), self.fee)
    inputs = self.select_inputs(from_address, nfees, ntokens, min_confirmations=min_confirmations)
    # outputs: one token per destination plus a zero-value OP_RETURN
    # output carrying the encoded spoolverb payload
    outputs = [{'address': to_address, 'value': self.token} for to_address in to]
    outputs += [{'script': self._t._op_return_hex(op_return), 'value': 0}]
    # build the unsigned transaction
    unsigned_tx = self._t.build_transaction(inputs, outputs)
    return unsigned_tx
|
def _forgiving_issubclass(derived_class, base_class):
    """Forgiving version of ``issubclass``.

    Returns False (instead of raising) when either argument is not a class
    object as judged by ``ClassType``.
    """
    if type(derived_class) is not ClassType:
        return False
    if type(base_class) is not ClassType:
        return False
    return issubclass(derived_class, base_class)
|
def get_registers(self, cpu_id):
    """Gets all the registers for the given CPU.

    in cpu_id of type int
        The identifier of the Virtual CPU.

    out names of type str
        Array containing the lowercase register names.

    out values of type str
        Array parallel to the names holding the register values as if the
        register was returned by :py:func:`IMachineDebugger.get_register`.
    """
    # Validate the argument type before crossing the API boundary.
    if not isinstance(cpu_id, baseinteger):
        raise TypeError("cpu_id can only be an instance of type baseinteger")
    names, values = self._call("getRegisters", in_p=[cpu_id])
    return (names, values)
|
def __findout_range(self, name, decl_type, recursive):
    """Implementation details: return the smallest candidate set of
    declarations for a query, using the optimizer's precomputed indices
    when available.

    :param name: declaration name to match (full or partial)
    :param decl_type: declaration class to filter on (or None)
    :param recursive: whether nested scopes are searched as well
    """
    if not self._optimized:
        # No indices available -- fall back to scanning the declarations.
        self._logger.debug('running non optimized query - optimization has not been done')
        decls = self.declarations
        if recursive:
            decls = make_flatten(self.declarations)
        if decl_type:
            decls = [d for d in decls if isinstance(d, decl_type)]
        return decls
    if name and templates.is_instantiation(name):
        # templates has tricky mode to compare them, so lets check the
        # whole range
        name = None
    if name and decl_type:
        impl_match = scopedef_t._impl_matchers[scopedef_t.decl](name=name)
        if impl_match.is_full_name():
            # index keys are unqualified names
            name = impl_match.decl_name_only
        if recursive:
            self._logger.debug('query has been optimized on type and name')
            return self._type2name2decls[decl_type].get(name, [])
        self._logger.debug('non recursive query has been optimized on type and name')
        return self._type2name2decls_nr[decl_type].get(name, [])
    elif decl_type:
        if recursive:
            self._logger.debug('query has been optimized on type')
            return self._type2decls[decl_type]
        self._logger.debug('non recursive query has been optimized on type')
        return self._type2decls_nr[decl_type]
    else:
        # nothing to narrow on: return everything
        if recursive:
            self._logger.debug(('query has not been optimized ( hint: query does not ' + 'contain type and/or name )'))
            return self._all_decls
        self._logger.debug(('non recursive query has not been optimized ( hint: ' + 'query does not contain type and/or name )'))
        return self._all_decls_not_recursive
|
def get_available_languages(self, obj, formset):
    """Fetch the available inline language codes as a queryset."""
    root_model = self.model._parler_meta.root_model
    if not obj:
        return root_model.objects.none()
    # Inlines dictate the language code, not the parent model. Hence, not
    # looking at obj.get_available_languages(), but at the languages used
    # by the inline objects that point to the parent.
    lookup = {'master__{0}'.format(formset.fk.name): obj}
    return (root_model.objects.using(obj._state.db).filter(**lookup)
            .values_list('language_code', flat=True)
            .distinct().order_by('language_code'))
|
def load_mirteFile(path, m, logger=None):
    """Loads the mirte-file at <path> into the manager <m>."""
    log = logger if logger is not None else logging.getLogger('load_mirteFile')
    seen_names = set()
    for name, fpath, d in walk_mirteFiles(path, logger):
        real_path = os.path.realpath(fpath)
        # skip files the manager has already loaded
        if real_path in m.loaded_mirteFiles:
            continue
        # log by name the first time, by path on duplicate names
        if name in seen_names:
            identifier = fpath
        else:
            seen_names.add(name)
            identifier = name
        log.info('loading %s' % identifier)
        m.loaded_mirteFiles.add(real_path)
        _load_mirteFile(d, m)
|
def adjustButtons(self):
    """Adjusts the placement of the buttons for this line edit.

    Buttons are sized to squares matching the widget height, then packed
    inward from the left and right edges respectively.
    """
    y = 1  # 1px top offset so buttons sit inside the widget border
    for btn in self.buttons():
        btn.setIconSize(self.iconSize())
        # square buttons, 1px smaller than the widget on each side
        btn.setFixedSize(QSize(self.height() - 2, self.height() - 2))
    # adjust the location for the left buttons, left-to-right
    left_buttons = self._buttons.get(Qt.AlignLeft, [])
    # NOTE(review): cornerRadius()/2.0 is a float and is passed to move();
    # presumably fine under the Qt binding in use -- verify on Python 3
    x = (self.cornerRadius() / 2.0) + 2
    for btn in left_buttons:
        btn.move(x, y)
        x += btn.width()
    # adjust the location for the right buttons, packed from the right edge
    right_buttons = self._buttons.get(Qt.AlignRight, [])
    w = self.width()
    bwidth = sum([btn.width() for btn in right_buttons])
    bwidth += (self.cornerRadius() / 2.0) + 1
    for btn in right_buttons:
        btn.move(w - bwidth, y)
        bwidth -= btn.width()
    # cache total button width and push the text margins out of the way
    self._buttonWidth = sum([btn.width() for btn in self.buttons()])
    self.adjustTextMargins()
|
def list(self, request, *args, **kwargs):
    """To get an actual value for object quotas limit and usage issue a
    **GET** request against */api/<objects>/*.

    To get all quotas visible to the user issue a **GET** request against
    */api/quotas/*.
    """
    # Pure delegation to the base viewset; overridden only so the
    # docstring above can document the endpoint.
    return super(QuotaViewSet, self).list(request, *args, **kwargs)
|
def allclose(a, b, align=False, rtol=1.e-5, atol=1.e-8):
    """Compare two molecules for numerical equality.

    Args:
        a (Cartesian):
        b (Cartesian):
        align (bool): a and b are prealigned along their principal axes of
            inertia and moved to their barycenters before comparing.
        rtol (float): Relative tolerance for the numerical equality
            comparison; look into :func:`numpy.allclose` for further
            explanation.
        atol (float): Absolute tolerance for the numerical equality
            comparison; look into :func:`numpy.allclose` for further
            explanation.

    Returns:
        bool:
    """
    # FIX: np.alltrue was deprecated and removed in NumPy 2.0;
    # np.all is the documented drop-in replacement.
    return np.all(isclose(a, b, align=align, rtol=rtol, atol=atol))
|
def force_unroll_loops(self, max_loop_unrolling_times):
    """Unroll loops globally. The resulting CFG does not contain any loop,
    but this method is slow on large graphs.

    :param int max_loop_unrolling_times: The maximum iterations of unrolling.
    :return: None
    """
    if not isinstance(max_loop_unrolling_times, int) or max_loop_unrolling_times < 0:
        raise AngrCFGError('Max loop unrolling times must be set to an integer greater than or equal to 0 if ' + 'loop unrolling is enabled.')
    # Traverse the CFG and try to find the beginning of loops
    loop_backedges = []
    start = self._starts[0]
    if isinstance(start, tuple):
        start, _ = start  # pylint: disable=unpacking-non-sequence
    start_node = self.get_any_node(start)
    if start_node is None:
        raise AngrCFGError('Cannot find start node when trying to unroll loops. The CFG might be empty.')
    # Work on a copy so the original graph is untouched until the end
    graph_copy = networkx.DiGraph(self.graph)
    while True:
        # Pull one remaining cycle per iteration; done when none are left
        cycles_iter = networkx.simple_cycles(graph_copy)
        try:
            cycle = next(cycles_iter)
        except StopIteration:
            break
        # The back edge is the edge entering the first cycle node reached
        # by a DFS preorder walk from the start node
        loop_backedge = (None, None)
        for n in networkx.dfs_preorder_nodes(graph_copy, source=start_node):
            if n in cycle:
                idx = cycle.index(n)
                if idx == 0:
                    loop_backedge = (cycle[-1], cycle[idx])
                else:
                    loop_backedge = (cycle[idx - 1], cycle[idx])
                break
        if loop_backedge not in loop_backedges:
            loop_backedges.append(loop_backedge)
        # Create a common end node for all nodes whose out_degree is 0
        end_nodes = [n for n in graph_copy.nodes() if graph_copy.out_degree(n) == 0]
        new_end_node = "end_node"
        if not end_nodes:
            # No sink exists -- we gotta randomly break a loop to create one
            cycles = sorted(networkx.simple_cycles(graph_copy), key=len)
            first_cycle = cycles[0]
            if len(first_cycle) == 1:
                graph_copy.remove_edge(first_cycle[0], first_cycle[0])
            else:
                graph_copy.remove_edge(first_cycle[0], first_cycle[1])
            end_nodes = [n for n in graph_copy.nodes() if graph_copy.out_degree(n) == 0]
        for en in end_nodes:
            graph_copy.add_edge(en, new_end_node)
        # (A postdominator-based computation of full loop bodies was left
        # commented out in the original here -- the unrolling below only
        # duplicates the back-edge target. TODO: Finish the implementation)
        graph_copy.remove_node(new_end_node)
        src, dst = loop_backedge
        if graph_copy.has_edge(src, dst):  # It might have been removed before
            # Duplicate the dst node with an incremented unroll counter
            new_dst = dst.copy()
            new_dst.looping_times = dst.looping_times + 1
            if (new_dst not in graph_copy and
                    # If the new_dst is already in the graph, we don't want to
                    # keep unrolling this loop anymore since it may *create* a
                    # new loop. Of course we will lose some edges this way,
                    # but in general it is acceptable.
                    new_dst.looping_times <= max_loop_unrolling_times):
                # Log all successors of the dst node
                dst_successors = list(graph_copy.successors(dst))
                # Add new_dst to the graph
                edge_data = graph_copy.get_edge_data(src, dst)
                graph_copy.add_edge(src, new_dst, **edge_data)
                for ds in dst_successors:
                    # only re-wire exits that leave the cycle and are not
                    # themselves unrolled copies
                    if ds.looping_times == 0 and ds not in cycle:
                        edge_data = graph_copy.get_edge_data(dst, ds)
                        graph_copy.add_edge(new_dst, ds, **edge_data)
            # Remove the original back edge
            graph_copy.remove_edge(src, dst)
    # Update loop backedges and publish the unrolled graph
    self._loop_back_edges = loop_backedges
    self.model.graph = graph_copy
|
def exists_table(self, name, database=None):
    """Determine if the indicated table or view exists.

    Parameters
    ----------
    name : string
    database : string, default None

    Returns
    -------
    if_exists : boolean
    """
    matching = self.list_tables(like=name, database=database)
    return bool(matching)
|
def trailing_stop_loss_replace(self, accountID, orderID, **kwargs):
    """Shortcut to replace a pending Trailing Stop Loss Order in an Account.

    Args:
        accountID: The ID of the Account
        orderID: The ID of the Trailing Stop Loss Order to replace
        kwargs: The arguments to create a TrailingStopLossOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    replacement = TrailingStopLossOrderRequest(**kwargs)
    return self.replace(accountID, orderID, order=replacement)
|
def build_freeform(self, start_x=0, start_y=0, scale=1.0):
    """Return |FreeformBuilder| object to specify a freeform shape.

    The optional *start_x* and *start_y* arguments specify the starting pen
    position in local coordinates. They will be rounded to the nearest
    integer before use and each default to zero.

    The optional *scale* argument specifies the size of local coordinates
    proportional to slide coordinates (EMU). If the vertical scale differs
    from the horizontal scale (local coordinate units are "rectangular"), a
    pair of numeric values can be provided as the *scale* argument, e.g.
    `scale=(1.0, 2.0)` -- first the horizontal (X) scale, then the vertical
    (Y) scale.

    A convenient way to calculate a scale is to divide a |Length| object by
    an equivalent count of local coordinate units, e.g.
    `scale=Inches(1)/1000` for 1000 local units per inch.
    """
    # A scalar raises TypeError on unpacking, in which case it is used for
    # both axes; a 2-item value supplies (x, y) scales separately.
    try:
        sx, sy = scale
    except TypeError:
        sx = sy = scale
    return FreeformBuilder.new(self, start_x, start_y, sx, sy)
|
def fromarray(values, labels=None, npartitions=None, engine=None):
    """Load images from an array.

    First dimension will be used to index images, so remaining dimensions
    after the first should be the dimensions of the images, e.g.
    (3, 100, 200) for 3 x (100, 200) images.

    Parameters
    ----------
    values : array-like
        The array of images. Can be a numpy array, a bolt array, or an
        array-like.
    labels : array, optional, default = None
        Labels for records. If provided, should be one-dimensional.
    npartitions : int, default = None
        Number of partitions for parallelization (spark only)
    engine : object, default = None
        Computational engine (e.g. a SparkContext for spark)
    """
    from .images import Images
    import bolt
    # already a distributed bolt array: wrap it directly
    if isinstance(values, bolt.spark.array.BoltArraySpark):
        return Images(values)
    values = asarray(values)
    if values.ndim < 2:
        raise ValueError('Array for images must have at least 2 dimensions, got %g' % values.ndim)
    # a single 2d image becomes a stack of one
    if values.ndim == 2:
        values = expand_dims(values, 0)
    # validate that every image shares the first image's shape and dtype
    shape = None
    dtype = None
    for im in values:
        if shape is None:
            shape = im.shape
            dtype = im.dtype
        if not im.shape == shape:
            raise ValueError('Arrays must all be of same shape; got both %s and %s' % (str(shape), str(im.shape)))
        if not im.dtype == dtype:
            raise ValueError('Arrays must all be of same data type; got both %s and %s' % (str(dtype), str(im.dtype)))
    if spark and isinstance(engine, spark):
        if not npartitions:
            npartitions = engine.defaultParallelism
        # distribute along the image axis
        values = bolt.array(values, context=engine, npartitions=npartitions, axis=(0,))
        values._ordered = True
        # NOTE(review): labels are not forwarded on the spark path --
        # confirm whether that is intentional
        return Images(values)
    return Images(values, labels=labels)
|
def get_owner_access_token(self):
    """Return workflow owner access token.

    Looks up the owner ``User`` row in the same database session this
    object is attached to.

    NOTE(review): if no user matches ``owner_id``, ``first()`` returns
    None and the attribute access below raises AttributeError -- confirm
    that cannot happen in practice.
    """
    from .database import Session
    db_session = Session.object_session(self)
    owner = db_session.query(User).filter_by(id_=self.owner_id).first()
    return owner.access_token
|
def apply_transformation(self, structure, return_ranked_list=False):
    """Apply the partial site-removal transformation to a structure.

    Args:
        structure: input structure
        return_ranked_list (bool): Whether or not multiple structures are
            returned. If return_ranked_list is a number, that number of
            structures is returned.

    Returns:
        Depending on return_ranked_list, either a transformed structure or
        a list of dictionaries, where each dictionary is of the form
        {"structure" = ...., "other_arguments"}; the key "transformation"
        is reserved for the transformation that was actually applied to the
        structure. This is parsed by the alchemy classes for generating a
        more specific transformation history; any other information is
        stored in the transformation_parameters dictionary in the
        transmuted structure class.

    Raises:
        ValueError: if a removal fraction does not correspond to an integer
            number of sites, or ``self.algo`` is not a known algorithm.
    """
    # Map each tuple of candidate site indices to the integer number of
    # sites to remove from that group.
    num_remove_dict = {}
    total_combis = 0
    for indices, frac in zip(self.indices, self.fractions):
        num_to_remove = len(indices) * frac
        # The requested fraction must correspond to a whole number of
        # sites (within a small numerical tolerance).
        if abs(num_to_remove - int(round(num_to_remove))) > 1e-3:
            raise ValueError("Fraction to remove must be consistent with " "integer amounts in structure.")
        else:
            num_to_remove = int(round(num_to_remove))
        num_remove_dict[tuple(indices)] = num_to_remove
        n = len(indices)
        # Binomial coefficient C(n, num_to_remove): number of distinct
        # removal choices for this group.
        total_combis += int(round(math.factorial(n) / math.factorial(num_to_remove) / math.factorial(n - num_to_remove)))
    self.logger.debug("Total combinations = {}".format(total_combis))
    # return_ranked_list may be a bool or an int; non-numeric values fall
    # back to returning a single best structure.
    try:
        num_to_return = int(return_ranked_list)
    except ValueError:
        num_to_return = 1
    num_to_return = max(1, num_to_return)
    self.logger.debug("Will return {} best structures.".format(num_to_return))
    # Dispatch to the ordering algorithm selected at construction time.
    if self.algo == PartialRemoveSitesTransformation.ALGO_FAST:
        all_structures = self.fast_ordering(structure, num_remove_dict, num_to_return)
    elif self.algo == PartialRemoveSitesTransformation.ALGO_COMPLETE:
        all_structures = self.complete_ordering(structure, num_remove_dict)
    elif self.algo == PartialRemoveSitesTransformation.ALGO_BEST_FIRST:
        all_structures = self.best_first_ordering(structure, num_remove_dict)
    elif self.algo == PartialRemoveSitesTransformation.ALGO_ENUMERATE:
        all_structures = self.enumerate_ordering(structure)
    else:
        raise ValueError("Invalid algo.")
    opt_s = all_structures[0]["structure"]
    return opt_s if not return_ranked_list else all_structures[0:num_to_return]
|
import re
def lookup_string(target_str, search_str):
    """Locate a substring within a larger string and return its span.

    The target is escaped with ``re.escape`` so it is always matched
    literally, even if it contains regex metacharacters such as ``.``
    or ``*`` (the original code passed it to ``re.search`` unescaped).

    Sample Input: ('python', 'python programming language')
    Sample Output: (0, 6)

    Other examples:
        ('programming', 'python programming language') -> (7, 18)
        ('language', 'python programming language') -> (19, 27)

    Args:
        target_str: The substring for which to search.
        search_str: The string within which to search.

    Returns:
        A tuple with the start and end indices of the first occurrence.

    Raises:
        ValueError: if ``target_str`` does not occur in ``search_str``
            (the original implementation crashed with AttributeError).
    """
    result = re.search(re.escape(target_str), search_str)
    if result is None:
        raise ValueError("substring %r not found in %r" % (target_str, search_str))
    return result.start(), result.end()
|
def union(self, enumerable, key=lambda x: x):
    """Return a new Enumerable containing the union of the elements of
    self and the given enumerable.

    :param enumerable: enumerable to union self with
    :param key: key selector used to determine uniqueness
    :return: new Enumerable object
    """
    if not isinstance(enumerable, Enumerable3):
        raise TypeError(u"enumerable parameter must be an instance of Enumerable")
    # Union with an empty sequence is simply the other sequence.
    if self.count() == 0:
        return enumerable
    return self if enumerable.count() == 0 else self.concat(enumerable).distinct(key)
|
def policy_definition_delete(name, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Delete a policy definition.

    :param name: The name of the policy definition to delete.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_resource.policy_definition_delete testpolicy
    '''
    polconn = __utils__['azurearm.get_client']('policy', **kwargs)
    try:
        # pylint: disable=unused-variable
        policy = polconn.policy_definitions.delete(policy_definition_name=name)
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
        return False
    return True
|
def parse_next(self, ptype, m):
    """Dispatch an incoming KEXGSS packet to the matching handler.

    :param ptype: the type code of the incoming packet
    :param `.Message` m: the packet content
    """
    dispatch = {
        MSG_KEXGSS_GROUPREQ: self._parse_kexgss_groupreq,
        MSG_KEXGSS_GROUP: self._parse_kexgss_group,
        MSG_KEXGSS_INIT: self._parse_kexgss_gex_init,
        MSG_KEXGSS_HOSTKEY: self._parse_kexgss_hostkey,
        MSG_KEXGSS_CONTINUE: self._parse_kexgss_continue,
        MSG_KEXGSS_COMPLETE: self._parse_kexgss_complete,
        MSG_KEXGSS_ERROR: self._parse_kexgss_error,
    }
    handler = dispatch.get(ptype)
    if handler is None:
        msg = "KexGex asked to handle packet type {:d}"
        raise SSHException(msg.format(ptype))
    return handler(m)
|
def image(name, data, step=None, max_outputs=3, description=None):
    """Write an image summary.

    Arguments:
        name: A name for this summary. The summary tag used for TensorBoard
            will be this name prefixed by any active name scopes.
        data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
            where `k` is the number of images, `h` and `w` are the height
            and width of the images, and `c` is the number of channels,
            which should be 1, 2, 3, or 4 (grayscale, grayscale with alpha,
            RGB, RGBA). Any of the dimensions may be statically unknown
            (i.e., `None`). Floating point data will be clipped to the
            range [0,1).
        step: Explicit `int64`-castable monotonic step value for this
            summary. If omitted, this defaults to
            `tf.summary.experimental.get_step()`, which must not be None.
        max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
            many images will be emitted at each step. When more than
            `max_outputs` many images are provided, the first `max_outputs`
            many images will be used and the rest silently discarded.
        description: Optional long-form description for this summary, as a
            constant `str`. Markdown is supported. Defaults to empty.

    Returns:
        True on success, or false if no summary was emitted because no
        default summary writer was available.

    Raises:
        ValueError: if a default writer exists, but no step was provided
            and `tf.summary.experimental.get_step()` is None.
    """
    summary_metadata = metadata.create_summary_metadata(display_name=None, description=description)
    # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
    summary_scope = (getattr(tf.summary.experimental, 'summary_scope', None) or tf.summary.summary_scope)
    with summary_scope(name, 'image_summary', values=[data, max_outputs, step]) as (tag, _):
        # Graph-time validation: images must be rank 4, max_outputs >= 0.
        tf.debugging.assert_rank(data, 4)
        tf.debugging.assert_non_negative(max_outputs)
        # Saturate-cast pixel data to uint8 before PNG encoding.
        images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
        limited_images = images[:max_outputs]
        encoded_images = tf.map_fn(tf.image.encode_png, limited_images, dtype=tf.string, name='encode_each_image')
        # Workaround for map_fn returning float dtype for an empty elems input.
        encoded_images = tf.cond(tf.shape(input=encoded_images)[0] > 0, lambda: encoded_images, lambda: tf.constant([], tf.string))
        image_shape = tf.shape(input=images)
        # First two tensor elements are width and height (as strings); the
        # remaining elements are the PNG-encoded images.
        dimensions = tf.stack([tf.as_string(image_shape[2], name='width'), tf.as_string(image_shape[1], name='height')], name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.write(tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
|
def _extract_actions_unique_topics ( self , movement_counts , max_movements , cluster_topology , max_movement_size ) :
"""Extract actions limiting to given max value such that
the resultant has the minimum possible number of duplicate topics .
Algorithm :
1 . Group actions by by topic - name : { topic : action - list }
2 . Iterate through the dictionary in circular fashion and keep
extracting actions with until max _ partition _ movements
are reached .
: param movement _ counts : list of tuple ( ( topic , partition ) , movement count )
: param max _ movements : max number of movements to extract
: param cluster _ topology : cluster topology containing the new proposed assignment for the cluster
: param max _ movement _ size : maximum size of data to move at a time in extracted actions
: return : list of tuple ( topic , partitions ) to include in the reduced plan"""
|
# Group actions by topic
topic_actions = defaultdict ( list )
for t_p , replica_change_cnt in movement_counts :
topic_actions [ t_p [ 0 ] ] . append ( ( t_p , replica_change_cnt ) )
# Create reduced assignment minimizing duplication of topics
extracted_actions = [ ]
curr_movements = 0
curr_size = 0
action_available = True
while curr_movements < max_movements and curr_size <= max_movement_size and action_available :
action_available = False
for topic , actions in six . iteritems ( topic_actions ) :
for action in actions :
action_size = cluster_topology . partitions [ action [ 0 ] ] . size
if curr_movements + action [ 1 ] > max_movements or curr_size + action_size > max_movement_size : # Remove action since it won ' t be possible to use it
actions . remove ( action )
else : # Append ( topic , partition ) to the list of movements
action_available = True
extracted_actions . append ( action [ 0 ] )
curr_movements += action [ 1 ]
curr_size += action_size
actions . remove ( action )
break
return extracted_actions
|
def publish(self, topic, data, defer=None):
    """Publish a message to the given topic over http.

    :param topic: the topic to publish to
    :param data: bytestring data to publish
    :param defer: duration in milliseconds to defer before publishing
        (requires nsq 0.3.6)
    """
    nsq.assert_valid_topic_name(topic)
    fields = dict(topic=topic)
    if defer is not None:
        fields.update(defer='{}'.format(defer))
    return self._request('POST', '/pub', fields=fields, body=data)
|
def getctime(self, path):
    """Return the creation time of the fake file.

    Args:
        path: the path to the fake file.

    Returns:
        (int, float) the creation time of the fake file, in number of
        seconds since the epoch.

    Raises:
        OSError: if the file does not exist.
    """
    try:
        return self.filesystem.resolve(path).st_ctime
    except IOError:
        # Translate the lookup failure into the OS-level error the real
        # os.path.getctime would raise.
        self.filesystem.raise_os_error(errno.ENOENT)
|
def list_shoulds(options):
    """Construct the list of 'SHOULD' validators to be run by the validator.

    The result is driven by ``options.disabled`` and ``options.enabled``:
    with neither given, every check is enabled. Otherwise checks named in
    ``options.disabled`` (individually or via a group name such as 'all',
    'format-checks', 'approved-values', 'all-vocabs' or
    'all-external-sources') are skipped, and checks named in
    ``options.enabled`` are added back afterwards.

    Raises:
        JSONError: if an unknown check name appears in ``options.enabled``.
    """
    validator_list = []
    # Default: enable all
    if not options.disabled and not options.enabled:
        validator_list.extend(CHECKS['all'])
        return validator_list
    # --disable
    # Add SHOULD requirements to the list unless disabled
    if options.disabled:
        if 'all' not in options.disabled:
            if 'format-checks' not in options.disabled:
                # The strict custom-prefix check takes precedence; the lax
                # variant is only used when the strict one is disabled.
                if 'custom-prefix' not in options.disabled:
                    validator_list.append(CHECKS['custom-prefix'])
                elif 'custom-prefix-lax' not in options.disabled:
                    validator_list.append(CHECKS['custom-prefix-lax'])
                if 'open-vocab-format' not in options.disabled:
                    validator_list.append(CHECKS['open-vocab-format'])
                if 'kill-chain-names' not in options.disabled:
                    validator_list.append(CHECKS['kill-chain-names'])
                if 'observable-object-keys' not in options.disabled:
                    validator_list.append(CHECKS['observable-object-keys'])
                if 'observable-dictionary-keys' not in options.disabled:
                    validator_list.append(CHECKS['observable-dictionary-keys'])
                if 'windows-process-priority-format' not in options.disabled:
                    validator_list.append(CHECKS['windows-process-priority-format'])
                if 'hash-length' not in options.disabled:
                    validator_list.append(CHECKS['hash-length'])
            if 'approved-values' not in options.disabled:
                if 'marking-definition-type' not in options.disabled:
                    validator_list.append(CHECKS['marking-definition-type'])
                if 'relationship-types' not in options.disabled:
                    validator_list.append(CHECKS['relationship-types'])
                if 'duplicate-ids' not in options.disabled:
                    validator_list.append(CHECKS['duplicate-ids'])
                # Vocabulary checks can be disabled as a group...
                if 'all-vocabs' not in options.disabled:
                    if 'attack-motivation' not in options.disabled:
                        validator_list.append(CHECKS['attack-motivation'])
                    if 'attack-resource-level' not in options.disabled:
                        validator_list.append(CHECKS['attack-resource-level'])
                    if 'identity-class' not in options.disabled:
                        validator_list.append(CHECKS['identity-class'])
                    if 'indicator-types' not in options.disabled:
                        validator_list.append(CHECKS['indicator-types'])
                    if 'industry-sector' not in options.disabled:
                        validator_list.append(CHECKS['industry-sector'])
                    if 'malware-types' not in options.disabled:
                        validator_list.append(CHECKS['malware-types'])
                    if 'report-types' not in options.disabled:
                        validator_list.append(CHECKS['report-types'])
                    if 'threat-actor-types' not in options.disabled:
                        validator_list.append(CHECKS['threat-actor-types'])
                    if 'threat-actor-role' not in options.disabled:
                        validator_list.append(CHECKS['threat-actor-role'])
                    if 'threat-actor-sophistication' not in options.disabled:
                        validator_list.append(CHECKS['threat-actor-sophistication'])
                    if 'tool-types' not in options.disabled:
                        validator_list.append(CHECKS['tool-types'])
                    if 'region' not in options.disabled:
                        validator_list.append(CHECKS['region'])
                    if 'hash-algo' not in options.disabled:
                        validator_list.append(CHECKS['hash-algo'])
                    if 'windows-pebinary-type' not in options.disabled:
                        validator_list.append(CHECKS['windows-pebinary-type'])
                    if 'account-type' not in options.disabled:
                        validator_list.append(CHECKS['account-type'])
                # ...as can the checks relying on external data sources.
                if 'all-external-sources' not in options.disabled:
                    if 'mime-type' not in options.disabled:
                        validator_list.append(CHECKS['mime-type'])
                    if 'protocols' not in options.disabled:
                        validator_list.append(CHECKS['protocols'])
                    if 'ipfix' not in options.disabled:
                        validator_list.append(CHECKS['ipfix'])
                    if 'http-request-headers' not in options.disabled:
                        validator_list.append(CHECKS['http-request-headers'])
                    if 'socket-options' not in options.disabled:
                        validator_list.append(CHECKS['socket-options'])
                    if 'pdf-doc-info' not in options.disabled:
                        validator_list.append(CHECKS['pdf-doc-info'])
                    if 'countries' not in options.disabled:
                        validator_list.append(CHECKS['countries'])
            if 'network-traffic-ports' not in options.disabled:
                validator_list.append(CHECKS['network-traffic-ports'])
            if 'extref-hashes' not in options.disabled:
                validator_list.append(CHECKS['extref-hashes'])
    # --enable
    if options.enabled:
        for check in options.enabled:
            try:
                # Skip checks already added by the --disable pass above.
                if CHECKS[check] in validator_list:
                    continue
                if type(CHECKS[check]) is list:
                    validator_list.extend(CHECKS[check])
                else:
                    validator_list.append(CHECKS[check])
            except KeyError:
                raise JSONError("%s is not a valid check!" % check)
    return validator_list
|
def pre_init():
    """Pre-initiation hook of the plugin.

    Here rafcon classes can be extended/monkey-patched or completely
    substituted. An example is given with the rafcon_execution_hooks_plugin.

    :return:
    """
    logger.info("Run pre-initiation hook of {} plugin.".format(__file__.split(os.path.sep)[-2]))
    # Example: monkey-patch the rafcon.core.script.Script class to print
    # additional log messages while executing.
    from rafcon.core.script import Script
    # Keep a reference to the original so the patch can delegate to it.
    old_execute_method = Script.execute

    def new_execute_method(self, state, inputs=None, outputs=None, backward_execution=False):
        # Wrapper that logs before and after delegating to the original
        # implementation; the result is passed through unchanged.
        logger.debug("patched version of Script class is used.")
        result = old_execute_method(self, state, inputs, outputs, backward_execution)
        logger.debug("patched version of Script execute-method is finished with result: {}.".format(result))
        return result

    Script.execute = new_execute_method
|
def setDragDropFilter(self, ddFilter):
    """Set the drag & drop filter for this widget.

    :warning: the filter is stored as a weak reference, so mutable methods
        will not be stored well. Things like instancemethods will not hold
        their pointer after they leave the scope that is being used.
        Instead, use a classmethod or staticmethod to define the filter.

    :param ddFilter: <function> || <method> || None
    """
    self._dragDropFilterRef = weakref.ref(ddFilter) if ddFilter else None
|
def validate_registry_uri(uri: str) -> None:
    """Raise an exception if the URI does not conform to the registry URI
    scheme.
    """
    parsed = parse.urlparse(uri)
    validate_registry_uri_scheme(parsed.scheme)
    validate_registry_uri_authority(parsed.netloc)
    if parsed.query:
        validate_registry_uri_version(parsed.query)
    # The path starts with '/'; strip it to obtain the bare package name.
    validate_package_name(parsed.path[1:])
|
def get_byte_array(integer):
    """Return the minimal variable-length bytes for the given int."""
    # Operate in big endian (unlike most of the Telegram API) since:
    # > "...pq is a representation of a natural number
    #    (in binary *big endian* format)..."
    # > "...current value of dh_prime equals
    #    (in *big-endian* byte order)..."
    # Reference: https://core.telegram.org/mtproto/auth_key
    num_bytes = (integer.bit_length() + 7) // 8  # 8 bits per byte, rounded up
    return integer.to_bytes(num_bytes, byteorder='big', signed=False)
|
def open_file(path, grib_errors='warn', **kwargs):
    """Open a GRIB file as a ``cfgrib.Dataset``."""
    if 'mode' in kwargs:
        # Accepted for backwards compatibility only; the value is dropped.
        warnings.warn("the `mode` keyword argument is ignored and deprecated", FutureWarning)
        del kwargs['mode']
    stream = messages.FileStream(path, message_class=cfmessage.CfMessage, errors=grib_errors)
    return Dataset(*build_dataset_components(stream, **kwargs))
|
def collapse_phenotypes(self, input_phenotype_labels, output_phenotype_label, verbose=True):
    """Rename one or more input phenotypes to a single output phenotype.

    Args:
        input_phenotype_labels (str or list): a name or list of names to combine
        output_phenotype_label (str): the name to change the phenotypes to
        verbose (bool): output more details

    Returns:
        CellDataFrame: a modified copy; ``self`` is left untouched.

    Raises:
        ValueError: if any input label is not among ``self.phenotypes``.
    """
    # Accept a single label for convenience.
    if isinstance(input_phenotype_labels, str):
        input_phenotype_labels = [input_phenotype_labels]
    bad_phenotypes = set(input_phenotype_labels) - set(self.phenotypes)
    if len(bad_phenotypes) > 0:
        raise ValueError("Error phenotype(s) " + str(bad_phenotypes) + " are not in the data.")
    data = self.copy()
    if len(input_phenotype_labels) == 0:
        return data

    def _swap_in(d, inputs, output):
        # Merge the call values of the input phenotypes into a single
        # entry under the output label.
        # Get the keys we need to merge together
        overlap = set(d.keys()).intersection(inputs)
        # if there are none to merge we're done already
        if len(overlap) == 0:
            return d
        keepers = [(k, v) for k, v in d.items() if k not in inputs]
        # combine anything thats not a keeper; the merged value is the max
        # over the merged inputs, i.e. 1 if any input phenotype was called
        return dict(keepers + [(output_phenotype_label, max([d[x] for x in overlap]))])

    data['phenotype_calls'] = data.apply(lambda x: _swap_in(x['phenotype_calls'], input_phenotype_labels, output_phenotype_label), 1)

    def _set_label(d):
        # Pick the phenotype whose call is 1; NaN when none is called.
        vals = [k for k, v in d.items() if v == 1]
        return np.nan if len(vals) == 0 else vals[0]

    data['phenotype_label'] = data.apply(lambda x: _set_label(x['phenotype_calls']), 1)
    return data
|
def _get_ned_query_url(self, raDeg, decDeg, arcsec):
    """Build the URL for a single NED conesearch.

    **Key Arguments:**
        - ``raDeg`` -- RA of the search centre, decimal degrees
        - ``decDeg`` -- declination of the search centre, decimal degrees
        - ``arcsec`` -- search radius in arcsec

    **Return:**
        - ``url`` -- the fully-encoded NED conesearch URL

    .. todo::
        - @review: when complete, clean _get_ned_query_url method
        - @review: when complete add logging
    """
    self.log.info('starting the ``_get_ned_query_url`` method')
    # NED expects the search radius in arcmin.
    radArcMin = float(arcsec) / (60.)
    if self.redshift == True:
        z_constraint = "Available"
    else:
        z_constraint = "Unconstrained"
    url = "http://ned.ipac.caltech.edu/cgi-bin/objsearch"
    params = {"in_csys": "Equatorial", "in_equinox": "J2000.0", "lon": "%(raDeg)0.6fd" % locals(), "lat": "%(decDeg)0.6fd" % locals(), "radius": "%(radArcMin)0.6s" % locals(), "hconst": "73", "omegam": "0.27", "omegav": "0.73", "corr_z": "1", "z_constraint": z_constraint, "z_value1": "", "z_value2": "", "z_unit": "z", "ot_include": "ANY", "nmp_op": "ANY", "out_csys": "Equatorial", "out_equinox": "J2000.0", "obj_sort": "Distance to search center", "of": "ascii_bar", "zv_breaker": "30000.0", "list_limit": "500", "img_stamp": "NO", "search_type": "Near Position Search", }
    # NOTE(review): `urllib.urlencode` is the Python 2 API; Python 3 needs
    # `urllib.parse.urlencode` -- confirm the target runtime.
    url = url + "?" + urllib.urlencode(params)
    if not self.unclassified:
        # Restrict results to the listed object types rather than also
        # returning unclassified sources.
        url = url + "&" + urllib.urlencode({"ot_include": "ANY"})
        in_objtypes1 = ["Galaxies", "GPairs", "GTriples", "GGroups", "GClusters", "QSO", "QSOGroups", "GravLens", "AbsLineSys", "EmissnLine"]
        for o in in_objtypes1:
            url = url + "&" + urllib.urlencode({"in_objtypes1": o})
        in_objtypes3 = ["Supernovae", "HIIregion", "PN", "SNR", "StarAssoc", "StarClust", "MolCloud", "Nova", "VarStar", "WolfRayet", "CarbonStar", "PofG", "Other", "Star", "BlueStar", "RedStar", "Pulsar", "ReflNeb", "DblStar", "EmissnObj", "EmissnNeb", "WhiteDwarf"]
        for o in in_objtypes3:
            url = url + "&" + urllib.urlencode({"in_objtypes3": o})
    self.log.info('completed the ``_get_ned_query_url`` method')
    return url
|
def sentinels(self, name):
    """Return a list of sentinels for ``name``."""
    reply = self.execute(b'SENTINELS', name, encoding='utf-8')
    return wait_convert(reply, parse_sentinel_slaves_and_sentinels)
|
def get_allEvents(self):
    '''Splitting this method out to get the set of events to filter allows
    one to subclass for different subsets of events without copying other
    logic.

    The queryset is cached on the instance, so repeated calls within a
    request do not re-query.
    '''
    if not hasattr(self, 'allEvents'):
        timeFilters = {'endTime__gte': timezone.now()}
        # BUGFIX: the original condition read
        #   `if getConstant('registration__displayLimitDays') or 0 > 0:`
        # which parses as `getConstant(...) or (0 > 0)` and therefore never
        # compared the constant against zero. Parenthesize so a missing
        # (None/0) constant correctly skips the startTime filter.
        if (getConstant('registration__displayLimitDays') or 0) > 0:
            timeFilters['startTime__lte'] = timezone.now() + timedelta(days=getConstant('registration__displayLimitDays'))
        # Get the Event listing here to avoid duplicate queries
        self.allEvents = Event.objects.filter(**timeFilters).filter(
            Q(instance_of=PublicEvent) | Q(instance_of=Series)
        ).annotate(**self.get_annotations()).exclude(
            Q(status=Event.RegStatus.hidden) | Q(status=Event.RegStatus.regHidden) | Q(status=Event.RegStatus.linkOnly)
        ).order_by(*self.get_ordering())
    return self.allEvents
|
def _flatten_beam_dim(tensor):
    """Reshape the first two dimensions into a single dimension.

    Args:
        tensor: Tensor to reshape of shape [A, B, ...]

    Returns:
        Reshaped tensor of shape [A*B, ...]
    """
    dims = _shape_list(tensor)
    # Merge the batch dim (A) and beam dim (B) into one leading dim.
    merged = [dims[0] * dims[1]] + dims[2:]
    return tf.reshape(tensor, merged)
|
def update_traded(self, traded_update):
    """Apply a traded update to the running ladder.

    :param traded_update: [price, size]; any falsy value clears the ladder.
    """
    if traded_update:
        self.traded.update(traded_update)
    else:
        self.traded.clear()
|
async def takewhile(source, func):
    """Forward an asynchronous sequence while a condition is met.

    The given function takes the item as an argument and returns a boolean
    corresponding to the condition to meet. The function can either be
    synchronous or asynchronous.
    """
    # Detect once whether the predicate must be awaited.
    func_is_coro = asyncio.iscoroutinefunction(func)
    async with streamcontext(source) as streamer:
        async for item in streamer:
            verdict = func(item)
            if func_is_coro:
                verdict = await verdict
            if not verdict:
                return
            yield item
|
def get_highlighted_code(name, code, type='terminal'):
    """Return ``code`` syntax-highlighted for a terminal when pygments is
    available on the system; otherwise return it unchanged.

    Args:
        name: file name used to guess the lexer.
        code: source text to highlight.
        type: kept for backward compatibility; only terminal formatting is
            produced (the argument is currently unused).

    Returns:
        The highlighted code, or the original code when pygments is
        unavailable or no lexer could be guessed.
    """
    import logging
    try:
        # Import everything inside one guard; the original ran a pointless
        # bare `pygments` statement and imported the submodules unguarded.
        from pygments import highlight
        from pygments.formatters import TerminalFormatter
        from pygments.lexers import ClassNotFound, guess_lexer_for_filename
    except ImportError:
        # pygments is an optional dependency; fall back to plain text.
        return code
    try:
        lexer = guess_lexer_for_filename(name, code)
    except ClassNotFound:
        logging.debug("Couldn't guess Lexer, will not use pygments.")
        return code
    return highlight(code, lexer, TerminalFormatter())
|
def res_phi_pie(pst, logger=None, **kwargs):
    """Plot current phi components as a pie chart.

    Parameters
    ----------
    pst : pyemu.Pst
    logger : pyemu.Logger
    kwargs : dict
        accepts 'include_zero' as a flag to include phi groups with only
        zero-weight obs, 'ensemble' to load residuals from an ensemble
        file, 'ax' to draw on an existing axis and 'filename' to save the
        figure.

    Returns
    -------
    ax : matplotlib.Axis
    """
    if logger is None:
        logger = Logger('Default_Loggger.log', echo=False)
    logger.log("plot res_phi_pie")
    if "ensemble" in kwargs:
        try:
            res = pst_utils.res_from_en(pst, kwargs['ensemble'])
        except:
            logger.statement("res_1to1: could not find ensemble file {0}".format(kwargs['ensemble']))
    else:
        try:
            res = pst.res
        except:
            logger.lraise("res_phi_pie: pst.res is None, couldn't find residuals file")
    obs = pst.observation_data
    phi = pst.phi
    phi_comps = pst.phi_components
    norm_phi_comps = pst.phi_components_normalized
    keys = list(phi_comps.keys())
    # NOTE(review): despite the flag's name, this branch *drops* zero-valued
    # phi groups when 'include_zero' is absent or True -- confirm intent.
    if "include_zero" not in kwargs or kwargs["include_zero"] is True:
        phi_comps = {k: phi_comps[k] for k in keys if phi_comps[k] > 0.0}
        keys = list(phi_comps.keys())
        norm_phi_comps = {k: norm_phi_comps[k] for k in keys}
    if "ax" in kwargs:
        ax = kwargs["ax"]
    else:
        # NOTE(review): `figsize` is not defined in this function; it is
        # presumably a module-level constant -- verify.
        fig = plt.figure(figsize=figsize)
        ax = plt.subplot(1, 1, 1, aspect="equal")
    # Wedge labels show group name, raw phi and the percentage of total phi.
    labels = ["{0}\n{1:4G}\n({2:3.1f}%)".format(k, phi_comps[k], 100. * (phi_comps[k] / phi)) for k in keys]
    ax.pie([float(norm_phi_comps[k]) for k in keys], labels=labels)
    logger.log("plot res_phi_pie")
    if "filename" in kwargs:
        plt.savefig(kwargs["filename"])
    return ax
|
def mean_size(self, p, q):
    '''Return the generalized mean particle size D[p, q].

    >>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6)
    >>> psd.mean_size(3, 2)
    4.412484512922977e-06

    Note that for the case where p == q, a different set of formulas are
    required - which do not have analytical results for many distributions.
    Therefore, a close numerical approximation is used instead, to perturb
    the values of p and q so they are 1E-9 away from each other. This leads
    only to slight errors, as in the example below where the correct answer
    is 5E-6.

    >>> psd.mean_size(3, 3)
    4.99999304923345e-06
    '''
    if p == q:
        # Symmetric case has no analytical form; perturb (see docstring).
        p -= 1e-9
        q += 1e-9
    numerator = self._pdf_basis_integral_definite(d_min=self.d_minimum, d_max=self.d_excessive, n=p - self.order)
    denominator = self._pdf_basis_integral_definite(d_min=self.d_minimum, d_max=self.d_excessive, n=q - self.order)
    return (numerator / denominator) ** (1.0 / (p - q))
|
def set_proxy(self, proxy_account, account=None, **kwargs):
    """Set a specific voting proxy for an account.

    :param bitshares.account.Account proxy_account: Account to be proxied
    :param str account: (optional) the account to allow access to
        (defaults to ``default_account``)
    """
    if not account and "default_account" in self.config:
        account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)
    proxy = Account(proxy_account, blockchain_instance=self)
    # Mutate the account's options in place, pointing the voting slot at
    # the proxy's id.
    options = account["options"]
    options["voting_account"] = proxy["id"]
    op = operations.Account_update(
        fee={"amount": 0, "asset_id": "1.3.0"},
        account=account["id"],
        new_options=options,
        extensions={},
        prefix=self.prefix,
    )
    return self.finalizeOp(op, account["name"], "active", **kwargs)
|
def makebunches(data, commdct):
    """Build a dict of bunch lists from the data, keyed by upper-cased
    object type."""
    tables, names = data.dt, data.dtls
    return {
        name.upper(): [makeabunch(commdct, record, table_i) for record in tables[name.upper()]]
        for table_i, name in enumerate(names)
    }
|
def window_open(dev, temp, duration):
    """Show, and optionally set, the window-open settings."""
    click.echo("Window open: %s" % dev.window_open)
    # Only write the configuration when both values were supplied.
    if not (temp and duration):
        return
    click.echo("Setting window open conf, temp: %s duration: %s" % (temp, duration))
    dev.window_open_config(temp, duration)
|
def remove_node(self, node, stop=False):
    """Remove a node from the cluster.

    By default the node is only removed from the known hosts of this
    cluster; it is not stopped unless ``stop`` is True.

    :param node: node to remove
    :type node: :py:class:`Node`
    :param stop: Stop the node
    :type stop: bool

    :raises NodeNotFound: if the node's kind or the node itself is unknown.
    """
    if node.kind not in self.nodes:
        # BUGFIX: the original passed node.name/node.kind as extra
        # constructor arguments without ever %-formatting the message
        # (inconsistent with the formatted message below).
        raise NodeNotFound("Unable to remove node %s: invalid node type `%s`." % (node.name, node.kind))
    try:
        index = self.nodes[node.kind].index(node)
        if self.nodes[node.kind][index]:
            del self.nodes[node.kind][index]
        if stop:
            node.stop()
        # Release the node's name and persist the updated cluster state.
        self._naming_policy.free(node.kind, node.name)
        self.repository.save_or_update(self)
        remaining_nodes = self.get_all_nodes()
        self._gather_node_ip_addresses(remaining_nodes, self.start_timeout, self.ssh_probe_timeout, remake=True)
    except ValueError:
        raise NodeNotFound("Node %s not found in cluster" % node.name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.