signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def resolve_spec_identifier(self, obj_name):
    """Find reference name based on spec identifier.

    Spec identifiers are used in parameter and return type definitions, but
    should be a user-friendly version instead. Uses the docfx ``references``
    lookup mapping for resolution.

    If the spec identifier reference has a ``spec.csharp`` key, this implies
    a compound reference that should be linked in a special way. Resolve to
    a nested reference, with the corrected nodes.

    .. note::
        This uses a special format that is interpreted by the domain for
        parameter type and return type fields.

    :param obj_name: spec identifier to resolve to a correct reference
    :returns: resolved string with one or more references
    :rtype: str
    """
    reference = self.references.get(obj_name)
    if reference is None:
        return obj_name

    def render(node):
        # Angle brackets in the spec become curly-brace markers that the
        # domain later interprets for nested references.
        label = node.get("name")
        if label == "<":
            return "{"
        if label == ">":
            return "}"
        if "fullName" in node and "uid" in node:
            return "{fullName}<{uid}>".format(**node)
        if "uid" in node:
            return node["uid"]
        if "fullName" in node:
            return node["fullName"]
        return None

    rendered = [render(node) for node in reference.get("spec.csharp", [])]
    rendered = [piece for piece in rendered if piece is not None]
    if rendered:
        return "".join(rendered)
    return reference.get("fullName", obj_name)
|
def create_csv(filename, csv_data, mode="w"):
    """Create a CSV file with the given data and store it in the
    file with the given name.

    :param filename: name of the file to store the data in
    :param csv_data: the data to be stored in the file
    :param mode: the mode in which we have to open the file. It can
        be 'w', 'a', etc. Default is 'w'
    """
    with open(filename, mode) as f:
        # BUG FIX: str.replace returns a new string; the original code
        # discarded the escaped result and wrote the raw data instead.
        f.write(csv_data.replace("_", r"\_"))
|
def get_es_action_item(data_item, action_settings, es_type, id_field=None):
    """Return an item formatted and ready to append to the action list."""
    action_item = dict.copy(action_settings)

    if id_field is not None:
        # An explicit id field was requested: look it up inside the item.
        id_val = first(list(get_dict_key(data_item, id_field)))
        if id_val is not None:
            action_item['_id'] = id_val
    elif data_item.get('id'):
        item_id = data_item['id']
        index_prefix = "%s/" % action_settings['_index']
        if item_id.startswith(index_prefix):
            # Strip the "<index>/<type>/" prefix, keeping the rest intact.
            action_item['_id'] = "/".join(item_id.split("/")[2:])
        else:
            action_item['_id'] = item_id

    action_item['_source'] = data_item['data'] if data_item.get('data') else data_item
    action_item['_type'] = es_type
    return action_item
|
def decode(self, value):
    """The decoder for this schema.

    Tries each decoder in order of the types specified for this schema.

    :param value: raw value to decode; ``None`` is replaced by the schema's
        default unless a null decoder is registered.
    :returns: the result of the first decoder that accepts the value.
    :raises ValueError: if no decoder accepts the value.
    """
    # Use the default value unless the field accepts None types.
    has_null_decoder = encode_decode_null in self._decoders
    if value is None and self._default is not None and not has_null_decoder:
        value = self._default

    last_error = None
    for decoder in self._decoders:
        try:
            return decoder(value)
        except ValueError as ex:
            # BUG FIX: remember the failure; in Python 3 `ex` does not
            # survive past the except block (the original read it after the
            # loop and used the Python-2-only `.message` attribute).
            last_error = ex
    raise ValueError('Value \'{}\' is invalid. {}'.format(value, last_error))
|
def predict(self, quadruplets):
    """Predicts the ordering between sample distances in input quadruplets.

    For each quadruplet, returns 1 if the quadruplet is in the right order
    (first pair is more similar than second pair), and -1 if not.

    Parameters
    ----------
    quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or
                  (n_quadruplets, 4)
        3D array of quadruplets to predict, with each row corresponding to
        four points, or 2D array of indices of quadruplets if the metric
        learner uses a preprocessor.

    Returns
    -------
    prediction : `numpy.ndarray` of floats, shape=(n_constraints,)
        Predictions of the ordering of pairs, for each quadruplet.
    """
    check_is_fitted(self, 'transformer_')
    checked = check_input(
        quadruplets,
        type_of_inputs='tuples',
        preprocessor=self.preprocessor_,
        estimator=self,
        tuple_size=self._tuple_size,
    )
    return np.sign(self.decision_function(checked))
|
def run_func(self, func_path, *func_args, **kwargs):
    """Run a function in Matlab and return the result.

    Parameters
    ----------
    func_path : str
        Name of function to run or a path to an m-file.
    func_args : object, optional
        Function args to send to the function.
    nargout : int, optional
        Desired number of return arguments.
    kwargs :
        Keyword arguments are passed to Matlab in the form [key, val] so
        that matlab.plot(x, y, '--', LineWidth=2) would be translated into
        plot(x, y, '--', 'LineWidth', 2).

    Returns
    -------
    Result dictionary with keys: 'message', 'result', and 'success'.
    """
    if not self.started:
        raise ValueError('Session not started, use start()')
    nargout = kwargs.pop('nargout', 1)

    # Flatten the remaining keyword arguments into alternating
    # (key, value) items so Matlab receives them positionally.
    for key, val in kwargs.items():
        func_args += (key, val)

    dname, fname = os.path.split(func_path)
    func_name, ext = os.path.splitext(fname)
    if ext and ext != '.m':
        raise TypeError('Need to give path to .m file')
    return self._json_response(
        cmd='eval',
        func_name=func_name,
        func_args=func_args or '',
        dname=dname,
        nargout=nargout,
    )
|
def delete_user_contact_list(self, id, contact_list_id, **data):
    """DELETE /users/:id/contact_lists/:contact_list_id/

    Deletes the contact list. Returns ``{"deleted": true}``.
    """
    # BUG FIX: the original format string used {0} twice, substituting the
    # user id where the contact list id belongs.
    return self.delete(
        "/users/{0}/contact_lists/{1}/".format(id, contact_list_id),
        data=data,
    )
|
def verify(self, obj):
    """Verify that the object conforms to this verifier's schema.

    Args:
        obj (object): A python object to verify

    Returns:
        bytes or bytearray: The decoded byte buffer

    Raises:
        ValidationError: If there is a problem verifying the object, a
            ValidationError is thrown with at least the reason key set
            indicating the reason for the lack of validation.
    """
    encoding = self.encoding
    if encoding == 'none':
        if not isinstance(obj, (bytes, bytearray)):
            raise ValidationError('Byte object was not either bytes or a bytearray',
                                  type=obj.__class__.__name__)
    elif encoding == 'base64':
        try:
            return base64.b64decode(obj)
        except TypeError:
            raise ValidationError("Could not decode base64 encoded bytes", obj=obj)
    elif encoding == 'hex':
        try:
            return binascii.unhexlify(obj)
        except TypeError:
            raise ValidationError("Could not decode hex encoded bytes", obj=obj)
    # 'none' encoding (already validated) or an unknown encoding: pass through.
    return obj
|
def validate_uncle(cls, block: BaseBlock, uncle: BaseBlock, uncle_parent: BaseBlock) -> None:
    """Validate the given uncle in the context of the given block.

    Raises ValidationError when the uncle's number, timestamp, or gas usage
    is inconsistent with its ancestor or the containing block.
    """
    if uncle.block_number >= block.number:
        raise ValidationError(
            "Uncle number ({0}) is higher than block number ({1})".format(
                uncle.block_number, block.number))
    expected_number = uncle_parent.block_number + 1
    if uncle.block_number != expected_number:
        raise ValidationError(
            "Uncle number ({0}) is not one above ancestor's number ({1})".format(
                uncle.block_number, uncle_parent.block_number))
    if uncle.timestamp < uncle_parent.timestamp:
        raise ValidationError(
            "Uncle timestamp ({0}) is before ancestor's timestamp ({1})".format(
                uncle.timestamp, uncle_parent.timestamp))
    if uncle.gas_used > uncle.gas_limit:
        raise ValidationError(
            "Uncle's gas usage ({0}) is above the limit ({1})".format(
                uncle.gas_used, uncle.gas_limit))
|
def change_sample(self, old_samp_name, new_samp_name, new_site_name=None, new_er_data=None, new_pmag_data=None, replace_data=False):
    """Find actual data objects for sample and site.
    Then call Sample class change method to update sample name and data.

    :param old_samp_name: name of an existing sample to update
    :param new_samp_name: new name for that sample
    :param new_site_name: optional site to move the sample to; the site is
        created if it does not already exist
    :param new_er_data: er data passed through to Sample.change_sample
    :param new_pmag_data: pmag data passed through to Sample.change_sample
    :param replace_data: passed through to Sample.change_sample
        (presumably replace-vs-merge of existing data -- TODO confirm)
    :returns: the updated sample object, or False if no sample named
        ``old_samp_name`` exists
    """
    sample = self.find_by_name(old_samp_name, self.samples)
    if not sample:
        # Nothing to update: warn and bail out.
        print('-W- {} is not a currently existing sample, so it cannot be updated'.format(old_samp_name))
        return False
    if new_site_name:
        new_site = self.find_by_name(new_site_name, self.sites)
        if not new_site:
            # The target site doesn't exist yet; create it so the sample
            # can be attached to it.
            print("""-W- {} is not a currently existing site.
Adding site named: {}""".format(new_site_name, new_site_name))
            new_site = self.add_site(new_site_name)
    else:
        new_site = None
    sample.change_sample(new_samp_name, new_site, new_er_data, new_pmag_data, replace_data)
    return sample
|
def vn(x):
    """Value-or-none: normalize *x* for output.

    An empty list becomes None, a non-empty list is joined with '|',
    a datetime is rendered as ISO-8601, anything else passes through.
    """
    if x == []:
        return None
    if isinstance(x, datetime):
        return x.isoformat()
    if isinstance(x, list):
        return '|'.join(x)
    return x
|
def read_command(self):
    """Read a command from the user line by line.

    Returns a code object suitable for execution, or None when the line
    could not be compiled (the syntax error is sent back to the peer).

    :raises ConnectionResetError: when readline() returns b'', meaning the
        connection was lost.

    NOTE(review): this is a pre-async/await generator coroutine driven with
    ``yield from``; it must be awaited/driven by an event loop task.
    """
    reader = self.reader
    line = yield from reader.readline()
    if line == b'':  # lost connection
        raise ConnectionResetError()
    try:
        # Skip the newline to make CommandCompiler work as advertised.
        codeobj = self.attempt_compile(line.rstrip(b'\n'))
    except SyntaxError:
        # Report the compile failure to the client instead of propagating.
        yield from self.send_exception()
        return
    return codeobj
|
def remove_sites(self, indices):
    """Delete the sites at the given indices.

    Args:
        indices: Sequence of indices of sites to delete.
    """
    # Materialize as a set so each membership test is O(1) instead of
    # O(len(indices)).
    to_remove = set(indices)
    self._sites = [site for i, site in enumerate(self._sites)
                   if i not in to_remove]
|
def _get_variable_names(arr):
    """Return the variable names of an array.

    If the array carries the variable dimension, return its coordinate
    values as a list; otherwise fall back to the array's name.
    """
    if VARIABLELABEL not in arr.dims:
        return arr.name
    return arr.coords[VARIABLELABEL].tolist()
|
def get_class_field(cls, field_name):
    """Add management of dynamic fields: if a normal field cannot be retrieved,
    check if it can be a dynamic field and in this case, create a copy with
    the given name and associate it to the model.

    :param field_name: name of the field to fetch from the model
    :returns: the regular field, or a newly attached dynamic field copy
    """
    try:
        field = super(ModelWithDynamicFieldMixin, cls).get_class_field(field_name)
    except AttributeError:
        # The "has_field" check returned True but getattr raised, so this
        # must be a DynamicField: materialize it on the model under the
        # requested name.
        dynamic_field = cls._get_dynamic_field_for(field_name)
        field = cls._add_dynamic_field_to_model(dynamic_field, field_name)
    return field
|
def hsts_header(self):
    """Return the proper HSTS policy header value."""
    directives = ['max-age={0}'.format(self.hsts_age)]
    if self.hsts_include_subdomains:
        directives.append('includeSubDomains')
    return '; '.join(directives)
|
def random_sample(self, num_samples, df=None, replace=False, weights=None, random_state=100, axis='row'):
    """Take a random sample of the matrix and load it back into self.

    :param num_samples: number of rows/columns to sample
    :param df: optional DataFrame to sample from; defaults to the matrix's
        own data (via dat_to_df)
    :param replace: sample with replacement
    :param weights: sampling weights, passed through to DataFrame.sample
    :param random_state: seed for reproducible sampling
    :param axis: 'row' or 'col' -- which axis to sample along
    """
    if df is None:
        df = self.dat_to_df()
    if axis == 'row':
        axis = 0
    if axis == 'col':
        axis = 1
    # BUG FIX: the original unconditionally re-read the matrix with
    # self.export_df() here, discarding both the `df` argument and the
    # dat_to_df() fallback above.
    df = df.sample(n=num_samples, replace=replace, weights=weights,
                   random_state=random_state, axis=axis)
    self.load_df(df)
|
def auto_key():
    """Attempt to auto-generate a unique cryptographic key from the hardware ID.

    It should *NOT* be used in production, or to replace a proper key, but it
    can help get things running in local and test environments more easily.

    :returns: a hex digest string when three spaced-out reads of the
        hardware id agree, otherwise False.
    """
    import time
    import random

    def hardware_digest():
        # Hash the MAC-derived node id. md5 is acceptable here only because
        # this helper is explicitly not for production use.
        import uuid
        import hashlib
        h = hashlib.md5()
        h.update(str("%s" % uuid.getnode()).encode('utf-8'))
        return h.hexdigest()

    # Sample three times with small random delays: uuid.getnode() falls back
    # to a random value when no stable MAC address is available, in which
    # case the three digests disagree and we refuse to produce a key.
    key1 = hardware_digest()
    time.sleep(random.uniform(0, 0.5))
    key2 = hardware_digest()
    time.sleep(random.uniform(0, 0.5))
    key3 = hardware_digest()
    if key1 == key2 == key3:
        return key1
    return False
|
def filter(self):
    """Called when parallelize is False.

    This function will generate the file names in a directory tree by walking
    the tree either top-down or bottom-up. For each directory in the tree
    rooted at directory top (including top itself), it yields a 3-tuple
    (dirpath, dirnames, filenames).
    """
    self._printer('Standard Walk')
    count = Counter(length=3)
    for directory in self.directory:
        self._printer('Searching ' + directory)
        for root, directories, files in os.walk(directory, topdown=self.topdown):
            # Make `root` relative to the directory being searched.
            root = root[len(str(directory)) + 1:]
            self._printer(str(count.up) + ": Explored path - " + str(root), stream=True)
            if self.filters.validate(root):
                # Check that the non-empty-folders flag is on and we're at
                # the max directory level.
                if self.filters.non_empty_folders and self.filters.get_level(root) == self.filters.max_level:
                    # Check that the path is not an empty folder.
                    if os.path.isdir(directory + os.sep + root):
                        # Get paths in the folder without walking it.
                        paths = os.listdir(directory + os.sep + root)
                        # Check that any of the paths are files and not just
                        # directories.
                        if paths and any(os.path.isfile(os.path.join(directory, p)) for p in paths):
                            self.add_path(directory, root)
                else:
                    for filename in files:
                        # Join the two strings in order to form the full
                        # filepath.
                        fullname = os.path.join(root, filename)
                        if self.filters.validate(fullname):
                            self.add_path(directory, fullname)
|
def archive_basename(filename):
    """Return the basename (name without extension) of a recognized archive file.

    :param filename: file name to inspect
    :returns: the name with the archive suffix (and its leading dot, if
        present) removed, or False when no known archive suffix matches.
    """
    for archive in archive_formats:
        suffix = archive_formats[archive]['suffix']
        if filename.endswith(suffix):
            # BUG FIX: the original used rstrip('.' + suffix), which strips
            # a *character set* and can eat trailing characters of the
            # basename itself; slice the suffix off instead.
            base = filename[:-len(suffix)]
            return base[:-1] if base.endswith('.') else base
    return False
|
def _ParseStringOption(cls, options, argument_name, default_value=None):
    """Parses a string command line argument.

    Args:
        options (argparse.Namespace): parser options.
        argument_name (str): name of the command line argument.
        default_value (Optional[str]): default value of the command line
            argument.

    Returns:
        str: command line argument value or the default value if the command
            line argument is not set.

    Raises:
        BadConfigOption: if the command line argument value cannot be
            converted to a Unicode string.
    """
    argument_value = getattr(options, argument_name, None)
    if argument_value is None:
        return default_value

    if isinstance(argument_value, py2to3.BYTES_TYPE):
        # Pick the best available encoding to decode the raw bytes with.
        # Note that sys.stdin.encoding can be None.
        encoding = sys.stdin.encoding
        if not encoding:
            encoding = locale.getpreferredencoding()
        if not encoding:
            # Fall back to the class-level preferred encoding.
            encoding = cls._PREFERRED_ENCODING

        try:
            argument_value = argument_value.decode(encoding)
        except UnicodeDecodeError as exception:
            raise errors.BadConfigOption(
                ('Unable to convert option: {0:s} to Unicode with error: '
                 '{1!s}.').format(argument_name, exception))

    elif not isinstance(argument_value, py2to3.UNICODE_TYPE):
        raise errors.BadConfigOption(
            'Unsupported option: {0:s} string type required.'.format(argument_name))

    return argument_value
|
def rsdl_rn(self, AX, Y):
    """Compute primal residual normalisation term.

    The normalisation term is max(||A x^(k)||_2, ||B y^(k)||_2) with
    B = -(I I I ...)^T; the sqrt(Nb) scaling of ||Y||_2 accounts for the
    block replication introduced by multiplication by B.
    """
    ax_norm = np.linalg.norm(AX)
    y_norm = np.sqrt(self.Nb) * np.linalg.norm(Y)
    return max(ax_norm, y_norm)
|
def get_protocol_from_name(name):
    """Returns the protocol class for the protocol with the given name.

    :type name: str
    :param name: The name of the protocol.
    :rtype: Protocol
    :return: The protocol class.
    """
    protocol_cls = protocol_map.get(name)
    if protocol_cls:
        return protocol_cls
    raise ValueError('Unsupported protocol "%s".' % name)
|
def checks(similarities, verbose=False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the
        data-set.
    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Returns
    -------
    The checked (and possibly corrected) similarities matrix.

    Raises
    ------
    ValueError
        If the matrix is empty or contains NaN or infinite entries.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Truncate any complex entries to their real components.
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # Keep only the largest square sub-matrix of a non-square input.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        # Clip entries to the accepted [0, 1] similarity range.
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        # Symmetrize a non-symmetric matrix by averaging with its transpose.
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        # Force unit self-similarities on the diagonal.
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
        # BUG FIX: several corrections above rebind the local name
        # (real part, square truncation, symmetrization) and were silently
        # lost because the function returned nothing; return the checked
        # matrix so callers can actually use it.
        return similarities
|
def pool_revert(self, pool_id, version_id):
    """Function to revert a specific pool (Requires login) (UNTESTED).

    Parameters:
        pool_id (int): Where pool_id is the pool id.
        version_id (int): The pool version to revert to.
    """
    endpoint = 'pools/{0}/revert.json'.format(pool_id)
    payload = {'version_id': version_id}
    return self._get(endpoint, payload, method='PUT', auth=True)
|
def load_model_from_path(model_path, meta=False, **overrides):
    """Load a model from a data directory path.

    Creates a TokenizerLoader with the pipeline from meta.json and then
    calls from_disk() with the path.

    :param model_path: path to the model data directory
    :param meta: model meta data; read from model_path when falsy
    :param overrides: keyword overrides for the loader; 'disable' lists
        tokenizer names to skip
    :returns: the tokenizer loader populated from disk
    """
    from .tokenizer_loader import TokenizerLoader
    if not meta:
        meta = get_model_meta(model_path)
    tokenizer_loader = TokenizerLoader(meta=meta, **overrides)
    tokenizers = meta.get('tokenizers', [])
    disable = overrides.get('disable', [])
    # Normalize the meta entry: True means "use the defaults",
    # False/None means "no tokenizers".
    if tokenizers is True:
        tokenizers = TokenizerLoader.Defaults.tokenizers
    elif tokenizers in (False, None):
        tokenizers = []
    for tokenizer_name in tokenizers:
        if tokenizer_name not in disable:
            # Per-tokenizer configuration lives under meta['tokenizer_args'].
            config = meta.get('tokenizer_args', {}).get(tokenizer_name, {})
            component = tokenizer_loader.create_tokenizer(tokenizer_name, config=config)
            tokenizer_loader.add_tokenizer(component, name=tokenizer_name)
    return tokenizer_loader.from_disk(model_path)
|
def set_title(self, title: str, url: str = None) -> None:
    """Sets the title of the embed.

    Parameters
    ----------
    title : str
        Title of the embed.
    url : str or None, optional
        URL hyperlink of the title.
    """
    self.title, self.url = title, url
|
def install_exception_handler(handler):
    """Installs an exception handler.

    All installed exception handlers will be called if main() exits via an
    abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt,
    FlagsError or UsageError.

    Args:
        handler: ExceptionHandler, the exception handler to install.

    Raises:
        TypeError: Raised when the handler was not of the correct type.
    """
    if isinstance(handler, ExceptionHandler):
        EXCEPTION_HANDLERS.append(handler)
    else:
        raise TypeError('handler of type %s does not inherit from ExceptionHandler'
                        % type(handler))
|
def add_entity(self, domain, **kwargs):
    """Add a new Entity to tracking.

    :param domain: the domain the new entity belongs to
    :param kwargs: Entity constructor arguments; 'map_func', when given as
        a non-callable, is treated as a method name on the configured
        entity_mapper and resolved to a bound callable before construction
    :raises ValueError: if a non-callable 'map_func' is given but no
        entity_mapper was configured on this Layout
    """
    # Set the entity's mapping func if one was specified.
    map_func = kwargs.get('map_func', None)
    if map_func is not None and not callable(kwargs['map_func']):
        if self.entity_mapper is None:
            raise ValueError("Mapping function '%s' specified for Entity "
                             "'%s', but no entity mapper was passed when "
                             "initializing the current Layout. Please make"
                             " sure the 'entity_mapper' argument is set."
                             % (map_func, kwargs['name']))
        # Resolve the method name to the mapper's bound method.
        map_func = getattr(self.entity_mapper, kwargs['map_func'])
        kwargs['map_func'] = map_func
    ent = Entity(domain=domain, **kwargs)
    domain.add_entity(ent)
    if ent.mandatory:
        self.mandatory.add(ent.id)
    if ent.directory is not None:
        # Allow a '{{root}}' placeholder in the entity's directory template.
        ent.directory = ent.directory.replace('{{root}}', self.root)
    self.entities[ent.id] = ent
    # Aliases point at the same Entity object.
    for alias in ent.aliases:
        self.entities[alias] = ent
    if self.dynamic_getters:
        # Expose a get_<plural-name>() convenience accessor on the Layout.
        func = partial(getattr(self, 'get'), target=ent.name, return_type='id')
        func_name = inflect.engine().plural(ent.name)
        setattr(self, 'get_%s' % func_name, func)
|
def jcrop_css(css_url=None):
    """Load the jcrop css file.

    :param css_url: The custom CSS URL; when None, a local or CDN copy is
        used depending on the AVATARS_SERVE_LOCAL setting.
    """
    cdn_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
    if css_url is None:
        if current_app.config['AVATARS_SERVE_LOCAL']:
            css_url = url_for('avatars.static',
                              filename='jcrop/css/jquery.Jcrop.min.css')
        else:
            css_url = cdn_url
    return Markup('<link rel="stylesheet" href="%s">' % css_url)
|
def update(self, force=False):
    """Fetch new posts from the server.

    Arguments:
        force (bool): Force a thread update, even if thread has 404'd.

    Returns:
        int: How many new posts have been fetched.
    """
    # The thread has already 404'ed; this function shouldn't do anything
    # anymore unless the caller forces it.
    if self.is_404 and not force:
        return 0

    if self._last_modified:
        headers = {'If-Modified-Since': self._last_modified}
    else:
        headers = None

    # Random connection errors: just return 0 and try again later.
    # BUG FIX: catch Exception rather than a bare except so that
    # KeyboardInterrupt / SystemExit still propagate.
    try:
        res = self._board._requests_session.get(self._api_url, headers=headers)
    except Exception:
        return 0

    # 304 Not Modified, no new posts.
    if res.status_code == 304:
        return 0

    # 404 Not Found, thread died.
    elif res.status_code == 404:
        self.is_404 = True
        # Remove thread from cache, because it's gone.
        self._board._thread_cache.pop(self.id, None)
        return 0

    elif res.status_code == 200:
        # If we somehow 404'ed before, put ourselves back in the cache.
        if self.is_404:
            self.is_404 = False
            self._board._thread_cache[self.id] = self

        # Reset update flags and omitted counters before re-reading posts.
        self.want_update = False
        self.omitted_images = 0
        self.omitted_posts = 0

        self._last_modified = res.headers['Last-Modified']
        posts = res.json()['posts']

        original_post_count = len(self.replies)
        self.topic = Post(self, posts[0])
        if self.last_reply_id and not force:
            # Incremental update: only append posts newer than the last
            # reply we already have.
            self.replies.extend(Post(self, p) for p in posts
                                if p['no'] > self.last_reply_id)
        else:
            # Full refresh: rebuild the reply list (excluding the topic).
            self.replies[:] = [Post(self, p) for p in posts[1:]]
        new_post_count = len(self.replies)
        post_count_delta = new_post_count - original_post_count
        if not post_count_delta:
            return 0

        self.last_reply_id = self.replies[-1].post_number
        return post_count_delta

    else:
        res.raise_for_status()
|
def reset_index(self, **kwargs):
    """Removes all levels from index and sets a default level_0 index.

    :param kwargs: supports 'drop' (bool, default False) -- when True the
        old index is discarded instead of being inserted as column(s).
    :returns: A new QueryCompiler with updated data and reset index.
    """
    drop = kwargs.get("drop", False)
    new_index = pandas.RangeIndex(len(self.index))
    if not drop:
        if isinstance(self.index, pandas.MultiIndex):
            # TODO(devin-petersohn) ensure partitioning is properly aligned
            # Each index level becomes its own leading column.
            new_column_names = pandas.Index(self.index.names)
            new_columns = new_column_names.append(self.columns)
            index_data = pandas.DataFrame(list(zip(*self.index))).T
            result = self.data.from_pandas(index_data).concat(1, self.data)
            return self.__constructor__(result, new_index, new_columns)
        else:
            # Mirror pandas naming: use the index name if set, else "index",
            # falling back to "level_0" when "index" is already a column.
            new_column_name = (self.index.name if self.index.name is not None
                               else "index" if "index" not in self.columns
                               else "level_0")
            new_columns = self.columns.insert(0, new_column_name)
            result = self.insert(0, new_column_name, self.index)
            return self.__constructor__(result.data, new_index, new_columns)
    else:
        # The copies here are to ensure that we do not give references to
        # this object for the purposes of updates.
        return self.__constructor__(self.data.copy(), new_index,
                                    self.columns.copy(), self._dtype_cache)
|
def get_page_numbers(current_page, num_pages):
    """Default callable for page listing.

    Produce a Digg-style pagination: a window of up to five page numbers
    around the current page, preceded by 'first'/'previous' and followed by
    'next'/'last' markers when applicable.
    """
    window_start = 1 if current_page <= 2 else current_page - 2
    if num_pages <= 4:
        window_end = num_pages
    else:
        window_end = min(window_start + 4, num_pages)

    pages = []
    if current_page != 1:
        pages += ['first', 'previous']
    pages += list(range(window_start, window_end + 1))
    if current_page != num_pages:
        pages += ['next', 'last']
    return pages
|
def do_operation(database, keys, table, operation, latencies_ms):
    """Does a single operation on a random key and records its latency.

    :param database: database handle passed through to read/update
    :param keys: candidate keys; one is chosen at random
    :param table: table to operate on
    :param operation: either 'read' or 'update'
    :param latencies_ms: dict mapping operation name to a list of latencies
        in milliseconds; the measured latency is appended under `operation`
    :raises ValueError: on an unknown operation
    """
    key = random.choice(keys)
    start = timeit.default_timer()
    if operation == 'read':
        read(database, table, key)
    elif operation == 'update':
        update(database, table, key)
    else:
        raise ValueError('Unknown operation: %s' % operation)
    elapsed_ms = (timeit.default_timer() - start) * 1000
    latencies_ms[operation].append(elapsed_ms)
|
def build_full_toctree(builder, docname, prune, collapse):
    """Return a single toctree starting from docname containing all
    sub-document doctrees."""
    env = builder.env
    master_doctree = env.get_doctree(env.config.master_doc)
    resolved = []
    for node in master_doctree.traverse(addnodes.toctree):
        resolved.append(env.resolve_toctree(docname, builder, node,
                                            collapse=collapse, prune=prune))
    if not resolved:
        return None
    # Merge every resolved toctree into the first one.
    result = resolved[0]
    for extra in resolved[1:]:
        if extra:
            result.extend(extra.children)
    env.resolve_references(result, docname, builder)
    return result
|
def json_success_response(data, response):
    """Formats the response of a successful token request as JSON.

    Also adds default headers (content type and no-caching) and a 200
    status code; mutates `response` in place.
    """
    response.body = json.dumps(data)
    response.status_code = 200
    for header, value in (("Content-Type", "application/json"),
                          ("Cache-Control", "no-store"),
                          ("Pragma", "no-cache")):
        response.add_header(header, value)
|
def get_all_reserved_instances(self, reserved_instances_id=None, filters=None):
    """Describes Reserved Instances (issues a DescribeReservedInstances call).

    :type reserved_instances_id: list
    :param reserved_instances_id: A list of the reserved instance ids that
        will be returned. If not provided, all reserved instances will be
        returned.

    :type filters: dict
    :param filters: Optional filters that can be used to limit the results
        returned. Filters are provided in the form of a dictionary
        consisting of filter names as the key and filter values as the
        value. The set of allowable filter names/values is dependent on
        the request being performed. Check the EC2 API guide for details.

    :rtype: list
    :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstance`
    """
    params = {}
    if reserved_instances_id:
        self.build_list_params(params, reserved_instances_id, 'ReservedInstancesId')
    if filters:
        self.build_filter_params(params, filters)
    return self.get_list('DescribeReservedInstances', params,
                         [('item', ReservedInstance)], verb='POST')
|
def _update_pwm ( self ) :
"""Update the pwm values of the driver regarding the current state ."""
|
if self . _is_on :
values = self . _get_pwm_values ( )
else :
values = [ 0 ] * len ( self . _driver . pins )
self . _driver . set_pwm ( values )
|
def get_network(network_id, summary=False, include_data='N', scenario_ids=None, template_id=None, **kwargs):
    """Return a whole network as a dictionary.

    network_id: ID of the network to retrieve
    summary: when False, also load attributes for the network and all of
        its nodes, links and groups
    include_data: 'Y' or 'N'. Indicate whether scenario data is to be
        returned. This has a significant speed impact as retrieving large
        amounts of data can be expensive.
    scenario_ids: list of IDs to be returned. Used if a network has
        multiple scenarios but you only want one returned. Using this
        filter will speed up this function call.
    template_id: Return the network with only attributes associated with
        this template on the network, groups, nodes and links.

    Raises ResourceNotFoundError when no network with the given id exists.
    """
    log.debug("getting network %s" % network_id)
    user_id = kwargs.get('user_id')
    network_id = int(network_id)
    try:
        log.debug("Querying Network %s", network_id)
        # Load only the network row itself; the related collections are
        # fetched separately below with dedicated queries.
        net_i = db.DBSession.query(Network).filter(
            Network.id == network_id).options(noload('scenarios')).options(
            noload('nodes')).options(noload('links')).options(
            noload('types')).options(noload('attributes')).options(
            noload('resourcegroups')).one()
        net_i.check_read_permission(user_id)
        net = JSONObject(net_i)
        net.nodes = _get_nodes(network_id, template_id=template_id)
        net.links = _get_links(network_id, template_id=template_id)
        net.resourcegroups = _get_groups(network_id, template_id=template_id)
        net.owners = _get_network_owners(network_id)
        if summary is False:
            # Full (non-summary) request: attach attributes to the network
            # and to every node, link and group.
            all_attributes = _get_all_resource_attributes(network_id, template_id)
            log.info("Setting attributes")
            net.attributes = all_attributes['NETWORK'].get(network_id, [])
            for node_i in net.nodes:
                node_i.attributes = all_attributes['NODE'].get(node_i.id, [])
            log.info("Node attributes set")
            for link_i in net.links:
                link_i.attributes = all_attributes['LINK'].get(link_i.id, [])
            log.info("Link attributes set")
            for group_i in net.resourcegroups:
                group_i.attributes = all_attributes['GROUP'].get(group_i.id, [])
            log.info("Group attributes set")
        log.info("Setting types")
        all_types = _get_all_templates(network_id, template_id)
        net.types = all_types['NETWORK'].get(network_id, [])
        for node_i in net.nodes:
            node_i.types = all_types['NODE'].get(node_i.id, [])
        for link_i in net.links:
            link_i.types = all_types['LINK'].get(link_i.id, [])
        for group_i in net.resourcegroups:
            group_i.types = all_types['GROUP'].get(group_i.id, [])
        log.info("Getting scenarios")
        net.scenarios = _get_scenarios(network_id, include_data, user_id, scenario_ids)
    except NoResultFound:
        raise ResourceNotFoundError("Network (network_id=%s) not found." % network_id)
    return net
|
def response_body_to_dict(http_requests_response, content_type, xml_root_element_name=None, is_list=False):
    """Convert an XML or JSON HTTP response body into a Python dict.

    :param http_requests_response: 'Requests (lib)' response object
    :param content_type: Expected content-type header value (the Accept
        header value used in the request)
    :param xml_root_element_name: For XML responses: name of the XML root
        element in the response. Mandatory when the body is XML.
    :param is_list: For XML responses: if True, the wrapper node around
        the list items is stripped.
    :return: Python dict with the response body.
    """
    logger.info("Converting response body from API (XML or JSON) to Python dict")
    if HEADER_REPRESENTATION_JSON == content_type:
        try:
            return http_requests_response.json()
        except Exception as e:
            # 'except Exception, e' was Python-2-only syntax; 'as e' works
            # on both 2.6+ and 3.x. Bare 'raise' keeps the original traceback.
            logger.error("Error parsing the response to JSON. Exception:" + str(e))
            raise
    else:
        assert xml_root_element_name is not None, "xml_root_element_name is a mandatory param when body is in XML"
        try:
            response_body = _xml_to_dict(http_requests_response.content)[xml_root_element_name]
        except Exception as e:
            logger.error("Error parsing the response to XML. Exception: " + str(e))
            raise
        if is_list and response_body is not None:
            # Unwrap the single child element that holds the list items.
            response_body = response_body.popitem()[1]
        return response_body
|
def scaleBy(self, value, origin=None, width=False, height=False):
    """Scale the glyph by *value* (an (x, y) factor pair).

    **width** / **height** select whether the glyph's width/height
    metrics are scaled too. An explicit *origin* must not be given when
    scaling the width or height.
    """
    value = normalizers.normalizeTransformationScale(value)
    origin = normalizers.normalizeCoordinateTuple((0, 0) if origin is None else origin)
    if (width or height) and origin != (0, 0):
        raise FontPartsError(("The origin must not be set when "
                              "scaling the width or height."))
    super(BaseGlyph, self).scaleBy(value, origin=origin)
    factorX, factorY = value
    if width:
        self._scaleWidthBy(factorX)
    if height:
        self._scaleHeightBy(factorY)
|
def user(self, email: str) -> models.User:
    """Look up a single user by e-mail address (None if not found)."""
    query = self.User.query.filter_by(email=email)
    return query.first()
|
def alert(text='', title='', button=OK_TEXT, root=None, timeout=None):
    """Show a simple message box with *text* and a single OK button.

    Returns the text of the button that was clicked.
    """
    assert TKINTER_IMPORT_SUCCEEDED, 'Tkinter is required for pymsgbox'
    choices = [str(button)]
    return _buttonbox(msg=text, title=title, choices=choices, root=root, timeout=timeout)
|
def del_group(self):
    """Remove the currently selected group tab and re-apply settings."""
    current = self.tabs.currentIndex()
    self.tabs.removeTab(current)
    self.apply()
|
def _start_monitoring(self):
    """Poll ``self.path`` once per second and dispatch change callbacks.

    Diffs successive {filename: timestamp} snapshots and invokes
    ``on_create`` / ``on_delete`` / ``on_modify`` with the lists of
    added, removed and modified file names. Runs forever; intended to
    be spawned as a gevent greenlet.
    """
    # Snapshot of {filename: timestamp} to diff against.
    before = self._file_timestamp_info(self.path)
    while True:
        gevent.sleep(1)
        after = self._file_timestamp_info(self.path)
        added = [fname for fname in after if fname not in before]
        removed = [fname for fname in before if fname not in after]
        # Compare the two snapshots directly instead of re-statting the
        # file: the original os.path.getmtime call could race with a
        # deletion (raising OSError) and could disagree with 'after'.
        modified = [fname for fname in before
                    if fname not in removed and after.get(fname) != before.get(fname)]
        if added:
            self.on_create(added)
        if removed:
            self.on_delete(removed)
        if modified:
            self.on_modify(modified)
        before = after
|
def extract(fileobj, keywords, comment_tags, options):
    """Extract translation messages from underscore template files.

    Also extracts django templates: if the file contains any non-text
    django tokens the django extractor is used, otherwise we fall back
    to underscore extraction. This is a Babel extraction plugin, see
    http://babel.pocoo.org/docs/messages/#writing-extraction-methods

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should
        be recognized as translation functions
    :param comment_tags: a list of translator tags to search for and
        include in the results
    :param options: a dictionary of additional options (optional);
        'encoding' selects the file encoding (default 'utf-8')
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    encoding = options.get('encoding', 'utf-8')
    # Remember where we started so the file can be re-read by whichever
    # extractor is chosen below.
    original_position = fileobj.tell()
    text = fileobj.read().decode(encoding)
    # Lexer signature changed in django 1.9 (origin argument dropped).
    if django.VERSION[:2] >= (1, 9):
        tokens = Lexer(text).tokenize()
    else:
        tokens = Lexer(text, None).tokenize()
    # Any non-text token means the file contains django template syntax.
    vars = [token.token_type != TOKEN_TEXT for token in tokens]
    could_be_django = any(list(vars))
    if could_be_django:
        fileobj.seek(original_position)
        iterator = extract_django(fileobj, keywords, comment_tags, options)
        for lineno, funcname, message, comments in iterator:
            yield lineno, funcname, message, comments
    else:  # Underscore template extraction
        comments = []
        fileobj.seek(original_position)
        for lineno, line in enumerate(fileobj, 1):
            funcname = None
            stream = TokenStream.from_tuple_iter(tokenize(line, underscore.rules))
            while not stream.eof:
                if stream.current.type == 'gettext_begin':
                    stream.expect('gettext_begin')
                    funcname = stream.expect('func_name').value
                    args, kwargs = parse_arguments(stream, 'gettext_end')
                    # String positional args become messages; any other
                    # positional or keyword arg is a None placeholder.
                    strings = []
                    for arg, argtype in args:
                        if argtype == 'func_string_arg':
                            strings.append(force_text(arg))
                        else:
                            strings.append(None)
                    for arg in kwargs:
                        strings.append(None)
                    # Babel expects a bare string for single-argument
                    # calls and a tuple for plural/multi-argument calls.
                    if len(strings) == 1:
                        strings = strings[0]
                    else:
                        strings = tuple(strings)
                    yield lineno, funcname, strings, []
                stream.next()
|
def _ssh_forward_accept(ssh_session, timeout_ms):
    """Wait for an incoming connection on a reverse-forwarded port.

    Blocks (in the kernel) until a connection arrives; raises
    SshTimeoutException when *timeout_ms* elapses first.
    """
    channel = c_ssh_forward_accept(c_void_p(ssh_session), c_int(timeout_ms))
    if channel is None:
        raise SshTimeoutException()
    return channel
|
def uninstall():
    """Remove all HFOS data and cache directories (destructive!)."""
    _check_root()
    prompt = ("This will delete all data of your HFOS installations! Type"
              "YES to continue:")
    response = _ask(prompt, default="N", show_hint=False)
    if response == 'YES':
        shutil.rmtree('/var/lib/hfos')
        shutil.rmtree('/var/cache/hfos')
|
def _normalize(val):
    '''Normalize a value for the options file: booleans become the
    strings 'on'/'off' (undoing Salt's yaml-ification of on/off),
    anything else is rendered as lower-cased text.'''
    return ('on' if val else 'off') if isinstance(val, bool) else six.text_type(val).lower()
|
def what_task(self, token_id, presented_pronunciation, index, phonemes, phonemes_probability, warn=True, default=True):
    """Record a prediction for the "what" task.

    Stores the probability that *phonemes* is reported at position
    *index* of token *token_id*, under
    ``self['tokens'][token_id]['what'][str(index)][phonemes]``.

    :param token_id: The token for which the prediction is provided
    :param presented_pronunciation: pronunciation used to build the
        default predictions for this token
    :param index: The index of the token for which the prediction is provided
    :param phonemes: The phoneme or phoneme sequence for which the
        prediction is being made (as a space separated string)
    :param phonemes_probability: The probability of the phoneme or phoneme
        sequence; if None, the default prediction is restored (or the
        stored entry is removed when no default exists)
    :param warn: Set to False in order to avoid warnings about 0 or 1 probabilities
    :param default: Set to False in order to avoid generating the default probabilities
    """
    # Probabilities of exactly 0 or 1 make log-likelihoods degenerate.
    if phonemes_probability is not None and not 0. < phonemes_probability < 1. and warn:
        logging.warning('Setting a probability of [{}] to phonemes [{}] for token [{}].\n ' 'Using probabilities of 0.0 or 1.0 ' 'may lead to likelihoods of -Infinity'.format(phonemes_probability, phonemes, token_id))
    # Seed this token's 'what' entry with the default predictions
    # (only used when the token has no entry yet).
    default_preds = self._what_default(presented_pronunciation) if default else {}
    self['tokens'].setdefault(token_id, {}).setdefault('what', default_preds)
    if index is not None:
        self['tokens'][token_id]['what'].setdefault(str(index), {})
    if phonemes is not None:
        if phonemes_probability is not None and index is not None:
            # Explicit probability supplied: store it.
            self['tokens'][token_id]['what'][str(index)][phonemes] = phonemes_probability
        else:
            # No probability supplied: restore the default for this
            # phoneme sequence, or drop the stored entry entirely.
            if index is not None:
                if phonemes in default_preds[str(index)]:
                    self['tokens'][token_id]['what'][str(index)][phonemes] = default_preds[str(index)][phonemes]
                else:
                    self['tokens'][token_id]['what'][str(index)].pop(phonemes)
    else:
        # No phonemes given: reset or remove the whole index entry.
        if str(index) in default_preds:
            self['tokens'][token_id]['what'][str(index)] = default_preds[str(index)]
        else:
            self['tokens'][token_id]['what'].pop(str(index))
|
def amplify_gmfs(imts, vs30s, gmfs):
    """Amplify the ground shaking depending on the vs30s"""
    n = len(vs30s)
    rows = []
    # gmfs is laid out IMT-major: row m*n+i belongs to IMT m, site i.
    for m, im in enumerate(imts):
        for i in range(n):
            rows.append(amplify_ground_shaking(im.period, vs30s[i], gmfs[m * n + i]))
    return numpy.array(rows)
|
def call_member(obj, f, *args, **kwargs):
    """Invoke the method, property or attribute *f* of *obj*.

    Parameters
    ----------
    obj : object
        The object that will be used
    f : str or function
        Name of or reference to a method, property or attribute.
        Methods are called with the given arguments; property and
        attribute values are returned as-is.
    """
    # resolve the attribute name
    fname = f if isinstance(f, str) else f.__func__.__name__
    target = getattr(obj, fname)
    if not inspect.ismethod(target):
        # plain attribute or property value
        return target
    return target(*args, **kwargs)
|
def unzip(self, remotepath, subpath='/', start=0, limit=1000):
    '''Usage: unzip <remotepath> [<subpath> [<start> [<limit>]]]'''
    # Resolve the user path to a PCS path before delegating.
    return self.__panapi_unzip_file(get_pcs_path(remotepath), subpath, start, limit)
|
def list(self, pattern='*'):
    """Return resource descriptors whose type matches *pattern*.

    Args:
        pattern: Optional filter with Unix shell-style wildcards,
            e.g. ``"aws*"``, ``"*cluster*"``.
    Returns:
        A list of ResourceDescriptor objects matching the pattern.
    """
    # Descriptors are fetched lazily from the client and cached.
    if self._descriptors is None:
        self._descriptors = self._client.list_resource_descriptors(filter_string=self._filter_string)
    return [descriptor for descriptor in self._descriptors
            if fnmatch.fnmatch(descriptor.type, pattern)]
|
def get_cluster_assignment(self):
    """Fetch the cluster layout in form of assignment from zookeeper"""
    plan = self.get_cluster_plan()
    # Map (topic, partition) -> replica list.
    return {
        (item['topic'], item['partition']): item['replicas']
        for item in plan['partitions']
    }
|
def copy(self, *args, **kwargs):
    '''Return a copy of this data object.

    :param flag: if a positional argument is provided, an updated copy
        is returned (equivalent to obj.copy(); obj.update(flag)),
        optimising memory use.
    :keyword True deep: deep-copies the object (object data copied too).'''
    if args:
        return self.updated_copy(*args)
    if kwargs.get('deep', True):
        return copy.deepcopy(self)
    return copy.copy(self)
|
def _euler_to_q(self, euler):
    """Convert Euler angles to a quaternion.

    :param euler: array [roll, pitch, yaw] in rad
    :returns: array q representing a quaternion [w, x, y, z]
    """
    assert (len(euler) == 3)
    # Half-angle sines/cosines of roll, pitch, yaw.
    half = [angle / 2 for angle in euler]
    cr, cp, cy = (np.cos(a) for a in half)
    sr, sp, sy = (np.sin(a) for a in half)
    return np.array([
        cr * cp * cy + sr * sp * sy,
        sr * cp * cy - cr * sp * sy,
        cr * sp * cy + sr * cp * sy,
        cr * cp * sy - sr * sp * cy,
    ])
|
def __script(self, mode, filestem):
    """Write the Prolog driver script that yap will execute.

    The script consults aleph, reads the input files for *filestem*,
    applies all non-default settings, runs *mode* (e.g. 'induce' or
    'induce_features') and saves the results; an optional post-script
    is appended afterwards.
    """
    scriptPath = '%s/%s' % (self.tmpdir, Aleph.SCRIPT)
    script = open(scriptPath, 'w')
    # Permit the owner to execute and read this script
    os.chmod(scriptPath, S_IREAD | S_IEXEC)
    # Helper: write one line of Prolog.
    cat = lambda x: script.write(x + '\n')
    cat(":- initialization(run_aleph).")
    cat("run_aleph :- ")
    cat("consult(aleph),")
    cat("read_all('%s')," % filestem)
    # Cat all the non-default settings
    for setting, value in self.settings.items():
        cat("set(%s, %s)," % (setting, str(value)))
    cat("%s," % mode)
    # The goal continues with ',' when a post-goal follows, otherwise
    # it is terminated with '.'.
    eof = ',' if self.postScript else '.'
    if mode == 'induce_features':
        cat("consult(features),")
        features_fn = filestem + Aleph.FEATURES_SUFFIX
        dataset_fn = filestem + Aleph.PROP_DATASET_SUFFIX
        cat('save_features(%s),' % features_fn)
        cat('save_dataset(%s)%s' % (dataset_fn, eof))
    else:
        rules_fn = filestem + Aleph.RULES_SUFFIX
        cat("write_rules('%s')%s" % (rules_fn, eof))
    if self.postScript:
        cat(self.postGoal + ".")
        cat(self.postScript)
    script.close()
|
def _validate_id_types(self):
    '''Check that the ID types are integers for Rosetta, SEQRES, and UniParc sequences and 6-character PDB IDs for the ATOM sequences.'''
    # NOTE: Python 2 code (iteritems, types.IntType/StringType/UnicodeType).
    for sequences in [self.uniparc_sequences, self.fasta_sequences, self.seqres_sequences, self.rosetta_sequences]:
        for chain_id, sequence in sequences.iteritems():
            sequence_id_types = set(map(type, sequence.ids()))
            if sequence_id_types:
                # All IDs within a chain must share a single type, and
                # that type must be int.
                assert (len(sequence_id_types) == 1)
                assert (sequence_id_types.pop() == types.IntType)
    for chain_id, sequence in self.atom_sequences.iteritems():
        sequence_id_types = set(map(type, sequence.ids()))
        assert (len(sequence_id_types) == 1)
        sequence_id_type = sequence_id_types.pop()
        # ATOM sequence IDs are (byte or unicode) strings — the
        # 6-character PDB IDs mentioned in the docstring.
        assert (sequence_id_type == types.StringType or sequence_id_type == types.UnicodeType)
|
def elcm_request(irmc_info, method, path, **kwargs):
    """Send an eLCM REST request to an iRMC server node.

    :param irmc_info: dict of iRMC access parameters:
        'irmc_address': host,
        'irmc_username': user id,
        'irmc_password': password,
        'irmc_port': 80 or 443 (default 443),
        'irmc_auth_method': 'basic' or 'digest' (default 'basic'),
        'irmc_client_timeout': timeout in seconds (default 60).
    :param method: request method such as 'GET', 'POST'
    :param path: url path for the eLCM request
    :returns: requests.Response from the SCCI server
    :raises SCCIInvalidInputError: if port and/or auth_method are invalid
    :raises SCCIClientError: if the request fails or is unauthorized
    """
    host = irmc_info['irmc_address']
    userid = irmc_info['irmc_username']
    password = irmc_info['irmc_password']
    port = irmc_info.get('irmc_port', 443)
    auth_method = irmc_info.get('irmc_auth_method', 'basic')
    client_timeout = irmc_info.get('irmc_client_timeout', 60)
    # Optional request headers, query params and body.
    headers = kwargs.get('headers', {'Accept': 'application/json'})
    params = kwargs.get('params')
    data = kwargs.get('data')
    auth_obj = None
    try:
        # Both lookups raise KeyError on unsupported values.
        protocol = {80: 'http', 443: 'https'}[port]
        auth_obj = {'basic': requests.auth.HTTPBasicAuth(userid, password),
                    'digest': requests.auth.HTTPDigestAuth(userid, password)}[auth_method.lower()]
    except KeyError:
        raise scci.SCCIInvalidInputError(
            ("Invalid port %(port)d or " + "auth_method for method %(auth_method)s")
            % {'port': port, 'auth_method': auth_method})
    try:
        r = requests.request(method, protocol + '://' + host + path,
                             headers=headers, params=params, data=data,
                             verify=False, timeout=client_timeout,
                             allow_redirects=False, auth=auth_obj)
    except requests.exceptions.RequestException as requests_exception:
        raise scci.SCCIClientError(requests_exception)
    # Surface authentication failures as client errors.
    if r.status_code == 401:
        raise scci.SCCIClientError('UNAUTHORIZED')
    return r
|
def add_patchs_to_build_without_pkg_config(self, lib_dir, include_dir):
    """Register patches that strip the pkg-config command (and the
    rpm.pc dependency) from the build, substituting the given library
    path *lib_dir* and include path *include_dir* directly."""
    self.patches.extend([
        {'src': r"pkgconfig\('--libs-only-L'\)",
         'dest': "['{0}']".format(lib_dir)},
        # Considering -libs-only-l and -libs-only-L
        # https://github.com/rpm-software-management/rpm/pull/327
        {'src': r"pkgconfig\('--libs(-only-l)?'\)",
         'dest': "['rpm', 'rpmio']",
         'required': True},
        {'src': r"pkgconfig\('--cflags'\)",
         'dest': "['{0}']".format(include_dir),
         'required': True},
    ])
|
def _fixPermissions(tool, workDir):
    """Deprecated.

    Recursively chown a Docker-mounted work directory back to the
    current uid:gid by re-running the tool image with a chown
    entrypoint. Docker runs as root inside the container, so files
    written to the mount are implicitly root-owned on the host.

    :param str tool: Name of tool
    :param str workDir: Path of work directory to recursively chown
    """
    if os.geteuid() == 0:
        # already root, so the chown would be redundant
        return
    stat = os.stat(workDir)
    baseDockerCall = ['docker', 'run', '--log-driver=none',
                      '-v', os.path.abspath(workDir) + ':/data',
                      '--rm', '--entrypoint=chown']
    command = baseDockerCall + [tool] + ['-R', '{}:{}'.format(stat.st_uid, stat.st_gid), '/data']
    for attempt in retry(predicate=dockerPredicate):
        with attempt:
            subprocess.check_call(command)
|
def nvmlEventSetCreate():
    r"""Create an empty NVML event set.

    Wraps the C entry point ``nvmlEventSetCreate``; the returned
    handle must be released with nvmlEventSetFree. An NVMLError is
    raised on failure (e.g. the library is not initialized).
    """
    func = _nvmlGetFunctionPointer("nvmlEventSetCreate")
    handle = c_nvmlEventSet_t()
    _nvmlCheckReturn(func(byref(handle)))
    return handle
|
def rename_attribute(self, attribute: str, new_name: str) -> None:
    """Rename a mapping key from *attribute* to *new_name*.

    Use only if is_mapping() returns true. If the attribute does not
    exist, this does nothing.

    Args:
        attribute: The (old) name of the attribute to rename.
        new_name: The new name to rename it to.
    """
    for key_node, _ in self.yaml_node.value:
        if key_node.value != attribute:
            continue
        key_node.value = new_name
        return
|
def mtx_refl(nv, reps=1):
    """Generate the block-diagonal reflection matrix about *nv*.

    reps must be >= 1 and indicates the number of times the 3x3
    reflection matrix is repeated along the block diagonal — typically
    the number of atoms in a geometry. Returns a (3*reps, 3*reps)
    numpy array.
    """
    # Imports
    import numpy as np
    from scipy import linalg as spla
    from ..const import PRM
    # Ensure |nv| is large enough for confident directionality
    if spla.norm(nv) < PRM.ZERO_VEC_TOL:
        raise ValueError("Norm of 'nv' is too small.")
    ## end if
    # Ensure nv is a normalized np.float64 3-vector
    nv = make_nd_vec(nv, nd=3, t=np.float64, norm=True)
    # Ensure reps is a positive scalar integer
    if not np.isscalar(reps):
        raise ValueError("'reps' must be scalar.")
    ## end if
    if not np.issubdtype(type(reps), int):
        raise ValueError("'reps' must be an integer.")
    ## end if
    if not reps > 0:
        raise ValueError("'reps' must be a positive integer.")
    ## end if
    # Initialize the single-point reflection transform matrix
    base_mtx = np.zeros(shape=(3, 3), dtype=np.float64)
    # Construct the single-point transform matrix (Householder form:
    # R = I - 2*nv*nv^T); only the upper triangle is computed and
    # mirrored, since the matrix is symmetric.
    for i in range(3):
        for j in range(i, 3):
            if i == j:
                base_mtx[i, j] = 1 - 2 * nv[i] ** 2
            else:
                base_mtx[i, j] = base_mtx[j, i] = -2 * nv[i] * nv[j]
            ## end if
        ## next j
    ## next i
    # Construct the block-diagonal replicated reflection matrix
    refl_mtx = spla.block_diag(*[base_mtx for i in range(reps)])
    # Return the result
    return refl_mtx
|
def cube(f):
    """Read a full cube from a file.

    Takes an open segy file (created with segyio.open) or a file name.
    For prestack files the cube has dimensions
    ``(fast, slow, offset, sample)``; post-stack files (a single
    offset) are normalised to ``(fast, slow, sample)``.

    Parameters
    ----------
    f : str or segyio.SegyFile

    Returns
    -------
    cube : numpy.ndarray

    Notes
    -----
    .. versionadded:: 1.1
    """
    # Accept a path: open it and recurse with the file handle.
    if not isinstance(f, segyio.SegyFile):
        with segyio.open(f) as handle:
            return cube(handle)
    inline_sorted = f.sorting == segyio.TraceSortingFormat.INLINE_SORTING
    fast_lines = f.ilines if inline_sorted else f.xlines
    slow_lines = f.xlines if inline_sorted else f.ilines
    shape = (len(fast_lines), len(slow_lines), len(f.offsets), len(f.samples))
    if shape[2] == 1:
        # post-stack: drop the singleton offset axis
        shape = (shape[0], shape[1], shape[3])
    return f.trace.raw[:].reshape(shape)
|
def get_method_descriptor(self, class_name, method_name, descriptor):
    """Return the method identified by class name, method name and
    descriptor.

    :param class_name: the class name of the method
    :type class_name: string
    :param method_name: the name of the method
    :type method_name: string
    :param descriptor: the descriptor of the method
    :type descriptor: string
    :rtype: None or an :class:`EncodedMethod` object
    """
    # Build the flat lookup cache over all classes lazily, once.
    if self.__cache_methods is None:
        cache = {}
        for klass in self.get_classes():
            for meth in klass.get_methods():
                cache[meth.get_class_name() + meth.get_name() + meth.get_descriptor()] = meth
        self.__cache_methods = cache
    return self.__cache_methods.get(class_name + method_name + descriptor)
|
def unstar(self):
    """Un-star this gist.

    :returns: bool -- True if successful, False otherwise
    """
    # DELETE on the 'star' sub-resource; 204 means success, 404 means
    # it was not starred.
    return self._boolean(self._delete(self._build_url('star', base_url=self._api)), 204, 404)
|
def sometimesish(fn):
    """Decorator: call *fn* with probability 1/2.

    The wrapped function returns fn's result when the coin flip
    succeeds, and None otherwise.
    """
    import functools

    # functools.wraps preserves fn's __name__/__doc__ on the wrapper,
    # which the original decorator lost.
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        if random.randint(1, 2) == 1:
            return fn(*args, **kwargs)
    return wrapped
|
def load_nouns(self, file):
    """Load the noun dictionary for random words from ``<file>.dat``.

    :param str file: base filename (without the .dat extension)
    """
    path = os.path.join(main_dir, file + '.dat')
    with open(path, 'r') as handle:
        self.nouns = json.load(handle)
|
def _kmp_search_first(self, pInput_sequence, pPattern):
    """Find the first occurrence of *pPattern* in *pInput_sequence*
    using the Knuth-Morris-Pratt algorithm.

    Both arguments are integer sequences. Elements are compared through
    ``len(bin(e))`` — i.e. by bit-length class rather than exact
    value — matching the keying of the next-table built by
    ``_kmp_construct_next``. Returns the start position of the first
    occurrence, or -1 if there is none.
    """
    # Map pattern elements to their len(bin(...)) classes.
    input_sequence, pattern = pInput_sequence, [len(bin(e)) for e in pPattern]
    n, m = len(input_sequence), len(pattern)
    # d: position in the input; p: length of the matched prefix.
    d = p = 0
    # NOTE(review): 'next' shadows the builtin of the same name.
    next = self._kmp_construct_next(pattern)
    while d < n and p < m:
        p = next[len(bin(input_sequence[d]))][p]
        d += 1
    if p == m:
        # d - p is the index where the full match started.
        return d - p
    else:
        return - 1
|
def _normalize(self, address):
    """Normalize prefixes, suffixes and other to make matching original to returned easier."""
    if self.logger:
        self.logger.debug("Normalizing Address: {0}".format(address))
    suffixes = self.parser.suffixes
    prefixes = self.parser.prefixes
    normalized = []
    for token in address.split():
        upper = token.upper()
        lower = token.lower()
        if upper in suffixes.keys():
            # long suffix form -> canonical abbreviation
            normalized.append(suffixes[upper].lower())
        elif upper in suffixes.values():
            normalized.append(lower)
        elif upper.replace('.', '') in suffixes.values():
            # abbreviation written with a trailing dot
            normalized.append(lower.replace('.', ''))
        elif lower in prefixes.keys():
            normalized.append(prefixes[lower].lower())
        elif upper in prefixes.values():
            # canonical prefix ends with '.'; strip it
            normalized.append(lower[:-1])
        elif upper + '.' in prefixes.values():
            normalized.append(lower)
        else:
            normalized.append(lower)
    return normalized
|
def create_rot2d(angle):
    """Return the 2x2 counter-clockwise rotation matrix for *angle* (radians)."""
    c, s = math.cos(angle), math.sin(angle)
    return np.array([[c, -s], [s, c]])
|
def _get_pgtiou(pgt):
    """Return the PgtIOU object matching *pgt*.

    The PgtIOU (tgt) is set by the CAS server in a different request
    that completed before this call; it may not yet be visible to this
    thread's database session, so the lookup is retried for up to
    ~5 seconds. Users can opt out of the waiting period by setting
    CAS_PGT_FETCH_WAIT = False.

    :param: pgt
    :raises CasTicketException: when no PgtIOU is found after retrying
    """
    pgtIou = None
    retries_left = 5
    # Without the wait option only one immediate attempt is made.
    if not settings.CAS_PGT_FETCH_WAIT:
        retries_left = 1
    while not pgtIou and retries_left:
        try:
            return PgtIOU.objects.get(tgt=pgt)
        except PgtIOU.DoesNotExist:
            if settings.CAS_PGT_FETCH_WAIT:
                time.sleep(1)
            retries_left -= 1
            logger.info('Did not fetch ticket, trying again. {tries} tries left.'.format(tries=retries_left))
    raise CasTicketException("Could not find pgtIou for pgt %s" % pgt)
|
def watermark_text(image, text, corner=2):
    '''Adds a text watermark to an instance of a PIL Image.

    The text is sized so that its height is roughly 1/20th of the
    height of the base image, and is drawn in white with a thin black
    outline.

    Args:
        image: An instance of a PIL Image. This is the base image.
        text: Text to use as a watermark.
        corner: An integer between 0 and 3 representing the corner
            where the watermark should be placed (0 top left, 1 top
            right, 2 bottom right, 3 bottom left). NOTE: currently
            always called with the default 2 (bottom right).

    Returns: The watermarked image
    '''
    # Load Font
    FONT_PATH = ''
    if resource_exists(__name__, 'resources/fonts/SourceSansPro-Regular.ttf'):
        FONT_PATH = resource_filename(__name__, 'resources/fonts/SourceSansPro-Regular.ttf')
    padding = 5
    # Remember the original mode so it can be restored at the end.
    was_P = image.mode == 'P'
    was_L = image.mode == 'L'
    # Fix PIL palette issue by converting palette images to RGBA
    if image.mode not in ['RGB', 'RGBA']:
        if image.format in ['JPG', 'JPEG']:
            image = image.convert('RGB')
        else:
            image = image.convert('RGBA')
    # Get drawable image
    img_draw = ImageDraw.Draw(image)
    fontsize = 1
    # portion of image width you want text height to be; the default
    # font size will have a height that is ~1/20 of the base image.
    img_fraction = 0.05
    # attempt to use the Aperture default font; fall back to the
    # ImageFont default on failure.
    try:
        font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
        # Grow the size by 2 until the rendered height overshoots the
        # target, then step back down by 1 until it fits again.
        was_over = False
        inc = 2
        while True:
            if font.getsize(text)[1] > img_fraction * image.height:
                if not was_over:
                    was_over = True
                    inc = -1
            else:
                if was_over:
                    break
            fontsize += inc
            font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
        fontsize -= 1
        font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        # replace with log message
        print('Failed to load Aperture font. Using default font instead.')
        font = ImageFont.load_default()
        # Bad because default is suuuuper small
    # get position of text
    pos = get_pos(corner, image.size, font.getsize(text), padding)
    # draw a thin black border by offsetting the text one pixel each way
    img_draw.text((pos[0] - 1, pos[1]), text, font=font, fill='black')
    img_draw.text((pos[0] + 1, pos[1]), text, font=font, fill='black')
    img_draw.text((pos[0], pos[1] - 1), text, font=font, fill='black')
    img_draw.text((pos[0], pos[1] + 1), text, font=font, fill='black')
    # draw the actual text
    img_draw.text(pos, text, font=font, fill='white')
    # Remove cached font file
    cleanup_resources()
    del img_draw
    # restore the original palette / grayscale mode
    if was_P:
        image = image.convert('P', palette=Image.ADAPTIVE, colors=256)
    elif was_L:
        image = image.convert('L')
    return image
|
def _resolve_number_of_copies(self, items):
    """Repeat each object once per requested sticker copy.

    :param items: list of objects whose stickers are going to be previewed.
    :type items: list
    :returns: list containing n copies of each object in the items list
    :rtype: list
    """
    return [obj for obj in items for _ in range(self.get_copies_count())]
|
def find_sinks(obj):
    """Returns a dictionary of sink methods found on this object,
    keyed on method name. Sink methods are identified by
    (self, context) arguments on this object. For example:
        def f(self, context):
    is a sink method, but
        def f(self, ctx):
    is not.
    """
    SINK_ARGSPEC = ['self', 'context']
    # inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec where available (its .args element at index 0 is
    # compatible with getargspec's).
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    return {n: m for n, m in inspect.getmembers(obj, inspect.ismethod)
            if getspec(m)[0] == SINK_ARGSPEC}
|
def from_json(self, json):
    """Create resource out of JSON data.

    Dispatches on ``json['sys']['type']``; returns None for any
    unknown resource type.

    :param json: JSON dict.
    :return: Resource with a type defined by the given JSON data.
    """
    res_type = json['sys']['type']
    if res_type == ResourceType.Array.value:
        return self.create_array(json)
    if res_type == ResourceType.Entry.value:
        return self.create_entry(json)
    if res_type == ResourceType.Asset.value:
        return ResourceFactory.create_asset(json)
    if res_type == ResourceType.ContentType.value:
        return ResourceFactory.create_content_type(json)
    if res_type == ResourceType.Space.value:
        return ResourceFactory.create_space(json)
|
def valueFromString(self, value, context=None):
    """Look up the referenced object for the given string value.

    Re-implements orb.Column.valueFromString.

    :param value: <str>
    :param context: <orb.Context> || None
    :return: <orb.Model> || None
    """
    reference_model = self.referenceModel()
    return reference_model(value, context=context)
|
def density(self):
    """The percent of non-``fill_value`` points, as decimal.

    Examples
    --------
    >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
    >>> s.density
    0.6
    """
    # ratio of stored (non-fill) points to total length
    return float(self.sp_index.npoints) / float(self.sp_index.length)
|
def aggregate(self, aggregates=None, drilldowns=None, cuts=None, order=None, page=None, page_size=None, page_max=None):
    """Main aggregation function. This is used to compute a given set of
    aggregates, grouped by a given set of drilldown dimensions (i.e.
    dividers). The query can also be filtered and sorted.
    """
    def prep(cuts, drilldowns=False, aggregates=False, columns=None):
        # Build a select() with cuts always applied; drilldowns and
        # aggregates are only added when explicitly passed (False means
        # "skip this stage", not "empty list").
        q = select(columns)
        bindings = []
        cuts, q, bindings = Cuts(self).apply(q, bindings, cuts)
        attributes = None
        if drilldowns is not False:
            attributes, q, bindings = Drilldowns(self).apply(q, bindings, drilldowns)
        if aggregates is not False:
            aggregates, q, bindings = Aggregates(self).apply(q, bindings, aggregates)
        # Restrict the joins to those required by the bound columns.
        q = self.restrict_joins(q, bindings)
        return q, bindings, attributes, aggregates, cuts
    # Count: total number of result cells (constant column, no aggregates).
    count = count_results(self, prep(cuts, drilldowns=drilldowns, columns=[1])[0])
    # Summary: aggregates over the whole (cut) cube, without drilldowns.
    summary = first_result(self, prep(cuts, aggregates=aggregates)[0].limit(1))
    # Results: full query with drilldowns, aggregates, paging and ordering.
    q, bindings, attributes, aggregates, cuts = prep(cuts, drilldowns=drilldowns, aggregates=aggregates)
    page, q = Pagination(self).apply(q, page, page_size, page_max)
    ordering, q, bindings = Ordering(self).apply(q, bindings, order)
    q = self.restrict_joins(q, bindings)
    cells = list(generate_results(self, q))
    return {'total_cell_count': count, 'cells': cells, 'summary': summary, 'cell': cuts, 'aggregates': aggregates, 'attributes': attributes, 'order': ordering, 'page': page['page'], 'page_size': page['page_size']}
|
def resolve_symbol(self, symbol, bCaseSensitive=False):
    """Resolves a debugging symbol's address.

    @type  symbol: str
    @param symbol: Name of the symbol to resolve.
    @type  bCaseSensitive: bool
    @param bCaseSensitive: C{True} for case sensitive matches,
        C{False} for case insensitive.
    @rtype:  int or None
    @return: Memory address of symbol. C{None} if not found.
    """
    if not bCaseSensitive:
        symbol = symbol.lower()

    def matches(name):
        # Compare respecting the requested case sensitivity.
        return symbol == (name if bCaseSensitive else name.lower())

    # First pass: compare against the raw (possibly decorated) names.
    for name, address, _size in self.iter_symbols():
        if matches(name):
            return address
    # Second pass: undecorate each name and compare again; symbols that
    # cannot be undecorated are simply skipped.
    for name, address, _size in self.iter_symbols():
        try:
            name = win32.UnDecorateSymbolName(name)
        except Exception:
            continue
        if matches(name):
            return address
|
def of(cls, value):
    """Given either a dictionary or a `ConfigSource` object, return
    a `ConfigSource` object. This lets a function accept either type
    of object as an argument.
    """
    # Already a ConfigSource: hand it back untouched.
    if isinstance(value, ConfigSource):
        return value
    # A plain dict gets wrapped.
    if isinstance(value, dict):
        return ConfigSource(value)
    raise TypeError(u'source value must be a dict')
|
def dedup(args):
    """%prog dedup scaffolds.fasta

    Remove redundant contigs with CD-HIT. This is run prior to
    assembly.sspace.embed().
    """
    from jcvi.formats.fasta import gaps
    from jcvi.apps.cdhit import deduplicate, ids
    p = OptionParser(dedup.__doc__)
    p.set_align(pctid=GoodPct)
    p.set_mingap(default=10)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    scaffolds, = args
    mingap = opts.mingap
    # Split scaffolds at gaps >= mingap; also produces object/component AGPs.
    splitfile, oagpfile, cagpfile = gaps([scaffolds, "--split", "--mingap={0}".format(mingap)])
    dd = splitfile + ".cdhit"
    clstrfile = dd + ".clstr"
    idsfile = dd + ".ids"
    # Run CD-HIT only when its outputs are stale relative to their inputs.
    if need_update(splitfile, clstrfile):
        deduplicate([splitfile, "--pctid={0}".format(opts.pctid)])
    if need_update(clstrfile, idsfile):
        ids([clstrfile])
    agp = AGP(cagpfile)
    # Representative (non-redundant) component ids kept by CD-HIT.
    reps = set(x.split()[-1] for x in open(idsfile))
    pf = scaffolds.rsplit(".", 1)[0]
    dedupagp = pf + ".dedup.agp"
    fw = open(dedupagp, "w")
    ndropped = ndroppedbases = 0
    for a in agp:
        # Drop non-gap components that are not representatives.
        if not a.is_gap and a.component_id not in reps:
            span = a.component_span
            logging.debug("Drop component {0} ({1})".format(a.component_id, span))
            ndropped += 1
            ndroppedbases += span
            continue
        print(a, file=fw)
    fw.close()
    logging.debug("Dropped components: {0}, Dropped bases: {1}".format(ndropped, ndroppedbases))
    logging.debug("Deduplicated file written to `{0}`.".format(dedupagp))
    # Re-tidy the AGP and rebuild the deduplicated FASTA from it.
    tidyagp = tidy([dedupagp, splitfile])
    dedupfasta = pf + ".dedup.fasta"
    build([tidyagp, dd, dedupfasta])
    return dedupfasta
|
def convolve_sep3(data, hx, hy, hz, res_g=None, sub_blocks=(1, 1, 1), tmp_g=None):
    """convolves 3d data with kernel h = outer(hx, hy, hz)

    boundary conditions are clamping to edge.

    data, hx, hy.... are either np array or a gpu buffer (OCLArray)

    ``sub_blocks`` tiles the (numpy) input so each tile can be convolved
    separately; ``res_g``/``tmp_g`` are optional pre-allocated GPU buffers
    forwarded to the OpenCL path.
    """
    if isinstance(data, np.ndarray):
        data = np.ascontiguousarray(data)
        if sub_blocks == (1, 1, 1) or sub_blocks is None:
            # No tiling requested: convolve the whole array at once.
            return _convolve_sep3_numpy(data, hx, hy, hz)
        else:  # cut the image into tile and operate on every of them
            N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
            # Pad each tile by half the kernel width per axis.
            # NOTE(review): pad order is [hz, hy, hx], i.e. kernel lengths
            # matched to axes (z, y, x) of ``data`` — confirm with callers.
            Npads = [int(len(_h) / 2) for _h in [hz, hy, hx]]
            res = np.empty(data.shape, np.float32)
            for i, (data_tile, data_s_src, data_s_dest) in enumerate(tile_iterator(data, blocksize=N_sub, padsize=Npads, mode="constant")):
                res_tile = _convolve_sep3_numpy(data_tile.copy(), hx, hy, hz)
                # Copy only the valid (unpadded) region back into place.
                res[data_s_src] = res_tile[data_s_dest]
            return res
    elif isinstance(data, OCLArray):
        # GPU path; sub_blocks is ignored here.
        return _convolve_sep3_gpu(data, hx, hy, hz, res_g=res_g, tmp_g=tmp_g)
    else:
        raise TypeError("array argument (1) has bad type: %s" % type(data))
|
def get(dataset, **kwargs):
    """Return dataframe of requested dataset from Quandl.

    :param dataset: str or list, depending on single dataset usage or
        multiset usage. Dataset codes are available on the Quandl website.
    :param str api_key: Downloads are limited to 50 unless api_key is specified
    :param str start_date, end_date: Optional datefilters, otherwise entire
        dataset is returned
    :param str collapse: Options are daily, weekly, monthly, quarterly, annual
    :param str transform: options are diff, rdiff, cumul, and normalize
    :param int rows: Number of rows which will be returned
    :param str order: options are asc, desc. Default: `asc`
    :param str returns: specify what format you wish your dataset returned as,
        either `numpy` for a numpy ndarray or `pandas`. Default: `pandas`
    :returns: :class:`pandas.DataFrame` or :class:`numpy.ndarray`

    Note that Pandas expects timeseries data to be sorted ascending for most
    timeseries functionality to work.

    Any other `kwargs` passed to `get` are sent as field/value params to Quandl
    with no interference.
    """
    # Rewrite legacy (v2) parameter names to their v3 equivalents in place.
    _convert_params_to_v3(kwargs)
    data_format = kwargs.pop('returns', 'pandas')
    ApiKeyUtil.init_api_key_from_args(kwargs)
    # Check whether dataset is given as a string
    # (for a single dataset) or an array (for a multiset call)
    # Unicode String
    if isinstance(dataset, string_types):
        dataset_args = _parse_dataset_code(dataset)
        if dataset_args['column_index'] is not None:
            kwargs.update({'column_index': dataset_args['column_index']})
        data = Dataset(dataset_args['code']).data(params=kwargs, handle_column_not_found=True)
    # Array
    elif isinstance(dataset, list):
        args = _build_merged_dataset_args(dataset)
        # handle_not_found_error if set to True will add an empty DataFrame
        # for a non-existent dataset instead of raising an error
        data = MergedDataset(args).data(params=kwargs, handle_not_found_error=True, handle_column_not_found=True)
    # If wrong format
    else:
        raise InvalidRequestError(Message.ERROR_DATASET_FORMAT)
    if data_format == 'numpy':
        return data.to_numpy()
    return data.to_pandas()
|
def _get_variable_from_filepath ( self ) :
"""Determine the file variable from the filepath .
Returns
str
Best guess of variable name from the filepath"""
|
try :
return self . regexp_capture_variable . search ( self . filepath ) . group ( 1 )
except AttributeError :
self . _raise_cannot_determine_variable_from_filepath_error ( )
|
def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """:type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.iam.IAMConnection`
    :return: A connection to Amazon's IAM
    """
    # Import lazily so boto.iam is only loaded when IAM is actually used.
    from boto.iam import IAMConnection
    connection = IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
    return connection
|
def decode(self, data):
    """Parses the file, creating a CWRFile from it.

    It requires a dictionary with two values:
    - filename, containing the filename
    - contents, containing the file contents

    :param data: dictionary with the data to parse
    :return: a CWRFile instance
    """
    file_name = self._filename_decoder.decode(data['filename'])
    contents = data['contents']
    # Skip any leading noise before the transmission header record ('H...').
    start = 0
    end = len(contents)
    while contents[start:start + 1] != 'H' and start < end:
        start += 1
    if start > 0:
        data['contents'] = contents[start:]
    transmission = self._file_decoder.decode(data['contents'])[0]
    return CWRFile(file_name, transmission)
|
def add_nodes_from(self, nodes, **attr):
    """Add multiple nodes.

    Parameters
    ----------
    nodes : iterable container
        A container of nodes (list, dict, set, etc.)
        OR
        a container of (node, attribute dict) tuples.
        Node attributes are updated using the attribute dict.
    attr : keyword arguments, optional (default=no attributes)
        Update attributes for all nodes in nodes.
        Node attributes specified in nodes as a tuple
        take precedence over attributes specified generally.

    See Also
    --------
    add_node

    Examples
    --------
    >>> from discoursegraphs import DiscourseDocumentGraph
    >>> d = DiscourseDocumentGraph()
    >>> d.add_nodes_from([(1, {'layers':{'token'}, 'word':'hello'}), \
            (2, {'layers':{'token'}, 'word':'world'})])
    >>> d.nodes(data=True)
    [(1, {'layers': {'token'}, 'word': 'hello'}),
     (2, {'layers': {'token'}, 'word': 'world'})]

    Use keywords to update specific node attributes for every node.

    >>> d.add_nodes_from(d.nodes(data=True), weight=1.0)
    >>> d.nodes(data=True)
    [(1, {'layers': {'token'}, 'weight': 1.0, 'word': 'hello'}),
     (2, {'layers': {'token'}, 'weight': 1.0, 'word': 'world'})]

    Use (node, attrdict) tuples to update attributes for specific nodes.

    >>> d.add_nodes_from([(1, {'layers': {'tiger'}})], size=10)
    >>> d.nodes(data=True)
    [(1, {'layers': {'tiger', 'token'}, 'size': 10, 'weight': 1.0,
      'word': 'hello'}),
     (2, {'layers': {'token'}, 'weight': 1.0, 'word': 'world'})]
    """
    additional_attribs = attr
    # will be added to each node
    for n in nodes:
        try:  # check, if n is a node_id or a (node_id, attrib dict) tuple
            newnode = n not in self.succ
            # is node in the graph, yet?
        except TypeError:  # n is a (node_id, attribute dict) tuple
            node_id, ndict = n
            # Nodes without an explicit 'layers' set default to the graph's
            # own namespace.
            if not 'layers' in ndict:
                ndict['layers'] = {self.ns}
            layers = ndict['layers']
            assert isinstance(layers, set), "'layers' must be specified as a set of strings."
            assert all((isinstance(layer, str) for layer in layers)), "All elements of the 'layers' set must be strings."
            if node_id not in self.succ:  # node doesn't exist, yet
                self.succ[node_id] = {}
                self.pred[node_id] = {}
                newdict = additional_attribs.copy()
                newdict.update(ndict)
                # all given attribs incl. layers
                self.node[node_id] = newdict
            else:  # node already exists
                # Merge layers: keep the union of old and new layer sets;
                # all other attributes are simply overwritten in order
                # (ndict first, then the generic keyword attribs).
                existing_layers = self.node[node_id]['layers']
                all_layers = existing_layers.union(layers)
                self.node[node_id].update(ndict)
                self.node[node_id].update(additional_attribs)
                self.node[node_id].update({'layers': all_layers})
            continue
            # process next node
        # newnode check didn't raise an exception
        if newnode:  # n is a node_id and it's not in the graph, yet
            self.succ[n] = {}
            self.pred[n] = {}
            self.node[n] = attr.copy()
            # since the node isn't represented as a
            # (node_id, attribute dict) tuple, we don't know which layers
            # it is part of. Therefore, we'll add the namespace of the
            # graph as the node layer
            self.node[n].update({'layers': set([self.ns])})
        else:  # n is a node_id and it's already in the graph
            self.node[n].update(attr)
|
def patch_for(self, path):
    """Returns the ``Patch`` for the target path, creating it if necessary.

    :param str path: The absolute module path to the target.
    :return: The mapped ``Patch``.
    :rtype: Patch
    """
    try:
        return self._patches[path]
    except KeyError:
        # First request for this path: create and cache a new Patch.
        patch = self._patches[path] = Patch(path)
        return patch
|
def _ExpandUsersVariablePathSegments ( cls , path_segments , path_separator , user_accounts ) :
"""Expands path segments with a users variable , e . g . % % users . homedir % % .
Args :
path _ segments ( list [ str ] ) : path segments .
path _ separator ( str ) : path segment separator .
user _ accounts ( list [ UserAccountArtifact ] ) : user accounts .
Returns :
list [ str ] : paths for which the users variables have been expanded ."""
|
if not path_segments :
return [ ]
path_segments_lower = [ path_segment . lower ( ) for path_segment in path_segments ]
if path_segments_lower [ 0 ] in ( '%%users.homedir%%' , '%%users.userprofile%%' ) :
return cls . _ExpandUsersHomeDirectoryPathSegments ( path_segments , path_separator , user_accounts )
path_expansions = cls . _PATH_EXPANSIONS_PER_USERS_VARIABLE . get ( path_segments [ 0 ] , None )
if path_expansions :
expanded_paths = [ ]
for path_expansion in path_expansions :
expanded_path_segments = list ( path_expansion )
expanded_path_segments . extend ( path_segments [ 1 : ] )
paths = cls . _ExpandUsersVariablePathSegments ( expanded_path_segments , path_separator , user_accounts )
expanded_paths . extend ( paths )
return expanded_paths
if cls . _IsWindowsDrivePathSegment ( path_segments [ 0 ] ) :
path_segments [ 0 ] = ''
# TODO : add support for % % users . username % %
path = path_separator . join ( path_segments )
return [ path ]
|
def wind_shear(shear: str, unit_alt: str = 'ft', unit_wind: str = 'kt') -> str:
    """Format wind shear string into a spoken word string"""
    # Swap the unit abbreviations for their spoken forms when available.
    spoken_alt = SPOKEN_UNITS.get(unit_alt, unit_alt)
    spoken_wind = SPOKEN_UNITS.get(unit_wind, unit_wind)
    result = translate.wind_shear(shear, spoken_alt, spoken_wind, spoken=True)
    return result or 'Wind shear unknown'
|
def download_results(self, savedir=None, raw=True, calib=False, index=None):
    """Download the previously found and stored Opus obsids.

    Parameters
    ----------
    savedir : str or pathlib.Path, optional
        If the database root folder as defined by the config.ini should not be used,
        provide a different savedir here. It will be handed to PathManager.
    raw : bool, optional
        When True (default), download each obsid's raw product URLs.
    calib : bool, optional
        When True, additionally download the calibrated product URLs.
    index : int, optional
        Restrict the download to ``self.obsids[index]``; all obsids when None.

    Returns
    -------
    str
        Path of the storage folder of the last processed obsid.
        NOTE(review): raises NameError if ``self.obsids`` is empty, since
        ``pm`` is only bound inside the loop — confirm callers guarantee a
        non-empty obsid list.
    """
    obsids = self.obsids if index is None else [self.obsids[index]]
    for obsid in obsids:
        pm = io.PathManager(obsid.img_id, savedir=savedir)
        pm.basepath.mkdir(exist_ok=True)
        to_download = []
        if raw is True:
            to_download.extend(obsid.raw_urls)
        if calib is True:
            to_download.extend(obsid.calib_urls)
        for url in to_download:
            basename = Path(url).name
            print("Downloading", basename)
            store_path = str(pm.basepath / basename)
            try:
                urlretrieve(url, store_path)
            except Exception as e:
                # https may fail on some servers; retry over plain http.
                urlretrieve(url.replace("https", "http"), store_path)
    return str(pm.basepath)
|
def check_for_invalid_columns(problems: List, table: str, df: DataFrame) -> List:
    """Check for invalid columns in the given GTFS DataFrame.

    Parameters
    ----------
    problems : list
        A list of four-tuples, each containing

        1. A problem type (string) equal to ``'error'`` or ``'warning'``;
           ``'error'`` means the GTFS is violated;
           ``'warning'`` means there is a problem but it is not a
           GTFS violation
        2. A message (string) that describes the problem
        3. A GTFS table name, e.g. ``'routes'``, in which the problem occurs
        4. A list of rows (integers) of the table's DataFrame where the
           problem occurs
    table : string
        Name of a GTFS table
    df : DataFrame
        The GTFS table corresponding to ``table``

    Returns
    -------
    list
        The ``problems`` list extended with one warning for each DataFrame
        column that is not part of the GTFS spec for ``table``.
    """
    ref = cs.GTFS_REF
    # Columns the GTFS reference declares for this table.
    valid_columns = ref.loc[ref["table"] == table, "column"].values
    unrecognized = [col for col in df.columns if col not in valid_columns]
    for col in unrecognized:
        problems.append(["warning", f"Unrecognized column {col}", table, []])
    return problems
|
def ip_to_url(ip_addr):
    """Resolve a hostname based off an IP address.

    This is very limited and will probably not return any results if it
    is a shared IP address or an address with improperly setup DNS records.

    .. code:: python

        reusables.ip_to_url('93.184.216.34')  # example.com
        # None

        reusables.ip_to_url('8.8.8.8')
        # 'google-public-dns-a.google.com'

    :param ip_addr: IP address to resolve to hostname
    :return: string of hostname or None
    """
    try:
        hostname, _aliases, _addresses = socket.gethostbyaddr(ip_addr)
    except (socket.gaierror, socket.herror):
        # Lookup failed; log the traceback and fall through to None.
        logger.exception("Could not resolve hostname")
        return None
    return hostname
|
def _handleSelectAllAxes ( self , evt ) :
"""Called when the ' select all axes ' menu item is selected ."""
|
if len ( self . _axisId ) == 0 :
return
for i in range ( len ( self . _axisId ) ) :
self . _menu . Check ( self . _axisId [ i ] , True )
self . _toolbar . set_active ( self . getActiveAxes ( ) )
evt . Skip ( )
|
def reset(name):
    '''
    Force power down and restart an existing VM

    name
        Name of the VM to reset. Returns the per-minion command results,
        or the string 'fail' if the VM cannot be located.
    '''
    ret = {}
    client = salt.client.get_local_client(__opts__['conf_file'])
    # Locate which hypervisor host currently runs the VM.
    data = vm_info(name, quiet=True)
    if not data:
        __jid_event__.fire_event({'message': 'Failed to find VM {0} to reset'.format(name)}, 'progress')
        return 'fail'
    host = next(six.iterkeys(data))
    try:
        # Ask the hosting minion to hard-reset the VM.
        cmd_ret = client.cmd_iter(host, 'virt.reset', [name], timeout=600)
        for comp in cmd_ret:
            ret.update(comp)
        __jid_event__.fire_event({'message': 'Reset VM {0}'.format(name)}, 'progress')
    except SaltClientError as client_error:
        print(client_error)
    return ret
|
def seq_sha512(seq, normalize=True):
    """returns unicode sequence sha512 hexdigest for sequence `seq`.

    >>> seq_sha512('')
    'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e'

    >>> seq_sha512('ACGT')
    '68a178f7c740c5c240aa67ba41843b119d3bf9f8b0f0ac36cf701d26672964efbd536d197f51ce634fc70634d1eefe575bec34c83247abc52010f6e2bbdb8253'

    >>> seq_sha512('acgt')
    '68a178f7c740c5c240aa67ba41843b119d3bf9f8b0f0ac36cf701d26672964efbd536d197f51ce634fc70634d1eefe575bec34c83247abc52010f6e2bbdb8253'

    >>> seq_sha512('acgt', normalize=False)
    '785c1ac071dd89b69904372cf645b7826df587534d25c41edb2862e54fb2940d697218f2883d2bf1a11cdaee658c7f7ab945a1cfd08eb26cbce57ee88790250a'
    """
    if normalize:
        seq = normalize_sequence(seq)
    # Sequences are ASCII by contract; encode and digest.
    return hashlib.sha512(seq.encode("ascii")).hexdigest()
|
async def enable():
    """Get the data from a *hole instance.

    Opens a short-lived HTTP session and enables the Pi-hole at the
    hard-coded address.  ``loop`` and ``API_TOKEN`` are presumably
    module-level globals — TODO confirm they are defined before this
    coroutine runs.
    """
    async with aiohttp.ClientSession() as session:
        data = Hole('192.168.0.215', loop, session, api_token=API_TOKEN)
        await data.enable()
|
# NOTE(review): the lines below are web-page scraping residue (a dataset
# viewer's "Subsets and Splits" widget text), not Python code; kept as
# comments so the file stays importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.