signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def register_on_guest_mouse(self, callback):
    """Register *callback* to consume guest mouse events.

    The callback is invoked with an ``IGuestMouseEvent`` object, e.g.::

        def callback(event):
            print("%s %s %s" % (event.x, event.y, event.z))

    :param callback: callable taking a single event argument.
    :return: whatever the event source's ``register_callback`` returns.
    """
    event_kind = library.VBoxEventType.on_guest_mouse
    return self.event_source.register_callback(callback, event_kind)
def init_model_based_tags(self, model):
    """Initialize the model-dependent memory and NIC XML tag names.

    Should be called immediately after instantiating a RIBCL object::

        ribcl = ribcl.RIBCLOperations(host, login, password, timeout,
                                      port, cacert=cacert)
        model = ribcl.get_product_name()
        ribcl.init_model_based_tags(model)

    The ``model`` attribute is also stored on the object.

    :param model: the model string
    """
    self.model = model
    is_g7 = 'G7' in self.model
    if is_g7:
        self.MEMORY_SIZE_TAG = "MEMORY_SIZE"
        self.MEMORY_SIZE_NOT_PRESENT_TAG = "Not Installed"
        # NOTE(review): 'NIC_INFOMATION' (sic) is kept as-is — presumably
        # the G7 firmware really emits this misspelled tag; confirm before
        # "fixing" the spelling.
        self.NIC_INFORMATION_TAG = "NIC_INFOMATION"
    else:
        self.MEMORY_SIZE_TAG = "TOTAL_MEMORY_SIZE"
        self.MEMORY_SIZE_NOT_PRESENT_TAG = "N/A"
        self.NIC_INFORMATION_TAG = "NIC_INFORMATION"
def update_case_task(self, task):
    """Update a TheHive task.

    :param task: The task to update. The task's ``id`` determines which
        task is updated; only a whitelist of attributes is sent.
    :return: the ``requests`` response from the PATCH call.
    :raises CaseTaskException: if the HTTP request fails.
    """
    endpoint = self.url + "/api/case/task/{}".format(task.id)
    # Only these attributes may be patched on a task.
    allowed = ('title', 'description', 'status', 'order',
               'user', 'owner', 'flag', 'endDate')
    payload = {key: val for key, val in task.__dict__.items() if key in allowed}
    try:
        return requests.patch(
            endpoint,
            headers={'Content-Type': 'application/json'},
            json=payload,
            proxies=self.proxies,
            auth=self.auth,
            verify=self.cert)
    except requests.exceptions.RequestException as e:
        raise CaseTaskException("Case task update error: {}".format(e))
def extract_examples_from_readme_rst(indent=' '):
    """Extract the examples section from this project's README.rst file.

    Parameters
    ----------
    indent : str
        Prepend each line with this string. Should contain some number
        of spaces.

    Returns
    -------
    str
        The examples.

    Notes
    -----
    Quite fragile: depends on named labels inside the README.rst file.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    readme_path = os.path.join(here, '..', 'README.rst')
    try:
        with open(readme_path) as fin:
            lines = list(fin)
        start = lines.index('.. _doctools_before_examples:\n')
        end = lines.index(".. _doctools_after_examples:\n")
        # Skip the label plus surrounding markup at either end.
        wanted = lines[start + 4:end - 2]
        return ''.join(indent + re.sub('^ ', '', line) for line in wanted)
    except Exception:
        # Best effort: point the reader at the file instead of failing.
        return indent + 'See README.rst'
def warn(callingClass, astr_key, **kwargs):
    '''Convenience dispatcher to the error_exit() method.

    Raises a "warning"-level error, i.e. script processing continues
    (``exitToOS`` is forced off).
    '''
    forwarded = dict(kwargs, exitToOS=False)
    report(callingClass, astr_key, **forwarded)
def removeStepListener(listener):
    """removeStepListener(traci.StepListener) -> bool

    Remove the step listener from traci's step listener container.
    Returns True if the listener was removed successfully, False if it
    wasn't registered.
    """
    if listener not in _stepListeners:
        warnings.warn("removeStepListener(listener): listener %s not registered as step listener" % str(listener))
        return False
    _stepListeners.remove(listener)
    return True
def deserialize(self, serialized):
    '''Deserialize a JSON macaroon, dispatching on its format version.

    @param serialized: the macaroon in JSON format.
    @return: the macaroon object.
    '''
    data = json.loads(serialized)
    # V1 documents carry a top-level 'identifier'; V2 documents do not.
    if data.get('identifier') is not None:
        return self._deserialize_v1(data)
    return self._deserialize_v2(data)
def listen(self):
    '''Yield pub/sub messages as they arrive.'''
    try:
        self._pubsub.subscribe(self._channels)
        for msg in self._pubsub.listen():
            if msg['type'] != 'message':
                continue
            yield msg
    finally:
        # Forget our subscriptions once the generator is closed.
        self._channels = []
def populate_translation_fields(sender, kwargs):
    """Replicate untranslated field values into empty translation fields.

    When models are created or loaded from fixtures, values provided for
    translatable fields are copied to some/all of the corresponding
    translation fields, according to the current population mode.
    Population is performed only on keys (field names) present in
    ``kwargs``. Nothing is returned; the passed ``kwargs`` dictionary is
    altered in place.

    With ``mode`` set to:
    -- ``all``: fills every translation field, skipping only those for
       which a translated value was also provided;
    -- ``default``: fills only the default translation (unless it was
       provided explicitly);
    -- ``required``: like ``default``, but only if the original field is
       non-nullable.

    At least ``required`` should be used when loading untranslated
    fixtures to keep the database consistent (Django management commands
    normally run with hardcoded ``en-us`` active). ``default`` is useful
    to ensure fallback values are available, and ``all`` to have all
    translations defined (e.g. for lookups without query fallbacks).
    """
    mode = mt_settings.AUTO_POPULATE
    if not mode:
        return
    if mode is True:
        # Legacy spelling: ``True`` means what is now called ``all``.
        mode = 'all'
    opts = translator.get_options_for_model(sender)
    # Snapshot items(): setdefault() below mutates kwargs while we iterate.
    for field_name, value in list(kwargs.items()):
        if field_name not in opts.fields:
            continue
        if mode == 'all':
            for translation_field in opts.fields[field_name]:
                kwargs.setdefault(translation_field.name, value)
        elif mode in ('default', 'required'):
            default_name = build_localized_fieldname(
                field_name, mt_settings.DEFAULT_LANGUAGE)
            if mode == 'default' or not sender._meta.get_field(field_name).null:
                kwargs.setdefault(default_name, value)
        else:
            raise AttributeError("Unknown population mode '%s'." % mode)
def generate_package_content(self, package):
    """Generate the ``package.rst`` text content for *package*.

    The rendered text follows the usual layout::

        {{package_name}}
        .. automodule:: {{package_name}}
            :members:

        sub packages and modules
        .. toctree::
            :maxdepth: 1

    :param package: a :class:`Package` instance.
    :raises Exception: if *package* is not a ``Package``.
    """
    if not isinstance(package, Package):  # pragma: no cover
        raise Exception("%r is not a Package object" % package)
    return package.render(ignored_package=self.ignored_package)
def robust_single_linkage(X, cut, k=5, alpha=1.4142135623730951, gamma=5,
                          metric='euclidean', algorithm='best',
                          memory=Memory(cachedir=None, verbose=0),
                          leaf_size=40, core_dist_n_jobs=4, **kwargs):
    """Perform robust single linkage clustering from a vector array
    or distance matrix.

    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or
        array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.
    cut : float
        The reachability distance value at which to cut the cluster
        hierarchy to derive a flat cluster labelling.
    k : int, optional (default=5)
        Reachability distances will be computed with regard to the ``k``
        nearest neighbors.
    alpha : float, optional (default=np.sqrt(2))
        Distance scaling for reachability distance computation:
        ``max{core_k(a), core_k(b), 1/alpha d(a,b)}``.
    gamma : int, optional (default=5)
        Ignore any clusters in the flat clustering with size less than
        gamma, and declare points in such clusters as noise points.
    metric : string or callable, optional (default='euclidean')
        The metric to use when calculating distance between instances in
        a feature array; any option allowed by
        ``metrics.pairwise.pairwise_distances``. If "precomputed", X is
        assumed to be a (square) distance matrix.
    algorithm : string, optional (default='best')
        Which algorithm variant to use. ``best`` chooses automatically;
        other options: ``generic``, ``prims_kdtree``, ``prims_balltree``,
        ``boruvka_kdtree``, ``boruvka_balltree``.
    memory : instance of joblib.Memory or string (optional)
        Used to cache the output of the computation of the tree.
        By default no caching is done. If a string is given, it is the
        path to the caching directory.
    leaf_size : int, optional (default=40)
        Leaf size for trees responsible for fast nearest neighbour queries.
    core_dist_n_jobs : int, optional (default=4)
        Number of parallel jobs to run in core distance computations (if
        supported by the specific algorithm). For values below -1,
        (n_cpus + 1 + core_dist_n_jobs) are used.

    Returns
    -------
    labels : ndarray, shape (n_samples,)
        Cluster labels for each point. Noisy samples are given the label -1.
    single_linkage_tree : ndarray, shape (n_samples - 1, 4)
        The single linkage tree produced during clustering in scipy
        hierarchical clustering format
        (see http://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html).

    References
    ----------
    .. [1] Chaudhuri, K., & Dasgupta, S. (2010). Rates of convergence for
       the cluster tree. In Advances in Neural Information Processing
       Systems (pp. 343-351).
    """
    # --- Parameter validation ------------------------------------------
    if not isinstance(k, int) or k < 1:
        raise ValueError('k must be an integer greater than zero!')
    if not isinstance(alpha, float) or alpha < 1.0:
        raise ValueError('alpha must be a float greater than or equal to 1.0!')
    if not isinstance(gamma, int) or gamma < 1:
        raise ValueError('gamma must be an integer greater than zero!')
    if not isinstance(leaf_size, int) or leaf_size < 1:
        raise ValueError('Leaf size must be at least one!')
    if metric == 'minkowski':
        if 'p' not in kwargs or kwargs['p'] is None:
            raise TypeError('Minkowski metric given but no p value supplied!')
        if kwargs['p'] < 0:
            raise ValueError('Minkowski metric with negative p value is not'
                             ' defined!')

    X = check_array(X, accept_sparse='csr')
    if isinstance(memory, six.string_types):
        memory = Memory(cachedir=memory, verbose=0)

    # --- Tree construction ---------------------------------------------
    if algorithm != 'best':
        # Caller forced a specific variant.
        if algorithm == 'generic':
            single_linkage_tree = memory.cache(_rsl_generic)(
                X, k, alpha, metric, **kwargs)
        elif algorithm == 'prims_kdtree':
            single_linkage_tree = memory.cache(_rsl_prims_kdtree)(
                X, k, alpha, metric, **kwargs)
        elif algorithm == 'prims_balltree':
            single_linkage_tree = memory.cache(_rsl_prims_balltree)(
                X, k, alpha, metric, **kwargs)
        elif algorithm == 'boruvka_kdtree':
            single_linkage_tree = memory.cache(_rsl_boruvka_kdtree)(
                X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs)
        elif algorithm == 'boruvka_balltree':
            single_linkage_tree = memory.cache(_rsl_boruvka_balltree)(
                X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs)
        else:
            raise TypeError('Unknown algorithm type %s specified' % algorithm)
    else:
        if issparse(X) or metric not in FAST_METRICS:
            # We can't do much with sparse matrices...
            single_linkage_tree = memory.cache(_rsl_generic)(
                X, k, alpha, metric, **kwargs)
        elif metric in KDTree.valid_metrics:
            # Need heuristic to decide when to go to boruvka;
            # still debugging for now
            if X.shape[1] > 128:
                single_linkage_tree = memory.cache(_rsl_prims_kdtree)(
                    X, k, alpha, metric, **kwargs)
            else:
                single_linkage_tree = memory.cache(_rsl_boruvka_kdtree)(
                    X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs)
        else:  # Metric is a valid BallTree metric
            # Need heuristic to decide when to go to boruvka;
            # still debugging for now
            if X.shape[1] > 128:
                # BUG FIX: this branch previously called _rsl_prims_kdtree,
                # but we only reach here when the metric is NOT a valid
                # KDTree metric -- the BallTree prims variant must be used.
                single_linkage_tree = memory.cache(_rsl_prims_balltree)(
                    X, k, alpha, metric, **kwargs)
            else:
                single_linkage_tree = memory.cache(_rsl_boruvka_balltree)(
                    X, k, alpha, metric, leaf_size, core_dist_n_jobs, **kwargs)

    labels = single_linkage_tree.get_clusters(cut, gamma)
    return labels, single_linkage_tree.to_numpy()
def _fix_up(self, cls, code_name):
    """Internal helper: tell this Property the attribute name it was
    assigned to.

    Called by _fix_up_properties(), which is invoked by MetaModel while
    finishing construction of a Model subclass. ``code_name`` is the name
    of the class attribute this Property was bound to; consequently a
    Property instance may be assigned to at most one class attribute
    (``foo = bar = baz = StringProperty()`` is not allowed).
    """
    self._code_name = code_name
    if self._name is None:
        # No explicit name was supplied, so the code name doubles as
        # the stored property name.
        self._name = code_name
def _search_inasafe_layer(self):
    """Search the currently selected layer-tree nodes for an InaSAFE layer.

    :returns: A valid layer, or ``None`` when no selected node contains
        a layer with ``inasafe_fields`` keywords.
    :rtype: QgsMapLayer

    .. versionadded:: 4.3
    """
    for node in self.iface.layerTreeView().selectedNodes():
        for child in node.children():
            if not isinstance(child, QgsLayerTreeLayer):
                continue
            layer = child.layer()
            keywords = self.keyword_io.read_keywords(layer)
            if keywords.get('inasafe_fields'):
                return layer
    # Falls through to an implicit None when nothing matched.
def parse_mark_duplicate_metrics ( fn ) :
"""Parse the output from Picard ' s MarkDuplicates and return as pandas
Series .
Parameters
filename : str of filename or file handle
Filename of the Picard output you want to parse .
Returns
metrics : pandas . Series
Duplicate metrics .
hist : pandas . Series
Duplicate histogram .""" | with open ( fn ) as f :
lines = [ x . strip ( ) . split ( '\t' ) for x in f . readlines ( ) ]
metrics = pd . Series ( lines [ 7 ] , lines [ 6 ] )
m = pd . to_numeric ( metrics [ metrics . index [ 1 : ] ] )
metrics [ m . index ] = m . values
vals = np . array ( lines [ 11 : - 1 ] )
hist = pd . Series ( vals [ : , 1 ] , index = [ int ( float ( x ) ) for x in vals [ : , 0 ] ] )
hist = pd . to_numeric ( hist )
return metrics , hist |
def verify_resource_dict(res_dict, is_create, attr_info):
    """Verify required attributes are present in ``res_dict``.

    Also checks that an attribute is only specified when it is allowed
    for the given operation (create/update). Attributes with default
    values are considered optional. This function contains code taken
    from 'prepare_request_body' in attributes.py.

    :param res_dict: resource dictionary to verify (mutated in place).
    :param is_create: True for POST semantics, False for PUT.
    :param attr_info: attribute metadata keyed by attribute name.
    :raises webob.exc.HTTPBadRequest: on any validation failure.
    """
    if (bc.NEUTRON_VERSION >= bc.NEUTRON_NEWTON_VERSION
            and 'tenant_id' in res_dict):
        # Newton renamed tenant_id -> project_id; mirror the value.
        res_dict['project_id'] = res_dict['tenant_id']

    if is_create:  # POST
        for name, info in six.iteritems(attr_info):
            if info['allow_post']:
                if 'default' not in info and name not in res_dict:
                    msg = _("Failed to parse request. Required attribute "
                            "'%s' not specified") % name
                    raise webob.exc.HTTPBadRequest(msg)
                res_dict[name] = res_dict.get(name, info.get('default'))
            elif name in res_dict:
                msg = _("Attribute '%s' not allowed in POST") % name
                raise webob.exc.HTTPBadRequest(msg)
    else:  # PUT
        for name, info in six.iteritems(attr_info):
            if name in res_dict and not info['allow_put']:
                msg = _("Cannot update read-only attribute %s") % name
                raise webob.exc.HTTPBadRequest(msg)

    for name, info in six.iteritems(attr_info):
        if (name not in res_dict
                or res_dict[name] is bc.constants.ATTR_NOT_SPECIFIED):
            continue
        # Convert values if necessary.
        if 'convert_to' in info:
            res_dict[name] = info['convert_to'](res_dict[name])
        # Check that configured values are correct.
        if 'validate' not in info:
            continue
        for rule in info['validate']:
            _ensure_format(rule, name, res_dict)
            reason = bc.validators[rule](res_dict[name],
                                         info['validate'][rule])
            if reason:
                msg = (_("Invalid input for %(attr)s. "
                         "Reason: %(reason)s.") % dict(attr=name,
                                                       reason=reason))
                raise webob.exc.HTTPBadRequest(msg)
    return res_dict
def nhanesi(display=False):
    """Return a nicely packaged NHANES I dataset with survival times as labels.

    :param display: when True, return a human-readable copy of X (the
        numeric ``Sex`` code replaced by "Male"/"Female").
    :return: (X, y) where y is a numpy array of survival times.
    """
    X = pd.read_csv(cache(github_data_url + "NHANESI_subset_X.csv"))
    y = np.array(pd.read_csv(cache(github_data_url + "NHANESI_subset_y.csv"))["y"])
    if not display:
        return X, y
    readable = X.copy()
    readable["Sex"] = ["Male" if code == 1 else "Female" for code in X["Sex"]]
    return readable, y
def convert_config_value(self, value, label):
    """Convert 'truthy' values to True and 'falsy' values to False.

    Args:
        value: Value to convert.
        label: Label of the config in which this item was found.

    Returns:
        bool: True for truthy values, False for falsy ones.

    Raises:
        YapconfValueError: if the value is neither truthy nor falsy.
    """
    normalized = value.lower() if isinstance(value, six.string_types) else value
    if normalized in self.TRUTHY_VALUES:
        return True
    if normalized in self.FALSY_VALUES:
        return False
    raise YapconfValueError("Cowardly refusing to interpret "
                            "config value as a boolean. Name: "
                            "{0}, Value: {1}".format(self.name, normalized))
def stop_tuning_job(self, name):
    """Stop the Amazon SageMaker hyperparameter tuning job named *name*.

    Args:
        name (str): Name of the Amazon SageMaker hyperparameter tuning job.

    Raises:
        ClientError: If an error other than "already stopped" occurs while
            trying to stop the hyperparameter tuning job.
    """
    try:
        LOGGER.info('Stopping tuning job: {}'.format(name))
        self.sagemaker_client.stop_hyper_parameter_tuning_job(
            HyperParameterTuningJobName=name)
    except ClientError as e:
        # A ValidationException means the job already stopped: not an error.
        if e.response['Error']['Code'] == 'ValidationException':
            LOGGER.info('Tuning job: {} is already stopped or not running.'.format(name))
        else:
            LOGGER.error('Error occurred while attempting to stop tuning job: {}. Please try again.'.format(name))
            raise
def parse(self):
    """Fetch CNML data from the configured URL and extract its nodes."""
    source_url = self.config.get('url')
    self.cnml = CNMLParser(source_url)
    self.parsed_data = self.cnml.getNodes()
def p_propertyDeclaration_4(p):
    """propertyDeclaration_4 : dataType propertyName array defaultValue ';'"""
    # NOTE: the docstring above is a PLY grammar rule and is read at
    # runtime -- it must not be reworded.
    prop_name = p[2]
    default_value = p[4]
    p[0] = CIMProperty(prop_name, default_value, type=p[1],
                       is_array=True, array_size=p[3])
def emergency(self):
    '''Send the AT*REF emergency command.'''
    command = at.REF(at.REF.input.select)
    self.send(command)
def main_loop(self, timeout=None):
    """Run one iteration: if the trigger event fires within *timeout*,
    call ``self.func``; otherwise return immediately.

    :param timeout: Seconds to wait for a trigger event. ``None`` (the
        default) blocks until the event is set.
    :return: None
    """
    if not self.trigger_event.wait(timeout):
        return
    try:
        self.func()
    except Exception as e:
        self.logger.warning("Got an exception running {func}: {e}".format(func=self.func, e=str(e)))
    finally:
        # Always re-arm the trigger for the next iteration.
        self.trigger_event.clear()
def convert_frame(frame, body_encoding=None):
    """Convert a frame to a list of encoded lines separated by newlines.

    :param Frame frame: the Frame object to convert
    :param body_encoding: optional encoding for the frame body
    :rtype: list(bytes)
    """
    lines = []
    body = None
    if frame.body:
        if body_encoding:
            body = encode(frame.body, body_encoding)
        else:
            body = encode(frame.body)
        # Keep the declared content length in sync with the encoded body.
        if HDR_CONTENT_LENGTH in frame.headers:
            frame.headers[HDR_CONTENT_LENGTH] = len(body)
    if frame.cmd:
        lines.append(encode(frame.cmd))
        lines.append(ENC_NEWLINE)
    for key, values in sorted(frame.headers.items()):
        if values is None:
            continue
        if type(values) is not tuple:
            values = (values,)
        lines.extend(encode("%s:%s\n" % (key, value)) for value in values)
    lines.append(ENC_NEWLINE)
    if body:
        lines.append(body)
    if frame.cmd:
        lines.append(ENC_NULL)
    return lines
def post_processor_affected_function(exposure=None, hazard=None,
                                     classification=None, hazard_class=None):
    """Private function used in the affected postprocessor.

    :param exposure: The exposure to use.
    :type exposure: str
    :param hazard: The hazard to use.
    :type hazard: str
    :param classification: The hazard classification to use.
    :type classification: str
    :param hazard_class: The hazard class of the feature.
    :type hazard_class: str
    :return: Whether this hazard class is affected; may also be the
        `not exposed` key defined in `hazard_classification.py`.
    :rtype: bool, 'not exposed'
    """
    if exposure == exposure_population['key']:
        # Population has its own dedicated affected lookup.
        return is_affected(hazard, classification, hazard_class)
    # Find the class list for the requested classification.
    classes = None
    for definition in hazard_classes_all:
        if definition['key'] == classification:
            classes = definition['classes']
            break
    for the_class in classes:
        if the_class['key'] == hazard_class:
            return the_class['affected']
    # No matching class: the feature is considered not exposed.
    return not_exposed_class['key']
def _write_to_datastore(self):
    """Write all attack/defense submissions to the datastore."""
    client = self._datastore_client
    # Populate datastore: one root entity per submission category.
    groups = [
        (ATTACKS_ENTITY_KEY, self._attacks),
        (TARGET_ATTACKS_ENTITY_KEY, self._targeted_attacks),
        (DEFENSES_ENTITY_KEY, self._defenses),
    ]
    with client.no_transact_batch() as batch:
        for root_key, submissions in groups:
            batch.put(client.entity(client.key(*root_key)))
            for sub_id, submission in iteritems(submissions):
                entity = client.entity(
                    client.key(*(root_key + [KIND_SUBMISSION, sub_id])))
                entity['submission_path'] = submission.path
                entity.update(participant_from_submission_path(submission.path))
                batch.put(entity)
def create_api_docs(code_path, api_docs_path, max_depth=2):
    """Generate .rst files for every Python package under ``code_path``.

    :param code_path: Path of the source code.
    :type code_path: str
    :param api_docs_path: Path of the api documentation directory.
    :type api_docs_path: str
    :param max_depth: Maximum depth for the index.
    :type max_depth: int
    """
    base_path = os.path.split(code_path)[0]
    for package, subpackages, candidate_files in os.walk(code_path):
        # Only directories containing __init__.py are real packages.
        if '__init__.py' not in candidate_files:
            continue
        package_relative_path = package.replace(base_path + os.sep, '')
        index_package_path = os.path.join(api_docs_path, package_relative_path)
        # The package's index lives one directory above the package itself.
        index_base_path, package_base_name = os.path.split(index_package_path)
        if package_base_name in EXCLUDED_PACKAGES:
            continue
        full_package_name = package_relative_path.replace(os.sep, '.')
        new_rst_dir = os.path.join(api_docs_path, package_relative_path)
        create_dirs(new_rst_dir)
        # Create the index file for this package directory.
        modules = get_python_files_from_list(candidate_files)
        index_file_text = create_package_level_rst_index_file(
            package_name=full_package_name,
            max_depth=max_depth,
            modules=modules,
            inner_packages=subpackages)
        write_rst_file(file_directory=index_base_path,
                       file_name=package_base_name,
                       content=index_file_text)
        # Create one .rst file per .py module.
        for module in modules:
            module_name = module[:-3]  # strip '.py' off the end
            module_text = create_module_rst_file(
                '%s.%s' % (full_package_name, module_name))
            write_rst_file(file_directory=new_rst_dir,
                           file_name=module_name,
                           content=module_text)
def get_surface_boundaries(self):
    """:returns: (min_max lons, min_max lats) derived from the bounding box."""
    west, south, east, north = self.get_bounding_box()
    return [[west, east]], [[south, north]]
def metrics(self, name):
    """Return the metric stubs recorded under *name* (normalized copies)."""
    recorded = self._metrics.get(to_string(name), [])
    return [
        MetricStub(
            ensure_unicode(stub.name),
            stub.type,
            stub.value,
            normalize_tags(stub.tags),
            ensure_unicode(stub.hostname),
        )
        for stub in recorded
    ]
def from_hive_file(cls, fname, *args, **kwargs):
    """Open a local JSON hive file and build an instance from it,
    honoring the ``version`` and ``require_https`` keyword arguments."""
    version = kwargs.pop('version', None)
    require_https = kwargs.pop('require_https', True)
    hive = Hive.from_file(fname, version, require_https)
    return cls(hive, *args, **kwargs)
def ls(sess_id_or_alias, path):
    """List files in a path of a running container.

    SESSID: Session ID or its alias given when creating the session.
    PATH: Path inside container.
    """
    with Session() as session:
        try:
            print_wait('Retrieving list of files in "{}"...'.format(path))
            kernel = session.Kernel(sess_id_or_alias)
            result = kernel.list_files(path)
            if 'errors' in result and result['errors']:
                print_fail(result['errors'])
                sys.exit(1)
            files = json.loads(result['files'])
            table = []
            headers = ['file name', 'size', 'modified', 'mode']
            for file in files:
                mdt = datetime.fromtimestamp(file['mtime'])
                mtime = mdt.strftime('%b %d %Y %H:%M:%S')
                row = [file['filename'], file['size'], mtime, file['mode']]
                table.append(row)
            # FIX: user-facing message previously misspelled as 'Retrived.'
            print_done('Retrieved.')
            print('Path in container:', result['abspath'], end='')
            print(tabulate(table, headers=headers))
        except Exception as e:
            # sys.exit raises SystemExit (a BaseException) and is NOT
            # swallowed by this handler.
            print_error(e)
            sys.exit(1)
def setHint(self, hint):
    """Sets the hint for this filepath.

    :param hint: | <str>
    """
    filepath = os.path.normpath(nativestring(hint))
    if not self.normalizePath():
        # Present forward slashes when native normalization is disabled.
        filepath = filepath.replace('\\', '/')
    # NOTE(review): the original computed `filepath` and then discarded
    # it, passing the raw `hint` through -- that looked like a dead-store
    # bug, so the normalized value is now used. Confirm against callers.
    self._filepathEdit.setHint(filepath)
def load_and_migrate() -> Dict[str, Path]:
    """Ensure the settings directory tree is properly configured.

    On an actual robot this moves all settings files from wherever they
    happen to be into the proper place. On non-robots it mostly just
    loads. In addition, it writes a default config and makes sure all
    required directories exist (though the files in them may not).
    """
    if IS_ROBOT:
        _migrate_robot()
    base_dir = infer_config_base_dir()
    base_dir.mkdir(parents=True, exist_ok=True)
    return _ensure_paths_and_types(_load_with_overrides(base_dir))
def _draw(self):
    """Render the main-menu widget with the last known terminal size."""
    width, height = self._width, self._height
    self._window.draw(width, height, self.terminal)
def configure(self, graph, spanning_tree):
    """Configure the filter.

    @type graph: graph
    @param graph: Graph.
    @type spanning_tree: dictionary
    @param spanning_tree: Spanning tree.
    """
    # Plain attribute assignment; no validation is performed here.
    self.graph = graph
    self.spanning_tree = spanning_tree
def apply_patch(document, patch):
    """Apply a single Patch object to *document* and return the result.

    :raises JSONPatchError: when the patch operator is unknown.
    """
    # The target path is resolved before dispatching, matching the
    # behaviour for every operator (including invalid ones).
    parent, idx = resolve_path(document, patch.path)
    op = patch.op
    if op in ("copy", "move"):
        # These two also need the source location resolved.
        sparent, sidx = resolve_path(document, patch.src)
        handler = copy if op == "copy" else move
        return handler(sparent, sidx, parent, idx)
    if op == "add":
        return add(parent, idx, patch.value)
    if op == "remove":
        return remove(parent, idx)
    if op == "replace":
        return replace(parent, idx, patch.value, patch.src)
    if op == "merge":
        return merge(parent, idx, patch.value)
    if op == "test":
        return test(parent, idx, patch.value)
    if op == "setremove":
        return set_remove(parent, idx, patch.value)
    if op == "setadd":
        return set_add(parent, idx, patch.value)
    raise JSONPatchError("Invalid operator")
def _read_ftdna_famfinder(file):
    """Read and parse a Family Tree DNA (FTDNA) "famfinder" file.

    https://www.familytreedna.com

    Parameters
    ----------
    file : str
        Path to file.

    Returns
    -------
    pandas.DataFrame
        Individual's genetic data normalized for use with `lineage`.
    str
        Name of data source.
    """
    df = pd.read_csv(
        file,
        comment="#",
        na_values="-",
        names=["rsid", "chrom", "pos", "allele1", "allele2"],
        index_col=0,
        dtype={"chrom": object},
    )
    # Combine the two allele columns into a single genotype column,
    # then drop them (http://stackoverflow.com/a/13485766).
    df["genotype"] = df["allele1"] + df["allele2"]
    del df["allele1"]
    del df["allele2"]
    return sort_snps(df), "FTDNA"
def libvlc_media_list_player_play_item(p_mlp, p_md):
    '''Play the given media item.

    @param p_mlp: media list player instance.
    @param p_md: the media instance.
    @return: 0 upon success, -1 if the media is not part of the media list.
    '''
    # Reuse the cached ctypes binding when available; otherwise build it.
    func = (_Cfunctions.get('libvlc_media_list_player_play_item', None) or
            _Cfunction('libvlc_media_list_player_play_item',
                       ((1,), (1,),), None,
                       ctypes.c_int, MediaListPlayer, Media))
    return func(p_mlp, p_md)
def _get_list(self, key, operation, create=False):
    """Get (and maybe create) the list stored under *key*."""
    empty_default = []
    return self._get_by_type(key, operation, create, b'list', empty_default)
def __ipv4_netmask(value):
    '''Validate an IPv4 dotted-quad or integer CIDR netmask.

    Returns a ``(valid, value, errmsg)`` triple matching the other
    validators in this module.
    '''
    errmsg = 'dotted quad or integer CIDR (0->32)'
    valid, value, _ = __int(value)
    if not (valid and 0 <= value <= 32):
        # Not an in-range CIDR integer; fall back to dotted-quad checking.
        valid = salt.utils.validate.net.netmask(value)
    return (valid, value, errmsg)
def __split_name(self, name):
    """Split *name* into segments on every separator in ``self.separators``.

    :param name: the name to split
    :return: a generator over the segments interleaved with the
        separators that joined them (so joining the output reproduces
        the input).
    """
    # NOTE(review): the original's indentation was ambiguous; this
    # reading yields each separator once, between segments, which
    # matches the documented "segments together with separators"
    # contract -- confirm against callers.
    def walk(text, seps):
        if not seps:
            yield text
            return
        first_sep, rest = seps[0], seps[1:]
        pieces = text.split(first_sep)
        for sub in walk(pieces[0], rest):
            yield sub
        for piece in pieces[1:]:
            yield first_sep
            for sub in walk(piece, rest):
                yield sub
    return walk(name, self.separators)
def generate_snapshot(self, prov_dep):
    # type: (MutableMapping[Text, Any]) -> None
    """Copy all of the CWL files referenced by *prov_dep* to the
    snapshot/ directory, recursing into secondary files and listings."""
    self.self_check()
    for key, value in prov_dep.items():
        if key == "location" and value.split("/")[-1]:
            filename = value.split("/")[-1]
            destination = os.path.join(self.folder, SNAPSHOT, filename)
            # Strip a file:// scheme prefix when present.
            source = value[7:] if "file://" in value else value
            # FIXME: What if destination path already exists?
            if os.path.exists(source):
                try:
                    if os.path.isdir(source):
                        shutil.copytree(source, destination)
                    else:
                        shutil.copy(source, destination)
                    mtime = datetime.datetime.fromtimestamp(
                        os.path.getmtime(source))
                    self.add_tagfile(destination, mtime)
                except PermissionError:
                    # FIXME: avoids duplicate snapshotting; need better solution
                    pass
        elif key in ("secondaryFiles", "listing"):
            for entry in value:
                if isinstance(entry, MutableMapping):
                    self.generate_snapshot(entry)
def add_comment(node, text, location='above'):
    """Attach a comment to the given AST node.

    If the `SourceWithCommentGenerator` class is used these comments will
    be output as part of the source code.

    Note that a node can only carry one comment; subsequent calls to
    `add_comment` will override any existing comment.

    Args:
        node: The AST node whose containing statement will be commented.
        text: A comment string.
        location: Where the comment should appear. Valid values are
            'above', 'below' and 'right'.

    Returns:
        The node, with the comment stored as an annotation.
    """
    comment = dict(location=location, text=text)
    anno.setanno(node, 'comment', comment, safe=False)
    return node
def differential_pressure_meter_dP(D, D2, P1, P2, C=None, meter_type=ISO_5167_ORIFICE):
    r'''Calculates the non-recoverable pressure drop of a differential
    pressure flow meter based on the geometry of the meter, measured
    pressures of the meter, and for most models the meter discharge
    coefficient.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    D2 : float
        Diameter of orifice, or venturi meter orifice, or flow tube orifice,
        or cone meter end diameter, or wedge meter fluid flow height, [m]
    P1 : float
        Static pressure of fluid upstream of differential pressure meter at
        the cross-section of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of differential pressure meter or
        at the prescribed location (varies by type of meter) [Pa]
    C : float, optional
        Coefficient of discharge (used only in orifice plates and nozzles), [-]
    meter_type : str, optional
        One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
        'as cast convergent venturi tube', 'machined convergent venturi tube',
        'rough welded convergent venturi tube', 'cone meter', 'wedge meter'), [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the differential pressure flow
        meter, [Pa]

    Raises
    ------
    NotImplementedError
        If ``meter_type`` is a venturi nozzle, for which no standard
        pressure-drop formula is available.
    ValueError
        If ``meter_type`` is not one of the recognized meter types.

    Notes
    -----
    See the appropriate functions for the documentation for the formulas and
    references used in each method. Wedge meters and venturi nozzles do not
    have standard formulas available for pressure drop computation.

    Examples
    --------
    >>> differential_pressure_meter_dP(D=0.07366, D2=0.05, P1=200000.0,
    ... P2=183000.0, meter_type='as cast convergent venturi tube')
    1788.5717754177406
    '''
    # The orifice plate and both nozzle types share the orifice loss model,
    # which requires the discharge coefficient `C`.
    if meter_type in (ISO_5167_ORIFICE, LONG_RADIUS_NOZZLE, ISA_1932_NOZZLE):
        return dP_orifice(D=D, Do=D2, P1=P1, P2=P2, C=C)
    # All venturi tube variants share the venturi loss model (no `C` needed).
    if meter_type in (AS_CAST_VENTURI_TUBE, MACHINED_CONVERGENT_VENTURI_TUBE,
                      ROUGH_WELDED_CONVERGENT_VENTURI_TUBE):
        return dP_venturi_tube(D=D, Do=D2, P1=P1, P2=P2)
    if meter_type == CONE_METER:
        return dP_cone_meter(D=D, Dc=D2, P1=P1, P2=P2)
    if meter_type == WEDGE_METER:
        return dP_wedge_meter(D=D, H=D2, P1=P1, P2=P2)
    if meter_type == VENTURI_NOZZLE:
        # Was `raise Exception(NotImplemented)`, which misused the
        # `NotImplemented` sentinel; NotImplementedError is the correct type
        # (and still an Exception subclass for existing callers).
        raise NotImplementedError('Pressure drop of venturi nozzles is not implemented')
    # Previously an unrecognized meter_type fell through to `return dP` and
    # raised UnboundLocalError; fail explicitly instead.
    raise ValueError('Unsupported meter_type: %r' % (meter_type,))
def consume_file(self, filename):
    """Count every k-mer from every sequence in a FASTA/FASTQ file."""
    with screed.open(filename) as records:
        for record in records:
            self.consume(record['sequence'])
def set_xlimits(self, min=None, max=None):
    """Set limits for the x-axis.

    :param min: minimum value to be displayed. If None, it will be
        calculated.
    :param max: maximum value to be displayed. If None, it will be
        calculated.
    """
    self.limits.update({'xmin': min, 'xmax': max})
def append_from_json(self, json_string):
    """Create a ``measurement.Measurement`` from a JSON string and append it.

    The string is decoded into a dict which is handed to
    :meth:`append_from_dict` for the actual append.

    :param json_string: the JSON formatted string
    """
    self.append_from_dict(json.loads(json_string))
def _create_latent_variables(self):
    """Create the model's latent variables.

    Registers, in order: a volatility constant, ``self.p`` ARCH-style
    p-terms, ``self.q`` GARCH-style q-terms, a 'v' term, a returns
    constant and a GARCH-M term, then sets starting values.

    Returns
    -------
    None (changes model attributes)
    """
    self.latent_variables.add_z('Vol Constant', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
    # p-terms use a logit transform; first term starts high, the rest low.
    for p_term in range(self.p):
        self.latent_variables.add_z('p(' + str(p_term + 1) + ')', fam.Normal(0, 0.5, transform='logit'), fam.Normal(0, 3))
        if p_term == 0:
            self.latent_variables.z_list[-1].start = 3.00
        else:
            self.latent_variables.z_list[-1].start = -4.00
    # q-terms mirror the p-terms with their own starting values.
    for q_term in range(self.q):
        self.latent_variables.add_z('q(' + str(q_term + 1) + ')', fam.Normal(0, 0.5, transform='logit'), fam.Normal(0, 3))
        if q_term == 0:
            self.latent_variables.z_list[-1].start = -1.50
        else:
            self.latent_variables.z_list[-1].start = -4.00
    self.latent_variables.add_z('v', fam.Flat(transform='exp'), fam.Normal(0, 3))
    self.latent_variables.add_z('Returns Constant', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
    self.latent_variables.add_z('GARCH-M', fam.Normal(0, 3, transform=None), fam.Normal(0, 3))
    # Starting values: z_list[-3] is the 'v' term added above.
    self.latent_variables.z_list[-3].start = 2.0
def get_objective_form_for_create(self, objective_record_types):
    """Gets the objective form for creating new objectives.

    A new form should be requested for each create transaction.

    arg:    objective_record_types (osid.type.Type[]): array of
            objective record types
    return: (osid.learning.ObjectiveForm) - the objective form
    raise:  NullArgument - ``objective_record_types`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - unable to get form for requested record
            types
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.get_resource_form_for_create_template
    # Reject any element that is not a valid OSID Type before building a form.
    for arg in objective_record_types:
        if not isinstance(arg, ABCType):
            raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
    if objective_record_types == []:
        # No record types requested: omit record_types from the form kwargs.
        obj_form = objects.ObjectiveForm(
            objective_bank_id=self._catalog_id,
            runtime=self._runtime,
            effective_agent_id=self.get_effective_agent_id(),
            proxy=self._proxy)
    else:
        obj_form = objects.ObjectiveForm(
            objective_bank_id=self._catalog_id,
            record_types=objective_record_types,
            runtime=self._runtime,
            effective_agent_id=self.get_effective_agent_id(),
            proxy=self._proxy)
    # Track the pending form; flipped when the form is consumed by create.
    self._forms[obj_form.get_id().get_identifier()] = not CREATED
    return obj_form
def _syllabify(word):
    '''Syllabify the given word.

    Applies rule T1, then -- depending on the continuation flags T1
    returns -- the VV rules (T2, T4) and/or the VVV rules (T5, T6, T7).
    Returns the syllabified word together with a string recording which
    rules were applied.
    '''
    # Work on the umlaut-replaced form; umlauts are restored at the end.
    word = replace_umlauts(word)
    word, CONTINUE_VV, CONTINUE_VVV, applied_rules = apply_T1(word)
    if CONTINUE_VV:
        # Rules for two-vowel (VV) sequences.
        word, T2 = apply_T2(word)
        word, T4 = apply_T4(word)
        applied_rules += T2 + T4
    if CONTINUE_VVV:
        # Rules for three-vowel (VVV) sequences.
        word, T5 = apply_T5(word)
        word, T6 = apply_T6(word)
        word, T7 = apply_T7(word)
        applied_rules += T5 + T6 + T7
    word = replace_umlauts(word, put_back=True)
    return word, applied_rules
def calc_transform(src, dst_crs=None, resolution=None, dimensions=None,
                   src_bounds=None, dst_bounds=None, target_aligned_pixels=False):
    """Output dimensions and transform for a reprojection.

    Parameters
    ----------
    src : rasterio.io.DatasetReader
        Data source.
    dst_crs : rasterio.crs.CRS, optional
        Target coordinate reference system.
    resolution : tuple (x resolution, y resolution) or float, optional
        Target resolution, in units of target coordinate reference
        system.
    dimensions : tuple (width, height), optional
        Output file size in pixels and lines.
    src_bounds : tuple (xmin, ymin, xmax, ymax), optional
        Georeferenced extent of output file from source bounds
        (in source georeferenced units).
    dst_bounds : tuple (xmin, ymin, xmax, ymax), optional
        Georeferenced extent of output file from destination bounds
        (in destination georeferenced units).
    target_aligned_pixels : bool, optional
        Align the output bounds based on the resolution.
        Default is `False`.

    Returns
    -------
    dst_crs : rasterio.crs.CRS
        Output crs
    transform : Affine
        Output affine transformation matrix
    width, height : int
        Output dimensions
    """
    if resolution is not None:
        # Normalize a scalar resolution to an (x, y) pair.
        if isinstance(resolution, (float, int)):
            resolution = (float(resolution), float(resolution))
    # Validate mutually exclusive argument combinations up front.
    if target_aligned_pixels:
        if not resolution:
            raise ValueError('target_aligned_pixels cannot be used without resolution')
        if src_bounds or dst_bounds:
            raise ValueError('target_aligned_pixels cannot be used with src_bounds or dst_bounds')
    elif dimensions:
        invalid_combos = (dst_bounds, resolution)
        if any(p for p in invalid_combos if p is not None):
            raise ValueError('dimensions cannot be used with dst_bounds or resolution')
    if src_bounds and dst_bounds:
        raise ValueError('src_bounds and dst_bounds may not be specified simultaneously')
    if dst_crs is not None:
        if dimensions:
            # Calculate resolution appropriate for dimensions
            # in target.
            dst_width, dst_height = dimensions
            bounds = src_bounds or src.bounds
            xmin, ymin, xmax, ymax = transform_bounds(src.crs, dst_crs, *bounds)
            dst_transform = Affine((xmax - xmin) / float(dst_width), 0, xmin,
                                   0, (ymin - ymax) / float(dst_height), ymax)
        elif src_bounds or dst_bounds:
            if not resolution:
                raise ValueError('resolution is required when using src_bounds or dst_bounds')
            if src_bounds:
                xmin, ymin, xmax, ymax = transform_bounds(src.crs, dst_crs, *src_bounds)
            else:
                xmin, ymin, xmax, ymax = dst_bounds
            dst_transform = Affine(resolution[0], 0, xmin, 0, -resolution[1], ymax)
            # Always produce at least a 1x1 output grid.
            dst_width = max(int(ceil((xmax - xmin) / resolution[0])), 1)
            dst_height = max(int(ceil((ymax - ymin) / resolution[1])), 1)
        else:
            if src.transform.is_identity and src.gcps:
                # Source is georeferenced by GCPs rather than a transform.
                src_crs = src.gcps[1]
                kwargs = {'gcps': src.gcps[0]}
            else:
                src_crs = src.crs
                kwargs = src.bounds._asdict()
            dst_transform, dst_width, dst_height = calcdt(
                src_crs, dst_crs, src.width, src.height,
                resolution=resolution, **kwargs)
    elif dimensions:
        # Same projection, different dimensions, calculate resolution.
        dst_crs = src.crs
        dst_width, dst_height = dimensions
        l, b, r, t = src_bounds or src.bounds
        dst_transform = Affine((r - l) / float(dst_width), 0, l,
                               0, (b - t) / float(dst_height), t)
    elif src_bounds or dst_bounds:
        # Same projection, different dimensions and possibly
        # different resolution.
        if not resolution:
            resolution = (src.transform.a, -src.transform.e)
        dst_crs = src.crs
        xmin, ymin, xmax, ymax = (src_bounds or dst_bounds)
        dst_transform = Affine(resolution[0], 0, xmin, 0, -resolution[1], ymax)
        dst_width = max(int(ceil((xmax - xmin) / resolution[0])), 1)
        dst_height = max(int(ceil((ymax - ymin) / resolution[1])), 1)
    elif resolution:
        # Same projection, different resolution.
        dst_crs = src.crs
        l, b, r, t = src.bounds
        dst_transform = Affine(resolution[0], 0, l, 0, -resolution[1], t)
        dst_width = max(int(ceil((r - l) / resolution[0])), 1)
        dst_height = max(int(ceil((t - b) / resolution[1])), 1)
    else:
        # No overrides: keep the source grid unchanged.
        dst_crs = src.crs
        dst_transform = src.transform
        dst_width = src.width
        dst_height = src.height
    if target_aligned_pixels:
        # Snap the computed grid to resolution-aligned bounds.
        dst_transform, dst_width, dst_height = aligned_target(
            dst_transform, dst_width, dst_height, resolution)
    return dst_crs, dst_transform, dst_width, dst_height
def lineage(expr, container=Stack):
    """Yield the path of the expression tree that comprises a column
    expression.

    Parameters
    ----------
    expr : Expr
        An ibis expression. It must be an instance of
        :class:`ibis.expr.types.ColumnExpr`.
    container : Container, {Stack, Queue}
        Stack for depth-first traversal, and Queue for breadth-first.
        Depth-first will reach root table nodes before continuing on to
        other columns in a column that is derived from multiple columns.
        Breadth-first will traverse all columns at each level before
        reaching root tables.

    Yields
    ------
    node : Expr
        A column and its dependencies
    """
    if not isinstance(expr, ir.ColumnExpr):
        raise TypeError('Input expression must be an instance of ColumnExpr')
    # Seed the container with the root expression and its name.
    c = container([(expr, expr._name)])
    seen = set()
    # while we haven't visited everything
    while c:
        node, name = c.get()
        if node not in seen:
            seen.add(node)
            yield node
        # add our dependencies to the container if they match our name
        # and are ibis expressions
        c.extend((arg, getattr(arg, '_name', name))
                 for arg in c.visitor(_get_args(node.op(), name))
                 if isinstance(arg, ir.Expr))
def create_new_sticker_set(self, user_id, name, title, png_sticker, emojis,
                           contains_masks=None, mask_position=None):
    """Use this method to create new sticker set owned by a user. The bot will be able to edit the created sticker set. Returns True on success.

    https://core.telegram.org/bots/api#createnewstickerset

    Parameters:

    :param user_id: User identifier of created sticker set owner
    :type  user_id: int

    :param name: Short name of sticker set, to be used in t.me/addstickers/ URLs (e.g., animals). Can contain only english letters, digits and underscores. Must begin with a letter, can't contain consecutive underscores and must end in "_by_<bot username>". <bot_username> is case insensitive. 1-64 characters.
    :type  name: str|unicode

    :param title: Sticker set title, 1-64 characters
    :type  title: str|unicode

    :param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. Pass a file_id as a String to send a file that already exists on the Telegram servers, pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data.
    :type  png_sticker: pytgbot.api_types.sendable.files.InputFile|str|unicode

    :param emojis: One or more emoji corresponding to the sticker
    :type  emojis: str|unicode

    Optional keyword parameters:

    :param contains_masks: Pass True, if a set of mask stickers should be created
    :type  contains_masks: bool

    :param mask_position: A JSON-serialized object for position where the mask should be placed on faces
    :type  mask_position: pytgbot.api_types.receivable.stickers.MaskPosition

    Returns:

    :return: Returns True on success
    :rtype:  bool
    """
    # Local imports keep the api_types modules off the module import path.
    from pytgbot.api_types.receivable.stickers import MaskPosition
    from pytgbot.api_types.sendable.files import InputFile
    # Validate all arguments before issuing the API request.
    assert_type_or_raise(user_id, int, parameter_name="user_id")
    assert_type_or_raise(name, unicode_type, parameter_name="name")
    assert_type_or_raise(title, unicode_type, parameter_name="title")
    assert_type_or_raise(png_sticker, (InputFile, unicode_type), parameter_name="png_sticker")
    assert_type_or_raise(emojis, unicode_type, parameter_name="emojis")
    assert_type_or_raise(contains_masks, None, bool, parameter_name="contains_masks")
    assert_type_or_raise(mask_position, None, MaskPosition, parameter_name="mask_position")
    result = self.do(
        "createNewStickerSet", user_id=user_id, name=name, title=title,
        png_sticker=png_sticker, emojis=emojis, contains_masks=contains_masks,
        mask_position=mask_position)
    if self.return_python_objects:
        logger.debug("Trying to parse {data}".format(data=repr(result)))
        try:
            return from_array_list(bool, result, list_level=0, is_builtin=True)
        except TgApiParseException:
            logger.debug("Failed parsing as primitive bool", exc_info=True)
        # end try
        # no valid parsing so far
        raise TgApiParseException("Could not parse result.")
        # See debug log for details!
    # end if return_python_objects
    return result
def add_data_file(self, from_fp, timestamp=None, content_type=None):
    # type: (IO, Optional[datetime.datetime], Optional[str]) -> Text
    """Copy inputs to the data/ folder.

    The stream is first copied to a temp file while its checksum is
    computed, then renamed into a content-addressed path
    ``data/<xx>/<checksum>`` and registered with the bagit manifest.
    Returns the relative POSIX path of the stored file.
    """
    self.self_check()
    tmp_dir, tmp_prefix = os.path.split(self.temp_prefix)
    with tempfile.NamedTemporaryFile(prefix=tmp_prefix, dir=tmp_dir, delete=False) as tmp:
        checksum = checksum_copy(from_fp, tmp)
    # Calculate hash-based file path
    folder = os.path.join(self.folder, DATA, checksum[0:2])
    path = os.path.join(folder, checksum)
    # os.rename assumed safe, as our temp file should
    # be in same file system as our temp folder
    if not os.path.isdir(folder):
        os.makedirs(folder)
    os.rename(tmp.name, path)
    # Relative posix path
    # (to avoid \ on Windows)
    rel_path = _posix_path(os.path.relpath(path, self.folder))
    # Register in bagit checksum
    if Hasher == hashlib.sha1:
        # We already have the sha1, so hand it to bagit directly.
        self._add_to_bagit(rel_path, sha1=checksum)
    else:
        _logger.warning(u"[provenance] Unknown hash method %s for bagit manifest", Hasher)
        # Inefficient, bagit support need to checksum again
        self._add_to_bagit(rel_path)
    _logger.debug(u"[provenance] Added data file %s", path)
    if timestamp is not None:
        # Record when this file was produced by the workflow itself.
        self._file_provenance[rel_path] = self._self_made(timestamp)
    _logger.debug(u"[provenance] Relative path for data file %s", rel_path)
    if content_type is not None:
        self._content_types[rel_path] = content_type
    return rel_path
def extract_values(field_list=None, filter_arg_dict=None, out_stream=None):
    """Get list of dicts where each dict holds values from one SciObj.

    Args:
        field_list: list of str
            List of field names for which to return values. Must be strings from
            FIELD_NAME_TO_generate_dict.keys().
            If None, return all fields.

        filter_arg_dict: dict
            Dict of arguments to pass to ``ScienceObject.objects.filter()``.

        out_stream: file-like object, optional
            If provided, results are written to this stream instead of being
            returned as a list.

    Returns:
        list of dict: The keys in the returned dict correspond to the field names
        in ``field_list``. When ``out_stream`` is provided, the result of writing
        to the stream is returned instead.

    Raises:
        raise d1_common.types.exceptions.InvalidRequest() if ``field_list`` contains any
        invalid field names. A list of the invalid fields is included in the exception.
    """
    lookup_dict, generate_dict = _split_field_list(field_list)
    query, annotate_key_list = _create_query(filter_arg_dict, generate_dict)
    # Only the lookup strings are needed here; the dict keys are unused.
    lookup_list = [v["lookup_str"] for v in lookup_dict.values()] + annotate_key_list
    if out_stream is None:
        return _create_sciobj_list(query, lookup_list, lookup_dict, generate_dict)
    return _write_stream(query, lookup_list, lookup_dict, generate_dict, out_stream)
def reduce_function(op_func, input_tensor, axis=None, keepdims=None,
                    name=None, reduction_indices=None):
    """Deprecated pass-through wrapper for TensorFlow reduction ops.

    This used to be needed to support tf 1.4 and earlier; that support has
    been dropped, so the call is now forwarded unchanged.

    :param op_func: the reduction function to apply, e.g. ``tf.reduce_sum``.
    :param input_tensor: the tensor to reduce; should have numeric type.
    :param axis: the dimensions to reduce. If None (the default), reduces
        all dimensions. Must be in the range
        ``[-rank(input_tensor), rank(input_tensor))``.
    :param keepdims: if true, retains reduced dimensions with length 1.
    :param name: a name for the operation (optional).
    :param reduction_indices: the old (deprecated) name for ``axis``.
    :return: the same value ``op_func`` returns.
    """
    warnings.warn("`reduce_function` is deprecated and may be removed on or after 2019-09-08.")
    return op_func(input_tensor, axis=axis, keepdims=keepdims, name=name,
                   reduction_indices=reduction_indices)
def p_referenceDeclaration(p):  # pylint: disable=line-too-long
    """referenceDeclaration : objectRef referenceName ';'
                            | objectRef referenceName defaultValue ';'
                            | qualifierList objectRef referenceName ';'
                            | qualifierList objectRef referenceName defaultValue ';'
    """  # noqa: E501
    quals = []
    dv = None
    if isinstance(p[1], list):  # qualifiers
        # Productions 3 and 4: a qualifier list precedes the reference.
        quals = p[1]
        cname = p[2]
        pname = p[3]
        if len(p) == 6:
            # Production 4 includes a defaultValue token.
            dv = p[4]
    else:
        # Productions 1 and 2: no qualifier list.
        cname = p[1]
        pname = p[2]
        if len(p) == 5:
            # Production 2 includes a defaultValue token.
            dv = p[3]
    # Index qualifiers by name, preserving their declaration order.
    quals = OrderedDict([(x.name, x) for x in quals])
    p[0] = CIMProperty(pname, dv, type='reference', reference_class=cname, qualifiers=quals)
def popitem(self, last=True):
    """Remove and return a ``(key, value)`` pair from the dictionary.

    If ``last`` is True (the default) the pair with the *greatest* key is
    removed; otherwise the pair with the *least* key is removed.

    Raises ``KeyError`` if the dictionary is empty.
    """
    if len(self) == 0:
        raise KeyError('popitem(): dictionary is empty')
    index = -1 if last else 0
    key = self._list_pop(index)
    return (key, self._pop(key))
def saturation(p):
    """Return the saturation of a pixel: the ratio of chroma to value.

    :param p: A tuple of (R, G, B) values
    :return: The saturation of the pixel, from 0 to 1
    """
    brightest = max(p)
    if brightest == 0:
        # Pure black has no defined hue; treat saturation as zero.
        return 0
    chroma = brightest - min(p)
    return chroma / float(brightest)
def publish(**kwargs):
    """Run the version task before pushing to git and uploading to pypi.

    Determines the new version (or re-uses the current one when retrying),
    runs CI checks on master, pushes the release, optionally uploads to
    PyPI, and posts the changelog to the repository host.
    """
    current_version = get_current_version()
    click.echo('Current version: {0}'.format(current_version))
    retry = kwargs.get("retry")
    debug('publish: retry=', retry)
    if retry:
        # The "new" version will actually be the current version, and the
        # "current" version will be the previous version.
        new_version = current_version
        current_version = get_previous_version(current_version)
    else:
        # Derive the bump level from commit history and compute the version.
        level_bump = evaluate_version_bump(current_version, kwargs['force_level'])
        new_version = get_new_version(current_version, level_bump)
    owner, name = get_repository_owner_and_name()
    # Releases are only cut from a green master branch.
    ci_checks.check('master')
    checkout('master')
    if version(**kwargs):
        push_new_version(gh_token=os.environ.get('GH_TOKEN'), owner=owner, name=name)
        if config.getboolean('semantic_release', 'upload_to_pypi'):
            upload_to_pypi(
                username=os.environ.get('PYPI_USERNAME'),
                password=os.environ.get('PYPI_PASSWORD'),
                # We are retrying, so we don't want errors for files that are already on PyPI.
                skip_existing=retry,
            )
        if check_token():
            click.echo('Updating changelog')
            try:
                log = generate_changelog(current_version, new_version)
                post_changelog(owner, name, new_version,
                               markdown_changelog(new_version, log, header=False))
            except GitError:
                # A failed changelog post should not fail the release itself.
                click.echo(click.style('Posting changelog failed.', 'red'), err=True)
        else:
            click.echo(click.style('Missing token: cannot post changelog', 'red'), err=True)
        click.echo(click.style('New release published', 'green'))
    else:
        click.echo('Version failed, no release will be published.', err=True)
def chunk_size(self, value):
    """Set the blob's default chunk size.

    :type value: int
    :param value: (Optional) The current blob's chunk size, if it is set.

    :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a
        multiple of 256 KB.
    """
    multiple = self._CHUNK_SIZE_MULTIPLE
    if value is not None and value > 0 and value % multiple:
        raise ValueError("Chunk size must be a multiple of %d." % (multiple,))
    self._chunk_size = value
def map_all(self, prot_alignment, nucl_sequences):
    """Convert protein sequences to a nucleotide alignment.

    Pairs each protein sequence with its nucleotide sequence and yields
    the mapped alignment; raises ``ValueError`` if either input runs out
    before the other.
    """
    for prot, nucl in itertools.zip_longest(prot_alignment, nucl_sequences):
        if prot is None:
            raise ValueError("Exhausted protein sequences")
        if nucl is None:
            raise ValueError("Exhausted nucleotide sequences")
        yield self.map_alignment(prot, nucl)
def compute_CDR3_pgen(self, CDR3_seq, V_usage_mask, J_usage_mask):
    """Compute Pgen for CDR3 'amino acid' sequence CDR3_seq from a VJ model.

    Conditioned on the already formatted V genes/alleles indicated in
    V_usage_mask and the J genes/alleles in J_usage_mask.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation.
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation.

    Returns
    -------
    pgen : float
        The generation probability (Pgen) of the sequence.
    """
    # Genomic J alignment/matching (contribution from P(delJ|J));
    # returns Pi_J and a reduced J_usage_mask.
    Pi_J, r_J_usage_mask = self.compute_Pi_J(CDR3_seq, J_usage_mask)
    # Genomic V alignment/matching conditioned on J gene (contribution
    # from P(V, J, delV)); returns Pi_V_given_J.
    Pi_V_given_J, max_V_align = self.compute_Pi_V_given_J(CDR3_seq, V_usage_mask, r_J_usage_mask)
    # Include insertions (R and PinsVJ) to get the total contribution from
    # the left side conditioned on the J gene.
    Pi_V_insVJ_given_J = self.compute_Pi_V_insVJ_given_J(CDR3_seq, Pi_V_given_J, max_V_align)
    pgen = 0
    # zip Pi_V_insVJ_given_J and Pi_J together for each J gene to get total pgen
    for j in range(len(r_J_usage_mask)):
        # Sum over every nucleotide position at which V-side and J-side
        # contributions can meet.
        for pos in range(len(CDR3_seq) * 3 - 1):
            pgen += np.dot(Pi_V_insVJ_given_J[j][:, pos], Pi_J[j][:, pos + 1])
    return pgen
def createEditor(self, parent, option, index):
    """Return the editor to be used for editing the data item at *index*.

    Note that the index contains information about the model being used.
    The editor's parent widget is specified by *parent*, and the item
    options by *option*. Auto-fill-background is enabled on the editor so
    the rendered delegate below it is not visible.

    :param parent: the parent widget
    :type parent: QtGui.QWidget
    :param option: the options for painting
    :type option: QtGui.QStyleOptionViewItem
    :param index: the index to paint
    :type index: QtCore.QModelIndex
    :returns: The created widget | None
    :rtype: :class:`QtGui.QWidget` | None
    :raises: None
    """
    # Only one editor may be open at a time.
    self.close_editors()
    editor = self.create_editor_widget(parent, option, index)
    if not editor:
        return editor
    self._edit_widgets[index] = editor
    editor.setAutoFillBackground(True)
    editor.destroyed.connect(partial(self.editor_destroyed, index=index))
    return editor
def create_manage_py(self, apps):
    """Create the manage.py file with the given list of installed apps.

    :param list apps: names of the apps to declare as installed
    """
    self.logger.debug('Creating manage.py ...')
    migration_entries = [
        "'%(app)s': '%(app)s.south_migrations'" % {'app': app}
        for app in apps
    ]
    contents = MANAGE_PY % {
        'apps_available': "', '".join(apps),
        'apps_path': self.apps_path,
        'south_migration_modules': ", ".join(migration_entries),
    }
    with open(self._get_manage_py_path(), mode='w') as f:
        f.write(contents)
def quartus_prop(self, buff: List[str], intfName: str, name: str, value, escapeStr=True):
    """Append a Quartus TCL ``set_interface_property`` command to *buff*.

    :param buff: line buffer for output
    :param intfName: name of the interface to set the property on
    :param name: property name
    :param value: property value
    :param escapeStr: if True, wrap string values in double quotes
    """
    if isinstance(value, str) and escapeStr:
        rendered = '"%s"' % value
    elif isinstance(value, bool):
        # Quartus TCL expects lower-case true/false.
        rendered = str(value).lower()
    else:
        rendered = str(value)
    buff.append("set_interface_property %s %s %s" % (intfName, name, rendered))
def remove_checksum(path):
    """Delete the cached ``.md5sum`` checksum file for an image, if present."""
    checksum_path = '{}.md5sum'.format(path)
    if os.path.exists(checksum_path):
        os.remove(checksum_path)
def leftAt(self, offset=0):
    """Return the point at the vertical center of the region's left side,
    shifted horizontally by ``offset`` (negative values move further left).
    """
    x = self.getX() + offset
    y = self.getY() + (self.getH() / 2)
    return Location(x, y)
def _record_key(record: Record) -> List[Tuple[Column, str]]:
    "An orderable representation of this record: its items, sorted."
    items = list(record.items())
    items.sort()
    return items
def rows(array):
    """Find the number of rows in an array.

    Excel reference: https://support.office.com/en-ie/article/rows-function-b592593e-3fc2-47f2-bec1-bda493811597

    :param array: the array of which the rows should be counted.
    :return: the number of rows.
    """
    if isinstance(array, (float, int)):
        # Special case for A1:A1-type ranges which only return an int/float.
        return 1
    if array is None:
        # Some A1:A1 ranges return None (issue with ref cell).
        return 1
    return len(array.values)
def clone(self):
    """Clone a book from GITenberg's repo into the library.

    Assumes the caller is authenticated to ``git clone`` from the remote.

    :returns: tuple of ``(success, message)`` where *success* is a bool
    """
    logger.debug("Attempting to clone {0}".format(self.book_repo_name))
    if self.path_exists():
        return False, "Error: Local clone of {0} already exists".format(self.book_repo_name)
    try:
        self.local_repo = git.Repo.clone_from(self.get_clone_url_ssh(), self.library_book_dir())
        return True, "Success! Cloned {0}".format(self.book_repo_name)
    except git.exc.GitCommandError as e:
        # Was `print(e)`: route the failure through the logger instead of
        # stdout, consistent with the debug logging elsewhere in this method.
        logger.error("Cloning {0} failed: {1}".format(self.book_repo_name, e))
        logger.debug("clone ran into an issue, likely remote doesn't exist")
        return False, "Error git returned a fail code"
def update(self, cls, rid, partialrecord, user='undefined'):
    """Update an existing record.

    >>> s = teststore()
    >>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
    >>> r = s.get('tstoretest', '1')
    >>> r['age']
    >>> s.update('tstoretest', '1', {'age': 25})
    >>> r = s.get('tstoretest', '1')
    >>> r['age']
    25
    >>> s.update('tstoretest', '1', {'age': 30}, user='jane')
    >>> r = s.get('tstoretest', '1')
    >>> r[UPDATER]
    'jane'
    >>> s.update('tstoretest', '2', {'age': 25})
    Traceback (most recent call last):
    KeyError: 'No such record'
    >>> s.create('tstoretest', {'id': '2', 'name': 'Joe'})
    >>> s.update('tstoretest', '2', {'id': '1'})
    Traceback (most recent call last):
    KeyError: 'There is already a record for tstoretest/1'
    >>> s.update('tstoretest', '2', {'badcol': '1'})
    Traceback (most recent call last):
    ValueError: Undefined field
    >>> s.update('tstoretest', '2', {'age': 'hello'})
    Traceback (most recent call last):
    ValueError: Bad update (INVALID_TEXT_REPRESENTATION)
    """
    self.validate_partial_record(cls, partialrecord)
    # Stamp audit metadata on the partial record before writing.
    partialrecord[UPDATE_DATE] = self.nowstr()
    partialrecord[UPDATER] = user
    try:
        updatecount = self.db.update(cls, partialrecord, where={ID: rid})
        if updatecount < 1:
            # Zero rows touched means the id does not exist.
            raise KeyError('No such record')
    except (psycopg2.IntegrityError, psycopg2.ProgrammingError, psycopg2.DataError) as error:
        # Translate low-level psycopg2 error codes into the store's
        # KeyError/ValueError contract documented above.
        if error.pgcode == psycopg2.errorcodes.UNIQUE_VIOLATION:
            raise KeyError('There is already a record for {}/{}'.format(cls, partialrecord[ID]))
        elif error.pgcode == psycopg2.errorcodes.UNDEFINED_COLUMN:
            raise ValueError('Undefined field')
        else:
            raise ValueError('Bad update ({})'.format(psycopg2.errorcodes.lookup(error.pgcode)))
def duration(days=0,  # type: float
             seconds=0,  # type: float
             microseconds=0,  # type: float
             milliseconds=0,  # type: float
             minutes=0,  # type: float
             hours=0,  # type: float
             weeks=0,  # type: float
             years=0,  # type: float
             months=0,  # type: float
             ):  # type: (...) -> Duration
    """Create a Duration instance from the given clock/calendar components."""
    components = dict(
        days=days,
        seconds=seconds,
        microseconds=microseconds,
        milliseconds=milliseconds,
        minutes=minutes,
        hours=hours,
        weeks=weeks,
        years=years,
        months=months,
    )
    return Duration(**components)
def create_snmp_manager ( self , manager , host , ** kwargs ) :
"""Create an SNMP manager .
: param manager : Name of manager to be created .
: type manager : str
: param host : IP address or DNS name of SNMP server to be used .
: type host : str
: param \ * \ * kwargs : See the REST API Guide on your array for the
documentation on the request :
* * POST snmp / : manager * *
: type \ * \ * kwargs : optional
: returns : A dictionary describing the created SNMP manager .
: rtype : ResponseDict""" | data = { "host" : host }
data . update ( kwargs )
return self . _request ( "POST" , "snmp/{0}" . format ( manager ) , data ) |
def readconfig(self, configpath=None):
    """Read bugzillarc configuration and apply it to this connection.

    :param configpath: Optional bugzillarc path to read, instead of the
        default list.

    This function is called automatically from Bugzilla connect(), which is
    called at __init__ if a URL is passed. Calling it manually is just for
    passing in a non-standard configpath.

    The locations for the bugzillarc file are preferred in this order:

        ~/.config/python-bugzilla/bugzillarc
        ~/.bugzillarc
        /etc/bugzillarc

    It has content like::

        [bugzilla.yoursite.com]
        user = username
        password = password

    Or::

        [bugzilla.yoursite.com]
        api_key = key

    The file can have multiple sections for different bugzilla instances.
    A 'url' field in the [DEFAULT] section can be used to set a default
    URL for the bugzilla command line tool.

    Be sure to set appropriate permissions on bugzillarc if you choose to
    store your password in it!
    """
    cfg = _open_bugzillarc(configpath or self.configpath)
    if not cfg:
        return
    section = ""
    log.debug("bugzillarc: Searching for config section matching %s",
              self.url)

    def _parse_hostname(_u):
        # If http://example.com is passed, netloc = example.com path = ""
        # If just example.com is passed, netloc = "" path = example.com
        parsedbits = urlparse(self.url)
        return parsedbits.netloc or parsedbits.path

    urlhost = _parse_hostname(self.url)
    # Sections are scanned in sorted order; the first match wins because
    # of the break below.
    for sectionhost in sorted(cfg.sections()):
        # If the section is just a hostname, make it match exactly.
        # If the section has a / in it, do a substring match on the URL.
        if "/" not in sectionhost:
            if sectionhost == urlhost:
                section = sectionhost
        elif sectionhost in self.url:
            section = sectionhost
        if section:
            log.debug("bugzillarc: Found matching section: %s", section)
            break
    if not section:
        log.debug("bugzillarc: No section found")
        return
    # Copy recognized keys onto the connection; unknown keys are only logged.
    for key, val in cfg.items(section):
        if key == "api_key":
            log.debug("bugzillarc: setting api_key")
            self.api_key = val
        elif key == "user":
            log.debug("bugzillarc: setting user=%s", val)
            self.user = val
        elif key == "password":
            log.debug("bugzillarc: setting password")
            self.password = val
        elif key == "cert":
            log.debug("bugzillarc: setting cert")
            self.cert = val
        else:
            log.debug("bugzillarc: unknown key=%s", key)
def _add_attr_values_from_insert_to_original(original_code, insert_code, insert_code_list, attribute_name, op_list):
    """Append the inserted code's values of ``attribute_name`` to the original's,
    and rebase argument indexes inside the inserted bytecode.

    If a bytecode instruction in the inserted code referenced argument number
    ``i``, after modification it references ``n + i``, where ``n`` is the number
    of values in the original code. This prevents variable/name collisions
    between the two pieces of code.

    :param original_code: code object to modify
    :param insert_code: code object to insert
    :param insert_code_list: byte sequence (as a list of ints) of the inserted
        code, which is modified too
    :param attribute_name: name of the attribute to merge ('co_names',
        'co_consts' or 'co_varnames')
    :param op_list: opcodes whose arguments reference ``attribute_name`` and
        therefore must be rebased
    :return: tuple of (modified bytes of the code to insert, merged values of
        ``attribute_name`` for the original code)
    """
    orig_value = getattr(original_code, attribute_name)
    insert_value = getattr(insert_code, attribute_name)
    orig_names_len = len(orig_value)
    code_with_new_values = list(insert_code_list)
    offset = 0
    while offset < len(code_with_new_values):
        op = code_with_new_values[offset]
        if op in op_list:
            new_val = code_with_new_values[offset + 1] + orig_names_len
            if new_val > MAX_BYTE:
                # The rebased argument no longer fits in one byte: keep the low
                # byte in place and prepend an EXTENDED_ARG carrying the high byte.
                code_with_new_values[offset + 1] = new_val & MAX_BYTE
                code_with_new_values = (code_with_new_values[:offset]
                                        + [EXTENDED_ARG, new_val >> 8]
                                        + code_with_new_values[offset:])
                # Skip the EXTENDED_ARG pair we just inserted so the already
                # patched instruction is not processed (and rebased) twice.
                offset += 2
            else:
                code_with_new_values[offset + 1] = new_val
        # Advance past the current (opcode, arg) pair on EVERY iteration —
        # including opcodes outside op_list, otherwise the loop never terminates.
        offset += 2
    new_values = orig_value + insert_value
    return bytes(code_with_new_values), new_values
def read_ipv6(self, length):
    """Read an Internet Protocol version 6 (IPv6) header.

    Structure of IPv6 header [RFC 2460]:

        Octets  Bits  Name        Description
        0       0     ip.version  Version (6)
        0       4     ip.class    Traffic Class
        1       12    ip.label    Flow Label
        4       32    ip.payload  Payload Length (header excluded)
        6       48    ip.next     Next Header
        7       56    ip.limit    Hop Limit
        8       64    ip.src      Source Address
        24      192   ip.dst      Destination Address

    :param length: remaining bytes to parse; defaults to the rest of the
        stream when None
    :return: parsed IPv6 header dict, extended with the decoded next layer
    """
    if length is None:
        length = len(self)
    # The reads below MUST stay in this exact order: each helper consumes
    # bytes sequentially from the underlying stream.
    _htet = self._read_ip_hextet()   # version / traffic class / flow label
    _plen = self._read_unpack(2)     # payload length (header excluded)
    _next = self._read_protos(1)     # next-header protocol
    _hlmt = self._read_unpack(1)     # hop limit
    _srca = self._read_ip_addr()     # source address (16 bytes)
    _dsta = self._read_ip_addr()     # destination address (16 bytes)
    ipv6 = dict(
        version=_htet[0],
        tclass=_htet[1],
        label=_htet[2],
        payload=_plen,
        next=_next,
        limit=_hlmt,
        src=_srca,
        dst=_dsta,
    )
    # IPv6 has a fixed 40-byte base header.
    hdr_len = 40
    raw_len = ipv6['payload']
    ipv6['packet'] = self._read_packet(header=hdr_len, payload=raw_len)
    return self._decode_next_layer(ipv6, _next, raw_len)
def get_portchannel_info_by_intf_output_lacp_actor_brcd_state(self, **kwargs):
    """Auto Generated Code"""
    # Build <get_portchannel_info_by_intf>/<output>/<lacp>/<actor-brcd-state>
    config = ET.Element("get_portchannel_info_by_intf")
    output = ET.SubElement(config, "output")
    lacp = ET.SubElement(output, "lacp")
    state = ET.SubElement(lacp, "actor-brcd-state")
    state.text = kwargs.pop('actor_brcd_state')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def rs_generator_poly_all(max_nsym, fcr=0, generator=2):
    '''Generate all irreducible generator polynomials up to max_nsym (usually
    you can use n, the length of the message + ecc). Very useful to reduce
    processing time if you want to encode using variable schemes and nsym
    rates.'''
    # Seed entries 0 and 1 with the same unit polynomial object, then let
    # the loop overwrite any nsym it covers.
    unit_poly = [1]
    g_all = {0: unit_poly, 1: unit_poly}
    for nsym in xrange(max_nsym):
        g_all[nsym] = rs_generator_poly(nsym, fcr, generator)
    return g_all
def available_time_entries_impl(self):
    """Creates the JSON object for the available time entries route response.

    Returns:
        The JSON object for the available time entries route response:
        a dict mapping run name to a list of time entries.
    """
    result = {}
    if self._db_connection_provider:
        # Database-backed mode: one SQL query picks an arbitrary tag per
        # run and lists its (step, wall_time) pairs.
        db = self._db_connection_provider()
        # For each run, pick a tag.
        cursor = db.execute(
            '''
    SELECT
      TagPickingTable.run_name,
      Tensors.step,
      Tensors.computed_time
    FROM (/* For each run, pick any tag. */
      SELECT
        Runs.run_id AS run_id,
        Runs.run_name AS run_name,
        Tags.tag_id AS tag_id
      FROM Runs
      JOIN Tags
        ON Tags.run_id = Runs.run_id
      WHERE
        Tags.plugin_name = ?
      GROUP BY Runs.run_id) AS TagPickingTable
    JOIN Tensors
      ON Tensors.series = TagPickingTable.tag_id
    WHERE Tensors.step IS NOT NULL
    ORDER BY Tensors.step
    ''', (metadata.PLUGIN_NAME,))
        for (run, step, wall_time) in cursor:
            if run not in result:
                result[run] = []
            result[run].append(self._create_time_entry(step, wall_time))
    else:
        # Read data from disk via the event multiplexer.
        all_runs = self._multiplexer.PluginRunToTagToContent(
            metadata.PLUGIN_NAME)
        for run, tag_to_content in all_runs.items():
            if not tag_to_content:
                # This run lacks data for this plugin.
                continue
            # Just use the list of tensor events for any of the tags to
            # determine the steps to list for the run. The steps are often the
            # same across tags for each run, albeit the user may elect to
            # sample certain tags differently within the same run. If the
            # latter occurs, TensorBoard will show the actual step of each tag
            # atop the card for the tag.
            tensor_events = self._multiplexer.Tensors(
                run, min(six.iterkeys(tag_to_content)))
            result[run] = [self._create_time_entry(e.step, e.wall_time)
                           for e in tensor_events]
    return result
def node(*nodes):
    """Selects and configures a list of nodes. 'all' configures all nodes.

    Side effects: rebuilds the node data bag, populates fabric's env.hosts /
    env.all_hosts, and may execute the node runner on every selected host.
    """
    chef.build_node_data_bag()
    if not len(nodes) or nodes[0] == '':
        abort('No node was given')
    elif nodes[0] == 'all':
        # Fetch all nodes and add them to env.hosts
        for node in lib.get_nodes(env.chef_environment):
            env.hosts.append(node['name'])
        if not len(env.hosts):
            abort('No nodes found in /nodes/')
        # Ask for confirmation before configuring the whole fleet.
        message = "Are you sure you want to configure all nodes ({0})".format(
            len(env.hosts))
        if env.chef_environment:
            message += " in the {0} environment".format(env.chef_environment)
        message += "?"
        if not __testing__:
            if not lib.global_confirm(message):
                abort('Aborted by user')
    else:
        # A list of nodes was given
        env.hosts = list(nodes)
    env.all_hosts = list(env.hosts)  # Shouldn't be needed
    # Check whether another command was given in addition to "node:"
    if not (littlechef.__cooking__ and
            'node:' not in sys.argv[-1] and
            'nodes_with_role:' not in sys.argv[-1] and
            'nodes_with_recipe:' not in sys.argv[-1] and
            'nodes_with_tag:' not in sys.argv[-1]):
        # If user didn't type recipe:X, role:Y or deploy_chef,
        # configure the nodes
        with settings():
            execute(_node_runner)
    chef.remove_local_node_data_bag()
def parse_latitude(latitude, hemisphere):
    """Parse a NMEA-formatted latitude pair.

    Args:
        latitude (str): Latitude in DDMM.MMMM form.
        hemisphere (str): ``'N'`` (north) or ``'S'`` (south).

    Returns:
        float: Decimal representation of latitude; negative for the
        southern hemisphere.

    Raises:
        ValueError: If ``hemisphere`` is neither ``'N'`` nor ``'S'``.
    """
    # First two characters are whole degrees; the remainder is decimal minutes.
    degrees = int(latitude[:2]) + float(latitude[2:]) / 60
    if hemisphere == 'S':
        return -degrees
    if hemisphere != 'N':
        raise ValueError('Incorrect North/South value %r' % hemisphere)
    return degrees
def get(self, url, parameters=None):
    """Request ``url`` from Google with proper auth params; return the
    decoded body text, or None on a non-200 response."""
    if not self.access_token:
        raise IOError("No authorized client available.")
    if parameters is None:
        parameters = {}
    # Deliberately mutates a caller-supplied dict (original behavior).
    parameters['access_token'] = self.access_token
    parameters['alt'] = 'json'
    response = requests.get(url + '?' + self.getParameters(parameters))
    if response.status_code == 200:
        return toUnicode(response.text)
    return None
def tmsh_mode(self, delay_factor=1):
    """tmsh command is equivalent to config command on F5."""
    factor = self.select_delay_factor(delay_factor)
    self.clear_buffer()
    command = "{0}tmsh{0}".format(self.RETURN)
    self.write_channel(command)
    # Give the device time to enter tmsh before clearing the echo.
    time.sleep(factor)
    self.clear_buffer()
    return None
def aggregate(self):
    """Aggregate all reports of the same type into a master report.

    For each report name in ``self.reportset``, concatenates the per-sample
    report files into one file under ``self.reportpath``, keeping a single
    header row.
    """
    for report in self.reportset:
        printtime('Processing {}'.format(report.split('.')[0]), self.start)
        # Initialise the header for each report - MLST is different, as the
        # header is different for each MLST scheme. This provides a generic
        # header instead.
        header = '' if report != 'mlst.csv' else \
            'Strain,Genus,SequenceType,Matches,1,2,3,4,5,6,7\n'
        # Initialise a string to hold the data for each report
        data = ''
        # Open the aggregated report
        with open(os.path.join(self.reportpath, report), 'w') as aggregate:
            for sample in self.runmetadata.samples:
                # Try to open the report for this run; samples without the
                # report are silently skipped.
                try:
                    with open(os.path.join(sample.general.reportpath,
                                           report), 'r') as runreport:
                        # Only get the header from the first file
                        if not header:
                            header = runreport.readline()
                        else:
                            for row in runreport:
                                # The final entry in a report does not have a
                                # newline character. Add \n as required
                                if not row.endswith('\n'):
                                    row += '\n'
                                # For certain reports, the header row is
                                # printed above each strain - ignore multiple
                                # instances of the header
                                if row.split(',')[0] != header.split(',')[0]:
                                    # Add the row to the string of data
                                    data += row
                except IOError:
                    pass
            # Write the strings to the aggregate report file
            aggregate.write(header)
            aggregate.write(data)
def datum_to_value(self, instance, datum):
    """Convert a given MAAS-side datum to a Python-side value.

    :param instance: The `Object` instance on which this field is currently
        operating. Treated as read-only.
    :param datum: The MAAS-side datum to validate and convert into a
        Python-side value.
    :return: A `FilesystemGroupDevices` collection built from the datum;
        an empty list when the datum is None.
    :raises TypeError: if the datum is not a sequence.
    """
    if datum is None:
        return []
    if not isinstance(datum, Sequence):
        raise TypeError(
            "datum must be a sequence, not %s" % type(datum).__name__)
    # Look up the collection class on the bound origin and fill it lazily.
    devices_cls = getattr(instance._origin, "FilesystemGroupDevices")
    items = (get_device_object(instance._origin, item) for item in datum)
    return devices_cls(items)
def _read_config_file ( self ) :
"""read in the configuration file , a json file defined at self . config""" | try :
with open ( self . config , 'r' ) as f :
config_data = json . load ( f )
except FileNotFoundError :
config_data = { }
return config_data |
def get_queryset(self):
    """Return only objects accessible to the current user.

    If the user is not authenticated, all public objects are returned.
    The model's manager must implement ``accessible_to`` (AccessLevelManager).
    """
    current_user = self.request.user
    return self.queryset.all().accessible_to(user=current_user)
def nth_centered_hexagonal_num(index):
    """Calculate the nth centered hexagonal number.

    Examples:
        >>> nth_centered_hexagonal_num(10)
        271
        >>> nth_centered_hexagonal_num(2)
        7
        >>> nth_centered_hexagonal_num(9)
        217

    :param index: The nth position of the centered hexagonal number series to return
    :return: The nth centered hexagonal number
    """
    # Closed form for centered hexagonal numbers: 3n(n-1) + 1.
    return 3 * index * (index - 1) + 1
def do_call(self, path, method, body=None, headers=None):
    """Send an HTTP request to the REST API.

    :param string path: A URL
    :param string method: The HTTP method (GET, POST, etc.) to use
        in the request.
    :param string body: A string representing any data to be sent in the
        body of the HTTP request.
    :param dictionary headers: "{header-name: header-value}" dictionary.
    :returns: the decoded JSON body, or None when the body is empty/non-JSON.
    :raises NetworkError: on timeout or any other transport failure.
    :raises HTTPError: when the response status is outside 200-206.
    """
    url = urljoin(self.base_url, path)
    try:
        resp = requests.request(method, url, data=body, headers=headers,
                                auth=self.auth, timeout=self.timeout)
    except requests.exceptions.Timeout:
        raise NetworkError("Timeout while trying to connect to RabbitMQ")
    except requests.exceptions.RequestException as err:
        # All other requests exceptions inherit from RequestException
        raise NetworkError("Error during request %s %s" % (type(err), err))
    try:
        content = resp.json()
    except ValueError:
        # Body was not valid JSON (e.g. an empty response).
        content = None
    # 'success' HTTP status codes are 200-206
    if resp.status_code < 200 or resp.status_code > 206:
        raise HTTPError(content, resp.status_code, resp.text, path, body)
    # Empty/falsy content is normalized to None.
    return content if content else None
def put(self, url, request_data, content_type=None, auth_map=None):
    """Send a PUT request to the given URL.

    If ``auth_map`` is not None, it must contain the NETWORKAPI_PASSWORD and
    NETWORKAPI_USERNAME keys to authenticate against the networkAPI.
    The keys and their values are sent in the request headers.

    :param url: URL to send the HTTP request to.
    :param request_data: Payload to send in the HTTP request body.
    :param content_type: Content type of ``request_data``; the value is added
        as the request's "Content-Type" header.
    :param auth_map: Dictionary with authentication data for the networkAPI.
    :return: A tuple of (<http response code>, <response body>).
    :raise ConnectionError: Failure connecting to the networkAPI.
    :raise RestError: Failure accessing the networkAPI.
    """
    try:
        LOG.debug('PUT %s\n%s', url, request_data)
        parsed_url = urlparse(url)
        # Choose a plain or TLS connection based on the URL scheme.
        if parsed_url.scheme == 'https':
            connection = HTTPSConnection(parsed_url.hostname, parsed_url.port)
        else:
            connection = HTTPConnection(parsed_url.hostname, parsed_url.port)
        try:
            headers_map = dict()
            if auth_map is not None:
                headers_map.update(auth_map)
            if content_type is not None:
                headers_map['Content-Type'] = content_type
            connection.request('PUT', parsed_url.path, request_data,
                               headers_map)
            response = connection.getresponse()
            body = response.read()
            LOG.debug('PUT %s returns %s\n%s', url, response.status, body)
            return response.status, body
        finally:
            # Always release the socket, even if the request failed.
            connection.close()
    except URLError as e:
        raise ConnectionError(e)
    except Exception as e:
        # NOTE(review): e.message is a Python 2 idiom — confirm before porting
        # this module to Python 3.
        raise RestError(e, e.message)
def prox_nuclear(V, alpha):
    r"""Proximal operator of the nuclear norm :cite:`cai-2010-singular`
    with parameter :math:`\alpha`.

    Parameters
    ----------
    V : array_like
        Input array :math:`V`
    alpha : float
        Parameter :math:`\alpha`

    Returns
    -------
    X : ndarray
        Output array
    s : ndarray
        Singular values of `X`
    """
    # SVD via the float16-promotion helper, then soft-threshold the
    # singular values by alpha.
    U, sigma, Vt = sl.promote16(V, fn=np.linalg.svd, full_matrices=False)
    sigma_thresholded = np.maximum(0, sigma - alpha)
    X = np.dot(U, np.dot(np.diag(sigma_thresholded), Vt))
    return X, sigma_thresholded
def config():
    """Return sun configuration values as a dict, seeded with defaults."""
    conf_args = {"INTERVAL": 60, "STANDBY": 3}
    raw = read_file("{0}{1}".format(conf_path, "sun.conf"))
    for raw_line in raw.splitlines():
        stripped = raw_line.lstrip()
        # Skip blanks and comment lines.
        if not stripped or stripped.startswith("#"):
            continue
        parts = stripped.split("=")
        conf_args[parts[0]] = parts[1]
    return conf_args
def _to_dict ( self ) :
"""Return a json dictionary representing this model .""" | _dict = { }
if hasattr ( self , 'tooling' ) and self . tooling is not None :
_dict [ 'tooling' ] = self . tooling . _to_dict ( )
if hasattr ( self , 'disambiguation' ) and self . disambiguation is not None :
_dict [ 'disambiguation' ] = self . disambiguation . _to_dict ( )
if hasattr ( self , 'human_agent_assist' ) and self . human_agent_assist is not None :
_dict [ 'human_agent_assist' ] = self . human_agent_assist
return _dict |
def get_download_link(self):
    """Get direct download link with soudcloud's redirect system."""
    url = None
    if not self.get("downloadable"):
        # Track is not flagged downloadable: try the stream URL first.
        try:
            url = self.client.get_location(
                self.client.STREAM_URL % self.get("id"))
        except serror as e:
            print(e)
    if not url:
        # Fall back to the download URL.
        try:
            url = self.client.get_location(
                self.client.DOWNLOAD_URL % self.get("id"))
        except serror as e:
            print(e)
    return url
def login(self, username=None, password=None):
    """Explicit Abode login.

    Validates the cached credentials, posts to the login endpoint, and on
    success stores the session token, panel/user info and OAuth token.

    :raises AbodeAuthenticationException: on missing/invalid credentials or
        a non-200 login response.
    :return: True on successful login.
    """
    if username is not None:
        self._cache[CONST.ID] = username
    if password is not None:
        self._cache[CONST.PASSWORD] = password
    if (self._cache[CONST.ID] is None or
            not isinstance(self._cache[CONST.ID], str)):
        raise AbodeAuthenticationException(ERROR.USERNAME)
    if (self._cache[CONST.PASSWORD] is None or
            not isinstance(self._cache[CONST.PASSWORD], str)):
        raise AbodeAuthenticationException(ERROR.PASSWORD)
    self._save_cache()
    self._token = None
    login_data = {
        CONST.ID: self._cache[CONST.ID],
        CONST.PASSWORD: self._cache[CONST.PASSWORD],
        CONST.UUID: self._cache[CONST.UUID],
    }
    response = self._session.post(CONST.LOGIN_URL, json=login_data)
    response_object = json.loads(response.text)
    if response.status_code != 200:
        raise AbodeAuthenticationException(
            (response.status_code, response_object['message']))
    _LOGGER.debug("Login Response: %s", response.text)
    self._token = response_object['token']
    self._panel = response_object['panel']
    self._user = response_object['user']
    # Only fetch the OAuth token after the primary login succeeded: doing it
    # beforehand wastes a request and can mask the real login failure.
    oauth_token = self._session.get(CONST.OAUTH_TOKEN_URL)
    oauth_token_object = json.loads(oauth_token.text)
    self._oauth_token = oauth_token_object['access_token']
    _LOGGER.info("Login successful")
    return True
def show_network_ip_availability(self, network, **_params):
    """Fetch IP availability information for the specified network."""
    path = self.network_ip_availability_path % (network)
    return self.get(path, params=_params)
def render_header(self, ctx, data):
    """Render any required static content in the header, from the
    C{staticContent} attribute of this page. Returns the tag unchanged
    when there is no static content or no header."""
    content = self.staticContent
    if content is None:
        return ctx.tag
    header = content.getHeader()
    if header is None:
        return ctx.tag
    return ctx.tag[header]
def _validate_resolution_output_length(path, entity_name, results, allow_mult=False, all_mult=False, ask_to_resolve=True):
    """Validate the length of resolution results, prompting the user to pick
    one when multiple results are found in interactive mode.

    :param path: Path to the object that required resolution; propagated from
        the command-line
    :type path: string
    :param entity_name: Name of the object
    :type entity_name: string
    :param results: Result of resolution; non-empty list of object
        specifications (each specification is a dictionary with keys
        "project" and "id")
    :type results: list of dictionaries
    :param allow_mult: If True, it is okay to choose from multiple results of
        a single resolved object, or return all results found; if False,
        raise an error if multiple results are found
    :type allow_mult: boolean
    :param all_mult: If True, return all results if multiple results are
        found for a single resolved object; if False, the user needs to
        choose a single result if multiple are found (only has an effect if
        allow_mult is True)
    :type all_mult: boolean
    :param ask_to_resolve: Whether picking may be necessary (if True, a list
        is returned; if False, only one result is returned); if specified as
        True, then all results will be returned, regardless of the values of
        allow_mult and all_mult
    :type ask_to_resolve: boolean
    :returns: The results of resolving entity_name, either a single dict of
        the form {"project": <project_id>, "id": <object_id>} or a list of
        such dicts when multiple results are allowed
    :rtype: dict or list of dicts
    :raises ValueError: if results is empty
    :raises ResolutionError: if too many results are found and the user is
        not in interactive mode and cannot select one

    Precondition: results must be a nonempty list.
    """
    if len(results) == 0:
        raise ValueError("'results' must be nonempty.")
    # Caller wants ALL results, so return the entire results list.
    # At this point, do not care about the values of allow_mult or all_mult.
    if not ask_to_resolve:
        return results
    if len(results) > 1:
        # The other way the caller can specify it wants all results is by
        # setting allow_mult to True and allowing all_mult to be True (or if
        # the object name is a glob pattern).
        if allow_mult and (all_mult or is_glob_pattern(entity_name)):
            return results
        if INTERACTIVE_CLI:
            print('The given path "' + path +
                  '" resolves to the following data objects:')
            if any(['describe' not in result for result in results]):
                # findDataObject API call must be made to get 'describe'
                # mappings before we can show the user a listing.
                project, folderpath, entity_name = resolve_path(
                    path, expected='entity')
                results = _resolve_global_entity(
                    project, folderpath, entity_name)
            choice = pick([get_ls_l_desc(result['describe'])
                           for result in results], allow_mult=allow_mult)
            if allow_mult and choice == '*':
                return results
            else:
                # Wrap in a list only when multiples are allowed.
                return [results[choice]] if allow_mult else results[choice]
        else:
            raise ResolutionError('The given path "' + path +
                                  '" resolves to ' + str(len(results)) +
                                  ' data objects')
    else:
        return [results[0]] if allow_mult else results[0]
def paragraph_generator(sentences=None):
    """Create a generator that yields random paragraphs forever.

    :arg sentences: list or tuple of sentences you want to use;
        defaults to LOREM
    :returns: generator

    Example::

        from eadred.helpers import paragraph_generator
        gen = paragraph_generator()
        for i in range(50):
            mymodel = SomeModel(description=gen.next())
            mymodel.save()
    """
    pool = LOREM if sentences is None else sentences
    while True:
        # A paragraph consists of 1-7 randomly chosen sentences.
        count = random.randint(1, 7)
        yield u' '.join(random.choice(pool) for _ in range(count))
def while_until_true(interval, max_attempts):
    """Decorator that executes a function until it returns True.

    Executes the wrapped function at every number of seconds specified by
    interval, until the wrapped function either returns True or max_attempts
    are exhausted, whichever comes 1st.

    The difference between while_until_true and wait_until_true is that the
    latter will always loop to a max_attempts, whereas while_until_true will
    keep going indefinitely.

    The other notable difference to wait_until_true is that the wrapped
    function signature must be::

        func(counter, *args, **kwargs)

    This is because this decorator injects the while loop counter into the
    invoked function.

    Args:
        interval: In seconds. How long to wait between executing the wrapped
            function.
        max_attempts: int. Execute wrapped function up to this limit. None
            means infinite (or until wrapped function returns True).
            Passing anything < 0 also means infinite.

    Returns:
        Bool. True if wrapped function returned True. False if reached
        max_attempts without the wrapped function ever returning True.
    """
    def decorator(f):
        # NOTE(review): consider functools.wraps(f) here so the wrapper
        # keeps f's name/docstring.
        logger.debug("started")

        def sleep_looper(*args, **kwargs):
            if max_attempts:
                logger.debug(f"Looping every {interval} seconds for "
                             f"{max_attempts} attempts")
            else:
                logger.debug(f"Looping every {interval} seconds.")
            i = 0
            result = False
            # pragma for coverage: cov can't figure out the branch construct
            # with the dynamic function invocation, it seems, so marks the
            # branch partial. unit test cov is 100%, though.
            while not result:  # pragma: no branch
                i += 1
                # Inject the 1-based loop counter as the first argument.
                result = f(i, *args, **kwargs)
                if result:
                    logger.debug(f"iteration {i}. Desired state reached.")
                    break
                elif max_attempts:
                    if i < max_attempts:
                        logger.debug(f"iteration {i}. Still waiting. . .")
                        time.sleep(interval)
                    else:
                        logger.debug(f"iteration {i}. Max attempts exhausted.")
                        break
                else:
                    # result False AND max_attempts is None means keep looping
                    # because None = infinite
                    logger.debug(f"iteration {i}. Still waiting. . .")
                    time.sleep(interval)
            logger.debug("done")
            return result
        return sleep_looper
    return decorator
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.