signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def parseLines(self):
    """Form an AST for the code and produce a new version of the source."""
    source_text = ''.join(self.lines)
    tree = parse(source_text, self.inFilename)
    # Walking the tree applies Doxygen tags to the source.
    self.visit(tree)
def get_details(self):
    """Finds songs details

    :return: Dictionary with songs details about title, artist, album and
        year
    """
    return {
        "title": str(self.get_title()).strip(),
        "artist": str(self.get_artist()).strip(),
        "album": str(self.get_album()).strip(),
        "year": str(self.get_year()).strip(),
    }
def get_task_runs(self, json_file=None):
    """Load all project Task Runs from Tasks.

    :param json_file: optional file to load task runs from instead of the
        remote source -- passed through to the loader; TODO confirm semantics
    :raises ProjectError: if no project has been loaded yet
    """
    if self.project is None:
        raise ProjectError
    loader = create_task_runs_loader(self.project.id, self.tasks, json_file, self.all)
    self.task_runs, self.task_runs_file = loader.load()
    # Fail early if the project turned out to have no task runs at all.
    self._check_project_has_taskruns()
    # Cache per-task dataframes built from the freshly loaded task runs.
    self.task_runs_df = dataframer.create_task_run_data_frames(self.tasks, self.task_runs)
def _get_federation_info(address_or_id, federation_service, fed_type='name'):
    """Send a federation query to a Stellar Federation service.

    Note: The preferred method of making this call is via
    :function:`federation`, as it handles error checking and parsing of
    arguments.

    :param str address_or_id: The address which you expect te retrieve
        federation information about.
    :param str federation_service: The url of the federation service you're
        requesting information from.
    :param str fed_type: The type of federation query that you are making.
        Must be 'name', 'id', 'forward', or 'txid'.
    :return dict: The federation query response decoded from JSON as a dict,
        or None when the service answers with a non-200 status.
    """
    query = {'q': address_or_id, 'type': fed_type}
    response = requests.get(federation_service, params=query)
    if response.status_code != 200:
        return None
    return response.json()
def pkt_check(*args, func=None):
    """Check if arguments are valid packets.

    Each argument must be a dict describing a packet; every expected field
    is validated by a type-specific helper. The helpers raise on failure,
    reporting `func` as the offending caller.
    """
    # Default to the caller's caller's function name for error reporting.
    func = func or inspect.stack()[2][3]
    for var in args:
        # The packet itself and its 'frame' member must be dicts.
        dict_check(var, func=func)
        dict_check(var.get('frame'), func=func)
        enum_check(var.get('protocol'), func=func)
        real_check(var.get('timestamp'), func=func)
        ip_check(var.get('src'), var.get('dst'), func=func)
        bool_check(var.get('syn'), var.get('fin'), func=func)
        int_check(var.get('srcport'), var.get('dstport'), var.get('index'), func=func)
def get_msgs_for_lagged_nodes(self) -> List[ViewChangeDone]:
    # Should not return a list, only done for compatibility with interface
    """Returns the last accepted `ViewChangeDone` message.

    If no view change has happened returns ViewChangeDone
    with view no 0 to a newly joined node
    """
    # TODO: Consider a case where more than one node joins immediately,
    # then one of the node might not have an accepted
    # ViewChangeDone message
    messages = []
    accepted = self._accepted_view_change_done_message
    if accepted:
        # Prefer the message the protocol has actually accepted.
        messages.append(ViewChangeDone(self.last_completed_view_no, *accepted))
    elif self.name in self._view_change_done:
        # Fall back to this node's own recorded ViewChangeDone vote.
        messages.append(ViewChangeDone(self.last_completed_view_no,
                                       *self._view_change_done[self.name]))
    else:
        # Nothing to send; log for diagnosis of lagging-node recovery.
        logger.info('{} has no ViewChangeDone message to send for view {}'
                    .format(self, self.view_no))
    return messages
def replace_by_field(self, field_name, field_value, fields, typecast=False, **options):
    """Replaces the first record to match field name and value.

    All Fields are updated to match the new ``fields`` provided.
    If a field is not included in ``fields``, value will be set to null.
    To update only selected fields, use :any:`update`.

    Args:
        field_name (``str``): Name of field to match (column name).
        field_value (``str``): Value of field to match.
        fields (``dict``): Fields to replace with.
            Must be dictionary with Column names as Key.
        typecast (``boolean``): Automatic data conversion from string values.

    Keyword Args:
        view (``str``, optional): The name or ID of a view.
            See :any:`ViewParam`.
        sort (``list``, optional): List of fields to sort by.
            Default order is ascending. See :any:`SortParam`.

    Returns:
        record (``dict``): New record, or an empty dict if no match.
    """
    matched = self.match(field_name, field_value, **options)
    if not matched:
        return {}
    return self.replace(matched['id'], fields, typecast)
def get_file_service_properties(self, timeout=None):
    '''Gets the properties of a storage account's File service, including
    Azure Storage Analytics.

    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: The file service properties.
    :rtype:
        :class:`~azure.storage.common.models.ServiceProperties`
    '''
    # Build a GET request against the service root.
    request = HTTPRequest()
    request.method = 'GET'
    request.host_locations = self._get_host_locations()
    request.path = _get_path()
    # restype/comp select the "service properties" resource.
    request.query = {'restype': 'service', 'comp': 'properties', 'timeout': _int_to_str(timeout), }
    # The XML response is parsed into a ServiceProperties object.
    return self._perform_request(request, _convert_xml_to_service_properties)
def read_detections(fname):
    """Read detections from a file to a list of Detection objects.

    :type fname: str
    :param fname: File to read from, must be a file written to by
        Detection.write.
    :returns: list of :class:`eqcorrscan.core.match_filter.Detection`
    :rtype: list

    .. note::
        :class:`eqcorrscan.core.match_filter.Detection`'s returned do not
        contain Detection.event
    """
    detections = []
    # `with` guarantees the handle is closed even when a line fails to
    # parse (the previous implementation leaked the file on error).
    with open(fname, 'r') as f:
        for index, line in enumerate(f):
            if index == 0:
                continue  # Skip header
            if line.rstrip().split('; ')[0] == 'Template name':
                continue  # Skip any repeated headers
            detection = line.rstrip().split('; ')
            detection[1] = UTCDateTime(detection[1])
            detection[2] = int(float(detection[2]))
            detection[3] = ast.literal_eval(detection[3])
            detection[4] = float(detection[4])
            detection[5] = float(detection[5])
            if len(detection) < 9:
                # Pad short (older-format) rows with placeholder values.
                detection.extend(['Unset', float('NaN')])
            else:
                detection[7] = float(detection[7])
            detections.append(Detection(
                template_name=detection[0], detect_time=detection[1],
                no_chans=detection[2], detect_val=detection[4],
                threshold=detection[5], threshold_type=detection[6],
                threshold_input=detection[7], typeofdet=detection[8],
                chans=detection[3]))
    return detections
def _parse_cli_args():
    """Parse the arguments from CLI using ArgumentParser

    :return: The arguments parsed by ArgumentParser
    :rtype: Namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', help='The photoset id to be downloaded',
                        metavar='<photoset_id>')
    # NOTE(review): help text says sizes go up to 12, but choices stop at 9
    # -- confirm the intended range with the Flickr size table.
    # `range` replaces Python-2-only `xrange`; it works on both versions.
    parser.add_argument('-s', default=1,
                        help=('Image size. 12 is smallest, 1 is original size. '
                              'Default: 1'),
                        type=int, choices=range(0, 10), metavar='<num>')
    parser.add_argument('-d', default=None,
                        help=('The path to store the downloaded images. '
                              'Automatically create it if not exist. '
                              'Default use the photoset id as folder name under current path'),
                        metavar='<path>')
    parser.add_argument('-O', default=1,
                        help=('0 for single process, '
                              '1 for multithread. '
                              '2 for event driven. '
                              'Default: 1'),
                        type=int, choices=range(0, 3), metavar='<num>')
    parser.add_argument('-u', help=('Set your API key'), action='store_true')
    args = parser.parse_args()
    logger.debug(args)
    return args
def log(self, n=None, **kwargs):
    """Run the repository log command

    Returns:
        str: output of log command (``git log -n <n> <--kwarg=value>``),
        or None if running the command raised an exception.
    """
    # Callers pass `template`, but git's flag is `--format`.
    kwargs['format'] = kwargs.pop('template', self.template)
    cmd = ['git', 'log']
    if n:
        cmd.append('-n%d' % n)
    cmd.extend(('--%s=%s' % (k, v)) for (k, v) in iteritems(kwargs))
    try:
        # The original special-cased "fatal: bad default revision 'HEAD'"
        # output but returned it unchanged either way; the dead branch is
        # removed here without changing behavior.
        return self.sh(cmd, shell=False)
    except Exception:
        # Preserve the original best-effort contract: swallow the error
        # and return None. NOTE(review): consider logging the exception.
        return None
def raw_xml ( self ) -> _RawXML :
"""Bytes representation of the XML content .
( ` learn more < http : / / www . diveintopython3 . net / strings . html > ` _ ) .""" | if self . _xml :
return self . _xml
else :
return etree . tostring ( self . element , encoding = 'unicode' ) . strip ( ) . encode ( self . encoding ) |
def diffuser_conical(Di1, Di2, l=None, angle=None, fd=None, Re=None,
                     roughness=0.0, method='Rennels'):
    r'''Returns the loss coefficient for any conical pipe diffuser.

    Several methods are available. The 'Rennels' [1]_ method uses three
    different formulas depending on the included angle and the diameter
    ratio :math:`\beta = D_{i1}/D_{i2}`; it requires a Darcy friction
    factor, computed from `Re` and `roughness` via Colebrook when `fd` is
    not given. 'Crane' [3]_, 'Miller' [5]_ (spline fit of the published
    chart), 'Idelchik' [2]_ (separate friction and expansion
    contributions, interpolated), and 'Swamee' [4]_ are also supported.

    Parameters
    ----------
    Di1 : float
        Inside diameter of original pipe (smaller), [m]
    Di2 : float
        Inside diameter of following pipe (larger), [m]
    l : float, optional
        Length of the diffuser along the pipe axis, [m]
    angle : float, optional
        Angle of the diffuser, [degrees]
    fd : float, optional
        Darcy friction factor, [-]
    Re : float, optional
        Reynolds number of the pipe (used in Rennels method only if no
        friction factor given), [-]
    roughness : float, optional
        Roughness of wall (used in Rennels method if no friction factor
        given), [m]
    method : str
        The method to use for the calculation; one of 'Rennels', 'Crane',
        'Miller', 'Swamee', or 'Idelchik' [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to smaller, upstream diameter [-]

    Notes
    -----
    Exactly one of `l` or `angle` must be given; the other is computed
    from the cone geometry. ``method=None`` is treated as 'Rennels'.
    The Miller method clamps its inputs to the chart's domain. There is
    quite a bit of variance between the methods' predictions.

    Examples
    --------
    >>> diffuser_conical(Di1=1/3., Di2=1.0, angle=50.0, Re=1E6)
    0.8027721093415322

    References
    ----------
    .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
    .. [2] Idel'chik, I. E. Handbook of Hydraulic Resistance. National
       Technical Information Service, 1966.
    .. [3] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe.
       Crane, 2009.
    .. [4] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply
       Pipe Networks. John Wiley & Sons, 2008.
    .. [5] Miller, Donald S. Internal Flow Systems: Design and Performance
       Prediction. Gulf Publishing Company, 1990.
    '''
    beta = Di1 / Di2
    beta2 = beta * beta
    # Derive whichever of (l, angle) was not supplied from the geometry.
    if angle is not None:
        angle_rad = radians(angle)
        l = (Di2 - Di1) / (2.0 * tan(0.5 * angle_rad))
    elif l is not None:
        angle_rad = 2.0 * atan(0.5 * (Di2 - Di1) / l)
        angle = degrees(angle_rad)
    else:
        raise Exception('Either `l` or `angle` must be specified')
    if method is None:
        # BUG FIX: this was `method == 'Rennels'`, a no-op comparison, so
        # method=None fell through to the "not recognized" error path.
        method = 'Rennels'
    if method == 'Rennels':
        if fd is None:
            if Re is None:
                raise ValueError("The `Rennels` method requires either a "
                                 "specified friction factor or `Re`")
            fd = Colebrook(Re=Re, eD=roughness / Di2, tol=-1)
        if 0.0 < angle <= 20.0:
            K = 8.30 * tan(0.5 * angle_rad) ** 1.75 * (1.0 - beta2) ** 2 + 0.125 * fd * (1.0 - beta2 * beta2) / sin(0.5 * angle_rad)
        elif 20.0 < angle <= 60.0 and 0.0 <= beta < 0.5:
            K = (1.366 * sin(2.0 * pi * (angle - 15.0) / 180.) ** 0.5 - 0.170
                 - 3.28 * (0.0625 - beta ** 4) * (0.025 * (angle - 20.0)) ** 0.5) * (1.0 - beta2) ** 2 + 0.125 * fd * (1.0 - beta2 * beta2) / sin(0.5 * angle_rad)
        elif 20.0 < angle <= 60.0 and beta >= 0.5:
            K = (1.366 * sin(2.0 * pi * (angle - 15.0) / 180.0) ** 0.5 - 0.170) * (1.0 - beta2) ** 2 + 0.125 * fd * (1.0 - beta2 * beta2) / sin(0.5 * angle_rad)
        elif 60.0 < angle <= 180.0 and 0.0 <= beta < 0.5:
            beta4 = beta2 * beta2
            K = (1.205 - 3.28 * (0.0625 - beta4) - 12.8 * beta4 * beta2 * ((angle - 60.0) / 120.) ** 0.5) * (1.0 - beta2) ** 2
        elif 60.0 < angle <= 180.0 and beta >= 0.5:
            K = (1.205 - 0.20 * ((angle - 60.0) / 120.) ** 0.5) * (1.0 - beta ** 2) ** 2
        else:
            raise Exception('Conical diffuser inputs incorrect')
        return K
    elif method == 'Crane':
        return diffuser_conical_Crane(Di1=Di1, Di2=Di2, l=l, angle=angle)
    elif method == 'Miller':
        # Clamp area ratio and length ratio into the chart's valid domain.
        A_ratio = 1.0 / beta2
        if A_ratio > 4.0:
            A_ratio = 4.0
        elif A_ratio < 1.1:
            A_ratio = 1.1
        l_R1_ratio = l / (0.5 * Di1)
        if l_R1_ratio < 0.1:
            l_R1_ratio = 0.1
        elif l_R1_ratio > 20.0:
            l_R1_ratio = 20.0
        Kd = max(float(bisplev(log(l_R1_ratio), log(A_ratio), tck_diffuser_conical_Miller)), 0)
        return Kd
    elif method == 'Idelchik':
        A_ratio = beta2
        # Friction correlation valid for angles 2-20 deg, area ratios 0.05-0.6.
        if angle > 20.0:
            angle_fric = 20.0
        elif angle < 2.0:
            angle_fric = 2.0
        else:
            angle_fric = angle
        A_ratio_fric = A_ratio
        if A_ratio_fric < 0.05:
            A_ratio_fric = 0.05
        elif A_ratio_fric > 0.6:
            A_ratio_fric = 0.6
        K_fr = float(contraction_conical_frction_Idelchik_obj(angle_fric, A_ratio_fric))
        # NOTE(review): `max(3.0, angle)` clamps the angle to at least 3
        # degrees only -- confirm an upper clamp is not also intended.
        K_exp = float(diffuser_conical_Idelchik_obj(min(0.6, A_ratio), max(3.0, angle)))
        return K_fr + K_exp
    elif method == 'Swamee':
        # Really starting to think Swamee uses a different definition of
        # loss coefficient!
        r = Di2 / Di1
        K = (0.25 * angle_rad ** -3 * (1.0 + 0.6 * r ** (-1.67) * (pi - angle_rad) / angle_rad) ** (0.533 * r - 2.6)) ** -0.5
        return K
    else:
        raise ValueError('Specified method not recognized; methods are %s' % (diffuser_conical_methods))
def get_xy_dataset_statistics_pandas(dataframe, x_series, y_series, fcorrect_x_cutoff=1.0, fcorrect_y_cutoff=1.0, x_fuzzy_range=0.1, y_scalar=1.0, ignore_null_values=False, bootstrap_data=False, expect_negative_correlation=False, STDev_cutoff=1.0, run_standardized_analysis=True, check_multiple_analysis_for_consistency=True):
    '''A version of _get_xy_dataset_statistics which accepts a pandas
    dataframe rather than X- and Y-value lists.

    :param dataframe: A pandas dataframe
    :param x_series: The column name of the X-axis series
    :param y_series: The column name of the Y-axis series
    :param fcorrect_x_cutoff: The X-axis cutoff value for the fraction correct metric.
    :param fcorrect_y_cutoff: The Y-axis cutoff value for the fraction correct metric.
    :param x_fuzzy_range: The X-axis fuzzy range value for the fuzzy fraction correct metric.
    :param y_scalar: The Y-axis scalar multiplier for the fuzzy fraction correct
        metric (used to calculate y_cutoff and y_fuzzy_range in that metric)
    :return: A table of statistics.
    '''
    # Extract the two columns as plain lists and delegate everything else.
    return _get_xy_dataset_statistics(
        dataframe[x_series].tolist(),
        dataframe[y_series].tolist(),
        fcorrect_x_cutoff=fcorrect_x_cutoff,
        fcorrect_y_cutoff=fcorrect_y_cutoff,
        x_fuzzy_range=x_fuzzy_range,
        y_scalar=y_scalar,
        ignore_null_values=ignore_null_values,
        bootstrap_data=bootstrap_data,
        expect_negative_correlation=expect_negative_correlation,
        STDev_cutoff=STDev_cutoff,
        run_standardized_analysis=run_standardized_analysis,
        check_multiple_analysis_for_consistency=check_multiple_analysis_for_consistency)
def all_synonyms(self, include_label=False):
    """Retrieves all synonyms

    Arguments
    ---------
    include_label : bool
        If True, include label/names as Synonym objects

    Returns
    -------
    list[Synonym]
        :class:`Synonym` objects
    """
    syns = []
    for n in self.nodes():
        # extend() accumulates in O(total); the original `syns = syns + ...`
        # copied the whole accumulator on every node (quadratic).
        syns.extend(self.synonyms(n, include_label=include_label))
    return syns
def update(self, response, **kwargs):
    '''If a record matching the instance already exists in the database, update
    both the column and venue column attributes, else create a new record.
    '''
    # Look up (or construct) the record via the parent accessor.
    response_cls = super(LocationResponseClassLegacyAccessor, self)._get_instance(**kwargs)
    if response_cls:
        # Existing record: refresh both stored columns from the response.
        setattr(response_cls, self.column, self.accessor(response))
        setattr(response_cls, self.venue_column, self.venue_accessor(response))
    # Persist via the module-level session.
    # NOTE(review): flat source is ambiguous about whether this commit is
    # inside the `if`; placed unconditionally per the docstring's
    # "else create a new record" -- confirm against the original repo.
    _action_and_commit(response_cls, session.add)
def positioning_headlines(headlines):
    """Strips unnecessary whitespaces/tabs if first header is not left-aligned"""
    # A level value of 1 marks a left-justified header.
    has_left_justified = any(row[-1] == 1 for row in headlines)
    if not has_left_justified:
        # Shift every header one level to the left (single decrement).
        for row in headlines:
            row[-1] -= 1
    return headlines
def get_null_snr(self):
    """Get the coherent Null SNR for this row.

    Returns the square root of (sum of squared single-detector SNRs minus
    the squared coherent SNR), floored at zero.
    """
    # list() keeps this working on Python 3, where dict.values() is a view
    # that numpy.asarray would otherwise wrap as a 0-d object array.
    sngl_snrs = numpy.asarray(list(self.get_sngl_snrs().values()))
    null_snr_sq = (sngl_snrs ** 2).sum() - self.snr ** 2
    if null_snr_sq < 0:
        # Numerical noise can push the difference slightly negative.
        return 0
    else:
        return null_snr_sq ** (1. / 2.)
def rank(self):
    """Returns an ``int`` of the team's rank at the time the game was played,
    or None if no rank is present.
    """
    digits = re.findall(r'\d+', self._rank)
    if len(digits) == 0:
        return None
    # BUG FIX: the docstring promises an int, but the original returned the
    # raw regex match (a str). Convert explicitly.
    return int(digits[0])
def from_numpy(ndarr, arr, alpha_merge=0.05, max_depth=2, min_parent_node_size=30, min_child_node_size=30, split_titles=None, split_threshold=0, weights=None, variable_types=None, dep_variable_type='categorical'):
    """Create a CHAID object from numpy

    Parameters
    ----------
    ndarr : numpy.ndarray
        non-aggregated 2-dimensional array containing
        independent variables on the vertical axis and (usually)
        respondent level data on the horizontal axis
    arr : numpy.ndarray
        1-dimensional array of the dependent variable associated with ndarr
    alpha_merge : float
        the threshold value in which to create a split (default 0.05)
    max_depth : float
        the threshold value for the maximum number of levels after the root
        node in the tree (default 2)
    min_parent_node_size : float
        the threshold value of the number of respondents that the node must
        contain (default 30)
    split_titles : array-like
        array of names for the independent variables in the data
    variable_types : array-like or dict
        array of variable types, or dict of column names to variable types.
        Supported variable types are the strings 'nominal' or 'ordinal' in
        lower case
    dep_variable_type : str
        'categorical' or 'continuous' dependent variable (default
        'categorical')
    """
    vectorised_array = []
    # Default every independent variable to nominal when not specified.
    variable_types = variable_types or ['nominal'] * ndarr.shape[1]
    for ind, col_type in enumerate(variable_types):
        title = None
        if split_titles is not None:
            title = split_titles[ind]
        # Wrap each column in the matching Column type.
        if col_type == 'ordinal':
            col = OrdinalColumn(ndarr[:, ind], name=title)
        elif col_type == 'nominal':
            col = NominalColumn(ndarr[:, ind], name=title)
        else:
            raise NotImplementedError('Unknown independent variable type ' + col_type)
        vectorised_array.append(col)
    # The dependent variable gets its own column wrapper.
    if dep_variable_type == 'categorical':
        observed = NominalColumn(arr, weights=weights)
    elif dep_variable_type == 'continuous':
        observed = ContinuousColumn(arr, weights=weights)
    else:
        raise NotImplementedError('Unknown dependent variable type ' + dep_variable_type)
    config = {'alpha_merge': alpha_merge, 'max_depth': max_depth, 'min_parent_node_size': min_parent_node_size, 'min_child_node_size': min_child_node_size, 'split_threshold': split_threshold}
    return Tree(vectorised_array, observed, config)
def set_value(ctx, key, value):
    """Assigns values to config file entries. If the value is omitted,
    you will be prompted, with the input hidden if it is sensitive.

    $ ddev config set github.user foo
    New setting:
    [github]
    user = "foo"
    """
    scrubbing = False
    if value is None:
        # Hide the prompt's echo for secret keys.
        scrubbing = key in SECRET_KEYS
        value = click.prompt('Value for `{}`'.format(key), hide_input=scrubbing)
    # Path-like settings are stored absolute (but `~` is left for later expansion).
    if key in ('core', 'extras', 'agent') and not value.startswith('~'):
        value = os.path.abspath(value)
    user_config = new_config = ctx.obj
    user_config.pop('repo_choice', None)
    # Turn "a.b.c" + value into a stack: value first, then keys reversed.
    data = [value]
    data.extend(reversed(key.split('.')))
    key = data.pop()
    value = data.pop()
    # Use a separate mapping to show only what has changed in the end
    branch_config_root = branch_config = {}
    # Consider dots as keys
    while data:
        # Descend one level in both the display mapping and the real config,
        # creating intermediate tables as needed.
        default_branch = {value: ''}
        branch_config[key] = default_branch
        branch_config = branch_config[key]
        new_value = new_config.get(key)
        if not hasattr(new_value, 'get'):
            # Existing value is not a table; replace it with one.
            new_value = default_branch
        new_config[key] = new_value
        new_config = new_config[key]
        key = value
        value = data.pop()
    # Coerce the final string into its TOML-typed value and write it in
    # both the real config and the display mapping.
    value = string_to_toml_type(value)
    branch_config[key] = new_config[key] = value
    save_config(user_config)
    # Mask secrets in the echoed summary when the value was sensitive.
    output_config = scrub_secrets(branch_config_root) if scrubbing else branch_config_root
    echo_success('New setting:')
    echo_info(toml.dumps(output_config).rstrip())
def ratio_area_clay_total(ConcClay, material, DiamTube, RatioHeightDiameter):
    """Return the surface area of clay normalized by total surface area.

    Total surface area is a combination of clay and reactor wall
    surface areas. This function is used to estimate how much coagulant
    actually goes to the clay.

    :param ConcClay: Concentration of clay in suspension
    :type ConcClay: float
    :param material: Type of clay in suspension, e.g. floc_model.Clay
    :type material: floc_model.Material
    :param DiamTube: Diameter of flocculator tube (assumes tube flocculator
        for calculation of reactor surface area)
    :type DiamTube: float
    :param RatioHeightDiameter: Dimensionless ratio describing ratio of clay
        height to clay diameter
    :type RatioHeightDiameter: float
    :return: The ratio of clay surface area to total available surface area
        (accounting for reactor walls)
    :rtype: float
    """
    # Ratio of wall area to clay area; the result is 1 / (1 + wall/clay).
    wall_to_clay = (2 * material.Diameter
                    / (3 * DiamTube
                       * ratio_clay_sphere(RatioHeightDiameter)
                       * (ConcClay / material.Density)))
    return 1 / (1 + wall_to_clay)
def volume_usage(self, year=None, month=None):
    """Retrieves Cloudant volume usage data, optionally for a given
    year and month.

    :param int year: Year to query against, for example 2014.
        Optional parameter. Defaults to None. If used, it must be
        accompanied by ``month``.
    :param int month: Month to query against that must be an integer
        between 1 and 12. Optional parameter. Defaults to None.
        If used, it must be accompanied by ``year``.

    :returns: Volume usage data in JSON format
    """
    endpoint = '{0}/_api/v2/usage/data_volume'.format(self.server_url)
    return self._usage_endpoint(endpoint, year, month)
def remove_oxidation_states(self):
    """Removes oxidation states from a structure.

    Mutates each site's species mapping in place: every
    (species, occupancy) entry is re-keyed onto the bare Element, with
    occupancies of different oxidation states of the same element summed.
    """
    for site in self.sites:
        new_sp = collections.defaultdict(float)
        for el, occu in site.species.items():
            sym = el.symbol
            # e.g. Fe2+ and Fe3+ both fold into plain Fe.
            new_sp[Element(sym)] += occu
        site.species = new_sp
def read(cls, iprot):
    '''Read a new object from the given input protocol and return the object.

    :type iprot: thryft.protocol._input_protocol._InputProtocol
    :rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration
    '''
    init_kwds = {}
    iprot.read_struct_begin()
    while True:
        ifield_name, ifield_type, _ifield_id = iprot.read_field_begin()
        if ifield_type == 0:  # STOP
            break
        elif ifield_name == 'images_per_object':
            init_kwds['images_per_object'] = iprot.read_i32()
        elif ifield_name == 'objects':
            init_kwds['objects'] = iprot.read_i32()
        # Fields with unrecognized names are ignored; only the field
        # begin/end markers are consumed for them.
        iprot.read_field_end()
    iprot.read_struct_end()
    # Construct the configuration from whichever fields were present.
    return cls(**init_kwds)
def checkout_and_create_branch(repo, name):
    """Checkout branch. Create it if necessary

    :param repo: a GitPython Repo object
    :param name: name of the branch to check out or create
    """
    local_branch = repo.branches[name] if name in repo.branches else None
    if not local_branch:
        if name in repo.remotes.origin.refs:
            # If origin branch exists but not local, git.checkout is the fatest way
            # to create local branch with origin link automatically
            msg = repo.git.checkout(name)
            _LOGGER.debug(msg)
            return
        # Create local branch, will be link to origin later
        local_branch = repo.create_head(name)
    local_branch.checkout()
def _maxlength ( X ) :
"""Returns the maximum length of signal trajectories X""" | N = 0
for x in X :
if len ( x ) > N :
N = len ( x )
return N |
def get_organizations(self, since=github.GithubObject.NotSet):
    """:calls: `GET /organizations <http://developer.github.com/v3/orgs#list-all-organizations>`_

    :param since: integer organization ID; only organizations with an ID
        greater than this are returned
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
    """
    # `long` here implies Python 2 support.
    # NOTE(review): drop `long` when Python 2 support ends.
    assert since is github.GithubObject.NotSet or isinstance(since, (int, long)), since
    url_parameters = dict()
    if since is not github.GithubObject.NotSet:
        url_parameters["since"] = since
    return github.PaginatedList.PaginatedList(github.NamedUser.NamedUser, self.__requester, "/organizations", url_parameters)
def discover(self, ip):
    '''Discover the network starting at the defined root node IP.

    Recursively enumerate the network tree up to self.depth.
    Populates self.nodes[] as a list of discovered nodes in the
    network with self.root_node being the root.

    This function will discover the network with minimal information.
    It is enough to define the structure of the network but will not
    include much data on each node. Call discover_details() after this
    to update the self.nodes[] array with more info.
    '''
    if (self.verbose > 0):
        # Legend explaining the per-step discovery codes printed below.
        print('Discovery codes:\n' ' . depth %s connection error\n' ' %s discovering node %s numerating adjacencies\n' ' %s include node %s leaf node\n' % (DCODE_ERR_SNMP_STR, DCODE_DISCOVERED_STR, DCODE_STEP_INTO_STR, DCODE_INCLUDE_STR, DCODE_LEAF_STR))
    print('Discovering network...')
    # Start the process of querying this node and recursing adjacencies.
    node, new_node = self.__query_node(ip, 'UNKNOWN')
    self.root_node = node
    if (node != None):
        self.nodes.append(node)
        self.__print_step(node.ip[0], node.name, 0, DCODE_ROOT | DCODE_DISCOVERED)
        # Recurse from the root; depth limiting happens inside.
        self.__discover_node(node, 0)
    else:
        # Root node unreachable: nothing to discover.
        return
    # we may have missed chassis info
    for n in self.nodes:
        # `|` is a bitwise OR of boolean comparisons; it evaluates all
        # three tests but behaves like `or` here.
        if ((n.serial == None) | (n.plat == None) | (n.ios == None)):
            n.opts.get_chassis_info = True
            if (n.serial == None):
                n.opts.get_serial = True
            if (n.ios == None):
                n.opts.get_ios = True
            if (n.plat == None):
                n.opts.get_plat = True
            # Re-query only the nodes with missing chassis details.
            n.query_node()
def destroy_record(client=None, found_record=None, record='', zone_id=''):
    """Destroy an individual DNS record.

    Args:
        client (botocore.client.Route53): Route 53 boto3 client.
        found_record (dict): Route 53 record set, e.g.::

            {'Name': 'unicorn.forrest.dev.example.com.',
             'ResourceRecords': [{'Value':
                 'internal-unicornforrest-1777489395.us-east-1.elb.amazonaws.com'}],
             'TTL': 60,
             'Type': 'CNAME'}

        record (str): Application DNS record name.
        zone_id (str): Route 53 Hosted Zone ID, e.g. /hostedzone/ZSVGJWJ979WQD.

    Returns:
        bool: True upon successful completion.
    """
    LOG.debug('Found DNS record: %s', found_record)
    # Only destroy the record when its name (minus the trailing dot)
    # matches the requested application record exactly.
    if found_record['Name'].strip('.') == record:
        # Render the change batch from a Jinja template and submit it.
        dns_json = get_template(template_file='destroy/destroy_dns.json.j2', record=json.dumps(found_record))
        dns_dict = json.loads(dns_json)
        client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch=dns_dict)
        LOG.info('Destroyed "%s" in %s', found_record['Name'], zone_id)
    else:
        LOG.info('DNS record "%s" missing from %s.', record, zone_id)
        LOG.debug('Found someone else\'s record: %s', found_record['Name'])
    return True
def read_chunk_header(self):
    '''Read a single chunk's header.

    Returns:
        tuple: 2-item tuple with the size of the content in the chunk and
        the raw header byte string.

    Coroutine.
    '''
    # _logger.debug('Reading chunk.')
    try:
        chunk_size_hex = yield from self._connection.readline()
    except ValueError as error:
        raise ProtocolError('Invalid chunk size: {0}'.format(error)) from error
    if not chunk_size_hex.endswith(b'\n'):
        # A header line without its newline means the peer hung up mid-line.
        raise NetworkError('Connection closed.')
    try:
        # The size is hex, optionally followed by ';chunk-extension' data.
        chunk_size = int(chunk_size_hex.split(b';', 1)[0].strip(), 16)
    except ValueError as error:
        raise ProtocolError('Invalid chunk size: {0}'.format(error)) from error
    if chunk_size < 0:
        raise ProtocolError('Chunk size cannot be negative.')
    # Track how much chunk body remains to be consumed by later reads.
    self._chunk_size = self._bytes_left = chunk_size
    return chunk_size, chunk_size_hex
def encode_request(request_line, **headers):
    '''Creates the data for a SSDP request.

    Args:
        request_line (string): The request line for the request (e.g.
            ``"M-SEARCH * HTTP/1.1"``).
        headers (dict of string -> string): Dictionary of header name -
            header value pairs to present in the request.

    Returns:
        bytes: The encoded request.
    '''
    header_lines = ['%s: %s' % item for item in headers.items()]
    message = '\r\n'.join([request_line] + header_lines)
    # A blank line terminates the header block.
    return (message + '\r\n\r\n').encode('utf-8')
def emails(self, qs):
    """Single entry with email only in the format of:
    my@address.com,
    """
    lines = [entry['email'] for entry in qs]
    self.stdout.write(",\n".join(lines))
    self.stdout.write("\n")
def to_csvline(self, with_header=False):
    """Return a string with data in CSV format"""
    parts = []
    if with_header:
        # Commented header line listing the field names.
        parts.append("# " + " ".join(at for at in AbinitTimerSection.FIELDS) + "\n")
    parts.append(", ".join(str(v) for v in self.to_tuple()) + "\n")
    return "".join(parts)
def humanise_exception(exception):
    """Humanise a python exception by giving the class name and traceback.

    The function will return a tuple with the exception name and the traceback.

    :param exception: Exception object.
    :type exception: Exception
    :return: A tuple with the exception name and the traceback.
    :rtype: (str, str)
    """
    # Format the traceback of the exception currently being handled.
    tb_lines = traceback.format_tb(sys.exc_info()[2])
    return exception.__class__.__name__, ''.join(tb_lines)
def base_recherche_rapide(self, base, pattern, to_string_hook=None):
    """Return a collection of accesses whose record matches `pattern`.

    A ``"*"`` pattern matches everything.  Otherwise the pattern is split
    on spaces and every part must match (case-insensitive regex search)
    the record's string form.  Patterns shorter than ``MIN_CHAR_SEARCH``,
    or containing an invalid regex, yield an empty collection.

    `to_string_hook` is an optional callable dict -> str used to map a
    record to a string.  Defaults to ``_record_to_string``.
    """
    access_cls = self.ACCES
    if pattern == "*":
        return groups.Collection(access_cls(base, key) for key in self)
    if len(pattern) < MIN_CHAR_SEARCH:
        # Query too short to be meaningful.
        return groups.Collection()
    try:
        compiled = tuple(re.compile(part, flags=re.I)
                         for part in pattern.split(" "))
    except re.error:
        # Malformed user regex: behave as "no match".
        return groups.Collection()
    stringify = to_string_hook or self._record_to_string

    def matches_all(text):
        # Every sub-pattern must be found somewhere in the text.
        return all(rx.search(text) for rx in compiled)

    return groups.Collection(access_cls(base, key)
                             for key, record in self.items()
                             if matches_all(stringify(record)))
def verify(self):
    """Check expectations on every doubled object, at most once.

    :raise: ``MockExpectationError`` from the first proxy whose
        expectations are not satisfied, if any.
    """
    if self._is_verified:
        # Verification is idempotent; skip repeat calls.
        return
    for double_proxy in self._proxies.values():
        double_proxy.verify()
    self._is_verified = True
def _pooling_general ( inputs , reducer , init_val , rescaler = None , pool_size = ( 2 , 2 ) , strides = None , padding = 'VALID' ) :
"""Helper : general pooling computation used in pooling layers later .""" | spatial_strides = strides or ( 1 , ) * len ( pool_size )
rescale = rescaler ( pool_size , spatial_strides , padding ) if rescaler else None
dims = ( 1 , ) + pool_size + ( 1 , )
# NHWC
strides = ( 1 , ) + spatial_strides + ( 1 , )
out = lax . reduce_window ( inputs , init_val , reducer , dims , strides , padding )
return rescale ( out , inputs ) if rescale else out |
def stop_subscribe(self):
    """Stop the event loop created when ``subscribe`` is called.

    Cancels all outstanding tasks on the loop, then stops and closes it.
    NOTE(review): as the original warned, this does not stop/join the
    subscriber thread and should be avoided until fully developed.
    """
    try:
        # ``asyncio.Task.all_tasks`` was deprecated in 3.7 and removed in
        # 3.9; use the module-level function with an explicit loop.
        pending = asyncio.all_tasks(self.event_loop)
    except AttributeError:  # pragma: no cover - Python < 3.7 fallback
        pending = asyncio.Task.all_tasks(self.event_loop)
    if pending:
        # Cancelling the aggregate future cancels every child task.
        asyncio.gather(*pending).cancel()
    self.event_loop.stop()
    self.event_loop.close()
def create_account(self, account_type, currency):
    """Create an account.

    https://docs.kucoin.com/#create-an-account

    :param account_type: Account type - main or trade
    :type account_type: string
    :param currency: Currency code
    :type currency: string

    .. code:: python

        account = client.create_account('trade', 'BTC')

    :returns: API Response

    .. code-block:: python

        "id": "5bd6e9286d99522a52e458de"

    :raises: KucoinResponseException, KucoinAPIException
    """
    payload = {
        'type': account_type,
        'currency': currency,
    }
    # Second argument marks the request as signed (authenticated).
    return self._post('accounts', True, data=payload)
def summarize_variables(variables=None):
    """Logs a summary of variable information.

    Groups Variables by dtype and logs, per dtype, the number of Variables,
    the total number of scalar values, and the memory consumed.

    For Variables of type tf.string the memory usage cannot be accurately
    computed from the Graph (it depends on the strings actually stored);
    in that case only the pointer storage is accounted for.

    Args:
        variables: iterable of variables; if not provided, all variables
            in the default graph are summarized.
    """
    counts_by_dtype = count_variables_by_type(variables=variables)
    grand_total_scalars = 0
    grand_total_bytes = 0
    # Sort dtypes by their repr so the log output is deterministic.
    for dtype in sorted(counts_by_dtype, key=repr):
        info = counts_by_dtype[dtype]
        dtype_bytes = info["num_scalars"] * dtype.size
        # NOTE(review): these totals are accumulated but never reported,
        # matching the original behavior.
        grand_total_scalars += info["num_scalars"]
        grand_total_bytes += dtype_bytes
        tf.logging.info(
            "%r: %d variables comprising %d scalars, %s",
            dtype, info["num_variables"], info["num_scalars"],
            _num_bytes_to_human_readable(dtype_bytes))
def _set_openflowPo ( self , v , load = False ) :
    """Setter method for openflowPo, mapped from YANG variable
    /interface/port_channel/openflowPo (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_openflowPo is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_openflowPo() directly.

    :param v: value to assign; coerced into the generated container class.
    :param load: conventionally True when loading from stored config
        (pyangbind convention — unused in this body).
    :raises ValueError: if ``v`` cannot be coerced into the container type.
    """
    # pyangbind values may carry a coercion helper in ``_utype``; apply it
    # first so plain Python values are upcast to the generated YANG type.
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    try :
        # Wrap the value in the generated dynamic class, binding it into the
        # YANG tree (parent, paths, REST metadata, namespace).
        t = YANGDynClass ( v , base = openflowPo . openflowPo , is_container = 'container' , presence = False , yang_name = "openflowPo" , rest_name = "openflow" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'enables openflow ' , u'alt-name' : u'openflow' , u'sort-priority' : u'116' , u'callpoint' : u'OpenflowPo' } } , namespace = 'urn:brocade.com:mgmt:brocade-openflow' , defining_module = 'brocade-openflow' , yang_type = 'container' , is_config = True )
    except ( TypeError , ValueError ) :
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError ( { 'error-string' : """openflowPo must be of a type compatible with container""" , 'defined-type' : "container" , 'generated-type' : """YANGDynClass(base=openflowPo.openflowPo, is_container='container', presence=False, yang_name="openflowPo", rest_name="openflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'enables openflow ', u'alt-name': u'openflow', u'sort-priority': u'116', u'callpoint': u'OpenflowPo'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""" , } )
    self . __openflowPo = t
    # Notify the parent object of the change, if it supports change hooks.
    if hasattr ( self , '_set' ) :
        self . _set ( )
def coerce(self, value, resource):
    """Get a dict with attributes from ``value``.

    Arguments
    ---------
    value : ?
        The value to get some resources from.
    resource : dataql.resources.Object
        The ``Object`` object used to obtain this value from the original one.

    Returns
    -------
    dict
        A dictionary containing the wanted resources for the given value.
        Keys are the ``name`` attributes of the resources; values are the
        solved values.
    """
    solved = {}
    for sub_resource in resource.resources:
        solved[sub_resource.name] = self.registry.solve_resource(value, sub_resource)
    return solved
def createKmerProfile ( self , k , profileCsvFN ) :
    '''Write a CSV profile of every k-mer occurring in the BWT.

    Traverses the FM-index depth-first; each partial prefix carries its
    half-open BWT range [start, end), and when the prefix reaches length
    ``k`` the range width is its occurrence count.  The output starts with
    a "total" line holding the L2 norm of the count vector, followed by
    the sorted "<kmer>,<count>" lines.

    TODO: this method is olddddd, needs to be ported into BasicBWT AND
    reworked to do this better

    @param k - the length of the k-mers to profile
    @param profileCsvFN - the filename of the csv to create
    '''
    # Work stack of (prefix, range-start, range-end); root covers the whole BWT.
    searches = [ ( '' , 0 , self . bwt . shape [ 0 ] ) ]
    # Sum of squared counts, i.e. squared L2 norm of the profile.
    normTotal = 0
    lines = [ ]
    while len ( searches ) > 0 :
        ( seq , start , end ) = searches . pop ( 0 )
        if len ( seq ) == k :
            # Full-length k-mer: record its count (range width).
            lines . append ( seq + ',' + str ( end - start ) )
            normTotal += ( end - start ) ** 2
        else :
            # FM-index ranks at both range boundaries: per symbol c, the
            # interval [nls[c], nhs[c]) is the range of (symbol + seq).
            nls = self . getFullFMAtIndex ( start )
            nhs = self . getFullFMAtIndex ( end )
            # NOTE(review): ``xrange`` means this is Python 2 era code.
            for c in xrange ( self . vcLen - 1 , - 1 , - 1 ) :
                if nls [ c ] == nhs [ c ] : # do nothing
                    # Empty sub-range: extension does not occur in the text.
                    pass
                else :
                    newSeq = self . numToChar [ c ] + seq
                    # Push to the front so traversal stays depth-first.
                    searches . insert ( 0 , ( newSeq , int ( nls [ c ] ) , int ( nhs [ c ] ) ) )
    # NOTE(review): file handle not wrapped in a context manager; a failure
    # while writing would leak it.
    fp = open ( profileCsvFN , 'w+' )
    fp . write ( 'total,' + str ( math . sqrt ( normTotal ) ) + '\n' )
    for l in sorted ( lines ) :
        fp . write ( l + '\n' )
    fp . close ( )
def pretrain(self, train_set, validation_set=None):
    """Perform unsupervised pretraining of the DBN."""
    self.do_pretrain = True

    def collect_params(rbm_machine, rbm_graph):
        # Harvest each trained RBM's weights and hidden biases so they can
        # seed the encoding layers of the finetuning network.
        trained = rbm_machine.get_parameters(graph=rbm_graph)
        self.encoding_w_.append(trained['W'])
        self.encoding_b_.append(trained['bh_'])

    return SupervisedModel.pretrain_procedure(
        self, self.rbms, self.rbm_graphs,
        set_params_func=collect_params,
        train_set=train_set,
        validation_set=validation_set)
def get_instructions ( self , cm , size , insn , idx ) :
    """Decode and yield the Dalvik instructions contained in a raw buffer.

    :param cm: a ClassManager object
    :type cm: :class:`ClassManager` object
    :param size: the total size of the buffer (in 16-bit code units)
    :type size: int
    :param insn: a raw buffer where are the instructions
    :type insn: string
    :param idx: a start address in the buffer
    :type idx: int

    :rtype: a generator of :class:`Instruction` objects
    """
    # Remember whether this dex uses the ODEX format; enables decoding of
    # the optimized opcode set below.
    self . odex = cm . get_odex_format ( )
    # ``size`` counts 16-bit code units; convert to bytes and clamp to the buffer.
    max_idx = size * calcsize ( '=H' )
    if max_idx > len ( insn ) :
        max_idx = len ( insn )
    # Get instructions
    while idx < max_idx :
        obj = None
        classic_instruction = True
        op_value = insn [ idx ]
        # print "%x %x" % (op_value, idx)
        # payload instructions or extented / optimized instructions
        # Opcodes 0x00 / 0xff may introduce a 16-bit pseudo-opcode; re-read
        # the full little-endian 16-bit value to check.
        if ( op_value == 0x00 or op_value == 0xff ) and ( ( idx + 2 ) < max_idx ) :
            op_value = unpack ( '=H' , insn [ idx : idx + 2 ] ) [ 0 ]
            # payload instructions ?
            if op_value in DALVIK_OPCODES_PAYLOAD :
                try :
                    obj = get_instruction_payload ( op_value , insn [ idx : ] )
                    classic_instruction = False
                except struct . error :
                    # Truncated/garbled payload: fall back to classic decoding.
                    warning ( "error while decoding instruction ..." )
            elif op_value in DALVIK_OPCODES_EXTENDED_WIDTH :
                try :
                    obj = get_extented_instruction ( cm , op_value , insn [ idx : ] )
                    classic_instruction = False
                except struct . error as why :
                    warning ( "error while decoding instruction ..." + why . __str__ ( ) )
            # optimized instructions ?
            elif self . odex and ( op_value in DALVIK_OPCODES_OPTIMIZED ) :
                obj = get_optimized_instruction ( cm , op_value , insn [ idx : ] )
                classic_instruction = False
        # classical instructions
        if classic_instruction :
            # Re-read the single-byte opcode at the original offset.
            op_value = insn [ idx ]
            obj = get_instruction ( cm , op_value , insn [ idx : ] , self . odex )
        # emit instruction
        yield obj
        # Advance by the decoded instruction's byte length.
        idx = idx + obj . get_length ( )
def system_find_analyses(input_params=None, always_retry=True, **kwargs):
    """Invokes the /system/findAnalyses API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindAnalyses

    :param input_params: Query parameters for the API call; defaults to an
        empty dict.
    :type input_params: dict
    :param always_retry: Whether the request is safe to retry.
    :returns: Decoded response from the API server.
    """
    # Avoid the shared-mutable-default pitfall: build a fresh dict per call.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/system/findAnalyses', input_params, always_retry=always_retry, **kwargs)
def loads(s, encoding=None, cls=None, object_hook=None, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    If ``s`` is a ``str`` encoded with an ASCII-based encoding other than
    utf-8 (e.g. latin-1), the ``encoding`` name must be specified.
    Non-ASCII-based encodings (such as UCS-2) are not allowed and should be
    decoded to ``unicode`` first.

    ``object_hook``, if given, is called with the result of every object
    literal decode (a ``dict``); its return value replaces the ``dict``
    (useful for custom decoders such as JSON-RPC class hinting).

    To use a custom ``JSONDecoder`` subclass, pass it as ``cls``.
    """
    decoder_cls = cls if cls is not None else JSONDecoder
    if object_hook is not None:
        kw['object_hook'] = object_hook
    decoder = decoder_cls(encoding=encoding, **kw)
    return decoder.decode(s)
def initialize(self, conf, ctx):
    """Initialization steps:

    1. Get :func:`~birding.search.search_manager_from_config`.
    2. Prepare to track searched terms as to avoid redundant searches.
    """
    # Per-bolt settings live under the 'TwitterSearchBolt' config section.
    bolt_config = get_config()['TwitterSearchBolt']
    self.manager = get_search_manager()
    # The shelf remembers already-searched terms across invocations.
    self.term_shelf = shelf_from_config(bolt_config)
def delete(self, using=None):
    """Delete this organization user unless they still own the organization.

    If the organization user is also the owner, deletion is refused
    (``OwnershipRequired``) unless it cascades from the Organization
    itself.  When the organization has no owner, deletion proceeds.
    """
    from organizations.exceptions import OwnershipRequired

    try:
        is_owner = self.organization.owner.organization_user.pk == self.pk
    # TODO This line presumes that OrgOwner model can't be modified
    except self._org_owner_model.DoesNotExist:
        # No owner recorded: nothing blocks the deletion.
        is_owner = False
    if is_owner:
        raise OwnershipRequired(_("Cannot delete organization owner before organization or transferring ownership."))
    super(AbstractBaseOrganizationUser, self).delete(using=using)
def gen_add ( src1 , src2 , dst ) :
    """Return an ADD instruction computing ``dst = src1 + src2``.

    :param src1: first source operand (must have the same width as ``src2``).
    :param src2: second source operand.
    :param dst: destination operand.
    :return: a REIL ADD instruction.
    """
    # REIL arithmetic requires both source operands to share a bit width.
    assert src1 . size == src2 . size
    return ReilBuilder . build ( ReilMnemonic . ADD , src1 , src2 , dst )
def GetFormatStringAttributeNames(self):
    """Retrieves the attribute names in the format string.

    The regular-expression scan of ``FORMAT_STRING`` runs only once; the
    result is cached on the instance for subsequent calls.

    Returns:
        set(str): attribute names.
    """
    cached = self._format_string_attribute_names
    if cached is None:
        cached = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(self.FORMAT_STRING)
        self._format_string_attribute_names = cached
    return set(cached)
def transform_folder(args):
    """Transform all the files in the source dataset for the given command and
    save the results as a single pickle file in the destination dataset.

    :param args: tuple with the following arguments:
        - the command name: 'zero', 'one', 'two', ...
        - transforms to apply to wav file
        - full path of the source dataset
        - full path of the destination dataset
    """
    command, (transform, src, dest) = args
    try:
        print(progress.value, "remaining")
        # Apply transformations to all files
        data_dir = os.path.join(src, command)
        data = [transform({'path': os.path.join(data_dir, filename)})
                for filename in os.listdir(data_dir)]
        # Save results.  GC is disabled while pickling for speed; the
        # try/finally guarantees it is re-enabled even if pickling fails
        # (the original could leave GC permanently disabled on error).
        pickle_file = os.path.join(dest, "{}.pkl".format(command))
        gc.disable()
        try:
            with open(pickle_file, "wb") as f:
                pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        finally:
            gc.enable()
        # Update the shared progress counter under its lock
        # (multiprocessing-safe).
        with progress.get_lock():
            progress.value -= 1
    except Exception as e:
        # Report but do not propagate: one bad command must not kill the pool.
        print(command, e, file=sys.stderr)
        traceback.print_exc()
def _get_elmt_amt_in_rxt(self, rxt):
    """Computes total number of atoms in a reaction formula for elements
    not in external reservoir. This method is used in the calculation
    of reaction energy per mol of reaction formula.

    Args:
        rxt (Reaction): a reaction.

    Returns:
        Total number of atoms for non_reservoir elements.
    """
    total = 0
    for element in self.pd.elements:
        total += rxt.get_el_amount(element)
    return total
def _save(self, acl, predefined, client):
    """Helper for :meth:`save` and :meth:`save_predefined`.

    :type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list.
    :param acl: The ACL object to save. If left blank, this will save
        current entries.

    :type predefined: str
    :param predefined: (Optional) An identifier for a predefined ACL. Must
        be one of the keys in :attr:`PREDEFINED_JSON_ACLS`.  If passed,
        ``acl`` must be None.

    :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType``
    :param client: Optional. The client to use. If not passed, falls back
        to the ``client`` stored on the ACL's parent.
    """
    query_params = {"projection": "full"}
    if predefined is not None:
        # A predefined ACL replaces any explicit entries entirely.
        acl = []
        query_params[self._PREDEFINED_QUERY_PARAM] = predefined
    if self.user_project is not None:
        query_params["userProject"] = self.user_project
    path = self.save_path
    api_client = self._require_client(client)
    result = api_client._connection.api_request(
        method="PATCH",
        path=path,
        data={self._URL_PATH_ELEM: list(acl)},
        query_params=query_params,
    )
    # Rebuild the local entity cache from the server's authoritative answer.
    self.entities.clear()
    for entry_dict in result.get(self._URL_PATH_ELEM, ()):
        self.add_entity(self.entity_from_dict(entry_dict))
    self.loaded = True
def put(files, remote_path=None, recursive=False, preserve_times=False, saltenv='base', **kwargs):
    '''
    Transfer files and directories to remote host.

    files
        A single path or a list of paths to be transferred.

    remote_path
        The path on the remote device where to store the files.

    recursive: ``True``
        Transfer files and directories recursively.

    preserve_times: ``False``
        Preserve ``mtime`` and ``atime`` of transferred files and directories.

    saltenv: ``base``
        The salt fileserver environment used to cache the source files.

    Connection options (``hostname``, ``port``, ``username``, ``password``,
    ``passphrase``, ``pkey``, ``key_filename``, ``timeout``,
    ``socket_timeout``, ``buff_size``, ``allow_agent``, ``look_for_keys``,
    ``banner_timeout``, ``auth_timeout``, ``auto_add_policy``) are passed
    through as keyword arguments to the SSH/SCP connection setup.

    CLI Example:

    .. code-block:: bash

        salt '*' scp.put /path/to/file /var/tmp/file hostname=server1 auto_add_policy=True
    '''
    scp_client = _prepare_connection(**kwargs)
    put_kwargs = {
        'recursive': recursive,
        'preserve_times': preserve_times,
    }
    if remote_path:
        put_kwargs['remote_path'] = remote_path
    if not isinstance(files, (list, tuple)):
        files = [files]
    # Cache every source through the salt fileserver so salt:// URLs work too.
    cached_files = [__salt__['cp.cache_file'](file_, saltenv=saltenv)
                    for file_ in files]
    return scp_client.put(cached_files, **put_kwargs)
def update_health_check(HealthCheckId=None, HealthCheckVersion=None, IPAddress=None, Port=None, ResourcePath=None, FullyQualifiedDomainName=None, SearchString=None, FailureThreshold=None, Inverted=None, HealthThreshold=None, ChildHealthChecks=None, EnableSNI=None, Regions=None, AlarmIdentifier=None, InsufficientDataHealthStatus=None):
    """Updates an existing Amazon Route 53 health check. Note that some
    values can't be updated.

    For details see "Creating, Updating, and Deleting Health Checks" in the
    Amazon Route 53 Developer Guide, and the AWS API Documentation.

    :type HealthCheckId: string
    :param HealthCheckId: [REQUIRED] ID of the health check to update, as
        returned by CreateHealthCheck.
    :type HealthCheckVersion: integer
    :param HealthCheckVersion: Sequential counter used for optimistic
        locking; a mismatch yields a HealthCheckVersionMismatch error.
    :type IPAddress: string
    :param IPAddress: IPv4/IPv6 address of the endpoint to check. Cannot be
        added to or removed from an existing check, only changed.
    :type Port: integer
    :param Port: Port on the endpoint to check.
    :type ResourcePath: string
    :param ResourcePath: HTTP(S) path expected to return a 2xx/3xx status.
    :type FullyQualifiedDomainName: string
    :param FullyQualifiedDomainName: Domain used for the Host header (and
        for DNS resolution when IPAddress is omitted).
    :type SearchString: string
    :param SearchString: String that must appear in the response body for
        *_STR_MATCH check types.
    :type FailureThreshold: integer
    :param FailureThreshold: Consecutive checks required to flip the
        endpoint's status (default 3).
    :type Inverted: boolean
    :param Inverted: Invert the healthy/unhealthy status.
    :type HealthThreshold: integer
    :param HealthThreshold: Healthy children required for a CALCULATED
        check to be healthy (0 means always healthy).
    :type ChildHealthChecks: list
    :param ChildHealthChecks: Child health check IDs for a CALCULATED check.
    :type EnableSNI: boolean
    :param EnableSNI: Send FullyQualifiedDomainName in the TLS client_hello.
    :type Regions: list
    :param Regions: Checker regions to probe the endpoint from.
    :type AlarmIdentifier: dict
    :param AlarmIdentifier: CloudWatch alarm reference; requires 'Region'
        and 'Name' keys.
    :type InsufficientDataHealthStatus: string
    :param InsufficientDataHealthStatus: Status to assume when CloudWatch
        lacks data: 'Healthy' | 'Unhealthy' | 'LastKnownStatus'.
    :rtype: dict
    :return: {'HealthCheck': {...}} describing the updated health check,
        including its HealthCheckConfig, HealthCheckVersion and any
        CloudWatchAlarmConfiguration.
    """
    # Auto-generated stub: the real request is dispatched by the boto3
    # client at runtime; this placeholder documents the call signature.
    pass
def set_rule(self, name, properties):
    """Expose a rule as an attribute on this object.

    Arguments:
        name (string): Rule name, used as the attribute name.
        properties (dict): Dictionary of rule properties stored under it.
    """
    setattr(self, name, properties)
    # Keep track of which attributes are rules so they can be enumerated later.
    self._rule_attrs.append(name)
def pause_point(self, msg='SHUTIT PAUSE POINT', shutit_pexpect_child=None, print_input=True, level=1, resize=True, color='32', default_msg=None, interact=False, wait=-1):
    """Inserts a pause in the build session, which allows the user to try
    things out before continuing. Ignored if we are not in an interactive
    mode, or the interactive level is less than the passed-in one.
    Designed to help debug the build, or drop to on failure so the
    situation can be debugged.

    @param msg:                  Message to display to user on pause point.
    @param shutit_pexpect_child: See send()
    @param print_input:          Whether to take input at this point (i.e. interact), or
                                 simply pause pending any input.
                                 Default: True
    @param level:                Minimum level to invoke the pause_point at.
                                 Default: 1
    @param resize:               If True, try to resize terminal.
                                 Default: True
    @param color:                Color to print message (typically 31 for red, 32 for green)
    @param default_msg:          Whether to print the standard blurb
    @param wait:                 Wait a few seconds rather than for input

    @type msg:                   string
    @type print_input:           boolean
    @type level:                 integer
    @type resize:                boolean
    @type wait:                  decimal

    @return: True if pause point handled ok, else false
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # Bail out (successfully) when not running interactively, or when the
    # session's interactive level is below the level requested here.
    if (not shutit_global.shutit_global_object.determine_interactive() or shutit_global.shutit_global_object.interactive < 1 or shutit_global.shutit_global_object.interactive < level):
        return True
    # Default to the current session's pexpect child when none was passed in.
    shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    # Don't log log traces while in interactive; original value is restored
    # before returning.
    log_trace_when_idle_original_value = shutit_global.shutit_global_object.log_trace_when_idle
    shutit_global.shutit_global_object.log_trace_when_idle = False
    if shutit_pexpect_child:
        # Clear the managed screen and stop re-rendering while the user
        # interacts with the terminal directly.
        if shutit_global.shutit_global_object.pane_manager is not None:
            shutit_global.shutit_global_object.pane_manager.draw_screen(draw_type='clearscreen')
            shutit_global.shutit_global_object.pane_manager.do_render = False
        shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
        # TODO: context added to pause point message
        shutit_pexpect_session.pause_point(msg=msg, print_input=print_input, resize=resize, color=color, default_msg=default_msg, wait=wait, interact=interact)
    else:
        # Nothing to hand control of; log and exit the build.
        self.log(msg, level=logging.DEBUG)
        self.log('Nothing to interact with, so quitting to presumably the original shell', level=logging.DEBUG)
        shutit_global.shutit_global_object.handle_exit(exit_code=1)
    if shutit_pexpect_child:
        # Resume rendering and redraw now that the pause point has finished.
        if shutit_global.shutit_global_object.pane_manager is not None:
            shutit_global.shutit_global_object.pane_manager.do_render = True
            shutit_global.shutit_global_object.pane_manager.draw_screen(draw_type='clearscreen')
    self.build['ctrlc_stop'] = False
    # Revert value of log_trace_when_idle
    shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
    return True
def dedent(lines):
    """Yield *lines* with the first line's leading indentation stripped.

    Raises ValueError if any line is indented less than the first line
    (i.e. its prefix within the strip width contains non-whitespace).
    An empty input yields nothing.
    """
    if not lines:
        return
    indent = len(lines[0]) - len(lines[0].lstrip())
    for line in lines:
        if line[:indent].strip():
            raise ValueError('less indentation than first line: ' + line)
        yield line[indent:]
def format_table(rows, sep=' '):
    """Format rows as an aligned plain-text table.

    :param rows: list of rows; each row is a sequence of cells of any type
        (cells are measured via ``text_type`` for width purposes).
    :param sep: separator between columns
    :type sep: unicode on python2 | str on python3
    :return: the whole table as a single newline-joined string.

    Given the table::

        table = [
            ['foo', 'bar', 'foo'],
            [1, 2, 3],
            ['54a5a05d-c83b-4bb5-bd95-d90d6ea4a878'],
            ['foo', 45, 'bar', 2345],
        ]

    ``format_table`` will return::

        foo bar foo
        1   2   3
        54a5a05d-c83b-4bb5-bd95-d90d6ea4a878
        foo 45  bar 2345
    """
    # Size the width table to the widest row. The previous implementation
    # hard-coded 100 slots, which silently dropped columns beyond the 100th.
    num_cols = max((len(row) for row in rows), default=0)
    col_widths = [0] * num_cols
    for row in rows:
        for index, col in enumerate(row):
            width = len(text_type(col))
            if width > col_widths[index]:
                col_widths[index] = width
    formatted_rows = []
    for row in rows:
        # Left-pad every column except the last, which stays ragged.
        format_str = sep.join(
            '{:<%s}' % col_widths[i] if i < (len(row) - 1) else '{}'
            for i in range(len(row))
        )
        formatted_rows.append(format_str.format(*row))
    return '\n'.join(formatted_rows)
def one_to_many(df, unitcol, manycol):
    """Assert that a many-to-one relationship is preserved between two
    columns. For example, a retail store will have distinct departments,
    each with several employees. If each employee may only work in a single
    department, then the relationship of the department to the employees is
    one to many.

    Parameters
    ----------
    df : DataFrame
    unitcol : str
        The column that encapsulates the groups in ``manycol``.
    manycol : str
        The column that must remain unique in the distinct pairs
        between ``manycol`` and ``unitcol``.

    Returns
    -------
    df : DataFrame

    Raises
    ------
    AssertionError
        If any value of ``manycol`` is paired with more than one value of
        ``unitcol``.
    """
    pairs = df[[manycol, unitcol]].drop_duplicates()
    for value in pairs[manycol].unique():
        pair_count = (pairs[manycol] == value).sum()
        if pair_count > 1:
            msg = "{} in {} has multiple values for {}".format(value, manycol, unitcol)
            raise AssertionError(msg)
    return df
def propagate_type_and_convert_call(result, node):
    '''Propagate the types variables and convert tmp call to real call operation.

    :param result: list of IR instructions for `node`; mutated in place.
    :param node: CFG node that owns the instructions.
    :return: the updated instruction list.
    '''
    # Gas/value arguments are emitted as separate Argument instructions
    # before the call itself; collect them per call id so they can be
    # attached to the matching call instruction when it is reached.
    calls_value = {}
    calls_gas = {}
    call_data = []
    idx = 0
    # use of while len() as result can be modified during the iteration
    while idx < len(result):
        ins = result[idx]
        if isinstance(ins, TmpCall):
            # Resolve the placeholder call into a concrete call instruction.
            new_ins = extract_tmp_call(ins, node.function.contract)
            if new_ins:
                new_ins.set_node(ins.node)
                ins = new_ins
                result[idx] = ins
        if isinstance(ins, Argument):
            # Stash gas/value arguments by call id; plain CALL arguments
            # accumulate in call_data until the call instruction consumes them.
            if ins.get_type() in [ArgumentType.GAS]:
                assert not ins.call_id in calls_gas
                calls_gas[ins.call_id] = ins.argument
            elif ins.get_type() in [ArgumentType.VALUE]:
                assert not ins.call_id in calls_value
                calls_value[ins.call_id] = ins.argument
            else:
                assert ins.get_type() == ArgumentType.CALL
                call_data.append(ins.argument)
        if isinstance(ins, (HighLevelCall, NewContract, InternalDynamicCall)):
            # Attach any value/gas previously recorded for this call site.
            if ins.call_id in calls_value:
                ins.call_value = calls_value[ins.call_id]
            if ins.call_id in calls_gas:
                ins.call_gas = calls_gas[ins.call_id]
        if isinstance(ins, (Call, NewContract, NewStructure)):
            # The accumulated CALL arguments belong to this instruction.
            ins.arguments = call_data
            call_data = []
        if is_temporary(ins):
            # Drop temporary instructions without advancing idx: the next
            # instruction shifts into the current slot.
            del result[idx]
            continue
        new_ins = propagate_types(ins, node)
        if new_ins:
            if isinstance(new_ins, (list,)):
                # Type propagation may expand one instruction into 2 or 3;
                # splice the replacements in place and advance idx past all
                # but the last inserted instruction (the trailing idx += 1
                # below covers the last one).
                if len(new_ins) == 2:
                    new_ins[0].set_node(ins.node)
                    new_ins[1].set_node(ins.node)
                    del result[idx]
                    result.insert(idx, new_ins[0])
                    result.insert(idx + 1, new_ins[1])
                    idx = idx + 1
                else:
                    assert len(new_ins) == 3
                    new_ins[0].set_node(ins.node)
                    new_ins[1].set_node(ins.node)
                    new_ins[2].set_node(ins.node)
                    del result[idx]
                    result.insert(idx, new_ins[0])
                    result.insert(idx + 1, new_ins[1])
                    result.insert(idx + 2, new_ins[2])
                    idx = idx + 2
            else:
                new_ins.set_node(ins.node)
                result[idx] = new_ins
        idx = idx + 1
    return result
def set(self) -> None:
    """Set the internal flag to ``True`` and wake all waiters.

    Calling `.wait` once the flag is set will not block. Setting an
    already-set flag is a no-op.
    """
    if self._value:
        return
    self._value = True
    for waiter in self._waiters:
        if not waiter.done():
            waiter.set_result(None)
def system_qos_qos_service_policy_attach_rbridge_id_remove_rb_remove_range ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
system_qos = ET . SubElement ( config , "system-qos" , xmlns = "urn:brocade.com:mgmt:brocade-policer" )
qos = ET . SubElement ( system_qos , "qos" )
service_policy = ET . SubElement ( qos , "service-policy" )
direction_key = ET . SubElement ( service_policy , "direction" )
direction_key . text = kwargs . pop ( 'direction' )
policy_map_name_key = ET . SubElement ( service_policy , "policy-map-name" )
policy_map_name_key . text = kwargs . pop ( 'policy_map_name' )
attach = ET . SubElement ( service_policy , "attach" )
rbridge_id = ET . SubElement ( attach , "rbridge-id" )
remove = ET . SubElement ( rbridge_id , "remove" )
rb_remove_range = ET . SubElement ( remove , "rb-remove-range" )
rb_remove_range . text = kwargs . pop ( 'rb_remove_range' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def filter_desc(self, graintype=None, group=None, reference=None, size=None, phase=None):
    '''Filter the grain-description table in place.

    You can check what is available in the description by running

    >>> i.header_desc()

    where i is the instance you loaded. The filter can be run multiple
    times; all criteria given in one call are applied one after the other.

    Parameters
    ----------
    graintype : string or list
        Graintypes, e.g. 'M' for mainstream only, or several as ['M', 'Z'].
    group : integer or list
        Group of graintypes; important for oxides and silicates, which are
        split into groups and not into types. Example: 1, or a list [1, 3].
    reference : string or list
        Reference(s) to filter for; try i.info() to pick the right name.
        A single reference as a string or multiple references as a list.
    size : string
        Grain-size filter, given as '<5.0' or '>5.0' (in um). Only data
        with known grain sizes are chosen. Sizes given as 'a x b' use the
        larger dimension for '>' and the smaller dimension for '<'.
    phase : string or list
        Phase(s) to filter for, analogous to graintype.
    '''
    def _match_column(values, column):
        # Collect indices of rows whose `column` equals any of `values`,
        # preserving value-major ordering, then narrow the table.
        indexing = []
        for wanted in values:
            for i in range(len(self.desc)):
                if self.desc[i][self.descdict[column]] == wanted:
                    indexing.append(i)
        self._filter_desc(indexing)

    if graintype is not None:
        if isinstance(graintype, str):
            graintype = [graintype]
        _match_column(graintype, 'Type')
    if phase is not None:
        if isinstance(phase, str):
            phase = [phase]
        _match_column(phase, 'Phase')
    if group is not None:
        if not isinstance(group, list):
            group = [group]
        # Group values are stored as strings of integers.
        _match_column([str(int(grp)) for grp in group], 'Group')
    if reference is not None:
        if not isinstance(reference, list):
            reference = [reference]
        _match_column(reference, 'Reference')
    if size is not None:
        indexing = []
        operator = size[0:1]
        threshold = float(size[1:])
        size_col = self.descdict['Size (microns)']
        for i in range(len(self.desc)):
            entry = self.desc[i][size_col]
            if entry == '':
                continue
            try:
                # 'a x b' entries: use min for '<' and max for '>'.
                dims = [float(entry.split('x')[0]), float(entry.split('x')[1])]
                comperator = min(dims) if operator == '<' else max(dims)
            except (IndexError, ValueError, AttributeError):
                # BUGFIX: the original `except IndexError or AttributeError`
                # evaluated to `except IndexError` only, so malformed 'a x b'
                # entries raising ValueError/AttributeError crashed the filter.
                try:
                    comperator = float(entry)
                except (TypeError, ValueError):
                    # Unparseable size entry: skip this row.
                    continue
            if operator == '>':
                if comperator > threshold:
                    indexing.append(i)
            elif operator == '<':
                if comperator < threshold:
                    indexing.append(i)
        self._filter_desc(indexing)
def tag(self, resource_id):
    """Append a tag name to the request URI for tag-specific retrieval.

    Args:
        resource_id (string): The tag name.
    """
    safe_name = self.tcex.safetag(resource_id)
    self._request_uri = '{}/{}'.format(self._request_uri, safe_name)
def uncluster_annotations(self, input_annotations, reverse_pipe):
    '''Update the annotations hash provided by pplacer to include all
    representatives within each cluster.

    NOTE(review): uses dict.iteritems(), i.e. this is Python 2 code.

    Parameters
    ----------
    input_annotations : hash
        Classifications for each representative sequence of the clusters,
        each key being the sequence name, and the entry being the taxonomy
        string as a list.
    reverse_pipe : bool
        True/False, whether the reverse reads pipeline is being followed.

    Returns
    -------
    output_annotations : hash
        An updated version of the above, which includes all reads from
        each cluster, keyed by the alignment file's base name.
    '''
    output_annotations = {}
    for placed_alignment_file_path, clusters in self.seq_library.iteritems():
        # In reverse-pipe runs everything is keyed by the forward file;
        # skip the reverse-clustered entries entirely.
        if reverse_pipe and placed_alignment_file_path.endswith("_reverse_clustered.fa"):
            continue
        placed_alignment_file = os.path.basename(placed_alignment_file_path)
        cluster_classifications = input_annotations[placed_alignment_file]
        # Strip the clustering suffix to recover the original file base name.
        if reverse_pipe:
            placed_alignment_base = placed_alignment_file.replace('_forward_clustered.fa', '')
        else:
            placed_alignment_base = placed_alignment_file.replace('_clustered.fa', '')
        output_annotations[placed_alignment_base] = {}
        for rep_read_name, rep_read_taxonomy in cluster_classifications.iteritems():
            if reverse_pipe:
                # Re-key the clusters without any OrfM frame decoration so the
                # (undecorated) representative name can be looked up. The
                # rebound `clusters` persists for subsequent representatives.
                orfm_regex = OrfM.regular_expression()
                clusters = {(orfm_regex.match(key).groups(0)[0] if orfm_regex.match(key) else key): item for key, item in clusters.iteritems()}
            # Every read in the representative's cluster inherits its taxonomy.
            for read in clusters[rep_read_name]:
                output_annotations[placed_alignment_base][read.name] = rep_read_taxonomy
    return output_annotations
def find(self, tagtype, **kwargs):
    '''Return the first tag of the given type attached to this token.

    A ``default`` keyword argument, when supplied, is returned instead of
    raising ``LookupError`` when no matching tag exists.
    '''
    for candidate in self.__tags:
        if candidate.tagtype == tagtype:
            return candidate
    if 'default' not in kwargs:
        raise LookupError("Token {} is not tagged with the speficied tagtype ({})".format(self, tagtype))
    return kwargs['default']
def lognormal(mu, sigma, random_state):
    '''Draw from a log-normal distribution by exponentiating a normal draw.

    mu : float or array_like of floats
    sigma : float or array_like of floats
    random_state : an object of numpy.random.RandomState
    '''
    gaussian_draw = normal(mu, sigma, random_state)
    return np.exp(gaussian_draw)
def subset(self, sel0=None, sel1=None):
    """Return a sub-selection of variants and haplotypes.

    Parameters
    ----------
    sel0 : array_like
        Boolean mask or index array selecting variants.
    sel1 : array_like
        Boolean mask or index array selecting haplotypes.

    Returns
    -------
    out : HaplotypeArray

    See Also
    --------
    HaplotypeArray.take, HaplotypeArray.compress
    """
    # Preserve the concrete subclass of self in the result.
    result_cls = type(self)
    return subset_haplotype_array(self, sel0, sel1, cls=result_cls, subset=subset)
def delta(d1, d2, opt='d'):
    """Return the difference between two dates.

    With ``opt='m'`` the result is the number of whole calendar months that
    fit between d1 and d2; otherwise it is the difference in days.
    """
    if opt != 'm':
        return (d2 - d1).days
    months = 0
    while True:
        # Advance d1 by one full calendar month at a time.
        days_in_month = monthrange(d1.year, d1.month)[1]
        d1 += timedelta(days=days_in_month)
        if d1 > d2:
            break
        months += 1
    return months
def setup_scrapy():
    """Silence scrapy's logging hooks.

    This is not needed as we pass 'LOG_ENABLED': False to scrapy at init
    time, but it additionally replaces the crawler module's logging entry
    points with no-ops.
    """
    def _noop_configure_logging(install_root_handler=False, settings=None):
        fake_use(install_root_handler)
        fake_use(settings)

    def _noop_log_scrapy_info(settings=None):
        fake_use(settings)

    # scrapy stuff
    import scrapy.utils.log  # for configure_logging
    import scrapy.crawler    # configure_logging, log_scrapy_info
    logging_settings = {'LOG_ENABLED': False, 'LOG_LEVEL': logging.WARN}
    scrapy.utils.log.configure_logging(install_root_handler=False, settings=logging_settings)
    # are you watching closely?!? Swap in the no-op replacements.
    scrapy.crawler.configure_logging = _noop_configure_logging
    scrapy.crawler.log_scrapy_info = _noop_log_scrapy_info
def set_xlimits_for_all(self, row_column_list=None, min=None, max=None):
    """Set x-axis limits of specified subplots.

    :param row_column_list: a list containing (row, column) tuples to
        specify the subplots, or None to indicate *all* subplots.
    :type row_column_list: list or None
    :param min: minimal axis value
    :param max: maximum axis value
    """
    if row_column_list is None:
        # Apply globally via the shared limits dict.
        self.limits['xmin'] = min
        self.limits['xmax'] = max
        return
    for row, column in row_column_list:
        self.set_xlimits(row, column, min, max)
def read_messages(self) -> str:
    """Return new device messages, separated by newlines.

    Depending on the backend this may emit ready_for_action or
    fatal_error_occurred signals and clears the native device's
    message queue.
    """
    if self.backend == Backends.grc:
        errors = self.__dev.read_errors()
        if "FATAL: " in errors:
            self.fatal_error_occurred.emit(errors[errors.index("FATAL: "):])
        return errors
    if self.backend == Backends.native:
        # Drain the queued messages and normalize the trailing newline.
        messages = "\n".join(self.__dev.device_messages)
        self.__dev.device_messages.clear()
        if messages and not messages.endswith("\n"):
            messages += "\n"
        if "successfully started" in messages:
            self.ready_for_action.emit()
        elif "failed to start" in messages:
            self.fatal_error_occurred.emit(messages[messages.index("failed to start"):])
        return messages
    if self.backend == Backends.network:
        return ""
    raise ValueError("Unsupported Backend")
def delete(self):
    """Delete this record, delegating to the on-delete hook when one is set."""
    hook = self._on_delete
    if hook is not None:
        return hook(self)
    return self._query.delete()
def suspend(self):
    """Suspends this VirtualBox VM.

    Runs as an old-style (``yield from``) coroutine: queries the VM state
    and issues a ``pause`` only when the VM is currently running; otherwise
    logs a warning and leaves the state untouched.
    """
    vm_state = yield from self._get_vm_state()
    if vm_state == "running":
        yield from self._control_vm("pause")
        # Track the suspension locally so callers see the new status.
        self.status = "suspended"
        log.info("VirtualBox VM '{name}' [{id}] suspended".format(name=self.name, id=self.id))
    else:
        log.warn("VirtualBox VM '{name}' [{id}] cannot be suspended, current state: {state}".format(name=self.name, id=self.id, state=vm_state))
def _from_dict ( cls , _dict ) :
"""Initialize a DialogNode object from a json dictionary .""" | args = { }
if 'dialog_node' in _dict :
args [ 'dialog_node' ] = _dict . get ( 'dialog_node' )
else :
raise ValueError ( 'Required property \'dialog_node\' not present in DialogNode JSON' )
if 'description' in _dict :
args [ 'description' ] = _dict . get ( 'description' )
if 'conditions' in _dict :
args [ 'conditions' ] = _dict . get ( 'conditions' )
if 'parent' in _dict :
args [ 'parent' ] = _dict . get ( 'parent' )
if 'previous_sibling' in _dict :
args [ 'previous_sibling' ] = _dict . get ( 'previous_sibling' )
if 'output' in _dict :
args [ 'output' ] = DialogNodeOutput . _from_dict ( _dict . get ( 'output' ) )
if 'context' in _dict :
args [ 'context' ] = _dict . get ( 'context' )
if 'metadata' in _dict :
args [ 'metadata' ] = _dict . get ( 'metadata' )
if 'next_step' in _dict :
args [ 'next_step' ] = DialogNodeNextStep . _from_dict ( _dict . get ( 'next_step' ) )
if 'title' in _dict :
args [ 'title' ] = _dict . get ( 'title' )
if 'type' in _dict or 'node_type' in _dict :
args [ 'node_type' ] = _dict . get ( 'type' ) or _dict . get ( 'node_type' )
if 'event_name' in _dict :
args [ 'event_name' ] = _dict . get ( 'event_name' )
if 'variable' in _dict :
args [ 'variable' ] = _dict . get ( 'variable' )
if 'actions' in _dict :
args [ 'actions' ] = [ DialogNodeAction . _from_dict ( x ) for x in ( _dict . get ( 'actions' ) ) ]
if 'digress_in' in _dict :
args [ 'digress_in' ] = _dict . get ( 'digress_in' )
if 'digress_out' in _dict :
args [ 'digress_out' ] = _dict . get ( 'digress_out' )
if 'digress_out_slots' in _dict :
args [ 'digress_out_slots' ] = _dict . get ( 'digress_out_slots' )
if 'user_label' in _dict :
args [ 'user_label' ] = _dict . get ( 'user_label' )
if 'disabled' in _dict :
args [ 'disabled' ] = _dict . get ( 'disabled' )
if 'created' in _dict :
args [ 'created' ] = string_to_datetime ( _dict . get ( 'created' ) )
if 'updated' in _dict :
args [ 'updated' ] = string_to_datetime ( _dict . get ( 'updated' ) )
return cls ( ** args ) |
def getCanonicalID(iname, xrd_tree):
    """Return the CanonicalID from this XRDS document.

    @param iname: the XRI being resolved.
    @type iname: unicode
    @param xrd_tree: The XRDS output from the resolver.
    @type xrd_tree: ElementTree
    @returns: The XRI CanonicalID or None.
    @returntype: unicode or None
    @raises XRDSFraud: when a CanonicalID in the chain is not delegated by
        its parent XRD's CanonicalID, or the root authority is not
        authoritative for the chain.
    """
    # Walk the XRDs from the most specific (last) to the least specific.
    xrd_list = xrd_tree.findall(xrd_tag)
    xrd_list.reverse()
    try:
        canonicalID = xri.XRI(xrd_list[0].findall(canonicalID_tag)[0].text)
    except IndexError:
        # No CanonicalID present in the final XRD.
        return None
    childID = canonicalID.lower()
    for xrd in xrd_list[1:]: # XXX: can't use rsplit until we require python >= 2.4.
        # Each child CanonicalID must be delegated by its parent's
        # CanonicalID, i.e. the child is '<parent>!<segment>'.
        parent_sought = childID[:childID.rindex('!')]
        parent = xri.XRI(xrd.findtext(canonicalID_tag))
        if parent_sought != parent.lower():
            raise XRDSFraud("%r can not come from %s" % (childID, parent))
        childID = parent_sought
    # Finally, the top of the chain must be delegated by the root authority
    # of the iname being resolved.
    root = xri.rootAuthority(iname)
    if not xri.providerIsAuthoritative(root, childID):
        raise XRDSFraud("%r can not come from root %r" % (childID, root))
    return canonicalID
def _onShortcutSelectAndScroll(self, down):
    """Handle Ctrl+Shift+Up/Down: extend the selection by one line and
    scroll the viewport in the same direction."""
    direction = QTextCursor.Down if down else QTextCursor.Up
    cur = self.textCursor()
    cur.movePosition(direction, QTextCursor.KeepAnchor)
    self.setTextCursor(cur)
    self._onShortcutScroll(down)
def addvFunc(self, solution, EndOfPrdvP):
    '''Attach this period's value function to the given solution.

    Parameters
    ----------
    solution : ConsumerSolution
        The solution to this single period problem (consumption function,
        marginal value function, etc.).
    EndOfPrdvP : np.array
        End-of-period marginal value of assets at the points in
        self.aNrmNow.

    Returns
    -------
    solution : ConsumerSolution
        The same solution object, now carrying the value function
        (defined over market resources m) as an attribute.
    '''
    self.makeEndOfPrdvFunc(EndOfPrdvP)
    value_function = self.makevFunc(solution)
    solution.vFunc = value_function
    return solution
def _collapse_table_root(current, dsn, pc):
    """Create a table with items in root given the current time series entry.

    :param dict current: Current time series entry
    :param str dsn: Dataset name
    :param str pc: paleoData or chronData
    :return dict _tmp_table: Table data (root metadata keys plus an empty
        'columns' dict)
    """
    logger_ts.info("enter collapse_table_root")
    # _variable_name is unused here; only the table name is used (in the
    # error log below).
    _table_name, _variable_name = _get_current_names(current, dsn, pc)
    _tmp_table = {'columns': {}}
    try:
        for k, v in current.items():
            # These are the main table keys that we should be looking for.
            # NOTE(review): `i in k` is a substring test, so a key that merely
            # contains one of these names (e.g. 'filenameOld') also matches —
            # presumably exact membership was intended; confirm before
            # tightening.
            for i in ['filename', 'googleWorkSheetKey', 'tableName', "missingValue", "tableMD5", "dataMD5"]:
                if i in k:
                    try:
                        # NOTE(review): a plain dict assignment cannot raise
                        # KeyError; this inner try/except appears to be dead.
                        _tmp_table[i] = v
                    except Exception: # Not all keys are available. It's okay if we hit a KeyError.
                        pass
    except Exception as e:
        print("Error: Unable to collapse: {}, {}".format(dsn, e))
        logger_ts.error("collapse_table_root: Unable to collapse: {}, {}, {}".format(_table_name, dsn, e))
    return _tmp_table
def get_uncompleted_tasks(self):
    """Return all of a user's uncompleted tasks across every project.

    .. warning:: Requires Todoist premium.

    :return: A list of uncompleted tasks.
    :rtype: list of :class:`pytodoist.todoist.Task`

    >>> from pytodoist import todoist
    >>> user = todoist.login('john.doe@gmail.com', 'password')
    >>> uncompleted_tasks = user.get_uncompleted_tasks()
    >>> for task in uncompleted_tasks:
    ...     task.complete()
    """
    all_tasks = []
    for project in self.get_projects():
        all_tasks.extend(project.get_uncompleted_tasks())
    return all_tasks
def _highlightBracket ( self , bracket , qpart , block , columnIndex ) :
"""Highlight bracket and matching bracket
Return tuple of QTextEdit . ExtraSelection ' s""" | try :
matchedBlock , matchedColumnIndex = self . _findMatchingBracket ( bracket , qpart , block , columnIndex )
except _TimeoutException : # not found , time is over
return [ ]
# highlight nothing
if matchedBlock is not None :
self . currentMatchedBrackets = ( ( block , columnIndex ) , ( matchedBlock , matchedColumnIndex ) )
return [ self . _makeMatchSelection ( block , columnIndex , True ) , self . _makeMatchSelection ( matchedBlock , matchedColumnIndex , True ) ]
else :
self . currentMatchedBrackets = None
return [ self . _makeMatchSelection ( block , columnIndex , False ) ] |
def grading_means_passed(self):
    '''Tell whether the given grading counts as passed.

    Non-graded assignments always pass; graded ones pass only when a
    grading exists and is marked as passing.
    '''
    if not self.assignment.is_graded():
        return True
    return bool(self.grading and self.grading.means_passed)
def cleanup():
    """Reset the installation directory: recreate an empty '_lib' folder
    under $CONTAINER_DIR."""
    target = os.path.join(os.environ['CONTAINER_DIR'], '_lib')
    if os.path.exists(target):
        shutil.rmtree(target)
    os.mkdir(target)
def classical_strength_of_connection(A, theta=0.0, norm='abs'):
    """Compute the classical AMG strength-of-connection matrix.

    An off-diagonal entry A[i, j] is a strong connection iff::

        |A[i, j]| >= theta * max(|A[i, k]|), where k != i   (norm='abs')
        -A[i, j]  >= theta * max(-A[i, k]),  where k != i   (norm='min')

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Square, sparse matrix in CSR or BSR format (implicitly converted
        to CSR when needed, with a SparseEfficiencyWarning).
    theta : float
        Threshold parameter in [0, 1].
    norm : 'string'
        'abs' to use the absolute value, 'min' to use the negative value
        (see above).

    Returns
    -------
    S : csr_matrix
        Matrix graph defining strong connections. S[i, j] != 0 if vertex i
        is strongly influenced by vertex j. Entries hold magnitudes scaled
        so each row's largest entry is 1.

    See Also
    --------
    symmetric_strength_of_connection : symmetric measure used in SA
    evolution_strength_of_connection : relaxation based strength measure

    Notes
    -----
    - A symmetric A does not necessarily yield a symmetric strength matrix S.
    - Calls the C++ function classical_strength_of_connection.
    - The version as implemented is designed for M-matrices; a positive edge
      weight never indicates a strong connection. See Briggs, Henson &
      McCormick, "A multigrid tutorial" (SIAM, 2000) and Trottenberg,
      Oosterlee & Schuller, "Multigrid" (Academic Press, 2001).

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.gallery import stencil_grid
    >>> from pyamg.strength import classical_strength_of_connection
    >>> n = 3
    >>> stencil = np.array([[-1.0, -1.0, -1.0],
    ...                     [-1.0,  8.0, -1.0],
    ...                     [-1.0, -1.0, -1.0]])
    >>> A = stencil_grid(stencil, (n, n), format='csr')
    >>> S = classical_strength_of_connection(A, 0.0)
    """
    # Remember the block size of BSR input so S can be amalgamated at the end.
    blocksize = A.blocksize[0] if sparse.isspmatrix_bsr(A) else 1
    if not sparse.isspmatrix_csr(A):
        warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
        A = sparse.csr_matrix(A)
    if theta < 0 or theta > 1:
        raise ValueError('expected theta in [0,1]')
    # Output CSR buffers, filled in place by the C++ kernel.
    Sp = np.empty_like(A.indptr)
    Sj = np.empty_like(A.indices)
    Sx = np.empty_like(A.data)
    if norm == 'abs':
        kernel = amg_core.classical_strength_of_connection_abs
    elif norm == 'min':
        kernel = amg_core.classical_strength_of_connection_min
    else:
        raise ValueError('Unknown norm')
    kernel(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
    S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)
    if blocksize > 1:
        S = amalgamate(S, blocksize)
    # Strength represents "distance", so take the magnitude, then scale S by
    # the largest magnitude entry in each row.
    S.data = np.abs(S.data)
    S = scale_rows_by_largest_entry(S)
    return S
def send_unsent(self):
    """Retry sending every email stored in :attr:`self.unsent`.

    Emails that could not be sent are stored in :attr:`self.unsent`;
    this drains that queue in order, attempting each send.
    """
    # Drain from the front rather than iterating over indices while popping:
    # the original `for i in range(len(...)) ... pop(i)` skipped every other
    # entry and raised IndexError once the list shrank past i.
    while self.unsent:
        recipients, msg_string = self.unsent.pop(0)
        self._attempt_send(recipients, msg_string)
def _send_batch(self, batch):
    """Send a batch of JSON records to the destination server via HTTP REST API.

    :param batch: iterable of JSON-encoded record strings; joined into a
        single JSON array for the POST body.
    :raises exceptions.BadToken: on HTTP 400 (token rejected).
    :raises exceptions.SendFailed: on any other non-OK response or request
        error.
    :raises exceptions.BatchTooBig: when the connection breaks mid-send
        (broken pipe) — presumably indicating an oversized payload; the
        connected flag is cleared in that case.
    """
    try:
        # Make JSON array string
        json_batch = '[' + ','.join(batch) + ']'
        logger.debug(consts.LOG_MSG_SENDING_BATCH, len(batch), len(json_batch), self._rest_url)
        res = self._session.post(self._rest_url, data=json_batch, headers=consts.CONTENT_TYPE_JSON)
        logger.debug(consts.LOG_MSG_BATCH_SENT_RESULT, res.status_code, res.content)
        if res.status_code == 400:
            # 400 means the auth token was rejected; notify and abort.
            self._notify(logging.CRITICAL, consts.LOG_MSG_BAD_TOKEN)
            raise exceptions.BadToken(consts.LOG_MSG_BAD_TOKEN)
        elif not res.ok:
            raise exceptions.SendFailed("Got bad response code - %s: %s" % (res.status_code, res.content if res.content else 'No info'))
    except broken_pipe_errors as ex:
        # Server closed the stream on us: mark the connection down and
        # surface the failure as a batch-size problem.
        self._is_connected.clear()
        raise exceptions.BatchTooBig(consts.LOG_MSG_BATCH_TOO_BIG % str(ex))
    except requests.exceptions.RequestException as ex:
        raise exceptions.SendFailed(str(ex))
def lambdafan(func):
    """simple decorator that will auto fan out async style in lambda.

    outside of lambda, this will invoke synchrously (the function is
    returned unwrapped).
    """
    # Not running inside AWS Lambda: leave the function untouched.
    if 'AWS_LAMBDA_FUNCTION_NAME' not in os.environ:
        return func

    @functools.wraps(func)
    def scaleout(*args, **kw):
        payload = dumps({
            'event': 'fanout',
            'function': func.__name__,
            'args': args,
            'kwargs': kw,
        })
        lambda_client = boto3.client('lambda')
        # Fire-and-forget re-invocation of this same lambda.
        lambda_client.invoke(
            FunctionName=os.environ['AWS_LAMBDA_FUNCTION_NAME'],
            InvocationType='Event',
            Payload=payload,
            Qualifier=os.environ['AWS_LAMBDA_FUNCTION_VERSION'],
        )
    return scaleout
def _jtime(self, timestamp):
    """Convert a datetime or unix timestamp into a JVM Time object.

    NOTE: uses ``long``, i.e. this is Python 2 code.
    """
    if isinstance(timestamp, datetime):
        # Normalize datetimes to a unix timestamp first.
        timestamp = time.mktime(timestamp.timetuple())
    millis = long(timestamp * 1000)
    return self._sc._jvm.Time(millis)
def calc_thresholds(rbh, file_name, thresholds=None, stdevs=2):
    """Summarize RBH blast comparisons and, when requested, derive
    normalized bit score thresholds from the score distribution.

    :param rbh: nested dict ``rbh[genomeA][genomeB][hit] -> stats`` where the
        last five stats entries are (percent identity, alignment length
        fraction, e-value, bit score, normalized bit score)
    :param file_name: path of the summary file to write
    :param thresholds: list of four threshold slots; when the last entry is
        True, a per-genome-pair normalized bit score threshold is computed
        (mean minus ``stdevs`` standard deviations). Defaults to
        ``[False, False, False, False]``.
    :param stdevs: number of standard deviations below the mean for the
        computed threshold
    :return: ``thresholds`` unchanged, or with the last slot replaced by the
        dict of computed per-pair thresholds when the last slot was True

    Fixes: mutable default argument replaced with a None sentinel; ``id``
    no longer shadows the builtin; pair membership tracked in a set instead
    of a list; the output file is managed with ``with``; dead commented-out
    histogram code removed.
    """
    if thresholds is None:
        # Fresh list per call -- avoids the shared-mutable-default pitfall.
        thresholds = [False, False, False, False]
    calc_threshold = thresholds[-1]

    # Pre-build the nested threshold mapping for every ordered genome pair.
    norm_threshold = {}
    for a, b in itertools.permutations(list(rbh), 2):
        norm_threshold.setdefault(a, {})[b] = {}

    with open(file_name, 'w') as out:
        print('#### summary of rbh comparisons\n', file=out)
        compared = set()  # unordered genome pairs already reported
        for genome in rbh:
            for compare in rbh[genome]:
                pair = ''.join(sorted([genome, compare]))
                if pair in compared:
                    continue
                compared.add(pair)
                scores = {'percent identity': [], 'e-value': [],
                          'bit score': [], 'normalized bit score': [],
                          'alignment length fraction': []}
                print('### blast between %s and %s\n' % (genome, compare), file=out)
                for hit in rbh[genome][compare]:
                    pident, length_fraction, e, bit, norm_bit = \
                        rbh[genome][compare][hit][3:]
                    scores['percent identity'].append(pident)
                    scores['alignment length fraction'].append(length_fraction)
                    scores['e-value'].append(e)
                    scores['bit score'].append(bit)
                    scores['normalized bit score'].append(norm_bit)
                if calc_threshold is True:
                    # Threshold = mean - stdevs * std of normalized bit scores.
                    norms = scores['normalized bit score']
                    average = numpy.average(norms)
                    std = numpy.std(norms)
                    normal_thresh = average - (std * stdevs)
                    print('## average normalized bit score: %s' % average, file=out)
                    print('## standard deviation of normalized bit scores: %s' % std, file=out)
                    print('## normalized bit score threshold set to: %s\n' % (normal_thresh), file=out)
                    norm_threshold[genome][compare] = normal_thresh
                    norm_threshold[compare][genome] = normal_thresh
                for score in scores:
                    print('## %s' % (score), file=out)
                    if len(scores[score]) > 0:
                        print('## average: %s' % numpy.average(scores[score]), file=out)
                    print('', file=out)

    if calc_threshold is True:
        return thresholds[0:-1] + [norm_threshold]
    return thresholds
def _partition(iter_dims, data_sources):
    """Partition *data_sources* into three groups.

    Returns a tuple of
      1. dict mapping a radio-source type to its associated data sources,
      2. list of data sources fed multiple times (they carry iteration
         dimensions),
      3. list of data sources fed exactly once.

    :raises ValueError: when one data source's shape mentions more than one
        radio-source type
    """
    source_vars = set(source_var_types().values())
    iteration_dims = set(iter_dims)

    per_source = collections.defaultdict(list)
    fed_many = []
    fed_once = []

    for ds in data_sources:
        # Does this data source belong to a radio source (point,
        # gaussian, etc.)?
        matches = source_vars.intersection(ds.shape)
        if len(matches) > 1:
            raise ValueError("Data source '{}' contains multiple "
                             "source types '{}'".format(ds.name, matches))
        if len(matches) == 1:
            # Record under its (single) source type.
            per_source[matches.pop()].append(ds)
        elif iteration_dims.intersection(ds.shape):
            # Possesses dimensions we iterate over: fed on every iteration.
            fed_many.append(ds)
        else:
            # Otherwise assume it is fed only once.
            fed_once.append(ds)

    return per_source, fed_many, fed_once
def sfo(x0, rho, optimizer, num_steps=50):
    """Proximal operator for an arbitrary function minimized via the
    Sum-of-Functions Optimizer (SFO).

    SFO targets objectives that decompose into a sum over minibatches or
    contributing functions; see Sohl-Dickstein, Poole & Ganguli (ICML 2014),
    http://arxiv.org/abs/1311.2115.

    :param x0: array_like, starting point for the proximal update step
    :param rho: float, momentum parameter (larger keeps the iterate closer
        to ``x0``)
    :param optimizer: an SFO instance (from ``SFO_admm.py``)
    :param num_steps: int, number of SFO steps to take
    :return: parameter vector after ``num_steps`` SFO iterations
    """
    # Seed the optimizer with the proximal point and momentum weight.
    optimizer.set_theta(x0, float(rho))
    # Record the previous ADMM iterate as a flattened parameter vector.
    optimizer.theta_admm_prev = optimizer.theta_original_to_flat(x0)
    # Run the optimizer and hand back its result.
    return optimizer.optimize(num_steps=num_steps)
def parse_reference(reference):
    """Parse an image reference into (repository, tag).

    :param reference: str, e.g. "registry.fedoraproject.org/fedora:27"
    :return: tuple, e.g. ("registry.fedoraproject.org/fedora", "27"); the
        tag defaults to "latest" when the reference carries none
    """
    repo, sep, tag = reference.rpartition(":")
    # No colon at all, or the candidate "tag" still contains a "/" -- then
    # the colon belonged to a registry port (e.g. localhost:5000/fedora),
    # so the whole string is the repository and the tag is implicit.
    if not sep or "/" in tag:
        return (reference, "latest")
    return (repo, tag)
def reference_pix_from_wcs(frames, pixref, origin=1):
    """Compute reference pixels between frames using WCS information.

    The sky world coordinates of *pixref* are computed with the WCS of the
    first frame in the sequence; the pixel coordinates of that sky position
    are then computed for each remaining frame.

    :param frames: sequence of frame objects exposing ``open()`` as a
        context manager yielding an HDU list
    :param pixref: reference pixel coordinates in the first frame
    :param origin: WCS origin convention (1 = FITS-style, the default)
    :return: list with the reference pixel position in each image; the
        first entry is *pixref* itself, the rest are tuples

    Fix: dropped an ``enumerate`` whose index was never used.
    """
    result = [pixref]
    with frames[0].open() as hdulist:
        wcsh = wcs.WCS(hdulist[0].header)
        # Sky coordinates of the reference pixel in the first frame.
        skyref = wcsh.wcs_pix2world([pixref], origin)
    for frame in frames[1:]:
        with frame.open() as hdulist:
            wcsh = wcs.WCS(hdulist[0].header)
            pixval = wcsh.wcs_world2pix(skyref, origin)
            result.append(tuple(pixval[0]))
    return result
def _apply_krauss(krauss: Union[Tuple[np.ndarray], Sequence[Any]],
                  args: 'ApplyChannelArgs') -> np.ndarray:
    """Directly apply the given Kraus operators to the target tensor."""
    # Zero the output accumulator.
    args.out_buffer[:] = 0
    # Preserve the initial state in auxiliary buffer 0.
    np.copyto(dst=args.auxiliary_buffer0, src=args.target_tensor)
    # Single-qubit operators take a dedicated fast path; everything else
    # falls back to the general (np.einsum based) multi-qubit routine.
    if krauss[0].shape == (2, 2):
        return _apply_krauss_single_qubit(krauss, args)
    return _apply_krauss_multi_qubit(krauss, args)
async def create_account(self, **params):
    """Validate the incoming message and create a new account document,
    plus one wallet per supported coin.

    :param params: expects ``message``, a JSON string of the form
        ``{"message": {...fields...}, "public_key": ...}``
        -- presumably produced by the API layer; verify against callers.
    :return: the freshly inserted account document, or an error dict
        ``{"error": 400, "reason": ...}`` on validation failure

    Fixes: replaced the ``True if ... else False`` list-comprehension
    membership check with a generator expression; dropped the unused
    ``wallet`` binding; defaults applied via a single ``update`` call.
    """
    logging.debug("\n\n[+] -- Create account debugging. ")
    model = {
        "unique": ["email", "public_key"],
        "required": ("public_key",),
        "default": {"count": len(settings.AVAILABLE_COIN_ID), "level": 2,
                    "news_count": 0, "email": None},
        "optional": ("phone",),
    }
    message = json.loads(params.get("message", "{}"))
    # NOTE(review): assumes message["message"] is a dict and
    # message["public_key"] is present -- malformed input raises here,
    # matching the original behavior.
    data = {**message.get("message"), "public_key": message["public_key"]}
    # All required fields must be present.
    if not all(field in data for field in model["required"]):
        return {"error": 400, "reason": "Missed required fields"}
    # Unique constraint: refuse a duplicate public key.
    if await self.collection.find_one({"public_key": data.get("public_key")}):
        return {"error": 400, "reason": "Unique violation error"}
    # Keep only known fields, then layer the defaults on top.
    row = {k: v for k, v in data.items()
           if k in model["required"] or k in model["optional"]}
    row.update(model["default"])
    if data.get("email"):
        row["email"] = data.get("email")
    row["id"] = await self.autoincrement()
    await self.collection.insert_one(row)
    account = await self.collection.find_one({"public_key": row["public_key"]})
    # Create a wallet document for every supported coin.
    for coinid in coin_ids:
        wallet_collection = client[coinid][settings.WALLET]
        await wallet_collection.insert_one({
            "account_id": account["id"],
            "wallet": self.account.validator[coinid](account["public_key"]),
        })
    return account
def request_json(self, url, params=None, data=None, as_objects=True,
                 retry_on_error=True, method=None):
    """Get the JSON processed from a page.

    :param url: the url to grab content from
    :param params: dict of GET data to put in the url
    :param data: dict of extra data to submit
    :param as_objects: if True return reddit objects else the raw json dict
    :param retry_on_error: if True retry the request on failure, for up to
        3 attempts
    :param method: HTTP method passed through to the underlying request
    :returns: JSON processed page (or the empty string for endpoints that
        return no body)
    """
    if not url.endswith('.json'):
        url += '.json'
    raw = self._request(url, params, data, method=method,
                        retry_on_error=retry_on_error)
    hook = self._json_reddit_objecter if as_objects else None
    # The objecter needs the request url available while decoding.
    self._request_url = url  # pylint: disable=W0201
    if raw == '':
        # Some of the v1 urls return nothing even when successful.
        return raw
    decoded = json.loads(raw, object_hook=hook)
    delattr(self, '_request_url')
    # Keep the modhash current when the payload carries one.
    if isinstance(decoded, dict) and 'data' in decoded and 'modhash' in decoded['data']:
        self.modhash = decoded['data']['modhash']
    return decoded
def do_kick(self, sender, body, args):
    """Kick members from the chatroom. Only admins may kick users."""
    # Deliberately compare for equality with True (not truthiness), to
    # preserve the original's exact admin check.
    is_admin = sender.get('ADMIN') == True  # noqa: E712
    if not is_admin:
        return
    for member in args:
        self.kick_user(member)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.