signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def create_for(line, search_result):
    """Build a replacement "for loop" line from the captured regex groups.

    ``line`` is a ``str.format`` template and ``search_result`` a regex match
    whose named groups supply the pieces of the loop header.  The optional
    ``cond`` group is used when the pattern defines it; ``match.group`` raises
    ``IndexError`` for an unknown group name, in which case the four-argument
    template form is used instead.
    """
    base_groups = [
        search_result.group(name)
        for name in ("indented_for", "var", "start", "stop")
    ]
    try:
        cond = search_result.group("cond")
    except IndexError:
        # Pattern without a 'cond' group: format with the four base pieces.
        return line.format(*base_groups)
    return line.format(*base_groups, cond)
|
def log_url(self, url_data):
    """Write one csv-formatted row describing the result of a URL check.

    Only the parts enabled via ``has_part`` are emitted, in a fixed column
    order.  Each value is passed through ``strformat.unicode_safe`` before
    being handed to the csv writer, and the output is flushed afterwards.
    """
    # Fixed column order: (part name, lazy value extractor).  Extractors are
    # only invoked for enabled parts, preserving the original behavior of
    # never touching disabled attributes.
    columns = (
        ("urlname", lambda: url_data.base_url),
        ("parentname", lambda: url_data.parent_url),
        ("baseref", lambda: url_data.base_ref),
        ("result", lambda: url_data.result),
        # Warnings are (tag, message) pairs; only the message text is logged.
        ("warningstring", lambda: self.linesep.join(x[1] for x in url_data.warnings)),
        ("infostring", lambda: self.linesep.join(url_data.info)),
        ("valid", lambda: url_data.valid),
        ("url", lambda: url_data.url),
        ("line", lambda: url_data.line),
        ("column", lambda: url_data.column),
        ("name", lambda: url_data.name),
        ("dltime", lambda: url_data.dltime),
        ("dlsize", lambda: url_data.size),
        ("checktime", lambda: url_data.checktime),
        # Caching info is no longer tracked; the column is kept as constant 0.
        ("cached", lambda: 0),
        ("level", lambda: url_data.level),
        ("modified", lambda: self.format_modified(url_data.modified)),
    )
    row = [extract() for part, extract in columns if self.has_part(part)]
    self.writerow(map(strformat.unicode_safe, row))
    self.flush()
|
def _unscaled_dist(self, X, X2=None):
    """Compute the Euclidean distance between each row of X and X2, or between
    each pair of rows of X if X2 is None.

    Uses the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, clamping tiny
    negative values caused by floating point cancellation before the sqrt.
    """
    # X, = self._slice_X(X)
    if X2 is None:
        # Pairwise distances within X; tdot(X) is X @ X.T (external helper).
        Xsq = np.sum(np.square(X), 1)
        r2 = -2. * tdot(X) + (Xsq[:, None] + Xsq[None, :])
        # Force the diagonal to be exactly zero: it is sometimes numerically
        # a little negative.
        util.diag.view(r2)[:, ] = 0.
        r2 = np.clip(r2, 0, np.inf)
        return np.sqrt(r2)
    else:
        # X2, = self._slice_X(X2)
        # Cross distances between rows of X and rows of X2.
        X1sq = np.sum(np.square(X), 1)
        X2sq = np.sum(np.square(X2), 1)
        r2 = -2. * np.dot(X, X2.T) + (X1sq[:, None] + X2sq[None, :])
        # Clamp small negative values from floating point cancellation.
        r2 = np.clip(r2, 0, np.inf)
        return np.sqrt(r2)
|
def has_methods(*method_names):
    """Return a predicate that checks an object for all named regular methods.

    The returned test function accepts a class or an instance and returns
    ``True`` only if every name in ``method_names`` resolves to a callable.
    For an instance, the callable must additionally be a bound method whose
    ``__self__`` is the instance itself, so classmethods and staticmethods
    found on an instance are correctly rejected.  On a *class*, however,
    classmethods and staticmethods still pass (known false positives).
    """
    def test(obj):
        # Binding can only be verified on instances; for classes any callable
        # attribute is accepted.
        check_binding = not isinstance(obj, type)
        for name in method_names:
            candidate = getattr(obj, name, None)
            if not callable(candidate):
                return False
            if check_binding:
                # A regular bound method carries __self__ pointing back at
                # the instance; anything else is not a plain method.
                try:
                    if candidate.__self__ is not obj:
                        return False
                except AttributeError:
                    return False
        return True
    return test
|
def _get_delete_query(self):
    """Get the query builder for a delete operation on the pivot.

    The query is constrained by both pivot key columns, using the current
    attribute values of this pivot instance.

    :rtype: orator.orm.Builder
    """
    foreign_value = self.get_attribute(self.__foreign_key)
    base_query = self.new_query().where(self.__foreign_key, foreign_value)
    other_value = self.get_attribute(self.__other_key)
    return base_query.where(self.__other_key, other_value)
|
def make_processitem_stringlist_string(string, condition='contains', negate=False, preserve_case=False):
    """Create a node for ProcessItem/StringList/string.

    :param string: value to match against the process string list
    :param condition: comparison operator for the term (default 'contains')
    :param negate: invert the condition when True
    :param preserve_case: make the comparison case sensitive when True
    :return: A IndicatorItem represented as an Element node
    """
    # Every ProcessItem/StringList/string term shares the same document,
    # search path and content type; only the content varies.
    return ioc_api.make_indicatoritem_node(
        condition,
        'ProcessItem',
        'ProcessItem/StringList/string',
        'string',
        string,
        negate=negate,
        preserve_case=preserve_case,
    )
|
def write_logfile():  # type: () -> None
    """Write a DEBUG log file named COMMAND-YYYYMMDD-HHMMSS.ffffff.log.

    The command name is derived from ``sys.argv[0]`` and the buffered log
    contents are taken from the module-level ``_LOGFILE_STREAM``.
    """
    script_path = os.path.realpath(os.path.abspath(sys.argv[0]))
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f')
    log_name = '{}-{}.log'.format(os.path.basename(script_path), timestamp)
    with open(log_name, 'w') as logfile:
        contents = _LOGFILE_STREAM.getvalue()
        if not six.PY3:
            # Under Python 2 the stream holds bytes; decode before writing.
            contents = contents.decode(errors='replace')  # type: ignore
        logfile.write(contents)
|
def split_demultiplexed_sampledata(data, demultiplexed):
    """Split demultiplexed FASTQ files into separate entries in the global
    sample datadict.

    Each FASTQ gets a deep copy of ``data`` renamed to
    ``<samplename>-<barcode>``, where the barcode is taken as the first
    dot-separated token of the FASTQ file name.

    :param data: original sample datadict
    :param demultiplexed: iterable of demultiplexed FASTQ paths
    :return: list of per-barcode datadicts
    """
    base_name = dd.get_sample_name(data)
    split_entries = []
    for fastq in demultiplexed:
        # The barcode is encoded as the leading component of the file name.
        barcode = os.path.basename(fastq).split(".")[0]
        new_name = base_name + "-" + barcode
        entry = copy.deepcopy(data)
        entry = dd.set_sample_name(entry, new_name)
        entry = dd.set_description(entry, new_name)
        entry["rgnames"]["rg"] = new_name
        entry["name"] = ["", new_name]
        entry["files"] = [fastq]
        split_entries.append(entry)
    return split_entries
|
def arithmetic_crossover(random, mom, dad, args):
    """Return the offspring of arithmetic crossover (AX) on the candidates.

    Each offspring allele is a weighted average of the corresponding parent
    alleles: ``ax_alpha * parent + (1 - ax_alpha) * other``.  Averaging is
    applied only at the indices listed in *ax_points* (all shared indices
    when it is ``None``), so with all defaults the offspring are simple
    averages of their parents.  The EC's bounder is applied to both
    offspring.  With probability ``1 - crossover_rate`` the parents are
    returned unchanged.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *ax_alpha* -- the weight for the averaging (default 0.5)
    - *ax_points* -- a list of points specifying the alleles to recombine
      (default None)
    """
    alpha = args.setdefault('ax_alpha', 0.5)
    points = args.setdefault('ax_points', None)
    rate = args.setdefault('crossover_rate', 1.0)
    bounder = args['_ec'].bounder
    if random.random() >= rate:
        # No crossover this time: offspring are the unmodified parents.
        return [mom, dad]
    bro, sis = copy.copy(dad), copy.copy(mom)
    if points is None:
        points = list(range(min(len(bro), len(sis))))
    for i in points:
        bro[i] = alpha * mom[i] + (1 - alpha) * dad[i]
        sis[i] = alpha * dad[i] + (1 - alpha) * mom[i]
    return [bounder(bro, args), bounder(sis, args)]
|
def get_token(self, appname, username, password):
    """Get the security token by connecting to the TouchWorks API.

    :param appname: application name (unused in the request body here)
    :param username: account user name
    :param password: account password
    :return: a ``SecurityToken`` wrapping the UUID string returned by the API
    :raises TouchWorksException: when the response is empty or is not a valid
        version-4 UUID string (all failure paths raise the same exception)
    """
    ext_exception = TouchWorksException(TouchWorksErrorMessages.GET_TOKEN_FAILED_ERROR)
    data = {'Username': username, 'Password': password}
    resp = self._http_request(TouchWorksEndPoints.GET_TOKEN, data)
    try:
        logger.debug('token : %s' % resp)
        if not resp.text:
            raise ext_exception
        try:
            # A valid token is a version-4 UUID string.
            uuid.UUID(resp.text, version=4)
            return SecurityToken(resp.text)
        except ValueError:
            logger.error('response was not valid uuid string. %s' % resp.text)
            raise ext_exception
    except Exception as ex:
        # NOTE: the ext_exception raised above is itself caught here, so every
        # failure path is logged via logger.exception and re-raised as the
        # same external exception.
        logger.exception(ex)
        raise ext_exception
|
def apply_cloud_providers_config(overrides, defaults=None):
    '''Apply the loaded cloud providers configuration.

    Merges ``overrides`` on top of ``defaults`` (falling back to
    ``PROVIDER_CONFIG_DEFAULTS``), migrates old-style dotted provider names,
    resolves ``extends`` chains between provider entries, and returns the
    final ``{alias: {driver: settings}}`` mapping.
    '''
    if defaults is None:
        defaults = PROVIDER_CONFIG_DEFAULTS
    config = defaults.copy()
    if overrides:
        config.update(overrides)
    # Is the user still using the old format in the new configuration file?!
    for name, settings in six.iteritems(config.copy()):
        if '.' in name:
            log.warning('Please switch to the new providers configuration syntax')
            # Let's help out and migrate the data
            config = old_to_new(config)
            # old_to_new will migrate the old data into the 'providers' key of
            # the config dictionary. Let's map it correctly
            for prov_name, prov_settings in six.iteritems(config.pop('providers')):
                config[prov_name] = prov_settings
            break
    providers = {}
    ext_count = 0
    for key, val in six.iteritems(config):
        if key in ('conf_file', 'include', 'default_include', 'user'):
            # Bookkeeping keys, not provider sections.
            continue
        if not isinstance(val, (list, tuple)):
            val = [val]
        else:
            # Need to check for duplicate cloud provider entries per "alias" or
            # we won't be able to properly reference it.
            handled_providers = set()
            for details in val:
                if 'driver' not in details:
                    if 'extends' not in details:
                        log.error('Please check your cloud providers configuration. '
                                  'There\'s no \'driver\' nor \'extends\' definition '
                                  'referenced.')
                    continue
                if details['driver'] in handled_providers:
                    log.error('You can only have one entry per cloud provider. For '
                              'example, if you have a cloud provider configuration '
                              'section named, \'production\', you can only have a '
                              'single entry for EC2, Joyent, Openstack, and so '
                              'forth.')
                    raise salt.exceptions.SaltCloudConfigError(
                        'The cloud provider alias \'{0}\' has multiple entries '
                        'for the \'{1[driver]}\' driver.'.format(key, details))
                handled_providers.add(details['driver'])
        for entry in val:
            if 'driver' not in entry:
                # Entries without a driver exist only to be extended from;
                # give them a unique placeholder driver name.
                entry['driver'] = '-only-extendable-{0}'.format(ext_count)
                ext_count += 1
            if key not in providers:
                providers[key] = {}
            provider = entry['driver']
            if provider not in providers[key]:
                providers[key][provider] = entry
    # Is any provider extending data!?
    # First pass: resolve every 'extends' reference to a fully qualified
    # 'alias:driver' target, looping until nothing changes.
    while True:
        keep_looping = False
        for provider_alias, entries in six.iteritems(providers.copy()):
            for driver, details in six.iteritems(entries):
                # Set a holder for the defined profiles
                providers[provider_alias][driver]['profiles'] = {}
                if 'extends' not in details:
                    continue
                extends = details.pop('extends')
                if ':' in extends:
                    # Fully qualified 'alias:driver' extends target.
                    alias, provider = extends.split(':')
                    if alias not in providers:
                        raise salt.exceptions.SaltCloudConfigError(
                            'The \'{0}\' cloud provider entry in \'{1}\' is '
                            'trying to extend data from \'{2}\' though '
                            '\'{2}\' is not defined in the salt cloud '
                            'providers loaded data.'.format(details['driver'], provider_alias, alias))
                    if provider not in providers.get(alias):
                        raise salt.exceptions.SaltCloudConfigError(
                            'The \'{0}\' cloud provider entry in \'{1}\' is '
                            'trying to extend data from \'{2}:{3}\' though '
                            '\'{3}\' is not defined in \'{1}\''.format(details['driver'], provider_alias, alias, provider))
                    details['extends'] = '{0}:{1}'.format(alias, provider)
                    # change provider details '-only-extendable-' to extended
                    # provider name
                    details['driver'] = provider
                elif providers.get(extends):
                    # Bare alias with driver entries: ambiguous target.
                    raise salt.exceptions.SaltCloudConfigError(
                        'The \'{0}\' cloud provider entry in \'{1}\' is '
                        'trying to extend from \'{2}\' and no provider was '
                        'specified. Not extending!'.format(details['driver'], provider_alias, extends))
                elif extends not in providers:
                    raise salt.exceptions.SaltCloudConfigError(
                        'The \'{0}\' cloud provider entry in \'{1}\' is '
                        'trying to extend data from \'{2}\' though \'{2}\' '
                        'is not defined in the salt cloud providers loaded '
                        'data.'.format(details['driver'], provider_alias, extends))
                else:
                    # NOTE(review): reached only when the target alias exists
                    # but is empty/falsy -- confirm this branch is intended.
                    if driver in providers.get(extends):
                        details['extends'] = '{0}:{1}'.format(extends, driver)
                    elif '-only-extendable-' in providers.get(extends):
                        details['extends'] = '{0}:{1}'.format(extends, '-only-extendable-{0}'.format(ext_count))
                    else:
                        # We're still not aware of what we're trying to extend
                        # from. Let's try on next iteration
                        details['extends'] = extends
                        keep_looping = True
        if not keep_looping:
            break
    # Second pass: merge the extended data into each extending entry.
    while True:
        # Merge provided extends
        keep_looping = False
        for alias, entries in six.iteritems(providers.copy()):
            for driver, details in six.iteritems(entries):
                if 'extends' not in details:
                    # Extends resolved or non existing, continue!
                    continue
                if 'extends' in details['extends']:
                    # Since there's a nested extends, resolve this one in the
                    # next iteration
                    keep_looping = True
                    continue
                # Let's get a reference to what we're supposed to extend
                extends = details.pop('extends')
                # Split the setting in (alias, driver)
                ext_alias, ext_driver = extends.split(':')
                # Grab a copy of what should be extended
                extended = providers.get(ext_alias).get(ext_driver).copy()
                # Merge the data to extend with the details
                extended = salt.utils.dictupdate.update(extended, details)
                # Update the providers dictionary with the merged data
                providers[alias][driver] = extended
                # Update name of the driver, now that it's populated with
                # extended information
                if driver.startswith('-only-extendable-'):
                    providers[alias][ext_driver] = providers[alias][driver]
                    # Delete driver with old name to maintain dictionary size
                    del providers[alias][driver]
        if not keep_looping:
            break
    # Now clean up any providers entry that was just used to be a data tree to
    # extend from
    for provider_alias, entries in six.iteritems(providers.copy()):
        for driver, details in six.iteritems(entries.copy()):
            if not driver.startswith('-only-extendable-'):
                continue
            log.info("There's at least one cloud driver under the '%s' "
                     'cloud provider alias which does not have the required '
                     "'driver' setting. Removing it from the available "
                     'providers listing.', provider_alias)
            providers[provider_alias].pop(driver)
        if not providers[provider_alias]:
            # Alias left empty after cleanup: drop it entirely.
            providers.pop(provider_alias)
    return providers
|
def find(self, pattern):
    """Search for ``pattern`` in the current memory segment.

    :return: the absolute position of the first occurrence (segment-relative
        offset plus ``current_position``), or -1 when not found
    """
    offset = self.current_segment.data.find(pattern)
    # Translate the segment-relative offset into an absolute position;
    # a miss is reported as -1 unchanged.
    return -1 if offset == -1 else offset + self.current_position
|
def response_helper(self, response, **kwargs):
    """Resolve marshmallow schemas inside a response definition.

    Response component helper that allows using a marshmallow
    :class:`Schema <marshmallow.Schema>` in a response definition: the
    response body schema is resolved, followed by the schema of every
    declared header.

    :param dict response: response fields. May contain a marshmallow Schema
        class or instance.
    :return: the (mutated) response dict
    """
    self.resolve_schema(response)
    for header in response.get("headers", {}).values():
        self.resolve_schema(header)
    return response
|
def _setOutputNames(self, rootname, suffix='_drz'):
    """Build the default output filename dictionary for drizzle products.

    Names are derived from the original rootname of the image; ``rootname``
    should be a single filename, so call this in a loop for chip names
    contained inside a file.

    :param rootname: base name used for all derived output files
    :param suffix: suffix for the final drizzle products (default '_drz')
    :return: dict mapping product keys to filenames (None for placeholders)
    """
    final_base = rootname + suffix
    return {
        # Derived from the original input filename, not the rootname.
        'origFilename': self._filename.replace('.fits', '_OrIg.fits'),
        # Final DRIZZLE products, named as 'process_input()' normally would.
        'outFinal': final_base + '.fits',
        'outMedian': rootname + '_med.fits',
        'outSci': final_base + '_sci.fits',
        'outWeight': final_base + '_wht.fits',
        'outContext': final_base + '_ctx.fits',
        # Single-image (per-exposure) intermediate products.
        'outSingle': rootname + '_single_sci.fits',
        'outSWeight': rootname + '_single_wht.fits',
        'outSContext': None,
        'outSky': rootname + '_sky.fits',
        'crcorImage': rootname + '_crclean.fits',
        'ivmFile': None,
    }
|
def emit_accepted(self):
    """Emit ``dlg_accepted`` with the chosen filename when the dialog closed
    properly.

    Sends:
        filename

    The signal is only emitted when the dialog result is truthy and the
    selected file's parent directory actually exists.
    """
    if not self.result():
        return
    filename = self.selectedFiles()[0]
    # Guard against paths pointing into a non-existent directory.
    if os.path.isdir(os.path.dirname(filename)):
        self.dlg_accepted.emit(filename)
|
def __roman_to_cyrillic(self, word):
    """Transliterate a romanized Russian word back into the Cyrillic alphabet.

    A Russian word formerly transliterated into the Roman alphabet in order
    to ease the stemming process is converted back to its original Cyrillic
    form.  The substitutions are applied strictly in order: multi-letter
    digraphs (e.g. ``shch``, ``kh``, ``t^s``) must be restored before their
    single-letter components.

    :param word: The word that is transliterated.
    :type word: str or unicode
    :return: word, the transliterated word.
    :rtype: unicode
    :note: This helper method is invoked by the stem method of the subclass
        RussianStemmer. It is not to be invoked directly!
    """
    # (roman, cyrillic) pairs -- the order is significant and mirrors the
    # original replace chain exactly (including the harmless duplicate "e").
    substitutions = (
        ("i^u", "\u044E"), ("i^a", "\u044F"), ("shch", "\u0449"),
        ("kh", "\u0445"), ("t^s", "\u0446"), ("ch", "\u0447"),
        ("e`", "\u044D"), ("i`", "\u0439"), ("sh", "\u0448"),
        ("k", "\u043A"), ("e", "\u0435"), ("zh", "\u0436"),
        ("a", "\u0430"), ("b", "\u0431"), ("v", "\u0432"),
        ("g", "\u0433"), ("d", "\u0434"), ("e", "\u0435"),
        ("z", "\u0437"), ("i", "\u0438"), ("l", "\u043B"),
        ("m", "\u043C"), ("n", "\u043D"), ("o", "\u043E"),
        ("p", "\u043F"), ("r", "\u0440"), ("s", "\u0441"),
        ("t", "\u0442"), ("u", "\u0443"), ("f", "\u0444"),
        ("''", "\u044A"), ("y", "\u044B"), ("'", "\u044C"),
    )
    for roman, cyrillic in substitutions:
        word = word.replace(roman, cyrillic)
    return word
|
def get_grade_entry_query_session(self):
    """Get the ``OsidSession`` associated with the grade entry query service.

    return: (osid.grading.GradeEntryQuerySession) - a
        ``GradeEntryQuerySession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_entry_query()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_query()`` is ``true``.*
    """
    if self.supports_grade_entry_query():
        # pylint: disable=no-member
        return sessions.GradeEntryQuerySession(runtime=self._runtime)
    raise errors.Unimplemented()
|
def normalize_path_in(self, client_file_name):
    """Translate a (possibly incomplete) file or module name received from
    the debugging client into an absolute file name.

    Resolution strategies are tried in order: strip the client's CWD prefix,
    use the path as-is when absolute, resolve against the launch CWD, against
    the launch script's directory, append a '.py' extension, and finally scan
    every ``sys.path`` entry.

    :param client_file_name: path as sent by the debugging client
    :return: a resolved file name, or None when nothing matched
    """
    _logger.p_debug("normalize_path_in(%s) with os.getcwd()=>%s", client_file_name, os.getcwd())
    # remove client CWD from file_path
    if client_file_name.startswith(self._CLIENT_CWD):
        file_name = client_file_name[len(self._CLIENT_CWD):]
    else:
        file_name = client_file_name
    # Try to find file using its absolute path
    if os.path.isabs(file_name) and os.path.exists(file_name):
        _logger.p_debug(" => found absolute path: '%s'", file_name)
        return file_name
    # Can we find the file relatively to launch CWD (useful with buildout)
    f = os.path.join(self._CWD, file_name)
    if os.path.exists(f):
        _logger.p_debug(" => found path relative to self._CWD: '%s'", f)
        return f
    # Can we find file relatively to launch script
    f = os.path.join(sys.path[0], file_name)
    if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
        _logger.p_debug(" => found path relative to launch script: '%s'", f)
        return f
    # Try as an absolute path after adding .py extension
    root, ext = os.path.splitext(file_name)
    if ext == '':
        f = file_name + '.py'
        if os.path.isabs(f):
            # NOTE(review): unlike the other branches, existence is not
            # checked here -- presumably deliberate; confirm against callers.
            _logger.p_debug(" => found absolute path after adding .py extension: '%s'", f)
            return f
    # Can we find the file in system path
    for dir_name in sys.path:
        # Follow symlinked sys.path entries to their real directory.
        while os.path.islink(dir_name):
            dir_name = os.readlink(dir_name)
        f = os.path.join(dir_name, file_name)
        if os.path.exists(f):
            _logger.p_debug(" => found path in sys.path: '%s'", f)
            return f
    return None
|
def write_string(self):
    """Return a representation of the id map as a string.

    The string is properly formatted to be written in '/etc/subuid' or
    '/etc/subgid': one ``name:first:count`` line per id range, newline
    separated, with no trailing newline.
    """
    # Joining newline-free entries with '\n' is equivalent to appending '\n'
    # to each entry and stripping the final one.
    lines = [
        '{}:{}:{}'.format(name, id_range.first, id_range.count)
        for name, id_range_set in self.__map.items()
        for id_range in id_range_set
    ]
    return '\n'.join(lines)
|
def set(self, item_name, item_value):
    """Set the value of an option in the configuration.

    Names joined with the instance separator address nested storage dicts,
    which are created on demand.  When the instance has a prefix, it is
    prepended to the option name first.

    :param str item_name: The name of the option to set.
    :param item_value: The value of the option to set.
    """
    if self.prefix:
        item_name = self.prefix + self.seperator + item_name
    *parents, leaf = item_name.split(self.seperator)
    node = self._storage
    for part in parents:
        # Create intermediate containers as needed.
        node = node.setdefault(part, {})
    node[leaf] = item_value
|
def data(self, resource_value, return_value=False):
    """Alias for the metric_name method.

    | HTTP Method | API Endpoint URI's                     |
    | POST        | /v2/customMetrics/{id}|{name}/data     |

    Appends ``{resource_value}/data`` to the current request URI.  When
    ``return_value`` is True the request is flagged to return the computed
    metric value (the request entity is cleared and a ``returnValue``
    payload is added).  The weight value in the posted body is optional.

    Args:
        resource_value (string): The metric name or id.
        return_value (bool): Request the computed value back (default False).
    """
    if return_value:
        # Value-returning requests carry no entity payload.
        self._request_entity = None
        self._request.add_payload('returnValue', True)
    self._request_uri = '{}/{}/data'.format(self._request_uri, resource_value)
|
def NDLimitExceeded_originator_switch_info_switchIpV6Address(self, **kwargs):
    """Auto Generated Code.

    Builds the config XML tree for
    NDLimitExceeded/originator-switch-info/switchIpV6Address and hands it to
    the callback.

    :param kwargs: must contain ``switchIpV6Address`` (element text); an
        optional ``callback`` overrides ``self._callback``.
    :return: whatever the callback returns for the assembled config element
    """
    config = ET.Element("config")
    nd_limit = ET.SubElement(config, "NDLimitExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream")
    originator = ET.SubElement(nd_limit, "originator-switch-info")
    address_el = ET.SubElement(originator, "switchIpV6Address")
    address_el.text = kwargs.pop('switchIpV6Address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def nonuniform_scale_samples(params, bounds, dists):
    """Rescale samples from the 0-to-1 range to the requested distributions.

    Arguments
    ---------
    params : numpy.ndarray
        numpy array of dimensions num_params-by-N, where N is the number of
        samples, with values in [0, 1)
    bounds : list
        per-parameter distribution arguments (two per parameter)
    dists : list
        list of distributions, one for each parameter:
        unif: uniform with lower and upper bounds
        triang: triangular with width (scale) and location of peak; location
            of peak is in percentage of width, lower bound assumed zero
        norm: normal distribution with mean and standard deviation
        lognorm: lognormal with ln-space mean and standard deviation

    Returns
    -------
    numpy.ndarray of converted values, same shape as ``params``
    """
    b = np.array(bounds)
    # Matrix receiving the converted values.
    conv_params = np.zeros_like(params)
    for i in range(conv_params.shape[1]):
        # First and second distribution arguments for this parameter.
        b1, b2 = b[i][0], b[i][1]
        dist = dists[i]
        column = params[:, i]
        if dist == 'triang':
            # Validate: positive scale, peak as a fraction in (0, 1).
            if b1 <= 0 or b2 <= 0 or b2 >= 1:
                raise ValueError('''Triangular distribution: Scale must be
                greater than zero; peak on interval [0,1]''')
            conv_params[:, i] = sp.stats.triang.ppf(column, c=b2, scale=b1, loc=0)
        elif dist == 'unif':
            if b1 >= b2:
                raise ValueError('''Uniform distribution: lower bound
                must be less than upper bound''')
            conv_params[:, i] = column * (b2 - b1) + b1
        elif dist == 'norm':
            if b2 <= 0:
                raise ValueError('''Normal distribution: stdev must be > 0''')
            conv_params[:, i] = sp.stats.norm.ppf(column, loc=b1, scale=b2)
        elif dist == 'lognorm':
            # Lognormal parameters are the ln-space mean and standard
            # deviation (natural log, not base 10).
            if b2 <= 0:
                raise ValueError('''Lognormal distribution: stdev must be > 0''')
            conv_params[:, i] = np.exp(sp.stats.norm.ppf(column, loc=b1, scale=b2))
        else:
            valid_dists = ['unif', 'triang', 'norm', 'lognorm']
            raise ValueError('Distributions: choose one of %s' % ", ".join(valid_dists))
    return conv_params
|
def create_link_button(self, text="None", uri="None"):
    """Create a link button with the given label text and URI reference.

    :param text: button label (default "None")
    :param uri: target URI (default "None")
    :return: the new Gtk.LinkButton
    """
    return Gtk.LinkButton(uri, text)
|
def set_style(rcdict, theme=None, grid=True, gridlines=u'-', ticks=False, spines=True):
    """Apply theme colors and axis styling to ``rcdict`` and push the result
    to matplotlib (and seaborn when available).

    This code has been modified from seaborn.rcmod.set_style().

    ::Arguments::
        rcdict (dict): dict of "context" properties (filled by set_context())
        theme (str): name of theme to use when setting color properties
        grid (bool): turns off axis grid if False (default: True)
        gridlines (str): linestyle used for grid lines (default: '-')
        ticks (bool): adds x,y axis ticks if True (default: False)
        spines (bool): removes axis spines if False (default: True)
    """
    # Extract style and color info for the requested theme.
    styleMap, clist = get_theme_style(theme)
    figureFace = styleMap['figureFace']
    axisFace = styleMap['axisFace']
    textColor = styleMap['textColor']
    gridColor = styleMap['gridColor']
    # Hiding the spines is done by drawing them in 'none'.
    edgeColor = styleMap['edgeColor'] if spines else 'none'
    rcdict.update({
        'figure.edgecolor': figureFace,
        'figure.facecolor': figureFace,
        'axes.facecolor': axisFace,
        'axes.edgecolor': edgeColor,
        'axes.labelcolor': textColor,
        'axes.grid': grid,
        'grid.linestyle': gridlines,
        'grid.color': gridColor,
        'text.color': textColor,
        'xtick.color': textColor,
        'ytick.color': textColor,
        'patch.edgecolor': axisFace,
        'patch.facecolor': gridColor,
        'savefig.facecolor': figureFace,
        'savefig.edgecolor': figureFace,
    })
    # Show the axes ticks when requested.
    if ticks:
        rcdict.update({"xtick.major.size": 6, "ytick.major.size": 6,
                       "xtick.minor.size": 3, "ytick.minor.size": 3})
    base_style.update(rcdict)
    # Update matplotlib with rcdict (incl. context, font, & style).
    mpl.rcParams.update(rcdict)
    # Update seaborn with rcdict when it is installed; ignore otherwise.
    try:
        import seaborn as sns
        sns.set_style(rc=rcdict)
    except Exception:
        pass
    try:
        from cycler import cycler
        # Set the color cycle to the theme's color list.
        mpl.rcParams['axes.prop_cycle'] = cycler(color=clist)
    except Exception:
        pass
    # Replace default blue, green, etc. single-letter codes with theme colors.
    for code, color in zip("bgrmyck", clist[:7]):
        rgb = mpl.colors.colorConverter.to_rgb(color)
        mpl.colors.colorConverter.colors[code] = rgb
        mpl.colors.colorConverter.cache[code] = rgb
|
def cmd_help(*args):
    """Arguments: [<command>]
    List available commands"""
    # With an explicit command name, describe just that command; otherwise
    # list every registered command in sorted order.
    if args:
        _print_command_help(args[0])
    else:
        print("Possible commands are:")
        print("")
        for cmd_name in sorted(COMMANDS.keys()):
            _print_command_help(cmd_name)
            print("")


def _print_command_help(cmd_name):
    """Print one command's underlined name followed by its docstring.

    Shared by both branches of cmd_help (the text was previously duplicated).
    Raises KeyError for unknown command names, matching the original behavior.
    """
    cmd_func = COMMANDS[cmd_name]
    print("{}:".format(cmd_name))
    print("=" * (len(cmd_name) + 1))
    print(" {}".format(cmd_func.__doc__.strip()))
|
def from_bytes(cls, bitstream, prefix_len=None):
    '''Look at the type of the message, instantiate the correct class and
    let it parse the message.

    Header layout read here: rsvd1(8 bits) | flags(8 bits) | type(8 bits) |
    rsvd2(8 bits) | length(16 bits), followed by ``length`` bytes of payload
    that are handed to the type-specific class.
    '''
    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)
    # Skip the reserved bits
    rsvd1 = bitstream.read(8)
    # Read the flags (and ignore them, no flags are defined yet)
    flags = bitstream.readlist('8*bool')
    # Read the type
    type_nr = bitstream.read('uint:8')
    # Skip the reserved bits
    rsvd2 = bitstream.read(8)
    # Read the length
    length = bitstream.read('uint:16')
    # Read the data (length is in bytes, read() takes bits)
    data = bitstream.read(length * 8)
    # Look for the right class in the LCAF type registry
    from pylisp.utils.lcaf import type_registry
    type_class = type_registry.get_type_class(type_nr)
    if not type_class:
        raise ValueError("Can't handle LCAF type {0}".format(type_nr))
    # Let the specific class handle it from now on
    return type_class._from_data_bytes(data, prefix_len, rsvd1, flags, rsvd2)
|
def view_torrent(self, torrent_id):
    """Retrieve and parse the torrent page for a given ``torrent_id``.

    The detail page is scraped by fixed table-cell index, so this is tightly
    coupled to the site's current HTML layout.

    :param torrent_id: the ID of the torrent to view
    :raises TorrentNotFoundError: if the torrent does not exist
    :returns: a :class:`TorrentPage` with a snapshot view of the torrent
        detail page
    """
    params = {'page': 'view', 'tid': torrent_id, }
    r = requests.get(self.base_url, params=params)
    content = self._get_page_content(r)
    # Check if the content div has any child elements
    if not len(content):
        # The "torrent not found" text in the page has some unicode junk
        # that we can safely ignore.
        text = str(content.text.encode('ascii', 'ignore'))
        if TORRENT_NOT_FOUND_TEXT in text:
            raise TorrentNotFoundError(TORRENT_NOT_FOUND_TEXT)
    # Fields are addressed by fixed <td> position within the detail table.
    cell_td_elems = content.findall('.//td')
    name = cell_td_elems[3].text
    category_href = content.findall(".//td[@class='viewcategory']/a[2]")[0].attrib['href']
    # The category id is the query-string value after 'cats='.
    category_value = category_href.split('cats=')[1]
    category = Category.lookup_category(category_value)
    # parse the submitter details
    submitter_a_elem = cell_td_elems[7].findall('a')[0]
    submitter_id = submitter_a_elem.attrib['href'].split('=')[1]
    submitter_name = submitter_a_elem.findall('span')[0].text
    submitter = User(submitter_id, submitter_name)
    tracker = cell_td_elems[11].text
    date_created = datetime.datetime.strptime(cell_td_elems[5].text, '%Y-%m-%d, %H:%M %Z')
    seeders = int(content.findall(".//span[@class='viewsn']")[0].text)
    leechers = int(content.findall(".//span[@class='viewln']")[0].text)
    downloads = int(content.findall(".//span[@class='viewdn']")[0].text)
    file_size = cell_td_elems[21].text
    # note that the tree returned by html5lib might not exactly match the
    # original contents of the description div
    description = ElementTree.tostring(content.findall(".//div[@class='viewdescription']")[0], encoding='utf8', method='html')
    return TorrentPage(torrent_id, name, submitter, category, tracker, date_created, seeders, leechers, downloads, file_size, description)
|
def delete_object(request, model, post_delete_redirect, object_id=None, slug=None, slug_field='slug', template_name=None, template_loader=loader, extra_context=None, login_required=False, context_processors=None, template_object_name='object'):
    """Generic object-delete function.

    The given template is used to confirm deletion when the view is
    fetched via GET; for safety, the deletion itself is only performed
    when the view is POSTed.

    Templates: ``<app_label>/<model_name>_confirm_delete.html``
    Context:
        object
            the original object being deleted
    """
    if extra_context is None:
        extra_context = {}
    # Optionally gate the view behind authentication.
    if login_required and not request.user.is_authenticated:
        return redirect_to_login(request.path)
    obj = lookup_object(model, object_id, slug, slug_field)
    if request.method != 'POST':
        # GET (or any non-POST): render the confirmation page only.
        chosen_template = template_name or "%s/%s_confirm_delete.html" % (model._meta.app_label, model._meta.object_name.lower())
        template = template_loader.get_template(chosen_template)
        context = {template_object_name: obj}
        apply_extra_context(extra_context, context)
        return HttpResponse(template.render(context=context, request=request))
    # POST: actually delete, flash a success message and redirect.
    obj.delete()
    msg = ugettext("The %(verbose_name)s was deleted.") % {"verbose_name": model._meta.verbose_name}
    messages.success(request, msg, fail_silently=True)
    return HttpResponseRedirect(post_delete_redirect)
|
def stylize(ax, name, feature):
    """Apply the shared plot styling.

    Sets *feature* as the y-axis label and *name* as a small-font title.
    """
    ax.set_ylabel(feature)
    ax.set_title(name, fontsize='small')
|
def get_subs_dict(self, qnodes=None):
    """Return the substitution dict for replacements into the template.

    Merges the optimizer-derived parameters into ``self.qparams`` (note:
    ``self.qparams`` is updated in place, mirroring the original
    behavior) and strips entries whose value is ``None``.

    Subclasses may want to customize this method.
    """
    params = self.qparams
    params.update(self.optimize_params(qnodes=qnodes))
    # Drop null values so the template never sees a None substitution.
    return {key: value for key, value in params.items() if value is not None}
|
async def umount(self):
    """Unmount this partition.

    Issues the unmount call through the API handler and replaces this
    object's backing data with the server's response.
    """
    # The ids identify the partition via its parent block device and node.
    self._data = await self._handler.unmount(system_id=self.block_device.node.system_id, device_id=self.block_device.id, id=self.id)
|
def remove_node(self, node):
    """Removes a node and its attributes from the hypergraph. Removes
    every hyperedge that contains this node.

    :param node: reference to the node being removed.
    :raises: ValueError -- No such node exists.

    Examples:
        >>> H = UndirectedHypergraph()
        >>> H.add_node("A", label="positive")
        >>> H.remove_node("A")
    """
    if not self.has_node(node):
        raise ValueError("No such node exists.")
    # Loop over every hyperedge in the star of the node;
    # i.e., over every hyperedge that contains the node.
    # NOTE(review): the removed hyperedge ids may still be referenced by
    # the stars of the hyperedges' *other* nodes -- confirm the class
    # cleans those entries elsewhere.
    for hyperedge_id in self._star[node]:
        frozen_nodes = self._hyperedge_attributes[hyperedge_id]["__frozen_nodes"]
        # Remove the node set composing the hyperedge
        del self._node_set_to_hyperedge[frozen_nodes]
        # Remove this hyperedge's attributes
        del self._hyperedge_attributes[hyperedge_id]
    # Remove node's star
    del self._star[node]
    # Remove node's attributes dictionary
    del self._node_attributes[node]
|
def golden_search(f, a, b, xatol=1e-6, ftol=1e-8, expand_bounds=False):
    """Minimize ``f`` on the interval ``[a, b]`` by golden-section search.

    Parameters
    ----------
    f : callable
        Scalar function to minimize.
    a, b : float
        Interval end points.
    xatol : float
        Absolute tolerance on the bracket width.
    ftol : float
        Relative tolerance on the difference between the two probe values.
    expand_bounds : bool
        If True, widen the interval so that the function is first
        evaluated exactly at x=a and x=b.

    Returns
    -------
    float
        Abscissa of the (approximate) minimum.
    """
    golden = 2 / (1 + math.sqrt(5))  # inverse golden ratio, ~0.618
    if expand_bounds:
        # Stretch [a, b] so the two initial interior probes land on a and b.
        lo = (golden * a - (1 - golden) * b) / (2 * golden - 1)
        hi = (golden * b - (1 - golden) * a) / (2 * golden - 1)
    else:
        lo, hi = a, b
    left = golden * lo + (1 - golden) * hi
    right = (1 - golden) * lo + golden * hi
    f_left = f(left)
    f_right = f(right)
    # Reference magnitude for the relative function tolerance.
    scale = max(abs(f_left), abs(f_right))
    # Shrink the bracket until either tolerance is satisfied.
    while abs(lo - hi) >= xatol and abs(f_left - f_right) >= ftol * scale:
        if f_right < f_left:
            # Minimum lies to the right: drop [lo, left].
            lo, left = left, right
            right = golden * left + (1 - golden) * hi
            f_left, f_right = f_right, f(right)
        else:
            # Minimum lies to the left: drop [right, hi].
            hi, right = right, left
            left = golden * right + (1 - golden) * lo
            f_right, f_left = f_left, f(left)
    return right if f_right < f_left else left
|
def get_sam_name(username):
    r'''Gets the SAM name for a user. It basically prefixes a username without a
    backslash with the computer name. If the user does not exist, a SAM
    compatible name will be returned using the local hostname as the domain.

    i.e. salt.utils.get_sam_name('Administrator') would return 'DOMAIN.COM\Administrator'

    .. note:: Long computer names are truncated to 15 characters
    '''
    try:
        sid_obj = win32security.LookupAccountName(None, username)[0]
    except pywintypes.error:
        # Account unknown: build <HOSTNAME>\username instead (NetBIOS
        # computer names are capped at 15 characters, hence the slice).
        return '\\'.join([platform.node()[:15].upper(), username])
    # Resolve the canonical account and domain names from the SID.
    username, domain, _ = win32security.LookupAccountSid(None, sid_obj)
    return '\\'.join([domain, username])
|
def get_data(self):
    """Snapshot this handler's state for :func:`.reloader.do_reload`."""
    # Shallow-copy the flat containers and deep-copy the nested ones so
    # the reloaded handler cannot alias our mutable state.
    snapshot = {
        'guarded': list(self.guarded),
        'voiced': copy.deepcopy(self.voiced),
        'opers': copy.deepcopy(self.opers),
        'features': self.features.copy(),
        'uptime': self.uptime.copy(),
        'abuselist': self.abuselist.copy(),
        'who_map': self.who_map.copy(),
    }
    return snapshot
|
def run_linter(self, linter) -> None:
    """Run a single checker class against the configured files.

    Skips linters that are not enabled in the "all" section or that
    require a newer Python; raises ModuleNotInstalled when the linter's
    required packages are missing.
    """
    self.current = linter.name
    if linter.name not in self.parser["all"].as_list("linters"):
        return
    if linter.base_pyversion > sys.version_info:
        # Linter needs a newer interpreter than the one we run under.
        return
    missing = [pkg for pkg in linter.requires_install if pkg not in self.installed]
    if missing:
        raise ModuleNotInstalled(linter.requires_install)
    linter.add_output_hook(self.out_func)
    linter.set_config(self.fn, self.parser[linter.name])
    linter.run(self.files)
    # Keep the first non-zero status code seen across linters.
    self.status_code = self.status_code or linter.status_code
|
def p_expression_lor(self, p):
    # PLY grammar action: the string below IS the production rule -- PLY
    # reads it from __doc__ to build the parser tables, so it must not
    # be edited or moved.
    'expression : expression LOR expression'
    # Build a logical-or AST node from the two operand symbols.
    p[0] = Lor(p[1], p[3], lineno=p.lineno(1))
    # Propagate the left operand's line number to the result symbol.
    p.set_lineno(0, p.lineno(1))
|
def remove_points_from_interval(self, start, end):
    """Remove all points of the time series that fall within the
    interval ``[start:end]``.
    """
    # iterperiods yields (period_start, period_end, value); only the
    # period start is a key of the underlying mapping.
    for period_start, _period_end, _value in self.iterperiods(start, end):
        # EAFP: a reported period may not correspond to a stored point
        # (e.g. boundary periods), in which case there is nothing to do.
        try:
            del self._d[period_start]
        except KeyError:
            pass
|
def _get_stream_parameters(kind, device, channels, dtype, latency, samplerate):
    """Generate PaStreamParameters struct.

    ``kind`` is ``'input'`` or ``'output'``; unspecified values
    (device, channels, samplerate) are filled in from PortAudio's
    defaults for that device.

    Returns a ``(parameters, dtype, samplerate)`` triple, where
    ``parameters`` is a cffi ``PaStreamParameters*``.
    """
    if device is None:
        # Fall back to the system default device for the given direction.
        if kind == 'input':
            device = _pa.Pa_GetDefaultInputDevice()
        elif kind == 'output':
            device = _pa.Pa_GetDefaultOutputDevice()
    info = device_info(device)
    if channels is None:
        channels = info['max_' + kind + '_channels']
    dtype = np.dtype(dtype)
    try:
        # Map the numpy dtype to the corresponding PortAudio sample format.
        sample_format = _np2pa[dtype]
    except KeyError:
        raise ValueError("Invalid " + kind + " sample format")
    if samplerate is None:
        samplerate = info['default_samplerate']
    # Final struct field (hostApiSpecificStreamInfo) is left NULL.
    parameters = ffi.new("PaStreamParameters*", (device, channels, sample_format, latency, ffi.NULL))
    return parameters, dtype, samplerate
|
def route(app_or_blueprint, rule, **options):
    """Like :meth:`flask.Flask.route` / :meth:`flask.Blueprint.route`,
    but guarantees ``POST`` is among the allowed request methods.

    Sijax passes its data via ``POST``, so every endpoint that wants
    Sijax support must accept ``POST`` requests.  Register functions
    that use Sijax like this::

        @flask_sijax.route(app, '/')
        def index():
            pass

    Declaring ``methods=['GET', 'POST']`` on a plain ``@app.route``
    achieves the same thing without this decorator.
    """
    def register(view_func):
        allowed = tuple(options.pop('methods', ('GET', 'POST')))
        # Force POST into the method list if the caller left it out.
        if 'POST' not in allowed:
            allowed += ('POST',)
        options['methods'] = allowed
        app_or_blueprint.add_url_rule(rule, None, view_func, **options)
        return view_func
    return register
|
def on(self, event, handler=None):
    """Create, add or update an event with a handler or more attached.

    Accepts several spellings and recurses until it reaches the base
    case of a single event name plus a single :class:`Event`:

    * ``event`` as a space-separated string of names
    * ``event`` as a list of names (same handler attached to each)
    * ``event`` as a dict mapping name -> handler
    * ``handler`` as a list of handlers (all attached to ``event``)
    * ``handler`` as an :class:`Event` instance (stored directly)
    """
    if isinstance(event, str) and ' ' in event:
        # event is a space-separated list of names
        self.on(event.split(' '), handler)
    elif isinstance(event, list):
        # many events share the same handler
        for each in event:
            self.on(each, handler)
    elif isinstance(event, dict):
        # event is a dict of <event, handler>
        for key, value in event.items():
            self.on(key, value)
    elif isinstance(handler, list):
        # handler is a list of handlers
        for each in handler:
            self.on(event, each)
    elif isinstance(handler, Event):
        # base case: store the Event under its name...
        self.events[event] = handler
        # ...and expose it as an attribute on self.
        # NOTE(review): setattr shadows any same-named attribute/method on
        # this object -- confirm event names can never collide with the API.
        setattr(self, event, self.events[event])
        # self.event.trigger()
    elif event in self.events:
        # add a handler to an existing event
        self.events[event].on(handler)
    else:
        # create new event with a handler attached
        self.on(event, Event(handler))
|
def read_element_using_argtuple(self, argtuple):
    """Traverse ``self.cfg_dict`` by applying each key of ``argtuple``
    in turn and return the node found there.

    Returns ``None`` for delimited formats, which have no dict backing.
    """
    # Only dict-based formats can be traversed; DELIMITED is unsupported.
    if self.format == FMT_DELIMITED:
        return None
    current = self.cfg_dict
    for segment in argtuple:
        current = current[segment]
    return current
|
def queued(values, qsize):
    """Queues up readings from *values* (the number of readings queued is
    determined by *qsize*) and begins yielding values only when the queue is
    full. For example, to "cascade" values along a sequence of LEDs::

        from gpiozero import LEDBoard, Button
        from gpiozero.tools import queued
        from signal import pause

        leds = LEDBoard(5, 6, 13, 19, 26)
        btn = Button(17)

        for i in range(4):
            leds[i].source = queued(leds[i + 1], 5)
            leds[i].source_delay = 0.01

        leds[4].source = btn

        pause()

    :raises ValueError: if *qsize* is smaller than 1.
    """
    if qsize < 1:
        raise ValueError("qsize must be 1 or larger")
    # Bug fix: normalize lazily. The original materialized the source with
    # a list comprehension, which blocks forever on infinite sources --
    # the common case for device value streams such as the Button above.
    it = iter(_normalize(v) for v in values)
    q = []
    try:
        # Fill the queue before yielding anything...
        for _ in range(qsize):
            q.append(next(it))
        # ...then emit the oldest reading and replace it with a fresh one,
        # treating ``q`` as a ring buffer.
        for i in cycle(range(qsize)):
            yield q[i]
            q[i] = next(it)
    except StopIteration:
        # Source exhausted: stop yielding.
        pass
|
def harvest_collection(community_name):
    """Harvest a Zenodo community's record metadata.

    Examples
    --------
    You can harvest record metadata for a Zenodo community via its
    identifier name. For example, the identifier for LSST Data
    Management's Zenodo collection is ``'lsst-dm'``:

    >>> from zenodio.harvest import harvest_collection
    >>> collection = harvest_collection('lsst-dm')

    ``collection`` is a :class:`~zenodio.harvest.Datacite3Collection`
    instance. Use its :meth:`~zenodio.harvest.Datacite3Collection.records`
    method to generate :class:`~zenodio.harvest.Datacite3Record` objects
    for individual records in the Zenodo collection.

    Parameters
    ----------
    community_name : str
        Zenodo community identifier.

    Returns
    -------
    collection : :class:`zenodio.harvest.Datacite3Collection`
        The :class:`~zenodio.harvest.Datacite3Collection` instance with
        record metadata downloaded from Zenodo.
    """
    url = zenodo_harvest_url(community_name)
    r = requests.get(url)
    # Bug fix: the original merely *accessed* ``r.status_code`` (a no-op
    # expression statement). Fail fast on HTTP error responses instead of
    # handing an error page to the XML parser.
    r.raise_for_status()
    return Datacite3Collection.from_collection_xml(r.content)
|
def fit(self, X, y, **fit_params):
    """See ``NeuralNet.fit``.

    In contrast to ``NeuralNet.fit``, ``y`` is non-optional to avoid
    mistakenly forgetting about ``y``. However, ``y`` can be set to
    ``None`` in case it is derived dynamically from ``X``.
    """
    # pylint: disable=useless-super-delegation
    # this is actually a pylint bug:
    # https://github.com/PyCQA/pylint/issues/1085
    return super().fit(X, y, **fit_params)
|
def _from_any(any_pb):
    """Unpack an ``Any`` protobuf into its concrete message class.

    Looks the class up by the ``Any``'s type URL and deserializes the
    payload.

    .. note::
        This assumes that the type URL is already registered.

    :type any_pb: :class:`google.protobuf.any_pb2.Any`
    :param any_pb: the ``Any`` message to convert.
    :rtype: object
    :returns: the decoded instance of the registered type.
    """
    message_class = _TYPE_URL_MAP[any_pb.type_url]
    return message_class.FromString(any_pb.value)
|
def trigger_event(self, event, client, args, force_dispatch=False):
    """Trigger a new event that will be dispatched to all modules.

    Thin delegation to the controller's event pipeline; all arguments
    are forwarded unchanged.
    """
    self.controller.process_event(event, client, args, force_dispatch=force_dispatch)
|
def module2uri(self, module_name):
    """Convert an encoded module name to an unencoded source uri.

    The parent class yields a base64 string, which is decoded and then
    zlib-decompressed; the decompressed payload is returned as ``bytes``.
    """
    encoded_str = super(EncodedModuleLoader, self).module2uri(module_name)
    encoded = encoded_str.encode('ASCII')
    # b'+&' supplies custom base64 altchars (replacing '+' and '/').
    # NOTE(review): '&' is an unusual choice -- confirm the encoder side
    # uses the same alphabet.
    compressed = base64.b64decode(encoded, b'+&')
    return zlib.decompress(compressed)
|
def graph_draw(self, mode):
    """Draws grid graph using networkx.

    This method is for debugging purposes only.
    Use ding0.tools.plots.plot_mv_topology() for advanced plotting.

    Parameters
    ----------
    mode : str
        Mode selection 'MV' or 'LV'.

    Notes
    -----
    The geo coords (for used crs see database import in class `NetworkDing0`)
    are used as positions for drawing but networkx uses cartesian crs.
    Since no coordinate transformation is performed, the drawn graph
    representation is falsified!
    """
    g = self._graph
    if mode == 'MV':
        # get draw params from nodes and edges (coordinates, colors, demands, etc.)
        nodes_pos = {}; demands = {}; demands_pos = {}
        nodes_color = []
        for node in g.nodes():
            if isinstance(node, (StationDing0, LVLoadAreaCentreDing0, CableDistributorDing0, GeneratorDing0, CircuitBreakerDing0)):
                nodes_pos[node] = (node.geo_data.x, node.geo_data.y)
                # TODO: MOVE draw/color settings to config
                if node == self.station():
                    # highlight this grid's own station
                    nodes_color.append((1, 0.5, 0.5))
                else:
                    # demands[node] = 'd=' + '{:.3f}'.format(node.grid.region.peak_load_sum)
                    # demands_pos[node] = tuple([a + b for a, b in zip(nodes_pos[node], [0.003] * len(nodes_pos[node]))])
                    nodes_color.append((0.5, 0.5, 1))
        edges_color = []
        for edge in self.graph_edges():
            if edge['branch'].critical:
                # overloaded/critical branches drawn in red
                edges_color.append((1, 0, 0))
            else:
                edges_color.append((0, 0, 0))
        plt.figure()
        nx.draw_networkx(g, nodes_pos, node_color=nodes_color, edge_color=edges_color, font_size=8)
        # nx.draw_networkx_labels(g, demands_pos, labels=demands, font_size=8)
        plt.show()
    elif mode == 'LV':
        nodes_pos = {}
        nodes_color = []
        for node in g.nodes():
            # get neighbors of station (= first node of each branch)
            # NOTE(review): this lookup is loop-invariant and could be
            # hoisted out of the node loop (kept in place here).
            station_neighbors = sorted(g.neighbors(self.station()), key=lambda _: repr(_))
            # set x-offset according to count of branches
            if len(station_neighbors) % 2 == 0:
                x_pos_start = -(len(station_neighbors) // 2 - 0.5)
            else:
                x_pos_start = -(len(station_neighbors) // 2)
            # set positions
            if isinstance(node, CableDistributorDing0):
                if node.in_building:
                    nodes_pos[node] = (x_pos_start + node.branch_no - 1 + 0.25, -node.load_no - 2)
                    nodes_color.append((0.5, 0.5, 0.5))
                else:
                    nodes_pos[node] = (x_pos_start + node.branch_no - 1, -node.load_no - 2)
                    nodes_color.append((0.5, 0.5, 0.5))
            elif isinstance(node, LoadDing0):
                nodes_pos[node] = (x_pos_start + node.branch_no - 1 + 0.5, -node.load_no - 2 - 0.25)
                nodes_color.append((0.5, 0.5, 1))
            elif isinstance(node, GeneratorDing0):
                # get neighbor of geno
                neighbor = list(g.neighbors(node))[0]
                # neighbor is cable distributor of building
                if isinstance(neighbor, CableDistributorDing0):
                    nodes_pos[node] = (x_pos_start + neighbor.branch_no - 1 + 0.5, -neighbor.load_no - 2 + 0.25)
                else:
                    nodes_pos[node] = (1, 1)
                nodes_color.append((0.5, 1, 0.5))
            elif isinstance(node, StationDing0):
                # the station sits at the origin
                nodes_pos[node] = (0, 0)
                nodes_color.append((1, 0.5, 0.5))
        plt.figure()
        nx.draw_networkx(g, nodes_pos, node_color=nodes_color, font_size=8, node_size=100)
        plt.show()
|
def disable_beacons(self):
    '''Disable beacons (the original docstring said "Enable", but the
    code sets the enabled flag to False).'''
    self.opts['beacons']['enabled'] = False
    # Fire the complete event back along with updated list of beacons
    evt = salt.utils.event.get_event('minion', opts=self.opts)
    evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, tag='/salt/minion/minion_beacons_disabled_complete')
    return True
|
def overlay_gateway_site_extend_vlan_add(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF XML that adds VLAN(s) to the extend list of an
    overlay-gateway site and hands it to the callback.

    :param name: overlay-gateway name, reused as the site name.
    :param add: VLAN id(s) to add.
    :param callback: optional callback; defaults to ``self._callback``.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    # Bug fix: the generated original popped 'name' twice, so the second
    # pop raised KeyError; fetch it once and reuse it for both elements.
    name = kwargs.pop('name')
    gw_name_key = ET.SubElement(overlay_gateway, "name")
    gw_name_key.text = name
    site = ET.SubElement(overlay_gateway, "site")
    site_name_key = ET.SubElement(site, "name")
    site_name_key.text = name
    extend = ET.SubElement(site, "extend")
    vlan = ET.SubElement(extend, "vlan")
    add = ET.SubElement(vlan, "add")
    add.text = kwargs.pop('add')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def __or(funcs, args):
    """Support list sugar for "or" of two predicates. Used inside `select`.

    Applies each predicate to ``args`` and concatenates the truthy
    results into one list.
    """
    merged = []
    for predicate in funcs:
        outcome = predicate(args)
        if outcome:
            merged.extend(outcome)
    return merged
|
def get_histogram(data):
    """Return the histogram of the given data as a sorted list of
    ``(bin_upper_bound, count)`` pairs.

    The input is assumed to be already sorted ascending; at least two
    points are required since the bin width depends on the spread.
    """
    count = len(data)
    if count < 2:
        raise StatisticsError('Too few data points ({}) for get_histogram'.format(count))
    # Bin edges derive from the data range, standard deviation and size.
    bins = get_histogram_bins(data[0], data[-1], stdev(data), count)
    histogram = {edge: 0 for edge in bins}
    for value in data:
        # A value belongs to the first bin whose upper bound reaches it.
        for edge in bins:
            if value <= edge:
                histogram[edge] += 1
                break
    return sorted(iteritems(histogram))
|
def rpc_message_to_error(rpc_error, request):
    """Converts a Telegram RPC error into the matching Python exception.

    :param rpc_error: the RpcError instance.
    :param request: the request that caused this error.
    :return: an RPCError (subclass) instance representing this error.
    """
    # Exact message match first...
    exact_cls = rpc_errors_dict.get(rpc_error.error_message, None)
    if exact_cls:
        return exact_cls(request)
    # ...then regex patterns, which may capture a numeric argument
    # (e.g. the wait time in FLOOD_WAIT_42).
    for pattern, error_cls in rpc_errors_re:
        match = re.match(pattern, rpc_error.error_message)
        if match:
            capture = int(match.group(1)) if match.groups() else None
            return error_cls(request, capture=capture)
    # Some errors are negative:
    #   * -500 for "No workers running"
    #   * -503 for "Timeout"
    # Treat them as if they were positive, so -500 maps to the same
    # base class as 500 (ServerError), etc.
    code_cls = base_errors.get(abs(rpc_error.error_code))
    if code_cls:
        return code_cls(request, rpc_error.error_message)
    return RPCError(request, rpc_error.error_message, rpc_error.error_code)
|
def switch_to_external_wf(self):
    """External workflow switcher.

    This method copies main workflow information into a temporary dict
    `main_wf` and makes the external workflow act as the main workflow.
    """
    # External WF name should be stated at main wf diagram and type should be service task.
    if (self.current.task_type == 'ServiceTask' and self.current.task.task_spec.type == 'external'):
        log.debug("Entering to EXTERNAL WF")
        # Main wf information is copied to main_wf.
        main_wf = self.wf_state.copy()
        # workflow name from main wf diagram is assigned to current workflow name.
        # workflow name must be either in task_data with key 'external_wf' or in main
        # diagram's topic.
        self.current.workflow_name = self.current.task_data.pop('external_wf', False) or self.current.task.task_spec.topic
        # For external WF, check permission and authentication. But after cleaning current task.
        self._clear_current_task()
        # check for auth and perm. current task cleared, do against new workflow_name
        self.check_for_authentication()
        self.check_for_permission()
        # wf knowledge is taken for external wf.
        # NOTE(review): 'get_worfklow_spec' is misspelled at its definition
        # site; kept as-is to match the rest of the codebase.
        self.workflow_spec = self.get_worfklow_spec()
        # New WF instance is created for external wf.
        self.workflow = self.create_workflow()
        # Current WF is this WF instance.
        self.current.workflow = self.workflow
        # main_wf: main wf information.
        # in_external: it states external wf in progress.
        # finished: it shows that main wf didn't finish, still in progress in external wf.
        self.wf_state = {'main_wf': main_wf, 'in_external': True, 'finished': False}
|
def ensure_dir_exists(directory):
    "Creates local directories if they don't exist."
    # Cloud-storage paths are not local; nothing to create.
    if directory.startswith('gs://'):
        return
    if os.path.exists(directory):
        return
    dbg("Making dir {}".format(directory))
    os.makedirs(directory, exist_ok=True)
|
def adjustReplicas(self, old_required_number_of_instances: int, new_required_number_of_instances: int):
    """Add or remove replicas depending on `f`.

    Grows or shrinks the replica set from the old instance count to the
    new one, replays stashed messages for newly added replicas, and
    keeps the primaries-disconnection bookkeeping list the same length
    as the replica set.
    """
    # TODO: refactor this
    replica_num = old_required_number_of_instances
    # Grow: add replicas and process any messages stashed for them.
    while replica_num < new_required_number_of_instances:
        self.replicas.add_replica(replica_num)
        self.processStashedMsgsForReplica(replica_num)
        replica_num += 1
    # Shrink: remove replicas from the top down.
    while replica_num > new_required_number_of_instances:
        replica_num -= 1
        self.replicas.remove_replica(replica_num)
    # Prune stashed messages according to the new instance count.
    # NOTE(review): see pop_keys for whether matching keys are kept or
    # removed -- the predicate selects instance ids below the new count.
    pop_keys(self.msgsForFutureReplicas, lambda inst_id: inst_id < new_required_number_of_instances)
    # Resize primaries_disconnection_times to exactly the new count.
    if len(self.primaries_disconnection_times) < new_required_number_of_instances:
        self.primaries_disconnection_times.extend([None] * (new_required_number_of_instances - len(self.primaries_disconnection_times)))
    elif len(self.primaries_disconnection_times) > new_required_number_of_instances:
        self.primaries_disconnection_times = self.primaries_disconnection_times[:new_required_number_of_instances]
|
def get_args(self, state, all_params, remainder, argspec, im_self):
    '''Determines the arguments for a controller based upon parameters
    passed the argument specification for the controller.

    Returns an ``(args, varargs, kwargs)`` triple ready to be applied
    to the controller callable.
    '''
    args = []
    varargs = []
    kwargs = dict()
    valid_args = argspec.args[:]
    if ismethod(state.controller) or im_self:
        valid_args.pop(0)
        # pop off `self`
    pecan_state = state.request.pecan
    # Drop empty path segments.
    remainder = [x for x in remainder if x]
    if im_self is not None:
        args.append(im_self)
    # grab the routing args from nested REST controllers
    if 'routing_args' in pecan_state:
        remainder = pecan_state['routing_args'] + list(remainder)
        del pecan_state['routing_args']
    # handle positional arguments
    if valid_args and remainder:
        args.extend(remainder[:len(valid_args)])
        remainder = remainder[len(valid_args):]
        valid_args = valid_args[len(args):]
    # handle wildcard arguments
    if [i for i in remainder if i]:
        if not argspec[1]:
            # leftover path segments but the controller takes no *args
            abort(404)
        varargs.extend(remainder)
    # get the default positional arguments
    if argspec[3]:
        defaults = dict(izip(argspec[0][-len(argspec[3]):], argspec[3]))
    else:
        defaults = dict()
    # handle positional GET/POST params
    for name in valid_args:
        if name in all_params:
            args.append(all_params.pop(name))
        elif name in defaults:
            args.append(defaults[name])
        else:
            break
    # handle wildcard GET/POST params
    if argspec[2]:
        # controller accepts **kwargs: pass through any params that are
        # not already declared positionals
        for name, value in six.iteritems(all_params):
            if name not in argspec[0]:
                kwargs[name] = value
    return args, varargs, kwargs
|
def _cmd_line_parser():
    """Build the command line parser (also used when generating the docs)."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--path', help='path to test files, if not provided the script folder is used')
    cli.add_argument('--text_output', action='store_true', help='option to save the results to text file')
    cli.add_argument('--format', default='rst', nargs='?', choices=['rst', 'md'], help='text formatting')
    return cli
|
def validate_key(request, group=None, perm=None, keytype=None):
    """Validate the request's API key against an optional group,
    permission or key type.

    :raises AccessUnauthorized: no authenticated user / valid consumer.
    :raises AccessForbidden: the key fails the requested check.
    """
    def touch():
        # Record key usage when the feature flag is on.
        if KEY_LAST_USED_UPDATE:
            request.key.save()

    if not (request.user.is_authenticated() and is_valid_consumer(request)):
        raise AccessUnauthorized
    if not group and not perm and not keytype:
        # No specific check requested: any valid key passes.
        return touch()
    if keytype:
        if request.key.is_type(keytype):
            return touch()
    elif group:
        if request.key.belongs_to_group(group):
            return touch()
    elif perm:
        if request.key.has_perm(perm):
            return touch()
    raise AccessForbidden
|
async def _watch(self):
    """Start the watching loop.

    Consumes filesystem events from ``self._watcher`` and reloads the
    watched file whenever an event for it arrives, until
    ``self._running`` becomes false.
    """
    file_name = os.path.basename(self._file_path)
    logger.info('Watching %s "%s"', self.THING, self._file_path, )
    while self._running:
        evt = await self._watcher.get_event()
        # Only react to events for our own file; the watcher may report
        # siblings from the same directory.
        if evt.name == file_name:
            await self._load()
            logger.info('Reloading changed %s from "%s"', self.THING, self._file_path)
|
async def connect(self):
    """Connects to Telegram.

    Establishes the MTProto transport, persists the (possibly newly
    negotiated) auth key in the session, sends the init request wrapped
    around ``GetConfigRequest`` and starts the background update loop.
    """
    await self._sender.connect(self._connection(self.session.server_address, self.session.port, self.session.dc_id, loop=self._loop, loggers=self._log, proxy=self._proxy))
    # The sender may have negotiated a fresh auth key; keep it.
    self.session.auth_key = self._sender.auth_key
    self.session.save()
    await self._sender.send(self._init_with(functions.help.GetConfigRequest()))
    self._updates_handle = self._loop.create_task(self._update_loop())
|
def parse_dsn(dsn_string):
    """Parse a connection string and return the associated driver.

    Returns a ``(driver_class, args, kwargs)`` triple suitable for
    instantiating the driver.

    :raises ValueError: if the scheme is not ``sqlite``, ``mysql`` or
        ``postgresql``.
    """
    dsn = urlparse(dsn_string)
    scheme = dsn.scheme.split('+')[0]
    username = password = host = port = None
    host = dsn.netloc
    if '@' in host:
        # Bug fix: rsplit, since usernames may themselves contain '@'
        # (e.g. email addresses); plain split crashed on them.
        username, host = host.rsplit('@', 1)
        if ':' in username:
            # Bug fix: maxsplit=1 -- passwords may contain ':'; the
            # unbounded split raised "too many values to unpack".
            username, password = username.split(':', 1)
            password = unquote(password)
        username = unquote(username)
    if ':' in host:
        host, port = host.split(':', 1)
        port = int(port)
    database = dsn.path.split('?')[0][1:]
    # Query string may live in the path (some DSN styles) or in dsn.query.
    query = dsn.path.split('?')[1] if '?' in dsn.path else dsn.query
    kwargs = dict(parse_qsl(query, True))
    if scheme == 'sqlite':
        return SQLiteDriver, [dsn.path], {}
    elif scheme == 'mysql':
        kwargs['user'] = username or 'root'
        kwargs['db'] = database
        if port:
            kwargs['port'] = port
        if host:
            kwargs['host'] = host
        if password:
            kwargs['passwd'] = password
        return MySQLDriver, [], kwargs
    elif scheme == 'postgresql':
        kwargs['user'] = username or 'postgres'
        kwargs['database'] = database
        if port:
            kwargs['port'] = port
        if 'unix_socket' in kwargs:
            # Allow overriding the host with a unix socket path.
            kwargs['host'] = kwargs.pop('unix_socket')
        elif host:
            kwargs['host'] = host
        if password:
            kwargs['password'] = password
        return PostgreSQLDriver, [], kwargs
    else:
        raise ValueError('Unknown driver %s' % dsn_string)
|
def save_to(self, nameprefix, switch=False):
    """Save the logger's data files under a different name prefix.

    For ``switch=True`` the logger's own name prefix is switched to the
    new value as well, so subsequent logging continues there.

    :raises _Error: if ``nameprefix`` is empty or not a string, or
        equals the reserved default prefix.
    """
    if not nameprefix or not isinstance(nameprefix, basestring):
        raise _Error('filename prefix must be a nonempty string')
    if nameprefix == self.default_prefix:
        raise _Error('cannot save to default name "' + nameprefix + '...", chose another name')
    if nameprefix == self.name_prefix:
        # Saving onto itself would truncate the source file; nothing to do.
        return
    for name in self.file_names:
        # Bug fix: use context managers so both handles are closed
        # deterministically (the original leaked both file objects).
        # Reading first also avoids truncating the destination when the
        # source file is missing.
        with open(self.name_prefix + name + '.dat') as src:
            data = src.read()
        with open(nameprefix + name + '.dat', 'w') as dst:
            dst.write(data)
    if switch:
        self.name_prefix = nameprefix
|
def list_directory(self, *args, **kwargs):
    """:meth:`.WNetworkClientProto.list_directory` method implementation.

    Extra positional/keyword arguments are accepted for interface
    compatibility but ignored; returns the WebDAV listing of the
    current session path as a tuple.
    """
    return tuple(self.dav_client().list(self.session_path()))
|
def _get_file_event_handler(self, file_path, save_name):
    """Get or create an event handler for a particular file.

    file_path: the file's actual path
    save_name: its path relative to the run directory (aka the watch directory)

    Handlers are cached per ``save_name``; the handler type encodes the
    upload policy (streaming, throttled overwrite, deferred, ...).
    """
    self._file_pusher.update_file(save_name, file_path)
    # track upload progress
    if save_name not in self._file_event_handlers:
        if save_name == 'wandb-history.jsonl':
            self._file_event_handlers['wandb-history.jsonl'] = FileEventHandlerTextStream(file_path, 'wandb-history.jsonl', self._api)
        elif save_name == 'wandb-events.jsonl':
            self._file_event_handlers['wandb-events.jsonl'] = FileEventHandlerTextStream(file_path, 'wandb-events.jsonl', self._api)
        elif 'tfevents' in save_name or 'graph.pbtxt' in save_name:
            # overwrite the tensorboard but not every reload --
            # just frequently enough to resemble realtime
            self._file_event_handlers[save_name] = FileEventHandlerThrottledOverwrite(file_path, save_name, self._api, self._file_pusher)
        # Don't try to stream tensorboard files for now.
        # elif 'tfevents' in save_name:
        #     # TODO: This is hard-coded, but we want to give users control
        #     # over streaming files (or detect them).
        #     self._api.get_file_stream_api().set_file_policy(save_name, BinaryFilePolicy())
        #     self._file_event_handlers[save_name] = FileEventHandlerBinaryStream(file_path, save_name, self._api)
        # Overwrite handler (non-deferred) has a bug, wherein if the file is truncated
        # during upload, the request to Google hangs (at least, this is my working
        # theory). So for now we defer uploading everything til the end of the run.
        # TODO: send wandb-summary during run. One option is to copy to a temporary
        # file before uploading.
        elif save_name == config.FNAME:
            self._file_event_handlers[save_name] = FileEventHandlerConfig(file_path, save_name, self._api, self._file_pusher, self._run)
        elif save_name == 'wandb-summary.json':
            # Load the summary into the syncer process for meta etc to work
            self._run.summary.load()
            self._api.get_file_stream_api().set_file_policy(save_name, OverwriteFilePolicy())
            self._file_event_handlers[save_name] = FileEventHandlerSummary(file_path, save_name, self._api, self._file_pusher, self._run)
        elif save_name.startswith('media/'):
            # Save media files immediately
            self._file_event_handlers[save_name] = FileEventHandlerOverwrite(file_path, save_name, self._api, self._file_pusher)
        else:
            # Fall back to user-declared glob policies: "live" files get
            # throttled overwrite with a minimum wait; "end" keeps the
            # deferred default.
            Handler = FileEventHandlerOverwriteDeferred
            for policy, globs in six.iteritems(self._user_file_policies):
                if policy == "end":
                    continue
                for g in globs:
                    if any(save_name in p for p in glob.glob(os.path.join(self._run.dir, g))):
                        if policy == "live":
                            Handler = FileEventHandlerThrottledOverwriteMinWait
            self._file_event_handlers[save_name] = Handler(file_path, save_name, self._api, self._file_pusher)
    return self._file_event_handlers[save_name]
|
def list_staged_files(self) -> typing.List[str]:
    """:return: staged files
    :rtype: list of str
    """
    # Diff the index against HEAD; each entry's a_path is the staged file.
    diff_entries = self.repo.index.diff('HEAD')
    staged: typing.List[str] = [entry.a_path for entry in diff_entries]
    LOGGER.debug('staged files: %s', staged)
    return staged
|
def _generic_summary(arg, exact_nunique=False, prefix=None):
    """Compute a set of summary metrics from the input value expression.

    Parameters
    ----------
    arg : value expression
    exact_nunique : boolean, default False
        Compute the exact number of distinct values (slower)
    prefix : string, default None
        String prefix for metric names

    Returns
    -------
    summary : (count, # nulls, nunique)
    """
    null_count = arg.isnull().sum().name('nulls')
    # Exact distinct counts are slower; default to the approximate version.
    if exact_nunique:
        distinct = arg.nunique().name('uniques')
    else:
        distinct = arg.approx_nunique().name('uniques')
    metrics = [arg.count(), null_count, distinct]
    return _wrap_summary_metrics(metrics, prefix)
|
def video_get_size(self, num=0):
    """Get the video size in pixels as 2-tuple (width, height).

    @param num: video number (default 0).
    """
    result = libvlc_video_get_size(self, num)
    # Anything other than a (width, height) pair means the video number
    # was invalid.
    if not (isinstance(result, tuple) and len(result) == 2):
        raise VLCException('invalid video number (%s)' % (num,))
    return result
|
def kube_pod_status_phase(self, metric, scraper_config):
    """Phase a pod is in."""
    metric_name = scraper_config['namespace'] + '.pod.status_phase'
    # Counts aggregated cluster-wide to avoid no-data issues on pod churn,
    # pod granularity available in the service checks.
    counts_by_tags = Counter()
    custom_tags = scraper_config['custom_tags']
    for sample in metric.samples:
        labels = sample[self.SAMPLE_LABELS]
        tags = [
            self._label_to_tag('namespace', labels, scraper_config),
            self._label_to_tag('phase', labels, scraper_config),
        ] + custom_tags
        # Sorted tuple so equivalent tag sets aggregate to one key.
        counts_by_tags[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
    for tags, count in iteritems(counts_by_tags):
        self.gauge(metric_name, count, tags=list(tags))
|
def get_model(self):
    """Get a model if the formula was previously satisfied.

    :return: a list of literals forming the model, an empty list when the
        solver reported SAT but produced no assignment, or ``None`` when
        there is no active solver or the last call was not satisfiable.
    """
    if self.lingeling and self.status == True:
        model = pysolvers.lingeling_model(self.lingeling)
        # Return an empty list instead of None so callers of a satisfiable
        # query can always iterate the result.  `is not None` replaces the
        # `!= None` anti-idiom (identity, not equality, is meant here).
        return model if model is not None else []
    # Explicitly document the fall-through: no solver or not SAT.
    return None
|
def _hexplot(matrix, fig, colormap):
    """Internal function to plot a hexagonal map.

    Draws one hexagon per matrix cell onto ``fig``'s current axes, colored
    by the cell value, and returns the hexagon center offsets.

    :param matrix: 2-D array of values to visualise (one value per map cell).
    :param fig: matplotlib figure whose current axes receive the polygons.
    :param colormap: name of the matplotlib colormap to use.
    :return: (n_rows * n_columns, 2) array of hexagon center coordinates.
    """
    umatrix_min = matrix.min()
    umatrix_max = matrix.max()
    n_rows, n_columns = matrix.shape
    cmap = plt.get_cmap(colormap)
    offsets = np.zeros((n_columns * n_rows, 2))
    facecolors = []
    for row in range(n_rows):
        for col in range(n_columns):
            # Even rows are shifted by half a cell to produce the staggered
            # hexagonal layout.
            # NOTE(review): the color is cmap((v - min) / max * 255), not
            # (v - min) / (max - min); for data with min != 0 the values do
            # not span [0, 1], and the *255 factor saturates a float-input
            # colormap — confirm this scaling is intended.
            if row % 2 == 0:
                offsets[row * n_columns + col] = [col + 0.5, 2 * n_rows - 2 * row]
                facecolors.append(cmap((matrix[row, col] - umatrix_min) / (umatrix_max) * 255))
            else:
                offsets[row * n_columns + col] = [col, 2 * n_rows - 2 * row]
                facecolors.append(cmap((matrix[row, col] - umatrix_min) / (umatrix_max) * 255))
    # Unit hexagon (6 vertices), scaled by 1.1 so neighbours touch.
    polygon = np.zeros((6, 2), float)
    polygon[:, 0] = 1.1 * np.array([0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
    polygon[:, 1] = 1.1 * np.array([-np.sqrt(3) / 6, np.sqrt(3) / 6, np.sqrt(3) / 2 + np.sqrt(3) / 6, np.sqrt(3) / 6, -np.sqrt(3) / 6, -np.sqrt(3) / 2 - np.sqrt(3) / 6])
    # Replicate the unit hexagon at every cell offset.
    polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
    ax = fig.gca()
    collection = mcoll.PolyCollection(polygons, offsets=offsets, facecolors=facecolors, edgecolors=facecolors, linewidths=1.0, offset_position="data")
    ax.add_collection(collection, autolim=False)
    # autolim is disabled above, so set the data limits manually.
    corners = ((-0.5, -0.5), (n_columns + 0.5, 2 * n_rows + 0.5))
    ax.update_datalim(corners)
    ax.autoscale_view(tight=True)
    return offsets
|
def _iflat_tasks_wti(self, status=None, op="==", nids=None, with_wti=True):
    """Generator producing a flat sequence of tasks.

    If status is not None, only the tasks with the specified status are
    selected. nids is an optional list of node identifiers used to filter
    the tasks.

    Returns:
        (task, work_index, task_index) if with_wti is True else task
    """
    nids = as_set(nids)
    if status is not None:
        # Get the comparison operator from its string form and accept
        # either a Task.S_FLAG or a status string.
        cmp_op = operator_from_str(op)
        status = Status.as_status(status)
    for work_index, work in enumerate(self):
        for task_index, task in enumerate(work):
            if nids and task.node_id not in nids:
                continue
            if status is not None and not cmp_op(task.status, status):
                continue
            if with_wti:
                yield task, work_index, task_index
            else:
                yield task
|
def _call_marginalizevperp(self, o, integrate_method='dopr54_c', **kwargs):
    """Call the DF, marginalizing over perpendicular velocity.

    :param o: orbit-like object supplying the position (R, phi) and the
        observed line-of-sight velocity.
    :param integrate_method: integration method name (unused in this body;
        presumably consumed by the DF machinery elsewhere — TODO confirm).
    :param kwargs: forwarded to ``integrate.quad``; an optional ``nsigma``
        entry sets the integration range in units of the velocity dispersion.
    :return: the DF value marginalized over the velocity component
        perpendicular to the line of sight.
    """
    # Get l (galactic longitude, rad) and vlos in internal units.
    l = o.ll(obs=[1., 0., 0.], ro=1.) * _DEGTORAD
    vlos = o.vlos(ro=1., vo=1., obs=[1., 0., 0., 0., 0., 0.])
    R = o.R(use_physical=False)
    phi = o.phi(use_physical=False)
    # Get local circular velocity, projected onto the los.
    if isinstance(self._pot, list):
        # Only axisymmetric components contribute to the rotation curve.
        vcirc = calcRotcurve([p for p in self._pot if not p.isNonAxi], R)[0]
    else:
        vcirc = calcRotcurve(self._pot, R)[0]
    vcirclos = vcirc * math.sin(phi + l)
    # Marginalize
    alphalos = phi + l
    if not 'nsigma' in kwargs or ('nsigma' in kwargs and kwargs['nsigma'] is None):
        nsigma = _NSIGMA
    else:
        nsigma = kwargs['nsigma']
    # Remove nsigma before forwarding the remaining kwargs to quad.
    kwargs.pop('nsigma', None)
    # BOVY: add asymmetric drift here?
    # Pick the integrand formulation based on |sin(alpha)| to avoid
    # dividing by a small quantity: tan-based for small |sin|, cot-based
    # for large |sin|.
    if math.fabs(math.sin(alphalos)) < math.sqrt(1. / 2.):
        sigmaR1 = nu.sqrt(self._initdf.sigmaT2(R, phi=phi, use_physical=False))
        # Slight abuse
        cosalphalos = math.cos(alphalos)
        tanalphalos = math.tan(alphalos)
        return integrate.quad(_marginalizeVperpIntegrandSinAlphaSmall, -nsigma, nsigma, args=(self, R, cosalphalos, tanalphalos, vlos - vcirclos, vcirc, sigmaR1, phi), **kwargs)[0] / math.fabs(cosalphalos) * sigmaR1
    else:
        sigmaR1 = nu.sqrt(self._initdf.sigmaR2(R, phi=phi, use_physical=False))
        sinalphalos = math.sin(alphalos)
        cotalphalos = 1. / math.tan(alphalos)
        return integrate.quad(_marginalizeVperpIntegrandSinAlphaLarge, -nsigma, nsigma, args=(self, R, sinalphalos, cotalphalos, vlos - vcirclos, vcirc, sigmaR1, phi), **kwargs)[0] / math.fabs(sinalphalos) * sigmaR1
|
def _data ( self ) :
"""Cached data built from instance raw _ values as a dictionary ."""
|
d = { }
# Iterate all keys and values
for k , v in self . _row_values . items ( ) : # Split related model fields
attrs = k . rsplit ( '__' , 1 )
# Set value depending case
if len ( attrs ) == 2 : # Related model field , store nested
fk , fn = attrs
if fk not in d :
d [ fk ] = { }
d [ fk ] [ fn ] = v
else : # Own model field , store directly
d [ k ] = v
# Return ( + cache ) data
return d
|
def model(self, *args, **kwargs):  # type: (*Any, **Any) -> Part
    """Retrieve single KE-chain part model.

    Uses the same interface as the :func:`part` method but returns only a
    single pykechain :class:`models.Part` instance of category `MODEL`.

    If additional `keyword=value` arguments are provided, these are added
    to the request parameters. Please refer to the documentation of the
    KE-chain API for additional query parameters.

    :return: a single :class:`models.Part`
    :raises NotFoundError: When no `Part` is found
    :raises MultipleFoundError: When more than a single `Part` is found
    """
    # Force the MODEL category and delegate the query to parts().
    kwargs['category'] = Category.MODEL
    matches = self.parts(*args, **kwargs)
    if not matches:
        raise NotFoundError("No model fits criteria")
    if len(matches) > 1:
        raise MultipleFoundError("Multiple models fit criteria")
    return matches[0]
|
def write_dag(self, out=sys.stdout):
    """Write info for all GO Terms in obo file, sorted numerically."""
    # Sorting the records gives a deterministic, numerically ordered dump.
    for record in sorted(self.values()):
        print(record, file=out)
|
def assets2s3():
    """Upload assets files to S3.

    Builds the application's asset bundles, then pushes the static files
    to the configured S3 bucket via flask_s3.
    """
    import flask_s3
    header("Assets2S3...")
    print("")
    print("Building assets files...")
    print("")
    # Compile/bundle the Flask app's assets before uploading.
    build_assets(application.app)
    print("")
    print("Uploading assets files to S3 ...")
    # flask_s3 walks the app's static folders and uploads every file.
    flask_s3.create_all(application.app)
    print("")
|
def kernels(gandi, vm, datacenter, flavor, match):
    """List available kernels."""
    # Resolve the vm name/id into its full description when given.
    if vm:
        vm = gandi.iaas.info(vm)
    datacenters = gandi.datacenter.filtered_list(datacenter, vm)
    for index, dc in enumerate(datacenters):
        # Blank line between datacenters (not before the first one).
        if index:
            gandi.echo('\n')
        output_datacenter(gandi, dc, ['dc_name'])
        kernel_map = gandi.kernel.list(dc['id'], flavor, match)
        for flavor_name in kernel_map:
            gandi.separator_line()
            output_kernels(gandi, flavor_name, kernel_map[flavor_name])
|
def cell_has_code(lines):
    """Is there any code in this cell?

    Comment-only and blank lines do not count as code; two consecutive
    blank lines end the search early.
    """
    previous_blank = False  # was the preceding raw line blank?
    for raw_line in lines:
        stripped = raw_line.strip()
        if stripped.startswith('#'):
            previous_blank = False
            continue
        if not stripped:
            # Two consecutive blank lines: no code in this cell.
            if previous_blank:
                return False
            previous_blank = True
            continue
        # Any non-blank, non-comment line is code.
        return True
    return False
|
def RegisterMessageHandler(self, handler, lease_time, limit=1000):
    """Leases a number of message handler requests up to the indicated limit."""
    # Tear down any previously running handler thread first.
    self.UnregisterMessageHandler()
    self.handler_stop = False
    worker = threading.Thread(
        name="message_handler",
        target=self._MessageHandlerLoop,
        args=(handler, lease_time, limit))
    # Daemonize so the process can exit without joining this thread.
    worker.daemon = True
    self.handler_thread = worker
    worker.start()
|
def restart_on_change_helper(lambda_f, restart_map, stopstart=False, restart_functions=None):
    """Helper function to perform the restart_on_change function.

    This is provided for decorators to restart services if files described
    in the restart_map have changed after an invocation of lambda_f().

    @param lambda_f: function to call.
    @param restart_map: {file: [service, ...]}
    @param stopstart: whether to stop, start or restart a service
    @param restart_functions: nonstandard functions to use to restart services
                              {svc: func, ...}
    @returns result of lambda_f()
    """
    if restart_functions is None:
        restart_functions = {}
    # Hash every watched file before running the wrapped function...
    pre_checksums = {path: path_hash(path) for path in restart_map}
    result = lambda_f()
    # ...then collect the service lists for files whose hash changed.
    changed = [
        restart_map[path]
        for path in restart_map
        if path_hash(path) != pre_checksums[path]
    ]
    # Flatten, de-duplicate, and preserve first-seen ordering.
    services = list(OrderedDict.fromkeys(itertools.chain(*changed)))
    if services:
        actions = ('stop', 'start') if stopstart else ('restart',)
        for svc in services:
            if svc in restart_functions:
                # Caller supplied a custom restart function for this service.
                restart_functions[svc](svc)
            else:
                for action in actions:
                    service(action, svc)
    return result
|
def verify(self, otp, for_time=None, valid_window=0):
    """Verifies the OTP passed in against the current time OTP.

    @param [String/Integer] otp the OTP to check against
    @param [Integer] valid_window extends the validity to this many counter
        ticks before and after the current one
    """
    if for_time is None:
        for_time = datetime.datetime.now()
    otp_str = str(otp)
    if not valid_window:
        return utils.strings_equal(otp_str, str(self.at(for_time)))
    # Accept the OTP of any counter tick within the window; any() stops at
    # the first match, like the original early-return loop.
    return any(
        utils.strings_equal(otp_str, str(self.at(for_time, offset)))
        for offset in range(-valid_window, valid_window + 1)
    )
|
def get_service_list(self) -> list:
    """Get a list of docker services.

    Only the manager nodes can retrieve all the services.

    Returns:
        list, all the ids of the services in swarm
    """
    # Only swarm managers may enumerate services.
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve'
                           ' all the services.')
    return [entry.short_id for entry in self._client.services.list()]
|
def create_figures(n, *fig_args, **fig_kwargs):
    '''Create multiple figures.

    Args and Kwargs are passed to `matplotlib.figure.Figure`.

    This routine is provided in order to avoid usage of pyplot which
    is stateful and not thread safe. As drawing routines in tf-matplotlib
    are called from py-funcs in their respective thread, avoid usage
    of pyplot where possible.
    '''
    figures = []
    for _ in range(n):
        figures.append(create_figure(*fig_args, **fig_kwargs))
    return figures
|
def push(self, x):
    """append items to the stack; input can be a single value or a list"""
    if isinstance(x, list):
        # A list pushes each element individually, preserving order.
        self.stack.extend(x)
    else:
        # Any non-list value (including tuples) is pushed as one item.
        self.stack.append(x)
|
def DocbookSlidesPdf(env, target, source=None, *args, **kw):
    """A pseudo-Builder, providing a Docbook toolchain for PDF slides output."""
    # Normalize targets/sources into parallel lists.
    target, source = __extend_targets_sources(target, source)
    # Make sure an XSL stylesheet is configured (defaults to slides/fo).
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_SLIDESPDF', ['slides', 'fo', 'plain.xsl'])
    # Pick the first available XSLT toolchain.
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)
    result = []
    for tgt, src in zip(target, source):
        tgt, stem = __ensure_suffix_stem(tgt, '.pdf')
        # XML -> FO via XSLT, then FO -> PDF via FOP.
        fo_nodes = __builder(env, stem + '.fo', src, **kw)
        env.Depends(fo_nodes, kw['DOCBOOK_XSL'])
        result.extend(fo_nodes)
        result.extend(__fop_builder(env, tgt, fo_nodes, **kw))
    return result
|
def listen(self):
    """Instructs handler to listen to Django request and handle
    CategoryList editor requests (if any).

    :return: None on success otherwise an exception from SitecatsException
        family is raised.
    """
    requested_action = self._request.POST.get('category_action', False)
    if not requested_action:
        # No action supplied. Pass.
        return None
    if requested_action not in self.KNOWN_ACTIONS:
        # Bug fix: the message previously contained an unfilled `%s`
        # placeholder; interpolate the offending action name.
        raise SitecatsSecurityException(
            'Unknown `category_action` - `%s` - requested.' % requested_action)
    category_base_id = self._request.POST.get('category_base_id', False)
    # The client sends the literal string 'None' for the root level.
    if category_base_id == 'None':
        category_base_id = None
    else:
        category_base_id = int(category_base_id)
    if category_base_id not in self._lists:
        # Bug fix: interpolate the unknown base id into the message.
        raise SitecatsSecurityException(
            'Unknown `category_base_id` - `%s` - requested.' % category_base_id)
    category_list = self._lists[category_base_id]
    if category_list.editor is None:
        raise SitecatsSecurityException(
            'Editor is disabled for `%s` category.' % category_list.alias)
    # Dispatch to action_<name>; membership in KNOWN_ACTIONS was verified.
    action_method = getattr(self, 'action_%s' % requested_action)
    try:
        return action_method(self._request, category_list)
    except SitecatsNewCategoryException as e:
        messages.error(self._request, e,
                       extra_tags=self.error_messages_extra_tags, fail_silently=True)
        return None
    except SitecatsValidationError as e:
        messages.error(self._request, e.messages[0],
                       extra_tags=self.error_messages_extra_tags, fail_silently=True)
        return None
    finally:
        # POST is consumed so repeated listen() calls become no-ops.
        self._request.POST = {}
|
def findlinestarts(code, dup_lines=False):
    """Find the offsets in a byte code which are start of lines in the source.

    Generate pairs (offset, lineno) as described in Python/compile.c.
    """
    raw = code.co_lnotab
    if PYTHON3:
        # Indexing bytes already yields ints on Python 3.
        byte_incrs = raw[0::2]
        line_incrs = raw[1::2]
    else:
        byte_incrs = [ord(ch) for ch in raw[0::2]]
        line_incrs = [ord(ch) for ch in raw[1::2]]
    prev_lineno = None
    lineno = code.co_firstlineno
    addr = 0
    for byte_incr, line_incr in zip(byte_incrs, line_incrs):
        if byte_incr:
            if prev_lineno != lineno or (not dup_lines and 0 < byte_incr < 255):
                yield (addr, lineno)
                prev_lineno = lineno
            addr += byte_incr
        # line_increments holds 8-bit *signed* deltas.
        if line_incr >= 0x80:
            line_incr -= 0x100
        lineno += line_incr
    # Emit the final pending line, using the last byte increment seen.
    if prev_lineno != lineno or (not dup_lines and 0 < byte_incr < 255):
        yield (addr, lineno)
|
def roundvals(self, col: str, precision: int = 2):
    """Round floats in a column. Numbers are going to be
    converted to floats if they are not already.

    :param col: column name
    :type col: str
    :param precision: float precision, defaults to 2
    :param precision: int, optional
    :example: ``ds.roundvals("mycol")``
    """
    try:
        # Coerce to float first so string/int columns round cleanly.
        self.df[col] = self.df[col].astype("float64")
        self.df[col] = self.df[col].apply(lambda value: round(value, precision))
    except Exception as e:
        # Report through the instance's error channel rather than raising.
        self.err(e, "Can not round column values")
        return
    self.ok("Rounded values in column " + col)
|
def initialize(template, service_name, environment='dev'):
    """Adds SERVICE_NAME, SERVICE_ENVIRONMENT, and DEFAULT_TAGS to the template.

    :param template:
    :param service_name:
    :param environment:
    :return:
    """
    # Environment variables win over the supplied defaults.
    template.SERVICE_NAME = os.getenv('SERVICE_NAME', service_name)
    template.SERVICE_ENVIRONMENT = os.getenv('ENV', environment).lower()
    template.DEFAULT_TAGS = troposphere.Tags(**{
        'service-name': template.SERVICE_NAME,
        'environment': template.SERVICE_ENVIRONMENT,
    })
    template.add_version("2010-09-09")
    template.add_description("Stack for %s microservice" % service_name)
|
def setAutoRangeOff(self):
    """Turns off the auto range checkbox.

    Calls _refreshNodeFromTarget, not _updateTargetFromNode, because setting
    auto range off does not require a redraw of the target.
    """
    # TODO: catch exceptions. How?
    # /argos/hdf-eos/DeepBlue-SeaWiFS-1.0_L3_20100101_v002-20110527T191319Z.h5/aerosol_optical_thickness_stddev_ocean
    if self.getRefreshBlocked():
        logger.debug("setAutoRangeOff blocked for {}".format(self.nodeName))
        return
    if self.autoRangeCti:
        self.autoRangeCti.data = False
    # NOTE(review): source indentation was lost in extraction; the refresh
    # is assumed to run unconditionally (not only when autoRangeCti is set)
    # — confirm against the original file.
    self._forceRefreshAutoRange()
|
def add_error(self, error, critical=False):
    """Adds an error to the state.

    Args:
        error: The text that will be added to the error list.
        critical: If set to True and the error is checked with check_errors,
            dfTimewolf will abort.
    """
    entry = (error, critical)
    self.errors.append(entry)
|
def go_to_line(self, line):
    """Moves the text cursor to given line.

    :param line: Line to go to.
    :type line: int
    :return: Method success.
    :rtype: bool
    """
    # Document blocks are 0-based while the public API is 1-based.
    block = self.document().findBlockByNumber(line - 1)
    cursor = self.textCursor()
    cursor.setPosition(block.position())
    self.setTextCursor(cursor)
    return True
|
def read_args(**kwargs):
    """Read controlfile parameter.

    Resolution order: explicit kwarg, config.CONTROLFILE, SQL control
    table, DynamoDB control table, and finally the --control CLI argument.
    """
    control = kwargs.get("control")
    if control:
        return Namespace(control=control)
    if config.CONTROLFILE:
        return Namespace(control=config.CONTROLFILE)
    if config.DB.get("control_table_name"):
        return Namespace(control="sql")
    if config.AWS.get("control_table_name"):
        return Namespace(control="dynamodb")
    # read cli args
    parser = argparse.ArgumentParser()
    parser.add_argument("--control", required=True, help="Control file, can be path.")
    return parser.parse_args()
|
def infer_ast_from_something(self, obj, context=None):
    """infer astroid for the given class"""
    # Work on the instance's class unless obj is itself a class.
    if hasattr(obj, "__class__") and not isinstance(obj, type):
        klass = obj.__class__
    else:
        klass = obj
    try:
        modname = klass.__module__
    except AttributeError as exc:
        raise exceptions.AstroidBuildingError(
            "Unable to get module for {class_repr}.",
            cls=klass,
            class_repr=safe_repr(klass),
        ) from exc
    except Exception as exc:
        raise exceptions.AstroidImportError(
            "Unexpected error while retrieving module for {class_repr}:\n"
            "{error}",
            cls=klass,
            class_repr=safe_repr(klass),
        ) from exc
    try:
        name = klass.__name__
    except AttributeError as exc:
        raise exceptions.AstroidBuildingError(
            "Unable to get name for {class_repr}:\n",
            cls=klass,
            class_repr=safe_repr(klass),
        ) from exc
    except Exception as exc:
        raise exceptions.AstroidImportError(
            "Unexpected error while retrieving name for {class_repr}:\n"
            "{error}",
            cls=klass,
            class_repr=safe_repr(klass),
        ) from exc
    # take care, on living object __module__ is regularly wrong :(
    modastroid = self.ast_from_module_name(modname)
    # Yield classes as-is; instantiate when the input was an instance.
    for inferred in modastroid.igetattr(name, context):
        yield inferred if klass is obj else inferred.instantiate_class()
|
def read_uic_tag(fh, tagid, planecount, offset):
    """Read a single UIC tag value from file and return tag name and value.

    UIC1Tags use an offset.

    :param fh: binary file handle positioned at the tag's value field (or
        at its 4-byte offset field when ``offset`` is true).
    :param tagid: numeric UIC tag identifier, looked up in TIFF.UIC_TAGS.
    :param planecount: number of image planes; sizes '%i'-style dtypes and
        pascal-string sequences.
    :param offset: whether the value is stored indirectly via a file offset.
    :return: (name, value) tuple; unknown tags are named '_TagId<n>'.
    """
    def read_int(count=1):
        # Read `count` little-endian uint32 values; scalar when count == 1.
        value = struct.unpack('<%iI' % count, fh.read(4 * count))
        return value[0] if count == 1 else value
    try:
        name, dtype = TIFF.UIC_TAGS[tagid]
    except IndexError:
        # unknown tag
        return '_TagId%i' % tagid, read_int()
    Fraction = TIFF.UIC_TAGS[4][1]
    if offset:
        # Remember the position of the offset field so it can be restored.
        pos = fh.tell()
        if dtype not in (int, None):
            off = read_int()
            if off < 8:
                # Offsets below the header size cannot be valid.
                if dtype is str:
                    return name, ''
                log.warning("read_uic_tag: invalid offset for tag '%s' (%i)", name, off)
                return name, off
            fh.seek(off)
    if dtype is None:
        # skip: value is read but the tag is marked private with '_'.
        name = '_' + name
        value = read_int()
    elif dtype is int:
        # int
        value = read_int()
    elif dtype is Fraction:
        # fraction stored as (numerator, denominator)
        value = read_int(2)
        value = value[0] / value[1]
    elif dtype is julian_datetime:
        # datetime stored as (julian day, milliseconds)
        value = julian_datetime(*read_int(2))
    elif dtype is read_uic_image_property:
        # ImagePropertyEx
        value = read_uic_image_property(fh)
    elif dtype is str:
        # pascal string: uint32 length, bytes, trailing NUL stripped
        size = read_int()
        if 0 <= size < 2 ** 10:
            value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
            value = bytes2str(stripnull(value))
        elif offset:
            value = ''
            log.warning("read_uic_tag: corrupt string in tag '%s'", name)
        else:
            raise ValueError('read_uic_tag: invalid string size %i' % size)
    elif dtype == '%ip':
        # sequence of pascal strings, one per image plane
        value = []
        for _ in range(planecount):
            size = read_int()
            if 0 <= size < 2 ** 10:
                string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
                string = bytes2str(stripnull(string))
                value.append(string)
            elif offset:
                log.warning("read_uic_tag: corrupt string in tag '%s'", name)
            else:
                raise ValueError('read_uic_tag: invalid string size: %i' % size)
    else:
        # struct or numpy type
        dtype = '<' + dtype
        if '%i' in dtype:
            dtype = dtype % planecount
        if '(' in dtype:
            # numpy type
            value = fh.read_array(dtype, 1)[0]
            if value.shape[-1] == 2:
                # assume fractions
                value = value[..., 0] / value[..., 1]
        else:
            # struct format
            value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
            if len(value) == 1:
                value = value[0]
    if offset:
        # Restore position to just past the 4-byte offset field.
        fh.seek(pos + 4)
    return name, value
|
def handle_program_options():
    """Parses the given options passed in at the command line.

    :return: argparse.Namespace with the parsed command-line arguments.
    """
    # NOTE: the help strings below use backslash line-continuations *inside*
    # the string literals, so the embedded leading whitespace of each
    # continued line is part of the runtime text.
    parser = argparse.ArgumentParser(description="Calculate the alpha diversity\
        of a set of samples using one or more \
        metrics and output a kernal density \
        estimator-smoothed histogram of the \
        results.")
    # Input files and grouping category.
    parser.add_argument("-m", "--map_file", help="QIIME mapping file.")
    parser.add_argument("-i", "--biom_fp", help="Path to the BIOM table")
    parser.add_argument("-c", "--category", help="Specific category from the mapping file.")
    # One or more diversity metrics (space separated).
    parser.add_argument("-d", "--diversity", default=["shannon"], nargs="+", help="The alpha diversity metric. Default \
        value is 'shannon', which will calculate the Shannon\
        entropy. Multiple metrics can be specified (space separated).\
        The full list of metrics is available at:\
        http://scikit-bio.org/docs/latest/generated/skbio.diversity.alpha.html.\
        Beta diversity metrics will be supported in the future.")
    parser.add_argument("--x_label", default=[None], nargs="+", help="The name of the diversity metric to be displayed on the\
        plot as the X-axis label. If multiple metrics are specified,\
        then multiple entries for the X-axis label should be given.")
    parser.add_argument("--color_by", help="A column name in the mapping file containing\
        hexadecimal (#FF0000) color values that will\
        be used to color the groups. Each sample ID must\
        have a color entry.")
    parser.add_argument("--plot_title", default="", help="A descriptive title that will appear at the top \
        of the output plot. Surround with quotes if there are\
        spaces in the title.")
    # Output options.
    parser.add_argument("-o", "--output_dir", default=".", help="The directory plots will be saved to.")
    parser.add_argument("--image_type", default="png", help="The type of image to save: png, svg, pdf, eps, etc...")
    parser.add_argument("--save_calculations", help="Path and name of text file to store the calculated " "diversity metrics.")
    parser.add_argument("--suppress_stats", action="store_true", help="Do not display " "significance testing results which are shown by default.")
    parser.add_argument("--show_available_metrics", action="store_true", help="Supply this parameter to see which alpha diversity metrics " " are available for usage. No calculations will be performed" " if this parameter is provided.")
    return parser.parse_args()
|
def update_dispute(self, idempotency_key=None, **params):
    """Return a deferred.

    POSTs *params* to this object's ``/dispute`` endpoint and refreshes the
    local ``dispute`` attribute from the response.

    :param idempotency_key: optional idempotency key forwarded in the
        request headers.
    :param params: dispute fields to update.
    :return: a Deferred that fires with the updated ``self.dispute``.
    """
    url = self.instance_url() + '/dispute'
    headers = populate_headers(idempotency_key)
    d = self.request('post', url, params, headers)
    # NOTE(review): `api_key` is a free name here — unless it exists at
    # module level, this callback raises NameError when it fires; it likely
    # should be `self.api_key`. TODO confirm against the original module.
    d.addCallback(lambda response: self.refresh_from({'dispute': response}, api_key, True))
    return d.addCallback(lambda _: self.dispute)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.