signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def vars(self, exclude_where=False, exclude_select=False):
    """:return: variables in query"""

    def edge_vars(edge):
        # Collect every variable referenced by a single edge/groupby clause.
        found = set()
        if is_text(edge.value):
            found.add(edge.value)
        if is_expression(edge.value):
            found |= edge.value.vars()
        domain = edge.domain
        if domain.key:
            found.add(domain.key)
        if domain.where:
            found |= domain.where.vars()
        if edge.range:
            found |= edge.range.min.vars()
            found |= edge.range.max.vars()
        if domain.partitions:
            for part in domain.partitions:
                if part.where:
                    found |= part.where.vars()
        return found

    result = set()
    try:
        result |= self.frum.vars()
    except Exception:
        pass  # best-effort: frum may not support vars()
    if not exclude_select:
        for sel in listwrap(self.select):
            result |= sel.value.vars()
    for edge in listwrap(self.edges):
        result |= edge_vars(edge)
    for edge in listwrap(self.groupby):
        result |= edge_vars(edge)
    if not exclude_where:
        result |= self.where.vars()
    for sel in listwrap(self.sort):
        result |= sel.value.vars()
    try:
        result |= UNION(w.vars() for w in self.window)
    except Exception:
        pass  # best-effort: window clauses are optional
    return result
|
def compile(file):
    """Compile a Python program into byte code

    :param file: file to be compiled
    :raises check50.Failure: if compilation fails e.g. if there is a SyntaxError
    """
    log(_("compiling {} into byte code...").format(file))
    try:
        py_compile.compile(file, doraise=True)
    except py_compile.PyCompileError as err:
        # Surface the compiler's own message lines in the log first.
        log(_("Exception raised: "))
        for line in err.msg.splitlines():
            log(line)
        raise Failure(
            _("{} raised while compiling {} (rerun with --log for more details)").format(
                err.exc_type_name, file))
|
def where_earliest(cls, user_id):
    """Return the earliest session for *user_id*, ordered by ``created_at``."""
    query = cls.query.filter_by(user_id=user_id)
    return query.order_by(cls.created_at.asc()).first()
|
def is_email_valid(email):
    """Check if *email* is a valid email address.

    :param email: string to validate
    :return: True when the whole string looks like ``local@domain.tld``
    """
    # fullmatch (not match) so trailing garbage after a valid-looking
    # prefix is rejected; match() only anchors at the start.
    pattern = re.compile(r'[\w\.-]+@[\w\.-]+[.]\w+')
    return bool(pattern.fullmatch(email))
|
def _render(self, value, format):
    """Writes javascript to call momentjs function"""
    # Emit a script tag that rewrites the value client-side via moment.js.
    script = '<script>\ndocument.write(moment(\"{t}\").{f});\n</script>'.format(
        t=value, f=format)
    return Markup(script)
|
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
       Basic support for translation comments was added.  If `comment_tags`
       is now set to a list of keywords for extraction, the extractor will
       try to find the best preceeding comment that begins with one of the
       keywords.  For best results, make sure to not have more than one
       gettext call in one line of code and the matching comment in the
       same line or the line before.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples.  (comments will be empty currently)
    """
    # Import any extra extensions requested via the comma-separated
    # 'extensions' option; i18n support is always included.
    extensions = set()
    for extension in options.get('extensions', '').split(','):
        extension = extension.strip()
        if not extension:
            continue
        extensions.add(import_string(extension))
    if InternationalizationExtension not in extensions:
        extensions.add(InternationalizationExtension)
    # Build an environment mirroring the template syntax the project uses,
    # falling back to the Jinja defaults for any unset option.
    environment = get_spontaneous_environment(
        options.get('block_start_string', BLOCK_START_STRING),
        options.get('block_end_string', BLOCK_END_STRING),
        options.get('variable_start_string', VARIABLE_START_STRING),
        options.get('variable_end_string', VARIABLE_END_STRING),
        options.get('comment_start_string', COMMENT_START_STRING),
        options.get('comment_end_string', COMMENT_END_STRING),
        options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
        options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
        str(options.get('trim_blocks', TRIM_BLOCKS)).lower() in
            ('1', 'on', 'yes', 'true'),
        NEWLINE_SEQUENCE, frozenset(extensions),
        # fill with defaults so that environments are shared
        # with other spontaneus environments.  The rest of the
        # arguments are optimizer, undefined, finalize, autoescape,
        # loader, cache size, auto reloading setting and the
        # bytecode cache
        True, Undefined, None, False, None, 0, False, None)
    source = fileobj.read().decode(options.get('encoding', 'utf-8'))
    try:
        node = environment.parse(source)
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError as e:
        # skip templates with syntax errors
        return
    # Pair each extracted message with the best preceding translator comment.
    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
|
def _get_biodata(base_file, args):
    """Retrieve biodata genome targets customized by install parameters."""
    with open(base_file) as in_handle:
        config = yaml.safe_load(in_handle)
    config["install_liftover"] = False
    config["genome_indexes"] = args.aligners
    ann_groups = config.pop("annotation_groups", {})
    # Keep only the requested genomes, attaching annotation setup to each.
    genomes = []
    for genome in config["genomes"]:
        if genome["dbkey"] in args.genomes:
            genomes.append(_setup_genome_annotations(genome, args, ann_groups))
    config["genomes"] = genomes
    return config
|
def setMode(self, mode, polarity, den, iovalue, data_length, reference, input_range, clock_enable, burn_out, channel):
    '''Write the AD7730 mode register from its individual bit fields.

    Typical values (see the AD7730 class constants):
    mode=AD7730_IDLE_MODE, polarity=AD7730_UNIPOLAR_MODE,
    den=AD7730_IODISABLE_MODE, iovalue=0b00,
    data_length=AD7730_24bitDATA_MODE, reference=AD7730_REFERENCE_5V,
    input_range=AD7730_40mVIR_MODE, clock_enable=AD7730_MCLK_ENABLE_MODE,
    burn_out=AD7730_BURNOUT_DISABLE, channel=AD7730_AIN1P_AIN1N
    '''
    # High byte: mode[7:5] polarity[4] den[3] iovalue[2:1] data_length[0]
    high_byte = (mode << 5) + (polarity << 4) + (den << 3) + (iovalue << 1) + data_length
    # Low byte: reference[7] 0[6] input_range[5:4] clock_enable[3] burn_out[2] channel[1:0]
    low_byte = (reference << 7) + (0b0 << 6) + (input_range << 4) + (clock_enable << 3) + (burn_out << 2) + channel
    self.single_write(self.AD7730_MODE_REG, [high_byte, low_byte])
|
def _get_entry_link ( self , entry ) :
"""Returns a unique link for an entry"""
|
entry_link = None
for link in entry . link :
if '/data/' not in link . href and '/lh/' not in link . href :
entry_link = link . href
break
return entry_link or entry . link [ 0 ] . href
|
def add_attrs(self, symbol, **kwargs):
    """Helper for setting symbol extension attributes"""
    extension = self.extension_name
    for name, value in kwargs.items():
        symbol.add_extension_attribute(extension, name, value)
|
def process_read_exception(exc, path, ignore=None):
    '''Common code for raising exceptions when reading a file fails.

    The ignore argument can be an iterable of integer error codes (or a
    single integer error code) that should be ignored.
    '''
    # Normalize `ignore` into a container we can test membership against.
    if ignore is None:
        ignored = ()
    elif isinstance(ignore, six.integer_types):
        ignored = (ignore,)
    else:
        ignored = ignore
    if exc.errno in ignored:
        return
    # Map the common errnos onto friendlier messages.
    if exc.errno == errno.ENOENT:
        raise CommandExecutionError('{0} does not exist'.format(path))
    if exc.errno == errno.EACCES:
        raise CommandExecutionError(
            'Permission denied reading from {0}'.format(path))
    raise CommandExecutionError(
        'Error {0} encountered reading from {1}: {2}'.format(
            exc.errno, path, exc.strerror))
|
def process_notice(self, notice):
    """This method is called on notices that need processing. Here,
    we call ``on_object`` and ``on_account`` slots."""
    object_id = notice["id"]
    space, type_, _ = object_id.split(".")
    # "a.b.x" subscriptions match any instance of that object type.
    wildcard = ".".join([space, type_, "x"])
    if object_id in self.subscription_objects:
        self.on_object(notice)
    elif wildcard in self.subscription_objects:
        self.on_object(notice)
    elif object_id[:4] == "2.6.":
        # Treat account updates separately
        self.on_account(notice)
|
def tfidf_corpus(docs=CORPUS):
    """Count the words in a corpus and return a TfidfVectorizer() as well as
    all the TFIDF vectors for the corpus.

    Args:
        docs (iterable of strs): a sequence of documents (strings)

    Returns:
        (TfidfVectorizer, tfidf_vectors)
    """
    vectorizer = TfidfVectorizer().fit(docs)
    return vectorizer, vectorizer.transform(docs)
|
def popone(self, key, *default):
    """Remove the first pair stored under *key* and return its value.

    If the key is absent, the optional *default* argument is returned
    when given; otherwise the KeyError propagates.
    """
    try:
        value = self[key]
    except KeyError:
        if default:
            return default[0]
        raise
    # Delete only the first stored pair for this key.
    first_pair_id = self._key_ids[self._conform_key(key)].pop(0)
    self._remove_pairs([first_pair_id])
    return value
|
def extract_listing(pid):
    """Extract listing; return list of tuples (artist(s), title, label)."""
    print("Extracting tracklisting...")
    listing_etree = open_listing_page(pid + '/segments.inc')
    listing = []
    for track_div in listing_etree.xpath('//div[@class="segment__track"]'):
        # Artists: join "a, b & c" style when more than one is credited.
        try:
            artist_names = track_div.xpath(
                './/span[@property="byArtist"]'
                '//span[@class="artist"]/text()')
        except ValueError:
            artist_names = ['']
        if not artist_names:
            artist_names = ['']
        if len(artist_names) > 1:
            artists = ', '.join(artist_names[:-1]) + ' & ' + artist_names[-1]
        else:
            artists = artist_names[0]
        # Title and label each unpack exactly one node, or fall back to ''.
        try:
            title, = track_div.xpath('.//p/span[@property="name"]/text()')
        except ValueError:
            title = ''
        try:
            label, = track_div.xpath(
                './/abbr[@title="Record Label"]'
                '/span[@property="name"]/text()')
        except ValueError:
            label = ''
        listing.append((artists, title, label))
    return listing
|
def get_poll_options(tweet):
    """Get the text in the options of a poll as a list.

    - If there is no poll in the Tweet, return an empty list
    - If the Tweet is in activity-streams format, raise 'NotAvailableError'

    Args:
        tweet (Tweet or dict): A Tweet object or dictionary

    Returns:
        list: list of strings, or, in the case where there is no poll,
        an empty list

    Raises:
        NotAvailableError for activity-streams format
    """
    if not is_original_format(tweet):
        raise NotAvailableError(
            "Gnip activity-streams format does not return poll options")
    try:
        return [option["text"]
                for poll in tweet["entities"]["polls"]
                for option in poll["options"]]
    except KeyError:
        # No poll entities present.
        return []
|
def delete(self, tname, where=None, where_not=None, columns=None, astype=None):
    '''Delete records from the provided table. Parameters, matching and
    output are identical to `find()`.

    Parameters
        tname : str
            Table to delete records from.
        where : dict or None (default `None`)
            Dictionary of <column, value> where value can be of str type for
            exact match or a compiled regex expression for more advanced
            matching.
        where_not : dict or None (default `None`)
            Identical to `where` but for negative-matching.
        columns : list of str, str or None (default `None`)
            Column(s) to return for the deleted records, if any.
        astype : str, type or None (default `None`)
            Type to cast the output to: `nonetype`, `dataframe`, `str`,
            `dict` or `json`. `None` falls back to the constructor's type;
            pass "nonetype" to avoid any casting.

    Returns
        records : str, list or dataframe
            Records deleted from the table; type depends on `astype`.

    See Also
        PandasDatabase.find
    '''
    tname = self._check_tname(tname)
    where = PandasDatabase._check_conditions(where)
    where_not = PandasDatabase._check_conditions(where_not)
    columns = PandasDatabase._check_type_iter(str, columns)
    # Locate matching rows, then drop them from the table by index.
    delrows = self.find(tname, where=where, where_not=where_not, astype=DataFrame)
    table = self._db[tname]
    self._db[tname] = table[~(table.index.isin(delrows.index))]
    self._print('Deleted %d records from table "%s"' % (len(delrows), tname))
    # Persist the change when auto-save is enabled.
    if self.auto_save:
        self.save()
    return self._output(delrows, astype=astype)
|
def retry(retry_count):
    """Retry decorator used during file upload and download."""
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # Up to retry_count attempts with exponential backoff between them.
            for attempt in range(retry_count):
                try:
                    return fn(*args, **kwargs)
                except Exception:
                    time.sleep(2 ** attempt)
            raise SbgError('{}: failed to complete: {}'.format(
                threading.current_thread().getName(), fn.__name__))
        return wrapper
    return decorator
|
def raw(key=None):
    '''Return the raw pillar data that is available in the module. This will
    show the pillar as it is loaded as the __pillar__ dict.

    CLI Example:

    .. code-block:: bash

        salt '*' pillar.raw

    With the optional key argument, you can select a subtree of the
    pillar raw data::

        salt '*' pillar.raw key='roles'
    '''
    if key:
        return __pillar__.get(key, {})
    return __pillar__
|
def export(self, top=True):
    """Exports object to its string representation.

    Args:
        top (bool): if True appends `internal_name` before values.
            All non-list objects should be exported with top=True; list
            objects embedded as fields in list objects should use top=False.

    Returns:
        str: The object's string representation
    """
    fields = (
        self.number_of_records_per_hour,
        self.data_period_name_or_description,
        self.data_period_start_day_of_week,
        self.data_period_start_day,
        self.data_period_end_day,
    )
    parts = [self._internal_name] if top else []
    parts.extend(self._to_str(field) for field in fields)
    return ",".join(parts)
|
def tocimxml(self, ignore_host=False, ignore_namespace=False):
    """Return the CIM-XML representation of this CIM instance path,
    as an object of an appropriate subclass of :term:`Element`.

    If the instance path has no namespace specified or if
    `ignore_namespace` is `True`, the returned CIM-XML representation is an
    `INSTANCENAME` element consistent with :term:`DSP0201`.

    Otherwise, if the instance path has no host specified or if
    `ignore_host` is `True`, the returned CIM-XML representation is a
    `LOCALINSTANCEPATH` element consistent with :term:`DSP0201`.

    Otherwise, the returned CIM-XML representation is a
    `INSTANCEPATH` element consistent with :term:`DSP0201`.

    The order of keybindings in the returned CIM-XML representation is
    preserved from the :class:`~pywbem.CIMInstanceName` object.

    Parameters:

      ignore_host (:class:`py:bool`): Ignore the host of the
        instance path, even if a host is specified.

      ignore_namespace (:class:`py:bool`): Ignore the namespace and host of
        the instance path, even if a namespace and/or host is specified.

    Returns:

      The CIM-XML representation, as an object of an appropriate subclass
      of :term:`Element`.
    """
    kbs = []
    # We no longer check that the keybindings is a NocaseDict because we
    # ensure that in the keybindings() property setter method.
    for key, value in self.keybindings.items():
        # Keybindings can be integers, booleans, strings or references.
        # References can only by instance names.
        if isinstance(value, CIMInstanceName):
            kbs.append(cim_xml.KEYBINDING(
                key, cim_xml.VALUE_REFERENCE(value.tocimxml())))
            continue
        if isinstance(value, six.text_type):
            type_ = 'string'
        elif isinstance(value, six.binary_type):
            # Binary strings are decoded so the XML carries unicode text.
            type_ = 'string'
            value = _to_unicode(value)
        elif isinstance(value, bool):
            # Note: Bool is a subtype of int, therefore bool is tested
            # before int.
            type_ = 'boolean'
            if value:
                value = 'TRUE'
            else:
                value = 'FALSE'
        elif isinstance(value, number_types):
            # Numeric CIM data types derive from Python number types.
            type_ = 'numeric'
            value = str(value)
        else:
            # Double check the type of the keybindings, because they can be
            # set individually.
            raise TypeError(
                _format("Keybinding {0!A} has invalid type: {1}",
                        key, builtin_type(value)))
        kbs.append(cim_xml.KEYBINDING(key, cim_xml.KEYVALUE(value, type_)))
    instancename_xml = cim_xml.INSTANCENAME(self.classname, kbs)
    # No namespace (or told to ignore it): bare INSTANCENAME.
    if self.namespace is None or ignore_namespace:
        return instancename_xml
    localnsp_xml = cim_xml.LOCALNAMESPACEPATH(
        [cim_xml.NAMESPACE(ns) for ns in self.namespace.split('/')])
    # Namespace but no host (or told to ignore it): LOCALINSTANCEPATH.
    if self.host is None or ignore_host:
        return cim_xml.LOCALINSTANCEPATH(localnsp_xml, instancename_xml)
    # Namespace and host: full INSTANCEPATH.
    return cim_xml.INSTANCEPATH(
        cim_xml.NAMESPACEPATH(cim_xml.HOST(self.host), localnsp_xml),
        instancename_xml)
|
def parse_django_adminopt_node(env, sig, signode):
    """A copy of sphinx.directives.CmdoptionDesc.parse_signature()

    Parses *sig* with the standard option regex, falling back to the
    simple option regex when nothing matches, and appends the option
    name/argument nodes to *signode*.

    :raises ValueError: if no option name could be parsed at all
    """
    from sphinx.domains.std import option_desc_re
    count = 0
    firstname = ''
    # Original code ran two identical loops; the second only ever executed
    # when the first regex produced no matches, so fall back explicitly.
    matches = list(option_desc_re.finditer(sig))
    if not matches:
        matches = list(simple_option_desc_re.finditer(sig))
    for m in matches:
        optname, args = m.groups()
        if count:
            signode += addnodes.desc_addname(', ', ', ')
        signode += addnodes.desc_name(optname, optname)
        signode += addnodes.desc_addname(args, args)
        if not count:
            firstname = optname
        count += 1
    if not firstname:
        raise ValueError
    return firstname
|
def iter_file_commands(self):
    """Iterator returning FileCommand objects.

    If an invalid file command is found, the line is silently
    pushed back and iteration ends.
    """
    while True:
        line = self.next_line()
        if line is None:
            return
        if not line or line.startswith(b'#'):
            continue  # skip blanks and comments
        # Search for file commands in order of likelihood
        if line.startswith(b'M '):
            yield self._parse_file_modify(line[2:])
        elif line.startswith(b'D '):
            yield commands.FileDeleteCommand(self._path(line[2:]))
        elif line.startswith(b'R '):
            old, new = self._path_pair(line[2:])
            yield commands.FileRenameCommand(old, new)
        elif line.startswith(b'C '):
            src, dest = self._path_pair(line[2:])
            yield commands.FileCopyCommand(src, dest)
        elif line.startswith(b'deleteall'):
            yield commands.FileDeleteAllCommand()
        else:
            # Not a file command: push it back and stop iterating.
            self.push_line(line)
            return
|
def log(ctx, archive_name):
    '''Get the version log for an archive'''
    _generate_api(ctx)
    archive = ctx.obj.api.get_archive(archive_name)
    archive.log()
|
def interpolate_timestamp(capture_times):
    '''Interpolate time stamps in case of identical timestamps.

    Assumes *capture_times* is sorted.  Duplicated timestamps are spread
    evenly across the interval to the next distinct timestamp.

    :param capture_times: sorted list of datetime objects
    :return: list of datetime objects with duplicates interpolated
    '''
    if len(capture_times) < 2:
        return capture_times
    # Trace identical timestamps: per unique time, how many occurrences
    # (count), how many have been emitted so far (pointer) and the gap to
    # the following distinct timestamp (interval).
    time_dict = OrderedDict()
    for i, t in enumerate(capture_times):
        if t not in time_dict:
            time_dict[t] = {"count": 0, "pointer": 0}
        if i != 0:
            interval = (t - capture_times[i - 1]).total_seconds()
            time_dict[capture_times[i - 1]]["interval"] = interval
        time_dict[t]["count"] += 1
    # Fix: dict views are not subscriptable on Python 3, so materialize the
    # keys before indexing (the original used time_dict.keys()[-1]).
    keys = list(time_dict)
    if len(keys) >= 2:
        # The last timestamp has no successor; reuse the previous interval.
        time_dict[keys[-1]]["interval"] = time_dict[keys[-2]]["interval"]
    else:
        # Only one distinct timestamp: assume a 1-second capture interval.
        time_dict[keys[0]]["interval"] = time_dict[keys[0]]["count"] * 1.
    # Interpolate: the k-th duplicate is shifted by k * interval / count.
    timestamps = []
    for t in capture_times:
        d = time_dict[t]
        offset = datetime.timedelta(
            seconds=d["pointer"] * d["interval"] / float(d["count"]))
        timestamps.append(t + offset)
        d["pointer"] += 1
    return timestamps
|
def _pcolormesh_array2d ( self , array , * args , ** kwargs ) :
"""Render an ` ~ gwpy . types . Array2D ` using ` Axes . pcolormesh `"""
|
x = numpy . concatenate ( ( array . xindex . value , array . xspan [ - 1 : ] ) )
y = numpy . concatenate ( ( array . yindex . value , array . yspan [ - 1 : ] ) )
xcoord , ycoord = numpy . meshgrid ( x , y , copy = False , sparse = True )
return self . pcolormesh ( xcoord , ycoord , array . value . T , * args , ** kwargs )
|
def _build_params ( self ) :
'''método que constrói o dicionario com os parametros que serão usados
na requisição HTTP Post ao PagSeguro
Returns :
Um dicionário com os parametros definidos no objeto Payment .'''
|
params = { }
params [ 'email' ] = self . email
params [ 'token' ] = self . token
params [ 'currency' ] = self . currency
# Atributos opcionais
if self . receiver_email :
params [ 'receiver_email' ] = self . receiver_email
if self . reference :
params [ 'reference' ] = self . reference
if self . extra_amount :
params [ 'extra_amount' ] = self . extra_amount
if self . redirect_url :
params [ 'redirect_url' ] = self . redirect_url
if self . notification_url :
params [ 'notification_url' ] = self . notification_url
if self . max_uses :
params [ 'max_uses' ] = self . max_uses
if self . max_age :
params [ 'max_age' ] = self . max_age
# TODO : Incluir metadata aqui
# Itens
for index , item in enumerate ( self . items , start = 1 ) :
params [ 'itemId%d' % index ] = item [ 'item_id' ]
params [ 'itemDescription%d' % index ] = item [ 'description' ]
params [ 'itemAmount%d' % index ] = '%.2f' % item [ 'amount' ]
params [ 'itemQuantity%s' % index ] = item [ 'quantity' ]
if item . get ( 'shipping_cost' ) :
params [ 'itemShippingCost%d' % index ] = item [ 'shipping_cost' ]
if item . get ( 'weight' ) :
params [ 'itemWeight%d' % index ] = item [ 'weight' ]
# Sender
if self . client . get ( 'email' ) :
params [ 'senderEmail' ] = self . client . get ( 'email' )
if self . client . get ( 'name' ) :
params [ 'senderName' ] = ' ' . join ( self . client . get ( 'name' ) . split ( ) )
if self . client . get ( 'phone_area_code' ) :
params [ 'senderAreaCode' ] = self . client . get ( 'phone_area_code' )
if self . client . get ( 'phone_number' ) :
params [ 'senderPhone' ] = self . client . get ( 'phone_number' )
if self . client . get ( 'cpf' ) :
params [ 'senderCPF' ] = self . client . get ( 'cpf' )
if self . client . get ( 'sender_born_date' ) :
params [ 'senderBornDate' ] = self . client . get ( 'sender_born_date' )
# Shipping
if self . shipping . get ( 'type' ) :
params [ 'shippingType' ] = self . shipping . get ( 'type' )
if self . shipping . get ( 'cost' ) :
params [ 'shippingCost' ] = '%.2f' % self . shipping . get ( 'cost' )
if self . shipping . get ( 'country' ) :
params [ 'shippingAddressCountry' ] = self . shipping . get ( 'country' )
if self . shipping . get ( 'state' ) :
params [ 'shippingAddressState' ] = self . shipping . get ( 'state' )
if self . shipping . get ( 'city' ) :
params [ 'shippingAddressCity' ] = self . shipping . get ( 'city' )
if self . shipping . get ( 'postal_code' ) :
params [ 'shippingAddressPostalCode' ] = self . shipping . get ( 'postal_code' )
if self . shipping . get ( 'district' ) :
params [ 'shippingAddressDistrict' ] = self . shipping . get ( 'district' )
if self . shipping . get ( 'street' ) :
params [ 'shippingAddressStreet' ] = self . shipping . get ( 'street' )
if self . shipping . get ( 'number' ) :
params [ 'shippingAddressNumber' ] = self . shipping . get ( 'number' )
if self . shipping . get ( 'complement' ) :
params [ 'shippingAddressComplement' ] = self . shipping . get ( 'complement' )
return params
|
def init(project_name):
    """build a minimal flask project

    Creates <cwd>/<project_name> with manage.py, requirement.txt and an
    app/ package (views, forms, __init__, templates and static files).
    NOTE: this changes the process working directory via os.chdir.
    """
    # the destination path
    dst_path = os.path.join(os.getcwd(), project_name)
    start_init_info(dst_path)
    # create dst path
    _mkdir_p(dst_path)
    os.chdir(dst_path)
    # create files
    init_code('manage.py', _manage_basic_code)
    init_code('requirement.txt', _requirement_code)
    # create app/
    app_path = os.path.join(dst_path, 'app')
    _mkdir_p(app_path)
    os.chdir(app_path)
    # create files
    init_code('views.py', _views_basic_code)
    init_code('forms.py', _forms_basic_code)
    init_code('__init__.py', _init_basic_code)
    create_templates_static_files(app_path)
    init_done_info()
|
def hscan(self, name, cursor=0, match=None, count=None):
    """Incrementally return key/value slices in a hash. Also return a cursor
    indicating the scan position.

    ``match`` allows for filtering the keys by pattern
    ``count`` allows for hint the minimum number of returns

    Returns a Future that resolves to ``(cursor, {key: value})`` once the
    pipeline executes.
    """
    with self.pipe as pipe:
        f = Future()
        res = pipe.hscan(self.redis_key(name), cursor=cursor,
                         match=match, count=count)

        def cb():
            # Runs after pipeline execution: decode each member key and its
            # value, then resolve the future with (cursor, decoded dict).
            data = {}
            m_decode = self.memberparse.decode
            for k, v in res[1].items():
                k = m_decode(k)
                v = self._value_decode(k, v)
                data[k] = v
            f.set((res[0], data))
        pipe.on_execute(cb)
        return f
|
def get_staking_cutoff(self, round_num=0, tournament=1):
    """Compute staking cutoff for the given round and tournament.

    Args:
        round_num (int, optional): The round you are interested in,
            defaults to current round.
        tournament (int, optional): ID of the tournament, defaults to 1

    Returns:
        decimal.Decimal: cutoff probability

    Raises:
        ValueError: in case of missing prize pool information
    """
    query = '''
        query($number: Int!
              $tournament: Int!) {
          rounds(number: $number
                 tournament: $tournament) {
            selection {
              outcome
              pCutoff
              bCutoff
            }
          }
        }
    '''
    arguments = {'number': round_num, 'tournament': tournament}
    result = self.raw_query(query, arguments)
    result = result['data']['rounds'][0]['selection']
    # Rounds >= 154 (and round_num == 0, i.e. the current round) use the
    # bCutoff field; earlier rounds use pCutoff.
    key = 'bCutoff' if round_num >= 154 or round_num == 0 else 'pCutoff'
    return utils.parse_float_string(result[key])
|
def run(self, args):
    """Generate dummy strings for all source po files."""
    configuration = self.configuration
    source_messages_dir = configuration.source_messages_dir
    # One converter per configured dummy locale, in order.
    converters = [Dummy(), Dummy2(), ArabicDummy()]
    for locale, converter in zip(configuration.dummy_locales, converters):
        print('Processing source language files into dummy strings, locale "{}"'.format(locale))
        for source_file in source_messages_dir.walkfiles('*.po'):
            if args.verbose:
                print(' ', source_file.relpath())
            make_dummy(source_messages_dir.joinpath(source_file), locale, converter)
        if args.verbose:
            print()
|
def GpuUsage(**kargs):
    """Get the current GPU usage of available GPUs.

    Returns a tuple ``(ok, data)``: ``(True, gpu_status)`` on success,
    otherwise ``(False, error_message)``.
    """
    usage = (False, None)
    gpu_status = {'vent_usage': {'dedicated': [], 'mem_mb': {}}}
    path_dirs = PathDirs(**kargs)
    path_dirs.host_config()
    template = Template(template=path_dirs.cfg_file)
    # get running jobs using gpus
    try:
        d_client = docker.from_env()
        c = d_client.containers.list(all=False,
                                     filters={'label': 'vent-plugin'})
        for container in c:
            if ('vent.gpu' in container.attrs['Config']['Labels'] and
                    container.attrs['Config']['Labels']['vent.gpu'] == 'yes'):
                device = container.attrs['Config']['Labels']['vent.gpu.device']
                if ('vent.gpu.dedicated' in container.attrs['Config']['Labels'] and
                        container.attrs['Config']['Labels']['vent.gpu.dedicated'] == 'yes'):
                    # Whole device reserved by this container.
                    gpu_status['vent_usage']['dedicated'].append(device)
                elif 'vent.gpu.mem_mb' in container.attrs['Config']['Labels']:
                    # Accumulate per-device memory claimed by containers.
                    if device not in gpu_status['vent_usage']['mem_mb']:
                        gpu_status['vent_usage']['mem_mb'][device] = 0
                    gpu_status['vent_usage']['mem_mb'][device] += int(
                        container.attrs['Config']['Labels']['vent.gpu.mem_mb'])
    except Exception as e:  # pragma: no cover
        logger.error('Could not get running jobs ' + str(e))
    port = '3476'
    # default docker gateway
    host = '172.17.0.1'
    # Config overrides for the nvidia-docker-plugin endpoint, if present.
    result = template.option('nvidia-docker-plugin', 'port')
    if result[0]:
        port = result[1]
    result = template.option('nvidia-docker-plugin', 'host')
    if result[0]:
        host = result[1]
    else:
        try:
            # now just requires ip, ifconfig
            route = check_output(('ip', 'route')).decode('utf-8').split('\n')
            default = ''
            # grab the default network device.
            for device in route:
                if 'default' in device:
                    default = device.split()[4]
                    break
            # grab the IP address for the default device
            ip_addr = check_output(('ifconfig', default)).decode('utf-8')
            ip_addr = ip_addr.split('\n')[1].split()[1]
            host = ip_addr
        except Exception as e:  # pragma: no cover
            # NOTE(review): adjacent string literals concatenate without a
            # space here ("...addresseswent wrong") — looks unintended.
            logger.error('Something with the ip addresses'
                         'went wrong ' + str(e))
    # have to get the info separately to determine how much memory is availabe
    nd_url = 'http://' + host + ':' + port + '/v1.0/gpu/info/json'
    try:
        r = requests.get(nd_url)
        if r.status_code == 200:
            status = r.json()
            for i, device in enumerate(status['Devices']):
                # Round global memory to the nearest power of two (MB).
                gm = int(round(math.log(int(device['Memory']['Global']), 2)))
                gpu_status[i] = {'global_memory': 2 ** gm,
                                 'cores': device['Cores']}
        else:
            usage = (False,
                     'Unable to get GPU usage request error code: ' +
                     str(r.status_code))
    except Exception as e:  # pragma: no cover
        usage = (False, 'Error: ' + str(e))
    # get actual status of each gpu
    nd_url = 'http://' + host + ':' + port + '/v1.0/gpu/status/json'
    try:
        r = requests.get(nd_url)
        if r.status_code == 200:
            status = r.json()
            for i, device in enumerate(status['Devices']):
                if i not in gpu_status:
                    gpu_status[i] = {}
                gpu_status[i]['utilization'] = device['Utilization']
                gpu_status[i]['memory'] = device['Memory']
                gpu_status[i]['processes'] = device['Processes']
            usage = (True, gpu_status)
        else:
            usage = (False,
                     'Unable to get GPU usage request error code: ' +
                     str(r.status_code))
    except Exception as e:  # pragma: no cover
        usage = (False, 'Error: ' + str(e))
    return usage
|
def web_err(i):
    """Encode an error payload in the requested content type and emit it.

    Input:  { http - http object, type - content type, bin - bytes to output }
    Output: { return - 0 }
    """
    http = i['http']  # looked up for parity with the success path
    tp = i['type']
    payload = i['bin']
    try:
        payload = payload.decode('utf-8')
    except Exception:
        pass  # already text, or not valid utf-8: pass through unchanged
    if tp == 'json':
        rx = ck.dumps_json({'dict': {'return': 1, 'error': payload}})
        if rx['return'] > 0:
            encoded = rx['error'].encode('utf8')
        else:
            encoded = rx['string'].encode('utf-8')
    elif tp == 'con':
        encoded = payload.encode('utf8')
    else:
        encoded = b'<html><body><pre>' + payload.encode('utf8') + b'</pre></body></html>'
    i['bin'] = encoded
    return web_out(i)
|
def response(code, **kwargs):
    """Generic HTTP JSON response builder.

    :param code: HTTP status code (int)
    :param kwargs: data structure serialized as the JSON body (dict)
    :return: HTTP JSON response with an explicit utf-8 content type
    """
    body = jsonify(kwargs)
    result = make_response(body, code)
    result.headers["Content-Type"] = "application/json; charset=utf-8"
    return result
|
def commit_and_run(self, commit, conf, command="sh"):
    """Commit this container id and run the provided command in it and clean up afterwards.

    Generator (used as a context): commits the given container into a
    temporary image, starts an interactive container from it running
    ``command``, yields control to the caller, and removes the temporary
    image on exit.

    :param commit: container id to commit into a throwaway image
    :param conf: configuration object (provides harpoon.docker_api and clone())
    :param command: command to run inside the intervention container
    """
    image_hash = None
    try:
        # Snapshot the container's filesystem into a new image
        image_hash = conf.harpoon.docker_api.commit(commit)["Id"]

        # Work on a clone so the caller's conf is not mutated
        new_conf = conf.clone()
        new_conf.bash = NotSpecified
        new_conf.command = command
        new_conf.image_name = image_hash
        new_conf.container_id = None
        # Unique name so repeated interventions don't collide
        new_conf.container_name = "{0}-intervention-{1}".format(conf.container_id, str(uuid.uuid1()))

        container_id = self.create_container(new_conf, False, True)
        new_conf.container_id = container_id

        try:
            # Attach a tty, run in the foreground; no nested interventions
            self.start_container(new_conf, tty=True, detach=False, is_dependency=False, no_intervention=True)
        finally:
            self.stop_container(new_conf)
        yield
    except Exception as error:
        log.error("Something failed about creating the intervention image\terror=%s", error)
        raise
    finally:
        # Best-effort removal of the temporary image, even on failure
        try:
            if image_hash:
                log.info("Removing intervened image\thash=%s", image_hash)
                conf.harpoon.docker_api.remove_image(image_hash)
        except Exception as error:
            log.error("Failed to kill intervened image\thash=%s\terror=%s", image_hash, error)
|
def admin_url(model, url, object_id=None):
    """Return the admin URL for the given model and admin url name.

    :param model: model class whose admin view is targeted
    :param url: admin url name suffix (e.g. 'change', 'delete')
    :param object_id: optional object primary key, appended as a URL argument
    """
    meta = model._meta
    view_name = "admin:%s_%s_%s" % (meta.app_label, meta.object_name.lower(), url)
    args = () if object_id is None else (object_id,)
    return reverse(view_name, args=args)
|
def on_task(self, task, response):
    '''Deal one task: run it through the project's processor and report results.

    :param task: task dict; must contain 'taskid', and 'project' selects the
        processor ('project_updatetime'/'project_md5sum' pick the build)
    :param response: raw fetcher response, rebuilt into a Response object
    :returns: True (results/status are reported via the queues, not returned)
    '''
    start_time = time.time()
    response = rebuild_response(response)

    try:
        assert 'taskid' in task, 'need taskid in task'
        project = task['project']
        updatetime = task.get('project_updatetime', None)
        md5sum = task.get('project_md5sum', None)
        # fetch the (possibly cached) compiled project module/instance
        project_data = self.project_manager.get(project, updatetime, md5sum)
        assert project_data, "no such project!"
        if project_data.get('exception'):
            # project failed to build: report the build error as the result
            ret = ProcessorResult(logs=(project_data.get('exception_log'), ), exception=project_data['exception'])
        else:
            ret = project_data['instance'].run_task(project_data['module'], task, response)
    except Exception as e:
        # any processing failure becomes the task's result
        logstr = traceback.format_exc()
        ret = ProcessorResult(logs=(logstr, ), exception=e)
    process_time = time.time() - start_time

    if not ret.extinfo.get('not_send_status', False):
        if ret.exception:
            # keep all response headers for debugging a failed task
            track_headers = dict(response.headers)
        else:
            # on success only track cache-validation headers
            track_headers = {}
            for name in ('etag', 'last-modified'):
                if name not in response.headers:
                    continue
                track_headers[name] = response.headers[name]

        status_pack = {
            'taskid': task['taskid'],
            'project': task['project'],
            'url': task.get('url'),
            'track': {
                'fetch': {
                    'ok': response.isok(),
                    'redirect_url': response.url if response.url != response.orig_url else None,
                    'time': response.time,
                    'error': response.error,
                    'status_code': response.status_code,
                    'encoding': getattr(response, '_encoding', None),
                    'headers': track_headers,
                    # content only kept (truncated) when something went wrong
                    'content': response.text[:500] if ret.exception else None,
                },
                'process': {
                    'ok': not ret.exception,
                    'time': process_time,
                    'follows': len(ret.follows),
                    'result': (None if ret.result is None else utils.text(ret.result)[:self.RESULT_RESULT_LIMIT]),
                    'logs': ret.logstr()[-self.RESULT_LOGS_LIMIT:],
                    'exception': ret.exception,
                },
                'save': ret.save,
            },
        }
        if 'schedule' in task:
            status_pack['schedule'] = task['schedule']

        # FIXME: unicode_obj should used in scheduler before store to database
        # it's used here for performance.
        self.status_queue.put(utils.unicode_obj(status_pack))

    # FIXME: unicode_obj should used in scheduler before store to database
    # it's used here for performance.
    if ret.follows:
        # push follow-up tasks in chunks of 1000 to bound message size
        for each in (ret.follows[x:x + 1000] for x in range(0, len(ret.follows), 1000)):
            self.newtask_queue.put([utils.unicode_obj(newtask) for newtask in each])

    for project, msg, url in ret.messages:
        try:
            # inter-project messages are delivered as synthetic _on_message tasks
            self.on_task({
                'taskid': utils.md5string(url),
                'project': project,
                'url': url,
                'process': {
                    'callback': '_on_message',
                },
            }, {
                'status_code': 200,
                'url': url,
                'save': (task['project'], msg),
            })
        except Exception as e:
            logger.exception('Sending message error.')
            continue

    if ret.exception:
        logger_func = logger.error
    else:
        logger_func = logger.info
    logger_func('process %s:%s %s -> [%d] len:%d -> result:%.10r fol:%d msg:%d err:%r' % (task['project'], task['taskid'], task.get('url'), response.status_code, len(response.content), ret.result, len(ret.follows), len(ret.messages), ret.exception))
    return True
|
def _request_process_json_bulk ( self , response_data ) :
"""Handle bulk JSON response
Return :
( string ) : The response data
( string ) : The response status"""
|
status = 'Failure'
data = response_data . get ( self . request_entity , [ ] )
if data :
status = 'Success'
return data , status
|
def import_cluster_template(self, api_cluster_template, add_repositories=False):
    """Create a cluster according to the provided template.

    @param api_cluster_template: cluster template to import
    @param add_repositories: if true, the parcel repositories named in the
        cluster template will be added
    @return: Command handling the cluster import
    @since: API v12
    """
    extra_params = dict(addRepositories=add_repositories)
    return self._post(
        "importClusterTemplate",
        ApiCommand,
        False,
        api_cluster_template,
        params=extra_params,
        api_version=12,
    )
|
def aggr(array, op, initial_value, ty):
    """Compute the aggregate of the elements in the array via a Weld merger.

    Args:
        array (WeldObject / numpy.ndarray): input array to aggregate
        op (str): op string used to aggregate the array (+ / *)
        initial_value (int): initial value for aggregation (kept for
            interface compatibility; not embedded in the template)
        ty (WeldType): type of each element in the input array
    Returns:
        A WeldObject representing this computation
    """
    weld_obj = WeldObject(encoder_, decoder_)
    array_var = weld_obj.update(array)
    # When the input is itself a lazy Weld computation, reference it by id
    # and record the dependency so it is materialized first.
    if isinstance(array, WeldObject):
        array_var = array.obj_id
        weld_obj.dependencies[array_var] = array
    weld_template = """
      result(
        for(
          %(array)s,
          merger[%(ty)s,%(op)s],
          |b, i, e| merge(b, e)
        )
      )
    """
    weld_obj.weld_code = weld_template % {"array": array_var, "ty": ty, "op": op}
    return weld_obj
|
def list_market_profit_and_loss(self, market_ids, include_settled_bets=None, include_bsp_bets=None, net_of_commission=None, session=None, lightweight=None):
    """Retrieve profit and loss for a given list of OPEN markets.

    :param list market_ids: List of markets to calculate profit and loss
    :param bool include_settled_bets: Option to include settled bets (partially settled markets only)
    :param bool include_bsp_bets: Option to include BSP bets
    :param bool net_of_commission: Option to return profit and loss net of users current commission
        rate for this market including any special tariffs
    :param requests.session session: Requests session object
    :param bool lightweight: If True will return dict not a resource

    :rtype: list[resources.MarketProfitLoss]
    """
    # NOTE: clean_locals(locals()) must stay the first statement -- it
    # snapshots exactly the call arguments; any local introduced before it
    # would leak into the request parameters.
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'listMarketProfitAndLoss')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.MarketProfitLoss, elapsed_time, lightweight)
|
def update_payload(self, fields=None):
    """Build the update payload, renaming ``system_ids`` to ``system_uuids``
    when that key is present in the inherited payload."""
    payload = super(HostCollection, self).update_payload(fields)
    try:
        payload['system_uuids'] = payload.pop('system_ids')
    except KeyError:
        pass  # nothing to rename
    return payload
|
def component_title(component):
    """Label, title and caption.

    Title is the label text plus the title text, separated by a space when
    both are present; title may contain markup such as italic tags. An
    'abstract' component with no text gets the literal title 'Abstract'.
    """
    label_text = component.get('label') or u''
    title_text = component.get('title') or u''
    title = unicode_value(label_text)
    if label_text != '' and title_text != '':
        title += ' '
    title += unicode_value(title_text)
    if component.get('type') == 'abstract' and title == '':
        title = 'Abstract'
    return title
|
def winnow_by_keys(dct, keys=None, filter_func=None):
    """Separate a dict into has-keys and not-has-keys pairs, using either
    a list of keys or a filtering function.

    With neither *keys* nor *filter_func* supplied, every entry lands in
    the not-has side.
    """
    matched = {}
    unmatched = {}
    for key, value in dct.items():
        if keys is not None:
            passes = key in keys
        elif filter_func is not None:
            passes = filter_func(key)
        else:
            passes = False
        (matched if passes else unmatched)[key] = value
    return WinnowedResult(matched, unmatched)
|
def list(self, networkipv4=None, ipv4=None):
    """List all DHCPRelayIPv4 entries, optionally filtered.

    :param networkipv4: networkipv4 id - list all dhcprelay filtering by networkipv4 id
    :param ipv4: ipv4 id - list all dhcprelay filtering by ipv4 id
    :return: dict with the dhcprelay entries (see API documentation)
    :raise NetworkAPIException: Falha ao acessar fonte de dados
    """
    uri = 'api/dhcprelayv4/?'
    if networkipv4:
        uri = '%snetworkipv4=%s&' % (uri, networkipv4)
    if ipv4:
        uri = '%sipv4=%s' % (uri, ipv4)
    return self.get(uri)
|
def delete_queue(queues):
    """Delete the given queues (all configured queues when none are given)."""
    current_queues.delete(queues=queues)
    reported = queues or current_queues.queues.keys()
    click.secho('Queues {} have been deleted.'.format(reported), fg='green')
|
def validate(self, instance, value):  # pylint: disable=inconsistent-return-statements
    """Check if input is a valid string based on the choices.

    Returns the canonical choice key when *value* matches a key or one of
    its aliases (case-insensitively unless ``case_sensitive``); otherwise
    reports an error via ``self.error``.
    """
    if not isinstance(value, string_types):
        self.error(instance, value)
    def norm(text):
        # identity when case-sensitive, upper-cased comparison otherwise
        return text if self.case_sensitive else text.upper()
    probe = norm(value)
    for key, aliases in self.choices.items():
        if probe == norm(key) or probe in [norm(alias) for alias in aliases]:
            return key
    self.error(instance, value, extra='Not an available choice.')
|
def get_area(self):
    """Compute area as the sum of the mesh cells' area values."""
    cell_dimensions = self.mesh.get_cell_dimensions()
    cell_areas = cell_dimensions[3]  # fourth element holds per-cell areas
    return numpy.sum(cell_areas)
|
def encode(g, top=None, cls=PENMANCodec, **kwargs):
    """Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: the node identifier for the top of the serialized graph; if
            unset, the original top of *g* is used
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Example:
        >>> encode(Graph([('h', 'instance', 'hi')]))
        (h / hi)
    """
    return cls(**kwargs).encode(g, top=top)
|
def remove_renderer(self, rend):
    '''Remove a renderer from the current view.

    **Example**

        rend = v.add_renderer(AtomRenderer)
        v.remove_renderer(rend)

    .. versionadded:: 0.3
    '''
    renderers = self.widget.renderers
    if rend not in renderers:
        raise Exception("The renderer is not in this viewer")
    renderers.remove(rend)
|
def _find_caller(self):
    """Find the stack frame of the caller so that we can note the source file
    name, line number, and function name.

    Walks up the stack, skipping frames that belong to this library's own
    source file, and returns a tuple
    ``(filename, lineno, funcname, marked_line, context_lines, stack_info)``;
    the fallback tuple is returned when no outside frame is found.
    """
    rv = ('(unknown file)', 0, '(unknown function)', '(code not available)', [], None)
    f = inspect.currentframe()
    while hasattr(f, 'f_code'):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        # When lggr is imported as a module, the `_srcfile` filename ends
        # in '.pyc', while the filename grabbed from inspect will end in
        # '.py'. We use splitext here to compare absolute paths without the
        # extension, which restores the intended behavior of dropping down
        # the callstack until we reach the first file not part of this
        # library.
        if os.path.splitext(filename)[0] == os.path.splitext(_srcfile)[0]:
            f = f.f_back
            # get out of this logging file
            continue
        sinfo = traceback.extract_stack(f)
        fname, lno, fnc, cc, i = inspect.getframeinfo(f, context=10)
        # Mark the calling line with a >.
        # fix: materialize as a list -- under Python 3 ``map`` returns an
        # iterator, which the ``cc[i]`` subscript below rejects (TypeError).
        cc = [('> ' if idx == i else '| ') + line for idx, line in enumerate(cc)]
        code = '>' + cc[i]
        rv = (fname, lno, fnc, code, cc, sinfo)
        break
    return rv
|
def place(vertices_resources, nets, machine, constraints):
    """Assign vertices to chips in Reverse-Cuthill-McKee (RCM) order.

    The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
    algorithm (in graph-centric terms) is a simple breadth-first-search-like
    heuristic which attempts to yield an ordering of vertices which would
    yield a 1D placement with low network congestion. Placement is performed
    by sequentially assigning vertices in RCM order to chips, also iterated
    over in RCM order.

    This simple placement scheme is described by Torsten Hoefler and Marc
    Snir in their paper entitled 'Generic topology mapping strategies for
    large-scale parallel architectures' published in the Proceedings of the
    international conference on Supercomputing, 2011.

    This is a thin wrapper around the :py:func:`sequential
    <rig.place_and_route.place.sequential.place>` placement algorithm which
    uses an RCM ordering for iterating over chips and vertices.

    Parameters
    ----------
    breadth_first : bool
        Should vertices be placed in breadth first order rather than the
        iteration order of vertices_resources. True by default.
    """
    vertex_order = rcm_vertex_order(vertices_resources, nets)
    chip_order = rcm_chip_order(machine)
    return sequential_place(vertices_resources, nets, machine, constraints,
                            vertex_order, chip_order)
|
def _fix_valid_indices ( cls , valid_indices , insertion_index , dim ) :
"""Add indices for H & S inserted elements ."""
|
# TODO : make this accept an immutable sequence for valid _ indices
# ( a tuple ) and return an immutable sequence rather than mutating an
# argument .
indices = np . array ( sorted ( valid_indices [ dim ] ) )
slice_index = np . sum ( indices <= insertion_index )
indices [ slice_index : ] += 1
indices = np . insert ( indices , slice_index , insertion_index + 1 )
valid_indices [ dim ] = indices . tolist ( )
return valid_indices
|
def parse_line(self, text, fh=None):
    """Parse a line into whatever TAP category it belongs.

    Patterns are tried in priority order (ok, not ok, diagnostic, plan,
    bail, version); a line matching none of them yields ``Unknown``.
    """
    dispatch = (
        (self.ok, lambda m: self._parse_result(True, m, fh)),
        (self.not_ok, lambda m: self._parse_result(False, m, fh)),
        (self.diagnostic, lambda m: Diagnostic(text)),
        (self.plan, self._parse_plan),
        (self.bail, lambda m: Bail(m.group("reason"))),
        (self.version, self._parse_version),
    )
    for pattern, handler in dispatch:
        match = pattern.match(text)
        if match:
            return handler(match)
    return Unknown()
|
def normalize_data_values(type_string, data_value):
    """Decode utf-8 bytes to strings for abi string values.

    eth-abi v1 returns utf-8 bytes for string values.
    This can be removed once eth-abi v2 is required.
    """
    abi_type = parse_type_string(type_string)
    if abi_type.base != "string":
        return data_value
    if abi_type.arrlist is None:
        return normalize_to_text(data_value)
    # array of strings: decode every element
    return tuple((normalize_to_text(item) for item in data_value))
|
def list_worksheets(self):
    """List what worksheet keys exist.

    Returns a list of tuples of the form:
        (WORKSHEET_ID, WORKSHEET_NAME)

    You can then retrieve the specific WORKSHEET_ID in the future by
    constructing a new GSpreadsheet(worksheet=WORKSHEET_ID, ...)
    """
    feed = self.get_worksheets()
    result = []
    for entry in feed.entry:
        # the worksheet id is the last path segment of the 4th link's href
        sheet_id = entry.link[3].href.split('/')[-1]
        result.append((sheet_id, entry.title.text))
    return result
|
def remove(self, address):
    """Remove an address or multiple addresses.

    :param address: list of addresses to remove
    :type address: str or list[str]
    """
    if isinstance(address, str):
        targets = {address}
    elif isinstance(address, (list, tuple)):
        targets = set(address)  # set for O(1) membership tests
    else:
        targets = address  # assume an already set-like collection
    kept = [r for r in self._recipients if r.address not in targets]
    if len(kept) != len(self._recipients):
        self._track_changes()  # something was actually removed
    self._recipients = kept
|
def adjust_level(logger, level):
    """Increase a logger's verbosity up to the requested level.

    :param logger: The logger to change (a :class:`~logging.Logger` object).
    :param level: The log level to enable (a string or number).

    Used by functions like :func:`install()`, :func:`increase_verbosity()`
    and :func:`.enable_system_logging()` to adjust a logger's level so that
    log messages up to the requested log level are propagated to the
    configured output handler(s).

    Only ever "widens the scope of logging": the logger's level is lowered
    when it would otherwise swallow messages of the requested level, and is
    never raised -- filtering should (primarily) be decided by handlers.
    """
    wanted = level_to_number(level)
    if logger.getEffectiveLevel() > wanted:
        logger.setLevel(wanted)
|
def fetch(bank, key):
    '''Fetch a key value.

    Returns the deserialized value stored for (bank, key), or an empty dict
    when no row exists.
    '''
    _init_client()
    # NOTE(review): bank/key/_table_name are interpolated straight into the
    # SQL text -- if they can ever carry untrusted input this is injectable;
    # confirm and switch to a parameterized query if so.
    query = "SELECT data FROM {0} WHERE bank='{1}' AND etcd_key='{2}'".format(_table_name, bank, key)
    cur, _ = run_query(client, query)
    r = cur.fetchone()
    cur.close()
    if r is None:
        return {}
    # stored values are serialized; decode with the configured serializer
    return __context__['serial'].loads(r[0])
|
def previous_theme(self):
    """Cycle to preview the previous theme from the internal list of themes.

    Keeps stepping backwards until a theme passes ``check_theme``, applies
    it, redraws, and briefly shows the theme's display string.
    """
    candidate = self.term.theme_list.previous(self.term.theme)
    while not self.term.check_theme(candidate):
        candidate = self.term.theme_list.previous(candidate)
    self.term.set_theme(candidate)
    self.draw()
    self.term.show_notification(self.term.theme.display_string, timeout=1)
|
def discover_and_apply(self, directory=None, dry_run=False):
    """Retrieve the patches and try to apply them against the datamodel.

    Walks the patch chain starting from the manifest's current version:
    each patch whose base_version matches the current version is applied,
    the manifest is reloaded, and the loop advances to the patch's
    target_version until no further patch matches.  Accumulated patch
    post-scripts are printed at the end.

    :param directory: Directory to search the patch in (default: patches_dir)
    :param dry_run: Don't actually apply the patches
    """
    directory = directory or self.patches_dir
    # index patches by the version they apply on top of
    patches_dict = {p.base_version: p for p in self.discover(directory)}
    current_version = self.manifest.version
    if not patches_dict.get(current_version):
        print('No patch to apply')
        return
    if dry_run:
        msg = 'Datamodel should be in version %s !'
    else:
        msg = 'Datamodel in now in version %s !'
    pss = []  # collected post-script notices, reported once at the end
    while True:
        patch = patches_dict.get(current_version)
        if not patch:
            # chain exhausted: report the final version and any post-scripts
            print(msg % current_version)
            if pss:
                print()
                print(yellow('\n'.join(pss)))
            return
        print('Applying patch %s => %s' % (patch.base_version, patch.target_version))
        patch_pss = [patch.ps] if patch.ps else []
        if not dry_run:
            patch_pss += self.apply_patch(patch)
        if patch_pss:
            pss.append("Patch %s:\n%s" % (patch.target_version, tabulate('\n'.join(patch_pss))))
        # reload so the manifest reflects the applied patch, then advance
        self.manifest.reload()
        current_version = patch.target_version
|
def _load_from_file(metadata, load_func):
    """Load configuration from a file.

    The file path is derived from an environment variable named after the
    service, of the form FOO_SETTINGS; an empty dict is returned when that
    variable is unset.
    """
    path = get_config_filename(metadata)
    if path is None:
        return dict()
    with open(path, "r") as stream:
        parsed = load_func(stream.read())
    return dict(parsed)
|
def format_row(self, row, key, color):
    """For a given row from the table, format one value (i.e. floating
    points and color if applicable)."""
    value = row[key]
    # booleans / None render as a bare '+' marker or empty string
    if isinstance(value, bool) or value is None:
        return '+' if value else ''
    if not isinstance(value, Number):
        return value
    # integers (even float-typed ones) print plainly; others use floatfmt
    if float(value).is_integer():
        template = '{}'
    else:
        template = '{:' + self.floatfmt + '}'
    # a truthy '<key>_best' entry highlights the cell with the given color
    best_flag = key + '_best'
    if best_flag in row and row[best_flag]:
        template = color + template + Ansi.ENDC.value
    return template.format(value)
|
def _is_ipv4(self, ip):
    """Return True if the given arg is a valid IPv4 address."""
    try:
        parsed = IPy.IP(ip)
    except ValueError:
        return False  # not parseable as any IP address
    return parsed.version() == 4
|
def get_version(self):
    """Get game version as (version constant, truncated sub-version string)."""
    header = self._header
    sub_version = str(header.sub_version)[:5]
    return mgz.const.VERSIONS[header.version], sub_version
|
def _fromJSON(cls, jsonobject):
    """Generate a new instance of :class:`maspy.core.Ci` from a decoded
    JSON object (as generated by :func:`maspy.core.Ci._reprJSON()`).

    :param jsonobject: decoded JSON object
    :returns: a new instance of :class:`Ci`
    """
    newInstance = cls(jsonobject[0], jsonobject[1])
    attribDict = {
        'dataProcessingRef': jsonobject[2],
        'precursor': jsonobject[3],
        'product': jsonobject[4],
        'params': [tuple(param) for param in jsonobject[5]],
        'attrib': jsonobject[6],
        'arrayInfo': dict(),
    }
    for arrayType, jsonEntry in viewitems(jsonobject[7]):
        attribDict['arrayInfo'][arrayType] = {
            'dataProcessingRef': jsonEntry['dataProcessingRef'],
            'params': [tuple(p) for p in jsonEntry['params']],
        }
    for key, value in viewitems(attribDict):
        setattr(newInstance, key, value)
    return newInstance
|
def runlist_create(name, **kwargs):
    """Create a runlist and upload it into the storage."""
    ctx = Context(**kwargs)
    action_kwargs = {
        'storage': ctx.repo.create_secure_service('storage'),
        'name': name,
    }
    ctx.execute_action('runlist:create', **action_kwargs)
|
def verify_predictions(predictions):
    """Ensure that predictions is stored as a numpy array and check that
    all values are either 0 or 1.

    :param predictions: array-like of binary predictions
    :returns: a 2-D numpy array (a 1-D input gains a trailing axis)
    :raises ValueError: if any value is not 0 or 1
    """
    # fix: np.asarray avoids a copy when possible; the previous
    # np.array(..., copy=False) raises under NumPy 2 whenever a copy
    # *is* required (e.g. for a plain Python list input).
    predictions = np.asarray(predictions)
    # Valid arrays survive a bool round-trip unchanged (only 0/1 values do)
    if not np.array_equal(predictions, predictions.astype(bool)):
        raise ValueError("predictions contains invalid values. " +
                         "The only permitted values are 0 or 1.")
    if predictions.ndim == 1:
        predictions = predictions[:, np.newaxis]
    return predictions
|
def _interactive_input_tensor_to_features_dict(feature_map, hparams):
    """Convert the interactive input format (see above) to a dictionary.

    Args:
        feature_map: dict with inputs.
        hparams: model hyperparameters
    Returns:
        a features dictionary, as expected by the decoder.
    """
    inputs = tf.convert_to_tensor(feature_map["inputs"])
    # rank >= 3 is taken to mean an image input
    input_is_image = False if len(inputs.get_shape()) < 3 else True

    x = inputs
    if input_is_image:
        # normalize images to the 299x299 int32 format
        x = tf.image.resize_images(x, [299, 299])
        x = tf.reshape(x, [1, 299, 299, -1])
        x = tf.to_int32(x)
    else:
        # Remove the batch dimension.
        # assumes the flat input layout [num_samples, decode_length, len(tokens)]
        # followed by the token ids themselves -- TODO confirm against the
        # interactive-input producer
        num_samples = x[0]
        length = x[2]
        x = tf.slice(x, [3], tf.to_int32([length]))
        x = tf.reshape(x, [1, -1, 1, 1])
        # Transform into a batch of size num_samples to get that many random
        # decodes.
        x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))

    p_hparams = hparams.problem_hparams
    input_space_id = tf.constant(p_hparams.input_space_id)
    target_space_id = tf.constant(p_hparams.target_space_id)

    features = {}
    features["input_space_id"] = input_space_id
    features["target_space_id"] = target_space_id
    # images use the module-level IMAGE_DECODE_LENGTH constant; for text the
    # second element of the flat input carries the requested decode length
    features["decode_length"] = (IMAGE_DECODE_LENGTH if input_is_image else inputs[1])
    features["inputs"] = x
    return features
|
def set_default_value(self, data_id, value=EMPTY, initial_dist=0.0):
    """Set the default value of a data node in the dispatcher.

    :param data_id:
        Data node id.
    :type data_id: str

    :param value:
        Data node default value.

        .. note:: If `EMPTY` the previous default value is removed.
    :type value: T, optional

    :param initial_dist:
        Initial distance in the ArciDispatch algorithm when the data node
        default value is used.
    :type initial_dist: float, int, optional

    :return:
        Self.
    :rtype: BlueDispatcher
    """
    # _call_kw(locals()) must stay the first statement: it snapshots exactly
    # the call arguments, so no local variable may be introduced before it.
    self.deferred.append(('set_default_value', _call_kw(locals())))
    return self
|
def Animation_seekAnimations(self, animations, currentTime):
    """Function path: Animation.seekAnimations
    Domain: Animation
    Method name: seekAnimations

    Parameters:
        Required arguments:
            'animations' (type: array) -> List of animation ids to seek.
            'currentTime' (type: number) -> Set the current time of each animation.
    No return value.

    Description: Seek a set of animations to a particular time within each animation.
    """
    assert isinstance(animations, (list, tuple)), "Argument 'animations' must be of type '['list', 'tuple']'. Received type: '%s'" % type(animations)
    assert isinstance(currentTime, (float, int)), "Argument 'currentTime' must be of type '['float', 'int']'. Received type: '%s'" % type(currentTime)
    return self.synchronous_command(
        'Animation.seekAnimations',
        animations=animations,
        currentTime=currentTime,
    )
|
def reduce(fname, reduction_factor):
    """Produce a submodel from `fname` by sampling the nodes randomly.

    Supports source models, site models and exposure models. As a special
    case, it is also able to reduce .csv files by sampling the lines.
    This is a debugging utility to reduce large computations to small ones.
    A backup of the original file is saved with a `.bak` suffix.

    :param fname: path to a .csv, .npy or NRML XML file
    :param reduction_factor: fraction of nodes/lines to keep
    """
    if fname.endswith('.csv'):
        with open(fname) as f:
            line = f.readline()
            # read the first line; keep it aside when it is a header so that
            # it is not subject to sampling
            if csv.Sniffer().has_header(line):
                header = line
                all_lines = f.readlines()
            else:
                header = None
                f.seek(0)
                all_lines = f.readlines()
        lines = general.random_filter(all_lines, reduction_factor)
        shutil.copy(fname, fname + '.bak')
        print('Copied the original file in %s.bak' % fname)
        _save_csv(fname, lines, header)
        print('Extracted %d lines out of %d' % (len(lines), len(all_lines)))
        return
    elif fname.endswith('.npy'):
        array = numpy.load(fname)
        shutil.copy(fname, fname + '.bak')
        print('Copied the original file in %s.bak' % fname)
        arr = numpy.array(general.random_filter(array, reduction_factor))
        numpy.save(fname, arr)
        print('Extracted %d rows out of %d' % (len(arr), len(array)))
        return
    node = nrml.read(fname)
    model = node[0]
    if model.tag.endswith('exposureModel'):
        total = len(model.assets)
        model.assets.nodes = general.random_filter(model.assets, reduction_factor)
        num_nodes = len(model.assets)
    elif model.tag.endswith('siteModel'):
        total = len(model)
        model.nodes = general.random_filter(model, reduction_factor)
        num_nodes = len(model)
    elif model.tag.endswith('sourceModel'):
        # source models are sampled and saved by the helper
        reduce_source_model(fname, reduction_factor)
        return
    elif model.tag.endswith('logicTree'):
        # reduce every source model referenced by the logic tree
        for smpath in logictree.collect_info(fname).smpaths:
            reduce_source_model(smpath, reduction_factor)
        return
    else:
        raise RuntimeError('Unknown model tag: %s' % model.tag)
    save_bak(fname, node, num_nodes, total)
|
def check_connection(self):
    """Try to open the local file. Under NT systems the case sensitivity
    is checked."""
    parent = self.parent_url
    if parent is not None and not parent.startswith(u"file:"):
        raise LinkCheckerError(_("local files are only checked without parent URL or when the parent URL is also a file"))
    if self.is_directory():
        self.set_result(_("directory"))
        return
    encoded_path = fileutil.pathencode(self.url)
    self.url_connection = urlopen(encoded_path)
    self.check_case_sensitivity()
|
def list(self, **params):
    """Retrieve all sources.

    Returns all deal sources available to the user according to the
    parameters provided.

    :calls: ``get /deal_sources``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access,
        which represent the collection of DealSources.
    :rtype: list
    """
    _status, _headers, deal_sources = self.http_client.get("/deal_sources", params=params)
    return deal_sources
|
def pprint(obj, file_=None):
    """Print debug information for various public objects like methods,
    functions, constructors etc.

    :param obj: a callable carrying a ``_code`` attribute, or a class with
        a ``_constructors`` mapping
    :param file_: output stream (defaults to ``sys.stdout``)
    :raises TypeError: if *obj* is neither of the supported kinds
    """
    if file_ is None:
        file_ = sys.stdout
    # functions, methods
    if callable(obj) and hasattr(obj, "_code"):
        obj._code.pprint(file_)
        return
    # classes: dump every constructor overload in a stable order
    if isinstance(obj, type) and hasattr(obj, "_constructors"):
        constructors = obj._constructors
        for names, func in sorted(constructors.items()):
            func._code.pprint(file_)
        return
    # fix: message previously read "unkown type"
    raise TypeError("unknown type")
|
def get_variant(self, index=None):
    """Get the variant with the associated index.

    Returns:
        `Variant` object, or None if no variant with the given index exists.
    """
    return next((v for v in self.iter_variants() if v.index == index), None)
|
def as_dictionary(self, is_proof=True):
    """Return the DDO as a JSON dict.

    :param is_proof: if False then do not include the 'proof' element.
    :return: dict
    """
    if self._created is None:
        # stamp lazily so the timestamp reflects first serialization
        self._created = DDO._get_timestamp()
    data = {
        '@context': DID_DDO_CONTEXT_URL,
        'id': self._did,
        'created': self._created,
    }
    if self._public_keys:
        data['publicKey'] = [pk.as_dictionary() for pk in self._public_keys]
    if self._authentications:
        data['authentication'] = [auth for auth in self._authentications]
    if self._services:
        data['service'] = [srv.as_dictionary() for srv in self._services]
    if self._proof and is_proof:
        data['proof'] = self._proof
    return data
|
def create_or_update_alarm(connection=None, name=None, metric=None, namespace=None, statistic=None, comparison=None, threshold=None, period=None, evaluation_periods=None, unit=None, description='', dimensions=None, alarm_actions=None, insufficient_data_actions=None, ok_actions=None, region=None, key=None, keyid=None, profile=None):
    '''
    Create or update a cloudwatch alarm.

    Params are the same as:
    https://boto.readthedocs.io/en/latest/ref/cloudwatch.html#boto.ec2.cloudwatch.alarm.MetricAlarm.

    Dimensions must be a dict. If the value of Dimensions is a string, it will
    be json decoded to produce a dict. alarm_actions, insufficient_data_actions,
    and ok_actions must be lists of string. If the passed-in value is a string,
    it will be split on "," to produce a list. The strings themselves for
    alarm_actions, insufficient_data_actions, and ok_actions must be Amazon
    resource names (ARN's); however, this method also supports an arn lookup
    notation, as follows:

        arn:aws:....   ARN as per http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
        scaling_policy:<as_name>:<scaling_policy_name>   The named autoscale group scaling policy, for the named group (e.g. scaling_policy:my-asg:ScaleDown)

    This is convenient for setting up autoscaling as follows. First specify a
    boto_asg.present state for an ASG with scaling_policies, and then set up
    boto_cloudwatch_alarm.present states which have alarm_actions that
    reference the scaling_policy.

    CLI example:

        salt myminion boto_cloudwatch.create_alarm name=myalarm ... region=us-east-1
    '''
    # clean up argument types, so that CLI works
    # (the CLI passes every value through as a string).
    if threshold:
        threshold = float(threshold)
    if period:
        period = int(period)
    if evaluation_periods:
        evaluation_periods = int(evaluation_periods)
    if isinstance(dimensions, six.string_types):
        dimensions = salt.utils.json.loads(dimensions)
    # NOTE(review): a non-dict ``dimensions`` -- including the default None --
    # aborts here, so dimensions is effectively a required argument; confirm
    # this is what callers expect.
    if not isinstance(dimensions, dict):
        log.error("could not parse dimensions argument: must be json encoding of a dict: '%s'", dimensions)
        return False
    # Comma-separated action strings become lists of action names.
    if isinstance(alarm_actions, six.string_types):
        alarm_actions = alarm_actions.split(",")
    if isinstance(insufficient_data_actions, six.string_types):
        insufficient_data_actions = insufficient_data_actions.split(",")
    if isinstance(ok_actions, six.string_types):
        ok_actions = ok_actions.split(",")
    # convert provided action names into ARN's
    if alarm_actions:
        alarm_actions = convert_to_arn(alarm_actions, region=region, key=key, keyid=keyid, profile=profile)
    if insufficient_data_actions:
        insufficient_data_actions = convert_to_arn(insufficient_data_actions, region=region, key=key, keyid=keyid, profile=profile)
    if ok_actions:
        ok_actions = convert_to_arn(ok_actions, region=region, key=key, keyid=keyid, profile=profile)
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # boto's create_alarm both creates a new alarm and updates an existing
    # alarm of the same name, hence the function's name.
    alarm = boto.ec2.cloudwatch.alarm.MetricAlarm(connection=connection, name=name, metric=metric, namespace=namespace, statistic=statistic, comparison=comparison, threshold=threshold, period=period, evaluation_periods=evaluation_periods, unit=unit, description=description, dimensions=dimensions, alarm_actions=alarm_actions, insufficient_data_actions=insufficient_data_actions, ok_actions=ok_actions)
    conn.create_alarm(alarm)
    log.info('Created/updated alarm %s', name)
    return True
|
def keys(self) -> Iterator[str]:
    """Yield every path one can take from this ApiNode."""
    # The parameter segment (if any) comes first, then the literal paths.
    if self.param:
        yield self.param_name
    for path in self.paths:
        yield path
|
def create(type_dict, *type_parameters):
    """StructFactory.create(*type_parameters) expects:

        class name,
        ((binding requirement1,),
         (binding requirement2, bound_to_scope),
         ((attribute_name1, attribute_sig1 (serialized)),
          (attribute_name2, attribute_sig2 ...),
          (attribute_nameN, ...)))
    """
    name, parameters = type_parameters
    for entry in parameters:
        assert isinstance(entry, tuple)
    # Deserialize each (attribute, signature) pair into the class typemap.
    typemap = {
        attr: TypeSignature.deserialize(sig, type_dict)
        for attr, sig in parameters
    }
    return TypeMetaclass(str(name), (Structural,), {'TYPEMAP': typemap})
|
def insert_draft_child(self, child_pid):
    """Insert a draft child to versioning."""
    # Only RESERVED PIDs may become draft children, and only one draft
    # child may exist per relation.
    if child_pid.status != PIDStatus.RESERVED:
        raise PIDRelationConsistencyError("Draft child should have status 'RESERVED'")
    if self.draft_child:
        raise PIDRelationConsistencyError("Draft child already exists for this relation: {0}".format(self.draft_child))
    with db.session.begin_nested():
        super(PIDNodeVersioning, self).insert_child(child_pid, index=-1)
|
def validate_and_discover(self, context):
    """Validate vCenter connectivity and auto-discover resource attributes.

    :type context: models.QualiDriverModels.AutoLoadCommandContext
    :return: AutoLoadDetails built from the discovered attributes.
    :raises ValueError: if the vCenter could not be reached with the
        configured credentials.
    """
    logger = self._get_logger(context)
    logger.info('Autodiscovery started')
    si = None
    resource = None
    with CloudShellSessionContext(context) as cloudshell_session:
        # The address attribute is mandatory before attempting a connection.
        self._check_if_attribute_not_empty(context.resource, ADDRESS)
        resource = context.resource
        # Returns a service-instance handle on success; falsy on failure.
        si = self._check_if_vcenter_user_pass_valid(context, cloudshell_session, resource.attributes)
        resource.attributes = VCenterAutoModelDiscovery._make_attributes_slash_backslash_agnostic(resource.attributes)
    auto_attr = []
    if not si:
        error_message = 'Could not connect to the vCenter: {0}, with given credentials'.format(context.resource.address)
        logger.error(error_message)
        raise ValueError(error_message)
    try:
        # Validate the configured datacenter first, then every remaining
        # attribute against the inventory of that datacenter.
        all_dc = self.pv_service.get_all_items_in_vcenter(si, vim.Datacenter)
        dc = self._validate_datacenter(si, all_dc, auto_attr, resource.attributes)
        all_items_in_dc = self.pv_service.get_all_items_in_vcenter(si, None, dc)
        dc_name = dc.name
        for key, value in resource.attributes.items():
            # Credentials and the datacenter/cluster were handled above.
            if key in [USER, PASSWORD, DEFAULT_DATACENTER, VM_CLUSTER]:
                continue
            validation_method = self._get_validation_method(key)
            validation_method(si, all_items_in_dc, auto_attr, dc_name, resource.attributes, key)
    except vim.fault.NoPermission:
        logger.exception('Autodiscovery failed due to permissions error:')
        raise Exception("vCenter permissions for configured resource(s) are invalid")
    logger.info('Autodiscovery completed')
    return AutoLoadDetails([], auto_attr)
|
def get_axes_ratio(ax):
    """Return height/width ratio of the given Axes object.

    The ratio is calculated in 'display coordinates', as defined in the
    matplotlib documentation on transformations, so it reflects the shape
    the Axes actually has when displayed.
    """
    corners = ax.get_position().get_points()
    to_display = ax.figure.transFigure.transform
    lower_left, upper_right = [to_display(corner) for corner in corners]
    width, height = upper_right - lower_left
    return height / width
|
def _makeApiCall(self, entry, *args, **kwargs):
    """Dispatch a call to the HTTP layer for a given API reference entry."""
    routeParams, payload, query, paginationHandler, paginationLimit = \
        self._processArgs(entry, *args, **kwargs)
    route = self._subArgsInRoute(entry, routeParams)
    # TODO: Check for limit being in the Query of the api ref
    if paginationLimit and 'limit' in entry.get('query', []):
        query['limit'] = paginationLimit

    def _routed():
        # Rebuild the route with the current query string (query mutates
        # across pagination rounds).
        if query:
            return route + '?' + urllib.parse.urlencode(query)
        return route

    response = self._makeHttpRequest(entry['method'], _routed(), payload)
    if not paginationHandler:
        return response
    # Paginated call: every page is fed to the handler; nothing is returned.
    paginationHandler(response)
    while response.get('continuationToken'):
        query['continuationToken'] = response['continuationToken']
        response = self._makeHttpRequest(entry['method'], _routed(), payload)
        paginationHandler(response)
|
def binormal_curve_single(obj, u, normalize):
    """Evaluates the curve binormal vector at the given u parameter.

    Curve binormal is the cross product of the normal and the tangent vectors.
    The output returns a list containing the starting point (i.e. origin) of
    the vector and the vector itself.

    :param obj: input curve
    :type obj: abstract.Curve
    :param u: parameter
    :type u: float
    :param normalize: if True, the returned vector is converted to a unit vector
    :type normalize: bool
    :return: a list containing "point" and "vector" pairs
    :rtype: tuple
    """
    # The binormal shares its origin with the tangent evaluation.
    origin, tangent = tangent_curve_single(obj, u, normalize)
    _, normal = normal_curve_single(obj, u, normalize)
    binormal = linalg.vector_cross(tangent, normal)
    if normalize:
        binormal = linalg.vector_normalize(binormal)
    return tuple(origin), tuple(binormal)
|
def _updateCallSetIds(self, variantFile):
    """Updates the call set IDs based on the specified variant file."""
    # Only populate once: an already-filled map is left untouched.
    if self._callSetIdMap:
        return
    for sample in variantFile.header.samples:
        self.addCallSetFromName(sample)
|
def __get_user_env_vars(self):
    """Return the user defined environment variables as a
    (url, instance_id, user_id, password, iam_api_key) tuple; each element
    is None when the corresponding variable is unset."""
    var_names = (
        self.GP_URL_ENV_VAR,
        self.GP_INSTANCE_ID_ENV_VAR,
        self.GP_USER_ID_ENV_VAR,
        self.GP_PASSWORD_ENV_VAR,
        self.GP_IAM_API_KEY_ENV_VAR,
    )
    return tuple(os.environ.get(name) for name in var_names)
|
def adjust_bounding_box(bounds1, bounds2):
    """If the bounds2 corners are outside of bounds1, they will be adjusted to bounds1 corners.

    @params
    bounds1 - The source bounding box
    bounds2 - The target bounding box that has to be within bounds1

    @return
    A bounding box tuple in (y1, x1, y2, x2) format
    """
    # out of bound check
    # If it is completely outside of target bounds, return target bounds
    # NOTE(review): these clauses compare mixed indices (e.g.
    # ``bounds2[2] < bounds1[0]``); they appear to assume (y1, x1, y2, x2)
    # with y1 > y2 and x1 < x2, but the asymmetry looks suspicious --
    # verify against callers before changing.
    if ((bounds2[0] > bounds1[0] and bounds2[2] > bounds1[0]) or (bounds2[2] < bounds1[2] and bounds2[2] < bounds1[0])):
        return bounds1
    if ((bounds2[1] < bounds1[1] and bounds2[3] < bounds1[1]) or (bounds2[3] > bounds1[3] and bounds2[1] > bounds1[3])):
        return bounds1
    new_bounds = list(bounds2)
    # Adjust Y axis (Longitude)
    # Clamp each Y corner of bounds2 that spills past bounds1.
    if (bounds2[0] > bounds1[0] or bounds2[0] < bounds1[3]):
        new_bounds[0] = bounds1[0]
    if (bounds2[2] < bounds1[2] or bounds2[2] > bounds1[0]):
        new_bounds[2] = bounds1[2]
    # Adjust X axis (Latitude)
    # Clamp each X corner of bounds2 that spills past bounds1.
    if (bounds2[1] < bounds1[1] or bounds2[1] > bounds1[3]):
        new_bounds[1] = bounds1[1]
    if (bounds2[3] > bounds1[3] or bounds2[3] < bounds1[1]):
        new_bounds[3] = bounds1[3]
    return tuple(new_bounds)
|
def exception_format():
    """Convert the active exception info into a string suitable for display.

    Must be called while an exception is being handled (i.e. from inside an
    ``except`` block); returns the traceback, exception type and message
    exactly as the interpreter would print them.
    """
    # format_exc() is the stdlib shorthand for
    # "".join(traceback.format_exception(*sys.exc_info())).
    return traceback.format_exc()
|
def _preferredThemes(self):
    """Return a list of themes in the order of preference that this user
    has selected via L{PrivateApplication.preferredTheme}."""
    installed = getInstalledThemes(self.store.parent)
    # Reordering happens in place; the same list object is returned.
    _reorderForPreference(installed, self.preferredTheme)
    return installed
|
def dummy_func(arg1, arg2, arg3=None, arg4=[1, 2, 3], arg5={}, **kwargs):
    """Test func for kwargs parsing.

    NOTE: the mutable defaults (arg4/arg5) are part of this fixture's
    signature and are deliberately left as-is.
    """
    # Evaluation order matters: get 'foo', pop 'bar', then require 'foo2'
    # (raises KeyError when absent).
    pieces = [
        kwargs.get('foo', None),
        kwargs.pop('bar', 4),
        kwargs['foo2'],
    ]
    return ''.join(str(piece) for piece in pieces)
|
def _create_connection(self):
    """Creates a transport channel.

    :return: transport channel instance
    :rtype: :class:`fatbotslim.irc.tcp.TCP` or :class:`fatbotslim.irc.tcp.SSL`
    """
    if self.ssl:
        return SSL(self.server, self.port)
    return TCP(self.server, self.port)
|
def create(self, card_data):
    """Create a card coupon.

    :param card_data: the card definition payload
    :return: the ID of the newly created card
    """
    # The API response is reduced to just the new card's ID.
    return self._post(
        'card/create',
        data=card_data,
        result_processor=lambda x: x['card_id'],
    )
|
def compare_params(defined, existing, return_old_value=False):
    '''
    .. versionadded:: 2017.7

    Compares Zabbix object definition against existing Zabbix object.

    :param defined: Zabbix object definition taken from sls file.
    :param existing: Existing Zabbix object taken from result of an API call.
    :param return_old_value: Default False. If True, returns dict("old"=old_val, "new"=new_val) for rollback purpose.
    :return: Params that are different from existing object. Result extended by
        object ID can be passed directly to Zabbix API update method.
    '''
    # Comparison of data types
    if not isinstance(defined, type(existing)):
        raise SaltException('Zabbix object comparison failed (data type mismatch). Expecting {0}, got {1}. ' 'Existing value: "{2}", defined value: "{3}").'.format(type(existing), type(defined), existing, defined))
    # Comparison of values
    # Scalars are compared via their text form (the Zabbix API returns
    # everything as strings).
    if not salt.utils.data.is_iter(defined):
        if six.text_type(defined) != six.text_type(existing) and return_old_value:
            return {'new': six.text_type(defined), 'old': six.text_type(existing)}
        elif six.text_type(defined) != six.text_type(existing) and not return_old_value:
            return six.text_type(defined)
    # Comparison of lists of values or lists of dicts
    if isinstance(defined, list):
        if len(defined) != len(existing):
            log.info('Different list length!')
            return {'new': defined, 'old': existing} if return_old_value else defined
        else:
            difflist = []
            for ditem in defined:
                d_in_e = []
                for eitem in existing:
                    comp = compare_params(ditem, eitem, return_old_value)
                    if return_old_value:
                        d_in_e.append(comp['new'])
                    else:
                        d_in_e.append(comp)
                # ditem differed from every existing item -> it is new/changed.
                if all(d_in_e):
                    difflist.append(ditem)
            # If there is any difference in a list then whole defined list must be returned and provided for update
            if any(difflist) and return_old_value:
                return {'new': defined, 'old': existing}
            elif any(difflist) and not return_old_value:
                return defined
    # Comparison of dicts
    if isinstance(defined, dict):
        try:
            # defined must be a subset of existing to be compared
            if set(defined) <= set(existing):
                intersection = set(defined) & set(existing)
                diffdict = {'new': {}, 'old': {}} if return_old_value else {}
                for i in intersection:
                    comp = compare_params(defined[i], existing[i], return_old_value)
                    if return_old_value:
                        # An empty list returned from the recursion still
                        # counts as a difference (list replaced by empty).
                        if comp or (not comp and isinstance(comp, list)):
                            diffdict['new'].update({i: defined[i]})
                            diffdict['old'].update({i: existing[i]})
                    else:
                        if comp or (not comp and isinstance(comp, list)):
                            diffdict.update({i: defined[i]})
                return diffdict
            return {'new': defined, 'old': existing} if return_old_value else defined
        except TypeError:
            raise SaltException('Zabbix object comparison failed (data type mismatch). Expecting {0}, got {1}. ' 'Existing value: "{2}", defined value: "{3}").'.format(type(existing), type(defined), existing, defined))
|
def parse_value_refarray(self, tup_tree):
    """Parse a VALUE.REFARRAY element and return the array of instance paths
    or class paths it represents as a list of CIMInstanceName or
    CIMClassName objects, respectively.

      <!ELEMENT VALUE.REFARRAY (VALUE.REFERENCE | VALUE.NULL)*>
    """
    self.check_node(tup_tree, 'VALUE.REFARRAY')
    return self.list_of_various(tup_tree, ('VALUE.REFERENCE', 'VALUE.NULL'))
|
def load_from_filename(self, file_name, sep='\n'):
    """Utility function to load messages from a local filename to a queue.

    :param file_name: path of the file to read (opened in binary mode).
    :param sep: message separator, passed through to ``load_from_file``.
    :return: the number of messages loaded.
    """
    # ``with`` guarantees the handle is closed even if load_from_file
    # raises (the original leaked the file object on error).
    with open(file_name, 'rb') as fp:
        return self.load_from_file(fp, sep)
|
def visit_grouping(self, grouping, asfrom=False, **kwargs):
    """Compile a grouping (parenthesized) element into its dict form."""
    inner = grouping.element._compiler_dispatch(self, **kwargs)
    return {'type': 'grouping', 'grouping': inner}
|
def local_position_ned_encode(self, time_boot_ms, x, y, z, vx, vy, vz):
    '''
    The filtered local position (e.g. fused computer vision and
    accelerometers). Coordinate frame is right-handed, Z-axis down
    (aeronautical frame, NED / north-east-down convention).

    time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
    x            : X Position (float)
    y            : Y Position (float)
    z            : Z Position (float)
    vx           : X Speed (float)
    vy           : Y Speed (float)
    vz           : Z Speed (float)
    '''
    msg = MAVLink_local_position_ned_message(time_boot_ms, x, y, z, vx, vy, vz)
    return msg
|
def hex_to_rgb(hex_value):
    """Convert a hexadecimal color value to a 3-tuple of integers
    suitable for use in an ``rgb()`` triplet specifying that color.

    The hexadecimal value will be normalized before being converted.

    Examples:

    >>> hex_to_rgb('#fff')
    (255, 255, 255)
    >>> hex_to_rgb('#000080')
    (0, 0, 128)
    """
    normalized = normalize_hex(hex_value)
    # After normalization the digits sit at offsets 1, 3 and 5 (past '#').
    return tuple(int(normalized[i:i + 2], 16) for i in (1, 3, 5))
|
def conn_options(prs, conn):
    """Set options of connecting to TonicDNS API server.

    Arguments:
        prs:  parser object of argparse
        conn: dictionary of connection information
    """
    # Seed argparse defaults from the config, then register CLI options
    # for whatever is still missing.
    if conn.get('server') and conn.get('username') and conn.get('password'):
        prs.set_defaults(server=conn.get('server'), username=conn.get('username'), password=conn.get('password'))
    elif conn.get('server') and conn.get('username'):
        prs.set_defaults(server=conn.get('server'), username=conn.get('username'))
    # NOTE(review): when only one of server/username is configured, neither
    # branch above runs, so the configured value is never installed as a
    # default even though its option is not re-registered below -- confirm
    # this gap is intended.
    if conn.get('auto_update_soa'):
        prs.set_defaults(auto_update_soa=conn.get('auto_update_soa'))
    else:
        prs.set_defaults(auto_update_soa=False)
    if not conn.get('server'):
        set_option(prs, 'server')
    if not conn.get('username'):
        set_option(prs, 'username')
    if not conn.get('password'):
        set_option(prs, 'password')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.