signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def deliver_dashboard(schedule):
    """Given a schedule, deliver the dashboard as an email report.

    Renders the scheduled dashboard in a headless webdriver, captures a
    screenshot of it, and emails the image to the schedule's recipients.
    """
    dashboard = schedule.dashboard
    dashboard_url = _get_url_path('Superset.dashboard', dashboard_id=dashboard.id,)
    # Create a driver, fetch the page, wait for the page to render
    driver = create_webdriver()
    window = config.get('WEBDRIVER_WINDOW')['dashboard']
    driver.set_window_size(*window)
    driver.get(dashboard_url)
    time.sleep(PAGE_RENDER_WAIT)
    # Set up a function to retry once for the element.
    # This is buggy in certain selenium versions with firefox driver
    get_element = getattr(driver, 'find_element_by_class_name')
    element = retry_call(get_element, fargs=['grid-container'], tries=2, delay=PAGE_RENDER_WAIT,)
    try:
        screenshot = element.screenshot_as_png
    except WebDriverException:
        # Some webdrivers do not support screenshots for elements.
        # In such cases, take a screenshot of the entire page.
        screenshot = driver.screenshot()  # pylint: disable=no-member
    finally:
        # Always tear the browser down, even if the screenshot failed.
        destroy_webdriver(driver)
    # Generate the email body and attachments
    email = _generate_mail_content(schedule, screenshot, dashboard.dashboard_title, dashboard_url,)
    subject = __('%(prefix)s %(title)s', prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'), title=dashboard.dashboard_title,)
    _deliver_email(schedule, subject, email)
def get_parent_page(self):
    """Return the parent page for cms.api.create_page().

    Returns None for level-1 ('root') pages, otherwise the page stored
    one level up at the current count.
    """
    if self.current_level == 1:
        # Root pages have no parent.
        return None
    return self.page_data[(self.current_level - 1, self.current_count)]
def from_xxx(cls, xxx):
    """Create a new Language instance from a LanguageID string.

    :param xxx: LanguageID as string
    :return: Language instance with instance.xxx() == xxx if xxx is valid,
        else an instance of UnknownLanguage
    """
    xxx = str(xxx).lower()
    # FIX: the original used `xxx is 'unknown'`, an identity comparison that
    # is not guaranteed to be True for equal strings; compare by value.
    if xxx == 'unknown':
        return UnknownLanguage(xxx)
    try:
        return cls._from_xyz('LanguageID', xxx)
    except NotALanguageException:
        log.warning('Unknown LanguageId: {}'.format(xxx))
        return UnknownLanguage(xxx)
def logger(message, level=10):
    """Handle logging: emit *message* on this module's logger at *level*.

    The default level of 10 corresponds to logging.DEBUG.
    """
    log_obj = logging.getLogger(__name__)
    log_obj.log(level, str(message))
def send_errors_to_logging():
    """Send all VTK error/warning messages to Python's logging module."""
    # Route VTK's output into a string window instance...
    string_window = vtk.vtkStringOutputWindow()
    output_window = vtk.vtkOutputWindow()
    output_window.SetInstance(string_window)
    # ...and attach an observer that forwards its content.
    watcher = Observer()
    return watcher.observe(string_window)
def normalize_strategy_parameters(params):
    """Normalize strategy parameters to be a tuple of strings.

    Parameters
    ----------
    params : (space-delimited) string or sequence of strings/numbers
        Parameters expected by a :class:`SampleStrategy` object, in various
        forms, where the first parameter is the name of the strategy.

    Returns
    -------
    params : tuple of strings
        Strategy parameters as a tuple of strings.
    """
    def fixup_numbers(val):
        try:
            # See if it is a number; canonicalise e.g. "2" and "2.0" to "2.0".
            return str(float(val))
        except (ValueError, TypeError):
            # Not a number we know of, perhaps a string.
            return str(val)
    # FIX: `basestring` only exists on Python 2 and raises NameError on
    # Python 3; `str` covers the string case on both code paths here.
    if isinstance(params, str):
        params = params.split(' ')
    return tuple(fixup_numbers(p) for p in params)
def _make_new_contig_from_nucmer_and_spades(self, original_contig, hits, circular_spades, log_fh=None, log_outprefix=None):
    '''Tries to make new circularised contig from contig called original_contig.

    hits = list of nucmer hits, all with ref = original contig.
    circular_spades = set of query contig names that spades says are circular.

    Returns (new Fasta contig, spades contig name) on success, otherwise
    (None, None).
    '''
    writing_log_file = None not in [log_fh, log_outprefix]
    hits_to_circular_contigs = [x for x in hits if x.qry_name in circular_spades]
    if len(hits_to_circular_contigs) == 0:
        if writing_log_file:
            print(log_outprefix, original_contig, 'No matches to SPAdes circular contigs', sep='\t', file=log_fh)
        return None, None
    for hit in hits_to_circular_contigs:
        # FIX: these log prints were not guarded by writing_log_file, so with
        # no log file they went to stdout (print(..., file=None) writes to
        # sys.stdout). All log output is now emitted only when logging.
        if writing_log_file:
            print(log_outprefix, original_contig, 'Checking hit:', hit, sep='\t', file=log_fh)
        percent_query_covered = 100 * (hit.hit_length_qry / hit.qry_length)
        if self.min_spades_circular_percent <= percent_query_covered:
            if writing_log_file:
                print(log_outprefix, '\t', original_contig, '\t\tHit is long enough. Percent of contig covered by hit is ', percent_query_covered, sep='', file=log_fh)
            # the spades contig hit is long enough, but now check that
            # the input contig is covered by hits from this spades contig
            hit_intervals = [x.ref_coords() for x in hits_to_circular_contigs if x.qry_name == hit.qry_name]
            if len(hit_intervals) > 0:
                pyfastaq.intervals.merge_overlapping_in_list(hit_intervals)
                percent_covered = 100 * pyfastaq.intervals.length_sum_from_list(hit_intervals) / hit.ref_length
                if writing_log_file:
                    print(log_outprefix, '\t', original_contig, '\t\treference bases covered by spades contig:', ', '.join([str(x) for x in hit_intervals]), sep='', file=log_fh)
                    print(log_outprefix, '\t', original_contig, '\t\t ... which is ', percent_covered, ' percent of ', hit.ref_length, ' bases', sep='', file=log_fh)
                if self.min_spades_circular_percent <= percent_covered:
                    if writing_log_file:
                        print(log_outprefix, original_contig, '\tUsing hit to call as circular (enough bases covered)', sep='\t', file=log_fh)
                    return pyfastaq.sequences.Fasta(original_contig, self.reassembly_contigs[hit.qry_name].seq), hit.qry_name
                elif writing_log_file:
                    print(log_outprefix, original_contig, '\tNot using hit to call as circular (not enough bases covered)', sep='\t', file=log_fh)
        else:
            if writing_log_file:
                print(log_outprefix, original_contig, '\tNot using hit to call as circular (hit too short)', sep='\t', file=log_fh)
    if writing_log_file:
        print(log_outprefix, original_contig, 'No suitable matches to SPAdes circular contigs', sep='\t', file=log_fh)
    return None, None
def engine_list(self):
    """:returns: Return list of engines supported by GNS3 for the GNS3VM"""
    # VMware is listed first because it is the recommended engine.
    vmware_url = "https://github.com/GNS3/gns3-gui/releases/download/v{version}/GNS3.VM.VMware.Workstation.{version}.zip".format(version=__version__)
    if sys.platform.startswith("darwin"):
        vmware_name = "VMware Fusion"
    else:
        vmware_name = "VMware Workstation / Player"
    vmware_informations = {
        "engine_id": "vmware",
        "name": vmware_name,
        "description": 'VMware is the recommended choice for best performances.<br>The GNS3 VM can be <a href="{}">downloaded here</a>.'.format(vmware_url),
        "support_when_exit": True,
        "support_headless": True,
        "support_ram": True,
    }
    virtualbox_url = "https://github.com/GNS3/gns3-gui/releases/download/v{version}/GNS3.VM.VirtualBox.{version}.zip".format(version=__version__)
    virtualbox_informations = {
        "engine_id": "virtualbox",
        "name": "VirtualBox",
        "description": 'VirtualBox doesn\'t support nested virtualization, this means running Qemu based VM could be very slow.<br>The GNS3 VM can be <a href="{}">downloaded here</a>'.format(virtualbox_url),
        "support_when_exit": True,
        "support_headless": True,
        "support_ram": True,
    }
    remote_informations = {
        "engine_id": "remote",
        "name": "Remote",
        "description": "Use a remote GNS3 server as the GNS3 VM.",
        "support_when_exit": False,
        "support_headless": False,
        "support_ram": False,
    }
    return [vmware_informations, virtualbox_informations, remote_informations]
def set_organization(self, organization):
    # type: (Union[hdx.data.organization.Organization, Dict, str]) -> None
    """Set the dataset's organization.

    Args:
        organization (Union[Organization, Dict, str]): Either an Organization
            id or Organization metadata from an Organization object or
            dictionary.

    Returns:
        None
    """
    if isinstance(organization, (hdx.data.organization.Organization, dict)):
        # Objects/dicts without an id are resolved by name via HDX.
        if 'id' not in organization:
            organization = hdx.data.organization.Organization.read_from_hdx(organization['name'], configuration=self.configuration)
        organization = organization['id']
    elif not isinstance(organization, str):
        raise HDXError('Type %s cannot be added as a organization!' % type(organization).__name__)
    # 'hdx' is the one non-UUID organization id that is accepted.
    if is_valid_uuid(organization) is False and organization != 'hdx':
        raise HDXError('%s is not a valid organization id!' % organization)
    self.data['owner_org'] = organization
def to_python(self) -> typing.Dict:
    """Get object as JSON serializable.

    :return: dict keyed by aliased property names
    """
    self.clean()
    serialized = {}
    for name, value in self.values.items():
        # Declared props know how to export their own value.
        if name in self.props:
            value = self.props[name].export(self)
        if isinstance(value, TelegramObject):
            value = value.to_python()
        if isinstance(value, LazyProxy):
            value = str(value)
        key = self.props_aliases.get(name, name)
        serialized[key] = value
    return serialized
def comments_between_tokens(token1, token2):
    """Find all comments between two tokens.

    Yields a Comment object for each '#' found in the buffer between the
    end of token1 and the start of token2 (or to the end of the buffer
    when token2 is None). Consecutive comments are linked through the
    comment_before argument.
    """
    if token2 is None:
        # No following token: scan to the end of the buffer.
        buf = token1.end_mark.buffer[token1.end_mark.pointer:]
    elif (token1.end_mark.line == token2.start_mark.line and not isinstance(token1, yaml.StreamStartToken) and not isinstance(token2, yaml.StreamEndToken)):
        # Both tokens on the same line: nothing between them is a comment.
        return
    else:
        buf = token1.end_mark.buffer[token1.end_mark.pointer:token2.start_mark.pointer]
    # Track position in the original buffer while scanning the slice.
    line_no = token1.end_mark.line + 1
    column_no = token1.end_mark.column + 1
    pointer = token1.end_mark.pointer
    comment_before = None
    for line in buf.split('\n'):
        pos = line.find('#')
        if pos != -1:
            comment = Comment(line_no, column_no + pos, token1.end_mark.buffer, pointer + pos, token1, token2, comment_before)
            yield comment
            # Remember this comment so the next one can be chained to it.
            comment_before = comment
        pointer += len(line) + 1  # +1 for the newline split() removed
        line_no += 1
        column_no = 1  # lines after the first always start at column 1
def get(self, reference, country, target=datetime.date.today()):
    """Get the inflation/deflation value change for the target date based
    on the reference date. Target defaults to today and the instance's
    reference and country will be used if they are not provided as
    parameters.

    NOTE(review): the default `datetime.date.today()` is evaluated once at
    definition time, so `target` is the date the module was loaded, not the
    call date; kept unchanged for interface compatibility.
    """
    # Set country & reference to object's country & reference respectively
    reference = self.reference if reference is None else reference
    # FIX: the docstring promises the same fallback for `country`, but the
    # original code only defaulted `reference`.
    country = self.country if country is None else country
    # Get the reference and target indices (values) from the source
    reference_value = self.data.get(reference, country).value
    target_value = self.data.get(target, country).value
    # Compute the inflation value and return it
    return self._compute_inflation(target_value, reference_value)
def _plot_neuron3d(neuron, inline, **kwargs):
    '''Generate a 3D figure of the neuron, which contains a soma and a list
    of trees.'''
    # Delegate to the generic plotly backend with the 3d plane preset.
    return _plotly(neuron, plane='3d', title='neuron-3D', inline=inline, **kwargs)
def set_led(self, led, value):
    """Sets specified LED (value of 0 to 127) to the specified value, 0/False
    for off and 1 (or any True/non-zero value) for on."""
    if not 0 <= led <= 127:
        raise ValueError('LED must be value of 0 to 127.')
    # Locate the byte holding this LED and the bit within that byte.
    byte_index, bit = divmod(led, 8)
    mask = 1 << bit
    if value:
        # Turn on the specified LED (set bit to one).
        self.buffer[byte_index] |= mask
    else:
        # Turn off the specified LED (set bit to zero).
        self.buffer[byte_index] &= ~mask
def version():
    """Flask-AppBuilder package version"""
    message = "F.A.B Version: {0}.".format(current_app.appbuilder.version)
    click.echo(click.style(message, bg="blue", fg="white"))
def retrieve(self, id):
    """Retrieve a single source.

    Returns a single source available to the user by the provided id.
    If a source with the supplied unique identifier does not exist it
    returns an error.

    :calls: ``get /sources/{id}``
    :param int id: Unique identifier of a Source.
    :return: Dictionary that support attribute-style access and represent
        Source resource.
    :rtype: dict
    """
    # The client returns a (status, headers, body) triple; only the body
    # is of interest here.
    status, headers, source = self.http_client.get("/sources/{id}".format(id=id))
    return source
def siblings(self, as_resources=False):
    '''method to return hierarchical siblings of this resource.

    Args:
        as_resources (bool): if True, opens each as appropriate resource
            type instead of return URI only

    Returns:
        (list): list of resources
    '''
    siblings = set()
    # loop through parents and collect all of their children
    for parent in self.parents(as_resources=True):
        for sibling in parent.children(as_resources=as_resources):
            siblings.add(sibling)
    # Remove self from the result. FIX: use discard() instead of remove()
    # so a KeyError is not raised when self (or its URI) is not present in
    # the collected children.
    if as_resources:
        siblings.discard(self)
    else:
        siblings.discard(self.uri)
    return list(siblings)
def computeMatchProbabilityOmega(k, bMax, theta, nTrials=100):
    """The Omega match probability estimates the probability of matching when
    both vectors have exactly b components in common. This function computes
    this probability for b = 1 to bMax.

    For each value of b this function:
    1) Creates nTrials instances of Xw(b) which are vectors with b components
       where each component is uniform in [-1/k, 1/k].
    2) Creates nTrials instances of Xi(b) which are vectors with b components
       where each component is uniform in [0, 2/k].
    3) Does every possible dot product of Xw(b) dot Xi(b), i.e.
       nTrials*nTrials dot products.
    4) Counts the fraction of cases where Xw(b) dot Xi(b) >= theta.

    Returns an array with bMax+1 entries (index 0 is unused and stays 0),
    where entry b contains the probability computed in 4).
    """
    omegaProb = np.zeros(bMax + 1)
    for b in range(1, bMax + 1):
        # Random vectors for steps 1) and 2) above.
        xwb = getSparseTensor(b, b, nTrials, fixedRange=1.0 / k)
        xib = getSparseTensor(b, b, nTrials, onlyPositive=True, fixedRange=2.0 / k)
        # All pairwise dot products at once: an (nTrials x nTrials) matrix.
        r = xwb.matmul(xib.t())
        numMatches = ((r >= theta).sum()).item()
        omegaProb[b] = numMatches / float(nTrials * nTrials)
    print(omegaProb)
    return omegaProb
def get_server_type():
    """Checks server.ini for server type."""
    server_location_file = os.path.expanduser(SERVER_LOCATION_FILE)
    if not os.path.exists(server_location_file):
        raise Exception("%s not found. Please run 'loom server set " "<servertype>' first." % server_location_file)
    # Read the [server] section and return its 'type' entry.
    parser = ConfigParser.SafeConfigParser()
    parser.read(server_location_file)
    return parser.get('server', 'type')
def get_config_template(namespace, method, version):
    """Get the configuration template for a method.

    The method should exist in the methods repository.

    Args:
        namespace (str): Method's namespace
        method (str): method name
        version (int): snapshot_id of the method

    Swagger:
        https://api.firecloud.org/#!/Method_Repository/createMethodTemplate
    """
    payload = {
        "methodNamespace": namespace,
        "methodName": method,
        "methodVersion": int(version),
    }
    return __post("template", json=payload)
def parse_streams(self):
    """Try to parse all input streams from file.

    Runs ffmpeg over the input with a null muxer just to make it print the
    stream descriptions, then scrapes those lines to populate
    self.streams['audio'|'video'|'subtitle'].

    Raises FFmpegNormalizeError when the input has no audio stream.
    """
    logger.debug("Parsing streams of {}".format(self.input_file))
    # Copy all streams to a null sink for 0 seconds: cheap way to get
    # ffmpeg to describe every stream on stderr.
    cmd = [self.ffmpeg_normalize.ffmpeg_exe, '-i', self.input_file, '-c', 'copy', '-t', '0', '-map', '0', '-f', 'null', NUL]
    cmd_runner = CommandRunner(cmd)
    cmd_runner.run_command()
    output = cmd_runner.get_output()
    logger.debug("Stream parsing command output:")
    logger.debug(output)
    output_lines = [line.strip() for line in output.split('\n')]
    for line in output_lines:
        if not line.startswith('Stream'):
            continue
        # Stream index, e.g. "Stream #0:1" -> 1.
        stream_id_match = re.search(r'#0:([\d]+)', line)
        if stream_id_match:
            stream_id = int(stream_id_match.group(1))
            if stream_id in self._stream_ids():
                # Stream already recorded; skip duplicate description lines.
                continue
        else:
            continue
        if 'Audio' in line:
            logger.debug("Found audio stream at index {}".format(stream_id))
            sample_rate_match = re.search(r'(\d+) Hz', line)
            sample_rate = int(sample_rate_match.group(1)) if sample_rate_match else None
            # Bit depth from the sample format token, e.g. "s16p," -> 16.
            bit_depth_match = re.search(r's(\d+)p?,', line)
            bit_depth = int(bit_depth_match.group(1)) if bit_depth_match else None
            self.streams['audio'][stream_id] = AudioStream(self, stream_id, sample_rate, bit_depth)
        elif 'Video' in line:
            logger.debug("Found video stream at index {}".format(stream_id))
            self.streams['video'][stream_id] = VideoStream(self, stream_id)
        elif 'Subtitle' in line:
            logger.debug("Found subtitle stream at index {}".format(stream_id))
            self.streams['subtitle'][stream_id] = SubtitleStream(self, stream_id)
    if not self.streams['audio']:
        raise FFmpegNormalizeError("Input file {} does not contain any audio streams".format(self.input_file))
    # Single-stream output containers can only carry the first audio stream.
    if os.path.splitext(self.output_file)[1].lower() in ['.wav', '.mp3', '.aac']:
        logger.warning("Output file only supports one stream. " "Keeping only first audio stream.")
        first_stream = list(self.streams['audio'].values())[0]
        self.streams['audio'] = {first_stream.stream_id: first_stream}
        self.streams['video'] = {}
        self.streams['subtitle'] = {}
def context_exists(self, name):
    """Check if a given context exists."""
    # True as soon as any stored context carries the requested name.
    return any(context['name'] == name for context in self.data['contexts'])
def KMA(inputfile_1, gene_list, kma_db, out_path, sample_name, min_cov, mapping_path):
    """This function is called when KMA is the method of choice. The
    function calls kma externally and waits for it to finish.

    The kma output files with the prefixes .res and .aln are parsed
    through to obtain the required alignment information. The subject
    and query sequences as well as the start and stop position,
    coverage, and subject length are stored in a results dictionary
    which is returned in the end.

    NOTE(review): the `min_cov` parameter is accepted but unused here;
    kept for interface compatibility.
    """
    # Get full path to input of output files
    inputfile_1 = os.path.abspath(inputfile_1)
    kma_outfile = os.path.abspath(out_path + "/kma_out_" + sample_name)
    kma_cmd = "%s -i %s -t_db %s -o %s -1t1 -gapopen -5 -gapextend -2 -penalty -3 -reward 1" % (mapping_path, inputfile_1, kma_db, kma_outfile)
    # -ID 90
    # Call KMA; retry once if the alignment file was not produced.
    os.system(kma_cmd)
    if not os.path.isfile(kma_outfile + ".aln"):
        os.system(kma_cmd)
    # Fetch kma output files
    align_filename = kma_outfile + ".aln"
    res_filename = kma_outfile + ".res"
    results = dict()
    # Open KMA result file
    with open(res_filename, "r") as res_file:
        header = res_file.readline()
        # Parse through each line
        for line in res_file:
            data = [field.strip() for field in line.split("\t")]
            gene = data[0]
            # Check if gene is one of the user specified genes
            if gene not in gene_list:
                continue
            # Store subject length and coverage
            sbjct_len = int(data[3])
            identity = float(data[6])
            coverage = float(data[7])
            # Result dictionary assumes that more hits can occur
            if gene not in results:
                hit = '1'
                results[gene] = dict()
            # Gene will only be there once with KMA
            else:
                # FIX: was `str(len(results[gene])) + 1`, a str+int TypeError;
                # number the additional hit and then stringify.
                hit = str(len(results[gene]) + 1)
            results[gene][hit] = dict()
            results[gene][hit]['sbjct_length'] = sbjct_len
            results[gene][hit]['coverage'] = coverage / 100
            results[gene][hit]["sbjct_string"] = []
            results[gene][hit]["query_string"] = []
            results[gene][hit]["homology"] = []
            results[gene][hit]['identity'] = identity
    # Open KMA alignment file
    with open(align_filename, "r") as align_file:
        hit_no = dict()
        gene = ""
        # Parse through alignments
        for line in align_file:
            # Check when a new gene alignment starts
            if line.startswith("#"):
                gene = line[1:].strip()
                if gene not in hit_no:
                    hit_no[gene] = str(1)
                else:
                    # FIX: was `hit_no[gene] += str(...)`, which concatenated
                    # the new counter onto the old one ('1' -> '12'); replace
                    # the counter instead.
                    hit_no[gene] = str(int(hit_no[gene]) + 1)
            else:
                # Check if gene is one of the user specified genes
                if gene in results:
                    if hit_no[gene] not in results[gene]:
                        sys.exit("Unexpected database redundency")
                    line_data = line.split("\t")[-1].strip()
                    if line.startswith("template"):
                        results[gene][hit_no[gene]]["sbjct_string"] += [line_data]
                    elif line.startswith("query"):
                        results[gene][hit_no[gene]]["query_string"] += [line_data]
                    else:
                        results[gene][hit_no[gene]]["homology"] += [line_data]
    # Concatenate all sequence lists and find subject start and subject end
    seq_start_search_str = re.compile(r"^-*(\w+)")
    seq_end_search_str = re.compile(r"\w+(-*)$")
    for gene in gene_list:
        if gene in results:
            for hit in results[gene]:
                results[gene][hit]['sbjct_string'] = "".join(results[gene][hit]['sbjct_string'])
                results[gene][hit]['query_string'] = "".join(results[gene][hit]['query_string'])
                results[gene][hit]['homology'] = "".join(results[gene][hit]['homology'])
                # Trim leading/trailing gap columns off the alignment.
                seq_start_object = seq_start_search_str.search(results[gene][hit]['query_string'])
                sbjct_start = seq_start_object.start(1) + 1
                seq_end_object = seq_end_search_str.search(results[gene][hit]['query_string'])
                sbjct_end = seq_end_object.start(1) + 1
                results[gene][hit]['query_string'] = results[gene][hit]['query_string'][sbjct_start - 1:sbjct_end - 1]
                results[gene][hit]['sbjct_string'] = results[gene][hit]['sbjct_string'][sbjct_start - 1:sbjct_end - 1]
                # if sbjct_start:
                results[gene][hit]["sbjct_start"] = sbjct_start
                results[gene][hit]["sbjct_end"] = sbjct_end
        else:
            # Gene not found at all: mark with an empty string.
            results[gene] = ""
    return results
def _open_repo(args, path_key='<path>'):
    """Open and return the repository containing the specified file.

    The file is specified by looking up `path_key` in `args`. This value or
    `None` is passed to `open_repository`.

    Returns: A `Repository` instance.

    Raises:
        ExitError: If there is a problem opening the repo.
    """
    raw_path = args[path_key]
    path = pathlib.Path(raw_path) if raw_path else None
    try:
        return open_repository(path)
    except ValueError as exc:
        # Translate the low-level failure into the CLI's exit protocol.
        raise ExitError(ExitCode.DATA_ERR, str(exc))
def a2b_hashed_base58(s):
    """If the passed string is hashed_base58, return the binary data.
    Otherwise raises an EncodingError."""
    decoded = a2b_base58(s)
    # The last four bytes are the checksum over the payload.
    payload, checksum = decoded[:-4], decoded[-4:]
    if double_sha256(payload)[:4] != checksum:
        raise EncodingError("hashed base58 has bad checksum %s" % s)
    return payload
def agenerator():
    """Allocate roughly half of available memory and report the effect.

    Creates two MemEater instances sized at ~24% and ~26% of the currently
    available virtual memory, waits 5 seconds, then returns a tuple of
    (available MB before allocation, available MB after allocation).

    NOTE(review): the original docstring ("Arandom number generator") did
    not match this code, which performs no random-number generation —
    confirm intended purpose with the author.
    """
    free_mem = psutil.virtual_memory().available
    mem_24 = 0.24 * free_mem
    mem_26 = 0.26 * free_mem
    # Keep references alive so the memory stays allocated through the sleep.
    a = MemEater(int(mem_24))
    b = MemEater(int(mem_26))
    sleep(5)
    return free_mem / 1000 / 1000, psutil.virtual_memory().available / 1000 / 1000
def __uncache(self, file):
    """Uncaches given file.

    :param file: File to uncache.
    :type file: unicode
    """
    cache = self.__files_cache
    # Only evict entries that are actually cached.
    if file in cache:
        cache.remove_content(file)
def get_construction_table(self, fragment_list=None, use_lookup=None, perform_checks=True):
    """Create a construction table for a Zmatrix.

    A construction table is basically a Zmatrix without the values for the
    bond lengths, angles and dihedrals. It contains the whole information
    about which reference atoms are used by each atom in the Zmatrix.
    The absolute references in cartesian space are one of the following
    magic strings: ``['origin', 'e_x', 'e_y', 'e_z']``.

    This method creates a so called "chemical" construction table, which
    makes use of the connectivity table in this molecule.

    Args:
        fragment_list (sequence): There are four possibilities to specify
            the sequence of fragments:

            1. A list of tuples is given. Each tuple contains the fragment
               with its corresponding construction table in the form of::

                   [(frag1, c_table1), (frag2, c_table2)...]

               If the construction table of a fragment is not complete,
               the rest of each fragment's construction table is
               calculated automatically.
            2. It is possible to omit the construction tables for some
               or all fragments as in the following example::

                   [(frag1, c_table1), frag2, (frag3, c_table3)...]

            3. If ``self`` contains more atoms than the union over all
               fragments, the rest of the molecule without the fragments
               is automatically prepended using
               :meth:`~Cartesian.get_without`::

                   self.get_without(fragments) + fragment_list

            4. If fragment_list is ``None`` then fragmentation, etc.
               is done automatically. The fragments are then sorted by
               their number of atoms, in order to use the largest fragment
               as reference for the other ones.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. The default is
            specified in ``settings['defaults']['use_lookup']``
        perform_checks (bool): The checks for invalid references are
            performed using :meth:`~chemcoord.Cartesian.correct_dihedral`
            and :meth:`~chemcoord.Cartesian.correct_absolute_refs`.

    Returns:
        :class:`pandas.DataFrame`: Construction table
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']
    if fragment_list is None:
        # Automatic fragmentation: warm the bond caches, then sort the
        # fragments by size so the largest one serves as the reference.
        self.get_bonds(use_lookup=use_lookup)
        self._give_val_sorted_bond_dict(use_lookup=use_lookup)
        fragments = sorted(self.fragmentate(use_lookup=use_lookup), key=len, reverse=True)
        # During function execution the bonding situation does not change,
        # so the lookup may be used now.
        use_lookup = True
    else:
        fragments = fragment_list

    def prepend_missing_parts_of_molecule(fragment_list):
        # Union of all fragment indices; NameError marks the very first
        # iteration, where full_index is not yet bound.
        for fragment in fragment_list:
            if pd.api.types.is_list_like(fragment):
                try:
                    full_index |= fragment[0].index
                except NameError:
                    full_index = fragment[0].index
            else:
                try:
                    full_index |= fragment.index
                except NameError:
                    full_index = fragment.index
        if not self.index.difference(full_index).empty:
            # Atoms not covered by any fragment are prepended as one piece.
            missing_part = self.get_without(self.loc[full_index], use_lookup=use_lookup)
            fragment_list = missing_part + fragment_list
        return fragment_list

    fragments = prepend_missing_parts_of_molecule(fragments)
    # Build the table for the first (reference) fragment.
    if isinstance(fragments[0], tuple):
        fragment, references = fragments[0]
        full_table = fragment._get_frag_constr_table(use_lookup=use_lookup, predefined_table=references)
    else:
        fragment = fragments[0]
        full_table = fragment._get_frag_constr_table(use_lookup=use_lookup)
    # Attach every remaining fragment to the part built so far.
    for fragment in fragments[1:]:
        finished_part = self.loc[full_table.index]
        if pd.api.types.is_list_like(fragment):
            fragment, references = fragment
            if len(references) < min(3, len(fragment)):
                raise ValueError('If you specify references for a ' 'fragment, it has to consist of at least' 'min(3, len(fragment)) rows.')
            constr_table = fragment._get_frag_constr_table(predefined_table=references, use_lookup=use_lookup)
        else:
            # Anchor the new fragment at its atom closest to the finished
            # part; b is the nearest atom already in full_table.
            i, b = fragment.get_shortest_distance(finished_part)[:2]
            constr_table = fragment._get_frag_constr_table(start_atom=i, use_lookup=use_lookup)
            # Choose angle (a) and dihedral (d) references depending on how
            # many atoms the finished part already provides.
            if len(full_table) == 1:
                a, d = 'e_z', 'e_x'
            elif len(full_table) == 2:
                if b == full_table.index[0]:
                    a = full_table.index[1]
                else:
                    a = full_table.index[0]
                d = 'e_x'
            else:
                if b in full_table.index[:2]:
                    if b == full_table.index[0]:
                        a = full_table.index[2]
                        d = full_table.index[1]
                    else:
                        a = full_table.loc[b, 'b']
                        d = full_table.index[2]
                else:
                    a, d = full_table.loc[b, ['b', 'a']]
            # Wire the first three rows of the fragment's table into the
            # already-built part of the molecule.
            if len(constr_table) >= 1:
                constr_table.iloc[0, :] = b, a, d
            if len(constr_table) >= 2:
                constr_table.iloc[1, [1, 2]] = b, a
            if len(constr_table) >= 3:
                constr_table.iloc[2, 2] = b
        full_table = pd.concat([full_table, constr_table])
    c_table = full_table
    if perform_checks:
        # NOTE(review): correct_dihedral is called twice, once without and
        # once with use_lookup — confirm whether both passes are intended.
        c_table = self.correct_dihedral(c_table)
        c_table = self.correct_dihedral(c_table, use_lookup=use_lookup)
        c_table = self.correct_absolute_refs(c_table)
    return c_table
def geocode(query):
    """Geocode a query string to (lat, lon) with the Nominatim geocoder.

    Parameters
    ----------
    query : string
        the query string to geocode

    Returns
    -------
    point : tuple
        the (lat, lon) coordinates returned by the geocoder

    Raises
    ------
    Exception
        if the geocoder returns no results for the query
    """
    # send the query to the nominatim geocoder and parse the json response.
    # FIX: pass the query through `params` so requests URL-encodes it; the
    # original interpolated the raw string into the URL, which produces a
    # malformed request for queries containing characters such as '&' or '#'.
    url = 'https://nominatim.openstreetmap.org/search'
    params = {'format': 'json', 'limit': 1, 'q': query}
    response = requests.get(url, params=params, timeout=60)
    results = response.json()
    # if results were returned, parse lat and long out of the result
    if len(results) > 0 and 'lat' in results[0] and 'lon' in results[0]:
        lat = float(results[0]['lat'])
        lon = float(results[0]['lon'])
        point = (lat, lon)
        log('Geocoded "{}" to {}'.format(query, point))
        return point
    else:
        raise Exception('Nominatim geocoder returned no results for query "{}"'.format(query))
def interfaces(self):
    """list[dict]: A list of dictionary items describing the operational
    state of interfaces.

    This method currently only lists the Physical Interfaces (
    Gigabitethernet, tengigabitethernet, fortygigabitethernet,
    hundredgigabitethernet) and Loopback interfaces. It currently
    excludes VLAN interfaces, FCoE, Port-Channels, Management and Fibre
    Channel ports.
    """
    # XML namespace used by the brocade-interface-ext responses.
    urn = "{urn:brocade.com:mgmt:brocade-interface-ext}"
    int_ns = 'urn:brocade.com:mgmt:brocade-interface-ext'
    result = []
    has_more = ''
    last_interface_name = ''
    last_interface_type = ''
    # Page through interface-detail responses until the device reports
    # there are no more entries ('' means the first request).
    while (has_more == '') or (has_more == 'true'):
        request_interface = self.get_interface_detail_request(last_interface_name, last_interface_type)
        interface_result = self._callback(request_interface, 'get')
        has_more = interface_result.find('%shas-more' % urn).text
        for item in interface_result.findall('%sinterface' % urn):
            interface_type = item.find('%sinterface-type' % urn).text
            interface_name = item.find('%sinterface-name' % urn).text
            # The last interface seen is the paging cursor for the next request.
            last_interface_type = interface_type
            last_interface_name = interface_name
            if "gigabitethernet" in interface_type:
                interface_role = item.find('%sport-role' % urn).text
                if_name = item.find('%sif-name' % urn).text
                interface_state = item.find('%sif-state' % urn).text
                interface_proto_state = item.find('%sline-protocol-state' % urn).text
                interface_mac = item.find('%scurrent-hardware-address' % urn).text
                item_results = {'interface-type': interface_type, 'interface-name': interface_name, 'interface-role': interface_role, 'if-name': if_name, 'interface-state': interface_state, 'interface-proto-state': interface_proto_state, 'interface-mac': interface_mac}
                result.append(item_results)
    # Loopback interfaces. Probably for other non-physical interfaces, too.
    ip_result = []
    request_interface = ET.Element('get-ip-interface', xmlns=int_ns)
    interface_result = self._callback(request_interface, 'get')
    for interface in interface_result.findall('%sinterface' % urn):
        int_type = interface.find('%sinterface-type' % urn).text
        int_name = interface.find('%sinterface-name' % urn).text
        if int_type == 'unknown':
            continue
        int_state = interface.find('%sif-state' % urn).text
        int_proto_state = interface.find('%sline-protocol-state' % urn).text
        ip_address = interface.find('.//%sipv4' % urn).text
        results = {'interface-type': int_type, 'interface-name': int_name, 'interface-role': None, 'if-name': None, 'interface-state': int_state, 'interface-proto-state': int_proto_state, 'interface-mac': None, 'ip-address': ip_address}
        # Merge in any detail collected for this interface during the
        # physical-interface pass above.
        x = next((x for x in result if int_type == x['interface-type'] and int_name == x['interface-name']), None)
        if x is not None:
            results.update(x)
        ip_result.append(results)
    return ip_result
def etag(self, etag):
    """Sets the etag of this BulkResponse.

    :param etag: The etag of this BulkResponse.
    :type: str
    :raises ValueError: if ``etag`` is None, or contains characters other
        than ``[A-Za-z0-9]``, or is longer than 256 characters.
    """
    if etag is None:
        raise ValueError("Invalid value for `etag`, must not be `None`")
    # BUG FIX: the original used re.search with an unanchored pattern whose
    # quantifier admits the empty string, so it matched every input and the
    # validation below could never fire. fullmatch enforces the intended
    # "at most 256 alphanumeric characters" constraint.
    if etag is not None and not re.fullmatch('[A-Za-z0-9]{0,256}', etag):
        raise ValueError("Invalid value for `etag`, must be a follow pattern or equal to `/[A-Za-z0-9]{0,256}/`")
    self._etag = etag
def _determine_slot ( self , * args ) :
"""figure out what slot based on command and args""" | if len ( args ) <= 1 :
raise RedisClusterException ( "No way to dispatch this command to Redis Cluster. Missing key." )
command = args [ 0 ]
if command in [ 'EVAL' , 'EVALSHA' ] :
numkeys = args [ 2 ]
keys = args [ 3 : 3 + numkeys ]
slots = { self . connection_pool . nodes . keyslot ( key ) for key in keys }
if len ( slots ) != 1 :
raise RedisClusterException ( "{0} - all keys must map to the same key slot" . format ( command ) )
return slots . pop ( )
key = args [ 1 ]
return self . connection_pool . nodes . keyslot ( key ) |
def addEvent(self, event, fd, action):
    """Register a new win32 event with the event loop.

    The event is mapped to the ``(fd, action)`` pair that should be
    serviced when it fires.
    """
    self._events.update({event: (fd, action)})
def updateData(self, exten, data):
    """Write out updated data and header to the original input file
    for this object.

    :param exten: extension specifier (name or number) understood by
        ``_interpretExten``.
    :param data: array to store as that extension's ``data``.
    """
    _extnum = self._interpretExten(exten)
    fimg = fileutil.openImage(self._filename, mode='update', memmap=False)
    try:
        fimg[_extnum].data = data
        # Keep the on-disk header in sync with the in-memory one.
        fimg[_extnum].header = self._image[_extnum].header
    finally:
        # BUG FIX: close the file even when one of the assignments above
        # raises, so the handle is never leaked.
        fimg.close()
def _get_vsan_eligible_disks(service_instance, host, host_names):
    '''Helper function that returns a dictionary of host_name keys with either a list
    of eligible disks that can be added to VSAN, or an 'Error' message, or a message
    saying no eligible disks were found. Possible keys/values look like:

    return = {'host_1': {'Error': 'VSAN System Config Manager is unset ...'},
              'host_2': {'Eligible': 'The host xxx does not have any VSAN eligible disks.'},
              'host_3': {'Eligible': [disk1, disk2, disk3, disk4]},
              'host_4': {'Eligible': []}}
    '''
    response = {}
    for name in host_names:
        # Get VSAN System Config Manager, if available.
        host_ref = _get_host_ref(service_instance, host, host_name=name)
        vsan_system = host_ref.configManager.vsanSystem
        if vsan_system is None:
            msg = 'VSAN System Config Manager is unset for host \'{0}\'. ' \
                  'VSAN configuration cannot be changed without a configured ' \
                  'VSAN System.'.format(name)
            log.debug(msg)
            response[name] = {'Error': msg}
            continue
        # Collect every disk the VSAN system reports as eligible on this host.
        eligible = [item for item in vsan_system.QueryDisksForVsan()
                    if item.state == 'eligible']
        if not eligible:
            # Not an error: the state may run repeatedly after all eligible
            # disks have already been added.
            msg = 'The host \'{0}\' does not have any VSAN eligible disks.'.format(name)
            log.warning(msg)
            response[name] = {'Eligible': msg}
            continue
        # All disks attached to the host, SSD and non-SSD alike.
        all_disks = _get_host_ssds(host_ref) + _get_host_non_ssds(host_ref)
        # Keep the host disks whose canonical name matches an eligible disk.
        matching = [disk for disk in all_disks
                    for candidate in eligible
                    if disk.canonicalName == candidate.disk.canonicalName]
        response[name] = {'Eligible': matching}
    return response
def table_from_root(source, treename=None, columns=None, **kwargs):
    """Read a Table from a ROOT tree.

    :param source: path of the ROOT file, or an open file object (its
        ``name`` attribute is used).
    :param treename: name of the tree to read; may be omitted only when the
        file contains exactly one tree.
    :param columns: branches to read (forwarded to root_numpy as ``branches``).
    :param kwargs: extra keyword arguments for ``root_numpy.root2array``;
        a ``selection`` string is parsed into column filters first.
    :returns: the (optionally filtered) ``Table``.
    :raises ValueError: if no tree, or more than one tree, is found and
        ``treename`` was not given.
    """
    import root_numpy
    # parse column filters into tree2array ``selection`` keyword
    # NOTE: not all filters can be passed directly to root_numpy, so we store
    # those separately and apply them after-the-fact before returning
    try:
        selection = kwargs.pop('selection')
    except KeyError:  # no filters
        filters = None
    else:
        rootfilters = []
        filters = []
        for col, op_, value in parse_column_filters(selection):
            try:
                opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0]
            except (IndexError, KeyError):  # cannot filter with root_numpy
                filters.append((col, op_, value))
            else:  # can filter with root_numpy
                rootfilters.append('{0} {1} {2!r}'.format(col, opstr, value))
        kwargs['selection'] = ' && '.join(rootfilters)
    # pass file name (not path)
    if not isinstance(source, string_types):
        source = source.name
    # find single tree (if only one tree present)
    if treename is None:
        trees = root_numpy.list_trees(source)
        if len(trees) == 1:
            treename = trees[0]
        elif not trees:
            raise ValueError("No trees found in %s" % source)
        else:
            # BUG FIX: message previously read "please select on via".
            raise ValueError("Multiple trees found in %s, please select one "
                             "via the `treename` keyword argument, e.g. "
                             "`treename='events'`. Available trees are: %s."
                             % (source, ', '.join(map(repr, trees))))
    # read, filter, and return
    t = Table(root_numpy.root2array(source, treename, branches=columns, **kwargs))
    if filters:
        return filter_table(t, *filters)
    return t
def prepare_native_return_state(native_state):
    """Hook target for native function call returns.

    Recovers and stores the return value from native memory and toggles the
    state, s.t. execution continues in the Soot engine.

    :param native_state: state at the point where the native call returns.
    :returns: list containing the single successor (Soot-side) state.
    """
    javavm_simos = native_state.project.simos
    ret_state = native_state.copy()
    # set successor flags so the engine treats this as a function return
    ret_state.regs._ip = ret_state.callstack.ret_addr
    ret_state.scratch.guard = ret_state.solver.true
    ret_state.history.jumpkind = 'Ijk_Ret'
    # if available, lookup the return value in native memory
    ret_var = ret_state.callstack.invoke_return_variable
    if ret_var is not None:
        # get return symbol from native state, via the native calling convention
        native_cc = javavm_simos.get_native_cc()
        ret_symbol = native_cc.get_return_val(native_state).to_claripy()
        # convert value to java type
        if ret_var.type in ArchSoot.primitive_types:
            # return value has a primitive type
            # => we need to manually cast the return value to the correct size,
            #    as this would be usually done by the java callee
            ret_value = javavm_simos.cast_primitive(ret_state, ret_symbol,
                                                    to_type=ret_var.type)
        else:
            # return value has a reference type
            # => ret_symbol is an opaque ref
            # => lookup corresponding java reference
            ret_value = ret_state.jni_references.lookup(ret_symbol)
    else:
        # void native call: nothing to recover
        ret_value = None
    # teardown return state
    SimEngineSoot.prepare_return_state(ret_state, ret_value)
    # finally, delete all local references
    ret_state.jni_references.clear_local_references()
    return [ret_state]
def eval_in_system_namespace(self, exec_str):
    """Get Callable for specified string (for GUI-based editing).

    Returns the evaluated object, or None (after logging a warning) when
    evaluation fails.

    SECURITY NOTE: this evaluates arbitrary Python in ``self.cmd_namespace``;
    only ever feed it trusted, operator-supplied strings.
    """
    namespace = self.cmd_namespace
    try:
        return eval(exec_str, namespace)
    except Exception as exc:
        self.logger.warning('Could not execute %s, gave error %s', exec_str, exc)
        return None
def update(self, ip_address=values.unset, friendly_name=values.unset, cidr_prefix_length=values.unset):
    """Update the IpAddressInstance

    :param unicode ip_address: An IP address in dotted decimal notation from which you want to accept traffic. Any SIP requests from this IP address will be allowed by Twilio. IPv4 only supported today.
    :param unicode friendly_name: A human readable descriptive text for this resource, up to 64 characters long.
    :param unicode cidr_prefix_length: An integer representing the length of the CIDR prefix to use with this IP address when accepting traffic. By default the entire IP address is used.

    :returns: Updated IpAddressInstance
    :rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressInstance
    """
    # Delegate the update to the context proxy with the same arguments.
    params = {
        'ip_address': ip_address,
        'friendly_name': friendly_name,
        'cidr_prefix_length': cidr_prefix_length,
    }
    return self._proxy.update(**params)
def extract_headers(lines, max_wrap_lines):
    """Extracts email headers from the given lines. Returns a dict with the
    detected headers and the amount of lines that were processed.

    :param lines: sequence of message lines, headers first.
    :param max_wrap_lines: maximum number of continuation lines one wrapped
        header value may span before scanning stops.
    :returns: tuple ``(hdrs, lines_processed)``.
    """
    hdrs = {}
    header_name = None
    # Track overlong headers that extend over multiple lines
    extend_lines = 0
    lines_processed = 0
    for n, line in enumerate(lines):
        if not line.strip():
            # Blank line terminates any in-progress wrapped header; scanning
            # continues because headers may resume afterwards.
            header_name = None
            continue
        match = HEADER_RE.match(line)
        if match:
            header_name, header_value = match.groups()
            header_name = header_name.strip().lower()
            extend_lines = 0
            # Only headers we know about (HEADER_MAP) are recorded.
            if header_name in HEADER_MAP:
                hdrs[HEADER_MAP[header_name]] = header_value.strip()
                lines_processed = n + 1
        else:
            extend_lines += 1
            if extend_lines < max_wrap_lines and header_name in HEADER_MAP:
                # Continuation of the previous known header: fold the wrapped
                # fragment into its stored value.
                hdrs[HEADER_MAP[header_name]] = join_wrapped_lines(
                    [hdrs[HEADER_MAP[header_name]], line.strip()])
                lines_processed = n + 1
            else:
                # no more headers found
                break
    return hdrs, lines_processed
def readline(self, fmt=None):
    """Return next unformatted "line". If format is given, unpack content,
    otherwise return byte string.

    :param fmt: optional struct format string (without the byte-order
        prefix; ``self.endian`` is prepended). A ``*`` in the format is
        expanded to match the record size by ``_replace_star``.
    :returns: raw bytes, or the unpacked tuple when *fmt* is given.
    :raises IOError: if the record's leading and trailing size markers
        disagree (corrupt file or wrong endianness).
    """
    # Each unformatted record is framed by a size prefix and suffix.
    prefix_size = self._fix()
    if fmt is None:
        content = self.read(prefix_size)
    else:
        fmt = self.endian + fmt
        fmt = _replace_star(fmt, prefix_size)
        content = struct.unpack(fmt, self.read(prefix_size))
    try:
        suffix_size = self._fix()
    except EOFError:
        # when endian is invalid and prefix_size > total file size
        suffix_size = -1
    if prefix_size != suffix_size:
        raise IOError(_FIX_ERROR)
    return content
def execute(self):
    """Run all child tasks concurrently in separate threads.

    Return last result after all child tasks have completed execution.
    If any child task terminated with an exception, the first exception
    found in the results is re-raised here instead.
    """
    with self._lock_c:
        # Reset bookkeeping for this run.
        self.count = 0
        self.numtasks = 0
        self.taskset = []
        self.results = {}
        self.totaltime = time.time()
        # Start all tasks
        for task in self.taskseq:
            self.taskset.append(task)
            self.numtasks += 1
            task.init_and_start(self)
    num_tasks = self.getNumTasks()
    # Wait on each task to clean up results
    while num_tasks > 0:
        self.check_state()
        for i in range(num_tasks):
            try:
                try:
                    task = self.getTask(i)
                except IndexError:
                    # A task got deleted from the set. Jump back out
                    # to outer loop and repoll the number of tasks
                    break
                res = task.wait(timeout=self.idletime)
                self.child_done(res, task)
            except TaskTimeout:
                continue
            except Exception as e:
                # Subtask propagated an exception: record it as that
                # task's result so it is re-raised below.
                self.child_done(e, task)
                continue
        # re-get number of tasks, in case some were added or deleted
        num_tasks = self.getNumTasks()
    # Scan results for errors (exceptions) and raise the first one we find.
    # BUG FIX: initialize `value` so an empty task sequence returns None
    # instead of raising NameError at the final `return value`.
    value = None
    for key in self.results.keys():
        value = self.results[key]
        if isinstance(value, Exception):
            (count, task) = key
            self.logger.error("Child task %s terminated with exception: %s" % (task.tag, str(value)))
            raise value
    # Return value of last child to complete
    return value
def _validateIterCommonParams ( MaxObjectCount , OperationTimeout ) :
"""Validate common parameters for an iter . . . operation .
MaxObjectCount must be a positive non - zero integer or None .
OperationTimeout must be positive integer or zero
Raises :
ValueError : if these parameters are invalid""" | if MaxObjectCount is None or MaxObjectCount <= 0 :
raise ValueError ( _format ( "MaxObjectCount must be > 0 but is {0}" , MaxObjectCount ) )
if OperationTimeout is not None and OperationTimeout < 0 :
raise ValueError ( _format ( "OperationTimeout must be >= 0 but is {0}" , OperationTimeout ) ) |
def has_same_sumformula(self, other):
    """Determines if ``other`` has the same sumformula.

    Args:
        other (molecule):

    Returns:
        bool: True when both molecules contain identical counts for every
        element type, False otherwise.
    """
    # BUG FIX: compare over the union of element symbols of both molecules.
    # The original iterated only over this molecule's element set, so an
    # ``other`` containing *additional* element types could wrongly compare
    # equal.
    for atom in set(self['atom']) | set(other['atom']):
        own_atom_number = len(self[self['atom'] == atom])
        other_atom_number = len(other[other['atom'] == atom])
        if own_atom_number != other_atom_number:
            return False
    return True
def top_stories(self, raw=False, limit=None):
    """Returns list of item ids of current top stories

    Args:
        limit (int): specifies the number of stories to be returned.
        raw (bool): Flag to indicate whether to represent all
            objects in raw json.

    Returns:
        `list` object containing ids of top stories.
    """
    stories = self._get_stories('topstories', limit)
    if raw:
        # Unwrap each Item object into its raw json payload.
        return [story.raw for story in stories]
    return stories
def _get_stories ( self , page , limit ) :
"""Hacker News has different categories ( i . e . stories ) like
' topstories ' , ' newstories ' , ' askstories ' , ' showstories ' , ' jobstories ' .
This method , first fetches the relevant story ids of that category
The URL is : https : / / hacker - news . firebaseio . com / v0 / < story _ name > . json
e . g . https : / / hacker - news . firebaseio . com / v0 / topstories . json
Then , asynchronously it fetches each story and returns the Item objects
The URL for individual story is :
https : / / hacker - news . firebaseio . com / v0 / item / < item _ id > . json
e . g . https : / / hacker - news . firebaseio . com / v0 / item / 69696969 . json""" | url = urljoin ( self . base_url , f"{page}.json" )
story_ids = self . _get_sync ( url ) [ : limit ]
return self . get_items_by_ids ( item_ids = story_ids ) |
def encode_timeseries_put(self, tsobj):
    '''Returns an Erlang-TTB encoded tuple with the appropriate data and
    metadata from a TsObject.

    :param tsobj: a TsObject
    :type tsobj: TsObject
    :rtype: term-to-binary encoded object
    '''
    if tsobj.columns:
        raise NotImplementedError('columns are not used')
    if not (tsobj.rows and isinstance(tsobj.rows, list)):
        raise RiakError("TsObject requires a list of rows")
    # Encode every cell of every row, each row becoming a tuple.
    encoded_rows = [
        tuple(self.encode_to_ts_cell(cell) for cell in row)
        for row in tsobj.rows
    ]
    request = tsputreq_a, tsobj.table.name, [], encoded_rows
    # Request and response share the same TTB message code.
    return Msg(MSG_CODE_TS_TTB_MSG, encode(request), MSG_CODE_TS_TTB_MSG)
def add_multi_sign_transaction(self, m: int, pub_keys: List[bytes] or List[str], signer: Account):
    """This interface is used to generate an Transaction object which has multi signature.

    :param m: the amount of signers required (the M in M-of-N multi-sig).
    :param pub_keys: a list of public keys (str keys are ascii-encoded to bytes).
    :param signer: an Account object which will sign the transaction.
    :return: None; the signature is appended to ``self.sig_list`` in place.
    """
    # Normalise all public keys to bytes, then sort them into the canonical
    # order used by the multi-sig program.
    for index, pk in enumerate(pub_keys):
        if isinstance(pk, str):
            pub_keys[index] = pk.encode('ascii')
    pub_keys = ProgramBuilder.sort_public_keys(pub_keys)
    tx_hash = self.hash256()
    sig_data = signer.generate_signature(tx_hash)
    if self.sig_list is None or len(self.sig_list) == 0:
        self.sig_list = []
    elif len(self.sig_list) >= TX_MAX_SIG_SIZE:
        raise SDKException(ErrorCode.param_err('the number of transaction signatures should not be over 16'))
    else:
        # If a multi-sig entry for this exact key set already exists, append
        # this signer's signature to it instead of creating a new Sig.
        for i in range(len(self.sig_list)):
            if self.sig_list[i].public_keys == pub_keys:
                # Cannot hold more signatures than there are public keys.
                if len(self.sig_list[i].sig_data) + 1 > len(pub_keys):
                    raise SDKException(ErrorCode.param_err('too more sigData'))
                # The threshold M must match the existing entry.
                if self.sig_list[i].m != m:
                    raise SDKException(ErrorCode.param_err('M error'))
                self.sig_list[i].sig_data.append(sig_data)
                return
    # No existing entry for this key set: create a fresh Sig record.
    sig = Sig(pub_keys, m, [sig_data])
    self.sig_list.append(sig)
def colors(palette):
    """Example endpoint return a list of colors by palette
    This is using docstring for specifications
    tags:
      - colors
    parameters:
      - name: palette
        in: path
        type: string
        enum: ['all', 'rgb', 'cmyk']
        required: true
        default: all
        description: Which palette to filter?
    operationId: get_colors
    consumes:
      - application/json
    produces:
      - application/json
    security:
      colors_auth:
        - 'write:colors'
        - 'read:colors'
    schemes: ['http', 'https']
    deprecated: false
    externalDocs:
      description: Project repository
      url: http://github.com/rochacbruno/flasgger
    definitions:
      Palette:
        type: object
        properties:
          palette_name:
            type: array
            items:
              $ref: '#/definitions/Color'
      Color:
        type: string
    responses:
      200:
        description: A list of colors (may be filtered by palette)
        schema:
          $ref: '#/definitions/Palette'
        examples:
          rgb: ['red', 'green', 'blue']
    """
    # NOTE: the docstring above is parsed (by flasgger) as this endpoint's
    # Swagger/OpenAPI specification, so it is behavioral — edit with care.
    all_colors = {'cmyk': ['cian', 'magenta', 'yellow', 'black'], 'rgb': ['red', 'green', 'blue']}
    if palette == 'all':
        # No filter requested: return every palette.
        result = all_colors
    else:
        # Unknown palette names yield {palette: None} rather than an error.
        result = {palette: all_colors.get(palette)}
    return jsonify(result)
def map(self, func, value_shape=None, dtype=None):
    """Apply an array -> array function to each block."""
    new_values = self.values.map(func, value_shape=value_shape, dtype=dtype)
    wrapped = self._constructor(new_values)
    # Propagate metadata from self, except dtype which may have changed.
    return wrapped.__finalize__(self, noprop=('dtype',))
def batch_means(x, f=lambda y: y, theta=.5, q=.95, burn=0):
    """Return the half-width of the frequentist confidence interval
    (q'th quantile) of the Monte Carlo estimate of E[f(x)], computed with
    the batch-means method.

    :Parameters:
      x : sequence
        Sampled series. Must be a one-dimensional array.
      f : function
        The MCSE of E[f(x)] will be computed.
      theta : float between 0 and 1
        The batch length will be set to len(x)**theta.
      q : float between 0 and 1
        The desired quantile.
      burn : int
        Number of leading samples to discard before analysis.

    :Example:
      >>> batch_means(x, f=lambda x: x**2, theta=.5, q=.95)

    :Reference:
      Flegal, James M. and Haran, Murali and Jones, Galin L. (2007).
      Markov chain Monte Carlo: Can we trust the third significant figure?

    :Note:
      Requires SciPy
    """
    try:
        from scipy import stats
    except ImportError:
        raise ImportError('SciPy must be installed to use batch_means.')
    x = x[burn:]
    n = len(x)
    # Batch size b and number of whole batches a.
    # BUG FIX (Py3 port): `np.int` was removed from NumPy; use int().
    b = int(n ** theta)
    # BUG FIX (Py3 port): `/` is true division on Python 3, which made
    # `a` a float and broke the (x)range call below; use floor division.
    a = n // b
    t_quant = stats.t.isf(1 - q, a - 1)
    # BUG FIX (Py3 port): xrange does not exist on Python 3; use range.
    Y = np.array([np.mean(f(x[i * b:(i + 1) * b])) for i in range(a)])
    sig = b / (a - 1.) * sum((Y - np.mean(f(x))) ** 2)
    return t_quant * sig / np.sqrt(n)
def safe_extract_proto_from_ipfs(ipfs_client, ipfs_hash, protodir):
    """Download a tarball from IPFS and extract it into ``protodir`` safely.

    Tar files might be dangerous (see https://bugs.python.org/issue21109,
    and https://docs.python.org/3/library/tarfile.html, TarFile.extractall
    warning), so we extract only simple files: every member must be a plain
    file at the archive root, and nothing may be overwritten.
    """
    # Fetch the archive bytes; the helper verifies them against ipfs_hash.
    spec_tar = get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash)
    with tarfile.open(fileobj=io.BytesIO(spec_tar)) as f:
        for m in f.getmembers():
            # Reject any entry nested in a directory (also blocks ../ traversal).
            if (os.path.dirname(m.name) != ""):
                raise Exception("tarball has directories. We do not support it.")
            # Reject symlinks, directories, devices, etc.
            if (not m.isfile()):
                raise Exception("tarball contains %s which is not a files" % m.name)
            fullname = os.path.join(protodir, m.name)
            # Never overwrite an existing file.
            if (os.path.exists(fullname)):
                raise Exception("%s already exists." % fullname)
        # now it is safe to call extractall
        f.extractall(protodir)
def create_api(self):
    """Create the REST API.

    Uses the trigger settings' ``api_name`` when present, falling back to
    the application name. Returns the id of the created API.
    """
    api_name = self.trigger_settings.get('api_name', self.app_name)
    created_api = self.client.create_rest_api(name=api_name)
    api_id = created_api['id']
    self.log.info("Successfully created API")
    return api_id
def export_plotter_vtkjs(plotter, filename, compress_arrays=False):
    """Export a plotter's rendering window to the VTKjs format.

    Writes each visible surface dataset (and any textures) into a temporary
    timestamped directory, builds an ``index.json`` scene description, zips
    everything into ``<filename><FILENAME_EXTENSION>``, and removes the
    temporary directory.
    """
    sceneName = os.path.split(filename)[1]
    doCompressArrays = compress_arrays
    # Generate timestamp and use it to make subdirectory within the top level output dir
    timeStamp = time.strftime("%a-%d-%b-%Y-%H-%M-%S")
    root_output_directory = os.path.split(filename)[0]
    output_dir = os.path.join(root_output_directory, timeStamp)
    mkdir_p(output_dir)
    renderers = plotter.ren_win.GetRenderers()
    scDirs = []
    sceneComponents = []
    textureToSave = {}
    for rIdx in range(renderers.GetNumberOfItems()):
        renderer = renderers.GetItemAsObject(rIdx)
        renProps = renderer.GetViewProps()
        for rpIdx in range(renProps.GetNumberOfItems()):
            renProp = renProps.GetItemAsObject(rpIdx)
            if not renProp.GetVisibility():
                continue
            if hasattr(renProp, 'GetMapper') and renProp.GetMapper() is not None:
                mapper = renProp.GetMapper()
                dataObject = mapper.GetInputDataObject(0, 0)
                dataset = None
                if dataObject is None:
                    continue
                if dataObject.IsA('vtkCompositeDataSet'):
                    if dataObject.GetNumberOfBlocks() == 1:
                        dataset = dataObject.GetBlock(0)
                    else:
                        # Merge multi-block data into a single geometry.
                        gf = vtk.vtkCompositeDataGeometryFilter()
                        gf.SetInputData(dataObject)
                        gf.Update()
                        dataset = gf.GetOutput()
                else:
                    dataset = mapper.GetInput()
                if dataset and not isinstance(dataset, (vtk.vtkPolyData, vtk.vtkImageData)):
                    # All data must be PolyData surfaces
                    gf = vtk.vtkGeometryFilter()
                    gf.SetInputData(dataset)
                    gf.Update()
                    dataset = gf.GetOutputDataObject(0)
                if dataset:  # NOTE: vtkImageData does not have points
                    componentName = 'data_%d_%d' % (rIdx, rpIdx)
                    scalarVisibility = mapper.GetScalarVisibility()
                    colorMode = mapper.GetColorMode()
                    scalarMode = mapper.GetScalarMode()
                    lookupTable = mapper.GetLookupTable()
                    dsAttrs = None
                    arrayLocation = ''
                    if scalarVisibility:
                        if scalarMode == 3 or scalarMode == 1:
                            # VTK_SCALAR_MODE_USE_POINT_FIELD_DATA or VTK_SCALAR_MODE_USE_POINT_DATA
                            dsAttrs = dataset.GetPointData()
                            arrayLocation = 'pointData'
                        elif scalarMode == 4 or scalarMode == 2:
                            # VTK_SCALAR_MODE_USE_CELL_FIELD_DATA or VTK_SCALAR_MODE_USE_CELL_DATA
                            dsAttrs = dataset.GetCellData()
                            arrayLocation = 'cellData'
                    colorArray = None
                    dataArray = None
                    if dsAttrs:
                        # Force getting the active array
                        dataArray = dsAttrs.GetArray(0)
                    if dataArray:
                        # component = -1 => let specific instance get scalar from vector before mapping
                        colorArray = lookupTable.MapScalars(dataArray, colorMode, -1)
                        colorArrayName = '__CustomRGBColorArray__'
                        colorArray.SetName(colorArrayName)
                        colorMode = 0
                    else:
                        colorArrayName = ''
                    color_array_info = {'colorArray': colorArray, 'location': arrayLocation}
                    scDirs.append(write_data_set('', dataset, output_dir, color_array_info,
                                                 new_name=componentName, compress=doCompressArrays))
                    # Handle texture if any
                    textureName = None
                    if renProp.GetTexture() and renProp.GetTexture().GetInput():
                        textureData = renProp.GetTexture().GetInput()
                        textureName = 'texture_%d' % get_object_id(textureData)
                        textureToSave[textureName] = textureData
                    representation = renProp.GetProperty().GetRepresentation() if hasattr(renProp, 'GetProperty') else 2
                    colorToUse = renProp.GetProperty().GetDiffuseColor() if hasattr(renProp, 'GetProperty') else [1, 1, 1]
                    if representation == 1:
                        colorToUse = renProp.GetProperty().GetColor() if hasattr(renProp, 'GetProperty') else [1, 1, 1]
                    pointSize = renProp.GetProperty().GetPointSize() if hasattr(renProp, 'GetProperty') else 1.0
                    opacity = renProp.GetProperty().GetOpacity() if hasattr(renProp, 'GetProperty') else 1.0
                    # BUG FIX: the fallback was the undefined name `false`
                    # (a NameError when a prop has no GetProperty); Python's
                    # boolean literal is `False`.
                    edgeVisibility = renProp.GetProperty().GetEdgeVisibility() if hasattr(renProp, 'GetProperty') else False
                    p3dPosition = renProp.GetPosition() if renProp.IsA('vtkProp3D') else [0, 0, 0]
                    p3dScale = renProp.GetScale() if renProp.IsA('vtkProp3D') else [1, 1, 1]
                    p3dOrigin = renProp.GetOrigin() if renProp.IsA('vtkProp3D') else [0, 0, 0]
                    p3dRotateWXYZ = renProp.GetOrientationWXYZ() if renProp.IsA('vtkProp3D') else [0, 0, 0, 0]
                    sceneComponents.append({
                        "name": componentName,
                        "type": "httpDataSetReader",
                        "httpDataSetReader": {"url": componentName},
                        "actor": {
                            "origin": p3dOrigin,
                            "scale": p3dScale,
                            "position": p3dPosition,
                        },
                        "actorRotation": p3dRotateWXYZ,
                        "mapper": {
                            "colorByArrayName": colorArrayName,
                            "colorMode": colorMode,
                            "scalarMode": scalarMode
                        },
                        "property": {
                            "representation": representation,
                            "edgeVisibility": edgeVisibility,
                            "diffuseColor": colorToUse,
                            "pointSize": pointSize,
                            "opacity": opacity
                        },
                        "lookupTable": {
                            "tableRange": lookupTable.GetRange(),
                            "hueRange": lookupTable.GetHueRange() if hasattr(lookupTable, 'GetHueRange') else [0.5, 0]
                        }
                    })
                    if textureName:
                        sceneComponents[-1]['texture'] = textureName
    # Save texture data if any
    for key, val in textureToSave.items():
        write_data_set('', val, output_dir, None, new_name=key, compress=doCompressArrays)
    cameraClippingRange = plotter.camera.GetClippingRange()
    sceneDescription = {
        "fetchGzip": doCompressArrays,
        "background": plotter.background_color,
        "camera": {
            "focalPoint": plotter.camera.GetFocalPoint(),
            "position": plotter.camera.GetPosition(),
            "viewUp": plotter.camera.GetViewUp(),
            "clippingRange": [elt for elt in cameraClippingRange]
        },
        "centerOfRotation": plotter.camera.GetFocalPoint(),
        "scene": sceneComponents
    }
    indexFilePath = os.path.join(output_dir, 'index.json')
    with open(indexFilePath, 'w') as outfile:
        json.dump(sceneDescription, outfile, indent=4)
    # Now zip up the results and get rid of the temp directory
    sceneFileName = os.path.join(root_output_directory, '%s%s' % (sceneName, FILENAME_EXTENSION))
    try:
        # BUG FIX: the original used a bare `except:`; only an absent zlib
        # should downgrade the compression mode.
        import zlib
        compression = zipfile.ZIP_DEFLATED
    except ImportError:
        compression = zipfile.ZIP_STORED
    zf = zipfile.ZipFile(sceneFileName, mode='w')
    try:
        for dirName, subdirList, fileList in os.walk(output_dir):
            for fname in fileList:
                fullPath = os.path.join(dirName, fname)
                relPath = '%s/%s' % (sceneName, os.path.relpath(fullPath, output_dir))
                zf.write(fullPath, arcname=relPath, compress_type=compression)
    finally:
        zf.close()
    shutil.rmtree(output_dir)
    print('Finished exporting dataset to: ', sceneFileName)
def parse_view(query):
    """Parses asql query to view object.

    Args:
        query (str): asql query

    Returns:
        View instance: parsed view.
    """
    # Drop any trailing WHERE clause; only the view definition is parsed.
    lowered = query.lower()
    if 'where' in lowered:
        query = query[:lowered.index('where')]
    # The grammar requires a ';'-terminated statement.
    if not query.endswith(';'):
        query = query.strip() + ';'
    return View(_view_stmt.parseString(query))
def clone(self, choices):
    """Make a copy of this parameter, supply different choices.

    @param choices: A sequence of L{Option} instances.
    @type choices: C{list}
    @rtype: L{ChoiceParameter}
    """
    cls = self.__class__
    return cls(self.name, choices, self.label, self.description,
               self.multiple, self.viewFactory)
def _set_show_firmware_option(self, v, load=False):
    """Setter method for show_firmware_option, mapped from YANG variable /show/show_firmware_dummy/show_firmware_option (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_show_firmware_option is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_show_firmware_option() directly.
    """
    # Auto-generated pyangbind setter: coerce the value into the YANG
    # container type and store it, raising a descriptive error otherwise.
    if hasattr(v, "_utype"):
        # Unwrap an already-typed value back to its base type first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=show_firmware_option.show_firmware_option, is_container='container', presence=False, yang_name="show-firmware-option", rest_name="firmware", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'show firmware', u'alt-name': u'firmware', u'display-when': u'(/local-node/swbd-number = "4000")'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface the generated type description so callers can see the
        # expected container signature.
        raise ValueError({'error-string': """show_firmware_option must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=show_firmware_option.show_firmware_option, is_container='container', presence=False, yang_name="show-firmware-option", rest_name="firmware", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'show firmware', u'alt-name': u'firmware', u'display-when': u'(/local-node/swbd-number = "4000")'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""", })
    self.__show_firmware_option = t
    if hasattr(self, '_set'):
        self._set()
def graph_structure(self, x1x2):
    """Architecture of FlowNetCorr in Figure 2 of FlowNet 1.0.

    Args:
        x1x2: 2CHW — the two input frames stacked along the batch axis.
    """
    with argscope([tf.layers.conv2d],
                  activation=lambda x: tf.nn.leaky_relu(x, 0.1),
                  padding='valid', strides=2, kernel_size=3,
                  data_format='channels_first'), \
         argscope([tf.layers.conv2d_transpose], padding='same',
                  activation=tf.identity, data_format='channels_first',
                  strides=2, kernel_size=4):
        # extract features (both frames processed jointly along the batch axis)
        x = tf.layers.conv2d(pad(x1x2, 3), 64, kernel_size=7, name='conv1')
        conv2 = tf.layers.conv2d(pad(x, 2), 128, kernel_size=5, name='conv2')
        conv3 = tf.layers.conv2d(pad(conv2, 2), 256, kernel_size=5, name='conv3')
        # Split the two frames' feature maps back apart.
        conv2a, _ = tf.split(conv2, 2, axis=0)
        conv3a, conv3b = tf.split(conv3, 2, axis=0)
        # Cost-volume correlation between the two frames' conv3 features.
        corr = correlation(conv3a, conv3b, kernel_size=1, max_displacement=20,
                           stride_1=1, stride_2=2, pad=20, data_format='NCHW')
        corr = tf.nn.leaky_relu(corr, 0.1)
        conv_redir = tf.layers.conv2d(conv3a, 32, kernel_size=1, strides=1, name='conv_redir')
        in_conv3_1 = tf.concat([conv_redir, corr], axis=1, name='in_conv3_1')
        conv3_1 = tf.layers.conv2d(pad(in_conv3_1, 1), 256, name='conv3_1', strides=1)
        x = tf.layers.conv2d(pad(conv3_1, 1), 512, name='conv4')
        conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1)
        x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5')
        conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1)
        x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6')
        conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1)
        # Coarse-to-fine refinement: predict flow at each scale, upsample it,
        # and concatenate with the encoder's skip features.
        flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity)
        flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5')
        x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
        concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5')
        flow5 = tf.layers.conv2d(pad(concat5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity)
        flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4')
        x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
        concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4')
        flow4 = tf.layers.conv2d(pad(concat4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity)
        flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3')
        x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
        concat3 = tf.concat([conv3_1, x, flow4_up], axis=1, name='concat3')
        flow3 = tf.layers.conv2d(pad(concat3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity)
        flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2')
        x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
        concat2 = tf.concat([conv2a, x, flow3_up], axis=1, name='concat2')
        # Final (finest-scale) flow prediction.
        flow2 = tf.layers.conv2d(pad(concat2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)
        return tf.identity(flow2, name='flow2')
def show(self, bAsync=True):
    """Make the window visible.

    @see: L{hide}
    @type  bAsync: bool
    @param bAsync: Perform the request asynchronously.
    @raise WindowsError: An error occured while processing this request.
    """
    # Pick the sync or async Win32 call, then show the window.
    show_window = win32.ShowWindowAsync if bAsync else win32.ShowWindow
    show_window(self.get_handle(), win32.SW_SHOW)
def gt(self, event_property, value):
    """A greater-than filter chain.

    >>> request_time = EventExpression('request', 'elapsed_ms')
    >>> filtered = request_time.gt('elapsed_ms', 500)
    >>> print(filtered)
    request(elapsed_ms).gt(elapsed_ms, 500)
    """
    # Work on a copy so the original expression stays unmodified.
    chained = self.copy()
    chained.filters.append(filters.GT(event_property, value))
    return chained
def retrieve(cat_name, mw_instance='https://en.wikipedia.org', types=('page', 'subcat', 'file'), clean_subcat_names=False):
    """Retrieve pages that belong to a given category.

    Args:
        cat_name: Category name, e.g. 'Category:Presidents_of_the_United_States'.
        mw_instance: Which MediaWiki instance to use (the URL 'origin').
            Defaults to 'https://en.wikipedia.org'.
        types: Which types of pages to retrieve; any iterable of strings.
            Defaults to ``('page', 'subcat', 'file')``.
        clean_subcat_names: If ``True``, removes the e.g. 'Category:' prefix
            of the titles. Defaults to ``False``.

    Returns:
        Array of pages where a page is a dictionary of
        ``{'name': 'some name', 'link': 'some absolute link'}``.

    Raises:
        requests.HTTPError: If the MediaWiki API responds with an error status.
    """
    # The default for ``types`` is a tuple rather than a list so the default
    # argument is immutable (mutable defaults are shared between calls).
    cmtype = f'&cmtype={"|".join(types)}'
    base_url = f'{mw_instance}/w/api.php?action=query&format=json&list=categorymembers&cmtitle={cat_name}&cmlimit=500{cmtype}'
    cont = ''
    result = []
    while True:
        # ``cmcontinue`` pages through result sets larger than cmlimit.
        url = f'{base_url}&cmcontinue={cont}'
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r_json = r.json()
        if 'query' in r_json:
            for item in r_json['query']['categorymembers']:
                title = item['title']
                if clean_subcat_names and ':' in title:
                    # Cut away ':' and everything before it.
                    index_sep = title.index(':')
                    title = title[index_sep + 1:]
                # Spaces need to be converted to underscores in links.
                link = f'{mw_instance}/wiki/{title.replace(" ", "_")}'
                result.append({'name': title, 'link': link})
        if 'continue' not in r_json:
            break
        else:
            cont = r_json['continue']['cmcontinue']
    return result
def formatSI(n) -> str:
    """Format the integer or float n to 3 significant digits + SI prefix.

    Returns a string such as '999 ', '1.00 k', '150 m'.  Negative values
    are prefixed with '-'.
    """
    s = ''
    if n < 0:
        n = -n
        s += '-'
    if type(n) is int and n < 1000:
        # BUG FIX: this branch previously assigned instead of appending,
        # which silently dropped the '-' sign for small negative ints.
        s += str(n) + ' '
    elif n < 1e-22:
        # Below the smallest SI prefix (yocto): render as zero.
        s += '0.00 '
    else:
        assert n < 9.99e26
        log = int(math.floor(math.log10(n)))
        i, j = divmod(log, 3)
        for _try in range(2):
            templ = '%.{}f'.format(2 - j)
            val = templ % (n * 10 ** (-3 * i))
            # Rounding may bump e.g. 999.9 up to '1000'; retry one prefix up.
            if val != '1000':
                break
            i += 1
            j = 0
        s += val + ' '
        if i != 0:
            # Index 8 is the blank (no-prefix) slot in the prefix table.
            s += 'yzafpnum kMGTPEZY'[i + 8]
    return s
def build_api_struct(self):
    """Calls the clean method of the class and returns the info in a
    structure that Atlas API is accepting."""
    self.clean()
    payload = {"type": self.measurement_type}
    # Translate every used option to its v2 API key/value pair.
    payload.update(dict(self.v2_translator(option) for option in self.used_options))
    return payload
def _compute_term_r ( self , C , mag , rrup ) :
"""Compute distance term
d = log10 ( max ( R , rmin ) ) ;""" | if mag > self . M1 :
rrup_min = 0.55
elif mag > self . M2 :
rrup_min = - 2.80 * mag + 14.55
else :
rrup_min = - 0.295 * mag + 2.65
R = np . maximum ( rrup , rrup_min )
return np . log10 ( R ) |
def modify_request(self, http_request=None):
    """Sets HTTP request components based on the URI."""
    request = HttpRequest() if http_request is None else http_request
    if request.uri is None:
        request.uri = Uri()
    # Copy over every URI component that this object defines (falsy
    # components are left untouched, matching truthiness semantics).
    for component in ('scheme', 'port', 'host', 'path'):
        value = getattr(self, component)
        if value:
            setattr(request.uri, component, value)
    if self.query:
        # Copy so later mutations of self.query do not leak into the request.
        request.uri.query = self.query.copy()
    return request
def gen_pdf(rst_content, style_text, header=None, footer=FOOTER):
    """Create PDF file from `rst_content` using `style_text` as style.

    Optionally, add `header` or `footer`.

    Args:
        rst_content (str): Content of the PDF file in restructured text markup.
        style_text (str): Style for the :mod:`rst2pdf` module.
        header (str, default None): Header which will be rendered to each page.
        footer (str, default FOOTER): Footer, which will be rendered to each
            page. See :attr:`FOOTER` for details.

    Returns:
        obj: StringIO file instance containing PDF file.
    """
    output = StringIO()
    # The style has to live in a real file for rst2pdf to be able to read it.
    with NamedTemporaryFile() as style_file:
        style_file.write(style_text)
        style_file.flush()
        renderer = _init_pdf(style_file.name, header, footer)
        # create PDF
        renderer.createPdf(text=rst_content, output=output, compressed=True)
    # rewind file pointer to begin
    output.seek(0)
    return output
def get_times():
    """Produce a deepcopy of the current timing data (no risk of interference
    with active timing or other operations).

    Returns:
        Times: gtimer timing data structure object.
    """
    if f.root.stopped:
        return copy.deepcopy(f.root.times)
    # Timers still running: collapse a snapshot, and keep the time spent
    # collapsing out of the user's own measurements via self_cut.
    start = timer()
    snapshot = collapse.collapse_times()
    f.root.self_cut += timer() - start
    return snapshot
def parse_xml_node(self, node):
    '''Parse an xml.dom Node object representing a message sending object
    into this object.'''
    self._targets = []
    for child in node.getElementsByTagNameNS(RTS_NS, 'targets'):
        # Choose the target type from whichever child element is present.
        if child.getElementsByTagNameNS(RTS_NS, 'WaitTime'):
            target = WaitTime()
        elif child.getElementsByTagNameNS(RTS_NS, 'Preceding'):
            target = Preceding()
        else:
            target = Condition()
        target.parse_xml_node(child)
        self._targets.append(target)
    return self
def argsort2(indexable, key=None, reverse=False):
    """Returns the indices that would sort a indexable object.

    This is similar to np.argsort, but it is written in pure python and works
    on both lists and dictionaries.

    Args:
        indexable (list or dict): indexable to sort by
        key (callable, optional): applied to each value before comparison
        reverse (bool): sort in descending order if True

    Returns:
        list: indices: list of indices such that sorts the indexable

    Example:
        >>> # DISABLE_DOCTEST
        >>> import utool as ut
        >>> # argsort works on dicts
        >>> dict_ = indexable = {'a': 3, 'b': 2, 'c': 100}
        >>> indices = ut.argsort2(indexable)
        >>> assert list(ut.take(dict_, indices)) == sorted(dict_.values())
        >>> # argsort works on lists
        >>> indexable = [100, 2, 432, 10]
        >>> indices = ut.argsort2(indexable)
        >>> assert list(ut.take(indexable, indices)) == sorted(indexable)
        >>> # argsort works on iterators
        >>> indexable = reversed(range(100))
        >>> indices = ut.argsort2(indexable)
        >>> assert indices[0] == 99
    """
    # Pair each value with the index (dict key or position) addressing it.
    if isinstance(indexable, dict):
        pairs = [(value, index) for index, value in indexable.items()]
    else:
        pairs = [(value, index) for index, value in enumerate(indexable)]
    # Sort the pairs by value, then strip values, leaving only the indices.
    sort_key = None if key is None else (lambda pair: key(pair[0]))
    pairs.sort(key=sort_key, reverse=reverse)
    return [index for _, index in pairs]
def export(self):
    """See DiskExportManager.export"""
    with LogTask('Exporting disk {} to {}'.format(self.name, self.dst)):
        with utils.RollbackContext() as rollback:
            # If any step below fails, remove the partially written
            # destination so no broken export is left behind.
            rollback.prependDefer(shutil.rmtree, self.dst, ignore_errors=True)
            self.copy()
            # ISO images are skipped here; sparsifying only applies to the
            # other disk formats.
            if not self.disk['format'] == 'iso':
                self.sparse()
            self.calc_sha('sha1')
            self.update_lago_metadata()
            self.write_lago_metadata()
            self.compress()
            # Everything succeeded - cancel the deferred cleanup so the
            # exported files are kept.
            rollback.clear()
def neverCalledWith(cls, spy, *args, **kwargs):  # pylint: disable=invalid-name
    """Checking the inspector is never called with partial args/kwargs

    Args: SinonSpy, args/kwargs
    """
    cls.__is_spy(spy)
    # Fail if the spy was ever invoked with these (partial) arguments.
    if not spy.neverCalledWith(*args, **kwargs):
        raise cls.failException(cls.message)
def release_filename(self, id_):
    """Release a file name.

    Decrements the reference count for the entry identified by ``id_`` and,
    once no references remain, removes it from both lookup tables.

    Raises ValueError if ``id_`` is not a known filename id.
    """
    entry = self.__entries.get(id_)
    if entry is None:
        raise ValueError("Invalid filename id (%d)" % id_)
    remaining = entry.dec_ref_count()
    if remaining == 0:
        # Last reference released: forget the entry and its name mapping.
        del self.__entries[id_]
        del self.__id_lut[entry.filename]
def _maybe_run_matchers ( self , text , run_matchers ) :
"""OverlayedText should be smart enough to not run twice the same
matchers but this is an extra handle of control over that .""" | if run_matchers is True or ( run_matchers is not False and text not in self . _overlayed_already ) :
text . overlay ( self . matchers )
self . _overlayed_already . append ( text ) |
def _find_family_class(dev):
    """! @brief Search the families list for a matching entry."""
    for family_info in FAMILIES:
        # Only consider entries for this device's vendor.
        if dev.vendor != family_info.vendor:
            continue
        # Scan each level of families.
        for family_name in dev.families:
            for regex in family_info.matches:
                # Require the regex to match the entire family name.
                match = regex.match(family_name)
                if match and match.span() == (0, len(family_name)):
                    return family_info.klass
    # No family matched: fall back to the default target superclass.
    return CoreSightTarget
def files_set_public_or_private(self, request, set_public, files_queryset, folders_queryset):
    """Action which enables or disables permissions for selected files and files in selected folders to clipboard (set them private or public)."""
    if not self.has_change_permission(request):
        raise PermissionDenied
    # Only act on POST; a GET just renders the changelist again.
    if request.method != 'POST':
        return None
    check_files_edit_permissions(request, files_queryset)
    check_folder_edit_permissions(request, folders_queryset)
    # We define it like that so that we can modify it inside the set_files
    # function
    files_count = [0]

    def set_files(files):
        # Flip the flag only where it actually changes, counting the saves.
        for f in files:
            if f.is_public != set_public:
                f.is_public = set_public
                f.save()
                files_count[0] += 1

    def set_folders(folders):
        # Depth-first recursion through the folder tree.
        for f in folders:
            set_files(f.files)
            set_folders(f.children.all())

    set_files(files_queryset)
    set_folders(folders_queryset)
    # NOTE(review): making files public is reported as "disabled permissions"
    # - presumably because public files bypass permission checks; confirm.
    if set_public:
        self.message_user(request, _("Successfully disabled permissions for %(count)d files.") % {"count": files_count[0], })
    else:
        self.message_user(request, _("Successfully enabled permissions for %(count)d files.") % {"count": files_count[0], })
    return None
def normpath(path):
    """Normalize ``path``, collapsing redundant separators and up-level refs."""
    # Split off scheme/netloc, normalize only the path part, then rebuild.
    scheme, netloc, local_path = parse(path)
    return unparse(scheme, netloc, os.path.normpath(local_path))
def _proxy(self):
    """Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the context

    :returns: CredentialContext for this CredentialInstance
    :rtype: twilio.rest.notify.v1.credential.CredentialContext
    """
    # Lazily build the context on first use and memoize it.
    if self._context is None:
        self._context = CredentialContext(self._version, sid=self._solution['sid'])
    return self._context
def get_perm_codename(perm, fail_silently=True):
    """Get permission codename from permission-string.

    Everything up to the first '.' is treated as an app label and stripped.
    Without a '.', the string is returned unchanged unless ``fail_silently``
    is False, in which case the underlying IndexError propagates.

    Examples
    --------
    >>> get_perm_codename('app_label.codename_model')
    'codename_model'
    >>> get_perm_codename('app_label.codename')
    'codename'
    >>> get_perm_codename('codename_model')
    'codename_model'
    >>> get_perm_codename('codename')
    'codename'
    >>> get_perm_codename('app_label.app_label.codename_model')
    'app_label.codename_model'
    """
    try:
        # Drop the app label (everything before the first dot).
        return perm.split('.', 1)[1]
    except IndexError:
        # No dot present: there is no app label to strip.
        if not fail_silently:
            raise
        return perm
def _to_temperature ( self , temperature ) :
"""Step to a given temperature .
: param temperature : Get to this temperature .""" | self . _to_value ( self . _temperature , temperature , self . command_set . temperature_steps , self . _warmer , self . _cooler ) |
def _maybe_numeric_slice ( df , slice_ , include_bool = False ) :
"""want nice defaults for background _ gradient that don ' t break
with non - numeric data . But if slice _ is passed go with that .""" | if slice_ is None :
dtypes = [ np . number ]
if include_bool :
dtypes . append ( bool )
slice_ = IndexSlice [ : , df . select_dtypes ( include = dtypes ) . columns ]
return slice_ |
def tween2(self, val, frm, to):
    """linearly maps val between frm and to to a number between 0 and 1"""
    # Normalize into [0, 1] first, then hand off to the base tween.
    normalized = Mapping.linlin(val, frm, to, 0, 1)
    return self.tween(normalized)
def _lt_from_gt ( self , other ) :
"""Return a < b . Computed by @ total _ ordering from ( not a > b ) and ( a ! = b ) .""" | op_result = self . __gt__ ( other )
if op_result is NotImplemented :
return NotImplemented
return not op_result and self != other |
def validate_keys(self, *keys):
    """Validation helper to ensure that keys are present in data

    This method makes sure that all of keys received here are
    present in the data received from the caller.

    It is better to call this method in the `validate()` method of
    your event. Not in the `clean()` one, since the first will be
    called locally, making it easier to debug things and find
    problems.
    """
    missing = set(keys) - set(self.data.keys())
    if missing:
        raise ValidationError(
            'One of the following keys are missing from the '
            "event's data: {}".format(', '.join(missing)))
    return True
def limit_chord_unlock_tasks(app):
    """Set max_retries for chord.unlock tasks to avoid infinitely looping
    tasks. (see celery/celery#1700 or celery/celery#2725)"""
    unlock_task = app.tasks['celery.chord_unlock']
    # Only override when the task has no explicit retry limit yet.
    if unlock_task.max_retries is None:
        unlock_task.max_retries = getattr(app.conf, 'CHORD_UNLOCK_MAX_RETRIES', None)
def timedelta_to_duration(dt):
    """Return a string according to the DURATION property format
    (RFC 5545 / iCalendar) from a timedelta object.

    Examples: P1W1D, PT1H2M3S, P1DT30S.  A zero-length timedelta yields
    'PT0S'.  Sub-second resolution is ignored; negative timedeltas are
    not supported.
    """
    days, secs = dt.days, dt.seconds
    res = 'P'
    if days // 7:
        res += str(days // 7) + 'W'
        days %= 7
    if days:
        res += str(days) + 'D'
    if secs:
        # 'T' separates the date part from the time part.
        res += 'T'
        if secs // 3600:
            res += str(secs // 3600) + 'H'
            secs %= 3600
        if secs // 60:
            res += str(secs // 60) + 'M'
            secs %= 60
        if secs:
            res += str(secs) + 'S'
    if res == 'P':
        # BUG FIX: a zero timedelta used to produce the bare string 'P',
        # which is not a valid DURATION value per RFC 5545.
        res = 'PT0S'
    return res
def _init_map(self):
    """Initialize the form-record map for every record type this mixin composes."""
    # Initialize each composed record type first, then let the superclass
    # finish the map setup.
    SimpleDifficultyItemFormRecord._init_map(self)
    SourceItemFormRecord._init_map(self)
    PDFPreviewFormRecord._init_map(self)
    PublishedFormRecord._init_map(self)
    ProvenanceFormRecord._init_map(self)
    super(MecQBankBaseMixin, self)._init_map()
def _actionsFreqs(self, *args, **kwargs):
    """
    NAME:

       actionsFreqs (_actionsFreqs)

    PURPOSE:

       evaluate the actions and frequencies (jr,lz,jz,Omegar,Omegaphi,Omegaz)

    INPUT:

       Either:

          a) R,vR,vT,z,vz[,phi]:

             1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)

             2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)

          b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument

       fixed_quad= (False) if True, use n=10 fixed_quad integration

       scipy.integrate.quadrature or .fixed_quad keywords

    OUTPUT:

        (jr,lz,jz,Omegar,Omegaphi,Omegaz)

    HISTORY:

       2013-12-28 - Written - Bovy (IAS)
    """
    fixed_quad = kwargs.pop('fixed_quad', False)
    if len(args) == 5:  # R,vR.vT, z, vz
        R, vR, vT, z, vz = args
    elif len(args) == 6:  # R,vR.vT, z, vz, phi
        R, vR, vT, z, vz, phi = args
    else:
        # Orbit-instance input: delegate parsing to the shared helper.
        self._parse_eval_args(*args)
        R = self._eval_R
        vR = self._eval_vR
        vT = self._eval_vT
        z = self._eval_z
        vz = self._eval_vz
    if isinstance(R, float):
        # Promote scalars to length-1 arrays so the loop below is uniform.
        R = nu.array([R])
        vR = nu.array([vR])
        vT = nu.array([vT])
        z = nu.array([z])
        vz = nu.array([vz])
    if self._c:  # pragma: no cover
        pass
    else:
        # Angular momentum components and energy per phase-space point.
        Lz = R * vT
        Lx = -z * vT
        Ly = z * vR - R * vz
        L2 = Lx * Lx + Ly * Ly + Lz * Lz
        E = _evaluatePotentials(self._pot, R, z) + vR**2. / 2. + vT**2. / 2. + vz**2. / 2.
        L = nu.sqrt(L2)
        # Actions
        Jphi = Lz
        Jz = L - nu.fabs(Lz)
        # Jr requires some more work
        # Set up an actionAngleAxi object for EL and rap/rperi calculations
        axiR = nu.sqrt(R**2. + z**2.)
        axivT = L / axiR
        axivR = (R * vR + z * vz) / axiR
        Jr = []
        Or = []
        Op = []
        for ii in range(len(axiR)):
            axiaA = actionAngleAxi(axiR[ii], axivR[ii], axivT[ii], pot=self._2dpot)
            (rperi, rap) = axiaA.calcRapRperi()
            EL = axiaA.calcEL()
            # NOTE(review): this rebinds the array-valued E and L from above
            # with this orbit's scalar (E, L); only the scalars are used
            # below, so this looks intentional - confirm.
            E, L = EL
            Jr.append(self._calc_jr(rperi, rap, E, L, fixed_quad, **kwargs))
            # Radial period
            if Jr[-1] < 10.**-9.:  # Circular orbit
                Or.append(epifreq(self._pot, axiR[ii], use_physical=False))
                Op.append(omegac(self._pot, axiR[ii], use_physical=False))
                continue
            # Geometric mean of peri- and apocenter radii.
            Rmean = m.exp((m.log(rperi) + m.log(rap)) / 2.)
            Or.append(self._calc_or(Rmean, rperi, rap, E, L, fixed_quad, **kwargs))
            Op.append(self._calc_op(Or[-1], Rmean, rperi, rap, E, L, fixed_quad, **kwargs))
        Op = nu.array(Op)
        Oz = copy.copy(Op)
        # Azimuthal frequency flips sign for retrograde (vT < 0) orbits.
        Op[vT < 0.] *= -1.
        return (nu.array(Jr), Jphi, Jz, nu.array(Or), Op, Oz)
def get_securitygroup(self, group_id, **kwargs):
    """Returns the information about the given security group.

    :param string id: The ID for the security group
    :returns: A diction of information about the security group
    """
    if 'mask' not in kwargs:
        # Default object mask: basic fields, the full rule list, and the
        # guest network components the group is bound to.
        kwargs['mask'] = ('id,'
                          'name,'
                          'description,'
                          '''rules[id, remoteIp, remoteGroupId,
                                   direction, ethertype, portRangeMin,
                                   portRangeMax, protocol, createDate, modifyDate],'''
                          '''networkComponentBindings[
                                networkComponent[
                                    id,
                                    port,
                                    guest[
                                        id,
                                        hostname,
                                        primaryBackendIpAddress,
                                        primaryIpAddress
                                    ]
                                ]
                          ]''')
    return self.security_group.getObject(id=group_id, **kwargs)
def add_singles(self, results):
    """Add singles to the background estimate and find candidates

    Parameters
    ----------
    results: dict of arrays
        Dictionary of dictionaries indexed by ifo and keys such as 'snr',
        'chisq', etc. The specific format is determined by the
        LiveBatchMatchedFilter class.

    Returns
    -------
    coinc_results: dict of arrays
        A dictionary of arrays containing the coincident results.
    """
    # Let's see how large everything is
    logging.info('BKG Coincs %s stored %s bytes', len(self.coincs), self.coincs.nbytes)
    # If there are no results just return
    valid_ifos = [k for k in results.keys() if results[k] and k in self.ifos]
    if len(valid_ifos) == 0:
        return {}
    # Add single triggers to the internal buffer
    self._add_singles_to_buffer(results, ifos=valid_ifos)
    # Calculate zerolag and background coincidences
    _, coinc_results = self._find_coincs(results, ifos=valid_ifos)
    # record if a coinc is possible in this chunk
    if len(valid_ifos) == 2:
        coinc_results['coinc_possible'] = True
    return coinc_results
def get_release_component(comp):
    """Split the argument passed on the command line into a component name
    and expected version.

    Args:
        comp (str): Argument of the form ``<name>-<version>``.  Only the
            first dash separates name from version, so the version itself
            may contain dashes (e.g. ``mycomp-1.0.0-rc1``).

    Returns:
        tuple: ``(name, version)``.

    Raises:
        ValueError: If ``comp`` contains no dash at all.
        EnvironmentError: If the component name is not a known component.
    """
    # BUG FIX: split on the first dash only; a plain split("-") raised an
    # unhelpful ValueError for versions containing dashes.
    name, vers = comp.split("-", 1)
    if name not in comp_names:
        print("Known components:")
        # Renamed the loop variable, which used to shadow the parameter.
        for known_name in comp_names:
            print("- %s" % known_name)
        raise EnvironmentError("Unknown release component name '%s'" % name)
    return name, vers
def connect(cls, host, public_key, private_key, verbose=0, use_cache=True):
    """Connect the client with the given host and the provided credentials.

    Parameters
    ----------
    host : str
        The Cytomine host (without protocol).
    public_key : str
        The Cytomine public key.
    private_key : str
        The Cytomine private key.
    verbose : int
        The verbosity level of the client.
    use_cache : bool
        True to use HTTP cache, False otherwise.

    Returns
    -------
    client : Cytomine
        A connected Cytomine client.
    """
    # Instantiating the class performs the actual connection.
    client = cls(host, public_key, private_key, verbose, use_cache)
    return client
def left_button_down(self, obj, event_type):
    """Register the event for a left button down click"""
    # 2D click location on the render window.
    click_pos = self.iren.GetEventPosition()
    # Map the window position to the corresponding 3D plot location.
    picker = vtk.vtkWorldPointPicker()
    picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
    self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
    # Clicks that miss the scene produce NaNs; zero them out.
    if np.any(np.isnan(self.pickpoint)):
        self.pickpoint[:] = 0
def renamed_tree(self, source, dest):
    """Directory was renamed in file explorer or in project explorer."""
    old_dir = osp.abspath(to_text_string(source))
    new_dir = to_text_string(dest)
    # Re-point every open file that lived under the renamed directory.
    for fname in self.get_filenames():
        if osp.abspath(fname).startswith(old_dir):
            self.renamed(source=fname, dest=fname.replace(old_dir, new_dir))
def vor_to_am(vor):
    r"""Given a Voronoi tessellation object from Scipy's ``spatial`` module,
    converts to a sparse adjacency matrix network representation in COO format.

    Parameters
    ----------
    vor : Voronoi Tessellation object
        This object is produced by ``scipy.spatial.Voronoi``

    Returns
    -------
    A sparse adjacency matrix in COO format.  The network is undirected
    and unweighted, so the adjacency matrix is upper-triangular and all the
    weights are set to 1.
    """
    # Create adjacency matrix in lil format for quick matrix construction
    N = vor.vertices.shape[0]
    rc = [[], []]
    for ij in vor.ridge_dict.keys():
        # Each ridge is the list of vertex indices around one facet.
        row = vor.ridge_dict[ij].copy()
        # Make sure voronoi cell closes upon itself
        row.append(row[0])
        # Add connections to rc list
        rc[0].extend(row[:-1])
        rc[1].extend(row[1:])
    rc = sp.vstack(rc).T
    # Make adj mat upper triangular
    rc = sp.sort(rc, axis=1)
    # Remove any pairs with ends at infinity (-1)
    keep = ~sp.any(rc == -1, axis=1)
    rc = rc[keep]
    data = sp.ones_like(rc[:, 0])
    # Build adj mat in COO format
    # NOTE(review): N (the vertex count computed above) is overwritten here
    # with max-index + 1; if the highest-numbered vertices appear in no
    # ridge the matrix shape shrinks - confirm this is intended.
    M = N = sp.amax(rc) + 1
    am = sprs.coo_matrix((data, (rc[:, 0], rc[:, 1])), shape=(M, N))
    # Remove diagonal, and convert to csr remove duplicates
    am = sp.sparse.triu(A=am, k=1, format='csr')
    # The convert back to COO and return
    am = am.tocoo()
    return am
def heatmap_seaborn(dfr, outfilename=None, title=None, params=None):
    """Returns seaborn heatmap with cluster dendrograms.

    - dfr - pandas DataFrame with relevant data
    - outfilename - path to output file (indicates output format)
    """
    # Decide on figure layout size: a minimum size is required for
    # aesthetics, and a maximum to avoid core dumps on rendering.
    # If we hit the maximum size, we should modify font size.
    maxfigsize = 120
    calcfigsize = dfr.shape[0] * 1.1
    figsize = min(max(8, calcfigsize), maxfigsize)
    if figsize == maxfigsize:
        sns.set_context("notebook", font_scale=maxfigsize / calcfigsize)
    # Add a colorbar only when class information is available.
    col_cb = None if params.classes is None else get_seaborn_colorbar(dfr, params.classes)
    # Labels are defined before we build the clustering.
    # If a label mapping is missing, use the key text as fall back.
    params.labels = get_safe_seaborn_labels(dfr, params.labels)
    # Add attributes to parameter object, and draw heatmap.
    params.colorbar = col_cb
    params.figsize = figsize
    params.linewidths = 0.25
    fig = get_seaborn_clustermap(dfr, params, title=title)
    # Save to file when a filename was supplied.
    if outfilename:
        fig.savefig(outfilename)
    # Return clustermap
    return fig
def job_path(cls, project, location, job):
    """Return a fully-qualified job string."""
    template = "projects/{project}/locations/{location}/jobs/{job}"
    return google.api_core.path_template.expand(
        template, project=project, location=location, job=job,
    )
def template2features(sent, i, token_syntax, debug=True):
    """Extract the feature described by ``token_syntax`` for token ``i``.

    ``token_syntax`` has the form ``T[a]``, ``T[a,b]``, ``T[a][col]`` or
    ``T[a].func``; positions outside the sentence map to BOS/EOS markers.
    When ``debug`` is True, each feature is prefixed with its template.
    """
    # Transpose the sentence so each column can be addressed directly.
    columns = []
    for j in range(len(sent[0])):
        columns.append([t[j] for t in sent])
    matched = re.match("T\[(?P<index1>\-?\d+)(\,(?P<index2>\-?\d+))?\](\[(?P<column>.*)\])?(\.(?P<function>.*))?", token_syntax)
    column = matched.group("column")
    column = int(column) if column else 0
    index1 = int(matched.group("index1"))
    index2 = matched.group("index2")
    index2 = int(index2) if index2 else None
    func = matched.group("function")
    prefix = "%s=" % token_syntax if debug else ""
    # Out-of-range positions become sentence-boundary markers.
    if i + index1 < 0:
        return ["%sBOS" % prefix]
    if i + index1 >= len(sent):
        return ["%sEOS" % prefix]
    if index2 is not None:
        if i + index2 >= len(sent):
            return ["%sEOS" % prefix]
        # Range form: join the column values over [index1, index2].
        word = " ".join(columns[column][i + index1:i + index2 + 1])
    else:
        word = sent[i + index1][column]
    result = apply_function(func, word) if func is not None else word
    return ["%s%s" % (prefix, result)]
def neg_loglik(self, beta):
    """Creates the negative log-likelihood of the model

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    The negative logliklihood of the model
    """
    # _model returns (lmda, Y, scores, theta); the third element is unused.
    lmda, Y, ___, theta = self._model(beta)
    # NOTE(review): z_list[-3] appears to hold the scale latent variable and
    # z_list[-4] the skewness - confirm against the model's latent ordering.
    return -np.sum(logpdf(Y, self.latent_variables.z_list[-3].prior.transform(beta[-3]), loc=theta, scale=np.exp(lmda / 2.0), skewness=self.latent_variables.z_list[-4].prior.transform(beta[-4])))
def weighted_random_choice(items):
    """Returns a weighted random choice from a list of items.

    :param items: A list of tuples (object, weight)
    :return: A random object, whose likelihood is proportional to its weight.
    """
    pairs = list(items)
    # Draw a point in [0, total_weight) and walk until it lands on an item.
    threshold = random.random() * sum(weight for _, weight in pairs)
    for obj, weight in pairs:
        if weight > threshold:
            return obj
        threshold -= weight
    # Empty input (or pathological floating-point accumulation).
    return None
def find_shape(self, canvas_x, canvas_y):
    '''Look up shape based on canvas coordinates.'''
    # Transform canvas coordinates into the physics space's frame.
    shape_x, shape_y, w = self.canvas_to_shapes_transform.dot([canvas_x, canvas_y, 1])
    if hasattr(self.space, 'point_query_first'):
        # Assume `pymunk<5.0`.
        shape = self.space.point_query_first((shape_x, shape_y))
    else:
        # Assume `pymunk>=5.0`, where the `point_query_first` method has
        # been deprecated.
        info = self.space.point_query_nearest((shape_x, shape_y), 0, [pymunk.ShapeFilter.ALL_CATEGORIES])
        shape = info.shape if info else None
    return self.bodies[shape.body] if shape else None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.