signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def randint(self, a: int, b: int, n: Optional[int] = None) -> Union[List[int], int]:
    """Return random integer(s) in [a, b].

    When ``n`` is given, a list of ``n`` integers is returned (requests are
    batched to minimize round trips); otherwise a single integer is
    returned, mirroring :py:mod:`random`'s interface.
    """
    return self._generate_randoms(
        self._request_randints,
        max_n=self.config.MAX_NUMBER_OF_INTEGERS,
        a=a,
        b=b,
        n=n,
    )
|
def ensemble_mean_std_max_min(ens):
    """Calculate ensemble statistics from an ensemble of climate simulations.

    Returns a dataset containing the ensemble mean, standard deviation,
    minimum and maximum for each data variable of the input simulations.

    Parameters
    ----------
    ens : xarray.Dataset
        Ensemble dataset (see xclim.utils.create_ensemble).

    Returns
    -------
    xarray.Dataset
        Dataset containing data variables of ensemble statistics.

    Examples
    --------
    >>> from xclim import utils
    >>> import glob
    >>> ncfiles = glob.glob('/*tas*.nc')
    >>> ens = utils.create_ensemble(ncfiles)
    >>> ens_means_std = utils.ensemble_mean_std_max_min(ens)
    >>> print(ens_means_std['tas_mean'])
    """
    dsOut = ens.drop(ens.data_vars)
    for v in ens.data_vars:
        dsOut[v + '_mean'] = ens[v].mean(dim='realization')
        dsOut[v + '_stdev'] = ens[v].std(dim='realization')
        dsOut[v + '_max'] = ens[v].max(dim='realization')
        dsOut[v + '_min'] = ens[v].min(dim='realization')
        # BUGFIX: copy each statistic's attrs from ITS OWN source variable.
        # The previous code ran a second loop over dsOut.data_vars and reused
        # the stale loop variable `v`, so every output variable received the
        # attributes of the *last* input variable. It also assigned the attrs
        # dict by reference and then mutated 'description', corrupting the
        # input dataset's attributes; dict() makes a shallow copy to avoid that.
        for stat in ('_mean', '_stdev', '_max', '_min'):
            vv = v + stat
            dsOut[vv].attrs = dict(ens[v].attrs)
            if 'description' in dsOut[vv].attrs:
                dsOut[vv].attrs['description'] = (
                    dsOut[vv].attrs['description'] + ' : '
                    + vv.split('_')[-1] + ' of ensemble'
                )
    return dsOut
|
def from_first_relation(cls, vertex0, vertex1):
    """Create a fresh match seeded with the single relation (vertex0, vertex1)."""
    match = cls([(vertex0, vertex1)])
    match.previous_ends1 = {vertex1}
    return match
|
def get_best_fit_rot_mat(from_coord, to_coord):
    """Compute the best-fit rotation matrix.

    The returned matrix rotates ``from_coord`` such that the RMSD between
    the two coordinate sets is minimized after the rotation.

    Parameters
    ----------
    from_coord, to_coord : np.array
        Nx3 coordinate arrays, where N is the number of atoms.

    Returns
    -------
    np.array
        3x3 rotation matrix.
    """
    reference = to_coord.astype('float64')
    moving = from_coord.astype('float64')
    superimpose_inst.set(reference, moving)
    superimpose_inst.run()
    rotran = superimpose_inst.get_rotran()
    return rotran[0].T
|
def _entry_offset ( self , index , entries , description ) :
'''Gets the offset of the first entry that matches the description .
@ index - Index into the entries list to begin searching .
@ entries - Dictionary of result entries .
@ description - Case insensitive description .
Returns the offset , if a matching description is found .
Returns - 1 if a matching description is not found .'''
|
description = description . lower ( )
for ( offset , infos ) in entries [ index : ] :
for info in infos :
if info [ 'description' ] . lower ( ) . startswith ( description ) :
return offset
return - 1
|
def _large_mrca ( self , ts ) :
"""Find the MRCA using a temporary table ."""
|
cursor = self . db . cursor ( )
cursor . execute ( """
DROP TABLE IF EXISTS _mrca_temp
""" )
cursor . execute ( """
CREATE TEMPORARY TABLE _mrca_temp(
child TEXT PRIMARY KEY REFERENCES taxa (tax_id) NOT NULL
)
""" )
cursor . executemany ( """
INSERT INTO _mrca_temp
VALUES (?)
""" , ( ( tid , ) for tid in ts ) )
cursor . execute ( """
SELECT parent
FROM _mrca_temp
JOIN parents USING (child)
JOIN taxa
ON parent = taxa.tax_id
JOIN ranks USING (rank)
GROUP BY parent
HAVING COUNT(*) = ?
ORDER BY rank_order DESC
LIMIT 1
""" , ( len ( ts ) , ) )
return cursor . fetchall ( )
|
def get_min_instability(self, min_voltage=None, max_voltage=None):
    """The minimum instability along a path for a specific voltage range.

    Args:
        min_voltage: The minimum allowable voltage.
        max_voltage: The maximum allowable voltage.

    Returns:
        Minimum decomposition energy of all compounds along the insertion
        path (a subset of the path can be chosen by the optional arguments),
        or None when no decomposition energies are available.
    """
    energies = [
        e
        for pair in self._select_in_voltage_range(min_voltage, max_voltage)
        for e in (pair.decomp_e_charge, pair.decomp_e_discharge)
        if e is not None
    ]
    return min(energies) if energies else None
|
def restore(self, repository, snapshot, body=None, params=None):
    """Restore a snapshot.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_

    :arg repository: A repository name
    :arg snapshot: A snapshot name
    :arg body: Details of what to restore
    :arg master_timeout: Explicit operation timeout for connection to master
        node
    :arg wait_for_completion: Should this request wait until the operation
        has completed before returning, default False
    """
    # Both path components are mandatory; reject empty/placeholder values.
    if repository in SKIP_IN_PATH or snapshot in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request(
        'POST',
        _make_path('_snapshot', repository, snapshot, '_restore'),
        params=params,
        body=body,
    )
|
def remove_vowels_from_string(input_string: str) -> str:
    """Remove all vowels (a, e, i, o, u, in either case) from a string.

    Examples:
        >>> remove_vowels_from_string('')
        ''
        >>> remove_vowels_from_string('abcdef\\nghijklm')
        'bcdf\\nghjklm'
        >>> remove_vowels_from_string('abcdef')
        'bcdf'
        >>> remove_vowels_from_string('aaaaa')
        ''
        >>> remove_vowels_from_string('aaBAA')
        'B'
        >>> remove_vowels_from_string('zbcd')
        'zbcd'
    """
    # The original doctests omitted several expected outputs (e.g. for ''
    # and 'aaaaa'), which made them fail under doctest; they are filled in
    # above. Behavior is unchanged.
    vowels = frozenset('aeiouAEIOU')
    return ''.join(char for char in input_string if char not in vowels)
|
def com_google_fonts_check_xavgcharwidth(ttFont):
    """Check if OS/2 xAvgCharWidth is correct.

    Recomputes the expected average character width from the font's hmtx
    metrics and compares it with the value stored in the OS/2 table,
    yielding fontbakery-style (status, message) results:
      - FAIL when required glyph data is missing,
      - PASS on an exact or off-by-one match,
      - INFO on a small deviation (within ACCEPTABLE_ERROR font units),
      - WARN on a larger deviation.
    """
    current_value = ttFont['OS/2'].xAvgCharWidth
    ACCEPTABLE_ERROR = 10
    # Width deviation tolerance in font units
    # Since version 3, the average is computed using _all_ glyphs in a font.
    if ttFont['OS/2'].version >= 3:
        calculation_rule = "the average of the widths of all glyphs in the font"
        if not ttFont['hmtx'].metrics:  # May contain just '.notdef', which is valid.
            yield FAIL, Message("missing-glyphs", "CRITICAL: Found no glyph width data in the hmtx table!")
            return
        width_sum = 0
        count = 0
        for glyph_id in ttFont['glyf'].glyphs:  # At least .notdef must be present.
            width = ttFont['hmtx'].metrics[glyph_id][0]
            # The OpenType spec doesn't exclude negative widths, but only positive
            # widths seems to be the assumption in the wild?
            if width > 0:
                count += 1
                width_sum += width
        expected_value = int(round(width_sum / count))
    else:  # Version 2 and below only consider lowercase latin glyphs and space.
        calculation_rule = ("the weighted average of the widths of the latin" " lowercase glyphs in the font")
        # Fixed per-glyph weights used by the pre-v3 averaging formula.
        weightFactors = {'a': 64, 'b': 14, 'c': 27, 'd': 35, 'e': 100, 'f': 20, 'g': 14, 'h': 42, 'i': 63, 'j': 3, 'k': 6, 'l': 35, 'm': 20, 'n': 56, 'o': 56, 'p': 17, 'q': 4, 'r': 49, 's': 56, 't': 71, 'u': 31, 'v': 10, 'w': 18, 'x': 3, 'y': 18, 'z': 2, 'space': 166}
        glyph_order = ttFont.getGlyphOrder()
        # All weighted glyphs must exist, otherwise the formula is undefined.
        if not all(character in glyph_order for character in weightFactors):
            yield FAIL, Message("missing-glyphs", "Font is missing the required latin lowercase " "letters and/or space.")
            return
        width_sum = 0
        for glyph_id in weightFactors:
            width = ttFont['hmtx'].metrics[glyph_id][0]
            width_sum += (width * weightFactors[glyph_id])
        expected_value = int(width_sum / 1000.0 + 0.5)
        # round to closest int
    difference = abs(current_value - expected_value)
    # We accept matches and off-by-ones due to rounding as correct.
    if current_value == expected_value or difference == 1:
        yield PASS, "OS/2 xAvgCharWidth value is correct."
    elif difference < ACCEPTABLE_ERROR:
        yield INFO, (f"OS/2 xAvgCharWidth is {current_value} but it should be" f" {expected_value} which corresponds to {calculation_rule}." " These are similar values, which" " may be a symptom of the slightly different" " calculation of the xAvgCharWidth value in" " font editors. There's further discussion on" " this at https://github.com/googlefonts/fontbakery" "/issues/1622")
    else:
        yield WARN, (f"OS/2 xAvgCharWidth is {current_value} but it should be" f" {expected_value} which corresponds to {calculation_rule}.")
|
def format_options(options):
    '''Format the content of the options line as HTML.

    The string is broken apart on any of the tokens ' -', '[[' or ']]' and
    every non-blank piece is wrapped in its own block-level <span> tag.
    '''
    pieces = ['']
    for token in re.split(r'( -|\[\[|\]\])', options):
        if token == '[[' or token == ']]':
            # Bracket tokens get a span of their own; start a fresh piece after.
            pieces.extend((token, ''))
        elif token == ' -':
            pieces.append(token)
        else:
            pieces[-1] = pieces[-1] + token
    body = '</span><span style="display:block">'.join(
        piece for piece in pieces if piece.strip()
    )
    return '<span style="display:block">' + body + '</span>'
|
def starting_offset(source_code, offset):
    """Return the offset at which a completion should be inserted.

    Usually code assist proposals should be inserted like::

        completion = proposal.name
        result = (source_code[:starting_offset] +
                  completion + source_code[offset:])

    Where starting_offset is the offset returned by this function.
    """
    finder = worder.Worder(source_code, True)
    _expression, _starting, start = finder.get_splitted_primary_before(offset)
    return start
|
def check_cluster(cluster_config, data_path, java_home, check_replicas, batch_size, minutes, start_time, end_time,):
    """Check the integrity of the Kafka log files in a cluster.

    start_time and end_time should be in the format specified
    by TIME_FORMAT_REGEX.

    Spawns one worker process per broker (check_files_on_host) and waits for
    all of them; on KeyboardInterrupt all workers are terminated and the
    program exits with status 1.

    :param cluster_config: the cluster configuration object
    :param data_path: the path to the log folder on the broker
    :type data_path: str
    :param java_home: the JAVA_HOME of the broker
    :type java_home: str
    :param check_replicas: also checks the replica files
    :type check_replicas: bool
    :param batch_size: the size of the batch
    :type batch_size: int
    :param minutes: check the files modified in the last N minutes
    :type minutes: int
    :param start_time: check the files modified after start_time
    :type start_time: str
    :param end_time: check the files modified before end_time
    :type end_time: str
    """
    # Locate candidate log files on every broker within the time window.
    brokers = get_broker_list(cluster_config)
    broker_files = find_files(data_path, brokers, minutes, start_time, end_time)
    if not check_replicas:  # remove replicas
        broker_files = filter_leader_files(cluster_config, broker_files)
    processes = []
    print("Starting {n} parallel processes".format(n=len(broker_files)))
    try:
        # One worker process per broker; each checks its own file list.
        for broker, host, files in broker_files:
            print(" Broker: {host}, {n} files to check".format(host=host, n=len(files)),)
            p = Process(name="dump_process_" + host, target=check_files_on_host, args=(java_home, host, files, batch_size),)
            p.start()
            processes.append(p)
        print("Processes running:")
        for process in processes:
            process.join()
    except KeyboardInterrupt:
        # On Ctrl-C, tear down every worker before exiting non-zero.
        print("Terminating all processes")
        for process in processes:
            process.terminate()
            process.join()
        print("All processes terminated")
        sys.exit(1)
|
def _local_call ( self , call_conf ) :
'''Execute local call'''
|
try :
ret = self . _get_caller ( call_conf ) . call ( )
except SystemExit :
ret = 'Data is not available at this moment'
self . out . error ( ret )
except Exception as ex :
ret = 'Unhandled exception occurred: {}' . format ( ex )
log . debug ( ex , exc_info = True )
self . out . error ( ret )
return ret
|
def adapter_remove_nio_binding(self, adapter_number, port_number):
    """Remove an adapter NIO binding.

    Releases the NIO attached to the given adapter/port (returning its UDP
    port to the project's port manager when it is a NIOUDP) and, if uBridge
    is running, detaches the port from the IOL bridge. This is a
    ``yield from`` style coroutine.

    :param adapter_number: adapter number
    :param port_number: port number
    :returns: NIO instance
    :raises IOUError: if the adapter or port does not exist
    """
    try:
        adapter = self._adapters[adapter_number]
    except IndexError:
        raise IOUError('Adapter {adapter_number} does not exist on IOU "{name}"'.format(name=self._name, adapter_number=adapter_number))
    if not adapter.port_exists(port_number):
        raise IOUError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter, port_number=port_number))
    nio = adapter.get_nio(port_number)
    if isinstance(nio, NIOUDP):
        # Give the UDP port back to the project's pool.
        self.manager.port_manager.release_udp_port(nio.lport, self._project)
    adapter.remove_nio(port_number)
    log.info('IOU "{name}" [{id}]: {nio} removed from {adapter_number}/{port_number}'.format(name=self._name, id=self._id, nio=nio, adapter_number=adapter_number, port_number=port_number))
    if self.ubridge:
        # IOL bridge names are offset by 512 from the application id.
        bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
        yield from self._ubridge_send("iol_bridge delete_nio_udp {name} {bay} {unit}".format(name=bridge_name, bay=adapter_number, unit=port_number))
    return nio
|
def execute_once(self, swap=None, spell_changes=None, spell_destructions=None, random_fill=False):
    """Execute the board one time, without following chain reactions.

    Arguments:
        swap - pair of adjacent positions
        spell_changes - sequence of (position, tile) changes
        spell_destructions - sequence of positions to be destroyed
        random_fill - when True, refill emptied positions with random tiles

    Return: (copy of the board, destroyed tile groups)
    """
    board = self.copy()  # operate on a copy, never mutate self
    destroyed_groups = []
    # Apply the swap and any spell-driven tile changes.
    board._swap(swap)
    board._change(spell_changes)
    # Destroy spell targets (each position becomes its own group) and record.
    targets = [[position] for position in (spell_destructions or tuple())]
    destroyed_groups.extend(board._destroy(targets))
    # Single matching pass: destroy matched groups and record them too.
    destroyed_groups.extend(board._destroy(board._match()))
    board._fall()
    if random_fill:
        board._random_fill()
    return board, destroyed_groups
|
def tparse(instring, lenout=_default_len_out):
    """Parse a time string and return seconds past the J2000
    epoch on a formal calendar.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tparse_c.html

    :param instring: Input time string, UTC.
    :type instring: str
    :param lenout: Available space in output error message string.
    :type lenout: int
    :return: Equivalent UTC seconds past J2000, descriptive error message.
    :rtype: tuple
    """
    # Output buffers for the CSPICE call.
    err_buffer = stypes.stringToCharP(lenout)
    seconds = ctypes.c_double()
    libspice.tparse_c(
        stypes.stringToCharP(instring),
        ctypes.c_int(lenout),
        ctypes.byref(seconds),
        err_buffer,
    )
    return seconds.value, stypes.toPythonString(err_buffer)
|
def output(self, value):
    """Set the client's output (on, off, int).

    Sets the general purpose output on some display modules to this value.
    Use "on" to set all outputs to high state, and "off" to set all to low
    state. The meaning of an integer value depends on your specific device;
    usually it is a bit pattern describing the state of each output line.

    Return None on success, or the LCDd response on error.
    """
    reply = self.request(("output %s" % (value)).encode())
    return None if "success" in reply else reply
|
def responsive_sleep(self, seconds, wait_reason=''):
    """Sleep for the specified number of seconds, one second at a time.

    Between one-second sleeps, quit_check() is called so shutdown requests
    are honored promptly, and progress is logged every
    'wait_log_interval' seconds, tagged with `wait_reason`.
    """
    # BUGFIX: the original used `xrange`, which only exists on Python 2;
    # other code in this file uses Python 3 syntax (f-strings), so that
    # raised NameError. `range` behaves the same here on both versions.
    for elapsed in range(int(seconds)):
        if (self.config.wait_log_interval
                and not elapsed % self.config.wait_log_interval):
            self.config.logger.debug(
                '%s: %dsec of %dsec' % (wait_reason, elapsed, seconds))
        self.quit_check()
        time.sleep(1.0)
|
def add_msg(self, pkt):
    """Append a TLS message (e.g. TLSClientHello or TLSApplicationData)
    to the latest record queued for sending, creating a first record when
    none is buffered yet.

    We believe a good automaton should not use the first test.
    """
    if not self.buffer_out:
        self.add_record()
    record = self.buffer_out[-1]
    # TLS 1.3 records carry their messages inside an inner plaintext.
    container = record.inner.msg if isinstance(record, TLS13) else record.msg
    container.append(pkt)
|
def get_weights_fn(modality_type, value=None):
    """Get the default weights function for a modality; fall back to value."""
    symbol_like = (
        ModalityType.CTC_SYMBOL,
        ModalityType.IDENTITY_SYMBOL,
        ModalityType.MULTI_LABEL,
        ModalityType.SYMBOL,
        ModalityType.SYMBOL_ONE_HOT,
    )
    if modality_type in symbol_like:
        return common_layers.weights_nonzero
    if modality_type in ModalityType.get_choices():
        return common_layers.weights_all
    return value
|
def from_bytes(self, raw):
    '''Reconstruct this TCP header from raw bytes and return the payload.

    (The original docstring said "Ethernet object", but this parses a TCP
    header: it unpacks the fixed-size header, stores the fields on self,
    parses any TCP options, and returns the bytes after the header.)

    Raises NotEnoughDataError if fewer than TCP._MINLEN bytes are given.
    '''
    if len(raw) < TCP._MINLEN:
        raise NotEnoughDataError("Not enough bytes ({}) to reconstruct an TCP object".format(len(raw)))
    fields = struct.unpack(TCP._PACKFMT, raw[:TCP._MINLEN])
    self._src = fields[0]
    self._dst = fields[1]
    self._seq = fields[2]
    self._ack = fields[3]
    # fields[4] packs the 4-bit data offset (top nibble) with the flag bits.
    offset = fields[4] >> 12
    self._flags = fields[4] & 0x01ff  # low 9 bits are the TCP flags
    self._window = fields[5]
    csum = fields[6]  # NOTE(review): checksum is read but never stored -- confirm intended
    self._urg = fields[7]
    headerlen = offset * 4  # data offset is expressed in 32-bit words
    optlen = headerlen - TCP._MINLEN  # NOTE(review): computed but unused
    self._options.from_bytes(raw[TCP._MINLEN:headerlen])
    return raw[headerlen:]
|
def _set_reserved_vlan(self, v, load=False):
    """Setter method for reserved_vlan, mapped from YANG variable /reserved_vlan (container).

    If this variable is read-only (config: false) in the
    source YANG file, then _set_reserved_vlan is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_reserved_vlan() directly.

    Note: this is pyangbind-style generated code; the YANGDynClass call and
    its error payload are kept verbatim.
    """
    # Unwrap union-typed values to their concrete representation first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce the value into the generated container type.
        t = YANGDynClass(v, base=reserved_vlan.reserved_vlan, is_container='container', presence=False, yang_name="reserved-vlan", rest_name="reserved-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sets the range of vlans used for internal purposes', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_VLAN_CONFIG', u'cli-suppress-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'nsmReservedVlanConfig'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """reserved_vlan must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=reserved_vlan.reserved_vlan, is_container='container', presence=False, yang_name="reserved-vlan", rest_name="reserved-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sets the range of vlans used for internal purposes', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_VLAN_CONFIG', u'cli-suppress-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'nsmReservedVlanConfig'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""", })
    self.__reserved_vlan = t
    if hasattr(self, '_set'):
        self._set()
|
def insert_one(self, data, using_name=True):
    """Insert one record.

    Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create

    For more information on the raw structure of all data types, read:
    http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types

    :param data: dict type data
    :param using_name: if you are using field_name in data, keep
        using_name=True (the default); otherwise pass False.
    """
    payload = self.convert_values(data)
    if using_name:
        payload = self.convert_keys(payload)
    return self.post(self.post_url, payload)
|
def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs):
    """Start a local proxy server.

    The server distributes incoming requests over the pool of found
    proxies: for each request it picks the optimal proxy (based on error
    percentage and average response time) and forwards the request to it.
    Besides the parameters below, all parameters of :meth:`.find` are
    accepted and passed through to gather proxies for the pool.

    :ref:`Example of usage <proxybroker-examples-server>`.

    :param str host: (optional) Host of local proxy server
    :param int port: (optional) Port of local proxy server
    :param int limit:
        (optional) Once this many working proxies are found, checking of
        new proxies is lazily paused; it resumes when all found proxies
        have been discarded (see :attr:`max_error_rate`,
        :attr:`max_resp_time`) and continues until one working proxy is
        found again. Default 100
    :param int max_tries:
        (optional) Maximum number of attempts to handle an incoming
        request, possibly with different proxies. Defaults to the value
        given when the :class:`Broker` was created (3)
    :param int min_req_proxy:
        (optional) Minimum number of processed requests used to estimate a
        proxy's quality. Default 5
    :param int max_error_rate:
        (optional) Maximum fraction of failed requests (0.5 = 50%) before
        a proxy is removed from the pool. Default 0.5
    :param int max_resp_time:
        (optional) Maximum average response time in seconds before a proxy
        is removed from the pool. Default 8
    :param bool prefer_connect:
        (optional) Prefer the CONNECT method when the proxy supports it.
        Default False
    :param list http_allowed_codes:
        (optional) HTTP codes a proxy may return before being treated as
        erroneous (HTTP only). Empty by default (no verification)
    :param int backlog:
        (optional) Maximum number of queued connections passed to listen.
        Default 100
    :raises ValueError:
        If :attr:`limit` is less than or equal to zero, because a parsing
        of providers would be endless.

    .. versionadded:: 0.2.0
    """
    if limit <= 0:
        raise ValueError(
            'In serve mode value of the limit cannot be less than or '
            'equal to zero. Otherwise, a parsing of providers will be '
            'endless'
        )
    self._server = Server(
        host=host,
        port=port,
        proxies=self._proxies,
        timeout=self._timeout,
        max_tries=kwargs.pop('max_tries', self._max_tries),
        loop=self._loop,
        **kwargs,
    )
    self._server.start()
    find_task = asyncio.ensure_future(self.find(limit=limit, **kwargs))
    self._all_tasks.append(find_task)
|
def process_args(self, args):
    """Process the args we have. 'args' is always a ShutItInit object.

    Decides which top-level action to run (version / list_configs /
    list_modules / list_deps / skeleton / build / run), sets up logging on
    first call, and dispatches to the matching handler. 'version' and
    'skeleton' exit the process; 'run' exits with status 0 when done.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    assert isinstance(args, ShutItInit), shutit_util.print_debug()
    if args.action == 'version':
        shutit_global.shutit_global_object.shutit_print('ShutIt version: ' + shutit.shutit_version)
        shutit_global.shutit_global_object.handle_exit(exit_code=0)
    # What are we asking shutit to do?
    self.action['list_configs'] = args.action == 'list_configs'
    self.action['list_modules'] = args.action == 'list_modules'
    self.action['list_deps'] = args.action == 'list_deps'
    self.action['skeleton'] = args.action == 'skeleton'
    self.action['build'] = args.action == 'build'
    self.action['run'] = args.action == 'run'
    # Logging
    if not self.logging_setup_done:
        self.logfile = args.logfile
        self.loglevel = args.loglevel
        if self.loglevel is None or self.loglevel == '':
            self.loglevel = 'INFO'  # default when no level was supplied
        self.setup_logging()
    shutit_global.shutit_global_object.setup_panes(action=args.action)
    # This mode is a bit special - it's the only one with different arguments
    if self.action['skeleton']:
        self.handle_skeleton(args)
        shutit_global.shutit_global_object.handle_exit()
    elif self.action['run']:
        self.handle_run(args)
        sys.exit(0)
    elif self.action['build'] or self.action['list_configs'] or self.action['list_modules']:
        # NOTE(review): 'list_deps' is recorded above but not dispatched in
        # this chain, so it falls through to fail() -- confirm intended.
        self.handle_build(args)
    else:
        self.fail('Should not get here: action was: ' + str(self.action))
    self.nocolor = args.nocolor
|
def readSources(self):
    """Read the source elements.

    Expected XML shape::

        <source filename="LightCondensed.ufo" location="location-token-aaa" name="master-token-aaa1">
            <info mute="1" copy="1"/>
            <kerning mute="1"/>
            <glyph mute="1" name="thirdGlyph"/>
        </source>

    For each source: resolves its path relative to the document, loads the
    font, reads its location, and records copy/mute flags for lib, groups,
    info, features, kerning and individual glyphs. Populates self.sources,
    self.muted and the *Source attributes.

    :raises MutatorError: if a source file is missing or has no location.
    """
    for sourceCount, sourceElement in enumerate(self.root.findall(".sources/source")):  # shall we just read the UFO here?
        filename = sourceElement.attrib.get('filename')
        # filename is a path relative to the documentpath. resolve first.
        sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))
        sourceName = sourceElement.attrib.get('name')
        if sourceName is None:
            # if the source element has no name attribute
            # (some authoring tools do not need them)
            # then we should make a temporary one. We still need it for reference.
            sourceName = "temp_master.%d" % (sourceCount)
        self.reportProgress("prep", 'load', sourcePath)
        if not os.path.exists(sourcePath):
            raise MutatorError("Source not found at %s" % sourcePath)
        sourceObject = self._instantiateFont(sourcePath)
        # read the locations
        sourceLocationObject = None
        sourceLocationObject = self.locationFromElement(sourceElement)
        if sourceLocationObject is None:
            raise MutatorError("No location defined for source %s" % sourceName)
        # read lib flag
        for libElement in sourceElement.findall('.lib'):
            if libElement.attrib.get('copy') == '1':
                self.libSource = sourceName
        # read the groups flag
        for groupsElement in sourceElement.findall('.groups'):
            if groupsElement.attrib.get('copy') == '1':
                self.groupsSource = sourceName
        # read the info flag
        for infoElement in sourceElement.findall(".info"):
            if infoElement.attrib.get('copy') == '1':
                self.infoSource = sourceName
            if infoElement.attrib.get('mute') == '1':
                self.muted['info'].append(sourceName)
        # read the features flag
        for featuresElement in sourceElement.findall(".features"):
            if featuresElement.attrib.get('copy') == '1':
                # NOTE(review): when a features copy-source was already
                # chosen, this resets it to None rather than keeping either
                # source -- presumably "only one source may copy features";
                # confirm against the format specification.
                if self.featuresSource is not None:
                    self.featuresSource = None
                else:
                    self.featuresSource = sourceName
        mutedGlyphs = []  # NOTE(review): never used below -- dead variable?
        for glyphElement in sourceElement.findall(".glyph"):
            glyphName = glyphElement.attrib.get('name')
            if glyphName is None:
                continue
            if glyphElement.attrib.get('mute') == '1':
                if not sourceName in self.muted['glyphs']:
                    self.muted['glyphs'][sourceName] = []
                self.muted['glyphs'][sourceName].append(glyphName)
        for kerningElement in sourceElement.findall(".kerning"):
            if kerningElement.attrib.get('mute') == '1':
                self.muted['kerning'].append(sourceName)
        # store
        self.sources[sourceName] = sourceObject, sourceLocationObject
    self.reportProgress("prep", 'done')
|
def sapm_celltemp(poa_global, wind_speed, temp_air, model='open_rack_cell_glassback'):
    '''Estimate cell and module temperatures per the Sandia PV Array
    Performance Model (SAPM, SAND2004-3535), from the incident irradiance,
    wind speed, ambient temperature, and SAPM module parameters.

    Parameters
    ----------
    poa_global : float or Series
        Total incident irradiance in W/m^2.
    wind_speed : float or Series
        Wind speed in m/s at a height of 10 meters.
    temp_air : float or Series
        Ambient dry bulb temperature in degrees C.
    model : string, list, or dict, default 'open_rack_cell_glassback'
        Model to be used. If a string, one of the named parameter sets in
        TEMP_MODEL_PARAMS['sapm'] (e.g. 'open_rack_cell_glassback',
        'roof_mount_cell_glassback', 'open_rack_cell_polymerback',
        'insulated_back_polymerback', 'open_rack_polymer_thinfilm_steel',
        '22x_concentrator_tracker'). If a dict or Series, must supply the
        keys 'a', 'b' and 'deltaT'; if a list, the values (a, b, deltaT)
        in that order:

        * a: SAPM parameter setting the upper limit for module temperature
          at low wind speeds and high solar irradiance.
        * b: SAPM parameter setting the rate at which module temperature
          drops as wind speed increases (see SAPM eqn. 11).
        * deltaT: SAPM parameter giving the temperature difference between
          cell and module back surface at the reference irradiance, E0.

    Returns
    -------
    DataFrame with columns 'temp_cell' and 'temp_module' in degrees C.

    References
    ----------
    [1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance
    Model", SAND Report 3535, Sandia National Laboratories, Albuquerque, NM.

    See Also
    --------
    sapm
    '''
    if isinstance(model, str):
        params = TEMP_MODEL_PARAMS['sapm'][model.lower()]
    elif isinstance(model, (dict, pd.Series)):
        params = [model['a'], model['b'], model['deltaT']]
    else:
        params = model
    a, b, deltaT = params[0], params[1], params[2]
    E0 = 1000.  # reference irradiance, W/m^2
    temp_module = pd.Series(poa_global * np.exp(a + b * wind_speed) + temp_air)
    temp_cell = temp_module + (poa_global / E0) * (deltaT)
    return pd.DataFrame({'temp_cell': temp_cell, 'temp_module': temp_module})
|
def _init_project_service(self, version):
    """Initialize the Project Service from the config data.

    Args:
        version (string): Version of Boss API to use.

    Returns:
        None

    Raises:
        (KeyError): if given invalid version.
    """
    cfg = self._load_config_section(CONFIG_PROJECT_SECTION)
    self._token_project = cfg[CONFIG_TOKEN]
    service = ProjectService(cfg[CONFIG_HOST], version)
    service.base_protocol = cfg[CONFIG_PROTOCOL]
    service.set_auth(self._token_project)
    self._project = service
|
def exec_cmd(rtsp, cmd):
    """Dispatch a playback-control command to the RTSP session.

    Updates the module-level CUR_RANGE / CUR_SCALE playback state and, for
    any command other than pause/exit/teardown/help, issues a PLAY request
    with the current range and scale.

    Args:
        rtsp: RTSP session object providing do_teardown/do_pause/do_play.
        cmd (str): command string, e.g. 'pause', 'forward',
            'play range: npt=0- scale: 2'.
    """
    global CUR_RANGE, CUR_SCALE
    if cmd in ('exit', 'teardown'):
        rtsp.do_teardown()
    elif cmd == 'pause':
        CUR_SCALE = 1
        CUR_RANGE = 'npt=now-'
        rtsp.do_pause()
    elif cmd == 'help':
        PRINT(play_ctrl_help())
    elif cmd == 'forward':
        # Doubling a negative (rewind) scale would speed up the rewind;
        # snap back to 1x forward first.
        if CUR_SCALE < 0:
            CUR_SCALE = 1
        CUR_SCALE *= 2
        CUR_RANGE = 'npt=now-'
    elif cmd == 'backward':
        if CUR_SCALE > 0:
            CUR_SCALE = -1
        CUR_SCALE *= 2
        CUR_RANGE = 'npt=now-'
    elif cmd == 'begin':
        CUR_SCALE = 1
        CUR_RANGE = 'npt=beginning-'
    elif cmd == 'live':
        CUR_SCALE = 1
        CUR_RANGE = 'npt=end-'
    elif cmd.startswith('play'):
        m = re.search(r'range[:\s]+(?P<range>[^\s]+)', cmd)
        if m:
            CUR_RANGE = m.group('range')
        m = re.search(r'scale[:\s]+(?P<scale>[\d\.]+)', cmd)
        if m:
            # The pattern explicitly permits decimals (e.g. "0.5");
            # int() raised ValueError on them, so parse as float.
            CUR_SCALE = float(m.group('scale'))
    if cmd not in ('pause', 'exit', 'teardown', 'help'):
        rtsp.do_play(CUR_RANGE, CUR_SCALE)
|
def _upsert(context, params, data):
    """Insert or update ``data``, maintaining first/last-seen timestamps.

    If unique keys are configured and an existing row matches them, only
    ``__last_seen`` is refreshed; otherwise a new row is inserted with
    both ``__first_seen`` and ``__last_seen`` set.
    """
    table = datastore.get_table(params.get("table"), primary_id=False)
    unique_keys = ensure_list(params.get("unique"))
    now = datetime.datetime.utcnow()
    data["__last_seen"] = now
    if unique_keys:
        if table.update(data, unique_keys, return_count=True):
            return
    data["__first_seen"] = now
    table.insert(data)
|
def _call ( self , x ) :
"""Apply the functional to the given point ."""
|
# Since the proximal projects onto our feasible set we can simply
# check if it changes anything
proj = self . proximal ( 1 ) ( x )
return np . inf if x . dist ( proj ) > 0 else 0
|
def count(self, with_limit_and_skip=False):
    """Return the size of the results set for this query.

    By default :meth:`limit` and :meth:`skip` are NOT taken into account;
    pass ``with_limit_and_skip=True`` for that behavior.  Any
    :meth:`~hint` applied to the query is forwarded to the count command
    (MongoDB >= 2.6), and the count obeys the collection's
    read preference.

    Raises :class:`~pymongo.errors.OperationFailure` on a database error.

    :Parameters:
      - `with_limit_and_skip` (optional): take any :meth:`limit` or
        :meth:`skip` that has been applied to this cursor into account
        when getting the count

    .. note:: The `with_limit_and_skip` parameter requires server
      version **>= 1.1.4-**

    .. versionchanged:: 2.8
      The :meth:`~count` method now supports :meth:`~hint`.
    """
    validate_boolean("with_limit_and_skip", with_limit_and_skip)
    command = SON([("count", self.__collection.name), ("query", self.__spec)])
    if self.__max_time_ms is not None:
        command["maxTimeMS"] = self.__max_time_ms
    if self.__comment:
        command["$comment"] = self.__comment
    if self.__hint is not None:
        command["hint"] = self.__hint
    if with_limit_and_skip:
        # Zero means "not set" for both limit and skip, so truthiness
        # is the right test here.
        if self.__limit:
            command["limit"] = self.__limit
        if self.__skip:
            command["skip"] = self.__skip
    return self.__collection._count(command, self.__collation)
|
def validate(self, val):
    """Validate that ``val`` matches the expected fields for this struct.

    ``val`` must be a dict and may contain only fields represented by this
    struct and its ancestors; all non-optional fields must be present.

    :Parameters:
      val
        Value to validate.  Must be a dict.

    :Returns:
      Two element tuple ``(ok, message)``:
        - `ok` - True if valid, False if not
        - `message` - Description of validation error, or None if valid
    """
    # isinstance (rather than an exact type check) also accepts dict
    # subclasses such as OrderedDict.
    if not isinstance(val, dict):
        return False, "%s is not a dict" % (str(val))
    for k, v in val.items():
        field = self.field(k)
        if field:
            ok, msg = self.contract.validate(field, field.is_array, v)
            if not ok:
                return False, "field '%s': %s" % (field.name, msg)
        else:
            return False, "field '%s' not found in struct %s" % (k, self.name)
    all_fields = self.get_all_fields([])
    for field in all_fields:
        # dict.has_key() was removed in Python 3; use the `in` operator.
        if field.name not in val and not field.optional:
            return False, "field '%s' missing from: %s" % (field.name, str(val))
    return True, None
|
def _generate_compose_file(self, command, additional_volumes=None, additional_env_vars=None):
    """Write a docker-compose config describing a training/hosting environment.

    One service entry is generated per container (based on ``self.hosts``)
    via :meth:`~sagemaker.local_session.SageMakerContainer._create_docker_host`.

    Args:
        command (str): either 'train' or 'serve'
        additional_volumes (list): volumes to be mapped into the containers
        additional_env_vars (dict): additional environment variables to be
            passed on to the containers.

    Returns:
        (dict) A dictionary representation of the configuration that was
        written.
    """
    boto_session = self.sagemaker_session.boto_session
    volumes = additional_volumes or []
    env_vars = additional_env_vars or {}
    environment = []
    creds = _aws_credentials(boto_session)
    if creds is not None:
        environment.extend(creds)
    environment.extend('{}={}'.format(name, value) for name, value in env_vars.items())
    if os.environ.get(DOCKER_COMPOSE_HTTP_TIMEOUT_ENV) is None:
        os.environ[DOCKER_COMPOSE_HTTP_TIMEOUT_ENV] = DOCKER_COMPOSE_HTTP_TIMEOUT
    optml_dirs = {'output', 'output/data', 'input'} if command == 'train' else set()
    services = {
        host: self._create_docker_host(host, environment, optml_dirs, command, volumes)
        for host in self.hosts
    }
    content = {
        # Use version 2.3 as a minimum so that we can specify the runtime
        'version': '2.3',
        'services': services,
        'networks': {'sagemaker-local': {'name': 'sagemaker-local'}},
    }
    compose_path = os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME)
    yaml_content = yaml.dump(content, default_flow_style=False)
    logger.info('docker compose file: \n{}'.format(yaml_content))
    with open(compose_path, 'w') as f:
        f.write(yaml_content)
    return content
|
def __quarters(self, from_date=None):
    """Get the set of quarters with available items from a given index date.

    :param from_date: earliest date to consider (optional)
    :return: list of `pandas.Period` corresponding to quarters
    """
    search = Search(using=self._es_conn, index=self._es_index)
    if from_date:
        # Work around to solve conversion problem of '__' to '.' in field
        # name: set the attribute on the query object directly.
        range_query = Q('range')
        setattr(range_query, self._sort_on_field, {'gte': from_date})
        search = search.filter(range_query)
    # Only aggregations are needed, so request zero hits (from:0, size:0).
    search = search[0:0]
    search.aggs.bucket(self.TIMEFRAME, 'date_histogram', field=self._timeframe_field, interval='quarter', min_doc_count=1)
    response = search.execute()
    return [
        pandas.Period(bucket.key_as_string, 'Q')
        for bucket in response.aggregations[self.TIMEFRAME].buckets
    ]
|
def competition_urls(self):
    """Return 'kaggle://' urls for every file in the competition."""
    urls = []
    for fname in self.competition_files:  # pylint: disable=not-an-iterable
        urls.append(KaggleFile(self._competition_name, fname).to_url())
    return urls
|
def save_as_pil(self, fname, pixel_array=None):
    """Save the image from a numpy array using Pillow (PIL fork).

    :param fname: Location and name of the image file to be saved.
    :param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value;
        defaults to ``self.numpy`` when omitted.
    :return: True if successful.
    """
    from PIL import Image as pillow
    data = self.numpy if pixel_array is None else pixel_array
    # Pillow expects 8-bit pixel data for standard image modes.
    pillow.fromarray(data.astype('uint8')).save(fname)
    return True
|
def screenshot_raw(self, includes='subtitles'):
    """Mapped mpv screenshot_raw command, see man mpv(1).

    Returns a pillow Image object.
    """
    from PIL import Image
    result = self.node_command('screenshot-raw', includes)
    if result['format'] != 'bgr0':
        raise ValueError('Screenshot in unknown format "{}". Currently, only bgr0 is supported.'.format(result['format']))
    raw = Image.frombytes('RGBA', (result['w'], result['h']), result['data'])
    blue, green, red, _alpha = raw.split()
    # mpv delivers bgr0, so reorder the channels into RGB (alpha dropped).
    return Image.merge('RGB', (red, green, blue))
|
def invert_pixel_mask(mask):
    """Invert pixel mask (0 -> 1, 1 (and greater) -> 0).

    Parameters
    ----------
    mask : array-like
        Mask.  Historically this was always (80, 336) — presumably the
        FE-I4 pixel matrix; any shape is now accepted and preserved.

    Returns
    -------
    inverted_mask : numpy.ndarray
        Inverted mask, big-endian uint8, same shape as ``mask``.
    """
    mask = np.asarray(mask)
    # Generalized from a hard-coded (80, 336) shape: the boolean indexing
    # below required the mask to have that exact shape anyway, so using
    # mask.shape is backward compatible.
    inverted_mask = np.ones(shape=mask.shape, dtype=np.dtype('>u1'))
    inverted_mask[mask >= 1] = 0
    return inverted_mask
|
def com_google_fonts_check_fvar_name_entries(ttFont):
    """All name entries referenced by fvar instances exist on the name table?"""
    missing_any = False
    for instance in ttFont["fvar"].instances:
        has_entry = any(
            record.nameID == instance.subfamilyNameID
            for record in ttFont["name"].names
        )
        if not has_entry:
            missing_any = True
            yield FAIL, (f"Named instance with coordinates {instance.coordinates}" f" lacks an entry on the name table (nameID={instance.subfamilyNameID}).")
    if not missing_any:
        yield PASS, "OK"
|
def _openbsd_remotes_on(port, which_end):
    '''OpenBSD specific helper function.

    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    $ netstat -nf inet
    Active Internet connections
    Proto Recv-Q Send-Q Local Address   Foreign Address  (state)
    tcp        0      0 10.0.0.101.4505 10.0.0.1.45329   ESTABLISHED
    tcp        0      0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-nf', 'inet'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise
    # check_output() returns bytes on Python 3; decode before splitting,
    # otherwise data.split('\n') raises TypeError (str pattern on bytes).
    lines = data.decode(errors='replace').split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        # Addresses look like "10.0.0.101.4505": port after the last dot.
        local_host, local_port = chunks[3].rsplit('.', 1)
        remote_host, remote_port = chunks[4].rsplit('.', 1)
        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes
|
def write_top_half(f, row_metadata_df, col_metadata_df, metadata_null, filler_null):
    """Write the top half of the gct file: top-left filler values, row metadata
    headers, and top-right column metadata.

    Args:
        f (file handle): handle for output file
        row_metadata_df (pandas df)
        col_metadata_df (pandas df)
        metadata_null (string): how to represent missing values in the metadata
        filler_null (string): what value to fill the top-left filler block with

    Returns:
        None
    """
    # Initialize the top half of the gct including the third line
    size_of_top_half_df = (
        1 + col_metadata_df.shape[1],
        1 + row_metadata_df.shape[1] + col_metadata_df.shape[0])
    top_half_df = pd.DataFrame(np.full(size_of_top_half_df, filler_null, dtype=object))
    # Assemble the third line of the gct: "id", then rhds, then cids
    top_half_df.iloc[0, :] = np.hstack(("id", row_metadata_df.columns.values, col_metadata_df.index.values))
    # Insert the chds
    top_half_df.iloc[1:, 0] = col_metadata_df.columns.values
    # Insert the column metadata, but first convert to strings and replace NaNs.
    # Use positional .iloc slices here: DataFrame.at only supports scalar
    # access, so indexing it with ranges raises on modern pandas.
    top_half_df.iloc[1:, 1 + row_metadata_df.shape[1]:] = (
        col_metadata_df.astype(str).replace("nan", value=metadata_null).T.values)
    # Write top_half_df to file
    top_half_df.to_csv(f, header=False, index=False, sep="\t")
|
def terminate(self):
    """Terminate all processes and clear the registry.

    Termination is best-effort: failures for individual workers (e.g.
    already-dead processes) are ignored.
    """
    for worker in self.q.values():
        try:
            worker.terminate()
        # Catch Exception rather than a bare except so that
        # KeyboardInterrupt / SystemExit still propagate.
        except Exception:
            pass
    self.q = {}
|
def setposition(self, moves=None):
    """Set the engine position from the start position plus a move list.

    Move list is a list of moves (i.e. ['e2e4', 'e7e5', ...]) each entry
    as a string.  Moves must be in full algebraic notation.

    :param moves: list of move strings; defaults to no moves.  Uses None
        instead of a mutable default argument.
    """
    if moves is None:
        moves = []
    self.put('position startpos moves %s' % Engine._movelisttostr(moves))
    self.isready()
|
def _run_scalpel_paired(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
    """Detect indels with Scalpel.

    This is used for paired tumor/normal samples: runs scalpel-discovery
    in somatic mode, exports the resulting indel calls, and merges the
    chi2-filtered somatic calls with the rejected common calls into one
    sorted, bgzipped output VCF.

    Args:
        align_bams: aligned BAM files for the tumor/normal pair.
        items: per-sample metadata dicts; items[0]["config"] supplies
            program configuration.
        ref_file: reference genome FASTA path.
        assoc_files: associated resource files (passed through to the
            tumor-only caller when no normal BAM is available).
        region: optional genomic region to restrict calling to.
        out_file: optional output path; derived from the first BAM when
            not provided.

    Returns:
        Path to the output VCF.
    """
    config = items[0]["config"]
    if out_file is None:
        out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            paired = get_paired_bams(align_bams, items)
            # No normal sample available: fall back to tumor-only calling.
            if not paired.normal_bam:
                ann_file = _run_scalpel_caller(align_bams, items, ref_file, assoc_files, region, out_file)
                return ann_file
            vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
            perl_exports = utils.get_perl_exports(os.path.dirname(tx_out_file))
            tmp_path = "%s-scalpel-work" % utils.splitext_plus(out_file)[0]
            db_file = os.path.join(tmp_path, "main", "somatic.db")
            # If a previous run did not leave a usable database behind,
            # clear the stale work directory so discovery starts clean.
            if not os.path.exists(db_file + ".dir"):
                if os.path.exists(tmp_path):
                    utils.remove_safe(tmp_path)
            opts = " ".join(_scalpel_options_from_config(items, config, out_file, region, tmp_path))
            opts += " --ref {}".format(ref_file)
            opts += " --dir %s" % tmp_path
            # calling
            cl = ("{perl_exports} && " "scalpel-discovery --somatic {opts} --tumor {paired.tumor_bam} --normal {paired.normal_bam}")
            do.run(cl.format(**locals()), "Genotyping paired variants with Scalpel", {})
            # filtering to adjust input parameters
            bed_opts = " ".join(_scalpel_bed_file_opts(items, config, out_file, region, tmp_path))
            use_defaults = True
            if use_defaults:
                scalpel_tmp_file = os.path.join(tmp_path, "main/somatic.indel.vcf")
            # Uses default filters but can tweak min-alt-count-tumor and min-phred-fisher
            # to swap precision for sensitivity
            else:
                scalpel_tmp_file = os.path.join(tmp_path, "main/somatic-indel-filter.vcf.gz")
                with file_transaction(config, scalpel_tmp_file) as tx_indel_file:
                    cmd = ("{perl_exports} && " "scalpel-export --somatic {bed_opts} --ref {ref_file} --db {db_file} " "--min-alt-count-tumor 5 --min-phred-fisher 10 --min-vaf-tumor 0.1 " "| bgzip -c > {tx_indel_file}")
                    do.run(cmd.format(**locals()), "Scalpel somatic indel filter", {})
            scalpel_tmp_file = bgzip_and_index(scalpel_tmp_file, config)
            scalpel_tmp_file_common = bgzip_and_index(os.path.join(tmp_path, "main/common.indel.vcf"), config)
            compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
            bcftools_cmd_chi2 = get_scalpel_bcftools_filter_expression("chi2", config)
            bcftools_cmd_common = get_scalpel_bcftools_filter_expression("reject", config)
            fix_ambig = vcfutils.fix_ambiguous_cl()
            add_contig = vcfutils.add_contig_to_header_cl(dd.get_ref_file(items[0]), tx_out_file)
            # Merge chi2-passing somatic indels with the rejected common
            # indels, fix ambiguous bases, sort, add contig headers and
            # optionally compress in a single shell pipeline.
            cl2 = ("vcfcat <({bcftools_cmd_chi2} {scalpel_tmp_file}) " "<({bcftools_cmd_common} {scalpel_tmp_file_common}) | " " {fix_ambig} | {vcfstreamsort} | {add_contig} {compress_cmd} > {tx_out_file}")
            do.run(cl2.format(**locals()), "Finalising Scalpel variants", {})
    return out_file
|
def get_library_version(module_name):
    """Get version number from ``module_name``'s ``__version__`` attribute.

    .. versionadded:: 1.2.0

    :param module_name: The module name, e.g. ``oled`` (resolved as
        ``luma.oled``).
    :type module_name: str
    :rtype: str
    """
    try:
        module = importlib.import_module('luma.' + module_name)
    except ImportError:
        # Unknown / uninstalled module: report no version.
        return None
    return getattr(module, '__version__', None)
|
def disvg(paths=None, colors=None, filename=os_path.join(getcwd(), 'disvg_output.svg'), stroke_widths=None, nodes=None, node_colors=None, node_radii=None, openinbrowser=True, timestamp=False, margin_size=0.1, mindim=600, dimensions=None, viewbox=None, text=None, text_path=None, font_size=None, attributes=None, svg_attributes=None, svgwrite_debug=False, paths2Drawing=False):
    """Takes in a list of paths and creates an SVG file containing said paths.

    REQUIRED INPUTS:
        :param paths - a list of paths

    OPTIONAL INPUT:
        :param colors - specifies the path stroke color.  By default all
        paths will be black (#000000).  This parameter can be input in a
        few ways
        1) a list of strings that will be input into the path elements
           stroke attribute (so anything that is understood by the svg
           viewer).
        2) a string of single character colors -- e.g. setting colors='rrr'
           is equivalent to setting colors=['red', 'red', 'red'] (see the
           'color_dict' dictionary above for a list of possibilities).
        3) a list of rgb 3-tuples -- e.g. colors = [(255, 0, 0), ...].

        :param filename - the desired location/filename of the SVG file
        created (by default the SVG will be stored in the current working
        directory and named 'disvg_output.svg').

        :param stroke_widths - a list of stroke_widths to use for paths
        (default is 0.5% of the SVG's width or length)

        :param nodes - a list of points to draw as filled-in circles

        :param node_colors - a list of colors to use for the nodes (by
        default nodes will be red)

        :param node_radii - a list of radii to use for the nodes (by
        default node radius will be 1 percent of the svg's width/length)

        :param text - string or list of strings to be displayed

        :param text_path - if text is a list, then this should be a list of
        paths (or path segments) of the same length.  Note: the path must
        be long enough to display the text or the text will be cropped by
        the svg viewer.

        :param font_size - a single float or list of floats.

        :param openinbrowser - Set to True to automatically open the
        created SVG in the user's default web browser.

        :param timestamp - if True, then a timestamp will be appended to
        the output SVG's filename.  This will fix issues with rapidly
        opening multiple SVGs in your browser.

        :param margin_size - The min margin (empty area framing the
        collection of paths) size used for creating the canvas and
        background of the SVG.

        :param mindim - The minimum dimension (height or width) of the
        output SVG (default is 600).

        :param dimensions - The (x,y) display dimensions of the output SVG.
        I.e. this specifies the `width` and `height` SVG attributes.  Note
        that these also can be used to specify units other than pixels.
        Using this will override the `mindim` parameter.

        :param viewbox - This specifies the coordinate system used in the
        svg.  The SVG `viewBox` attribute works together with the `height`
        and `width` attributes.  Using these three attributes allows for
        shifting and scaling of the SVG canvas without changing any values
        other than those in `viewBox`, `height`, and `width`.  `viewbox`
        should be input as a 4-tuple, (min_x, min_y, width, height), or a
        string "min_x min_y width height".  Using this will override the
        `mindim` parameter.

        :param attributes - a list of dictionaries of attributes for the
        input paths.  Note: This will override any other conflicting
        settings.

        :param svg_attributes - a dictionary of attributes for the output
        svg.

        :param svgwrite_debug - This parameter turns on/off `svgwrite`'s
        debugging mode.  By default svgwrite_debug=False.  This increases
        speed and also prevents `svgwrite` from raising an error when not
        all `svg_attributes` key-value pairs are understood.

        :param paths2Drawing - If true, an `svgwrite.Drawing` object is
        returned and no file is written.  This `Drawing` can later be
        saved using the `svgwrite.Drawing.save()` method.

    NOTES:
        * The `svg_attributes` parameter will override any other
        conflicting settings.

        * Any `extra` parameters that `svgwrite.Drawing()` accepts can be
        controlled by passing them in through `svg_attributes`.

        * The unit of length here is assumed to be pixels in all variables.

        * If this function is used multiple times in quick succession to
        display multiple SVGs (all using the default filename), the
        svgviewer/browser will likely fail to load some of the SVGs in
        time.  To fix this, use the timestamp attribute, or give the files
        unique names, or use a pause command (e.g. time.sleep(1)) between
        uses.

    NOTE(review): the default `filename` is evaluated once at import time,
    so the default directory is pinned to the working directory at import
    -- confirm this is intended.
    """
    _default_relative_node_radius = 5e-3
    _default_relative_stroke_width = 1e-3
    _default_path_color = '#000000'
    # black
    _default_node_color = '#ff0000'
    # red
    _default_font_size = 12

    # append directory to filename (if not included)
    if os_path.dirname(filename) == '':
        filename = os_path.join(getcwd(), filename)

    # append time stamp to filename
    if timestamp:
        fbname, fext = os_path.splitext(filename)
        dirname = os_path.dirname(filename)
        tstamp = str(time()).replace('.', '')
        stfilename = os_path.split(fbname)[1] + '_' + tstamp + fext
        filename = os_path.join(dirname, stfilename)

    # check paths and colors are set
    if isinstance(paths, Path) or is_path_segment(paths):
        paths = [paths]
    if paths:
        if not colors:
            colors = [_default_path_color] * len(paths)
        else:
            assert len(colors) == len(paths)
            if isinstance(colors, str):
                colors = str2colorlist(colors, default_color=_default_path_color)
            elif isinstance(colors, list):
                # rgb 3-tuples become "rgb(r, g, b)" strings for svgwrite
                for idx, c in enumerate(colors):
                    if is3tuple(c):
                        colors[idx] = "rgb" + str(c)

    # check nodes and nodes_colors are set (node_radii are set later)
    if nodes:
        if not node_colors:
            node_colors = [_default_node_color] * len(nodes)
        else:
            assert len(node_colors) == len(nodes)
            if isinstance(node_colors, str):
                node_colors = str2colorlist(node_colors, default_color=_default_node_color)
            elif isinstance(node_colors, list):
                for idx, c in enumerate(node_colors):
                    if is3tuple(c):
                        node_colors[idx] = "rgb" + str(c)

    # set up the viewBox and display dimensions of the output SVG
    # along the way, set stroke_widths and node_radii if not provided
    assert paths or nodes
    stuff2bound = []
    if viewbox:
        if not isinstance(viewbox, str):
            viewbox = '%s %s %s %s' % viewbox
        if dimensions is None:
            dimensions = viewbox.split(' ')[2:4]
    elif dimensions:
        dimensions = tuple(map(str, dimensions))
        def strip_units(s):
            # keep only the leading numeric part of e.g. "600px" / "10cm"
            return re.search(r'\d*\.?\d*', s.strip()).group()
        viewbox = '0 0 %s %s' % tuple(map(strip_units, dimensions))
    else:
        # neither viewbox nor dimensions given: compute both from the
        # bounding box of everything that will be drawn
        if paths:
            stuff2bound += paths
        if nodes:
            stuff2bound += nodes
        if text_path:
            stuff2bound += text_path
        xmin, xmax, ymin, ymax = big_bounding_box(stuff2bound)
        dx = xmax - xmin
        dy = ymax - ymin
        # degenerate (zero-extent) bounding boxes get unit extent
        if dx == 0:
            dx = 1
        if dy == 0:
            dy = 1

        # determine stroke_widths to use (if not provided) and max_stroke_width
        if paths:
            if not stroke_widths:
                sw = max(dx, dy) * _default_relative_stroke_width
                stroke_widths = [sw] * len(paths)
                max_stroke_width = sw
            else:
                assert len(paths) == len(stroke_widths)
                max_stroke_width = max(stroke_widths)
        else:
            max_stroke_width = 0

        # determine node_radii to use (if not provided) and max_node_diameter
        if nodes:
            if not node_radii:
                r = max(dx, dy) * _default_relative_node_radius
                node_radii = [r] * len(nodes)
                max_node_diameter = 2 * r
            else:
                assert len(nodes) == len(node_radii)
                max_node_diameter = 2 * max(node_radii)
        else:
            max_node_diameter = 0

        # pad the canvas by the margin plus half the thickest stroke/node
        # so nothing is clipped at the edge
        extra_space_for_style = max(max_stroke_width, max_node_diameter)
        xmin -= margin_size * dx + extra_space_for_style / 2
        ymin -= margin_size * dy + extra_space_for_style / 2
        dx += 2 * margin_size * dx + extra_space_for_style
        dy += 2 * margin_size * dy + extra_space_for_style
        viewbox = "%s %s %s %s" % (xmin, ymin, dx, dy)
        # scale the display size so the larger dimension equals mindim
        if dx > dy:
            szx = str(mindim) + 'px'
            szy = str(int(ceil(mindim * dy / dx))) + 'px'
        else:
            szx = str(int(ceil(mindim * dx / dy))) + 'px'
            szy = str(mindim) + 'px'
        dimensions = szx, szy

    # Create an SVG file
    if svg_attributes is not None:
        dimensions = (svg_attributes.get("width", dimensions[0]), svg_attributes.get("height", dimensions[1]))
        debug = svg_attributes.get("debug", svgwrite_debug)
        dwg = Drawing(filename=filename, size=dimensions, debug=debug, **svg_attributes)
    else:
        dwg = Drawing(filename=filename, size=dimensions, debug=svgwrite_debug, viewBox=viewbox)

    # add paths
    if paths:
        for i, p in enumerate(paths):
            if isinstance(p, Path):
                ps = p.d()
            elif is_path_segment(p):
                ps = Path(p).d()
            else:  # assume this path, p, was input as a Path d-string
                ps = p
            if attributes:
                good_attribs = {'d': ps}
                # probe each attribute individually so that one bad
                # attribute does not discard the rest
                for key in attributes[i]:
                    val = attributes[i][key]
                    if key != 'd':
                        try:
                            dwg.path(ps, **{key: val})
                            good_attribs.update({key: val})
                        except Exception as e:
                            warn(str(e))
                dwg.add(dwg.path(**good_attribs))
            else:
                dwg.add(dwg.path(ps, stroke=colors[i], stroke_width=str(stroke_widths[i]), fill='none'))

    # add nodes (filled in circles)
    if nodes:
        for i_pt, pt in enumerate([(z.real, z.imag) for z in nodes]):
            dwg.add(dwg.circle(pt, node_radii[i_pt], fill=node_colors[i_pt]))

    # add texts
    if text:
        assert isinstance(text, str) or (isinstance(text, list) and isinstance(text_path, list) and len(text_path) == len(text))
        if isinstance(text, str):
            text = [text]
            if not font_size:
                font_size = [_default_font_size]
            if not text_path:
                # NOTE(review): xmin/dx/dy only exist when neither
                # `viewbox` nor `dimensions` was supplied -- otherwise
                # this raises NameError; confirm intended usage.
                pos = complex(xmin + margin_size * dx, ymin + margin_size * dy)
                text_path = [Line(pos, pos + 1).d()]
        else:
            if font_size:
                if isinstance(font_size, list):
                    assert len(font_size) == len(text)
                else:
                    font_size = [font_size] * len(text)
            else:
                font_size = [_default_font_size] * len(text)
        for idx, s in enumerate(text):
            p = text_path[idx]
            if isinstance(p, Path):
                ps = p.d()
            elif is_path_segment(p):
                ps = Path(p).d()
            else:  # assume this path, p, was input as a Path d-string
                ps = p
            # paragraph = dwg.add(dwg.g(font_size=font_size[idx]))
            # paragraph.add(dwg.textPath(ps, s))
            pathid = 'tp' + str(idx)
            dwg.defs.add(dwg.path(d=ps, id=pathid))
            txter = dwg.add(dwg.text('', font_size=font_size[idx]))
            txter.add(txt.TextPath('#' + pathid, s))

    if paths2Drawing:
        return dwg

    # save svg
    if not os_path.exists(os_path.dirname(filename)):
        makedirs(os_path.dirname(filename))
    dwg.save()

    # re-open the svg, make the xml pretty, and save it again
    xmlstring = md_xml_parse(filename).toprettyxml()
    with open(filename, 'w') as f:
        f.write(xmlstring)

    # try to open in web browser
    if openinbrowser:
        try:
            open_in_browser(filename)
        except:
            print("Failed to open output SVG in browser. SVG saved to:")
            print(filename)
|
def add_page(self, pattern, classname):
    """Add a new page to the web application.

    Only available after the Plugin Manager has been loaded.

    :raises PluginManagerNotLoadedException: if called before loading.
    """
    if self._loaded:
        self._app.add_mapping(pattern, classname)
    else:
        raise PluginManagerNotLoadedException()
|
def get_all_group_policies(self, group_name, marker=None, max_items=None):
    """List the names of the policies associated with the specified group.

    :type group_name: string
    :param group_name: The name of the group the policy is associated with.

    :type marker: string
    :param marker: Use this only when paginating results and only in a
        follow-up request after you've received a truncated response; set
        this to the value of the Marker element in that response.

    :type max_items: int
    :param max_items: Use this only when paginating results to indicate
        the maximum number of groups you want in the response.
    """
    params = {'GroupName': group_name}
    for key, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            params[key] = value
    return self.get_response('ListGroupPolicies', params, list_marker='PolicyNames')
|
def esi_client_factory(token=None, datasource=None, spec_file=None, version=None, **kwargs):
    """Generate an ESI client.

    :param token: :class:`esi.Token` used to access authenticated endpoints.
    :param datasource: Name of the ESI datasource to access.
    :param spec_file: Absolute path to a swagger spec file to load.
    :param version: Base ESI API version.  Accepted values are 'legacy',
        'latest', 'dev', or 'vX' where X is a number.
    :param kwargs: Explicit resource versions to build, in the form
        Character='v4'.  Same values accepted as version.
    :return: :class:`bravado.client.SwaggerClient`

    If a spec_file is specified, specific versioning is not available:
    the version and resource version kwargs are ignored in favour of the
    versions available in the spec_file.
    """
    http_client = requests_client.RequestsClient()
    if token or datasource:
        http_client.authenticator = TokenAuthenticator(token=token, datasource=datasource)
    api_version = version or app_settings.ESI_API_VERSION
    if spec_file:
        return read_spec(spec_file, http_client=http_client)
    spec = build_spec(api_version, http_client=http_client, **kwargs)
    return SwaggerClient(spec)
|
def upload_file(self, url, file, callback=None, extra_headers=None):
    """Upload a file to W&B with failure resumption.

    Args:
        url (str): The url to upload to.
        file: Open file object whose ``name`` points at the file to upload.
        callback (:obj:`func`, optional): A callback which is passed the
            number of bytes uploaded since the last time it was called,
            used to report progress.
        extra_headers (dict, optional): Additional HTTP headers to send.

    Returns:
        The requests library response object.

    Raises:
        CommError: If the file is empty.
    """
    # None default instead of a shared mutable default argument; copy so
    # the caller's dict is never modified.
    extra_headers = {} if extra_headers is None else extra_headers.copy()
    response = None
    if os.stat(file.name).st_size == 0:
        raise CommError("%s is an empty file" % file.name)
    try:
        progress = Progress(file, callback=callback)
        response = requests.put(url, data=progress, headers=extra_headers)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        total = progress.len
        status = self._status_request(url, total)
        # Retry-worthy statuses: resume-incomplete (308), timeouts and
        # transient server errors.
        # TODO(adrian): there's probably even more stuff we should add here
        # like if we're offline, we should retry then too
        if status.status_code in (308, 408, 500, 502, 503, 504):
            util.sentry_reraise(retry.TransientException(exc=e))
        else:
            util.sentry_reraise(e)
    return response
|
def _namify_arguments ( mapping ) :
"""Ensure that a mapping of names to parameters has the parameters set to the
correct name ."""
|
result = [ ]
for name , parameter in mapping . iteritems ( ) :
parameter . name = name
result . append ( parameter )
return result
|
def _create_controls(self, can_kill):
    """Creates the button controls, and links them to event handlers.

    Builds pan/zoom tools for the X and Y axes plus a save tool, then binds
    each tool id to its handler.  When ``can_kill`` is true, a close handler
    is bound as well.
    """
    DEBUG_MSG("_create_controls()", 1, self)
    # Need the following line as Windows toolbars default to 15x16
    self.SetToolBitmapSize(wx.Size(16, 16))
    # X-axis pan/zoom tools.
    self.AddSimpleTool(_NTB_X_PAN_LEFT, _load_bitmap('stock_left.xpm'), 'Left', 'Scroll left')
    self.AddSimpleTool(_NTB_X_PAN_RIGHT, _load_bitmap('stock_right.xpm'), 'Right', 'Scroll right')
    self.AddSimpleTool(_NTB_X_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'), 'Zoom in', 'Increase X axis magnification')
    self.AddSimpleTool(_NTB_X_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'), 'Zoom out', 'Decrease X axis magnification')
    self.AddSeparator()
    # Y-axis pan/zoom tools.
    self.AddSimpleTool(_NTB_Y_PAN_UP, _load_bitmap('stock_up.xpm'), 'Up', 'Scroll up')
    self.AddSimpleTool(_NTB_Y_PAN_DOWN, _load_bitmap('stock_down.xpm'), 'Down', 'Scroll down')
    self.AddSimpleTool(_NTB_Y_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'), 'Zoom in', 'Increase Y axis magnification')
    self.AddSimpleTool(_NTB_Y_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'), 'Zoom out', 'Decrease Y axis magnification')
    self.AddSeparator()
    self.AddSimpleTool(_NTB_SAVE, _load_bitmap('stock_save_as.xpm'), 'Save', 'Save plot contents as images')
    self.AddSeparator()
    # Wire every tool id to its event handler.
    bind(self, wx.EVT_TOOL, self._onLeftScroll, id=_NTB_X_PAN_LEFT)
    bind(self, wx.EVT_TOOL, self._onRightScroll, id=_NTB_X_PAN_RIGHT)
    bind(self, wx.EVT_TOOL, self._onXZoomIn, id=_NTB_X_ZOOMIN)
    bind(self, wx.EVT_TOOL, self._onXZoomOut, id=_NTB_X_ZOOMOUT)
    bind(self, wx.EVT_TOOL, self._onUpScroll, id=_NTB_Y_PAN_UP)
    bind(self, wx.EVT_TOOL, self._onDownScroll, id=_NTB_Y_PAN_DOWN)
    bind(self, wx.EVT_TOOL, self._onYZoomIn, id=_NTB_Y_ZOOMIN)
    bind(self, wx.EVT_TOOL, self._onYZoomOut, id=_NTB_Y_ZOOMOUT)
    bind(self, wx.EVT_TOOL, self._onSave, id=_NTB_SAVE)
    bind(self, wx.EVT_TOOL_ENTER, self._onEnterTool, id=self.GetId())
    if can_kill:
        bind(self, wx.EVT_TOOL, self._onClose, id=_NTB_CLOSE)
    # NOTE(review): source indentation was lost; the mousewheel bind is placed
    # unconditionally here (matching upstream matplotlib backend_wx) — confirm.
    bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel)
|
def show_busy(self):
    """Hide the question group box and enable the busy cursor."""
    self.progress_bar.show()
    # Disable *and* hide the question group so the user cannot interact
    # with it while work is in progress.
    self.question_group.setEnabled(False)
    self.question_group.setVisible(False)
    enable_busy_cursor()
    # Force an immediate redraw and pump pending Qt events so the busy
    # state is visible before the long-running work starts.
    self.repaint()
    qApp.processEvents()
    self.busy = True
|
def get_template_names(self):
    """Return the page's specified template name, or a fallback if one hasn't been chosen."""
    chosen = self.request.POST.get('template_name')
    if not chosen:
        # Nothing posted: defer to the normal view resolution.
        return super(PagePreviewView, self).get_template_names()
    return [chosen]
|
def _long_to_bytes ( n , length , byteorder ) :
"""Convert a long to a bytestring
For use in python version prior to 3.2
Source :
http : / / bugs . python . org / issue16580 # msg177208"""
|
if byteorder == 'little' :
indexes = range ( length )
else :
indexes = reversed ( range ( length ) )
return bytearray ( ( n >> i * 8 ) & 0xff for i in indexes )
|
def post(self, resource, data=None, json=None):
    """Send a POST request for the given resource.

    Returns:
        RTMResponse
    """
    # Delegate to the generic request dispatcher with the POST verb.
    return self.do(resource, 'POST', data=data, json=json)
|
def get_dict(cls):
    """Return dictionary with conspect/subconspect info.

    Returns an empty dict when no MDT value is available.
    """
    mdt = cls.get()
    return conspectus.subs_by_mdt.get(mdt, {}) if mdt else {}
|
def router_fabric_virtual_gateway_address_family_ipv6_gateway_mac_address(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF XML payload for the fabric-virtual-gateway IPv6
    ``gateway-mac-address`` leaf and passes it to the callback.
    """
    config = ET.Element("config")
    router_el = ET.SubElement(config, "router", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    fvg = ET.SubElement(router_el, "fabric-virtual-gateway", xmlns="urn:brocade.com:mgmt:brocade-anycast-gateway")
    af = ET.SubElement(fvg, "address-family")
    ipv6_el = ET.SubElement(af, "ipv6")
    mac_leaf = ET.SubElement(ipv6_el, "gateway-mac-address")
    mac_leaf.text = kwargs.pop('gateway_mac_address')
    # An explicit 'callback' kwarg overrides the instance default.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def partition_range(stop, annotations=None):
    """Partition the range from 0 to `stop` based on annotations.

    >>> partition_range(50, annotations=[[(0, 21), (30, 35)],
    ...                                  [(15, 32), (40, 46)]])
    [(0, 15, {0}),
     (15, 21, {0, 1}),
     (21, 30, {1}),
     (30, 32, {0, 1}),
     (32, 35, {0}),
     (35, 40, set()),
     (40, 46, {1}),
     (46, 50, set())]

    :arg stop: End point (not included) of the range (similar to the `stop`
        argument of the built-in :func:`range` function).
    :type stop: int
    :arg annotations: For each annotation level, a list of (`start`, `stop`)
        pairs defining an annotated region.
    :type annotations: list
    :return: Partitioning of the range as (`start`, `stop`, `levels`) tuples
        defining a region with a set of annotation levels.
    :rtype: list

    All regions (`start`, `stop`) are defined as in slicing notation, so
    zero-based and `stop` is not included.  Annotation regions can overlap
    (overlap within one level is ignored) and do not need to be sorted.
    """
    annotations = annotations or []
    # Level membership can only change at a region boundary (or 0/stop),
    # so only those points need to be inspected.
    boundaries = {0, stop}
    for regions in annotations:
        for region_start, region_stop in regions:
            boundaries.add(region_start)
            boundaries.add(region_stop)
    partitioning = []
    part_start, part_levels = 0, None
    for point in sorted(boundaries):
        if point == stop:
            partitioning.append((part_start, point, part_levels))
            break
        levels = {level for level, regions in enumerate(annotations)
                  if any(a <= point < b for a, b in regions)}
        if point == 0:
            part_levels = levels
            continue
        if levels != part_levels:
            # Level set changed: close the current part and open a new one.
            partitioning.append((part_start, point, part_levels))
            part_start, part_levels = point, levels
    return partitioning
|
def insert(self, collection, doc, callback=None):
    """Insert an item into a collection.

    Arguments:
        collection - the collection to be modified
        doc - The document to insert.  May not yet have an _id attribute,
            in which case Meteor will generate one for you.

    Keyword Arguments:
        callback - Optional.  If present, called with an error object as the
            first argument and, if no error, the _id as the second.
    """
    method = "/{}/insert".format(collection)
    self.call(method, [doc], callback=callback)
|
def wait_for_ilo_after_reset(ilo_object):
    """Continuously polls for iLO to come up after reset."""
    # A named inner function replaces the previous lambda-plus-__name__ hack;
    # a def carries its own __name__ automatically.
    def is_ilo_up_after_reset():
        # iLO is considered up once it can report a product name.
        return ilo_object.get_product_name() is not None
    wait_for_operation_to_complete(is_ilo_up_after_reset,
                                   failover_exc=exception.IloConnectionError,
                                   failover_msg='iLO is not up after reset.')
|
def setOrga(request, hproPk=None):
    """Change the current orga.

    In standalone mode the organization name/pk are taken straight from the
    query string; otherwise membership of the requested organization is
    checked before storing its pk in the session.
    """
    if settings.PIAPI_STANDALONE:
        request.session['plugit-standalone-organame'] = request.GET.get('name')
        request.session['plugit-standalone-orgapk'] = request.GET.get('pk')
    else:
        (_, _, hproject) = getPlugItObject(hproPk)
        from organizations.models import Organization
        orga = get_object_or_404(Organization, pk=request.GET.get('orga'))
        # Only superusers, members or owners may switch to this organization.
        if request.user.is_superuser or orga.isMember(request.user) or orga.isOwner(request.user):
            request.session['plugit-orgapk-' + str(hproject.pk)] = orga.pk
    # NOTE(review): source indentation was lost; the empty response is
    # returned unconditionally here (both branches fall through) — confirm.
    return HttpResponse('')
|
def _resolve_dotted_name(dotted_name):
    """Returns objects from strings.

    Deals e.g. with 'torch.nn.Softmax(dim=-1)'.
    Modified from palladium:
    https://github.com/ottogroup/palladium/blob/8a066a9a7690557d9b1b6ed54b7d1a1502ba59e3/palladium/util.py
    with added support for instantiated objects.
    """
    # Non-strings and plain names pass through untouched.
    if not isinstance(dotted_name, str):
        return dotted_name
    if '.' not in dotted_name:
        return dotted_name
    params = None
    match = P_PARAMS.match(dotted_name)
    if match:
        # Split 'pkg.mod.Cls(args)' into the name and its '(args)' suffix.
        dotted_name = match.group('name')
        params = match.group('params')
    module_name, attr_name = dotted_name.rsplit('.', 1)
    obj = getattr(import_module(module_name), attr_name)
    if params:
        # Strip the surrounding parentheses and instantiate.
        pos_args, kw_args = _parse_args_kwargs(params[1:-1])
        obj = obj(*pos_args, **kw_args)
    return obj
|
def mean(data):
    """Return the sample arithmetic mean of data.

    If ``data`` is empty, StatisticsError will be raised.
    """
    # One-shot iterators must be materialized before len()/sum().
    if iter(data) is data:
        data = list(data)
    count = len(data)
    if count < 1:
        raise StatisticsError('mean requires at least one data point')
    return sum(data) / count
|
def vxvyvz_to_vrpmllpmbb(vx, vy, vz, l, b, d, XYZ=False, degree=False):
    """NAME:
       vxvyvz_to_vrpmllpmbb

    PURPOSE:
       Transform velocities in the rectangular Galactic coordinate frame to
       the spherical Galactic coordinate frame (can take vector inputs)

    INPUT:
       vx - velocity towards the Galactic Center (km/s)
       vy - velocity in the direction of Galactic rotation (km/s)
       vz - velocity towards the North Galactic Pole (km/s)
       l - Galactic longitude
       b - Galactic latitude
       d - distance (kpc)
       XYZ - (bool) If True, then l,b,d is actually X,Y,Z
             (rectangular Galactic coordinates)
       degree - (bool) if True, l and b are in degrees

    OUTPUT:
       (vr, pmll x cos(b), pmbb) in (km/s, mas/yr, mas/yr); pmll = mu_l * cos(b)
       For vector inputs [:,3]

    HISTORY:
       2009-10-24 - Written - Bovy (NYU)
       2014-06-14 - Re-written w/ numpy functions for speed and w/ decorators
                    for beauty - Bovy (IAS)
    """
    # Whether to use degrees and scalar input is handled by decorators
    if XYZ:
        # undo the incorrect conversion that the decorator did
        if degree:
            l *= 180. / nu.pi
            b *= 180. / nu.pi
        lbd = XYZ_to_lbd(l, b, d, degree=False)
        l = lbd[:, 0]
        b = lbd[:, 1]
        d = lbd[:, 2]
    # Per-point rotation matrices from the rectangular to the spherical
    # (radial / longitudinal / latitudinal) frame; R[2,1] stays zero.
    R = nu.zeros((3, 3, len(l)))
    R[0, 0] = nu.cos(l) * nu.cos(b)
    R[0, 1] = -nu.sin(l)
    R[0, 2] = -nu.cos(l) * nu.sin(b)
    R[1, 0] = nu.sin(l) * nu.cos(b)
    R[1, 1] = nu.cos(l)
    R[1, 2] = -nu.sin(l) * nu.sin(b)
    R[2, 0] = nu.sin(b)
    R[2, 2] = nu.cos(b)
    # Replicate each velocity component so the elementwise product with R
    # followed by the sum over the last axis performs the matrix-vector
    # multiply for all points at once.
    invxyz = nu.array([[vx, vx, vx], [vy, vy, vy], [vz, vz, vz]])
    vrvlvb = (R.T * invxyz.T).sum(-1)
    # Convert tangential velocities (km/s) to proper motions (mas/yr);
    # _K is the km/s-per-(kpc*mas/yr) conversion constant.
    vrvlvb[:, 1] /= d * _K
    vrvlvb[:, 2] /= d * _K
    return vrvlvb
|
def get_objectives(self):
    """Gets all ``Objectives``.

    In plenary mode, the returned list contains all known objectives
    or an error results. Otherwise, the returned list may contain
    only those objectives that are accessible through this session.

    return: (osid.learning.ObjectiveList) - an ``ObjectiveList``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('learning', collection='Objective', runtime=self._runtime)
    # Apply the session's view filter and return newest-first by _id.
    result = collection.find(self._view_filter()).sort('_id', DESCENDING)
    return objects.ObjectiveList(result, runtime=self._runtime, proxy=self._proxy)
|
def run_notebook(notebook_path):
    """Execute a notebook via nbconvert and collect output.

    :param notebook_path: path to the .ipynb file to execute
    :returns: (parsed nb object, execution errors)
    """
    # Resolve to an absolute path first: we chdir into the notebook's
    # directory below, which would otherwise break a relative notebook_path
    # when it is later passed to jupyter-nbconvert.
    notebook_path = os.path.abspath(notebook_path)
    dirname, __ = os.path.split(notebook_path)
    os.chdir(dirname)
    with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
        # NOTE(review): re-opening a NamedTemporaryFile by name is not
        # supported on Windows; this helper assumes a POSIX platform.
        args = ["jupyter-nbconvert", "--to", "notebook", "--execute",
                "--allow-errors", "--ExecutePreprocessor.timeout=300",
                "--output", fout.name, notebook_path]
        try:
            subprocess.check_call(args)
        except subprocess.CalledProcessError as e:
            if e.returncode == 1:
                # Return code 1 means the notebook itself contained errors;
                # those are collected from the cell outputs below.
                print(e.output)
            else:
                # Any other code indicates some other problem; rethrow.
                raise
        fout.seek(0)
        nb = nbformat.read(fout, nbformat.current_nbformat)
    errors = [output
              for cell in nb.cells if "outputs" in cell
              for output in cell["outputs"] if output.output_type == "error"]
    return nb, errors
|
def __read_graph(self, network_filename):
    """Read .ncol network file.

    :param network_filename: complete path for the .ncol file
    :return: an undirected network
    """
    # Node labels are parsed as ints; the resulting undirected graph is
    # stored on the instance (self.g) rather than returned.
    self.g = nx.read_edgelist(network_filename, nodetype=int)
|
def update_dataset(dataset_id, name, data_type, val, unit_id, metadata=None, flush=True, **kwargs):
    """Update an existing dataset.

    :param dataset_id: id of the dataset to update (required)
    :param name: new dataset name
    :param data_type: new dataset type
    :param val: new dataset value
    :param unit_id: new unit id
    :param metadata: optional metadata dict (defaults to empty)
    :param flush: when truthy, flush the DB session before returning
    :raises HydraError: if dataset_id is None
    """
    if dataset_id is None:
        raise HydraError("Dataset must have an ID to be updated.")
    # A fresh dict per call avoids the shared mutable-default-argument pitfall
    # of the previous `metadata={}` signature.
    if metadata is None:
        metadata = {}
    user_id = kwargs.get('user_id')
    dataset = db.DBSession.query(Dataset).filter(Dataset.id == dataset_id).one()
    # This dataset has been seen before, so it may be attached
    # to other scenarios, which may be locked. If they are locked, we must
    # not change their data, so new data must be created for the unlocked scenarios
    locked_scenarios = []
    unlocked_scenarios = []
    for dataset_rs in dataset.resourcescenarios:
        if dataset_rs.scenario.locked == 'Y':
            locked_scenarios.append(dataset_rs)
        else:
            unlocked_scenarios.append(dataset_rs)
    # Are any of these scenarios locked?
    if len(locked_scenarios) > 0:
        # If so, create a new dataset and assign to all unlocked datasets.
        dataset = add_dataset(data_type, val, unit_id, metadata=metadata, name=name, user_id=kwargs['user_id'])
        for unlocked_rs in unlocked_scenarios:
            unlocked_rs.dataset = dataset
    else:
        dataset.type = data_type
        dataset.value = val
        dataset.set_metadata(metadata)
        dataset.unit_id = unit_id
        dataset.name = name
        dataset.created_by = kwargs['user_id']
        dataset.hash = dataset.set_hash()
        # Is there a dataset in the DB already which is identical to the updated dataset?
        existing_dataset = db.DBSession.query(Dataset).filter(Dataset.hash == dataset.hash, Dataset.id != dataset.id).first()
        if existing_dataset is not None and existing_dataset.check_user(user_id):
            log.warning("An identical dataset %s has been found to dataset %s."
                        " Deleting dataset and returning dataset %s",
                        existing_dataset.id, dataset.id, existing_dataset.id)
            db.DBSession.delete(dataset)
            dataset = existing_dataset
    if flush:
        db.DBSession.flush()
    return dataset
|
def get_value(self, node):
    """Convert value from an AST node.

    :param node: an ``ast.Dict`` node to evaluate
    :return: the evaluated, JSON-serializable dictionary
    :raises TypeError: if node is not a dict or the result is not a
        serializable dict
    """
    if not isinstance(node, ast.Dict):
        raise TypeError("must be a dictionary")
    evaluator = SafeEvaluator()
    # The previous ``except Exception as ex: raise ex`` wrapper added nothing
    # (it re-raised every error unchanged), so evaluation errors now
    # propagate directly with their original traceback.
    value = evaluator.run(node)
    try:
        # Round-trip through JSON to ensure value is a serializable dictionary.
        value = json.loads(json.dumps(value))
        if not isinstance(value, dict):
            raise TypeError
    except (TypeError, ValueError):
        raise TypeError("must be serializable")
    return value
|
def sweNextTransit(obj, jd, lat, lon, flag):
    """Returns the julian date of the next transit of
    an object. The flag should be 'RISE' or 'SET'.
    """
    # Map the symbolic object name to the swisseph body id.
    sweObj = SWE_OBJECTS[obj]
    # Any flag other than 'RISE' is treated as a setting event.
    flag = swisseph.CALC_RISE if flag == 'RISE' else swisseph.CALC_SET
    # NOTE(review): swisseph.rise_trans takes longitude before latitude here,
    # with zeroed altitude/pressure/temperature — confirm against pyswisseph docs.
    trans = swisseph.rise_trans(jd, sweObj, lon, lat, 0, 0, 0, flag)
    # trans[1][0] holds the julian date of the event.
    return trans[1][0]
|
def convert_image_to_rgb_mode(image, fill_color=(255, 255, 255)):
    """Convert the specified image instance to RGB mode.

    @param image: a Python Library Image (PIL) instance to convert its
        pixel format to RGB, discarding the alpha channel.
    @param fill_color: color to be used to fill transparent pixels when
        discarding the alpha channel. By default, the white color.
    @return: a Python Library Image instance with pixel format of RGB.
    """
    # Only images with an alpha channel need converting.
    if image.mode not in ('RGBA', 'LA'):
        return image
    # In most cases simply discarding the alpha channel will give
    # undesirable result, because transparent pixels also have some
    # unpredictable colors. It is much better to fill transparent pixels
    # with a specified color.
    # NOTE(review): mode[:-1] turns 'LA' into 'L' (greyscale), not RGB as the
    # docstring claims, and an 'L' image with a 3-tuple fill_color may be
    # rejected by PIL — confirm intended behavior for LA inputs.
    background_image = Image.new(image.mode[:-1], image.size, fill_color)
    # The alpha band (last band from split()) is used as the paste mask.
    background_image.paste(image, image.split()[-1])
    return background_image
|
def post_unpack_merkleblock(d, f):
    """A post-processing "post_unpack" to merkleblock messages.

    It validates the merkle proofs (throwing an exception if there's
    an error), and returns the list of transaction hashes in "tx_hashes".
    The transactions are supposed to be sent immediately after the
    merkleblock message.
    """
    # Width of each level of the merkle tree, leaves first; each parent
    # level has ceil(count/2) nodes.
    level_widths = []
    count = d["total_transactions"]
    while count > 1:
        level_widths.append(count)
        count += 1
        count //= 2
    level_widths.append(1)
    # Root-first order for the recursive walk below.
    level_widths.reverse()
    tx_acc = []
    flags = d["flags"]
    # Hashes are consumed by popping, so reverse once up front.
    hashes = list(reversed(d["hashes"]))
    left_hash, flag_index = _recurse(level_widths, 0, 0, hashes, flags, 0, tx_acc)
    if len(hashes) > 0:
        raise ValueError("extra hashes: %s" % hashes)
    # All flag bits must be consumed: the last used bit has to fall in the
    # final flags byte, and no higher bit in that byte may be set.
    idx, r = divmod(flag_index - 1, 8)
    if idx != len(flags) - 1:
        raise ValueError("not enough flags consumed")
    if flags[idx] > (1 << (r + 1)) - 1:
        raise ValueError("unconsumed 1 flag bits set")
    if left_hash != d["header"].merkle_root:
        raise ValueError("merkle root %s does not match calculated hash %s" % (
            b2h_rev(d["header"].merkle_root), b2h_rev(left_hash)))
    d["tx_hashes"] = tx_acc
    return d
|
def local_service(self, name_or_id):
    """Get the locally synced information for a service.

    This method is safe to call outside of the background event loop
    without any race condition.  Internally it uses a thread-safe mutex to
    protect the local copies of supervisor data and ensure that it cannot
    change while this method is iterating over it.

    Args:
        name_or_id (string or int): Either a short name for the service or
            a numeric id.

    Returns:
        ServiceState: the current state of the service synced locally
            at the time of the call.
    """
    # Decide once whether we must take the lock.  The previous code called
    # self._loop.inside_loop() again in the cleanup path; if the answer
    # changed between acquire and release, the lock could be leaked or
    # released while unheld.
    need_lock = not self._loop.inside_loop()
    if need_lock:
        self._state_lock.acquire()
    try:
        if isinstance(name_or_id, int):
            if name_or_id not in self._name_map:
                raise ArgumentError("Unknown ID used to look up service", id=name_or_id)
            name = self._name_map[name_or_id]
        else:
            name = name_or_id
        if name not in self.services:
            raise ArgumentError("Unknown service name", name=name)
        # Return a copy so the caller's view cannot mutate shared state.
        return copy(self.services[name])
    finally:
        if need_lock:
            self._state_lock.release()
|
def select_army(self, shift):
    """Select the entire army."""
    # Build a UI action; `shift` controls whether the selection is additive.
    select_action = sc_pb.Action()
    select_action.action_ui.select_army.selection_add = shift
    return select_action
|
def __get_resource_entry_data ( self , bundleId , languageId , resourceKey , fallback = False ) :
"""` ` GET / { serviceInstanceId } / v2 / bundles / { bundleId } / { languageId }
/ { resourceKey } ` `
Gets the resource entry information ."""
|
url = self . __get_base_bundle_url ( ) + '/' + bundleId + '/' + languageId + '/' + resourceKey
params = { 'fallback' : 'true' } if fallback else None
response = self . __perform_rest_call ( requestURL = url , params = params )
if not response :
return None
resourceEntryData = response . get ( self . __RESPONSE_RESOURCE_ENTRY_KEY )
return resourceEntryData
|
def _get_logger ( self ) :
"""Get the appropriate logger
Prevents uninitialized servers in write - only mode from failing ."""
|
if self . logger :
return self . logger
elif self . server :
return self . server . logger
else :
return default_logger
|
def verify(full, dataset_uri):
    """Verify the integrity of a dataset.

    Compares the dataset's stored manifest against a freshly generated one:
    reports unknown items, missing items and size mismatches; with ``full``
    set, also compares content hashes.  Exits with status 1 on any problem.
    """
    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    all_okay = True
    generated_manifest = dataset.generate_manifest()
    generated_identifiers = set(generated_manifest["items"].keys())
    manifest_identifiers = set(dataset.identifiers)
    for i in generated_identifiers.difference(manifest_identifiers):
        message = "Unknown item: {} {}".format(i, generated_manifest["items"][i]["relpath"])
        click.secho(message, fg="red")
        all_okay = False
    for i in manifest_identifiers.difference(generated_identifiers):
        message = "Missing item: {} {}".format(i, dataset.item_properties(i)["relpath"])
        click.secho(message, fg="red")
        all_okay = False
    for i in manifest_identifiers.intersection(generated_identifiers):
        # These are sizes, not hashes — the previous variable names
        # (generated_hash/manifest_hash) were misleading.
        generated_size = generated_manifest["items"][i]["size_in_bytes"]
        manifest_size = dataset.item_properties(i)["size_in_bytes"]
        if generated_size != manifest_size:
            message = "Altered item size: {} {}".format(i, dataset.item_properties(i)["relpath"])
            click.secho(message, fg="red")
            all_okay = False
    if full:
        # Hash comparison is opt-in because it requires reading every item.
        for i in manifest_identifiers.intersection(generated_identifiers):
            generated_hash = generated_manifest["items"][i]["hash"]
            manifest_hash = dataset.item_properties(i)["hash"]
            if generated_hash != manifest_hash:
                message = "Altered item hash: {} {}".format(i, dataset.item_properties(i)["relpath"])
                click.secho(message, fg="red")
                all_okay = False
    if not all_okay:
        sys.exit(1)
    else:
        click.secho("All good :)", fg="green")
|
def dot_eth_label(name):
    """Convert from a name, like 'ethfinex.eth', to a label, like 'ethfinex'.

    If name is already a label, this should be a noop, except for converting
    to a string and validating the name syntax.
    """
    label = name_to_label(name, registrar='eth')
    if len(label) >= MIN_ETH_LABEL_LENGTH:
        return label
    raise InvalidLabel('name %r is too short' % label)
|
def p_recipe(self, t):
    """recipe : RECIPE_LINE
              | RECIPE_LINE recipe"""
    # NOTE: the docstring above is the PLY grammar production for this rule
    # and must not be edited as prose.
    # t[1] is the current RECIPE_LINE; when the second alternative matched,
    # t[2] is the already-reduced remainder of the recipe to append.
    if len(t) == 3:
        t[0] = t[1] + t[2]
    else:
        t[0] = t[1]
|
def add_edge(self, u, v, **attr):
    """Add an edge from u to v and update edge attributes."""
    # Make sure both endpoints exist in every adjacency structure.
    for node in (u, v):
        if node not in self.vertices:
            self.vertices[node] = []
            self.pred[node] = []
            self.succ[node] = []
    # Store (a copy of) the attributes keyed by the directed edge.
    self.edges[(u, v)] = dict(attr)
    self.vertices[u].append(v)
    self.pred[v].append(u)
    self.succ[u].append(v)
|
def buy_item(self, item_name, abbr):
    """Purchase a store item for the given learning language.

    A status code of 200 indicates that the item was purchased; the response
    body then looks like ``{"streak_freeze": "2017-01-10 02:39:59.594327"}``.
    (This text previously sat mid-function as a no-op string statement.)

    Raises:
        AlreadyHaveStoreItemException: if the item is already equipped.
        Exception: for any other purchase failure.
    """
    url = 'https://www.duolingo.com/2017-06-30/users/{}/purchase-store-item'
    url = url.format(self.user_data.id)
    data = {'name': item_name, 'learningLanguage': abbr}
    request = self._make_req(url, data)
    if request.status_code == 400 and request.json()['error'] == 'ALREADY_HAVE_STORE_ITEM':
        raise AlreadyHaveStoreItemException('Already equipped with ' + item_name + '.')
    if not request.ok:
        # any other error:
        raise Exception('Not possible to buy item.')
|
def _get_mps_od_net(input_image_shape, batch_size, output_size, anchors, config, weights=None):
    """Initializes an MpsGraphAPI for object detection.

    Args:
        input_image_shape: (channels, height, width) of the input image.
        batch_size: number of images per batch.
        output_size: number of output channels.
        anchors: anchor boxes (unused here; kept for API parity).
        config: configuration forwarded to the graph.
        weights: optional mapping of layer weights; defaults to empty.

    Returns:
        The initialized network object.
    """
    # Fresh dict per call avoids the mutable-default-argument pitfall of the
    # previous `weights={}` signature.
    if weights is None:
        weights = {}
    network = _MpsGraphAPI(network_id=_MpsGraphNetworkType.kODGraphNet)
    c_in, h_in, w_in = input_image_shape
    c_out = output_size
    # The OD network downsamples the spatial dimensions by a factor of 32.
    h_out = h_in // 32
    w_out = w_in // 32
    # (The unused c_view/h_view/w_view locals from the original were removed.)
    network.init(batch_size, c_in, h_in, w_in, c_out, h_out, w_out, weights=weights, config=config)
    return network
|
def get_summary(url, spk=True):
    '''simple function to retrieve the header of a BSP file and return SPK object'''
    # NOTE(review): urllib2 and StringIO are Python-2-only modules; this
    # helper presumably predates a py3 port — confirm before reuse.
    # connect to file at URL
    bspurl = urllib2.urlopen(url)
    # retrieve the "tip" of a file at URL: first 100kB, enough for the header
    bsptip = bspurl.read(10 ** 5)
    # save data in fake file object (in-memory)
    bspstr = StringIO(bsptip)
    # load into DAF object
    daf = DAF(bspstr)
    # return either SPK or DAF object
    if spk:
        # make a SPK object (shadows the boolean `spk` parameter)
        spk = SPK(daf)
        # return representation
        return spk
    else:
        # return representation
        return daf
|
def direct_normal_radiation(self, value=9999.0):
    """Corresponds to IDD Field `direct_normal_radiation`.

    Args:
        value (float): value for IDD Field `direct_normal_radiation`
            Unit: Wh/m2
            value >= 0.0
            Missing value: 9999.0
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    # None is accepted unchecked as the "missing" marker.
    if value is None:
        self._direct_normal_radiation = None
        return
    try:
        parsed = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `direct_normal_radiation`'.format(value))
    if parsed < 0.0:
        raise ValueError('value need to be greater or equal 0.0 '
                         'for field `direct_normal_radiation`')
    self._direct_normal_radiation = parsed
|
def copy(self):
    """Returns a copy of Markov Chain Model.

    Return Type:
        MarkovChain: Copy of MarkovChain.

    Examples:
        >>> from pgmpy.models import MarkovChain
        >>> from pgmpy.factors.discrete import State
        >>> model = MarkovChain()
        >>> model.add_variables_from(['intel', 'diff'], [3, 2])
        >>> intel_tm = {0: {0: 0.2, 1: 0.4, 2: 0.4}, 1: {0: 0, 1: 0.5, 2: 0.5}, 2: {0: 0.3, 1: 0.3, 2: 0.4}}
        >>> model.add_transition_model('intel', intel_tm)
        >>> diff_tm = {0: {0: 0.5, 1: 0.5}, 1: {0: 0.25, 1: 0.75}}
        >>> model.add_transition_model('diff', diff_tm)
        >>> model.set_start_state([State('intel', 0), State('diff', 2)])
        >>> model_copy = model.copy()
    """
    # Rebuild a chain with the same variables, cardinalities and start state.
    duplicate = MarkovChain(variables=list(self.cardinalities.keys()),
                            card=list(self.cardinalities.values()),
                            start_state=self.state)
    if self.transition_models:
        duplicate.transition_models = self.transition_models.copy()
    return duplicate
|
def kde(self, term, bandwidth=2000, samples=1000, kernel='gaussian'):
    """Estimate the kernel density of the instances of term in the text.

    Args:
        term (str): A stemmed term.
        bandwidth (int): The kernel bandwidth.
        samples (int): The number of evenly-spaced sample points.
        kernel (str): The kernel function.

    Returns:
        np.array: The density estimate.
    """
    # Column vector of the term's token offsets.
    offsets = np.array(self.terms[term])[:, np.newaxis]
    # Fit the density estimator on the offsets.
    estimator = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(offsets)
    # Score an evenly-spaced array of sample points across the text.
    sample_points = np.linspace(0, len(self.tokens), samples)[:, np.newaxis]
    log_densities = estimator.score_samples(sample_points)
    # Scale the scores to integrate to 1.
    return np.exp(log_densities) * (len(self.tokens) / samples)
|
def check_collision_state(cls, collision_state, history_id_key, history_id, block_id, checked_ops, affected_opcodes):
    """Given a history ID, see if it already exists
    at the given block ID (i.e. it's not expired),
    using the given collision state.

    Return True if so; False if not.
    If there is a collision, set the __collided__ field in each checked_ops that
    has a matching history_id value and has an opcode in affected_opcodes.
    """
    # seen before in this block?
    # (dict.has_key() was removed in Python 3; membership tests use `in`.)
    block_state = collision_state.get(block_id)
    if block_state is None:
        collision_state[block_id] = {history_id_key: [history_id]}
        rc = False
    elif history_id_key not in block_state:
        block_state[history_id_key] = [history_id]
        rc = False
    elif history_id in block_state[history_id_key]:
        rc = True
    else:
        block_state[history_id_key].append(history_id)
        rc = False
    if not rc:
        # no collision
        return rc
    # find and mark collided operations
    for prev_op in checked_ops:
        prev_opcode = op_get_opcode_name(prev_op['op'])
        if prev_opcode not in affected_opcodes:
            # not affected
            continue
        if history_id_key not in prev_op:
            # won't match
            continue
        if prev_op[history_id_key] == history_id:
            # collision
            cls.nameop_set_collided(prev_op, history_id_key, history_id)
    return True
|
def unit_id(self, unit_id=None):
    """Get or set unit ID field.

    :param unit_id: unit ID (0 to 255) or None for get value
    :type unit_id: int or None
    :returns: unit ID or None if set fail
    :rtype: int or None
    """
    # Getter: no argument supplied.
    if unit_id is None:
        return self.__unit_id
    # Setter: only single-byte values are accepted.
    candidate = int(unit_id)
    if 0 <= candidate < 256:
        self.__unit_id = candidate
        return self.__unit_id
    return None
|
def copy(self):
    """Create a flat copy of the dict."""
    # Sentinel distinguishes "slot never set" from any real value.
    sentinel = object()
    clone = object.__new__(self.__class__)
    for slot in self.__slots__:
        value = getattr(self, slot, sentinel)
        if value is not sentinel:
            setattr(clone, slot, value)
    return clone
|
def read_xml(self, file_configuration):
    """parses C++ code, defined on the file_configurations and returns
    GCCXML generated file content"""
    xml_file_path = None
    # Generated xml files are temporary; pre-existing ones must be kept.
    delete_xml_file = True
    fc = file_configuration
    reader = source_reader.source_reader_t(self.__config, None, self.__decl_factory)
    try:
        if fc.content_type == fc.CONTENT_TYPE.STANDARD_SOURCE_FILE:
            self.logger.info('Parsing source file "%s" ... ', fc.data)
            xml_file_path = reader.create_xml_file(fc.data)
        elif fc.content_type == file_configuration_t.CONTENT_TYPE.GCCXML_GENERATED_FILE:
            self.logger.info('Parsing xml file "%s" ... ', fc.data)
            # User-supplied xml file: use it as-is and never delete it.
            xml_file_path = fc.data
            delete_xml_file = False
        elif fc.content_type == fc.CONTENT_TYPE.CACHED_SOURCE_FILE:
            # TODO: raise error when header file does not exist
            if not os.path.exists(fc.cached_source_file):
                # Create the cache directory on demand before generating.
                dir_ = os.path.split(fc.cached_source_file)[0]
                if dir_ and not os.path.exists(dir_):
                    os.makedirs(dir_)
                self.logger.info('Creating xml file "%s" from source file "%s" ... ', fc.cached_source_file, fc.data)
                xml_file_path = reader.create_xml_file(fc.data, fc.cached_source_file)
            else:
                # Cache hit: reuse the previously generated xml.
                xml_file_path = fc.cached_source_file
        else:
            xml_file_path = reader.create_xml_file_from_string(fc.data)
        with open(xml_file_path, "r") as xml_file:
            xml = xml_file.read()
        # Early best-effort cleanup; the finally block repeats it for the
        # error paths (remove_file_no_raise tolerates a missing file).
        utils.remove_file_no_raise(xml_file_path, self.__config)
        self.__xml_generator_from_xml_file = reader.xml_generator_from_xml_file
        return xml
    finally:
        if xml_file_path and delete_xml_file:
            utils.remove_file_no_raise(xml_file_path, self.__config)
|
def get_consul_configuration_from_environment() -> ConsulConfiguration:
    """Gets configuration on how to connect to Consul from the environment.

    :return: the configuration derived from the environment
    :raises KeyError: if a required environment variable has not been set
    :raises EnvironmentError: if the address includes a scheme
    """
    address = os.environ[CONSUL_ADDRESS_ENVIRONMENT_VARIABLE]
    if "://" in address:
        raise EnvironmentError(f"Invalid host: {address}. Do not specify scheme in host - set that in "
                               f"{CONSUL_SCHEME_ENVIRONMENT_VARIABLE}")
    host_port_split = address.split(":")
    host = host_port_split[0]
    if len(host_port_split) == 1:
        _get_logger().info(f"No port specified in address read from the environment - using default port: {DEFAULT_CONSUL_PORT}")
        port = DEFAULT_CONSUL_PORT
    else:
        # Convert to int so the type matches the (numeric) default instead of
        # leaking the raw environment string through; a non-numeric port now
        # fails fast with ValueError.
        port = int(host_port_split[1])
    return ConsulConfiguration(host=host, port=port,
                               token=os.environ.get(CONSUL_TOKEN_ENVIRONMENT_VARIABLE, DEFAULT_CONSUL_TOKEN),
                               scheme=os.environ.get(CONSUL_SCHEME_ENVIRONMENT_VARIABLE, DEFAULT_CONSUL_SCHEME),
                               datacentre=os.environ.get(CONSUL_DATACENTRE_ENVIRONMENT_VARIABLE, DEFAULT_CONSUL_DATACENTRE),
                               verify=os.environ.get(CONSUL_VERIFY_ENVIRONMENT_VARIABLE, DEFAULT_CONSUL_VERIFY),
                               certificate=os.environ.get(CONSUL_CERTIFICATE_ENVIRONMENT_VARIABLE, DEFAULT_CONSUL_CERTIFICATE))
|
def cookie_parts ( name , kaka ) :
    """Return the "|"-separated parts of the named cookie's payload.

    :param name: name of the cookie morsel to look up
    :param kaka: the raw cookie string
    :return: list of payload parts, or None if there is no cookie object
        with the given name"""

    morsel = SimpleCookie ( as_unicode ( kaka ) ) . get ( name )
    # Morsel lookup misses (or an empty morsel) yield None.
    return morsel . value . split ( "|" ) if morsel else None
|
def build_owner ( args ) :
    """Factory for creating owners , based on --sort option ."""

    if args . sort_by == 'cover' :
        return CoverageOwners ( args . root , args . verbose )
    # When root names a file rather than a directory, treat its directory
    # as the root and the filename as the filter.
    if not os . path . isdir ( args . root ) :
        args . root , args . filter = os . path . split ( args . root )
    if args . sort_by == 'date' :
        return DateOwners ( args . root , args . filter , args . details , args . verbose , args . max )
    # Default: sort by size.
    return SizeOwners ( args . root , args . filter , args . details , args . verbose , args . max )
|
def find_gui_and_backend ( ) :
    """Return the (gui, backend) pair for the active matplotlib backend.

    The gui is looked up from the configured backend so the caller can
    activate the matching inputhook; it is None for unknown backends."""

    mpl = sys . modules [ 'matplotlib' ]
    # WARNING: reading rcParams['backend'] assumes matplotlib 1.1 or newer!
    backend = mpl . rcParams [ 'backend' ]
    return backend2gui . get ( backend , None ) , backend
|
def right ( self , num = None ) :
    """Slice from the right: the flat-list equivalent of ``[-num:]``.

    :param num: number of trailing elements to keep.  ``None`` returns a
        FlatList holding only the last element; values <= 0 return Null.
    :return: a FlatList of the last ``num`` elements (or Null).
    """

    # Fix: compare to None with ``is``, not ``==`` (PEP 8; ``==`` can be
    # hijacked by a custom __eq__ on num).
    if num is None :
        return FlatList ( [ _get_list ( self ) [ - 1 ] ] )
    if num <= 0 :
        return Null
    return FlatList ( _get_list ( self ) [ - num : ] )
|
def add_masquerade ( zone = None , permanent = True ) :
    '''Enable masquerade on a zone .
    If zone is omitted , default zone will be used .
    . . versionadded : : 2015.8.0
    CLI Example :
    . . code - block : : bash
        salt ' * ' firewalld . add _ masquerade
    To enable masquerade on a specific zone
    . . code - block : : bash
        salt ' * ' firewalld . add _ masquerade dmz'''

    # Target a specific zone when one is given; otherwise firewalld
    # applies the change to the default zone.
    cmd = '--zone={0} --add-masquerade' . format ( zone ) if zone else '--add-masquerade'
    if permanent :
        cmd += ' --permanent'
    return __firewall_cmd ( cmd )
|
def connect_input ( self , name , wire ) :
    """Connect the specified input to a wire.

    Records ``wire`` under ``name`` in this component's input table and
    registers this component as one of the wire's sinks."""

    wire . sinks . append ( self )
    self . _inputs [ name ] = wire
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.