signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def parse_phlat_file(phlatfile, mhc_alleles):
    """Parse a PHLAT output summary file and collect the alleles it contains.

    :param phlatfile: open file handle (or iterable of lines) for a PHLAT
        output sum file.
    :param mhc_alleles: dict mapping locus name -> list; matching
        ``(allele, p_value)`` tuples are appended in place.
    :return: the (mutated) ``mhc_alleles`` dict.
    """
    for raw in phlatfile:
        # Skip the header row.
        if raw.startswith('Locus'):
            continue
        fields = raw.strip().split()
        locus = fields[0]
        if locus.startswith('HLA_D'):
            # Strip the last character from class-II locus names.
            locus = locus[:-1]
        # Sometimes we get an error saying there was insufficient read
        # coverage and that line must be dropped, e.g.
        #   "HLA_DQB1 no call due to insufficient reads at this locus"
        if fields[1] == 'no':
            continue
        # Columns 1/4 and 2/5 hold the two allele calls with their p-values.
        for allele, pval in ((fields[1], fields[4]), (fields[2], fields[5])):
            if pval == 'NA':
                continue
            parts = allele.split(':')
            # 'xx' in the second field marks an ambiguous call; skip it.
            if len(parts) >= 2 and parts[1] != 'xx':
                mhc_alleles[locus].append((allele, pval))
    return mhc_alleles
|
def consume(self, expect_class=None):
    """Return the current token, then advance the parser.

    If ``expect_class`` is provided, assert that the current token is an
    instance of that class before advancing.

    Note that inside a token's ``nud()``/``led()`` methods the "current"
    token is the one *following* the token whose method is running.

    Returns:
        Token: the token that was current before advancing.
    Raises:
        InvalidTokenError: if ``expect_class`` is provided and the current
            token is not an instance of it.
    """
    if expect_class is not None and not isinstance(self.current_token,
                                                   expect_class):
        raise InvalidTokenError(
            "Unexpected token at %d: got %r, expected %s"
            % (self.current_pos, self.current_token, expect_class.__name__))
    token = self.current_token
    self._forward()
    return token
|
def delete(self, user):
    """Delete a resource by saving a ``_deleted`` tombstone document.

    :param user: the user requesting the deletion; may be falsy
        (anonymous), in which case deletion is always refused.
    :raises exceptions.Unauthorized: if the user may not delete it.
    """
    # Anonymous callers can never delete; otherwise ask the permission hook.
    allowed = (yield self.can_delete(user)) if user else False
    if not allowed:
        raise exceptions.Unauthorized('User may not delete the resource')
    tombstone = {'_id': self.id, '_deleted': True}
    # Include the current revision when we have one (fresh resources that
    # were never saved have no ``_rev`` attribute yet).
    _missing = object()
    rev = getattr(self, '_rev', _missing)
    if rev is not _missing:
        tombstone['_rev'] = rev
    db = self.db_client()
    yield db.save_doc(tombstone)
    self._resource = tombstone
|
def get_resource(resource_name):
    """Return a resource located in the current package or, for a frozen
    build, next to the executable.

    :param resource_name: relative name of the resource.
    :return: normalized path to the resource, or None if not found.
    """
    if hasattr(sys, "frozen"):
        # Frozen build: resources are shipped beside the binary.
        base = os.path.dirname(sys.executable)
        return os.path.normpath(os.path.join(base, resource_name))
    # Regular install: look the resource up inside the gns3server package.
    if pkg_resources.resource_exists("gns3server", resource_name):
        path = pkg_resources.resource_filename("gns3server", resource_name)
        return os.path.normpath(path)
    return None
|
def get_climate(self, device_label):
    """Get climate history for a device.

    :param device_label: device label of the climate device.
    :return: decoded JSON response body.
    :raises RequestError: if the underlying HTTP request fails.
    """
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Cookie': 'vid={}'.format(self._vid),
    }
    try:
        response = requests.get(
            urls.climate(self._giid),
            headers=headers,
            params={"deviceLabel": device_label})
    except requests.exceptions.RequestException as ex:
        raise RequestError(ex)
    _validate_response(response)
    return json.loads(response.text)
|
def make_dbsource(**kwargs):
    """Return a mapnik SQLite or PostGIS Datasource built from the Django
    database connection settings."""
    settings = connection.settings_dict
    if 'spatialite' in settings.get('ENGINE'):
        kwargs.setdefault('file', settings['NAME'])
        return mapnik.SQLite(wkb_format='spatialite', **kwargs)
    # Map mapnik datasource option names onto the Django settings keys;
    # explicit kwargs always win over values from settings.
    option_map = (('dbname', 'NAME'), ('user', 'USER'),
                  ('password', 'PASSWORD'), ('host', 'HOST'),
                  ('port', 'PORT'))
    for mapnik_opt, django_opt in option_map:
        value = settings.get(django_opt)
        if value:
            kwargs.setdefault(mapnik_opt, value)
    return mapnik.PostGIS(**kwargs)
|
def from_code(cls, environment, code, globals, uptodate=None):
    """Creates a template object from compiled code and the globals.  This
    is used by the loaders and environment to create a template object.

    :param environment: the environment the template belongs to.
    :param code: a code object compiled from template source; its
        ``co_filename`` is recorded as the namespace's ``__file__``.
    :param globals: the template globals.
    :param uptodate: optional callable stored for freshness checks.
    """
    namespace = {'environment': environment, '__file__': code.co_filename}
    # Python 2 exec statement: run the compiled template code in the fresh
    # namespace so it defines the template's module-level names there.
    exec code in namespace
    rv = cls._from_namespace(environment, namespace, globals)
    rv._uptodate = uptodate
    return rv
|
def add_options(self):
    """Add program options."""
    # Boolean flags, registered in display order.
    bool_options = (
        (("--reveal",), "show full announce URL including keys"),
        (("--raw",), "print the metafile's raw content in all detail"),
        (("-V", "--skip-validation"),
         "show broken metafiles with an invalid structure"),
    )
    for flags, help_text in bool_options:
        self.add_bool_option(*flags, help=help_text)
    self.add_value_option(
        "-o", "--output", "KEY,KEY1.KEY2,...",
        action="append", default=[],
        help="select fields to print, output is separated by TABs;"
             " note that __file__ is the path to the metafile,"
             " __hash__ is the info hash,"
             " and __size__ is the data size in bytes")
|
def getURL(self, size='Medium', urlType='url'):
    """Retrieves a url for the photo.  (flickr.photos.getSizes)

    :param size: label of the desired size (e.g. 'Medium').
    :param urlType: 'url' or 'source'
        'url'    - flickr page of photo
        'source' - image file
    :raises FlickrError: if no size with a matching label is returned.
    """
    method = 'flickr.photos.getSizes'
    data = _doget(method, photo_id=self.id)
    # The API returns one entry per available size; match on its label.
    for psize in data.rsp.sizes.size:
        if psize.label == size:
            return getattr(psize, urlType)
    # Python 2 raise syntax.
    raise FlickrError, "No URL found"
|
def _remote_status(session, service_id, uuid, url, interval=3):
    """Poll for remote command status until it reports SUCCESS.

    :param session: requests-like session used for the GET calls.
    :param service_id: remote service request identifier.
    :param uuid: vehicle/device identifier.
    :param url: status endpoint URL.
    :param interval: seconds to sleep between polls (default 3).
    :return: the string 'completed' once the command has succeeded.
    """
    # Loop instead of recursing: the original recursion grew the call stack
    # without bound and, worse, the recursive call dropped the caller's
    # ``interval`` argument so it silently reset to the default of 3s.
    while True:
        _LOGGER.info('polling for status')
        resp = session.get(
            url,
            params={'remoteServiceRequestID': service_id, 'uuid': uuid}
        ).json()
        if resp['status'] == 'SUCCESS':
            return 'completed'
        time.sleep(interval)
|
def resolve_election_tie(self, candidates):
    """Break a tie between candidates via the configured callback.

    The candidates are first put into a deterministic order using
    ``candidate_order_fn``; the ``election_tie_cb`` callback then picks an
    index into that ordered list.
    """
    ordered = sorted(candidates, key=self.candidate_order_fn)
    winner_index = self.election_tie_cb(candidates)
    return ordered[winner_index]
|
def list(path, filename=None, start=None, stop=None, recursive=False,
         directories=False):
    """List files specified by dataPath.

    The path may include a single wildcard ('*') in the filename specifier.

    :return: sorted list of absolute path strings, optionally sliced by
        start/stop and optionally including directories.
    """
    path = uri_to_path(path)
    # Recursive listing without a filename filter delegates entirely.
    if recursive and not filename:
        return listrecursive(path)
    if filename:
        # Attach the filename pattern to the directory part of the path.
        base = path if os.path.isdir(path) else os.path.dirname(path)
        path = os.path.join(base, filename)
    elif os.path.isdir(path) and not directories:
        # Expand a bare directory to all of its entries.
        path = os.path.join(path, "*")
    matches = glob.glob(path)
    if not directories:
        matches = [m for m in matches if not os.path.isdir(m)]
    matches.sort()
    return select(matches, start, stop)
|
def uclust_fasta_sort_from_filepath(fasta_filepath, output_filepath=None,
                                    tmp_dir=gettempdir(), HALT_EXEC=False):
    """Generates sorted fasta file via uclust --mergesort.

    :param fasta_filepath: path to the input fasta file.
    :param output_filepath: where to write the sorted fasta; a temp file in
        ``tmp_dir`` is created when not given.
    :param tmp_dir: directory for temporary files.
    :param HALT_EXEC: passed through to the Uclust application controller.
    :return: the application result from running uclust.
    """
    if not output_filepath:
        import os
        # mkstemp returns an *open* OS-level file descriptor alongside the
        # path; the original discarded it as ``_`` and leaked it.  Close it
        # here — uclust reopens the file by name.
        fd, output_filepath = mkstemp(dir=tmp_dir,
                                      prefix='uclust_fasta_sort',
                                      suffix='.fasta')
        os.close(fd)
    app = Uclust(params={'--tmpdir': tmp_dir}, TmpDir=tmp_dir,
                 HALT_EXEC=HALT_EXEC)
    return app(data={'--mergesort': fasta_filepath,
                     '--output': output_filepath})
|
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
            group_keys=True, squeeze=False, observed=False, **kwargs):
    """Group DataFrame or Series using a mapper or by a Series of columns.

    A groupby operation involves some combination of splitting the object,
    applying a function, and combining the results. This can be used to
    group large amounts of data and compute operations on these groups.

    Parameters
    ----------
    by : mapping, function, label, or list of labels
        Used to determine the groups for the groupby.
        If ``by`` is a function, it's called on each value of the object's
        index. If a dict or Series is passed, the Series or dict VALUES
        will be used to determine the groups (the Series' values are first
        aligned; see ``.align()`` method). If an ndarray is passed, the
        values are used as-is to determine the groups. A label or list of
        labels may be passed to group by the columns in ``self``. Notice
        that a tuple is interpreted as a (single) key.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Split along rows (0) or columns (1).
    level : int, level name, or sequence of such, default None
        If the axis is a MultiIndex (hierarchical), group by a particular
        level or levels.
    as_index : bool, default True
        For aggregated output, return object with group labels as the
        index. Only relevant for DataFrame input. as_index=False is
        effectively "SQL-style" grouped output.
    sort : bool, default True
        Sort group keys. Get better performance by turning this off.
        Note this does not influence the order of observations within each
        group. Groupby preserves the order of rows within each group.
    group_keys : bool, default True
        When calling apply, add group keys to index to identify pieces.
    squeeze : bool, default False
        Reduce the dimensionality of the return type if possible,
        otherwise return a consistent type.
    observed : bool, default False
        This only applies if any of the groupers are Categoricals.
        If True: only show observed values for categorical groupers.
        If False: show all values for categorical groupers.

        .. versionadded:: 0.23.0
    **kwargs
        Optional, only accepts keyword argument 'mutated' and is passed
        to groupby.

    Returns
    -------
    DataFrameGroupBy or SeriesGroupBy
        Depends on the calling object and returns groupby object that
        contains information about the groups.

    See Also
    --------
    resample : Convenience method for frequency conversion and resampling
        of time series.

    Notes
    -----
    See the `user guide
    <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.

    Examples
    --------
    >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
    ...                               'Parrot', 'Parrot'],
    ...                    'Max Speed': [380., 370., 24., 26.]})
    >>> df
       Animal  Max Speed
    0  Falcon      380.0
    1  Falcon      370.0
    2  Parrot       24.0
    3  Parrot       26.0
    >>> df.groupby(['Animal']).mean()
            Max Speed
    Animal
    Falcon      375.0
    Parrot       25.0

    **Hierarchical Indexes**

    We can groupby different levels of a hierarchical index
    using the `level` parameter:

    >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
    ...           ['Captive', 'Wild', 'Captive', 'Wild']]
    >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
    >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
    ...                   index=index)
    >>> df
                    Max Speed
    Animal Type
    Falcon Captive      390.0
           Wild         350.0
    Parrot Captive       30.0
           Wild          20.0
    >>> df.groupby(level=0).mean()
            Max Speed
    Animal
    Falcon      370.0
    Parrot       25.0
    >>> df.groupby(level=1).mean()
             Max Speed
    Type
    Captive      210.0
    Wild         185.0
    """
    # Imported locally to avoid a circular import at module load time.
    from pandas.core.groupby.groupby import groupby
    if level is None and by is None:
        raise TypeError("You have to supply one of 'by' and 'level'")
    # Normalize 'index'/'columns' aliases to their integer axis number.
    axis = self._get_axis_number(axis)
    return groupby(self, by=by, axis=axis, level=level, as_index=as_index,
                   sort=sort, group_keys=group_keys, squeeze=squeeze,
                   observed=observed, **kwargs)
|
def ji_windows(self, ij_win):
    """For one element of :attr:`windows`, get the corresponding windows at
    all resolutions.

    Arguments:
        ij_win {int} -- Index specifying the window for which to return the
            per-resolution windows.
    :return: dict mapping resolution -> window.
    """
    # Transform of the resolution the stored windows were defined on.
    src_idx = self._res_indices[self._windows_res][0]
    transform_src = self._layer_meta[src_idx]["transform"]
    result = {}
    for res, indices in self._res_indices.items():
        transform_dst = self._layer_meta[indices[0]]["transform"]
        result[res] = window_from_window(window_src=self.windows[ij_win],
                                         transform_src=transform_src,
                                         transform_dst=transform_dst)
    return result
|
def buscar(self, id_vlan):
    """Get a VLAN by its identifier.

    :param id_vlan: VLAN identifier.
    :return: Dictionary ``{'vlan': {...}}`` holding the VLAN attributes.
        The inner dict is either the IPv4 form ('id', 'nome', 'num_vlan',
        'id_ambiente', 'id_tipo_rede', 'rede_oct1'..'rede_oct4', 'bloco',
        'mascara_oct1'..'mascara_oct4', 'broadcast', 'descricao',
        'acl_file_name', 'acl_valida', 'ativada') or the IPv6 form ('id',
        'nome', 'num_vlan', 'id_tipo_rede', 'id_ambiente',
        'bloco1'..'bloco8', 'bloco', 'mask_bloco1'..'mask_bloco8',
        'broadcast', 'descricao', 'acl_file_name', 'acl_valida',
        'acl_file_name_v6', 'acl_valida_v6', 'ativada').

    :raise VlanNaoExisteError: VLAN does not exist.
    :raise InvalidParameterError: VLAN id is none or invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Validate before touching the network.
    if not is_valid_int_param(id_vlan):
        raise InvalidParameterError(u'Vlan id is invalid or was not informed.')
    url = 'vlan/' + str(id_vlan) + '/'
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
|
def _from_dict ( cls , _dict ) :
"""Initialize a Configuration object from a json dictionary ."""
|
args = { }
if 'configuration_id' in _dict :
args [ 'configuration_id' ] = _dict . get ( 'configuration_id' )
if 'name' in _dict :
args [ 'name' ] = _dict . get ( 'name' )
else :
raise ValueError ( 'Required property \'name\' not present in Configuration JSON' )
if 'created' in _dict :
args [ 'created' ] = string_to_datetime ( _dict . get ( 'created' ) )
if 'updated' in _dict :
args [ 'updated' ] = string_to_datetime ( _dict . get ( 'updated' ) )
if 'description' in _dict :
args [ 'description' ] = _dict . get ( 'description' )
if 'conversions' in _dict :
args [ 'conversions' ] = Conversions . _from_dict ( _dict . get ( 'conversions' ) )
if 'enrichments' in _dict :
args [ 'enrichments' ] = [ Enrichment . _from_dict ( x ) for x in ( _dict . get ( 'enrichments' ) ) ]
if 'normalizations' in _dict :
args [ 'normalizations' ] = [ NormalizationOperation . _from_dict ( x ) for x in ( _dict . get ( 'normalizations' ) ) ]
if 'source' in _dict :
args [ 'source' ] = Source . _from_dict ( _dict . get ( 'source' ) )
return cls ( ** args )
|
def list_features(self, dataset, reverse=False, start=None, limit=None):
    """List features in a dataset.

    Parameters
    ----------
    dataset : str
        The dataset id.
    reverse : str, optional
        List features in reverse order; possible value is "true".
    start : str, optional
        The id of the feature after which to start the list (pagination).
    limit : str, optional
        The maximum number of features to list (pagination).

    Returns
    -------
    request.Response
        The features of the dataset as a GeoJSON FeatureCollection.
    """
    endpoint = URITemplate(self.baseuri + '/{owner}/{id}/features').expand(
        owner=self.username, id=dataset)
    # Only send the query parameters the caller actually supplied.
    query = {}
    if reverse:
        query['reverse'] = 'true'
    if start:
        query['start'] = start
    if limit:
        query['limit'] = int(limit)
    return self.session.get(endpoint, params=query)
|
def _apply_workspaces(self, combination, mode):
    """Force-move a comma-separated list of workspaces to an output when
    that output is activated.

    Example:
        - DP1_workspaces = "1,2,3"
    """
    # Give the WM a moment to settle when extending onto multiple outputs.
    if len(combination) > 1 and mode == "extend":
        sleep(3)
    for output in combination:
        configured = getattr(self, "{}_workspaces".format(output), "")
        for workspace in configured.split(","):
            if not workspace:
                continue
            # Focus the workspace, ...
            self.py3.command_run('{} workspace "{}"'.format(
                self.py3.get_wm_msg(), workspace))
            # ... send it to the target output, ...
            self.py3.command_run('{} move workspace to output "{}"'.format(
                self.py3.get_wm_msg(), output))
            # ... and log what happened.
            self.py3.log("moved workspace {} to output {}".format(
                workspace, output))
|
def plot_run_nlive(method_names, run_dict, **kwargs):
    """Plot the allocations of live points as a function of logX for the
    input sets of nested sampling runs of the type used in the dynamic
    nested sampling paper (Higson et al. 2019).

    Plots also include analytically calculated distributions of relative
    posterior mass and relative posterior mass remaining.

    Parameters
    ----------
    method_names : list of strs
    run_dict : dict of lists of nested sampling runs.
        Keys of run_dict must be method_names.
    logx_given_logl : function, optional
        For mapping points' logl values to logx values.
        If not specified, the logx coordinates for each run are estimated
        using its numbers of live points.
    logl_given_logx : function, optional
        For calculating the relative posterior mass and posterior mass
        remaining at each logx coordinate.
    logx_min : float, optional
        Lower limit of the logx axis. If not specified this is set to the
        lowest logx reached by any of the runs.
    ymax : bool, optional
        Maximum value for the plot's nlive axis (y-axis).
    npoints : int, optional
        Number of points to have in the fgivenx plot grids.
    figsize : tuple, optional
        Size of the figure in inches.
    post_mass_norm : str or None, optional
        method_name whose runs are used for normalising the analytic
        posterior mass curve. If None, all runs are used.
    cum_post_mass_norm : str or None, optional
        method_name whose runs are used for normalising the analytic
        cumulative posterior mass remaining curve. If None, all runs are
        used.

    Returns
    -------
    fig : matplotlib figure
    """
    logx_given_logl = kwargs.pop('logx_given_logl', None)
    logl_given_logx = kwargs.pop('logl_given_logx', None)
    logx_min = kwargs.pop('logx_min', None)
    ymax = kwargs.pop('ymax', None)
    npoints = kwargs.pop('npoints', 100)
    figsize = kwargs.pop('figsize', (6.4, 2))
    post_mass_norm = kwargs.pop('post_mass_norm', None)
    cum_post_mass_norm = kwargs.pop('cum_post_mass_norm', None)
    # Any keys left over were not recognized above.
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    assert set(method_names) == set(run_dict.keys()), (
        'input method names=' + str(method_names) +
        ' do not match run_dict ' 'keys=' + str(run_dict.keys()))
    # Plotting
    fig = plt.figure(figsize=figsize)
    ax = plt.gca()
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    # Reserve colors for certain common method_names so they are always the
    # same regardless of method_name order, for consistency in the paper.
    linecolor_dict = {'standard': colors[2],
                      'dynamic $G=0$': colors[8],
                      'dynamic $G=1$': colors[9]}
    ax.set_prop_cycle('color', [colors[i] for i in [4, 1, 6, 0, 3, 5, 7]])
    integrals_dict = {}
    logx_min_list = []
    for method_name in method_names:
        integrals = np.zeros(len(run_dict[method_name]))
        for nr, run in enumerate(run_dict[method_name]):
            # Get logx coordinates: stored > mapped from logl > estimated
            # from the run's numbers of live points.
            if 'logx' in run:
                logx = run['logx']
            elif logx_given_logl is not None:
                logx = logx_given_logl(run['logl'])
            else:
                logx = nestcheck.ns_run_utils.get_logx(
                    run['nlive_array'], simulate=False)
            logx_min_list.append(logx[-1])
            logx[0] = 0  # to make lines extend all the way to the end
            if nr == 0:
                # Label the first line and store it so we can access its
                # color for the method's remaining runs.
                try:
                    line, = ax.plot(logx, run['nlive_array'], linewidth=1,
                                    label=method_name,
                                    color=linecolor_dict[method_name])
                except KeyError:
                    line, = ax.plot(logx, run['nlive_array'], linewidth=1,
                                    label=method_name)
            else:
                # Set other lines to the same color and don't add labels.
                ax.plot(logx, run['nlive_array'], linewidth=1,
                        color=line.get_color())
            # For normalising analytic weight lines.
            integrals[nr] = -np.trapz(run['nlive_array'], x=logx)
        integrals_dict[method_name] = integrals[np.isfinite(integrals)]
    # If not specified, set logx min to the lowest logx reached by a run.
    if logx_min is None:
        logx_min = np.asarray(logx_min_list).min()
    if logl_given_logx is not None:
        # Plot analytic posterior mass and cumulative posterior mass.
        logx_plot = np.linspace(logx_min, 0, npoints)
        logl = logl_given_logx(logx_plot)
        # Remove any NaNs.
        logx_plot = logx_plot[np.where(~np.isnan(logl))[0]]
        logl = logl[np.where(~np.isnan(logl))[0]]
        w_an = rel_posterior_mass(logx_plot, logl)
        # Try normalising the analytic distribution of posterior mass to
        # have the same area under the curve as the runs with
        # dynamic_goal=1 (the ones which we want to compare to it). If they
        # are not available just normalise it to the average area under all
        # the runs (which should be about the same if they have the same
        # number of samples).
        w_an *= average_by_key(integrals_dict, post_mass_norm)
        ax.plot(logx_plot, w_an, linewidth=2,
                label='relative posterior mass', linestyle=':', color='k')
        # Plot cumulative posterior mass.
        w_an_c = np.cumsum(w_an)
        w_an_c /= np.trapz(w_an_c, x=logx_plot)
        # Try normalising the cumulative distribution of posterior mass to
        # have the same area under the curve as the runs with
        # dynamic_goal=0 (the ones which we want to compare to it). If they
        # are not available just normalise it to the average area under all
        # the runs (which should be about the same if they have the same
        # number of samples).
        w_an_c *= average_by_key(integrals_dict, cum_post_mass_norm)
        ax.plot(logx_plot, w_an_c, linewidth=2, linestyle='--',
                dashes=(2, 3), label='posterior mass remaining',
                color='darkblue')
    ax.set_ylabel('number of live points')
    ax.set_xlabel(r'$\log X $')
    # Set limits.
    if ymax is not None:
        ax.set_ylim([0, ymax])
    else:
        ax.set_ylim(bottom=0)
    ax.set_xlim([logx_min, 0])
    ax.legend()
    return fig
|
def toeplitz(vect):
    """Return the Toeplitz matrix, as a list of lists, whose first
    row/column is ``vect``.

    Entry (j, i) is ``vect[abs(i - j)]``, so the matrix is symmetric with
    constant diagonals.

    :param vect: sequence giving the first row/column.
    :return: len(vect) x len(vect) list of lists.
    """
    n = len(vect)
    # range() works on both Python 2 and 3 (the original used the
    # Python-2-only xrange); output is identical.
    return [[vect[abs(i - j)] for i in range(n)] for j in range(n)]
|
def removePlugin(self, package):
    """Remove a plugin.

    :param package: The plugin package name.
    :return: dict with 'code' (0 on success, 1 otherwise) and 'msg'.
    """
    result = dict(code=1, msg=None)
    # Guard clauses: bad argument, then missing package directory.
    if not (package and isinstance(package, string_types)):
        result.update(msg="Invalid Package Format")
        return result
    target = os.path.join(self.plugin_abspath, package)
    if not os.path.isdir(target):
        result.update(msg="No Such Package")
        return result
    try:
        shutil.rmtree(target)
    except Exception as e:
        result.update(msg=str(e))
    else:
        result.update(code=0)
    return result
|
def instantiateSong(fileName):
    """Create an AudioSegment with the data from the given file."""
    ext = detectFormat(fileName)
    # Formats with a dedicated pydub constructor.
    loaders = {
        "mp3": pd.AudioSegment.from_mp3,
        "wav": pd.AudioSegment.from_wav,
        "ogg": pd.AudioSegment.from_ogg,
        "flv": pd.AudioSegment.from_flv,
    }
    if ext in loaders:
        return loaders[ext](fileName)
    # m4a containers are read with the "mp4" format hint; anything else is
    # passed through with its own extension as the hint.
    fmt = "mp4" if ext == "m4a" else ext
    return pd.AudioSegment.from_file(fileName, fmt)
|
async def _wait_for_data ( self , current_command , number_of_bytes ) :
"""This is a private utility method .
This method accumulates the requested number of bytes and
then returns the full command
: param current _ command : command id
: param number _ of _ bytes : how many bytes to wait for
: returns : command"""
|
while number_of_bytes :
next_command_byte = await self . read ( )
current_command . append ( next_command_byte )
number_of_bytes -= 1
return current_command
|
def _speak_as_spell_out_inherit(self, element):
    """Speak one letter at a time for each word, for the element and all
    of its descendants.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    """
    # Presumably undoes any previously applied 'spell-out' speak-as
    # markup before re-applying it — TODO confirm _reverse_speak_as.
    self._reverse_speak_as(element, 'spell-out')
    self._isolate_text_node(element)
    # Apply the spell-out operation to the element tree via the visitor.
    self._visit(element, self._speak_as_spell_out)
|
def generate_api_docs(package, api_dir, clean=False, printlog=True):
    """Generate a module-level API documentation of a python package.

    Description
    -----------
    Generates markdown API files for each module in a Python package,
    laid out as
    ``package/package.subpackage/package.subpackage.module.md``.

    Parameters
    ----------
    package : Python package object
    api_dir : str
        Output directory path for the top-level package directory.
    clean : bool (default: False)
        Removes a previously existing API directory if True.
    printlog : bool (default: True)
        Prints a progress log to the standard output screen if True.
    """
    if printlog:
        print('\n\nGenerating Module Files\n%s\n' % (50 * '='))
    prefix = package.__name__ + "."
    # clear the previous version
    if clean:
        if os.path.isdir(api_dir):
            shutil.rmtree(api_dir)
    # get subpackages
    api_docs = {}
    for importer, pkg_name, is_pkg in pkgutil.iter_modules(
            package.__path__, prefix):
        if is_pkg:
            subpackage = __import__(pkg_name, fromlist="dummy")
            prefix = subpackage.__name__ + "."
            # get functions and classes
            classes, functions = get_functions_and_classes(subpackage)
            target_dir = os.path.join(api_dir, subpackage.__name__)
            # create the subdirs
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)
                if printlog:
                    print('created %s' % target_dir)
            # create markdown documents in memory; entries for the same
            # markdown path are concatenated
            for obj in classes + functions:
                md_path = os.path.join(target_dir, obj[0]) + '.md'
                if md_path not in api_docs:
                    api_docs[md_path] = object_to_markdownpage(
                        obj_name=obj[0], obj=obj[1], s='')
                else:
                    api_docs[md_path] += object_to_markdownpage(
                        obj_name=(obj[0]), obj=obj[1], s='')
    # write to files, skipping files whose content is unchanged
    for d in sorted(api_docs):
        prev = ''
        if os.path.isfile(d):
            with open(d, 'r') as f:
                prev = f.read()
            if prev == api_docs[d]:
                msg = 'skipped'
            else:
                msg = 'updated'
        else:
            msg = 'created'
        if msg != 'skipped':
            with open(d, 'w') as f:
                f.write(api_docs[d])
        if printlog:
            print('%s %s' % (msg, d))
|
def remove_object_metadata_key(self, container, obj, key, prefix=None):
    """Remove the specified key from the storage object's metadata.

    If the key does not exist in the metadata, nothing is done (setting a
    metadata key to an empty string deletes it).
    """
    removal = {key: ""}
    self.set_object_metadata(container, obj, removal, prefix=prefix)
|
def _split_lines(self):
    '''Build the parsed_lines dict, which keeps all record data in
    document order indexed by record type (unknown types under key 0).'''
    buckets = {rt: [] for rt in all_record_types}
    buckets[0] = []  # catch-all bucket for unrecognized record types
    for line in self.lines:
        record_type = line[0:6]
        target = record_type if record_type in all_record_types else 0
        buckets[target].append(line)
    self.parsed_lines = buckets
    self._update_structure_lines()
|
def get_one_optional(self, locator):
    """Get an optional component reference that matches the locator.

    :param locator: the locator to find references by.
    :return: a matching component reference, or None if nothing was found
        (or the lookup failed).
    """
    try:
        components = self.find(locator, False)
    except Exception:
        # Best-effort lookup: any failure means "not found".
        return None
    if components:
        return components[0]
    return None
|
def add(self, path, compress=None):
    """Add `path` to the MAR file.

    A file is added directly; a directory is traversed recursively and
    every file inside is added.

    Args:
        path (str): path to file or directory on disk to add to this MAR
            file.
        compress (str): One of 'xz', 'bz2', or None. Defaults to None.
    """
    handler = self.add_dir if os.path.isdir(path) else self.add_file
    handler(path, compress)
|
def _print_cline ( self , buf , i , icol ) :
"""Print clines after multirow - blocks are finished"""
|
for cl in self . clinebuf :
if cl [ 0 ] == i :
buf . write ( '\\cline{{{cl:d}-{icol:d}}}\n' . format ( cl = cl [ 1 ] , icol = icol ) )
# remove entries that have been written to buffer
self . clinebuf = [ x for x in self . clinebuf if x [ 0 ] != i ]
|
def _get_all_file_version_ids ( self , secure_data_path , limit = None ) :
"""Convenience function that returns a generator that will paginate over the file version ids
secure _ data _ path - - full path to the file in the safety deposit box
limit - - Default ( 100 ) , limits how many records to be returned from the api at once ."""
|
offset = 0
# Prime the versions dictionary so that all the logic can happen in the loop
versions = { 'has_next' : True , 'next_offset' : 0 }
while ( versions [ 'has_next' ] ) :
offset = versions [ 'next_offset' ]
versions = self . get_file_versions ( secure_data_path , limit , offset )
for summary in versions [ 'secure_data_version_summaries' ] :
yield summary
|
def get(self, request, *args, **kwargs):
    """Handle GET requests.

    Passes the following arguments to the context:
    * **obj** - The object to publish
    * **done_url** - The result of the `get_done_url` method
    """
    obj = self.get_object()
    self.object = obj
    return self.render(request, obj=obj, done_url=self.get_done_url())
|
def fastqIterator(fn, verbose=False, allowNameMissmatch=False):
    """Generator yielding FastqSequence objects read from a file or stream.

    This is a general wrapper around fastqIteratorSimple; future releases
    may allow dynamic switching of the underlying base iterator.

    :param fn: a file-like stream or a string; a string is treated as the
        filename of an input fastq file, anything else as a file-like
        object with a readline() method.
    :param verbose: if True, print progress messages to stderr.
    :param allowNameMissmatch: don't throw an error when the names in the
        sequence and quality parts of a read don't match. Newer versions
        of CASAVA seem to output data like this, probably to save space.
    """
    for seq in fastqIteratorSimple(fn, verbose=verbose,
                                   allowNameMissmatch=allowNameMissmatch):
        yield seq
|
def decode_html_entities(s):
    """Replaces html entities with the character they represent.

    >>> print(decode_html_entities("&lt;3 &amp;"))
    <3 &
    """
    # ``HTMLParser.unescape`` is deprecated and was removed in Python 3.9;
    # ``html.unescape`` is the supported stdlib replacement. The regex gate
    # is kept so only well-formed ``&...;`` spans are rewritten, preserving
    # the original behavior on loose ampersands.
    import html

    def unesc(m):
        return html.unescape(m.group())

    return re.sub(r'(&[^;]+;)', unesc, ensure_unicode(s))
|
def get_api_connector(cls):
    """Initialize an api connector for future use."""
    # Lazily build the XML-RPC client once and cache it on the class.
    if cls._api is None:  # pragma: no cover
        cls.load_config()
        cls.debug('initialize connection to remote server')
        host = cls.get('api.host')
        if not host:
            raise MissingConfiguration()
        env_name = cls.get('api.env')
        if env_name and env_name in cls.apienvs:
            # A known named environment overrides the configured host.
            host = cls.apienvs[env_name]
        cls._api = XMLRPCClient(host=host, debug=cls.verbose)
    return cls._api
|
def handle_send(
        entity: BaseEntity,
        author_user: UserType,
        recipients: List[Dict],
        parent_user: UserType = None,
) -> None:
    """Send an entity to remote servers.

    Using this we will build a list of payloads per protocol. After that, each
    recipient will get the generated protocol payload delivered. Delivery to
    the same endpoint will only be done once so it's ok to include the same
    endpoint as a receiver multiple times.

    Any given user arguments must have ``private_key`` and ``fid`` attributes.

    :arg entity: Entity object to send. Can be a base entity or a protocol
        specific one.
    :arg author_user: User authoring the object.
    :arg recipients: A list of recipients to delivery to. Each recipient is a
        dict containing at minimum the "fid", "public" and "protocol" keys.
        For ActivityPub and Diaspora payloads, "fid" should be an URL of the
        endpoint to deliver to. The "protocol" should be a protocol name that
        is known for this recipient. The "public" value should be a boolean to
        indicate whether the payload should be flagged as a public payload.
        For private deliveries to Diaspora protocol recipients, "public_key"
        is also required.
    :arg parent_user: (Optional) User object of the parent object, if there is
        one. This must be given for the Diaspora protocol if a parent object
        exists, so that a proper ``parent_author_signature`` can be generated.
        If given, the payload will be sent as this user.
    """
    payloads = []
    # Public payloads can be rendered once and fanned out to many URLs.
    # NOTE(review): only the "diaspora" slot is actually used below; the
    # "activitypub" slot appears unused — confirm before relying on it.
    public_payloads = {
        "activitypub": {"auth": None, "payload": None, "urls": set(), },
        "diaspora": {"auth": None, "payload": None, "urls": set(), },
    }
    # Flatten to unique recipients
    unique_recipients = unique_everseen(recipients)
    # Generate payloads and collect urls
    for recipient in unique_recipients:
        fid = recipient["fid"]
        public_key = recipient.get("public_key")
        protocol = recipient["protocol"]
        public = recipient["public"]
        if protocol == "activitypub":
            # Each ActivityPub recipient gets its own rendered payload with
            # an appropriate addressing field.
            try:
                payload = handle_create_payload(entity, author_user, protocol, parent_user=parent_user)
                if public:
                    payload["to"] = "https://www.w3.org/ns/activitystreams#Public"
                else:
                    payload["to"] = fid
                payload = json.dumps(payload).encode("utf-8")
            except Exception as ex:
                # Skip this recipient but keep delivering to the others.
                logger.error("handle_send - failed to generate private payload for %s: %s", fid, ex)
                continue
            payloads.append({
                "auth": get_http_authentication(author_user.private_key, f"{author_user.id}#main-key"),
                "payload": payload,
                "content_type": 'application/ld+json; profile="https://www.w3.org/ns/activitystreams"',
                "urls": {fid},
            })
        elif protocol == "diaspora":
            if public:
                # Public Diaspora payload is rendered once and shared
                # between all public recipient URLs.
                if public_key:
                    raise ValueError("handle_send - Diaspora recipient cannot be public and use encrypted delivery")
                if not public_payloads[protocol]["payload"]:
                    public_payloads[protocol]["payload"] = handle_create_payload(
                        entity, author_user, protocol, parent_user=parent_user,
                    )
                public_payloads["diaspora"]["urls"].add(fid)
            else:
                if not public_key:
                    raise ValueError("handle_send - Diaspora recipient cannot be private without a public key for "
                                     "encrypted delivery")
                # Private payload - encrypted per recipient public key.
                try:
                    payload = handle_create_payload(
                        entity, author_user, "diaspora", to_user_key=public_key, parent_user=parent_user,
                    )
                    payload = json.dumps(payload)
                except Exception as ex:
                    logger.error("handle_send - failed to generate private payload for %s: %s", fid, ex)
                    continue
                payloads.append({
                    "urls": {fid}, "payload": payload, "content_type": "application/json", "auth": None,
                })
    # Add public diaspora payload
    if public_payloads["diaspora"]["payload"]:
        payloads.append({
            "urls": public_payloads["diaspora"]["urls"],
            "payload": public_payloads["diaspora"]["payload"],
            "content_type": "application/magic-envelope+xml",
            "auth": None,
        })
    logger.debug("handle_send - %s", payloads)
    # Do actual sending
    for payload in payloads:
        for url in payload["urls"]:
            try:
                send_document(
                    url, payload["payload"], auth=payload["auth"],
                    headers={"Content-Type": payload["content_type"]},
                )
            except Exception as ex:
                # Best effort delivery: log failures and keep going.
                logger.error("handle_send - failed to send payload to %s: %s, payload: %s", url, ex, payload["payload"])
|
def updatePlayer(name, settings):
    """update an existing PlayerRecord setting and save to disk file"""
    # Pop the existing record out of the registry, apply the validated
    # settings, persist, then re-register under its (possibly new) name.
    record = delPlayer(name)
    _validate(settings)
    record.update(settings)
    record.save()
    getKnownPlayers()[record.name] = record
    return record
|
def _is_link ( fs , path ) :
"""Check that the given path is a symbolic link .
Note that unlike ` os . path . islink ` , we * do * propagate file system errors
other than a non - existent path or non - existent directory component .
E . g . , should EPERM or ELOOP be raised , an exception will bubble up ."""
|
try :
return stat . S_ISLNK ( fs . lstat ( path ) . st_mode )
except exceptions . FileNotFound :
return False
|
def get_all(self):
    """Gets all items in file."""
    path = self.data_file
    logger.debug('Fetching items. Path: {data_file}'.format(data_file=path))
    return load_file(path)
|
def finalize(self):
    """Disconnects signals and frees resources"""
    self.model().sigItemChanged.disconnect(self.repoTreeItemChanged)
    # Bind the selection model to a local first; fetching it inline while
    # disconnecting can crash in PySide.
    sel_model = self.selectionModel()
    sel_model.currentChanged.disconnect(self.currentItemChanged)
|
def approvewitness(ctx, witnesses, account):
    """Approve witness(es)"""
    result = ctx.peerplays.approvewitness(witnesses, account=account)
    pprint(result)
|
def kill_running_submission(self, submissionid, user_check=True):
    """Attempt to kill the remote job associated with this submission id.

    :param submissionid: id of the submission whose job should be killed
    :param user_check: Check if the current user owns this submission
    :return: True if the job was killed, False if an error occurred
    """
    submission = self.get_submission(submissionid, user_check)
    # Bail out when the submission is unknown or has no remote job attached.
    if not submission or "jobid" not in submission:
        return False
    return self._client.kill_job(submission["jobid"])
|
def magicrun(text, shell, prompt_template="default", aliases=None, envvars=None,
             extra_commands=None, speed=1, test_mode=False, commentecho=False):
    """Echo out each character in ``text`` as keyboard characters are pressed,
    wait for a RETURN keypress, then run the ``text`` in a shell context."""
    # magictype returns a truthy "abort" signal when typing was interrupted;
    # only run the command when it completed normally.
    abort = magictype(text, prompt_template, speed)
    if not abort:
        run_command(text, shell, aliases=aliases, envvars=envvars,
                    extra_commands=extra_commands, test_mode=test_mode)
    return abort
|
def kill(self):
    """Shut down the socket immediately."""
    sock = self._socket
    # Disallow further reads/writes before releasing the descriptor.
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()
|
def FieldDefinitionProtosFromTuples(field_def_tuples):
    """Converts (field-name, type) tuples to MetricFieldDefinition protos.

    Args:
      field_def_tuples: iterable of (field_name, field_type) pairs where
        field_type is ``int`` (or ``long`` on Python 2) or a text type.

    Returns:
      A list of rdf_stats.MetricFieldDefinition protos.

    Raises:
      ValueError: if a field type is neither an integer nor a text type.
    """
    # ``long`` only exists on Python 2; referencing it unconditionally raised
    # NameError on Python 3 (this resolves the old "needs fixing for
    # Python 3" TODO) while keeping Python 2 behavior intact.
    try:
        int_types = (int, long)  # noqa: F821 - Python 2 only
    except NameError:
        int_types = (int,)
    field_def_protos = []
    for field_name, field_type in field_def_tuples:
        if field_type in int_types:
            proto_type = rdf_stats.MetricFieldDefinition.FieldType.INT
        elif issubclass(field_type, Text):
            proto_type = rdf_stats.MetricFieldDefinition.FieldType.STR
        else:
            raise ValueError("Invalid field type: %s" % field_type)
        field_def_protos.append(
            rdf_stats.MetricFieldDefinition(field_name=field_name,
                                            field_type=proto_type))
    return field_def_protos
|
async def load_field(obj, elem_type, params=None, elem=None):
    """Loads a field from the reader, based on the field type specification. Demultiplexer.

    :param obj: raw loaded value (int, bytes, str, list, dict, ...) to convert
    :param elem_type: schema type class (x.UVarintType, x.BlobType, ...) that
        drives which specialized loader handles the value
    :param params: extra schema parameters forwarded to variant/container loaders
    :param elem: optional existing element to populate (see set_elem/get_elem)
    :return: the converted element, stored via set_elem
    """
    # Integers and booleans are used as-is.
    if issubclass(elem_type, x.UVarintType) or issubclass(elem_type, x.IntType) or isinstance(obj, (int, bool)):
        return set_elem(elem, obj)
    elif issubclass(elem_type, x.BlobType):
        # Binary blob: delegate to the blob loader.
        fvalue = await load_blob(obj, elem_type)
        return set_elem(elem, fvalue)
    elif issubclass(elem_type, x.UnicodeType) or isinstance(elem, str):
        # Strings pass through unchanged.
        return set_elem(elem, obj)
    elif issubclass(elem_type, x.VariantType):
        # Tagged union: the variant loader resolves the concrete type.
        fvalue = await load_variant(obj, elem=get_elem(elem), elem_type=elem_type, params=params)
        return set_elem(elem, fvalue)
    elif issubclass(elem_type, x.ContainerType):  # container ~ simple list
        fvalue = await load_container(obj, elem_type, params=params, container=get_elem(elem))
        return set_elem(elem, fvalue)
    elif issubclass(elem_type, x.MessageType):
        # Nested message: recurse into its fields.
        fvalue = await load_message(obj, msg_type=elem_type, msg=get_elem(elem))
        return set_elem(elem, fvalue)
    else:
        raise TypeError
|
def raised_funds_by_project(df):
    """Raised funds organized by project.

    Coerces the ``CaptacaoReal`` column to numeric values (mutating ``df`` in
    place, as before) and returns the total raised per ``Pronac``.

    :param df: DataFrame with at least ``Pronac`` and ``CaptacaoReal`` columns.
    :return: DataFrame indexed by ``Pronac`` with summed ``CaptacaoReal``.
    """
    # Vectorized conversion: calling pd.to_numeric on the whole Series is much
    # faster than Series.apply(pd.to_numeric), which converts one element at a
    # time, and yields the same values.
    df['CaptacaoReal'] = pd.to_numeric(df['CaptacaoReal'])
    return df[['Pronac', 'CaptacaoReal']].groupby(['Pronac']).sum()
|
def addDelay(self, urlPattern="", delay=0, httpMethod=None):
    """Adds delays."""
    print("addDelay is deprecated please use delays instead")
    # Build a single delay entry; avoid reusing the ``delay`` parameter name.
    entry = {"urlPattern": urlPattern, "delay": delay}
    if httpMethod:
        entry["httpMethod"] = httpMethod
    return self.delays(delays={"data": [entry]})
|
def listen(self, port: int, address: str = "", **kwargs: Any) -> HTTPServer:
    """Starts an HTTP server for this application on the given port.

    This is a convenience alias for creating an `.HTTPServer` object and
    calling its listen method. Keyword arguments not supported by
    `HTTPServer.listen <.TCPServer.listen>` are passed to the `.HTTPServer`
    constructor. For advanced uses (e.g. multi-process mode), do not use this
    method; create an `.HTTPServer` and call its
    `.TCPServer.bind`/`.TCPServer.start` methods directly.

    Note that after calling this method you still need to call
    ``IOLoop.current().start()`` to start the server.

    Returns the `.HTTPServer` object.

    .. versionchanged:: 4.3
       Now returns the `.HTTPServer` object.
    """
    http_server = HTTPServer(self, **kwargs)
    http_server.listen(port, address)
    return http_server
|
def add_menu(self, menu):
    '''add to the default popup menu'''
    from MAVProxy.modules.mavproxy_map import mp_slipmap
    self.default_popup.add(menu)
    # Re-publish the updated popup so the map picks up the new entry.
    popup_obj = mp_slipmap.SlipDefaultPopup(self.default_popup, combine=True)
    self.map.add_object(popup_obj)
|
def default_kms_key_name(self, value):
    """Set default KMS encryption key for objects in the bucket.

    :type value: str or None
    :param value: new KMS key name (None to clear any existing key).
    """
    # Update the existing encryption config (if any) and queue the patch.
    config = self._properties.get("encryption", {})
    config["defaultKmsKeyName"] = value
    self._patch_property("encryption", config)
|
def out_name(stem, timestep=None):
    """Return StagPy out file name.

    Args:
        stem (str): short description of file content.
        timestep (int): timestep if relevant.

    Returns:
        str: the output file name.

    Other Parameters:
        conf.core.outname (str): the generic name stem, defaults to
            ``'stagpy'``.
    """
    # Append the formatted timestep to the stem when one is given.
    suffix = stem if timestep is None else (stem + INT_FMT).format(timestep)
    return conf.core.outname + '_' + suffix
|
def deprecate(func):
    """A deprecation warning emmiter as a decorator."""
    @wraps(func)
    def _deprecated(*args, **kwargs):
        warn("Deprecated, this will be removed in the future", DeprecationWarning)
        return func(*args, **kwargs)
    # Prepend a marker to the wrapped function's docstring.
    _deprecated.__doc__ = "Deprecated.\n" + (_deprecated.__doc__ or "")
    return _deprecated
|
def required_header(header):
    """Function that verify if the header parameter is a essential header

    :param header: A string represented a header
    :returns: A boolean value that represent if the header is required
    """
    # Explicitly ignored headers are never required.
    if header in IGNORE_HEADERS:
        return False
    return header.startswith('HTTP_') or header == 'CONTENT_TYPE'
|
def jackknife_indexes(data):
    """Given data points data, where axis 0 is considered to delineate points,
    return a generator of arrays where each array is a set of jackknife
    indexes.

    For a given set of data Y, the jackknife sample J[i] is defined as the
    data set Y with the ith data point deleted.

    Note: this returns a *generator* (the old docstring incorrectly said
    "list"), so it can only be consumed once; wrap it in ``list()`` if
    repeated iteration is needed.
    """
    base = np.arange(0, len(data))
    return (np.delete(base, i) for i in base)
|
def mkconstraints():
    """Make constraint list for binary constraint problem."""
    constraints = []

    def all_different(cells):
        # Every pair of cells in the group must hold different values.
        constraints.extend((pair, const_different) for pair in combinations(cells, 2))

    rows = uppercase[:9]
    # Column constraints.
    for col in range(1, 10):
        all_different(["%s%d" % (row, col) for row in rows])
    # Row constraints.
    for row in rows:
        all_different(["%s%d" % (row, col) for col in range(1, 10)])
    # 3x3 box constraints.
    for row_block in ['ABC', 'DEF', 'GHI']:
        for col_block in [[1, 2, 3], [4, 5, 6], [7, 8, 9]]:
            all_different(["%s%d" % (r, c) for r in row_block for c in col_block])
    return constraints
|
def close(self, name=None):
    """Closes the most recently opened element.

    :name: if given, this value must match the name given for the most
        recently opened element. This is primarily here for providing quick
        error checking for applications
    """
    popped = self._open_elements.pop()
    # Optional sanity check that the caller closes what it opened.
    if name is not None and name != popped:
        raise Exception("Tag closing mismatch")
    self._pad()
    self._writer.endElement(_normalize_name(popped))
    self._newline()
|
def _process_service_check(self, data, url, tag_by_host=False,
                           services_incl_filter=None,
                           services_excl_filter=None, custom_tags=None):
    '''Report a service check, tagged by the service and the backend.

    Statuses are defined in `STATUS_TO_SERVICE_CHECK` mapping.
    '''
    if custom_tags is None:
        custom_tags = []
    service_name = data['pxname']
    raw_status = data['status']
    haproxy_hostname = to_string(self.hostname)
    check_hostname = haproxy_hostname if tag_by_host else ''
    # Skip services excluded by the include/exclude filters.
    if self._is_service_excl_filtered(service_name, services_incl_filter, services_excl_filter):
        return
    # Only statuses with a known mapping produce a service check.
    if raw_status not in Services.STATUS_TO_SERVICE_CHECK:
        return
    service_check_tags = ["service:%s" % service_name]
    service_check_tags.extend(custom_tags)
    backend_hostname = data['svname']
    if data['back_or_front'] == Services.BACKEND:
        service_check_tags.append('backend:%s' % backend_hostname)
    status = Services.STATUS_TO_SERVICE_CHECK[raw_status]
    message = "%s reported %s:%s %s" % (haproxy_hostname, service_name, backend_hostname, status)
    self.service_check(self.SERVICE_CHECK_NAME, status, message=message,
                       hostname=check_hostname, tags=service_check_tags)
|
def get_chinese_new_year(self, year):
    """Compute Chinese New Year days. To return a list of holidays.

    By default, it'll at least return the Chinese New Year holidays chosen
    using the following options:

    * ``include_chinese_new_year_eve``
    * ``include_chinese_new_year`` (on by default)
    * ``include_chinese_second_day``

    If the ``shift_sunday_holidays`` option is on, the rules are the
    following.

    * If the CNY1 falls on MON-FRI, there's not shift.
    * If the CNY1 falls on SAT, the CNY2 is shifted to the Monday after.
    * If the CNY1 falls on SUN, the CNY1 is shifted to the Monday after,
      and CNY2 is shifted to the Tuesday after.
    """
    days = []
    # Lunar 1/1 of the requested year is the Chinese New Year day (CNY1).
    lunar_first_day = ChineseNewYearCalendar.lunar(year, 1, 1)
    # Chinese new year's eve
    if self.include_chinese_new_year_eve:
        days.append((lunar_first_day - timedelta(days=1), self.chinese_new_year_eve_label))
    # Chinese new year (is included by default)
    if self.include_chinese_new_year:
        days.append((lunar_first_day, self.chinese_new_year_label))
    if self.include_chinese_second_day:
        lunar_second_day = lunar_first_day + timedelta(days=1)
        days.append((lunar_second_day, self.chinese_second_day_label))
    if self.include_chinese_third_day:
        lunar_third_day = lunar_first_day + timedelta(days=2)
        days.append((lunar_third_day, self.chinese_third_day_label))
    if self.shift_sunday_holidays:
        # NOTE(review): ``lunar_second_day``/``lunar_third_day`` are only
        # bound inside the include_* branches above; if
        # ``shift_sunday_holidays`` is on while ``include_chinese_second_day``
        # (and/or third day) is off, the references below can raise
        # NameError — confirm whether these option combinations occur.
        if lunar_first_day.weekday() == SUN:
            if self.shift_start_cny_sunday:
                # Shift CNY1 backwards (holiday starts on Saturday).
                days.append((lunar_first_day - timedelta(days=1), "Chinese Lunar New Year shift"),)
            else:
                # Otherwise append an extra day after the last included day.
                if self.include_chinese_third_day:
                    shift_day = lunar_third_day
                else:
                    shift_day = lunar_second_day
                days.append((shift_day + timedelta(days=1), "Chinese Lunar New Year shift"),)
        if (lunar_second_day.weekday() == SUN and self.include_chinese_third_day):
            days.append((lunar_third_day + timedelta(days=1), "Chinese Lunar New Year shift"),)
    return days
|
def write_config_file(self, f, comments):
    """This method write a sample file, with attributes, descriptions,
    sample values, required flags, using the configuration object
    properties."""
    # Hidden options are omitted entirely.
    if self.conf_hidden:
        return False
    if comments:
        f.write("\n# Attribute (%s) : %s\n" % (self.e_type.__name__, self._name.upper()))
        if self._desc and self._desc != argparse.SUPPRESS:
            f.write("# Description : ")
            for desc_line in self._desc.split('\n'):
                f.write("# %s\n" % desc_line)
            f.write("\n")
    # Optional entries are written commented-out with a leading ';'.
    if not self.conf_required:
        f.write(";")
    f.write("%s=" % self._name)
    if self.default is not None and not self.hidden:
        f.write(str(self.default))
    f.write("\n")
|
def _summary ( self , name = None ) :
"""Return a summarized representation .
Parameters
name : str
name to use in the summary representation
Returns
String with a summarized representation of the index"""
|
if len ( self ) > 0 :
head = self [ 0 ]
if hasattr ( head , 'format' ) and not isinstance ( head , str ) :
head = head . format ( )
tail = self [ - 1 ]
if hasattr ( tail , 'format' ) and not isinstance ( tail , str ) :
tail = tail . format ( )
index_summary = ', %s to %s' % ( pprint_thing ( head ) , pprint_thing ( tail ) )
else :
index_summary = ''
if name is None :
name = type ( self ) . __name__
return '%s: %s entries%s' % ( name , len ( self ) , index_summary )
|
def hex_to_int(value):
    """Convert a big-endian byte string like ``\\x0A\\xE3`` to 2787."""
    if version_info.major < 3:
        # Python 2: go via the 'hex' codec.
        return int(value.encode("hex"), 16)
    return int.from_bytes(value, "big")
|
def store_zonefiles(self, zonefile_names, zonefiles, zonefile_txids, zonefile_block_heights, peer_zonefile_hashes, peer_hostport, path, con=None):
    """Store a list of RPC-fetched zonefiles (but only ones in peer_zonefile_hashes) from the given peer_hostport

    Return the list of zonefile hashes stored.
    """
    ret = []
    with AtlasDBOpen(con=con, path=path) as dbcon:
        for fetched_zfhash, zonefile_txt in zonefiles.items():
            # Only accept zonefiles we actually asked this peer for and
            # whose block heights we know.
            if fetched_zfhash not in peer_zonefile_hashes or fetched_zfhash not in zonefile_block_heights:  # unsolicited
                log.warn("%s: Unsolicited zonefile %s" % (self.hostport, fetched_zfhash))
                continue
            # Store with the earliest known block height for this hash.
            rc = self.store_zonefile_data(fetched_zfhash, zonefile_txt, min(zonefile_block_heights[fetched_zfhash]), peer_hostport, dbcon, path)
            if rc:  # don't ask for it again
                ret.append(fetched_zfhash)
    return ret
|
def channels_add_all(self, room_id, **kwargs):
    """Adds all of the users of the Rocket.Chat server to the channel."""
    # Delegates to the generic POST helper with the channels.addAll endpoint.
    return self.__call_api_post('channels.addAll', roomId=room_id, kwargs=kwargs)
|
def _distributeCells(numCellsPop):
    '''distribute cells across compute nodes using round-robin'''
    from .. import sim
    # One (initially empty) bucket per host.
    hostCells = {host: [] for host in range(sim.nhosts)}
    for gid in range(numCellsPop):
        hostCells[sim.nextHost].append(gid)
        # Advance the round-robin pointer, wrapping at the host count.
        sim.nextHost += 1
        if sim.nextHost >= sim.nhosts:
            sim.nextHost = 0
    if sim.cfg.verbose:
        print(("Distributed population of %i cells on %s hosts: %s, next: %s" % (numCellsPop, sim.nhosts, hostCells, sim.nextHost)))
    return hostCells
|
def tau3_from_mass1_mass2(mass1, mass2, f_lower):
    r"""Returns :math:`\tau_3` from the component masses and given frequency."""
    # Convert component masses to total mass and symmetric mass ratio.
    total_mass = mass1 + mass2
    sym_mass_ratio = eta_from_mass1_mass2(mass1, mass2)
    return tau3_from_mtotal_eta(total_mass, sym_mass_ratio, f_lower)
|
def convert_sshkey(cls, sshkey):
    """Return dict param with valid entries for vm/paas methods."""
    params = {}
    if not sshkey:
        return params
    params['keys'] = []
    for ssh in sshkey:
        if os.path.exists(os.path.expanduser(ssh)):
            # Inline key file: only a single file is allowed.
            if 'ssh_key' in params:
                cls.echo("Can't have more than one sshkey file.")
                continue
            with open(ssh) as fdesc:
                content = fdesc.read()
            if content:
                params['ssh_key'] = content
        else:
            # Not a file on disk: try to resolve it as a stored key id.
            key_id = Sshkey.usable_id(ssh)
            if key_id:
                params['keys'].append(key_id)
            else:
                cls.echo('This is not a ssh key %s' % ssh)
    if not params['keys']:
        params.pop('keys')
    return params
|
def renew_session(self):
    """Have to be called on user actions to check and renew session"""
    # The session is stale when the browser no longer carries the matching
    # 'user_uid' cookie; fire the expiry callback once in that case.
    if ((not 'user_uid' in self.cookieInterface.cookies) or self.cookieInterface.cookies['user_uid'] != self.session_uid) and (not self.expired):
        self.on_session_expired()
    if self.expired:
        # Issue a fresh session id for the new session.
        # NOTE(review): random.randint is not cryptographically secure —
        # confirm whether this id needs to be unguessable.
        self.session_uid = str(random.randint(1, 999999999))
    self.cookieInterface.set_cookie('user_uid', self.session_uid, str(self.session_timeout_seconds))
    # here we renew the internal timeout timer
    if self.timeout_timer:
        self.timeout_timer.cancel()
    self.timeout_timer = threading.Timer(self.session_timeout_seconds, self.on_session_expired)
    self.expired = False
    self.timeout_timer.start()
|
def start(self):
    """start command in background and does not wait for it.

    :raises EasyProcessError: if the process was already started, or if
        spawning the subprocess fails (the underlying OSError is kept on
        ``self.oserror``).
    :rtype: self
    """
    if self.is_started:
        raise EasyProcessError(self, 'process was started twice!')
    if self.use_temp_files:
        # Capture output in temp files instead of pipes; avoids deadlocks
        # when the child produces more output than the pipe buffer holds.
        self._stdout_file = tempfile.TemporaryFile(prefix='stdout_')
        self._stderr_file = tempfile.TemporaryFile(prefix='stderr_')
        stdout = self._stdout_file
        stderr = self._stderr_file
    else:
        stdout = subprocess.PIPE
        stderr = subprocess.PIPE
    # Normalize each argument's encoding before handing it to Popen.
    cmd = list(map(uniencode, self.cmd))
    try:
        self.popen = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, cwd=self.cwd, env=self.env, )
    except OSError as oserror:
        log.debug('OSError exception: %s', oserror)
        # Keep the original error available to callers before re-raising.
        self.oserror = oserror
        raise EasyProcessError(self, 'start error')
    self.is_started = True
    log.debug('process was started (pid=%s)', self.pid)
    return self
|
def search(self, term):
    """Search for a user by name.

    :param str term: What to search for.
    :return: The results as a SearchWrapper iterator or None if no results.
    :rtype: SearchWrapper or None
    """
    resp = requests.get(self.apiurl + "/users", params={"filter[name]": term}, headers=self.header)
    if resp.status_code != 200:
        raise ServerError
    body = resp.json()
    if not body['meta']['count']:
        return None
    # 'next' link is optional; absent means this is the last page.
    next_link = body['links'].get('next')
    return SearchWrapper(body['data'], next_link, self.header)
|
def _hm_form_message(self, thermostat_id, protocol, source, function, start, payload):
    """Forms a message payload, excluding CRC.

    :param thermostat_id: destination thermostat address byte
    :param protocol: protocol identifier; only constants.HMV3_ID is supported
    :param source: source address byte
    :param function: constants.FUNC_READ or constants.FUNC_WRITE
    :param start: start register, split into low/high bytes in the frame
    :param payload: data bytes for writes (ignored for reads)
    :return: list of message bytes (8-byte header, plus payload for writes)
    """
    if protocol == constants.HMV3_ID:
        start_low = (start & constants.BYTEMASK)
        start_high = (start >> 8) & constants.BYTEMASK
        if function == constants.FUNC_READ:
            # Reads always request the full register range.
            payload_length = 0
            length_low = (constants.RW_LENGTH_ALL & constants.BYTEMASK)
            length_high = (constants.RW_LENGTH_ALL >> 8) & constants.BYTEMASK
        else:
            payload_length = len(payload)
            length_low = (payload_length & constants.BYTEMASK)
            length_high = (payload_length >> 8) & constants.BYTEMASK
        # Frame length byte is the 8-byte header + 2 CRC bytes + payload.
        msg = [thermostat_id, 10 + payload_length, source, function, start_low, start_high, length_low, length_high]
        if function == constants.FUNC_WRITE:
            msg = msg + payload
        # Fix: removed a stray no-op ``type(msg)`` statement that had no
        # effect on the result.
        return msg
    else:
        assert 0, "Un-supported protocol found %s" % protocol
|
def docstr(self, prefix='', include_label=True):
    """Returns a string summarizing the parameter. Format is:
    <prefix>``name`` : {``default``, ``dtype``}
    <prefix>``description`` Label: ``label``.
    """
    # Strip the Python-2 "<type '...'>" wrapper from the dtype repr.
    dtype_str = str(self.dtype).replace("<type '", '').replace("'>", '')
    summary = "%s%s : {%s, %s}\n" % (prefix, self.name, str(self.default), dtype_str)
    summary += "%s %s" % (prefix, self.description)
    if include_label:
        summary += " Label: %s" % (self.label)
    return summary
|
def _pack_cwl(unpacked_cwl):
    """Pack CWL into a single document for submission."""
    # Output file keeps the input's extension with a "-pack" suffix on the stem.
    base, ext = os.path.splitext(unpacked_cwl)
    out_file = "%s-pack%s" % (base, ext)
    cmd = "cwltool --pack {unpacked_cwl} > {out_file}"
    _run_tool(cmd.format(unpacked_cwl=unpacked_cwl, out_file=out_file))
    return out_file
|
def section(self, ctx, optional=False):
    """Return section of the config for a specific context (sub-command).

    Parameters:
        ctx (Context): The Click context object.
        optional (bool): If ``True``, return an empty config object when
            section is missing.

    Returns:
        Section: The configuration section belonging to the active
        (sub-)command (based on ``ctx.info_name``).
    """
    values = self.load()
    try:
        return values[ctx.info_name]
    except KeyError:
        # No such section: either an empty stand-in or a hard failure.
        if not optional:
            raise LoggedFailure("Configuration section '{}' not found!".format(ctx.info_name))
        return configobj.ConfigObj({}, **self.DEFAULT_CONFIG_OPTS)
|
def dangling(self, targets: sos_targets):
    '''returns
    1. missing targets, which are missing from the DAG or from the provided targets
    2. existing targets of provided target list, not in DAG'''
    existing = []
    missing = []
    if env.config['trace_existing']:
        # Classify every dependency not produced by the DAG by whether it
        # already exists on disk.
        for dep in self._all_depends_files.keys():
            if dep in self._all_output_files:
                continue
            if dep.target_exists():
                existing.append(dep)
            else:
                missing.append(dep)
    else:
        # Only collect dependencies that are both unproduced and absent.
        missing = [
            dep for dep in self._all_depends_files.keys()
            if dep not in self._all_output_files and not dep.target_exists()
        ]
    # Same classification for the explicitly provided targets.
    for tgt in targets:
        if tgt in self._all_output_files:
            continue
        if tgt.target_exists('target'):
            existing.append(tgt)
        else:
            missing.append(tgt)
    return missing, existing
|
def enable_chimera_inline():
    """Enable IPython magic commands to run some Chimera actions

    Currently supported:
    - %chimera_export_3D [<model>]:
        Depicts the Chimera 3D canvas in a WebGL iframe. Requires
        a headless Chimera build and a Notebook instance. SLOW.
    - %chimera_run <command>:
        Runs Chimera commands meant to be input in the GUI command line
    """
    from IPython.display import IFrame
    from IPython.core.magic import register_line_magic
    import chimera
    import Midas

    @register_line_magic
    def chimera_export_3D(line):
        # WebGL export only works with the headless (NoGui) viewer.
        if chimera.viewer.__class__.__name__ == 'NoGuiViewer':
            print('This magic requires a headless Chimera build. '
                  'Check http://www.cgl.ucsf.edu/chimera/download.html#unsupported.', file=sys.stderr)
            return
        # NOTE(review): the magic argument is eval'd as Python — safe only
        # in a trusted notebook context.
        models = eval(line) if line else []

        def html(*models):
            if models:
                # Show and select only the requested models before export.
                for m in chimera.openModels.list():
                    m.display = False
                chimera.selection.clearCurrent()
                for model in models:
                    model.display = True
                    chimera.selection.addCurrent(model)
                chimera.runCommand('focus sel')
            chimera.viewer.windowSize = 800, 600
            path = 'chimera_scene_export.html'
            Midas.export(filename=path, format='WebGL')
            # Iframe slightly larger than the canvas to avoid scrollbars.
            return IFrame(path, *[x + 20 for x in chimera.viewer.windowSize])
        return html(*models)
    # register_line_magic has already registered the function; remove the
    # local name per IPython convention.
    del chimera_export_3D

    @register_line_magic
    def chimera_run(line):
        if not line:
            print("Usage: %chimera_run <chimera command>", file=sys.stderr)
            return
        chimera.runCommand(line)
    del chimera_run
|
def log_once(key):
    """Returns True if this is the "first" call for a given key.

    Various logging settings can adjust the definition of "first".

    Example:
        >>> if log_once("some_key"):
        ...     logger.info("Some verbose logging statement")
    """
    global _last_logged
    if _disabled:
        return False
    if key not in _logged:
        # First sighting of this key.
        _logged.add(key)
        _last_logged = time.time()
        return True
    if _periodic_log and time.time() - _last_logged > 60.0:
        # Periodic mode: forget everything after a quiet minute so keys can
        # log again on their next call.
        _logged.clear()
        _last_logged = time.time()
    return False
|
def clean_dict(data):
    """Remove None-valued keys from a dictionary, recursively."""
    if is_mapping(data):
        return {key: clean_dict(value) for key, value in data.items() if value is not None}
    if is_sequence(data):
        # Sequences are filtered of None items and cleaned element-wise.
        return [clean_dict(item) for item in data if item is not None]
    return data
|
def get_music_library_information(self, search_type, start=0, max_items=100,
                                  full_album_art_uri=False, search_term=None,
                                  subcategories=None, complete_result=False):
    """Retrieve music information objects from the music library.

    This method is the main method to get music information items, like
    e.g. tracks, albums etc., from the music library with. It can be used
    in a few different ways:

    The ``search_term`` argument performs a fuzzy search on that string in
    the results, so e.g calling::

        get_music_library_information('artists', search_term='Metallica')

    will perform a fuzzy search for the term 'Metallica' among all the
    artists.

    Using the ``subcategories`` argument will jump directly into that
    subcategory of the search and return results from there. So e.g.
    knowing that among the artists is one called 'Metallica', calling::

        get_music_library_information('artists', subcategories=['Metallica'])

    will jump directly into the 'Metallica' sub category and return the
    albums associated with Metallica, and::

        get_music_library_information('artists',
                                      subcategories=['Metallica', 'Black'])

    will return the tracks of the album 'Black' by the artist 'Metallica'.
    The order of sub category types is: Genres->Artists->Albums->Tracks.
    It is also possible to combine the two, to perform a fuzzy search in a
    sub category.

    The ``start``, ``max_items`` and ``complete_result`` arguments all
    have to do with paging of the results. By default the searches are
    always paged, because there is a limit to how many items we can get at
    a time. This paging is exposed to the user with the ``start`` and
    ``max_items`` arguments. So calling::

        get_music_library_information('artists', start=0, max_items=100)
        get_music_library_information('artists', start=100, max_items=100)

    will get the first and next 100 items, respectively. It is also
    possible to ask for all the elements at once::

        get_music_library_information('artists', complete_result=True)

    This will perform the paging internally and simply return all the
    items.

    Args:
        search_type (str): The kind of information to retrieve. Can be one
            of: ``'artists'``, ``'album_artists'``, ``'albums'``,
            ``'genres'``, ``'composers'``, ``'tracks'``, ``'share'``,
            ``'sonos_playlists'``, or ``'playlists'``, where playlists are
            the imported playlists from the music library.
        start (int, optional): starting number of returned matches
            (zero based). Default 0.
        max_items (int, optional): Maximum number of returned matches.
            Default 100.
        full_album_art_uri (bool): whether the album art URI should be
            absolute (i.e. including the IP address). Default `False`.
        search_term (str, optional): a string that will be used to perform
            a fuzzy search among the search results. If used in
            combination with subcategories, the fuzzy search will be
            performed in the subcategory.
        subcategories (list of str, optional): strings that indicate one
            or more subcategories to dive into.
        complete_result (bool): if `True`, will disable paging (ignore
            ``start`` and ``max_items``) and return all results for the
            search.

    Warning:
        Getting e.g. all the tracks in a large collection might take some
        time.

    Returns:
        `SearchResult`: an instance of `SearchResult`.

    Note:
        * The maximum number of results may be restricted by the unit,
          presumably due to transfer size consideration, so check the
          returned number against that requested.
        * The playlists that are returned with the ``'playlists'`` search
          are the playlists imported from the music library; they are not
          the Sonos playlists.

    Raises:
        `SoCoException` upon errors.
    """
    search = self.SEARCH_TRANSLATION[search_type]
    # Add sub categories to the search path, one path component each.
    if subcategories is not None:
        for category in subcategories:
            search += '/' + url_escape_path(really_unicode(category))
    # Add fuzzy search term, separated from the path by a colon.
    if search_term is not None:
        search += ':' + url_escape_path(really_unicode(search_term))
    item_list = []
    # Seed total_matches high so the loop body runs at least once; the
    # real total is filled in by the first _music_lib_search call.
    metadata = {'total_matches': 100000}
    while len(item_list) < metadata['total_matches']:
        # For complete searches, page manually from where we left off.
        if complete_result:
            start, max_items = len(item_list), 100000
        # Try and get this batch of results
        try:
            response, metadata = self._music_lib_search(search, start, max_items)
        except SoCoUPnPException as exception:
            # 'No such object' UPnP error simply means an empty result.
            if exception.error_code == '701':
                return SearchResult([], search_type, 0, 0, None)
            else:
                raise exception
        # Parse the DIDL-Lite payload into item objects.
        items = from_didl_string(response['Result'])
        for item in items:
            # Check if the album art URI should be fully qualified
            if full_album_art_uri:
                self._update_album_art_to_full_uri(item)
            # Append the item to the list
            item_list.append(item)
        # If we are not after the complete results, then stop after one
        # iteration
        if not complete_result:
            break
    metadata['search_type'] = search_type
    if complete_result:
        metadata['number_returned'] = len(item_list)
    # pylint: disable=star-args
    return SearchResult(item_list, **metadata)
|
def _set_result_from_operation(self):
    """Set the result or exception from the operation if it is complete."""
    # This must be done in a lock to prevent the polling thread
    # and main thread from both executing the completion logic
    # at the same time.
    with self._completion_lock:
        # If the operation isn't complete or if the result has already been
        # set, do not call set_result/set_exception again.
        # Note: self._result_set is set to True in set_result and
        # set_exception, in case those methods are invoked directly.
        if not self._operation.done or self._result_set:
            return
        if self._operation.HasField("response"):
            # Unpack the protobuf Any payload into the concrete result type.
            response = protobuf_helpers.from_any_pb(self._result_type, self._operation.response)
            self.set_result(response)
        elif self._operation.HasField("error"):
            # Surface the server-reported error to waiters as an exception.
            exception = exceptions.GoogleAPICallError(self._operation.error.message, errors=(self._operation.error,), response=self._operation,)
            self.set_exception(exception)
        else:
            # A "done" operation must carry either a response or an error;
            # anything else indicates a protocol violation by the server.
            exception = exceptions.GoogleAPICallError("Unexpected state: Long-running operation had neither " "response nor error set.")
            self.set_exception(exception)
|
def refresh(self, force_cache=False):
    """Perform a system refresh.

    :param force_cache: Force an update of the camera cache
    """
    # Skip entirely unless the throttle allows it or the caller forces it.
    if not (self.check_if_ok_to_update() or force_cache):
        return False
    for sync_name, sync_module in self.sync.items():
        _LOGGER.debug("Attempting refresh of sync %s", sync_name)
        sync_module.refresh(force_cache=force_cache)
    if not force_cache:
        # Prevents rapid clearing of motion detect property
        self.last_refresh = int(time.time())
    return True
|
def encode_function_call(self, function_name, args):
    """Return the encoded function call.

    Args:
        function_name (str): One of the existing functions described in the
            contract interface.
        args (List[object]): The function arguments that will be encoded and
            used in the contract execution in the vm.

    Return:
        bin: The encoded function name and arguments so that it can be used
            with the evm to execute a function call; the binary string follows
            the Ethereum Contract ABI.

    Raises:
        ValueError: If ``function_name`` is not part of the contract
            interface.
    """
    if function_name not in self.function_data:
        # Error message typo fixed: 'Unkown' -> 'Unknown'.
        raise ValueError('Unknown function {}'.format(function_name))
    description = self.function_data[function_name]
    # The first four bytes of the call data are the ABI function selector.
    function_selector = zpad(encode_int(description['prefix']), 4)
    arguments = encode_abi(description['encode_types'], args)
    return function_selector + arguments
|
async def unicode_type(self, elem):
    """Serialize or deserialize a unicode string.

    In write mode, writes a varint length prefix followed by the UTF-8
    bytes of ``elem``.  In read mode, reads the prefix and that many bytes
    and decodes them.

    :param elem: string to serialize (ignored when reading)
    :return: the decoded string in read mode, None in write mode
    """
    if self.writing:
        # Encode first so the varint prefix is the UTF-8 *byte* length.
        # The reader below treats the prefix as a byte count, so writing
        # len(elem) (a character count) would corrupt the stream for any
        # non-ASCII string.
        encoded = bytes(elem, 'utf8')
        await dump_uvarint(self.iobj, len(encoded))
        await self.iobj.awrite(encoded)
    else:
        nbytes = await load_uvarint(self.iobj)
        if nbytes == 0:
            return ''
        buf = bytearray(nbytes)
        await self.iobj.areadinto(buf)
        return str(buf, 'utf8')
|
def payments(self, virtual_account_id, data=None, **kwargs):
    """Fetch Payments for a Virtual Account Id.

    Args:
        virtual_account_id: Id for which Virtual Account objects have to
            be retrieved.
        data (dict, optional): Extra query parameters for the request.
        **kwargs: Additional keyword arguments forwarded to ``get_url``.

    Returns:
        Payment dict for the given Virtual Account Id.
    """
    url = "{}/{}/payments".format(self.base_url, virtual_account_id)
    # `data` defaults to None rather than a mutable `{}` default so the
    # dict is never shared across calls.
    return self.get_url(url, {} if data is None else data, **kwargs)
|
def ingest_flash(self):
    """Process post-flash.

    Extracts the post-flash image from the FITS data and, when the
    pipeline has not already removed it, subtracts it from the science
    array.
    """
    self.flash = extract_flash(self.hdulist[0].header, self.hdulist[1])
    # No post-flash information available: substitute an all-zero image
    # so downstream arithmetic still works.
    if self.flash is None:
        self.flash = np.zeros_like(self.science)
        return
    # Apply the flash subtraction if necessary: FLSHCORR == 'COMPLETE'
    # means the calibration pipeline already removed the post-flash.
    # Not applied to ERR, to be consistent with ingest_dark().
    if self.flshcorr != 'COMPLETE':
        self.science = self.science - self.flash
|
def parameter_list(data):
    """Create a list of Parameter objects from dict descriptions.

    :param data: Iterable of dicts, each with ``'name'`` and ``'value'``
        keys and an optional ``'meta'`` key.
    :type data: list
    :return: Parameter list.
    :rtype: list
    """
    # Docstring fixed: this takes an iterable of dicts and returns a
    # *list*, not a dict as previously documented.
    items = []
    for item in data:
        param = Parameter(item['name'], item['value'])
        # 'meta' is optional; only set it when supplied.
        if 'meta' in item:
            param.meta = item['meta']
        items.append(param)
    return items
|
def _strip_counters ( self , sub_line ) :
"""Find the codeline end by taking out the counters and durations ."""
|
try :
end = sub_line . rindex ( '}' )
except ValueError :
return sub_line
else :
return sub_line [ : ( end + 1 ) ]
|
def connect(self):
    """Get a connection to this peer.

    If a connection to the peer already exists (either incoming or
    outgoing), that's returned. Otherwise, a new outgoing connection to
    this peer is created.

    :return:
        A future containing a connection to this host.
    """
    # Prefer incoming connections over outgoing connections.
    if self.connections:
        # First value is an incoming connection
        future = gen.Future()
        future.set_result(self.connections[0])
        return future
    if self._connecting:
        # If we're in the process of connecting to the peer, just wait
        # and re-use that connection.
        return self._connecting
    conn_future = self._connecting = self.connection_class.outgoing(
        hostport=self.hostport,
        process_name=self.tchannel.process_name,
        serve_hostport=self.tchannel.hostport,
        handler=self.tchannel.receive_call,
        tchannel=self.tchannel,
    )

    def on_connect(_):
        if not conn_future.exception():
            # We don't actually need to handle the exception. That's on
            # the caller.
            connection = conn_future.result()
            self.register_outgoing_conn(connection)
        # BUGFIX: always clear the in-flight marker, even when the attempt
        # failed. Previously this was only reset on success, so a failed
        # connection future stayed cached in self._connecting and every
        # subsequent connect() call returned the same failed future.
        self._connecting = None

    conn_future.add_done_callback(on_connect)
    return conn_future
|
def _summarize(self):
    """Game summary implementation.

    Builds ``self._summary``, a nested dict describing players, settings,
    map, restore state, and metadata for the parsed recorded game.
    """
    self._achievements_summarized = True
    data = None
    if self._postgame:
        # Postgame achievements block, when present in the recording.
        data = self._postgame.action
    game_type = 'DM' if self._header.lobby.game_type == 'DM' else 'RM'
    # Several settings below are hard-coded placeholders; the commented
    # expressions show the postgame fields they would come from.
    self._summary = {
        'players': list(self.players(data, game_type)),
        'diplomacy': self._diplomacy,
        'rec_owner_index': self._header.replay.rec_player,
        'rec_owner_number': self._rec_owner_number(),
        'settings': {
            'type': game_type,
            'difficulty': self._header.scenario.game_settings.difficulty,
            # data.resource_level
            'resource_level': 'standard',
            'population_limit': self._header.lobby.population_limit * 25,
            'speed': mgz.const.SPEEDS.get(self._header.replay.game_speed),
            'reveal_map': self._header.lobby.reveal_map,
            # self._get_starting_age(data.starting_age)
            'starting_age': 'Dark' if game_type == 'RM' else 'Post Imperial',
            'victory_condition': ('conquest' if self._header.scenario.victory.is_conquest else 'other'),
            # not data.team_together
            'team_together': True,
            # data.all_techs
            'all_technologies': False,
            'cheats': self._header.replay.cheats_enabled,
            'lock_teams': self._header.lobby.lock_teams,
            # data.lock_speed
            'lock_speed': True,
            'record_game': True
        },
        'map': {
            'name': self._map.name(),
            'size': self._map.size(),
            'x': self._header.map_info.size_x,
            'y': self._header.map_info.size_y,
            'nomad': self.is_nomad(),
            'regicide': self.is_regicide(),
            'arena': self.is_arena(),
            'hash': self._map_hash()
        },
        'mod': self._get_mod(),
        'restore': {
            'restored': self._header.initial.restore_time > 0,
            'start_int': self._header.initial.restore_time,
            'start_time': mgz.util.convert_to_timestamp(self._header.initial.restore_time / 1000)
        },
        'voobly': {
            'ladder': self._ladder,
            # NOTE(review): `!= None` would normally be `is not None`
            # (PEP 8 E711); left byte-identical here.
            'rated': self._ladder != None
        },
        'number_of_humans': len([p for p in self._header.scenario.game_settings.player_info if p['type'] == 'human']),
        'number_of_ai': len([p for p in self._header.scenario.game_settings.player_info if p['type'] == 'computer']),
        'duration': mgz.util.convert_to_timestamp(self._time / 1000),
        'time_int': self._time,
        'metadata': {
            'hash': self._hash,
            'version': mgz.const.VERSIONS[self._header.version],
            'sub_version': round(self._header.sub_version, 2),
            'filename': os.path.basename(self._path),
            'timestamp': self._get_timestamp()
        },
        'action_histogram': dict(self._actions_without_player),
        'queue': self._queue
    }
    self._summary['finished'] = guess_finished(self._summary, data)
    if self._summary['finished']:
        # Winner information only makes sense for a finished game.
        self._summary['won_in'] = self._won_in().title()
        self._set_winning_team()
    # Optional sections, enabled via parser flags.
    if self._show_chat:
        self._summary['chat'] = self._chat
    if self._show_timeline:
        self._summary['timeline'] = self._timeline
    if self._show_coords:
        self._summary['coords'] = self._coords
|
def currentVersion(self):
    """returns the current version of the site"""
    # Already cached: return immediately.
    if self._currentVersion is not None:
        return self._currentVersion
    # Lazily initialize site metadata on first access.
    self.__init(self._url)
    return self._currentVersion
|
def compute_chunk(self, graph, dates, sids, initial_workspace):
    """Compute the Pipeline terms in the graph for the requested start and
    end dates.

    This is where we do the actual work of running a pipeline.

    Parameters
    ----------
    graph : zipline.pipeline.graph.ExecutionPlan
        Dependency graph of the terms to be executed.
    dates : pd.DatetimeIndex
        Row labels for our root mask.
    sids : pd.Int64Index
        Column labels for our root mask.
    initial_workspace : dict
        Map from term -> output.
        Must contain at least entry for `self._root_mask_term` whose shape
        is `(len(dates), len(sids))`, but may contain additional
        pre-computed terms for testing or optimization purposes.

    Returns
    -------
    results : dict
        Dictionary mapping requested results to outputs.
    """
    self._validate_compute_chunk_params(graph, dates, sids, initial_workspace,)
    get_loader = self._get_loader
    # Copy the supplied initial workspace so we don't mutate it in place.
    workspace = initial_workspace.copy()
    refcounts = graph.initial_refcounts(workspace)
    execution_order = graph.execution_order(refcounts)
    domain = graph.domain
    # Many loaders can fetch data more efficiently if we ask them to
    # retrieve all their inputs at once. For example, a loader backed by a
    # SQL database can fetch multiple columns from the database in a single
    # query.
    # To enable these loaders to fetch their data efficiently, we group
    # together requests for LoadableTerms if they are provided by the same
    # loader and they require the same number of extra rows.
    # The extra rows condition is a simplification: we don't currently have
    # a mechanism for asking a loader to fetch different windows of data
    # for different terms, so we only batch requests together when they're
    # going to produce data for the same set of dates. That may change in
    # the future if we find a loader that can still benefit significantly
    # from batching unequal-length requests.
    def loader_group_key(term):
        loader = get_loader(term)
        extra_rows = graph.extra_rows[term]
        return loader, extra_rows
    # Only produce loader groups for the terms we expect to load. This
    # ensures that we can run pipelines for graphs where we don't have a
    # loader registered for an atomic term if all the dependencies of that
    # term were supplied in the initial workspace.
    will_be_loaded = graph.loadable_terms - viewkeys(workspace)
    loader_groups = groupby(loader_group_key, (t for t in execution_order if t in will_be_loaded),)
    # NOTE(review): execution_order(refcounts) is recomputed here although
    # `execution_order` above holds the same sequence — presumably
    # equivalent since refcounts has not been mutated yet; confirm.
    for term in graph.execution_order(refcounts):
        # `term` may have been supplied in `initial_workspace`, and in the
        # future we may pre-compute loadable terms coming from the same
        # dataset. In either case, we will already have an entry for this
        # term, which we shouldn't re-compute.
        if term in workspace:
            continue
        # Asset labels are always the same, but date labels vary by how
        # many extra rows are needed.
        mask, mask_dates = graph.mask_and_dates_for_term(term, self._root_mask_term, workspace, dates,)
        if isinstance(term, LoadableTerm):
            # Load this term together with every other term that shares
            # its loader and extra-row requirement (see grouping above).
            loader = get_loader(term)
            to_load = sorted(loader_groups[loader_group_key(term)], key=lambda t: t.dataset)
            loaded = loader.load_adjusted_array(domain, to_load, mask_dates, sids, mask,)
            assert set(loaded) == set(to_load), ('loader did not return an AdjustedArray for each column\n' 'expected: %r\n' 'got: %r' % (sorted(to_load), sorted(loaded)))
            workspace.update(loaded)
        else:
            # Computable term: evaluate it from its inputs.
            workspace[term] = term._compute(self._inputs_for_term(term, workspace, graph, domain), mask_dates, sids, mask,)
            if term.ndim == 2:
                assert workspace[term].shape == mask.shape
            else:
                assert workspace[term].shape == (mask.shape[0], 1)
        # Decref dependencies of ``term``, and clear any terms whose
        # refcounts hit 0.
        for garbage_term in graph.decref_dependencies(term, refcounts):
            del workspace[garbage_term]
    # At this point, all the output terms are in the workspace.
    out = {}
    graph_extra_rows = graph.extra_rows
    for name, term in iteritems(graph.outputs):
        # Truncate off extra rows from outputs.
        out[name] = workspace[term][graph_extra_rows[term]:]
    return out
|
def search(self, template: str, first: bool = False) -> _Result:
    """Search the :class:`Element <Element>` for the given parse
    template.

    :param template: The Parse template to use.
    """
    # Materialize the match iterator before handing it off.
    matches = list(findall(template, self.xml))
    return _get_first_or_list(matches, first)
|
def set_scene_config(self, scene_id, config):
    """Reconfigure a scene by scene ID.

    :param scene_id: ID of an existing scene.
    :param config: new configuration for that scene.
    :return: tuple ``(success, sequence_number, message)``.
    """
    # E713 fixed: `scene_id not in` instead of `not scene_id in`.
    if scene_id not in self.state.scenes:  # does that scene_id exist?
        err_msg = "Requested to reconfigure scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    if scene_id == self.state.activeSceneId:
        # TODO: maybe calculate next frame, esp. if static scene
        pass
    # Scenes are immutable records; swap in a copy carrying the new config.
    self.state.scenes[scene_id] = self.state.scenes[scene_id]._replace(config=config)
    sequence_number = self.zmq_publisher.publish_scene_config(scene_id, config)
    logging.debug("Reconfigured scene {sceneNum}".format(sceneNum=scene_id))
    return (True, sequence_number, "OK")
|
def install_os(name, **kwargs):
    '''
    Installs the given image on the device. After the installation is
    complete the device is rebooted, if reboot=True is given as a
    keyworded argument.

    .. code-block:: yaml

        salt://images/junos_image.tgz:
          junos:
            - install_os
            - timeout: 100
            - reboot: True

    Parameters:
      Required
        * path:
          Path where the image file is present on the proxy minion.
      Optional
        * kwargs: keyworded arguments to be given such as timeout, reboot etc
          * timeout:
            Set NETCONF RPC timeout. Can be used for RPCs which
            take a while to execute. (default = 30 seconds)
          * reboot:
            Whether to reboot after installation (default = False)
          * no_copy:
            When True the software package will not be SCP'd to the device.
            (default = False)
    '''
    # Delegate the actual installation to the junos execution module.
    changes = __salt__['junos.install_os'](name, **kwargs)
    return {'name': name, 'changes': changes, 'result': True, 'comment': ''}
|
def meaculpa(nick, rest):
    "Sincerely apologize"
    # An all-whitespace target counts as no target at all.
    target = rest.strip() if rest else ''
    if target:
        return random.choice(phrases.direct_apologies) % dict(a=nick, b=target)
    return random.choice(phrases.apologies) % dict(a=nick)
|
def file(self, owner=None, **kwargs):
    """Create the File TI object.

    Args:
        owner: Owner to associate with the object.
        **kwargs: Additional keyword arguments forwarded to ``File``.

    Return:
        File: The created File TI object, bound to this instance's tcex.
    """
    return File(self.tcex, owner=owner, **kwargs)
|
def f_measure(reference_beats, estimated_beats, f_measure_threshold=0.07):
    """Compute the F-measure of correct vs incorrectly predicted beats.

    "Correctness" is determined over a small window.

    Examples
    --------
    >>> reference_beats = mir_eval.io.load_events('reference.txt')
    >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
    >>> estimated_beats = mir_eval.io.load_events('estimated.txt')
    >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
    >>> f_measure = mir_eval.beat.f_measure(reference_beats,
                                            estimated_beats)

    Parameters
    ----------
    reference_beats : np.ndarray
        reference beat times, in seconds
    estimated_beats : np.ndarray
        estimated beat times, in seconds
    f_measure_threshold : float
        Window size, in seconds
        (Default value = 0.07)

    Returns
    -------
    f_score : float
        The computed F-measure score
    """
    validate(reference_beats, estimated_beats)
    # With no beats on either side there can be no correct predictions.
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0.
    # Best-case one-to-one matching within the tolerance window.
    matching = util.match_events(reference_beats, estimated_beats, f_measure_threshold)
    n_matched = float(len(matching))
    precision = n_matched / len(estimated_beats)
    recall = n_matched / len(reference_beats)
    return util.f_measure(precision, recall)
|
def _update_url_map(self):
    '''Assemble any dynamic or configurable URLs'''
    if HAS_WEBSOCKETS:
        self.url_map['ws'] = WebsocketEndpoint
    # Allow the Webhook URL to be overridden from the conf.
    hook_path = self.apiopts.get('webhook_url', 'hook').lstrip('/')
    self.url_map[hook_path] = Webhook
    # Enable the single-page JS app URL.
    app_path = self.apiopts.get('app_path', 'app').lstrip('/')
    self.url_map[app_path] = App
|
def addsshkey(self, title, key):
    """Add a new ssh key for the current user.

    :param title: title of the new key
    :param key: the key itself
    :return: True if added, False if it didn't add it (it could be because
        the name or key already exists)
    """
    payload = {'title': title, 'key': key}
    response = requests.post(self.keys_url, headers=self.headers,
                             data=payload, verify=self.verify_ssl,
                             auth=self.auth, timeout=self.timeout)
    # 201 Created is the only success status for this endpoint; return the
    # comparison directly instead of an if/else returning True/False.
    return response.status_code == 201
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.