signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def flatten(self, df, column_name):
    """Explode a list-valued column into one row per list element.

    Each element of ``df[column_name]`` is paired with its row's 'md5'
    value, producing a long-format frame.

    :param df: source DataFrame; must contain 'md5' and ``column_name``.
    :param column_name: name of the list-valued column to flatten.
    :return: new DataFrame with columns ['md5', column_name].
    """
    rows = []
    for md5, value_list in zip(df['md5'], df[column_name]):
        for item in value_list:
            rows.append([md5, item])
    return pd.DataFrame(rows, columns=['md5', column_name])
|
def _fetch_all(self):
    """Completely overrides the QuerySet._fetch_all method by adding the
    timestamp to all objects.

    :return: see django.db.models.query.QuerySet._fetch_all for return
        values
    """
    if self._result_cache is None:
        self._result_cache = list(self.iterator())
        # TODO: Do we have to test for ValuesListIterable, ValuesIterable,
        # and FlatValuesListIterable here?
        if self._iterable_class == ModelIterable:
            # Only model instances can carry the query-time stamp.
            for x in self._result_cache:
                self._set_item_querytime(x)
    if self._prefetch_related_lookups and not self._prefetch_done:
        self._prefetch_related_objects()
|
def argsort(self, *args, **kwargs):
    """Return the integer indices that would sort the index.

    Parameters
    ----------
    *args
        Passed to `numpy.ndarray.argsort`.
    **kwargs
        Passed to `numpy.ndarray.argsort`.

    Returns
    -------
    numpy.ndarray
        Integer indices that would sort the index if used as an indexer.

    See Also
    --------
    numpy.argsort : Similar method for NumPy arrays.
    Index.sort_values : Return sorted copy of Index.

    Examples
    --------
    >>> idx = pd.Index(['b', 'a', 'd', 'c'])
    >>> order = idx.argsort()
    >>> order
    array([1, 0, 3, 2])
    >>> idx[order]
    Index(['a', 'b', 'c', 'd'], dtype='object')
    """
    # Prefer the cached integer view when present; fall back to
    # materializing the index values as an ndarray.
    cached = self.asi8
    values = np.array(self) if cached is None else cached
    return values.argsort(*args, **kwargs)
|
def get_ft_names(mod, include_inner=False) -> List[str]:
    """Return all the functions of module `mod`.

    If the module has an attribute ``__all__``, ``get_exports`` picks those;
    otherwise it returns everything defined inside the module.

    :param mod: the module to inspect.
    :param include_inner: also include inner features of classes.
    :return: list of exported names defined in this module/package.
    """
    fn_names = []
    for elt_name in get_exports(mod):
        elt = getattr(mod, elt_name)
        # Skip objects whose source file cannot be determined (builtins,
        # C extensions): they were imported from elsewhere.
        # FIX: was a bare `except:`; inspect.getfile raises TypeError here.
        try:
            fname = inspect.getfile(elt)
        except TypeError:
            continue
        if mod.__file__.endswith('__init__.py'):
            # For a package __init__, keep only submodules.
            if not inspect.ismodule(elt):
                continue
            fn_names.append(elt_name)
        else:
            # For a plain module, keep classes/functions defined in this file.
            if fname != mod.__file__:
                continue
            if not (inspect.isclass(elt) or inspect.isfunction(elt)):
                continue
            fn_names.append(elt_name)
        # NOTE(review): `is_enum(elt.__class__)` tests the metaclass, not the
        # class itself — confirm that is intended.
        if include_inner and inspect.isclass(elt) and not is_enum(elt.__class__):
            fn_names.extend(get_inner_fts(elt))
    return fn_names
|
def add_file(self, filename, overwrite=False):
    """Open given file and add its data by guessing its format.

    The format must be Motorola S-Records, Intel HEX or TI-TXT. Set
    `overwrite` to ``True`` to allow already added data to be overwritten.
    """
    with open(filename, 'r') as fin:
        contents = fin.read()
    self.add(contents, overwrite)
|
def set_provider_links(self, resource_ids):
    """Sets a provider chain in order from the most recent source to the
    originating source.

    arg: resource_ids (osid.id.Id[]): the new source
    raise: InvalidArgument - ``resource_ids`` is invalid
    raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
    raise: NullArgument - ``resource_ids`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.learning.ActivityForm.set_assets_template
    if not isinstance(resource_ids, list):
        raise errors.InvalidArgument()
    if self.get_provider_links_metadata().is_read_only():
        raise errors.NoAccess()
    # Validate every id before mutating any state.
    for resource_id in resource_ids:
        if not self._is_valid_id(resource_id):
            raise errors.InvalidArgument()
    self._my_map['providerLinkIds'] = [str(rid) for rid in resource_ids]
|
def fetch_file(self, in_path, out_path):
    '''Save a remote file to the specified local path via SFTP.

    :param in_path: remote path to fetch.
    :param out_path: local destination path.
    :raises errors.AnsibleError: if the SFTP connection cannot be opened
        or the transfer fails.
    '''
    vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
    try:
        self.sftp = self._connect_sftp()
    except:
        # NOTE(review): bare except hides the underlying connection error;
        # consider narrowing and chaining the original exception.
        raise errors.AnsibleError("failed to open a SFTP connection")
    try:
        self.sftp.get(in_path, out_path)
    except IOError:
        raise errors.AnsibleError("failed to transfer file from %s" % in_path)
|
def parameter_names_flat(self, include_fixed=False):
    """Return the flattened parameter names for all subsequent parameters
    of this parameter. We do not include the name for self here!

    If you want the names for fixed parameters as well in this list,
    set include_fixed to True.

    :param bool include_fixed: whether to include fixed names here.
    """
    names = []
    for param in self.flattened_parameters:
        base = param.hierarchy_name()
        if param.size > 1:
            # Multi-dimensional parameters get one entry per index.
            names.extend("{}[{!s}]".format(base, idx) for idx in param._indices())
        else:
            names.append(base)
    names = np.array(names)
    if not include_fixed and self._has_fixes():
        return names[self._fixes_]
    return names
|
def mask_reduce(mask):
    """Reduce a boolean mask, removing all-False slices in any dimension.

    Parameters
    ----------
    mask : ndarray with bool dtype
        The mask which is to be reduced

    Returns
    -------
    A boolean mask with no all-False slices.
    """
    reduced = mask.copy()
    ndim = len(reduced.shape)
    for axis in range(ndim):
        # Collapse every other axis: True where this slice has any hit.
        other_axes = tuple(a for a in range(ndim) if a != axis)
        keep = reduced.max(axis=other_axes, keepdims=True).flatten()
        index = [slice(None)] * ndim
        index[axis] = [i for i, flag in enumerate(keep) if flag]
        reduced = reduced[tuple(index)]
    return reduced
|
async def clear(self):
    """Close all free connections in pool."""
    # Acquire the pool condition (old-style awaitable-lock idiom).
    with (await self._cond):
        while self._free:
            conn = self._free.popleft()
            await conn.close()
        # Wake a waiter now that the free list has changed.
        self._cond.notify()
|
def segments_to_parameters(segments):
    """For 3D line segments defined by two points, turn them into an
    origin defined as the closest point along the line to the zero origin
    as well as a direction vector and start and end parameters.

    Parameters
    ----------
    segments : (n, 2, 3) float
        Line segments defined by start and end points

    Returns
    -------
    origins : (n, 3) float
        Point on line closest to [0, 0, 0]
    vectors : (n, 3) float
        Unit line directions
    parameters : (n, 2) float
        Start and end distance pairs for each line
    """
    segments = np.asanyarray(segments, dtype=np.float64)
    if not util.is_shape(segments, (-1, 2, (2, 3))):
        raise ValueError('incorrect segment shape!', segments.shape)
    # make the initial origin one of the end points
    endpoint = segments[:, 0]
    vectors = segments[:, 1] - endpoint
    vectors_norm = np.linalg.norm(vectors, axis=1)
    # Normalize in place to unit directions.
    vectors /= vectors_norm.reshape((-1, 1))
    # find the parameter of the point along the line nearest the origin
    # (row-wise dot product of endpoint with unit direction)
    offset = util.diagonal_dot(endpoint, vectors)
    # points nearest [0, 0, 0] will be our new origin
    origins = endpoint + (offset.reshape((-1, 1)) * -vectors)
    # parametric start and end of line segment
    parameters = np.column_stack((offset, offset + vectors_norm))
    return origins, vectors, parameters
|
def tile(self, ncols, nrows):
    """Automatically tile the panels of the figure.

    This will re-arrange all elements of the figure (first in the
    hierarchy) so that they will uniformly cover the figure area.

    Parameters
    ----------
    ncols, nrows : int
        The number of columns and rows to arrange the elements into.

    Notes
    -----
    ncols * nrows must be larger or equal to the number of elements,
    otherwise some elements will go outside the figure borders.
    """
    # Cell size in pixels.
    dx = (self.width / ncols).to('px').value
    dy = (self.height / nrows).to('px').value
    ix, iy = 0, 0
    for el in self:
        el.move(dx * ix, dy * iy)
        ix += 1
        if ix >= ncols:
            # Wrap to the start of the next row.
            ix = 0
            iy += 1
        if iy > nrows:
            # NOTE(review): triggers only once iy EXCEEDS nrows, so one row
            # beyond the grid can be placed before breaking — confirm.
            break
    return self
|
def replace_currency_by_id(cls, currency_id, currency, **kwargs):
    """Replace Currency.

    Replace all attributes of Currency.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.replace_currency_by_id(currency_id, currency, async=True)
    >>> result = thread.get()

    :param async bool
    :param str currency_id: ID of currency to replace (required)
    :param Currency currency: Attributes of currency to replace (required)
    :return: Currency
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper; when
    # 'async' is set the helper itself returns the request thread.
    return cls._replace_currency_by_id_with_http_info(currency_id, currency, **kwargs)
|
def yaml2tree(cls, yamltree):
    """Class method that creates a tree from YAML.

    Example yamltree data::

        - !Node &root
          name: "root node"
          parent: null
          data:
            testpara: 111
        - !Node &child1
          name: "child node"
          parent: *root
        - !Node &gc1
          name: "grand-child node"
          parent: *child1

    :param yamltree: a string of YAML describing the nodes in the tree,
        or the path to a file containing the data.
    :type yamltree: str
    :returns: the root node of the tree.
    :rtype: Node
    """
    # Register the YAML constructors only once per class.
    if not cls.YAML_setup:
        cls.setup_yaml()
        cls.YAML_setup = True
    # Accept either a path to a YAML file or inline YAML text.
    if os.path.isfile(yamltree):
        with open(yamltree) as fh:
            yaml_data = fh.read()
    else:
        yaml_data = yamltree
    list_of_nodes = yaml.safe_load(yaml_data)
    # The first document entry is taken as the tree root.
    yamltree_root = list_of_nodes[0]
    return yamltree_root
|
def experiment(f=None, local_dependencies=None, start_chiboard=True, default_logdir=""):
    """Decorator that transforms the decorated function into a chi.Experiment.

    :param f: the function to wrap (None when the decorator is used with
        arguments).
    :param local_dependencies: passed through to Experiment.
    :param start_chiboard: whether to start chiboard.
    :param default_logdir: default log directory for the experiment.
    :return: the Experiment's run callable, or a decorator when f is None.
    """
    if not f:
        # Called with arguments: return a decorator awaiting the function.
        return lambda f: experiment(f, local_dependencies, start_chiboard, default_logdir)
    else:
        if sys.modules[f.__module__].__name__ == '__main__':
            # Decorated inside the executed script: run immediately with
            # command-line arguments.
            a = Experiment(f, local_dependencies, start_chiboard, default_logdir)
            a.parse_args_and_run()
        # NOTE(review): a second Experiment is constructed here even when one
        # has already run above — confirm the duplication is intended.
        return Experiment(f, local_dependencies, start_chiboard, default_logdir).run
|
def _raiseImageMissing(self, pattern):
    """Build an ImageMissing event and trigger the default handler (or the
    custom handler, if one has been specified).

    Returns True if the throwing method should retry, False if it should
    skip, and throws an exception if it should abort.
    """
    event = ImageMissingEvent(self, pattern=pattern, event_type="MISSING")
    if self._imageMissingHandler is not None:
        self._imageMissingHandler(event)
    # The handler may have set a response; otherwise use the default.
    response = (event._response or self._findFailedResponse)
    # if response == "PROMPT": # Prompt not valid for ImageMissing error
    #     response = _findFailedPrompt(pattern)
    if response == "ABORT":
        raise FindFailed(event)
    elif response == "SKIP":
        return False
    elif response == "RETRY":
        return True
    # NOTE(review): any other response value falls through and returns None
    # implicitly — confirm callers handle that.
|
def save_to_temp(content, file_name=None):
    """Save the contents into a file in the system temp directory.

    :param content: text to write.
    :param file_name: name of the file inside the temp dir; if None, a
        unique name is generated (the original crashed with TypeError on
        ``os.path.join(temp_dir, None)``).
    :return: full path of the written file.
    """
    temp_dir = tempfile.gettempdir()
    if file_name is None:
        # Generate a unique file instead of crashing on a None name.
        fd, out_file = tempfile.mkstemp(dir=temp_dir)
        os.close(fd)
    else:
        out_file = os.path.join(temp_dir, file_name)
    # Context manager guarantees the handle is closed even on write errors.
    with open(out_file, 'w') as fh:
        fh.write(content)
    return out_file
|
def GetInterfaceDescription(self, interface):
    """The standardized org.varlink.service.GetInterfaceDescription()
    varlink method.

    :raises InterfaceNotFound: if *interface* is not registered.
    """
    try:
        entry = self.interfaces[interface]
    except KeyError:
        raise InterfaceNotFound(interface)
    return {'description': entry.description}
|
def _is_path(s):
    """Return whether an object is an existing filesystem path."""
    if isinstance(s, string_types):
        try:
            return op.exists(s)
        except (OSError, ValueError):
            # e.g. a string too long to be a path or containing NUL bytes.
            return False
    else:
        return False
|
def _as_array(self, include_missing=False, get_non_selected=False, weighted=True, include_transforms_for_dims=False, ):
    """Get crunch cube as ndarray.

    Args
        include_missing (bool): Include rows/cols for missing values.
        get_non_selected (bool): Get non-selected slices for MR vars.
        weighted (bool): Take weighted or unweighted counts.
        include_transforms_for_dims (list): For which dims to include
            headings & subtotals (H&S) transformations.

    Returns
        res (ndarray): Tabular representation of crunch cube
    """
    # NOTE(review): get_non_selected is accepted but never used here —
    # confirm whether it should influence the measure selection.
    # Pipeline: raw measure array -> apply missings -> apply subtotals.
    return self._apply_subtotals(
        self._apply_missings(
            self._measure(weighted).raw_cube_array,
            include_missing=include_missing),
        include_transforms_for_dims, )
|
def pip_remove(self, name=None, prefix=None, pkgs=None):
    """Remove a pip package in given environment by `name` or `prefix`.

    :param name: environment name.
    :param prefix: environment prefix.
    :param pkgs: a package name, or a list/tuple of package names.
    :return: result of the underlying pip call.
    """
    logger.debug(str((prefix, pkgs)))
    # Join multiple package names into one space-separated token.
    pkg = ' '.join(pkgs) if isinstance(pkgs, (list, tuple)) else pkgs
    return self._call_pip(name=name, prefix=prefix,
                          extra_args=['uninstall', '--yes', pkg])
|
def apply_inference_graph(model_path):
    """Run inference from a different graph, which receives encoded images
    buffers.

    Loads model weights from *model_path*, builds an offline predictor
    whose input and output are encoded image byte buffers, feeds it
    'lena.png' and writes the prediction to 'applied_inference_graph.png'.
    """
    pred_config = PredictConfig(
        session_init=get_model_loader(model_path),
        model=InferenceOnlyModel(),
        input_names=['input_img_bytes'],
        output_names=['prediction_img_bytes'])
    pred = OfflinePredictor(pred_config)
    # NOTE(review): the file handle from open() is never closed here.
    buf = open('lena.png', 'rb').read()
    prediction = pred([buf])[0]
    with open('applied_inference_graph.png', 'wb') as f:
        f.write(prediction[0])
|
def cudnnSetTensor(handle, srcDesc, srcData, value):
    """Set all data points of a tensor to a given value: srcDest = value.

    Parameters
    ----------
    handle : cudnnHandle
        Handle to a previously created cuDNN context.
    srcDesc : cudnnTensorDescriptor
        Handle to a previously initialized tensor descriptor.
    srcData : void_p
        Pointer to data of the tensor described by srcDesc descriptor.
    value : float
        Value that all elements of the tensor will be set to.
    """
    dataType, _, _, _, _, _, _, _, _ = cudnnGetTensor4dDescriptor(srcDesc)
    # FIX: the original referenced an undefined name `alpha`; the parameter
    # is called `value` (NameError at runtime).
    if dataType == cudnnDataType['CUDNN_DATA_DOUBLE']:
        valueRef = ctypes.byref(ctypes.c_double(value))
    else:
        valueRef = ctypes.byref(ctypes.c_float(value))
    status = _libcudnn.cudnnSetTensor(handle, srcDesc, srcData, valueRef)
    cudnnCheckStatus(status)
|
def upload_cbn_dir(dir_path, manager):
    """Uploads CBN data to edge store.

    :param str dir_path: Directory full of CBN JGIF files
    :param pybel.Manager manager: manager used for citation enrichment and
        database upload
    """
    t = time.time()
    for jfg_path in os.listdir(dir_path):
        # Only process JGIF files.
        if not jfg_path.endswith('.jgf'):
            continue
        path = os.path.join(dir_path, jfg_path)
        log.info('opening %s', path)
        with open(path) as f:
            cbn_jgif_dict = json.load(f)
        graph = pybel.from_cbn_jgif(cbn_jgif_dict)
        # Write the BEL rendering of the graph next to the source file.
        out_path = os.path.join(dir_path, jfg_path.replace('.jgf', '.bel'))
        with open(out_path, 'w') as o:
            pybel.to_bel(graph, o)
        strip_annotations(graph)
        enrich_pubmed_citations(manager=manager, graph=graph)
        pybel.to_database(graph, manager=manager)
        # Blank line as a per-file separator in the log.
        log.info('')
    log.info('done in %.2f', time.time() - t)
|
def object_permission_set(self):
    '''All users have view permissions. Admin users, and users with
    org:admin can create, update, and delete any user. Any user can update
    or delete themselves. Only admins can create or modify other admin
    users.'''
    return Or(
        # Read-only (safe) requests are always allowed.
        AllowOnlySafeHttpMethod,
        AllowAdmin,
        # org:admin may act on non-superusers, but may not grant admin.
        And(AllowPermission('org:admin'),
            ObjAttrTrue(lambda _, u: not u.is_superuser),
            ObjAttrTrue(lambda r, _: r.data.get('admin') is not True)),
        # A user may modify themselves, but may not self-grant admin.
        And(AllowModify,
            ObjAttrTrue(lambda req, user: user == req.user),
            ObjAttrTrue(lambda r, _: r.data.get('admin') is not True)), )
|
def find_initial_count(data_tuple):
    """Return the count of elements occurring before the first tuple in the
    given input.

    Fixes: the original returned the *index of the last element* (len-1)
    when no tuple was present, and raised NameError on empty input.

    Examples:
        >>> find_initial_count((1, 5, 7, (4, 6), 10))
        3
        >>> find_initial_count((2, 9, (5, 7), 11))
        2
        >>> find_initial_count((11, 15, 5, 8, (2, 3), 8))
        4

    Args:
        data_tuple: Input sequence of various elements possibly including
            tuples.
    Returns:
        The count of elements occurring before the first tuple; the full
        length if no tuple is present.
    """
    for index, element in enumerate(data_tuple):
        if isinstance(element, tuple):
            return index
    # No tuple at all: every element precedes "the first tuple".
    return len(data_tuple)
|
def is_using_format(cls, markers, used_formats=None):
    """Args:
        markers (str | unicode): Space separated list of markers to look for
        used_formats (str | unicode): Formats to consider
            (default: cls.used_formats)

    Returns:
        (bool): True if any one of the 'markers' is seen in 'used_formats'
    """
    formats = cls.used_formats if used_formats is None else used_formats
    # Nothing to match against (or with) -> trivially False.
    if not (markers and formats):
        return False
    return any(m in formats for m in flattened(markers, split=(" ", UNIQUE)))
|
def get_parent_label(self, treepos):
    """Given the treeposition of a node, return the label of its parent.

    Returns None, if the node has no parent.
    """
    parent_pos = self.get_parent_treepos(treepos)
    if parent_pos is None:
        return None
    return self.dgtree[parent_pos].label()
|
def atualizar_software_sat(retorno):
    """Build a :class:`RespostaSAT` from the (unicode) return value of the
    :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat` function.

    :raises ExcecaoRespostaSAT: if the response code (EEEEE) is not '14000'.
    """
    resposta = analisar_retorno(forcar_unicode(retorno), funcao='AtualizarSoftwareSAT')
    if resposta.EEEEE not in ('14000', ):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
|
async def _readline ( self , reader ) :
"""Readline helper"""
|
ret = await reader . readline ( )
if len ( ret ) == 0 and reader . at_eof ( ) :
raise EOFError ( )
return ret
|
def cache_call(self, method, *options):
    """Call a remote method and store the result locally. Subsequent
    calls to the same method with the same arguments will return the
    cached result without invoking the remote procedure. Cached results
    are kept indefinitely and must be manually refreshed with a call to
    :py:meth:`.cache_call_refresh`.

    :param str method: The name of the remote procedure to execute.
    :return: The return value from the remote function.
    """
    # Key the cache on the encoded options; hash long keys so the stored
    # key stays small (20 bytes is the SHA-1 digest size).
    options_hash = self.encode(options)
    if len(options_hash) > 20:
        options_hash = hashlib.new('sha1', options_hash).digest()
    options_hash = sqlite3.Binary(options_hash)
    with self.cache_lock:
        cursor = self.cache_db.cursor()
        cursor.execute('SELECT return_value FROM cache WHERE method = ? AND options_hash = ?', (method, options_hash))
        return_value = cursor.fetchone()
    if return_value:
        # Cache hit: decode the stored blob.
        return_value = bytes(return_value[0])
        return self.decode(return_value)
    # Cache miss: perform the remote call, then persist the result.
    return_value = self.call(method, *options)
    store_return_value = sqlite3.Binary(self.encode(return_value))
    with self.cache_lock:
        cursor = self.cache_db.cursor()
        cursor.execute('INSERT INTO cache (method, options_hash, return_value) VALUES (?, ?, ?)', (method, options_hash, store_return_value))
        self.cache_db.commit()
    return return_value
|
def wait_until_first_element_is_found(self, elements, timeout=None):
    """Search list of elements and wait until one of them is found.

    :param elements: list of PageElements or element locators as a tuple
        (locator_type, locator_value) to be found sequentially
    :param timeout: max time to wait
    :returns: first element found
    :rtype: toolium.pageelements.PageElement or tuple
    :raises TimeoutException: If no element in the list is found after the
        timeout
    """
    try:
        return self._wait_until(self._expected_condition_find_first_element, elements, timeout)
    except TimeoutException as exception:
        msg = 'None of the page elements has been found after %s seconds'
        # Fall back to the configured explicit wait when none was given.
        timeout = timeout if timeout else self.get_explicitly_wait()
        self.logger.error(msg, timeout)
        exception.msg += "\n {}".format(msg % timeout)
        raise exception
|
def bdecode(f_or_data):
    """bdecodes data by looking up the type byte, and using it to look up
    the respective decoding function, which in turn is used to return the
    decoded object.

    The parameter can be a file opened in bytes mode, bytes or a string
    (the last of which will be decoded).
    """
    # Normalize the input down to a binary file-like object.
    if isinstance(f_or_data, str):
        f_or_data = f_or_data.encode()
    if isinstance(f_or_data, bytes):
        f_or_data = BytesIO(f_or_data)
    # TODO: the following line is the only one that needs readahead.
    # peek returns an arbitrary amount of bytes, so we have to slice.
    if f_or_data.seekable():
        # Read the type byte, then rewind so the decoder also sees it.
        first_byte = f_or_data.read(1)
        f_or_data.seek(-1, SEEK_CUR)
    else:
        first_byte = f_or_data.peek(1)[:1]
    btype = TYPES.get(first_byte)
    if btype is not None:
        return btype(f_or_data)
    else:
        # Used in dicts and lists to designate an end
        assert_btype(f_or_data.read(1), _TYPE_END)
        return None
|
def meta_project(self, attr_list=None, all_but=None, new_attr_dict=None):
    """*Wrapper of* ``PROJECT``.

    Project the metadata based on a list of attribute names.

    :param attr_list: list of the metadata fields to select
    :param all_but: list of metadata that must be excluded from the
        projection.
    :param new_attr_dict: an optional dictionary of the form
        {'new_field_1': function1, 'new_field_2': function2, ...} in which
        every function computes the new field based on the values of the
        others
    :return: a new GMQLDataset

    Notice that if attr_list is specified, all_but cannot be specified
    and viceversa.
    """
    return self.project(
        projected_meta=attr_list,
        all_but_meta=all_but,
        new_attr_dict=new_attr_dict,
    )
|
def MakeDeployableBinary(self, template_path, output_path):
    """Add the config to the client template.

    Copies the zip template to *output_path* and appends the generated
    client configuration as config.yaml.

    :param template_path: path of the zip template to copy.
    :param output_path: destination path for the deployable binary.
    :return: output_path
    """
    context = self.context + ["Client Context"]
    utils.EnsureDirExists(os.path.dirname(output_path))
    client_config_data = self.GetClientConfig(context)
    shutil.copyfile(template_path, output_path)
    # FIX: use a context manager so the archive is closed (and flushed)
    # even if writestr raises.
    with zipfile.ZipFile(output_path, mode="a") as zip_file:
        zip_info = zipfile.ZipInfo(filename="config.yaml")
        zip_file.writestr(zip_info, client_config_data)
    return output_path
|
def recipe_get(backend, recipe):
    """Get the latest files for this recipe.

    Works from inside a recipe folder (recipe name optional, but must
    match the folder) or from a kitchen root dir (recipe name required).
    """
    recipe_root_dir = DKRecipeDisk.find_recipe_root_dir()
    if recipe_root_dir is None:
        # Not inside a recipe folder: need a recipe name and a kitchen.
        if recipe is None:
            raise click.ClickException("\nPlease change to a recipe folder or provide a recipe name arguement")
        # raise click.ClickException('You must be in a Recipe folder')
        kitchen_root_dir = DKKitchenDisk.is_kitchen_root_dir()
        if not kitchen_root_dir:
            raise click.ClickException("\nPlease change to a recipe folder or a kitchen root dir.")
        recipe_name = recipe
        start_dir = DKKitchenDisk.find_kitchen_root_dir()
    else:
        # Inside a recipe folder: derive the name, sanity-check the argument.
        recipe_name = DKRecipeDisk.find_recipe_name()
        if recipe is not None:
            if recipe_name != recipe:
                raise click.ClickException("\nThe recipe name argument '%s' is inconsistent with the current directory '%s'" % (recipe, recipe_root_dir))
        start_dir = recipe_root_dir
    kitchen_name = Backend.get_kitchen_name_soft()
    click.secho("%s - Getting the latest version of Recipe '%s' in Kitchen '%s'" % (get_datetime(), recipe_name, kitchen_name), fg='green')
    check_and_print(DKCloudCommandRunner.get_recipe(backend.dki, kitchen_name, recipe_name, start_dir))
|
def get(token: Union[str, int] = None) -> 'Role':
    """Return enum instance corresponding to input token.

    :param token: token identifying role to indy-sdk: 'STEWARD',
        'TRUSTEE', 'TRUST_ANCHOR', '' or None
    :return: enum instance corresponding to input token, or None if no
        role matches
    """
    if token is None:
        # No token means an ordinary user.
        return Role.USER
    for role in Role:
        if role == Role.ROLE_REMOVE:
            continue
        # ROLE_REMOVE is not a sensible role to parse from any configuration
        if isinstance(token, int) and token in role.value:
            return role
        if str(token).upper() == role.name or token in (str(v) for v in role.value):  # could be numeric string
            return role
    return None
|
def _setup_subpix(self, nside=2**16):
    """Subpixels for random position generation."""
    # NOTE(review): the `nside` parameter is unused here — confirm.
    # Only setup once...
    if hasattr(self, 'subpix'):
        return
    # Simulate over full ROI
    self.roi_radius = self.config['coords']['roi_radius']
    # Setup background spatial stuff
    logger.info("Setup subpixels...")
    self.nside_pixel = self.config['coords']['nside_pixel']
    self.nside_subpixel = self.nside_pixel * 2**4  # Could be config parameter
    # Pad roi radius to cover edge healpix
    epsilon = np.degrees(hp.max_pixrad(self.nside_pixel))
    subpix = ugali.utils.healpix.query_disc(self.nside_subpixel, self.roi.vec, self.roi_radius + epsilon)
    superpix = ugali.utils.healpix.superpixel(subpix, self.nside_subpixel, self.nside_pixel)
    # Keep only subpixels whose parent pixel lies inside the ROI.
    self.subpix = subpix[np.in1d(superpix, self.roi.pixels)]
|
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Attempt to use leader-set if supported in the current version of
    Juju, otherwise falls back on relation-set.

    Note that we only attempt to use leader-set if the provided
    relation_id is a peer relation id or no relation id is provided (in
    which case we assume we are within the peer relation context).
    """
    try:
        if relation_id in relation_ids('cluster'):
            return leader_set(settings=relation_settings, **kwargs)
        else:
            raise NotImplementedError
    except NotImplementedError:
        # Older Juju without leader-set, or a non-peer relation id.
        return _relation_set(relation_id=relation_id, relation_settings=relation_settings, **kwargs)
|
def get_mutations(aln_df):
    """Get a list of residue mutations from an alignment DataFrame.

    Args:
        aln_df (DataFrame): Alignment DataFrame with columns
            'type', 'id_a_aa', 'id_a_pos', 'id_b_aa'.

    Returns:
        list: tuples of (original_residue, resnum, mutated_residue), with
        resnum as int in the original sequence's numbering.
    """
    mutation_df = aln_df[aln_df['type'] == 'mutation']
    if mutation_df.empty:
        return []
    # FIX: .copy() avoids pandas' chained-assignment warning — the original
    # assigned into a slice of a slice (SettingWithCopyWarning).
    subset = mutation_df[['id_a_aa', 'id_a_pos', 'id_b_aa']].copy()
    subset['id_a_pos'] = subset['id_a_pos'].astype(int)
    return [tuple(x) for x in subset.values]
|
def has_object_permission(self, request, view, obj=None):
    """Check object permissions based on filters.

    Looks up the filter set for the request's 'sign' query parameter and
    the view action, then verifies *obj* is inside the filtered queryset.
    """
    # NOTE(review): obj defaults to None but obj._meta is dereferenced
    # unconditionally — confirm callers always pass an object.
    filter_and_actions = self._get_filter_and_actions(
        request.query_params.get('sign'),
        view.action,
        '{}.{}'.format(obj._meta.app_label, obj._meta.model_name))
    if not filter_and_actions:
        return False
    qs = view.queryset.filter(**filter_and_actions['filters'])
    # Object is permitted only if it survives the permission filters.
    return qs.filter(id=obj.id).exists()
|
def _create_pax_generic_header(cls, pax_headers, type=tarfile.XHDTYPE):
    """Return a POSIX.1-2001 extended or global header sequence that
    contains a list of keyword, value pairs. The values must be unicode
    objects.

    NOTE: Python 2 code (dict.iteritems, implicit str/bytes mixing).
    """
    records = []
    for keyword, value in pax_headers.iteritems():
        # Best-effort UTF-8 encoding of both keyword and value.
        try:
            keyword = keyword.encode("utf8")
        except Exception:
            pass
        try:
            value = value.encode("utf8")
        except Exception:
            pass
        # A pax record is "<length> <keyword>=<value>\n" where <length>
        # counts the whole record including the length field itself, so
        # iterate until the length value stabilizes (fixed point).
        l = len(keyword) + len(value) + 3
        n = p = 0
        while True:
            n = l + len(str(p))
            if n == p:
                break
            p = n
        records.append("%d %s=%s\n" % (p, keyword, value))
    records = "".join(records)
    # We use a hardcoded "././@PaxHeader" name like star does
    # instead of the one that POSIX recommends.
    info = {}
    info["name"] = "././@PaxHeader"
    info["type"] = type
    info["size"] = len(records)
    info["magic"] = tarfile.POSIX_MAGIC
    # Create pax header + record blocks.
    return cls._create_header(info, tarfile.USTAR_FORMAT) + cls._create_payload(records)
|
def create_arj(archive, compression, cmd, verbosity, interactive, filenames):
    """Build the command list to create an ARJ archive.

    :param archive: output archive path.
    :param compression: accepted for interface parity; unused.
    :param cmd: the arj executable to invoke.
    :param verbosity: accepted for interface parity; unused.
    :param interactive: when falsy, add '-y' to auto-answer prompts.
    :param filenames: files to add (recursively, via '-r').
    :return: the full command list.
    """
    cmdlist = [cmd, 'a', '-r']
    if not interactive:
        cmdlist += ['-y']
    cmdlist += [archive]
    cmdlist += list(filenames)
    return cmdlist
|
def _parse_engine(self):
    """Parse the storage engine in the config.

    Returns:
        str: the configured engine name; ENGINE_DROPBOX when the option
        is absent.
    Raises:
        ConfigError: if the configured engine is not a known engine.
    """
    if self._parser.has_option('storage', 'engine'):
        engine = str(self._parser.get('storage', 'engine'))
    else:
        # Default engine when the [storage] engine option is missing.
        engine = ENGINE_DROPBOX
    assert isinstance(engine, str)
    if engine not in [ENGINE_DROPBOX, ENGINE_GDRIVE, ENGINE_COPY, ENGINE_ICLOUD, ENGINE_BOX, ENGINE_FS]:
        raise ConfigError('Unknown storage engine: {}'.format(engine))
    return str(engine)
|
def handle_log_data(self, m):
    '''Handle an incoming LOG_DATA message during a log download.'''
    if self.download_file is None:
        return
    # Seek when this chunk is not contiguous with what was written so far
    # (out-of-order or retried packets).
    if m.ofs != self.download_ofs:
        self.download_file.seek(m.ofs)
        self.download_ofs = m.ofs
    if m.count != 0:
        s = bytearray(m.data[:m.count])
        self.download_file.write(s)
        # Track received 90-byte chunks for completeness/retry accounting.
        self.download_set.add(m.ofs // 90)
        self.download_ofs += m.count
    self.download_last_timestamp = time.time()
    # A zero-length chunk, or a short final chunk with all prior chunks
    # present, marks the end of the log.
    if m.count == 0 or (m.count < 90 and len(self.download_set) == 1 + (m.ofs // 90)):
        dt = time.time() - self.download_start
        self.download_file.close()
        size = os.path.getsize(self.download_filename)
        speed = size / (1000.0 * dt)
        print("Finished downloading %s (%u bytes %u seconds, %.1f kbyte/sec %u retries)" % (
            self.download_filename, size, dt, speed, self.retries))
        # Reset download state and tell the vehicle we are done.
        self.download_file = None
        self.download_filename = None
        self.download_set = set()
        self.master.mav.log_request_end_send(self.target_system, self.target_component)
        # Move on to the next queued log, if any.
        if len(self.download_queue):
            self.log_download_next()
|
def unsubscribe(self, connection):
    """Remove *connection* from the subscriber set and record the time.

    This should only be called by ``ServerConnection.unsubscribe_session``
    or our book-keeping will be broken.
    """
    self._subscribed_connections.discard(connection)
    self._last_unsubscribe_time = current_time()
|
def get_index_text(self, prefix, name_obj):
    """Produce index text by directive attributes."""
    name, _ = name_obj
    parts = {'name': name, 'prefix': prefix, 'obj_type': self.long_name}
    # A dotted name is split into namespace and short name; the template
    # then includes the namespace. (Equivalent to the try/except around a
    # two-value unpack of rsplit.)
    if '.' in name:
        obj_ns, obj_name = name.rsplit('.', 1)
        parts['name'] = obj_name
        parts['namespace'] = obj_ns
        template = '{name} ({namespace} {obj_type})'
    else:
        template = '{name} ({obj_type})'
    return template.format(**parts)
|
def schedule_host_downtime(self, host, start_time, end_time, fixed, trigger_id, duration, author, comment):
    """Schedule a host downtime

    Format of the line that triggers function call::

        SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;<fixed>;
        <trigger_id>;<duration>;<author>;<comment>

    :param host: host to schedule downtime
    :type host: alignak.object.host.Host
    :param start_time: downtime start time
    :type start_time:
    :param end_time: downtime end time
    :type end_time:
    :param fixed: is downtime fixed
    :type fixed: bool
    :param trigger_id: downtime id that triggered this one
    :type trigger_id: str
    :param duration: downtime duration
    :type duration: int
    :param author: downtime author
    :type author: str
    :param comment: downtime comment
    :type comment: str
    :return: None
    """
    data = {'ref': host.uuid, 'ref_type': host.my_type, 'start_time': start_time,
            'end_time': end_time, 'fixed': fixed, 'trigger_id': trigger_id,
            'duration': duration, 'author': author, 'comment': comment}
    downtime = Downtime(data)
    downtime.add_automatic_comment(host)
    host.add_downtime(downtime)
    # Notify brokers that the host status changed.
    self.send_an_element(host.get_update_status_brok())
    # If this downtime was triggered by another one, register with it.
    if trigger_id not in ('', 0):
        for item in self.daemon.hosts:
            if trigger_id in item.downtimes:
                # BUGFIX: the triggering downtime belongs to *item*, not
                # necessarily to *host*; indexing host.downtimes raised a
                # KeyError whenever the trigger lived on another host.
                item.downtimes[trigger_id].trigger_me(downtime.uuid)
|
def _get_file_magic_name(self, buffer):
    """Return the filetype guessed for a buffer

    :param buffer: bytes
    :return: str of filetype
    """
    default = "Unknown"
    # magic is an optional dependency; degrade gracefully if missing.
    try:
        import magic
    except ImportError:
        return default
    # Several incompatible packages all install themselves as "magic".
    # We support https://github.com/ahupp/python-magic/ which exposes
    # the MagicException attribute.
    if not hasattr(magic, "MagicException"):
        return default
    try:
        guessed = magic.from_buffer(buffer[:1024])
    except magic.MagicError:
        log.exception("Error getting the magic type!")
        return default
    return self._patch_magic(buffer, guessed) if guessed else default
|
def _get_color(self, color):
    """Returns a QColor built from a Pygments color string."""
    # Pygments colors are 'RRGGBB' hex strings; parse each channel.
    red, green, blue = (int(color[i:i + 2], base=16) for i in (0, 2, 4))
    result = QtGui.QColor()
    result.setRgb(red, green, blue)
    return result
|
def length_from_embedding(emb):
    """Compute the length of each sequence in the batch.

    Args:
      emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth].

    Returns:
      a Tensor with shape [batch].
    """
    # Summing the (0/1) mask over the time, dummy and depth axes yields
    # each sequence's non-padding length.
    mask = mask_from_embedding(emb)
    return tf.cast(tf.reduce_sum(mask, [1, 2, 3]), tf.int32)
|
def bsinPoints(pb, pe):
    """Return Bezier control points, when pb and pe stand for a full period
    from (0,0) to (2*pi, 0), respectively, in the user's coordinate system.
    The returned points can be used to draw up to four Bezier curves for
    the complete phase of the sine function graph (0 to 360 degrees).
    """
    span = pe - pb
    assert span.y == 0, "begin and end points must have same y coordinate"
    # One user-space unit: half the period length divided by pi.
    unit = abs(span) * 0.5 / math.pi
    # Bezier constants approximating a sine quarter-wave.
    CP1 = 5.34295228e-01
    CP2 = 1.01474288e+00
    amp = (0, unit)
    cp_lo = (0, unit * CP1)
    cp_hi = (0, unit * CP2)
    # On-curve points at 0, 1/4, 1/2, 3/4 and 1 of the period.
    p0, p4 = pb, pe
    p1 = pb + span * 0.25 - amp
    p2 = pb + span * 0.5
    p3 = pb + span * 0.75 + amp
    # Off-curve control points, two per quarter wave.
    k1 = pb + span * (1. / 12.) - cp_lo
    k2 = pb + span * (2. / 12.) - cp_hi
    k3 = pb + span * (4. / 12.) - cp_hi
    k4 = pb + span * (5. / 12.) - cp_lo
    k5 = pb + span * (7. / 12.) + cp_lo
    k6 = pb + span * (8. / 12.) + cp_hi
    k7 = pb + span * (10. / 12.) + cp_hi
    k8 = pb + span * (11. / 12.) + cp_lo
    return p0, k1, k2, p1, k3, k4, p2, k5, k6, p3, k7, k8, p4
|
def context(self, name, ctx):
    """Execute the block with the given context applied. This manager
    ensures that the context is removed even if an exception is raised
    within the context.

    :param name: key under which the context is registered
    :param ctx: the context object to apply for the duration of the block
    """
    self.enter_context(name, ctx)
    try:
        yield
    finally:
        # finally guarantees exit_context() runs even if the body raises.
        self.exit_context(name)
|
def get_version_without_beta(version_info: FileVersionResult) -> str:
    """Get the project's version string *without* any test or beta build labels.

    :param version_info: The current version_info of the project.
    :return: The current version string, without any beta build string values.
    :raises TypeError: if ``version_info`` is falsy.
    :raises ValueError: if ``version_info`` is not uniform.
    """
    if not version_info:
        raise TypeError("version_info cannot be 'None'!")
    if not version_info.uniform:
        raise ValueError("version_info is not uniform!")
    # Everything before the beta marker is the real version.  If the
    # marker is absent, partition() leaves the whole string in the
    # first slot, so the full version is returned unchanged.
    version, _, _ = version_info.version_result.partition(".123.")
    return version
|
def _query_select_options(self, query, select_columns=None):
    """Add select load options to query. The goal
    is to only SQL select what is requested

    :param query: SQLAlchemy Query obj
    :param select_columns: (list) of columns
    :return: SQLAlchemy Query obj
    """
    if select_columns:
        _load_options = list()
        for column in select_columns:
            # Dotted name => column on a related model, e.g. "user.name".
            if "." in column:
                model_relation = self.get_related_model(column.split(".")[0])
                # Join the relation only once, even when several of its
                # columns are requested.
                if not self.is_model_already_joinded(query, model_relation):
                    query = query.join(model_relation)
                _load_options.append(Load(model_relation).load_only(column.split(".")[1]))
            else:
                # Plain attribute: restrict loading to real columns only
                # (skip relations and callables such as model methods).
                if not self.is_relation(column) and not hasattr(getattr(self.obj, column), "__call__"):
                    _load_options.append(Load(self.obj).load_only(column))
                else:
                    _load_options.append(Load(self.obj))
        query = query.options(*tuple(_load_options))
    return query
|
def augment_main_keys(self, keys, file):
    """Add the main key if it is missing."""
    result = file
    ansigenome_block = """
ansigenome_info:
  galaxy_id: ''
  travis: False
  synopsis: |
    Describe your role in a few paragraphs....
  usage: |
    Describe how to use in more detail...
  #custom: |
  #  Any custom output you want after the usage section..
"""
    for key_name, default_value in keys:
        # Substring membership test: a key already present (anywhere in
        # the text) is left alone.
        if key_name in result:
            continue
        if key_name == "ansigenome_info":
            # make sure ansigenome_info is always on the bottom
            result = result + "\n{0}".format(ansigenome_block)
        else:
            result = "\n{0}: {1}\n\n".format(key_name, default_value) + result
    return result
|
def transform(input_file, input_type, output_type, verbose, quiet):
    """container-transform is a small utility to transform various docker
    container formats to one another.

    Default input type is compose, default output type is ECS

    Default is to read from STDIN if no INPUT_FILE is provided

    All options may be set by environment variables with the prefix "CT_"
    followed by the full argument name.
    """
    converter = Converter(input_file, input_type, output_type)
    click.echo(click.style(converter.convert(verbose), fg='green'))
    if quiet:
        return
    # Conversion warnings go to stderr so they don't pollute the output.
    for message in converter.messages:
        click.echo(click.style(message, fg='red', bold=True), err=True)
|
def _update_cache(self, courseid):
    """Updates the cache

    :param courseid: the (valid) course id of the course
    :raise InvalidNameException, CourseNotFoundException, CourseUnreadableException
    """
    path_to_descriptor = self._get_course_descriptor_path(courseid)
    try:
        course_descriptor = loads_json_or_yaml(path_to_descriptor, self._filesystem.get(path_to_descriptor).decode("utf8"))
    except Exception as e:
        # Wrap any read/parse failure in a domain-specific exception.
        raise CourseUnreadableException(str(e))
    # Remember descriptor modification time so the cache can be invalidated.
    last_modif = {path_to_descriptor: self._filesystem.get_last_modification_time(path_to_descriptor)}
    # Also track compiled translation catalogs ($i18n/<lang>.mo files).
    translations_fs = self._filesystem.from_subfolder("$i18n")
    if translations_fs.exists():
        for f in translations_fs.list(folders=False, files=True, recursive=False):
            lang = f[0:len(f) - 3]  # strip a 3-char extension (e.g. ".mo") — assumes all listed files have one
            if translations_fs.exists(lang + ".mo"):
                last_modif["$i18n/" + lang + ".mo"] = translations_fs.get_last_modification_time(lang + ".mo")
    # Cache the course object together with the timestamps snapshot.
    self._cache[courseid] = (self._course_class(courseid, course_descriptor, self.get_course_fs(courseid), self._task_factory, self._hook_manager), last_modif)
    # Tasks are cached separately; refresh them too.
    self._task_factory.update_cache_for_course(courseid)
|
def make_method_names(self):
    """Create tokens for setting __testname__ on functions"""
    result = []
    for group in self.all_groups:
        for single in group.singles:
            # When the quoted english text (sans surrounding quotes) is
            # just the name with underscores replaced by spaces, the
            # default name is fine and no modifier token is needed.
            if single.english[1:-1] == single.name.replace('_', ' '):
                continue
            result.extend(self.tokens.make_name_modifier(not group.root, single.identifier, single.english))
    return result
|
def _cnn_tranch_filtering(in_file, vrn_files, tensor_type, data):
    """Filter CNN scored VCFs in tranches using standard SNP and Indel truth sets.

    :param in_file: input VCF annotated with CNN scores (CNN_1D or CNN_2D INFO keys)
    :param vrn_files: dict of truth-set resources; must contain "train_hapmap"
        and "train_indels" entries
    :param tensor_type: "reference" (1D CNN score) or "read_tensor" (2D CNN score)
    :param data: sample data dict providing "config"
    :return: path to the bgzipped, indexed, tranche-filtered VCF
    """
    out_file = "%s-filter.vcf.gz" % utils.splitext_plus(in_file)[0]
    # Only recompute when the output is missing or older than the input.
    if not utils.file_uptodate(out_file, in_file):
        runner = broad.runner_from_config(data["config"])
        gatk_type = runner.gatk_type()
        assert gatk_type == "gatk4", "CNN filtering requires GATK4"
        if "train_hapmap" not in vrn_files:
            raise ValueError("CNN filtering requires HapMap training inputs: %s" % vrn_files)
        # file_transaction provides a temporary output path moved into
        # place atomically on success.
        with file_transaction(data, out_file) as tx_out_file:
            params = ["-T", "FilterVariantTranches", "--variant", in_file, "--output", tx_out_file,
                      "--snp-truth-vcf", vrn_files["train_hapmap"],
                      "--indel-truth-vcf", vrn_files["train_indels"]]
            # Select the INFO key matching the CNN architecture used.
            if tensor_type == "reference":
                params += ["--info-key", "CNN_1D", "--tranche", "99"]
            else:
                assert tensor_type == "read_tensor"
                params += ["--info-key", "CNN_2D", "--tranche", "99"]
            runner.run_gatk(params)
    return vcfutils.bgzip_and_index(out_file, data["config"])
|
def pick_enclosure_link(post, parameter=''):
    '''Override URL of the Post to point to url of the first enclosure with
    href attribute non-empty and type matching specified regexp parameter (empty=any).
    Missing "type" attribute for enclosure will be matched as an empty string.
    If none of the enclosures match, link won't be updated.'''
    for enclosure in (post.enclosures or list()):
        url = enclosure.get('href')
        # Skip enclosures without a usable URL.
        if not url:
            continue
        # Empty parameter accepts any type; otherwise regexp-match it,
        # treating a missing "type" attribute as an empty string.
        mime = enclosure.get('type', '')
        if parameter and not re.search(parameter, mime):
            continue
        return dict(link=url)
|
def col_strip(df, col_name, dest=False):
    """Performs str.strip() on a column of a DataFrame

    Parameters:
    df - DataFrame
        DataFrame to operate on
    col_name - string
        Name of column to strip
    dest - bool, default False
        Whether to apply the result to the DataFrame or return it.
        True is apply, False is return.
    """
    stripped = df[col_name].str.strip()
    # Either mutate the frame in place or hand the result back.
    if dest:
        df[col_name] = stripped
    else:
        return stripped
|
def plot(self, attribute=None, ax=None, **kwargs):
    """Plot the rose diagram.

    Parameters
    ----------
    attribute : (n,) ndarray, optional
        Variable to specify colors of the colorbars.
    ax : Matplotlib Axes instance, optional
        If given, the figure will be created inside this axis.
        Default =None. Note, this axis should have a polar projection.
    **kwargs : keyword arguments, optional
        Keywords used for creating and designing the plot.
        Note: 'c' and 'color' cannot be passed when attribute is not None

    Returns
    -------
    fig : Matplotlib Figure instance
        Moran scatterplot figure
    ax : matplotlib Axes instance
        Axes in which the figure is plotted
    """
    # Delegate all plotting work to splot; imported lazily to keep it
    # an optional dependency.
    from splot.giddy import dynamic_lisa_rose
    return dynamic_lisa_rose(self, attribute=attribute, ax=ax, **kwargs)
|
def fromJSON(value):
    """loads the GP object from a JSON string"""
    payload = json.loads(value)
    result = GPString()
    # "defaultValue" takes precedence over "value" when both exist.
    result.value = payload['defaultValue'] if "defaultValue" in payload else payload['value']
    # The parameter name may appear under either key.
    if 'paramName' in payload:
        result.paramName = payload['paramName']
    elif 'name' in payload:
        result.paramName = payload['name']
    return result
|
def _make_from_epo(cls, trg_comp, qr_comp, trg_chrom_sizes, qr_chrom_sizes):
    """crate a chain of collinear rings from the given components.

    The target of the chain will always be on the forward strand. This
    is done to avoid confusion when mapping psl files. So, if
    trg_comp.strand=-, qr_comp.strand=- (resp. +) the chain header will
    have tStrand=+, qStrand=+ (resp. -). No strand changes on the other
    cases.

    :param trg_comp: target (i.e, the first) component
    :type trg_comp: L{EPOitem}
    :param qr_comp: query (i.e, the second) component
    :type qr_comp: L{EPOitem}
    :param trg_chrom_sizes: chromosome sizes of the target
    :type trg_chrom_sizes: dictionary of the type (chrom) --> size
    :param qr_chrom_sizes: chromosome sizes of the query
    :type qr_chrom_sizes: dictionary of the type (chrom) --> size
    :return: A L{Chain} instance
    """
    # size, target, query arrays
    S, T, Q = [], [], []
    # the target strand of the chain must be on the forward strand
    trg_intervals = trg_comp.intervals(reverse=trg_comp.strand == '-')
    qr_intervals = qr_comp.intervals(reverse=trg_comp.strand == '-')
    if len(trg_intervals) == 0 or len(qr_intervals) == 0:
        log.warning("deletion/insertion only intervals")
        return None
    A, B = rem_dash(trg_intervals, qr_intervals)
    # correct for when cigar starts/ends with dashes (in number of bases)
    tr_start_correction = max(B[0][0] - A[0][0], 0)
    tr_end_correction = max(A[-1][1] - B[-1][1], 0)
    qr_start_correction = max(A[0][0] - B[0][0], 0)
    qr_end_correction = max(B[-1][1] - A[-1][1], 0)
    a, b = A.pop(0), B.pop(0)
    # intervals are 0-base, half-open => lengths = coordinate difference
    # Walk both interval lists in lockstep, emitting an aligned-block
    # size plus the target/query gap that precedes the next block.
    while A or B:
        if a[1] < b[1]:
            T.append(0);
            Q.append(A[0][0] - a[1]);
            S.append(min(a[1], b[1]) - max(a[0], b[0]))
            a = A.pop(0)
        elif b[1] < a[1]:
            Q.append(0);
            T.append(B[0][0] - b[1]);
            S.append(min(a[1], b[1]) - max(a[0], b[0]))
            b = B.pop(0)
        elif A and B:
            # rem_dash() should have removed all dash columns; equal
            # endpoints with intervals left is therefore impossible.
            assert 1 > 2, "there are dash columns"
        else:
            break
    S.append(min(a[1], b[1]) - max(a[0], b[0]))
    assert len(T) == len(Q) == len(S) - 1, "(S, T, Q) = (%d, %d, %d)" % tuple(map(len, (S, T, Q)))
    tSize = trg_chrom_sizes[trg_comp.chrom]
    qSize = qr_chrom_sizes[qr_comp.chrom]
    ## UCSC coordinates are 0-based, half-open and e! coordinates are 1-base, closed
    ## chain_start = epo_start - 1 and chain_end = epo_end
    if qr_comp.strand == '+':
        chain = Chain(0, trg_comp.chrom, tSize, "+",
                      (trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
                      qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
                      (qr_comp.start - 1) + qr_start_correction, qr_comp.end - qr_end_correction,
                      qr_comp.gabid)
    else:
        # Note: the start/end corrections are swapped on the reverse strand.
        chain = Chain(0, trg_comp.chrom, tSize, "+",
                      (trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
                      qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
                      (qr_comp.start - 1) + qr_end_correction, qr_comp.end - qr_start_correction,
                      qr_comp.gabid)
    # strand correction. in UCSC coordinates this is: size - coord
    if chain.qStrand == '-':
        chain = chain._replace(qEnd=chain.qSize - chain.qStart, qStart=chain.qSize - chain.qEnd)
    # Sanity: block sizes plus gaps must span the chain extents exactly.
    assert chain.tEnd - chain.tStart == sum(S) + sum(T), "[%s] %d != %d" % (str(chain), chain.tEnd - chain.tStart, sum(S) + sum(T))
    assert chain.qEnd - chain.qStart == sum(S) + sum(Q), "[%s] %d != %d" % (str(chain), chain.qEnd - chain.qStart, sum(S) + sum(Q))
    return chain, S, T, Q
|
def _getPlotData ( self ) :
"""Turns the resultsByClass Dict into a list of bin groups skipping the uncertain group if empty
return : ( label list , ydata list )
: rtype : tuple ( list ( str ) , list ( float ) )"""
|
resultsByClass = self . resultsByClass
try :
if resultsByClass [ 'Uncertain' ] == 0 : # remove uncertain tag if present and = 0
resultsByClass . pop ( 'Uncertain' , None )
except KeyError :
pass
plotData = list ( zip ( * resultsByClass . items ( ) ) )
# ( labels , ydata )
return plotData
|
def _open_url(url):
    """Open a HTTP connection to the URL and return a file-like object."""
    # Stream so the body is not loaded into memory up front.
    response = requests.get(url, stream=True)
    if response.status_code == 200:
        return response
    raise IOError("Unable to download {}, HTTP {}".format(url, response.status_code))
|
def jsonGraph(fdefs, calls, outfile='nout.json'):
    '''Write the call graph as d3-style JSON (nodes + links) under data/.

    For reference, each node has:
    node.name (string)
    node.source (string)
    node.weight (int)
    node.pclass (class node object)
    Each call contains a node in call.source and call.target

    :param fdefs: mapping of name -> list of function-definition nodes
    :param calls: iterable of call objects (each with .source/.target)
    :param outfile: file name written inside the 'data' directory
    '''
    outpath = os.path.join('data', outfile)
    data = dict()
    ids = dict()  # id registry shared across getTaggedNode() calls
    nodelist = []
    # Keep only functions that participate in at least one call.
    for fnlist in fdefs.values():
        for fn in fnlist:
            if isInCalls(fn, calls):
                tagged_node = getTaggedNode(fn, ids)
                nodelist.append(tagged_node)
            else:
                # print("omitted")
                pass
    linklist = []
    # list of links, NOT a linked list ;D
    for call in calls:
        # De-duplicate edges: bump "value" when the same source/target
        # pair recurs.  NOTE the for/else: the else branch appends a new
        # link only when the inner loop finishes without break.
        for link in linklist:
            if call.source.id == link["source"]:
                if call.target.id == link["target"]:
                    link["value"] += 1
                    break
        else:
            link = dict()
            link["source"] = call.source.id
            link["target"] = call.target.id
            link["value"] = 1
            linklist.append(link)
    data["links"] = linklist
    data["nodes"] = nodelist
    with open(outpath, 'w+') as f:
        f.write(json.dumps(data, indent=2))
    return
|
def pOparapar(self, Opar, apar):
    """NAME:

       pOparapar

    PURPOSE:

       return the probability of a given parallel (frequency,angle) offset pair

    INPUT:

       Opar - parallel frequency offset (array) (can be Quantity)

       apar - parallel angle offset along the stream (scalar) (can be Quantity)

    OUTPUT:

       p(Opar,apar)

    HISTORY:

       2015-11-17 - Written - Bovy (UofT)
    """
    # Strip astropy units, converting to the internal unit system.
    if _APY_LOADED and isinstance(Opar, units.Quantity):
        Opar = Opar.to(1 / units.Gyr).value / bovy_conversion.freq_in_Gyr(self._vo, self._ro)
    if _APY_LOADED and isinstance(apar, units.Quantity):
        apar = apar.to(units.rad).value
    # Promote scalar input to an array so the masking below works.
    if isinstance(Opar, (int, float, numpy.float32, numpy.float64)):
        Opar = numpy.array([Opar])
    out = numpy.zeros(len(Opar))
    # Compute ts and where they were at impact for all
    ts = apar / Opar
    apar_impact = apar - Opar * self._timpact
    dOpar_impact = self._kick_interpdOpar(apar_impact)
    Opar_b4impact = Opar - dOpar_impact
    # Evaluate the smooth model in the two regimes:
    # stripped before or after impact
    afterIndx = (ts < self._timpact) * (ts >= 0.)
    out[afterIndx] = super(streamgapdf, self).pOparapar(Opar[afterIndx], apar)
    # (True ^ mask) is element-wise boolean negation of the mask.
    out[True ^ afterIndx] = super(streamgapdf, self).pOparapar(Opar_b4impact[True ^ afterIndx], apar_impact[True ^ afterIndx], tdisrupt=self._tdisrupt - self._timpact)
    return out
|
def _quote(self, value, multiline=True):
    """Return a safely quoted version of a value.

    Raise a ConfigObjError if the value cannot be safely quoted.
    If multiline is ``True`` (default) then use triple quotes
    if necessary.

    * Don't quote values that don't need it.
    * Recursively quote members of a list and return a comma joined list.
    * Multiline is ``False`` for lists.
    * Obey list syntax for empty and single member lists.

    If ``list_values=False`` then the value is only quoted if it contains
    a ``\\n`` (is multiline) or '#'.

    If ``write_empty_values`` is set, and the value is an empty string, it
    won't be quoted.
    """
    if multiline and self.write_empty_values and value == '':
        # Only if multiline is set, so that it is used for values not
        # keys, and not values that are part of a list
        return ''
    # Lists: quote each member and join; a trailing comma marks empty
    # and single-element lists so they round-trip unambiguously.
    if multiline and isinstance(value, (list, tuple)):
        if not value:
            return ','
        elif len(value) == 1:
            return self._quote(value[0], multiline=False) + ','
        return ', '.join([self._quote(val, multiline=False) for val in value])
    if not isinstance(value, string_types):
        if self.stringify:
            value = str(value)
        else:
            raise TypeError('Value "%s" is not a string.' % value)
    if not value:
        return '""'
    # Decide which quoting style the value needs.
    no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
    need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value))
    hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
    check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
    if check_for_single:
        if not self.list_values:
            # we don't quote if ``list_values=False``
            quot = noquot
        # for normal values either single or double quotes will do
        elif '\n' in value:
            # will only happen if multiline is off - e.g. '\n' in key
            raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
        elif ((value[0] not in wspace_plus) and (value[-1] not in wspace_plus) and (',' not in value)):
            quot = noquot
        else:
            quot = self._get_single_quote(value)
    else:
        # if value has '\n' or "'" *and* '"', it will need triple quotes
        quot = self._get_triple_quote(value)
    # '#' starts an inline comment, so unquoted values containing it
    # must still be quoted when list values are enabled.
    if quot == noquot and '#' in value and self.list_values:
        quot = self._get_single_quote(value)
    return quot % value
|
def edit(self, *args, **kwargs):
    """Edit the wiki page.

    Convenience function that utilizes
    :meth:`.AuthenticatedReddit.edit_wiki_page` populating both the
    ``subreddit`` and ``page`` parameters.
    """
    # Delegate to the subreddit, pre-filling the page argument.
    target = self.subreddit
    return target.edit_wiki_page(self.page, *args, **kwargs)
|
def get_figure(neuron, plane, title):
    '''Returns the plotly figure containing the neuron

    :param neuron: neuron object exposing .soma (center, radius) and neurites
    :param plane: '3d' for a 3D plot, otherwise a 2D projection plane
    :param title: figure title string
    :return: dict with 'data' and 'layout' entries suitable for plotly
    '''
    data = list(_make_trace(neuron, plane))
    axis = dict(gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)')
    if plane != '3d':
        # 2D: draw the soma as a filled circle shape on the axes.
        soma_2d = [
            # filled circle
            {'type': 'circle', 'xref': 'x', 'yref': 'y', 'fillcolor': 'rgba(50, 171, 96, 0.7)', 'x0': neuron.soma.center[0] - neuron.soma.radius, 'y0': neuron.soma.center[1] - neuron.soma.radius, 'x1': neuron.soma.center[0] + neuron.soma.radius, 'y1': neuron.soma.center[1] + neuron.soma.radius, 'line': {'color': 'rgba(50, 171, 96, 1)', }, },
        ]
    else:
        soma_2d = []
        # 3D: build a sphere surface from spherical coordinates.
        theta = np.linspace(0, 2 * np.pi, 100)
        phi = np.linspace(0, np.pi, 100)
        z = np.outer(np.ones(100), np.cos(phi)) + neuron.soma.center[2]
        r = neuron.soma.radius
        # NOTE(review): z includes the center offset *before* scaling by r
        # (z * r), while x/y add the center before scaling too — looks like
        # the center may be unintentionally scaled by the radius; confirm.
        data.append(go.Surface(x=(np.outer(np.cos(theta), np.sin(phi)) + neuron.soma.center[0]) * r, y=(np.outer(np.sin(theta), np.sin(phi)) + neuron.soma.center[1]) * r, z=z * r, cauto=False, surfacecolor=['black'] * len(z), showscale=False, ))
    layout = dict(autosize=True, title=title, scene=dict(  # This is used for 3D plots
        xaxis=axis, yaxis=axis, zaxis=axis, camera=dict(up=dict(x=0, y=0, z=1), eye=dict(x=-1.7428, y=1.0707, z=0.7100, )), aspectmode='data'), yaxis=dict(scaleanchor="x"),  # This is used for 2D plots
        shapes=soma_2d, )
    res = dict(data=data, layout=layout)
    return res
|
def ensure_parent_directory(path, ensure_parent=True):
    """Ensures the parent directory exists.

    :param string path: the path of the file
    :param bool ensure_parent: if ``True``, ensure the parent directory of ``path`` exists;
                               if ``False``, ensure ``path`` exists
    :raises: OSError: if the path cannot be created
    """
    parent_directory = os.path.abspath(path)
    if ensure_parent:
        parent_directory = os.path.dirname(parent_directory)
    try:
        # exist_ok=True removes the exists()/makedirs() race of the
        # previous implementation and makes repeated calls idempotent.
        os.makedirs(parent_directory, exist_ok=True)
    except OSError as exc:
        # Chain the original error so the real cause is not lost.
        raise OSError(u"Directory '%s' cannot be created" % parent_directory) from exc
|
def copy(self):
    """Returns a deep copy of a REBOUND simulation. You need to reset
    any function pointers on the copy.

    Returns
    -------
    A rebound.Simulation object.
    """
    # Warning bit-field filled in by the C library during the copy.
    w = c_int(0)
    sim = Simulation()
    clibrebound._reb_copy_simulation_with_messages(byref(sim), byref(self), byref(w))
    # Translate warning bits into Python errors/warnings.
    for majorerror, value, message in BINARY_WARNINGS:
        if w.value & value:
            if majorerror:
                raise RuntimeError(message)
            else:
                # Just a warning
                warnings.warn(message, RuntimeWarning)
    return sim
|
def matrix(mat):
    """Convert a ROOT TMatrix into a NumPy matrix.

    Parameters
    ----------
    mat : ROOT TMatrixT
        A ROOT TMatrixD or TMatrixF

    Returns
    -------
    mat : numpy.matrix
        A NumPy matrix

    Examples
    --------
    >>> from root_numpy import matrix
    >>> from ROOT import TMatrixD
    >>> a = TMatrixD(4, 4)
    >>> a[1][2] = 2
    >>> matrix(a)
    matrix([[ 0.,  0.,  0.,  0.],
            [ 0.,  0.,  2.,  0.],
            [ 0.,  0.,  0.,  0.],
            [ 0.,  0.,  0.,  0.]])
    """
    import ROOT
    # Dispatch on the matrix element type (double vs float).
    dispatch = (
        ((ROOT.TMatrixD, ROOT.TMatrixDSym), _librootnumpy.matrix_d),
        ((ROOT.TMatrixF, ROOT.TMatrixFSym), _librootnumpy.matrix_f),
    )
    for classes, converter in dispatch:
        if isinstance(mat, classes):
            return converter(ROOT.AsCObject(mat))
    raise TypeError("unable to convert object of type {0} "
                    "into a numpy matrix".format(type(mat)))
|
def is_filtered(self, relpath):
    """Say whether this relpath is filtered out"""
    # Only include files under the parent_dir.
    if relpath.startswith("../"):
        return True
    # When a timestamps whitelist is configured, paths that match no
    # pattern are filtered out.  (type check kept deliberately strict.)
    if type(self.timestamps_for) is list:
        if not any(fnmatch.fnmatch(relpath, pattern) for pattern in self.timestamps_for):
            return True
    # A path is kept by default when an exclude list exists, or when
    # neither include nor exclude is configured.
    keep = bool(self.exclude) or not any([self.exclude, self.include])
    # Anything matching an exclude pattern is dropped...
    if self.exclude:
        for pattern in self.exclude:
            if fnmatch.fnmatch(relpath, pattern):
                keep = False
    # ...and anything matching an include pattern is kept.
    if self.include:
        for pattern in self.include:
            if fnmatch.fnmatch(relpath, pattern):
                keep = True
                break
    return not keep
|
def _proxy(self):
    """Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context

    :returns: StepContext for this StepInstance
    :rtype: twilio.rest.studio.v1.flow.engagement.step.StepContext
    """
    # Build the context lazily on first access and cache it afterwards.
    if self._context is None:
        solution = self._solution
        self._context = StepContext(
            self._version,
            flow_sid=solution['flow_sid'],
            engagement_sid=solution['engagement_sid'],
            sid=solution['sid'],
        )
    return self._context
|
def remove_callback_for_action(self, action, callback):
    """Remove a callback for a specific action

    This is mainly for cleanup purposes or a plugin that replaces a GUI widget.

    :param str action: the action of which the callback is going to be removed
    :param callback: the callback to be removed
    """
    # Unknown actions and unregistered callbacks are silently ignored.
    callbacks = self.__action_to_callbacks.get(action)
    if callbacks and callback in callbacks:
        callbacks.remove(callback)
|
def update(dct, dct_merge):
    """Recursively merge dicts.

    Values from *dct_merge* win; nested dicts are merged key by key.
    *dct* is modified in place and also returned.

    :param dct: destination dictionary (mutated)
    :param dct_merge: dictionary whose entries are merged into ``dct``
    :return: ``dct``
    """
    for key, value in dct_merge.items():
        # Recurse only when *both* sides are dicts; previously a non-dict
        # replacement for a dict value crashed with AttributeError when
        # the recursion called .items() on it.
        if key in dct and isinstance(dct[key], dict) and isinstance(value, dict):
            dct[key] = update(dct[key], value)
        else:
            dct[key] = value
    return dct
|
def _key_question ( self , text ) :
"""Action for ' ? '"""
|
if self . get_current_line_to_cursor ( ) :
last_obj = self . get_last_obj ( )
if last_obj and not last_obj . isdigit ( ) :
self . show_object_info ( last_obj )
self . insert_text ( text )
# In case calltip and completion are shown at the same time :
if self . is_completion_widget_visible ( ) :
self . completion_text += '?'
|
def check_is_working(self):
    """Returns True if the wash alert web interface seems to be
    working properly, or False otherwise.

    >>> l.check_is_working()
    """
    payload = {
        "locationid": "5faec7e9-a4aa-47c2-a514-950c03fac460",
        "email": "pennappslabs@gmail.com",
        "washers": 0,
        "dryers": 0,
        "locationalert": "OK",
    }
    try:
        response = requests.post("http://{}/".format(LAUNDRY_DOMAIN), timeout=60, data=payload)
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        return False
    # The service is considered broken when it reports its well-known
    # database-full error message in the response body.
    return "The transaction log for database 'QuantumCoin' is full due to 'LOG_BACKUP'." not in response.text
|
def make_multi_cols(self, num_class, name):
    '''make cols for multi-class predictions

    :param num_class: number of classes; one column is produced per class
    :param name: base column name appended to each class prefix
    :return: list of column names like ['c0_<name>', 'c1_<name>', ...]
    '''
    # The previous xrange/map version was Python-2-only (xrange raises
    # NameError on Python 3, and map() there returns a lazy iterator);
    # build the list directly so it works on both and callers always
    # receive a list.
    return ['c{}_{}'.format(i, name) for i in range(num_class)]
|
def search(self, text=None, lang=None, location=None, unit=None, limit=50, offset=0, reduced=False, local=None, scope=SearchScope.PUBLIC):
    """Search the Iotic Space for public Things with metadata matching the search parameters:
    text, lang(uage), location, unit, limit, offset.  Note that only things which have at
    least one point defined can be found.

    Returns a dict of results keyed on thing id.  With ``reduced=False`` each entry
    contains ``lat``/``long``, a ``matches`` score, ``label``, ``owner`` and a ``points``
    dict mapping point id to its ``matches``, ``label``, ``type`` and ``storesRecent``
    flag, e.g.::

        {
            "2b2d8b068e404861b19f9e060877e002": {
                "long": -1.74803, "lat": 52.4539, "matches": 3.5,
                "label": "Weather Station #2",
                "owner": "3bbf307b43b1460289fe707619dece3d",
                "points": {
                    "a300cc90147f4e2990195639de0af201": {
                        "matches": 3.0, "label": "Feed 201",
                        "type": "Feed", "storesRecent": true
                    }
                }
            }
        }

    With ``reduced=True`` each entry simply maps point id to point type::

        {
            "2b2d8b068e404861b19f9e060877e002": {
                "a300cc90147f4e2990195639de0af201": "Feed",
                "a300cc90147f4e2990195639de0af202": "Feed"
            }
        }

    Raises IOTException containing the error if the infrastructure detects a problem.
    Raises LinkException if there is a communications problem between you and the
    infrastructure.

    `text` (optional) (string) The text to search for. Label and description will be
    searched for both Thing and Point and each word will be used as a tag search too.
    Text search is case-insensitive; tag search is language neutral.

    `lang` (optional) (string) The two-character ISO 639-1 language code to search in,
    e.g. "en", "fr".  Language is used to limit search to only labels and descriptions
    in that language; you will only get labels `in that language` back from search and
    then only if there are any in that language.

    `location` (optional) (dictionary) Latitude, longitude and radius to search within.
    All values are float, radius is in kilometers (km), e.g.
    ``{"lat": 1.2345, "long": 54.321, "radius": 6.789}``.  Note: if `text` has not been
    specified, radius can at most be 25km.

    `unit` (optional) (string) Valid URL of a unit in an ontology, or a constant from
    the Units class - such as Units.METRE.

    `limit` (optional) (integer) Return this many search results.

    `offset` (optional) (integer) Return results starting at this offset - good for paging.

    `reduced` (optional) (boolean) If `true`, return the reduced results just containing
    points and their type.

    `local` (optional) (boolean) **Deprecated**, use `scope` instead.  If `true`, perform
    search at container level.  Check the local_meta flag to determine whether local
    metadata functionality is available.  (Takes precedence over `scope`.)

    `scope` (optional) (SearchScope) Whether to perform PUBLIC, LOCAL (container level)
    or LOCAL_OWN (container level restricted to own things) search.  Check the
    local_meta flag to determine whether local metadata functionality is available.
    (Note that PUBLIC and LOCAL_OWN scopes are always available.)
    """
    logger.info("search(text=\"%s\", lang=\"%s\", location=\"%s\", unit=\"%s\", limit=%s, offset=%s, reduced=%s)", text, lang, location, unit, limit, offset, reduced)
    # Fire the asynchronous request, block until the event completes,
    # and raise if the infrastructure reported a failure.
    evt = self._request_search(text, lang, location, unit, limit, offset, SearchType.REDUCED if reduced else SearchType.FULL, local, scope)
    self._wait_and_except_if_failed(evt)
    return evt.payload['result']
|
def table_drop(self):
    """Drop every table on each configured database engine."""
    for eng in self.engines():
        # Resolve the table set for this engine before logging/dropping.
        drop_targets = self._get_tables(eng, create_drop=True)
        logger.info('Drop all tables for %s', eng)
        self.metadata.drop_all(eng, tables=drop_targets)
|
def extraneous_whitespace(logical_line):
    """Report extraneous whitespace in a logical line.

    Flags whitespace immediately inside parentheses, brackets or braces
    (E201), and immediately before a closing bracket (E202) or a comma,
    semicolon or colon (E203).

    Returns a ``(position, message)`` tuple for the first offence found,
    or ``None`` when the line is clean.
    """
    line = logical_line
    # E201: opener followed by a space.
    for opener in '([{':
        pos = line.find(opener + ' ')
        if pos >= 0:
            return pos + 1, "E201 whitespace after '%s'" % opener
    # E202 (closing bracket, guarded against a preceding comma) and E203.
    for code, group, comma_guard in (('E202', '}])', True), ('E203', ',;:', False)):
        for char in group:
            pos = line.find(' ' + char)
            if pos >= 0 and not (comma_guard and line[pos - 1] == ','):
                return pos, "%s whitespace before '%s'" % (code, char)
|
def _node_has_variant(node: BaseEntity, variant: str) -> bool:
    """Return True if the node carries at least one variant of the given kind.

    :param variant: :data:`PMOD`, :data:`HGVS`, :data:`GMOD`, or :data:`FRAGMENT`
    """
    if VARIANTS not in node:
        return False
    return any(entry[KIND] == variant for entry in node[VARIANTS])
|
def format_seq(self, outstream=None, linewidth=70):
    """Render the sequence, optionally wrapped to a fixed line width.

    :param outstream: if ``None``, the formatted sequence is returned as a
                      string; otherwise it is treated as a file-like object
                      and the formatted sequence is printed to it
    :param linewidth: wrap width for splitting the sequence over multiple
                      lines; 0 disables wrapping
    """
    # Short (or unwrapped) sequences are emitted in one piece.
    if linewidth == 0 or len(self.seq) <= linewidth:
        if outstream is None:
            return self.seq
        print(self.seq, file=outstream)
        return
    chunks = [self.seq[start:start + linewidth]
              for start in range(0, len(self.seq), linewidth)]
    if outstream is None:
        # Each chunk — including the last — gets a trailing newline.
        return ''.join(chunk + '\n' for chunk in chunks)
    for chunk in chunks:
        print(chunk, file=outstream)
|
def _get_rsr_data_version(self):
    """Read the RSR data version from the version file in the RSR directory.

    Returns the first line of the version file, stripped; falls back to
    ``"v0.0.0"`` when no version file exists.
    """
    version_file = os.path.join(self.rsr_dir, RSR_DATA_VERSION_FILENAME)
    if os.path.exists(version_file):
        with open(version_file, 'r') as handle:
            return handle.readline().strip()
    return "v0.0.0"
|
def Delete(self):
    """Remove this port from its public IP and commit the change to cloud.

    >>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0].ports[0].Delete().WaitUntilComplete()
    """
    remaining = [port for port in self.public_ip.ports if port != self]
    self.public_ip.ports = remaining
    return self.public_ip.Update()
|
def has_commit(self):
    """Whether the current state's revision differs from history's head.

    :rtype: boolean
    """
    latest = self.history.current_revision
    return latest.revision_id != self.state.revision_id
|
def filter_nonspellcheckable_tokens(line, block_out_regexes=None):
    """Return *line* with paths, URLs and emails blanked out with spaces.

    Additional spans matching any pattern in *block_out_regexes* are blanked
    as well. Replacements preserve the original line length so character
    positions remain valid for callers.
    """
    patterns = [
        r"[^\s]*:[^\s]*[/\\][^\s]*",
        r"[^\s]*[/\\][^\s]*",
        r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]+\b",
    ]
    if block_out_regexes:
        patterns.extend(block_out_regexes)
    for pattern in patterns:
        chars = list(line)
        for match in re.finditer(pattern, line):
            # Same-length space run keeps every later index stable.
            chars[match.start():match.end()] = ' ' * (match.end() - match.start())
        line = ''.join(chars)
    return line
|
def hide_routemap_holder_route_map_content_continue_holder_cont(self, **kwargs):
    """Auto Generated Code"""
    # Build the netconf config tree:
    # config / hide-routemap-holder / route-map / content / continue-holder / continue
    config = ET.Element("config")
    holder = ET.SubElement(config, "hide-routemap-holder",
                           xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(holder, "route-map")
    # Route-map key leaves, populated from the caller-supplied kwargs.
    for tag, key in (("name", "name"), ("action-rm", "action_rm"), ("instance", "instance")):
        ET.SubElement(route_map, tag).text = kwargs.pop(key)
    content = ET.SubElement(route_map, "content")
    continue_holder = ET.SubElement(content, "continue-holder")
    ET.SubElement(continue_holder, "continue")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def install_vendored(cls, prefix, root=None, expose=None):
    """Install an importer for all vendored code with the given import prefix.

    All distributions listed in ``expose`` will also be made available for
    import in direct, un-prefixed form.

    :param str prefix: The import prefix the installed importer will be
                       responsible for.
    :param str root: The root path of the distribution containing the vendored
                     code. NB: This is the path to the pex code, which serves
                     as the root under which code is vendored at
                     ``pex/vendor/_vendored``.
    :param expose: Optional names of distributions to expose for direct,
                   un-prefixed import.
    :type expose: list of str
    :raise: :class:`ValueError` if any distributions to expose cannot be found.
    """
    # Imported lazily: `pex.vendor` depends on this module at import time.
    from pex import vendor
    root = cls._abs_root(root)
    path_items = [spec.relpath for spec in vendor.iter_vendor_specs()]
    existing = list(cls._iter_installed_vendor_importers(prefix, root, path_items))
    assert len(existing) <= 1, (
        'Unexpected extra importers installed for vendored code:\n\t{}'.format(
            '\n\t'.join(map(str, existing))))
    if existing:
        vendor_importer = existing[0]
    else:
        # Install all vendored code for pex internal access to it through the
        # vendor import `prefix`.
        vendor_importer = cls.install(uninstallable=True, prefix=prefix,
                                      path_items=path_items, root=root)
    if expose:
        # But only expose the bits needed.
        exposed_paths = []
        for path in cls.expose(expose, root):
            sys.path.insert(0, path)
            exposed_paths.append(os.path.relpath(path, root))
        vendor_importer._expose(exposed_paths)
|
def _gaussian_membership_sigma ( smoothness , eps = 0.0005 ) : # 275us @ smothness = 10
r"""Compute the sigma required for a gaussian , such that in a neighbourhood of
smoothness the maximum error is ' eps ' .
The error is here the difference between the clipped integral and one ."""
|
error = 0
deltas = [ 0.1 , 0.01 , 0.001 , 0.0001 ]
sigma = smoothness * 0.3
point = - 1. * ( smoothness + 0.5 )
for delta in deltas :
while error < eps :
sigma += delta
error = scipy . stats . norm . cdf ( 0.5 , point , sigma ) - scipy . stats . norm . cdf ( - 0.5 , point , sigma )
# x , mu , sigma
sigma -= delta
return sigma
|
def _relative ( self , uri ) :
"""if uri is relative , re - relate it to our basedir"""
|
if uri . startswith ( "http:" ) or uri . startswith ( "https:" ) or uri . startswith ( "file:" ) or uri . startswith ( "/" ) :
return uri
elif exists ( uri ) :
return relpath ( uri , self . basedir )
else :
return uri
|
def _remove_curly_braces ( text ) :
"""Remove everything in curly braces .
Curly braces may be nested , so we keep track of depth .
Args :
text : a string
Returns :
a string"""
|
current_pos = 0
depth = 0
ret = ""
for match in re . finditer ( "[{}]" , text ) :
if depth == 0 :
ret += text [ current_pos : match . start ( ) ]
depth += 1 if text [ match . start ( ) ] == "{" else - 1
current_pos = match . end ( )
if depth != 0 : # Many articles have mismatched braces , but it still seems better to remove
# them than not .
pass
else :
ret += text [ current_pos : ]
return ret
|
def fcm_send_bulk_data_messages(api_key, registration_ids=None, condition=None, collapse_key=None, delay_while_idle=False, time_to_live=None, restricted_package_name=None, low_priority=False, dry_run=False, data_message=None, content_available=None, timeout=5, json_encoder=None):
    """Send a data-only push message to many devices (can exceed 1000) via FCM.

    Thin wrapper over ``FCMNotification.multiple_devices_data_message``;
    arguments correspond to those from pyfcm/fcm.py.

    Args:
        api_key: FCM server key; falls back to ``SETTINGS["FCM_SERVER_KEY"]``
            when ``None``.
        registration_ids (list): FCM device registration IDs.
        data_message (dict): Data payload to send alone or with the
            notification message.

    Keyword Args:
        collapse_key (str, optional): Identifier for a group of messages that
            can be collapsed so only the last is sent when delivery resumes.
        delay_while_idle (bool, optional): If ``True``, do not send until the
            device becomes active.
        time_to_live (int, optional): Seconds the message is kept in FCM
            storage while the device is offline (max 4 weeks; ``None`` uses
            the FCM default of 4 weeks).
        low_priority (bool, optional): Send with the low-priority flag.
        restricted_package_name (str, optional): Package name the registration
            IDs must match to receive the message.
        dry_run (bool, optional): If ``True``, test the request without
            sending.

    Returns:
        tuple: ``(multicast_id, success, failure, canonical_ids, results)``
        from the FCM server.

    Raises:
        AuthenticationError: missing API key or sender authentication failure.
        FCMServerError: internal server or timeout error at FCM.
        InvalidDataError: invalid data provided.
        InternalPackageError: JSON parsing error inside pyfcm.
    """
    server_key = api_key if api_key is not None else SETTINGS.get("FCM_SERVER_KEY")
    service = FCMNotification(api_key=server_key, json_encoder=json_encoder)
    return service.multiple_devices_data_message(
        registration_ids=registration_ids,
        condition=condition,
        collapse_key=collapse_key,
        delay_while_idle=delay_while_idle,
        time_to_live=time_to_live,
        restricted_package_name=restricted_package_name,
        low_priority=low_priority,
        dry_run=dry_run,
        data_message=data_message,
        content_available=content_available,
        timeout=timeout,
    )
|
def _create_tar(self, name):
    """Create a TAR archive (gzipped when ``self.compress``) of all media
    files found via ``self._explore_storage()``.

    :param name: archive name recorded in the tarfile header.
    :return: spooled temporary file object containing the finished archive.
    """
    fileobj = utils.create_spooled_temporary_file()
    mode = 'w:gz' if self.compress else 'w'
    # Context manager guarantees the TAR is closed (trailer written on
    # success) even if reading a media file raises — the original leaked
    # the open TarFile on error.
    with tarfile.open(name=name, fileobj=fileobj, mode=mode) as tar_file:
        for media_filename in self._explore_storage():
            tarinfo = tarfile.TarInfo(media_filename)
            media_file = self.media_storage.open(media_filename)
            # NOTE(review): assumes the storage file object supports len();
            # Django File objects usually expose `.size` — confirm upstream.
            tarinfo.size = len(media_file)
            tar_file.addfile(tarinfo, media_file)
    return fileobj
|
def add_vertex(self, v, partition):
    """Add a vertex to the graph and record its partition membership.

    :param v: vertex name
    :param partition: partition to add to
    """
    # Register with the underlying graph first, then track the partition.
    self.graph.add_vertex(v)
    members = self.partitions[partition]
    members.add(v)
|
def new_figure_manager_given_figure(num, figure):
    """Create a new figure manager instance for the given figure."""
    # Wrap the figure in an Agg canvas, then hand it to the base manager.
    agg_canvas = FigureCanvasAgg(figure)
    return FigureManagerBase(agg_canvas, num)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.