signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def add_file_argument(self, *args, **kwargs):
    """Register an argument that represents the location of a file.

    Delegates to ``add_argument`` and records the created argument in
    ``self.file_args`` so file-typed arguments can be looked up later.

    :param args: positional arguments forwarded to ``add_argument``
    :param kwargs: keyword arguments forwarded to ``add_argument``
    :return: the argument object produced by ``add_argument``
    """
    argument = self.add_argument(*args, **kwargs)
    self.file_args.append(argument)
    return argument
|
def grad_local_log_likelihood(self, x):
    """Gradient of the Poisson log-likelihood with respect to x.

    d/dx [ y^T(Cx + d) - exp(Cx + d) ]
        = y^T C - exp(Cx + d)^T C
        = (y - lmbda)^T C
    """
    # Poisson rate: exp(Cx + D u)
    rate = np.exp(x.dot(self.C.T) + self.inputs.dot(self.D.T))
    return np.dot(self.data - rate, self.C)
|
def parse_reqtype(self):
    """Return the authentication request body.

    Builds the JSON body expected by the identity service based on which
    credential (token, password, or API key) is present in ``job_args``.

    :return: ``dict`` auth body (empty for v1.0 auth).
    :raises exceptions.AuthenticationProblem: if token auth is used without
        a tenant, or when no credential at all is supplied.
    """
    # v1.0 auth sends credentials via headers, so the body is empty.
    if self.job_args['os_auth_version'] == 'v1.0':
        return dict()
    else:
        setup = {'username': self.job_args.get('os_user')}
        # Check if any prefix items are set. A prefix should be a
        # dictionary with keys matching the os_* credential type.
        prefixes = self.job_args.get('os_prefix')
        # Credential precedence: token, then password, then API key.
        if self.job_args.get('os_token') is not None:
            auth_body = {'auth': {'token': {'id': self.job_args.get('os_token')}}}
            # Token auth requires an explicit tenant.
            if not self.job_args.get('os_tenant'):
                raise exceptions.AuthenticationProblem('To use token auth you must specify the tenant id. Set' ' the tenant ID with [ --os-tenant ]')
        elif self.job_args.get('os_password') is not None:
            setup['password'] = self.job_args.get('os_password')
            if prefixes:
                # The plugin must declare its own password prefix.
                prefix = prefixes.get('os_password')
                if not prefix:
                    raise NotImplementedError('the `password` method is not implemented for this' ' auth plugin')
            else:
                prefix = 'passwordCredentials'
            auth_body = {'auth': {prefix: setup}}
        elif self.job_args.get('os_apikey') is not None:
            setup['apiKey'] = self.job_args.get('os_apikey')
            if prefixes:
                # The plugin must declare its own apikey prefix.
                prefix = prefixes.get('os_apikey')
                if not prefix:
                    raise NotImplementedError('the `apikey` method is not implemented for this' ' auth plugin')
            else:
                prefix = 'apiKeyCredentials'
            auth_body = {'auth': {prefix: setup}}
        else:
            raise exceptions.AuthenticationProblem('No Password, APIKey, or Token Specified')
        # Tenant scoping applies to every credential type.
        if self.job_args.get('os_tenant'):
            auth = auth_body['auth']
            auth['tenantName'] = self.job_args.get('os_tenant')
        LOG.debug('AUTH Request body: [ %s ]', auth_body)
        return auth_body
|
def send(self, to, subject, body, reply_to=None, **kwargs):
    """Send a simple message via the configured mail provider."""
    if self.provider == "SES":
        # SES backend takes the message fields directly.
        self.mail.send(to=to, subject=subject, body=body,
                       reply_to=reply_to, **kwargs)
    elif self.provider == "FLASK-MAIL":
        # Flask-Mail wants a Message object; sender comes from app config.
        message = flask_mail.Message(
            recipients=to,
            subject=subject,
            body=body,
            reply_to=reply_to,
            sender=self.app.config.get("MAIL_DEFAULT_SENDER"),
        )
        self.mail.send(message)
|
def get_file_details(filepath, hash_algorithms=['sha256']):
    """Return the length and hash information of a file.

    <Purpose>
      Get a file's length and hash information.  Hashes are computed with the
      requested algorithms (sha256 by default).  Used by the signerlib.py and
      updater.py modules.
    <Arguments>
      filepath:
        Absolute file path of a file.
      hash_algorithms:
        List of hash algorithm names to compute (default ['sha256']).
        NOTE: the list default is safe here because it is never mutated.
    <Exceptions>
      securesystemslib.exceptions.FormatError: if an argument or the computed
        hash dict does not match its schema.
      securesystemslib.exceptions.Error: if 'filepath' does not exist.
    <Returns>
      A tuple (length, hashes) describing 'filepath'.
    """
    # Schema checks raise securesystemslib.exceptions.FormatError on mismatch.
    securesystemslib.formats.PATH_SCHEMA.check_match(filepath)
    securesystemslib.formats.HASHALGORITHMS_SCHEMA.check_match(hash_algorithms)
    # The returned file hashes of 'filepath'.
    file_hashes = {}
    if not os.path.exists(filepath):
        # Fixed message: the original concatenated literals rendered as
        # "doestnot exist." (typo + missing space).
        raise securesystemslib.exceptions.Error(
            'Path ' + repr(filepath) + ' does not exist.')
    filepath = os.path.abspath(filepath)
    # Obtain the length of the file.
    file_length = os.path.getsize(filepath)
    # Compute one digest per requested algorithm.
    for algorithm in hash_algorithms:
        digest_object = securesystemslib.hash.digest_filename(filepath, algorithm)
        file_hashes[algorithm] = digest_object.hexdigest()
    # Ensure 'file_hashes' corresponds to HASHDICT_SCHEMA.
    securesystemslib.formats.HASHDICT_SCHEMA.check_match(file_hashes)
    return file_length, file_hashes
|
def dump(self, validate=True):
    """Return the bencoded :attr:`metainfo` (the content of a torrent file).

    :param bool validate: whether to run :meth:`validate` first
    :return: :attr:`metainfo` as bencoded :class:`bytes`
    """
    if validate:
        self.validate()
    converted = self.convert()
    return bencode(converted)
|
def split(self, amt):
    """Split into two trades: one of size ``amt`` and one of ``qty - amt``.

    Fees are apportioned between the two trades by the ratio |amt / qty|.
    """
    fee_ratio = abs(amt / self.qty)
    first = Trade(self.tid, self.ts, amt, self.px,
                  fees=fee_ratio * self.fees, **self.kwargs)
    second = Trade(self.tid, self.ts, self.qty - amt, self.px,
                   fees=(1. - fee_ratio) * self.fees, **self.kwargs)
    return [first, second]
|
def plot_elbo(self, figsize=(15, 7)):
    """Plot the ELBO progress stored in ``self.elbo_records``.

    :param figsize: matplotlib figure size as a (width, height) tuple.
    """
    # Imported lazily so matplotlib is only required when plotting.
    import matplotlib.pyplot as plt
    plt.figure(figsize=figsize)
    plt.plot(self.elbo_records)
    plt.xlabel("Iterations")
    plt.ylabel("ELBO")
    plt.show()
|
def add(self, path):
    """Add a new resource with the given path to the resource set.

    Parameters:
        **path (str, unicode):** path of the resource to be protected
    Raises:
        TypeError when the path is not a string or a unicode string
    """
    # NOTE: the second isinstance test only runs for non-str values, so the
    # Python 2 `unicode` builtin is never evaluated for plain strings.
    if not isinstance(path, str) and not isinstance(path, unicode):
        raise TypeError('The value passed for parameter path is not a str' ' or unicode')
    new_resource = Resource(path)
    self.resources[path] = new_resource
    return new_resource
|
def get_view_root(view_name: str) -> XmlNode:
    '''Parses the xml file for a view and returns its root XmlNode.

    Results are memoized in _XML_CACHE keyed by the resolved file path.
    Any failure is translated into a ViewError (or annotated CoreError)
    carrying the view name for diagnostics.
    '''
    try:
        path = join(deps.views_folder, '{0}.{1}'.format(view_name, deps.view_ext))
        parser = Parser()
        if path not in _XML_CACHE:
            with open(path, 'rb') as xml_file:
                _XML_CACHE[path] = parser.parse(xml_file, view_name)
        return _XML_CACHE[path]
    except FileNotFoundError as error:
        # Rebind to a domain error so callers see a ViewError, not an OSError.
        error = ViewError('View is not found')
        error.add_info('View name', view_name)
        error.add_info('Path', path)
        raise error
    except CoreError as error:
        # Domain errors pass through with added view context.
        error.add_view_info(ViewInfo(view_name, None))
        raise
    except:
        # Deliberate catch-all: wrap anything else, chaining the original cause.
        info = exc_info()
        error = ViewError('Unknown error occured during parsing xml', ViewInfo(view_name, None))
        error.add_cause(info[1])
        raise error from info[1]
|
def standard_lstm_lm_200(dataset_name=None, vocab=None, pretrained=False, ctx=cpu(), root=os.path.join(get_home_dir(), 'models'), **kwargs):
    r"""Standard 2-layer LSTM language model with tied embedding and output weights.

    Both embedding and hidden dimensions are 200.

    Parameters
    ----------
    dataset_name : str or None, default None
        The dataset name on which the pre-trained model is trained.
        Options are 'wikitext-2'. If specified, then the returned vocabulary
        is extracted from the training set of the dataset.
        If None, then vocab is required, for specifying embedding weight
        size, and is directly returned.
        The pre-trained model achieves 108.25/102.26 ppl on Val and Test of
        wikitext-2 respectively.
    vocab : gluonnlp.Vocab or None, default None
        Vocabulary object to be used with the language model.
        Required when dataset_name is not specified.
    pretrained : bool, default False
        Whether to load the pre-trained weights for model.
    ctx : Context, default CPU
        The context in which to load the pre-trained weights.
    root : str, default '$MXNET_HOME/models'
        Location for keeping the model parameters.
        MXNET_HOME defaults to '~/.mxnet'.

    Returns
    -------
    gluon.Block, gluonnlp.Vocab
    """
    # Architecture is fixed for this named model; only 'dropout' may be
    # overridden by callers.
    predefined_args = {'embed_size': 200, 'hidden_size': 200, 'mode': 'lstm', 'num_layers': 2, 'tie_weights': True, 'dropout': 0.2}
    mutable_args = ['dropout']
    assert all((k not in kwargs or k in mutable_args) for k in predefined_args), 'Cannot override predefined model settings.'
    predefined_args.update(kwargs)
    return _get_rnn_model(StandardRNN, 'standard_lstm_lm_200', dataset_name, vocab, pretrained, ctx, root, **predefined_args)
|
def getuserurl(iduser, *args, **kwargs):
    """Build the request URL for Mambu Users.

    If iduser is set, you'll get a response adequate for a MambuUser object.
    If not set, you'll get a response adequate for a MambuUsers object.
    See mambuuser module and pydoc for further information.

    Currently implemented filter parameters:
    * fullDetails
    * branchId
    * limit
    * offset

    See Mambu official developer documentation for further details, and
    info on parameters that may be implemented here in the future.
    """
    getparams = []
    # Explicit membership tests replace the former broad
    # ``except Exception: pass`` wrappers, which also hid real errors while
    # implementing the same "absent kwarg means skip" behavior.
    if "fullDetails" in kwargs:
        # Only the exact value True yields fullDetails=true.
        if kwargs["fullDetails"] == True:
            getparams.append("fullDetails=true")
        else:
            getparams.append("fullDetails=false")
    if "branchId" in kwargs:
        getparams.append("branchId=%s" % kwargs["branchId"])
    if "offset" in kwargs:
        getparams.append("offset=%s" % kwargs["offset"])
    if "limit" in kwargs:
        getparams.append("limit=%s" % kwargs["limit"])
    # An empty iduser addresses the collection; otherwise a single user.
    useridparam = "" if iduser == "" else "/" + iduser
    url = getmambuurl(*args, **kwargs) + "users" + useridparam
    if getparams:
        url += "?" + "&".join(getparams)
    return url
|
def _read_frame(self):
    """Read a single frame from the trajectory.

    Advances the sectioned file to the next "Frame Number" marker, then
    parses the time/energy header and the per-atom coordinates into an
    ATRJFrame.  Units are converted to picoseconds, kcal/mol, and angstrom.
    """
    self._secfile.get_next("Frame Number")
    frame = ATRJFrame()
    # Read the time and energy
    energy_lines = self._secfile.get_next("Time/Energy")
    energy_words = energy_lines[0].split()
    frame.time = float(energy_words[0]) * picosecond
    frame.step = int(energy_words[1])
    frame.total_energy = float(energy_words[2]) * kcalmol
    # Read the coordinates; one line per atom, columns 1-3 are x, y, z.
    coord_lines = self._secfile.get_next("Coordinates")
    frame.coordinates = np.zeros((self.num_atoms, 3), float)
    for index, line in enumerate(coord_lines):
        words = line.split()
        frame.coordinates[index, 0] = float(words[1])
        frame.coordinates[index, 1] = float(words[2])
        frame.coordinates[index, 2] = float(words[3])
    frame.coordinates *= angstrom
    # Done
    return frame
|
def find_cubes(numbers):
    """Compute the cube of each number in the input list.

    Examples:
        >>> find_cubes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        [1, 8, 27, 64, 125, 216, 343, 512, 729, 1000]
        >>> find_cubes([10, 20, 30])
        [1000, 8000, 27000]
        >>> find_cubes([12, 15])
        [1728, 3375]

    Args:
        numbers: A list of integers.
    Returns:
        A list of cubes of each integer in the input list.
    """
    cube = lambda value: value ** 3
    return [cube(value) for value in numbers]
|
def console_load_xp(con: tcod.console.Console, filename: str) -> bool:
    """Update a console from a REXPaint `.xp` file.

    Returns True when the underlying C call reports success.
    """
    encoded_path = filename.encode("utf-8")
    result = lib.TCOD_console_load_xp(_console(con), encoded_path)
    return bool(result)
|
def get_dataset(self, dataset_id, ds_info, out=None):
    """Load a dataset.

    :param dataset_id: identifier whose ``name`` selects the variable when
        ``ds_info`` has no explicit ``file_key``.
    :param ds_info: dataset metadata dict; updated in place with units,
        platform_name and sensor, then used as container kwargs.
    :param out: optional masked array to fill; allocated when None.
    :return: an instance of the container class holding the (scaled) data.
    """
    var_path = ds_info.get('file_key', '{}'.format(dataset_id.name))
    dtype = ds_info.get('dtype', np.float32)
    if var_path + '/shape' not in self:
        # loading a scalar value
        shape = 1
    else:
        shape = self[var_path + '/shape']
        if shape[0] == 1:
            # Remove the time dimension from the dataset
            shape = shape[1], shape[2]
    file_units = ds_info.get('file_units')
    if file_units is None:
        try:
            file_units = self[var_path + '/attr/units']
            # they were almost completely CF compliant...
            if file_units == "none":
                file_units = "1"
        except KeyError:
            # no file units specified
            file_units = None
    if out is None:
        out = np.ma.empty(shape, dtype=dtype)
        # np.bool was removed from modern NumPy; the builtin is equivalent.
        out.mask = np.zeros(shape, dtype=bool)
    # Data is stored north-up; flip the leading axis.
    out.data[:] = np.require(self[var_path][0][::-1], dtype=dtype)
    valid_min = self[var_path + '/attr/valid_min']
    valid_max = self[var_path + '/attr/valid_max']
    try:
        scale_factor = self[var_path + '/attr/scale_factor']
        scale_offset = self[var_path + '/attr/add_offset']
    except KeyError:
        scale_factor = scale_offset = None
    if valid_min is not None and valid_max is not None:
        out.mask[:] |= (out.data < valid_min) | (out.data > valid_max)
    factors = (scale_factor, scale_offset)
    # Guard against missing attributes: previously (None, None) compared
    # unequal to (1, 0) and crashed on ``*= None``.
    if factors[0] is not None and (factors[0] != 1 or factors[1] != 0):
        out.data[:] *= factors[0]
        out.data[:] += factors[1]
    ds_info.update({
        "units": ds_info.get("units", file_units),
        "platform_name": PLATFORM_NAME.get(self['/attr/platform'], self['/attr/platform']),
        "sensor": SENSOR_NAME.get(self['/attr/sensor'], self['/attr/sensor']),
    })
    ds_info.update(dataset_id.to_dict())
    cls = ds_info.pop("container", Dataset)
    return cls(out, **ds_info)
|
def stationary_distribution(P, C=None, mincount_connectivity=0):
    """Simple estimator for the stationary distribution over multiple
    strongly connected sets.

    :param P: transition matrix.
    :param C: optional count matrix; required when P is not strongly
        connected, to weight each connected set by its observed counts.
    :param mincount_connectivity: minimum count for two states to be
        considered connected.
    :raises ValueError: if P is disconnected and no count matrix is given.
    :return: stationary distribution vector pi (sums to 1).
    """
    # can be replaced by msmtools.analysis.stationary_distribution in next
    # msmtools release
    from msmtools.analysis.dense.stationary_vector import stationary_distribution as msmstatdist
    if C is None:
        if is_connected(P, strong=True):
            return msmstatdist(P)
        else:
            raise ValueError('Computing stationary distribution for disconnected matrix. Need count matrix.')
    # disconnected sets
    n = np.shape(C)[0]
    ctot = np.sum(C)
    pi = np.zeros(n)
    # treat each weakly connected set separately
    sets = connected_sets(C, mincount_connectivity=mincount_connectivity, strong=False)
    for s in sets:
        # weight of this set = fraction of all counts observed in it
        w = np.sum(C[s, :]) / ctot
        pi[s] = w * msmstatdist(P[s, :][:, s])
    # reinforce normalization
    pi /= np.sum(pi)
    return pi
|
def get_similar(self, limit=None):
    """Returns similar tracks for this track on the network,
    based on listening data.

    :param limit: optional maximum number of similar tracks to request.
    :return: list of SimilarItem(Track, match) pairs.
    """
    params = self._get_params()
    if limit:
        params["limit"] = limit
    doc = self._request(self.ws_prefix + ".getSimilar", True, params)
    seq = []
    for node in doc.getElementsByTagName(self.ws_prefix):
        # First <name> is the track title, second is the artist name.
        title = _extract(node, "name")
        artist = _extract(node, "name", 1)
        match = _number(_extract(node, "match"))
        seq.append(SimilarItem(Track(artist, title, self.network), match))
    return seq
|
def set_xlsx_colwidths(worksheet, fld2col_widths, fldnames):
    """Set xlsx column widths from the fld2col_widths mapping.

    Columns whose field name has no entry in the mapping are left at the
    worksheet default.
    """
    for column_index, field_name in enumerate(fldnames):
        width = fld2col_widths.get(field_name)
        if width is not None:
            worksheet.set_column(column_index, column_index, width)
|
def lpc(blk, order=None):
    """Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object,
    the analysis whitening filter.  This implementation uses the autocorrelation
    method, using the Levinson-Durbin algorithm or Numpy pseudo-inverse for
    linear system solving, when needed.

    Parameters
    ----------
    blk :
      An iterable with well-defined length.  Don't use this function with
      Stream objects!
    order :
      The order of the resulting ZFilter object.  Defaults to ``len(blk) - 1``.

    Returns
    -------
    A FIR filter, as a ZFilter object.  The mean squared error over the given
    block is in its "error" attribute.

    Hint
    ----
    See ``lpc.kautocor`` example, which should apply equally for this strategy.

    See Also
    --------
    levinson_durbin :
      Levinson-Durbin algorithm for solving Yule-Walker equations (Toeplitz
      matrix linear system).
    lpc.nautocor :
      LPC coefficients from linear system solved with Numpy pseudo-inverse.
    lpc.kautocor :
      LPC coefficients obtained with Levinson-Durbin algorithm.
    """
    if order is None:
        # Apply the documented default; without this, ``None < 100`` raises
        # TypeError on Python 3.
        order = len(blk) - 1
    if order < 100:
        # Small orders: the direct pseudo-inverse solution is cheap and robust.
        return lpc.nautocor(blk, order)
    try:
        return lpc.kautocor(blk, order)
    except ParCorError:
        # Levinson-Durbin failed; fall back to the pseudo-inverse strategy.
        return lpc.nautocor(blk, order)
|
def pack_balance_proof_update(nonce: Nonce, balance_hash: BalanceHash, additional_hash: AdditionalHash, canonical_identifier: CanonicalIdentifier, partner_signature: Signature,) -> bytes:
    """Packs balance proof data to be signed for updateNonClosingBalanceProof.

    Packs the given arguments in a byte array in the same configuration the
    contracts expect the signed data for updateNonClosingBalanceProof to have:
    the standard balance-proof packing (with the BALANCE_PROOF_UPDATE message
    type) followed by the partner's signature.
    """
    return pack_balance_proof(nonce=nonce, balance_hash=balance_hash, additional_hash=additional_hash, canonical_identifier=canonical_identifier, msg_type=MessageTypeId.BALANCE_PROOF_UPDATE,) + partner_signature
|
def flush(self):
    """Flush the queue by handing batches of items to the sender's ``send``.

    Items are drained in batches of at most ``send_buffer_size``; a falsy
    item from ``get`` ends the current batch, and an empty batch stops the
    flush.  Does nothing when no sender is configured.
    """
    sender = self.sender
    if not sender:
        return
    while True:
        batch = []
        # Gather at most send_buffer_size items for this batch.
        while len(batch) < sender.send_buffer_size:
            entry = self.get()
            if not entry:
                break
            batch.append(entry)
        if not batch:
            break
        sender.send(batch)
|
def generate_supremacy_circuit_google_v2_grid(n_rows: int, n_cols: int, cz_depth: int, seed: int) -> circuits.Circuit:
    """Generates Google Random Circuits v2 as in github.com/sboixo/GRCS cz_v2.

    See also https://arxiv.org/abs/1807.10749

    Args:
        n_rows: number of rows of a 2D lattice.
        n_cols: number of columns.
        cz_depth: number of layers with CZ gates.
        seed: seed for the random instance.

    Returns:
        A circuit corresponding to instance
        inst_{n_rows}x{n_cols}_{cz_depth+1}_{seed}

        The mapping of qubits is cirq.GridQubit(j, k) -> q[j * n_cols + k]
        (as in the QASM mapping)
    """
    grid = [
        devices.GridQubit(row, col)
        for row in range(n_rows)
        for col in range(n_cols)
    ]
    return generate_supremacy_circuit_google_v2(grid, cz_depth, seed)
|
def get_assessment_parts_by_search(self, assessment_part_query, assessment_part_search):
    """Pass through to provider AssessmentPartSearchSession.get_assessment_parts_by_search.

    :raises PermissionDenied: when the current agent lacks 'search' authority.
    """
    # Implemented from azosid template for -
    # osid.resource.ResourceSearchSession.get_resources_by_search_template
    if not self._can('search'):
        raise PermissionDenied()
    return self._provider_session.get_assessment_parts_by_search(assessment_part_query, assessment_part_search)
|
def take_complement(list_, index_list):
    """Returns items in ``list_`` not indexed by ``index_list``.

    Builds a boolean mask from the indices, inverts it, and compresses the
    list through the inverted mask.
    """
    mask = not_list(index_to_boolmask(index_list, len(list_)))
    return compress(list_, mask)
|
def d2ASbr_dV2(dSbr_dVa, dSbr_dVm, Sbr, Cbr, Ybr, V, lam):
    """Computes 2nd derivatives of |complex power flow|**2 w.r.t. V.

    :param dSbr_dVa: first derivative of branch flows w.r.t. voltage angle.
    :param dSbr_dVm: first derivative of branch flows w.r.t. voltage magnitude.
    :param Sbr: complex branch power flows.
    :param Cbr, Ybr: branch connection and admittance matrices.
    :param V: complex bus voltages.
    :param lam: multiplier vector.
    :return: the four Hessian blocks (Haa, Hav, Hva, Hvv).
    """
    diaglam = spdiag(lam)
    diagSbr_conj = spdiag(conj(Sbr))
    # Second derivatives of the (unsquared) branch flows, weighted by
    # conj(Sbr) * lam.
    Saa, Sav, Sva, Svv = d2Sbr_dV2(Cbr, Ybr, V, diagSbr_conj * lam)
    # |S|^2 Hessian blocks: 2 * Re{ S'' + (S')^T diag(lam) conj(S') }.
    Haa = 2 * (Saa + dSbr_dVa.T * diaglam * conj(dSbr_dVa)).real()
    Hva = 2 * (Sva + dSbr_dVm.T * diaglam * conj(dSbr_dVa)).real()
    Hav = 2 * (Sav + dSbr_dVa.T * diaglam * conj(dSbr_dVm)).real()
    Hvv = 2 * (Svv + dSbr_dVm.T * diaglam * conj(dSbr_dVm)).real()
    return Haa, Hav, Hva, Hvv
|
def autodetect_format(content):
    """Return the format identifier for the given fragment, or raise
    FormatAutodetectionError when zero or multiple formats match."""
    guesses = set()
    for format_class in FORMAT_IDENTIFIER_TO_FORMAT_CLASS.values():
        candidate = format_class.guess_format(content)
        if candidate is not None:
            guesses.add(candidate)
    if not guesses:
        raise FormatAutodetectionError("No suitable formats")
    if len(guesses) > 1:
        raise FormatAutodetectionError("Multiple suitable formats (%r)" % guesses)
    return guesses.pop()
|
def diff(self):
    """Compute the difference between a PDA and a DFA.

    Complements and minimizes ``self.mmb``, intersects it with the PDA, and
    stores the result in ``self.mmc``.

    :return: the intersection automaton (also kept as ``self.mmc``).
    """
    self.mmb.complement(self.alphabet)
    self.mmb.minimize()
    # print statements converted to the function form so the module also
    # parses under Python 3 (single-argument print() behaves the same on 2).
    print('start intersection')
    self.mmc = self._intesect()
    print('end intersection')
    return self.mmc
|
def _module_info_from_proto(module_info_def, import_scope=None):
    """Deserializes `module_info_def` proto.

    Args:
      module_info_def: An instance of `module_pb2.SonnetModule`.
      import_scope: Optional `string`. Name scope to use.

    Returns:
      An instance of `ModuleInfo`.

    Raises:
      base_errors.ModuleInfoError: If the protobuf is of the wrong type or
        if some of its fields are missing.
    """
    graph = tf.get_default_graph()
    def prepend_name_scope(name_scope):
        # Re-root serialized names under the optional import scope.
        return ops.prepend_name_scope(name_scope, import_scope)
    def process_leafs(name):
        # Resolve each serialized leaf name back to its graph element.
        return _path_to_graph_element(prepend_name_scope(name), graph)
    connected_subgraphs = []
    # The list is shared with ModuleInfo and filled in below.
    module_info = ModuleInfo(module_name=module_info_def.module_name, scope_name=prepend_name_scope(module_info_def.scope_name), class_name=module_info_def.class_name, connected_subgraphs=connected_subgraphs)
    for connected_subgraph_def in module_info_def.connected_subgraphs:
        connected_subgraph = ConnectedSubGraph(module=module_info, name_scope=prepend_name_scope(connected_subgraph_def.name_scope), inputs=_nested_from_proto(connected_subgraph_def.inputs, process_leafs), outputs=_nested_from_proto(connected_subgraph_def.outputs, process_leafs))
        connected_subgraphs.append(connected_subgraph)
    return module_info
|
def msg(self, target, msg):
    """Sends a message to an user or channel.

    :param target: user or channel to send to.
    :type target: str
    :param msg: message to send.
    :type msg: str
    """
    payload = u'{0} :{1}'.format(target, msg)
    self.cmd(u'PRIVMSG', payload)
|
def spin_gen_op(oper, gauge):
    """Generates the generic spin matrices for the system.

    Mutates ``oper`` in place, adding the generic spin operators 'O', their
    daggers 'O_d', the products 'O_dO', and the spin-flip-and-hop operator
    'Sfliphop'.  Returns None.
    """
    slaves = len(gauge)
    # One generic spin matrix per slave, parameterized by its gauge value.
    oper['O'] = np.array([spin_gen(slaves, i, c) for i, c in enumerate(gauge)])
    # Dagger: transpose the matrix axes of each operator.
    oper['O_d'] = np.transpose(oper['O'], (0, 2, 1))
    oper['O_dO'] = np.einsum('...ij,...jk->...ik', oper['O_d'], oper['O'])
    oper['Sfliphop'] = spinflipandhop(slaves)
|
def request(self, method, uri, params=None, data=None, headers=None, auth=None, timeout=None, allow_redirects=False):
    """Makes an HTTP request to this domain.

    :param string method: The HTTP method.
    :param string uri: The HTTP uri.
    :param dict params: Query parameters.
    :param object data: The request body.
    :param dict headers: The HTTP headers.
    :param tuple auth: Basic auth tuple of (username, password)
    :param int timeout: The request timeout.
    :param bool allow_redirects: True if the client should follow HTTP
        redirects.
    """
    full_url = self.absolute_url(uri)
    return self.twilio.request(
        method,
        full_url,
        params=params,
        data=data,
        headers=headers,
        auth=auth,
        timeout=timeout,
        allow_redirects=allow_redirects,
    )
|
def generate(data, algorithms=(DEFAULT_ALOGRITHM,)):
    """Yields subresource integrity Hash objects for the given data &
    algorithms.

    >>> for ihash in generate(b"alert('Hello, world.');"):
    ...     print('%s %s' % (ihash.algorithm, ihash.b58digest))
    sha384 H8BRh8j48O9oYatfu5AZzq6A9RINhZO5H16dQZngK7T62em8MUt1FLm52t+eX6xO
    """
    for algorithm in algorithms:
        yield Hash.fromresource(data, algorithm)
|
def get_models(self, model, page=None):
    """Get all the models from the server.

    Args:
        model (string): The class as a string.
        page (string, optional): The page number as a string
    Returns:
        list: A list of instances of the requested model.
    """
    model_class = self._get_model_class(model)
    if page is None:
        return self._store.find_all(model_class)
    return self._store.find_all(model_class, params={'page': int(page)})
|
def rect(self, x: int, y: int, width: int, height: int, clear: bool, bg_blend: int = tcod.constants.BKGND_DEFAULT,) -> None:
    """Draw the background color on a rect, optionally clearing the text.

    If `clear` is True the affected tiles are changed to the space character.

    Args:
        x (int): The x coordinate from the left.
        y (int): The y coordinate from the top.
        width (int): Maximum width to render the text.
        height (int): Maximum lines to render the text.
        clear (bool): If True all text in the affected area will be removed.
        bg_blend (int): Background blending flag.

    .. deprecated:: 8.5
        Console methods which depend on console defaults have been
        deprecated.  Use :any:`Console.draw_rect` instead; calling this
        function will print a warning detailing which default values need
        to be made explicit.
    """
    # Emit the deprecation warning pointing callers at draw_rect.
    self.__deprecate_defaults("draw_rect", bg_blend, clear=bool(clear))
    lib.TCOD_console_rect(self.console_c, x, y, width, height, clear, bg_blend)
|
def _set_bridge_domain_type(self, v, load=False):
    """Setter method for bridge_domain_type, mapped from YANG variable
    /bridge_domain/bridge_domain_type (enumeration).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_bridge_domain_type is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_bridge_domain_type() directly.

    YANG Description: This leaf specifies Bridge Domain Type.
    """
    # This leaf is a list key: direct assignment is only allowed while
    # loading, not on an already-instantiated list entry.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" + " within an instantiated list")
    # Unwrap a typed value back to its native representation first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'p2mp': {'value': 1}, u'p2p': {'value': 2}},), is_leaf=True, yang_name="bridge-domain-type", rest_name="bridge-domain-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set the Bridge Domain Type', u'key-default': u'p2mp'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bridge-domain', defining_module='brocade-bridge-domain', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """bridge_domain_type must be of a type compatible with enumeration""", 'defined-type': "brocade-bridge-domain:enumeration", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'p2mp': {'value': 1}, u'p2p': {'value': 2}},), is_leaf=True, yang_name="bridge-domain-type", rest_name="bridge-domain-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set the Bridge Domain Type', u'key-default': u'p2mp'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-bridge-domain', defining_module='brocade-bridge-domain', yang_type='enumeration', is_config=True)""",})
    self.__bridge_domain_type = t
    # Notify the container that one of its leaves changed.
    if hasattr(self, '_set'):
        self._set()
|
def get_example(cls) -> dict:
    """Returns an example value for the Dict type.

    If an example isn't a defined attribute on the class we return a dict
    of example values based on each property's annotation.
    """
    if cls.example is not None:
        return cls.example
    return {name: prop.get_example() for name, prop in cls.properties.items()}
|
def end_date(self) -> Optional[datetime.date]:
    """Returns the end date of the set of intervals, or ``None`` if empty."""
    if not self.intervals:
        return None
    end = self.end_datetime()
    return end.date()
|
def first(self):
    """Return a (value, source) pair for the first object found for this
    view — the first element yielded by `resolve`.

    :raises NotFoundError: if no values are available.
    """
    resolved = self.resolve()
    try:
        return iter_first(resolved)
    except ValueError:
        raise NotFoundError(u"{0} not found".format(self.name))
|
def rotoreflection(axis, angle, origin=(0, 0, 0)):
    """Returns a roto-reflection symmetry operation.

    Args:
        axis (3x1 array): Axis of rotation / mirror normal
        angle (float): Angle in degrees
        origin (3x1 array): Point left invariant by roto-reflection.
            Defaults to (0, 0, 0).

    Return:
        Roto-reflection operation
    """
    # Compose a rotation about 'axis' with a reflection through the plane
    # whose normal is the same axis; both fix 'origin'.
    rot = SymmOp.from_origin_axis_angle(origin, axis, angle)
    refl = SymmOp.reflection(axis, origin)
    m = np.dot(rot.affine_matrix, refl.affine_matrix)
    return SymmOp(m)
|
def read_string(self, lpBaseAddress, nChars, fUnicode=False):
    """Reads an ASCII or Unicode string from the address space of the process.

    @see: L{peek_string}
    @type lpBaseAddress: int
    @param lpBaseAddress: Memory address to begin reading.
    @type nChars: int
    @param nChars: String length to read, in characters.
        Remember that Unicode strings have two byte characters.
    @type fUnicode: bool
    @param fUnicode: C{True} is the string is expected to be Unicode,
        C{False} if it's expected to be ANSI.
    @rtype: str, compat.unicode
    @return: String read from the process memory space.
    @raise WindowsError: On error an exception is raised.
    """
    # Unicode strings use two-byte characters, so double the byte count.
    byte_count = nChars * 2 if fUnicode else nChars
    raw = self.read(lpBaseAddress, byte_count)
    if fUnicode:
        raw = compat.unicode(raw, 'U16', 'ignore')
    return raw
|
def search_users(self, query, n=20, maxUsers=60):
    """Method to perform the searchfy searches.

    :param query: Query to be performed.
    :param n: Number of results per query.
    :param maxUsers: Max. number of users to be recovered.
    :return: List of processed user profiles.
    """
    # Connecting to the API
    api = self._connectToAPI()
    # Verifying the limits of the API
    self._rate_limit_status(api=api, mode="search_users")
    aux = []
    page = 0
    try:
        newUsers = api.search_users(query, n, page)
        # NOTE: the loop variable used to shadow the ``n`` parameter.
        for user in newUsers:
            aux.append(user._json)
        # Keep grabbing users until none are left or the cap is reached.
        # The original condition used ``&``, which due to operator
        # precedence evaluated ``maxUsers & len(newUsers)`` first.
        while len(aux) < maxUsers and len(newUsers) > 0:
            page += 1
            # print converted to function form for Python 3 compatibility.
            print("Getting page %s of new users..." % page)
            # Grabbing new users.
            newUsers = api.search_users(query, n, page)
            # Store raw JSON consistently with the first page (the original
            # mixed raw user objects into 'aux' here).
            aux.extend(user._json for user in newUsers)
    except Exception:
        # Best effort: return whatever was gathered before the failure.
        pass
    # Extracting the information from each profile.
    return [self._processUser(profile) for profile in aux]
|
def _annotate_somatic(data, retriever=None):
    """Annotate somatic calls if we have cosmic data installed.

    Returns True only for human, paired samples with an accessible COSMIC
    resource; False otherwise.
    """
    if not is_human(data):
        return False
    if not vcfutils.get_paired([data]):
        return False
    resources = dd.get_variation_resources(data)
    cosmic = resources.get("cosmic")
    if cosmic and objectstore.file_exists_or_remote(resources["cosmic"]):
        return True
    return False
|
def get_braintree_gateway_by_id(cls, braintree_gateway_id, **kwargs):
    """Find BraintreeGateway

    Return single instance of BraintreeGateway by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_braintree_gateway_by_id(braintree_gateway_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str braintree_gateway_id: ID of braintreeGateway to return (required)
    :return: BraintreeGateway
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the HTTP response to the deserialized data.
    kwargs['_return_http_data_only'] = True
    # The underlying call returns a thread when 'async' is set and the
    # deserialized object otherwise, so a single invocation covers both.
    return cls._get_braintree_gateway_by_id_with_http_info(braintree_gateway_id, **kwargs)
|
def _create_slack_with_env_var(env_var: EnvVar) -> SlackClient:
    """Create a :obj:`SlackClient` with a token from an env var."""
    token = os.getenv(env_var)
    # Guard clause: missing or empty token is an error.
    if not token:
        raise MissingToken(f"Could not acquire token from {env_var}")
    return SlackClient(token=token)
|
def iterate(self, scopes):
    """Yields ScopeInfo instances for the specified scopes, plus relevant related scopes.

    Relevant scopes are:
    - All tasks in a requested goal.
    - All subsystems tied to a request scope.

    Yields in a sensible order: sorted by scope, but with subsystems tied to a
    request scope following that scope, e.g.,
      goal1
      goal1.task11
      subsys.goal1.task11
      goal1.task12
      goal2.task21
    """
    # Expand goals into their tasks, then resolve each scope to its info.
    infos = [self._scope_to_info[s] for s in self._expand_tasks(scopes)]
    if not infos:
        return
    # Interleave subsystem scopes after the scopes they are tied to.
    for info in self._expand_subsystems(infos):
        yield info
|
def domain(self, expparams):
    """Returns a list of ``Domain``s, one for each input expparam.

    :param numpy.ndarray expparams: Array of experimental parameters. This
        array must be of dtype agreeing with the ``expparams_dtype``
        property, or, in the case where ``n_outcomes_constant`` is ``True``,
        ``None`` should be a valid input.
    :rtype: list of ``Domain``
    """
    domains = []
    for outcome_count in self.n_outcomes(expparams):
        # Outcomes for each experiment are the integers 0 .. n-1.
        domains.append(IntegerDomain(min=0, max=outcome_count - 1))
    return domains
|
def get_config(self, key, default=UndefinedKey):
    """Return tree config representation of value found at key

    :param key: key to use (dot separated). E.g., a.b.c
    :type key: basestring
    :param default: default value if key not found
    :type default: config
    :return: config value
    :type return: ConfigTree
    """
    found = self.get(key, default)
    # A missing-but-defaulted-to-None lookup passes through unchanged.
    if found is None:
        return None
    if isinstance(found, dict):
        return found
    raise ConfigException(u"{key} has type '{type}' rather than 'config'".format(key=key, type=type(found).__name__))
|
def http_method(self, data):
    """The HTTP method for this request."""
    method = data.upper()
    # Reject anything outside the supported verb set up front.
    if method not in ('DELETE', 'GET', 'POST', 'PUT'):
        raise AttributeError('Request Object Error: {} is not a valid HTTP method.'.format(method))
    self._http_method = method
    # set content type for commit methods (best guess)
    if method in ('POST', 'PUT') and self._headers.get('Content-Type') is None:
        self.add_header('Content-Type', 'application/json')
|
def long_press(self, on_element):
    """Long press on an element.

    :Args:
     - on_element: The element to long press.
    """
    def _press():
        # Deferred: executed only when the queued actions are performed.
        return self._driver.execute(Command.LONG_PRESS, {'element': on_element.id})
    self._actions.append(_press)
    # Return self so calls can be chained.
    return self
|
def imresize(img, size, interpolate="bilinear", channel_first=False):
    """Resize image by pil module.

    Args:
        img (numpy.ndarray): Image array to resize.
            Image shape is considered as (height, width, channel) for RGB or
            (height, width) for gray-scale by default.
        size (tuple of int): (width, height).
        interpolate (str):
            Must be one of
            ["nearest", "box", "bilinear", "hamming", "bicubic", "lanczos"].
        channel_first (bool):
            If True, the shape of ``img`` is interpreted as
            (channel, height, width); otherwise (height, width, channel).
            Defaults to False.

    Returns:
        numpy.ndarray whose shape is (size[1], size[0], channel) or
        (size[1], size[0]), with the same dtype as the input.
    """
    # Validate arguments and normalize the layout to channel-last.
    img = _imresize_before(img, size, channel_first, interpolate, list(interpolations_map.keys()))
    expand_flag = False
    if len(img.shape) == 3 and img.shape[-1] == 1:
        # (h, w, 1) cannot be handled by PIL.Image; temporarily squeeze to (h, w).
        img = img.reshape(img.shape[0], img.shape[1])
        expand_flag = True
    resample = interpolations_map[interpolate]
    if img.dtype == np.uint8:
        # uint8 images can be handed to PIL directly.
        resized = pil_resize_from_ndarray(img, size, resample)
    else:
        # PIL cannot resize arbitrary dtypes: resize in float32 (channel by
        # channel for multi-channel input), then cast back to the original dtype.
        dtype = img.dtype
        img_float32 = np.asarray(img, np.float32)
        if len(img.shape) == 3:
            resized = np.stack([pil_resize_from_ndarray(img_float32[..., i], size, resample) for i in range(img.shape[-1])], axis=2)
        else:
            resized = pil_resize_from_ndarray(img_float32, size, resample)
        resized = np.asarray(resized, dtype)
    if expand_flag:
        # Restore the squeezed single-channel axis.
        resized = resized[..., np.newaxis]
    # Restore channel-first layout if it was requested.
    return _imresize_after(resized, channel_first)
|
def visitStart(self, ctx: ShExDocParser.StartContext):
    """ start: KW_START '=' shapeExpression """
    # Parse the shape expression and record it as the schema's start shape.
    expression_parser = ShexShapeExpressionParser(self.context, None)
    expression_parser.visit(ctx.shapeExpression())
    self.context.schema.start = expression_parser.expr
|
def validate_unique(self, *args, **kwargs):
    """Checked whether more than one EighthSignup exists for a User on a given EighthBlock."""
    super(EighthSignup, self).validate_unique(*args, **kwargs)
    # No conflicting signup for this block: nothing more to check.
    if not self.has_conflict():
        return
    raise ValidationError({NON_FIELD_ERRORS: ("EighthSignup already exists for the User and the EighthScheduledActivity's block",)})
|
def track(*args):
    """Track additional files. It is often useful to use glob.glob here.
    For instance:

        track('config.ini', glob.glob('templates/*.pt'), glob.glob('db/*.db'))

    @param args: A list where each element is either a filename or an
        iterable of filenames
    """
    for arg in args:
        # A bare string is treated as a single filename.
        filenames = [arg] if isinstance(arg, str) else arg
        for filename in filenames:
            realname, modified_time = _get_filename_and_modified(filename)
            # Record each resolvable file once; keep the first-seen mtime.
            if realname and realname not in _process_files:
                _process_files[realname] = modified_time
|
def build_schema_mof(self, schema_classes):
    """Build a string that includes the ``#pragma include`` statements for the
    DMTF schema CIM classes defined in `schema_classes` using the DMTF CIM
    schema defined by this object.

    The class names in this list can be just leaf classes. The pywbem
    MOF compiler will search for dependent classes.

    It builds a compilable MOF string in the form::

        #pragma locale ("en_US")
        #pragma include ("System/CIM_ComputerSystem.mof")

    with a ``#pragma include`` for each classname in the `schema_classes`
    list.

    Parameters:

      schema_classes (:term:`py:list` of :term:`string` or :term:`string`):
        Class names of classes in the DMTF CIM schema represented by this
        object. A plain string is accepted for a single class name.

    Returns:

      :term:`string`: Valid MOF containing pragma statements for all of the
      classes in `schema_classes`.

    Raises:

      ValueError: If any of the classnames in `schema_classes` are not in
        the DMTF CIM schema installed.
    """
    if isinstance(schema_classes, six.string_types):
        schema_classes = [schema_classes]
    # Read the schema's top-level MOF file once; it carries one
    # '#pragma include' line per class in the schema.
    with open(self.schema_mof_file, 'r') as f:
        schema_lines = f.readlines()
    output_lines = ['#pragma locale ("en_US")\n']
    for cln in schema_classes:
        # Match on '/<classname>.mof' so substrings of other class names
        # cannot match. May contain Unicode.
        test_cln = '/{0}.mof'.format(cln)
        for line in schema_lines:
            if line.find(test_cln) != -1:
                output_lines.append(line)
                break
        else:
            # for/else: no include line matched this class name.
            raise ValueError(
                _format("Class {0!A} not in DMTF CIM schema {1!A}",
                        cln, self.schema_mof_file))
    return ''.join(output_lines)
|
def can_approve(self, user, **data):
    """Admins of repository service or sys admins can approve a repository.

    :param user: a User
    :param data: data that the user wants to update
    :return: delivered via a raised ``Return``: True when the user is an
        org admin of the service's organisation, or a reseller submitting a
        pre-verified update; False otherwise (including unknown services).
    """
    # Prefer a service id supplied in the update payload over the one
    # already stored on this repository.
    service_id = data.get('service_id', self.service_id)
    try:
        service = yield Service.get(service_id)
        is_repo_admin = user.is_org_admin(service.organisation_id)
        # Resellers may approve when the update is marked pre-verified.
        is_reseller_preverifying = user.is_reseller() and data.get('pre_verified', False)
        # Return (not couch.NotFound) propagates out of the try block.
        raise Return(is_repo_admin or is_reseller_preverifying)
    except couch.NotFound:
        # Unknown service: fall through and deny approval.
        pass
    raise Return(False)
|
def register(self, service, provider, singleton=False):
    """Registers a service provider for a given service.

    @param service
        A key that identifies the service being registered.
    @param provider
        This is either the service being registered, or a callable that will
        either instantiate it or return it.
    @param singleton
        Indicates that the service is to be registered as a singleton.
        This is only relevant if the provider is a callable. Services that
        are not callable will always be registered as singletons.
    """
    def singleton_provider(*args, **kwargs):
        # Reuse the cached instance; build and cache it on first request.
        instance = self._get_singleton(service)
        if not instance:
            instance = self._get_instantiator(provider)(*args, **kwargs)
            self._set_singleton(service, instance)
        return instance

    # Providers are always registered as callables.
    if not callable(provider):
        # Wrap a plain value so the registry stores a callable.
        self._set_provider(service, lambda *args, **kwargs: provider)
        return
    if singleton:
        self._set_provider(service, singleton_provider)
    else:
        self._set_provider(service, self._get_instantiator(provider))
|
def getenv(key, value=None):
    """Like `os.getenv` but returns unicode under Windows + Python 2

    Args:
        key (pathlike): The env var to get
        value (object): The value to return if the env var does not exist
    Returns:
        `fsnative` or `object`:
            The env var or the passed value if it doesn't exist
    """
    key = path2fsn(key)
    # Everywhere except Windows+py2, the plain stdlib lookup is correct.
    if not (is_win and PY2):
        return os.getenv(key, value)
    # Windows + py2: use the unicode-aware environ wrapper.
    return environ.get(key, value)
|
def get_module_data_path(modname, relpath=None, attr_name='DATAPATH'):
    """Return module *modname* data path

    Note: relpath is ignored if module has an attribute named *attr_name*

    Handles py2exe/cx_Freeze distributions
    """
    explicit = getattr(sys.modules[modname], attr_name, '')
    if explicit:
        # Module declares its data path explicitly; relpath is ignored.
        return explicit
    datapath = get_module_path(modname)
    parentdir = osp.join(datapath, osp.pardir)
    if osp.isfile(parentdir):
        # Parent "directory" is actually the 'library.zip' file: this is
        # either a py2exe or a cx_Freeze distribution.
        datapath = osp.abspath(osp.join(osp.join(parentdir, osp.pardir), modname))
    if relpath is not None:
        datapath = osp.abspath(osp.join(datapath, relpath))
    return datapath
|
def setCurrentSchemaColumn(self, column):
    """Sets the current item based on the inputed column.

    :param column: <orb.Column> || None
    :return: <bool> True if this item or one of its descendants matched.
    """
    # This item matches: select it in the tree.
    if column == self._column:
        self.treeWidget().setCurrentItem(self)
        return True
    # Otherwise search the children, expanding the path down to a match.
    for c in range(self.childCount()):
        if self.child(c).setCurrentSchemaColumn(column):
            self.setExpanded(True)
            return True
    # Fixed: previously returned None here while returning True elsewhere;
    # return False for a consistent boolean interface (truthiness for
    # existing callers is unchanged).
    return False
|
def create_subscription(self, client_id, client_secret, callback_url, object_type=model.Subscription.OBJECT_TYPE_ACTIVITY, aspect_type=model.Subscription.ASPECT_TYPE_CREATE, verify_token=model.Subscription.VERIFY_TOKEN_DEFAULT):
    """Creates a webhook event subscription.

    http://strava.github.io/api/partner/v3/events/#create-a-subscription

    :param client_id: application's ID, obtained during registration
    :type client_id: int
    :param client_secret: application's secret, obtained during registration
    :type client_secret: str
    :param callback_url: callback URL where Strava will first send a GET
        request to validate, then subsequently send POST requests with updates
    :type callback_url: str
    :param object_type: object_type (currently only `activity` is supported)
    :type object_type: str
    :param aspect_type: object_type (currently only `create` is supported)
    :type aspect_type: str
    :param verify_token: a token you can use to verify Strava's GET callback request
    :type verify_token: str
    :return: An instance of :class:`stravalib.model.Subscription`.
    :rtype: :class:`stravalib.model.Subscription`

    Notes:
        `object_type` and `aspect_type` are given defaults because there is
        currently only one valid value for each. `verify_token` defaults in
        case the author doesn't want to specify one. The application must
        have permission to use the webhook API; access can be requested by
        contacting developers-at-strava.com.
    """
    params = {
        'client_id': client_id,
        'client_secret': client_secret,
        'callback_url': callback_url,
        'object_type': object_type,
        'aspect_type': aspect_type,
        'verify_token': verify_token,
    }
    # Subscriptions are managed on the dedicated webhook server.
    raw = self.protocol.post('/push_subscriptions', use_webhook_server=True, **params)
    return model.Subscription.deserialize(raw, bind_client=self)
|
def add(self, event_state, event_type, event_value, proc_list=None, proc_desc="", peak_time=6):
    """Add a new item to the logs list.

    If 'event' is a 'new one', add it at the beginning of the list.
    If 'event' is not a 'new one', update the list.
    If event < peak_time then the alert is not set.
    """
    # Fall back to the global process list when none (or empty) is given.
    proc_list = proc_list or glances_processes.getlist()
    # Add or update the log
    event_index = self.__event_exist(event_type)
    if event_index >= 0:
        # Known event type: refresh the existing entry in place.
        self._update_event(event_index, event_state, event_type, event_value, proc_list, proc_desc, peak_time)
    else:
        # Unknown event type: create a new entry.
        self._create_event(event_state, event_type, event_value, proc_list, proc_desc, peak_time)
    return self.len()
|
def pipupdate():
    """Update all currently installed pip packages.

    Fixes two defects in the original implementation:
    - ``working_set`` yields ``Distribution`` objects, so joining them into
      a string raised ``TypeError``; the project names are extracted instead.
    - the command was passed as a single string without ``shell=True``,
      which fails on POSIX; an argument list is used instead.
    """
    packages = [dist.project_name for dist in pkg_resources.working_set]
    if not packages:
        # Nothing installed: avoid invoking pip with no targets.
        return
    subprocess.call(['pip', 'install', '--upgrade'] + packages)
|
def equal_args(*args, **kwargs):
    """A memoized key factory that compares the equality (`==`) of a stable sort of the parameters."""
    # Positional arguments alone form the key; keyword arguments are
    # appended after a separator in a stable (sorted) order.
    if not kwargs:
        return args
    return args + _kwargs_separator + tuple(sorted(kwargs.items()))
|
def get_django_response(proxy_response, strict_cookies=False):
    """Build a Django response from an upstream proxy response.

    A :class:`django.http.StreamingHttpResponse` is created when the
    upstream content is large enough to stream (as decided by
    ``should_stream``); otherwise a plain :class:`django.http.HttpResponse`
    is returned.

    :param proxy_response: An instance of urllib3.response.HTTPResponse used
        to build the Django response
    :param strict_cookies: Whether to only accept RFC-compliant cookies
    :returns: An appropriate Django response for the proxied content
    """
    status = proxy_response.status
    headers = proxy_response.headers
    logger.debug('Proxy response headers: %s', headers)
    content_type = headers.get('Content-Type')
    logger.debug('Content-Type: %s', content_type)
    if should_stream(proxy_response):
        # Large body: stream it through in DEFAULT_AMT-sized chunks.
        logger.info('Content-Length is bigger than %s', DEFAULT_AMT)
        response = StreamingHttpResponse(proxy_response.stream(DEFAULT_AMT), status=status, content_type=content_type)
    else:
        response = HttpResponse(proxy_response.data or b'', status=status, content_type=content_type)
    logger.info('Normalizing response headers')
    set_response_headers(response, headers)
    logger.debug('Response headers: %s', getattr(response, '_headers'))
    cookies = proxy_response.headers.getlist('set-cookie')
    logger.info('Checking for invalid cookies')
    for cookie_string in cookies:
        cookie_dict = cookie_from_string(cookie_string, strict_cookies=strict_cookies)
        # cookie_from_string returns None for an invalid cookie.
        if cookie_dict:
            response.set_cookie(**cookie_dict)
    logger.debug('Response cookies: %s', response.cookies)
    return response
|
def get_token_info(self):
    """Get information about the current access token."""
    url = self._format_url(OAUTH2_ROOT + 'token_info', {'token': self.creds['access_token']})
    response = requests.get(url)
    data = response.json()
    # Non-200 responses carry an error payload describing the failure.
    if response.status_code == 200:
        return data
    raise _token_error_from_data(data)
|
def hash_file(tar_filename):
    """Write a `<tar_filename>.sha1` companion file containing the SHA-1 of the file."""
    hasher = sha1()
    # Read in BLOCKSIZE chunks so arbitrarily large files fit in memory.
    with open(tar_filename, 'rb') as afile:
        for chunk in iter(lambda: afile.read(BLOCKSIZE), b''):
            hasher.update(chunk)
    data = HASHTMPL % (hasher.hexdigest(), os.path.basename(tar_filename))
    create_file('%s.sha1' % tar_filename, data)
|
def mksls(src, dst=None):
    '''
    Convert a kickstart file to an SLS file.

    :param src: path of the kickstart file to parse.
    :param dst: optional path to write the generated SLS to; when omitted,
        the SLS document is returned as a YAML string instead.
    :return: YAML string when ``dst`` is None, otherwise ``None``.
    '''
    mode = 'command'
    sls = {}
    ks_opts = {}

    with salt.utils.files.fopen(src, 'r') as fh_:
        for line in fh_:
            # Comments are ignored in every section.
            if line.startswith('#'):
                continue

            if mode == 'command':
                if line.startswith('auth ') or line.startswith('authconfig '):
                    ks_opts['auth'] = parse_auth(line)
                elif line.startswith('autopart'):
                    ks_opts['autopath'] = parse_autopart(line)
                elif line.startswith('autostep'):
                    ks_opts['autostep'] = parse_autostep(line)
                elif line.startswith('bootloader'):
                    ks_opts['bootloader'] = parse_bootloader(line)
                elif line.startswith('btrfs'):
                    ks_opts['btrfs'] = parse_btrfs(line)
                elif line.startswith('cdrom'):
                    ks_opts['cdrom'] = True
                elif line.startswith('clearpart'):
                    ks_opts['clearpart'] = parse_clearpart(line)
                elif line.startswith('cmdline'):
                    ks_opts['cmdline'] = True
                elif line.startswith('device'):
                    ks_opts['device'] = parse_device(line)
                elif line.startswith('dmraid'):
                    ks_opts['dmraid'] = parse_dmraid(line)
                elif line.startswith('driverdisk'):
                    ks_opts['driverdisk'] = parse_driverdisk(line)
                elif line.startswith('firewall'):
                    ks_opts['firewall'] = parse_firewall(line)
                elif line.startswith('firstboot'):
                    ks_opts['firstboot'] = parse_firstboot(line)
                elif line.startswith('group'):
                    ks_opts['group'] = parse_group(line)
                elif line.startswith('graphical'):
                    ks_opts['graphical'] = True
                elif line.startswith('halt'):
                    ks_opts['halt'] = True
                elif line.startswith('harddrive'):
                    ks_opts['harddrive'] = True
                elif line.startswith('ignoredisk'):
                    ks_opts['ignoredisk'] = parse_ignoredisk(line)
                elif line.startswith('install'):
                    ks_opts['install'] = True
                # 'iscsiname' must be tested before 'iscsi': every iscsiname
                # line also starts with 'iscsi', so the previous ordering
                # made the iscsiname branch unreachable.
                elif line.startswith('iscsiname'):
                    ks_opts['iscsiname'] = parse_iscsiname(line)
                elif line.startswith('iscsi'):
                    ks_opts['iscsi'] = parse_iscsi(line)
                elif line.startswith('keyboard'):
                    ks_opts['keyboard'] = parse_keyboard(line)
                elif line.startswith('lang'):
                    ks_opts['lang'] = parse_lang(line)
                elif line.startswith('logvol'):
                    ks_opts.setdefault('logvol', []).append(parse_logvol(line))
                elif line.startswith('logging'):
                    ks_opts['logging'] = parse_logging(line)
                elif line.startswith('mediacheck'):
                    ks_opts['mediacheck'] = True
                elif line.startswith('monitor'):
                    ks_opts['monitor'] = parse_monitor(line)
                elif line.startswith('multipath'):
                    ks_opts['multipath'] = parse_multipath(line)
                elif line.startswith('network'):
                    ks_opts.setdefault('network', []).append(parse_network(line))
                elif line.startswith('nfs'):
                    ks_opts['nfs'] = True
                elif line.startswith('part ') or line.startswith('partition'):
                    ks_opts.setdefault('part', []).append(parse_partition(line))
                elif line.startswith('poweroff'):
                    ks_opts['poweroff'] = True
                elif line.startswith('raid'):
                    ks_opts.setdefault('raid', []).append(parse_raid(line))
                elif line.startswith('reboot'):
                    ks_opts['reboot'] = parse_reboot(line)
                elif line.startswith('repo'):
                    ks_opts['repo'] = parse_repo(line)
                elif line.startswith('rescue'):
                    ks_opts['rescue'] = parse_rescue(line)
                elif line.startswith('rootpw'):
                    ks_opts['rootpw'] = parse_rootpw(line)
                elif line.startswith('selinux'):
                    ks_opts['selinux'] = parse_selinux(line)
                elif line.startswith('services'):
                    ks_opts['services'] = parse_services(line)
                elif line.startswith('shutdown'):
                    ks_opts['shutdown'] = True
                elif line.startswith('sshpw'):
                    ks_opts['sshpw'] = parse_sshpw(line)
                elif line.startswith('skipx'):
                    ks_opts['skipx'] = True
                elif line.startswith('text'):
                    ks_opts['text'] = True
                elif line.startswith('timezone'):
                    ks_opts['timezone'] = parse_timezone(line)
                elif line.startswith('updates'):
                    ks_opts['updates'] = parse_updates(line)
                elif line.startswith('upgrade'):
                    ks_opts['upgrade'] = parse_upgrade(line)
                elif line.startswith('url'):
                    ks_opts['url'] = True
                elif line.startswith('user'):
                    ks_opts['user'] = parse_user(line)
                elif line.startswith('vnc'):
                    ks_opts['vnc'] = parse_vnc(line)
                elif line.startswith('volgroup'):
                    ks_opts['volgroup'] = parse_volgroup(line)
                elif line.startswith('xconfig'):
                    ks_opts['xconfig'] = parse_xconfig(line)
                elif line.startswith('zerombr'):
                    ks_opts['zerombr'] = True
                elif line.startswith('zfcp'):
                    ks_opts['zfcp'] = parse_zfcp(line)

            if line.startswith('%include'):
                # setdefault fixes the KeyError that the old
                # `if not ks_opts['include']` raised on the first %include.
                rules = shlex.split(line)
                ks_opts.setdefault('include', []).append(rules[1])

            if line.startswith('%ksappend'):
                # Same KeyError fix as for %include above.
                rules = shlex.split(line)
                ks_opts.setdefault('ksappend', []).append(rules[1])

            if line.startswith('%packages'):
                mode = 'packages'
                if 'packages' not in ks_opts:
                    ks_opts['packages'] = {'packages': {}}
                parser = argparse.ArgumentParser()
                opts = shlex.split(line)
                opts.pop(0)
                parser.add_argument('--default', dest='default', action='store_true')
                parser.add_argument('--excludedocs', dest='excludedocs', action='store_true')
                parser.add_argument('--ignoremissing', dest='ignoremissing', action='store_true')
                parser.add_argument('--instLangs', dest='instLangs', action='store')
                parser.add_argument('--multilib', dest='multilib', action='store_true')
                parser.add_argument('--nodefaults', dest='nodefaults', action='store_true')
                parser.add_argument('--optional', dest='optional', action='store_true')
                parser.add_argument('--nobase', dest='nobase', action='store_true')
                args = clean_args(vars(parser.parse_args(opts)))
                ks_opts['packages']['options'] = args
                continue

            if line.startswith('%pre'):
                mode = 'pre'
                parser = argparse.ArgumentParser()
                opts = shlex.split(line)
                opts.pop(0)
                parser.add_argument('--interpreter', dest='interpreter', action='store')
                parser.add_argument('--erroronfail', dest='erroronfail', action='store_true')
                parser.add_argument('--log', dest='log', action='store')
                args = clean_args(vars(parser.parse_args(opts)))
                ks_opts['pre'] = {'options': args, 'script': ''}
                continue

            if line.startswith('%post'):
                mode = 'post'
                parser = argparse.ArgumentParser()
                opts = shlex.split(line)
                opts.pop(0)
                parser.add_argument('--nochroot', dest='nochroot', action='store_true')
                parser.add_argument('--interpreter', dest='interpreter', action='store')
                parser.add_argument('--erroronfail', dest='erroronfail', action='store_true')
                parser.add_argument('--log', dest='log', action='store')
                args = clean_args(vars(parser.parse_args(opts)))
                ks_opts['post'] = {'options': args, 'script': ''}
                continue

            if line.startswith('%end'):
                mode = None

            if mode == 'packages':
                if line.startswith('-'):
                    # Leading '-' marks a package to exclude.
                    package = line.replace('-', '', 1).strip()
                    ks_opts['packages']['packages'][package] = False
                else:
                    ks_opts['packages']['packages'][line.strip()] = True
            if mode == 'pre':
                ks_opts['pre']['script'] += line
            if mode == 'post':
                ks_opts['post']['script'] += line

    # The sections below are guarded so kickstart files that omit a
    # directive no longer crash with KeyError.
    # Set language
    if 'lang' in ks_opts:
        sls[ks_opts['lang']['lang']] = {'locale': ['system']}
    # Set keyboard
    if 'keyboard' in ks_opts:
        sls[ks_opts['keyboard']['xlayouts']] = {'keyboard': ['system']}
    # Set timezone
    if 'timezone' in ks_opts:
        sls[ks_opts['timezone']['timezone']] = {'timezone': ['system']}
        if 'utc' in ks_opts['timezone']:
            sls[ks_opts['timezone']['timezone']]['timezone'].append('utc')
    # Set network
    if 'network' in ks_opts:
        for interface in ks_opts['network']:
            device = interface.get('device', None)
            if device is not None:
                del interface['device']
                sls[device] = {'proto': interface['bootproto']}
                del interface['bootproto']
                if 'onboot' in interface:
                    if 'no' in interface['onboot']:
                        sls[device]['enabled'] = False
                    else:
                        sls[device]['enabled'] = True
                    del interface['onboot']
                if 'noipv4' in interface:
                    sls[device]['ipv4'] = {'enabled': False}
                    del interface['noipv4']
                if 'noipv6' in interface:
                    sls[device]['ipv6'] = {'enabled': False}
                    del interface['noipv6']
                for option in interface:
                    if isinstance(interface[option], bool):
                        sls[device][option] = {'enabled': [interface[option]]}
                    else:
                        sls[device][option] = interface[option]
            if 'hostname' in interface:
                sls['system'] = {
                    'network.system': {
                        'enabled': True,
                        'hostname': interface['hostname'],
                        'apply_hostname': True,
                    }
                }
    # Set selinux
    if 'selinux' in ks_opts:
        for se_mode in ks_opts['selinux']:
            sls[se_mode] = {'selinux': ['mode']}
    # Get package data together
    pkg_section = ks_opts.get('packages', {})
    if 'nobase' not in pkg_section.get('options', {}):
        sls['base'] = {'pkg_group': ['installed']}
    for package, wanted in pkg_section.get('packages', {}).items():
        if not package:
            continue
        if wanted is True:
            if package.startswith('@'):
                # '@group' entries are package groups.
                pkg_group = package.replace('@', '', 1)
                sls[pkg_group] = {'pkg_group': ['installed']}
            else:
                sls[package] = {'pkg': ['installed']}
        elif wanted is False:
            # Previously unreachable: a blanket `continue` on falsy values
            # meant excluded ('-pkg') entries never produced 'absent' states.
            sls[package] = {'pkg': ['absent']}

    if dst:
        with salt.utils.files.fopen(dst, 'w') as fp_:
            salt.utils.yaml.safe_dump(sls, fp_, default_flow_style=False)
    else:
        return salt.utils.yaml.safe_dump(sls, default_flow_style=False)
|
def visit_For(self, node):
    """Handle iterator variable in for loops.

    Iterate variable may be the correct one at the end of the loop.
    """
    body = node.body
    if node.target.id in self.naming:
        # Target already aliases something: model the loop as a conditional
        # whose body first rebinds the target from the iterable.
        body = [ast.Assign(targets=[node.target], value=node.iter)] + body
        self.visit_any_conditionnal(body, node.orelse)
    else:
        # First binding of the target: record its dependence on the
        # iterable before visiting the loop body and the orelse clause.
        iter_dep = self.visit(node.iter)
        self.naming[node.target.id] = iter_dep
        self.visit_any_conditionnal(body, body + node.orelse)
|
def connect(self):
    '''Connects to the IRC server with the options defined in `config`'''
    self._connect()
    try:
        try:
            self._listen()
        except (KeyboardInterrupt, SystemExit):
            # A user-requested shutdown is not an error.
            pass
    finally:
        # Always tear the connection down, even on unexpected errors.
        self.close()
|
def identical(args):
    """%prog identical *.fasta

    Given multiple fasta files, find all the exactly identical records
    based on the computed md5 hexdigest or GCG checksum of each sequence.

    Output is an N + 1 column file (where N = number of input fasta files).
    If there are duplicates within a given fasta file, they will all be
    listed out in the same row separated by a comma.

    Example output:
    ---------------------------
           tta1.fsa    tta2.fsa
    t0     2131        na
    t1     3420        na
    t2     3836,3847   852
    t3     148         890
    t4     584         614
    t5     623         684
    t6     1281        470
    t7     3367        na
    """
    from jcvi.utils.cbook import AutoVivification

    allowed_checksum = ["MD5", "GCG"]
    p = OptionParser(identical.__doc__)
    p.add_option("--ignore_case", default=False, action="store_true", help="ignore case when comparing sequences [default: %default]")
    p.add_option("--ignore_N", default=False, action="store_true", help="ignore N and X's when comparing sequences [default: %default]")
    p.add_option("--ignore_stop", default=False, action="store_true", help="ignore stop codon when comparing sequences [default: %default]")
    p.add_option("--output_uniq", default=False, action="store_true", help="output uniq sequences in FASTA format" + " [default: %default]")
    p.add_option("--checksum", default="MD5", choices=allowed_checksum, help="specify checksum method [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) == 0:
        sys.exit(not p.print_help())
    # d maps sequence checksum -> {'seq', 'count', 'names': {file_prefix: set(record names)}}
    d = AutoVivification()
    files = []
    for fastafile in args:
        f = Fasta(fastafile)
        pf = fastafile.rsplit(".", 1)[0]
        files.append(pf)
        logging.debug("Hashing individual elements of {0}".format(fastafile))
        for name, rec in f.iteritems_ordered():
            # Strip embedded spaces before hashing the sequence text.
            seq = re.sub(' ', '', str(rec.seq))
            hashed = hash_fasta(seq, ignore_case=opts.ignore_case, ignore_N=opts.ignore_N, ignore_stop=opts.ignore_stop, checksum=opts.checksum)
            if not d[hashed]:
                d[hashed]['seq'] = seq
                d[hashed]['count'] = 0
            if not d[hashed]['names'][pf]:
                d[hashed]['names'][pf] = set()
            d[hashed]['names'][pf].add(name)
    fw = must_open(opts.outfile, "w")
    if opts.output_uniq:
        uniqfile = "_".join(files) + ".uniq.fasta"
        uniqfw = must_open(uniqfile, "w")
    # Header row: one column per input fasta file.
    header = "\t".join(str(x) for x in (args))
    print("\t".join(str(x) for x in ("", header)), file=fw)
    for idx, hashed in enumerate(d.keys()):
        line = []
        # Row label t0, t1, ... for each distinct sequence.
        line.append("t{0}".format(idx))
        for fastafile in files:
            if fastafile in d[hashed]['names'].keys():
                # Duplicates within a single file share one comma-joined cell.
                line.append(",".join(d[hashed]['names'][fastafile]))
                if opts.output_uniq:
                    d[hashed]['count'] += len(d[hashed]['names'][fastafile])
            else:
                line.append("na")
        print("\t".join(line), file=fw)
        if opts.output_uniq:
            # Emit one representative record per distinct sequence, with the
            # total occurrence count embedded in the record id.
            seqid = "\t".join(str(x) for x in ("t{0}".format(idx), d[hashed]['count']))
            rec = SeqRecord(Seq(d[hashed]['seq']), id=seqid, description="")
            SeqIO.write([rec], uniqfw, "fasta")
    fw.close()
    if opts.output_uniq:
        logging.debug("Uniq sequences written to `{0}`".format(uniqfile))
        uniqfw.close()
|
def from_array(array):
    """Deserialize a new ForceReply from a given dictionary.

    :return: new ForceReply instance.
    :rtype: ForceReply
    """
    # `not array` also covers None, so a single check suffices.
    if not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    selective = array.get('selective')
    data = {
        'force_reply': bool(array.get('force_reply')),
        'selective': bool(selective) if selective is not None else None,
    }
    instance = ForceReply(**data)
    # Keep the raw payload around for later inspection.
    instance._raw = array
    return instance
|
def __send_api_file(self, file_name):
    """Send apidoc files from the apidoc folder to the browser.

    This method replaces all absolute urls in the file by
    the current url.

    :param file_name: the apidoc file.
    """
    full_path = join(self.app.static_folder, file_name)
    with codecs.open(full_path, 'r', 'utf-8') as source:
        content = source.read()
    # Swap the hard-coded project url for the url this request came in on,
    # but only when the project actually declares one.
    project = self.__read_api_project()
    project_url = project.get('url')
    if project_url:
        content = content.replace(project_url, request.url_root.strip('/'))
    # Build a flask response that delivers the patched file to the browser.
    headers = Headers()
    headers['Content-Length'] = getsize(full_path)
    response = self.app.response_class(
        content,
        mimetype=mimetypes.guess_type(full_path)[0],
        headers=headers,
        direct_passthrough=True,
    )
    response.last_modified = int(getmtime(full_path))
    return response
|
def setcontext(context, _local=local):
    """Set the current context to that given.

    Attributes provided by ``context`` override those in the current
    context.  If ``context`` doesn't specify a particular attribute,
    the attribute from the current context shows through.
    """
    # Merge onto the existing context so unspecified attributes persist.
    _local.__bigfloat_context__ = getcontext() + context
|
def find_version():
    """Only define version in one place"""
    # Pull __version__ straight out of the package's __init__.py text.
    match = re.search(
        r'^__version__ = ["\']([^"\']*)["\']',
        read_file('__init__.py'),
        re.M,
    )
    if match is None:
        raise RuntimeError('Unable to find version string.')
    return match.group(1)
|
def plot_histogram(self, title_prefix="", title_override="", figsize=(8, 6)):
    """Plots a histogram of the results after the Monte Carlo simulation is
    run.  NOTE - This method must be called AFTER "roll_mc".

    :param title_prefix: If desired, prefix the title (such as "Alg 1")
    :param title_override: Override the title string entirely
    :param figsize: The size of the histogram plot
    :return: a seaborn figure of the histogram
    """
    # roll_mc must have populated the result arrays before plotting.
    if not self.arr_res:
        raise ValueError("Call roll_mc before plotting the histogram.")
    # Either take the caller's title verbatim or build one from the settings.
    if title_override:
        plot_title = title_override
    else:
        plot_title = title_prefix + PBE._construct_title(
            self.num_dice, self.dice_type, self.add_val, self.num_attribute,
            self.keep_attribute, self.keep_dice, self.reroll, self.num_arrays,
        )
    return self._plot_hist(self.arr_res, self.pbe_res, plot_title, figsize)
|
def get_tcntobj(go2obj, **kws):
    """Return a TermCounts object if the user provides an annotation file, otherwise None."""
    # kws: gaf gene2go
    annotations = read_annotations(**kws)
    return TermCounts(go2obj, annotations) if annotations else None
|
def getElementById(self, node, id, default=join):
    """Return the first direct element child of *node* whose ``id``
    attribute equals *id* (no recursion into grandchildren).

    :param node: DOM node whose ``childNodes`` are searched.
    :param id: attribute value to match.
    :param default: value returned when no child matches; the ``join``
        function doubles as a "no default supplied" sentinel.
    :raises KeyError: if no child matches and no default was supplied.
    """
    attrget = self.getAttr
    element_node = node.ELEMENT_NODE
    for child in node.childNodes:
        if child.nodeType == element_node and attrget(child, 'id') == id:
            return child
    if default is not join:
        return default
    # BUG FIX: the original `raise KeyError, name` used removed Python 2
    # raise syntax and referenced an undefined variable `name`; report the
    # id that was not found instead.
    raise KeyError(id)
|
def _set_evpn_neighbor_ipv6(self, v, load=False):
    """Setter method for evpn_neighbor_ipv6, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/l2vpn/evpn/neighbor/evpn_neighbor_ipv6 (list)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_evpn_neighbor_ipv6 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_evpn_neighbor_ipv6() directly.
    """
    # NOTE(review): this looks like a pyangbind-generated setter; prefer
    # regenerating from the YANG model over hand-editing this code.
    if hasattr(v, "_utype"):
        # Unwrap a union-typed value into its concrete type before validation.
        v = v._utype(v)
    try:
        # Coerce/validate v against the generated YANG list type; raises
        # TypeError/ValueError when v is not list-compatible.
        t = YANGDynClass(v, base=YANGListType("evpn_neighbor_ipv6_address", evpn_neighbor_ipv6.evpn_neighbor_ipv6, yang_name="evpn-neighbor-ipv6", rest_name="evpn-neighbor-ipv6", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='evpn-neighbor-ipv6-address', extensions={u'tailf-common': {u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'AfEvpnNeighborIpv6Addr'}}), is_container='list', yang_name="evpn-neighbor-ipv6", rest_name="evpn-neighbor-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'AfEvpnNeighborIpv6Addr'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as the structured error dict the generated bindings use.
        raise ValueError({'error-string': """evpn_neighbor_ipv6 must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("evpn_neighbor_ipv6_address",evpn_neighbor_ipv6.evpn_neighbor_ipv6, yang_name="evpn-neighbor-ipv6", rest_name="evpn-neighbor-ipv6", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='evpn-neighbor-ipv6-address', extensions={u'tailf-common': {u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'AfEvpnNeighborIpv6Addr'}}), is_container='list', yang_name="evpn-neighbor-ipv6", rest_name="evpn-neighbor-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'AfEvpnNeighborIpv6Addr'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""", })
    self.__evpn_neighbor_ipv6 = t
    if hasattr(self, '_set'):
        self._set()
|
def edit(self, text):
    """Replace the body of the object with `text`.

    :returns: The updated object.
    """
    response = self.reddit_session.request_json(
        self.reddit_session.config['edit'],
        data={'thing_id': self.fullname, 'text': text},
    )
    # Drop the cached user page since its content just changed.
    self.reddit_session.evict(self.reddit_session.config['user'])
    return response['data']['things'][0]
|
def post(self, path, body):
    """POST request."""
    url = self._format_url(API_ROOT + path)
    return self._make_request('post', url, {'json': body})
|
def toggle_input(self):
    """Change behaviour of radio button based on input."""
    index = self.input.currentIndex()
    item_data = self.input.itemData(index, Qt.UserRole)
    # Radio buttons are only meaningful while the enabler input is selected.
    if item_data == self.radio_button_enabler:
        self.enable_radio_button()
    else:
        self.disable_radio_button()
|
def main():
    """%prog database.fa query.fa [options]

    Wrapper for NCBI BLAST+.
    """
    p = OptionParser(main.__doc__)
    # Tabular format 6 with an explicit field list matching BLAST+ defaults.
    p.add_option("--format", default=" \'6 qseqid sseqid pident length " "mismatch gapopen qstart qend sstart send evalue bitscore\' ", help="0-11, learn more with \"blastp -help\". [default: %default]")
    p.add_option("--path", dest="blast_path", default=None, help="specify BLAST+ path including the program name")
    p.add_option("--prog", dest="blast_program", default="blastp", help="specify BLAST+ program to use. See complete list here: " "http://www.ncbi.nlm.nih.gov/books/NBK52640/#chapter1.Installation" " [default: %default]")
    p.set_align(evalue=.01)
    p.add_option("--best", default=1, type="int", help="Only look for best N hits [default: %default]")
    p.set_cpus()
    p.add_option("--nprocs", default=1, type="int", help="number of BLAST processes to run in parallel. " + "split query.fa into `nprocs` chunks, " + "each chunk uses -num_threads=`cpus`")
    p.set_params()
    p.set_outfile()
    opts, args = p.parse_args()
    if len(args) != 2 or opts.blast_program is None:
        sys.exit(not p.print_help())
    # Positional args: database first, then query.
    bfasta_fn, afasta_fn = args
    # Both FASTA inputs must exist before any work is dispatched.
    for fn in (afasta_fn, bfasta_fn):
        assert op.exists(fn)
    afasta_fn = op.abspath(afasta_fn)
    bfasta_fn = op.abspath(bfasta_fn)
    out_fh = must_open(opts.outfile, "w")
    extra = opts.extra
    blast_path = opts.blast_path
    blast_program = opts.blast_program
    # Resolve the executable: an explicit path wins, otherwise rely on $PATH;
    # append the program name when the path points to a directory.
    blast_bin = blast_path or blast_program
    if op.basename(blast_bin) != blast_program:
        blast_bin = op.join(blast_bin, blast_program)
    nprocs, cpus = opts.nprocs, opts.cpus
    if nprocs > 1:
        # Split the query FASTA into nprocs chunks so each worker gets one.
        logging.debug("Dispatch job to %d processes" % nprocs)
        outdir = "outdir"
        fs = split([afasta_fn, outdir, str(nprocs)])
        queries = fs.names
    else:
        queries = [afasta_fn]
    # Protein database for blastp/blastx, nucleotide otherwise.
    dbtype = "prot" if op.basename(blast_bin) in ("blastp", "blastx") else "nucl"
    db = bfasta_fn
    if dbtype == "prot":
        nin = db + ".pin"
    else:
        nin = db + ".nin"
        # Large databases are split into volumes; the first is db.00.nin.
        nin00 = db + ".00.nin"
        nin = nin00 if op.exists(nin00) else (db + ".nin")
    run_formatdb(infile=db, outfile=nin, dbtype=dbtype)
    # The lock serializes writes from concurrent workers to the shared output.
    lock = Lock()
    blastplus_template = "{0} -db {1} -outfmt {2}"
    blast_cmd = blastplus_template.format(blast_bin, bfasta_fn, opts.format)
    blast_cmd += " -evalue {0} -max_target_seqs {1}".format(opts.evalue, opts.best)
    blast_cmd += " -num_threads {0}".format(cpus)
    if extra:
        blast_cmd += " " + extra.strip()
    # One job tuple per query chunk; blastplus() consumes (fh, cmd, query, lock).
    args = [(out_fh, blast_cmd, query, lock) for query in queries]
    g = Jobs(target=blastplus, args=args)
    g.run()
|
def skip(stackframe=1):
    """Must be called from within `__enter__()`. Performs some magic to have a
    #ContextSkipped exception be raised the moment the with context is entered.
    The #ContextSkipped must then be handled in `__exit__()` to suppress the
    propagation of the exception.

    > Important: This function does not raise an exception by itself, thus
    > the `__enter__()` method will continue to execute after using this function.
    """
    def _raise_skip(frame, event, args):
        raise ContextSkipped
    # A (no-op) global trace function must be installed for the frame-local
    # f_trace hook below to fire at all.
    sys.settrace(lambda *args, **kwargs: None)
    caller_frame = sys._getframe(stackframe + 1)
    caller_frame.f_trace = _raise_skip
|
def parse_duration(duration):
    """Attempts to parse an ISO8601 formatted ``duration``.

    Returns a ``datetime.timedelta`` object.

    :raises ParseError: if ``duration`` matches neither the simple nor the
        combined ISO8601 duration pattern.
    """
    duration = str(duration).upper().strip()
    elements = ELEMENTS.copy()
    for pattern in (SIMPLE_DURATION, COMBINED_DURATION):
        # Match once and reuse the result (the original matched twice).
        match = pattern.match(duration)
        if match:
            found = match.groupdict()
            del found['time']
            elements.update(dict((k, int(v or 0)) for k, v in found.items()))
            return datetime.timedelta(
                days=(elements['days']
                      + _months_to_days(elements['months'])
                      + _years_to_days(elements['years'])),
                hours=elements['hours'],
                minutes=elements['minutes'],
                seconds=elements['seconds'])
    # BUG FIX: the original *returned* ParseError() instead of raising it,
    # handing callers an exception instance in place of a timedelta.
    raise ParseError()
|
def create_object(object_type, data, **api_opts):
    '''
    Create raw infoblox object. This is a low level api call.

    CLI Example:

    .. code-block:: bash

        salt-call infoblox.create_object object_type=record:host data={}
    '''
    # DOC FIX: the CLI example previously showed `infoblox.update_object`
    # for this create function.
    # Honor test mode: report what would happen without touching infoblox.
    if '__opts__' in globals() and __opts__['test']:
        return {'Test': 'Would attempt to create object: {0}'.format(object_type)}
    infoblox = _get_infoblox(**api_opts)
    return infoblox.create_object(object_type, data)
|
def hash_file(path, hashobj, conn=None):
    '''
    Get the hexdigest hash value of a file
    '''
    # Directories have no content to hash.
    if os.path.isdir(path):
        return ''
    # BUG FIX: the original read in text mode and re-encoded, which raises
    # UnicodeDecodeError on non-UTF-8 (binary) files and loads the whole
    # file into memory.  Read raw bytes in fixed-size chunks instead.
    with salt.utils.files.fopen(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            hashobj.update(chunk)
    return hashobj.hexdigest()
|
def find_elements_by_css_selector(self, css_selector):
    """Finds elements by css selector.

    :Args:
     - css_selector - CSS selector string, ex: 'a.nav#home'

    :Returns:
     - list of WebElement - a list with elements if any was found.  An
       empty list if not

    :Usage:
        elements = driver.find_elements_by_css_selector('.foo')
    """
    # Delegate to the generic finder using the CSS locator strategy.
    strategy = By.CSS_SELECTOR
    return self.find_elements(by=strategy, value=css_selector)
|
def _resolve_model(obj):
    """Resolve supplied `obj` to a Django model class.

    `obj` must be a Django model class itself, or a string
    representation of one.  Useful in situations like GH #1225 where
    Django may not have resolved a string-based reference to a model in
    another model's foreign key definition.

    String representations should have the format:
        'appname.ModelName'
    """
    # Already a model class: nothing to resolve.
    if inspect.isclass(obj) and issubclass(obj, models.Model):
        return obj
    # 'appname.ModelName' string: look the class up in the app registry.
    if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
        app_name, model_name = obj.split('.')
        resolved_model = apps.get_model(app_name, model_name)
        if resolved_model is None:
            msg = "Django did not return a model for {0}.{1}"
            raise ImproperlyConfigured(msg.format(app_name, model_name))
        return resolved_model
    raise ValueError("{0} is not a Django model".format(obj))
|
def check_author(author, **kwargs):
    """Check the presence of the author in the AUTHORS/THANKS files.

    Rules:
    - the author full name and email must appear in AUTHORS file

    :param authors: name of AUTHORS files
    :type authors: `list`
    :param path: path to the repository home
    :type path: str
    :return: errors
    :rtype: `list`
    """
    authors = kwargs.get("authors")
    if not authors:
        # No AUTHORS file configured at all.
        return ['1:A100: ' + _author_codes['A100']]
    exclude_author_names = kwargs.get("exclude_author_names")
    if exclude_author_names and author in exclude_author_names:
        return []
    path = kwargs.get("path") or os.getcwd()
    # Every configured AUTHORS file must exist before we can grep it.
    errors = [
        '1:A101: ' + _author_codes['A101'].format(afile)
        for afile in authors
        if not os.path.exists(path + os.sep + afile)
    ]
    if errors:
        return errors
    grep_cmd = ['grep', '-q', author] + [path + os.sep + afile for afile in authors]
    status = subprocess.Popen(
        grep_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=path,
    ).wait()
    if status:
        # Non-zero exit: grep found no line mentioning the author.
        return ['1:A102: ' + _author_codes['A102'].format(author)]
    return []
|
def load_config_yaml(self, flags, config_dict):
    """Load config dict and yaml dict and then override both with flags dict."""
    if config_dict is None:
        print('Config File not specified. Using only input flags.')
        return flags
    try:
        yaml_dict = self.cfg_from_file(flags['YAML_FILE'], config_dict)
    except KeyError:
        # No YAML_FILE flag supplied; fall back to flags + config file only.
        print('Yaml File not specified. Using only input flags and config file.')
        return config_dict
    print('Using input flags, config file, and yaml file.')
    return self._merge_a_into_b_simple(flags, yaml_dict)
|
def cube2map(data_cube, layout):
    r"""Cube to Map

    This method transforms the input data from a 3D cube to a 2D map with a
    specified layout

    Parameters
    ----------
    data_cube : np.ndarray
        Input data cube, 3D array of 2D images
    layout : tuple
        2D layout of 2D images

    Returns
    -------
    np.ndarray 2D map

    Raises
    ------
    ValueError
        For invalid data dimensions
    ValueError
        For invalid layout

    Examples
    --------
    >>> from modopt.base.transform import cube2map
    >>> a = np.arange(16).reshape((4, 2, 2))
    >>> cube2map(a, (2, 2))
    array([[ 0,  1,  4,  5],
           [ 2,  3,  6,  7],
           [ 8,  9, 12, 13],
           [10, 11, 14, 15]])
    """
    if data_cube.ndim != 3:
        raise ValueError('The input data must have 3 dimensions.')
    if data_cube.shape[0] != np.prod(layout):
        raise ValueError('The desired layout must match the number of input '
                         'data layers.')
    n_rows, n_cols = layout
    # Stitch each row of the layout horizontally, then stack rows vertically.
    rows = []
    for row_idx in range(n_rows):
        start = n_cols * row_idx
        rows.append(np.hstack(data_cube[start:start + n_cols]))
    return np.vstack(rows)
|
def merge_multipartobject(upload_id, version_id=None):
    """Merge multipart object.

    :param upload_id: The :class:`invenio_files_rest.models.MultipartObject`
        upload ID.
    :param version_id: Optionally you can define which file version.
        (Default: ``None``)
    :returns: The :class:`invenio_files_rest.models.ObjectVersion` version
        ID.
    """
    mp = MultipartObject.query.filter_by(upload_id=upload_id).one_or_none()
    if not mp:
        raise RuntimeError('Upload ID does not exists.')
    if not mp.completed:
        raise RuntimeError('MultipartObject is not completed.')
    try:
        merged = mp.merge_parts(
            version_id=version_id,
            progress_callback=progress_updater,
        )
        db.session.commit()
        return str(merged.version_id)
    except Exception:
        # Any failure (merge or commit) must leave the session clean.
        db.session.rollback()
        raise
|
def fully_expanded_path(self):
    """Returns the absolutely absolute path. Calls os.(
    normpath, normcase, expandvars and expanduser).
    """
    # Apply the expansions innermost-first, exactly as the nested original:
    # expanduser -> expandvars -> normcase -> normpath -> abspath.
    expanded = os.path.expanduser(self.path)
    expanded = os.path.expandvars(expanded)
    expanded = os.path.normcase(expanded)
    return os.path.abspath(os.path.normpath(expanded))
|
def get_ldap_filter(ldap_filter):
    # type: (Any) -> Optional[Union[LDAPFilter, LDAPCriteria]]
    """Retrieves the LDAP filter object corresponding to the given filter.

    Parses it the argument if it is an LDAPFilter instance

    :param ldap_filter: An LDAP filter (LDAPFilter or string)
    :return: The corresponding filter, can be None
    :raise ValueError: Invalid filter string found
    :raise TypeError: Unknown filter type
    """
    if ldap_filter is None:
        return None
    if isinstance(ldap_filter, (LDAPFilter, LDAPCriteria)):
        # Already a parsed filter: hand it back untouched.
        return ldap_filter
    if is_string(ldap_filter):
        # A raw string: parse it into a filter object.
        return _parse_ldap(ldap_filter)
    raise TypeError(
        "Unhandled filter type {0}".format(type(ldap_filter).__name__))
|
def fetchone(self):
    """Fetch the next row"""
    self._check_executed()
    rows = self._rows
    # Nothing buffered, or the cursor already walked past the last row.
    if rows is None or self.rownumber >= len(rows):
        return None
    row = rows[self.rownumber]
    self.rownumber += 1
    return row
|
def _permuted_dicts_of_specs ( specs ) :
"""Create { name : value } dict , one each for every permutation .
Each permutation becomes a dictionary , with the keys being the attr names
and the values being the corresponding value for that permutation . These
dicts can then be directly passed to the Calc constructor ."""
|
permuter = itertools . product ( * specs . values ( ) )
return [ dict ( zip ( specs . keys ( ) , perm ) ) for perm in permuter ]
|
def get_config(k: str) -> Any:
    """Returns a configuration variable's value or None if it is unset."""
    # If the config has been set explicitly, use it.
    # (Membership tests go straight against the dicts; the original built
    # throwaway lists via list(d.keys()) for every lookup.)
    if k in CONFIG:
        return CONFIG[k]
    # If there is a specific PULUMI_CONFIG_<k> environment variable, use it.
    env_key = get_config_env_key(k)
    if env_key in os.environ:
        return os.environ[env_key]
    # If the config hasn't been set, but there is a process-wide PULUMI_CONFIG
    # environment variable, use it.
    env_dict = get_config_env()
    if env_dict is not None and k in env_dict:
        return env_dict[k]
    return None
|
def convert_message_to_string(self, message):
    """Convert message from list to string for GitHub API.

    :param message: iterable of message lines.
    :return: one string with each entry rendered as a Markdown bullet.
    """
    # str.join avoids the quadratic cost of repeated string concatenation.
    return ''.join(
        '* {submessage}\n'.format(submessage=submessage)
        for submessage in message
    )
|
def enable_result_transforms(func):
    """Decorator that tries to use the object provided using a kwarg called
    'electrode_transformator' to transform the return values of an import
    function. It is intended to be used to transform electrode numbers and
    locations, i.e. for use in roll-along-measurement schemes.

    The transformator object must have a function .transform, which takes three
    parameters: data, electrode, topography and returns three correspondingly
    transformed objects.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        transformator = kwargs.pop('electrode_transformator', None)
        # Importers return a (data, electrodes, topography) triple.
        data, electrodes, topography = func(*args, **kwargs)
        if transformator is None:
            return data, electrodes, topography
        return transformator.transform(data, electrodes, topography)
    return wrapper
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.