signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def register_iq_request_handler(self, type_, payload_cls, cb, *,
                                with_send_reply=False):
    """Register a handler invoked when a matching IQ request arrives.

    :param type_: IQ type to react to (must be a request type).
    :type type_: :class:`~aioxmpp.IQType`
    :param payload_cls: Payload class to react to (subclass of
        :class:`~xso.XSO`), registered via :meth:`~.IQ.as_payload_class`.
    :type payload_cls: :class:`~.XMLStreamClass`
    :param cb: Function or coroutine function to invoke.
    :param with_send_reply: Whether `cb` receives a ``send_reply`` function
        as second argument in addition to the stanza.
    :type with_send_reply: :class:`bool`
    :raises ValueError: if a handler is already registered for this
        (`type_`, `payload_cls`) combination
    :raises ValueError: if `type_` is not a request IQ type
    :raises ValueError: if `type_` is not (and cannot be cast to) a valid
        :class:`~.IQType`

    Whenever an IQ stanza with the given `type_` and a payload of
    `payload_cls` is received, `cb` is called. `cb` must either be a
    coroutine function or return an awaitable; the awaitable's result is
    used as the payload of an automatically generated
    :attr:`~.IQType.RESULT` response. Exceptions raised by `cb` are
    converted to :class:`~.stanza.Error` payloads on an
    :attr:`~.IQType.ERROR` response (:class:`aioxmpp.errors.XMPPError`
    subclasses directly, anything else wrapped in
    :class:`aioxmpp.XMPPCancelError` with ``undefined-condition``).

    .. warning::

        Coroutine handlers lose the strong ordering guarantees of XMPP XML
        streams: concurrent IQ handlers may run in any order. Non-coroutine
        handlers are allowed for that reason.

    .. versionadded:: 0.11

        The `with_send_reply` argument: when true, `cb` is called with the
        IQ stanza and a unary function ``send_reply(result=None)`` which
        sends a response and suppresses the automatic one (an
        :class:`~aioxmpp.XMPPError` `result` produces an error response).
        Useful when work must happen after the result has been sent.

    .. versionchanged:: 0.10

        Accepts an awaitable-returning function in addition to coroutine
        functions. Renamed from :meth:`register_iq_request_coro`.

    .. versionadded:: 0.6

        Running IQ response coroutines are cancelled when the stream is
        stopped (without SM) or closed; fork with
        :func:`asyncio.ensure_future` to protect against that.

    .. versionchanged:: 0.7

        `type_` should now be a :class:`~.IQType` member; passing a
        :class:`str` is deprecated and will raise :class:`TypeError` as of
        the 1.0 release.
    """
    # Coerce strings to the enum (deprecated path) and reject non-requests.
    type_ = self._coerce_enum(type_, structs.IQType)
    if not type_.is_request:
        raise ValueError(
            "{!r} is not a request IQType".format(type_)
        )
    registration_key = (type_, payload_cls)
    if registration_key in self._iq_request_map:
        raise ValueError("only one listener is allowed per tag")
    self._iq_request_map[registration_key] = (cb, with_send_reply)
    self._logger.debug(
        "iq request coroutine registered: type=%r, payload=%r",
        type_, payload_cls,
    )
|
def write_file(infile: str, outfile: str,
               progress_callback: Callable[[float], None],
               chunk_size: int = 1024, file_size: int = None):
    """Copy a file to another file, invoking progress callbacks.

    :param infile: The input filepath
    :param outfile: The output filepath
    :param progress_callback: Unary callable invoked after each chunk with
        the fraction (0.0 - 1.0) of the file written so far
    :param chunk_size: The size of file chunks to copy in between progress
        notifications
    :param file_size: The total size of the update file (for generating
        progress percentage). If ``None``, computed with ``seek``/``tell``.
    """
    total_written = 0
    with open(infile, 'rb') as img, open(outfile, 'wb') as part:
        if file_size is None:
            # seek(0, 2) returns the offset of the end of file, i.e. its size
            file_size = img.seek(0, 2)
            img.seek(0)
        LOG.info(f'write_file: file size calculated as {file_size}B')
        LOG.info(f'write_file: writing {infile} ({file_size}B)'
                 f' to {outfile} in {chunk_size}B chunks')
        while True:
            chunk = img.read(chunk_size)
            part.write(chunk)
            total_written += len(chunk)
            # Guard against ZeroDivisionError for an empty input file:
            # report completion (1.0) instead of dividing by zero.
            progress_callback(total_written / file_size if file_size else 1.0)
            # A short read signals end of file.
            if len(chunk) != chunk_size:
                break
|
def set_year(self, year):
    """Set a user's year; required on magma just before the login.

    Called by default by ``login``. Returns True when the post-login page
    contains the expected navigation menu.
    """
    # Map known year labels to their cursus id, fall back to the raw value.
    self.year = YEARS.get(year, year)
    payload = {'idCursus': self.year}
    page = self.post_soup('/~etudiant/login.php', data=payload)
    return len(page.select('ul.rMenu-hor')) > 0
|
def findTheLost(config_file, configspec_file, skipHidden=True):
    """Find any lost/missing parameters in this cfg file, compared to what
    the .cfgspc says should be there. This method is recommended by the
    ConfigObj docs. Return a stringified list of item errors.

    :param config_file: path to the .cfg file to check
    :param configspec_file: path to the .cfgspc file describing it
    :param skipHidden: drop 'hidden' sections/parameters from the report,
        since hidden items are supposed to be absent from the .cfg file
    """
    # do some sanity checking, but don't (yet) make this a serious error
    if not os.path.exists(config_file):
        print("ERROR: Config file not found: " + config_file)
        return []
    if not os.path.exists(configspec_file):
        print("ERROR: Configspec file not found: " + configspec_file)
        return []
    tmpObj = configobj.ConfigObj(config_file, configspec=configspec_file)
    simval = configobj.SimpleVal()
    test = tmpObj.validate(simval)
    # validate() returns True on full success, otherwise a nested dict of
    # {key: bool} matching the shape of the config object.
    if test is True:
        return []
    # Walk the result to find the Falses - those are the missing pars.
    flattened = configobj.flatten_errors(tmpObj, test)
    # Before we move on, skip/eliminate any 'hidden' items from our list,
    # since hidden items are really supposed to be missing from the .cfg.
    if flattened and skipHidden:
        keepers = []
        for tup in flattened:
            keep = True
            # hidden section
            if len(tup[0]) > 0 and isHiddenName(tup[0][-1]):
                keep = False
            # hidden par (in a section, or at the top level)
            elif tup[1] is not None and isHiddenName(tup[1]):
                keep = False
            if keep:
                keepers.append(tup)
        flattened = keepers
    return flattened2str(flattened, missing=True)
|
def cql_encode_all_types(self, val, as_text_type=False):
    """Convert any type into a CQL string, defaulting to
    ``cql_encode_object`` if :attr:`~Encoder.mapping` has no entry for
    the value's type.

    :param val: value to encode
    :param as_text_type: when true, decode byte results to text
    """
    encoder = self.mapping.get(type(val), self.cql_encode_object)
    result = encoder(val)
    if as_text_type and not isinstance(result, six.text_type):
        result = result.decode('utf-8')
    return result
|
def save(self):
    """Save the current draft state and refresh local fields from the
    server response. Returns self for chaining."""
    saved = self.session.request("save:Message", [self.data])
    self.data = saved
    self.message_id = saved["id"]
    return self
|
def raise_failure_exception(self, expect_or_allow='Allowed'):
    """Raise a ``MockExpectationError`` with a useful message describing
    the unmet expectation.

    :raise: ``MockExpectationError``
    """
    message = "{} '{}' to be called {}on {!r} with {}, but was not. ({}:{})".format(
        expect_or_allow,
        self._method_name,
        self._call_counter.error_string(),
        self._target.obj,
        self._expected_argument_string(),
        self._caller.filename,
        self._caller.lineno,
    )
    raise MockExpectationError(message)
|
def get_rank(self, entity, criteria, condition=True):
    """Get the rank of a person within an entity according to a criteria.

    The person with rank 0 has the minimum value of criteria. If condition
    is specified, persons who don't respect it are ignored and get rank -1.

    Example:
        >>> age = person('age', period)  # e.g. [32, 34, 2, 8, 1]
        >>> person.get_rank(household, age)
        >>> [3, 4, 0, 2, 1]
        >>> is_child = person.has_role(Household.CHILD)  # [False, False, True, True, True]
        >>> person.get_rank(household, -age, condition=is_child)  # reverse order: eldest child gets rank 0
        >>> [-1, -1, 1, 0, 2]
    """
    # Unwrap projectors: 'person.household' -> the 'household' entity.
    if isinstance(entity, Projector):
        entity = entity.reference_entity
    positions = entity.members_position
    largest_group = np.max(positions) + 1
    # Persons failing the condition get +inf so they sort last.
    masked_criteria = np.where(condition, criteria, np.inf)
    group_ids = entity.members_entity_id
    # Row i, column j holds the criteria value of the j-th person of group i
    # (padded with +inf for groups smaller than the largest one).
    per_group = np.asarray([
        entity.value_nth_person(k, masked_criteria, default=np.inf)
        for k in range(largest_group)
    ]).transpose()
    # Double-argsort each row: argsort of an argsort yields, for every
    # value, its rank once sorted. E.g. x = [3,1,6,4,0] -> argsort(x) =
    # [4,1,0,3,2] -> argsort(argsort(x)) = [2,1,4,3,0], the ranks.
    ranks = np.argsort(np.argsort(per_group))
    # Pick, for each person, the cell at (its group id, its position).
    result = ranks[group_ids, positions]
    # Persons not respecting the condition are reported with rank -1.
    return np.where(condition, result, -1)
|
def format_page(self, page, link_resolver, output):
    """Called by `project.Project.format_page`, leaving full control to
    extensions over the formatting of the pages they are responsible of.

    Args:
        page: tree.Page, the page to format.
        link_resolver: links.LinkResolver, object responsible for
            resolving links potentially mentioned in `page`.
        output: str, path to the output directory.
    """
    debug('Formatting page %s' % page.link.ref, 'formatting')
    actual_output = None
    if output:
        # HTML pages land in an 'html' subdirectory of the output root.
        actual_output = os.path.join(output, 'html')
        if not os.path.exists(actual_output):
            os.makedirs(actual_output)
    page.format(self.formatter, link_resolver, actual_output)
|
def does_not_mutate(func):
    """Decorator preventing methods from mutating the receiver.

    The wrapped method is invoked on a copy of ``self`` (via ``self.copy()``)
    so the original instance is left untouched; the method's return value is
    passed through unchanged.
    """
    from functools import wraps

    # functools.wraps copies __name__, __doc__, __module__, __qualname__ and
    # sets __wrapped__ - strictly more complete than the manual copying of
    # only __name__/__doc__.
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        return func(self.copy(), *args, **kwargs)

    return wrapper
|
def threshold(self, data_1, data_2, recall_weight=1.5):  # pragma: no cover
    """Return the threshold that maximizes the expected F score, a
    weighted average of precision and recall, for a sample of data.

    Arguments:
        data_1 -- Dictionary of records from first dataset, where the
                  keys are record_ids and the values are dictionaries
                  with the keys being field names
        data_2 -- Dictionary of records from second dataset, same form
                  as data_1
        recall_weight -- Sets the tradeoff between precision and recall.
                  I.e. if you care twice as much about recall as you do
                  precision, set recall_weight to 2.
    """
    candidate_pairs = self._blockData(data_1, data_2)
    return self.thresholdBlocks(candidate_pairs, recall_weight)
|
def context(name):
    '''A decorator registering a theme context processor under *name*.'''
    def decorator(func):
        # Record the processor on the active theme and hand the function
        # back unmodified so it stays usable on its own.
        g.theme.context_processors[name] = func
        return func
    return decorator
|
def connect_full_direct(self, config):
    """Create a fully-connected genome, including direct input-output
    connections."""
    # The True flag asks for direct input->output pairs as well.
    for source, target in self.compute_full_connections(config, True):
        conn = self.create_connection(config, source, target)
        self.connections[conn.key] = conn
|
def call(cls, iterable, *a, **kw):
    """Call every item in *iterable* with the specified arguments and
    collect the results into *cls*."""
    results = (item(*a, **kw) for item in iterable)
    return cls(results)
|
def exptime(self):
    '''Next scheduled date/time.

    :returns: the next default time (``nextday`` shifted by the configured
        hour/minute, with an 8-hour timezone offset applied)'''
    offset = timedelta(hours=self.__hour - 8, minutes=self.__minutes)
    return self.nextday + offset
|
def save(self, record_key, record_data, overwrite=True, secret_key=''):
    '''a method to create a record in the collection folder

    :param record_key: string with name to assign to record (see NOTES below)
    :param record_data: byte data for record body
    :param overwrite: [optional] boolean to overwrite records with same name
    :param secret_key: [optional] string with key to encrypt data
    :return: string with name of record

    NOTE:   record_key may only contain alphanumeric, /, _, . or -
            characters and may not begin with the . or / character.

    NOTE:   using one or more / characters splits the key into separate
            segments. these segments will appear as sub-directories inside
            the record collection and each segment is used as a separate
            index for that record when using the list method
            e.g. lab/unittests/1473719695.2165067.json is indexed:
            ['lab', 'unittests', '1473719695.2165067', '.json']
    '''
    title = '%s.save' % self.__class__.__name__

    # validate inputs
    input_fields = {'record_key': record_key, 'secret_key': secret_key}
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # validate byte data
    if not isinstance(record_data, bytes):
        raise ValueError('%s(record_data=b"...") must be byte data.' % title)

    # construct and validate file path
    file_root, file_name = os.path.split(record_key)
    self.fields.validate(file_name, '.record_key_comp')
    while file_root:
        file_root, path_node = os.path.split(file_root)
        self.fields.validate(path_node, '.record_key_comp')

    # verify permissions
    if not self.permissions_write:
        raise Exception('%s requires an access_token with write permissions.' % title)

    # retrieve file id
    file_id, parent_id = self._get_id(record_key)

    # check overwrite condition
    if file_id:
        if not overwrite:
            raise Exception('%s(record_key="%s") already exists. To overwrite, set overwrite=True' % (title, record_key))
        try:
            self.drive.delete(fileId=file_id).execute()
        # narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed and misreported as a connection error
        except Exception:
            raise DriveConnectionError(title)

    # check size of file
    import sys
    record_optimal = self.fields.metadata['record_optimal_bytes']
    record_size = sys.getsizeof(record_data)
    error_prefix = '%s(record_key="%s", record_data=b"...")' % (title, record_key)
    if record_size > record_optimal:
        print('[WARNING] %s exceeds optimal record data size of %s bytes.' % (error_prefix, record_optimal))

    # encrypt data
    if secret_key:
        from labpack.encryption import cryptolab
        record_data, secret_key = cryptolab.encrypt(record_data, secret_key)

    # prepare file body
    from googleapiclient.http import MediaInMemoryUpload
    media_body = MediaInMemoryUpload(body=record_data, resumable=True)

    # determine path segments
    path_segments = record_key.split(os.sep)

    # construct upload kwargs
    create_kwargs = {
        'body': {'name': path_segments.pop()},
        'media_body': media_body,
        'fields': 'id',
    }

    # walk through parent directories, resolving or creating each segment
    parent_id = ''
    if path_segments:
        # construct query and creation arguments
        walk_folders = True
        folder_kwargs = {
            'body': {'name': '', 'mimeType': 'application/vnd.google-apps.folder'},
            'fields': 'id',
        }
        query_kwargs = {'spaces': self.drive_space, 'fields': 'files(id, parents)'}
        while path_segments:
            folder_name = path_segments.pop(0)
            folder_kwargs['body']['name'] = folder_name
            # search for folder id in existing hierarchy
            if walk_folders:
                walk_query = "name = '%s'" % folder_name
                if parent_id:
                    # BUGFIX: the clause previously lacked a leading space,
                    # producing "...'%s'and '%s' in parents" - an invalid
                    # Drive query string.
                    walk_query += " and '%s' in parents" % parent_id
                query_kwargs['q'] = walk_query
                try:
                    response = self.drive.list(**query_kwargs).execute()
                except Exception:
                    raise DriveConnectionError(title)
                file_list = response.get('files', [])
            else:
                # once a folder had to be created, none of its children
                # can exist yet - skip further lookups
                file_list = []
            if file_list:
                parent_id = file_list[0].get('id')
            # or create folder
            # https://developers.google.com/drive/v3/web/folder
            else:
                try:
                    if parent_id:
                        folder_kwargs['body']['parents'] = [parent_id]
                    elif self.drive_space == 'appDataFolder':
                        folder_kwargs['body']['parents'] = [self.drive_space]
                    else:
                        # BUGFIX: was `del folder_kwargs['body']['parents']`,
                        # which raises KeyError the first time through (the
                        # key is never pre-set) and was then masked by the
                        # bare except below.
                        folder_kwargs['body'].pop('parents', None)
                    response = self.drive.create(**folder_kwargs).execute()
                    parent_id = response.get('id')
                    walk_folders = False
                except Exception:
                    raise DriveConnectionError(title)

    # add parent id to file creation kwargs
    if parent_id:
        create_kwargs['body']['parents'] = [parent_id]
    elif self.drive_space == 'appDataFolder':
        create_kwargs['body']['parents'] = [self.drive_space]

    # modify file time for .drep files (backdated to a fixed epoch)
    import re
    if re.search('\\.drep$', file_name):
        from labpack.records.time import labDT
        drep_time = labDT.fromEpoch(1).isoformat()
        create_kwargs['body']['modifiedTime'] = drep_time

    # send create request
    try:
        self.drive.create(**create_kwargs).execute()
    except Exception:
        raise DriveConnectionError(title)

    return record_key
|
def from_ewif_hex(cls: Type[SigningKeyType], ewif_hex: str, password: str) -> SigningKeyType:
    """Return SigningKey instance from Duniter EWIF in hexadecimal format

    :param ewif_hex: EWIF string in hexadecimal format
    :param password: Password of the encrypted seed
    :raises Exception: if the payload size, format flag, checksum or
        password is invalid
    """
    ewif_bytes = Base58Encoder.decode(ewif_hex)
    # An EWIF payload is exactly 39 bytes:
    # 1 format byte + 4 salt bytes + 2*16 encrypted seed halves + 2 checksum bytes
    if len(ewif_bytes) != 39:
        raise Exception("Error: the size of EWIF is invalid")
    # extract data
    fi = ewif_bytes[0:1]
    checksum_from_ewif = ewif_bytes[-2:]
    ewif_no_checksum = ewif_bytes[0:-2]
    salt = ewif_bytes[1:5]
    encryptedhalf1 = ewif_bytes[5:21]
    encryptedhalf2 = ewif_bytes[21:37]
    # check format flag (0x02 identifies the EWIF format)
    if fi != b"\x02":
        raise Exception("Error: bad format version, not EWIF")
    # checksum control: first two bytes of a double SHA-256 over the payload
    checksum = libnacl.crypto_hash_sha256(libnacl.crypto_hash_sha256(ewif_no_checksum))[0:2]
    if checksum_from_ewif != checksum:
        raise Exception("Error: bad checksum of the EWIF")
    # SCRYPT: derive a 64-byte key from password and salt (N=16384, r=8, p=8)
    password_bytes = password.encode("utf-8")
    scrypt_seed = scrypt(password_bytes, salt, 16384, 8, 8, 64)
    derivedhalf1 = scrypt_seed[0:32]
    derivedhalf2 = scrypt_seed[32:64]
    # AES-ECB decryption of the two encrypted seed halves with the second derived half
    aes = pyaes.AESModeOfOperationECB(derivedhalf2)
    decryptedhalf1 = aes.decrypt(encryptedhalf1)
    decryptedhalf2 = aes.decrypt(encryptedhalf2)
    # XOR each decrypted half with the matching 16 bytes of the first derived half
    seed1 = xor_bytes(decryptedhalf1, derivedhalf1[0:16])
    seed2 = xor_bytes(decryptedhalf2, derivedhalf1[16:32])
    seed = bytes(seed1 + seed2)
    # Password control: the embedded salt must equal the first four bytes of
    # the double SHA-256 of the public key derived from the recovered seed
    signer = SigningKey(seed)
    salt_from_seed = libnacl.crypto_hash_sha256(libnacl.crypto_hash_sha256(Base58Encoder.decode(signer.pubkey)))[0:4]
    if salt_from_seed != salt:
        raise Exception("Error: bad Password of EWIF address")
    return cls(seed)
|
def request(self, verb, subpath, data=''):
    """Generic Vingd-backend authenticated request (currently HTTP Basic
    Auth over HTTPS, but OAuth1 in the future).

    :param verb: HTTP method name (case-insensitive)
    :param subpath: path joined onto the configured endpoint path
    :param data: request body
    :returns: Data ``dict``, or raises exception.
    """
    if not self.api_key or not self.api_secret:
        raise Exception("Vingd authentication credentials undefined.")
    endpoint = urlparse(self.api_endpoint)
    if endpoint.scheme != 'https':
        raise Exception("Invalid Vingd endpoint URL (non-https).")
    host = endpoint.netloc.split(':')[0]
    port = 443
    path = urljoin(endpoint.path + '/', subpath)
    creds = "%s:%s" % (self.api_key, self.api_secret)
    headers = {
        'Authorization': b'Basic ' + base64.b64encode(creds.encode('ascii')),
        'User-Agent': self.USER_AGENT,
    }
    try:
        conn = httplib.HTTPSConnection(host, port)
        conn.request(verb.upper(), quote(path), data, headers)
        r = conn.getresponse()
        content = r.read().decode('ascii')
        code = r.status
        conn.close()
    except httplib.HTTPException:
        raise InternalError('HTTP request failed! (Network error? Installation error?)')
    try:
        content = json.loads(content)
    # narrowed from a bare except: only a JSON decode failure is expected
    except ValueError:
        raise GeneralException(content, 'Non-JSON server response', code)
    if 200 <= code <= 299:
        try:
            return content['data']
        # narrowed from a bare except: missing key or non-dict body
        except (KeyError, TypeError):
            raise InvalidData('Invalid server DATA response format!')
    try:
        message = content['message']
        context = content['context']
    except (KeyError, TypeError):
        raise InvalidData('Invalid server ERROR response format!')
    # Map the well-known error codes onto the matching exception types.
    if code == Codes.BAD_REQUEST:
        raise InvalidData(message, context)
    elif code == Codes.FORBIDDEN:
        raise Forbidden(message, context)
    elif code == Codes.NOT_FOUND:
        raise NotFound(message, context)
    elif code == Codes.INTERNAL_SERVER_ERROR:
        raise InternalError(message, context)
    elif code == Codes.CONFLICT:
        raise GeneralException(message, context)
    raise GeneralException(message, context, code)
|
def select_directory(self):
    """Select directory via a file dialog.

    Stdio redirection is disabled while the (blocking) dialog is shown and
    always re-enabled afterwards — previously it was only re-enabled when a
    directory was actually chosen, so cancelling the dialog left stdio
    redirected off.
    """
    self.__redirect_stdio_emit(False)
    try:
        directory = getexistingdirectory(self, _("Select directory"),
                                         self.path)
        if directory:
            directory = to_unicode_from_fs(osp.abspath(directory))
    finally:
        # Re-enable redirection even when the dialog is cancelled.
        self.__redirect_stdio_emit(True)
    return directory
|
def random_tournament_graph(n, random_state=None):
    """Return a random tournament graph [1]_ with n nodes.

    Parameters
    ----------
    n : scalar(int)
        Number of nodes.
    random_state : int or np.random.RandomState, optional
        Random seed (integer) or np.random.RandomState instance to set the
        initial state of the random number generator for reproducibility.
        If None, a randomly initialized RandomState is used.

    Returns
    -------
    DiGraph
        A DiGraph representing the tournament graph.

    References
    ----------
    .. [1] `Tournament (graph theory)
       <https://en.wikipedia.org/wiki/Tournament_(graph_theory)>`_,
       Wikipedia.
    """
    random_state = check_random_state(random_state)
    # A tournament has exactly one directed edge per unordered node pair.
    num_edges = n * (n - 1) // 2
    draws = random_state.random_sample(num_edges)
    row = np.empty(num_edges, dtype=int)
    col = np.empty(num_edges, dtype=int)
    # Fill row/col in place: each draw decides the edge's orientation.
    _populate_random_tournament_row_col(n, draws, row, col)
    edge_flags = np.ones(num_edges, dtype=bool)
    adjacency = sparse.coo_matrix((edge_flags, (row, col)), shape=(n, n))
    return DiGraph(adjacency)
|
def _init_env(self):
    '''Initialize some Salt environment.'''
    from salt.config import minion_config
    from salt.grains import core as grains_core
    # Salt grain modules read their configuration from a module-level
    # __opts__ mapping; seed it from the default minion config.
    grains_core.__opts__ = minion_config(self.DEFAULT_MINION_CONFIG_PATH)
    self.grains_core = grains_core
|
def unicode_left(s, width):
    """Cut unicode string from left to fit a given display width
    (East-Asian-width aware)."""
    keep = 0
    used = 0
    for ch in s:
        # Wide/fullwidth characters occupy more than one display column.
        used += __unicode_width_mapping[east_asian_width(ch)]
        if width < used:
            break
        keep += 1
    return s[:keep]
|
def recv_exactly(self, n, timeout='default'):
    """Receive exactly n bytes.

    Aliases: read_exactly, readexactly, recvexactly
    """
    self._print_recv_header(
        '======== Receiving until exactly {0}B{timeout_text} ========',
        timeout, n)

    def exact_length(buffered):
        # Report n (done) once enough bytes are buffered, 0 (keep going)
        # otherwise.
        return n if len(buffered) >= n else 0

    return self._recv_predicate(exact_length, timeout)
|
def _guess_name_of(self, expr):
    """Try to guess what variable name 'expr' ends in.

    A heuristic that roughly emulates what most SQL databases name
    columns, based on selected variable names or applied functions.
    Returns None when no name can be guessed.
    """
    if isinstance(expr, ast.Var):
        return expr.value
    if isinstance(expr, ast.Resolve):
        # The RHS of a Resolve is always a Literal, because that's what
        # Parser.dot_rhs produces.
        return expr.rhs.value
    if isinstance(expr, ast.Select) and isinstance(expr.rhs, ast.Literal):
        lhs_name = self._guess_name_of(expr.lhs)
        if lhs_name is not None:
            return "%s_%s" % (lhs_name, expr.rhs.value)
    if isinstance(expr, ast.Apply) and isinstance(expr.func, ast.Var):
        return expr.func.value
    return None
|
def estimate(self):
    """Estimate the `DAG` structure that fits best to the given data set,
    according to the scoring method supplied in the constructor.
    Exhaustively searches through all models. Only estimates network
    structure, no parametrization.

    Returns
    -------
    model : `DAG` instance
        A `DAG` with maximal score.

    Examples
    --------
    >>> import pandas as pd
    >>> import numpy as np
    >>> from pgmpy.estimators import ExhaustiveSearch
    >>> # create random data sample with 3 variables, where B and C are identical:
    >>> data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 2)), columns=list('AB'))
    >>> data['C'] = data['B']
    >>> est = ExhaustiveSearch(data)
    >>> best_model = est.estimate()
    >>> best_model
    <pgmpy.base.DAG.DAG object at 0x7f695c535470>
    >>> best_model.edges()
    [('B', 'C')]
    """
    # Scan every candidate structure and keep the highest-scoring one.
    best_dag = max(self.all_dags(), key=self.scoring_method.score)
    # Rebuild a fresh DAG with deterministically ordered nodes/edges.
    model = DAG()
    model.add_nodes_from(sorted(best_dag.nodes()))
    model.add_edges_from(sorted(best_dag.edges()))
    return model
|
def first(series, order_by=None):
    """Return the first value of a series.

    Args:
        series (pandas.Series): column to summarize.

    Kwargs:
        order_by: a pandas.Series or list of series (can be symbolic) to
            order the input series by before summarization.
    """
    ordered = series if order_by is None else order_series_by(series, order_by)
    return ordered.iloc[0]
|
def next_cursor_location(self):
    """Move cursor to the next trace frame."""
    self._verify_entrypoint_selected()
    # Clamp at the final frame so the cursor never runs off the end.
    last_index = len(self.trace_tuples) - 1
    self.current_trace_frame_index = min(
        self.current_trace_frame_index + 1, last_index
    )
    self.trace()
|
def addRnaQuantificationSet(self):
    """Adds an rnaQuantificationSet into this repo"""
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    # Fall back to a name derived from the file path when none was given.
    name = (getNameFromPath(self._args.filePath)
            if self._args.name is None else self._args.name)
    rnaQuantSet = rna_quantification.SqliteRnaQuantificationSet(dataset, name)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        raise exceptions.RepoManagerException(
            "A reference set name must be provided")
    rnaQuantSet.setReferenceSet(
        self._repo.getReferenceSetByName(referenceSetName))
    rnaQuantSet.populateFromFile(self._args.filePath)
    rnaQuantSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(self._repo.insertRnaQuantificationSet, rnaQuantSet)
|
def export_project(self):
    """Processes groups and misc options specific for eclipse, and run
    generator"""
    output = copy.deepcopy(self.generated_project)
    # Makefile generation works on its own copy of the workspace.
    makefile_data = self.workspace.copy()
    self.exporter.process_data_for_makefile(makefile_data)
    output['path'], output['files']['makefile'] = self.gen_file_jinja(
        'makefile_gcc.tmpl', makefile_data, 'Makefile',
        makefile_data['output_dir']['path'])
    # The Eclipse templates get an expanded copy with group information.
    expanded = self.workspace.copy()
    expanded['rel_path'] = makefile_data['output_dir']['rel_path']
    expanded['groups'] = {group: [] for group in self._get_groups(expanded)}
    self._iterate(self.workspace, expanded)
    # Project files
    project_path, output['files']['cproj'] = self.gen_file_jinja(
        'eclipse_makefile.cproject.tmpl', expanded, '.cproject',
        makefile_data['output_dir']['path'])
    project_path, output['files']['proj_file'] = self.gen_file_jinja(
        'eclipse.project.tmpl', expanded, '.project',
        makefile_data['output_dir']['path'])
    return output
|
def probe_wdl(self, board: chess.Board) -> int:
    """Probes WDL tables for win/draw/loss-information.

    Probing is thread-safe when done with different *board* objects and if
    *board* objects are not modified during probing.

    Returns ``2`` if the side to move is winning, ``0`` if the position is
    a draw and ``-2`` if the side to move is losing.

    Returns ``1`` in case of a cursed win and ``-1`` in case of a blessed
    loss. Mate can be forced but the position can be drawn due to the
    fifty-move rule.

    >>> import chess
    >>> import chess.syzygy
    >>> with chess.syzygy.open_tablebase("data/syzygy/regular") as tablebase:
    ...     board = chess.Board("8/2K5/4B3/3N4/8/8/4k3/8 b - - 0 1")
    ...     print(tablebase.probe_wdl(board))

    :raises: :exc:`KeyError` (or specifically
        :exc:`chess.syzygy.MissingTableError`) if the position could not
        be found in the tablebase. Use
        :func:`~chess.syzygy.Tablebase.get_wdl()` if you prefer to get
        ``None`` instead of an exception.

    Note that probing corrupted table files is undefined behavior.
    """
    # Positions with castling rights are not in the tablebase.
    if board.castling_rights:
        raise KeyError("syzygy tables do not contain positions with castling rights: {}".format(board.fen()))
    # Validate piece count.
    if chess.popcount(board.occupied) > 7:
        raise KeyError("syzygy tables support up to 6 (and experimentally 7) pieces, not {}: {}".format(chess.popcount(board.occupied), board.fen()))
    # Probe.
    v, _ = self.probe_ab(board, -2, 2)
    # If en passant is not possible, we are done.
    if not board.ep_square or self.variant.captures_compulsory:
        return v
    # Now handle en passant: the tables ignore the ep square, so the true
    # value may come from an ep capture instead.
    v1 = -3
    # Look at all legal en passant captures.
    for move in board.generate_legal_ep():
        board.push(move)
        try:
            # Value from the opponent's perspective; negate for ours.
            v0_plus, _ = self.probe_ab(board, -2, 2)
            v0 = -v0_plus
        finally:
            board.pop()
        if v0 > v1:
            v1 = v0
    if v1 > -3:
        if v1 >= v:
            v = v1
        elif v == 0:
            # If there is not at least one legal non-en-passant move we are
            # forced to play the losing en passant capture.
            if all(board.is_en_passant(move) for move in board.generate_legal_moves()):
                v = v1
    return v
|
def get_vmpolicy_macaddr_input_last_rcvd_instance ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
get_vmpolicy_macaddr = ET . Element ( "get_vmpolicy_macaddr" )
config = get_vmpolicy_macaddr
input = ET . SubElement ( get_vmpolicy_macaddr , "input" )
last_rcvd_instance = ET . SubElement ( input , "last-rcvd-instance" )
last_rcvd_instance . text = kwargs . pop ( 'last_rcvd_instance' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def sed(self, name, **kwargs):
    """Generate a spectral energy distribution (SED) for a source.

    This function will fit the normalization of the source in each
    energy bin.  By default the SED will be generated with the
    analysis energy bins but a custom binning can be defined with the
    ``loge_bins`` parameter.

    Parameters
    ----------
    name : str
        Source name.
    prefix : str
        Optional string that will be prepended to all output files
        (FITS and rendered images).
    loge_bins : `~numpy.ndarray`
        Sequence of energies in log10(E/MeV) defining the edges of
        the energy bins.  If this argument is None then the
        analysis energy bins will be used.  The energies in this
        sequence must align with the bin edges of the underlying
        analysis instance.
    {options}
    optimizer : dict
        Dictionary that overrides the default optimizer settings.

    Returns
    -------
    sed : dict
        Dictionary containing output of the SED analysis.
    """

    timer = Timer.create(start=True)
    # Resolve the user-supplied name to the canonical ROI source name.
    name = self.roi.get_source_by_name(name).name
    # Create schema for method configuration
    schema = ConfigSchema(self.defaults['sed'], optimizer=self.defaults['optimizer'])
    schema.add_option('prefix', '')
    schema.add_option('outfile', None, '', str)
    schema.add_option('loge_bins', None, '', list)
    # Merge instance-level config with per-call keyword overrides.
    config = utils.create_dict(self.config['sed'], optimizer=self.config['optimizer'])
    config = schema.create_config(config, **kwargs)
    self.logger.info('Computing SED for %s' % name)
    o = self._make_sed(name, **config)
    self.logger.info('Finished SED')
    outfile = config.get('outfile', None)
    if outfile is None:
        # Derive a default filename from the prefix and the source name.
        outfile = utils.format_filename(self.workdir, 'sed', prefix=[config['prefix'], name.lower().replace(' ', '_')])
    else:
        # Strip any extension; '.fits'/'.npy' are appended below.
        outfile = os.path.join(self.workdir, os.path.splitext(outfile)[0])
    o['file'] = None
    if config['write_fits']:
        o['file'] = os.path.basename(outfile) + '.fits'
        self._make_sed_fits(o, outfile + '.fits', **config)
    if config['write_npy']:
        np.save(outfile + '.npy', o)
    if config['make_plots']:
        self._plotter.make_sed_plots(o, **config)
    self.logger.info('Execution time: %.2f s', timer.elapsed_time)
    return o
|
def grantham_score(ref_aa, mut_aa):
    """Score an amino-acid substitution with the Grantham distance matrix.

    Adapted from:
    https://github.com/ashutoshkpandey/Annotation/blob/master/Grantham_score_calculator.py

    :param ref_aa: single-letter code of the reference residue
    :param mut_aa: single-letter code of the mutated residue
    :return: tuple ``(score, category)``; category is one of 'Unknown',
        'Conservative', 'Moderately Conservative', 'Moderately Radical'
        or 'Radical'
    """

    # Upper-triangular Grantham distance matrix.  Mirrored entries are stored
    # as 0 and resolved by swapping the key order below.
    grantham = {'S': {'R': 110, 'L': 145, 'P': 74, 'T': 58, 'A': 99, 'V': 124, 'G': 56, 'I': 142, 'F': 155, 'Y': 144, 'C': 112, 'H': 89, 'Q': 68, 'N': 46, 'K': 121, 'D': 65, 'E': 80, 'M': 135, 'W': 177}, 'R': {'R': 0, 'L': 102, 'P': 103, 'T': 71, 'A': 112, 'V': 96, 'G': 125, 'I': 97, 'F': 97, 'Y': 77, 'C': 180, 'H': 29, 'Q': 43, 'N': 86, 'K': 26, 'D': 96, 'E': 54, 'M': 91, 'W': 101, 'S': 0}, 'L': {'R': 0, 'L': 0, 'P': 98, 'T': 92, 'A': 96, 'V': 32, 'G': 138, 'I': 5, 'F': 22, 'Y': 36, 'C': 198, 'H': 99, 'Q': 113, 'N': 153, 'K': 107, 'D': 172, 'E': 138, 'M': 15, 'W': 61, 'S': 0}, 'P': {'R': 0, 'L': 0, 'P': 0, 'T': 38, 'A': 27, 'V': 68, 'G': 42, 'I': 95, 'F': 114, 'Y': 110, 'C': 169, 'H': 77, 'Q': 76, 'N': 91, 'K': 103, 'D': 108, 'E': 93, 'M': 87, 'W': 147, 'S': 0}, 'T': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 58, 'V': 69, 'G': 59, 'I': 89, 'F': 103, 'Y': 92, 'C': 149, 'H': 47, 'Q': 42, 'N': 65, 'K': 78, 'D': 85, 'E': 65, 'M': 81, 'W': 128, 'S': 0}, 'A': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 64, 'G': 60, 'I': 94, 'F': 113, 'Y': 112, 'C': 195, 'H': 86, 'Q': 91, 'N': 111, 'K': 106, 'D': 126, 'E': 107, 'M': 84, 'W': 148, 'S': 0}, 'V': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 109, 'I': 29, 'F': 50, 'Y': 55, 'C': 192, 'H': 84, 'Q': 96, 'N': 133, 'K': 97, 'D': 152, 'E': 121, 'M': 21, 'W': 88, 'S': 0}, 'G': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 135, 'F': 153, 'Y': 147, 'C': 159, 'H': 98, 'Q': 87, 'N': 80, 'K': 127, 'D': 94, 'E': 98, 'M': 127, 'W': 184, 'S': 0}, 'I': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 21, 'Y': 33, 'C': 198, 'H': 94, 'Q': 109, 'N': 149, 'K': 102, 'D': 168, 'E': 134, 'M': 10, 'W': 61, 'S': 0}, 'F': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 22, 'C': 205, 'H': 100, 'Q': 116, 'N': 158, 'K': 102, 'D': 177, 'E': 140, 'M': 28, 'W': 40, 'S': 0}, 'Y': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 194, 'H': 83, 'Q': 99, 'N': 143, 'K': 85, 'D': 160, 'E': 122, 'M': 36, 'W': 37, 'S': 0}, 'C': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 174, 'Q': 154, 'N': 139, 'K': 202, 'D': 154, 'E': 170, 'M': 196, 'W': 215, 'S': 0}, 'H': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 24, 'N': 68, 'K': 32, 'D': 81, 'E': 40, 'M': 87, 'W': 115, 'S': 0}, 'Q': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0, 'N': 46, 'K': 53, 'D': 61, 'E': 29, 'M': 101, 'W': 130, 'S': 0}, 'N': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0, 'N': 0, 'K': 94, 'D': 23, 'E': 42, 'M': 142, 'W': 174, 'S': 0}, 'K': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0, 'N': 0, 'K': 0, 'D': 101, 'E': 56, 'M': 95, 'W': 110, 'S': 0}, 'D': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0, 'N': 0, 'K': 0, 'D': 0, 'E': 45, 'M': 160, 'W': 181, 'S': 0}, 'E': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0, 'N': 0, 'K': 0, 'D': 0, 'E': 0, 'M': 126, 'W': 152, 'S': 0}, 'M': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0, 'N': 0, 'K': 0, 'D': 0, 'E': 0, 'M': 0, 'W': 67, 'S': 0}, 'W': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0, 'N': 0, 'K': 0, 'D': 0, 'E': 0, 'M': 0, 'W': 0, 'S': 0}}
    score = 0
    # Residues outside the 20-letter alphabet cannot be scored.
    if ref_aa not in grantham or mut_aa not in grantham:
        log.error('{} to {}: a residue is not in the Grantham matrix'.format(ref_aa, mut_aa))
        return score, 'Unknown'
    if ref_aa == mut_aa:
        # Identical residues: distance is 0 by definition.
        return score, 'Conservative'
    else:
        if int(grantham[ref_aa][mut_aa]) != 0:
            score += int(grantham[ref_aa][mut_aa])
        else:
            # A stored 0 means the real distance lives under the mirrored key order.
            score += int(grantham[mut_aa][ref_aa])
    # Classification thresholds: >150 radical down to <=50 conservative
    # (binning as used by the upstream script — TODO confirm citation).
    if score > 150:
        return score, "Radical"
    elif 150 >= score > 100:
        return score, "Moderately Radical"
    elif 100 >= score > 50:
        return score, "Moderately Conservative"
    else:
        return score, "Conservative"
|
def get_user(self, userPk):
    """Returns the user specified with the user's Pk or UUID"""

    response = self._request('user/' + str(userPk))
    if not response:
        return None
    # Set base properties, then copy the payload fields onto the user.
    user = User()
    user.pk = user.id = userPk
    user.__dict__.update(response.json())
    return user
|
def to_json_dict(self, filter_fcn=None):
    """Create a dict with Entity properties for json encoding.

    Subclasses may override this when the standard serialization does not
    work.  OneToOne field values are serialized by calling their own
    ``to_json_dict``; OneToMany values become a list built the same way.

    Fields can be filtered with ``filter_fcn``.  The callable receives the
    field's name as first parameter and the field itself as second, and
    must return True if the field's value should be included in the dict.
    If not provided, no field is filtered.

    :type filter_fcn: callable
    :return: dct
    """

    fields = self.get_fields()
    values = self.get_values()
    selected = fields.items()
    if filter_fcn is not None:
        selected = (pair for pair in selected if filter_fcn(pair[0], pair[1]))
    json_dct = {}
    for field_name, field in selected:
        if field_name not in values:
            continue
        value = values[field_name]
        if value is None:
            # A OneToOne field may legitimately hold None; emit it as-is
            # instead of calling value.to_json_dict() on None below.
            json_dct[field_name] = None
        elif isinstance(field, OneToOneField):
            json_dct[field_name] = value.to_json_dict()
        elif isinstance(field, OneToManyField):
            json_dct[field_name] = [entity.to_json_dict() for entity in value]
        else:
            json_dct[field_name] = to_json_serializable(value)
    return json_dct
|
def get_dpi():
    """Return the screen resolution as a list of (horizontal, vertical) DPI.

    Combines the display size in pixels with the physical display size in
    millimetres reported by wx.

    :return: list of two floats, dots-per-inch along each axis
    """

    def pxmm_2_dpi(pixels, length_mm):
        # 1 inch = 25.4 mm (the original used 25.6, which is wrong).
        # Python 2 tuple-parameter syntax is also replaced: the old
        # ``def f((a, b))`` form is a SyntaxError on Python 3.
        return pixels * 25.4 / length_mm

    return [pxmm_2_dpi(px, mm) for px, mm in zip(wx.GetDisplaySize(), wx.GetDisplaySizeMM())]
|
def change_password(self, old_password, new_password):
    """Update the user's password to a new one."""

    payload = {"old_password": old_password, "password": new_password}
    body = self._formdata(payload, ["old_password", "password"])
    content = self._fetch("/current_user/password", method="POST", body=body)
    return FastlyUser(self, content)
|
def add(self, *l):
    '''add inner to outer

    Args:
        *l: element that is passed into Inner init'''

    # Flatten arbitrarily nested arguments, wrap each in an Inner, and
    # attach them to this container's element list.
    for element in flatten(l):
        self._add([self.Inner(element)], self.l)
|
def read_http_status_codes(filename='HTTP_1.1 Status Code Definitions.html'):
    r"""Parse the HTTP documentation HTML page in *filename*.

    Extracts every three-digit status code found in level-3 markdown
    headings and maps both the string form and the int form of the code
    to its name.

    Return:
        code_dict: e.g. {'200': 'OK', 200: 'OK', ...}
    """

    lines = read_text(filename)
    code_dict = {}
    for level, line in get_markdown_levels(lines, 3):
        matches = re.findall(r'\s(\d\d\d)[\W]+([-\w\s]*)', line)
        # Fall back to a sentinel (0, '') when the heading has no code.
        code, name = matches[0] if matches else (0, '')
        if 100 <= int(code) < 1000:
            # Register the code under both its str and int forms.
            code_dict[code] = name
            code_dict[int(code)] = name
    return code_dict
|
def close(self):
    """Close the plot and release its memory."""

    from matplotlib.pyplot import close
    # Walk the axes in reverse and reset their scales to linear before
    # clearing them — works around matplotlib/matplotlib#9970.
    for axis in self.axes[::-1]:
        axis.set_xscale('linear')
        axis.set_yscale('linear')
        axis.cla()
    # Finally close the figure itself.
    close(self)
|
def assign_default_storage_policy_to_datastore(profile_manager, policy, datastore):
    '''Assigns a storage policy as the default policy to a datastore.

    profile_manager
        Reference to the profile manager.

    policy
        Reference to the policy to be assigned.

    datastore
        Reference to the datastore.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on runtime faults.'''

    # A datastore is addressed in the PBM API as a placement hub.
    placement_hub = pbm.placement.PlacementHub(hubId=datastore._moId, hubType='Datastore')
    log.trace('placement_hub = %s', placement_hub)
    try:
        profile_manager.AssignDefaultRequirementProfile(policy.profileId, [placement_hub])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise VMwareApiError('Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise VMwareRuntimeError(exc.msg)
|
def example_list(a, args):
    """list topics and cluster metadata

    :param a: client exposing ``list_topics`` (e.g. an AdminClient).
    :param args: optional list whose first element selects what to print
        ("all", "brokers" or "topics"); defaults to "all".
    """

    what = args[0] if args else "all"
    md = a.list_topics(timeout=10)
    print("Cluster {} metadata (response from broker {}):".format(md.cluster_id, md.orig_broker_name))
    if what in ("all", "brokers"):
        print(" {} brokers:".format(len(md.brokers)))
        for b in iter(md.brokers.values()):
            if b.id == md.controller_id:
                print("  {}  (controller)".format(b))
            else:
                print("  {}".format(b))
    if what not in ("all", "topics"):
        return
    print(" {} topics:".format(len(md.topics)))
    for t in iter(md.topics.values()):
        errstr = ": {}".format(t.error) if t.error is not None else ""
        print("  \"{}\" with {} partition(s){}".format(t, len(t.partitions), errstr))
        for p in iter(t.partitions.values()):
            errstr = ": {}".format(p.error) if p.error is not None else ""
            # BUG FIX: the original format string had four placeholders for
            # five arguments, so the partition error string was never printed.
            print("    partition {} leader: {}, replicas: {}, isrs: {}{}".format(p.id, p.leader, p.replicas, p.isrs, errstr))
|
def get_relative_breadcrumbs(self):
    """get the breadcrumbs with each path expressed relative to the basedir"""

    base = self.basedir
    return [(relpath(path, base), entry) for path, entry in self.breadcrumbs]
|
def find_complement(am, sites=None, bonds=None, asmask=False):
    r"""Finds the complementary sites (or bonds) to a given set of inputs.

    Parameters
    ----------
    am : scipy.sparse matrix
        The adjacency matrix of the network.
    sites : array_like (optional)
        The set of sites for which the complement is sought.
    bonds : array_like (optional)
        The set of bonds for which the complement is sought.
    asmask : boolean
        If ``True`` the result is a boolean mask of the correct length with
        ``True`` marking the complements; the default ``False`` returns a
        list of indices instead.

    Returns
    -------
    An array of the indices of the sites (or bonds) that are not part of
    the input list.

    Notes
    -----
    Either ``sites`` or ``bonds`` must be specified (but not both).
    """

    if (sites is not None) and (bonds is not None):
        raise Exception('Only one of sites or bonds can be specified')
    if sites is not None:
        inds = sp.unique(sites)
        # One entry per site (row of the adjacency matrix).
        N = am.shape[0]
    elif bonds is not None:
        inds = sp.unique(bonds)
        # Each bond is stored twice (symmetric matrix), hence nnz / 2.
        N = int(am.nnz / 2)
    else:
        raise Exception('Either sites or bonds must be specified')
    mask = sp.ones(shape=N, dtype=bool)
    mask[inds] = False
    return mask if asmask else sp.arange(N)[mask]
|
def _init_idxs_strpat ( self , usr_hdrs ) :
"""List of indexes whose values will be strings ."""
|
strpat = self . strpat_hdrs . keys ( )
self . idxs_strpat = [ Idx for Hdr , Idx in self . hdr2idx . items ( ) if Hdr in usr_hdrs and Hdr in strpat ]
|
def hash(self, val):
    """Calculate hash from value (must be bytes).

    :param val: input bytes to hash.
    :return: the digest produced by ``self.compute``, guaranteed bytes.
    :raises _TypeError: if ``val`` is not bytes, or if ``compute`` returns
        something other than bytes.
    """

    if not isinstance(val, bytes):
        raise _TypeError("val", "bytes", val)
    rv = self.compute(val)
    # BUG FIX: the original re-checked ``val`` here, so a non-bytes result
    # from compute() was never caught; the result ``rv`` must be validated.
    if not isinstance(rv, bytes):
        raise _TypeError("rv", "bytes", rv)
    return rv
|
def symbol(self, id, bp=0):
    """Adds symbol 'id' to symbol_table if it does not exist already;
    if it does, merely updates its binding power and returns its
    symbol class."""

    if id in self.symbol_table:
        s = self.symbol_table[id]
        # Existing symbol: keep the strongest binding power seen so far.
        s.lbp = max(bp, s.lbp)
    else:
        # New symbol: mint a fresh subclass of the base symbol class.
        class s(self.symbol_base):
            pass
        s.id = id
        s.lbp = bp
        self.symbol_table[id] = s
    return s
|
def parse_union_type_definition(lexer: Lexer) -> UnionTypeDefinitionNode:
    """UnionTypeDefinition"""

    start_token = lexer.token
    desc = parse_description(lexer)
    expect_keyword(lexer, "union")
    union_name = parse_name(lexer)
    union_directives = parse_directives(lexer, True)
    member_types = parse_union_member_types(lexer)
    return UnionTypeDefinitionNode(
        description=desc,
        name=union_name,
        directives=union_directives,
        types=member_types,
        loc=loc(lexer, start_token),
    )
|
def multiplexer(f=None, nruns=1, nprocs=1, seeding=None, **args):
    """Evaluate a function for different parameters, optionally in parallel.

    Parameters
    ----------
    f : function
        function f to evaluate; must take only kw arguments as inputs
    nruns : int
        number of evaluations of f for each set of arguments
    nprocs : int
        if <= 0, set to the actual number of physical processors plus nprocs
        (i.e. -1 => number of cpus on your machine minus one).
        Default is 1, which means no multiprocessing.
    seeding : bool (default: True if nruns > 1, False otherwise)
        whether we need to provide different seeds for RNGs
    **args :
        keyword arguments for function f.

    Note
    ----
    see documentation of `utils`
    """

    if not callable(f):
        raise ValueError('multiplexer: function f missing, or not callable')
    if seeding is None:
        seeding = nruns > 1
    # Partition the extra keyword arguments destined for f by kind:
    # lists enumerate values, dicts map, everything else is fixed.
    fixedargs = {}
    listargs = {'run': list(range(nruns))}
    dictargs = {}
    for key, val in args.items():
        if isinstance(val, list):
            listargs[key] = val
        elif isinstance(val, dict):
            dictargs[key] = val
        else:
            fixedargs[key] = val
    # Cartesian product of all varying arguments.
    inputs, outputs = cartesian_args(fixedargs, listargs, dictargs)
    # 'run' is only an id for the output, not an argument of f.
    for ip in inputs:
        ip.pop('run')
    # Hand out a distinct RNG seed per evaluation when requested.
    if seeding:
        for ip, op, seed in zip(inputs, outputs, distinct_seeds(len(inputs))):
            ip['seed'] = seed
            op['seed'] = seed
    # The actual work happens here.
    return distribute_work(f, inputs, outputs, nprocs=nprocs)
|
def remove(self, handler_id=None):
    """Remove a previously added handler and stop sending logs to its sink.

    Parameters
    ----------
    handler_id : int or None
        The id of the sink to remove, as returned by the ``add`` method.
        If None, all handlers are removed.  The pre-configured handler is
        guaranteed to have the index 0.

    Raises
    ------
    ValueError
        If ``handler_id`` is not None but there is no active handler with
        such id.
    """

    with self._lock:
        # Work on a copy so a failure leaves the live table untouched.
        handlers = self._handlers.copy()
        if handler_id is None:
            for handler in handlers.values():
                handler.stop()
            handlers.clear()
        else:
            if handler_id not in handlers:
                raise ValueError("There is no existing handler with id '%s'" % handler_id)
            handlers.pop(handler_id).stop()
        # Recompute the minimum level across the surviving handlers.
        remaining_levels = (h.levelno for h in handlers.values())
        self.__class__._min_level = min(remaining_levels, default=float("inf"))
        self.__class__._handlers = handlers
|
def week_to_datetime(iso_year, iso_week):
    "datetime instance for the start of the given ISO year and week"

    # Convert the ISO (year, week) pair to a Gregorian date, then anchor
    # it at midnight.
    day = iso_to_gregorian(iso_year, iso_week, 0)
    return datetime.datetime.combine(day, datetime.time(0))
|
def _verify_views():
    '''Verify that you have the views you need. This can be disabled by
    adding couchbase.skip_verify_views: True in config'''

    global VERIFIED_VIEWS
    # Already verified in this process, or explicitly disabled via config.
    if VERIFIED_VIEWS or __opts__.get('couchbase.skip_verify_views', False):
        return
    cb_ = _get_connection()
    # Expected design document: 'jids' emits top-level job documents
    # (ids without '/'), 'jid_returns' emits '<jid>/<minion>' documents
    # keyed by jid with the minion id as value.
    ddoc = {'views': {'jids': {'map': "function (doc, meta) { if (meta.id.indexOf('/') === -1 && doc.load){ emit(meta.id, null) } }"}, 'jid_returns': {'map': "function (doc, meta) { if (meta.id.indexOf('/') > -1){ key_parts = meta.id.split('/'); emit(key_parts[0], key_parts[1]); } }"}}}
    try:
        curr_ddoc = cb_.design_get(DESIGN_NAME, use_devmode=False).value
        if curr_ddoc['views'] == ddoc['views']:
            # Existing design document already matches: nothing to do.
            VERIFIED_VIEWS = True
            return
    except couchbase.exceptions.HTTPError:
        # Design document does not exist yet; fall through and create it.
        pass
    cb_.design_create(DESIGN_NAME, ddoc, use_devmode=False)
    VERIFIED_VIEWS = True
|
def to_pandas(self, wrap=False, **kwargs):
    """Convert to pandas DataFrame. Execute at once.

    :param wrap: if True, wrap the pandas DataFrame into a PyODPS DataFrame
    :return: pandas DataFrame
    """

    # Fail early with a friendly error when pandas is missing.
    try:
        import pandas as pd
    except ImportError:
        raise DependencyNotInstalledError('to_pandas requires `pandas` library')

    def postprocess(result):
        values = result.values
        if not wrap:
            return values
        from .. import DataFrame
        return DataFrame(values, schema=self.schema)

    return self.execute(wrapper=postprocess, **kwargs)
|
def _chunk_write ( chunk , local_file , progress ) :
"""Write a chunk to file and update the progress bar ."""
|
local_file . write ( chunk )
if progress is not None :
progress . update ( len ( chunk ) )
|
def remove(path, force=False):
    '''Remove the named file or directory

    Args:
        path (str): The path to the file or directory to remove.
        force (bool): Remove even if marked Read-Only. Default is False

    Returns:
        bool: True if successful, False if unsuccessful

    CLI Example:

    .. code-block:: bash

        salt '*' file.remove C:\\Temp'''

    # This must be a recursive function in windows to properly deal with
    # Symlinks. The shutil.rmtree function will remove the contents of
    # the Symlink source in windows.
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute: {0}'.format(path))
    # Does the file/folder exists
    if not os.path.exists(path) and not is_link(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))
    # Remove ReadOnly Attribute
    if force:
        # Get current file attributes, saved so they can be restored below
        # if the delete fails.
        file_attributes = win32api.GetFileAttributes(path)
        win32api.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL)
    try:
        if os.path.isfile(path):
            # A file and a symlinked file are removed the same way
            os.remove(path)
        elif is_link(path):
            # If it's a symlink directory, use the rmdir command
            os.rmdir(path)
        else:
            for name in os.listdir(path):
                item = '{0}\\{1}'.format(path, name)
                # If it's a normal directory, recurse to remove it's contents
                remove(item, force)
            # rmdir will work now because the directory is empty
            os.rmdir(path)
    except (OSError, IOError) as exc:
        if force:
            # Reset attributes to the original if delete fails.
            win32api.SetFileAttributes(path, file_attributes)
        raise CommandExecutionError('Could not remove \'{0}\': {1}'.format(path, exc))
    return True
|
def phase_parents_by_transmission(g, window_size):
    """Phase parent genotypes from a trio or cross, given progeny genotypes
    already phased by Mendelian transmission.

    Parameters
    ----------
    g : GenotypeArray
        Genotype array, with parents as first two columns and progeny as
        remaining columns, where progeny genotypes are already phased.
    window_size : int
        Number of previous heterozygous sites to include when phasing each
        parent. A number somewhere between 10 and 100 may be appropriate,
        depending on levels of heterozygosity and quality of data.

    Returns
    -------
    g : GenotypeArray
        Genotype array with parents phased where possible.
    """

    # setup: validate type, dtype, ploidy, phasing state and sample count
    check_type(g, GenotypeArray)
    check_dtype(g.values, 'i1')
    check_ploidy(g.ploidy, 2)
    if g.is_phased is None:
        raise ValueError('genotype array must first have progeny phased by transmission')
    # Need at least two parents and one progeny sample.
    check_min_samples(g.n_samples, 3)
    # run the phasing
    # Ensure the buffers handed to the optimized routine are safe to take
    # memoryviews over; the routine mutates g in place.
    g._values = memoryview_safe(g.values)
    g._is_phased = memoryview_safe(g.is_phased)
    _opt_phase_parents_by_transmission(g.values, g.is_phased.view('u1'), window_size)
    # outputs: the same (mutated) array is returned for convenience
    return g
|
def get_netconf_client_capabilities_output_session_session_id(self, **kwargs):
    """Auto Generated Code"""

    # Build the RPC payload:
    # get_netconf_client_capabilities/output/session/session-id.
    # (The original created a throwaway "config" element and immediately
    # rebound the name; the get_netconf_client_capabilities element is the
    # root passed to the callback.)
    config = ET.Element("get_netconf_client_capabilities")
    output_node = ET.SubElement(config, "output")
    session_node = ET.SubElement(output_node, "session")
    sid = ET.SubElement(session_node, "session-id")
    sid.text = kwargs.pop('session_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def vector_cross(vector1, vector2):
    """Computes the cross-product of the input vectors.

    2-D inputs are promoted to 3-D (z = 0, components cast to float)
    before the product is taken; 3-D inputs are used as-is.

    :param vector1: input vector 1
    :type vector1: list, tuple
    :param vector2: input vector 2
    :type vector2: list, tuple
    :return: result of the cross product
    :rtype: list
    :raises TypeError: if an input does not support ``len()``
    :raises ValueError: if an input is None/empty or not 2- or 3-dimensional
    """

    try:
        if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0:
            raise ValueError("Input vectors cannot be empty")
    except TypeError as e:
        # FIX: chain the underlying error instead of printing it to stdout.
        raise TypeError("Input must be a list or tuple") from e
    if not 1 < len(vector1) <= 3 or not 1 < len(vector2) <= 3:
        raise ValueError("The input vectors should contain 2 or 3 elements")
    # Convert 2-D to 3-D, if necessary
    if len(vector1) == 2:
        v1 = [float(v) for v in vector1] + [0.0]
    else:
        v1 = vector1
    if len(vector2) == 2:
        v2 = [float(v) for v in vector2] + [0.0]
    else:
        v2 = vector2
    # Compute and return the cross product of the input vectors
    return [
        (v1[1] * v2[2]) - (v1[2] * v2[1]),
        (v1[2] * v2[0]) - (v1[0] * v2[2]),
        (v1[0] * v2[1]) - (v1[1] * v2[0]),
    ]
|
def _add_chrome_arguments(self, options):
    """Add Chrome arguments from properties file

    :param options: chrome options object
    """

    try:
        # Each entry of the [ChromeArguments] section becomes one
        # '<name>=<value>'-style argument; the '=' prefix is only added
        # when a value is present.
        for pref, pref_value in dict(self.config.items('ChromeArguments')).items():
            pref_value = '={}'.format(pref_value) if pref_value else ''
            self.logger.debug("Added chrome argument: %s%s", pref, pref_value)
            # NOTE(review): _convert_property_type receives the already
            # '='-prefixed string, which looks suspicious — confirm it is
            # meant to operate on '=value' rather than the raw value.
            options.add_argument('{}{}'.format(pref, self._convert_property_type(pref_value)))
    except NoSectionError:
        # No ChromeArguments section configured: nothing to add.
        pass
|
def getGeometry(self, ra=None, dec=None):
    """Return an array of rectangles that represent the 'ra,dec' corners of the FOV"""

    import math, ephem
    ccds = []
    # Fall back to the pointing stored on this object.
    if ra is None:
        ra = self.ra
    if dec is None:
        dec = self.dec
    # Side effect: the pointing used is normalized and written back to self.
    self.ra = ephem.hours(ra)
    self.dec = ephem.degrees(dec)
    for geo in self.geometry[self.camera]:
        # Offsets in self.geometry appear to be in degrees while ra/dec are
        # used as radians here (math.radians conversion) — TODO confirm the
        # expected units of the ra/dec arguments.
        ycen = math.radians(geo["dec"]) + dec
        # RA offset is scaled by cos(dec) to account for spherical geometry.
        xcen = math.radians(geo["ra"]) / math.cos(ycen) + ra
        dy = math.radians(geo["ddec"])
        dx = math.radians(geo["dra"] / math.cos(ycen))
        # Rectangle stored as [x_min, y_min, x_max, y_max] around (xcen, ycen).
        ccds.append([xcen - dx / 2.0, ycen - dy / 2.0, xcen + dx / 2.0, ycen + dy / 2.0])
    return ccds
|
def create_empty(self, name=None, renderers=None, RootNetworkList=None, verbose=False):
    """Create a new, empty network.  The new network may be created as part
    of an existing network collection or a new network collection.

    :param name (string, optional): Enter the name of the new network.
    :param renderers (string, optional): Select the renderer to use for the
        new network view.  By default, the standard Cytoscape 2D renderer
        (Ding) will be used = ['']
    :param RootNetworkList (string, optional): Choose the network collection
        the new network should be part of.  If no network collection is
        selected, a new network collection is created.
        = ['-- Create new network collection --', 'cy:command_documentation_generation']
    :param verbose: print more
    """

    params = set_param(["name", "renderers", "RootNetworkList"],
                       [name, renderers, RootNetworkList])
    return api(url=self.__url + "/create empty", PARAMS=params, method="POST", verbose=verbose)
|
def choose_raw_dataset(currently=""):
    """Let the user choose a raw dataset. Return the absolute path."""

    folder = os.path.join(get_project_root(), "raw-datasets")
    files = [os.path.join(folder, name)
             for name in os.listdir(folder) if name.endswith(".pickle")]
    default = -1
    # Single pass: remember the currently-selected entry (if any) and print
    # each candidate, highlighting the current selection in bold.
    for index, filename in enumerate(files):
        base = os.path.basename(filename)
        if os.path.basename(currently) == base:
            default = index
        fmt = "\033[1m[%i]\033[0m\t%s" if index == default else "[%i]\t%s"
        print(fmt % (index, base))
    choice = input_int_default("Choose a dataset by number: ", default)
    return files[choice]
|
def heirarchical_matched_filter_and_cluster(self, segnum, template_norm, window):
    """Returns the complex snr timeseries, normalization of the complex snr,
    the correlation vector frequency series, the list of indices of the
    triggers, and the snr values at the trigger locations. Returns empty
    lists for these for points that are not above the threshold.

    Calculates the matched filter, threshold, and cluster.

    Parameters
    ----------
    segnum : int
        Index into the list of segments at MatchedFilterControl construction
    template_norm : float
        The htilde, template normalization factor.
    window : int
        Size of the window over which to cluster triggers, in samples

    Returns
    -------
    snr : TimeSeries
        A time series containing the complex snr at the reduced sample rate.
    norm : float
        The normalization of the complex snr.
    correlation : FrequencySeries
        A frequency series containing the correlation vector.
    idx : Array
        List of indices of the triggers.
    snrv : Array
        The snr values at the trigger locations.
    """

    from pycbc.fft.fftw_pruned import pruned_c2cifft, fft_transpose
    htilde = self.htilde
    stilde = self.segments[segnum]
    # Overall SNR normalization for this template/segment combination.
    norm = (4.0 * stilde.delta_f) / sqrt(template_norm)
    # First pass: correlate and inverse-FFT only over the reduced
    # (downsampled) frequency band.
    correlate(htilde[self.kmin_red:self.kmax_red], stilde[self.kmin_red:self.kmax_red], self.corr_mem[self.kmin_red:self.kmax_red])
    ifft(self.corr_mem, self.snr_mem)
    if not hasattr(stilde, 'red_analyze'):
        # Cache the analysis slice mapped into the reduced sample rate.
        # NOTE(review): '/' is integer division only under Python 2; under
        # Python 3 these slice bounds would be floats and slicing would fail.
        # Confirm the intended interpreter version.
        stilde.red_analyze = slice(stilde.analyze.start / self.downsample_factor, stilde.analyze.stop / self.downsample_factor)
    # Threshold at reduced resolution, with the threshold relaxed by the
    # upsample safety factor so borderline triggers survive to the second pass.
    idx_red, snrv_red = events.threshold(self.snr_mem[stilde.red_analyze], self.snr_threshold / norm * self.upsample_threshold)
    if len(idx_red) == 0:
        return [], None, [], [], []
    idx_red, _ = events.cluster_reduce(idx_red, snrv_red, window / self.downsample_factor)
    logging.info("%s points above threshold at reduced resolution" % (str(len(idx_red)),))
    # The fancy upsampling is here
    if self.upsample_method == 'pruned_fft':
        # Map reduced-rate candidate indices back to the full sample rate and
        # widen each one to cover the surrounding full-rate samples.
        idx = (idx_red + stilde.analyze.start / self.downsample_factor) * self.downsample_factor
        idx = smear(idx, self.downsample_factor)
        # cache transposed versions of htilde and stilde
        if not hasattr(self.corr_mem_full, 'transposed'):
            self.corr_mem_full.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
        if not hasattr(htilde, 'transposed'):
            htilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
            htilde.transposed[self.kmin_full:self.kmax_full] = htilde[self.kmin_full:self.kmax_full]
            htilde.transposed = fft_transpose(htilde.transposed)
        if not hasattr(stilde, 'transposed'):
            stilde.transposed = zeros(len(self.corr_mem_full), dtype=self.dtype)
            stilde.transposed[self.kmin_full:self.kmax_full] = stilde[self.kmin_full:self.kmax_full]
            stilde.transposed = fft_transpose(stilde.transposed)
        correlate(htilde.transposed, stilde.transposed, self.corr_mem_full.transposed)
        # Recompute the SNR only at the candidate indices via a pruned FFT.
        snrv = pruned_c2cifft(self.corr_mem_full.transposed, self.inter_vec, idx, pretransposed=True)
        idx = idx - stilde.analyze.start
        # Second threshold at the full-rate SNR, without the safety factor.
        idx2, snrv = events.threshold(Array(snrv, copy=False), self.snr_threshold / norm)
        if len(idx2) > 0:
            # Fill in the remaining high-frequency band of the correlation
            # vector for any downstream consumers.
            correlate(htilde[self.kmax_red:self.kmax_full], stilde[self.kmax_red:self.kmax_full], self.corr_mem_full[self.kmax_red:self.kmax_full])
            idx, snrv = events.cluster_reduce(idx[idx2], snrv, window)
        else:
            idx, snrv = [], []
        logging.info("%s points at full rate and clustering" % len(idx))
        return self.snr_mem, norm, self.corr_mem_full, idx, snrv
    else:
        raise ValueError("Invalid upsample method")
|
def _create_connection(self):
    """Establish a connection to the broker, retrying on AMQP errors.

    Keeps trying with a capped linear backoff (2s per attempt, max 30s)
    until the connection succeeds, the consumer is stopped, the user
    interrupts, or ``self.max_retries`` is exceeded.

    :raises Exception: when the maximum number of retries is reached.
    """
    attempt = 0
    while not self._stopped.is_set():
        attempt += 1
        try:
            self._connection = Connection(self.hostname, self.username, self.password)
            return
        except amqpstorm.AMQPError as why:
            LOGGER.warning(why)
            if self.max_retries and attempt > self.max_retries:
                raise Exception('max number of retries reached')
            # Back off a little longer each attempt, capped at 30 seconds.
            time.sleep(min(attempt * 2, 30))
        except KeyboardInterrupt:
            return
|
def _mom(self, key, left, right, cache):
    """Statistical moments of a product of two components.

    Example:
        >>> print(numpy.around(chaospy.Uniform().mom([0, 1, 2, 3]), 4))
        [1.     0.5    0.3333 0.25  ]
        >>> print(numpy.around(Mul(chaospy.Uniform(), 2).mom([0, 1, 2, 3]), 4))
        [1.     1.     1.3333 2.    ]
        >>> print(numpy.around(Mul(2, chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
        [1.     1.     1.3333 2.    ]
        >>> print(numpy.around(Mul(chaospy.Uniform(), chaospy.Uniform()).mom([0, 1, 2, 3]), 4))
        [1.     0.25   0.1111 0.0625]
        >>> print(numpy.around(Mul(2, 2).mom([0, 1, 2, 3]), 4))
        [ 1.  4. 16. 64.]

    :param key: moment order(s) to evaluate.
    :param left: left factor (a Dist or a constant array-like).
    :param right: right factor (a Dist or a constant array-like).
    :param cache: memoization cache forwarded to moment evaluation.
    :raises evaluation.DependencyError: if the two factors are dependent,
        in which case the product moment does not factorize.
    """
    if evaluation.get_dependencies(left, right):
        # This operator is a product; the previous message incorrectly
        # said "sum of dependent distributions" (copy-paste error).
        raise evaluation.DependencyError(
            "product of dependent distributions not feasible: "
            "{} and {}".format(left, right))
    # E[(cX)^k] factorizes into E[c^k] * E[X^k] for independent factors;
    # constants are raised to the moment order directly.
    if isinstance(left, Dist):
        left = evaluation.evaluate_moment(left, key, cache=cache)
    else:
        left = (numpy.array(left).T ** key).T
    if isinstance(right, Dist):
        right = evaluation.evaluate_moment(right, key, cache=cache)
    else:
        right = (numpy.array(right).T ** key).T
    return numpy.sum(left * right)
|
def extend(self, iterable):
    """Extend the right side of this GeventDeque by appending
    elements from the iterable argument."""
    self._deque.extend(iterable)
    # Waking waiters is only meaningful once the deque is non-empty.
    if self._deque:
        self.notEmpty.set()
|
def volumes(self):
    """Lazily create and return the Volumes API client.

    Returns:
        Volumes: client bound to this connection (built once, cached).
    """
    client = self.__volumes
    if not client:
        client = Volumes(self.__connection)
        self.__volumes = client
    return client
|
def from_tuples(cls, tuples):
    """Creates a graph from an iterable of tuples describing edges like (source, target, sign)

    Parameters
    ----------
    tuples : iterable[(str, str, int)]
        Tuples describing signed and directed edges

    Returns
    -------
    caspo.core.graph.Graph
        Created object instance
    """
    # Lazily adapt each (source, target, sign) triple into the
    # (source, target, attribute-dict) form the constructor expects.
    # A generator expression replaces the previous Python-2-only
    # `it.imap` with a tuple-unpacking lambda, which is a SyntaxError
    # on Python 3.
    return cls((source, target, {'sign': sign}) for source, target, sign in tuples)
|
def get_additions_status(self, level):
    """Retrieve the current status of a certain Guest Additions run level.

    in level of type :class:`AdditionsRunLevelType`
        Status level to check

    return active of type bool
        Flag whether the status level has been reached or not

    raises :class:`VBoxErrorNotSupported`
        Wrong status level specified.
    """
    if not isinstance(level, AdditionsRunLevelType):
        raise TypeError("level can only be an instance of type AdditionsRunLevelType")
    return self._call("getAdditionsStatus", in_p=[level])
|
def csv(self, output):
    """Output data as excel-compatible CSV."""
    # Local import keeps the dependency lazy; aliased so the stdlib
    # module does not shadow this method's name inside the body.
    import csv as csv_module
    writer = csv_module.writer(self.outfile)
    writer.writerows(output)
|
def _local_map ( match , loc : str = 'lr' ) -> list :
""": param match :
: param loc : str
" l " or " r " or " lr "
turns on / off left / right local area calculation
: return : list
list of the same size as the string + 2
it ' s the local map that counted { and }
list can contain : None or int > = 0
from the left of the operator match :
in ` b } a ` if a : 0 then } : 0 and b : 1
in ` b { a ` if a : 0 then { : 0 and b : - 1 ( None )
from the right of the operator match :
in ` a { b ` if a : 0 then { : 0 and b : 1
in ` a } b ` if a : 0 then } : 0 and b : - 1 ( None )
Map for + 1 ( needed for r ' $ ' ) and - 1 ( needed for r ' ^ ' )
characters is also stored : + 1 - > + 1 , - 1 - > + 2"""
|
s = match . string
map_ = [ None ] * ( len ( s ) + 2 )
if loc == 'l' or loc == 'lr' :
balance = 0
for i in reversed ( range ( 0 , match . start ( ) ) ) :
map_ [ i ] = balance
c , prev = s [ i ] , ( s [ i - 1 ] if i > 0 else '' )
if ( c == '}' or c == '˲' ) and prev != '\\' :
balance += 1
elif ( c == '{' or c == '˱' ) and prev != '\\' :
balance -= 1
if balance < 0 :
break
map_ [ - 1 ] = balance
if loc == 'r' or loc == 'lr' :
balance = 0
for i in range ( match . end ( ) , len ( s ) ) :
map_ [ i ] = balance
c , prev = s [ i ] , s [ i - 1 ]
if ( c == '{' or c == '˱' ) and prev != '\\' :
balance += 1
elif ( c == '}' or c == '˲' ) and prev != '\\' :
balance -= 1
if balance < 0 :
break
map_ [ len ( s ) ] = balance
return map_
|
def disconnect(self, abandon_session=False):
    """Disconnects from the Responsys soap service.

    Calls the service logout method (when the session is expired, or
    unconditionally if ``abandon_session`` is set) and destroys the
    client's session information. Returns True on success, False
    otherwise.

    :param abandon_session: force a logout even for a live session.
    """
    self.connected = False
    if (self.session and self.session.is_expired) or abandon_session:
        try:
            self.logout()
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; logout failure stays best-effort.
        except Exception:
            log.warning('Logout call to responsys failed, session may have not been terminated', exc_info=True)
    # Drop the cached session regardless of whether logout succeeded.
    del self.session
    return True
|
def libvlc_audio_set_mute(p_mi, status):
    '''Set mute status.
    @param p_mi: media player.
    @param status: If status is true then mute, otherwise unmute @warning This function does not always work. If there are no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be unapplicable. Also some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.'''
    # Reuse the cached ctypes binding when available, otherwise build it.
    f = _Cfunctions.get('libvlc_audio_set_mute', None)
    if not f:
        f = _Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
                       None, MediaPlayer, ctypes.c_int)
    return f(p_mi, status)
|
def backup():
    """zips into db_backups_dir and uploads to bucket_name/s3_folder

    fab -f ./fabfile.py backup_dbs
    """
    opts = parser.parse_args()
    # Forward every CLI option straight to the S3 backup helper.
    s3_backup_dir(
        opts.datadir,
        opts.aws_access_key_id,
        opts.aws_secret_access_key,
        opts.bucket_name,
        opts.zip_backups_dir,
        opts.backup_aging_time,
        opts.s3_folder,
        opts.project,
    )
|
def filter_dict(self, query, **kwargs):
    '''Filter for :func:`~ommongo.fields.mapping.DictField`.

    **Examples**: ``query.filter_dict({"User.Fullname": "Oji"})``

    The first segment of each dotted key must name a field on the
    queried type; otherwise ``FieldNotFoundException`` is raised.
    Returns ``self`` so calls can be chained.
    '''
    # Only the keys matter for validation (the previous code iterated
    # .items() and discarded every value); values are consumed by
    # query_bypass below.
    for name in query:
        field = name.split(".")[0]
        try:
            getattr(self.type, field)
        except AttributeError:
            raise FieldNotFoundException("Field not found %s" % (field))
    self.query_bypass(query, raw_output=False, **kwargs)
    return self
|
def _get_on_reboot(dom):
    '''Return `on_reboot` setting from the named vm

    CLI Example:

    .. code-block:: bash

        salt '*' virt.get_on_reboot <domain>
    '''
    # Parse the domain XML and look up the optional <on_reboot> element;
    # an absent element yields the empty string.
    root = ElementTree.fromstring(get_xml(dom))
    node = root.find('on_reboot')
    if node is None:
        return ''
    return node.text
|
def read_image(img_path, image_dims=None, mean=None):
    """Reads an image from file path or URL, optionally resizing to given image dimensions and
    subtracting mean.

    :param img_path: path to file, or url to download
    :param image_dims: image dimensions to resize to, or None
    :param mean: mean value(s) to subtract, or None
    :return: loaded image in RGB format, shape (1, c, h, w)
    """
    # `urlretrieve` moved to `urllib.request` in Python 3; the previous
    # bare `urllib.urlretrieve` only exists on Python 2.
    try:
        from urllib.request import urlretrieve  # Python 3
    except ImportError:
        from urllib import urlretrieve  # Python 2
    if img_path.startswith('http'):
        # Download URLs to a local file named after the last path segment.
        filename = img_path.split("/")[-1]
        urlretrieve(img_path, filename)
        img = cv2.imread(filename)
    else:
        img = cv2.imread(img_path)
    # OpenCV loads BGR; convert to the RGB order the caller expects.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if image_dims is not None:
        img = cv2.resize(img, image_dims)  # resize to image_dims to fit model
    img = np.rollaxis(img, 2)  # change to (c, h, w) order
    img = img[np.newaxis, :]  # extend to (n, c, h, w)
    if mean is not None:
        mean = np.array(mean)
        if mean.shape == (3,):
            mean = mean[np.newaxis, :, np.newaxis, np.newaxis]  # extend to (n, c, 1, 1)
        img = img.astype(np.float32) - mean  # subtract mean
    return img
|
def summary(self):
    """Gets summary (e.g. residuals, deviance, pValues) of model on
    training set. An exception is thrown if `trainingSummary is None`.
    """
    # Guard clause first: fail fast when no summary was recorded.
    if not self.hasSummary:
        raise RuntimeError("No training summary available for this %s" % self.__class__.__name__)
    java_summary = super(GeneralizedLinearRegressionModel, self).summary
    return GeneralizedLinearRegressionTrainingSummary(java_summary)
|
def count(self, Class, set=None, recursive=True, ignore=True):
    """See :meth:`AbstractElement.count`

    Counts occurrences of ``Class`` across all top-level elements of
    this document when it is held in memory.

    NOTE(review): only ``Mode.MEMORY`` is handled here -- any other mode
    falls through and implicitly returns None; confirm that is intended.
    NOTE(review): the ``set`` and ``ignore`` parameters are accepted but
    never forwarded to ``select`` (a literal ``True`` is passed in the
    ignore position instead) -- verify against AbstractElement.count.
    """
    if self.mode == Mode.MEMORY:
        s = 0
        # Sum the number of matching descendants under each top-level
        # element; the generator avoids materialising the matches.
        for t in self.data:
            s += sum(1 for e in t.select(Class, recursive, True))
        return s
|
def userKicked(self, kickee, channel, kicker, message):
    """Called when I see another user get kicked."""
    # Relay the kick event, unchanged, to the population dispatcher.
    self.dispatch('population', 'userKicked', kickee, channel, kicker, message)
|
def is_jar(path):
    """Check whether the given file is a JAR file.

    JARs are ZIP archives that conventionally include a manifest at the
    canonical location 'META-INF/MANIFEST.MF'.
    """
    # Anything that is not a readable ZIP file cannot be a JAR.
    if not (os.path.isfile(path) and zipfile.is_zipfile(path)):
        return False
    try:
        with zipfile.ZipFile(path) as archive:
            return "META-INF/MANIFEST.MF" in archive.namelist()
    except (IOError, zipfile.BadZipfile):
        # Unreadable or corrupt archive: treat as "not a JAR".
        return False
|
def status(name, location='\\'):
    r'''Determine the status of a task. Is it Running, Queued, Ready, etc.

    :param str name: The name of the task for which to return the status

    :param str location: A string value representing the location of the task.
        Default is '\' which is the root for the task scheduler
        (C:\Windows\System32\tasks).

    :return: The current status of the task. Will be one of the following:
        - Unknown
        - Disabled
        - Queued
        - Ready
        - Running

    :rtype: string

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' task.list_status <task_name>
    '''
    # Bail out early when no such task is defined at the given location.
    if name not in list_tasks(location):
        return '{0} not found in {1}'.format(name, location)
    # Connect to the Windows Task Scheduler COM service.
    with salt.utils.winapi.Com():
        scheduler = win32com.client.Dispatch("Schedule.Service")
        scheduler.Connect()
        # Fetch the task from its folder and map its numeric state to a
        # human-readable label.
        folder = scheduler.GetFolder(location)
        task = folder.GetTask(name)
        return states[task.State]
|
def read_http(self, length):
    """Read Hypertext Transfer Protocol version 2.

    Structure of HTTP/2 packet [RFC 7230]:
    | Length (24) |
    | Type (8) | Flags (8) |
    | R | Stream Identifier (31) |
    | Frame Payload (0...) ...

    :param length: NOTE(review): unused in the visible portion of this
        method -- presumably the remaining payload length; confirm
        against the full source.
    """
    # Fixed 9-byte HTTP/2 frame header: 3-byte length, 1-byte type,
    # 1-byte flags, then 4 bytes holding the R bit + stream identifier.
    _plen = self._read_binary(3)
    _type = self._read_unpack(1)
    _flag = self._read_binary(1)
    _stid = self._read_binary(4)
    # NOTE(review): the parsed fields are neither assembled nor returned
    # here; the method appears truncated in this chunk -- verify upstream.
|
def K(self):
    """Kernel matrix.

    Returns
    -------
    K : array-like, shape=[n_samples, n_samples]
        kernel matrix defined as the adjacency matrix with
        ones down the diagonal
    """
    # Build the kernel once and memoize it on the instance.
    if not hasattr(self, '_kernel'):
        self._kernel = self._build_kernel()
    return self._kernel
|
def response_data_to_model_instance(self, response_data):
    """Convert response data to a task type model.

    Args:
        response_data (dict): The data from the request's response.

    Returns:
        :class:`saltant.models.base_task_type.BaseTaskType`:
            A model instance representing the task type from the
            response data.
    """
    # Parse the creation timestamp in place before delegating to the
    # parent implementation.
    created = response_data["datetime_created"]
    response_data["datetime_created"] = dateutil.parser.parse(created)
    return super(BaseTaskTypeManager, self).response_data_to_model_instance(response_data)
|
def _stump(f, *args, **kwargs):
    """Worker for the common actions of all stump methods, aka the secret
    sauce.

    *Keyword parameters*

    - log::integer
        - Specifies a custom level of logging to pass to the active logger.
        - Default: INFO
    - print_time::bool
        - Include timestamp in message
    - print_return::bool
        - include the return value in the functions exit message
    - postfix_only::bool
        - omit the functions entering message
    - prefix_only::bool
        - omit the functions exiting message

    *Exceptions:*

    - IndexError and ValueError
        - will be returned if *args contains a string that does not correspond to
          a parameter name of the decorated function, or if there are more '{}'s
          than there are *args.
    """
    global LOGGER

    def aux(*xs, **kws):
        # Map the wrapped function's positional args onto their parameter
        # names so they can be interpolated into the message template.
        f_kws = kws.copy()
        f_kws.update(dict(zip(inspect.getfullargspec(f).args, xs)))
        level = kwargs.get('log', logging.INFO)
        post = kwargs.get('postfix_only', False)
        pre = kwargs.get('prefix_only', False)
        print_return = kwargs.get('print_return', False)
        print_time = kwargs.get('print_time', False)
        # prepare locals for later uses in string interpolation
        fn = f.__name__
        timestr = '%s:' % _timestr() if print_time else ''
        # get message: the first positional decorator arg is a template;
        # when absent, log under the bare function name.
        try:
            message = list(args).pop(0)
            timestr = ':' + timestr
        except IndexError:
            message = fn
            fn = ''
        # format message (template placeholders draw on the wrapped
        # function's own arguments via f_kws)
        try:
            report = '{fn}{timestr}{arg}'.format(**locals(), arg=message.format(**f_kws))
        except KeyError:
            report = '{fn}{timestr}{error}'.format(**locals(), error='KeyError in decorator usage')
        if not post:
            LOGGER.log(level, '%s...', report)
        try:
            ret = f(*xs, **kws)
        except Exception as e:
            # Best-effort extraction of the exception message for logging;
            # an empty str(e) falls back to no message at all.
            try:
                with_message = ' with message %s' % str(e)
                if str(e) == '':
                    raise Exception()
            # use default value
            except:
                with_message = ''
            LOGGER.log(level, '%s...threw exception %s%s', report, type(e).__name__, with_message)
            raise
        if not pre:
            if print_return:
                LOGGER.log(level, '%s...done (returning %s)', report, ret)
            else:
                LOGGER.log(level, '%s...done', report)
        return ret
    return aux
|
def density(self, R, Rs, rho0):
    """Three dimensional NFW profile.

    :param R: radius of interest
    :type R: float/numpy array
    :param Rs: scale radius
    :type Rs: float
    :param rho0: density normalization (characteristic density)
    :type rho0: float
    :return: rho(R) density
    """
    # NFW form: rho(R) = rho0 / (x * (1 + x)^2) with x = R / Rs.
    x = R / Rs
    return rho0 / (x * (1 + x) ** 2)
|
def _construct_state_machines(self):
    """:return: dict in format <state_machine_common_name: instance_of_the_state_machine>"""
    # Instantiate every supported state machine once, then key the
    # collection by each machine's declared name.
    machines = [
        StateMachineRecomputing(self.logger, self),
        StateMachineContinuous(self.logger, self),
        StateMachineDiscrete(self.logger, self),
        StateMachineFreerun(self.logger),
    ]
    return {machine.name: machine for machine in machines}
|
def _get_best_word_indices_for_kth_hypotheses ( ks : np . ndarray , all_hyp_indices : np . ndarray ) -> np . ndarray :
"""Traverses the matrix of best hypotheses indices collected during beam search in reversed order by
using the kth hypotheses index as a backpointer .
Returns an array containing the indices into the best _ word _ indices collected during beam search to extract
the kth hypotheses .
: param ks : The kth - best hypotheses to extract . Supports multiple for batch _ size > 1 . Shape : ( batch , ) .
: param all _ hyp _ indices : All best hypotheses indices list collected in beam search . Shape : ( batch * beam , steps ) .
: return : Array of indices into the best _ word _ indices collected in beam search
that extract the kth - best hypothesis . Shape : ( batch , ) ."""
|
batch_size = ks . shape [ 0 ]
num_steps = all_hyp_indices . shape [ 1 ]
result = np . zeros ( ( batch_size , num_steps - 1 ) , dtype = all_hyp_indices . dtype )
# first index into the history of the desired hypotheses .
pointer = all_hyp_indices [ ks , - 1 ]
# for each column / step follow the pointer , starting from the penultimate column / step
num_steps = all_hyp_indices . shape [ 1 ]
for step in range ( num_steps - 2 , - 1 , - 1 ) :
result [ : , step ] = pointer
pointer = all_hyp_indices [ pointer , step ]
return result
|
def get_leads ( self , * guids , ** options ) :
"""Supports all the search parameters in the API as well as python underscored variants"""
|
original_options = options
options = self . camelcase_search_options ( options . copy ( ) )
params = { }
for i in xrange ( len ( guids ) ) :
params [ 'guids[%s]' % i ] = guids [ i ]
for k in options . keys ( ) :
if k in SEARCH_OPTIONS :
params [ k ] = options [ k ]
del options [ k ]
leads = self . _call ( 'list/' , params , ** options )
self . log . info ( "retrieved %s leads through API ( %soptions=%s )" % ( len ( leads ) , guids and 'guids=%s, ' % guids or '' , original_options ) )
return leads
|
def _meanvalueattr ( self , v ) :
"""find new position of vertex v according to adjacency in prevlayer .
position is given by the mean value of adjacent positions .
experiments show that meanvalue heuristic performs better than median ."""
|
sug = self . layout
if not self . prevlayer ( ) :
return sug . grx [ v ] . bar
bars = [ sug . grx [ x ] . bar for x in self . _neighbors ( v ) ]
return sug . grx [ v ] . bar if len ( bars ) == 0 else float ( sum ( bars ) ) / len ( bars )
|
def leastsqbound(func, x0, bounds, args=(), **kw):
    """Constrained multivariant Levenberg-Marquard optimization.

    Minimize the sum of squares of a given function using the
    Levenberg-Marquard algorithm. Constraints on parameters are enforced
    using variable transformations as described in the MINUIT User's
    Guide by Fred James and Matthias Winkler.

    Parameters:

    * func    function to call for optimization.
    * x0      Starting estimate for the minimization.
    * bounds  (min, max) pair for each element of x, defining the bounds
              on that parameter. Use None for one of min or max when
              there is no bound in that direction.
    * args    Any extra arguments to func are placed in this tuple.

    Returns: (x, {cov_x, infodict, mesg}, ier)

    Return is described in the scipy.optimize.leastsq function. x and
    cov_x are corrected to take into account the parameter
    transformation, infodict is not corrected.

    Additional keyword arguments are passed directly to the
    scipy.optimize.leastsq algorithm.
    """
    full = bool(kw.get("full_output"))
    # Map the starting estimate into unbounded internal coordinates.
    i0 = external2internal(x0, bounds)
    # Perform unconstrained optimization using the internal variables.
    r = leastsq(err, i0, args=(bounds, func, args), **kw)
    # Unpack, convert back to external (bounded) variables, and return.
    if not full:
        xi, ier = r
        return internal2external(xi, bounds), ier
    xi, cov_xi, infodic, mesg, ier = r
    xe = internal2external(xi, bounds)
    cov_xe = i2e_cov_x(xi, bounds, cov_xi)
    # XXX correct infodic 'fjac', 'ipvt', and 'qtf'
    return xe, cov_xe, infodic, mesg, ier
|
def run(self):
    "sets up the desired services and runs the requested action"
    self.addServices()
    self.catalogServers(self.hendrix)
    action = self.action
    fd = self.options['fd']
    # The action name doubles as the handler method name.
    handler = getattr(self, action)
    if action.startswith('start'):
        chalk.blue(self._listening_message())
        handler(fd)
        try:
            # annnnd run the reactor!
            self.reactor.run()
        finally:
            # cleanup tmp PID dir
            shutil.rmtree(PID_DIR, ignore_errors=True)
    elif action == 'restart':
        handler(fd=fd)
    else:
        handler()
|
def get_books_by_comment(self, *args, **kwargs):
    """Pass through to provider CommentBookSession.get_books_by_comment"""
    # Implemented from kitosid template for -
    # osid.resource.ResourceBinSession.get_bins_by_resource
    session = self._get_provider_session('comment_book_session')
    catalogs = session.get_books_by_comment(*args, **kwargs)
    # Wrap each provider catalog in the kitosid Book facade.
    wrapped = [Book(self._provider_manager, cat, self._runtime, self._proxy)
               for cat in catalogs]
    return BookList(wrapped)
|
def calc_gamma_components(Data_ref, Data):
    """Calculates the components of Gamma (Gamma0 and delta_Gamma),
    assuming that Data_ref is uncooled data (ideally at 3mbar for best
    fitting). It uses the fact that A_prime = A/Gamma0 should be constant
    for a particular particle under changes in pressure, and therefore
    uses the reference data to calculate A_prime (assuming the Gamma
    value found for the uncooled data is actually equal to Gamma0, since
    only collisions should be causing the damping). For the cooled data
    Gamma0 then equals A/A_prime, from which delta_Gamma follows.

    A_prime = ConvFactor**2 * (2*k_B*T0/(pi*m))

    Parameters
    ----------
    Data_ref : DataObject
        Reference data set, assumed to be 300K
    Data : DataObject
        Data object to have the temperature calculated for

    Returns
    -------
    Gamma0 : uncertainties.ufloat
        Damping due to the environment
    delta_Gamma : uncertainties.ufloat
        Damping due to other effects (e.g. feedback cooling)
    """
    # A/Gamma0 is pressure-independent; the uncooled reference fixes it.
    A_prime = Data_ref.A / Data_ref.Gamma
    Gamma0 = Data.A / A_prime
    return Gamma0, Data.Gamma - Gamma0
|
def configure_update(self, ns, definition):
    """Register an update endpoint.

    The definition's func should be an update function, which must:
    - accept kwargs for the request and path data
    - return an updated item

    :param ns: the namespace
    :param definition: the endpoint definition
    """
    # Decorator order matters: routing wraps request parsing, which wraps
    # response serialization around the user-supplied update function.
    @self.add_route(ns.instance_path, Operation.Update, ns)
    @request(definition.request_schema)
    @response(definition.response_schema)
    @wraps(definition.func)
    def update(**path_data):
        headers = dict()
        # NB: using partial here means that marshmallow will not validate required fields
        request_data = load_request_data(definition.request_schema, partial=True)
        # Merge path params with the request body and delegate to the
        # user-supplied function; a falsy result raises via
        # require_response_data.
        response_data = require_response_data(definition.func(**merge_data(path_data, request_data)))
        definition.header_func(headers, response_data)
        response_format = self.negotiate_response_content(definition.response_formats)
        return dump_response_data(definition.response_schema, response_data, headers=headers, response_format=response_format,)
    update.__doc__ = "Update some or all of a {} by id".format(ns.subject_name)
|
def delete_store_credit_payment_by_id(cls, store_credit_payment_id, **kwargs):
    """Delete StoreCreditPayment

    Delete an instance of StoreCreditPayment by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous: hand back the request thread immediately.
        return cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
    # Synchronous: block on the call and return the response payload.
    data = cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
    return data
|
def _make_info(identifier, verbose):
    """Generates the script for displaying compile-time info.

    :param identifier: "module.method" string naming the compile target.
    :param verbose: when True, tee compile info to the terminal as well
        as $(LOG); otherwise append silently.
    :return: text of a makefile `info:` target with module ({0}),
        method ({1}) and the redirect operator ({2}) substituted in.
    """
    module, method = identifier.split(".")
    # {2} in the template below becomes this redirect operator.
    redirect = "| tee -a" if verbose else " >>"
    return """
info:
	echo -e "\\nCompile time:" > $(LOG)
	date >> $(LOG)
	echo "------------------------------------------------------"{2} $(LOG)
	echo "               FORTPY                  " {2} $(LOG)
	echo "          >>> version 1.7 <<<          "{2} $(LOG)
	echo "------------------------------------------------------"{2} $(LOG)
	echo -e "Compiling on system  : $(UNAME)" {2} $(LOG)
	echo -e "             machine : $(HOSTNAME)" {2} $(LOG)
	echo "Compiling for module : {0}" {2} $(LOG)
	echo "            method   : {1}" {2} $(LOG)
	echo "------------------------------------------------------"{2} $(LOG)
	echo -e "DEBUG mode\\t:\\t$(DEBUG)" {2} $(LOG)
	echo -e "GPROF mode\\t:\\t$(GPROF)" {2} $(LOG)
	echo "------------------------------------------------------"{2} $(LOG)
	echo "F90    : $(F90)" {2} $(LOG)
	echo "FFLAGS : $(FFLAGS)" {2} $(LOG)
	echo "LDFLAGS: $(LDFLAGS)" {2} $(LOG)
	echo "MKLpath:$(MKL)" {2} $(LOG)
	echo "------------------------------------------------------"{2} $(LOG)
	echo "" {2} $(LOG)
	""".format(module, method, redirect)
|
def QueryValueEx(key, value_name):
    """This calls the Windows QueryValueEx function in a Unicode safe way.

    Grows the output buffer (doubling from 256 bytes, capped at ~10 MB)
    until the registry value fits, then decodes the raw bytes according
    to the reported registry data type.

    :param key: an open registry key wrapper exposing a ``.handle``.
    :param value_name: name of the registry value to read.
    :return: tuple of (decoded value, registry data type).
    :raises OSError: if the value exceeds the ~10 MB read limit.
    """
    regqueryvalueex = advapi32["RegQueryValueExW"]
    regqueryvalueex.restype = ctypes.c_long
    regqueryvalueex.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD, LPBYTE, LPDWORD]
    size = 256
    data_type = ctypes.wintypes.DWORD()
    while True:
        # tmp_size is in/out: the API overwrites it with the needed size.
        tmp_size = ctypes.wintypes.DWORD(size)
        buf = ctypes.create_string_buffer(size)
        rc = regqueryvalueex(key.handle, value_name, LPDWORD(), ctypes.byref(data_type), ctypes.cast(buf, LPBYTE), ctypes.byref(tmp_size))
        if rc != ERROR_MORE_DATA:
            break
        # We limit the size here to ~10 MB so the response doesn't get too big.
        if size > 10 * 1024 * 1024:
            raise OSError("Value too big to be read by GRR.")
        size *= 2
    if rc != ERROR_SUCCESS:
        # NOTE(review): this raises WinError(2) ("file not found")
        # regardless of the actual rc returned by the API -- confirm
        # whether WinError(rc) was intended.
        raise ctypes.WinError(2)
    return _Reg2Py(buf, tmp_size.value, data_type.value), data_type.value
|
def pyc2py(filename):
    """Find corresponding .py name given a .pyc or .pyo"""
    # Anything that is not compiled bytecode maps to itself.
    if not re.match(".*py[co]$", filename):
        return filename
    if PYTHON3:
        # Map __pycache__/name.cpython-XY.pyc back to name.py.
        return re.sub(r'(.*)__pycache__/(.+)\.cpython-%s.py[co]$' % PYVER,
                      '\\1\\2.py', filename)
    # Python 2 bytecode sits next to its source: just drop the trailing
    # 'c'/'o'.
    return filename[:-1]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.