signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def serve_private_file(request, path):
    """Serve a private file, enforcing read permission for the requester."""
    logger.debug('Serving {0} to {1}'.format(path, request.user))
    if permissions.has_read_permission(request, path):
        return server.serve(request, path=path)
    # Hide the file's existence in production; surface the real error in DEBUG.
    if settings.DEBUG:
        raise PermissionDenied
    raise Http404('File not found')
|
def search_biosamples(self, dataset_id, name=None, individual_id=None):
    """Iterate over the Biosamples fulfilling the specified conditions.

    :param str dataset_id: The dataset to search within.
    :param str name: Restrict results to Biosamples with this name.
    :param str individual_id: Restrict results to Biosamples matching this
        individual id.
    :return: An iterator over the matching
        :class:`ga4gh.protocol.Biosample` objects.
    """
    req = protocol.SearchBiosamplesRequest()
    req.dataset_id = dataset_id
    req.name = pb.string(name)
    req.individual_id = pb.string(individual_id)
    req.page_size = pb.int(self._page_size)
    return self._run_search_request(
        req, "biosamples", protocol.SearchBiosamplesResponse)
|
def listar(self, id_divisao=None, id_ambiente_logico=None):
    """List environments filtered by the given parameters.

    If both parameters are None, every environment is returned. If only
    id_divisao is given, environments are filtered by it; if both are
    given, environments are filtered by both.

    :param id_divisao: Identifier of the data center division.
    :param id_ambiente_logico: Identifier of the logical environment.
    :return: Dictionary shaped as
        {'ambiente': [{'id': <id_ambiente>,
                       'link': <link>,
                       'id_divisao': <id_divisao>,
                       'nome_divisao': <nome_divisao>,
                       'id_ambiente_logico': <id_ambiente_logico>,
                       'nome_ambiente_logico': <nome_ambiente_logico>,
                       'id_grupo_l3': <id_grupo_l3>,
                       'nome_grupo_l3': <nome_grupo_l3>,
                       'id_filter': <id_filter>,
                       'filter_name': <filter_name>,
                       'ambiente_rede': <ambiente_rede>},
                      ...]}
    :raise DataBaseError: networkapi failed to access the database.
    :raise XMLError: networkapi failed to build the response XML.
    """
    url = 'ambiente/'
    if is_valid_int_param(id_divisao):
        if is_valid_int_param(id_ambiente_logico):
            url = ('ambiente/divisao_dc/' + str(id_divisao) +
                   '/ambiente_logico/' + str(id_ambiente_logico) + '/')
        else:
            url = 'ambiente/divisao_dc/' + str(id_divisao) + '/'
    code, xml = self.submit(None, 'GET', url)
    key = 'ambiente'
    return get_list_map(self.response(code, xml, [key]), key)
|
def jsonify(obj, pretty=False):
    """Serialize a nested structure into a (compressed) JSON string.

    Parameters
    ----------
    obj : dict
        Any kind of dictionary structure.
    pretty : bool, optional
        Whether to format the resulting JSON in a more legible way
        (default False).
    """
    if pretty:
        params = {"sort_keys": True, "indent": 2, "separators": (",", ": ")}
    else:
        params = {"sort_keys": False, "indent": None, "separators": (",", ":")}
    params.update(allow_nan=False, ensure_ascii=False)
    try:
        return json.dumps(obj, **params)
    except (TypeError, ValueError) as error:
        LOGGER.critical(
            "The memote result structure is incompatible with the JSON "
            "standard.")
        log_json_incompatible_types(obj)
        raise_with_traceback(error)
|
def _make_sprite_image(images, save_path):
    """Build a sprite image from an NDArray batch of images and save it as
    sprite.png under ``save_path``, following the rule defined in
    https://www.tensorflow.org/programmers_guide/embedding
    """
    if isinstance(images, np.ndarray):
        images = nd.array(images, dtype=images.dtype, ctx=current_context())
    elif not isinstance(images, (NDArray, np.ndarray)):
        raise TypeError('images must be an MXNet NDArray or numpy.ndarray,'
                        ' while received type {}'.format(str(type(images))))
    assert isinstance(images, NDArray)
    batch_size = images.shape[0]
    # Lay the batch out on a near-square grid.
    grid_cols = int(np.ceil(np.sqrt(batch_size)))
    _save_image(images, os.path.join(save_path, 'sprite.png'),
                nrow=grid_cols, padding=0, square_image=True)
|
def get_historical_base_info(event):
    """Extract the base details from the CloudWatch Event."""
    detail = event['detail']
    data = {
        'principalId': get_principal(event),
        'userIdentity': get_user_identity(event),
        'accountId': event['account'],
        'userAgent': detail.get('userAgent'),
        'sourceIpAddress': detail.get('sourceIPAddress'),
        'requestParameters': detail.get('requestParameters'),
    }
    # Optional fields are only copied through when present and truthy.
    for field in ('eventTime', 'eventSource', 'eventName'):
        if detail.get(field):
            data[field] = detail[field]
    return data
|
def addTaxonToFeature(self, taxonid):
    """Record ``taxonid`` on this feature and emit the triple
    ``feature in_taxon taxonid``.

    :param taxonid:
    :return:
    """
    self.taxon = taxonid
    self.graph.addTriple(self.fid, self.globaltt['in taxon'], self.taxon)
|
def get_signature_request_file(self, signature_request_id, path_or_file=None,
                               file_type=None, filename=None):
    '''Download the PDF copy of the current documents.

    Args:
        signature_request_id (str): Id of the signature request
        path_or_file (str or file): A writable File-like object or a full
            path to save the PDF file to.
        filename (str): [DEPRECATED] Filename to save the PDF file to.
            This should be a full path.
        file_type (str): Type of file to return. Either "pdf" for a single
            merged document or "zip" for a collection of individual
            documents. Defaults to "pdf" if not specified.

    Returns:
        True if file is downloaded and successfully written, False
        otherwise.
    '''
    url = self.SIGNATURE_REQUEST_DOWNLOAD_PDF_URL + signature_request_id
    if file_type:
        url += '?file_type=%s' % file_type
    request = self._get_request()
    return request.get_file(url, path_or_file or filename)
|
def update(self, key=values.unset, value=values.unset):
    """Update the VariableInstance.

    :param unicode key: The key
    :param unicode value: The value
    :returns: Updated VariableInstance
    :rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
    """
    return self._proxy.update(key=key, value=value)
|
def register_rate_producer(self, rate_name: str,
                           source: Callable[..., pd.DataFrame] = None) -> Pipeline:
    """Marks a ``Callable`` as the producer of a named rate.

    This is a convenience wrapper around ``register_value_producer`` that
    makes sure rate data is appropriately scaled to the size of the
    simulation time step. It is equivalent to
    ``register_value_producer(value_name, source,
    preferred_combiner=replace_combiner,
    preferred_post_processor=rescale_post_processor)``

    Parameters
    ----------
    rate_name :
        The name of the new dynamic rate pipeline.
    source :
        A callable source for the dynamic rate pipeline.

    Returns
    -------
    Callable
        A callable reference to the named dynamic rate pipeline.
    """
    manager = self._value_manager
    return manager.register_rate_producer(rate_name, source)
|
def retrieve_download_path(self):
    """Return ``[download_path, subdirectory_flag]`` for this feed.

    Looks first into config_filename_global, then into the [DEFAULT] and
    the [feed] sections of config_filename_user; the latest takes
    precedence.
    """
    if self.config.has_section(self.name):
        section = self.name
    else:
        section = self.config.default_section
    path = self.config.get(section, 'Download directory', fallback='~/Podcasts')
    subdir = self.config.get(section, 'Create subdirectories', fallback='no')
    return [os.path.expanduser(path), subdir]
|
def debug_derivative(self, guess):
    """Return the pair ``(explicit, auto)`` produced by the derivative check."""
    from .lmmin import check_derivative
    return check_derivative(self.component.npar, self.data.size,
                            self.lm_model, self.lm_deriv, guess)
|
def get_definition_with_regex(source, token, start_line=-1):
    """Find the definition of an object within a source closest to a given line.

    :param source: full source text to search.
    :param token: identifier whose definition is wanted.
    :param start_line: preferred line to measure distance from; matches at
        or before it win ties (-1 means no preference).
    :return: the matched line number closest to ``start_line``, or None
        when nothing matched.
    """
    if not token:
        return None
    if DEBUG_EDITOR:
        t0 = time.time()
    # First pass: true definitions (imports, class/def/cdef, enaml keywords).
    patterns = [  # python / cython keyword definitions
        r'^c?import.*\W{0}{1}',
        r'from.*\W{0}\W.*c?import ',
        r'from .* c?import.*\W{0}{1}',
        r'class\s*{0}{1}',
        r'c?p?def[^=]*\W{0}{1}',
        r'cdef.*\[.*\].*\W{0}{1}',
        # enaml keyword definitions
        r'enamldef.*\W{0}{1}',
        r'attr.*\W{0}{1}',
        r'event.*\W{0}{1}',
        r'id\s*:.*\W{0}{1}']
    matches = get_matches(patterns, source, token, start_line)
    if not matches:
        # Second pass: plain assignments (the [^=!<>]*=[^=] tail avoids
        # matching comparison operators like ==, !=, <=, >=).
        patterns = [r'.*\Wself.{0}{1}[^=!<>]*=[^=]',
                    r'.*\W{0}{1}[^=!<>]*=[^=]',
                    r'self.{0}{1}[^=!<>]*=[^=]',
                    r'{0}{1}[^=!<>]*=[^=]']
        matches = get_matches(patterns, source, token, start_line)
    # find the one closest to the start line (prefer before the start line)
    if matches:
        min_dist = len(source.splitlines())
        best_ind = 0
        for match in matches:
            dist = abs(start_line - match)
            # Matches at/before start_line always compete; matches after it
            # only count until some candidate exists. NOTE(review): a match
            # on line 0 keeps ``not best_ind`` truthy — confirm intended.
            if match <= start_line or not best_ind:
                if dist < min_dist:
                    min_dist = dist
                    best_ind = match
    if matches:
        if DEBUG_EDITOR:
            log_dt(LOG_FILENAME, 'regex definition match', t0)
        return best_ind
    else:
        if DEBUG_EDITOR:
            log_dt(LOG_FILENAME, 'regex definition failed match', t0)
        return None
|
def verify_time(self, now):
    '''Return True when the time-of-day of ``now`` lies within
    [start_time, end_time], inclusive at both ends.'''
    return self.start_time <= now.time() <= self.end_time
|
def _comprise ( dict1 , dict2 ) :
'''dict1 = { ' a ' : 1 , ' b ' : 2 , ' c ' : 3 , ' d ' : 4}
dict2 = { ' b ' : 2 , ' c ' : 3}
_ comprise ( dict1 , dict2)'''
|
len_1 = dict1 . __len__ ( )
len_2 = dict2 . __len__ ( )
if ( len_2 > len_1 ) :
return ( False )
else :
for k2 in dict2 :
v2 = dict2 [ k2 ]
if ( k2 in dict1 ) :
v1 = dict1 [ k2 ]
if ( v1 == v2 ) :
return ( True )
else :
return ( False )
else :
return ( False )
|
def summary(self, name, description, labels=None, **kwargs):
    """Use a Summary to track the execution time and invocation count of
    the method.

    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param kwargs: additional keyword arguments for creating the Summary
    """
    def record(metric, time):
        # Feed the measured duration into the Summary.
        metric.observe(time)
    return self._track(Summary, record, kwargs, name, description,
                       labels, registry=self.registry)
|
def merge_models(self, store_in_memory=False, outdir=None, outname=None,
                 force_rerun=False):
    """Merge all existing models into a Structure's first_model attribute.

    This directly modifies the Biopython Structure object. Chain IDs will
    start from A and increment for each new chain (which is a Model that is
    converted).

    Args:
        store_in_memory (bool): If the modified Biopython Structure object
            should be stored in the attribute ``structure``
        outdir (str): If ``store_in_memory`` is False, the structure file
            has to be written somewhere so an output directory must be
            specified here
        outname (str): If ``store_in_memory`` is False, the structure file
            has to be written somewhere so an output filename must be
            specified here (i.e. 4BXI_bio1)
        force_rerun (bool): If merged file should be overwritten if it
            already exists
    """
    if not store_in_memory:
        # Write the merged assembly to disk and re-point this object at it.
        merged_path = write_merged_bioassembly(
            inpath=self.structure_path, outdir=outdir, outname=outname,
            force_rerun=force_rerun)
        self.load_structure_path(merged_path, file_type='pdb')
        return
    parsed = copy(self.structure) if self.structure else self.parse_structure()
    self.structure = merge_all_models_into_first_model(parsed)
|
def _split_license(license):
    '''Return a generator over each individual license in the input.'''
    # Split on the module-level pattern, drop empty fragments, trim whitespace.
    return (part.strip() for part in _regex.split(license) if part)
|
def getCurrentStrDatetime():
    """Generate the current datetime as '<Y>-<m>-<d>_<H>h<M>m'.

    Returns:
        string: The string of a date.
    """
    now = datetime.datetime.now()
    return "%s-%s-%s_%sh%sm" % (now.year, now.month, now.day,
                                now.hour, now.minute)
|
def prune_clade(self, node_id):
    """Prune ``node_id`` and every edge and node tipward of it.

    Caller must delete the edge leading into ``node_id``.
    """
    # Breadth-first walk over the clade rooted at node_id.
    pending = [node_id]
    while pending:
        current = pending.pop(0)
        self._flag_node_as_del_and_del_in_by_target(current)
        outgoing = self._edge_by_source.get(current)
        if outgoing is None:
            continue
        pending.extend(edge['@target'] for edge in list(outgoing.values()))
        del self._edge_by_source[current]
|
def om(self, breath, conscious=True):
    """Print the string passed via argument 'breath',
    and store the string for saving in prosodic.being.om.
    [accessed interactively using the '/save' command]
    (The string just prior to this one will remain available at
    prosodic.being.omm).
    """
    # import prosodic
    # NOTE(review): output happens only when NOT conscious and printing is
    # enabled in the config -- confirm this inversion is intended.
    if (not conscious) and bool(being.config['print_to_screen']):
        # Coerce non-string payloads to text before accumulating.
        # NOTE: `unicode`/print-statement below are Python 2 only.
        if not type(breath) in [str, unicode]:
            breath = unicode(breath)
        being.om += breath + "\n"
        print self.u2s(breath)
    return breath
|
def build_parser(parser):
    """Generate a subparser: attach sequence-filtering options to ``parser``."""
    # Positional input / output files.
    parser.add_argument('sequence_file', type=FileType('r'),
                        help="""Input fastq file. A fasta-format file may also be provided
            if --input-qual is also specified.""")
    parser.add_argument('--input-qual', type=FileType('r'),
                        help="""The quality scores associated with the input file. Only
            used if input file is fasta.""")
    parser.add_argument('output_file', type=FileType('w'),
                        help="""Output file. Format determined from extension.""")
    # Report and per-sequence detail outputs.
    output_group = parser.add_argument_group("Output")
    output_group.add_argument('--report-out', type=FileType('w'),
                              default=sys.stdout,
                              help="""Output file for report [default:
            stdout]""")
    output_group.add_argument('--details-out', type=FileType('w'),
                              help="""Output file to report fate of each sequence""")
    output_group.add_argument('--no-details-comment', action='store_false',
                              default=True, dest='details_comment',
                              help="""Do not write comment
            lines with version and call to start --details-out""")
    # Quality and length thresholds.
    parser.add_argument('--min-mean-quality', metavar='QUALITY', type=float,
                        default=DEFAULT_MEAN_SCORE,
                        help="""Minimum mean quality score for
            each read [default: %(default)s]""")
    parser.add_argument('--min-length', metavar='LENGTH', type=int,
                        default=200,
                        help="""Minimum length to keep sequence [default:
            %(default)s]""")
    parser.add_argument('--max-length', metavar='LENGTH', type=int,
                        default=1000,
                        help="""Maximum length to keep before truncating
            [default: %(default)s]. This operation occurs before
            --max-ambiguous""")
    # Sliding-window quality truncation.
    window_group = parser.add_argument_group('Quality window options')
    window_group.add_argument('--quality-window-mean-qual', type=float,
                              help="""Minimum quality score within the window defined by
            --quality-window. [default: same as --min-mean-quality]""")
    window_group.add_argument('--quality-window-prop', help="""Proportion of
            reads within quality window to that must pass filter. Floats are [default:
            %(default).1f]""", default=1.0, type=typed_range(float, 0.0, 1.0))
    window_group.add_argument('--quality-window', type=int,
                              metavar='WINDOW_SIZE', default=0,
                              help="""Window size for truncating sequences. When set
            to a non-zero value, sequences are truncated where the mean mean
            quality within the window drops below --min-mean-quality.
            [default: %(default)s]""")
    # Ambiguous-base (N) handling.
    parser.add_argument('--ambiguous-action', choices=('truncate', 'drop'),
                        help="""Action to take on ambiguous base in sequence (N's).
            [default: no action]""")
    parser.add_argument('--max-ambiguous', default=None, help="""Maximum number
            of ambiguous bases in a sequence. Sequences exceeding this count
            will be removed.""", type=int)
    parser.add_argument('--pct-ambiguous', help="""Maximun percent of
            ambiguous bases in a sequence. Sequences exceeding this percent
            will be removed.""", type=float)
    # Barcode / primer handling (primer flags are mutually exclusive).
    barcode_group = parser.add_argument_group('Barcode/Primer')
    primer_group = barcode_group.add_mutually_exclusive_group()
    primer_group.add_argument('--primer', help="""IUPAC ambiguous primer to
            require""")
    primer_group.add_argument('--no-primer', help="""Do not use a primer.""",
                              action='store_const', const='', dest='primer')
    barcode_group.add_argument('--barcode-file', help="""CSV file containing
            sample_id,barcode[,primer] in the rows. A single primer for all
            sequences may be specified with `--primer`, or `--no-primer` may be
            used to indicate barcodes should be used without a primer
            check.""", type=FileType('r'))
    barcode_group.add_argument('--barcode-header', action='store_true',
                               default=False,
                               help="""Barcodes have a header row [default:
            %(default)s]""")
    barcode_group.add_argument('--map-out', help="""Path to write
            sequence_id,sample_id pairs""", type=FileType('w'),
                               metavar='SAMPLE_MAP')
    barcode_group.add_argument('--quoting', help="""A string naming an
            attribute of the csv module defining the quoting behavior for
            `SAMPLE_MAP`. [default: %(default)s]""", default='QUOTE_MINIMAL',
                               choices=[s for s in dir(csv) if s.startswith('QUOTE_')])
|
def _prepare ( self , data , groupname ) :
"""Clear the group if existing and initialize empty datasets ."""
|
if groupname in self . h5file :
del self . h5file [ groupname ]
group = self . h5file . create_group ( groupname )
group . attrs [ 'version' ] = self . version
data . init_group ( group , self . chunk_size , self . compression , self . compression_opts )
return group
|
def _send_err(self, msg, errName, errMsg):
    """Build an ErrorMessage replying to ``msg`` and send it on the connection."""
    reply = message.ErrorMessage(
        errName,
        msg.serial,
        body=[errMsg],
        signature='s',
        destination=msg.sender,
    )
    self.conn.sendMessage(reply)
|
def generate_values(self, *args, **kwargs):
    """Instantiate a random variable and apply annual growth factors.

    :return: a pandas Series (one flat column) indexed by
        ('time', 'samples'); ``kwargs`` is stashed on ``_metadata``.
    """
    # Draw len(times) * size samples as one flat array.
    values = super().generate_values(*args, **kwargs,
                                     size=(len(self.times) * self.size,))
    alpha = self.cagr
    # @todo - fill to cover the entire time: define rules for filling first
    ref_date = self.ref_date if self.ref_date else self.times[0].to_pydatetime()
    # assert ref_date >= self.times[0].to_pydatetime(), 'Ref date must be within variable time span.'
    # assert ref_date <= self.times[-1].to_pydatetime(), 'Ref date must be within variable time span.'
    start_date = self.times[0].to_pydatetime()
    end_date = self.times[-1].to_pydatetime()
    # Scale each sample by its growth coefficient relative to ref_date.
    a = growth_coefficients(start_date, end_date, ref_date, alpha, self.size)
    values *= a.ravel()
    df = pd.DataFrame(values)
    df.columns = [kwargs['name']]
    df.set_index(self._multi_index, inplace=True)
    # @todo this is a hack to return a series with index as I don't know how
    # to set an index and rename a series
    data_series = df.iloc[:, 0]
    data_series._metadata = kwargs
    data_series.index.rename(['time', 'samples'], inplace=True)
    return data_series
|
def t_escaped_FORM_FEED_CHAR(self, t):
    r'\x66'
    # NOTE: the docstring above IS the PLY token regex -- \x66 is the
    # character 'f', i.e. the letter following the backslash in an escaped
    # form feed ("\f"). Do not edit it as documentation.
    t.lexer.pop_state()
    # Replace the matched 'f' with the actual form-feed character (U+000C).
    # NOTE(review): `unichr` is Python 2 only.
    t.value = unichr(0x000c)
    return t
|
def sa_indices(num_states, num_actions):
    """Generate `s_indices` and `a_indices` for `DiscreteDP`, for the case
    where all the actions are feasible at every state.

    Parameters
    ----------
    num_states : scalar(int)
        Number of states.
    num_actions : scalar(int)
        Number of actions.

    Returns
    -------
    s_indices : ndarray(int, ndim=1)
        Array containing the state indices.
    a_indices : ndarray(int, ndim=1)
        Array containing the action indices.

    Examples
    --------
    >>> s_indices, a_indices = qe.markov.sa_indices(4, 3)
    >>> s_indices
    array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])
    >>> a_indices
    array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2])
    """
    dtype = np.int_
    # Vectorized replacement for the original element-by-element double
    # loop: each state index repeats num_actions times, and the full action
    # range tiles once per state.
    s_indices = np.repeat(np.arange(num_states, dtype=dtype), num_actions)
    a_indices = np.tile(np.arange(num_actions, dtype=dtype), num_states)
    return s_indices, a_indices
|
def read_excel(filename, dataset_class=dataset.pandas_dataset.PandasDataset,
               expectations_config=None, autoinspect_func=None,
               *args, **kwargs):
    """Read a file using Pandas read_excel and return a great_expectations
    dataset.

    Args:
        filename (string): path to file to read
        dataset_class (Dataset class): class to which to convert resulting
            Pandas df
        expectations_config (string): path to great_expectations config file

    Returns:
        great_expectations dataset or ordered dict of great_expectations
        datasets, if multiple worksheets are imported
    """
    df = pd.read_excel(filename, *args, **kwargs)
    if not isinstance(df, dict):
        return _convert_to_dataset_class(
            df, dataset_class, expectations_config, autoinspect_func)
    # Multiple worksheets: convert each frame in place, keep the same mapping.
    for sheet_name in df:
        df[sheet_name] = _convert_to_dataset_class(
            df[sheet_name], dataset_class, expectations_config,
            autoinspect_func)
    return df
|
def _get_revision ( self ) :
"""Validate and return the revision to use for current command"""
|
assert self . _revisions , "no migration revision exist"
revision = self . _rev or self . _revisions [ - 1 ]
# revision count must be less or equal since revisions are ordered
assert revision in self . _revisions , "invalid revision specified"
return revision
|
def newNsProp(self, node, name, value):
    """Create a new property tagged with a namespace and carried by a node."""
    node__o = None if node is None else node._o
    ret = libxml2mod.xmlNewNsProp(node__o, self._o, name, value)
    if ret is None:
        raise treeError('xmlNewNsProp() failed')
    return xmlAttr(_obj=ret)
|
def unparse(self, pieces, defaults=None):
    """Join the parts of a URI back together to form a valid URI.

    ``pieces`` is a tuple of URI pieces. The scheme must be in
    ``pieces[0]`` so that the rest of the pieces can be interpreted.
    """
    scheme = pieces[0]
    parser = self.parser_for(scheme)(defaults)
    return parser.unparse(pieces)
|
def psetex(self, key, milliseconds, value):
    """:meth:`~tredis.RedisClient.psetex` works exactly like
    :meth:`~tredis.RedisClient.setex` with the sole difference that the
    expire time is specified in milliseconds instead of seconds.

    .. versionadded:: 0.2.0

    .. note:: **Time complexity**: ``O(1)``

    :param key: The key to set
    :type key: :class:`str`, :class:`bytes`
    :param int milliseconds: Number of milliseconds for TTL
    :param value: The value to set
    :type value: :class:`str`, :class:`bytes`
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.RedisError`
    """
    # ascii() turns the integer TTL into its decimal string form for the
    # wire protocol; the command succeeds when the server replies b'OK'.
    return self._execute([b'PSETEX', key, ascii(milliseconds), value], b'OK')
|
def sliceit(iterable, lower=0, upper=None):
    """Apply a slice on the input iterable.

    :param iterable: object which provides ``__getitem__`` or ``__iter__``.
    :param int lower: lower bound from where to start getting items.
    :param int upper: upper bound at which to stop getting items.
    :return: sliced object of the same type as ``iterable`` when that type
        can be rebuilt from a list (dicts excepted); otherwise a plain
        list of the sliced items.
    :rtype: Iterable
    """
    if upper is None:
        upper = len(iterable)
    try:
        result = iterable[lower:upper]
    except TypeError:
        # The object is not sliceable: walk it manually instead.
        result = []
        if lower < 0:  # normalize negative bounds against the length
            lower += len(iterable)
        if upper < 0:
            upper += len(iterable)
        if upper > lower:
            for position, item in enumerate(iterable):
                if position >= upper:
                    break
                if position >= lower:
                    result.append(item)
    # Try to rebuild the original container type (dicts are left alone).
    container = iterable.__class__
    if not (isinstance(result, container) or issubclass(container, dict)):
        try:
            result = container(result)
        except TypeError:
            pass
    return result
|
def interact(self, banner=None):
    """Closely emulate the interactive Python console.

    The optional banner argument specify the banner to print
    before the first interaction; by default it prints a banner
    similar to the one printed by the real Python interpreter,
    followed by the current class name in parentheses (so as not
    to confuse this with the real interpreter -- since it's so
    close!).
    """
    # Ensure the standard prompt strings exist before we read them.
    try:
        sys.ps1
        # @UndefinedVariable
    except AttributeError:
        sys.ps1 = ">>> "
    try:
        sys.ps2
        # @UndefinedVariable
    except AttributeError:
        sys.ps2 = "... "
    cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
    if banner is None:
        self.write("Python %s on %s\n%s\n(%s)\n" %
                   (sys.version, sys.platform, cprt, self.__class__.__name__))
    else:
        self.write("%s\n" % str(banner))
    # `more` is nonzero while a multi-line statement is still open.
    more = 0
    while 1:
        try:
            if more:
                prompt = sys.ps2
                # @UndefinedVariable
            else:
                prompt = sys.ps1
                # @UndefinedVariable
            try:
                line = self.raw_input(prompt)
                # Can be None if sys.stdin was redefined
                encoding = getattr(sys.stdin, "encoding", None)
                # NOTE(review): `unicode` is Python 2 only -- this branch
                # would NameError on Python 3; confirm target version.
                if encoding and not isinstance(line, unicode):
                    line = line.decode(encoding)
            except EOFError:
                self.write("\n")
                break
            else:
                more = self.push(line)
        except KeyboardInterrupt:
            # Ctrl-C aborts the current statement but keeps the REPL alive.
            self.write("\nKeyboardInterrupt\n")
            self.resetbuffer()
            more = 0
|
def list_common_lookups(kwargs=None, call=None):
    '''List common lookups for a particular type of item

    .. versionadded:: 2015.8.0
    '''
    kwargs = kwargs or {}
    args = {}
    # Only the optional 'lookup' filter is forwarded to the API.
    if 'lookup' in kwargs:
        args['lookup'] = kwargs['lookup']
    return _query('common', 'lookup/list', args=args)
|
def unique(lst):
    """Return unique elements, preserving both order and type.

    :class:`pandas.unique` and :class:`numpy.unique` cast mixed-type lists
    to a common type. They are faster, but sometimes we want to maintain
    the type.

    Parameters
    ----------
    lst : list-like
        List of items

    Returns
    -------
    out : list
        Unique items in the order that they appear in the input.

    Examples
    --------
    >>> unique(['one', 'two', 123, 'three'])
    ['one', 'two', 123, 'three']

    pandas and numpy would cast 123 to a string, and numpy does not even
    maintain the order.
    """
    seen = set()
    out = []
    for item in lst:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out
|
def _start_execution(self, args, stdin, stdout, stderr, env, cwd, temp_dir,
                     cgroups, parent_setup_fn, child_setup_fn,
                     parent_cleanup_fn):
    """Actually start the tool and the measurements.

    @param parent_setup_fn a function without parameters that is called in
        the parent process immediately before the tool is started
    @param child_setup_fn a function without parameters that is called in
        the child process before the tool is started
    @param parent_cleanup_fn a function that is called in the parent process
        immediately after the tool terminated, with three parameters:
        the result of parent_setup_fn, the result of the executed process
        as ProcessExitCode, and the base path for looking up files as
        parameter values
    @return: a tuple of PID of process and a blocking function, which waits
        for the process and returns a triple of the exit code and the
        resource usage of the process and the result of parent_cleanup_fn
        (do not use os.wait)
    """

    def pre_subprocess():
        # Runs in the CHILD between fork and exec (Popen preexec_fn).
        # Do some other setup the caller wants.
        child_setup_fn()
        # put us into the cgroup(s)
        pid = os.getpid()
        cgroups.add_task(pid)

    # Set HOME and TMPDIR to fresh directories.
    tmp_dir = os.path.join(temp_dir, "tmp")
    home_dir = os.path.join(temp_dir, "home")
    self._create_dirs_in_temp_dir(tmp_dir, home_dir)
    env["HOME"] = home_dir
    env["TMPDIR"] = tmp_dir
    env["TMP"] = tmp_dir
    env["TEMPDIR"] = tmp_dir
    env["TEMP"] = tmp_dir
    logging.debug("Executing run with $HOME and $TMPDIR below %s.", temp_dir)
    args = self._build_cmdline(args, env=env)
    # parent_setup_fn must run immediately before the child is spawned.
    parent_setup = parent_setup_fn()
    p = subprocess.Popen(args, stdin=stdin, stdout=stdout, stderr=stderr,
                         env=env, cwd=cwd, close_fds=True,
                         preexec_fn=pre_subprocess)

    def wait_and_get_result():
        # Blocks until the child exits; rusage comes from _wait_for_process.
        exitcode, ru_child = self._wait_for_process(p.pid, args[0])
        parent_cleanup = parent_cleanup_fn(
            parent_setup, util.ProcessExitCode.from_raw(exitcode), "")
        return exitcode, ru_child, parent_cleanup

    return p.pid, wait_and_get_result
|
def _parsed_cmd ( self ) :
"""We need to take into account two cases :
- [ ' python code . py foo bar ' ] : Used mainly with dvc as a library
- [ ' echo ' , ' foo bar ' ] : List of arguments received from the CLI
The second case would need quoting , as it was passed through :
dvc run echo " foo bar " """
|
if len ( self . args . command ) < 2 :
return " " . join ( self . args . command )
return " " . join ( self . _quote_argument ( arg ) for arg in self . args . command )
|
def delete_keyring(service):
    """Delete an existing Ceph keyring; log and return when it is absent."""
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        os.remove(keyring)
        log('Deleted ring at %s.' % keyring, level=INFO)
    else:
        log('Keyring does not exist at %s' % keyring, level=WARNING)
|
def load_pid(pidfile):
    """Read and return the PID stored in ``pidfile``; 0 when unavailable."""
    if not pidfile or not os.path.isfile(pidfile):
        return 0
    with open(pidfile, "r", encoding="utf-8") as handle:
        return int(handle.readline().strip())
|
def CopyNoFail(src, root=None):
    """Copy ``src`` into the current working directory, if it exists.

    No action is executed if the file does not exist; no hash is checked.

    Args:
        src: The filename we want to copy to '.'.
        root: The optional source dir we should pull ``src`` from. Defaults
            to benchbuild.settings.CFG["tmp_dir"].

    Returns:
        True, if we copied something.
    """
    root = str(CFG["tmp_dir"]) if root is None else root
    candidate = local.path(root) / src
    if not candidate.exists():
        return False
    Copy(candidate, '.')
    return True
|
def pkginfo_unicode(pkg_info, field):
    """Hack to coax Unicode out of an email Message() - Python 3.3+"""
    text = pkg_info[field]
    if isinstance(text, str):
        return text
    # Python 3.2 has no raw_items(); fall back to a plain str() cast.
    if not hasattr(pkg_info, 'raw_items'):
        return str(text)
    wanted = field.lower()
    for key, raw_value in pkg_info.raw_items():
        if key.lower() == wanted:
            # Undo the surrogateescape round-trip to recover real UTF-8 text.
            text = raw_value.encode('ascii', 'surrogateescape').decode('utf-8')
            break
    return text
|
def run_with_standalone_parser(self):
    """Run the operation standalone, building a fresh ArgumentParser."""
    standalone = argparse.ArgumentParser(description=self.description())
    self.configure_parser(standalone)
    parsed = standalone.parse_args()
    self.run(parsed)
|
def update_search_letters(self, text):
    """Update search letters with text input in search box."""
    self.letters = text
    shortcut_names = [s.name for s in self.shortcuts]
    scored = get_search_scores(text, shortcut_names, template='<b>{0}</b>')
    self.normal_text, self.rich_text, self.scores = zip(*scored)
    self.reset()
|
def get_default_keystone_session(self, keystone_sentry, openstack_release=None, api_version=2):
    """Return a keystone session object and client object assuming standard
    default settings.

    Example call in amulet tests:
        self.keystone_session, self.keystone = u.get_default_keystone_session(
            self.keystone_sentry,
            openstack_release=self._get_openstack_release())

    The session can then be used to auth other clients:
        neutronclient.Client(session=session)
        aodh_client.Client(session=session)

    :param keystone_sentry: amulet sentry for the keystone unit
    :param openstack_release: optional numeric release index; values >= 11
        (xenial_queens) force the v3 client
    :param api_version: requested keystone API version (2 or 3)
    :returns: (session, client) tuple
    """
    self.log.debug('Authenticating keystone admin...')
    # 11 => xenial_queens; from queens onward only the v3 API is supported,
    # so override the requested api_version in that case.
    if api_version == 3 or (openstack_release and openstack_release >= 11):
        client_class = keystone_client_v3.Client
        api_version = 3
    else:
        client_class = keystone_client.Client
    keystone_ip = keystone_sentry.info['public-address']
    # Standard admin credentials assumed by the default amulet deployment.
    session, auth = self.get_keystone_session(keystone_ip, api_version=api_version, username='admin', password='openstack', project_name='admin', user_domain_name='admin_domain', project_domain_name='admin_domain')
    client = client_class(session=session)
    # This populates the client.service_catalog
    client.auth_ref = auth.get_access(session)
    return session, client
|
def silhouette_samples(X, labels, metric='euclidean', **kwds):
    """Compute the Silhouette Coefficient for each sample.

    The Silhouette Coefficient is a measure of how well samples are clustered
    with samples that are similar to themselves. Clustering models with a high
    Silhouette Coefficient are said to be dense, where samples in the same
    cluster are similar to each other, and well separated, where samples in
    different clusters are not very similar to each other.

    The Silhouette Coefficient is calculated using the mean intra-cluster
    distance (``a``) and the mean nearest-cluster distance (``b``) for each
    sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``.
    Note that Silhouette Coefficient is only defined if number of labels
    is 2 <= n_labels <= n_samples - 1.

    This function returns the Silhouette Coefficient for each sample.
    The best value is 1 and the worst value is -1. Values near 0 indicate
    overlapping clusters.

    Read more in the :ref:`User Guide <silhouette_coefficient>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or,
        [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    labels : array, shape = [n_samples]
        label values for each sample
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
        the distance array itself, use "precomputed" as the metric.
    **kwds : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a ``scipy.spatial.distance`` metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each samples.

    References
    ----------
    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
    .. [2] `Wikipedia entry on the Silhouette Coefficient
       <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
    """
    X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
    # Encode labels as integers 0..n_labels-1 so they can be used as indices.
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    check_number_of_labels(len(le.classes_), X.shape[0])
    distances = pairwise_distances(X, metric=metric, **kwds)
    unique_labels = le.classes_
    n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))
    # For sample i, store the mean distance of the cluster to which
    # it belongs in intra_clust_dists[i]
    intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
    # For sample i, store the mean distance of the second closest
    # cluster in inter_clust_dists[i]; start at +inf and take running minima.
    inter_clust_dists = np.inf + intra_clust_dists
    for curr_label in range(len(unique_labels)):
        # Find inter_clust_dist for all samples belonging to the same
        # label.
        mask = labels == curr_label
        current_distances = distances[mask]
        # Leave out current sample (hence the -1 in the denominator).
        n_samples_curr_lab = n_samples_per_label[curr_label] - 1
        if n_samples_curr_lab != 0:
            intra_clust_dists[mask] = np.sum(current_distances[:, mask], axis=1) / n_samples_curr_lab
        # Now iterate over all other labels, finding the mean
        # cluster distance that is closest to every sample.
        for other_label in range(len(unique_labels)):
            if other_label != curr_label:
                other_mask = labels == other_label
                other_distances = np.mean(current_distances[:, other_mask], axis=1)
                inter_clust_dists[mask] = np.minimum(inter_clust_dists[mask], other_distances)
    sil_samples = inter_clust_dists - intra_clust_dists
    sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
    # score 0 for clusters of size 1, according to the paper
    sil_samples[n_samples_per_label.take(labels) == 1] = 0
    return sil_samples
|
def call(self, callsite_addr, addr, retn_target=None, stack_pointer=None):
    """Push a stack frame onto the call stack.

    This method is called when CFG recovery encounters a function call.

    :param int callsite_addr: Address of the call site.
    :param int addr: Address of the call target.
    :param int or None retn_target: Address of the return target.
    :param int stack_pointer: Value of the stack pointer.
    :return: None
    """
    new_frame = CallStack(
        call_site_addr=callsite_addr,
        func_addr=addr,
        ret_addr=retn_target,
        stack_ptr=stack_pointer,
    )
    return self.push(new_frame)
|
def _read_encrypt_auth ( self ) :
"""Read Authentication field when Cryptographic Authentication is employed .
Structure of Cryptographic Authentication [ RFC 2328 ] :
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
| 0 | Key ID | Auth Data Len |
| Cryptographic sequence number |
Octets Bits Name Description
0 0 - Reserved ( must be zero )
2 16 ospf . auth . key _ id Key ID
3 24 ospf . auth . len Auth Data Length
4 32 ospf . auth . seq Cryptographic Sequence Number"""
|
_resv = self . _read_fileng ( 2 )
_keys = self . _read_unpack ( 1 )
_alen = self . _read_unpack ( 1 )
_seqn = self . _read_unpack ( 4 )
auth = dict ( key_id = _keys , len = _alen , seq = _seqn , )
return auth
|
def strip_context_items(self, a_string):
    """Strip Juniper-specific output.

    Juniper will also put a configuration context:
    [edit]

    and various chassis contexts:
    {master:0}, {backup:1}

    This method removes those lines when they are the last line of output.
    """
    context_patterns = (
        r"\[edit.*\]",
        r"\{master:.*\}",
        r"\{backup:.*\}",
        r"\{line.*\}",
        r"\{primary.*\}",
        r"\{secondary.*\}",
    )
    lines = a_string.split(self.RESPONSE_RETURN)
    final_line = lines[-1]
    if any(re.search(pattern, final_line) for pattern in context_patterns):
        return self.RESPONSE_RETURN.join(lines[:-1])
    return a_string
|
def wait_for_success(self, interval=1):
    """Wait for instance to complete, and check if the instance is successful.

    :param interval: time interval to check
    :return: None
    :raise: :class:`odps.errors.ODPSError` if the instance failed
    """
    self.wait_for_completion(interval=interval)
    if not self.is_successful(retry=True):
        # Locate the first non-successful task and surface it as an
        # exception tagged with this instance's id.
        for task_name, task in six.iteritems(self.get_task_statuses()):
            exc = None
            if task.status == Instance.Task.TaskStatus.FAILED:
                # Failed tasks carry a parseable error payload in the result.
                exc = errors.parse_instance_error(self.get_task_result(task_name))
            elif task.status != Instance.Task.TaskStatus.SUCCESS:
                # Any other non-success state (e.g. cancelled) -> generic error.
                exc = errors.ODPSError('%s, status=%s' % (task_name, task.status.value))
            if exc:
                exc.instance_id = self.id
                raise exc
|
def status(self, repository=None, snapshot=None, params=None):
    """Return information about all currently running snapshots. By specifying
    a repository name, it's possible to limit the results to a particular
    repository.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_

    :arg repository: A repository name
    :arg snapshot: A comma-separated list of snapshot names
    :arg ignore_unavailable: Whether to ignore unavailable snapshots,
        defaults to false which means a NotFoundError `snapshot_missing_exception` is thrown
    :arg master_timeout: Explicit operation timeout for connection to master
        node
    """
    endpoint = _make_path('_snapshot', repository, snapshot, '_status')
    return self.transport.perform_request('GET', endpoint, params=params)
|
async def load_uvarint(reader):
    """Monero portable_binary_archive boost integer serialization.

    The first byte is the *signed* size of the little-endian integer that
    follows; a negative size marks a negative value (Boost portable binary
    archive convention).

    :param reader: async reader exposing ``areadinto``
    :return: the deserialized integer
    :raises ValueError: if the encoded size exceeds 8 bytes
    """
    buffer = _UVARINT_BUFFER
    await reader.areadinto(buffer)
    size = buffer[0]
    if size == 0:
        return 0
    # BUGFIX: buffer[0] is always 0..255 in Python, so the original
    # `negative = size < 0` test could never fire. The size byte is a signed
    # char in the Boost portable binary format, so reinterpret values > 127
    # as negative before extracting the sign.
    if size > 127:
        size -= 256
    negative = size < 0
    size = -size if negative else size
    result = 0
    shift = 0
    if size > 8:
        raise ValueError('Varint size too big: %s' % size)
    # TODO: endianity, rev bytes if needed
    for _ in range(size):
        await reader.areadinto(buffer)
        result += buffer[0] << shift
        shift += 8
    return result if not negative else -result
|
def get_context(template, line, num_lines=5, marker=None):
    '''Returns debugging context around a line in a given string.

    :param template: the full text to excerpt from
    :param line: 1-based line number to center the context on
    :param num_lines: lines of context shown before and after ``line``
    :param marker: optional string appended to the offending line

    Returns:: string
    '''
    all_lines = template.splitlines()
    total = len(all_lines)
    # In test mode, a single line template would return a crazy line number;
    # if the given line is obviously wrong, just return the entire template.
    if line > total:
        return template
    start = max(0, line - num_lines - 1)  # subtract 1 for 0-based indexing
    end = min(total, line + num_lines)
    error_idx = line - start - 1          # subtract 1 for 0-based indexing
    buf = []
    if start > 0:
        buf.append('[...]')
        error_idx += 1
    buf.extend(all_lines[start:end])
    if end < total:
        buf.append('[...]')
    if marker:
        buf[error_idx] += marker
    return '---\n{0}\n---'.format('\n'.join(buf))
|
def _luminance ( self , rgb ) :
"""Determine the liminanace of an RGB colour"""
|
a = [ ]
for v in rgb :
v = v / float ( 255 )
if v < 0.03928 :
result = v / 12.92
else :
result = math . pow ( ( ( v + 0.055 ) / 1.055 ) , 2.4 )
a . append ( result )
return a [ 0 ] * 0.2126 + a [ 1 ] * 0.7152 + a [ 2 ] * 0.0722
|
def _run_check(self, check_method, ds, max_level):
    """Runs a check and returns the filtered list of results.

    @param bound method check_method: a given check method
    @param netCDF4 dataset ds
    @param int max_level: check level; results at or below this weight
        are filtered out (None keeps everything)
    @return list: list of Result objects
    """
    raw = check_method(ds)
    method_name = check_method.__func__.__name__
    checker = check_method.__self__
    if isinstance(raw, list):
        kept = []
        for item in raw:
            fixed = fix_return_value(item, method_name, check_method, checker)
            if max_level is None or fixed.weight > max_level:
                kept.append(fixed)
        return kept
    fixed = fix_return_value(raw, method_name, check_method, checker)
    if max_level is None or fixed.weight > max_level:
        return [fixed]
    return []
|
def ip(ip_addr, return_tuple=True):
    """Check whether an address is a valid IPv4 address.

    Args:
        ip_addr: IP address in dotted-quad form, e.g. 192.168.1.1
        return_tuple: If True, interactively re-prompt until a valid
            address is entered and return it; if False, return True/False.

    Returns:
        The validated address string (return_tuple=True) or a bool
        (return_tuple=False).
    """
    # Raw strings fix the invalid "\." escape sequence of the original
    # pattern (a SyntaxWarning/DeprecationWarning on modern Python).
    octet = r"((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))"
    regex_ip = __re.compile(r"^" + r"\.".join([octet] * 4) + r"$")
    if return_tuple:
        # Interactive mode: keep prompting until the input validates.
        while not regex_ip.match(ip_addr):
            print("Not a good IP.")
            print("Please try again.")
            ip_addr = input("Please enter a IP address in the following format x.x.x.x: ")
        return ip_addr
    return bool(regex_ip.match(ip_addr))
|
def list_objects(self, query=None, limit=-1, offset=-1):
    """List of all objects in the database. Optional parameters limit and
    offset for pagination. A dictionary of key,value-pairs can be given as
    additional query condition for document properties.

    Parameters
    ----------
    query : Dictionary
        Filter objects by property-value pairs defined by dictionary.
    limit : int
        Limit number of items in the result set (negative means unlimited).
    offset : int
        Set offset in list (order as defined by object store; negative
        means no offset).

    Returns
    -------
    ObjectListing
    """
    result = []
    # Build the document query; only 'active' (non-deleted) objects are listed.
    doc = {'active': True}
    if not query is None:
        for key in query:
            doc[key] = query[key]
    # Iterate over all objects in the MongoDB collection and add them to
    # the result, newest first.
    coll = self.collection.find(doc).sort([('timestamp', pymongo.DESCENDING)])
    count = 0
    for document in coll:
        # We are done if the limit is reached. Test first in case limit is
        # zero.
        if limit >= 0 and len(result) == limit:
            break
        # Skip documents until the requested offset has been passed.
        if offset < 0 or count >= offset:
            result.append(self.from_dict(document))
        count += 1
    # NOTE(review): cursor.count() is deprecated/removed in newer PyMongo —
    # confirm the installed driver version still supports it.
    return ObjectListing(result, offset, limit, coll.count())
|
def set_speech_text(self, text):
    """Set response output speech as plain text type.

    Args:
        text: str. Response speech used when type is 'PlainText'. Cannot
            exceed 8,000 characters.
    """
    speech = self.response.outputSpeech
    speech.type = 'PlainText'
    speech.text = text
|
def cancel(self, subscription_id, data=None, **kwargs):
    """Cancel subscription given by subscription_id.

    Args:
        subscription_id: Id for which subscription has to be cancelled
        data: optional dict payload forwarded with the POST request

    Returns:
        Subscription Dict for given subscription id
    """
    # BUGFIX: the original used a mutable default (`data={}`) which is
    # shared across calls; use a None sentinel and forward an empty dict
    # to preserve the old behavior.
    if data is None:
        data = {}
    url = "{}/{}/cancel".format(self.base_url, subscription_id)
    return self.post_url(url, data, **kwargs)
|
def get_device_statistics(self, begin_date, end_date, device_id=None, uuid=None, major=None, minor=None):
    """Device-level statistics query (WeChat Shake-Around).

    http://mp.weixin.qq.com/wiki/0/8a24bcacad40fe7ee98d1573cb8a6764.html

    :param begin_date: start time; the span may not exceed 30 days
    :param end_date: end time; the span may not exceed 30 days
    :param device_id: device id; may be omitted when uuid/major/minor are
        given, and takes precedence when both are supplied
    :param uuid: UUID
    :param major: major
    :param minor: minor
    """
    identifier = {
        'device_id': device_id,
        'uuid': uuid,
        'major': major,
        'minor': minor,
    }
    payload = {
        'device_identifier': identifier,
        'begin_date': self._to_timestamp(begin_date),
        'end_date': self._to_timestamp(end_date),
    }
    return self._post('shakearound/statistics/device', data=payload, result_processor=lambda x: x['data'])
|
def get_config_file():
    # type: () -> AnyStr
    """Get model configuration file name from argv."""
    parser = argparse.ArgumentParser(description="Read configuration file.")
    parser.add_argument('-ini', help="Full path of configuration file")
    ini_file = parser.parse_args().ini
    if not FileClass.is_file_exists(ini_file):
        # Bail out with a usage hint when the file is missing or unreadable.
        print("Usage: -ini <full path to the configuration file.>")
        exit(-1)
    return ini_file
|
def place_docker(self, docker, area='top'):
    """IN DEVELOPMENT

    Places a DockWindow instance at the specified area ('top', 'bottom',
    'left', 'right', or None).
    """
    # map of options
    qt_areas = dict(
        top=_g.QtCore.Qt.TopDockWidgetArea,
        bottom=_g.QtCore.Qt.BottomDockWidgetArea,
        left=_g.QtCore.Qt.LeftDockWidgetArea,
        right=_g.QtCore.Qt.RightDockWidgetArea,
    )
    # set the parent
    docker.set_parent(self)
    # forward resize/move events to this window's handlers
    docker._window.resizeEvent = self._event_resize
    docker._window.moveEvent = self._event_move
    # Keep it in the window
    docker._window.setFeatures(docker._window.DockWidgetMovable)
    # set it
    self._window.addDockWidget(qt_areas[area], docker._window)
    return docker
|
def get_portchannel_info_by_intf_output_lacp_actor_port(self, **kwargs):
    """Auto Generated Code

    Builds the get_portchannel_info_by_intf/output/lacp/actor-port XML
    payload and hands it to the callback (defaults to self._callback).
    """
    root = ET.Element("get_portchannel_info_by_intf")
    output = ET.SubElement(root, "output")
    lacp = ET.SubElement(output, "lacp")
    actor_port = ET.SubElement(lacp, "actor-port")
    actor_port.text = kwargs.pop('actor_port')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
|
def full_name_natural_split(full_name):
    """Split a full name into a natural first name, last name and middle
    initials.

    Handles "El <name>" two-word first names and roman-numeral suffixes
    (I, II, III) that belong with the last name.

    Returns:
        Tuple (first_name, middle_initials, last_name).
    """
    parts = full_name.strip().split(' ')
    first_name = ""
    if parts:
        first_name = parts.pop(0)
    # "El" is treated as part of a two-word first name.
    if first_name.lower() == "el" and parts:
        first_name += " " + parts.pop(0)
    last_name = ""
    if parts:
        last_name = parts.pop()
    # BUGFIX: the original applied `and parts` only to the 'iii' comparison
    # due to operator precedence, so names like "John I" crashed with an
    # IndexError on popping from an empty list.
    if last_name.lower() in ('i', 'ii', 'iii') and parts:
        last_name = parts.pop() + " " + last_name
    middle_initials = ""
    for middle_name in parts:
        if middle_name:
            middle_initials += middle_name[0]
    return first_name, middle_initials, last_name
|
def available_composite_ids(self, available_datasets=None):
    """Get names of compositors that can be generated from the available datasets.

    :param available_datasets: optional list of DatasetID objects to treat
        as available; defaults to this object's non-composite available
        dataset ids
    Returns: generator of available compositor's names
    """
    if available_datasets is None:
        available_datasets = self.available_dataset_ids(composites=False)
    else:
        if not all(isinstance(ds_id, DatasetID) for ds_id in available_datasets):
            raise ValueError("'available_datasets' must all be DatasetID objects")
    all_comps = self.all_composite_ids()
    # recreate the dependency tree so it doesn't interfere with the user's
    # wishlist
    comps, mods = self.cpl.load_compositors(self.attrs['sensor'])
    dep_tree = DependencyTree(self.readers, comps, mods)
    dep_tree.find_dependencies(set(available_datasets + all_comps))
    # Composites whose full dependency chain resolved end up in the trunk.
    available_comps = set(x.name for x in dep_tree.trunk())
    # get rid of modified composites that are in the trunk
    return sorted(available_comps & set(all_comps))
|
def ansi_code(name):
    """Return ansi color or style codes or '' if colorama is not available."""
    target = colorama
    try:
        # Walk dotted attribute paths like "Fore.RED".
        for attr in name.split("."):
            target = getattr(target, attr)
    except AttributeError:
        # colorama missing (None placeholder) or unknown attribute.
        return ""
    return target
|
def parse_uinput_mapping(name, mapping):
    """Parses a dict of mapping options and registers the mapping."""
    axes, buttons, mouse, mouse_options = {}, {}, {}, {}
    description = "ds4drv custom mapping ({0})".format(name)
    # Route each key into the right bucket based on its uinput prefix.
    routes = (
        (("BTN_", "KEY_"), buttons),
        (("ABS_",), axes),
        (("REL_",), mouse),
        (("MOUSE_",), mouse_options),
    )
    for key, attr in mapping.items():
        key = key.upper()
        for prefixes, bucket in routes:
            if key.startswith(prefixes):
                bucket[key] = attr
                break
    create_mapping(name, description, axes=axes, buttons=buttons, mouse=mouse, mouse_options=mouse_options)
|
def load_from_path(local_path=None, parser=None, all_load=False):
    """Loads the data from a local path into a GMQLDataset.

    The loading of the files is "lazy", which means that the files are loaded
    only when the user does a materialization (see
    :func:`~gmql.dataset.GMQLDataset.GMQLDataset.materialize`). The user can
    force the materialization of the data (maybe for an initial data
    exploration on only the metadata) by setting :attr:`~.all_load` (load
    both region and meta data in memory). If this parameter is True, a
    :class:`~gmql.dataset.GDataframe.GDataframe` is returned, otherwise a
    :class:`~gmql.dataset.GMQLDataset.GMQLDataset` is returned.

    :param local_path: local path of the dataset
    :param parser: the parser to be used for reading the data
    :param all_load: if set to True, both region and meta data are loaded in
        memory and an instance of GDataframe is returned
    :return: A new GMQLDataset or a GDataframe
    """
    from .. import GDataframe
    from .. import GMQLDataset
    pmg = get_python_manager()
    local_path = preprocess_path(local_path)
    if all_load:
        # Eager path: load metadata and regions into memory right away.
        meta = MetaLoaderFile.load_meta_from_path(local_path)
        if isinstance(parser, RegionParser):
            # region data with an explicitly supplied parser
            regs = RegLoaderFile.load_reg_from_path(local_path, parser)
        else:
            regs = RegLoaderFile.load_reg_from_path(local_path)
        return GDataframe.GDataframe(regs=regs, meta=meta)
    else:
        from ...settings import is_metaprofiling_enabled
        if is_metaprofiling_enabled():
            meta_profile = create_metadata_profile(local_path)
        else:
            meta_profile = None
        if parser is None:
            # find the parser automatically from the dataset layout
            parser = RegLoaderFile.get_parser(local_path)
        elif not isinstance(parser, RegionParser):
            raise ValueError("parser must be RegionParser. {} was provided".format(type(parser)))
        # Register (or reuse) this path in the source table so repeated
        # loads share a single source id.
        source_table = get_source_table()
        id = source_table.search_source(local=local_path)
        if id is None:
            id = source_table.add_source(local=local_path, parser=parser)
        local_sources = [id]
        index = pmg.read_dataset(str(id), parser.get_gmql_parser())
        return GMQLDataset.GMQLDataset(index=index, parser=parser, location="local", path_or_name=local_path, local_sources=local_sources, meta_profile=meta_profile)
|
def plot_eps_data_hist(self, dfs):
    """Plot histograms of data residuals and data error weighting.

    :param dfs: list of DataFrames; ``dfs[0]`` holds the initial data
        errors, ``dfs[1:]`` one entry per inversion iteration.
    Saves the resulting figure to 'eps_plot_hist.png'.

    TODO:
    * add percentage of data below/above the RMS value
    """
    # check if this is a DC inversion: DC data carries a 'datum' column,
    # complex data does not
    if 'datum' in dfs[0]:
        dc_inv = True
    else:
        dc_inv = False
    nr_y = len(dfs)
    size_y = 5 / 2.54 * nr_y  # 5 cm per row, converted to inches
    if dc_inv:
        nr_x = 1
    else:
        nr_x = 3
    size_x = 15 / 2.54  # 15 cm, converted to inches
    fig, axes = plt.subplots(nr_y, nr_x, figsize=(size_x, size_y))
    # ensure axes is always 2D so axes[row, col] indexing works for nr_x == 1
    axes = np.atleast_2d(axes)
    # plot initial data errors
    df = dfs[0]
    if dc_inv:
        ax = axes[0, 0]
        ax.hist(df['datum'] / df['eps_r'], 100, )
        ax.set_xlabel(r'$-log(|R|) / \epsilon_r$')
        ax.set_ylabel(r'count')
    else:  # complex inversion: magnitude, weighted magnitude and phase
        ax = axes[0, 0]
        ax.hist(df['-log(|R|)'] / df['eps'], 100, )
        ax.set_xlabel(r'$-log(|R|)$')
        ax.set_ylabel(r'count')
        ax = axes[0, 1]
        ax.hist(df['-log(|R|)'] / df['eps_r'], 100, )
        ax.set_xlabel(r'$-log(|R|) / \epsilon_r$')
        ax.set_ylabel(r'count')
        ax = axes[0, 2]
        phase_data = df['-Phase(rad)'] / df['eps_p']
        # skip the histogram when the phase column is entirely inf/nan
        if not np.all(np.isinf(phase_data) | np.isnan(phase_data)):
            ax.hist(phase_data, 100, )
        ax.set_xlabel(r'$-\phi[rad] / \epsilon_p$')
        ax.set_ylabel(r'count')
    # iterations: psi histogram plus data-vs-model scatter plots
    for it, df in enumerate(dfs[1:]):
        ax = axes[1 + it, 0]
        ax.hist(df['psi'], 100)
        # RMS of psi, drawn as a dashed reference line
        rms = np.sqrt(1 / df['psi'].shape[0] * np.sum(df['psi'] ** 2))
        ax.axvline(rms, color='k', linestyle='dashed')
        ax.set_title('iteration: {0}'.format(it))
        ax.set_xlabel('psi')
        ax.set_ylabel(r'count')
        ax = axes[1 + it, 1]
        Rdat = df['Re(d)']
        Rmod = df['Re(f(m))']
        ax.scatter(Rdat, Rmod, )
        ax.set_xlabel(r'$log(R_{data}~[\Omega])$')
        ax.set_ylabel(r'$log(R_{mod}~[\Omega])$')
        ax = axes[1 + it, 2]
        phidat = df['Im(d)']
        phimod = df['Im(f(m))']
        ax.scatter(phidat, phimod, )
        ax.set_xlabel(r'$\phi_{data}~[mrad]$')
        ax.set_ylabel(r'$\phi_{mod}~[mrad]$')
    fig.tight_layout()
    fig.savefig('eps_plot_hist.png', dpi=300)
|
def get_file_port(self):
    """Return the list of ports usable by File.

    File ports include ethernet ports (non-bonded) and link aggregation
    ports.
    """
    ethernet_ports = self.get_ethernet_port(bond=False)
    link_aggregations = self.get_link_aggregation()
    return ethernet_ports + link_aggregations
|
def load(self, data):
    """Replace the currently stored object data with ``data``.

    Clears any previous content, copies in the new payload, then runs the
    object's enhance() post-processing hook.
    """
    self.clear()
    self.update(data)
    self.enhance()
|
def mul(self, o):
    """Binary operation: multiplication

    :param o: The other operand
    :return: self * o
    """
    if self.is_integer and o.is_integer:
        # Two integers! The product is a single concrete value with stride 0.
        a, b = self.lower_bound, o.lower_bound
        ret = StridedInterval(bits=self.bits, stride=0, lower_bound=a * b, upper_bound=a * b)
        # NOTE(review): overflow beyond the bit width is only logged here;
        # presumably normalize() wraps the value into range — confirm.
        if a * b > (2 ** self.bits - 1):
            logger.warning('Overflow in multiplication detected.')
        return ret.normalize()
    else:  # All other cases
        # Cut from both north pole and south pole so each piece avoids
        # pole-crossing ambiguity, then multiply piecewise.
        si1_psplit = self._psplit()
        si2_psplit = o._psplit()
        all_resulting_intervals = list()
        for si1 in si1_psplit:
            for si2 in si2_psplit:
                # The true product must be consistent with both the unsigned
                # and the signed wrapped multiplication, so intersect them.
                tmp_unsigned_mul = self._wrapped_unsigned_mul(si1, si2)
                tmp_signed_mul = self._wrapped_signed_mul(si1, si2)
                for tmp_meet in tmp_unsigned_mul._multi_valued_intersection(tmp_signed_mul):
                    all_resulting_intervals.append(tmp_meet)
        # Join all partial products back into a single interval.
        return StridedInterval.least_upper_bound(*all_resulting_intervals).normalize()
|
def format_hyperlink(val, hlx, hxl, xhl):
    """Formats an html hyperlink into other forms.

    @hlx, hxl, xhl: values returned by set_output_format
    """
    if '<a href="' not in str(val):
        # No hyperlink present: hand the value back untouched.
        return val
    if hlx == '<a href="':
        # Target format is already html; nothing to rewrite.
        return val
    return val.replace('<a href="', hlx).replace('">', hxl, 1).replace('</a>', xhl)
|
def __read_and_render_yaml_file(source, template, saltenv):
    '''Read a yaml file and, if needed, renders that using the specified
    templating. Returns the python objects defined inside of the file.

    :param source: URL of the YAML file (e.g. a salt:// path)
    :param template: name of a templating engine registered in
        salt.utils.templates.TEMPLATE_REGISTRY, or falsy for no templating
    :param saltenv: salt fileserver environment to fetch the file from
    '''
    # Cache the file locally first; cp.cache_file returns the local path
    # (falsy when the source could not be found).
    sfn = __salt__['cp.cache_file'](source, saltenv)
    if not sfn:
        raise CommandExecutionError('Source file \'{0}\' not found'.format(source))
    with salt.utils.files.fopen(sfn, 'r') as src:
        contents = src.read()
    if template:
        if template in salt.utils.templates.TEMPLATE_REGISTRY:  # TODO: should we allow user to set also `context` like  # pylint: disable=fixme
            # `file.managed` does?
            # Apply templating
            data = salt.utils.templates.TEMPLATE_REGISTRY[template](contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__)
            if not data['result']:  # Failed to render the template
                raise CommandExecutionError('Failed to render file path with error: ' '{0}'.format(data['data']))
            contents = data['data'].encode('utf-8')
        else:
            raise CommandExecutionError('Unknown template specified: {0}'.format(template))
    return salt.utils.yaml.safe_load(contents)
|
def convert_single(ID, from_type, to_type):
    '''Convenience function wrapper for convert. Takes a single ID and
    converts it from from_type to to_type. The return value is the ID in
    the scheme of to_type.'''
    # Validate both schemes up front, reporting the first bad one.
    for scheme in (from_type, to_type):
        if scheme not in converter_types:
            raise PubMedConverterTypeException(scheme)
    results = convert([ID], from_type)
    # Results may be keyed on the uppercased form of the ID.
    key = ID if ID in results else ID.upper()
    return results[key].get(to_type)
|
def get_multi_dataset(datasets, pmf=None):
    """Build a Dataset that samples records from one or more Datasets.

    Args:
        datasets: A list of one or more Dataset objects to sample from.
        pmf: A tensor of shape [len(datasets)], the probabilities to sample
            each dataset with. This tensor is often constructed with the
            global_step. If this is None, we sample from the datasets
            uniformly at random.

    Returns:
        A Dataset object containing records from multiple datasets. Note
        that because this dataset iterates through other datasets it is
        stateful, thus you will need to call make_initializable_iterator
        instead of make_one_shot_iterator.
    """
    if pmf is None:
        pmf = tf.fill([len(datasets)], 1.0 / len(datasets))
    samplers = [ds.repeat().make_one_shot_iterator().get_next for ds in datasets]

    def sample(_):
        return categorical_case(pmf, samplers)

    return tf.data.Dataset.from_tensors([]).repeat().map(sample)
|
def label_faces(network, tol=0.0, label='surface'):
    r"""Finds pores on the surface of the network and labels them according to
    whether they are on the *top*, *bottom*, etc. This function assumes the
    network is cubic in shape (i.e. with six flat sides).

    Parameters
    ----------
    network : OpenPNM Network object
        The network to apply the labels
    tol : scalar
        The tolerance for defining what counts as a surface pore, which is
        specifically meant for random networks. All pores within ``tol`` of
        the maximum or minimum along each axis count as surface pores. The
        default is 0.
    label : string
        An identifying label to isolate the pores on the faces of the
        network. Default is 'surface'.
    """
    if label not in network.labels():
        find_surface_pores(network, label=label)
    surface = network['pore.' + label]
    coords = network['pore.coords']
    x, y, z = coords[:, 0], coords[:, 1], coords[:, 2]
    xmin, xmax = sp.amin(x), sp.amax(x)
    ymin, ymax = sp.amin(y), sp.amax(y)
    zmin, zmax = sp.amin(z), sp.amax(z)
    xspan, yspan, zspan = xmax - xmin, ymax - ymin, zmax - zmin
    # A face pore is a surface pore that is also within tolerance of the
    # extreme coordinate along the relevant axis.
    network['pore.back'] = (x >= (1 - tol) * xmax) * surface
    network['pore.right'] = (y >= (1 - tol) * ymax) * surface
    network['pore.top'] = (z >= (1 - tol) * zmax) * surface
    network['pore.front'] = (x <= (xmin + tol * xspan)) * surface
    network['pore.left'] = (y <= (ymin + tol * yspan)) * surface
    network['pore.bottom'] = (z <= (zmin + tol * zspan)) * surface
|
def delete_instance(model, instance_id, _commit=True):
    """Delete instance.

    :param model: a string, model name in rio.models.
    :param instance_id: integer, instance id.
    :param _commit: control whether commit data to database or not. Default True.
    """
    try:
        model = get_model(model)
    except ImportError:
        # Unknown model name: silently no-op, matching the lookup-miss case.
        return
    instance = model.query.get(instance_id)
    if not instance:
        return
    db.session.delete(instance)
    try:
        if _commit:
            db.session.commit()
        else:
            db.session.flush()
    except Exception:
        db.session.rollback()
        # Bare `raise` re-raises the active exception with its original
        # traceback (the old `raise exception` form is unidiomatic).
        raise
|
def get_basis(name, elements=None, version=None, fmt=None, uncontract_general=False, uncontract_spdf=False, uncontract_segmented=False, make_general=False, optimize_general=False, data_dir=None, header=True):
    '''Obtain a basis set

    This is the main function for getting basis set information.
    This function reads in all the basis data and returns it either
    as a string or as a python dictionary.

    Parameters
    ----------
    name : str
        Name of the basis set. This is not case sensitive.
    elements : str or list
        List of elements that you want the basis set for.
        Elements can be specified by Z-number (int or str) or by symbol (str).
        If this argument is a str (ie, '1-3,7-10'), it is expanded into a list.
        Z numbers and symbols (case insensitive) can be used interchangeably
        (see :func:`bse.misc.expand_elements`).
        If an empty string or list is passed, or if None is passed (the default),
        all elements for which the basis set is defined are included.
    version : int or str
        Obtain a specific version of this basis set. By default,
        the latest version is returned.
    fmt : str
        The desired output format of the basis set. By default,
        basis set information is returned as a python dictionary. Otherwise,
        if a format is specified, a string is returned.
        Use :func:`bse.api.get_formats` to programmatically obtain the available
        formats. The `fmt` argument is not case sensitive.
        Available formats are

            * nwchem
            * gaussian94
            * psi4
            * gamess_us
            * turbomole
            * json
    uncontract_general : bool
        If True, remove general contractions by duplicating the set
        of primitive exponents with each vector of coefficients.
        Primitives with zero coefficient are removed, as are duplicate shells.
    uncontract_spdf : bool
        If True, remove general contractions with combined angular momentum (sp, spd, etc)
        by duplicating the set of primitive exponents with each vector of coefficients.
        Primitives with zero coefficient are removed, as are duplicate shells.
    uncontract_segmented : bool
        If True, remove segmented contractions by duplicating each primitive into new shells.
        Each coefficient is set to 1.0
    make_general : bool
        If True, make the basis set as generally-contracted as possible. There will be one
        shell per angular momentum (for each element)
    optimize_general : bool
        Optimize by removing general contractions that contain uncontracted
        functions (see :func:`bse.manip.optimize_general`)
    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.
    header : bool
        If True (the default) and a string format is requested via ``fmt``,
        prepend a descriptive header to the returned string.

    Returns
    -------
    str or dict
        The basis set in the desired format. If `fmt` is **None**, this will be a python
        dictionary. Otherwise, it will be a string.

    Raises
    ------
    KeyError
        If the requested version does not exist for this basis set, or a
        requested element is not part of it.
    '''
    data_dir = fix_data_dir(data_dir)
    bs_data = _get_basis_metadata(name, data_dir)

    # If version is not specified, use the latest
    if version is None:
        version = bs_data['latest_version']
    else:
        # Version may be an int
        version = str(version)

    if version not in bs_data['versions']:
        raise KeyError("Version {} does not exist for basis {}".format(version, name))

    # Compose the entire basis set (all elements)
    file_relpath = bs_data['versions'][version]['file_relpath']
    basis_dict = compose.compose_table_basis(file_relpath, data_dir)

    # Set the name (from the global metadata)
    # Only the list of all names will be returned from compose_table_basis
    basis_dict['name'] = bs_data['display_name']

    # Handle optional arguments
    if elements is not None:
        # Convert to purely a list of strings that represent integers
        elements = misc.expand_elements(elements, True)

        # Did the user pass an empty string or empty list? If so, include
        # all elements
        if len(elements) != 0:
            bs_elements = basis_dict['elements']

            # Are elements part of this basis set?
            for el in elements:
                if el not in bs_elements:
                    elsym = lut.element_sym_from_Z(el)
                    raise KeyError("Element {} (Z={}) not found in basis {} version {}".format(elsym, el, name, version))

            # Set to only the elements we want
            basis_dict['elements'] = {k: v for k, v in bs_elements.items() if k in elements}

    # Note that from now on, the pipleline is going to modify basis_dict. That is ok,
    # since we are returned a unique instance from compose_table_basis
    needs_pruning = False

    if optimize_general:
        basis_dict = manip.optimize_general(basis_dict, False)
        needs_pruning = True

    # uncontract_segmented implies uncontract_general
    if uncontract_segmented:
        basis_dict = manip.uncontract_segmented(basis_dict, False)
        needs_pruning = True
    elif uncontract_general:
        basis_dict = manip.uncontract_general(basis_dict, False)
        needs_pruning = True

    if uncontract_spdf:
        basis_dict = manip.uncontract_spdf(basis_dict, 0, False)
        needs_pruning = True

    if make_general:
        basis_dict = manip.make_general(basis_dict, False)
        needs_pruning = True

    # Remove dead and duplicate shells
    if needs_pruning:
        basis_dict = manip.prune_basis(basis_dict, False)

    # If fmt is not specified, return as a python dict
    if fmt is None:
        return basis_dict

    if header:
        header_str = _header_string(basis_dict)
    else:
        header_str = None

    return converters.convert_basis(basis_dict, fmt, header_str)
|
def recoverURL(self, url):
    """Public method to recover a resource.

    Configures the user agent and an http/https proxy before opening the
    URL. ``.onion`` addresses are rewritten to the ``.onion.cab`` tor2web
    gateway, since no local Tor bundle is configured.

    Args:
        url: The URL to be collected.

    Returns:
        The raw response body (as returned by ``self.br.open(url).read()``),
        or ``None`` if opening the resource failed for any reason.
    """
    # Configuring user agents...
    self.setUserAgent()
    # Configuring proxies to match the URL scheme.
    if "https://" in url:
        self.setProxy(protocol="https")
    else:
        self.setProxy(protocol="http")
    # Giving special treatment for .onion platforms
    if ".onion" in url:
        try:
            # TODO: configuring manually the tor bundle
            pass
        except Exception:
            # TODO: capturing the error and eventually trying the tor2web approach
            # url = url.replace(".onion", ".tor2web.org")
            pass
        # Route the request through the .onion.cab tor2web gateway.
        url = url.replace(".onion", ".onion.cab")
    # Opening the resource. Narrowed from a bare except so that
    # SystemExit/KeyboardInterrupt are no longer swallowed.
    try:
        recurso = self.br.open(url)
    except Exception:
        # Something happened. Maybe the request was forbidden?
        return None
    html = recurso.read()
    return html
|
def compare_and_set(self, expected, updated):
    """Atomically sets the value to the given updated value only if the
    current value == the expected value.

    :param expected: (object), the expected value.
    :param updated: (object), the new value.
    :return: (bool), ``true`` if successful; or ``false`` if the actual
        value was not equal to the expected value.
    """
    # Serialize both operands before dispatching the codec invocation.
    expected_data = self._to_data(expected)
    updated_data = self._to_data(updated)
    return self._encode_invoke(
        atomic_reference_compare_and_set_codec,
        expected=expected_data,
        updated=updated_data,
    )
|
def grain_funcs(opts, proxy=None):
    '''Return the grain functions as a lazy loader.

    .. code-block:: python

        import salt.config
        import salt.loader

        __opts__ = salt.config.minion_config('/etc/salt/minion')
        grainfuncs = salt.loader.grain_funcs(__opts__)
    '''
    grain_dirs = _module_dirs(opts, 'grains', 'grain', ext_type_dirs='grains_dirs')
    loader = LazyLoader(grain_dirs, opts, tag='grains')
    # Grain modules expect __utils__ to be packed into their namespace.
    loader.pack['__utils__'] = utils(opts, proxy=proxy)
    return loader
|
def graphql_requests(self, *queries):
    """Execute one or more GraphQL queries in a single request.

    :param queries: Zero or more GraphQL objects
    :type queries: GraphQL
    :raises: FBchatException if request failed
    :return: A tuple containing json graphql queries
    :rtype: tuple
    """
    payload = {
        "method": "GET",
        "response_format": "json",
        "queries": graphql_queries_to_json(*queries),
    }
    response = self._post(self.req_url.GRAPHQL, payload, fix_request=True, as_graphql=True)
    return tuple(response)
|
def call(self, name, *args, **kwargs):
    """Asynchronously call a method of the external environment.

    Args:
        name: Name of the method to call.
        *args: Positional arguments to forward to the method.
        **kwargs: Keyword arguments to forward to the method.

    Returns:
        Promise object that blocks and provides the return value when called.
    """
    # Ship the call over the pipe; the bound ``_receive`` method acts as
    # the promise -- invoking it blocks until the result arrives.
    message = (self._CALL, (name, args, kwargs))
    self._conn.send(message)
    return self._receive
|
def ParsePageVisitedRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):
    """Parses a page visited row.

    Builds a FirefoxPlacesPageVisitedEventData from the row and, when a
    visit timestamp is present, produces a last-visited event for it.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
        cache (Optional[SQLiteCache]): cache.
        database (Optional[SQLiteDatabase]): database.
    """
    # Row values are retrieved via the query hash (see _GetRowValue).
    query_hash = hash(query)
    from_visit = self._GetRowValue(query_hash, row, 'from_visit')
    hidden = self._GetRowValue(query_hash, row, 'hidden')
    rev_host = self._GetRowValue(query_hash, row, 'rev_host')
    typed = self._GetRowValue(query_hash, row, 'typed')
    # TODO: make extra conditional formatting.
    extras = []
    if from_visit:
        # Resolve the referring visit id to its URL for the extra text.
        extras.append('visited from: {0:s}'.format(self._GetUrl(from_visit, cache, database)))
    # NOTE(review): 'hidden' and 'typed' are compared against the string
    # '1' -- assumes _GetRowValue returns these columns as strings; confirm.
    if hidden == '1':
        extras.append('(url hidden)')
    if typed == '1':
        extras.append('(directly typed)')
    else:
        extras.append('(URL not typed directly)')
    event_data = FirefoxPlacesPageVisitedEventData()
    # rev_host stores the hostname reversed; _ReverseHostname restores it.
    event_data.host = self._ReverseHostname(rev_host)
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.title = self._GetRowValue(query_hash, row, 'title')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
    event_data.visit_type = self._GetRowValue(query_hash, row, 'visit_type')
    if extras:
        event_data.extra = extras
    timestamp = self._GetRowValue(query_hash, row, 'visit_date')
    # Only produce an event when the row carries a visit timestamp
    # (stored as POSIX microseconds).
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
|
def register_images(im0, im1, *, rmMean=True, correctScale=True):
    """Find the rotation, scaling and translation of im1 relative to im0.

    Parameters
    ----------
    im0 : First image
    im1 : Second image
    rmMean : Set to true to remove the mean (Default)
    correctScale : If False, a scale deviating more than 5% from 1 is
        reset to 1 (with a warning).

    Returns
    -------
    angle : The angle difference
    scale : The scale difference
    [y, x] : The offset
    im2 : The rotated and translated second image

    Notes
    -----
    The algorithm uses gaussian fit for subpixel precision.
    The best case would be to have two squares images of the same size.
    The algorithm is faster if the size is a power of 2.
    """
    # Sanitize input to float32 arrays.
    img_ref = np.asarray(im0, dtype=np.float32)
    img_mov = np.asarray(im1, dtype=np.float32)
    if rmMean:
        img_ref = img_ref - img_ref.mean()
        img_mov = img_mov - img_mov.mean()
    # Compute DFTs (the images are resized to the same size).
    spec_ref, spec_mov = dft_optsize_same(img_ref, img_mov)
    # Get rotation and scale.
    angle, scale = find_rotation_scale(spec_ref, spec_mov, isccs=True)
    # Avoid fluctuations: without scale correction, clamp scale to 1.
    if not correctScale:
        if np.abs(1 - scale) > 0.05:
            warnings.warn("Scale should be corrected")
        scale = 1
    # Apply rotation and scale, then locate the translation offset.
    im2 = rotate_scale(img_mov, angle, scale)
    spec_new = dft_optsize(im2, shape=spec_ref.shape)
    y, x = find_shift_dft(spec_ref, spec_new, isccs=True)
    return angle, scale, [y, x], im2
|
def _generate_standard_methods(cls):
    """Generate standard setters, getters and checkers for every state."""
    methods = cls.context.new_methods
    for state in cls.context.states_enum:
        name = state.value
        # One is_/set_/can_be_ triple per declared state.
        methods['is_{0}'.format(name)] = utils.generate_getter(state)
        methods['set_{0}'.format(name)] = utils.generate_setter(state)
        methods['can_be_{0}'.format(name)] = utils.generate_checker(state)
    # Helpers that are independent of any particular state.
    methods['actual_state'] = utils.actual_state
    methods['as_enum'] = utils.as_enum
    methods['force_set'] = utils.force_set
|
def resize(self, size, interp='nearest'):
    """Resize the image.

    Parameters
    ----------
    size : int, float, or tuple
        * int   - Percentage of current size.
        * float - Fraction of current size.
        * tuple - Size of the output image.
    interp : :obj:`str`, optional
        Interpolation to use for re-sizing ('nearest', 'lanczos',
        'bilinear', 'bicubic', or 'cubic')

    Returns
    -------
    :obj:`PointCloudImage`
        The resized image.
    """
    # Resize each of the three coordinate channels independently.
    bands = [
        sm.imresize(self._data[:, :, band], size, interp=interp, mode='F')
        for band in range(3)
    ]
    output = np.zeros([bands[0].shape[0], bands[0].shape[1], self.channels])
    for band, band_data in enumerate(bands):
        output[:, :, band] = band_data
    return PointCloudImage(output, self._frame)
|
def insert_characters(self, count=None):
    """Insert the indicated # of blank characters at the cursor
    position. The cursor does not move and remains at the beginning
    of the inserted blank characters. Data on the line is shifted
    forward.

    :param int count: number of characters to insert (defaults to 1).
    """
    # Mark the current row as needing a redraw.
    self.dirty.add(self.cursor.y)
    count = count or 1
    # NOTE(review): the line appears to be a sparse mapping of
    # column -> character where missing keys render as blanks -- confirm.
    line = self.buffer[self.cursor.y]
    # Walk right-to-left so a shifted character never overwrites one
    # that has not been moved yet.
    for x in range(self.columns, self.cursor.x - 1, -1):
        if x + count <= self.columns:
            # Move the character ``count`` cells right; characters that
            # would land past the right margin are dropped.
            line[x + count] = line[x]
        # Clear the source cell (now blank, or already copied right).
        line.pop(x, None)
|
def import_directory(module_basename: str, directory: str, sort_key=None) -> None:
    '''Load all python modules in directory and directory's children.

    Parameters
    :``module_basename``: module name prefix for loaded modules
    :``directory``: directory to load python modules from
    :``sort_key``: function to sort module names with before loading
    '''
    logger.info('loading submodules of %s', module_basename)
    logger.info('loading modules from %s', directory)
    # Lazily collect every file found anywhere below ``directory``.
    filenames = (
        os.path.join(dirpath, filename)
        for dirpath, _subdirs, files in os.walk(directory)
        for filename in files
    )
    modulenames = _filenames_to_modulenames(filenames, module_basename, directory)
    for modulename in sorted(modulenames, key=sort_key):
        try:
            importlib.import_module(modulename)
        except ImportError:
            logger.warning('failed loading %s', modulename)
            logger.exception('module loading failure')
        else:
            logger.info('successfully loaded %s', modulename)
|
def sflow_enable(self, **kwargs):
    """Build the XML config element enabling sFlow and pass it to the callback.

    Auto Generated Code.

    :param kwargs: may contain ``callback``, overriding ``self._callback``.
    :return: whatever the callback returns for the built ``config`` element.
    """
    config = ET.Element("config")
    sflow = ET.SubElement(config, "sflow", xmlns="urn:brocade.com:mgmt:brocade-sflow")
    # The bare <enable/> leaf toggles sFlow on.
    ET.SubElement(sflow, "enable")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def _get_longest_hl ( self , highlights ) :
"""Given a list of highlighted text , returns the
longest highlight
For example :
" < em > Muscle < / em > < em > atrophy < / em > , generalized " ,
" Generalized < em > muscle < / em > degeneration " ,
" Diffuse skeletal < em > " > muscle < / em > wasting "
and returns :
< em > Muscle < / em > < em > atrophy < / em > , generalized
If there are mutliple matches of the same length , returns
the top ( arbitrary ) highlight
: return :"""
|
len_dict = OrderedDict ( )
for hl in highlights : # dummy tags to make it valid xml
dummy_xml = "<p>" + hl + "</p>"
try :
element_tree = ET . fromstring ( dummy_xml )
hl_length = 0
for emph in element_tree . findall ( 'em' ) :
hl_length += len ( emph . text )
len_dict [ hl ] = hl_length
except ET . ParseError :
raise ET . ParseError
return max ( len_dict , key = len_dict . get )
|
def distinguish(self, id_, how=True):
    """Login required. Sends POST to distinguish a submission or comment.

    Returns :class:`things.Link` or :class:`things.Comment`, or raises
    :class:`exceptions.UnexpectedResponse` otherwise.

    URL: ``http://www.reddit.com/api/distinguish/``

    :param id_: full id of object to distinguish
    :param how: either True, False, or 'admin'
    """
    # Translate ``how`` into the API path component.  Equality (not
    # identity) comparison is kept so truthy/falsy equivalents of
    # True/False behave exactly as before.
    if how == 'admin':
        h = 'admin'
    elif how == True:
        h = 'yes'
    elif how == False:
        h = 'no'
    else:
        raise ValueError("how must be either True, False, or 'admin'")
    response = self.post('api', 'distinguish', h, data=dict(id=id_))
    try:
        return self._thingify(response['json']['data']['things'][0])
    except Exception:
        raise UnexpectedResponse(response)
|
def make_fileitem_filepath(filepath, condition='contains', negate=False, preserve_case=False):
    """Create a node for FileItem/FilePath.

    :param filepath: file path content the indicator matches on
    :param condition: comparison condition, e.g. 'contains'
    :param negate: negate the condition when True
    :param preserve_case: keep the comparison case sensitive when True
    :return: A IndicatorItem represented as an Element node
    """
    # FilePath terms are always string-typed FileItem indicators.
    return ioc_api.make_indicatoritem_node(
        condition,
        'FileItem',
        'FileItem/FilePath',
        'string',
        filepath,
        negate=negate,
        preserve_case=preserve_case,
    )
|
def sky_centroid(self):
    """The sky coordinates of the centroid within the source segment,
    returned as a `~astropy.coordinates.SkyCoord` object.

    The output coordinate frame is the same as the input WCS; ``None``
    is returned when no WCS is available.
    """
    # Guard clause: without a WCS there is nothing to transform.
    if self._wcs is None:
        return None
    return pixel_to_skycoord(self.xcentroid.value, self.ycentroid.value, self._wcs, origin=0)
|
def source_hashed(source_filename, prepared_options, thumbnail_extension, **kwargs):
    """Generate a thumbnail filename of the source filename and options
    separately hashed, along with the size.

    The format of the filename is a 12 character base64 sha1 hash of the
    source filename, the size surrounded by underscores, and an 8 character
    base64 sha1 hash of the thumbnail options. For example:
    ``1xedFtqllFo9_100x100_QHCa6G1l.jpg``.
    """
    def _b64_sha1(text, length):
        # Truncated sha1 digest, url-safe base64 encoded.
        digest = hashlib.sha1(text.encode('utf-8')).digest()
        return base64.urlsafe_b64encode(digest[:length]).decode('utf-8')

    source_hash = _b64_sha1(source_filename, 9)
    # All options after the size are joined and hashed together.
    options_hash = _b64_sha1(':'.join(prepared_options[1:]), 6)
    return '{0}_{1}_{2}.{3}'.format(
        source_hash, prepared_options[0], options_hash, thumbnail_extension)
|
def get_public_cms_page_urls(*, language_code):
    """Collect the absolute URLs of all public CMS pages.

    :param language_code: e.g.: "en" or "de"
    :return: Tuple with all public urls in the given language, sorted
        alphabetically.
    """
    sorted_urls = sorted(
        page.get_absolute_url(language=language_code)
        for page in Page.objects.public()
    )
    return tuple(sorted_urls)
|
def read_tvips_header(fh, byteorder, dtype, count, offsetsize):
    """Read TVIPS EM-MENU headers and return as dict.

    Reads the version-1 header record first; if it reports version 2,
    the extended version-2 record is read and decoded on top of it.
    Returns an empty dict when the v2 magic number is wrong or the
    version field is unknown.
    """
    result = {}
    header = fh.read_record(TIFF.TVIPS_HEADER_V1, byteorder=byteorder)
    for name, typestr in TIFF.TVIPS_HEADER_V1:
        result[name] = header[name].tolist()
    if header['Version'] == 2:
        header = fh.read_record(TIFF.TVIPS_HEADER_V2, byteorder=byteorder)
        if header['Magic'] != int(0xaaaaaaaa):
            log.warning('read_tvips_header: invalid TVIPS v2 magic number')
            return {}
        # decode utf16 strings
        for name, typestr in TIFF.TVIPS_HEADER_V2:
            if typestr.startswith('V'):
                s = header[name].tostring().decode('utf16', errors='ignore')
                result[name] = stripnull(s, null='\0')
            else:
                result[name] = header[name].tolist()
        # Convert nm to m in the returned values. The previous code divided
        # the (discarded) ``header`` record instead of ``result``, so the
        # conversion never reached the caller.
        for axis in 'XY':
            result['PhysicalPixelSize' + axis] /= 1e9
            result['PixelSize' + axis] /= 1e9
    # Field access normalized to header['Version']: the record's field is
    # named 'Version' and the lowercase attribute access used before was
    # inconsistent with the rest of this function.
    elif header['Version'] != 1:
        log.warning('read_tvips_header: unknown TVIPS header version')
        return {}
    return result
|
def _get_path_pattern_tornado4 ( self ) :
"""Return the path pattern used when routing a request . ( Tornado < 4.5)
: rtype : str"""
|
for host , handlers in self . application . handlers :
if host . match ( self . request . host ) :
for handler in handlers :
if handler . regex . match ( self . request . path ) :
return handler . regex . pattern
|
def get_playlists(self, search, start=0, max_items=100):
    """Search the music service for playlists.

    See get_music_service_information for details on the arguments.

    Note:
        Un-intuitively this method returns MSAlbumList items. See
        note in class doc string for details.
    """
    # Thin wrapper: playlists go through the generic search entry point.
    return self.get_music_service_information('playlists', search, start, max_items)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.