signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def from_json_file(file: TextIO, check_version=True) -> BELGraph:
    """Load a BEL graph from a file containing Node-Link JSON.

    :param file: A readable file handle with node-link JSON content.
    :param check_version: Whether to check the graph's version on load.
    :return: The reconstructed BEL graph.
    """
    return from_json(json.load(file), check_version=check_version)
|
def peer(name):
    """Probe a remote host and add it to the peer list.

    name
        The remote host to probe.

    CLI Example:

    .. code-block:: bash

        salt 'one.gluster.*' glusterfs.peer two

    GLUSTER direct CLI example (to show what salt is sending to gluster):

        $ gluster peer probe ftp2

    GLUSTER CLI 3.4.4 return example (so we know what we are parsing):

        # if the "peer" is the local host:
        peer probe: success: on localhost not needed

        # if the peer was just added:
        peer probe: success

        # if the peer was already part of the cluster:
        peer probe: success: host ftp2 port 24007 already in peer list
    """
    # Reject any hostname containing characters outside the allowed set.
    if salt.utils.cloud.check_name(name, 'a-zA-Z0-9._-'):
        raise SaltInvocationError('Invalid characters in peer name "{0}"'.format(name))
    return _gluster('peer probe {0}'.format(name))
|
def call_webhook(event, webhook, payload):
    """Build a request from event, webhook and payload, dispatch it, and parse the response.

    Returns a dict containing ``parent`` (the event uuid as str), either
    ``content`` (success) or ``error`` (failure), and ``started_at``/``ended_at``
    timestamps. The ``webhook_ran`` signal is sent with the result either way.
    """
    started_at = time()
    request = _build_request_for_calling_webhook(event, webhook, payload)
    logger.info('REQUEST %(uuid)s %(method)s %(url)s %(payload)s' % dict(uuid=str(event['uuid']), url=request['url'], method=request['method'], payload=payload, ))
    try:
        content = dispatch_webhook_request(**request)
        logger.debug('RESPONSE %(uuid)s %(method)s %(url)s %(data)s' % dict(uuid=str(event['uuid']), url=request['url'], method=request['method'], data=content, ))
        data = dict(parent=str(event['uuid']), content=content, started_at=started_at, ended_at=time())
    except (FailureWebhookError, ConnectionError) as exception:
        # Report the failure to Sentry only when a client is configured.
        if sentry.client:
            http_context = raven_context(**request)
            sentry.captureException(data={'request': http_context})
        # NOTE(review): ``exception.message`` is a Python 2 idiom; most Python 3
        # exceptions have no ``message`` attribute — confirm target runtime.
        logger.error('RESPONSE %(uuid)s %(method)s %(url)s %(error)s' % dict(uuid=str(event['uuid']), method=request['method'], url=request['url'], error=exception.message, ))
        data = dict(parent=str(event['uuid']), error=exception.message, started_at=started_at, ended_at=time(), )
    webhook_ran.send(None, data=data)
    return data
|
def decode_chain_list(in_bytes):
    """Convert a byte string into a list of chain-id strings.

    Each output string comes from a fixed-width slice of length
    ``mmtf.utils.constants.CHAIN_LEN``, decoded as ASCII with any
    NULL-byte padding stripped.

    :param in_bytes: the input bytes
    :return: the decoded list of strings
    """
    chain_len = mmtf.utils.constants.CHAIN_LEN
    null_byte = mmtf.utils.constants.NULL_BYTE
    total = len(in_bytes) // chain_len
    return [
        in_bytes[idx * chain_len:(idx + 1) * chain_len].decode("ascii").strip(null_byte)
        for idx in range(total)
    ]
|
def encode(self, value):
    """Encode ``value`` according to this FieldDefinition.

    A string value is first translated through the enum table (when one is
    defined and contains the value). An integer value is then shifted and
    masked into position. The result is passed to the underlying type's
    encoder; when no type is configured an empty ``bytearray`` is returned.
    """
    # Translate a symbolic enum name into its numeric value.
    if type(value) == str and self.enum and value in self.enum:
        value = self.enum[value]
    # Position an integer within the field: shift first, then mask.
    if type(value) == int:
        if self.shift > 0:
            value <<= self.shift
        if self.mask is not None:
            value &= self.mask
    if self.type:
        return self.type.encode(value)
    return bytearray()
|
def _rhoTilde(self, r, N, L):
    """NAME:
        _rhoTilde
    PURPOSE:
        Evaluate rho_tilde as defined in equation 3.9 and 2.24 for
        0 <= n < N and 0 <= l < L
    INPUT:
        r - Evaluate at radius r
        N - size of the N dimension
        L - size of the L dimension
    OUTPUT:
        rho tilde (an (N, L) array)
    HISTORY:
        2016-05-17 - Written - Aladdin
    """
    xi = self._calculateXi(r)
    # Coefficient table evaluated at xi; assumed shape (N, L) -- TODO confirm
    # against _C's definition.
    CC = _C(xi, N, L)
    a = self._a  # expansion scale length
    rho = nu.zeros((N, L), float)
    # Broadcast n over rows and l over columns so K has shape (N, L).
    n = nu.arange(0, N, dtype=float)[:, nu.newaxis]
    l = nu.arange(0, L, dtype=float)[nu.newaxis, :]
    K = 0.5 * n * (n + 4 * l + 3) + (l + 1.) * (2 * l + 1)
    rho[:, :] = K * ((a * r) ** l) / ((r / a) * (a + r) ** (2 * l + 3.)) * CC[:, :] * (nu.pi) ** -0.5
    return rho
|
def explain(self, bindVars=None, allPlans=False):
    """Return an explanation of the query.

    :param bindVars: Bind variables for the query; defaults to an empty dict.
        (A fresh dict is created per call — the previous ``bindVars={}``
        default was a shared mutable object.)
    :param allPlans: Setting allPlans to True will result in ArangoDB
        returning all possible plans. False returns only the optimal plan.
    """
    if bindVars is None:
        bindVars = {}
    return self.database.explainAQLQuery(self.query, bindVars, allPlans)
|
def _seg(chars):
    """Split ``chars`` into runs of consecutive Han / non-Han characters.

    Characters matching ``RE_HANS`` are grouped together and all other
    characters are grouped together; a new segment begins whenever the
    category flips. Returns the list of segments in original order
    (an empty input yields ``['']``).
    """
    segments = []
    buf = ''
    prev_is_han = None
    for ch in chars:
        cur_is_han = bool(RE_HANS.match(ch))
        # Extend the current run while the category matches (or at the very
        # first character); otherwise flush the run and start a new one.
        if prev_is_han is None or cur_is_han == prev_is_han:
            buf += ch
        else:
            segments.append(buf)
            buf = ch
        prev_is_han = cur_is_han
    # Flush the final run.
    segments.append(buf)
    return segments
|
def burstColumn(self, column, columnMatchingSegments, prevActiveCells, prevWinnerCells, learn):
    """Activates all of the cells in an unpredicted active column, chooses a winner
    cell, and, if learning is turned on, learns on one segment, growing a new
    segment if necessary.

    @param column (int)
        Index of bursting column.
    @param columnMatchingSegments (iter)
        Matching segments in this column, or None if there aren't any.
    @param prevActiveCells (list)
        Active cells in `t-1`.
    @param prevWinnerCells (list)
        Winner cells in `t-1`.
    @param learn (bool)
        Whether or not learning is enabled.
    @return (tuple) Contains:
        `cells` (iter),
        `winnerCell` (int),
    """
    # Cells of this column occupy a contiguous index range.
    start = self.cellsPerColumn * column
    # Strip out destroyed cells before passing along to base _burstColumn()
    cellsForColumn = [cellIdx for cellIdx in xrange(start, start + self.cellsPerColumn) if cellIdx not in self.deadCells]
    # Delegate the actual winner selection / learning to the base implementation,
    # forwarding all of this instance's learning parameters.
    return self._burstColumn(self.connections, self._random, self.lastUsedIterationForSegment, column, columnMatchingSegments, prevActiveCells, prevWinnerCells, cellsForColumn, self.numActivePotentialSynapsesForSegment, self.iteration, self.maxNewSynapseCount, self.initialPermanence, self.permanenceIncrement, self.permanenceDecrement, self.maxSegmentsPerCell, self.maxSynapsesPerSegment, learn)
|
def add_watcher_to_issue(self, issue, JIRAUsername):
    """Add a user to a specified issue's watcher list.

    Arguments:
        | issue (string) | A JIRA Issue that a watcher needs added to, can be an issue ID or Key |
        | JIRAUsername (string) | A JIRA Username to add as a watcher to an issue |

    Example:
        | *Keyword* | *Parameters* | | |
        | connect to jira | asimmons | options={'http://devjira01'} | |
        | ${issue} | create issue | ${issue_field_dict} | True |
        | add watcher to issue | ${issue} | aSample | |
    """
    # Delegate directly to the underlying JIRA client.
    self.jira.add_watcher(issue=issue, watcher=JIRAUsername)
|
def _generate_relative_positions_embeddings(length_q, length_k, depth, max_relative_position, name, cache=False):
    """Generates tensor of size [1 if cache else length_q, length_k, depth]."""
    with tf.variable_scope(name):
        positions = _generate_relative_positions_matrix(length_q, length_k, max_relative_position, cache=cache)
        # One embedding row per relative offset in [-max, +max].
        table = tf.get_variable("embeddings", [max_relative_position * 2 + 1, depth])
        return tf.gather(table, positions)
|
def _progressbar ( self , msg , iter_num ) :
"""Display a progress bar with current loss .
Parameters
msg : str
Message to print alongside the progress bar
iter _ num : int
Iteration number .
Progress is only printed if this is a multiple of
` self . display _ progress ` ."""
|
if self . display_progress and ( iter_num + 1 ) % self . display_progress == 0 :
sys . stderr . write ( '\r' )
sys . stderr . write ( "Iteration {}: {}" . format ( iter_num + 1 , msg ) )
sys . stderr . flush ( )
|
def list_versions(self):
    """Filterable list of versions of a layer, always ordered newest to oldest.

    If the version's source supports revisions, you can get a specific revision
    using ``.filter(data__source__revision=value)``. Specific values depend on
    the source type. Use ``data__source_revision__lt`` or
    ``data__source_revision__gte`` to filter using ``<`` or ``>=`` operators
    respectively.
    """
    url = self._client.get_url('VERSION', 'GET', 'multi', {'layer_id': self.id})
    # Only 'data' is filterable; no sortable attributes are exposed.
    return base.Query(self._manager, url, valid_filter_attributes=('data',), valid_sort_attributes=())
|
def DeleteUserDefinedFunction(self, udf_link, options=None):
    """Deletes a user defined function.

    :param str udf_link:
        The link to the user defined function.
    :param dict options:
        The request options for the request.
    :return:
        The deleted UDF.
    :rtype:
        dict
    """
    request_options = {} if options is None else options
    path = base.GetPathFromLink(udf_link)
    udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
    return self.DeleteResource(path, 'udfs', udf_id, None, request_options)
|
def deriv_H(state, z, x, y, fase):
    r"""Calculate generic partial derivative
    :math:`\left.\frac{\partial z}{\partial x}\right|_{y}` from a fundamental
    helmholtz free energy equation of state

    Parameters
    ----------
    state : any python object
        Only need to define P and T properties, non phase specific properties
    z : str
        Name of variables in numerator term of derivatives
    x : str
        Name of variables in denominator term of derivatives
    y : str
        Name of constant variable in partial derivaritive
    fase : any python object
        Define phase specific properties (v, cv, alfap, s, betap)

    Notes
    -----
    x, y and z can be the following values:

        * P: Pressure
        * T: Temperature
        * v: Specific volume
        * rho: Density
        * u: Internal Energy
        * h: Enthalpy
        * s: Entropy
        * g: Gibbs free energy
        * a: Helmholtz free energy

    Returns
    -------
    deriv : float
        ∂z/∂x|y

    References
    ----------
    IAPWS, Revised Advisory Note No. 3: Thermodynamic Derivatives from IAPWS
    Formulations, http://www.iapws.org/relguide/Advise3.pdf
    """
    # We use the relation between rho and v and his partial derivative
    #   ∂v/∂b|c = -1/ρ² ∂ρ/∂b|c
    #   ∂a/∂v|c = -ρ² ∂a/∂ρ|c
    # so density derivatives are rewritten in terms of v with a prefactor.
    mul = 1
    if z == "rho":
        mul = -fase.rho ** 2
        z = "v"
    if x == "rho":
        mul = -1 / fase.rho ** 2
        x = "v"
    if y == "rho":
        y = "v"
    # Tables of the partial derivatives of each property with respect to T
    # (at constant v) and with respect to v (at constant T). The factor 1000
    # appears to convert pressure units -- TODO confirm against the IAPWS note.
    dT = {"P": state.P * 1000 * fase.alfap, "T": 1, "v": 0, "u": fase.cv, "h": fase.cv + state.P * 1000 * fase.v * fase.alfap, "s": fase.cv / state.T, "g": state.P * 1000 * fase.v * fase.alfap - fase.s, "a": -fase.s}
    dv = {"P": -state.P * 1000 * fase.betap, "T": 0, "v": 1, "u": state.P * 1000 * (state.T * fase.alfap - 1), "h": state.P * 1000 * (state.T * fase.alfap - fase.v * fase.betap), "s": state.P * 1000 * fase.alfap, "g": -state.P * 1000 * fase.v * fase.betap, "a": -state.P * 1000}
    # Chain rule through (T, v) as the working pair of independent variables.
    deriv = (dv[z] * dT[y] - dT[z] * dv[y]) / (dv[x] * dT[y] - dT[x] * dv[y])
    return mul * deriv
|
def iter_fasta_qual(fastafile, qualfile, defaultqual=OKQUAL, modify=False):
    """Used by trim; emit one SeqRecord at a time with quality values attached.

    :param fastafile: path to the FASTA sequence file
    :param qualfile: path to the matching quality file; when falsy, a quality
        file is synthesized via ``make_qual`` using ``defaultqual`` scores
    :param defaultqual: default score used when synthesizing a quality file
    :param modify: when True, each record is passed through ``modify_qual``
        before being yielded
    """
    from Bio.SeqIO.QualityIO import PairedFastaQualIterator
    if not qualfile:
        qualfile = make_qual(fastafile, score=defaultqual)
    # Context managers ensure both handles are closed when iteration finishes
    # or the generator is abandoned (the original leaked both file objects).
    with open(fastafile) as fasta_fh, open(qualfile) as qual_fh:
        for rec in PairedFastaQualIterator(fasta_fh, qual_fh):
            yield rec if not modify else modify_qual(rec)
|
def pattern2re(pattern):
    """Makes a unicode regular expression from a pattern.

    Returns ``(start, full_re, int_re)`` where:

    * `start` is either empty or the subdirectory in which to start searching,
    * `full_re` is a regular expression object that matches the requested
      files, i.e. a translation of the pattern
    * `int_re` is either None or a regular expression object that matches
      the requested paths or their ancestors (i.e. if a path doesn't match
      `int_re`, no path under it will match `full_re`)

    This uses extended patterns, where:

    * a slash '/' always represents the path separator
    * a backslash '\\' escapes other special characters
    * an initial slash '/' anchors the match at the beginning of the
      (relative) path
    * a trailing '/' suffix is removed
    * an asterisk '*' matches a sequence of any length (including 0) of any
      characters (except the path separator)
    * a '?' matches exactly one character (except the path separator)
    * '[abc]' matches characters 'a', 'b' or 'c'
    * two asterisks '**' matches one or more path components (might match '/'
      characters)
    """
    pattern_segs = filter(None, pattern.split('/'))
    # This anchors the first component either at the start of the string or at
    # the start of a path component
    if not pattern:
        return '', re.compile(''), None
    elif '/' in pattern:
        full_regex = '^'
        # Start at beginning of path
        int_regex = []
        int_regex_done = False
        start_dir = []
        start_dir_done = False
    else:
        full_regex = '(?:^|/)'
        # Skip any number of full components
        int_regex = None
        int_regex_done = True
        start_dir = []
        start_dir_done = True
    # Handles each component
    for pnum, pat in enumerate(pattern_segs):
        comp = patterncomp2re(pat)
        # The first component is already anchored
        if pnum > 0:
            full_regex += '/'
        full_regex += comp
        if not int_regex_done:
            if pat == '**':
                # Anything can live under a '**'; ancestors need no further
                # constraining beyond this point.
                int_regex_done = True
            else:
                int_regex.append(comp)
        # Literal leading components (no wildcards) accumulate into the
        # starting directory.
        if not start_dir_done and no_special_chars.match(pat):
            start_dir.append(pat)
        else:
            start_dir_done = True
    full_regex = re.compile(full_regex.rstrip('/') + '$')
    if int_regex is not None:
        # Build nested optional groups so every ancestor prefix of the target
        # path also matches int_regex.
        n = len(int_regex)
        int_regex_s = ''
        for i, c in enumerate(reversed(int_regex)):
            if i == n - 1:  # Last iteration (first component)
                int_regex_s = '^(?:%s%s)?' % (c, int_regex_s)
            elif int_regex_s:
                int_regex_s = '(?:/%s%s)?' % (c, int_regex_s)
            else:  # First iteration (last component)
                int_regex_s = '(?:/%s)?' % c
        int_regex = re.compile(int_regex_s + '$')
    start_dir = '/'.join(start_dir)
    return start_dir, full_regex, int_regex
|
def reject(self, f, *args):
    """Like 'match', but throw a parse error if 'f' matches.

    This is useful when a parser wants to be strict about specific things
    being prohibited. For example, DottySQL bans the use of SQL keywords as
    variable names.
    """
    if not self.match(f, *args):
        return
    offending = self.peek(0)
    raise errors.EfilterParseError(query=self.tokenizer.source, token=offending, message="Was not expecting a %s here." % offending.name)
|
def display_hook(fn):
    """A decorator to wrap display hooks that return a MIME bundle or None.

    Additionally it handles adding output to the notebook archive, saves
    files specified with the output magic and handles tracebacks.
    """
    @wraps(fn)
    def wrapped(element):
        global FULL_TRACEBACK
        # No active backend: nothing can be rendered.
        if Store.current_backend is None:
            return {}, {}
        try:
            max_frames = OutputSettings.options['max_frames']
            mimebundle = fn(element, max_frames=max_frames)
            if mimebundle is None:
                return {}, {}
            mime_data, mime_metadata = mimebundle
            # Fold any javascript payload into the HTML representation.
            if 'text/javascript' in mime_data:
                mime_data['text/html'] = mimebundle_to_html(mime_data)
                del mime_data['text/javascript']
            # Only want to add to the archive for one display hook...
            disabled_suffixes = ['png_display', 'svg_display']
            if not any(fn.__name__.endswith(suffix) for suffix in disabled_suffixes):
                if type(holoviews.archive) is not FileArchive:
                    holoviews.archive.add(element, html=mime_data['text/html'])
                filename = OutputSettings.options['filename']
                if filename:
                    Store.renderers[Store.current_backend].save(element, filename)
            return mime_data, mime_metadata
        except SkipRendering as e:
            if e.warn:
                sys.stderr.write(str(e))
            return {}, {}
        except AbbreviatedException as e:
            # Stash the full traceback so show_traceback() can display it later.
            FULL_TRACEBACK = '\n'.join(traceback.format_exception(e.etype, e.value, e.traceback))
            info = dict(name=e.etype.__name__, message=str(e.value).replace('\n', '<br>'))
            msg = '<i> [Call holoviews.ipython.show_traceback() for details]</i>'
            return {'text/html': "<b>{name}</b>{msg}<br>{message}".format(msg=msg, **info)}, {}
        except Exception:
            raise
    return wrapped
|
def loadNetworkFromFile(filename, mode='pickle'):
    """Deprecated. Use loadNetwork instead.

    Loads a network either from a pickle file (mode='pickle') or from a
    plain-text "conx" format (mode in ('plain', 'conx')).
    """
    if mode == 'pickle':
        import pickle
        # NOTE(review): the file is opened in text mode; binary pickle data
        # would require open(filename, 'rb') — confirm how these files are
        # written.
        fp = open(filename)
        network = pickle.load(fp)
        fp.close()
        return network
    elif mode in ['plain', 'conx']:
        fp = open(filename, "r")
        line = fp.readline()
        # ``network`` is created only when a "network," line is seen; the file
        # format is assumed to place that line before any "layer,"/"connection,"
        # lines — TODO confirm, otherwise the calls below fail on None.
        network = None
        while line:
            if line.startswith("layer,"):  # layer, name, size
                temp, name, sizeStr = line.split(",")
                name = name.strip()
                size = int(sizeStr)
                network.addLayer(name, size)
                # The following line carries the layer's weight vector.
                line = fp.readline()
                weights = [float(f) for f in line.split()]
                for i in range(network[name].size):
                    network[name].weight[i] = weights[i]
            elif line.startswith("connection,"):  # connection, fromLayer, toLayer
                temp, nameFrom, nameTo = line.split(",")
                nameFrom, nameTo = nameFrom.strip(), nameTo.strip()
                network.connect(nameFrom, nameTo)
                # One line of weights per unit in the source layer.
                for i in range(network[nameFrom].size):
                    line = fp.readline()
                    weights = [float(f) for f in line.split()]
                    for j in range(network[nameTo].size):
                        network[nameFrom, nameTo].weight[i][j] = weights[j]
            elif line.startswith("parameter,"):
                temp, exp = line.split(",")
                # SECURITY: executes arbitrary code read from the file; only
                # load trusted network files.
                exec(exp)
                # network is the neural network object
            elif line.startswith("network,"):
                temp, netType = line.split(",")
                netType = netType.strip().lower()
                if netType == "cascornetwork":
                    from pyrobot.brain.cascor import CascorNetwork
                    network = CascorNetwork()
                elif netType == "network":
                    network = Network()
                elif netType == "srn":
                    network = SRN()
                else:
                    raise AttributeError("unknown network type: '%s'" % netType)
            line = fp.readline()
        return network
|
def load_config(self):
    """Loads the configuration.

    Copies every option that names a known setting and has a non-None value
    into the config object via ``cfg.set`` (keys lowercased).
    """
    for key, value in iteritems(self.options):
        if key in self.cfg.settings and value is not None:
            self.cfg.set(key.lower(), value)
|
def get_scratch_predictions(self, path_to_scratch, results_dir, scratch_basename='scratch', num_cores=1, exposed_buried_cutoff=25, custom_gene_mapping=None):
    """Run and parse ``SCRATCH`` results to predict secondary structure and solvent accessibility.

    Annotations are stored in the protein's representative sequence at:

        * ``.annotations``
        * ``.letter_annotations``

    Args:
        path_to_scratch (str): Path to SCRATCH executable
        results_dir (str): Path to SCRATCH results folder, which will have the files (scratch.ss, scratch.ss8,
            scratch.acc, scratch.acc20)
        scratch_basename (str): Basename of the SCRATCH results ('scratch' is default)
        num_cores (int): Number of cores to use to parallelize SCRATCH run
        exposed_buried_cutoff (int): Cutoff of exposed/buried for the acc20 predictions
        custom_gene_mapping (dict): Default parsing of SCRATCH output files is to look for the model gene IDs. If
            your output files contain IDs which differ from the model gene IDs, use this dictionary to map model
            gene IDs to result file IDs. Dictionary keys must match model genes.
    """
    if not self.genome_path:
        # Write all sequences as one file
        # NOTE(review): presumably this also sets self.genome_path (used right
        # below) — confirm in write_representative_sequences_file.
        all_seqs = self.write_representative_sequences_file(outname=self.id)
    # Runs SCRATCH or loads existing results in results_dir
    scratch = SCRATCH(project_name=scratch_basename, seq_file=self.genome_path)
    scratch.run_scratch(path_to_scratch=path_to_scratch, num_cores=num_cores, outdir=results_dir)
    # Parse the four SCRATCH output files (per-sequence summaries and
    # per-residue results for both secondary structure and accessibility).
    sspro_summary = scratch.sspro_summary()
    sspro8_summary = scratch.sspro8_summary()
    sspro_results = scratch.sspro_results()
    sspro8_results = scratch.sspro8_results()
    accpro_summary = scratch.accpro_summary()
    accpro20_summary = scratch.accpro20_summary(exposed_buried_cutoff)
    accpro_results = scratch.accpro_results()
    accpro20_results = scratch.accpro20_results()
    counter = 0
    # Adding the scratch annotations to the representative_sequences letter_annotations
    for g in tqdm(self.genes_with_a_representative_sequence):
        if custom_gene_mapping:
            g_id = custom_gene_mapping[g.id]
        else:
            g_id = g.id
        if g_id in sspro_summary:
            # Secondary structure
            g.protein.representative_sequence.annotations.update(sspro_summary[g_id])
            g.protein.representative_sequence.annotations.update(sspro8_summary[g_id])
            try:
                g.protein.representative_sequence.letter_annotations['SS-sspro'] = sspro_results[g_id]
                g.protein.representative_sequence.letter_annotations['SS-sspro8'] = sspro8_results[g_id]
            except TypeError:
                log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative ' 'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
            # Solvent accessibility
            g.protein.representative_sequence.annotations.update(accpro_summary[g_id])
            g.protein.representative_sequence.annotations.update(accpro20_summary[g_id])
            try:
                g.protein.representative_sequence.letter_annotations['RSA-accpro'] = accpro_results[g_id]
                g.protein.representative_sequence.letter_annotations['RSA-accpro20'] = accpro20_results[g_id]
            except TypeError:
                log.error('Gene {}, SeqProp {}: sequence length mismatch between SCRATCH results and representative ' 'sequence, unable to set letter annotation'.format(g_id, g.protein.representative_sequence.id))
            counter += 1
        else:
            log.error('{}: missing SCRATCH results'.format(g.id))
    log.info('{}/{}: number of genes with SCRATCH predictions loaded'.format(counter, len(self.genes)))
|
def promise_method(func):
    """A decorator which ensures that once a method has been marked as resolved
    (via Class.__resolved) the attribute (function) call is propagated upstream
    to the wrapped object instead of running the local implementation.
    """
    method_name = func.__name__

    @wraps(func)
    def wrapped(self, *args, **kwargs):
        owner = type(self).__name__
        # The flags live under name-mangled attributes of the concrete class.
        if getattr(self, '_%s__resolved' % (owner,)):
            delegate = getattr(self, '_%s__wrapped' % (owner,))
            return getattr(delegate, method_name)(*args, **kwargs)
        return func(self, *args, **kwargs)
    return wrapped
|
def pop_aggregations_params(self):
    """Pop and return aggregation params from query string params.

    Aggregation params are expected to be prefixed (nested under) by any of
    ``self._aggregations_keys``; the first matching key wins.

    :raises KeyError: when none of the aggregation keys are present.
    """
    from nefertari.view import BaseView
    self._query_params = BaseView.convert_dotted(self.view._query_params)
    for candidate in self._aggregations_keys:
        if candidate in self._query_params:
            return self._query_params.pop(candidate)
    raise KeyError('Missing aggregation params')
|
def validate_wrap(self, value):
    """Validates the type and length of ``value``.

    The value must be a string and must match ``self.regex``; otherwise the
    corresponding validation-failure handler is invoked.
    """
    if not isinstance(value, basestring):
        self._fail_validation_type(value, basestring)
    matched = self.regex.match(value)
    if matched is None:
        self._fail_validation(value, 'Value does not match regular expression')
|
def mouseDown(self, button):
    """Send a mouse button down at the last set position.

    button: int: [1-n]
    """
    log.debug('mouseDown %s', button)
    # Set this button's bit in the current button mask and re-send the pointer.
    self.buttons |= 1 << (button - 1)
    self.pointerEvent(self.x, self.y, buttonmask=self.buttons)
    return self
|
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
    """Sends PreparedRequest object. Returns Response object.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param stream: (optional) Whether to stream the request content.
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float, or a :ref:`(connect timeout,
        read timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use
    :param cert: (optional) Any user-provided SSL certificate to be trusted.
    :param proxies: (optional) The proxies dictionary to apply to the request.
    """
    # Abstract hook: concrete transport adapters must override this method.
    raise NotImplementedError
|
def _make_admin_link_to_similar(primary_field, *fields, name=None):
    """Create a function that links to a changelist of all objects with similar field values.

    The returned callable is intended to be attached to a Django ModelAdmin as
    a list_display entry; it renders an ``<a>`` tag filtering the changelist on
    the values of ``primary_field`` plus any extra ``fields``.
    """
    fields = (primary_field,) + fields
    url_template = '<a href="{url}">{name_or_value}</a>'
    def field_link(self, obj):
        # NOTE: the template is filled via **locals(), so the local names
        # ``url`` and ``name_or_value`` must not be renamed.
        value = getattr(obj, primary_field, None)
        name_or_value = name or value
        filters = {field_name: getattr(obj, field_name) for field_name in fields}
        url = _build_admin_filter_url(obj, filters)
        # Fall back to the raw value when no filter URL could be built.
        return format_html(url_template, **locals()) if url else value
    # Admin display metadata on the generated callable.
    field_link.allow_tags = True
    field_link.short_description = primary_field.replace('_', ' ').capitalize()
    field_link.admin_order_field = primary_field
    field_link.__name__ = field_link.__name__.replace('field', primary_field)
    return field_link
|
def maximize(func, parameter_dict, args={}, verbose=False, population_size=50, gene_mutation_prob=0.1, gene_crossover_prob=0.5, tournament_size=3, generations_number=10, gene_type=None, n_jobs=1, error_score='raise'):
    """Same as _fit in EvolutionarySearchCV but without fitting data. More similar to scipy.optimize.

    NOTE(review): ``args={}`` is a shared mutable default; it is only passed
    through to the evaluation function here, but callers should not mutate it.

    Parameters
    ----------
    n_jobs : int or map function, default=1
        Number of jobs to run in parallel.
        Also accepts custom parallel map functions from Pool or SCOOP.

    Returns
    -------
    best_params_ : dict
        A list of parameters for the best learner.
    best_score_ : float
        The score of the learner described by best_params_
    score_results : tuple of 2-tuples ((dict, float), ...)
        The score of every individual evaluation indexed by it's parameters.
    hist : deap.tools.History object.
        Use to get the geneology data of the search.
    logbook : deap.tools.Logbook object.
        Includes the statistics of the evolution.
    """
    toolbox = base.Toolbox()
    _check_param_grid(parameter_dict)
    if isinstance(n_jobs, int):
        # If n_jobs is an int, greater than 1 or less than 0 (indicating to use as
        # many jobs as possible) then we are going to create a default pool.
        # Windows users need to be warned of this feature as it only works properly
        # on linux. They need to encapsulate their pool in an if __name__ == "__main__"
        # wrapper so that pools are not recursively created when the module is reloaded in each map
        if isinstance(n_jobs, (int, float)):
            if n_jobs > 1 or n_jobs < 0:
                from multiprocessing import Pool
                # Only imports if needed
                if os.name == 'nt':
                    # Checks if we are on Windows
                    warnings.warn(("Windows requires Pools to be declared from within "
                                   "an \'if __name__==\"__main__\":\' structure. In this "
                                   "case, n_jobs will accept map functions as well to "
                                   "facilitate custom parallelism. Please check to see "
                                   "that all code is working as expected."))
                pool = Pool(n_jobs)
                toolbox.register("map", pool.map)
                warnings.warn("Need to create a creator. Run optimize.compile()")
            else:
                compile()
    # If it's not an int, we are going to pass it as the map directly
    else:
        try:
            toolbox.register("map", n_jobs)
        except Exception:
            raise TypeError("n_jobs must be either an integer or map function. Received: {}".format(type(n_jobs)))
    name_values, gene_type, maxints = _get_param_types_maxint(parameter_dict)
    if verbose:
        print("Types %s and maxint %s detected" % (gene_type, maxints))
    # Register the genetic operators with the parameter grid's encoding.
    toolbox.register("individual", _initIndividual, creator.Individual, maxints=maxints)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", _evalFunction, func, name_values=name_values, verbose=verbose, error_score=error_score, args=args)
    toolbox.register("mate", _cxIndividual, indpb=gene_crossover_prob, gene_type=gene_type)
    toolbox.register("mutate", _mutIndividual, indpb=gene_mutation_prob, up=maxints)
    toolbox.register("select", tools.selTournament, tournsize=tournament_size)
    # Tools
    pop = toolbox.population(n=population_size)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.nanmean)
    stats.register("min", np.nanmin)
    stats.register("max", np.nanmax)
    stats.register("std", np.nanstd)
    # History
    hist = tools.History()
    toolbox.decorate("mate", hist.decorator)
    toolbox.decorate("mutate", hist.decorator)
    hist.update(pop)
    if verbose:
        print('--- Evolve in {0} possible combinations ---'.format(np.prod(np.array(maxints) + 1)))
    pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=generations_number, stats=stats, halloffame=hof, verbose=verbose)
    current_best_score_ = hof[0].fitness.values[0]
    current_best_params_ = _individual_to_params(hof[0], name_values)
    # Generate score_cache with real parameters
    _, individuals, each_scores = zip(*[(idx, indiv, np.mean(indiv.fitness.values)) for idx, indiv in list(hist.genealogy_history.items()) if indiv.fitness.valid and not np.all(np.isnan(indiv.fitness.values))])
    # Deduplicate individuals by their string form, keeping one score each.
    unique_individuals = {str(indiv): (indiv, score) for indiv, score in zip(individuals, each_scores)}
    score_results = tuple([(_individual_to_params(indiv, name_values), score) for indiv, score in unique_individuals.values()])
    if verbose:
        print("Best individual is: %s\nwith fitness: %s" % (current_best_params_, current_best_score_))
    # Close your pools if you made them
    if isinstance(n_jobs, int) and (n_jobs > 1 or n_jobs < 0):
        pool.close()
        pool.join()
    return current_best_params_, current_best_score_, score_results, hist, logbook
|
def object_isinstance(node, class_or_seq, context=None):
    """Check if a node 'isinstance' any node in class_or_seq.

    :param node: A given node
    :param class_or_seq: Union[nodes.NodeNG, Sequence[nodes.NodeNG]]
    :rtype: bool
    :raises AstroidTypeError: if the given ``classes_or_seq`` are not types
    """
    inferred_type = object_type(node, context)
    # Propagate uninferable results unchanged rather than guessing.
    if inferred_type is util.Uninferable:
        return util.Uninferable
    return _object_type_is_subclass(inferred_type, class_or_seq, context=context)
|
def get_calling_namespaces():
    """Return the locals and globals for the function that called
    into this module in the current call stack.
    """
    # Raise and catch a throwaway exception purely to obtain a traceback
    # object; starting from tb_frame.f_back rather than the current frame
    # prevents creating reference cycles (f_back is safe).
    try:
        1 // 0
    except ZeroDivisionError:
        caller = sys.exc_info()[2].tb_frame.f_back
    # Walk outward past every frame defined in this module. We expect all of
    # the SCons frames that implement an Export() or SConscript() call to be
    # in this file, so the first foreign frame is the user's local calling
    # environment, and its locals and globals are the calling namespaces.
    # See the comment preceding the DefaultEnvironmentCall block for even
    # more explanation.
    while caller.f_globals.get("__name__") == __name__:
        caller = caller.f_back
    return caller.f_locals, caller.f_globals
|
def _ParseDataObject ( self , file_object , file_offset ) :
    """Parses a systemd journal data object.

    Args:
        file_object (dfvfs.FileIO): a file-like object.
        file_offset (int): offset of the data object relative to the start
            of the file-like object.

    Returns:
        bytes: data, decompressed if the object was XZ- or LZ4-compressed.

    Raises:
        ParseError: if the data object cannot be parsed.
    """
    data_object_map = self . _GetDataTypeMap ( 'systemd_journal_data_object' )
    try :
        data_object , _ = self . _ReadStructureFromFileObject ( file_object , file_offset , data_object_map )
    except ( ValueError , errors . ParseError ) as exception :
        raise errors . ParseError ( ( 'Unable to parse data object at offset: 0x{0:08x} with error: ' '{1!s}' ) . format ( file_offset , exception ) )
    # Only DATA objects, optionally XZ- or LZ4-compressed, are supported.
    if data_object . object_type != self . _OBJECT_TYPE_DATA :
        raise errors . ParseError ( 'Unsupported object type: {0:d}.' . format ( data_object . object_type ) )
    if data_object . object_flags not in ( 0 , self . _OBJECT_COMPRESSED_FLAG_XZ , self . _OBJECT_COMPRESSED_FLAG_LZ4 ) :
        raise errors . ParseError ( 'Unsupported object flags: 0x{0:02x}.' . format ( data_object . object_flags ) )
    # The data is read separately for performance reasons.
    # NOTE(review): 64 is presumably the size of the data object header —
    # confirm against the systemd journal file format definition.
    data_size = data_object . data_size - 64
    data = file_object . read ( data_size )
    if data_object . object_flags & self . _OBJECT_COMPRESSED_FLAG_XZ :
        data = lzma . decompress ( data )
    elif data_object . object_flags & self . _OBJECT_COMPRESSED_FLAG_LZ4 :
        # LZ4-compressed payloads start with a 32-bit little-endian
        # uncompressed-size prefix; parse it, then decompress the remainder.
        uncompressed_size_map = self . _GetDataTypeMap ( 'uint32le' )
        try :
            uncompressed_size = self . _ReadStructureFromByteStream ( data , file_offset + 64 , uncompressed_size_map )
        except ( ValueError , errors . ParseError ) as exception :
            raise errors . ParseError ( ( 'Unable to parse LZ4 uncompressed size at offset: 0x{0:08x} with ' 'error: {1!s}' ) . format ( file_offset + 64 , exception ) )
        data = lz4 . block . decompress ( data [ 8 : ] , uncompressed_size = uncompressed_size )
    return data
|
def getRole ( self , label ) :
    """Return the :class:`rtcclient.models.Role` whose label matches *label*.

    :param label: the label name of the role
    :return: the :class:`rtcclient.models.Role` object
    :rtype: :class:`rtcclient.models.Role`
    """
    # Reject empty or non-string labels up front.
    if not label or not isinstance ( label , six . string_types ) :
        message = "Please specify a valid role label"
        self . log . error ( message )
        raise exception . BadValue ( message )
    all_roles = self . getRoles ( )
    if all_roles is not None :
        for role in all_roles :
            if role . label != label :
                continue
            self . log . info ( "Get <Role %s> in <ProjectArea %s>" , role , self )
            return role
    message = "No role's label is %s in <ProjectArea %s>" % ( label , self )
    self . log . error ( message )
    raise exception . NotFound ( message )
|
def _construct_arrow_tip ( self , pos ) :
    """Return the arrow tip as (width, widget); (0, None) when no tip char."""
    if not self . _arrow_tip_char :
        return 0 , None
    text = urwid . Text ( self . _arrow_tip_char )
    # Fall back to the generic arrow attribute if no tip-specific one is set.
    attribute = self . _arrow_tip_att or self . _arrow_att
    widget = urwid . AttrMap ( text , attribute )
    return len ( self . _arrow_tip_char ) , widget
|
def train_model ( self , balance , * args , ** kwargs ) :
    """Store the (possibly estimated) class balance on the model.

    Args:
        balance: A 1d arraylike that sums to 1, corresponding to the
            (possibly estimated) class balance.
    """
    # Copy into an ndarray so later mutation of the input can't affect us.
    self . balance = np . array ( balance )
|
def ftp_walk ( ftpconn : FTP , rootpath = '' ) :
    """Recursively traverse an FTP directory, yielding (path, dirs, files)."""
    try :
        subdirs , files = directory_listing ( ftpconn , rootpath )
    except ftplib . error_perm :
        # Permission denied: treat the directory as unreadable and stop here.
        return
    # Pre-order traversal: yield the current directory before recursing.
    yield rootpath , subdirs , files
    for subdir in subdirs :
        child = os . path . join ( rootpath , subdir )
        for entry in ftp_walk ( ftpconn , rootpath = child ) :
            yield entry
|
def _init_state ( self , initial_state : Union [ int , np . ndarray ] ) :
    """Initialize the sharded wavefunction and set the initial state."""
    # Validate/convert, then split the full state vector into shards.
    full_state = sim . to_valid_state_vector ( initial_state , self . _num_qubits )
    sharded = np . reshape ( full_state , ( self . _num_shards , self . _shard_size ) )
    # Share the float32 view of the state across processes.
    handle = mem_manager . SharedMemManager . create_array ( sharded . view ( dtype = np . float32 ) )
    self . _shared_mem_dict [ 'state_handle' ] = handle
|
def delete ( self , id , project_id = None ) :
    """Delete the result with the given id; return a 404 JSON body if absent."""
    target = db . session . query ( Result ) . filter_by ( id = id ) . first ( )
    if target is None :
        body = { 'result' : None , 'message' : 'No interface defined for URL.' }
        return jsonify ( body ) , 404
    db . session . delete ( target )
    db . session . commit ( )
    return jsonify ( { 'result' : target . serialize } )
|
def read_ckan_catalog ( portal_url ) :
    """Convert the metadata of a CKAN portal, exposed through its Action API
    v3, into the data.json standard.

    Args:
        portal_url (str): URL of a CKAN data portal that supports API v3.

    Returns:
        dict: Internal catalog representation used by this library.

    Raises:
        NonParseableCatalog: if the portal cannot be read or parsed.
    """
    portal = RemoteCKAN ( portal_url )
    try :
        status = portal . call_action ( 'status_show' , requests_kwargs = { "verify" : False } )
        packages_list = portal . call_action ( 'package_list' , requests_kwargs = { "verify" : False } )
        groups_list = portal . call_action ( 'group_list' , requests_kwargs = { "verify" : False } )
        # Iterate over every dataset in the portal.
        packages = [ ]
        num_packages = len ( packages_list )
        for index , pkg in enumerate ( packages_list ) :
            # Progress logging (needed when there are many datasets).
            msg = "Leyendo dataset {} de {}" . format ( index + 1 , num_packages )
            logger . info ( msg )
            # Append a new dataset to the list.
            packages . append ( portal . call_action ( 'package_show' , { 'id' : pkg } , requests_kwargs = { "verify" : False } ) )
            # Wait between requests to avoid being banned by the portal.
            time . sleep ( 0.2 )
        # Iterate over every theme (group) in the portal.
        groups = [ portal . call_action ( 'group_show' , { 'id' : grp } , requests_kwargs = { "verify" : False } ) for grp in groups_list ]
        catalog = map_status_to_catalog ( status )
        catalog [ "dataset" ] = map_packages_to_datasets ( packages , portal_url )
        catalog [ "themeTaxonomy" ] = map_groups_to_themes ( groups )
    except ( CKANAPIError , RequestException ) as e :
        logger . exception ( 'Error al procesar el portal %s' , portal_url , exc_info = True )
        raise NonParseableCatalog ( portal_url , e )
    return catalog
|
def stop_capture_handler ( self , name ) :
    '''Remove all handlers with a given name.

    Capturers left with no handlers are killed and dropped from the
    capturer registry.

    Args:
        name:
            The name of the handler(s) to remove.
    '''
    drained_keys = [ ]
    # Use items() rather than the Python-2-only iteritems(): this module
    # otherwise targets 2/3 compatibility (it uses six elsewhere), and
    # iteritems() raises AttributeError on Python 3.
    for key , capturer_entry in self . _stream_capturers . items ( ) :
        stream_capturer = capturer_entry [ 0 ]
        stream_capturer . remove_handler ( name )
        if stream_capturer . handler_count == 0 :
            self . _pool . killone ( capturer_entry [ 1 ] )
            drained_keys . append ( key )
    # Delete after iteration so the dict is not mutated while being walked.
    for key in drained_keys :
        del self . _stream_capturers [ key ]
|
def _id_for_pc ( self , name ) :
"""Given the name of the PC , return the database identifier ."""
|
if not name in self . pc2id_lut :
self . c . execute ( "INSERT INTO pcs (name) VALUES ( ? )" , ( name , ) )
self . pc2id_lut [ name ] = self . c . lastrowid
self . id2pc_lut [ self . c . lastrowid ] = name
return self . pc2id_lut [ name ]
|
def wheel_libs ( wheel_fname , filt_func = None ) :
    """Return analysis of library dependencies within a Python wheel.

    Use this routine for a dump of the dependency tree.

    Parameters
    ----------
    wheel_fname : str
        Filename of wheel.
    filt_func : None or callable, optional
        If None, inspect all files for library dependencies. If callable,
        accepts a filename, returns True if the file should be inspected.

    Returns
    -------
    lib_dict : dict
        (``libpath``, ``dependings_dict``) pairs; paths inside the wheel
        tree are reported relative to the wheel root.
    """
    with TemporaryDirectory ( ) as tmpdir :
        # Unpack the wheel, analyze it, and strip the temp prefix from paths.
        zip2dir ( wheel_fname , tmpdir )
        dependencies = tree_libs ( tmpdir , filt_func )
        return stripped_lib_dict ( dependencies , realpath ( tmpdir ) + os . path . sep )
|
def get_dependants ( project_name ) :
    """Yield the names of installed packages that depend on *project_name*."""
    installed = get_installed_distributions ( user_only = ENABLE_USER_SITE )
    for dist in installed :
        if is_dependant ( dist , project_name ) :
            yield dist . project_name
|
def register_uri_backend ( uri_scheme , create_method , module , c14n_uri_method , escape , cast , is_connected ) :
    """Register a backend service, identified by its URI scheme (backends only).

    ``create_method`` must take one parameter: the complete requested
    RFC 3986 compliant URI. ``module`` must be DBAPI v2.0 compliant; it is
    only used here for compatibility testing. ``c14n_uri_method`` takes a
    URI string and returns its canonicalized form (or is None for the
    identity behavior). ``escape`` takes an unescaped column name and
    returns an escaped version for SQL queries on this backend.

    Raises NotImplementedError if something obviously incompatible is
    registered, and urisup.InvalidSchemeError for an invalid scheme.
    """
    try :
        # A module that lacks these attributes raises AttributeError — the
        # previous `except NameError` could never catch that, letting
        # non-DBAPI modules crash instead of being rejected cleanly.
        delta_api = __compare_api_level ( module . apilevel , any_apilevel )
        mod_paramstyle = module . paramstyle
        mod_threadsafety = module . threadsafety
    except ( AttributeError , NameError ) :
        raise NotImplementedError ( "This module does not support registration " "of non DBAPI services of at least apilevel 2.0" )
    if delta_api < 0 or delta_api > 1 :
        raise NotImplementedError ( "This module does not support registration " "of DBAPI services with a specified apilevel of %s" % module . apilevel )
    if mod_paramstyle not in [ 'pyformat' , 'format' , 'qmark' ] :
        # Message fixed: previously garbled ("a 'format' or 'pyformat' 'qmark'").
        raise NotImplementedError ( "This module only supports registration " "of DBAPI services with a 'format', 'pyformat' or 'qmark' paramstyle, not %r" % mod_paramstyle )
    if mod_threadsafety < any_threadsafety :
        raise NotImplementedError ( "This module does not support registration " "of DBAPI services of threadsafety %d (more generally under %d)" % ( mod_threadsafety , any_threadsafety ) )
    if not urisup . valid_scheme ( uri_scheme ) :
        raise urisup . InvalidSchemeError ( "Can't register an invalid URI scheme %r" % uri_scheme )
    __uri_create_methods [ uri_scheme ] = ( create_method , module , c14n_uri_method , escape , cast , is_connected )
|
def send ( self , confirmation_email , send_date = "immediately" ) :
    """Send this campaign.

    Args:
        confirmation_email: address to notify once the send completes.
        send_date: when to send; defaults to "immediately".

    Returns:
        The raw response of the "send" API call (previously assigned to a
        local and discarded; returning it is backward compatible).
    """
    body = { "ConfirmationEmail" : confirmation_email , "SendDate" : send_date }
    return self . _post ( self . uri_for ( "send" ) , json . dumps ( body ) )
|
def format_parameter ( element ) :
    """Format one parameter for Quil output.

    Essentially the same as built-in formatting except using 'i' instead of
    'j' for the imaginary unit.

    :param element: {int, float, long, complex, Parameter} value to format.
    """
    if isinstance ( element , integer_types ) or isinstance ( element , np . int_ ) :
        return repr ( element )
    elif isinstance ( element , float ) :
        return _check_for_pi ( element )
    elif isinstance ( element , complex ) :
        out = ''
        r = element . real
        i = element . imag
        if i == 0 :
            return repr ( r )
        if r != 0 :
            out += repr ( r )
        # Unit imaginary parts print bare 'i'/'-i'; they require a zero
        # real part since `out` is overwritten, not appended to.
        if i == 1 :
            assert np . isclose ( r , 0 , atol = 1e-14 )
            out = 'i'
        elif i == - 1 :
            assert np . isclose ( r , 0 , atol = 1e-14 )
            out = '-i'
        elif i < 0 :
            out += repr ( i ) + 'i'
        elif r != 0 :
            out += '+' + repr ( i ) + 'i'
        else :
            out += repr ( i ) + 'i'
        return out
    elif isinstance ( element , MemoryReference ) :
        return str ( element )
    elif isinstance ( element , Expression ) :
        return _expression_to_string ( element )
    # NOTE: a second `isinstance(element, MemoryReference)` branch calling
    # element.out() was unreachable (shadowed by the str() branch above)
    # and has been removed.
    assert False , "Invalid parameter: %r" % element
|
def interpolate ( self , target , extent ) :
    """Move this vector towards *target* by *extent* (between 0 and 1)."""
    destination = cast_anything_to_vector ( target )
    delta = destination - self
    # In-place update so callers holding a reference see the move.
    self += extent * delta
|
def assert_instance_created ( self , model_class , ** kwargs ) :
    """Context manager checking that a matching model instance gets created.

    For example::

        >>> with self.assert_instance_created(Article, slug='lorem-ipsum'):
        ...     Article.objects.create(slug='lorem-ipsum')
    """
    return _InstanceContext (
        self . assert_instance_does_not_exist ,
        self . assert_instance_exists ,
        model_class ,
        ** kwargs
    )
|
def options ( self , urls = None , ** overrides ) :
    """Set the acceptable HTTP method to OPTIONS."""
    if urls is not None :
        # Fold the explicit urls argument into the override set.
        overrides = dict ( overrides , urls = urls )
    return self . where ( accept = 'OPTIONS' , ** overrides )
|
def irafcrop ( self , irafcropstring ) :
    """Wrapper around crop() using iraf conventions: bounds are inclusive,
    so 100:199 selects 100 pixels, not 99."""
    inner = irafcropstring [ 1 : - 1 ]  # strip the surrounding [ ]
    parts = inner . split ( "," )
    x_bounds = parts [ 0 ] . split ( ":" )
    y_bounds = parts [ 1 ] . split ( ":" )
    # +1 converts iraf's inclusive upper bound to Python's exclusive one.
    self . crop (
        int ( x_bounds [ 0 ] ) ,
        int ( x_bounds [ 1 ] ) + 1 ,
        int ( y_bounds [ 0 ] ) ,
        int ( y_bounds [ 1 ] ) + 1 ,
    )
|
def save ( self ) :
    """Push the current dispatch table onto the stack and re-initialize the
    working table to a copy of the default."""
    current = self . dispatch_table
    self . stack . append ( current )
    # Copy so later edits don't leak into the defaults.
    self . dispatch_table = self . default_table . copy ( )
|
def cpp_spec ( ) :
    """C++ output specification, provided as an example (java compatible)."""
    spec = {
        INDENTATION : '\t' ,
        BEG_BLOCK : '{' ,
        END_BLOCK : '}' ,
        BEG_LINE : '' ,
        END_LINE : '\n' ,
        BEG_ACTION : '' ,
        END_ACTION : ';' ,
        BEG_CONDITION : 'if(' ,
        END_CONDITION : ')' ,
        LOGICAL_AND : ' && ' ,
        LOGICAL_OR : ' || ' ,
    }
    return spec
|
def check_supported_function ( func , check_func ) :
    """Decorator implementation wrapping a support check around an ESPLoader
    bootloader function.

    Captures the multidimensional differences in functionality between the
    ESP8266 & ESP32 ROM loaders and the software stub that runs on both;
    not possible to do cleanly via inheritance alone.

    Raises NotImplementedInROMError when ``check_func(obj)`` is false for
    the loader instance (first positional argument).
    """
    from functools import wraps  # local import keeps the block self-contained

    @wraps ( func )  # preserve func's name/doc on the wrapper (was lost before)
    def inner ( * args , ** kwargs ) :
        obj = args [ 0 ]
        if check_func ( obj ) :
            return func ( * args , ** kwargs )
        raise NotImplementedInROMError ( obj , func )
    return inner
|
def templates ( self ) :
    """Generate a dictionary mapping template names to file paths."""
    loaded = [ ]
    if self . entry_point_group_templates :
        loaded = self . load_entry_point_group_templates ( self . entry_point_group_templates ) or [ ]
    merged = { }
    # Later mappings win on duplicate names, as with the original loop.
    for mapping in loaded :
        merged . update ( mapping )
    return merged
|
def _add_loss_summaries ( total_loss ) :
    """Add summaries for losses in the CIFAR-10 model.

    Generates a moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().

    Returns:
        loss_averages_op: op generating moving averages of losses.
    """
    # Exponential moving average over every individual loss plus the total.
    averager = tf . train . ExponentialMovingAverage ( 0.9 , name = 'avg' )
    individual_losses = tf . get_collection ( 'losses' )
    all_losses = individual_losses + [ total_loss ]
    loss_averages_op = averager . apply ( all_losses )
    for loss in all_losses :
        # The raw value keeps the name + ' (raw)'; the smoothed value takes
        # the original loss name.
        tf . summary . scalar ( loss . op . name + ' (raw)' , loss )
        tf . summary . scalar ( loss . op . name , averager . average ( loss ) )
    return loss_averages_op
|
def has_library ( compiler , libname ) :
    """Return a boolean indicating whether *libname* can be linked against."""
    with tempfile . NamedTemporaryFile ( "w" , suffix = ".cpp" ) as srcfile :
        # Minimal translation unit: linking it pulls in the library.
        srcfile . write ( "int main (int argc, char **argv) { return 0; }" )
        srcfile . flush ( )
        outfn = srcfile . name + ".so"
        try :
            compiler . link_executable ( [ srcfile . name ] , outfn , libraries = [ libname ] )
        except setuptools . distutils . errors . LinkError :
            return False
        if not os . path . exists ( outfn ) :
            return False
        # Success: clean up the linked artifact.
        os . remove ( outfn )
        return True
|
def float_to_latex ( x , format = "%.2g" ) :  # pylint: disable=redefined-builtin
    # pylint: disable=anomalous-backslash-in-string
    r"""Convert a floating point number to a LaTeX representation.

    In particular, scientific notation is handled gracefully: e -> 10^.

    Parameters
    ----------
    x : `float`
        the number to represent
    format : `str`, optional
        the output string format

    Returns
    -------
    tex : `str`
        a TeX representation of the input

    Examples
    --------
    >>> float_to_latex(100)
    '10^{2}'
    >>> float_to_latex(-500)
    r'-5\!\!\times\!\!10^{2}'
    """
    if x == 0. :
        return '0'
    rendered = format % x
    # No exponent marker means the plain rendering is already TeX-safe.
    if "e" not in rendered :
        return rendered
    mantissa , exponent = rendered . split ( "e" )
    mantissa_value = float ( mantissa )
    if mantissa_value . is_integer ( ) :
        mantissa = int ( mantissa_value )
    # Normalize the exponent: drop '+' and leading zeros, keep the sign.
    exponent = exponent . lstrip ( "0+" )
    if exponent . startswith ( '-0' ) :
        exponent = '-' + exponent [ 2 : ]
    if float ( mantissa ) == 1.0 :
        return r"10^{%s}" % exponent
    return r"%s\!\!\times\!\!10^{%s}" % ( mantissa , exponent )
|
def _validate_optional_key ( key , missing , value , validated , optional ) :
"""Validate an optional key ."""
|
try :
validated [ key ] = optional [ key ] ( value )
except NotValid as ex :
return [ '%r: %s' % ( key , arg ) for arg in ex . args ]
if key in missing :
missing . remove ( key )
return [ ]
|
def _ready_gzip_fastq ( in_files , data , require_bgzip = False ) :
    """Check if we have gzipped fastq needing no format conversion/splitting.

    Avoids forcing bgzip when indexed files are not required.
    """
    gz_ready = all ( not f or f . endswith ( ".gz" ) for f in in_files )
    if require_bgzip and gz_ready :
        # Every gzipped input must already be bgzipped.
        gz_ready = all ( not f or not _check_gzipped_input ( f , data ) [ 0 ] for f in in_files )
    needs_quality_convert = dd . get_quality_format ( data ) . lower ( ) == "illumina"
    needs_trim = dd . get_trim_ends ( data )
    needs_split = dd . get_align_split_size ( data ) is not False
    return ( gz_ready
            and not needs_quality_convert
            and not needs_split
            and not objectstore . is_remote ( in_files [ 0 ] )
            and not needs_trim
            and not get_downsample_params ( data ) )
|
def from_array ( array ) :
    """Deserialize a new PassportElementErrorFiles from a given dictionary.

    :return: new PassportElementErrorFiles instance, or None for empty input.
    :rtype: PassportElementErrorFiles
    """
    # None and empty dicts both deserialize to nothing.
    if not array :
        return None
    assert_type_or_raise ( array , dict , parameter_name = "array" )
    data = {
        'source' : u ( array . get ( 'source' ) ) ,
        'type' : u ( array . get ( 'type' ) ) ,
        'file_hashes' : PassportElementErrorFiles . _builtin_from_array_list ( required_type = unicode_type , value = array . get ( 'file_hashes' ) , list_level = 1 ) ,
        'message' : u ( array . get ( 'message' ) ) ,
    }
    instance = PassportElementErrorFiles ( ** data )
    instance . _raw = array
    return instance
|
def get_canonical_query_params ( self ) :
    """Return the canonical query params string (used in signing)."""
    pairs = (
        "%s=%s" % ( self . encode ( key ) , self . encode ( value ) )
        for key , value in self . sorted_params ( )
    )
    return "&" . join ( pairs )
|
def _get_pos ( self ) :
"""Get current position for scroll bar ."""
|
if self . _canvas . height >= self . _max_height :
return 0
else :
return self . _canvas . start_line / ( self . _max_height - self . _canvas . height + 1 )
|
def to_valid_state_vector ( state_rep : Union [ int , np . ndarray ] , num_qubits : int , dtype : Type [ np . number ] = np . complex64 ) -> np . ndarray :
    """Verify state_rep is valid and convert it to ndarray form.

    Supports passing either an integer (a computational basis state) or a
    full wave function as the representation of a state.

    Args:
        state_rep: If an int, the returned state is the corresponding
            computational basis state. If a numpy array, it is the full
            wave function. Both are validated for the given number of
            qubits; the state must be properly normalized and of the
            appropriate dtype.
        num_qubits: The number of qubits the state must be valid for.
        dtype: The numpy dtype of the state; used when creating a basis
            state, or validated against if state_rep is a numpy array.

    Returns:
        A numpy ndarray corresponding to the state on the given number of
        qubits.

    Raises:
        ValueError: if the state is not valid.
        TypeError: if state_rep is neither an int nor an ndarray.
    """
    if isinstance ( state_rep , np . ndarray ) :
        if len ( state_rep ) != 2 ** num_qubits :
            raise ValueError ( 'initial state was of size {} ' 'but expected state for {} qubits' . format ( len ( state_rep ) , num_qubits ) )
        state = state_rep
    elif isinstance ( state_rep , int ) :
        if state_rep < 0 :
            # Message fixed: 0 is an accepted basis state, so the actual
            # requirement is non-negativity, not positivity.
            raise ValueError ( 'initial_state must be non-negative' )
        elif state_rep >= 2 ** num_qubits :
            raise ValueError ( 'initial state was {} but expected state for {} qubits' . format ( state_rep , num_qubits ) )
        else :
            state = np . zeros ( 2 ** num_qubits , dtype = dtype )
            state [ state_rep ] = 1.0
    else :
        raise TypeError ( 'initial_state was not of type int or ndarray' )
    validate_normalized_state ( state , num_qubits , dtype )
    return state
|
def phoneid ( self , phone_number , ** params ) :
    """The PhoneID API provides a cleansed phone number, phone type, and
    telecom carrier information to determine the best communication
    method - SMS or voice.

    See https://developer.telesign.com/docs/phoneid-api for detailed API
    documentation.
    """
    resource = PHONEID_RESOURCE . format ( phone_number = phone_number )
    return self . post ( resource , ** params )
|
def parent ( self ) :
    """The parent directory of this path, wrapped in the same path class."""
    parent_path = self . _lib . dirname ( self . path )
    return self . __class__ ( parent_path )
|
def examples ( directory ) :
    """Generate example strategies into *directory*/examples.

    Copies the bundled "examples" folder shipped next to this module.
    If the target folder already exists a notice is printed; any other
    OSError is re-raised (previously it was silently swallowed).
    """
    source_dir = os . path . join ( os . path . dirname ( os . path . realpath ( __file__ ) ) , "examples" )
    try :
        shutil . copytree ( source_dir , os . path . join ( directory , "examples" ) )
    except OSError as e :
        if e . errno == errno . EEXIST :
            six . print_ ( "Folder examples is exists." )
        else :
            # BUG FIX: non-EEXIST errors (missing source, permissions, ...)
            # used to be silently ignored; surface them to the caller.
            raise
|
def find_enriched ( sample_entities = None , background_entities = None , object_category = None , ** kwargs ) :
    """Given a sample set of entities (e.g. overexpressed genes), a background
    set (e.g. all genes assayed), and a category of descriptor (e.g.
    phenotype, function), compute enriched descriptors/classes using
    Fisher's exact test.
    """
    if sample_entities is None :
        # BUG FIX: previously assigned to a misspelled name (sample_entites),
        # so sample_entities stayed None and len() below crashed.
        sample_entities = [ ]
    ( sample_counts , sample_results ) = get_counts ( entities = sample_entities , object_category = object_category , min_count = 2 , ** kwargs )
    print ( str ( sample_counts ) )
    sample_fcs = sample_results [ 'facet_counts' ]
    taxon_count_dict = sample_fcs [ M . SUBJECT_TAXON ]
    taxon = None
    for ( t , tc ) in taxon_count_dict . items ( ) :
        # TODO - throw error if multiple taxa
        taxon = t
    if background_entities is None :
        objects = list ( sample_counts . keys ( ) )
        print ( "OBJECTS=" + str ( objects ) )
        background_entities = get_background ( objects , taxon , object_category )
    # TODO: consider caching
    ( bg_counts , _ ) = get_counts ( entities = background_entities , object_category = object_category , ** kwargs )
    sample_n = len ( sample_entities )
    # TODO - annotated only?
    pop_n = len ( background_entities )
    # adapted from goatools
    for ( sample_termid , sample_count ) in sample_counts . items ( ) :
        pop_count = bg_counts [ sample_termid ]
        # https://en.wikipedia.org/wiki/Fisher's_exact_test
        #                Cls   NotCls
        # study/sample  [ a ,    b ]
        # rest of ref   [ c ,    d ]
        a = sample_count
        b = sample_n - sample_count
        c = pop_count - sample_count
        d = pop_n - pop_count - b
        print ( "ABCD=" + str ( ( sample_termid , a , b , c , d , sample_n ) ) )
        _ , p_uncorrected = sp . stats . fisher_exact ( [ [ a , b ] , [ c , d ] ] )
        print ( "P=" + str ( p_uncorrected ) )
|
def format_tsv_line ( source , edge , target , value = None , metadata = None ) :
    """Render a single TSV line describing a data flow.

    :type source str
    :type edge str
    :type target str
    :type value float
    :type metadata str
    :rtype: str
    """
    value_text = '{:.4f}' . format ( value ) if value is not None else ''
    line = '{}\t{}\t{}\t{}\t{}' . format ( source , edge , target , value_text , metadata or '' )
    # Drop trailing empty fields so optional columns don't leave tabs behind.
    return line . rstrip ( ' \t' )
|
def getPeopleFilters ( self ) :
    """Return an iterator of L{IPeopleFilter} providers available to this
    organizer's store."""
    # Built-in filters first, then plugin-provided ones, then tag filters.
    yield AllPeopleFilter ( )
    yield VIPPeopleFilter ( )
    for pluginMethod in self . _gatherPluginMethods ( 'getPeopleFilters' ) :
        for peopleFilter in pluginMethod ( ) :
            yield peopleFilter
    for tag in sorted ( self . getPeopleTags ( ) ) :
        yield TaggedPeopleFilter ( tag )
|
def encrypt_variable ( variable , build_repo , * , tld = '.org' , public_key = None , travis_token = None , ** login_kwargs ) :
    """Encrypt an environment variable for ``build_repo`` for Travis.

    ``variable`` should be a bytes object of the form ``b'ENV=value'``.
    ``build_repo`` is the repo that ``doctr deploy`` will be run from, like
    'drdoctr/doctr'. ``tld`` is ``'.org'`` for travis-ci.org and ``'.com'``
    for travis-ci.com. ``public_key`` is a pem format public key, fetched
    from Travis when not provided. For private repos ``travis_token``
    should come from ``get_temporary_token(**login_kwargs)``; a token
    automatically implies ``tld='.com'``.
    """
    if not isinstance ( variable , bytes ) :
        raise TypeError ( "variable should be bytes" )
    if b"=" not in variable :
        raise ValueError ( "variable should be of the form 'VARIABLE=value'" )
    if not public_key :
        base_headers = { 'Content-Type' : 'application/json' , 'User-Agent' : 'MyClient/1.0.0' , }
        headersv2 = { ** base_headers , ** Travis_APIv2 }
        headersv3 = { ** base_headers , ** Travis_APIv3 }
        if travis_token :
            # Token present: use the v3 API on travis-ci.com.
            headersv3 [ 'Authorization' ] = 'token {}' . format ( travis_token )
            res = requests . get ( 'https://api.travis-ci.com/repo/{build_repo}/key_pair/generated' . format ( build_repo = urllib . parse . quote ( build_repo , safe = '' ) ) , headers = headersv3 )
            if res . json ( ) . get ( 'file' ) == 'not found' :
                raise RuntimeError ( "Could not find the Travis public key for %s" % build_repo )
            public_key = res . json ( ) [ 'public_key' ]
        else :
            res = requests . get ( 'https://api.travis-ci{tld}/repos/{build_repo}/key' . format ( build_repo = build_repo , tld = tld ) , headers = headersv2 )
            public_key = res . json ( ) [ 'key' ]
        if res . status_code == requests . codes . not_found :
            raise RuntimeError ( 'Could not find requested repo on Travis. Is Travis enabled?' )
        res . raise_for_status ( )
    # Travis serves an "RSA PUBLIC KEY" header; cryptography expects the
    # generic "PUBLIC KEY" form.
    public_key = public_key . replace ( "RSA PUBLIC KEY" , "PUBLIC KEY" ) . encode ( 'utf-8' )
    rsa_key = serialization . load_pem_public_key ( public_key , backend = default_backend ( ) )
    pad = padding . PKCS1v15 ( )
    return base64 . b64encode ( rsa_key . encrypt ( variable , pad ) )
|
def put_property ( elt , key , value , ttl = None , ctx = None ) :
    """Put a single property on *elt* (thin wrapper over put_properties).

    :param elt: properties elt to put. Not None methods.
    :param number ttl: If not None, property time to leave.
    :param ctx: elt ctx from where put properties. Equals elt if None.
        Allows getting function properties related to a class or instance
        when the function is defined in a base class.
    :return: Timer if ttl is not None.
    :rtype: Timer
    """
    single_property = { key : value }
    return put_properties ( elt = elt , properties = single_property , ttl = ttl , ctx = ctx )
|
def getAvailableTemplates ( self ) :
    """Return the sticker templates available in this context.

    Each item is a dictionary with the structure:
    {'id': <template_id>, 'title': <template_title>, 'selected': True/False}
    """
    # Adapters for the current context provide context-specific templates.
    try :
        adapters = getAdapters ( ( self . context , ) , IGetStickerTemplates )
    except ComponentLookupError :
        logger . info ( "No IGetStickerTemplates adapters found." )
        adapters = None
    adapter_templates = [ ]
    if adapters is not None :
        for _name , adapter in adapters :
            adapter_templates += adapter ( self . request )
    if adapter_templates :
        return adapter_templates
    # No adapter-provided templates: fall back to every sticker template in
    # the system, marking the currently selected one.
    selected_id = self . getSelectedTemplate ( )
    results = [ ]
    for template in getStickerTemplates ( filter_by_type = self . filter_by_type ) :
        entry = template
        entry [ "selected" ] = template . get ( "id" , "" ) == selected_id
        results . append ( entry )
    return results
|
def forum_topic_delete ( self , topic_id ) :
    """Delete a topic (Login Requires) (Moderator+) (UNTESTED).

    Parameters:
        topic_id (int): Where topic_id is the topic id.
    """
    endpoint = 'forum_topics/{0}.json' . format ( topic_id )
    return self . _get ( endpoint , method = 'DELETE' , auth = True )
|
def get_mchirp ( h5group ) :
    """Calculate the chirp mass column for this PyCBC HDF5 table group."""
    m1 = h5group [ 'mass1' ] [ : ]
    m2 = h5group [ 'mass2' ] [ : ]
    # Standard chirp mass: (m1*m2)^(3/5) / (m1+m2)^(1/5).
    total_mass = m1 + m2
    return ( m1 * m2 ) ** ( 3 / 5. ) / total_mass ** ( 1 / 5. )
|
def random_square_mask ( shape , fraction ) :
    """Create a numpy array with specified shape and masked fraction.

    Args:
        shape: tuple, shape of the mask to create (height, width, channels).
        fraction: float, fraction of the mask area to zero out as a square
            patch at a random position.

    Returns:
        numpy.array: A numpy array storing the mask (1s, with a patch of 0s).
    """
    mask = np . ones ( shape )
    patch_area = shape [ 0 ] * shape [ 1 ] * fraction
    # np.int was removed in NumPy 1.24; the builtin int is the replacement.
    patch_dim = int ( math . floor ( math . sqrt ( patch_area ) ) )
    if patch_area == 0 or patch_dim == 0 :
        return mask
    # +1 so every valid offset (including the last) can be chosen, and so
    # fraction == 1 (patch_dim == dim) doesn't crash with randint(0).
    x = np . random . randint ( shape [ 0 ] - patch_dim + 1 )
    y = np . random . randint ( shape [ 1 ] - patch_dim + 1 )
    mask [ x : ( x + patch_dim ) , y : ( y + patch_dim ) , : ] = 0
    return mask
|
def _serialize_v2 ( self , macaroon ) :
    '''Serialize the macaroon in JSON format v2.

    @param macaroon the macaroon to serialize.
    @return JSON macaroon in v2 format.
    '''
    payload = { }
    _add_json_binary_field ( macaroon . identifier_bytes , payload , 'i' )
    # The signature is stored hex-encoded on the macaroon; decode it first.
    raw_signature = binascii . unhexlify ( macaroon . signature_bytes )
    _add_json_binary_field ( raw_signature , payload , 's' )
    if macaroon . location :
        payload [ 'l' ] = macaroon . location
    if macaroon . caveats :
        payload [ 'c' ] = [ _caveat_v2_to_dict ( caveat ) for caveat in macaroon . caveats ]
    return json . dumps ( payload )
|
def update ( self , callback = None , errback = None , ** kwargs ) :
    """Update zone configuration. Pass a list of keywords and their values
    to update. For the list of keywords available for zone configuration,
    see :attr:`ns1.rest.zones.Zones.INT_FIELDS` and
    :attr:`ns1.rest.zones.Zones.PASSTHRU_FIELDS`.
    """
    if not self . data :
        raise ZoneException ( 'zone not loaded' )

    def on_success ( result , * args ) :
        # Refresh local state from the server response before notifying.
        self . data = result
        if callback :
            return callback ( self )
        return self

    return self . _rest . update ( self . zone , callback = on_success , errback = errback , ** kwargs )
|
def create_paired_dir(output_dir, meta_id, static=False, needwebdir=True):
    """Creates the meta or static dirs.

    Adds an "even" or "odd" subdirectory to the static path based on the
    meta-id, builds a pairtree below it, and creates a directory named
    after the meta-id (plus an optional "web" subdirectory for static
    output).

    Args:
        output_dir: root output directory (created if missing).
        meta_id: identifier used for even/odd bucketing and the pairtree.
        static: if True, bucket output into an "even"/"odd" subdirectory.
        needwebdir: if True (and static), create and return a "web"
            subdirectory inside the meta-id directory.

    Returns:
        str: path of the created directory — the "web" dir for static
        output with ``needwebdir``, otherwise the meta-id directory.

    Raises:
        OSError: if the meta-id directory already exists (deliberate, to
            avoid silently reusing prior output).
    """
    # get the absolute root path
    root_path = os.path.abspath(output_dir)
    if static:
        # Bucket by parity of the meta-id's last character: digits use
        # their numeric value, anything else its ordinal.
        last = meta_id[-1]
        parity = int(last) if last.isdigit() else ord(last)
        num_dir = 'even' if parity % 2 == 0 else 'odd'
        output_path = os.path.join(root_path, num_dir)
    else:
        output_path = root_path
    # makedirs (unlike the previous os.mkdir) also creates a missing
    # root, so a fresh output_dir no longer crashes the static case.
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # add the pairtree to the output path
    path_name = add_to_pairtree(output_path, meta_id)
    # add the meta-id directory to the end of the pairpath
    meta_dir = os.path.join(path_name, meta_id)
    os.mkdir(meta_dir)
    if static and needwebdir:
        # add the web path to the output directory
        static_dir = os.path.join(meta_dir, 'web')
        os.mkdir(static_dir)
        return static_dir
    # else we are creating meta output or don't need a web directory
    return meta_dir
|
def post_stats(cls, stats_url, stats, timeout=2, auth_provider=None):
    """POST stats to the given url.

    :param stats_url: endpoint to upload stats to.
    :param stats: dict of stat name -> value; each value is JSON-encoded
        as a separate top-level form parameter (see TODO below).
    :param timeout: per-request timeout in seconds.
    :param auth_provider: provider name shown in the re-auth hint printed
        when an auth-related redirect is detected.
    :return: True if upload was successful, False otherwise.
    """
    def error(msg):  # Report already closed, so just print error.
        print('WARNING: Failed to upload stats to {}. due to {}'.format(stats_url, msg), file=sys.stderr)
        return False
    # TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
    # values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
    # But this will first require changing the upload receiver at every shop that uses this.
    params = {k: cls._json_dump_options(v) for (k, v) in stats.items()}
    cookies = Cookies.global_instance()
    auth_provider = auth_provider or '<provider>'
    # We can't simply let requests handle redirects, as we only allow them for specific codes:
    # 307 and 308 indicate that the redirected request must use the same method, POST in this case.
    # So they indicate a true redirect of the POST itself, and we allow them.
    # The other redirect codes either must, or in practice do, cause the user agent to switch the
    # method to GET. So when they are encountered on a POST, it indicates an auth problem (a
    # redirection to a login page).
    def do_post(url, num_redirects_allowed):
        # Recursive manual redirect-following with a bounded depth.
        if num_redirects_allowed < 0:
            return error('too many redirects.')
        r = requests.post(url, data=params, timeout=timeout, cookies=cookies.get_cookie_jar(), allow_redirects=False)
        if r.status_code in {307, 308}:
            # Same-method redirect of the POST: follow it ourselves.
            return do_post(r.headers['location'], num_redirects_allowed - 1)
        elif r.status_code != 200:
            error('HTTP error code: {}. Reason: {}.'.format(r.status_code, r.reason))
            # A GET-style redirect (3xx other than 307/308) or a 401 on a
            # POST indicates we were bounced to a login page, so hint at
            # how to re-authenticate.
            if 300 <= r.status_code < 400 or r.status_code == 401:
                print('Use `path/to/pants login --to={}` to authenticate against the stats ' 'upload service.'.format(auth_provider), file=sys.stderr)
            return False
        return True
    try:
        return do_post(stats_url, num_redirects_allowed=6)
    except Exception as e:  # Broad catch - we don't want to fail the build over upload errors.
        return error('Error: {}'.format(e))
|
def skip_whitespace(self):
    """Consume input until a non-whitespace character is encountered.

    The non-whitespace character is then ungotten, and the number of
    whitespace characters consumed is returned.
    If the tokenizer is in multiline mode, then newlines are whitespace.
    @rtype: int
    """
    count = 0
    while True:
        ch = self._get_char()
        # Newlines only count as whitespace in multiline mode.
        is_ws = ch == ' ' or ch == '\t' or (ch == '\n' and self.multiline)
        if not is_ws:
            self._unget_char(ch)
            return count
        count += 1
|
def pin_in_object_store(obj):
    """Pin an object in the object store.

    It will be available as long as the pinning process is alive. The
    pinned object can be retrieved by calling get_pinned_object on the
    identifier returned by this call.
    """
    pinned_id = ray.put(_to_pinnable(obj))
    # Holding a local reference keeps the object alive in the store.
    _pinned_objects.append(ray.get(pinned_id))
    encoded = base64.b64encode(pinned_id.binary()).decode("utf-8")
    return "{}{}".format(PINNED_OBJECT_PREFIX, encoded)
|
def release_filter(self, value):
    """Validate the release filter."""
    pattern = coerce_pattern(value)
    # At most one capture group is allowed; the group (if any) selects
    # the part of the release string used for matching.
    if pattern.groups > 1:
        raise ValueError(compact("""
            Release filter regular expression pattern is expected to have
            zero or one capture group, but it has {count} instead!
        """, count=pattern.groups))
    set_property(self, 'release_filter', value)
    set_property(self, 'compiled_filter', pattern)
|
def uninstall_pgpm_from_db(self):
    """Removes pgpm from db and all related metadata (_pgpm schema). Install packages are left as they are.

    Requires a superuser connection: the process is terminated with
    ``sys.exit(1)`` if the current DB user is not a superuser.

    :return: 0 if successful and error otherwise
    """
    drop_schema_cascade_script = 'DROP SCHEMA {schema_name} CASCADE;'
    # Reconnect if the connection was closed since the last operation.
    if self._conn.closed:
        self._conn = psycopg2.connect(self._connection_string, connection_factory=pgpm.lib.utils.db.MegaConnection)
    cur = self._conn.cursor()
    # get current user
    cur.execute(pgpm.lib.utils.db.SqlScriptsHelper.current_user_sql)
    current_user = cur.fetchone()[0]
    # check if current user is a super user
    cur.execute(pgpm.lib.utils.db.SqlScriptsHelper.is_superuser_sql)
    is_cur_superuser = cur.fetchone()[0]
    if not is_cur_superuser:
        self._logger.debug('User {0} is not a superuser. Only superuser can remove pgpm'.format(current_user))
        sys.exit(1)
    self._logger.debug('Removing pgpm from DB by dropping schema {0}' .format(self._pgpm_schema_name))
    # CASCADE drops every object stored inside the pgpm metadata schema.
    cur.execute(drop_schema_cascade_script.format(schema_name=self._pgpm_schema_name))
    # Commit transaction
    self._conn.commit()
    self._conn.close()
    return 0
|
def get_plugin_info(plugin):
    """Fetch information about the given package on PyPI and return it as a dict.

    :param plugin: name of the package to look up.
    :return: the ``info`` dict from the package's PyPI JSON metadata.
    :raises NameError: if the package cannot be found on PyPI (HTTP 404).
    :raises ValueError: on any other HTTP error, or if the response body
        is not valid JSON.
    """
    url = 'https://pypi.python.org/pypi/{}/json'.format(plugin)
    try:
        resp = request.urlopen(url)
    except HTTPError as e:
        if e.code == 404:
            raise NameError("Plugin {} could not be found.".format(plugin))
        else:
            # Bug fix: the previous code referenced ``resp.getcode()`` here,
            # but ``resp`` is unbound when urlopen raises; use the error's
            # own status code instead.
            raise ValueError("Checking plugin status on {} returned HTTP code {}".format(url, e.code))
    try:
        json_resp = json.loads(resp.read().decode())
    # Catch ValueError instead of JSONDecodeError which is only available in
    # Python 3.5+
    except ValueError:
        raise ValueError("Could not decode JSON info for plugin at {}".format(url))
    return json_resp['info']
|
def set_epsilon_greedy_rate(self, value):
    '''setter

    Args:
        value:  Rate of the epsilon-greedy strategy; must be a `float`.

    Raises:
        TypeError:  If `value` is not a `float`.
    '''
    # `isinstance(...) is True` was redundant; isinstance already
    # returns a bool.
    if isinstance(value, float):
        self.__epsilon_greedy_rate = value
    else:
        raise TypeError("The type of __epsilon_greedy_rate must be float.")
|
def composition(self):
    """(Composition) Returns the composition"""
    # Accumulate total occupancy per species across all sites.
    amounts = {}
    for site in self:
        for species, occu in site.species.items():
            amounts[species] = amounts.get(species, 0.0) + occu
    return Composition(amounts)
|
def writeString(self, s):
    """Writes a string to the stream. It will be B{UTF-8} encoded."""
    # The context handles the actual encoding (and any caching of it).
    self.writeBytes(self.context.getBytesForString(s))
|
def next_stop(self):
    """Return the next stop for this bus."""
    predictions = self.api.predictions(vid=self.vid)['prd']
    # The first prediction in the list is the soonest one.
    nxt = Prediction.fromapi(self.api, predictions[0])
    nxt._busobj = self
    return nxt
|
def message_with_options(self, *, args=None, kwargs=None, **options):
    """Build a message with an arbitray set of processing options.

    This method is useful if you want to compose actors. See the
    actor composition documentation for details.

    Parameters:
      args(tuple): Positional arguments that are passed to the actor.
      kwargs(dict): Keyword arguments that are passed to the actor.
      **options(dict): Arbitrary options that are passed to the
        broker and any registered middleware.

    Returns:
      Message: A message that can be enqueued on a broker.
    """
    # Success/failure callbacks may be given as Actor instances; they are
    # stored by name so the message stays serializable.
    for name in ("on_failure", "on_success"):
        cb = options.get(name)
        if isinstance(cb, Actor):
            options[name] = cb.actor_name
        elif cb is not None and not isinstance(cb, str):
            raise TypeError(name + " value must be an Actor")
    return Message(
        queue_name=self.queue_name,
        actor_name=self.actor_name,
        args=args or (),
        kwargs=kwargs or {},
        options=options,
    )
|
def _add(self, handler, allow_dupe=False, send_event=True):
    """Add handler instance and attach any events to it.

    :param object handler: handler instance
    :param bool allow_dupe: If True, allow registering a handler more than once.
    :return object: The handler you added is given back so this can be used as a decorator.
    """
    is_duplicate = handler in self.handlers
    if is_duplicate and not allow_dupe:
        raise ValueError("Handler already present: %s" % handler)
    self.handlers.append(handler)
    self._attach_handler_events(handler)
    if send_event:
        # Notify listeners that a new handler has been registered.
        self.on_handler_add(handler)
|
def _create_window_function(name, doc=''):
    """Create a window function by name"""
    def _():
        # Look up the JVM-side function lazily, at call time, so the
        # active SparkContext is the one in use when invoked.
        sc = SparkContext._active_spark_context
        return Column(getattr(sc._jvm.functions, name)())
    _.__name__ = name
    _.__doc__ = 'Window function: ' + doc
    return _
|
def is_present(self, locator):
    """Tests to see if an element is present

    @type locator: webdriverwrapper.support.locator.Locator
    @param locator: locator used in search
    @rtype: bool
    @return: True if present, False if not present
    """
    # Scope the search to this element by passing it as search_object.
    wrapper = self.driver_wrapper
    return wrapper.is_present(locator, search_object=self.element)
|
def _next_seg(self):
    """Get next seg.

    Closes the current segment (if any), advances ``_seg_index``, and
    opens the next segment file named ``<_seg_prefix><index>``. Sets
    ``self._seg`` to None once past ``_last_seg_index`` to signal
    exhaustion to the caller. Also reads the segment's valid length
    from its GCS metadata and validates it against the file size.

    Raises:
        ValueError: if the metadata lacks the valid-length key, or the
            recorded valid length exceeds the actual file size.
    """
    if self._seg:
        self._seg.close()
    self._seg_index += 1
    if self._seg_index > self._last_seg_index:
        # No more segments left.
        self._seg = None
        return
    filename = self._seg_prefix + str(self._seg_index)
    stat = cloudstorage.stat(filename)
    writer = output_writers._GoogleCloudStorageOutputWriter
    if writer._VALID_LENGTH not in stat.metadata:
        raise ValueError("Expect %s in metadata for file %s." % (writer._VALID_LENGTH, filename))
    # Only the first _seg_valid_length bytes of the segment contain data.
    self._seg_valid_length = int(stat.metadata[writer._VALID_LENGTH])
    if self._seg_valid_length > stat.st_size:
        raise ValueError("Valid length %s is too big for file %s of length %s" % (self._seg_valid_length, filename, stat.st_size))
    self._seg = cloudstorage.open(filename)
|
def com_google_fonts_check_wght_valid_range(ttFont):
    """The variable font 'wght' (Weight) axis coordinate
    must be within spec range of 1 to 1000 on all instances."""
    # NOTE: the docstring doubles as the check's user-facing rationale,
    # so it is kept verbatim.
    for instance in ttFont['fvar'].instances:
        wght = instance.coordinates.get('wght')
        if wght is not None and not (1 <= wght <= 1000):
            yield FAIL, (f"Found a bad wght coordinate with value '{wght}'" " outside of the valid range from 1 to 1000.")
            break
    else:
        # Loop finished without a break: every instance was in range.
        yield PASS, ("OK")
|
def set_not_found_handler(self, handler, version=None):
    """Sets the not_found handler for the specified version of the api"""
    handlers = self.not_found_handlers
    if not handlers:
        # Lazily initialize the backing mapping on first registration.
        self._not_found_handlers = {}
        handlers = self.not_found_handlers
    handlers[version] = handler
|
def from_bank_code(cls, country_code, bank_code):
    """Create a new BIC object from country- and bank-code.

    Examples:
        >>> bic = BIC.from_bank_code('DE', '20070000')
        >>> bic.country_code
        'DE'
        >>> bic.bank_code
        'DEUT'
        >>> bic.location_code
        'HH'
        >>> BIC.from_bank_code('DE', '01010101')
        Traceback (most recent call last):
        ValueError: Invalid bank code '01010101' for country 'DE'

    Args:
        country_code (str): ISO 3166 alpha2 country-code.
        bank_code (str): Country specific bank-code.

    Returns:
        BIC: a BIC object generated from the given country code and bank code.

    Raises:
        ValueError: If the given bank code wasn't found in the registry

    Note:
        This currently only works for German bank-codes.
    """
    try:
        # The registry maps (country, bank-code) pairs to bank records.
        spec = registry.get('bank_code')[(country_code, bank_code)]
        return cls(spec['bic'])
    except KeyError:
        raise ValueError(
            "Invalid bank code {!r} for country {!r}".format(bank_code, country_code))
|
def quote(code):
    """Returns quoted code if not already quoted and if possible

    Parameters
    ----------
    code: String
    \tCode that is quoted
    """
    try:
        code = code.rstrip()
    except AttributeError:
        # code is not a string, may be None --> There is no code to quote
        return code
    if not code:
        return code
    edges = code[0] + code[-1]
    # Leave the code alone if it already looks quoted, or if wrapping in
    # double quotes would clash with an embedded double quote.
    if edges in ('""', "''", "u'", '"') or '"' in code:
        return code
    return 'u"' + code + '"'
|
def get_line_value(self, context_type):
    """Get the values defined on this line.

    :param context_type: "ENV" or "LABEL"
    :return: values of given type defined on this line (None for any
        other context type)
    """
    kind = context_type.upper()
    if kind == "ENV":
        return self.line_envs
    if kind == "LABEL":
        return self.line_labels
    return None
|
def searches(self):
    """*The search-block (if any) associated with this document*

    **Usage:**

    .. code-block:: python

        # DOCUMENT SEARCHES
        docSearchBlock = doc.searches
    """
    # Match a "[Searches]:" title, optional @tags, and the indented
    # content lines that follow it.
    pattern = re.compile(
        r'((?<=\n)|(?<=^))(?P<title>\[Searches\]:) *(?P<tagString>( *?@\S*(\(.*?\))?)+)?(?P<content>(\n( |\t).*)*)',
        re.UNICODE)
    return self._get_object(regex=pattern, objectType="searchBlock", content=None)
|
def Build(self):
    """Builds a client config dictionary used in the OAuth 2.0 flow.

    :return: dict keyed by client_type, containing the client id/secret
        and the auth/token endpoint URIs.
    :raises ValueError: if any required field is unset or empty.
    """
    required = (self.client_type, self.client_id, self.client_secret,
                self.auth_uri, self.token_uri)
    # Every field must be present (and truthy) to build a usable config.
    if not all(required):
        raise ValueError('Required field is missing.')
    return {
        self.client_type: {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'auth_uri': self.auth_uri,
            'token_uri': self.token_uri,
        }
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.