| signature (string, length 29–44.1k) | implementation (string, length 0–85.2k) |
|---|---|
def update_docs(self, t, module):
    """Update the documentation for the specified type using the module predocs.

    Looks up the fully-qualified name of ``t`` in the parent module's
    pre-parsed docstrings; when found, parses it and records the doc
    text plus its start/end positions on ``t``.
    """
    # Decorating tags for this type live in the parent module's docstrings.
    key = "{}.{}".format(module.name, t.name)
    if key in module.predocs:
        entry = module.predocs[key]
        t.docstring = self.docparser.to_doc(entry[0], t.name)
        t.docstart, t.docend = entry[1], entry[2]
def resource_url(self, entity_id, name, revision):
    '''Return the resource url for a given resource on an entity.

    @param entity_id The id of the entity to get resource for.
    @param name The name of the resource.
    @param revision The revision of the resource.
    '''
    entity_path = _get_path(entity_id)
    return '{}/{}/resource/{}/{}'.format(self.url, entity_path, name, revision)
def get_assessment_taken_mdata():
    """Return default mdata map for AssessmentTaken.

    Both entries ('assessment_offered' and 'taker') share the exact same
    Id-field template, so it is built once by a private helper instead of
    duplicating the literal twice.
    """
    def _id_element_mdata(label):
        # One Id-typed element descriptor with display-text metadata.
        return {
            'element_label': {
                'text': label,
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'instructions': {
                'text': 'accepts an osid.id.Id object',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        }

    return {
        'assessment_offered': _id_element_mdata('assessment offered'),
        'taker': _id_element_mdata('taker'),
    }
def xyz(self):
    """:returns: an array of shape (N, 3) with the cartesian coordinates"""
    # Flatten each coordinate grid before the spherical -> cartesian conversion.
    lons, lats, depths = self.lons.flat, self.lats.flat, self.depths.flat
    return geo_utils.spherical_to_cartesian(lons, lats, depths)
def exists(self, vars_list: List[str]) -> 'TensorFluent':
    '''Returns the TensorFluent for the exists aggregation function.

    Args:
        vars_list: The list of variables to be aggregated over.

    Returns:
        A TensorFluent wrapping the exists aggregation function.
    '''
    # "exists" over a boolean fluent is a reduce-any across the given variables.
    reducer = tf.reduce_any
    return self._aggregation_op(reducer, self, vars_list)
def str(self, indent=0):
    """Render this XML fragment as a pretty-printed string.

    @param indent: The indent level used in formatting the output
        (each level adds three spaces).
    @type indent: int
    @return: A I{pretty} string.
    @rtype: basestring
    """
    tab = "%*s" % (indent * 3, "")
    result = []
    result.append("%s<%s" % (tab, self.qname()))
    result.append(self.nsdeclarations())
    for a in self.attributes:
        # NOTE(review): relies on Python 2 ``unicode``; not portable to Python 3.
        result.append(" %s" % (unicode(a),))
    if self.isempty():
        # No text and no children: emit a self-closing tag.
        result.append("/>")
        return "".join(result)
    result.append(">")
    if self.hasText():
        result.append(self.text.escape())
    for c in self.children:
        # Each child is rendered on its own line, one indent level deeper.
        result.append("\n")
        result.append(c.str(indent + 1))
    if len(self.children):
        # Close tag goes on its own indented line when children were printed.
        result.append("\n%s" % (tab,))
    result.append("</%s>" % (self.qname(),))
    return "".join(result)
def add_dispatcher(self, dsp, inputs, outputs, dsp_id=None, input_domain=None,
                   weight=None, inp_weight=None, description=None,
                   include_defaults=False, await_domain=None, **kwargs):
    """Add a single sub-dispatcher node to dispatcher.

    :param dsp:
        Child dispatcher that is added as sub-dispatcher node to the parent
        dispatcher.
    :type dsp: Dispatcher | dict[str, list]

    :param inputs:
        Inputs mapping. Data node ids from parent dispatcher to child
        sub-dispatcher.
    :type inputs: dict[str, str | list[str]] | tuple[str] |
        (str, ..., dict[str, str | list[str]])

    :param outputs:
        Outputs mapping. Data node ids from child sub-dispatcher to parent
        dispatcher.
    :type outputs: dict[str, str | list[str]] | tuple[str] |
        (str, ..., dict[str, str | list[str]])

    :param dsp_id:
        Sub-dispatcher node id. If None will be assigned as <dsp.name>.
    :type dsp_id: str, optional

    :param input_domain:
        A function that checks if input values satisfy the function domain.
        This can be any function that takes a dictionary with the inputs of
        the sub-dispatcher node and returns True if input values satisfy
        the domain, otherwise False.

        .. note:: This function is invoked every time that a data node
           reaches the sub-dispatcher node.
    :type input_domain: (dict) -> bool, optional

    :param weight:
        Node weight. It is a weight coefficient that is used by the dispatch
        algorithm to estimate the minimum workflow.
    :type weight: float, int, optional

    :param inp_weight:
        Edge weights from data nodes to the sub-dispatcher node.
        It is a dictionary (key=data node id) with the weight coefficients
        used by the dispatch algorithm to estimate the minimum workflow.
    :type inp_weight: dict[str, int | float], optional

    :param description:
        Sub-dispatcher node's description.
    :type description: str, optional

    :param include_defaults:
        If True the default values of the sub-dispatcher are added to the
        current dispatcher.
    :type include_defaults: bool, optional

    :param await_domain:
        If True the Dispatcher waits for all input results before executing
        the `input_domain` function. If a number is defined this is used as
        `timeout` for `Future.result` method [default: True]. Note this is
        used when asynchronous or parallel execution is enabled.
    :type await_domain: bool | int | float, optional

    :param kwargs:
        Set additional node attributes using key=value.
    :type kwargs: keyword arguments, optional

    :return:
        Sub-dispatcher node id.
    :rtype: str

    .. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
       :func:`add_from_lists`

    **Example**:

        >>> dsp = Dispatcher(name='Dispatcher')
        >>> sub_dsp = Dispatcher()
        >>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
        'max'
        >>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
        ...                    inputs={'A': 'a', 'B': 'b'},
        ...                    outputs={'c': 'C'})
        'Sub-Dispatcher'
    """
    from .utils.blue import _init
    dsp = _init(dsp)
    if not isinstance(dsp, self.__class__):
        # ``dsp`` was given as a dict of lists: build a child Dispatcher from it.
        kw = dsp
        dsp = self.__class__(name=dsp_id or 'unknown', executor=self.executor)
        dsp.add_from_lists(**kw)
    if not dsp_id:  # Get the dsp id.
        dsp_id = dsp.name or 'unknown'
    if description is None:  # Get description.
        description = dsp.__doc__ or None
    if not isinstance(inputs, dict):  # Create the inputs dict.
        inputs = kk_dict(*inputs)
    if not isinstance(outputs, dict):  # Create the outputs dict.
        outputs = kk_dict(*outputs)
    # Set zero as default input distances.
    # noinspection PyTypeChecker
    _weight_from = dict.fromkeys(inputs.keys(), 0.0)
    _weight_from.update(inp_weight or {})
    from .utils.alg import _nodes
    # Register the sub-dispatcher as a function-like node and keep its id.
    dsp_id = self.add_function(
        dsp_id, dsp, sorted(_nodes(inputs)), sorted(_nodes(outputs.values())),
        input_domain, weight, _weight_from, type='dispatcher',
        description=description, wait_inputs=False, await_domain=await_domain,
        **kwargs
    )
    # Set proper inputs.
    self.nodes[dsp_id]['inputs'] = inputs
    # Set proper outputs.
    self.nodes[dsp_id]['outputs'] = outputs
    if SINK not in dsp.nodes and \
            SINK in _nodes(inputs.values()).union(_nodes(outputs)):
        # Add sink node.
        dsp.add_data(SINK)
    # Import default values from sub-dispatcher.
    if include_defaults:
        # Namespace shortcut.
        dsp_dfl = dsp.default_values
        # Set of nodes to remove after the import.
        remove = set()
        # Set default values.
        for k, v in inputs.items():
            if isinstance(v, str):
                if v in dsp_dfl:
                    self.set_default_value(k, **dsp_dfl.pop(v))
            else:
                # ``v`` maps one parent node to several child nodes: the
                # default is imported from the first, the rest are dropped.
                if v[0] in dsp_dfl:
                    self.set_default_value(k, **dsp_dfl.pop(v[0]))
                remove.update(v[1:])
        # Remove default values.
        for k in remove:
            dsp_dfl.pop(k, None)
    return dsp_id
def _salt_send_domain_event(opaque, conn, domain, event, event_data):
    '''Helper function to send a salt event for a libvirt domain.

    :param opaque: the opaque data that is passed to the callback.
        This is a dict with 'prefix', 'object' and 'event' keys.
    :param conn: libvirt connection
    :param domain: the domain object related to the event
    :param event: name of the event
    :param event_data: additional event data dict to send
    '''
    payload = {
        'domain': {
            'name': domain.name(),
            'id': domain.ID(),
            'uuid': domain.UUIDString(),
        },
        'event': event,
    }
    payload.update(event_data)
    _salt_send_event(opaque, conn, payload)
def get_session_url(session_id, type='view', **params):
    """Allowed types are: `view`, `assets`, `download`."""
    relative = 'sessions/{}/{}'.format(session_id, type)
    absolute = urljoin(API_URL, relative)
    return add_to_url(absolute, **params)
def findCommunities(G):
    """Partition network with the Infomap algorithm.

    Annotates nodes with 'community' id and return number of communities found.
    """
    im = infomap.Infomap("--two-level")
    print("Building Infomap network from a NetworkX graph...")
    for edge in G.edges():
        im.addLink(*edge)
    print("Find communities with Infomap...")
    im.run()
    tree = im.tree
    print("Found %d top modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))
    # Map each leaf's original node index to the module it was assigned to.
    membership = {}
    for leaf in tree.leafIter():
        membership[leaf.originalLeafIndex] = leaf.moduleIndex()
    nx.set_node_attributes(G, name='community', values=membership)
    return tree.numTopModules()
def show_in_notebook(self, labels=None, predict_proba=True, show_predicted_value=True, **kwargs):
    """Shows html explanation in ipython notebook.

    See as_html() for parameters.
    This will throw an error if you don't have IPython installed.
    """
    from IPython.core.display import display, HTML
    html = self.as_html(labels=labels, predict_proba=predict_proba,
                        show_predicted_value=show_predicted_value, **kwargs)
    display(HTML(html))
def search_reports_page(self, search_term=None, enclave_ids=None, from_time=None,
                        to_time=None, tags=None, excluded_tags=None,
                        page_size=None, page_number=None):
    """Search for reports containing a search term.

    :param str search_term: The term to search for. If empty, no search term
        will be applied. Otherwise, must be at least 3 characters.
    :param list(str) enclave_ids: list of enclave ids used to restrict reports
        to specific enclaves (optional - by default reports from all of the
        user's enclaves are returned)
    :param int from_time: start of time window in milliseconds since epoch (optional)
    :param int to_time: end of time window in milliseconds since epoch (optional)
    :param list(str) tags: Name (or list of names) of tag(s) to filter reports by.
        Only reports containing ALL of these tags will be returned. (optional)
    :param list(str) excluded_tags: Reports containing ANY of these tags will be
        excluded from the results.
    :param int page_size: the size of the page to be returned.
    :param int page_number: the page number to get. (optional)
    :return: a |Page| of |Report| objects.  *NOTE*: The bodies of these reports
        will be ``None``.
    """
    body = {'searchTerm': search_term}
    params = {
        'enclaveIds': enclave_ids,
        'from': from_time,
        'to': to_time,
        'tags': tags,
        'excludedTags': excluded_tags,
        'pageSize': page_size,
        'pageNumber': page_number,
    }
    resp = self._client.post("reports/search", params=params, data=json.dumps(body))
    return Page.from_dict(resp.json(), content_type=Report)
def write_hdf5_flag_group(flag, h5group, **kwargs):
    """Write a `DataQualityFlag` into the given HDF5 group."""
    # write segmentlists
    flag.active.write(h5group, 'active', **kwargs)
    kwargs['append'] = True
    flag.known.write(h5group, 'known', **kwargs)
    # store metadata as group attributes, skipping unset values
    for name in ('name', 'label', 'category', 'description', 'isgood', 'padding'):
        attr_value = getattr(flag, name)
        if attr_value is None:
            continue
        if isinstance(attr_value, Quantity):
            # HDF5 attrs can't hold a Quantity; store the bare number.
            h5group.attrs[name] = attr_value.value
        elif isinstance(attr_value, UnitBase):
            h5group.attrs[name] = str(attr_value)
        else:
            h5group.attrs[name] = attr_value
    return h5group
def delete_releasefile(self, release):
    """Delete the releasefile of the given release.

    This is intended to be used in an action unit.

    :param release: the release with the releasefile
    :type release: :class:`Release`
    :returns: an action status
    :rtype: :class:`ActionStatus`
    :raises: None
    """
    releasefile = release._releasefile
    fullpath = releasefile.get_fullpath()
    log.info("Deleting release file %s", fullpath)
    delete_file(releasefile)
    return ActionStatus(ActionStatus.SUCCESS, msg="Deleted %s" % fullpath)
def FindRegex(self, regex, data):
    """Search the data for a hit, yielding each match as a (start, end) span.

    Matching is case-insensitive, '.' matches newlines, and ^/$ match at
    line boundaries (re.I | re.S | re.M).
    """
    flags = re.I | re.S | re.M
    for hit in re.finditer(regex, data, flags=flags):
        yield (hit.start(), hit.end())
def list_resourcepools(kwargs=None, call=None):
    '''List all the resource pools for this VMware environment

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_resourcepools my-vmware-config
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_resourcepools function must be called with '
            '-f or --function.'
        )
    pools = salt.utils.vmware.list_resourcepools(_get_si())
    return {'Resource Pools': pools}
def to_index_variable(self):
    """Return this variable as an xarray.IndexVariable"""
    # fastpath=True skips re-validation since the data is already checked.
    return IndexVariable(
        self.dims, self._data, self._attrs,
        encoding=self._encoding, fastpath=True,
    )
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
                allow_missing=False, force_init=False, allow_extra=False):
    """Initializes the parameters and auxiliary states. By default this function
    does nothing. A subclass should override this method if it contains
    parameters.

    Parameters
    ----------
    initializer : Initializer
        Called to initialize parameters if needed.
    arg_params : dict
        If not ``None``, should be a dictionary of existing `arg_params`.
        Initialization will be copied from that.
    aux_params : dict
        If not ``None``, should be a dictionary of existing `aux_params`.
        Initialization will be copied from that.
    allow_missing : bool
        If ``True``, params could contain missing values, and the initializer
        will be called to fill those missing params.
    force_init : bool
        If ``True``, will force re-initialize even if already initialized.
    allow_extra : boolean, optional
        Whether to allow extra parameters that are not needed by symbol.
        If this is True, no error will be thrown when arg_params or aux_params
        contain extra parameters that are not needed by the executor.
    """
    # Intentionally a no-op: the base class holds no parameters to initialize.
    pass
def aliased_slot_name(self, slot: SLOT_OR_SLOTNAME) -> str:
    """Return the overloaded slot name -- the alias if one exists, otherwise
    the actual name.

    @param slot: either a slot name or a definition
    @return: overloaded name
    """
    if isinstance(slot, str):
        # Resolve a bare name to its slot definition first.
        slot = self.schema.slots[slot]
    return slot.alias or slot.name
def rstrip_extra(fname):
    """Strip extraneous, non-discriminative filename info from the end of a file.

    Repeatedly removes trailing read-pair markers, 'fastq', dots, dashes and
    underscores until none remain.
    """
    suffixes = ("_R", ".R", "-R", "_", "fastq", ".", "-")
    stripped = fname
    while stripped.endswith(suffixes):
        # Remove the first matching suffix, then re-check from the start.
        for suffix in suffixes:
            if stripped.endswith(suffix):
                stripped = stripped[:-len(suffix)]
                break
    return stripped
def add_certificate(self, body, **kwargs):  # noqa: E501
    """Upload a new trusted certificate.  # noqa: E501

    An endpoint for uploading new trusted certificates.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.add_certificate(body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param TrustedCertificateReq body: A trusted certificate object with attributes. (required)
    :return: TrustedCertificateResp
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous mode: hand the request thread back to the caller.
        return self.add_certificate_with_http_info(body, **kwargs)  # noqa: E501
    (data) = self.add_certificate_with_http_info(body, **kwargs)  # noqa: E501
    return data
def lemmatize(text, lowercase=True, remove_stopwords=True):
    """Return the lemmas of the tokens in a text."""
    doc = nlp(text)

    def _keep(token):
        # When filtering, drop spaCy stop words and our own STOPWORDS list.
        if not remove_stopwords:
            return True
        return not (token.is_stop or token.orth_.lower() in STOPWORDS)

    def _form(token):
        return token.lemma_.lower() if lowercase else token.lemma_

    return [_form(token) for token in doc if _keep(token)]
def set_timestamp_to_current(self):
    """Set timestamp to the current UTC time.

    :rtype: None
    """
    now = datetime.datetime.utcnow()
    # Good form to attach tzinfo so the stored timestamp is timezone-aware.
    self.timestamp = pytz.UTC.localize(now)
def update_company(self, company, update_mask=None,
                   retry=google.api_core.gapic_v1.method.DEFAULT,
                   timeout=google.api_core.gapic_v1.method.DEFAULT,
                   metadata=None):
    """Updates specified company.

    Example:
        >>> from google.cloud import talent_v4beta1
        >>> client = talent_v4beta1.CompanyServiceClient()
        >>> # TODO: Initialize `company`:
        >>> company = {}
        >>> response = client.update_company(company)

    Args:
        company (Union[dict, ~google.cloud.talent_v4beta1.types.Company]): Required.
            The company resource to replace the current resource in the system.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.talent_v4beta1.types.Company`
        update_mask (Union[dict, ~google.cloud.talent_v4beta1.types.FieldMask]):
            Optional but strongly recommended for the best service experience.
            If ``update_mask`` is provided, only the specified fields in
            ``company`` are updated. Otherwise all the fields are updated.
            Only top level fields of ``Company`` are supported.
            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.talent_v4beta1.types.FieldMask`
        retry (Optional[google.api_core.retry.Retry]): A retry object used to
            retry requests. If ``None`` is specified, requests will not be
            retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait for
            the request to complete. Note that if ``retry`` is specified, the
            timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.talent_v4beta1.types.Company` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed
            for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to a
            retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic (cached after
    # the first call).
    if "update_company" not in self._inner_api_calls:
        self._inner_api_calls[
            "update_company"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.update_company,
            default_retry=self._method_configs["UpdateCompany"].retry,
            default_timeout=self._method_configs["UpdateCompany"].timeout,
            client_info=self._client_info,
        )
    request = company_service_pb2.UpdateCompanyRequest(
        company=company, update_mask=update_mask
    )
    return self._inner_api_calls["update_company"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def _prepare_quote(quote, author, max_len=78):
    """Process a quote and return the list of formatted lines that make up
    the fancy prompt: word-wrapped text prefixed with ' | ', followed by a
    final line with the author right-aligned."""
    quote = quote.split(' ')
    max_len -= 6  # reserve room for the ' | ' decoration
    lines = []
    cur_line = []

    def _len(line):
        # Rendered width of ``line``: word lengths plus separating spaces.
        return sum(len(elt) for elt in line) + len(line) - 1

    while quote:
        # Greedily add words while the next one still fits on this line.
        if not cur_line or (_len(cur_line) + len(quote[0]) - 1 <= max_len):
            cur_line.append(quote.pop(0))
            continue
        # Line is full: flush it and start a new one (the word that did not
        # fit is picked up on the next iteration).
        lines.append(' | %s' % ' '.join(cur_line))
        cur_line = []
    if cur_line:
        # Flush the last, partially filled line.
        lines.append(' | %s' % ' '.join(cur_line))
        cur_line = []
    # Attribution line, padded so the author ends near the right margin.
    lines.append(' | %s-- %s' % (" " * (max_len - len(author) - 5), author))
    return lines
def setContentFor(self, widget):
    """Update toolbox contents with the data corresponding to a given tab."""
    for index in range(self.count()):
        tab = self.widget(index)
        # Static tabs receive static content; the rest get regular content.
        update = tab.setStaticContent if widget.isStatic else tab.setContent
        update(widget)
def _request(self, method, path, server=None, **kwargs):
    """Execute a request to the cluster.

    A server is selected from the server pool unless ``server`` is pinned
    explicitly. Unavailable pool servers are dropped and the request is
    retried on another; a pinned server that fails raises immediately.
    """
    while True:
        next_server = server or self._get_server()
        try:
            response = self.server_pool[next_server].request(
                method, path, username=self.username, password=self.password,
                schema=self.schema, **kwargs
            )
            redirect_location = response.get_redirect_location()
            if redirect_location and 300 <= response.status <= 308:
                # Follow the redirect, remembering its target as a usable server.
                redirect_server = _server_url(redirect_location)
                self._add_server(redirect_server)
                return self._request(method, path, server=redirect_server, **kwargs)
            if not server and response.status in SRV_UNAVAILABLE_STATUSES:
                with self._lock:  # drop server from active ones
                    self._drop_server(next_server, response.reason)
            else:
                return response
        except (urllib3.exceptions.MaxRetryError,
                urllib3.exceptions.ReadTimeoutError,
                urllib3.exceptions.SSLError,
                urllib3.exceptions.HTTPError,
                urllib3.exceptions.ProxyError,) as ex:
            ex_message = _ex_to_message(ex)
            if server:
                # A pinned server failed: do not fall back to the pool.
                raise ConnectionError("Server not available, exception: %s" % ex_message)
            # Keep the server in the pool only for specific protocol errors
            # whose wrapped cause is in PRESERVE_ACTIVE_SERVER_EXCEPTIONS.
            preserve_server = False
            if isinstance(ex, urllib3.exceptions.ProtocolError):
                preserve_server = any(
                    t in [type(arg) for arg in ex.args]
                    for t in PRESERVE_ACTIVE_SERVER_EXCEPTIONS
                )
            if (not preserve_server):
                with self._lock:  # drop server from active ones
                    self._drop_server(next_server, ex_message)
        except Exception as e:
            raise ProgrammingError(_ex_to_message(e))
def write_metadata(self, fp):
    """Writes metadata to the given file handler.

    Parameters
    ----------
    fp : pycbc.inference.io.BaseInferenceFile instance
        The inference file to write to.
    """
    attrs = fp.attrs
    attrs['model'] = self.name
    attrs['variable_params'] = list(self.variable_params)
    attrs['sampling_params'] = list(self.sampling_params)
    fp.write_kwargs_to_attrs(attrs, static_params=self.static_params)
def get_file_handle(file_path):
    """Return cyvcf2 VCF object

    Args:
        file_path (str): path to an existing VCF file

    Returns:
        vcf_obj (cyvcf2.VCF)

    Raises:
        IOError: if the file is missing or has an invalid extension.
    """
    LOG.debug("Check if file end is correct")
    if not os.path.exists(file_path):
        raise IOError("No such file:{0}".format(file_path))
    extension = os.path.splitext(file_path)[-1]
    if extension not in VALID_ENDINGS:
        raise IOError("Not a valid vcf file name: {}".format(file_path))
    return VCF(file_path)
def agent_for_socks_port(reactor, torconfig, socks_config, pool=None):
    """This returns a Deferred that fires with an object that implements
    :class:`twisted.web.iweb.IAgent` and is thus suitable for passing to
    ``treq`` as the ``agent=`` kwarg. Of course it can be used directly; see
    `using Twisted web client
    <http://twistedmatrix.com/documents/current/web/howto/client.html>`_. If
    you have a :class:`txtorcon.Tor` instance already, the preferred API is
    to call :meth:`txtorcon.Tor.web_agent` on it.

    :param torconfig: a :class:`txtorcon.TorConfig` instance.

    :param socks_config: anything valid for Tor's ``SocksPort`` option. This
        is generally just a TCP port (e.g. ``9050``), but can also be a unix
        path like so ``unix:/path/to/socket`` (Tor has restrictions on the
        ownership/permissions of the directory containing ``socket``). If the
        given SOCKS option is not already available in the underlying Tor
        instance, it is re-configured to add the SOCKS option.
    """
    # :param tls: True (the default) will use Twisted's default options
    # with the hostname in the URI -- that is, TLS verification
    # similar to a Browser. Otherwise, you can pass whatever Twisted
    # returns for `optionsForClientTLS
    # <https://twistedmatrix.com/documents/current/api/twisted.internet.ssl.optionsForClientTLS.html>`_
    socks_config = str(socks_config)
    # sadly, all lists are lists-of-strings to Tor :/
    if socks_config not in torconfig.SocksPort:
        txtorlog.msg("Adding SOCKS port '{}' to Tor".format(socks_config))
        torconfig.SocksPort.append(socks_config)
        try:
            yield torconfig.save()
        except Exception as e:
            raise RuntimeError(
                "Failed to reconfigure Tor with SOCKS port '{}': {}".format(
                    socks_config, str(e)
                )
            )
    if socks_config.startswith('unix:'):
        socks_ep = UNIXClientEndpoint(reactor, socks_config[5:])
    else:
        if ':' in socks_config:
            host, port = socks_config.split(':', 1)
            # NOTE(review): ``port`` stays a str on this branch — confirm
            # TCP4ClientEndpoint accepts it (the bare-port branch converts).
        else:
            host = '127.0.0.1'
            port = int(socks_config)
        socks_ep = TCP4ClientEndpoint(reactor, host, port)
    returnValue(
        Agent.usingEndpointFactory(
            reactor,
            _AgentEndpointFactoryUsingTor(reactor, socks_ep),
            pool=pool,
        )
    )
def register_share_command(self, share_func):
    """Add 'share' command for adding view-only project permissions and
    sending email via another service.

    :param share_func: function to run when the user chooses this option
    """
    description = ("Share a project with another user with specified permissions. "
                   "Sends the other user an email message via D4S2 service. "
                   "If not specified this command gives user download permissions.")
    parser = self.subparsers.add_parser('share', description=description)
    add_project_name_or_id_arg(parser)
    # Exactly one of --user / --email identifies the share target.
    target_group = parser.add_mutually_exclusive_group(required=True)
    add_user_arg(target_group)
    add_email_arg(target_group)
    _add_auth_role_arg(parser, default_permissions='file_downloader')
    _add_resend_arg(parser, "Resend share")
    _add_message_file(parser, "Filename containing a message to be sent with the share. "
                              "Pass - to read from stdin.")
    parser.set_defaults(func=share_func)
import re
def delete_leading_zeroes_in_ip(ip_address: str) -> str:
    """Function that removes leading zeroes from an IP address.

    Fixed: the previous pattern ``\\.0*`` also consumed octets that were
    exactly ``0`` (turning '10.0.0.1' into '10...1') and never touched the
    first octet. The lookahead keeps at least one digit per octet, and the
    ``(^|\\.)`` alternation also covers the leading octet.

    Args:
        ip_address (str): an ip address.

    Returns:
        str: IP address without leading zeroes.

    Examples:
        >>> delete_leading_zeroes_in_ip('216.08.094.196')
        '216.8.94.196'
        >>> delete_leading_zeroes_in_ip('12.01.024')
        '12.1.24'
        >>> delete_leading_zeroes_in_ip('216.08.094.0196')
        '216.8.94.196'
        >>> delete_leading_zeroes_in_ip('10.0.0.1')
        '10.0.0.1'
    """
    return re.sub(r'(^|\.)0+(?=\d)', r'\1', ip_address)
def _match_value_filter ( self , p , value ) :
"""Returns True of False if value in the pattern p matches the filter .""" | return self . _VALUE_FILTER_MAP [ p [ 0 ] ] ( value [ p [ 1 ] ] , p [ 2 ] ) |
def info(self, verbose=None):
    """Print the formatted timing comparison for all timed callables.

    @verbose: #bool True if you'd like to print the individual timing
        results in addition to the comparison results; when None, falls
        back to ``self.verbose``.
    """
    if self.name:
        flag(bold(self.name))
    flag("Results after {} intervals".format(bold(self.num_intervals, close=False)), colors.notice_color, padding="top")
    line("‒")
    verbose = verbose if verbose is not None else self.verbose
    if verbose:
        # Per-callable detail before the summary table.
        for result in self._callable_results:
            result.info()
        line()
    # Pair each callable index with its mean runtime, skipping empty results.
    diffs = [(i, result.mean) for i, result in enumerate(self._callable_results) if result.mean]
    # Rank fastest-to-slowest, pre-formatting each mean for display.
    ranking = [(i, self._callable_results[i].format_time(r)) for i, r in sorted(diffs, key=lambda x: x[1])]
    # Column widths: rank marker and formatted-time columns.
    max_rlen = len(str(len(ranking))) + 2
    max_rlen2 = max(len(r) for i, r in ranking) + 1
    best = self._callable_results[ranking[0][0]].mean
    for idx, (i, rank) in enumerate(ranking, 1):
        _obj_name = Look(self._callables[i]).objname()
        # The fastest entry gets blank padding instead of a percent diff.
        pct = "".rjust(10) if idx == 1 else self._pct_diff(best, self._callable_results[i].mean)
        print(("#" + str(idx) + " ¦").rjust(max_rlen), rank.rjust(max_rlen2), pct, "{}".format(_obj_name))
    line("‒", padding="bottom")
def _get_recursive_difference(self, type):
    '''Return the recursive diff between dict values.

    ``type`` selects which portion of the diff to expand: 'intersect',
    'added', 'removed' or 'all'.
    '''
    def _intersect_diffs():
        return [recursive_diff(item['old'], item['new']) for item in self._intersect]

    def _added_diffs():
        return [recursive_diff({}, item) for item in self._added]

    def _removed_diffs():
        return [recursive_diff(item, {}, ignore_missing_keys=False)
                for item in self._removed]

    if type == 'intersect':
        return _intersect_diffs()
    if type == 'added':
        return _added_diffs()
    if type == 'removed':
        return _removed_diffs()
    if type == 'all':
        return _intersect_diffs() + _added_diffs() + _removed_diffs()
    raise ValueError('The given type for recursive list matching '
                     'is not supported.')
def path(self, goal):
    """Get the shortest way between two nodes of the graph.

    Args:
        goal (str): Name of the targeted node

    Return:
        list of Node

    Raises:
        ValueError: if no route towards ``goal`` is known from this node.
    """
    if goal == self.name:
        return [self]
    if goal not in self.routes:
        raise ValueError("Unknown '{0}'".format(goal))
    # Follow each hop's routing entry until the goal node is reached.
    node = self
    hops = [node]
    while node.name != goal:
        node = node.routes[goal].direction
        hops.append(node)
    return hops
def keltner(self, n, dev, array=False):
    """Keltner channel: SMA mid band shifted up/down by ``dev`` ATRs.

    :param n: lookback window passed to sma/atr.
    :param dev: band width multiplier applied to the ATR.
    :param array: forwarded to sma/atr (series vs scalar result).
    :return: tuple ``(upper_band, lower_band)``.
    """
    middle = self.sma(n, array)
    band = self.atr(n, array) * dev
    return middle + band, middle - band
def predict_type(self):
    """Traverse the ref_elements path and determine the component type being
    referenced.

    Walks each (name, array_suffixes, src_ref) entry of self.ref_elements
    down from self.ref_root, resolving children by name and validating any
    array indexing along the way.

    Returns:
        The class (type object) of the final component reached.

    Note: resolution/indexing errors are reported through self.msg.fatal.
    """
    current_comp = self.ref_root
    for name, array_suffixes, name_src_ref in self.ref_elements:
        # find instance
        current_comp = current_comp.get_child_by_name(name)
        if current_comp is None:
            # Not found!
            self.msg.fatal(
                "Could not resolve hierarchical reference to '%s'" % name,
                name_src_ref
            )
        # Do type-check in array suffixes
        for array_suffix in array_suffixes:
            array_suffix.predict_type()
        # Check array suffixes
        if (isinstance(current_comp, comp.AddressableComponent)) and current_comp.is_array:
            # is an array: the number of supplied indices must match the
            # component's declared dimensions
            if len(array_suffixes) != len(current_comp.array_dimensions):
                self.msg.fatal(
                    "Incompatible number of index dimensions after '%s'. Expected %d, found %d."
                    % (name, len(current_comp.array_dimensions), len(array_suffixes)),
                    name_src_ref
                )
        elif array_suffixes:
            # Has array suffixes. Check if compatible with referenced component
            self.msg.fatal(
                "Unable to index non-array component '%s'" % name,
                name_src_ref
            )
    return type(current_comp)
def latex2png(snippet, outfile):
    """Compile a LaTeX snippet to a cropped PNG.

    Writes the snippet into a temporary .tex document inside IMAGEDIR,
    compiles it with lualatex, crops the resulting PDF with pdfcrop and
    rasterizes it with ghostscript to ``IMAGEDIR/<outfile>.png``.

    :param snippet: LaTeX source fragment inserted into LATEX_DOC.
    :param outfile: Base name (no extension) of the generated PNG.
    """
    pngimage = os.path.join(IMAGEDIR, outfile + '.png')
    texdocument = os.path.join(IMAGEDIR, 'tmp.tex')
    with open(texdocument, 'w') as doc:
        doc.write(LATEX_DOC % (snippet))
    # BUG FIX: copy the environment. Binding os.environ directly and then
    # assigning a key would mutate the environment of the whole process,
    # leaking shell_escape_commands into every subsequent subprocess.
    environment = os.environ.copy()
    environment['shell_escape_commands'] = (
        "bibtex,bibtex8,kpsewhich,makeindex,mpost,repstopdf,"
        + ','.join(
            os.path.basename(n)
            for n in chain.from_iterable(
                iglob(os.path.join(chemin, 'gregorio*'))
                for chemin in os.environ["PATH"].split(os.pathsep)
            )
        )
    )
    proc = Popen(
        ["lualatex", '-output-directory=' + IMAGEDIR, texdocument],
        stdin=PIPE, stdout=STDERR, env=environment
    )
    # communicate() closes stdin itself and waits for lualatex to finish,
    # so no explicit proc.stdin.close() afterwards.
    proc.communicate()
    call(["pdfcrop", os.path.join(IMAGEDIR, "tmp.pdf")], stdout=STDERR)
    call(
        ["gs", "-sDEVICE=pngalpha", "-r144",
         "-sOutputFile=" + pngimage,
         os.path.join(IMAGEDIR, "tmp-crop.pdf")],
        stdout=STDERR,
    )
def get_poll(poll_id):
    """Fetch a strawpoll by its identifier.

    Example:
        poll = strawpy.get_poll('11682852')

    :param poll_id: identifier of the poll to fetch
    :return: strawpy.StrawPoll object
    """
    url = '{api_url}/{poll_id}'.format(api_url=api_url, poll_id=poll_id)
    return StrawPoll(requests.get(url))
def update_alert(self, id, **kwargs):  # noqa: E501
    """Update a specific alert.  # noqa: E501

    Synchronous by default. To make an asynchronous HTTP request,
    pass ``async_req=True``:
        >>> thread = api.update_alert(id, async_req=True)
        >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param Alert body: the alert definition to store
    :return: ResponseContainerAlert
        If the method is called asynchronously, returns the request thread.
    """
    # Always request the data payload only, never the full HTTP envelope.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.update_alert_with_http_info(id, **kwargs)  # noqa: E501
    data = self.update_alert_with_http_info(id, **kwargs)  # noqa: E501
    return data
def _run__exec(self, action, replace):
    """Run the system command described by ``action``.

    The command comes from ``action['cmd']``; a plain string is run via a
    shell, a list is run directly. Unless ``action['template']`` is set to
    a falsy value, ``replace`` values are macro-expanded into the command.
    Logs the outcome and calls self.die() on a nonzero exit.
    """
    cmd = action.get('cmd')
    # A string command means shell execution; a list is exec'd directly.
    shell = isinstance(cmd, str)
    if replace and action.get("template", True):
        if shell:
            cmd = self.rfxcfg.macro_expand(cmd, replace)
        else:
            cmd = [self.rfxcfg.macro_expand(part, replace) for part in cmd]
    self.logf("Action {} exec\n", action['name'])
    self.logf("{}\n", cmd, level=common.log_cmd)
    if not self.sys(cmd):
        self.die("Failure\n", level=common.log_err)
        return
    self.logf("Success\n", level=common.log_good)
def newComment(content):
    """Create a new node containing the given comment text.

    Raises treeError when the underlying libxml2 call fails.
    """
    raw = libxml2mod.xmlNewComment(content)
    if raw is None:
        raise treeError('xmlNewComment() failed')
    return xmlNode(_obj=raw)
def make_response(obj):
    """Coerce ``obj`` into a Response.

    A Response passes through untouched; any other non-None value is
    wrapped in a 200 Response. None is rejected with TypeError.
    """
    if obj is None:
        raise TypeError("Handler return value cannot be None.")
    return obj if isinstance(obj, Response) else Response(200, body=obj)
def insert_global_var(self, vname, vtype):
    """Insert a new global variable into the symbol table."""
    # A global-var id may clash with existing globals or functions.
    conflicting_kinds = [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.FUNCTION]
    return self.insert_id(vname, SharedData.KINDS.GLOBAL_VAR, conflicting_kinds, vtype)
def add_speaker(self, **kwargs):
    """Create a new BGPSpeaker instance.

    Usage:
        Method  URI
        POST    /vtep/speakers

    Request parameters:
        dpid       ID of Datapath binding to speaker (e.g. 1)
        as_number  AS number (e.g. 65000)
        router_id  Router ID (e.g. "172.17.0.1")

    Returns a JSON Response with the created speaker, or a 404 response
    when the datapath is unknown.
    """
    try:
        result = self.vtep_app.add_speaker(**kwargs)
    except DatapathNotFound as exc:
        return exc.to_response(status=404)
    payload = json.dumps(result)
    return Response(content_type='application/json', body=payload)
def _pre_compute_secondary ( self , positive_vals , negative_vals ) :
"""Compute secondary y min and max""" | self . _secondary_min = ( negative_vals and min ( min ( negative_vals ) , self . zero ) ) or self . zero
self . _secondary_max = ( positive_vals and max ( max ( positive_vals ) , self . zero ) ) or self . zero |
def delete_config(self, mount_point=DEFAULT_MOUNT_POINT):
    """Delete the stored Azure configuration and credentials.

    Supported methods:
        DELETE: /auth/{mount_point}/config. Produces: 204 (empty body)

    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    return self._adapter.delete(
        url='/v1/{mount_point}/config'.format(mount_point=mount_point),
    )
def _start(self):
    """Start a task, which may involve starting zero or more processes.

    This is indicated as an internal method because tasks are really
    only ever marked as startable by the configuration. Any task
    that should be running and is not will be started during regular
    manage() calls. A task set to run only once will be started only
    if the _stopped attribute is None.

    If a task requires another task, it won't be started until the
    required task has started, except if the required task has 'once'
    control, then it won't be started until the 'once' task has
    stopped.

    Currently, processes are started via direct fork/exec, with
    stdin/stdout/stderr all redirected from /dev/null. In future,
    will probably add options to redirect stdout/stderr to syslog
    or files.

    Note that processes are intentionally not detached or put in
    separate process groups or terminal groups. The presumption is
    that "async" and "adopt" tasks will handle this themselves, and
    we need "wait" tasks to not be detached.

    Returns True to request a shorter period before the next call,
    False if nothing special is needed.
    """
    log = self._params.get('log', self._discard)
    if self._stopping:
        # A stop is in flight; poll again soon rather than starting anything.
        log.debug("%s task is stopping", self._name)
        return True
    now = time.time()
    conf = self._config_running
    control = self._get(conf.get('control'))
    once = (control in self._legion.once_controls)
    # Tasks with "event" control are immediately marked stopped as if they
    # ran at start. This is the only difference between "event" and "once"
    # controls.
    if control == 'event' and not self._stopped:
        self._stopped = now
    if self._stopped:
        if self._dnr:
            # Do-not-resuscitate: a stopped task flagged for deletion is closed.
            log.info("Task '%s' stopped and will now be deleted", self._name)
            self.close()
            return False
        elif once:
            # One-shot tasks are never restarted after they exit.
            log.debug("'%s' task %s exited %s ago", control, self._name, deltafmt(time.time() - self._stopped))
            return False
        else:
            log.debug("Restarting %s, task was stopped %s ago", self._name, deltafmt(time.time() - self._stopped))
            self._reset_state()
    # Parse the configured start_delay; fall back to 0 on bad values.
    start_delay = self._get(conf.get('start_delay'))
    if start_delay:
        try:
            start_delay = int(start_delay)
        except Exception as e:
            log.error("Task '%s' has invalid start_delay '%s'", self._name, start_delay)
            start_delay = 0
    else:
        start_delay = 0
    if self._starting and not self._started:
        # A start is pending; promote to "started" once the delay has elapsed.
        if now > self._starting + start_delay:
            log.info("%s task marked started after %s", self._name, deltafmt(now - self._starting))
            self._mark_started()
            return False
        log.debug("%s task has been starting for %s of %s", self._name, deltafmt(now - self._starting), deltafmt(start_delay))
        return True
    # Check the required state to ensure dependencies have been started. In the case of
    # 'once' controls, the dependency must have already stopped, otherwise it must have
    # started.
    if self._started and control != 'suspend':
        log.debug("Task '%s' already started, skipping requires-check", self._name)
    else:
        for req in self.get_requires():
            if req._config_running.get('control') == 'once':
                if not req._stopped:
                    # Rate-limit the "waiting" log line via _last_message.
                    if self._last_message + repetition_limit < time.time():
                        log.info("Task '%s' is waiting on '%s' to complete", self._name, req._name)
                        self._last_message = now
                    return True
            else:
                if not req._started:
                    if self._last_message + repetition_limit < time.time():
                        log.info("Task '%s' is waiting on '%s' to start", self._name, req._name)
                        self._last_message = now
                    return True
    self._last_message = 0
    if once:
        # "once" processes are immediately marked as stopping.
        self._stopping = now
    try:
        start_command = None
        if 'commands' in conf:
            start_command = self._get_list(conf['commands'].get('start'))
        if not start_command:
            raise TaskError(self._name, "No 'start' command in task configuration")
        if not isinstance(start_command, list):
            start_command = list(start_command)
        if control != 'suspend':
            # Reconcile the desired process count with what is running.
            needed = self._get(conf.get('count'), default=1)
            running = len(self.get_pids())
            if needed < running:
                self._shrink(needed, running)
                return False
            elif needed == running:
                log.debug("all %d needed process%s running", running, ses(running, 'es'))
                return False
        self._starting = now
        if not start_delay:
            self._mark_started()
        if control == 'suspend':
            # Suspended tasks start no processes and stop any that remain.
            if not self._suspended:
                log.debug("%s just moved to %r", self._name, control)
                running = len(self.get_pids())
                if running > 0:
                    log.debug("%s now %r, stopping running processes", self._name, control)
                    self._shrink(0, running)
                else:
                    log.debug("%s is %r control, skipping process startup", self._name, control)
                self._suspended = now
            return False
        else:
            if self._suspended:
                log.debug("%s just moved to %r", self._name, control)
                self._suspended = None
        log.debug("Found %d running, %d needed, starting %d", running, needed, needed - running)
        started = 0
        for instance in range(needed):
            if instance < len(self._proc_state):
                proc = self._proc_state[instance]
                if proc.pid is not None:
                    log.debug("%s instance %d already started", self._name, instance)
                    continue
                if proc.started is None:
                    proc.started = now
                last_start_delta = now - proc.started
                if last_start_delta < 0:
                    # This can happen when the system clock is manually set. As one
                    # of the goals here is to restart ntpd when it dies due to exceeding
                    # the panic threshold (1000 seconds), go ahead and mark the time
                    # as now so the task restart will only be delayed slightly longer
                    # than normal.
                    log.warning("Time flowed backwards, resetting %s instance %d start time", self._name, instance)
                    proc.started = now
                    continue
                if last_start_delta < reexec_delay:
                    # Throttle restart attempts for recently-started instances.
                    log.debug("%s instance %d restart skipped, last attempt %s ago", self._name, instance, deltafmt(last_start_delta))
                    continue
            else:
                log.debug("%s growing instance %d", self._name, instance)
                self._proc_state.append(ProcessState())
                proc = self._proc_state[instance]
            pid = _exec_process(start_command, self._context, instance=instance, log=log)
            log.debug("Forked pid %d for '%s', %d of %d now running", pid, self._name, len(self.get_pids()), needed)
            self._legion.proc_add(event_target(self, 'proc_exit', key=pid, log=log))
            proc.pid = pid
            proc.started = now
            started += 1
        log.info("Task %s: %d process%s scheduled to start%s", self._name, started, ses(started, 'es'), (' with time limit %s' % (deltafmt(self._limit - now),)) if self._limit else '')
    except Exception as e:
        log.error("Failed to start task '%s' -- %s", self._name, e, exc_info=log.isEnabledFor(logging.DEBUG))
    return False
def check(text):
    """Check the text for greylisted words.

    Returns a list of (start, end, error_code, message, replacement)
    tuples, one per occurrence.
    """
    err = "strunk_white.greylist"
    msg = "Use of '{}'. {}"
    bad_words = ["obviously", "utilize"]
    explanations = {
        "obviously": "This is obviously an inadvisable word to use.",
        "utilize": r"Do you know anyone who *needs* to utilize the word utilize?",
    }
    lowered = text.lower()
    results = []
    for word in bad_words:
        for match in re.finditer(word, lowered):
            results.append(
                (match.start(), match.end(), err,
                 msg.format(word, explanations[word]), None))
    return results
def _round(self):
    """Round all child geometry, then the glyph's width and height.

    Subclasses may override this method.
    """
    for collection in (self.contours, self.components, self.anchors, self.guidelines):
        for element in collection:
            element.round()
    self.width = normalizers.normalizeRounding(self.width)
    self.height = normalizers.normalizeRounding(self.height)
def patch(nml_path, nml_patch, out_path=None):
    """Create a new namelist based on an input namelist and reference dict.

    >>> f90nml.patch('data.nml', nml_patch, 'patched_data.nml')

    Equivalent to calling ``Parser.read`` with the patch output arguments:

    >>> parser = f90nml.Parser()
    >>> nml = parser.read('data.nml', nml_patch, 'patched_data.nml')

    A patched namelist file retains any formatting or comments from the
    original file; modified values are formatted per the ``Namelist``
    object's settings.
    """
    return Parser().read(nml_path, nml_patch, out_path)
def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, blink=None, reverse=None, reset=True):
    """Style a text with ANSI codes and return the new string.

    By default the result is self contained: a reset-all code is appended
    so styles do not carry over. Pass ``reset=False`` to compose styles.

    Examples::

        click.echo(click.style('Hello World!', fg='green'))
        click.echo(click.style('ATTENTION!', blink=True))

    Supported color names: ``black``, ``red``, ``green``, ``yellow``,
    ``blue``, ``magenta``, ``cyan``, ``white``, ``reset``. Exact rendering
    depends on the terminal.

    .. versionadded:: 2.0

    :param text: the string to style with ansi codes.
    :param fg: foreground color name.
    :param bg: background color name.
    :param bold: enable or disable bold mode.
    :param dim: enable or disable dim mode (badly supported).
    :param underline: enable or disable underline.
    :param blink: enable or disable blinking.
    :param reverse: enable or disable inverse rendering.
    :param reset: append a reset-all code at the end (default True).
    """
    codes = []
    if fg:
        try:
            codes.append('\033[%dm' % (_ansi_colors.index(fg) + 30))
        except ValueError:
            raise TypeError('Unknown color %r' % fg)
    if bg:
        try:
            codes.append('\033[%dm' % (_ansi_colors.index(bg) + 40))
        except ValueError:
            raise TypeError('Unknown color %r' % bg)
    # Each tri-state flag maps to an "on" code and an "off" code; None
    # means "leave the terminal's current state alone".
    for flag, on_code, off_code in (
            (bold, 1, 22), (dim, 2, 22), (underline, 4, 24),
            (blink, 5, 25), (reverse, 7, 27)):
        if flag is not None:
            codes.append('\033[%dm' % (on_code if flag else off_code))
    codes.append(text)
    if reset:
        codes.append(_ansi_reset_all)
    return ''.join(codes)
def _writeText(self, image, text, pos):
    """Render each character of ``text`` onto ``image`` with random morphing."""
    x, y = pos
    offset = 0
    for letter in text:
        # Draw the letter on its own transparent tile.
        size = self.font.getsize(letter)
        tile = Image.new('RGBA', size, (0, 0, 0, 0))
        ImageDraw.Draw(tile).text((0, 0), letter, font=self.font, fill=(0, 0, 0, 255))
        # Apply a random distortion, then composite using the tile as its own mask.
        tile = self._rndLetterTransform(tile)
        image.paste(tile, (x + offset, y), tile)
        offset += size[0]
def calc_v_qa_v1(self):
    """Update the stored water volume based on the equation of continuity.

    Basic equation: dV/dt = QZ - QA. For outflow values high enough to
    overdrain the lake, the outflow is trimmed so the volume never drops
    below zero.

    Required derived parameters: Seconds, NmbSubsteps.
    Required flux sequence: QZ.
    Updated aide sequences: llake_aides.QA, llake_aides.V.

    Note that the results depend on the (inner) calculation step size
    defined by parameter ``maxdt``, not on the (outer) simulation step.
    """
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    aides = self.sequences.aides.fastaccess
    # Trim the outflow so that the available volume cannot be overdrawn
    # within one internal calculation substep.
    aides.qa = min(aides.qa, fluxes.qz + derived.nmbsubsteps / derived.seconds * aides.v)
    # Continuity update; never let the volume fall below zero.
    aides.v = max(aides.v + derived.seconds / derived.nmbsubsteps * (fluxes.qz - aides.qa), 0.)
def create(self, ticket, payload=None, expires=None):
    """Create a session identifier in memcache associated with ``ticket``.

    A falsy payload is stored as True so the key is always present.
    """
    self._client.set(str(ticket), payload if payload else True, expires)
def _output_dirnames(workflow=None, leaf=False):
    """Collect output directories.

    Args:
        workflow: optional collection of steps
        leaf: only include leaves of the workflow

    Returns: If workflow is specified, output directories for all target
        steps in the workflow. Otherwise, all extant output directories
        in drain.PATH.
    """
    if workflow is None:
        # Scan the on-disk layout: PATH/<class>/<step>.
        return {
            os.path.join(drain.PATH, cls, step)
            for cls in os.listdir(drain.PATH)
            for step in os.listdir(os.path.join(drain.PATH, cls))
        }
    if leaf:
        steps = [s for s in workflow if s.target]
    else:
        steps = util.union(s.get_inputs() for s in workflow if s.target)
    return {s._output_dirname for s in steps}
def unset(ctx, key):
    """Removes the given key."""
    file = ctx.obj['FILE']
    quote = ctx.obj['QUOTE']
    success, key = unset_key(file, key, quote)
    if not success:
        exit(1)
    click.echo("Successfully removed %s" % key)
def all(self, paths, access=None, recursion=False):
    """Iterate over ``paths`` (files and/or directories).

    Removes duplicates and returns the sorted list of valid paths
    meeting the access criteria.
    """
    # Reset internal state before configuring this traversal.
    self.__init__()
    for attr, value in (('access', access), ('filetype', 'all'),
                        ('paths', paths), ('recursion', recursion)):
        setattr(self, attr, value)
    return _sorter(self._generator_other())
def _node_loop(stations, lags, stream, clip_level, i=0, mem_issue=False, instance=0, plot=False):
    """Internal function to allow for brightness to be paralleled.

    :type stations: list
    :param stations: List of stations to use.
    :type lags: numpy.ndarray
    :param lags: List of lags where lags[i[:]] are the lags for stations[i].
    :type stream: obspy.core.stream.Stream
    :param stream: Data stream to find the brightness for.
    :type clip_level: float
    :param clip_level: Upper limit for energy as a multiplier to the mean energy.
    :type i: int
    :param i: Index of loop for parallelisation.
    :type mem_issue: bool
    :param mem_issue: If True will write to disk rather than storing data in RAM.
    :type instance: int
    :param instance: instance for bulk parallelisation, only used if mem_issue=true.
    :type plot: bool
    :param plot: Turn plotting on or off, defaults to False.

    :returns: index
    :rtype: int
    :returns: network response (or the path it was saved to when mem_issue)
    :rtype: numpy.ndarray
    """
    import matplotlib.pyplot as plt
    # Set up some overhead for plotting
    energy_stream = Stream()
    # Using a stream as a handy container for
    # plotting
    for l, tr in enumerate(stream):
        j = [k for k in range(len(stations)) if stations[k] == tr.stats.station]
        # Check that there is only one matching station
        if len(j) > 1:
            warnings.warn('Too many stations')
            j = [j[0]]
        if len(j) == 0:
            warnings.warn('No station match')
            continue
        lag = lags[j[0]]
        # Shift the trace by its lag (pad with zeros, then drop the pad
        # length from the front) and square to get energy.
        lagged_energy = np.square(np.concatenate(
            (tr.data, np.zeros(int(round(lag * tr.stats.sampling_rate))))
        ))[int(round(lag * tr.stats.sampling_rate)):]
        # Clip energy
        lagged_energy = np.clip(lagged_energy, 0, clip_level * np.mean(lagged_energy))
        # First trace initializes the accumulator; later traces are stacked
        # onto it (hence the locals() check).
        if 'energy' not in locals():
            energy = (lagged_energy / _rms(lagged_energy)).reshape(1, len(lagged_energy))
            # Cope with zeros encountered
            energy = np.nan_to_num(energy)
            # This is now an array of floats - we can convert this to int16
            # normalize to have max at max of int16 range
            if not max(energy[0]) == 0.0:
                energy = (500 * (energy * (1 / max(energy[0])))).astype(np.int16)
            else:
                energy = energy.astype(np.int16)
        else:
            norm_energy = (lagged_energy / _rms(lagged_energy)).reshape(1, len(lagged_energy))
            norm_energy = np.nan_to_num(norm_energy)
            # Convert to int16
            if not max(norm_energy[0]) == 0.0:
                norm_energy = (500 * (norm_energy * (1 / max(norm_energy[0])))).astype(np.int16)
            else:
                norm_energy = norm_energy.astype(np.int16)
            # Apply lag to data and add it to energy - normalize the data here
            energy = np.concatenate((energy, norm_energy), axis=0)
        energy_stream += Trace(
            data=lagged_energy,
            header=Stats({'station': tr.stats.station,
                          'sampling_rate': tr.stats.sampling_rate}))
    # Stack the per-station rows into a single network response.
    energy = np.sum(energy, axis=0).reshape(1, len(lagged_energy))
    energy = energy.astype(np.uint16)
    # Convert any nans to zeros
    energy = np.nan_to_num(energy)
    if plot:
        fig, axes = plt.subplots(len(stream) + 1, 1, sharex=True)
        axes = axes.ravel()
        for lagged_energy, tr, axis in zip(energy_stream, stream, axes):
            axis.plot(lagged_energy * 200, 'r')
            axis.plot(tr.data, 'k')
        axes[-1].plot(energy[0])
        plt.subplots_adjust(hspace=0)
        plt.show()
    if not mem_issue:
        return i, energy
    else:
        # Write to disk instead of keeping the array in memory.
        np.save('tmp' + str(instance) + '/node_' + str(i), energy)
        return i, str('tmp' + str(instance) + '/node_' + str(i))
def sortedby(item_list, key_list, reverse=False):
    """Sort ``item_list`` by the parallel values in ``key_list``.

    Args:
        item_list (list): items to sort
        key_list (list): keys to sort by
        reverse (bool): descending order if True, ascending (default)
            otherwise

    Returns:
        list: the items ordered by their corresponding keys

    SeeAlso:
        sortedby2

    Example:
        >>> sortedby([1, 2, 3, 4, 5], [2, 5, 3, 1, 5], reverse=True)
        [5, 2, 3, 1, 4]
    """
    assert len(item_list) == len(key_list), (
        'Expected same len. Got: %r != %r' % (len(item_list), len(key_list)))
    pairs = sorted(zip(key_list, item_list), reverse=reverse)
    return [item for _, item in pairs]
def clone(self, name=None):
    """Clone this environment.

    :param name: new env name
    :rtype: Environment
    """
    payload = {'name': name} if name else {}
    resp = self._router.post_env_clone(env_id=self.environmentId, json=payload).json()
    return Environment(self.organization, id=resp['id']).init_router(self._router)
def connect(self, protocol=None):
    """Initialize DAP IO pins for JTAG or SWD."""
    # Map the requested protocol to a port enum; default when unspecified.
    if protocol is None:
        port = DAPAccess.PORT.DEFAULT
    else:
        port = self.PORT_MAP[protocol]
    try:
        self._link.connect(port)
    except DAPAccess.Error as exc:
        six.raise_from(self._convert_exception(exc), exc)
    # Record the mode the probe actually selected and drop stale caches.
    self._protocol = self.PORT_MAP[self._link.get_swj_mode()]
    self._invalidate_cached_registers()
def _calc_frames(stats):
    """Compute a (timings, callers) DataFrame summary of a Stats object."""
    timing_rows = []
    caller_rows = []
    for key, values in iteritems(stats.stats):
        # values[:-1] are the timing numbers; values[-1] maps caller -> stats.
        timing_rows.append(pd.Series(key + values[:-1], index=timing_colnames))
        for caller_key, caller_values in iteritems(values[-1]):
            caller_rows.append(pd.Series(key + caller_key + caller_values, index=caller_columns))
    timings_df = pd.DataFrame(timing_rows)
    callers_df = pd.DataFrame(caller_rows)
    timings_df['filename:funcname'] = timings_df['filename'] + ':' + timings_df['funcname']
    timings_df = timings_df.groupby('filename:funcname').sum()
    return timings_df, callers_df
def _modifier ( self , operator , params ) :
'''$ orderby : sorts the results of a query in ascending ( 1 ) or descending ( - 1 ) order .''' | if operator == '$orderby' :
order_types = { - 1 : 'DESC' , 1 : 'ASC' }
if not isinstance ( params , dict ) :
raise RuntimeError ( 'Incorrect parameter type, %s' % params )
return 'ORDER BY %s' % ',' . join ( [ "%s %s" % ( p , order_types [ params [ p ] ] ) for p in params ] )
else :
raise RuntimeError ( 'Unknown operator, %s' % operator ) |
def add_message_listener(self, name, fn):
    """Register ``fn`` to be called every time message ``name`` is received.

    .. tip::
        Prefer :py:func:`on_message` unless you need to
        :py:func:`remove the listener <remove_message_listener>` later.

    The callback function must have three arguments:

    * ``self`` - the current vehicle.
    * ``name`` - the name of the message that was intercepted.
    * ``message`` - the actual pymavlink message object.

    See :ref:`mavlink_messages` for more information.

    :param String name: The name of the message to be intercepted by the
        listener function (or '*' to get all messages).
    :param fn: The listener function that will be called if a message is received.
    """
    key = str(name)
    listeners = self._message_listeners.setdefault(key, [])
    # Register each function at most once per message name.
    if fn not in listeners:
        listeners.append(fn)
def selected_fields(self):
    """Obtain the field(s) selected by the user.

    :returns: List of field names in multi mode, a single field name in
        single mode, or an empty list when nothing is selected.
    :rtype: list, str
    """
    chosen = self.lstFields.selectedItems()
    if not chosen:
        return []
    if self.mode == MULTI_MODE:
        return [entry.text() for entry in chosen]
    if self.mode == SINGLE_MODE:
        return chosen[0].text()
    return []
def reset(self):
    """Reset the internal evaluation result to its initial state."""
    # Clear both the per-epoch and the global accumulators.
    self.num_inst = 0
    self.global_num_inst = 0
    self.sum_metric = 0.0
    self.global_sum_metric = 0.0
def get_content(self, params=None):
    """Return the raw byte content of this Filelink.

    ```python
    from filestack import Client
    client = Client('API_KEY')
    filelink = client.upload(filepath='/path/to/file/foo.jpg')
    byte_content = filelink.get_content()
    ```

    :param params: optional download parameters (validated against
        CONTENT_DOWNLOAD_SCHEMA).
    :returns: Bytes
    """
    if params:
        CONTENT_DOWNLOAD_SCHEMA.check(params)
    # Transform objects need their own URL passed through to the call.
    transform_url = self.url if isinstance(self, filestack.models.Transform) else None
    response = utils.make_call(CDN_URL, 'get', handle=self.handle, params=params,
                               security=self.security, transform_url=transform_url)
    return response.content
def distance_matrix(a, b, periodic):
    """Calculate a distance matrix between coordinate sets a and b."""
    # Give b a new axis so broadcasting yields every pairing (a_i, b_j).
    return periodic_distance(a, b[:, np.newaxis], periodic)
def turn_physical_on(self, ro=None, vo=None):
    """NAME:

       turn_physical_on

    PURPOSE:

       turn on automatic returning of outputs in physical units

    INPUT:

       ro= reference distance (kpc; can be Quantity)

       vo= reference velocity (km/s; can be Quantity)

    OUTPUT:

       (none)

    HISTORY:

       2016-01-19 - Written - Bovy (UofT)

    """
    self._roSet = True
    self._voSet = True
    # Idiom fix: ``x is not None`` instead of ``not x is None``.
    if ro is not None:
        # Convert an astropy Quantity to a plain value in kpc.
        if _APY_LOADED and isinstance(ro, units.Quantity):
            ro = ro.to(units.kpc).value
        self._ro = ro
    if vo is not None:
        # Convert an astropy Quantity to a plain value in km/s.
        if _APY_LOADED and isinstance(vo, units.Quantity):
            vo = vo.to(units.km / units.s).value
        self._vo = vo
    # Propagate the same settings to the wrapped orbit object.
    self._orb.turn_physical_on(ro=ro, vo=vo)
def get_postgresql_args(db_config, extra_args=None):
    """Returns an array of argument values that will be passed to a `psql` or
    `pg_dump` process when it is started based on the given database
    configuration.

    :param db_config: database settings dict (NAME, USER, HOST, PORT).
    :param extra_args: optional string of extra CLI arguments, split with
        shell rules and appended before the database name.
    :returns: list of argument strings; the database name comes last.
    """
    database_name = db_config['NAME']
    option_map = [
        ('--username={0}', db_config.get('USER')),
        ('--host={0}', db_config.get('HOST')),
        ('--port={0}', db_config.get('PORT')),
    ]
    args = apply_arg_values(option_map)
    if extra_args is not None:
        args += shlex.split(extra_args)
    # psql/pg_dump expect the database name after all options.
    args.append(database_name)
    return args
def mergeCatalogs(catalog_list):
    """Merge a list of Catalogs.

    Parameters:
        catalog_list : Non-empty list of Catalog objects whose data share
            the same columns.

    Returns:
        catalog : Combined Catalog object, using the configuration of the
            first input catalog.

    Raises:
        ValueError: If the list is empty or the catalogs' data columns
            differ.
    """
    if not catalog_list:
        # Guard: the original raised IndexError on catalog_list[0].
        raise ValueError("No catalogs to merge.")
    # Check the columns: all catalogs must share the first one's layout.
    first_names = catalog_list[0].data.dtype.names
    for c in catalog_list:
        if c.data.dtype.names != first_names:
            # ValueError (a subclass of Exception) instead of bare Exception.
            raise ValueError("Catalog data columns not the same.")
    data = np.concatenate([c.data for c in catalog_list])
    # Reuse the configuration of the first catalog for the merged result.
    config = catalog_list[0].config
    return Catalog(config, data=data)
def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2):
    """Generic chaining function for second derivative

    .. math::
        \\frac{d^{2}(f.g)}{dx^{2}} = \\frac{d^{2}f}{dg^{2}}(\\frac{dg}{dx})^{2} + \\frac{df}{dg}\\frac{d^{2}g}{dx^{2}}
    """
    # Identity inner function (dg/dx == 1, d2g/dx2 == 0): chain rule
    # collapses to the outer second derivative.
    if np.all(dg_dx == 1.) and np.all(d2g_dx2 == 0):
        return d2f_dg2
    # Clip before squaring to avoid overflow for very large gradients.
    clipped = np.clip(dg_dx, -np.inf, _lim_val_square)
    return d2f_dg2 * clipped ** 2 + df_dg * d2g_dx2
def available_files(url):
    """Extract and return urls for all available .tgz files."""
    response = requests.get(url)
    if response.status_code != 200:
        raise base.FailedDownloadException(
            'Failed to download data (status {}) from {}!'.format(
                response.status_code, url))
    # Collect every anchor target on the index page that points at a .tgz.
    anchor_re = re.compile(r'<a href="(.*?)">(.*?)</a>')
    return [os.path.join(url, href)
            for href, _label in anchor_re.findall(response.text)
            if href.endswith('.tgz')]
def describe_apis(name=None, description=None, region=None, key=None, keyid=None, profile=None):
    '''Returns all rest apis in the defined region. If optional parameter name is included,
    returns all rest apis matching the name in the defined region.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.describe_apis

        salt myminion boto_apigateway.describe_apis name='api name'

        salt myminion boto_apigateway.describe_apis name='api name' description='desc str'
    '''
    # An empty name matches every API; a non-empty one filters by name.
    return _find_apis_by_name(name or '', description=description,
                              region=region, key=key, keyid=keyid,
                              profile=profile)
def add_vlan_to_interface(self, interface, vlan_id):
    """Create a VLAN sub-interface on top of an existing interface.

    Equivalent shell command::

        ip link add link eth0 name eth0.10 type vlan id 10
    """
    vlan_id = '%s' % vlan_id
    subif = '%s.%s' % (interface, vlan_id)
    cmd = ['ip', 'link', 'add', 'link', interface,
           'name', subif, 'type', 'vlan', 'id', vlan_id]
    stdcode, stdout = agent_utils.execute(cmd, root=True)
    if stdcode != 0:
        # Command failed: surface the first line of output as the message.
        return agent_utils.make_response(code=stdcode, message=stdout.pop(0))
    return agent_utils.make_response(code=stdcode)
def _buildKnownDataSearchString(reg_key, reg_valueName, reg_vtype, reg_data, check_deleted=False):
    '''helper function similar to _processValueItem to build a search string for a
    known key/value/type/data

    :param reg_key: registry key path
    :param reg_valueName: registry value name
    :param reg_vtype: registry value type name, e.g. 'REG_SZ', 'REG_DWORD'
    :param reg_data: the value data to encode; ignored when check_deleted
    :param check_deleted: when True, build the ``**del.`` marker form used
        to search for a deleted value instead of the value itself
    :return: the encoded byte string to search for
    '''
    registry = Registry()
    this_element_value = None
    expected_string = b''
    # Text in the target format is UTF-16-LE; these are the field separator
    # and the string terminator in that encoding.
    encoded_semicolon = ';'.encode('utf-16-le')
    encoded_null = chr(0).encode('utf-16-le')
    if reg_key:
        reg_key = reg_key.encode('utf-16-le')
    if reg_valueName:
        reg_valueName = reg_valueName.encode('utf-16-le')
    if reg_data and not check_deleted:
        # Pack the payload according to the declared registry value type.
        if reg_vtype == 'REG_DWORD':
            this_element_value = struct.pack(b'I', int(reg_data))
        elif reg_vtype == "REG_QWORD":
            this_element_value = struct.pack(b'Q', int(reg_data))
        elif reg_vtype == 'REG_SZ':
            this_element_value = b''.join([reg_data.encode('utf-16-le'), encoded_null])
    if check_deleted:
        # Deleted values are recorded as REG_SZ entries whose value name is
        # prefixed with "**del."; the data is a single encoded space.
        reg_vtype = 'REG_SZ'
        expected_string = b''.join(['['.encode('utf-16-le'), reg_key, encoded_null, encoded_semicolon, '**del.'.encode('utf-16-le'), reg_valueName, encoded_null, encoded_semicolon, chr(registry.vtype[reg_vtype]).encode('utf-32-le'), encoded_semicolon, six.unichr(len(' {0}'.format(chr(0)).encode('utf-16-le'))).encode('utf-32-le'), encoded_semicolon, ' '.encode('utf-16-le'), encoded_null, ']'.encode('utf-16-le')])
    else:
        # Normal form: [key\0;value\0;type;length;data]
        expected_string = b''.join(['['.encode('utf-16-le'), reg_key, encoded_null, encoded_semicolon, reg_valueName, encoded_null, encoded_semicolon, chr(registry.vtype[reg_vtype]).encode('utf-32-le'), encoded_semicolon, six.unichr(len(this_element_value)).encode('utf-32-le'), encoded_semicolon, this_element_value, ']'.encode('utf-16-le')])
    return expected_string
def calc_contriarea_v1(self):
    """Determine the relative size of the contributing area of the whole
    subbasin.

    Required control parameters:
      |NmbZones|, |ZoneType|, |RespArea|, |FC|, |Beta|

    Required derived parameter:
      |RelSoilArea|

    Required state sequence:
      |SM|

    Calculated fluxes sequences:
      |ContriArea|

    Basic equation:
      :math:`ContriArea = \\left( \\frac{SM}{FC} \\right)^{Beta}`

    Only zones of type field and forest contribute.  When the response
    area option is disabled, the relative soil area is zero, or a zone's
    field capacity is zero, that contribution defaults to 100 %.
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    if not (con.resparea and der.relsoilarea > 0.):
        # Response area option off or no soil area: full contribution.
        flu.contriarea = 1.
        return
    flu.contriarea = 0.
    for k in range(con.nmbzones):
        # Glacier and lake zones never contribute.
        if con.zonetype[k] not in (FIELD, FOREST):
            continue
        if con.fc[k] > 0.:
            flu.contriarea += (der.relsoilzonearea[k] *
                               (sta.sm[k] / con.fc[k]) ** con.beta[k])
        else:
            # Zero field capacity: this zone contributes fully.
            flu.contriarea += der.relsoilzonearea[k]
def parse_number(d, key, regex, s):
    """Find a number using a given regular expression.

    If the number is found, sets it under the key in the given dictionary;
    otherwise the dictionary is left untouched.

    d - The dictionary that will contain the data.
    key - The key into the dictionary.
    regex - A string containing the regular expression.
    s - The string to search.
    """
    value = find_number(regex, s)
    if value is not None:
        d[key] = value
def minWidth(self):
    """Attempt to determine a minimum sensible width"""
    frags = self.frags
    if not frags:
        return 0
    if len(frags) == 1:
        frag = frags[0]
        font_size = frag.fontSize
        font_name = frag.fontName
        # Plain-text frags are split on spaces; otherwise use the
        # frag's pre-computed word list.
        if hasattr(frag, 'text'):
            words = split(frag.text, ' ')
        else:
            words = frag.words
        widths = (stringWidth(w, font_name, font_size) for w in words)
    else:
        # _getFragWords returns (width, ...) tuples; take the width.
        widths = (fw[0] for fw in _getFragWords(frags))
    return max(widths)
def config(commands=None, config_file=None, template_engine='jinja', context=None, defaults=None, saltenv='base', **kwargs):
    '''Configures the node with the specified commands.

    This method is used to send configuration commands to the node. It
    will take either a string or a list and prepend the necessary commands
    to put the session into config mode.

    Returns the diff after the configuration commands are loaded.

    config_file
        The source file with the configuration commands to be sent to the
        device.  The file can also be a template that can be rendered using
        the template engine of choice.  This can be specified using the
        absolute path to the file, or using one of the following URL
        schemes: ``salt://``, ``http://`` or ``https://``, ``ftp://``,
        ``s3://``, ``swift://``.

    commands
        The commands to send to the node in config mode. If the commands
        argument is a string it will be cast to a list.
        The list of commands will also be prepended with the necessary
        commands to put the session in config mode.

        .. note::

            This argument is ignored when ``config_file`` is specified.

    template_engine: ``jinja``
        The template engine to use when rendering the source file. Default:
        ``jinja``. To simply fetch the file without attempting to render,
        set this argument to ``None``.

    context
        Variables to add to the template context.

    defaults
        Default values of the ``context`` dict.

    saltenv: ``base``
        Salt fileserver environment used to fetch ``config_file``.

    Connection keyword arguments (``transport``, ``host``, ``username``,
    ``password``, ``port``, ``enablepwd``) configure the eAPI connection;
    none of them need to be specified when running in a
    :mod:`pyeapi <salt.proxy.arista_pyeapi>` Proxy Minion.

    CLI Example:

    .. code-block:: bash

        salt '*' pyeapi.config commands="['ntp server 1.2.3.4', 'ntp server 5.6.7.8']"
        salt '*' pyeapi.config config_file=salt://config.txt
        salt '*' pyeapi.config config_file=https://bit.ly/2LGLcDy context="{'servers': ['1.2.3.4']}"
    '''
    # Snapshot the running config so we can diff after the change.
    initial_config = get_config(as_string=True, **kwargs)
    if config_file:
        file_str = __salt__['cp.get_file_str'](config_file, saltenv=saltenv)
        if file_str is False:
            raise CommandExecutionError('Source file {} not found'.format(config_file))
        log.debug('Fetched from %s', config_file)
        log.debug(file_str)
    elif commands:
        if isinstance(commands, (six.string_types, six.text_type)):
            commands = [commands]
        # unify all the commands in a single file, to render them in a go
        file_str = '\n'.join(commands)
    else:
        # BUG FIX: previously ``file_str`` was left unbound here and the
        # function died with a NameError; fail with a clear error instead.
        raise CommandExecutionError('Either "commands" or "config_file" must be specified')
    if template_engine:
        file_str = __salt__['file.apply_template_on_contents'](file_str, template_engine, context, defaults, saltenv)
        log.debug('Rendered:')
        log.debug(file_str)
    # whatever the source of the commands would be, split them line by line,
    # removing empty lines
    commands = [line for line in file_str.splitlines() if line.strip()]
    call('config', commands, **kwargs)
    current_config = get_config(as_string=True, **kwargs)
    # Skip the first four header lines (timestamps etc.) before diffing.
    diff = difflib.unified_diff(initial_config.splitlines(1)[4:], current_config.splitlines(1)[4:])
    return ''.join([x.replace('\r', '') for x in diff])
def move(self, delta):
    """Move the node.

    Args:
        delta (tuple): A tuple, holding the (dx, dy) adjustment of the
            position.
    """
    dx, dy = delta[0], delta[1]
    px, py = self.pos[0], self.pos[1]
    self.pos = (px + dx, py + dy)
def evaluate(self, batchsize):
    """Evaluate how well the classifier is doing. Return mean loss and mean accuracy"""
    total_loss = 0
    total_accuracy = 0
    for start in range(0, self.testsize, batchsize):
        x = Variable(self.x_test[start:start + batchsize])
        y = Variable(self.y_test[start:start + batchsize])
        loss = self.model(x, y)
        # Weight each batch's metrics by the batch size so the division
        # below yields per-sample means.
        total_loss += loss.data * batchsize
        total_accuracy += self.model.accuracy.data * batchsize
    return total_loss / self.testsize, total_accuracy / self.testsize
def cleanup_tmpdir(dirname):
    """Removes the given temporary directory if it exists.

    Safe to call with ``None`` or with a path that has already been
    removed (the call is then a no-op).
    """
    import errno
    if dirname is None:
        return
    # EAFP: removes the TOCTOU race between an existence check and the
    # actual removal; a directory that is already gone is ignored.
    try:
        shutil.rmtree(dirname)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
def includeme(config):
    """Add pyramid_webpack methods and config to the app"""
    settings = config.registry.settings
    package_name = config.root_package.__name__
    # The DEFAULT state always exists; extra ones come from webpack.configs.
    states = {'DEFAULT': WebpackState(settings, package_name)}
    for extra_name in aslist(settings.get('webpack.configs', [])):
        states[extra_name] = WebpackState(settings, package_name, name=extra_name)
    config.registry.webpack = states
    # Set up any static views
    for state in six.itervalues(config.registry.webpack):
        if state.static_view:
            config.add_static_view(name=state.static_view_name,
                                   path=state.static_view_path,
                                   cache_max_age=state.cache_max_age)
    config.add_request_method(get_webpack, 'webpack')
def update_aliases(self, body, params=None):
    """Update specified aliases.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_

    :arg body: The definition of `actions` to perform
    :arg master_timeout: Specify timeout for connection to master
    :arg request_timeout: Request timeout
    :raises ValueError: if *body* is empty or ``None``
    """
    # The _aliases endpoint requires a body; reject empty values up front.
    if body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'body'.")
    return self.transport.perform_request("POST", "/_aliases", params=params, body=body)
def copy(self):
    """Create a copy of the animation."""
    duplicate = AnimationList()
    duplicate.set_frame_rate(self.frame_rate)
    # Copy the internal coordinate and flip state directly.
    duplicate.__coords = self.__coords
    duplicate.__horizontal_flip = self.__horizontal_flip
    duplicate.__vertical_flip = self.__vertical_flip
    duplicate.should_repeat = self.should_repeat
    duplicate.draw_order = self.draw_order
    duplicate.update_order = self.update_order
    # Rebuild every frame as a fresh Sprite sharing the same texture.
    for frame in self.images:
        sprite = Sprite()
        sprite.coords = frame.coords
        sprite.apply_texture(frame.image)
        duplicate.images.append(sprite)
    return duplicate
async def AddPortMapping(NewRemoteHost: str, NewExternalPort: int, NewProtocol: str, NewInternalPort: int, NewInternalClient: str, NewEnabled: int, NewPortMappingDescription: str, NewLeaseDuration: str) -> None:
    """Returns None

    NOTE(review): unimplemented service-action stub (the parameter names
    follow the UPnP ``AddPortMapping`` action — confirm); the concrete
    implementation is expected to be supplied elsewhere.
    """
    raise NotImplementedError()
def set_master_logging_params(self, enable=None, dedicate_thread=None, buffer=None, sock_stream=None, sock_stream_requests_only=None):
    """Sets logging params for delegating logging to master process.

    :param bool enable: Delegate logging to master process.
        Delegate the write of the logs to the master process
        (this will put all of the logging I/O to a single process).
        Useful for system with advanced I/O schedulers/elevators.

    :param bool dedicate_thread: Delegate log writing to a thread.
        As error situations could cause the master to block while writing
        a log line to a remote server, it may be a good idea to use this
        option and delegate writes to a secondary thread.

    :param int buffer: Set the buffer size for the master logger in bytes.
        Bigger log messages will be truncated.

    :param bool|tuple sock_stream: Create the master logpipe as SOCK_STREAM.

    :param bool|tuple sock_stream_requests_only: Create the master requests
        logpipe as SOCK_STREAM.

    :return: the configuration section, to allow chaining.
    """
    self._set('log-master', enable, cast=bool)
    self._set('threaded-logger', dedicate_thread, cast=bool)
    self._set('log-master-bufsize', buffer)
    self._set('log-master-stream', sock_stream, cast=bool)
    # NOTE(review): unlike the options above, this one is only set when
    # truthy — confirm the asymmetry is intended.
    if sock_stream_requests_only:
        self._set('log-master-req-stream', sock_stream_requests_only, cast=bool)
    return self._section
def get_bibtex(arxiv_id):
    """Get a BibTeX entry for a given arXiv ID.

    .. note::

        Using awesome https://pypi.python.org/pypi/arxiv2bib/ module.

    :param arxiv_id: The canonical arXiv id to get BibTeX from.
    :returns: A BibTeX string or ``None`` when the lookup fails or only
        error placeholders are returned.
    """
    # Fetch bibtex using arxiv2bib module; treat network errors as "no
    # entries found".
    try:
        entries = arxiv2bib.arxiv2bib([arxiv_id])
    except HTTPError:
        entries = []
    for entry in entries:
        # Skip error placeholders and return the first real record.
        if not isinstance(entry, arxiv2bib.ReferenceErrorInfo):
            return entry.bibtex()
    # An error occurred, return None
    return None
def add_filter(self, filter):
    """Add filter to property.

    :param filter: object, extending from AbstractFilter
    :return: self, to allow chaining (the original docstring incorrectly
        said ``None``)
    :raises InvalidFilter: if *filter* is not an AbstractFilter instance
    """
    if not isinstance(filter, AbstractFilter):
        err = 'Filters must be of type {}'.format(AbstractFilter)
        raise InvalidFilter(err)
    # Avoid registering the same filter twice.
    if filter not in self.filters:
        self.filters.append(filter)
    return self
def get_diff(value1, value2, name1, name2):
    """Get a diff between two strings.

    Args:
        value1 (str): First string to be compared.
        value2 (str): Second string to be compared.
        name1 (str): Name of the first string.
        name2 (str): Name of the second string.

    Returns:
        str: The full diff.
    """
    def _as_lines(text):
        # context_diff expects newline-terminated lines.
        return [line + "\n" for line in text.splitlines()]

    diff_lines = difflib.context_diff(_as_lines(value1), _as_lines(value2),
                                      fromfile=name1, tofile=name2)
    return "".join(diff_lines)
def info_file(distro=None):
    """Return default distroinfo info file"""
    # Fall back to the configured default distro.
    distro = distro or cfg['DISTRO']
    option = distro.upper() + 'INFO_FILE'
    try:
        return cfg[option]
    except KeyError:
        raise exception.InvalidUsage(
            why="Couldn't find config option %s for distro: %s" % (option, distro))
def download_file(self, fname, output_dir):
    """Downloads competition file to output_dir."""
    # pylint: disable=unsupported-membership-test
    if fname not in self.competition_files:
        raise ValueError("%s is not one of the competition's "
                         "files: %s" % (fname, self.competition_files))
    command = ["kaggle", "competitions", "download",
               "--file", fname,
               "--path", output_dir,
               "-c", self._competition_name, ]
    _run_kaggle_command(command, self._competition_name)
    # The kaggle CLI writes the file into output_dir under its own name.
    return os.path.join(output_dir, fname)
def plugged_usbs(multiple=True) -> 'map | dict':
    """Gets the plugged-in USB Flash drives (pen-drives).

    If multiple is true, it returns a map, and a dict otherwise.
    If multiple is false, this method will raise a :class:`.NoUSBFound` if no USB is found.
    """
    class FindPenDrives(object):
        # Custom matcher passed to usb.core.find().
        # From https://github.com/pyusb/pyusb/blob/master/docs/tutorial.rst
        def __init__(self, class_):
            self._class = class_

        def __call__(self, device):
            # first, let's check the device
            if device.bDeviceClass == self._class:
                return True
            # ok, transverse all devices to find an
            # interface that matches our class
            for cfg in device:
                # find_descriptor: what's it?
                intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class)
                if intf is not None:
                    try:
                        product = intf.device.product.lower()
                    except ValueError as e:
                        # pyusb raises ValueError mentioning "langid" when
                        # the string descriptors cannot be read (usually a
                        # permissions problem).
                        if 'langid' in str(e):
                            raise OSError('Cannot get "langid". Do you have permissions?')
                        else:
                            raise e
                    # Exclude card readers ("crw"/"reader" in the product name).
                    if 'crw' not in product and 'reader' not in product:
                        return True
            return False

    def get_pendrive(pen: usb.Device) -> dict:
        # Fall back to the numeric ids when the string descriptors are empty.
        manufacturer = pen.manufacturer.strip() or str(pen.idVendor)
        model = pen.product.strip() or str(pen.idProduct)
        serial_number = pen.serial_number.strip()
        hid = Naming.hid(manufacturer, serial_number, model)
        return {'_id': hid,  # Make live easier to DeviceHubClient by using _id
                'hid': hid, '@type': 'USBFlashDrive', 'serialNumber': serial_number, 'model': model, 'manufacturer': manufacturer, 'vendorId': pen.idVendor, 'productId': pen.idProduct}

    result = usb.core.find(find_all=multiple, custom_match=FindPenDrives(CLASS_MASS_STORAGE))
    if multiple:
        # find_all=True yields an iterator of devices; map them lazily.
        return map(get_pendrive, result)
    else:
        if not result:
            raise NoUSBFound()
        return get_pendrive(result)
def load(self):
    """Return the current load.

    The load is represented as a float, where 1.0 represents having
    hit one of the flow control limits, and values between 0.0 and 1.0
    represent how close we are to them. (0.5 means we have exactly half
    of what the flow control setting allows, for example.)

    There are (currently) two flow control settings; this property
    computes how close the manager is to each of them, and returns
    whichever value is higher. (It does not matter that we have lots of
    running room on setting A if setting B is over.)

    Returns:
        float: The load value.
    """
    leaser = self._leaser
    if leaser is None:
        # Nothing leased yet, so the load is zero.
        return 0
    message_load = leaser.message_count / self._flow_control.max_messages
    byte_load = leaser.bytes / self._flow_control.max_bytes
    return max(message_load, byte_load)
def filter(self, criteria: Q, offset: int = 0, limit: int = 10, order_by: list = ()):
    """Read the repository and return results as per the filter.

    :param criteria: ``Q`` criteria tree; when it has children the stored
        records are filtered with it, otherwise all records are returned.
    :param offset: index of the first item of the returned page.
    :param limit: maximum number of items in the returned page.
    :param order_by: iterable of key names; prefix a key with ``-`` for
        descending order.
    :return: a ``ResultSet`` carrying pagination info and the page items.
    """
    if criteria.children:
        items = list(self._filter(criteria, self.conn['data'][self.schema_name]).values())
    else:
        items = list(self.conn['data'][self.schema_name].values())
    # Sort the filtered results based on the order_by clause
    # NOTE(review): the keys are applied sequentially with stable sorts, so
    # the *last* key in order_by ends up as the primary sort key — confirm
    # this precedence is what callers expect.
    for o_key in order_by:
        reverse = False
        if o_key.startswith('-'):
            reverse = True
            o_key = o_key[1:]
        items = sorted(items, key=itemgetter(o_key), reverse=reverse)
    result = ResultSet(offset=offset, limit=limit, total=len(items), items=items[offset: offset + limit])
    return result
def gossip_bind(self, format, *args):
    """Set-up gossip discovery of other nodes. At least one node in the cluster
    must bind to a well-known gossip endpoint, so other nodes can connect to
    it. Note that gossip endpoints are completely distinct from Zyre node
    endpoints, and should not overlap (they can use the same transport).

    :param format: printf-style endpoint format string passed to the C API.
    :param args: values interpolated into *format* by the native library.
    """
    # Thin ctypes binding: delegate directly to the native zyre call.
    return lib.zyre_gossip_bind(self._as_parameter_, format, *args)
def validate_metadata(self, xml):
    """Validates an XML SP Metadata.

    :param xml: Metadata's XML that will be validated
    :type xml: string

    :returns: The list of found errors
    :rtype: list
    """
    assert isinstance(xml, basestring)
    if len(xml) == 0:
        raise Exception('Empty string supplied as input')
    errors = []
    res = OneLogin_Saml2_Utils.validate_xml(xml, 'saml-schema-metadata-2.0.xsd', self.__debug)
    if not isinstance(res, Document):
        # validate_xml returned an error code instead of a parsed document.
        errors.append(res)
    else:
        dom = res
        element = dom.documentElement
        # BUG FIX: the original used ``not in``, which performs a substring
        # test against 'md:EntityDescriptor'; an exact tag comparison is
        # the intended check.
        if element.tagName != 'md:EntityDescriptor':
            errors.append('noEntityDescriptor_xml')
        else:
            # Exactly one SPSSODescriptor must be present.
            if len(element.getElementsByTagName('md:SPSSODescriptor')) != 1:
                errors.append('onlySPSSODescriptor_allowed_xml')
            else:
                valid_until = cache_duration = expire_time = None
                if element.hasAttribute('validUntil'):
                    valid_until = OneLogin_Saml2_Utils.parse_SAML_to_time(element.getAttribute('validUntil'))
                if element.hasAttribute('cacheDuration'):
                    cache_duration = element.getAttribute('cacheDuration')
                expire_time = OneLogin_Saml2_Utils.get_expire_time(cache_duration, valid_until)
                if expire_time is not None and int(time()) > int(expire_time):
                    errors.append('expired_xml')
                # TODO: Validate Sign
    return errors
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.