signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def log(cls, message):
    """Emit an informational message when verbosity is enabled.

    The message is prefixed with ``[INFO]`` and routed through
    ``cls.echo``; nothing is emitted when ``cls.verbose`` is zero.
    """
    if cls.verbose <= 0:
        return
    cls.echo('[INFO] %s' % message)
|
def file_can_be_written(path):
    """Return ``True`` if a file can be written at the given ``path``.

    :param string path: the file path
    :rtype: bool

    .. warning:: This function will attempt to open the given ``path``
       in write mode, possibly destroying the file previously existing
       there.

    .. versionadded:: 1.4.0
    """
    if path is None:
        return False
    try:
        # Opening in "wb" is the actual writability probe; the temporary
        # file is removed immediately afterwards.
        with io.open(path, "wb"):
            pass
        delete_file(None, path)
        return True
    except (IOError, OSError):
        return False
|
def filter_values(cls, part_info):  # type: (Type[T], PartInfo) -> List[T]
    """Collect every ``Info`` instance of our class from ``part_info``.

    Args:
        part_info (dict): ``{part_name: [Info] or None}`` as returned from
            ``Controller.run_hook()``

    Returns:
        list: ``[info]`` where each info is an instance of (a subclass of)
        ``cls``
    """
    matching = cls.filter_parts(part_info)
    # Flatten the per-part lists into a single list.
    return [info for info_list in matching.values() for info in info_list]
|
def pendingTasks(self, *args, **kwargs):
    """Get Number of Pending Tasks.

    Get an approximate number of pending tasks for the given
    ``provisionerId`` and ``workerType``. The underlying Azure Storage
    Queues only promise an estimate, and the result is cached in memory
    for 20 seconds, so consumers should by no means expect this to be an
    accurate number. It is, however, a solid estimate of the number of
    pending tasks.

    This method gives output: ``v1/pending-tasks-response.json#``

    This method is ``stable``.
    """
    endpoint = self.funcinfo["pendingTasks"]
    return self._makeApiCall(endpoint, *args, **kwargs)
|
def get_consistent_resource(self):
    """Fetch a fresh copy of this refund from the API.

    :return: a refund that you can trust.
    :rtype: Refund
    """
    url = routes.url(routes.REFUND_RESOURCE,
                     resource_id=self.id,
                     payment_id=self.payment_id)
    payload, _ = HttpClient().get(url)
    return Refund(**payload)
|
def prefix2ns(self, prefix: YangIdentifier, mid: ModuleId) -> YangIdentifier:
    """Return the namespace corresponding to a prefix.

    Args:
        prefix: Prefix associated with a module and its namespace.
        mid: Identifier of the module in which the prefix is declared.

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
        UnknownPrefix: If `prefix` is not declared.
    """
    try:
        module_data = self.modules[mid]
    except KeyError:
        raise ModuleNotRegistered(*mid) from None
    try:
        namespace_entry = module_data.prefix_map[prefix]
    except KeyError:
        raise UnknownPrefix(prefix, mid) from None
    # prefix_map values are (namespace, revision)-style tuples; the
    # namespace is the first component.
    return namespace_entry[0]
|
def assert_subset(self, subset, superset,
                  failure_message='Expected collection "{}" to be a subset of "{}"'):
    """Assert that ``superset`` contains all elements of ``subset``.

    :param subset: collection expected to be contained in ``superset``
    :param superset: collection expected to contain ``subset``
    :param failure_message: format string that receives ``subset`` first
        and ``superset`` second.
    """
    assertion = lambda: set(subset).issubset(set(superset))
    # Fixed: interpolate subset first, superset second, to match the
    # message text (the original passed them reversed), and the default
    # message now has its closing quote.
    failure_message = unicode(failure_message).format(subset, superset)
    self.webdriver_assert(assertion, failure_message)
|
def set_from_file(self,
                  filename: str,
                  is_padded: bool = True,
                  oov_token: str = DEFAULT_OOV_TOKEN,
                  namespace: str = "tokens"):
    """Load the vocabulary for ``namespace`` from a plain-text file.

    If you already have a vocabulary file for a trained model somewhere,
    and you really want to use that vocabulary file instead of just
    setting the vocabulary from a dataset, you can do that with this
    method. You must specify the namespace to use, and we assume that you
    want to use padding and OOV tokens for this.

    Parameters
    ----------
    filename : ``str``
        The file containing the vocabulary to load, formatted as one token
        per line with nothing else in the line. The index assigned to a
        token is its line number (1-indexed if ``is_padded``, 0-indexed
        otherwise). The file should contain the OOV token string.
    is_padded : ``bool``, optional (default=True)
        Whether this vocabulary is padded. For token/word/character
        vocabularies this should be ``True``; for tag or label
        vocabularies typically ``False``. If ``True``, a padding token
        gets index 0 and the ``oov_token`` must be present in the file.
    oov_token : ``str``, optional (default=DEFAULT_OOV_TOKEN)
        The token this vocabulary uses to represent out-of-vocabulary
        entries. When found, it is replaced with ``self._oov_token``
        because only one OOV token is used across namespaces.
    namespace : ``str``, optional (default="tokens")
        The namespace to overwrite with this vocab file.
    """
    if is_padded:
        self._token_to_index[namespace] = {self._padding_token: 0}
        self._index_to_token[namespace] = {0: self._padding_token}
    else:
        self._token_to_index[namespace] = {}
        self._index_to_token[namespace] = {}
    token_to_index = self._token_to_index[namespace]
    index_to_token = self._index_to_token[namespace]

    with codecs.open(filename, 'r', 'utf-8') as input_file:
        lines = input_file.read().split('\n')
    # Be flexible about the file having a final newline or not.
    if lines and lines[-1] == '':
        lines = lines[:-1]

    for line_number, line in enumerate(lines):
        index = line_number + 1 if is_padded else line_number
        # Newlines inside tokens are stored escaped in the file.
        token = line.replace('@@NEWLINE@@', '\n')
        if token == oov_token:
            token = self._oov_token
        token_to_index[token] = index
        index_to_token[index] = token

    if is_padded:
        assert self._oov_token in token_to_index, "OOV token not found!"
|
def search(self, search_term, column="title", number_results=25):
    """Search the selected mirror and return parsed book entries.

    Loads as many result pages (25 results each) as needed to satisfy
    ``number_results``, capped by the number of results actually
    available on the mirror.

    :param search_term: text to search for
    :param column: column to search in (e.g. ``"title"``)
    :param number_results: maximum number of books to return
    :return: list of parsed books (at most ``number_results``)
    """
    request = {"req": search_term, "column": column}
    self.__choose_mirror()

    def build_url(params):
        # urlencode moved to urllib.parse in Python 3; support both.
        if sys.version_info[0] < 3:
            query = urllib.urlencode(params)
        else:
            query = urllib.parse.urlencode(params)
        return self.__selected_mirror + "/search.php?" + query

    # First request is only used to read the total hit count.
    self.grabber.go(build_url(request))
    search_result = []
    nbooks = re.search(r'([0-9]*) (books|files)',
                       self.grabber.doc.select("/html/body/table[2]/tr/td[1]/font").text())
    nbooks = int(nbooks.group(1))

    # Pages needed to be loaded, capped at the pages actually available.
    pages_to_load = int(math.ceil(number_results / 25.0))
    pages_available = int(math.ceil(nbooks / 25.0))
    if pages_to_load > pages_available:
        pages_to_load = pages_available

    for page in range(1, pages_to_load + 1):
        # Fixed: was `>` -- with `>` an extra page was fetched when
        # exactly number_results had already been collected.
        if len(search_result) >= number_results:
            break
        request.update({"page": page})
        self.grabber.go(build_url(request))
        search_result += self.__parse_books()
        if page != pages_to_load:
            # Random delay because if you ask for a lot of pages, your IP
            # might get blocked.
            time.sleep(random.randint(250, 1000) / 1000.0)
    return search_result[:number_results]
|
async def open_wallet_search(wallet_handle: int,
                             type_: str,
                             query_json: str,
                             options_json: str) -> int:
    """Search for wallet records.

    :param wallet_handle: wallet handle (created by open_wallet).
    :param type_: allows to separate different record types collections
    :param query_json: MongoDB style query to wallet record tags::

        "tagName": "tagValue",
        $or: {
            "tagName2": {$regex: 'pattern'},
            "tagName3": {$gte: '123'},
        }

    :param options_json: // TODO: FIXME: Think about replacing by bitmask
        retrieveRecords: (optional, true by default) If false only "counts"
            will be calculated,
        retrieveTotalCount: (optional, false by default) Calculate total count,
        retrieveType: (optional, false by default) Retrieve record type,
        retrieveValue: (optional, true by default) Retrieve record value,
        retrieveTags: (optional, true by default) Retrieve record tags,
    :return: search_handle: Wallet search handle that can be used later to
        fetch records by small batches (with fetch_wallet_search_next_records)
    """
    logger = logging.getLogger(__name__)
    logger.debug("open_wallet_search: >>> wallet_handle: %r, type_: %r, query_json: %r, options_json: %r",
                 wallet_handle, type_, query_json, options_json)

    # The C callback is created once and cached on the function object.
    if not hasattr(open_wallet_search, "cb"):
        logger.debug("open_wallet_search: Creating callback")
        open_wallet_search.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32))

    res = await do_call('indy_open_wallet_search',
                        c_int32(wallet_handle),
                        c_char_p(type_.encode('utf-8')),
                        c_char_p(query_json.encode('utf-8')),
                        c_char_p(options_json.encode('utf-8')),
                        open_wallet_search.cb)

    logger.debug("open_wallet_search: <<< res: %r", res)
    return res
|
def send(self):
    """Send the object to the watch.

    Blocks until completion, or raises :exc:`.PutBytesError` on failure.
    During transmission, a "progress" event will be periodically emitted
    with the signature ``(sent_this_interval, sent_so_far,
    total_object_size)``.
    """
    # Prepare the watch to receive, transfer the payload, then commit and
    # finally install it -- all keyed by the same transfer cookie.
    cookie = self._prepare()
    self._send_object(cookie)
    self._commit(cookie)
    self._install(cookie)
|
def _calculate_cloud_ice_perc(self):
    """Return the percentage of pixels that are either cloud or snow with
    high confidence (>67%), computed from the scene's QA band."""
    self.output('Calculating cloud and snow coverage from QA band', normal=True, arrow=True)
    qa_band = rasterio.open(join(self.scene_path, self._get_full_filename('QA'))).read_band(1)

    # Bit patterns tested against the QA band: the two high bits for cloud
    # confidence, bits 11-10 for snow/ice confidence, and bit 0 for fill.
    cloud_high_conf = int('1100000000000000', 2)
    snow_high_conf = int('0000110000000000', 2)
    fill_pixels = int('0000000000000001', 2)

    cloud_mask = numpy.bitwise_and(qa_band, cloud_high_conf) == cloud_high_conf
    snow_mask = numpy.bitwise_and(qa_band, snow_high_conf) == snow_high_conf
    fill_mask = numpy.bitwise_and(qa_band, fill_pixels) == fill_pixels

    # Share of cloud/snow pixels among all non-fill pixels, in percent.
    valid_pixel_count = qa_band.size - numpy.sum(fill_mask)
    perc = numpy.true_divide(numpy.sum(cloud_mask | snow_mask), valid_pixel_count) * 100.0

    self.output('cloud/snow coverage: %s' % round(perc, 2), indent=1, normal=True, color='green')
    return perc
|
def get_auth_url(self, s_pappid, order_id, money, timestamp, source, ticket, auth_type, redirect_url=None):
    """Get the invoice authorization page URL.

    For details see
    https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2

    :param s_pappid: identifier of the invoicing platform on WeChat;
        obtain it from the invoicing platform
    :param order_id: order id, unique per invoicing request within the
        merchant
    :param money: order amount, in cents
    :type money: int
    :param timestamp: Unix timestamp
    :type timestamp: int
    :param source: invoicing source -- ``app``: in-app, ``web``: WeChat
        H5, ``wap``: plain web page
    :param ticket: obtained via the "get authorization ticket" endpoint
    :param auth_type: authorization type -- 0: invoice authorization,
        1: invoice authorization with user-filled fields, 2: collect
        invoice authorization
    :type auth_type: int
    :param redirect_url: page to redirect to after successful
        authorization; only required when ``source`` is H5 (``web``)
    :return: the authorization page URL
    """
    if source not in {'app', 'web', 'wap'}:
        raise ValueError('Unsupported source. Valid sources are "app", "web" or "wap"')
    if source == 'web' and redirect_url is None:
        raise ValueError('redirect_url is required if source is web')
    if not (0 <= auth_type <= 2):
        raise ValueError('Unsupported auth type. Valid auth types are 0, 1 or 2')

    payload = {
        's_pappid': s_pappid,
        'order_id': order_id,
        'money': money,
        'timestamp': timestamp,
        'source': source,
        'ticket': ticket,
        'type': auth_type,
        'redirect_url': redirect_url,
    }
    return self._post('getauthurl',
                      data=payload,
                      result_processor=lambda x: x['auth_url'])
|
def _RunInTransaction(self, function, readonly=False):
    """Runs function within a transaction.

    Allocates a connection, begins a transaction on it and passes the
    connection to function.

    If function finishes without raising, the transaction is committed.
    If function raises, the transaction will be rolled back; if a
    retryable database error is raised, the operation may be repeated.

    Args:
        function: A function to be run, must accept a single
            MySQLdb.connection parameter.
        readonly: Indicates that only a readonly (snapshot) transaction is
            required.

    Returns:
        The value returned by the last call to function.

    Raises:
        Any exception raised by function.
    """
    start_query = "START TRANSACTION;"
    if readonly:
        start_query = "START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY;"

    for retry_count in range(_MAX_RETRY_COUNT):
        with contextlib.closing(self.pool.get()) as connection:
            try:
                with contextlib.closing(connection.cursor()) as cursor:
                    cursor.execute(start_query)
                ret = function(connection)
                if not readonly:
                    connection.commit()
                return ret
            except MySQLdb.OperationalError as e:
                connection.rollback()
                # Re-raise if this was the last attempt or the error is not
                # retryable. Fixed off-by-one: retry_count is 0-based, so
                # the last attempt is _MAX_RETRY_COUNT - 1; the original
                # compared against _MAX_RETRY_COUNT, which was never
                # reached, so the real error from the final attempt was
                # swallowed and the generic Exception below raised instead.
                if retry_count >= _MAX_RETRY_COUNT - 1 or not _IsRetryable(e):
                    raise
                # Simple delay, with jitter and exponential backoff.
                # TODO(user): Move to something more elegant, e.g. integrate
                # a general retry or backoff library.
                time.sleep(random.uniform(1.0, 2.0) * math.pow(1.5, retry_count))
    # Unreachable now that the final attempt always re-raises; kept as a
    # safety net.
    raise Exception("Looped ended early - last exception swallowed.")
|
def _create(self):
    """Create a new, empty database."""
    from .tools import makedirs_safe

    # Make sure the directory that will hold the database file exists.
    makedirs_safe(os.path.dirname(self._database))
    # Create every table declared on the ORM base metadata.
    Base.metadata.create_all(self._engine)
    logger.debug("Created new empty database '%s'" % self._database)
|
def get_release_definition(self, project, definition_id, property_filters=None):
    """GetReleaseDefinition.

    [Preview API] Get a release definition.

    :param str project: Project ID or project name
    :param int definition_id: Id of the release definition.
    :param [str] property_filters: A comma-delimited list of extended
        properties to be retrieved. If set, the returned Release Definition
        will contain values for the specified property Ids (if they exist).
        If not set, properties will not be included.
    :rtype: :class:`<ReleaseDefinition> <azure.devops.v5_1.release.models.ReleaseDefinition>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if definition_id is not None:
        route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')

    query_parameters = {}
    if property_filters is not None:
        joined_filters = ",".join(property_filters)
        query_parameters['propertyFilters'] = self._serialize.query('property_filters', joined_filters, 'str')

    response = self._send(http_method='GET',
                          location_id='d8f96f24-8ea7-4cb6-baab-2df8fc515665',
                          version='5.1-preview.3',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('ReleaseDefinition', response)
|
def _get_data(self):
    """Process the IGRA2 text file for observations at site_id matching time.

    Return:
        :class:`pandas.DataFrame` containing the body data, and
        :class:`pandas.DataFrame` containing the header data.
    """
    body, header, dates_long, dates = self._get_data_raw()
    fwf_params = self._get_fwf_params()

    # Body rows carry one date per observation line.
    df_body = pd.read_fwf(StringIO(body), **fwf_params['body'])
    df_body['date'] = dates_long
    df_body = self._clean_body_df(df_body)

    # Header rows carry one date per sounding.
    df_header = pd.read_fwf(StringIO(header), **fwf_params['header'])
    df_header = self._clean_header_df(df_header)
    df_header['date'] = dates

    return df_body, df_header
|
def retrieve(self, state=None, favorite=None, tag=None, contentType=None, sort=None, detailType=None, search=None, domain=None, since=None, count=None, offset=None):
    """Retrieve the list of your articles.

    See: https://getpocket.com/developer/docs/v3/retrieve

    :param state: filter by state
    :param favorite: only fetch favorite
    :param tag: filter by tag or _untagged_
    :param contentType: get article, video or image
    :param sort: sort by provided value
    :param detailType: defines the response details to return
    :param search: search term
    :param domain: search domain
    :param since: search modified since unix timestamp
    :param count: the number of required items
    :param offset: the position to start results from
    :return: A dictionary containing the response result
    :rtype: dict
    """
    # NOTE(review): none of the filter parameters above are explicitly
    # forwarded here -- only the endpoint name 'get' is passed. Either
    # _make_request collects them by frame/locals inspection, or every
    # call retrieves with default filters. Confirm _make_request's
    # behavior before relying on the filters.
    return self._make_request('get')
|
def get_many(self, keys):
    """Fetch a bunch of keys from the cache.

    For certain backends (memcached, pgsql) this can be *much* faster when
    fetching multiple values.

    Return a dict mapping each key in ``keys`` to its value. If the given
    key is missing, it will be missing from the response dict.
    """
    # Lazily pair each key with its cached value, keeping only hits.
    fetched = ((key, self.get(key)) for key in keys)
    return {key: value for key, value in fetched if value is not None}
|
def apool(self, k_height, k_width, d_height=2, d_width=2, mode="VALID", input_layer=None, num_channels_in=None):
    """Construct an average pooling layer.

    Delegates to the shared ``_pool`` helper with the average-pooling
    implementation and the given kernel/stride geometry.
    """
    return self._pool("apool",
                      pooling_layers.average_pooling2d,
                      k_height, k_width,
                      d_height, d_width,
                      mode, input_layer, num_channels_in)
|
def validate_config(cls, config):
    """Check that the given config defines ``discovery`` and at least one
    of ``port``/``ports``, then validate the nested check configs."""
    if "discovery" not in config:
        raise ValueError("No discovery method defined.")
    has_port = "port" in config or "ports" in config
    if not has_port:
        raise ValueError("No port(s) defined.")
    cls.validate_check_configs(config)
|
def process_text(self, t: str, tok: BaseTokenizer) -> List[str]:
    "Process one text `t` with tokenizer `tok`."
    # String-level rules first, then tokenize, then token-level rules.
    for pre_rule in self.pre_rules:
        t = pre_rule(t)
    tokens = tok.tokenizer(t)
    for post_rule in self.post_rules:
        tokens = post_rule(tokens)
    return tokens
|
def music_url(ids=None):
    """Fetch song download URLs by song IDs.

    :param ids: list of song IDs (defaults to an empty list)
    :return: the API response
    :raises ParamsError: if ``ids`` is not a list
    """
    # Fixed: avoid a mutable default argument; None stands in for "no ids".
    if ids is None:
        ids = []
    if not isinstance(ids, list):
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'MUSIC_URL'
    # br 999000 requests the highest available bitrate.
    bot.data = {'ids': ids, 'br': 999000, "csrf_token": ""}
    bot.send()
    return bot.response
|
def report_fit(self):
    """Print a report of the fit results.

    Does nothing beyond a notice if the model has not been fit yet.
    """
    if not self.fitted:
        print('Model not yet fit.')
        return

    # NOTE: "liklihood" misspelling preserved -- downstream tooling may
    # match the existing output text.
    print('Null Log-liklihood: {0:.3f}'.format(self.log_likelihoods['null']))
    print('Log-liklihood at convergence: {0:.3f}'.format(self.log_likelihoods['convergence']))
    print('Log-liklihood Ratio: {0:.3f}\n'.format(self.log_likelihoods['ratio']))

    # Fixed: removed a dead `tbl = PrettyTable(['Component',])` that was
    # immediately overwritten by the assignment below.
    tbl = PrettyTable()
    tbl.add_column('Component', self.fit_parameters.index.values)
    for col in ('Coefficient', 'Std. Error', 'T-Score'):
        tbl.add_column(col, self.fit_parameters[col].values)
    tbl.align['Component'] = 'l'
    tbl.float_format = '.3'
    print(tbl)
|
def Run(self, args):
    """Lists a directory."""
    try:
        directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
    except (IOError, OSError) as e:
        self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e)
        return

    # Reply once per entry, in deterministic (path-sorted) order.
    for entry in sorted(directory.ListFiles(), key=lambda f: f.pathspec.path):
        self.SendReply(entry)
|
def role_add(self, role=None, login=None, envs=[], query='/roles/'):
    """Add a user to a role in each configured environment.

    `login` - Login or username of user to add to `role`
    `role` - Role to add user to
    `envs` - NOTE(review): this parameter appears unused; the loop below
        iterates ``self.args.envs`` instead. Confirm which is intended.
    `query` - base API path for role resources
    """
    # NOTE(review): the POST payload uses self.args.login while the log
    # line and the existence checks use the `login` parameter -- these can
    # disagree when a caller passes an explicit login. Verify against
    # callers.
    data = {'login': self.args.login}
    juicer.utils.Log.log_debug("Add Role '%s' to '%s'", role, login)
    for env in self.args.envs:
        if not juicer.utils.role_exists_p(role, self.connectors[env]):
            # Role missing in this environment: skip it.
            juicer.utils.Log.log_info("role `%s` doesn't exist in %s... skipping!", (role, env))
            continue
        elif not juicer.utils.user_exists_p(login, self.connectors[env]):
            # User missing in this environment: log and fall through (no POST).
            juicer.utils.Log.log_info("user `%s` doesn't exist in %s... skipping!", (login, env))
        else:
            url = "%s%s/users/" % (query, role)
            _r = self.connectors[env].post(url, data)
            if _r.status_code == Constants.PULP_POST_OK:
                juicer.utils.Log.log_info("added user `%s` to role `%s` in %s", (login, role, env))
            else:
                # Non-OK responses raise via the HTTP library.
                _r.raise_for_status()
    return True
|
def status(self):
    """Return a tuple with current processing status code and message.

    The receiver may override the status; otherwise fall back to this
    object's response code and response message.
    """
    receiver_status = self.receiver.status(self)
    if receiver_status:
        return receiver_status
    return (self.response_code, self.response.get('message'))
|
def bind_filter(self, direction, filter_name):
    """Adds a packet filter to this NIO.

    Filter "freq_drop" drops packets.
    Filter "capture" captures packets.

    :param direction: "in", "out" or "both"
    :param filter_name: name of the filter to apply
    """
    if direction not in self._dynamips_direction:
        raise DynamipsError("Unknown direction {} to bind filter {}:".format(direction, filter_name))
    dynamips_direction = self._dynamips_direction[direction]

    yield from self._hypervisor.send(
        "nio bind_filter {name} {direction} {filter}".format(name=self._name,
                                                             direction=dynamips_direction,
                                                             filter=filter_name))

    # Remember which filter is now active on each side.
    if direction in ("in", "both"):
        self._input_filter = filter_name
    if direction in ("out", "both"):
        self._output_filter = filter_name
|
def decoded_output_boxes(self):
    """Returns: N x #class x 4"""
    # Repeat each proposal box once per class -> N x #class x 4 anchors.
    per_class_anchors = tf.tile(tf.expand_dims(self.proposals.boxes, 1),
                                [1, cfg.DATA.NUM_CLASS, 1])
    return decode_bbox_target(self.box_logits / self.bbox_regression_weights,
                              per_class_anchors)
|
def existing_node_input ( ) :
"""Get an existing node id by name or id .
Return - 1 if invalid"""
|
input_from_user = raw_input ( "Existing node name or id: " )
node_id = INVALID_NODE
if not input_from_user :
return node_id
# int or str ?
try :
parsed_input = int ( input_from_user )
except ValueError :
parsed_input = input_from_user
if isinstance ( parsed_input , int ) :
result = db . execute ( text ( fetch_query_string ( 'select_node_from_id.sql' ) ) , node_id = parsed_input ) . fetchall ( )
if result :
node_id = int ( result [ 0 ] [ 'node_id' ] )
else :
result = db . execute ( text ( fetch_query_string ( 'select_node_from_name.sql' ) ) , node_name = parsed_input ) . fetchall ( )
if result :
if len ( result ) == 1 :
print 'Node id: {node_id}\nNode name: {name}' . format ( ** result [ 0 ] )
print '-------------'
node_id = result [ 0 ] [ 'node_id' ]
else :
print 'Multiple nodes found with the name: {0}' . format ( parsed_input )
for item in result :
print '{node_id}: {name} = {value}' . format ( ** item )
node_selection = raw_input ( 'Enter a node id from this list or enter "?" to render all or "?<node>" for a specific one.' )
if node_selection :
node_selection_match = re . match ( r"\?(\d)*" , node_selection )
if node_selection_match :
if node_selection_match . groups ( ) [ 0 ] :
value = render_node ( int ( node_selection_match . groups ( ) [ 0 ] ) , noderequest = { '_no_template' : True } , ** result [ 0 ] )
print safe_dump ( value , default_flow_style = False )
else :
for item in result :
value = render_node ( item [ 'node_id' ] , noderequest = { '_no_template' : True } , ** item )
print 'Node id: {0}' . format ( item [ 'node_id' ] )
print safe_dump ( value , default_flow_style = False )
print '---'
node_id = node_input ( )
else :
try :
node_id = int ( node_selection )
except ValueError :
node_id = INVALID_NODE
print 'invalid node id: %s' % node
return node_id
|
def filter_cat(self, axis, cat_index, cat_name):
    '''Filter the matrix based on category.

    ``cat_index`` is the index of the category; the first category has
    index 1.
    '''
    # Delegate to the shared filtering implementation.
    run_filter.filter_cat(self, axis, cat_index, cat_name)
|
def get_soa_record(client, zone_id, zone_name):
    """Fetch and parse the SOA record for ``zone_name`` from ``zone_id``.

    Args:
        client (:class:`botocore.client.Route53`): The connection used to
            interact with Route53's API.
        zone_id (string): The AWS Route53 zone id of the hosted zone to
            query.
        zone_name (string): The name of the DNS hosted zone to create.

    Returns:
        :class:`stacker.util.SOARecord`: An object representing the parsed
        SOA record returned from AWS Route53.
    """
    response = client.list_resource_record_sets(HostedZoneId=zone_id,
                                                StartRecordName=zone_name,
                                                StartRecordType="SOA",
                                                MaxItems="1")
    # MaxItems="1" guarantees at most one record set; the SOA is first.
    record_sets = response["ResourceRecordSets"]
    return SOARecord(record_sets[0])
|
def push_failure_state(self):
    """Report whether pushes succeed on the remote phylesystem service.

    Returns a tuple: the boolean for whether or not pushes succeed, and
    the entire object returned by a call to push_failure on the
    phylesystem-api. This should only be called with wrappers around
    remote services (RuntimeError will be raised if you call this with a
    local wrapper).
    """
    if self._src_code == _GET_LOCAL:
        raise RuntimeError('push_failure_state only pertains to work with remote phyleysystem instances')
    failure_info = self._remote_push_failure()
    return failure_info['pushes_succeeding'], failure_info
|
def find_module_registrations(c_file):
    """Find any MP_REGISTER_MODULE definitions in the provided c file.

    :param str c_file: path to c file to check
    :return: set of ``(module_name, obj_module, enabled_define)`` tuples
        (the original docstring said List, but a set is returned)
    """
    if c_file is None:
        # No c file to match the object file, skip.
        return set()
    # `pattern` is a module-level regex; the original declared it `global`
    # needlessly (it is read, never assigned, here).
    with io.open(c_file, encoding='utf-8') as c_file_obj:
        return set(re.findall(pattern, c_file_obj.read()))
|
def history(ctx, account, limit, type, csv, exclude, raw):
    """Show history of an account"""
    from bitsharesbase.operations import getOperationNameForId

    table = [["#", "time (block)", "operation", "details"]]
    for account_name in account:
        acct = Account(account_name, bitshares_instance=ctx.bitshares)
        for entry in acct.history(limit=limit, only_ops=type, exclude_ops=exclude):
            block = BlockHeader(entry["block_num"])
            details = pprintOperation(entry) if not raw else json.dumps(entry, indent=4)
            table.append([
                entry["id"],
                "%s (%s)" % (block.time(), entry["block_num"]),
                "{} ({})".format(getOperationNameForId(entry["op"][0]), entry["op"][0]),
                details,
            ])
    print_table(table)
|
def build_sample_smoother_problem_friedman82(N=200):
    """Sample problem from the supersmoother publication.

    Draws ``N`` uniform x values on [0, 1) and returns ``(x, y)`` where
    ``y = sin(2*pi*(1-x)^2) + x*err`` with standard-normal noise ``err``.
    """
    x = numpy.random.uniform(size=N)
    noise = numpy.random.standard_normal(N)
    y = numpy.sin(2 * math.pi * (1 - x) ** 2) + x * noise
    return x, y
|
def list_devices():
    """List devices via HTTP GET."""
    # Expose host and state for every registered device, keyed by id.
    output = {
        device_id: {'host': device.host, 'state': device.state}
        for device_id, device in devices.items()
    }
    return jsonify(devices=output)
|
def parseSetEnv(l):
    """Parses a list of strings of the form "NAME=VALUE" or just "NAME" into
    a dictionary. Strings of the latter form will result in dictionary
    entries whose value is None.

    :type l: list[str]
    :rtype: dict[str,str]

    >>> parseSetEnv([])
    {}
    >>> parseSetEnv(['a'])
    {'a': None}
    >>> parseSetEnv(['a='])
    {'a': ''}
    >>> parseSetEnv(['a=b'])
    {'a': 'b'}
    >>> parseSetEnv(['a=a', 'a=b'])
    {'a': 'b'}
    >>> parseSetEnv(['a=b', 'c=d'])
    {'a': 'b', 'c': 'd'}
    >>> parseSetEnv(['a=b=c'])
    {'a': 'b=c'}
    >>> parseSetEnv([''])
    Traceback (most recent call last):
    ...
    ValueError: Empty name
    >>> parseSetEnv(['=1'])
    Traceback (most recent call last):
    ...
    ValueError: Empty name
    """
    parsed = {}
    for item in l:
        # partition splits on the FIRST '=' only; sep is '' when absent.
        name, sep, value = item.partition('=')
        if not name:
            raise ValueError('Empty name')
        parsed[name] = value if sep else None
    return parsed
|
def error(self, message):
    """error(message: string)

    Prints a usage message incorporating the message to stderr and exits.

    If you override this in a subclass, it should not return -- it should
    either exit or raise an exception.
    """
    self.print_usage(_sys.stderr)
    # Exit status 2 is the conventional argparse usage-error status.
    formatted_message = _('%s: error: %s\n') % (self.prog, message)
    self.exit(2, formatted_message)
|
def valid_totp(self, code, period=30, timestamp=None):
    """Validate a TOTP code.

    :param code: A number that is less than 6 characters.
    :param period: A period that a TOTP code is valid in seconds
    :param timestamp: Validate TOTP at this given timestamp
    :return: True if ``code`` matches the expected TOTP value.
    """
    if not valid_code(code):
        return False
    # Fixed: on Python 3, bytes(<int>) builds a zero-filled buffer of that
    # length (the original effectively compared two such buffers, wasting
    # memory), so compare the decimal string forms in constant time
    # instead. The truth value is unchanged: the digests are equal exactly
    # when the integers are equal.
    expected = str(self.totp(period, timestamp)).encode('utf-8')
    provided = str(int(code)).encode('utf-8')
    return compare_digest(expected, provided)
|
def _set_link_error_disable(self, v, load=False):
    """Setter method for link_error_disable, mapped from YANG variable
    /interface/ethernet/link_error_disable (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_link_error_disable is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_link_error_disable() directly.
    """
    # Unwrap values that carry their original user type so they can be
    # re-validated below (pyangbind wraps assigned values; _utype recovers
    # the raw type).
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated container class; raises if
        # the value is not compatible with the YANG container type.
        t = YANGDynClass(v, base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """link_error_disable must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)""", })
    self.__link_error_disable = t
    # Notify the parent object (if it tracks changes) that a set occurred.
    if hasattr(self, '_set'):
        self._set()
|
def save_instance(self, instance, using_transactions=True, dry_run=False):
    """Takes care of saving the object to the database.

    Keep in mind that this is done by calling ``instance.save()``, so
    objects are not created in bulk!
    """
    self.before_save_instance(instance, using_transactions, dry_run)
    # Without transaction support, a dry run must not touch the database.
    skip_save = dry_run and not using_transactions
    if not skip_save:
        instance.save()
    self.after_save_instance(instance, using_transactions, dry_run)
|
def variable(name, shape=None, dtype=tf.float32, initializer=None, regularizer=None, trainable=True, collections=None, device='', restore=True):
    """Get an existing variable with these parameters or create a new one.

    The variable is always registered in ``tf.GraphKeys.GLOBAL_VARIABLES``
    and ``MODEL_VARIABLES`` in addition to any caller-supplied collections.

    Args:
      name: name of the new or existing variable.
      shape: shape of the new or existing variable.
      dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
      initializer: initializer for the variable if one is created.
      regularizer: a (Tensor -> Tensor or None) function; the result of
        applying it on a newly created variable will be added to the
        collection GraphKeys.REGULARIZATION_LOSSES and can be used for
        regularization.
      trainable: if `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
      collections: list of extra collection names for the variable.
      device: optional device for the variable; a string or a function
        called to obtain the device.
      restore: whether the variable should be added to the
        VARIABLES_TO_RESTORE collection.

    Returns:
      The created or existing variable.
    """
    requested = list(collections or [])
    # Every variable belongs to the global and model collections.
    requested.extend([tf.GraphKeys.GLOBAL_VARIABLES, MODEL_VARIABLES])
    if restore:
        requested.append(VARIABLES_TO_RESTORE)
    # Drop duplicate collection names before registration.
    unique_collections = set(requested)
    with tf.device(variable_device(device, name)):
        return tf.get_variable(name, shape=shape, dtype=dtype,
                               initializer=initializer, regularizer=regularizer,
                               trainable=trainable, collections=unique_collections)
|
def download_attachments(self, dataset_identifier, content_type="json", download_dir="~/sodapy_downloads"):
    '''Download every attachment associated with a dataset and return the
    local paths of the files that were written.'''
    meta = self.get_metadata(dataset_identifier, content_type=content_type)
    downloaded = []
    attachments = meta['metadata'].get("attachments")
    if not attachments:
        logging.info("No attachments were found or downloaded.")
        return downloaded
    target_dir = os.path.join(os.path.expanduser(download_dir), dataset_identifier)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    for entry in attachments:
        destination = os.path.join(target_dir, entry["filename"])
        if entry.get("assetId", False):
            # Newer attachments are addressed by assetId through the old API route.
            base = _format_old_api_request(dataid=dataset_identifier)
            resource = "{0}/files/{1}?download=true&filename={2}".format(
                base, entry["assetId"], entry["filename"])
        else:
            # Legacy attachments are blobs served from /api/assets.
            resource = "{0}/{1}?download=true".format("/api/assets", entry["blobId"])
        uri = "{0}{1}{2}".format(self.uri_prefix, self.domain, resource)
        _download_file(uri, destination)
        downloaded.append(destination)
    logging.info("The following files were downloaded:\n\t{0}".format("\n\t".join(downloaded)))
    return downloaded
|
def import_setting(file_path, qsettings=None):
    """Import InaSAFE's settings from a JSON file.

    :param file_path: The file to read the imported settings from.
    :type file_path: basestring

    :param qsettings: A custom QSettings to use. If it's not defined, the
        default one is used.
    :type qsettings: qgis.PyQt.QtCore.QSettings

    :returns: A dictionary of the imported settings.
    :rtype: dict
    """
    with open(file_path, 'r') as settings_file:
        imported = json.load(settings_file)
    qsettings = qsettings or QSettings()
    # Wipe any previous InaSAFE settings before applying the imported ones.
    qsettings.beginGroup('inasafe')
    qsettings.remove('')
    qsettings.endGroup()
    for key, value in imported.items():
        set_setting(key, value, qsettings=qsettings)
    return imported
|
def filename_to_module(filename):
    """Convert a filename like ``html5lib-0.999.egg-info`` to ``html5lib``.

    The module name is everything up to the first ``.`` or ``-``.

    :param filename: egg-info / distribution style file name
    :returns: the leading module name (empty string if the name starts
        with a separator)
    """
    # The previous pattern was ``^[^.|-]*``: inside a character class ``|``
    # is a literal pipe, not alternation, so it wrongly excluded ``|`` as
    # well.  ``re.match`` already anchors at the start, and ``*`` means the
    # match can never be None.
    return re.match(r"[^.-]*", filename).group(0)
|
def assert_await_all_transforms_exist(cli, transform_paths, does_exist=DEFAULT_TRANSFORM_EXISTS, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
    """Assert that every transform reached the existence state ``does_exist``.

    Fails when the timeout passes or the awaited state does not match the
    actual state.

    :param cli:
    :param transform_paths:
    :param does_exist: (True|False) the state change we are waiting for.
    :param timeout_seconds: how long to wait for the change before failing.
    :return: True on success (the assertion fires otherwise).
    """
    awaited = commands.await_all_transforms_exist(cli, transform_paths, does_exist, timeout_seconds)
    assert awaited is True
    return awaited
|
def invertible_total_flatten(unflat_list):
    r"""Fully flatten an arbitrarily nested list and record how to undo it.

    Args:
        unflat_list (list): possibly nested list of items

    Returns:
        tuple: (flat_list, invert_levels) -- ``invert_levels`` is consumed
        by ``total_unflatten`` to reconstruct the original nesting.

    CommandLine:
        python -m utool.util_list --exec-invertible_total_flatten --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> import utool as ut
        >>> unflat_list = [0, [[1, 2, 3], 4, 5], 9, [2, 3], [1, [2, 3, 4]], 1, 2, 3]
        >>> print('unflat_list = %r' % (unflat_list,))
        >>> (flat_list, invert_levels) = invertible_total_flatten(unflat_list)
        >>> print('flat_list = %r' % (flat_list,))
        >>> unflat_list2 = total_unflatten(flat_list, invert_levels)
        >>> print('unflat_list2 = %r' % (unflat_list2,))
        >>> assert unflat_list2 == unflat_list
        >>> assert ut.depth_profile(flat_list) == 16
    """
    import utool as ut
    next_list = unflat_list
    # A "scalar" is any non-iterable item; flattening stops once the
    # working list contains only scalars.
    scalar_flags = [not ut.isiterable(item) for item in next_list]
    invert_stack = []
    # print('unflat_list = %r' % (unflat_list,))
    while not all(scalar_flags):
        # Wrap scalars in one-element lists so every item is iterable,
        # then flatten exactly one nesting level.
        unflattenized = [[item] if flag else item for flag, item in zip(scalar_flags, next_list)]
        flatter_list, invert_part = ut.invertible_flatten1(unflattenized)
        # print('flatter_list = %r' % (flatter_list,))
        # Undo the artificial wrapping in the inversion record so scalars
        # round-trip as scalars rather than singleton lists.
        for idx in ut.where(scalar_flags):
            invert_part[idx] = invert_part[idx][0]
        invert_stack.append(invert_part)
        next_list = flatter_list
        scalar_flags = [not ut.isiterable(item) for item in next_list]
    # invert_part = [None] * len(scalar_flags)
    # invert_stack.append(invert_part)
    # Reverse so the deepest level comes first when replaying the inversion.
    invert_levels = invert_stack[::-1]
    flat_list = next_list
    return flat_list, invert_levels
|
def cli(yamlfile, inline, format):
    """Generate JSON Schema representation of a biolink model"""
    generator = JsonSchemaGenerator(yamlfile, format)
    print(generator.serialize(inline=inline))
|
def reverse_url(self, datatype, url, verb='GET', urltype='single', api_version=None):
    """Extract the template parameters embedded in a populated URL.

    :param datatype: a string identifying the data the url accesses.
    :param url: the fully-qualified URL to extract parameters from.
    :param verb: the HTTP verb needed for use with the url.
    :param urltype: an adjective used to the nature of the request.
    :param api_version: API version string; defaults to 'v1'.
    :return: dict
    :raises KeyError: when the url does not match the stored template.
    :raises ValueError: when the url's API version differs from
        ``api_version``.
    """
    api_version = api_version or 'v1'
    templates = getattr(self, 'URL_TEMPLATES__%s' % api_version)
    # Turn the stored template (e.g. /foo/{foo_id}/bar/{id}/) into a regex
    # with one named group per placeholder; this is fairly simplistic on
    # purpose -- a full parse library could replace it if needed.
    placeholder_pattern = re.sub(r'{([^}]+)}', r'(?P<\1>.+)', templates[datatype][verb][urltype])
    pattern = r"https://(?P<api_host>.+)/services/api/(?P<api_version>.+)" + placeholder_pattern
    matched = re.match(pattern, url or '')
    if matched is None:
        raise KeyError("No reverse match from '%s' to %s.%s.%s" % (url, datatype, verb, urltype))
    params = matched.groupdict()
    del params['api_host']
    if params.pop('api_version') != api_version:
        raise ValueError("API version mismatch")
    return params
|
def an_text_url(identifiant, code):
    """Build the National Assembly document URL for ``identifiant``.

    Port of the PHP ``urlOpaque($identifiant, $codeType)`` helper used by
    the National Assembly website: the identifier is parsed for the
    legislature number, the "texte adopte" marker and the document number,
    which are combined with a per-document-type directory/prefix/suffix
    table.

    :param identifiant: document identifier, e.g. ``PRJLANR5L15B0296``
    :param code: document type code, used when the identifier itself does
        not force one (``BTC`` -> ``TCOM``, ``BTA`` -> ``TADO``)
    :raises Exception: for identifiers whose type is not in the table
    """
    url_parts = {
        'PRJL': {'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': ''},
        'PION': {'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': ''},
        'PNRECOMENQ': {'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': ''},
        'PNREAPPART341': {'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': ''},
        'PNREMODREGLTAN': {'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': ''},
        'AVCE': {'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': '-ace'},
        'ETDI': {'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': '-ei'},
        'ACIN': {'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': '-ai'},
        'LETT': {'repertoire': 'projets', 'prefixe': 'pl', 'suffixe': '-l'},
        'PNRETVXINSTITEUROP': {'repertoire': 'europe/resolutions', 'prefixe': 'ppe', 'suffixe': ''},
        'PNRE': {'repertoire': 'propositions', 'prefixe': 'pion', 'suffixe': ''},
        'RION': {'repertoire': '', 'prefixe': '', 'suffixe': ''},
        'TCOM': {'repertoire': 'ta-commission', 'prefixe': 'r', 'suffixe': '-a0'},
        'TCOMMODREGLTAN': {'repertoire': 'ta-commission', 'prefixe': 'r', 'suffixe': '-a0'},
        'TCOMTVXINSTITEUROP': {'repertoire': 'ta-commission', 'prefixe': 'r', 'suffixe': '-a0'},
        'TCOMCOMENQ': {'repertoire': 'ta-commission', 'prefixe': 'r', 'suffixe': '-a0'},
        'TADO': {'repertoire': 'ta', 'prefixe': 'ta', 'suffixe': ''},
        # NOT IN NATIONAL ASSEMBLY PHP CODE
        'RAPP': {'repertoire': 'rapports', 'prefixe': 'r', 'suffixe': ''},
        'RINF': {'repertoire': 'rapports', 'prefixe': 'r', 'suffixe': ''},
    }
    parsed = re.match(r'(.{4})([ANS]*)(R[0-9])([LS]*)([0-9]*)([BTACP]*)(.*)', identifiant)
    legislature = parsed.group(5)
    marker = parsed.group(6)
    numero = parsed.group(7)
    # The "texte adopte" marker inside the identifier overrides the
    # caller-supplied document type code.
    if marker == 'BTC':
        doc_type = 'TCOM'
    elif marker == 'BTA':
        doc_type = 'TADO'
    else:
        doc_type = code
    if doc_type not in url_parts:
        # ex : ALCNANR5L15B0002 (allocution du president)
        raise Exception('Unknown document type for %s' % identifiant)
    parts = url_parts[doc_type]
    return ("http://www.assemblee-nationale.fr/" + legislature + "/" +
            parts['repertoire'] + "/" + parts['prefixe'] + numero + parts['suffixe'] + ".asp")
|
def _get_processed_dataframe ( self , dataframe ) :
"""Generate required dataframe for results from raw dataframe
: param pandas . DataFrame dataframe : the raw dataframe
: return : a dict containing raw , compiled , and summary dataframes from original dataframe
: rtype : dict"""
|
dataframe . index = pd . to_datetime ( dataframe [ 'epoch' ] , unit = 's' , utc = True )
del dataframe [ 'epoch' ]
summary = dataframe . describe ( percentiles = [ .80 , .90 , .95 ] ) . transpose ( ) . loc [ 'scriptrun_time' ]
df_grp = dataframe . groupby ( pd . TimeGrouper ( '{}S' . format ( self . interval ) ) )
df_final = df_grp . apply ( lambda x : x . describe ( percentiles = [ .80 , .90 , .95 ] ) [ 'scriptrun_time' ] )
return { "raw" : dataframe . round ( 2 ) , "compiled" : df_final . round ( 2 ) , "summary" : summary . round ( 2 ) }
|
def client_pause(self, timeout):
    """Stop processing commands from clients for *timeout* milliseconds.

    :raises TypeError: if timeout is not int
    :raises ValueError: if timeout is less than 0
    """
    # Validate before touching the connection so bad input never reaches
    # the server.
    if not isinstance(timeout, int):
        raise TypeError("timeout argument must be int")
    if timeout < 0:
        raise ValueError("timeout must be greater equal 0")
    return wait_ok(self.execute(b'CLIENT', b'PAUSE', timeout))
|
def user_create(name, password, email, tenant_id=None, enabled=True, profile=None, project_id=None, description=None, **connection_args):
    '''Create a user (keystone user-create)

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.user_create name=jack password=zero email=jack@halloweentown.org tenant_id=a28a7b5a999a455f84b1f5210264375e enabled=True
    '''
    kstone = auth(profile, **connection_args)
    if _OS_IDENTITY_API_VERSION > 2:
        # Keystone v3 speaks in projects; fall back to the legacy tenant id
        # when no explicit project id was given.
        item = kstone.users.create(name=name, password=password, email=email,
                                   project_id=project_id or tenant_id,
                                   enabled=enabled, description=description)
    else:
        item = kstone.users.create(name=name, password=password, email=email,
                                   tenant_id=tenant_id, enabled=enabled)
    return user_get(item.id, profile=profile, **connection_args)
|
def get_voltage_delta_branch(grid, tree, node, r_preceeding, x_preceeding):
    """Determine the voltage delta for the branch (edge) preceding ``node``.

    Parameters
    ----------
    grid : LVGridDing0
        Ding0 grid object
    tree : :networkx:`NetworkX Graph Obj< >`
        Tree of grid topology
    node : graph node
        Node to determine voltage level at
    r_preceeding : float
        Resistance of preceding grid
    x_preceeding : float
        Reactance of preceding grid

    Returns
    -------
    list
        ``[voltage_delta_load, voltage_delta_gen, r, x]``
    """
    cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
    cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
    v_nom = cfg_ding0.get('assumptions', 'lv_nominal_voltage')
    omega = 2 * math.pi * 50  # angular frequency at 50 Hz

    # Accumulate the incoming edge's resistance/reactance onto the values
    # of the preceding grid.  The incoming edge is the branch whose far end
    # is the node's predecessor in the tree.
    predecessors = list(tree.predecessors(node))
    incoming = [edge for edge in grid.graph_branches_from_node(node) if edge[0] in predecessors]
    in_edge = incoming[0][1]
    r = r_preceeding + (in_edge['branch'].type['R'] * in_edge['branch'].length)
    x = x_preceeding + (in_edge['branch'].type['L'] / 1e3 * omega * in_edge['branch'].length)

    # Apparent power for the load case and for the generation case.
    peak_load, gen_capacity = get_house_conn_gen_load(tree, node)
    s_max_load = peak_load / cos_phi_load
    s_max_feedin = gen_capacity / cos_phi_feedin

    # Voltage drop (load) and rise (generation; note the sign of x) at node.
    voltage_delta_load = voltage_delta_vde(v_nom, s_max_load, r, x, cos_phi_load)
    voltage_delta_gen = voltage_delta_vde(v_nom, s_max_feedin, r, -x, cos_phi_feedin)
    return [voltage_delta_load, voltage_delta_gen, r, x]
|
def _search_for_function_hints(self, successor_state):
    """Scan for constants that might be used as exit targets later, and add them into pending_exits.

    :param SimState successor_state: A successor state.
    :return: A list of discovered code addresses.
    :rtype: list
    """
    function_hints = []
    # Walk the most recently recorded actions on the state's history and
    # collect concrete values that look like executable addresses.
    for action in successor_state.history.recent_actions:
        if action.type == 'reg' and action.offset == self.project.arch.ip_offset:
            # Skip all accesses to IP registers
            continue
        elif action.type == 'exit':
            # only consider read/write actions
            continue
        # Enumerate actions
        if isinstance(action, SimActionData):
            data = action.data
            if data is not None:
                # TODO: Check if there is a proper way to tell whether this const falls in the range of code
                # TODO: segments
                # Now let's live with this big hack...
                try:
                    # Only data that concretizes to a single value can be a
                    # usable hint.
                    const = successor_state.solver.eval_one(data.ast)
                except:  # pylint:disable=bare-except
                    # eval_one can fail for symbolic or multi-valued data;
                    # deliberately swallow everything and move on.
                    continue
                if self._is_address_executable(const):
                    if self._pending_function_hints is not None and const in self._pending_function_hints:
                        # Already queued; avoid duplicate hints.
                        continue
                    # target = const
                    # tpl = (None, None, target)
                    # st = self.project._simos.prepare_call_state(self.project.initial_state(mode='fastpath'),
                    #                                             initial_state=saved_state)
                    # st = self.project.initial_state(mode='fastpath')
                    # exits[tpl] = (st, None, None)
                    function_hints.append(const)
    l.debug('Got %d possible exits, including: %s', len(function_hints), ", ".join(["0x%x" % f for f in function_hints]))
    return function_hints
|
def prune(self, keep_channels=True, *, verbose=True):
    """Remove unused variables and (optionally) channels from the Data object.

    Unused variables are those that are not included in either axes or
    constants. Unused channels are those not specified in keep_channels,
    or the first channel.

    Parameters
    ----------
    keep_channels : boolean or int or str or tuple
        If False, removes all but the first channel.
        If int or str, removes all but that index/name channel.
        If tuple, removes all channels except those in the tuple by index
        or name.
        Default is True: do not delete channels.
    verbose : boolean
        Toggle talkback. Default is True.
    """
    for v in self.variables:
        # for/else: the else clause runs only when no axis or constant
        # references this variable, i.e. the variable is unused.
        for var in wt_kit.flatten_list([ax.variables for ax in self._axes + self._constants]):
            if v == var:
                break
        else:
            self.remove_variable(v.natural_name, implied=False, verbose=verbose)
    if keep_channels is not True:
        # Normalise keep_channels into a tuple of indexes/names.  A plain
        # string is iterable, so it is forced through the scalar path.
        try:
            if isinstance(keep_channels, str):
                raise TypeError
            indexes = tuple(keep_channels)
        except TypeError:
            indexes = (keep_channels,)
        for i, ch in enumerate(self.channels):
            # Keep a channel when either its index or its name is listed.
            # NOTE(review): channels are removed while enumerating
            # self.channels -- assumes remove_channel keeps iteration
            # stable; confirm upstream.
            if i not in indexes and not ch.natural_name in indexes:
                self.remove_channel(ch.natural_name, verbose=verbose)
|
def handle_scoping_input(self, continue_flag, cmd, text):
    """Handle a scoping gesture: update the default command scope.

    Returns the (continue_flag, cmd) pair with the scope symbol stripped
    from ``cmd``.
    """
    # Everything after the scope symbol is the list of scope tokens.
    default_split = text.partition(SELECT_SYMBOL['scope'])[2].split()
    cmd = cmd.replace(SELECT_SYMBOL['scope'], '')
    continue_flag = True
    if not default_split:
        # A bare scope symbol clears the whole default scope.
        self.default_command = ""
        print('unscoping all', file=self.output)
        return continue_flag, cmd
    # Consume the scope tokens one at a time, mutating
    # self.default_command as we go.
    while default_split:
        if not text:
            value = ''
        else:
            value = default_split[0]
        # Candidate scope path = current default command plus this token.
        tree_path = self.default_command.split()
        tree_path.append(value)
        if self.completer.command_tree.in_tree(tree_path):
            # Token extends a valid command prefix: push it onto the scope.
            self.set_scope(value)
            print("defaulting: " + value, file=self.output)
            cmd = cmd.replace(SELECT_SYMBOL['scope'], '')
        elif SELECT_SYMBOL['unscope'] == default_split[0] and self.default_command.split():
            # Unscope token: pop the innermost scope level.
            value = self.default_command.split()[-1]
            self.default_command = ' ' + ' '.join(self.default_command.split()[:-1])
            if not self.default_command.strip():
                self.default_command = self.default_command.strip()
            print('unscoping: ' + value, file=self.output)
        elif SELECT_SYMBOL['unscope'] not in text:
            print("Scope must be a valid command", file=self.output)
        default_split = default_split[1:]
    return continue_flag, cmd
|
def check_encoding_chars(encoding_chars):
    """Validate the given encoding chars.

    :type encoding_chars: ``dict``
    :param encoding_chars: the encoding chars (see
        :func:`hl7apy.set_default_encoding_chars`)

    :raises: :exc:`hl7apy.exceptions.InvalidEncodingChars` if the given
        encoding chars are not valid (wrong type, missing required keys,
        or duplicate separator characters)
    """
    # collections.MutableMapping was removed in Python 3.10; the ABC lives
    # in collections.abc on all supported Python 3 versions.
    from collections.abc import MutableMapping
    if not isinstance(encoding_chars, MutableMapping):
        raise InvalidEncodingChars
    required = {'FIELD', 'COMPONENT', 'SUBCOMPONENT', 'REPETITION', 'ESCAPE'}
    missing = required - set(encoding_chars.keys())
    if missing:
        raise InvalidEncodingChars('Missing required encoding chars')
    # Each required separator must be a distinct character.
    values = [v for k, v in encoding_chars.items() if k in required]
    if len(values) > len(set(values)):
        raise InvalidEncodingChars('Found duplicate encoding chars')
|
def info(self, name, args):
    """Interface with the info dumpers (DBGFInfo).

    This feature is not implemented in the 4.0.0 release but it may show up
    in a dot release.

    in name of type str
        The name of the info item.

    in args of type str
        Arguments to the info dumper.

    return info of type str
        The info string.

    :raises TypeError: if ``name`` or ``args`` is not a string
    """
    # ``basestring`` only exists on Python 2; ``str`` is the Python 3
    # equivalent for these text-only parameters.  The error messages are
    # kept verbatim for callers that match on them.
    if not isinstance(name, str):
        raise TypeError("name can only be an instance of type basestring")
    if not isinstance(args, str):
        raise TypeError("args can only be an instance of type basestring")
    return self._call("info", in_p=[name, args])
|
def result(self):
    """Return the table holding the query results, blocking until ready.

    Raises:
      Exception if we timed out waiting for results or the query failed.
    """
    self.wait()
    if not self.failed:
        return self._table
    raise Exception('Query failed: %s' % str(self.errors))
|
def probe_characteristics(self, conn_id, handle, services):
    """Probe a device for all characteristics defined in its GATT table.

    This routine must be called after probe_services and passed the
    services dictionary produced by that method.

    Args:
        conn_id (int): a unique identifier for this connection on the
            DeviceManager that owns this adapter.
        handle (int): a handle to the connection on the BLED112 dongle
        services (dict): A dictionary of GATT services produced by
            probe_services()
    """
    context = {'connection_id': conn_id, 'handle': handle, 'services': services}
    self._command_task.async_command(['_probe_characteristics', handle, services],
                                     self._probe_characteristics_finished, context)
|
def init_app(self, app):
    '''Initialize this Flask extension for the given app.'''
    self.app = app
    if not hasattr(app, 'extensions'):
        # Older Flask versions do not pre-create the extensions registry.
        app.extensions = {}
    app.extensions['plugin_manager'] = self
    # Discover plugins immediately so the manager is usable right away.
    self.reload()
|
def passengers(self) -> Set["PassengerUnit"]:
    """Units inside a Bunker, CommandCenter, Nydus, Medivac, WarpPrism, Overlord"""
    carried = self._proto.passengers
    return set(PassengerUnit(unit, self._game_data) for unit in carried)
|
def set_access_credentials(self, scope, access_token, refresh_token=None, update_user=True):
    """Set the credentials used for OAuth2 authentication.

    Calling this function will overwrite any currently existing access
    credentials.

    :param scope: A set of reddit scopes the tokens provide access to
    :param access_token: the access token of the authentication
    :param refresh_token: the refresh token of the authentication
    :param update_user: Whether or not to set the user attribute for
        identity scopes
    """
    # Normalise scope into a set: iterables are converted directly,
    # whitespace-separated strings are split first.
    if isinstance(scope, (list, tuple)):
        scope = set(scope)
    elif isinstance(scope, six.string_types):
        scope = set(scope.split())
    if not isinstance(scope, set):
        raise TypeError('`scope` parameter must be a set')
    self.clear_authentication()
    # Update authentication settings
    self._authentication = scope
    self.access_token = access_token
    self.refresh_token = refresh_token
    if update_user and ('identity' in scope or '*' in scope):
        # Only identity-capable tokens can resolve the authenticated user.
        self.user = self.get_me()
|
def getMessage(self):
    """Return the message for this LogRecord.

    Merges any user-supplied arguments with the message, rendering numpy
    arrays through ``array2string`` first.
    """
    render = self.array2string
    if isinstance(self.msg, numpy.ndarray):
        msg = render(self.msg)
    else:
        msg = str(self.msg)
    if not self.args:
        return msg
    if isinstance(self.args, Dict):
        merged = {key: (render(val) if isinstance(val, numpy.ndarray) else val)
                  for key, val in self.args.items()}
    elif isinstance(self.args, Sequence):
        merged = tuple(render(arg) if isinstance(arg, numpy.ndarray) else arg
                       for arg in self.args)
    else:
        raise TypeError("Unexpected input '%s' with type '%s'" % (self.args, type(self.args)))
    return msg % merged
|
def lc(**kwargs):
    """Create parameters for a new light curve dataset.

    Generally, this will be used as an input to the kind argument in
    :meth:`phoebe.frontend.bundle.Bundle.add_dataset`.

    :parameter **kwargs: defaults for the values of any of the parameters
    :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all
        newly created :class:`phoebe.parameters.parameters.Parameter`s,
        plus the list of constraints
    """
    syn_params, constraints = lc_syn(syn=False, **kwargs)
    # Only the synthetic parameters are exposed for now; the extra
    # flag/weight/timeoffset/statweight parameters remain intentionally
    # disabled.
    obs_params = syn_params.to_list()
    return ParameterSet(obs_params), constraints
|
def check_version_2(dataset):
    """Check that a JSON-stat dataset declares version >= 2.0.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously
            deserialized to a python object by json.load() or json.loads().

    Returns:
        bool: True if version exists and is equal or greater than 2.0,
        False otherwise. For datasets without the version attribute,
        always return False.
    """
    version = dataset.get('version')
    # The previous nested conditional ("if X if Y else False:" followed by
    # explicit True/False returns) obscured this simple rule; the guard on
    # a present/truthy version also prevents float(None) from raising.
    return bool(version) and float(version) >= 2.0
|
def _step4 ( self , word ) :
"""step4 ( ) takes off - ant , - ence etc . , in context < c > vcvc < v > ."""
|
if len ( word ) <= 1 : # Only possible at this stage given unusual inputs to stem _ word like ' oed '
return word
ch = word [ - 2 ]
if ch == 'a' :
if word . endswith ( "al" ) :
return word [ : - 2 ] if self . _m ( word , len ( word ) - 3 ) > 1 else word
else :
return word
elif ch == 'c' :
if word . endswith ( "ance" ) :
return word [ : - 4 ] if self . _m ( word , len ( word ) - 5 ) > 1 else word
elif word . endswith ( "ence" ) :
return word [ : - 4 ] if self . _m ( word , len ( word ) - 5 ) > 1 else word
else :
return word
elif ch == 'e' :
if word . endswith ( "er" ) :
return word [ : - 2 ] if self . _m ( word , len ( word ) - 3 ) > 1 else word
else :
return word
elif ch == 'i' :
if word . endswith ( "ic" ) :
return word [ : - 2 ] if self . _m ( word , len ( word ) - 3 ) > 1 else word
else :
return word
elif ch == 'l' :
if word . endswith ( "able" ) :
return word [ : - 4 ] if self . _m ( word , len ( word ) - 5 ) > 1 else word
elif word . endswith ( "ible" ) :
return word [ : - 4 ] if self . _m ( word , len ( word ) - 5 ) > 1 else word
else :
return word
elif ch == 'n' :
if word . endswith ( "ant" ) :
return word [ : - 3 ] if self . _m ( word , len ( word ) - 4 ) > 1 else word
elif word . endswith ( "ement" ) :
return word [ : - 5 ] if self . _m ( word , len ( word ) - 6 ) > 1 else word
elif word . endswith ( "ment" ) :
return word [ : - 4 ] if self . _m ( word , len ( word ) - 5 ) > 1 else word
elif word . endswith ( "ent" ) :
return word [ : - 3 ] if self . _m ( word , len ( word ) - 4 ) > 1 else word
else :
return word
elif ch == 'o' :
if word . endswith ( "sion" ) or word . endswith ( "tion" ) : # slightly different logic to all the other cases
return word [ : - 3 ] if self . _m ( word , len ( word ) - 4 ) > 1 else word
elif word . endswith ( "ou" ) :
return word [ : - 2 ] if self . _m ( word , len ( word ) - 3 ) > 1 else word
else :
return word
elif ch == 's' :
if word . endswith ( "ism" ) :
return word [ : - 3 ] if self . _m ( word , len ( word ) - 4 ) > 1 else word
else :
return word
elif ch == 't' :
if word . endswith ( "ate" ) :
return word [ : - 3 ] if self . _m ( word , len ( word ) - 4 ) > 1 else word
elif word . endswith ( "iti" ) :
return word [ : - 3 ] if self . _m ( word , len ( word ) - 4 ) > 1 else word
else :
return word
elif ch == 'u' :
if word . endswith ( "ous" ) :
return word [ : - 3 ] if self . _m ( word , len ( word ) - 4 ) > 1 else word
else :
return word
elif ch == 'v' :
if word . endswith ( "ive" ) :
return word [ : - 3 ] if self . _m ( word , len ( word ) - 4 ) > 1 else word
else :
return word
elif ch == 'z' :
if word . endswith ( "ize" ) :
return word [ : - 3 ] if self . _m ( word , len ( word ) - 4 ) > 1 else word
else :
return word
else :
return word
|
def set_color_hsv(self, hue, saturation, value):
    """Turn the bulb on with the given values as HSV."""
    try:
        payload = "action=on&color={};{};{}".format(hue, saturation, value)
        endpoint = '{}/{}/{}'.format(self.resource, URI, self._mac)
        response = requests.post(endpoint, data=payload, timeout=self.timeout)
        # Only a 200 response marks the bulb as on; other status codes are
        # silently ignored, matching the device's best-effort contract.
        if response.status_code == 200:
            self.data['on'] = True
    except requests.exceptions.ConnectionError:
        raise exceptions.MyStromConnectionError()
|
def candidate(self, cand_func, args=None, kwargs=None, name='Candidate', context=None):
    '''Add a candidate function to an experiment. Can be used multiple
    times for multiple candidates.

    :param callable cand_func: your candidate function
    :param iterable args: positional arguments to pass to your function
    :param dict kwargs: keyword arguments to pass to your function
    :param string name: a name for your observation
    :param dict context: observation-specific context
    '''
    entry = {
        'func': cand_func,
        'args': args or [],
        'kwargs': kwargs or {},
        'name': name,
        'context': context or {},
    }
    self._candidates.append(entry)
|
def _get_args(cls, args):  # type: (tuple) -> Tuple[type, slice, Callable]
    """Return the parameters necessary to check type boundaries.

    Args:
        args: A slice giving the minimum and maximum lengths allowed for
            values of the string type.

    Returns:
        A tuple of three parameters: a type, a slice, and the len function.
    """
    # A tuple means the caller wrote Cls[a, b]; only one argument is allowed.
    if isinstance(args, tuple):
        raise TypeError("{}[...] takes exactly one argument.".format(cls.__name__))
    # Delegate upward with the string type prepended.
    bounded_args = (_STR_TYPE, args)
    return super(_StringMeta, cls)._get_args(bounded_args)
|
def build_model(self):
    '''Find out the type of model configured and dispatch the request to
    the appropriate builder, storing the result on ``self.model``.

    Raises:
        Error: if the configured model type is not recognized.
    '''
    model_type = self.model_config['model-type']
    # BUG FIX: the original tested the same truthy condition
    # (`if self.model_config['model-type']:`) in both branches, so the
    # second branch was unreachable. The intended dispatch is presumably
    # on the model-type value itself.
    # NOTE(review): the exact config strings ('fred'/'hred') are inferred
    # from the builder method names — confirm against the project config.
    if model_type == 'fred':
        self.model = self.build_fred()
    elif model_type == 'hred':
        # NOTE(review): method name kept as in the original
        # (`buidl_hred`); looks like a typo for `build_hred` — verify.
        self.model = self.buidl_hred()
    else:
        raise Error("Unrecognized model type '{}'".format(model_type))
|
def chown(self, paths, owner, recurse=False):
    '''Change the owner for paths. The owner can be specified as ``user``
    or ``user:group``.

    :param paths: List of paths to chown
    :type paths: list
    :param owner: New owner
    :type owner: string
    :param recurse: Recursive chown
    :type recurse: boolean
    :returns: a generator that yields dictionaries

    This always includes the toplevel when recursing.
    '''
    # Validate up front; note this is a generator function, so these
    # exceptions surface on first iteration, as in the original.
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")
    if not paths:
        raise InvalidInputException("chown: no path given")
    if not owner:
        raise InvalidInputException("chown: no owner given")

    def process(path, node, owner=owner):
        return self._handle_chown(path, node, owner)

    for result in self._find_items(paths, process, include_toplevel=True,
                                   include_children=False, recurse=recurse):
        if result:
            yield result
|
def post(self, request, *args, **kwargs):
    """Handler for HTTP POST requests.

    Supports three modes, selected by request headers:

    * ``X-Horizon-Validate-Step-Start/End`` headers present and integer:
      validate only that step range and return the results as JSON.
    * ``X-Horizon-Add-To-Field`` header present: finalize the workflow and
      return the created object's id/display as JSON for the client to add
      to a form field.
    * Otherwise: validate and finalize the whole workflow, then redirect.
    """
    context = self.get_context_data(**kwargs)
    workflow = context[self.context_object_name]
    try:
        # Check for the VALIDATE_STEP* headers, if they are present
        # and valid integers, return validation results as JSON,
        # otherwise proceed normally.
        validate_step_start = int(self.request.META.get(
            'HTTP_X_HORIZON_VALIDATE_STEP_START', ''))
        validate_step_end = int(self.request.META.get(
            'HTTP_X_HORIZON_VALIDATE_STEP_END', ''))
    except ValueError:
        # No VALIDATE_STEP* headers, or invalid values. Just proceed
        # with normal workflow handling for POSTs.
        pass
    else:
        # There are valid VALIDATE_STEP* headers, so only do validation
        # for the specified steps and return results.
        data = self.validate_steps(request, workflow, validate_step_start,
                                   validate_step_end)
        return http.HttpResponse(json.dumps(data),
                                 content_type="application/json")
    if not workflow.is_valid():
        return self.render_to_response(context)
    try:
        success = workflow.finalize()
    except forms.ValidationError:
        # Validation failures re-render the form with errors.
        return self.render_to_response(context)
    except Exception:
        # Unexpected errors are reported to the user without aborting
        # the response cycle.
        success = False
        exceptions.handle(request)
    if success:
        msg = workflow.format_status_message(workflow.success_message)
        messages.success(request, msg)
    else:
        msg = workflow.format_status_message(workflow.failure_message)
        messages.error(request, msg)
    if "HTTP_X_HORIZON_ADD_TO_FIELD" in self.request.META:
        field_id = self.request.META["HTTP_X_HORIZON_ADD_TO_FIELD"]
        response = http.HttpResponse()
        if workflow.object:
            data = [self.get_object_id(workflow.object),
                    self.get_object_display(workflow.object)]
            response.content = json.dumps(data)
        response["X-Horizon-Add-To-Field"] = field_id
        return response
    next_url = self.request.POST.get(workflow.redirect_param_name)
    return shortcuts.redirect(next_url or workflow.get_success_url())
|
def append_result(self, results, num_matches):
    """Real-time update of search results.

    Adds one match to the results tree and refreshes the widget title.

    :param results: ``(filename, lineno, colno, match_end, line)`` tuple
        describing a single match.
    :param num_matches: running total of matches found so far.
    """
    filename, lineno, colno, match_end, line = results
    # Create the per-file parent item the first time this file appears.
    if filename not in self.files:
        file_item = FileMatchItem(self, filename, self.sorting,
                                  self.text_color)
        file_item.setExpanded(True)
        self.files[filename] = file_item
        self.num_files += 1
    search_text = self.search_text
    title = "'%s' - " % search_text
    nb_files = self.num_files
    if nb_files == 0:
        text = _('String not found')
    else:
        text_matches = _('matches in')
        text_files = _('file')
        if nb_files > 1:
            # NOTE(review): naive English pluralization appended to a
            # translated string — confirm against the i18n policy.
            text_files += 's'
        text = "%d %s %d %s" % (num_matches, text_matches,
                                nb_files, text_files)
    self.set_title(title + text)
    file_item = self.files[filename]
    # Shorten the matched line around the match before display.
    line = self.truncate_result(line, colno, match_end)
    item = LineMatchItem(file_item, lineno, colno, line, self.text_color)
    # Store the match location keyed by the item's identity; presumably
    # consumed by the item-activation handler — verify against callers.
    self.data[id(item)] = (filename, lineno, colno)
|
def save_state_recursively(state, base_path, parent_path, as_copy=False):
    """Recursively save a state (and all its substates) to JSON files.

    :param state: State to be stored
    :param base_path: Path to the state machine
    :param parent_path: Path to the parent state
    :param bool as_copy: Temporary-storage flag signalling that the given
        path is not the new file_system_path
    :return:
    """
    from rafcon.core.states.execution_state import ExecutionState
    from rafcon.core.states.container_state import ContainerState

    relative_path = os.path.join(parent_path, get_storage_id_for_state(state))
    absolute_path = os.path.join(base_path, relative_path)
    if not os.path.exists(absolute_path):
        os.makedirs(absolute_path)
    storage_utils.write_dict_to_json(state, os.path.join(absolute_path, FILE_NAME_CORE_DATA))
    # Only a real save (not a temporary copy) updates the state's own path.
    if not as_copy:
        state.file_system_path = absolute_path
    if isinstance(state, ExecutionState):
        save_script_file_for_state_and_source_path(state, absolute_path, as_copy)
    save_semantic_data_for_state(state, absolute_path)
    # Recurse into all children of container states.
    if isinstance(state, ContainerState):
        remove_obsolete_folders(state.states.values(), os.path.join(base_path, relative_path))
        for child_state in state.states.values():
            save_state_recursively(child_state, base_path, relative_path, as_copy)
|
def handle_userinfo_request(self, request=None, http_headers=None):
    # type: (Optional[str], Optional[Mapping[str, str]]) -> oic.oic.message.OpenIDSchema
    """Handles a userinfo request.

    :param request: urlencoded request (either query string or POST body)
    :param http_headers: http headers
    :return: OpenIDSchema response containing the claims allowed by the
        token's scopes and the original authentication request.
    :raises InvalidAccessToken: if the access token is not active.
    """
    if http_headers is None:
        http_headers = {}
    userinfo_request = dict(parse_qsl(request))
    bearer_token = extract_bearer_token_from_http_request(
        userinfo_request, http_headers.get('Authorization'))
    introspection = self.authz_state.introspect_access_token(bearer_token)
    if not introspection['active']:
        raise InvalidAccessToken('The access token has expired')
    scopes = introspection['scope'].split()
    user_id = self.authz_state.get_user_id_for_subject_identifier(introspection['sub'])
    # Claims implied by the granted scopes...
    requested_claims = scope2claims(scopes, extra_scope_dict=self.extra_scopes)
    # ...plus any claims explicitly requested for the userinfo endpoint
    # in the original authentication request.
    authentication_request = self.authz_state.get_authorization_request_for_access_token(bearer_token)
    requested_claims.update(self._get_requested_claims_in(authentication_request, 'userinfo'))
    user_claims = self.userinfo.get_claims_for(user_id, requested_claims)
    # 'sub' is always included in the response.
    user_claims.setdefault('sub', introspection['sub'])
    response = OpenIDSchema(**user_claims)
    logger.debug('userinfo=%s from requested_claims=%s userinfo=%s',
                 response, requested_claims, user_claims)
    return response
|
def find_package_docs(package_dir, skippedNames=None):
    """Find documentation directories in a package using ``manifest.yaml``.

    Parameters
    ----------
    package_dir : `str`
        Directory of an EUPS package.
    skippedNames : `list` of `str`, optional
        List of package or module names to skip when creating links.

    Returns
    -------
    doc_dirs : namedtuple
        Attributes of the namedtuple are:

        - ``package_dirs`` (`dict`). Keys are package names (for example,
          ``'afw'``). Values are absolute directory paths to the package's
          documentation directory inside the package's ``doc`` directory.
          If there is no package-level documentation the dictionary is empty.
        - ``module_dirs`` (`dict`). Keys are module names (for example,
          ``'lsst.afw.table'``). Values are absolute directory paths to the
          module's directory inside the package's ``doc`` directory. If a
          package has no modules the returned dictionary is empty.
        - ``static_dirs`` (`dict`). Keys are directory names relative to
          the ``_static`` directory. Values are absolute directory paths to
          the static documentation directory in the package. If there
          isn't a declared ``_static`` directory, this dictionary is empty.

    Raises
    ------
    NoPackageDocs
        Raised when the ``manifest.yaml`` file cannot be found in a package.

    Notes
    -----
    Stack packages have documentation in subdirectories of their ``doc``
    directory. The ``manifest.yaml`` file declares what these directories
    are so that they can be symlinked into the root project. For example:

    .. code-block:: yaml

       package: "afw"
       modules:
         - "lsst.afw.image"
         - "lsst.afw.geom"
       statics:
         - "_static/afw"
    """
    logger = logging.getLogger(__name__)
    if skippedNames is None:
        skippedNames = []
    doc_dir = os.path.join(package_dir, 'doc')
    modules_yaml_path = os.path.join(doc_dir, 'manifest.yaml')
    if not os.path.exists(modules_yaml_path):
        raise NoPackageDocs(
            'Manifest YAML not found: {0}'.format(modules_yaml_path))
    with open(modules_yaml_path) as f:
        manifest_data = yaml.safe_load(f)
    # Each manifest section is resolved by its own helper.
    module_dirs = _find_manifest_module_dirs(manifest_data, doc_dir,
                                             skippedNames, logger)
    package_dirs = _find_manifest_package_dirs(manifest_data, doc_dir,
                                               skippedNames, logger)
    static_dirs = _find_manifest_static_dirs(manifest_data, doc_dir, logger)
    Dirs = namedtuple('Dirs', ['module_dirs', 'package_dirs', 'static_dirs'])
    return Dirs(module_dirs=module_dirs, package_dirs=package_dirs,
                static_dirs=static_dirs)


def _find_manifest_module_dirs(manifest_data, doc_dir, skippedNames, logger):
    """Resolve existing module doc directories declared under ``modules``."""
    module_dirs = {}
    if 'modules' in manifest_data:
        for module_name in manifest_data['modules']:
            if module_name in skippedNames:
                logger.debug('Skipping module {0}'.format(module_name))
                continue
            module_dir = os.path.join(doc_dir, module_name)
            # Validate that the module's documentation directory exists.
            if not os.path.isdir(module_dir):
                logger.warning('module doc dir not found: {0}'.format(module_dir))
                continue
            module_dirs[module_name] = module_dir
            logger.debug('Found module doc dir {0}'.format(module_dir))
    return module_dirs


def _find_manifest_package_dirs(manifest_data, doc_dir, skippedNames, logger):
    """Resolve the package-level doc directory declared under ``package``."""
    package_dirs = {}
    if 'package' in manifest_data:
        package_name = manifest_data['package']
        full_package_dir = os.path.join(doc_dir, package_name)
        # Validate that the directory exists and is not skipped.
        if os.path.isdir(full_package_dir) and package_name not in skippedNames:
            package_dirs[package_name] = full_package_dir
            logger.debug('Found package doc dir {0}'.format(full_package_dir))
        else:
            logger.warning('package doc dir excluded or not found: {0}'.format(
                full_package_dir))
    return package_dirs


def _find_manifest_static_dirs(manifest_data, doc_dir, logger):
    """Resolve static doc directories declared under ``statics``, keyed by
    their path relative to ``doc/_static``."""
    static_dirs = {}
    if 'statics' in manifest_data:
        for static_dirname in manifest_data['statics']:
            full_static_dir = os.path.join(doc_dir, static_dirname)
            # Validate that the directory exists.
            if not os.path.isdir(full_static_dir):
                logger.warning('_static doc dir not found: {0}'.format(
                    full_static_dir))
                continue
            # Make a relative path to `_static` that's used as the
            # link source in the root doc project's _static/ directory.
            relative_static_dir = os.path.relpath(
                full_static_dir, os.path.join(doc_dir, '_static'))
            static_dirs[relative_static_dir] = full_static_dir
            logger.debug('Found _static doc dir: {0}'.format(full_static_dir))
    return static_dirs
|
def _safe_read(path, length):
    """Read up to ``length`` characters from ``path`` (relative to HERE).

    Returns an empty string if the file does not exist.
    """
    full_path = os.path.join(HERE, path)
    if not os.path.exists(full_path):
        return ''
    # BUG FIX: the original left the file handle open if read() raised;
    # the context manager guarantees it is closed.
    with codecs.open(full_path, encoding='utf-8') as file_handle:
        return file_handle.read(length)
|
def GetParametro(self, clave, clave1=None, clave2=None, clave3=None, clave4=None):
    "Return an output parameter (set by a previous call)"
    # Useful for output parameters (e.g. TransaccionPlainWS fields).
    valor = self.params_out.get(clave)
    # Walk "nested" data (lists / dicts) using the extra keys.
    for clave in (clave1, clave2, clave3, clave4):
        if clave is not None and valor is not None:
            # BUG FIX: the original tested isinstance(clave1, basestring)
            # here, so digit string keys at deeper levels were never
            # converted to int list indices.
            if isinstance(clave, basestring) and clave.isdigit():
                clave = int(clave)
            try:
                valor = valor[clave]
            except (KeyError, IndexError):
                valor = None
    if valor is not None:
        if isinstance(valor, basestring):
            return valor
        else:
            return str(valor)
    else:
        return ""
|
def dump(self, file: TextIO, **kwargs) -> None:
    """Serialize this protocol as JSON into the given text file object.

    Extra keyword arguments are forwarded to :func:`json.dump`.
    """
    payload = self.to_json()
    return json.dump(payload, file, **kwargs)
|
def remove_object_from_list(self, obj, list_element):
    """Remove an object from a list element.

    Args:
        obj: Accepts JSSObjects, id's, and names
        list_element: Accepts an Element or a string path to that
            element

    Raises:
        ValueError: if ``obj`` is of an unsupported type, or if more
            than one child matches it.
    """
    list_element = self._handle_location(list_element)
    if isinstance(obj, JSSObject):
        results = [item for item in list_element.getchildren()
                   if item.findtext("id") == obj.id]
    elif isinstance(obj, (int, basestring)):
        results = [item for item in list_element.getchildren()
                   if item.findtext("id") == str(obj)
                   or item.findtext("name") == obj]
    else:
        # BUG FIX: previously `results` was left unbound for unsupported
        # types, producing a confusing NameError below.
        raise ValueError("Object must be a JSSObject, id, or name!")
    if len(results) == 1:
        list_element.remove(results[0])
    elif len(results) > 1:
        raise ValueError("There is more than one matching object at that "
                         "path!")
|
def rgb_to_hexa(*args):
    """Convert RGB(A) color to hexadecimal."""
    # Format template keyed by the number of components supplied.
    templates = {
        3: "#%2.2x%2.2x%2.2x",        # RGB
        4: "#%2.2x%2.2x%2.2x%2.2x",   # RGBA
    }
    if len(args) not in templates:
        raise ValueError("Wrong number of arguments.")
    return (templates[len(args)] % args).upper()
|
def _get_result_paths(self, data):
    """Build the ResultPath mapping for each configured output option."""
    # (result key, parameter flag) pairs; insertion order is preserved.
    output_specs = (
        ('Output', '--output'),
        ('ClusterFile', '--uc'),
        ('Output_aln', '--uchimealns'),           # uchime 3-way global alignments
        ('Output_tabular', '--uchimeout'),        # uchime tab-separated format
        ('Output_chimeras', '--chimeras'),        # chimeras fasta file output
        ('Output_nonchimeras', '--nonchimeras'),  # nonchimeras fasta file output
        ('LogFile', '--log'),                     # log file
    )
    result = {}
    for key, flag in output_specs:
        parameter = self.Parameters[flag]
        result[key] = ResultPath(Path=parameter.Value,
                                 IsWritten=parameter.isOn())
    return result
|
def get_slice_bound(self, label, side, kind):
    """Calculate slice bound that corresponds to given label.

    Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
    of given label.

    Parameters
    ----------
    label : object
    side : {'left', 'right'}
    kind : {'ix', 'loc', 'getitem'}

    Raises
    ------
    KeyError
        If the label is absent and cannot be positioned monotonically, or
        maps to a non-contiguous set of positions.
    """
    assert kind in ['ix', 'loc', 'getitem', None]
    if side not in ('left', 'right'):
        raise ValueError("Invalid value for side kwarg,"
                         " must be either 'left' or 'right': %s" % (side,))
    original_label = label
    # For datetime indices label may be a string that has to be converted
    # to datetime boundary according to its resolution.
    label = self._maybe_cast_slice_bound(label, side, kind)
    # we need to look up the label
    try:
        slc = self._get_loc_only_exact_matches(label)
    except KeyError as err:
        try:
            # No exact match: fall back to the sorted insertion point.
            return self._searchsorted_monotonic(label, side)
        except ValueError:
            # raise the original KeyError
            raise err
    if isinstance(slc, np.ndarray):
        # get_loc may return a boolean array or an array of indices, which
        # is OK as long as they are representable by a slice.
        if is_bool_dtype(slc):
            slc = lib.maybe_booleans_to_slice(slc.view('u1'))
        else:
            slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
    if isinstance(slc, np.ndarray):
        # Still an array after conversion: matches are non-contiguous, so
        # no single slice bound exists.
        raise KeyError("Cannot get %s slice bound for non-unique "
                       "label: %r" % (side, original_label))
    if isinstance(slc, slice):
        if side == 'left':
            return slc.start
        else:
            return slc.stop
    else:
        # Scalar position: the right bound is exclusive, hence +1.
        if side == 'right':
            return slc + 1
        else:
            return slc
|
async def prover_get_credentials_for_proof_req(wallet_handle: int,
                                               proof_request_json: str) -> str:
    """Gets human readable credentials matching the given proof request.

    NOTE: This method is deprecated because it immediately returns all
    fetched credentials. Use <prover_search_credentials_for_proof_req>
    to fetch records by small batches.

    :param wallet_handle: wallet handler (created by open_wallet).
    :param proof_request_json: proof request json with "name", "version",
        "nonce", "requested_attributes" and "requested_predicates" keys.
        Attribute/predicate entries may carry "restrictions" and
        "non_revoked" intervals; see the Indy SDK documentation for the
        full schema.
    :return: json with credentials for the given proof request: maps each
        attribute/predicate referent to a list of
        ``{cred_info, interval/timestamp}`` entries, where ``cred_info``
        contains "referent", "attrs", "schema_id", "cred_def_id",
        "rev_reg_id" and "cred_rev_id".
    """
    logger = logging.getLogger(__name__)
    logger.debug("prover_get_credentials_for_proof_req: >>> wallet_handle: %r, proof_request_json: %r",
                 wallet_handle,
                 proof_request_json)
    # The C callback is created once and cached on the function object.
    if not hasattr(prover_get_credentials_for_proof_req, "cb"):
        logger.debug("prover_get_credentials_for_proof_req: Creating callback")
        prover_get_credentials_for_proof_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
    c_wallet_handle = c_int32(wallet_handle)
    c_proof_request_json = c_char_p(proof_request_json.encode('utf-8'))
    credentials_json = await do_call('indy_prover_get_credentials_for_proof_req',
                                     c_wallet_handle,
                                     c_proof_request_json,
                                     prover_get_credentials_for_proof_req.cb)
    res = credentials_json.decode()
    logger.debug("prover_get_credentials_for_proof_req: <<< res: %r", res)
    return res
|
def split(name, x, reverse=False, eps=None, eps_std=None, cond_latents=None,
          hparams=None, state=None, condition=False, temperature=1.0):
    """Splits / concatenates x into x1 and x2 across number of channels.

    For the forward pass, x2 is assumed be gaussian,
    i.e P(x2 | x1) ~ N(mu, sigma) where mu and sigma are the outputs of
    a network conditioned on x1 and optionally on cond_latents.
    For the reverse pass, x2 is determined from mu(x1) and sigma(x1).
    This is deterministic/stochastic depending on whether eps is provided.

    Args:
      name: variable scope.
      x: 4-D Tensor, shape (NHWC).
      reverse: Forward or reverse pass.
      eps: If eps is provided, x2 is set to be mu(x1) + eps * sigma(x1).
      eps_std: Sample x2 with the provided eps_std.
      cond_latents: optionally condition x2 on cond_latents.
      hparams: next_frame_glow hparams.
      state: tf.nn.rnn_cell.LSTMStateTuple. Current state of the LSTM over z_2.
        Used only when hparams.latent_dist_encoder == "conv_lstm".
      condition: bool, Whether or not to condition the distribution on
        cond_latents.
      temperature: Temperature with which to sample from the gaussian.

    Returns:
      If reverse:
        x: 4-D Tensor, concats input and x2 across channels.
        x2: 4-D Tensor, a sample from N(mu(x1), sigma(x1))
      Else:
        x1: 4-D Tensor, Output of the split operation.
        logpb: log-probability of x2 belonging to mu(x1), sigma(x1)
        eps: 4-D Tensor, (x2 - mu(x1)) / sigma(x1)
        x2: 4-D Tensor, Latent representation at the current level.
        state: Current LSTM state.
          4-D Tensor, only if hparams.latent_dist_encoder is set to conv_lstm.

    Raises:
      ValueError: If latent is provided and shape is not equal to NHW(C/2)
        where (NHWC) is the size of x.
    """
    # TODO(mechcoder) Change the return type to be a dict.
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        if not reverse:
            x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)
            # objective: P(x2 | x1) ~ N(x2; NN(x1))
            prior_dist, state = compute_prior(
                "prior_on_z2", x1, cond_latents, hparams, condition, state=state)
            logpb = tf.reduce_sum(prior_dist.log_prob(x2), axis=[1, 2, 3])
            # eps records where x2 fell within its prior, so the value can
            # be reconstructed exactly on the reverse pass.
            eps = get_eps(prior_dist, x2)
            return x1, logpb, eps, x2, state
        else:
            prior_dist, state = compute_prior(
                "prior_on_z2", x, cond_latents, hparams, condition, state=state,
                temperature=temperature)
            # Reconstruction priority: exact eps > scaled random draw >
            # plain prior sample.
            if eps is not None:
                x2 = set_eps(prior_dist, eps)
            elif eps_std is not None:
                x2 = eps_std * tf.random_normal(common_layers.shape_list(x))
            else:
                x2 = prior_dist.sample()
            return tf.concat([x, x2], 3), x2, state
|
def add_fields(self, log_record, record, message_dict):
    """Populate ``log_record`` fields, ensuring a UTC ISO-8601 timestamp.

    :param log_record: log record dict being built
    :param record: original logging record
    :param message_dict: parsed message dict
    """
    super(SplunkFormatter, self).add_fields(log_record, record, message_dict)
    # Only stamp the record if the parent formatter did not already.
    if not log_record.get('timestamp'):
        log_record['timestamp'] = datetime.datetime.utcnow().strftime(
            '%Y-%m-%dT%H:%M:%S.%fZ')
|
def acquire(self, block=True):
    """Acquire the lock.

    Blocks until acquired if `block` is `True`, otherwise returns `False`
    if the lock could not be acquired.
    """
    # SET NX PX: succeeds only if the key is absent; expires after timeout.
    while not self.redis.set(self.name, self.value, px=self.timeout, nx=True):
        # Lock is held by someone else.
        if not block:
            return False
        # When blocking, back off briefly and retry.
        time.sleep(self.sleep)
    # It's ours until the timeout now.
    return True
|
def assemble_rom_code(self, asm):
    """Assemble the given source and program the result into the ROM.

    Returns an ``(error, result)`` pair: ``(exception, None)`` on assembly
    failure, ``(None, result)`` on success.
    """
    source = StringIO(asm)
    asm_worker = assembler.Assembler(self.processor, source)
    try:
        assembled = asm_worker.assemble()
    except BaseException as exc:
        # Deliberately broad: any assembly failure is returned to the
        # caller as a value instead of propagating.
        return exc, None
    self.rom.program(assembled)
    return None, assembled
|
def deallocate(self):
    """Releases all resources.

    Tears down, in order: domain, network, storage pool, hypervisor
    connection. Each is released only if it was created (non-None).
    The checks are sequential; the order presumably reflects resource
    dependencies — do not reorder without confirming.
    """
    if self._domain is not None:
        domain_delete(self._domain, self.logger)
    if self._network is not None:
        self._network_delete()
    if self._storage_pool is not None:
        self._storage_pool_delete()
    if self._hypervisor is not None:
        self._hypervisor_delete()
|
def key(self, frame):
    "Return the sort key for the given frame."
    def with_frameno(primary):
        # Unnumbered frames sort after numbered ones with the same key.
        if frame.frameno is None:
            return (primary, 1)
        return (primary, 0, frame.frameno)

    frame_type = type(frame)
    # 1. Exact class match.
    if frame_type in self.frame_keys:
        return with_frameno(self.frame_keys[frame_type])
    # 2. v2.2 frames: fall back to the parent frame class.
    if frame._in_version(2) and frame_type.__bases__[0] in self.frame_keys:
        return with_frameno(self.frame_keys[frame_type.__bases__[0]])
    # 3. Regex patterns matched against the frame id.
    for pattern, pattern_key in self.re_keys:
        if re.match(pattern, frame.frameid):
            return with_frameno(pattern_key)
    # 4. Unknown frame.
    return with_frameno(self.unknown_key)
|
def get_wfdb_plot_items(record, annotation, plot_sym):
    """Get items to plot from wfdb objects.

    Parameters
    ----------
    record : object or None
        wfdb record; its ``p_signal`` (preferred) or ``d_signal`` is used.
    annotation : object or None
        wfdb annotation; per-channel samples (and optionally symbols)
        are extracted from it.
    plot_sym : bool
        Whether to also return the annotation symbols per channel.

    Returns
    -------
    tuple
        ``(signal, ann_samp, ann_sym, fs, ylabel, record_name)``.

    Raises
    ------
    ValueError
        If the record has neither a physical nor a digital signal.
    """
    # Get record attributes
    if record:
        if record.p_signal is not None:
            signal = record.p_signal
        elif record.d_signal is not None:
            signal = record.d_signal
        else:
            raise ValueError('The record has no signal to plot')
        fs = record.fs
        sig_name = record.sig_name
        sig_units = record.units
        record_name = 'Record: %s' % record.record_name
        ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)]
    else:
        signal = fs = ylabel = record_name = None
    # Get annotation attributes
    if annotation:
        # Get channels
        ann_chans = set(annotation.chan)
        n_ann_chans = max(ann_chans) + 1
        # Indices for each channel
        chan_inds = n_ann_chans * [np.empty(0, dtype='int')]
        for chan in ann_chans:
            chan_inds[chan] = np.where(annotation.chan == chan)[0]
        ann_samp = [annotation.sample[ci] for ci in chan_inds]
        if plot_sym:
            ann_sym = n_ann_chans * [None]
            for ch in ann_chans:
                ann_sym[ch] = [annotation.symbol[ci] for ci in chan_inds[ch]]
        else:
            ann_sym = None
        # Try to get fs from annotation if not already in record
        if fs is None:
            fs = annotation.fs
        record_name = record_name or annotation.record_name
    else:
        ann_samp = None
        ann_sym = None
    # Cleaning: remove empty channels and set labels and styles.
    # Wrangle together the signal and annotation channels if necessary
    if record and annotation:
        # There may be instances in which the annotation `chan`
        # attribute has non-overlapping channels with the signal.
        # In this case, omit empty middle channels. This function should
        # already process labels and arrangements before passing into
        # `plot_items`
        sig_chans = set(range(signal.shape[1]))
        all_chans = sorted(sig_chans.union(ann_chans))
        # BUG FIX: the original compared the *set* sig_chans against the
        # *list* all_chans, which is always unequal in Python, so the
        # compacting branch ran unconditionally.
        if sig_chans != set(all_chans):
            compact_ann_samp = []
            if plot_sym:
                compact_ann_sym = []
            else:
                compact_ann_sym = None
            ylabel = []
            for ch in all_chans:  # ie. 0, 1, 9
                if ch in ann_chans:
                    compact_ann_samp.append(ann_samp[ch])
                    if plot_sym:
                        compact_ann_sym.append(ann_sym[ch])
                if ch in sig_chans:
                    # BUG FIX: join with '/' for consistency with the
                    # labels built in the other branches.
                    ylabel.append('/'.join([sig_name[ch], sig_units[ch]]))
                else:
                    ylabel.append('ch_%d/NU' % ch)
            ann_samp = compact_ann_samp
            ann_sym = compact_ann_sym
        # Signals encompass annotations
        else:
            ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)]
    # Remove any empty middle channels from annotations
    elif annotation:
        ann_samp = [a for a in ann_samp if a.size]
        if ann_sym is not None:
            ann_sym = [a for a in ann_sym if a]
        ylabel = ['ch_%d/NU' % ch for ch in ann_chans]
    return signal, ann_samp, ann_sym, fs, ylabel, record_name
|
def _set_interface_fe(self, v, load=False):
    """Setter method for interface_fe, mapped from YANG variable
    /rule/command/interface_fe (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_interface_fe is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_interface_fe() directly.

    NOTE: generated (pyangbind-style) code — keep edits in the YANG model,
    not here.
    """
    # Normalize values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the YANG class for this container, validating
        # it against the model in the process.
        t = YANGDynClass(v, base=interface_fe.interface_fe, is_container='container', presence=False, yang_name="interface-fe", rest_name="", parent=self, choice=(u'cmdlist', u'interface-t'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """interface_fe must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=interface_fe.interface_fe, is_container='container', presence=False, yang_name="interface-fe", rest_name="", parent=self, choice=(u'cmdlist', u'interface-t'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""", })
    self.__interface_fe = t
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
|
def _handle_successor(self, job, successor, all_successors):
    """Process each successor generated by the job, and return a new list of succeeding jobs.

    :param VFGJob job:          The VFGJob instance.
    :param SimState successor:  The succeeding state.
    :param list all_successors: A list of all successors.
    :return: A list of newly created jobs from the successor; an empty list
             means this successor is dropped (unresolvable target, skipped
             call, or a reached fix-point).
    :rtype: list
    """
    # Initialize parameters
    addr = job.addr
    jumpkind = successor.history.jumpkind
    # Get instruction pointer
    if job.is_return_jump:
        ret_target = job.call_stack.current_return_target
        if ret_target is None:
            # We have nowhere to go according to our call stack. However, the
            # callstack might be corrupted
            l.debug("According to the call stack, we have nowhere to return to.")
            return [ ]
        # Force the instruction pointer to the return target from the call stack.
        successor.ip = ret_target
    # this try-except block is to handle cases where the instruction pointer
    # is symbolic
    try:
        # Ask for up to two solutions: one means concrete, more means symbolic.
        successor_addrs = successor.solver.eval_upto(successor.ip, 2)
    except SimValueError:
        # TODO: Should fall back to reading targets from CFG
        # It cannot be concretized currently. Maybe we could handle
        # it later, maybe it just cannot be concretized
        return [ ]
    if len(successor_addrs) > 1:
        # multiple concrete targets
        if job.is_return_jump:
            # It might be caused by state merging
            # We may retrieve the correct ip from call stack
            successor.ip = job.call_stack.current_return_target
        else:
            return self._handle_successor_multitargets(job, successor, all_successors)
    # Now there should be one single target for the successor
    successor_addr = successor.solver.eval_one(successor.ip)
    # Get the fake ret successor
    fakeret_successor = None
    if self._is_call_jumpkind(jumpkind):
        # By convention here the fake-return successor is the last entry of
        # all_successors.
        fakeret_successor = all_successors[-1]
        # If the function we're calling into doesn't return, we should
        # discard it
        if self._cfg is not None:
            func = self.kb.functions.function(addr=job.call_target)
            if func is not None and func.returning is False and len(all_successors) == 2:
                del all_successors[-1]
                fakeret_successor = None
    if self._is_call_jumpkind(jumpkind):
        # Create a new call stack for the successor
        new_call_stack = self._create_callstack(job, successor_addr, jumpkind, fakeret_successor)
        if new_call_stack is None:
            l.debug("Cannot create a new callstack for address %#x", successor_addr)
            job.dbg_exit_status[successor] = ""
            return [ ]
        new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level)
        new_function_key = FunctionKey.new(successor_addr, new_call_stack_suffix)
        # Save the initial state for the function
        self._save_function_initial_state(new_function_key, successor_addr, successor.copy())
        # bail out if we hit the interfunction_level cap
        if len(job.call_stack) >= self._interfunction_level:
            l.debug('We are not tracing into a new function %#08x as we hit interfunction_level limit', successor_addr)
            # mark it as skipped
            job.dbg_exit_status[successor] = "Skipped"
            job.call_skipped = True
            job.call_function_key = new_function_key
            job.call_task.skipped = True
            return [ ]
    elif jumpkind == 'Ijk_Ret':
        # Pop the current function out from the call stack
        new_call_stack = self._create_callstack(job, successor_addr, jumpkind, fakeret_successor)
        if new_call_stack is None:
            l.debug("Cannot create a new callstack for address %#x", successor_addr)
            job.dbg_exit_status[successor] = ""
            return [ ]
        new_call_stack_suffix = new_call_stack.stack_suffix(self._context_sensitivity_level)
    else:
        # Intra-function transition: reuse the job's call stack as-is.
        new_call_stack = job.call_stack
        new_call_stack_suffix = job.call_stack_suffix
    # Generate the new block ID
    new_block_id = BlockID.new(successor_addr, new_call_stack_suffix, jumpkind)
    # Generate new VFG jobs
    if jumpkind == "Ijk_Ret":
        assert not job.is_call_jump
        # Record this return
        self._return_target_sources[successor_addr].append(job.call_stack_suffix + (addr, ))
        # Check if this return is inside our pending returns list
        if new_block_id in self._pending_returns:
            del self._pending_returns[new_block_id]
    # Check if we have reached a fix-point: if merging the new state into the
    # previously recorded state for this block changes nothing, stop here.
    if jumpkind != 'Ijk_FakeRet' and new_block_id in self._nodes:
        last_state = self._nodes[new_block_id].state
        _, _, merged = last_state.merge(successor, plugin_whitelist=self._mergeable_plugins)
        if merged:
            l.debug("%s didn't reach a fix-point", new_block_id)
        else:
            l.debug("%s reaches a fix-point.", new_block_id)
            job.dbg_exit_status[successor] = "Merged due to reaching a fix-point"
            return [ ]
    new_jobs = self._create_new_jobs(job, successor, new_block_id, new_call_stack)
    return new_jobs
|
def generate_noise(dimensions, stimfunction_tr, tr_duration, template,
                   mask=None, noise_dict=None, temporal_proportion=0.5,
                   iterations=None, fit_thresh=0.05, fit_delta=0.5, ):
    """Generate the noise to be added to the signal.

    Default noise parameters will create a noise volume with a standard
    deviation of 0.1 (where the signal defaults to a value of 1). This has
    built into estimates of how different types of noise mix. All noise
    values can be set by the user or estimated with calc_noise.

    Parameters
    ----------
    dimensions : nd array
        What is the shape of the volume to be generated
    stimfunction_tr : Iterable, list
        When do the stimuli events occur. Each element is a TR
    tr_duration : float
        What is the duration, in seconds, of each TR?
    template : 3d array, float
        A continuous (0 -> 1) volume describing the likelihood a voxel is in
        the brain. This can be used to contrast the brain and non brain.
    mask : 3d array, binary
        The mask of the brain volume, distinguishing brain from non-brain
    noise_dict : dictionary, float
        This is a dictionary which describes the noise parameters of the
        data. If there are no other variables provided then it will use
        default values. The noise variables are as follows:
        snr [float]: Ratio of MR signal to the spatial noise
        sfnr [float]: Ratio of the MR signal to the temporal noise. This is
        the total variability that the following sigmas 'sum' to:
        task_sigma [float]: Size of the variance of task specific noise
        drift_sigma [float]: Size of the variance of drift noise
        auto_reg_sigma [float]: Size of the variance of autoregressive
        noise. This is an ARMA process where the AR and MA components can be
        separately specified
        physiological_sigma [float]: Size of the variance of physiological
        noise
        auto_reg_rho [list]: The coefficients of the autoregressive
        components you are modeling
        ma_rho [list]: The coefficients of the moving average components you
        are modeling
        max_activity [float]: The max value of the averaged brain in order
        to reference the template
        voxel_size [list]: The mm size of the voxels
        fwhm [float]: The gaussian smoothing kernel size (mm)
        matched [bool]: Specify whether you are fitting the noise parameters
        The volumes of brain noise that are generated have smoothness
        specified by 'fwhm'
    temporal_proportion : float
        What is the proportion of the temporal variance (as specified by the
        SFNR noise parameter) that is accounted for by the system noise. If
        this number is high then all of the temporal variability is due to
        system noise, if it is low then all of the temporal variability is
        due to brain variability.
    iterations : list, int
        The first element is how many steps of fitting the SFNR and SNR
        values will be performed. Usually converges after < 5. The second
        element is the number of iterations for the AR fitting. This is much
        more time consuming (has to make a new timecourse on each iteration)
        so be careful about setting this appropriately.
    fit_thresh : float
        What proportion of the target parameter value is sufficient error to
        warrant finishing fit search.
    fit_delta : float
        How much are the parameters attenuated during the fitting process,
        in terms of the proportion of difference between the target
        parameter and the actual parameter

    Returns
    -------
    noise : multidimensional array, float
        Generates the noise volume for these parameters
    """
    # Check the input data: template is expected on a 0->1 scale (with a
    # small tolerance for interpolation overshoot).
    if template.max() > 1.1:
        raise ValueError('Template out of range')
    # Change to be an empty dictionary if it is None
    if noise_dict is None:
        noise_dict = {}
    # Take in the noise dictionary and add any missing information
    noise_dict = _noise_dict_update(noise_dict)
    # How many iterations will you perform? If unspecified it will set
    # values based on whether you are trying to match noise specifically to
    # this participant or just get in the ball park
    if iterations is None:
        if noise_dict['matched'] == 1:
            iterations = [20, 20]
        else:
            iterations = [0, 0]
    # Near-equal AR and MA magnitudes make the ARMA fit poorly conditioned.
    if abs(noise_dict['auto_reg_rho'][0]) - abs(noise_dict['ma_rho'][0]) < 0.1:
        logger.warning('ARMA coefs are close, may have trouble fitting')
    # What are the dimensions of the volume, including time
    dimensions_tr = (dimensions[0], dimensions[1], dimensions[2], len(stimfunction_tr))
    # Get the mask of the brain and set it to be 3d
    if mask is None:
        mask = np.ones(dimensions)
    # Create the base (this inverts the process to make the template)
    base = template * noise_dict['max_activity']
    # Reshape the base (to be the same size as the volume to be created)
    base = base.reshape(dimensions[0], dimensions[1], dimensions[2], 1)
    # Broadcast the 3d base across all TRs.
    base = np.ones(dimensions_tr) * base
    # What is the mean signal of the non masked voxels in this template?
    mean_signal = (base[mask > 0]).mean()
    # Generate the noise
    noise_temporal = _generate_noise_temporal(stimfunction_tr=stimfunction_tr, tr_duration=tr_duration, dimensions=dimensions, template=template, mask=mask, noise_dict=noise_dict, )
    # Convert SFNR into the size of the standard deviation of temporal
    # variability
    temporal_sd = (mean_signal / noise_dict['sfnr'])
    # Calculate the temporal sd of the system noise (as opposed to the noise
    # attributed to the functional variability).
    temporal_sd_system = np.sqrt((temporal_sd ** 2) * temporal_proportion)
    # What is the standard deviation of the background activity
    spat_sd = mean_signal / noise_dict['snr']
    # The remaining (1 - temporal_proportion) share of variance is assigned
    # to the spatial component of the system noise.
    spatial_sd = np.sqrt((spat_sd ** 2) * (1 - temporal_proportion))
    # Set up the machine noise
    noise_system = _generate_noise_system(dimensions_tr=dimensions_tr, spatial_sd=spatial_sd, temporal_sd=temporal_sd_system, )
    # Sum up the noise of the brain
    noise = base + (noise_temporal * temporal_sd) + noise_system
    # Reject negative values (only happens outside of the brain)
    noise[noise < 0] = 0
    # Fit the SNR
    noise, spatial_sd = _fit_spatial(noise, noise_temporal, mask, template, spatial_sd, temporal_sd_system, noise_dict, fit_thresh, fit_delta, iterations[0], )
    # Fit the SFNR and AR noise
    noise = _fit_temporal(noise, mask, template, stimfunction_tr, tr_duration, spatial_sd, temporal_proportion, temporal_sd, noise_dict, fit_thresh, fit_delta, iterations[1], )
    # Return the noise
    return noise
|
def query_data_providers(self, query, scope_name=None, scope_value=None):
    """QueryDataProviders.

    [Preview API] POST the given data-provider query and deserialize the
    service response.

    :param :class:`<DataProviderQuery> <azure.devops.v5_0.contributions.models.DataProviderQuery>` query:
    :param str scope_name:
    :param str scope_value:
    :rtype: :class:`<DataProviderResult> <azure.devops.v5_0.contributions.models.DataProviderResult>`
    """
    # Serialize only the scope parameters that were actually supplied.
    route_values = {}
    for route_key, param_name, value in (
        ('scopeName', 'scope_name', scope_name),
        ('scopeValue', 'scope_value', scope_value),
    ):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    request_body = self._serialize.body(query, 'DataProviderQuery')
    response = self._send(
        http_method='POST',
        location_id='738368db-35ee-4b85-9f94-77ed34af2b0d',
        version='5.0-preview.1',
        route_values=route_values,
        content=request_body,
    )
    return self._deserialize('DataProviderResult', response)
|
def get_type(bind):
    """Detect the ideal type for the data, either using the explicit type
    definition or the format (for date, date-time, not supported by JSON).

    Falls back to ``'string'`` when no known type name matches.
    """
    # Candidate names: the declared types plus the schema "format" hint.
    candidates = bind.types + [bind.schema.get('format')]
    # Most specific first; the first match wins.
    precedence = ('date-time', 'date', 'decimal', 'integer', 'boolean', 'number', 'string')
    return next((name for name in precedence if name in candidates), 'string')
|
def load(fh, encoding=None, is_verbose=False):
    """Load a pickle from an open file handle, with a provided encoding.

    If compat is True:
        fake the old class hierarchy; if it works, then return the new
        type objects.

    Parameters
    ----------
    fh : a filelike object
    encoding : str, optional
        Encoding forwarded to the compat Unpickler; when None the
        Unpickler's own default is used.
    is_verbose : bool, default False
        Show exception output from the compat Unpickler.

    Returns
    -------
    The unpickled object.
    """
    # Fix: the original wrapped this body in
    # ``try: ... except (ValueError, TypeError): raise`` -- an except clause
    # that only re-raises the same exceptions is a no-op, so it is removed.
    # Always read from the start of the stream so repeated attempts (e.g.
    # retrying with a different encoding) see the complete pickle.
    fh.seek(0)
    # Only pass `encoding` when it was supplied, preserving the Unpickler's
    # default behaviour otherwise.
    if encoding is not None:
        up = Unpickler(fh, encoding=encoding)
    else:
        up = Unpickler(fh)
    up.is_verbose = is_verbose
    return up.load()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.