| signature (string, lengths 29–44.1k) | implementation (string, lengths 0–85.2k) |
|---|---|
def author_edit_view(self, context):
    """Render the view shown to authors in Studio when editing this block's children.

    Child blocks can override this to customize the authoring UI. The default
    lets authors reorder children but not add new ones.
    """
    result = Fragment()
    self.render_children(context, result, can_reorder=True, can_add=False)
    return result
|
def to_html(self, codebase):
    """Convert this ClassDoc to HTML.

    Returns the default long-form HTML description used when the full docs
    are built: anchor, class heading, translated description plus "see"
    links, and the HTML of each (optionally non-private) method.
    """
    description = htmlize_paragraphs(codebase.translate_links(self.doc, self))
    description += codebase.build_see_html(self.see, 'h4', self)
    methods_html = '\n'.join(
        method.to_html(codebase)
        for method in self.methods
        if codebase.include_private or not method.is_private
    )
    template = ('<a name = "%s" />\n<div class = "jsclass">\n'
                '<h3>%s</h3>\n%s\n<h4>Methods</h4>\n%s</div>')
    return template % (self.name, self.name, description, methods_html)
|
def get_indels(self, one_based=True):
    """Collect all indels in the read from its CIGAR operations.

    :param one_based: if True, positions are 1-based (as returned by
        ``self.get_position(one_based=True)``); otherwise 0-based.
    :return: tuple ``(insertions, deletions)`` where each list holds
        ``(pos, length)`` pairs. ``pos`` is the reference position of the
        base *preceding* the event (VCF-style) and ``length`` excludes
        that preceding base.
    """
    # CIGAR op codes index into 'MIDNSHP=X'.
    insertions = []
    deletions = []
    position_offset = 0
    position_start = self.get_position(one_based=one_based)
    # Iterate instead of pop(0): avoids O(n^2) and mutating the caller's list.
    for cigar_size, cigar_op in self.get_cigar():
        if cigar_op in (0, 7, 8):  # M/=/X: (mis)match, consumes reference
            position_offset += cigar_size
        elif cigar_op == 1:  # I: insertion, consumes query only
            insertions.append((position_start + position_offset - 1, cigar_size))
        elif cigar_op == 2:  # D: deletion from the reference, consumes reference
            deletions.append((position_start + position_offset - 1, cigar_size))
            position_offset += cigar_size
        elif cigar_op == 3:  # N: skipped region (e.g. intron), consumes reference
            position_offset += cigar_size
        elif cigar_op in (4, 5, 6):  # S soft clip / H hard clip / P padding
            # Per the SAM spec none of these consume the reference; in
            # particular hard clips (H) must NOT advance the offset (the
            # previous version incorrectly did).
            pass
        else:  # unknown op: warn and ignore
            print('unknown cigar_op', cigar_op, cigar_size, file=sys.stderr)
    return (insertions, deletions)
|
def _validate_extra_component ( bs_data ) :
'''Extra checks for component basis files'''
|
assert len ( bs_data [ 'elements' ] ) > 0
# Make sure size of the coefficient matrix matches the number of exponents
for el in bs_data [ 'elements' ] . values ( ) :
if not 'electron_shells' in el :
continue
for s in el [ 'electron_shells' ] :
nprim = len ( s [ 'exponents' ] )
if nprim <= 0 :
raise RuntimeError ( "Invalid number of primitives: {}" . format ( nprim ) )
for g in s [ 'coefficients' ] :
if nprim != len ( g ) :
raise RuntimeError ( "Number of coefficients doesn't match number of primitives ({} vs {}" . format ( len ( g ) , nprim ) )
# If more than one AM is given , that should be the number of
# general contractions
nam = len ( s [ 'angular_momentum' ] )
if nam > 1 :
ngen = len ( s [ 'coefficients' ] )
if ngen != nam :
raise RuntimeError ( "Number of general contractions doesn't match combined AM ({} vs {}" . format ( ngen , nam ) )
|
def get_roots(app=None):
    '''Return the list of root packages/modules exposing endpoints.

    If ``app`` is provided, only roots belonging to that app's enabled
    plugins are returned.
    '''
    enabled = app.config['PLUGINS'] if app else None
    roots = set()
    for entrypoint_name in ENTRYPOINTS.keys():
        for ep in iter_all(entrypoint_name):
            if enabled is not None and ep.name not in enabled:
                continue
            roots.add(ep.module_name.split('.', 1)[0])
    return list(roots)
|
def step4(self):
    """Strip suffixes such as -ant, -ence, etc. in context <c>vcvc<v>."""
    # Candidate suffixes keyed by the word's second-to-last letter; the
    # order inside each tuple matters ("ement" before "ment" before "ent").
    suffix_table = {
        'a': ("al",),
        'c': ("ance", "ence"),
        'e': ("er",),
        'i': ("ic",),
        'l': ("able", "ible"),
        'n': ("ant", "ement", "ment", "ent"),
        's': ("ism",),
        't': ("ate", "iti"),
        'u': ("ous",),
        'v': ("ive",),
        'z': ("ize",),
    }
    ch = self.b[self.k - 1]
    if ch == 'o':
        # Special case: "-ion" only counts when preceded by s or t
        # (e.g. adoption, revision); "-ou" takes care of -ous words.
        if self.ends("ion") and self.b[self.j] in ('s', 't'):
            pass
        elif self.ends("ou"):
            pass
        else:
            return
    else:
        candidates = suffix_table.get(ch)
        if candidates is None:
            return
        for suffix in candidates:
            if self.ends(suffix):
                break
        else:
            return
    if self.m() > 1:
        self.k = self.j
|
def insert_arguments_into_query(compilation_result, arguments):
    """Insert the arguments into the compiled GraphQL query to form a complete query.

    Args:
        compilation_result: a CompilationResult object derived from the
            GraphQL compiler
        arguments: dict mapping argument name to its value, for every
            parameter the query expects

    Returns:
        string, a query in the appropriate output language, with inserted
        argument data
    """
    _ensure_arguments_are_provided(compilation_result.input_metadata, arguments)
    # Dispatch on the compilation result's output language.
    inserters = {
        MATCH_LANGUAGE: insert_arguments_into_match_query,
        GREMLIN_LANGUAGE: insert_arguments_into_gremlin_query,
        SQL_LANGUAGE: insert_arguments_into_sql_query,
    }
    inserter = inserters.get(compilation_result.language)
    if inserter is None:
        raise AssertionError(u'Unrecognized language in compilation result: '
                             u'{}'.format(compilation_result))
    return inserter(compilation_result, arguments)
|
def find_by_client(cls, from_client, limit=None, before_time=None, before_message_id=None):
    # type: (str, Optional[int], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
    """Fetch the chat history of a given client.

    :param from_client: id of the client whose history is requested
    :param limit: max number of records to return; optional (server
        default 100, maximum 1000)
    :param before_time: starting timestamp; records strictly older than
        this are returned (server default: current time)
    :param before_message_id: starting message id; when used it must be
        combined with that message's ``before_time`` as the query start point
    :return: the matching chat records
    """
    params = {}  # type: Dict[str, Any]
    params['from'] = from_client
    if limit is not None:
        params['limit'] = limit
    # Datetimes are converted to epoch milliseconds; bare numbers are
    # assumed to be epoch seconds.
    if isinstance(before_time, datetime):
        params['max_ts'] = round(before_time.timestamp() * 1000)
    elif isinstance(before_time, six.integer_types) or isinstance(before_time, float):
        params['max_ts'] = round(before_time * 1000)
    if before_message_id is not None:
        params['msgid'] = before_message_id
    return list(cls._find(params))
|
def getlanguage(self, language=None, windowsversion=None):
    """Return the manifest's language as a string.

    The result is either a language-culture code such as 'en-us', or a
    string indicating language neutrality: 'x-ww' on Windows XP, 'none'
    on Vista and later.
    """
    lang = language or self.language
    if lang not in (None, "", "*", "neutral"):
        return lang
    version = windowsversion or sys.getwindowsversion()
    # NT6 (Vista+) uses the 'none' marker; older systems use 'x-ww'.
    if version >= (6,):
        return LANGUAGE_NEUTRAL_NT6
    return LANGUAGE_NEUTRAL_NT5
|
def checkout(cwd, rev=None, force=False, opts='', git_opts='', user=None, password=None, ignore_retcode=False, output_encoding=None):
    '''Interface to `git-checkout(1)`_

    cwd
        The path to the git checkout

    rev
        The remote branch or revision to checkout. Optional when ``-b`` or
        ``-B`` is passed in ``opts``.

        .. versionchanged:: 2015.8.0

    force : False
        Force a checkout even if there might be overwritten changes

    opts
        Any additional options to add to the command line, in a single
        string.

        .. note::
            On the Salt CLI, opts beginning with a dash must be passed as
            ``opts=`` to avoid confusing Salt's own argument parsing.

    git_opts
        Any additional options for the git command itself (not the
        ``checkout`` subcommand), in a single string. Useful for passing
        ``-c`` to run git with temporary configuration changes. Supported
        in git 1.7.2 and newer.

        .. versionadded:: 2017.7.0

    user
        User under which to run the git command; defaults to the user the
        minion runs as.

    password
        Windows only. Required when specifying ``user``; ignored elsewhere.

        .. versionadded:: 2016.3.4

    ignore_retcode : False
        If ``True``, do not log an error when git exits nonzero.

        .. versionadded:: 2015.8.0

    output_encoding
        Encoding used to decode output from git commands; only needed when
        repository filenames use a non-UTF-8 encoding.

        .. versionadded:: 2018.3.1

    .. _`git-checkout(1)`: http://git-scm.com/docs/git-checkout

    CLI Examples:

    .. code-block:: bash

        salt myminion git.checkout /path/to/repo somebranch user=jeff
        salt myminion git.checkout /path/to/repo opts='testbranch -- conf/file1 file2'
        salt myminion git.checkout /path/to/repo rev=origin/mybranch opts='--track'
        salt myminion git.checkout /path/to/repo upstream/master opts='-b newbranch'
        salt myminion git.checkout /path/to/repo opts='-b newbranch'
    '''
    cwd = _expand_path(cwd, user)
    command = ['git'] + _format_git_opts(git_opts) + ['checkout']
    if force:
        command.append('--force')
    opts = _format_opts(opts)
    command.extend(opts)
    has_branch_opt = any(flag in opts for flag in ('-b', '-B'))
    if rev is not None:
        command.append(rev)
    elif not has_branch_opt:
        raise SaltInvocationError("'rev' argument is required unless -b or -B in opts")
    # Checkout output goes to stderr, hence redirect_stderr.
    return _git_run(command, cwd=cwd, user=user, password=password,
                    ignore_retcode=ignore_retcode, redirect_stderr=True,
                    output_encoding=output_encoding)['stdout']
|
def _build_effective_configuration(self):
    """Reconcile configured parameters with the values in pg_controldata.

    The controldata value of some parameters (e.g. max_connections) can be
    higher than what the global cluster configuration specifies; starting
    postgres with the lower value would make it exit immediately. As a
    workaround we start with the controldata values and set
    ``pending_restart`` to signal that the running values do not match the
    desired configuration.
    """
    mapping = {
        'max_connections': 'max_connections setting',
        'max_prepared_transactions': 'max_prepared_xacts setting',
        'max_locks_per_transaction': 'max_locks_per_xact setting',
    }
    # max_worker_processes only exists from PostgreSQL 9.4 onwards.
    if self._major_version >= 90400:
        mapping['max_worker_processes'] = 'max_worker_processes setting'
    data = self.controldata()
    effective = self._server_parameters.copy()
    for param, control_key in mapping.items():
        configured = parse_int(effective[param])
        recorded = parse_int(data[control_key])
        if recorded > configured:
            effective[param] = recorded
            self._pending_restart = True
    return effective
|
def aspage(self):
    """Return a TiffPage read from the parent file.

    Raises ValueError for virtual frames, which have no file offset to
    seek to.
    """
    if self.offset is None:
        raise ValueError('cannot return virtual frame as page.')
    handle = self.parent.filehandle
    handle.seek(self.offset)
    return TiffPage(self.parent, index=self.index)
|
def from_dict(cls, pref, prefix=None):
    """Create a Prefix object from a dict.

    Suitable for creating Prefix objects from XML-RPC input. When
    ``prefix`` is supplied it is populated in place; otherwise a new
    Prefix is created.
    """
    if prefix is None:
        prefix = Prefix()
    prefix.id = pref['id']
    # VRF and pool references are optional.
    if pref['vrf_id'] is not None:
        prefix.vrf = VRF.get(pref['vrf_id'])
    if pref['pool_id'] is not None:
        prefix.pool = Pool.get(pref['pool_id'])
    # Plain attributes copied verbatim from the input dict.
    for attr in ('family', 'prefix', 'display_prefix', 'description',
                 'comment', 'node', 'type', 'indent', 'country', 'order_id',
                 'customer_id', 'external_key', 'authoritative_source',
                 'alarm_priority', 'monitor', 'vlan', 'added',
                 'last_modified', 'status', 'avps', 'expires'):
        setattr(prefix, attr, pref[attr])
    # Address counters are coerced to int.
    for attr in ('total_addresses', 'used_addresses', 'free_addresses'):
        setattr(prefix, attr, int(pref[attr]))
    # Tags arrive as lists of names; store them as name -> Tag mappings.
    prefix.inherited_tags = {name: Tag.from_dict({'name': name})
                             for name in pref['inherited_tags']}
    prefix.tags = {name: Tag.from_dict({'name': name})
                   for name in pref['tags']}
    # Members only present on some responses.
    for attr in ('match', 'display', 'children'):
        if attr in pref:
            setattr(prefix, attr, pref[attr])
    return prefix
|
def get_today_all(output='pd'):
    """Fetch today's daily bars for every stock.

    :param output: 'pd' to return a pandas DataFrame, 'QAD' to return a
        QA_DataStruct_Stock_day indexed by (date, code)
    :return: the collected data, or None when no data was fetched or the
        ``output`` value is unrecognized
    """
    data = []
    today = str(datetime.date.today())
    codes = QA_fetch_get_stock_list('stock').code.tolist()
    bestip = select_best_ip()['stock']
    for code in codes:
        try:
            day_data = QA_fetch_get_stock_day(code, today, today, '00', ip=bestip)
        except Exception:
            # Previously a bare `except:`; narrowed so KeyboardInterrupt/
            # SystemExit still propagate. The chosen server may have gone
            # away, so pick a new one and retry once.
            bestip = select_best_ip()['stock']
            day_data = QA_fetch_get_stock_day(code, today, today, '00', ip=bestip)
        if day_data is not None:
            data.append(day_data)
    if not data:
        # pd.concat raises ValueError on an empty list; make the "no data"
        # case explicit instead.
        return None
    res = pd.concat(data)
    if output in ['pd']:
        return res
    elif output in ['QAD']:
        return QA_DataStruct_Stock_day(res.set_index(['date', 'code'], drop=False))
|
def get_css(self):
    """Return stylesheet info for both print and screen contexts.

    Each entry maps a context name to the stylesheet's path URL and raw
    contents. The print stylesheet falls back to the default theme; the
    screen stylesheet is optional.
    """
    stylesheets = {}
    print_css = os.path.join(self.theme_dir, 'css', 'print.css')
    if not os.path.exists(print_css):
        # Fall back to the default theme's print stylesheet.
        print_css = os.path.join(THEMES_DIR, 'default', 'css', 'print.css')
        if not os.path.exists(print_css):
            raise IOError(u"Cannot find css/print.css in default theme")
    with codecs.open(print_css, encoding=self.encoding) as handle:
        stylesheets['print'] = {
            'path_url': utils.get_path_url(print_css, self.relative),
            'contents': handle.read(),
        }
    screen_css = os.path.join(self.theme_dir, 'css', 'screen.css')
    if not os.path.exists(screen_css):
        self.log(u"No screen stylesheet provided in current theme", 'warning')
    else:
        with codecs.open(screen_css, encoding=self.encoding) as handle:
            stylesheets['screen'] = {
                'path_url': utils.get_path_url(screen_css, self.relative),
                'contents': handle.read(),
            }
    return stylesheets
|
def _status ( self ) :
"""Return html saying whether this Action is reverted by another
one or reverts another one ."""
|
text = ""
# Turns out that is related field in null , Django
# doesn ' t even make it a property of the object
# http : / / code . djangoproject . com / ticket / 11920
if hasattr ( self , "reverts" ) :
text += '(reverts <a href="%s">%s</a>)<br/>' % ( self . reverts . get_absolute_url ( ) , self . reverts . id )
if self . reverted :
text += '(reverted in <a href="%s">%s</a>)<br/>' % ( self . reverted . get_absolute_url ( ) , self . reverted . id )
return text
|
def comments_2(self, value=None):
    """Set IDD Field `comments_2`.

    Args:
        value (str): value for IDD Field `comments_2`. If ``value`` is
            None it is treated as a missing value and stored unchecked.

    Raises:
        ValueError: if `value` cannot be converted to str or contains a comma
    """
    if value is None:
        self._comments_2 = None
        return
    try:
        text = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str '
                         'for field `comments_2`'.format(value))
    # Commas are field separators in IDF output, so they are forbidden here.
    if ',' in text:
        raise ValueError('value should not contain a comma '
                         'for field `comments_2`')
    self._comments_2 = text
|
def generate_cart_upload_redirect_url(self, **kwargs):
    """Build a PayPal cart-upload redirect URL.

    Produces e.g.::

        https://www.sandbox.paypal.com/webscr?cmd=_cart&upload=1&...

    Requires the ``business``, ``item_name_1``, ``amount_1`` and
    ``quantity_1`` keyword arguments.
    """
    self._check_required(('business', 'item_name_1', 'amount_1', 'quantity_1'), **kwargs)
    base = "%s?cmd=_cart&upload=1" % self.config.PAYPAL_URL_BASE
    query = urlencode(self._encode_utf8(**kwargs))
    return "%s&%s" % (base, query)
|
def _move_cursor_to_line(self, line):
    """Move the cursor to the given line, padding the document if needed."""
    last_line = self._text_edit.document().blockCount() - 1
    cursor = self._cursor
    cursor.clearSelection()
    cursor.movePosition(cursor.End)
    # Append newlines when the target line doesn't exist yet; a negative
    # or zero difference yields an empty string and no insertion.
    padding = '\n' * (line - last_line)
    if padding:
        cursor.insertText(padding)
    cursor.movePosition(cursor.Start)
    cursor.movePosition(cursor.Down, cursor.MoveAnchor, line)
    self._last_cursor_pos = cursor.position()
|
def handling_exceptions(self):
    """Perform proper exception handling.

    Context-manager body: yields to the wrapped block and converts any
    escaping exception into a registered error instead of letting it
    propagate (only KeyboardInterrupt is spared the traceback print).
    """
    try:
        if self.using_jobs:
            # Translate broken-process-pool failures while jobs are in flight.
            with handling_broken_process_pool():
                yield
        else:
            yield
    except SystemExit as err:
        # Record the requested exit code rather than exiting here.
        self.register_error(err.code)
    except BaseException as err:
        if isinstance(err, CoconutException):
            logger.display_exc()
        elif not isinstance(err, KeyboardInterrupt):
            traceback.print_exc()
            printerr(report_this_text)
        # NOTE(review): the error is registered even for KeyboardInterrupt —
        # presumably intentional so interrupts still mark the run as failed.
        self.register_error(errmsg=err.__class__.__name__)
|
def add_log_file(logger, log_file, global_log_file=False):
    """Attach a file handler writing to ``log_file``.

    Parameters
    ----------
    logger : obj:`logging.Logger`
        The logger to attach the handler to.
    log_file : obj:`str`
        Path of the log file to log to.
    global_log_file : obj:`bool`
        When True, register ``log_file`` on the root logger instead of
        on ``logger``.
    """
    if global_log_file:
        add_root_log_file(log_file)
        return
    handler = logging.FileHandler(log_file)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(name)-10s %(levelname)-8s %(message)s',
        datefmt='%m-%d %H:%M:%S'))
    logger.addHandler(handler)
|
def flush(self, key=None):
    """Flush the cache.

    If I{key} is specified, only that item is flushed (missing keys are
    ignored). Otherwise the entire cache is cleared and the next cleaning
    deadline is rescheduled.

    @param key: the key to flush
    @type key: (dns.name.Name, int, int) tuple or None
    """
    if key is None:
        # Full flush: reset storage and push back the cleaning deadline.
        self.data = {}
        self.next_cleaning = time.time() + self.cleaning_interval
    elif key in self.data:
        del self.data[key]
|
def Convert(self, metadata, conn, token=None):
    """Convert a NetworkConnection into a one-element ExportedNetworkConnection list."""
    exported = ExportedNetworkConnection(
        metadata=metadata,
        family=conn.family,
        type=conn.type,
        local_address=conn.local_address,
        remote_address=conn.remote_address,
        state=conn.state,
        pid=conn.pid,
        ctime=conn.ctime,
    )
    return [exported]
|
def frompil(cls, image, compression=Compression.PACK_BITS):
    """Create a new PSD document from a PIL Image.

    :param image: PIL Image object.
    :param compression: ImageData compression option. See
        :py:class:`~psd_tools.constants.Compression`.
    :return: A :py:class:`~psd_tools.api.psd_image.PSDImage` object.
    """
    header = cls._make_header(image.mode, image.size)
    # TODO: add default metadata; perhaps make this a smart object.
    image_data = ImageData(compression=compression)
    channels = [channel.tobytes() for channel in image.split()]
    image_data.set_data(channels, header)
    psd = PSD(header=header, image_data=image_data,
              image_resources=ImageResources.new())
    return cls(psd)
|
def forwards(self, orm):
    """Propagate each document's published flag onto all of its titles."""
    documents = orm['document_library.Document'].objects.all()
    for document in documents:
        for title in document.documenttitle_set.all():
            title.is_published = document.is_published
            title.save()
|
def update(self, activity_sid=values.unset, attributes=values.unset, friendly_name=values.unset, reject_pending_reservations=values.unset):
    """Update the WorkerInstance.

    All parameters default to ``values.unset`` and are forwarded to the
    proxy, which only sends the ones that were actually provided.

    :param unicode activity_sid: The activity_sid
    :param unicode attributes: The attributes
    :param unicode friendly_name: The friendly_name
    :param bool reject_pending_reservations: The reject_pending_reservations
    :returns: Updated WorkerInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance
    """
    return self._proxy.update(
        activity_sid=activity_sid,
        attributes=attributes,
        friendly_name=friendly_name,
        reject_pending_reservations=reject_pending_reservations,
    )
|
def setFontUnderline(self, state):
    """Set whether or not this editor's current font is underlined.

    :param state: <bool>
    """
    updated = self.currentFont()
    updated.setUnderline(state)
    self.setCurrentFont(updated)
|
def config_find_lines(regex, source='running'):
    r'''.. versionadded:: 2019.2.0

    Return the configuration lines that match ``regex``, read from the
    interrogated network device.

    regex
        The regular expression to match the configuration lines against.

    source: ``running``
        The configuration type to retrieve from the network device.
        Available options: ``running``, ``startup``, ``candidate``.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm.config_find_lines '^interface Ethernet1\d'
    '''
    device_config = __salt__['net.config'](source=source)['out'][source]
    return __salt__['ciscoconfparse.find_lines'](config=device_config, regex=regex)
|
def compare_vectorized(self, comp_func, labels_left, labels_right, *args, **kwargs):
    """Compute the similarity between values with a callable.

    Registers a comparison based on a custom function/callable; the
    callable receives numpy.ndarray's as its first two arguments.

    Example
    -------
    >>> comp = recordlinkage.Compare()
    >>> comp.compare_vectorized(custom_callable, 'first_name', 'name')
    >>> comp.compare(PAIRS, DATAFRAME1, DATAFRAME2)

    Parameters
    ----------
    comp_func : function
        A built-in or user-defined comparison function accepting
        numpy.ndarray's as its first two arguments.
    labels_left : label, pandas.Series, pandas.DataFrame
        The labels, Series or DataFrame to compare.
    labels_right : label, pandas.Series, pandas.DataFrame
        The labels, Series or DataFrame to compare.
    *args :
        Additional arguments passed to comp_func.
    **kwargs :
        Additional keyword arguments passed to comp_func
        (keyword 'label' is reserved).
    label : (list of) label(s)
        The name of the feature and of the resulting column; keyword-only
        and never forwarded to comp_func.
    """
    label = kwargs.pop('label', None)
    # Tuples are normalized to lists for the feature constructor.
    labels_left = list(labels_left) if isinstance(labels_left, tuple) else labels_left
    labels_right = list(labels_right) if isinstance(labels_right, tuple) else labels_right
    feature = BaseCompareFeature(labels_left, labels_right, args, kwargs, label=label)
    feature._f_compare_vectorized = comp_func
    self.add(feature)
|
def set_logging_settings(profile, setting, value, store='local'):
    '''Configure logging settings for the Windows firewall.

    Args:
        profile (str):
            Firewall profile to configure: ``domain``, ``public`` or
            ``private``.
        setting (str):
            Logging setting to configure: ``allowedconnections``,
            ``droppedconnections``, ``filename`` or ``maxfilesize``.
        value (str):
            Value to apply. For ``allowedconnections`` / ``droppedconnections``:
            ``enable``, ``disable`` or ``notconfigured``. For ``filename``:
            full path of the firewall log file, or ``notconfigured``. For
            ``maxfilesize``: 1-32767 (Kb), or ``notconfigured``.
        store (str):
            ``local`` (default) for the local firewall policy or ``lgpo``
            for the policy defined by local group policy.

    Returns:
        bool: ``True`` if successful

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect
    '''
    profile_l = profile.lower()
    setting_l = setting.lower()
    if profile_l not in ('domain', 'public', 'private'):
        raise ValueError('Incorrect profile: {0}'.format(profile))
    if setting_l not in ('allowedconnections', 'droppedconnections', 'filename', 'maxfilesize'):
        raise ValueError('Incorrect setting: {0}'.format(setting))
    if setting_l in ('allowedconnections', 'droppedconnections') and \
            value.lower() not in ('enable', 'disable', 'notconfigured'):
        raise ValueError('Incorrect value: {0}'.format(value))
    # TODO: consider validating `filename` paths, e.g. per
    # https://stackoverflow.com/questions/9532499/check-whether-a-path-is-valid-in-python-without-creating-a-file-at-the-paths-ta
    if setting_l == 'maxfilesize' and value.lower() != 'notconfigured':
        # Must be a number between 1 and 32767 (Kb).
        try:
            size = int(value)
        except ValueError:
            raise ValueError('Incorrect value: {0}'.format(value))
        if not 1 <= size <= 32767:
            raise ValueError('Incorrect value: {0}'.format(value))
    results = _netsh_command(
        command='set {0}profile logging {1} {2}'.format(profile, setting, value),
        store=store)
    # A successful run returns an empty list; anything else is an error.
    if results:
        raise CommandExecutionError('An error occurred: {0}'.format(results))
    return True
|
def get_version(self, service_id, version_number):
    """Fetch a single version of the given service."""
    endpoint = "/service/%s/version/%d" % (service_id, version_number)
    return FastlyVersion(self, self._fetch(endpoint))
|
def DumpArtifactsToYaml(self, sort_by_os=True):
    """Serialize this registry's artifacts into one YAML string."""
    artifact_list = self.GetArtifacts()
    if not sort_by_os:
        return "---\n\n".join(x.ToYaml() for x in artifact_list)
    # Group per-OS so the output is easy to split later.
    yaml_list = []
    for os_name in rdf_artifacts.Artifact.SUPPORTED_OS_LIST:
        single_os = sorted(
            (a for a in artifact_list if a.supported_os == [os_name]),
            key=lambda x: x.name)
        # Emit knowledge-base providers first, then the rest, within each OS.
        yaml_list.extend(x.ToYaml() for x in single_os if x.provides)
        yaml_list.extend(x.ToYaml() for x in single_os if not x.provides)
        artifact_list = artifact_list.difference(single_os)
    # Whatever didn't fall into a single-OS bucket.
    yaml_list.extend(x.ToYaml() for x in artifact_list)
    return "---\n\n".join(yaml_list)
|
def add_uppercase(table):
    """Return a copy of ``table`` extended with capitalized variants.

    Every key/value pair gains a companion produced by ``str.capitalize``
    applied to both key and value; the input mapping is left unchanged.

    >>> print("а" in add_uppercase({"а": "a"}))
    True
    >>> print(add_uppercase({"а": "a"})["а"] == "a")
    True
    >>> print("А" in add_uppercase({"а": "a"}))
    True
    >>> print(add_uppercase({"а": "a"})["А"] == "A")
    True
    >>> print(len(add_uppercase({"а": "a"}).keys()))
    2
    >>> print("Аа" in add_uppercase({"аа": "aa"}))
    True
    >>> print(add_uppercase({"аа": "aa"})["Аа"] == "Aa")
    True
    """
    extended = dict(table)
    extended.update({key.capitalize(): value.capitalize()
                     for key, value in table.items()})
    return extended
|
def to_routing_header(params):
    """Return a routing header string for the given request parameters.

    Args:
        params (Mapping[str, Any]): request parameters used for routing.

    Returns:
        str: The routing header string.
    """
    # Per Google API policy (go/api-url-encoding), '/' is not encoded.
    if sys.version_info[0] < 3:
        # Python 2's urlencode has no "safe" parameter; undo the escaping.
        return urlencode(params).replace("%2F", "/")
    return urlencode(params, safe="/")
|
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
    """Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.

    Runs minimap2 on the (optionally split) fastq input and pipes the
    output straight into the BAM conversion command from postalign.tobam_cl.

    NOTE(review): the minimap2 command template below is expanded with
    ``locals()``, so the local variable names (preset, rg_info, num_cores,
    index_file, fastq_file, pair_file) are load-bearing — do not rename.
    """
    # "-cumi" marks BAMs carrying collapsed UMI information.
    umi_ext = "-cumi" if "umi_bam" in data else ""
    out_file = os.path.join(align_dir, "{0}-sort{1}.bam".format(dd.get_sample_name(data), umi_ext))
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    rg_info = novoalign.get_rg_info(names)
    preset = "sr"  # minimap2 short-read preset
    pair_file = pair_file if pair_file else ""
    if data.get("align_split"):
        # Split alignment: align this chunk now; chunks are combined into
        # final_file later.
        final_file = out_file
        out_file, data = alignprep.setup_combine(final_file, data)
        fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
    else:
        final_file = None
    if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
            index_file = None
            # Skip trying to use indices now as they provide only slight speed-ups
            # and give inconsistent outputs in BAM headers.
            # If a single index is present, index_dir points to that:
            # if index_dir and os.path.isfile(index_dir):
            #     index_dir = os.path.dirname(index_dir)
            #     index_file = os.path.join(index_dir, "%s-%s.mmi" % (dd.get_genome_build(data), preset))
            if not index_file or not os.path.exists(index_file):
                # Fall back to aligning directly against the reference FASTA.
                index_file = dd.get_ref_file(data)
            cmd = ("minimap2 -a -x {preset} -R '{rg_info}' -t {num_cores} {index_file} "
                   "{fastq_file} {pair_file} | ")
            do.run(cmd.format(**locals()) + tobam_cl, "minimap2 alignment: %s" % dd.get_sample_name(data))
    data["work_bam"] = out_file
    return data
|
def block_uid ( value : Union [ str , BlockUID , None ] ) -> BlockUID :
    """Coerce ``value`` into a :class:`BlockUID`.

    Accepts an existing BlockUID (returned unchanged), a string
    representation, or ``None`` (mapped to the empty BlockUID).

    :param value: Value to convert
    :return: the corresponding BlockUID
    :raises TypeError: if ``value`` has an unsupported type
    """
    if value is None:
        return BlockUID.empty()
    if isinstance(value, BlockUID):
        return value
    if isinstance(value, str):
        return BlockUID.from_str(value)
    raise TypeError("Cannot convert {0} to BlockUID".format(type(value)))
|
def _build_table ( self ) -> Dict [ State , Tuple [ Multiplex , ... ] ] :
    """Build the lookup table mapping each state to its active multiplexes."""
    table: Dict[State, Tuple[Multiplex, ...]] = {}
    graph = self.influence_graph
    for state in graph.all_states():
        active = [mux for mux in graph.multiplexes if mux.is_active(state)]
        table[state] = tuple(active)
    return table
|
def simple ( self ) :
    '''A string representation with only one period delimiter.'''
    # Guard-clause form: first non-zero component wins.
    if self._days:
        return '%sD' % self.totaldays
    if self.months:
        return '%sM' % self._months
    if self.years:
        return '%sY' % self.years
    return ''
|
def add_category ( self , category ) :
    """Add a category to the categories assigned to this message .

    : param category : the Category object to append
    : rtype : None
    """
    # _ensure_append returns the (possibly newly created) list with the
    # item appended; store it back on the instance.
    self . _categories = self . _ensure_append ( category , self . _categories )
|
def get_object_type_by_name ( object_type_name ) :
    """Look up the object class that handles the given object type name.

    Use the returned type to create new instances.

    :param object_type_name: member of TYPES (a bytes object-type tag)
    :return: type suitable to handle the given object type name
    :raise ValueError: in case ``object_type_name`` is unknown
    """
    # Imports are deferred so each submodule is only loaded when needed.
    if object_type_name == b"tree":
        from . import tree
        return tree.Tree
    if object_type_name == b"blob":
        from . import blob
        return blob.Blob
    if object_type_name == b"commit":
        from . import commit
        return commit.Commit
    if object_type_name == b"tag":
        from . import tag
        return tag.TagObject
    raise ValueError("Cannot handle unknown object type: %s" % object_type_name)
|
def iter_trees ( nexson , nexson_version = None ) :
    """Generator over all trees in all trees elements.

    Yields a tuple of 3 items:
      trees element ID,
      tree ID,
      the tree obj
    """
    if nexson_version is None:
        nexson_version = detect_nexson_version(nexson)
    nexml = get_nexml_el(nexson)
    if _is_by_id_hbf(nexson_version):
        # "by id" honeycomb form: honor the recorded element order when it is
        # complete, otherwise fall back to sorted IDs for determinism.
        groups_by_id = nexml['treesById']
        group_ids = nexml.get('^ot:treesElementOrder', [])
        if len(group_ids) < len(groups_by_id):
            group_ids = sorted(groups_by_id.keys())
        for group_id in group_ids:
            group = groups_by_id[group_id]
            trees_by_id = group['treeById']
            tree_ids = group.get('^ot:treeElementOrder', [])
            if len(tree_ids) < len(trees_by_id):
                tree_ids = sorted(trees_by_id.keys())
            for t_id in tree_ids:
                yield group_id, t_id, trees_by_id[t_id]
    else:
        # Direct (list-based) form.
        for group in nexml.get('trees', []):
            for tree in group.get('tree', []):
                yield group['@id'], tree['@id'], tree
|
def getContinuousSetByName ( self , name ) :
    """Return the ContinuousSet registered under ``name``.

    :raises exceptions.ContinuousSetNameNotFoundException: if no
        continuous set with that name exists.
    """
    mapping = self._continuousSetNameMap
    if name in mapping:
        return mapping[name]
    raise exceptions.ContinuousSetNameNotFoundException(name)
|
def dump ( self , sender = None , ** kwargs ) :
    '''Retrieve raw email dump for this bounce.

    :param sender: A :class:`BounceDump` object to get dump with.
        Defaults to `None`.
    :param \\*\\*kwargs: Keyword arguments passed to
        :func:`requests.request`.
    '''
    if sender is None:
        own = self._sender
        if own is None:
            # No sender configured anywhere; use the module-level default.
            sender = _default_bounce_dump
        else:
            sender = BounceDump(api_key=own.api_key, test=own.test, secure=own.secure)
    return sender.get(self.id, **kwargs)
|
def get_nameid_data ( request , key = None ) :
    """Gets the NameID Data of the Logout Request

    : param request : Logout Request Message
    : type request : string | DOMDocument
    : param key : The SP key , required to decrypt an EncryptedID
    : type key : string
    : return : Name ID Data ( Value , Format , NameQualifier , SPNameQualifier )
    : rtype : dict
    """
    if isinstance ( request , etree . _Element ) :
        elem = request
    else :
        if isinstance ( request , Document ) :
            # minidom Document: serialize so it can be re-parsed safely below.
            request = request . toxml ( )
        # forbid_dtd guards against DTD/entity-expansion attacks on the XML.
        elem = fromstring ( request , forbid_dtd = True )
    name_id = None
    encrypted_entries = OneLogin_Saml2_Utils . query ( elem , '/samlp:LogoutRequest/saml:EncryptedID' )
    if len ( encrypted_entries ) == 1 :
        # Encrypted NameID: decryption requires the SP private key.
        if key is None :
            raise OneLogin_Saml2_Error ( 'Private Key is required in order to decrypt the NameID, check settings' , OneLogin_Saml2_Error . PRIVATE_KEY_NOT_FOUND )
        encrypted_data_nodes = OneLogin_Saml2_Utils . query ( elem , '/samlp:LogoutRequest/saml:EncryptedID/xenc:EncryptedData' )
        if len ( encrypted_data_nodes ) == 1 :
            encrypted_data = encrypted_data_nodes [ 0 ]
            name_id = OneLogin_Saml2_Utils . decrypt_element ( encrypted_data , key )
    else :
        # Plain-text NameID.
        entries = OneLogin_Saml2_Utils . query ( elem , '/samlp:LogoutRequest/saml:NameID' )
        if len ( entries ) == 1 :
            name_id = entries [ 0 ]
    if name_id is None :
        raise OneLogin_Saml2_ValidationError ( 'NameID not found in the Logout Request' , OneLogin_Saml2_ValidationError . NO_NAMEID )
    # Collect the NameID text plus whichever qualifier attributes are present.
    name_id_data = { 'Value' : OneLogin_Saml2_Utils . element_text ( name_id ) }
    for attr in [ 'Format' , 'SPNameQualifier' , 'NameQualifier' ] :
        if attr in name_id . attrib . keys ( ) :
            name_id_data [ attr ] = name_id . attrib [ attr ]
    return name_id_data
|
def replace_multi ( self , keys , ttl = 0 , format = None , persist_to = 0 , replicate_to = 0 ) :
    """Replace multiple keys. Multi variant of :meth:`replace`.

    .. seealso:: :meth:`replace`, :meth:`upsert_multi`, :meth:`upsert`
    """
    options = dict(ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
    return _Base.replace_multi(self, keys, **options)
|
def cache ( opts , serial ) :
    '''Return the loader for the cache modules.'''
    # Expose the opts and a serializer-carrying context to the loaded modules.
    pack = {'__opts__': opts, '__context__': {'serial': serial}}
    return LazyLoader(_module_dirs(opts, 'cache', 'cache'), opts, tag='cache', pack=pack)
|
def block_process_call ( self , address , register , value ) :
    """SMBus Block Write - Block Read Process Call.

    Selects a device register (through the Comm byte), sends 1 to 31 bytes
    of data to it, and reads 1 to 31 bytes of data in return. Introduced in
    Revision 2.0 of the SMBus specification.

    S Addr Wr [A] Comm [A] Count [A] Data [A] ...
    S Addr Rd [A] [Count] A [Data] ... A P

    Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
    """
    bus = self.smbus
    return bus.block_process_call(address, register, value)
|
def mkdir_command ( endpoint_plus_path ) :
    """Executor for `globus mkdir`"""
    endpoint_id, path = endpoint_plus_path
    client = get_client()
    # Make sure the endpoint is activated before attempting the operation.
    autoactivate(client, endpoint_id, if_expires_in=60)
    response = client.operation_mkdir(endpoint_id, path=path)
    formatted_print(response, text_format=FORMAT_TEXT_RAW, response_key="message")
|
def filter_attribute_value_assertions ( ava , attribute_restrictions = None ) :
    """Weed out attributes and values according to the attribute restrictions.

    If filtering leaves an attribute without values, the attribute is removed
    from the assertion.

    :param ava: The incoming attribute value assertion (dictionary)
    :param attribute_restrictions: The rules that govern which attributes
        and values are allowed (dictionary); falsy means no filtering.
    :return: The modified attribute value assertion
    """
    if not attribute_restrictions:
        return ava
    # Iterate over a snapshot since we delete keys while looping.
    for attribute, values in list(ava.items()):
        key = attribute.lower()
        if key not in attribute_restrictions:
            del ava[attribute]
            continue
        restrictions = attribute_restrictions[key]
        if restrictions is None:
            # No value restrictions: keep the attribute untouched.
            continue
        if isinstance(values, six.string_types):
            values = [values]
        allowed = [val for restriction in restrictions
                   for val in values if restriction.match(val)]
        if allowed:
            ava[attribute] = list(set(allowed))
        else:
            del ava[attribute]
    return ava
|
def reflect_left ( self , value ) :
    """Reflect ``value`` only when it lies above ``self``."""
    return self.reflect(value) if value > self else value
|
def add_to_configs ( self , configs ) :
    """Append measurement configurations to the stored configurations.

    Parameters
    ----------
    configs : list or numpy.ndarray
        list or array of configurations

    Returns
    -------
    configs : Kx4 numpy.ndarray
        array holding all configurations of this instance, or None when
        ``configs`` is empty
    """
    if len(configs) == 0:
        return None
    incoming = np.atleast_2d(configs)
    if self.configs is None:
        self.configs = incoming
    else:
        self.configs = np.vstack((self.configs, incoming))
    return self.configs
|
def uses_base_tear_down ( cls ) :
    """Checks whether the tearDown method is the BasePlug implementation."""
    # Compare code objects: identical code means the subclass never overrode it.
    own_code = getattr(cls, 'tearDown').__code__
    base_code = getattr(BasePlug, 'tearDown').__code__
    return own_code is base_code
|
def _message ( self , beacon_config , invert_hello = False ) :
    """Overridden :meth:`.WBeaconGouverneurMessenger._message` method. Appends
    encoded host group names to requests and responses.

    :param beacon_config: beacon configuration
    :return: bytes
    """
    base = WBeaconGouverneurMessenger._message(self, beacon_config, invert_hello=invert_hello)
    hostgroups = self._message_hostgroup_generate()
    if len(hostgroups) > 0:
        # Append the splitter plus the encoded host groups.
        base += WHostgroupBeaconMessenger.__message_groups_splitter__ + hostgroups
    return base
|
def schedule ( self , recipients = None , sender = None , priority = None ) :
    """Schedule the message for delivery.

    Puts message (and dispatches, if any) data into the DB.

    :param list|None recipients: recipient (or a list) or None.
        If `None`, Dispatches should be created before send using `prepare_dispatches()`.
    :param User|None sender: Django User model heir instance
    :param int|None priority: number describing message priority
    :return: a tuple with message model and a list of dispatch models.
    :rtype: tuple
    """
    effective_priority = self.priority if priority is None else priority
    self._message_model, self._dispatch_models = Message.create(
        self.get_alias(), self.get_context(),
        recipients=recipients, sender=sender, priority=effective_priority)
    return self._message_model, self._dispatch_models
|
def _install_one ( repo_url , branch , destination , commit = '' , patches = None , exclude_modules = None , include_modules = None , base = False , work_directory = '' ) :
    """Install a third party odoo add-on.

    :param string repo_url: url of the repo that contains the patch.
    :param string branch: name of the branch to checkout.
    :param string destination: the folder where the add-on should end up at.
    :param string commit: optional commit rev to checkout to; if given, it
        takes precedence over the branch.
    :param string work_directory: the path to the directory of the yaml file.
    :param list patches: optional list of patches to apply.
    """
    patch_objects = []
    for patch in patches or []:
        # File-based patches need the yaml file's directory to resolve paths.
        if 'file' in patch:
            patch_objects.append(core.FilePatch(file=patch['file'], work_directory=work_directory))
        else:
            patch_objects.append(core.Patch(**patch))
    addon_class = core.Base if base else core.Addon
    addon = addon_class(repo_url, branch, commit=commit, patches=patch_objects,
                        exclude_modules=exclude_modules, include_modules=include_modules)
    addon.install(destination)
|
def csv_writer ( parser , keep , extract , args ) :
    """Writes the genotype data in CSV format .

    : param parser : genotype file parser providing samples and markers
    : param keep : samples to keep ( used to build the selection mask )
    : param extract : markers to extract
    : param args : parsed CLI arguments ( uses ` ` output ` ` and ` ` maf ` ` )
    """
    # The output
    # NOTE(review): when args.output == "-", sys.stdout is closed in the
    # finally block below -- confirm this is intended.
    output = sys . stdout if args . output == "-" else open ( args . output , "w" )
    try :
        # Getting the samples
        samples = np . array ( parser . get_samples ( ) , dtype = str )
        k = _get_sample_select ( samples = samples , keep = keep )
        # Writing the CSV header
        print ( "sample_id" , "variant_id" , "chromosome" , "position" , "reference" , "coded" , "dosage" , "hard_call" , sep = "," , file = output )
        # The data generator
        generator = _get_generator ( parser = parser , extract = extract , keep = k , check_maf = args . maf )
        # The number of markers extracted
        nb_extracted = 0
        for data in generator :
            # Keeping only the required genotypes
            genotypes = data . genotypes
            # The hard call mapping : dosage rounded to 0 / 1 / 2 copies of the coded allele
            hard_call_mapping = { 0 : "{ref}/{ref}" . format ( ref = data . reference ) , 1 : "{ref}/{alt}" . format ( ref = data . reference , alt = data . coded ) , 2 : "{alt}/{alt}" . format ( alt = data . coded ) , }
            for sample , geno in zip ( samples [ k ] , genotypes ) :
                # Is the genotype missing
                is_missing = np . isnan ( geno )
                # Hard coding ( NaN values are empty string )
                hard_coded = None
                if is_missing :
                    geno = ""
                    hard_coded = ""
                else :
                    hard_coded = hard_call_mapping [ int ( round ( geno , 0 ) ) ]
                print ( sample , data . variant . name , data . variant . chrom , data . variant . pos , data . reference , data . coded , geno , hard_coded , sep = "," , file = output )
            nb_extracted += 1
        if nb_extracted == 0 :
            logger . warning ( "No markers matched the extract list" )
    finally :
        output . close ( )
|
def build_includes ( cls , include_packages ) :
    """cx _ freeze doesn ' t support the star ( * ) method of sub - module inclusion , so all submodules must be included
    explicitly .

    Example ( From SaltStack 2014.7 ) :
      salt
      salt . fileserver
      salt . fileserver . gitfs
      salt . fileserver . hgfs
      salt . fileserver . minionfs
      salt . fileserver . roots
      etc . . .

    : param include _ packages : List of package references to recurse for subpackages
    : return : set of fully - qualified module names to include
    """
    includes , package_root_paths = cls . _split_packages ( include_packages )
    for package_path , package_name in six . iteritems ( package_root_paths ) :
        includes . add ( package_name )
        # NOTE(review): the dot in '__init__.py' is unescaped in this regex, so
        # it also matches e.g. '__init__Xpy' -- harmless in practice, but
        # r'__init__\.py' would be precise.
        if re . search ( r'__init__.py.*$' , package_path ) : # Looks like a package . Walk the directory and see if there are more .
            package_modules = set ( )
            for root , dirs , files in os . walk ( os . path . dirname ( package_path ) ) :
                if '__init__.py' in files :
                    # Record the package directory and each of its modules.
                    package_modules . add ( root )
                    for module in [ f for f in files if f != u"__init__.py" and f . endswith ( '.py' ) ] :
                        package_modules . add ( os . path . join ( root , module ) )
            # Convert absolute file paths into dotted module names relative to
            # the common parent directory.
            common_prefix = os . path . commonprefix ( package_modules )
            common_dir = os . path . dirname ( common_prefix )
            package_tails = set ( [ f [ len ( common_dir ) + len ( os . sep ) : ] for f in package_modules ] )
            package_names = set ( [ os . path . splitext ( tail ) [ 0 ] . replace ( os . sep , '.' ) for tail in package_tails ] )
            includes |= package_names
    return includes
|
def _create_model_matrices ( self ) :
"""Creates model matrices / vectors
Returns
None ( changes model attributes )"""
|
self . model_Y = np . array ( self . data [ self . max_lag : self . data . shape [ 0 ] ] )
self . model_scores = np . zeros ( self . model_Y . shape [ 0 ] )
|
def dict_of_lists_add ( dictionary , key , value ) : # type : ( DictUpperBound , Any , Any ) - > None
    """Add value to a list in a dictionary by key.

    Args:
        dictionary (DictUpperBound): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to add to list in dictionary

    Returns:
        None
    """
    # setdefault creates the list on first use, then we append in place.
    dictionary.setdefault(key, list()).append(value)
|
def convert ( json_input , build_direction = "LEFT_TO_RIGHT" , table_attributes = None ) :
    """Convert JSON to HTML Table format.

    Parameters
    ----------
    json_input : dict
        JSON object to convert into HTML.
    build_direction : {"TOP_TO_BOTTOM", "LEFT_TO_RIGHT"}
        Build direction of the table. With ``"TOP_TO_BOTTOM"`` child objects
        are appended below parents (subsequent row); with ``"LEFT_TO_RIGHT"``
        they are appended to the right of parents (subsequent column).
        Default is ``"LEFT_TO_RIGHT"``.
    table_attributes : dict, optional
        Mapping of ``(key, value)`` pairs added as attributes to the table
        tag, each rendered as ``key="value"``. For example ``{"border": 1}``
        yields an opening tag of ``<table border="1">``. Default is ``None``.

    Returns
    -------
    str
        String of converted HTML, e.g.::

            >>> html = convert({"key": "value"}, build_direction="TOP_TO_BOTTOM",
            ...                table_attributes={"border": 1})
            >>> print(html)
            "<table border="1"><tr><th>key</th><td>value</td></tr></table>"
    """
    converter = JsonConverter(build_direction=build_direction,
                              table_attributes=table_attributes)
    return converter.convert(json_input)
|
def raise_error ( self , exception_type ,  # type: Type[Exception]
                 message  # type: Text
                 ) :  # type: (...) -> NoReturn
    """Raise an exception annotated with the current parser state."""
    raise exception_type('{} at {}'.format(message, repr(self)))
|
def list ( self , identity = values . unset , limit = None , page_size = None ) :
    """Lists MemberInstance records from the API as a list.

    Unlike stream(), this operation is eager and will load `limit` records
    into memory before returning.

    :param unicode identity: The `identity` value of the resources to read
    :param int limit: Upper limit for the number of records to return. list()
        guarantees never to return more than limit. Default is no limit.
    :param int page_size: Number of records to fetch per request; when not set
        the default of 50 records is used. If no page_size is defined but a
        limit is, list() will attempt to read the limit with the most
        efficient page size, i.e. min(limit, 1000).
    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.chat.v2.service.channel.member.MemberInstance]
    """
    # Materialize the lazy stream eagerly.
    return [record for record in self.stream(identity=identity, limit=limit, page_size=page_size)]
|
def create_snappy_message ( payloads , key = None ) :
    """Construct a Snappy Message containing multiple Messages .

    The given payloads will be encoded , compressed , and sent as a single atomic
    message to Kafka .

    Arguments :
        payloads : list of ( payload , key ) tuples -- each pair becomes one inner
            message with its own per - message key .
            NOTE ( review ) : the original docstring said ` ` list ( bytes ) ` ` , but the
            loop below unpacks two - tuples -- confirm against callers .
        key : bytes , a key used for partition routing of the wrapper ( optional )
    """
    message_set = KafkaProtocol . _encode_message_set ( [ create_message ( payload , pl_key ) for payload , pl_key in payloads ] )
    snapped = snappy_encode ( message_set )
    # Mark the wrapper message as snappy-compressed via its attribute codec bits.
    codec = ATTRIBUTE_CODEC_MASK & CODEC_SNAPPY
    return kafka . structs . Message ( 0 , 0x00 | codec , key , snapped )
|
def _direct_nbody_force ( q , m , t , pot , softening , softening_args ) :
    """Calculate the direct N-body force on each particle .

    : param q : sequence of particle positions ( each a length - ` ` dim ` ` array )
    : param m : sequence of particle masses
    : param t : current time ( passed through to the external potential )
    : param pot : optional external potential ; ` ` None ` ` to skip
    : param softening : softening kernel , called as ` ` softening ( r , * softening_args ) ` `
    : param softening_args : extra arguments for the softening kernel
    : return : list of per - particle force vectors
    """
    # First do the particles
    # Calculate all the distances
    nq = len ( q )
    dim = len ( q [ 0 ] )
    dist_vec = nu . zeros ( ( nq , nq , dim ) )
    dist = nu . zeros ( ( nq , nq ) )
    for ii in range ( nq ) :
        for jj in range ( ii + 1 , nq ) :
            # Separation vectors are antisymmetric ; fill both triangles at once .
            dist_vec [ ii , jj , : ] = q [ jj ] - q [ ii ]
            dist_vec [ jj , ii , : ] = - dist_vec [ ii , jj , : ]
            dist [ ii , jj ] = linalg . norm ( dist_vec [ ii , jj , : ] )
            dist [ jj , ii ] = dist [ ii , jj ]
    # Calculate all the forces
    force = [ ]
    for ii in range ( nq ) :
        thisforce = nu . zeros ( dim )
        for jj in range ( nq ) :
            if ii == jj :
                continue
            # Softened pairwise attraction , directed along the separation vector .
            thisforce += m [ jj ] * softening ( dist [ ii , jj ] , * softening_args ) / dist [ ii , jj ] * dist_vec [ ii , jj , : ]
        force . append ( thisforce )
    # Then add the external force
    if pot is None :
        return force
    for ii in range ( nq ) :
        force [ ii ] += _external_force ( q [ ii ] , t , pot )
    return force
|
def filenamify ( title ) :
    """Convert a string to something suitable as a file name. E.g.

     Matlagning del 1 av 10 - Räksmörgås | SVT Play
    -> matlagning.del.1.av.10.-.raksmorgas.svt.play"""
    # Work on unicode text so the normalization below behaves predictably.
    title = ensure_unicode(title)
    # NFD splits each character into a base char plus combining marks, so
    # stripping non-ascii afterwards keeps the base characters.
    normalized = unicodedata.normalize('NFD', title)
    # Lowercase, trim surrounding whitespace, then drop everything that is
    # not an ascii letter/digit/space/dot/dash.
    cleaned = re.sub(r'[^a-z0-9 .-]', '', normalized.lower().strip())
    # Collapse whitespace runs into single dots, then tidy ".-." into "-".
    dotted = re.sub(r'\s+', '.', cleaned)
    return re.sub(r'\.-\.', '-', dotted)
|
async def create ( cls , user_id : Union [ int , str ] , is_active : bool = True , is_admin : bool = False , resource_policy : str = None , rate_limit : int = None , fields : Iterable [ str ] = None ) -> dict :
    '''Creates a new keypair with the given options .

    You need an admin privilege for this operation .

    : param user_id : owner of the new keypair ( integer or string ID )
    : param fields : keypair fields to return ; defaults to access / secret key
    : return : the ` ` create_keypair ` ` payload of the GraphQL response
    '''
    if fields is None :
        fields = ( 'access_key' , 'secret_key' )
    # The GraphQL variable declaration must match the Python value's type.
    uid_type = 'Int!' if isinstance ( user_id , int ) else 'String!'
    q = 'mutation($user_id: {0}, $input: KeyPairInput!) {{' . format ( uid_type ) + ' create_keypair(user_id: $user_id, props: $input) {' ' ok msg keypair { $fields }' ' }' '}'
    # Substitute the requested result fields into the query body.
    q = q . replace ( '$fields' , ' ' . join ( fields ) )
    variables = { 'user_id' : user_id , 'input' : { 'is_active' : is_active , 'is_admin' : is_admin , 'resource_policy' : resource_policy , 'rate_limit' : rate_limit , } , }
    rqst = Request ( cls . session , 'POST' , '/admin/graphql' )
    rqst . set_json ( { 'query' : q , 'variables' : variables , } )
    async with rqst . fetch ( ) as resp :
        data = await resp . json ( )
        return data [ 'create_keypair' ]
|
def main ( args = None ) :
    """Build and run parser .

    : param args : cli args from tests ( None means use sys . argv )
    : return : process exit code : 0 on success , 1 on handled errors
    """
    parser = argument_parser ( )
    args = parser . parse_args ( args )
    # If ' func ' isn ' t present , something is misconfigured above or no ( positional ) arg was given .
    if not hasattr ( args , 'func' ) :
        args = parser . parse_args ( [ 'help' ] )
        # show help
    # Convert argparse . Namespace into dict and clean it up .
    # We can then pass it directly to the helper function .
    kwargs = vars ( args )
    # handle the ' - - dev ' option ( flag or QUILT _ DEV _ MODE env var )
    if kwargs . pop ( 'dev' ) or os . environ . get ( 'QUILT_DEV_MODE' , '' ) . strip ( ) . lower ( ) == 'true' : # Enables CLI ctrl - c tracebacks , and whatever anyone else uses it for
        quilt . _DEV_MODE = True
    else : # Disables CLI ctrl - c tracebacks , etc .
        quilt . _DEV_MODE = False
    func = kwargs . pop ( 'func' )
    try :
        func ( ** kwargs )
        return 0
    except QuiltException as ex :
        # Known application errors : print the message , no traceback .
        print ( ex . message , file = sys . stderr )
        return 1
    except requests . exceptions . ConnectionError as ex :
        print ( "Failed to connect: %s" % ex , file = sys . stderr )
        return 1
|
def print ( self ) :
    """Print the results as a numbered table.

    >>> Results(['title'], [('Konosuba',), ('Oreimo',)]).print()
      #  title
      1  Konosuba
      2  Oreimo"""
    numbered_rows = ((index, *row) for index, row in enumerate(self.results, 1))
    print(tabulate(numbered_rows, headers=self.headers))
|
def create_criteria ( cls , query ) :
    """Return a criteria list from a dictionary containing a query.

    Query should be a dictionary, keyed by field name. If a value is a
    list, it will be expanded into multiple criteria as required.

    :return: list of criteria dictionaries, or None when empty
    """
    collected = []
    for field, value in query.items():
        if isinstance(value, list):
            # Expand list values recursively, one criterion per element.
            for item in value:
                collected.extend(cls.create_criteria({field: item}))
        else:
            collected.append({'criteria': {'field': field, 'value': value, }, })
    return collected or None
|
def _proxy ( self ) :
"""Generate an instance context for the instance , the context is capable of
performing various actions . All instance actions are proxied to the context
: returns : FormContext for this FormInstance
: rtype : twilio . rest . authy . v1 . form . FormContext"""
|
if self . _context is None :
self . _context = FormContext ( self . _version , form_type = self . _solution [ 'form_type' ] , )
return self . _context
|
def parent ( self ) :
    """A new URL with the last path segment removed and query/fragment cleared."""
    path = self.raw_path
    if not path or path == "/":
        # Nothing to strip; only clear query/fragment when present.
        if self.raw_fragment or self.raw_query_string:
            return URL(self._val._replace(query="", fragment=""), encoded=True)
        return self
    segments = path.split("/")
    trimmed = "/".join(segments[:-1])
    return URL(self._val._replace(path=trimmed, query="", fragment=""), encoded=True)
|
def partial_autocorrelation ( self , x , param = None ) :
    """Partial autocorrelation of ``x`` at the requested lags, delegated to
    tsfresh's ``feature_calculators.partial_autocorrelation``.

    The lag-k partial autocorrelation is the correlation of x_t and x_{t-k}
    adjusted for the intermediate values x_{t-1}, ..., x_{t-k+1}; it is
    commonly used to determine the lag of an AR process.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param param: contains dictionaries {"lag": val} with int val indicating
        the lag to be returned
    :type param: list
    :return: the value of this feature
    :rtype: float
    """
    if param is None:
        param = [{'lag': 3}, {'lag': 5}, {'lag': 6}]
    value = feature_calculators.partial_autocorrelation(x, param)
    logging.debug("partial autocorrelation by tsfresh calculated")
    return value
|
def suffix ( args ) :
    """%prog suffix fastqfile CAG
    Filter reads based on suffix ."""
    # NOTE: the docstring above doubles as the CLI usage text via OptionParser.
    p = OptionParser ( suffix . __doc__ )
    p . set_outfile ( )
    opts , args = p . parse_args ( args )
    if len ( args ) != 2 :
        sys . exit ( not p . print_help ( ) )
    fastqfile , sf = args
    fw = must_open ( opts . outfile , "w" )
    # Track totals so the fraction of selected reads can be reported.
    nreads = nselected = 0
    for rec in iter_fastq ( fastqfile ) :
        nreads += 1
        if rec is None :
            break
        if rec . seq . endswith ( sf ) :
            print ( rec , file = fw )
            nselected += 1
    logging . debug ( "Selected reads with suffix {0}: {1}" . format ( sf , percentage ( nselected , nreads ) ) )
|
def is_parseable ( self ) :
    """See if URL target is parseable for recursion."""
    # Directories and known-parseable content types qualify immediately.
    if self.is_directory() or self.content_type in self.ContentMimetypes:
        return True
    log.debug(LOG_CHECK, "URL with content type %r is not parseable.", self.content_type)
    return False
|
def _set_enhanced_voq_max_queue_depth ( self , v , load = False ) :
    """Setter method for enhanced _ voq _ max _ queue _ depth , mapped from YANG variable / telemetry / profile / enhanced _ voq _ max _ queue _ depth ( list )

    If this variable is read - only ( config : false ) in the
    source YANG file , then _ set _ enhanced _ voq _ max _ queue _ depth is considered as a private
    method . Backends looking to populate this variable should
    do so via calling thisObj . _ set _ enhanced _ voq _ max _ queue _ depth ( ) directly .

    NOTE : this is pyangbind auto - generated code ; do not hand - edit the
    YANGDynClass construction below .
    """
    # Allow values that carry their own coercion type ( pyangbind unions ) .
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    try :
        t = YANGDynClass ( v , base = YANGListType ( "name" , enhanced_voq_max_queue_depth . enhanced_voq_max_queue_depth , yang_name = "enhanced-voq-max-queue-depth" , rest_name = "enhanced-voq-max-queue-depth" , parent = self , is_container = 'list' , user_ordered = False , path_helper = self . _path_helper , yang_keys = 'name' , extensions = { u'tailf-common' : { u'cli-full-command' : None , u'cli-suppress-list-no' : None , u'callpoint' : u'EnhancedVoqMaxQueueDepthProfile' , u'info' : u'Enhanced VOQ max queue depth' } } ) , is_container = 'list' , yang_name = "enhanced-voq-max-queue-depth" , rest_name = "enhanced-voq-max-queue-depth" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'cli-full-command' : None , u'cli-suppress-list-no' : None , u'callpoint' : u'EnhancedVoqMaxQueueDepthProfile' , u'info' : u'Enhanced VOQ max queue depth' } } , namespace = 'urn:brocade.com:mgmt:brocade-telemetry' , defining_module = 'brocade-telemetry' , yang_type = 'list' , is_config = True )
    except ( TypeError , ValueError ) :
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError ( { 'error-string' : """enhanced_voq_max_queue_depth must be of a type compatible with list""" , 'defined-type' : "list" , 'generated-type' : """YANGDynClass(base=YANGListType("name",enhanced_voq_max_queue_depth.enhanced_voq_max_queue_depth, yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}), is_container='list', yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""" , } )
    self . __enhanced_voq_max_queue_depth = t
    if hasattr ( self , '_set' ) :
        self . _set ( )
|
def new ( cls , alias , cert ) :
    """Helper function to create a new TrustedCertEntry.

    :param str alias: The alias for the Trusted Cert Entry. Stored
        lowercased, because mixed-case aliases corrupt the keystore for
        Java Keytool and Keytool Explorer.
    :param str cert: The certificate, as a byte string.
    :returns: A loaded :class:`TrustedCertEntry` instance, ready to be
        placed in a keystore.
    """
    return cls(
        timestamp=int(time.time()) * 1000,
        alias=alias.lower(),
        cert=cert,
    )
|
def getDefaultUncertainty ( self , result = None ) :
    """Return the uncertainty value if the result falls within one of the
    uncertainty ranges specified for the originating service.

    :param result: value to evaluate; defaults to this analysis's result
    :return: the uncertainty (absolute, or computed from a percentage
        errorvalue), or None when not applicable
    """
    if result is None:
        result = self.getResult()
    ranges = self.getUncertainties()
    if not ranges:
        return None
    try:
        value = float(result)
    except (TypeError, ValueError):
        # Non-numeric results are treated as "in range" with no uncertainty.
        return None
    for entry in ranges:
        low = float(entry['intercept_min'])
        high = float(entry['intercept_max'])
        if not (low <= value <= high):
            continue
        raw = entry['errorvalue']
        if str(raw).strip().endswith('%'):
            # Percentage error: convert to an absolute uncertainty.
            try:
                pct = float(raw.replace('%', ''))
            except ValueError:
                return None
            return value / 100 * pct
        return float(raw)
    return None
|
def B3(formula):
    """Map a formula evaluation result onto three-valued logic.

    :param formula: a ``true``/``false`` instance, a Python bool, or a value
        matching a ``Boolean3`` member name or value
    :return: ``Boolean3.Top``, ``Boolean3.Bottom`` or ``Boolean3.Unknown``
    """
    is_top = (isinstance(formula, true)
              or formula is True
              or formula == Boolean3.Top.name
              or formula == Boolean3.Top.value)
    if is_top:
        return Boolean3.Top
    is_bottom = (isinstance(formula, false)
                 or formula is False
                 or formula == Boolean3.Bottom.name
                 or formula == Boolean3.Bottom.value)
    return Boolean3.Bottom if is_bottom else Boolean3.Unknown
|
def get_risk_models(oqparam, kind='vulnerability vulnerability_retrofitted ' 'fragility consequence'):
    """Read the risk model files declared in the job configuration.

    :param oqparam:
        an OqParam instance
    :param kind:
        a space-separated string with the kinds of risk models to read
    :returns:
        a dictionary riskid -> loss_type, kind -> function
    """
    kinds = kind.split()
    rmodels = AccumDict()
    # First pass: for every requested kind, find the matching input files.
    # Keys in oqparam.inputs look like "<cost_type>_<kind>"
    # (e.g. "structural_fragility").
    for kind in kinds:
        for key in sorted(oqparam.inputs):
            mo = re.match('(occupants|%s)_%s$' % (COST_TYPE_REGEX, kind), key)
            if mo:
                loss_type = mo.group(1)
                # the cost_type in the key
                # can be occupants, structural, nonstructural, ...
                rmodel = nrml.to_python(oqparam.inputs[key])
                if len(rmodel) == 0:
                    raise InvalidFile('%s is empty!' % oqparam.inputs[key])
                rmodels[loss_type, kind] = rmodel
                if rmodel.lossCategory is None:  # NRML 0.4
                    continue
                # Sanity checks: the parsed model's class name and its
                # lossCategory must agree with the file key it came from.
                cost_type = str(rmodel.lossCategory)
                rmodel_kind = rmodel.__class__.__name__
                kind_ = kind.replace('_retrofitted', '')
                # strip retrofitted
                if not rmodel_kind.lower().startswith(kind_):
                    raise ValueError('Error in the file "%s_file=%s": is ' 'of kind %s, expected %s' % (key, oqparam.inputs[key], rmodel_kind, kind.capitalize() + 'Model'))
                if cost_type != loss_type:
                    raise ValueError('Error in the file "%s_file=%s": lossCategory is of ' 'type "%s", expected "%s"' % (key, oqparam.inputs[key], rmodel.lossCategory, loss_type))
    rdict = AccumDict(accum={})
    rdict.limit_states = []
    # Second pass: reorganize the parsed models into rdict, keyed by riskid.
    for (loss_type, kind), rm in sorted(rmodels.items()):
        if kind == 'fragility':
            # build a copy of the FragilityModel with different IM levels
            newfm = rm.build(oqparam.continuous_fragility_discretization, oqparam.steps_per_interval)
            for (imt, riskid), ffl in newfm.items():
                if not rdict.limit_states:
                    rdict.limit_states.extend(rm.limitStates)
                # we are rejecting the case of loss types with different
                # limit states; this may change in the future
                assert rdict.limit_states == rm.limitStates, (rdict.limit_states, rm.limitStates)
                rdict[riskid][loss_type, kind] = ffl
                # TODO: see if it is possible to remove the attribute
                # below, used in classical_damage
                ffl.steps_per_interval = oqparam.steps_per_interval
        elif kind == 'consequence':
            for riskid, cf in rm.items():
                rdict[riskid][loss_type, kind] = cf
        else:  # vulnerability
            cl_risk = oqparam.calculation_mode in ('classical', 'classical_risk')
            # only for classical_risk reduce the loss_ratios
            # to make sure they are strictly increasing
            for (imt, riskid), rf in rm.items():
                rdict[riskid][loss_type, kind] = (rf.strictly_increasing() if cl_risk else rf)
    return rdict
|
def select_with_index(self, selector):
    '''Transforms each element of a sequence into a new form, incorporating
    the index of the element.

    Each element is transformed through a selector function which accepts
    the element value and its zero-based index in the source sequence. The
    generated sequence is lazily evaluated.

    Args:
        selector: A two argument function mapping the index of a value in
            the source sequence and the element value itself to the
            corresponding value in the generated sequence. The two
            arguments of the selector function (which can have any names)
            and its return value are,

            Args:
                index: The zero-based index of the element
                element: The value of the element

            Returns:
                The selected value derived from the index and element

    Returns:
        A generated sequence whose elements are the result of invoking the
        selector function on each element of the source sequence
    '''
    # Each work item is (selector, (index, element)); `star` presumably
    # unpacks the tuple so the pool applies selector to the enumerated
    # pair -- TODO confirm against the definition of `star`.
    # NOTE(review): imap_unordered does not preserve source ordering, so
    # results may arrive out of order -- confirm this matches the
    # intended contract of the generated sequence.
    return self._create(self._pool.imap_unordered(star, zip(itertools.repeat(selector), enumerate(iter(self))), self._chunksize))
|
def to_wire_dict(self):
    """Return a simplified transport object for logging and caching.

    The returned dict mirrors the attributes documented on the transport
    object: validity flag and result string, tagged warnings, naming and
    URL information (name, parent_url, base_ref, base_url, url, domain),
    timing and size measurements (checktime, dltime, size), position in
    the parent document (line, column, page), cache url, content type,
    recursion level and last-modified date. String-ish attributes that
    are None/empty are normalised to u"".
    """
    domain = self.urlparts[1] if self.urlparts else u""
    wire = {
        'valid': self.valid,
        'extern': self.extern[0],
        'result': self.result,
        # copy so later mutation of self.warnings does not leak into the dict
        'warnings': self.warnings[:],
        'name': self.name or u"",
        'title': self.get_title(),
        'parent_url': self.parent_url or u"",
        'base_ref': self.base_ref or u"",
        'base_url': self.base_url or u"",
        'url': self.url or u"",
        'domain': domain,
        'checktime': self.checktime,
        'dltime': self.dltime,
        'size': self.size,
        'info': self.info,
        'line': self.line,
        'column': self.column,
        'page': self.page,
        'cache_url': self.cache_url,
        'content_type': self.content_type,
        'level': self.recursion_level,
        'modified': self.modified,
    }
    return wire
|
def set_LObj(self, LObj=None):
    """Set the LObj attribute, storing objects the instance depends on.

    For example, a Detect object depends on a vessel and some apertures;
    that link should be stored somewhere (for saving/loading). LObj does
    this: it stores the ID (as dict) of all objects depended on.

    Parameters
    ----------
    LObj : None / dict / :class:`~tofu.pathfile.ID` / list of such
        Provide either:
            - A dict (derived from :meth:`~tofu.pathfile.ID._todict`)
            - A :class:`~tofu.pathfile.ID` instance
            - A list of dict or :class:`~tofu.pathfile.ID` instances
    """
    self._LObj = {}
    if LObj is None:
        return
    if type(LObj) is not list:
        LObj = [LObj]
    # Normalise ID instances to their dict form.
    for ii in range(len(LObj)):
        if type(LObj[ii]) is ID:
            LObj[ii] = LObj[ii]._todict()
    # Group the dicts by their class name.
    for cls_name in set(oo['Cls'] for oo in LObj):
        self._LObj[cls_name] = [oo for oo in LObj if oo['Cls'] == cls_name]
|
def _compute_next_evaluations(self, pending_zipped_X=None, ignored_zipped_X=None):
    """Computes the location of the new evaluation (optimizes the acquisition in the standard case).

    :param pending_zipped_X: matrix of input configurations that are in a
        pending state (i.e., do not have an evaluation yet).
    :param ignored_zipped_X: matrix of input configurations that the user
        black-lists, i.e., those configurations will not be suggested again.
    :return: the zipped input configuration(s) computed by the evaluator.
    """
    ### --- Update the context if any
    self.acquisition.optimizer.context_manager = ContextManager(self.space, self.context)
    ### --- Activate de_duplication: avoid suggesting configurations that
    # are already evaluated (self.X), pending, or black-listed.
    if self.de_duplication:
        duplicate_manager = DuplicateManager(space=self.space, zipped_X=self.X, pending_zipped_X=pending_zipped_X, ignored_zipped_X=ignored_zipped_X)
    else:
        duplicate_manager = None
    ### We zip the value in case there are categorical variables
    return self.space.zip_inputs(self.evaluator.compute_batch(duplicate_manager=duplicate_manager, context_manager=self.acquisition.optimizer.context_manager))
|
def dump(destination, xs, model=None, properties=False, indent=True, **kwargs):
    """Serialize Xmrs (or subclass) objects to PENMAN and write to a file.

    Args:
        destination: filename or file object
        xs: iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to
            serialize
        model: Xmrs subclass used to get triples
        properties: if `True`, encode variable properties
        indent: if `True`, adaptively indent; if `False` or `None`,
            don't indent; if a non-negative integer N, indent N spaces
            per level
    """
    text = dumps(xs, model=model, properties=properties, indent=indent, **kwargs)
    if hasattr(destination, 'write'):
        # Already a file-like object: write to it directly.
        print(text, file=destination)
        return
    # Otherwise treat destination as a filename.
    with open(destination, 'w') as out:
        print(text, file=out)
|
def resolve_source_mapping(source_directory: str, output_directory: str, sources: Sources) -> Mapping[str, str]:
    """Returns a mapping from absolute source path to absolute output path as
    specified by the sources object. Files are not guaranteed to exist."""
    mapping = {}
    # Explicitly listed files map to their configured output names.
    for source_file, output_file in sources.files.items():
        mapping[os.path.join(source_directory, source_file)] = os.path.join(output_directory, output_file)
    # Glob matches keep their relative path under the output directory.
    filesystem = get_filesystem()
    for glob in sources.globs:
        for match in filesystem.list(source_directory, glob.patterns, exclude=glob.exclude):
            mapping[os.path.join(source_directory, match)] = os.path.join(output_directory, match)
    return mapping
|
def overlap(self, max_hang=100):
    """Classify the overlap between the ref (a) and query (b) alignment.

    Given the alignment coordinates, the four overhangs (aLhang, aRhang,
    bLhang, bRhang) are computed and compared against ``max_hang``.

    :param max_hang: maximum total overhang tolerated for an overlap call
    :return: integer overlap code:
        0 -- no overlap within ``max_hang``
        1 -- terminal overlap, a before b
        2 -- terminal overlap, b before a
        3 -- containment, a in b
        4 -- containment, b in a
    """
    ref_left, ref_right = 1, self.reflen
    qry_left, qry_right = 1, self.querylen
    a_lhang = self.start1 - ref_left
    a_rhang = ref_right - self.end1
    b_lhang = self.start2 - qry_left
    b_rhang = qry_right - self.end2
    if self.orientation == '-':
        # Reverse-orientation alignment: the query overhangs swap sides.
        b_lhang, b_rhang = b_rhang, b_lhang
    # Dovetail (terminal) overlaps
    if a_lhang + b_rhang < max_hang:
        return 2  # b ~ a
    if a_rhang + b_lhang < max_hang:
        return 1  # a ~ b
    # Containment overlaps
    if a_lhang + a_rhang < max_hang:
        return 3  # a in b
    if b_lhang + b_rhang < max_hang:
        return 4  # b in a
    return 0
|
def _handle_produce_response(self, node_id, send_time, batches, response):
    """Handle a produce response.

    Completes every batch covered by ``response``; with acks=0 there is no
    response, so all batches are completed immediately with no error.

    :param node_id: id of the broker node that answered
    :param send_time: time the request was sent (unused here)
    :param batches: the record batches covered by the request
    :param response: the ProduceResponse, or None for acks=0
    """
    # if we have a response, parse it
    log.debug('Parsing produce response: %r', response)
    if response:
        batches_by_partition = dict([(batch.topic_partition, batch) for batch in batches])
        for topic, partitions in response.topics:
            for partition_info in partitions:
                # v2+ responses carry a per-partition append timestamp.
                if response.API_VERSION < 2:
                    partition, error_code, offset = partition_info
                    ts = None
                else:
                    partition, error_code, offset, ts = partition_info
                tp = TopicPartition(topic, partition)
                error = Errors.for_code(error_code)
                batch = batches_by_partition[tp]
                self._complete_batch(batch, error, offset, ts)
        # v1+ responses report how long the request was throttled.
        if response.API_VERSION > 0:
            self._sensors.record_throttle_time(response.throttle_time_ms, node=node_id)
    else:
        # this is the acks = 0 case, just complete all requests
        for batch in batches:
            self._complete_batch(batch, None, -1, None)
|
def h_v_t(header, key):
    """Fetch ``key`` from ``header``, falling back to the title-cased form.

    Response header dicts may use either case style, e.g.
    ``header['x-log-abc']`` or ``header['X-Log-Abc']``.

    :param header: mapping of header names to values
    :param key: header name to look up (lower-case form)
    :return: the header value
    :raises ValueError: if neither form of the key is present
    """
    if key not in header:
        # Retry with the title-cased variant before giving up.
        key = key.title()
        if key not in header:
            raise ValueError("Unexpected header in response, missing: " + key + " headers:\n" + str(header))
    return header[key]
|
def main():
    """NAME
    angle.py
    DESCRIPTION
    calculates angle between two input directions D1, D2
    INPUT (COMMAND LINE ENTRY)
    D1_dec D1_inc D1_dec D2_inc
    OUTPUT
    angle
    SYNTAX
    angle.py [-h] [-i] [command line options] [< filename]
    OPTIONS
    -h prints help and quits
    -i for interactive data entry
    -f FILE input filename
    -F FILE output filename (required if -F set)
    Standard I/O"""
    # NOTE: the docstring doubles as the -h help text printed at runtime,
    # so it is left untouched; "D1_dec D1_inc D1_dec D2_inc" in INPUT
    # looks like a typo for "... D2_dec D2_inc" -- confirm before editing.
    out = ""
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        # Optional output file; `out` stays "" when -F is not given.
        ind = sys.argv.index('-F')
        o = sys.argv[ind + 1]
        out = open(o, 'w')
    if '-i' in sys.argv:
        # Interactive mode: prompt for two directions until ctrl-D.
        cont = 1
        while cont == 1:
            dir1, dir2 = [], []
            try:
                ans = input('Declination 1: [ctrl-D to quit] ')
                dir1.append(float(ans))
                ans = input('Inclination 1: ')
                dir1.append(float(ans))
                ans = input('Declination 2: ')
                dir2.append(float(ans))
                ans = input('Inclination 2: ')
                dir2.append(float(ans))
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt
                # and bad floats; presumably only EOFError (ctrl-D) is the
                # intended exit path -- confirm before narrowing.
                print("\nGood bye\n")
                sys.exit()
            # send dirs to angle and spit out result
            ang = pmag.angle(dir1, dir2)
            print('%7.1f ' % (ang))
    elif '-f' in sys.argv:
        # Read the directions from the named file.
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
        file_input = numpy.loadtxt(file)
    else:  # read from standard input
        # NOTE(review): numpy.float is a deprecated/removed alias on modern
        # NumPy -- confirm the pinned NumPy version before upgrading.
        file_input = numpy.loadtxt(sys.stdin.readlines(), dtype=numpy.float)
    if len(file_input.shape) > 1:  # list of directions
        dir1, dir2 = file_input[:, 0:2], file_input[:, 2:]
    else:
        # Single direction pair on one line.
        dir1, dir2 = file_input[0:2], file_input[2:]
    angs = pmag.angle(dir1, dir2)
    for ang in angs:  # read in the data (as string variable), line by line
        print('%7.1f' % (ang))
        if out != "":
            out.write('%7.1f \n' % (ang))
    if out:
        out.close()
|
def softDeactivate(rh):
    """Deactivate a virtual machine by first shutting down Linux and
    then log it off.
    Input:
    Request Handle with the following properties:
    function - 'POWERVM'
    subfunction - 'SOFTOFF'
    userid - userid of the virtual machine
    parms['maxQueries'] - Maximum number of queries to issue. Optional.
    parms['maxWait'] - Maximum time to wait in seconds. Optional,
    unless 'maxQueries' is specified.
    parms['poll'] - Polling interval in seconds. Optional,
    unless 'maxQueries' is specified.
    Output:
    Request Handle updated with the results.
    Return code - 0: ok, non-zero: error"""
    rh.printSysLog("Enter powerVM.softDeactivate, userid: " + rh.userid)
    # Probe the guest over IUCV to see whether Linux is reachable.
    strCmd = "echo 'ping'"
    iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)
    if iucvResults['overallRC'] == 0:
        # We could talk to the machine, tell it to shutdown nicely.
        strCmd = "shutdown -h now"
        iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)
        if iucvResults['overallRC'] == 0:
            # Give Linux a grace period to finish shutting down.
            time.sleep(15)
        else:
            # Shutdown failed. Let CP take down the system
            # after we log the results.
            rh.printSysLog("powerVM.softDeactivate " + rh.userid + " is unreachable. Treating it as already shutdown.")
    else:
        # Could not ping the machine. Treat it as a success
        # after we log the results.
        rh.printSysLog("powerVM.softDeactivate " + rh.userid + " is unreachable. Treating it as already shutdown.")
    # Tell z/VM to log off the system.
    parms = ["-T", rh.userid]
    smcliResults = invokeSMCLI(rh, "Image_Deactivate", parms)
    if smcliResults['overallRC'] == 0:
        pass
    # NOTE(review): the stray unary `+` before smcliResults['rs'] below is
    # harmless for ints but looks like a leftover line-continuation typo.
    elif (smcliResults['overallRC'] == 8 and smcliResults['rc'] == 200 and (smcliResults['rs'] == 12 or + smcliResults['rs'] == 16)):
        # Tolerable error.
        # Machine is already logged off or is logging off.
        rh.printLn("N", rh.userid + " is already logged off.")
    else:
        # SMAPI API failed.
        rh.printLn("ES", smcliResults['response'])
        rh.updateResults(smcliResults)
        # Use results from invokeSMCLI
    if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        # Wait for the system to log off.
        waitResults = waitForVMState(rh, rh.userid, 'off', maxQueries=rh.parms['maxQueries'], sleepSecs=rh.parms['poll'])
        if waitResults['overallRC'] == 0:
            rh.printLn("N", "Userid '" + rh.userid + " is in the desired state: off")
        else:
            rh.updateResults(waitResults)
    rh.printSysLog("Exit powerVM.softDeactivate, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
|
def get_n_cluster_in_events(event_numbers):
    '''Calculate the number of cluster in every given event.

    An external C++ library is used since there is no sufficient solution
    in python possible, because of np.bincount #BUG #225 for values > int32
    and the different handling under 32/64 bit operating systems.

    Parameters
    ----------
    event_numbers : numpy.array
        List of event numbers to be checked.

    Returns
    -------
    numpy.array
        Column 0 is the event number, column 1 the number of cluster of
        that event.
    '''
    logging.debug("Calculate the number of cluster in every given event")
    # The C++ library requires C-contiguous memory alignment.
    events = np.ascontiguousarray(event_numbers)
    out_events = np.empty_like(events)
    out_counts = np.empty_like(events, dtype=np.uint32)
    n_filled = analysis_functions.get_n_cluster_in_events(events, out_events, out_counts)
    # Only the first n_filled entries of the output buffers are valid.
    return np.vstack((out_events[:n_filled], out_counts[:n_filled])).T
|
def train(args, params):
    '''Train the MNIST model and report the final accuracy to NNI.'''
    # Load the data and build the model from the given hyper-parameters.
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)
    # nni: SendMetrics reports intermediate results each epoch;
    # TensorBoard logs go to TENSORBOARD_DIR.
    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1, validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])
    _, acc = model.evaluate(x_test, y_test, verbose=0)
    # NOTE(review): '%d' truncates the float accuracy to an integer in this
    # debug message; '%s' or '%f' would show the real value -- confirm intent.
    LOG.debug('Final result is: %d', acc)
    nni.report_final_result(acc)
|
def build_instance_name(inst, obj=None):
    """Return an instance name from an instance, and set instance.path.

    :param inst: the CIM instance whose path is (re)built
    :param obj: None to use every property as a keybinding, a class to take
        its key properties, or a list of key property names
    :return: the updated ``inst.path``
    :raises pywbem.CIMError: if a requested key property is missing
    """
    if obj is None:
        # No key list given: bind every property as a keybinding in place.
        for prop in inst.properties.values():
            inst.path.keybindings[prop.name] = prop.value
        return inst.path
    if not isinstance(obj, list):
        # A class was passed: recurse with its key property names.
        return build_instance_name(inst, get_keys_from_class(obj))
    keybindings = {}
    for key_name in obj:
        if key_name not in inst.properties:
            raise pywbem.CIMError(pywbem.CIM_ERR_FAILED, "Instance of %s is missing key property %s" % (inst.classname, key_name))
        keybindings[key_name] = inst[key_name]
    inst.path = pywbem.CIMInstanceName(classname=inst.classname, keybindings=keybindings, namespace=inst.path.namespace, host=inst.path.host)
    return inst.path
|
def reset_point_source_cache(self, bool=True):
    """Delete all cached point-source data and save it from then on.

    :param bool: cache flag forwarded to every image model
    :return: None
    """
    # Propagate the flag to each image model held by this instance.
    for model in self._imageModel_list:
        model.reset_point_source_cache(bool=bool)
|
def main():
    """NAME
    basemap_magic.py
    NB: this program no longer maintained - use plot_map_pts.py for greater functionality
    DESCRIPTION
    makes a map of locations in er_sites.txt
    SYNTAX
    basemap_magic.py [command line options]
    OPTIONS
    -h prints help message and quits
    -f SFILE, specify er_sites.txt or pmag_results.txt format file
    -res [c,l,i,h] specify resolution (crude, low, intermediate, high)
    -etp plot the etopo20 topographic mesh
    -pad [LAT LON] pad bounding box by LAT/LON (default is [.5 .5] degrees)
    -grd SPACE specify grid spacing
    -prj [lcc], specify projection (lcc=lambert conic conformable), default is mercator
    -n print site names (default is not)
    -l print location names (default is not)
    -o color ocean blue/land green (default is not)
    -R don't plot details of rivers
    -B don't plot national/state boundaries, etc.
    -sav save plot and quit quietly
    -fmt [png,svg,eps,jpg,pdf] specify format for output, default is pdf
    DEFAULTS
    SFILE: 'er_sites.txt'
    resolution: intermediate
    saved images are in pdf"""
    # NOTE: the docstring doubles as the -h help text printed at runtime.
    dir_path = '.'
    sites_file = 'er_sites.txt'
    ocean = 0
    res = 'i'
    proj = 'merc'
    prn_name = 0
    prn_loc = 0
    fancy = 0
    # NOTE(review): rivers/boundaries start at 0 and -R/-B also set 0, so
    # the later `if rivers == 1` / `if boundaries == 1` branches can never
    # fire -- the -R and -B flags appear to be dead code. Presumably the
    # defaults were meant to be 1 -- confirm before changing behavior.
    rivers, boundaries = 0, 0
    padlon, padlat, gridspace, details = .5, .5, .5, 1
    fmt = 'pdf'
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        sites_file = sys.argv[ind + 1]
    if '-res' in sys.argv:
        ind = sys.argv.index('-res')
        res = sys.argv[ind + 1]
    if '-etp' in sys.argv:
        fancy = 1
    if '-n' in sys.argv:
        prn_name = 1
    if '-l' in sys.argv:
        prn_loc = 1
    if '-o' in sys.argv:
        ocean = 1
    if '-R' in sys.argv:
        rivers = 0
    if '-B' in sys.argv:
        boundaries = 0
    if '-prj' in sys.argv:
        ind = sys.argv.index('-prj')
        proj = sys.argv[ind + 1]
    if '-fmt' in sys.argv:
        ind = sys.argv.index('-fmt')
        fmt = sys.argv[ind + 1]
    verbose = pmagplotlib.verbose
    if '-sav' in sys.argv:
        # Quiet mode: save the plot without interactive prompts.
        verbose = 0
    if '-pad' in sys.argv:
        ind = sys.argv.index('-pad')
        padlat = float(sys.argv[ind + 1])
        padlon = float(sys.argv[ind + 2])
    if '-grd' in sys.argv:
        ind = sys.argv.index('-grd')
        gridspace = float(sys.argv[ind + 1])
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        dir_path = sys.argv[ind + 1]
    sites_file = dir_path + '/' + sites_file
    location = ""
    FIG = {'map': 1}
    pmagplotlib.plot_init(FIG['map'], 6, 6)
    # read in er_sites file
    Sites, file_type = pmag.magic_read(sites_file)
    # Column names differ between pmag_results and er_sites style files.
    if 'results' in file_type:
        latkey = 'average_lat'
        lonkey = 'average_lon'
        namekey = 'pmag_result_name'
        lockey = 'er_location_names'
    else:
        latkey = 'site_lat'
        lonkey = 'site_lon'
        namekey = 'er_site_name'
        lockey = 'er_location_name'
    lats, lons = [], []
    slats, slons = [], []
    names, locs = [], []
    for site in Sites:
        if prn_loc == 1 and location == "":
            location = site['er_location_name']
        lats.append(float(site[latkey]))
        l = float(site[lonkey])
        if l < 0:
            l = l + 360.
        # make positive
        lons.append(l)
        if prn_name == 1:
            names.append(site[namekey])
        if prn_loc == 1:
            locs.append(site[lockey])
    for lat in lats:
        slats.append(lat)
    for lon in lons:
        slons.append(lon)
    # Bounding box: site extent padded by padlat/padlon degrees.
    Opts = {'res': res, 'proj': proj, 'loc_name': locs, 'padlon': padlon, 'padlat': padlat, 'latmin': numpy.min(slats) - padlat, 'latmax': numpy.max(slats) + padlat, 'lonmin': numpy.min(slons) - padlon, 'lonmax': numpy.max(slons) + padlon, 'sym': 'ro', 'boundinglat': 0., 'pltgrid': 1.}
    Opts['lon_0'] = 0.5 * (numpy.min(slons) + numpy.max(slons))
    Opts['lat_0'] = 0.5 * (numpy.min(slats) + numpy.max(slats))
    Opts['names'] = names
    Opts['gridspace'] = gridspace
    Opts['details'] = {'coasts': 1, 'rivers': 1, 'states': 1, 'countries': 1, 'ocean': 0}
    if ocean == 1:
        Opts['details']['ocean'] = 1
    if rivers == 1:
        Opts['details']['rivers'] = 0
    if boundaries == 1:
        Opts['details']['states'] = 0
        Opts['details']['countries'] = 0
    Opts['details']['fancy'] = fancy
    pmagplotlib.plot_map(FIG['map'], lats, lons, Opts)
    if verbose:
        pmagplotlib.draw_figs(FIG)
    files = {}
    for key in list(FIG.keys()):
        files[key] = 'Site_map' + '.' + fmt
    if pmagplotlib.isServer:
        # Server mode: add titled borders before saving.
        black = '#000000'
        purple = '#800080'
        titles = {}
        titles['map'] = 'Site Map'
        FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
        pmagplotlib.save_plots(FIG, files)
    elif verbose:
        ans = input(" S[a]ve to save plot, Return to quit: ")
        if ans == "a":
            pmagplotlib.save_plots(FIG, files)
    else:
        pmagplotlib.save_plots(FIG, files)
|
def to_dict(self):
    '''Return a dict of the attributes.'''
    attrs = {
        'raw': self.raw,
        'scheme': self.scheme,
        'authority': self.authority,
        # netloc is an alias for authority
        'netloc': self.authority,
        'path': self.path,
        'query': self.query,
        'fragment': self.fragment,
        'userinfo': self.userinfo,
        'username': self.username,
        'password': self.password,
        'host': self.host,
        'hostname': self.hostname,
        'port': self.port,
        'resource': self.resource,
        'url': self.url,
        'encoding': self.encoding,
    }
    return attrs
|
def _make_unknown_name(self, cursor):
    '''Creates a name for an unnamed type.

    The name is built from the lexical parent's unique name (minus its
    leading struct/union part) plus the index of this declaration among
    the parent's record-like children, so unnamed siblings of the same
    struct stay distinguishable.
    '''
    parent = cursor.lexical_parent
    pname = self.get_unique_name(parent)
    log.debug('_make_unknown_name: Got parent get_unique_name %s', pname)
    # we only look at types declarations
    _cursor_decl = cursor.type.get_declaration()
    # we had the field index from the parent record, as to differenciate
    # between unnamed siblings of a same struct
    _i = 0
    found = False
    # Look at the parent fields to find myself
    for m in parent.get_children():
        # FIXME: make the good indices for fields
        log.debug('_make_unknown_name child %d %s %s %s', _i, m.kind, m.type.kind, m.location)
        # Only record-like declarations count towards the index.
        if m.kind not in [CursorKind.STRUCT_DECL, CursorKind.UNION_DECL, CursorKind.CLASS_DECL]:  # CursorKind.FIELD_DECL]:
            continue
        if m == _cursor_decl:
            found = True
            break
        _i += 1
    if not found:
        raise NotImplementedError("_make_unknown_name BUG %s" % cursor.location)
    # truncate parent name to remove the first part (union or struct)
    _premainer = '_'.join(pname.split('_')[1:])
    name = '%s_%d' % (_premainer, _i)
    return name
|
def get_embedded_object(self, signature_id):
    '''Retrieve an embedded signing object.

    The returned object contains a signature url that can be opened in an
    iFrame.

    Args:
        signature_id (str): The id of the signature to get a signature url for

    Returns:
        An Embedded object
    '''
    url = self.EMBEDDED_OBJECT_GET_URL + signature_id
    return self._get_request().get(url)
|
def steal_page(self, page):
    """Steal a page from another document.

    Creates a fresh page at the end of this document and moves the given
    page's content into it. A page already belonging to this document is
    left untouched.
    """
    if page.doc == self:
        # Already ours: nothing to do.
        return
    self.fs.mkdir_p(self.path)
    destination = ImgPage(self, self.nb_pages)
    logger.info("%s --> %s" % (str(page), str(destination)))
    destination._steal_content(page)
|
def record(self, action, props=None, path=KISSmetrics.RECORD_PATH, resp=False):
    """Record an event for the current identity with any properties.

    :param action: event performed
    :param props: any additional data to include
    :type props: dict
    :param resp: indicate whether to return response
    :type resp: boolean
    :returns: an HTTP response for the request if ``resp=True``
    :rtype: `urllib3.response.HTTPResponse`
    :raises: Exception if either `identity` or `key` not set
    """
    self.check_id_key()
    properties = props or {}
    response = self.client.record(person=self.identity, event=action, properties=properties, timestamp=None, path=path)
    if resp:
        return response
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.