signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def set_register(self, addr, data):
    """Write *data* to the register at *addr* and the registers that follow.

    Extra bytes are automatically padded with zeros to fill the fixed
    16-byte command frame.  At most 14 data bytes can be written per call.

    :param addr: a static register constant from the Addr class,
        e.g. ``Addr.Speed``
    :param data: iterable of byte values to write (at most 14)
    """
    assert len(data) <= 14, "Cannot write more than 14 bytes at a time"
    # Command header: register address, then payload length with high bit set.
    cmd = chr(addr) + chr(len(data) | 0x80)
    cmd += ''.join(chr(cast_to_byte(b)) for b in data)
    # Zero-pad the command out to the fixed 16-byte frame.
    cmd += chr(0) * (16 - len(cmd))
    self._dev.send_bytes(cmd)
def unroll(self, length, inputs, begin_state=None, layout='NTC',
           merge_outputs=None, valid_length=None):
    """Unrolls an RNN cell across time steps.

    Parameters
    ----------
    length : int
        Number of steps to unroll.
    inputs : Symbol, list of Symbol, or None
        A single Symbol of shape (batch_size, length, ...) for layout
        'NTC' (or (length, batch_size, ...) for 'TNC'), or a list of
        Symbols each of shape (batch_size, ...).
    begin_state : nested list of Symbol, optional
        Input states from ``begin_state()`` or another cell's output
        state.  Created from ``begin_state()`` if ``None``.
    layout : str, optional
        Layout of the input symbol; only used when ``inputs`` is a
        single Symbol.
    merge_outputs : bool, optional
        ``False`` returns outputs as a list of Symbols; ``True``
        concatenates them across time into one Symbol; ``None`` outputs
        whatever is faster.
    valid_length : Symbol, NDArray or None
        Per-sequence lengths without padding, shape (batch_size,).
        When given, the last valid state is returned and padded outputs
        are masked with 0.  Must be smaller than or equal to ``length``.

    Returns
    -------
    outputs : list of Symbol or Symbol
        Output from the RNN (merged or per-step, per ``merge_outputs``).
    states : list of Symbol
        New state of the RNN, same type as ``begin_state()`` output.
    """
    # pylint: disable=too-many-locals
    self.reset()
    inputs, axis, F, batch_size = _format_sequence(length, inputs, layout, False)
    states = _get_begin_state(self, F, begin_state, inputs, batch_size)
    track_states = valid_length is not None
    outputs = []
    states_per_step = []
    for step in range(length):
        step_output, states = self(inputs[step], states)
        outputs.append(step_output)
        if track_states:
            states_per_step.append(states)
    if track_states:
        # Select, for each state tensor, the state at every sequence's
        # last valid step.
        states = [
            F.SequenceLast(F.stack(*per_step, axis=0),
                           sequence_length=valid_length,
                           use_sequence_length=True,
                           axis=0)
            for per_step in zip(*states_per_step)
        ]
        # Zero out outputs that fall beyond each sequence's valid length.
        outputs = _mask_sequence_variable_length(F, outputs, length,
                                                 valid_length, axis, True)
    outputs, _, _, _ = _format_sequence(length, outputs, layout, merge_outputs)
    return outputs, states
def is_known_scalar(value):
    """Return True if *value* is a scalar type we expect in a dataframe."""
    if np.iterable(value):
        return False
    if isinstance(value, numbers.Number):
        return True
    # Wrapping in pd.Series normalises python, numpy and pandas flavours of
    # these types, so dtype kind 'M' (datetime) / 'm' (timedelta) catches all.
    return pd.Series(value).dtype.kind in ('M', 'm')
def date_time_this_month():
    """Return a random datetime falling within the current month.

    :return: (datetime) a random moment between the first instant of the
        current month and its end
    """
    now = datetime.now()
    month_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    # monthrange() returns (weekday of day 1, number of days in the month).
    days_in_month = calendar.monthrange(now.year, now.month)[1]
    offset = random.randint(0, days_in_month * A_DAY_SECONDS)
    return month_start + timedelta(seconds=offset)
def _apply_subtotals(self, res, include_transforms_for_dims):
    """Insert subtotals (and perhaps other insertions later) for dimensions
    having their apparent dimension-idx in *include_transforms_for_dims*.
    """
    if not include_transforms_for_dims:
        return res
    suppressed = 0
    for dim_idx, dim in enumerate(self._all_dimensions):
        # MR_CAT dimensions are hidden from the apparent index space.
        if dim.dimension_type == DT.MR_CAT:
            suppressed += 1
        # Only marginable dimensions can be subtotaled.
        if not dim.is_marginable:
            continue
        apparent_idx = dim_idx - suppressed
        wants_transform = (dim.has_transforms
                           and apparent_idx in include_transforms_for_dims)
        if not wants_transform:
            continue
        # Insert this dimension's subtotals into the result array.
        insertions = self._insertions(res, dim, dim_idx)
        res = self._update_result(res, insertions, dim_idx)
    return res
def read_tsv(cls, file_path: str, gene_table: ExpGeneTable = None,
             encoding: str = 'UTF-8', sep: str = '\t'):
    """Read an expression matrix from a tab-delimited text file.

    Parameters
    ----------
    file_path: str
        The path of the text file.
    gene_table: `ExpGeneTable` object, optional
        The set of valid genes.  If given, genes in the text file are
        filtered against this set. (None)
    encoding: str, optional
        The file encoding. ("UTF-8")
    sep: str, optional
        The separator. ("\\t")

    Returns
    -------
    `ExpMatrix`
        The expression matrix.
    """
    # Parse the tsv file into a DataFrame-backed matrix.
    matrix = cls(pd.read_csv(file_path, sep=sep, index_col=0, header=0,
                             encoding=encoding))
    # Re-read the index column separately with na_filter=False: this seems
    # to be the only way to stop pandas from converting "nan"/"NaN" gene
    # names in the index to floats.
    ind = pd.read_csv(file_path, sep=sep, usecols=[0], header=None,
                      skiprows=1, encoding=encoding, na_filter=False)
    matrix.index = ind.iloc[:, 0]
    matrix.index.name = 'Genes'
    if gene_table is not None:
        # Keep only the genes present in the provided gene table.
        matrix = matrix.filter_genes(gene_table.gene_names)
    return matrix
def get_html(self):
    """Return the simpler HTML document with text, generating it on first use."""
    if self.__htmltree is None:
        # Lazily build the tree the first time it is requested.
        self.__make_tree()
    return self.__htmltree
def wait_for_processes(die_on_failure: bool = True,
                       timeout_sec: float = 1) -> None:
    """Wait for child processes (catalogued in :data:`processes`) to finish.

    If ``die_on_failure`` is ``True``, then whenever a subprocess returns
    failure, all are killed (via :func:`fail`, which exits this process).

    With ``timeout_sec`` set to ``None`` we would wait for the first
    process, then the second, etc., so a dying subprocess might not be
    noticed for ages.  With a finite ``timeout_sec`` each process is polled
    for that long in turn, which is much more responsive.
    """
    global processes
    global proc_args_list
    n = len(processes)
    # Drain subprocess output (needed when stdout/stderr use PIPE).
    Pool(n).map(print_lines, processes)
    still_running = True
    while still_running:
        still_running = False
        for i, proc in enumerate(processes):
            try:
                retcode = proc.wait(timeout=timeout_sec)
            except TimeoutExpired:
                still_running = True
                continue
            if retcode == 0:
                log.info("Process #{} (of {}) exited cleanly", i, n)
            if retcode != 0:
                log.critical(
                    "Process #{} (of {}) exited with return code {} "
                    "(indicating failure); its args were: {!r}",
                    i, n, retcode, proc_args_list[i])
                if die_on_failure:
                    log.critical("Exiting top-level process (will kill "
                                 "all other children)")
                    fail()  # exit this process, therefore kill its children
    processes.clear()
    proc_args_list.clear()
def WriteSessionCompletion(self, aborted=False):
    """Writes session completion information.

    Args:
      aborted (Optional[bool]): True if the session was aborted.

    Raises:
      IOError: if the storage type is not supported or when the storage
          writer is closed.
      OSError: if the storage type is not supported or when the storage
          writer is closed.
    """
    self._RaiseIfNotWritable()
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
        raise IOError('Unsupported storage type.')
    self._session.aborted = aborted
    # Build the completion record from the session and persist it.
    self._storage_file.WriteSessionCompletion(
        self._session.CreateSessionCompletion())
def get_package_hashes(filename):
    """Provide the SHA-256 hash of the given file.

    Args:
        filename (str): Name of the file to hash.

    Returns:
        (str): hex-encoded sha256 digest
    """
    log.debug('Getting package hashes')
    filename = os.path.abspath(filename)
    # Hash in fixed-size chunks so arbitrarily large packages do not have
    # to be read into memory all at once (the previous version did
    # f.read() of the whole file).
    digest = hashlib.sha256()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            digest.update(chunk)
    _hash = digest.hexdigest()
    log.debug('Hash for file %s: %s', filename, _hash)
    return _hash
def packexe(exefile, srcdir):
    """Pack the files in *srcdir* into *exefile* using 7z.

    Requires that stub files are available in checkouts/stubs.

    :param exefile: path of the self-extracting exe to produce
    :param srcdir: directory whose files are compressed into the bundle
    :raises OSError: if the intermediate ``.app.7z`` bundle already exists
    """
    exefile = cygpath(os.path.abspath(exefile))
    appbundle = exefile + ".app.7z"
    # Make sure appbundle doesn't already exist: we don't want to risk
    # appending to an existing file.
    if os.path.exists(appbundle):
        raise OSError("%s already exists" % appbundle)
    files = os.listdir(srcdir)
    SEVENZIP_ARGS = ['-r', '-t7z', '-mx', '-m0=BCJ2', '-m1=LZMA:d27',
                     '-m2=LZMA:d19:mf=bt2', '-m3=LZMA:d19:mf=bt2',
                     '-mb0:1', '-mb0s1:2', '-mb0s2:3', '-m1fb=128',
                     '-m1lc=4']
    # First, compress with 7z; capture its stdout so it can be logged on
    # failure.  The with-block also closes the temp file on error paths
    # (previously it leaked on exceptions).
    with tempfile.TemporaryFile() as stdout:
        try:
            check_call([SEVENZIP, 'a'] + SEVENZIP_ARGS + [appbundle] + files,
                       cwd=srcdir, stdout=stdout, preexec_fn=_noumask)
        except Exception:
            stdout.seek(0)
            log.error(stdout.read())
            log.exception("Error packing exe %s from %s", exefile, srcdir)
            raise
    # Then prepend our stubs onto the compressed 7z data.
    parts = ['checkouts/stubs/7z/7zSD.sfx.compressed',
             'checkouts/stubs/tagfile/app.tag',
             appbundle]
    with open(exefile, "wb") as out:
        for part in parts:
            # BUG FIX: the parts are binary data and the output file is
            # binary, so they must be opened in 'rb' (text mode would
            # raise a TypeError on write under Python 3).
            with open(part, "rb") as src:
                while True:
                    block = src.read(4096)
                    if not block:
                        break
                    out.write(block)
    os.unlink(appbundle)
def _ssweek_to_gregorian(ssweek_year, ssweek_week, ssweek_day):
    """Gregorian calendar date for the given Sunday-starting week year, week and day."""
    offset = dt.timedelta(weeks=ssweek_week - 1, days=ssweek_day - 1)
    return _ssweek_year_start(ssweek_year) + offset
def _read_returned_msg(self, method_frame):
    '''Support method to read a returned (basic.return) Message from the
    current frame buffer.  Will return a Message with return_info, or
    re-queue current frames and raise a FrameUnderflow.

    :returns: Message with the return_info attribute set, where return_info
        is a dict with the following properties:
          'channel': Channel instance
          'reply_code': reply code (int)
          'reply_text': reply text
          'exchange': exchange name
          'routing_key': routing key
    '''
    header_frame, body = self._reap_msg_frames(method_frame)
    args = method_frame.args
    # NOTE: the read_* calls below consume the frame buffer sequentially,
    # so their order must match the wire format.
    return_info = {
        'channel': self.channel,
        'reply_code': args.read_short(),
        'reply_text': args.read_shortstr(),
        'exchange': args.read_shortstr(),
        'routing_key': args.read_shortstr(),
    }
    return Message(body=body, return_info=return_info,
                   **header_frame.properties)
def get_wallet_transactions(wallet_name, api_key, coin_symbol='btc',
                            before_bh=None, after_bh=None, txn_limit=None,
                            omit_addresses=False, unspent_only=False,
                            show_confidence=False, confirmations=0):
    '''Takes a wallet, api_key, coin_symbol and returns the wallet's details.

    Optional:
        - txn_limit: # transactions to include
        - before_bh: only include transactions below this block height
        - after_bh: only include transactions above this block height
        - confirmations: returns the balance and TXRefs that have this
          number of confirmations
        - unspent_only: only include unspent TXRefs
        - show_confidence: adds confidence information to unconfirmed TXRefs
    '''
    assert len(wallet_name) <= 25, wallet_name
    assert api_key
    assert is_valid_coin_symbol(coin_symbol=coin_symbol)
    assert isinstance(show_confidence, bool), show_confidence
    assert isinstance(omit_addresses, bool), omit_addresses
    url = make_url(coin_symbol, **dict(addrs=wallet_name))
    params = {}
    # Value-carrying query parameters: include only when truthy.
    for key, value in (('limit', txn_limit),
                       ('token', api_key),
                       ('before', before_bh),
                       ('after', after_bh),
                       ('confirmations', confirmations)):
        if value:
            params[key] = value
    # Boolean flags: the API expects the literal string 'true'.
    for key, flag in (('unspentOnly', unspent_only),
                      ('includeConfidence', show_confidence),
                      ('omitWalletAddresses', omit_addresses)):
        if flag:
            params[key] = 'true'
    r = requests.get(url, params=params, verify=True,
                     timeout=TIMEOUT_IN_SECONDS)
    return _clean_tx(get_valid_json(r))
def get_ticks(self):
    """Get all ticks and labels for a band structure plot.

    Returns:
        A dict with 'distance': a list of distances at which ticks should
        be set and 'label': a list of labels for each of those ticks.
    """

    def _latexify(label):
        # Labels containing LaTeX escapes or subscripts need math mode.
        if label.startswith("\\") or label.find("_") != -1:
            return "$" + label + "$"
        return label

    tick_distance = []
    tick_labels = []
    previous_label = self._bs.qpoints[0].label
    previous_branch = self._bs.branches[0]['name']
    for i, point in enumerate(self._bs.qpoints):
        if point.label is None:
            continue
        tick_distance.append(self._bs.distance[i])
        this_branch = None
        for branch in self._bs.branches:
            if branch['start_index'] <= i <= branch['end_index']:
                this_branch = branch['name']
                break
        if point.label != previous_label and previous_branch != this_branch:
            # Discontinuity between branches: merge the two labels into a
            # single "A | B" tick instead of two separate ticks.
            tick_labels.pop()
            tick_distance.pop()
            tick_labels.append(_latexify(previous_label) + "$\\mid$"
                               + _latexify(point.label))
        else:
            tick_labels.append(_latexify(point.label))
        previous_label = point.label
        previous_branch = this_branch
    return {'distance': tick_distance, 'label': tick_labels}
def update_webhook(self, url, headers=None):
    """Register a new webhook for incoming subscriptions.

    If a webhook is already set, this will do an overwrite.

    :param str url: the URL with listening webhook (Required)
    :param dict headers: K/V dict with additional headers to send with request
    :return: void
    """
    api = self._get_api(mds.NotificationsApi)
    # The long-poll notification channel and the webhook are alternatives:
    # delete the channel before registering the webhook.
    api.delete_long_poll_channel()
    webhook_obj = WebhookData(url=url, headers=headers or {})
    api.register_webhook(webhook_obj)
    return
def get_family(self, family_id=None):
    """Gets the ``Family`` specified by its ``Id``.

    In plenary mode, the exact ``Id`` is found or a ``NotFound`` results.
    Otherwise, the returned ``Family`` may have a different ``Id`` than
    requested, such as the case where a duplicate ``Id`` was assigned to a
    ``Family`` and retained for compatibility.

    arg:    family_id (osid.id.Id): ``Id`` of the ``Family``
    return: (osid.relationship.Family) - the family
    raise:  NotFound - ``family_id`` not found
    raise:  NullArgument - ``family_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    if family_id is None:
        raise NullArgument()
    path = '/handcar/services/relationship/families/' + str(family_id)
    return objects.Family(self._get_request(path))
def _strip_dollars_fast(text):
    """Replace ``$$`` with ``$``; raise immediately if a ``$`` starting an
    interpolated expression is found.

    @param text: the source text
    @return: the text with dollars replaced, or raise HasExprException if
        there are interpolated expressions
    """
    def _replace(match):
        if match.group(0) != '$$':
            # Any other match is the start of an interpolated expression.
            raise HasExprException()
        return '$'
    return _dollar_strip_re.sub(_replace, text)
def flight_modes(logfile):
    '''Show flight modes for a log file.

    Prints each mode change as it is encountered, then a per-mode summary
    of time spent, estimating the duration of the final mode from the
    log's seconds-per-percent progress rate.
    '''
    # BUG FIX: the body previously referred to an undefined name
    # ``filename``; the parameter is ``logfile``.
    print("Processing log %s" % logfile)
    mlog = mavutil.mavlink_connection(logfile)
    mode = ""
    previous_mode = ""
    mode_start_timestamp = -1
    time_in_mode = {}
    previous_percent = -1
    seconds_per_percent = -1
    # Kept for its side effect of failing early on a missing file; the
    # value itself is not used below.
    filesize = os.path.getsize(logfile)
    while True:
        m = mlog.recv_match(
            type=['SYS_STATUS', 'HEARTBEAT', 'MODE'],
            condition='MAV.flightmode!="%s"' % mlog.flightmode)
        if m is None:
            break
        print('%s MAV.flightmode=%-12s (MAV.timestamp=%u %u%%)' % (
            time.asctime(time.localtime(m._timestamp)),
            mlog.flightmode, m._timestamp, mlog.percent))
        mode = mlog.flightmode
        if mode not in time_in_mode:
            time_in_mode[mode] = 0
        if mode_start_timestamp == -1:
            mode_start_timestamp = m._timestamp
        elif previous_mode != "" and previous_mode != mode:
            time_in_mode[previous_mode] += m._timestamp - mode_start_timestamp
            # Figure out how many seconds per percentage point so we can
            # estimate how many seconds the final mode lasts.
            if (seconds_per_percent == -1 and previous_percent != -1
                    and previous_percent != mlog.percent):
                seconds_per_percent = (
                    (m._timestamp - mode_start_timestamp)
                    / (mlog.percent - previous_percent))
            mode_start_timestamp = m._timestamp
        previous_mode = mode
        previous_percent = mlog.percent
    # blank line before the per-mode report
    print()
    print("Time per mode:")
    # Need to estimate the time spent in the final mode.
    if seconds_per_percent != -1:
        seconds_remaining = (100.0 - previous_percent) * seconds_per_percent
        time_in_mode[previous_mode] += seconds_remaining
        total_flight_time = sum(time_in_mode.values())
        # BUG FIX: dict.iteritems() is Python 2 only; use items().
        for key, value in time_in_mode.items():
            print('%-12s %s %.2f%%' % (
                key, str(datetime.timedelta(seconds=int(value))),
                (value / total_flight_time) * 100.0))
    else:
        # Can't print time in mode if only one mode during the flight.
        print(previous_mode, " 100% of flight time")
def check(self, item_id):
    """Check if an analysis is complete.

    :type item_id: str
    :param item_id: File ID to check.
    :rtype: bool
    :return: Boolean indicating if a report is done or not.
    """
    response = self._request(
        "/submission/sample/{sample_id}".format(sample_id=item_id),
        headers=self.headers)
    if response.status_code == 404:
        # unknown id
        return False
    try:
        for submission in response.json()['data']:
            if submission['submission_finished']:
                return True
    except (ValueError, KeyError) as e:
        # Malformed JSON or unexpected payload shape.
        raise sandboxapi.SandboxError(e)
    return False
def create_filehandlers(self, filenames, fh_kwargs=None):
    """Organize the filenames into file types and create file handlers."""
    # De-duplicate while preserving order.
    filenames = list(OrderedDict.fromkeys(filenames))
    logger.debug("Assigning to %s: %s", self.info['name'], filenames)
    self.info.setdefault('filenames', []).extend(filenames)
    remaining = set(filenames)
    created_fhs = {}
    # Load files that we know about by creating the file handlers,
    # filetype by filetype.
    for filetype, filetype_info in self.sorted_filetype_items():
        handlers = self.new_filehandlers_for_filetype(
            filetype_info, remaining, fh_kwargs=fh_kwargs)
        remaining -= {fh.filename for fh in handlers}
        if handlers:
            created_fhs[filetype] = handlers
            self.file_handlers[filetype] = sorted(
                self.file_handlers.get(filetype, []) + handlers,
                key=lambda fh: (fh.start_time, fh.filename))
    # Update existing dataset IDs with information from the file handlers.
    self.update_ds_ids_from_file_handlers()
    # Load any additional dataset IDs determined dynamically from the files.
    self.add_ds_ids_from_files()
    return created_fhs
def _xdate_setter(self, xdate_format='%Y-%m-%d'):
    """Make the x axis a date axis with automatic formatting.

    Parameters
    ----------
    xdate_format: String
        Sets date formatting.
    """
    if not xdate_format:
        return
    # Validate xdate_format by formatting a known date; bail out if the
    # format string is malformed.
    try:
        self.autofmt_xdate()
        datetime.date(2000, 1, 1).strftime(xdate_format)
    except ValueError:
        self.autofmt_xdate()
        return
    self.__axes.xaxis_date()
    formatter = dates.DateFormatter(xdate_format)
    self.__axes.xaxis.set_major_formatter(formatter)
def run_c_extension_with_fallback(log_function, extension, c_function,
                                  py_function, args, rconf):
    """Run a function calling a C extension, falling back to a pure Python
    function if the former does not succeed.

    :param function log_function: a logger function
    :param string extension: the name of the extension
    :param function c_function: the (Python) function calling the C extension
    :param function py_function: the (Python) function providing the fallback
    :param rconf: the runtime configuration
    :type rconf: :class:`aeneas.runtimeconfiguration.RuntimeConfiguration`
    :rtype: depends on the extension being called
    :raises RuntimeError: if both the C extension and the pure Python code
        did not succeed

    .. versionadded:: 1.4.0
    """
    succeeded = False
    result = None
    # --- try the C extension first, when enabled and loadable ---
    if not rconf[u"c_extensions"]:
        log_function(u"C extensions disabled")
    elif extension not in rconf:
        log_function([u"C extension '%s' not recognized", extension])
    elif not rconf[extension]:
        log_function([u"C extension '%s' disabled", extension])
    else:
        log_function([u"C extension '%s' enabled", extension])
        if c_function is None:
            log_function(u"C function is None")
        elif can_run_c_extension(extension):
            log_function([u"C extension '%s' enabled and it can be loaded", extension])
            succeeded, result = c_function(*args)
        else:
            log_function([u"C extension '%s' enabled but it cannot be loaded", extension])
    # --- fall back to the pure Python implementation ---
    if not succeeded:
        if py_function is None:
            log_function(u"Python function is None")
        else:
            log_function(u"Running the pure Python code")
            succeeded, result = py_function(*args)
    if not succeeded:
        raise RuntimeError(u"Both the C extension and the pure Python code failed. (Wrong arguments? Input too big?)")
    return result
def serialize(self, items):
    """Do the inverse of config parsing: take parsed values and convert
    them back to a string representing config file contents."""
    lines = []
    for key, value in items.items():
        if isinstance(value, list):
            # Special case: lists are rendered as "[a, b, c]".
            value = "[" + ", ".join(map(str, value)) + "]"
        lines.append("%s = %s\n" % (key, value))
    return "".join(lines)
def unfollow(user, obj, send_action=False, flag=''):
    """Removes a "follow" relationship.

    Set ``send_action`` to ``True`` (``False`` is default) to also send a
    ``<user> stopped following <object>`` action signal.

    Pass a string value to ``flag`` to determine which type of "follow"
    relationship you want to remove.

    Example::

        unfollow(request.user, other_user)
        unfollow(request.user, other_user, flag='watching')
    """
    check(obj)
    follows = apps.get_model('actstream', 'follow').objects.filter(
        user=user,
        object_id=obj.pk,
        content_type=ContentType.objects.get_for_model(obj),
    )
    if flag:
        follows = follows.filter(flag=flag)
    follows.delete()
    if not send_action:
        return
    # Signal the unfollow, naming the flag when one was given.
    verb = _('stopped following') if not flag else _('stopped %s' % flag)
    action.send(user, verb=verb, target=obj)
def remove_properties(self):
    """Remove the property layer, if it exists."""
    properties_node = self.node.find('properties')
    if properties_node is not None:
        self.node.remove(properties_node)
def domain_create(self, domain, master=True, **kwargs):
    """Registers a new Domain on the acting user's account.

    Make sure to point your registrar to Linode's nameservers so that
    Linode's DNS manager will correctly serve your domain.

    :param domain: The domain to register to Linode's DNS manager.
    :type domain: str
    :param master: Whether this is a master (defaults to true)
    :type master: bool
    :returns: The new Domain object.
    :rtype: Domain
    """
    params = {
        'domain': domain,
        'type': 'master' if master else 'slave',
    }
    # Caller-supplied kwargs may override the defaults above.
    params.update(kwargs)
    result = self.post('/domains', data=params)
    if 'id' not in result:
        raise UnexpectedResponseError(
            'Unexpected response when creating Domain!', json=result)
    return Domain(self, result['id'], result)
def categories(self, value):
    """Setter for **self.__categories** attribute.

    :param value: Attribute value.
    :type value: dict
    """
    if value is not None:
        assert type(value) is dict, \
            "'{0}' attribute: '{1}' type is not 'dict'!".format("categories", value)
        # NOTE: uses the Python 2 ``unicode`` type, matching the project's
        # interpreter version.
        for key in value:
            assert type(key) is unicode, \
                "'{0}' attribute: '{1}' type is not 'unicode'!".format("categories", key)
    self.__categories = value
def square_count(x: int, y: int) -> int:
    """Count the total number of axis-aligned squares within an x-by-y grid.

    Args:
        x: The length of the rectangle.
        y: The width of the rectangle.

    Returns:
        An integer count of the total number of squares within the
        rectangular grid.

    Examples:
        >>> square_count(4, 3)
        20
        >>> square_count(2, 2)
        5
        >>> square_count(1, 1)
        1
    """
    # Normalise so that x is the smaller side.
    if y < x:
        x, y = y, x
    # Squares of side k fit (x - k + 1) * (y - k + 1) ways; summing over
    # k = 1..x gives the closed form below.  Both numerators are always
    # exact multiples of their divisors, so integer division (//) keeps
    # the result exact for arbitrarily large inputs — the previous
    # float-based version could lose precision.
    return (x * (x + 1) * (2 * x + 1)) // 6 + ((y - x) * x * (x + 1)) // 2
def create_datastore_write_config(mapreduce_spec):
    """Create a datastore config to use in write operations.

    Args:
        mapreduce_spec: current mapreduce specification as MapreduceSpec.

    Returns:
        an instance of datastore_rpc.Configuration to use for all write
        operations in the mapreduce.
    """
    force_writes = parse_bool(mapreduce_spec.params.get("force_writes", "false"))
    if force_writes:
        return datastore_rpc.Configuration(force_writes=force_writes)
    # The dev server doesn't support force_writes.
    return datastore_rpc.Configuration()
def wait_for(self, wait_for, timeout_ms):
    """Waits for one or more events to happen.

    in wait_for of type int
        Specifies what to wait for;
        see :py:class:`ProcessWaitForFlag` for more information.

    in timeout_ms of type int
        Timeout (in ms) to wait for the operation to complete.
        Pass 0 for an infinite timeout.

    return reason of type :class:`ProcessWaitResult`
        The overall wait result;
        see :py:class:`ProcessWaitResult` for more information.
    """
    for name, arg in (("wait_for", wait_for), ("timeout_ms", timeout_ms)):
        if not isinstance(arg, baseinteger):
            raise TypeError(
                "%s can only be an instance of type baseinteger" % name)
    reason = self._call("waitFor", in_p=[wait_for, timeout_ms])
    return ProcessWaitResult(reason)
def _normalize ( self , tokens ) :
"""Normalization transform to apply to both dictionary words and input tokens .""" | if self . case_sensitive :
return ' ' . join ( self . lexicon [ t ] . normalized for t in tokens )
else :
return ' ' . join ( self . lexicon [ t ] . lower for t in tokens ) |
def create_multi_output_factor(self, tool, source, splitting_node, sink):
    """Creates a multi-output factor.

    This takes a single node, applies a MultiOutputTool to create multiple
    nodes on a new plate.  Instantiates a single tool for all of the input
    plate values, and connects the source and sink nodes with that tool.

    Note that the tool parameters are currently fixed over a plate.  For
    parameters that vary over a plate, an extra input stream should be used.

    :param tool: The tool to use; an instantiated Tool object or a dict
        with "name" and "parameters"
    :param source: The source node
    :param splitting_node: The node over which to split
    :param sink: The sink node
    :return: The factor object
    :type tool: MultiOutputTool | dict
    :type source: Node | None
    :type sink: Node
    :rtype: Factor
    """
    if source and not isinstance(source, Node):
        raise ValueError("Expected Node, got {}".format(type(source)))
    if not isinstance(sink, Node):
        raise ValueError("Expected Node, got {}".format(type(sink)))
    if not isinstance(tool, MultiOutputTool):
        raise ValueError("Expected MultiOutputTool, got {}".format(type(tool)))
    # Check plate compatibility - note this is the opposite way round to a
    # normal factor.
    input_plates = source.plates if source else []
    output_plates = sink.plates
    if len(input_plates) > 1:
        raise NotImplementedError
    if len(output_plates) == 0:
        raise ValueError("No output plate found")
    if len(output_plates) == 1:
        if not self.check_multi_output_plate_compatibility(input_plates,
                                                           output_plates[0]):
            raise IncompatiblePlatesError("Parent plate does not match input plate")
        factor = MultiOutputFactor(
            tool=tool, source_node=source, splitting_node=splitting_node,
            sink_node=sink,
            input_plate=input_plates[0] if input_plates else None,
            output_plates=output_plates[0])
    else:
        # The output plates should be the same as the input plates, except
        # for one additional plate.  Since only one input plate is
        # currently supported, there is a single matching plate.  The new
        # output plate must either have no parents (it is at the root of
        # the tree) or have its parent somewhere in the input plate's
        # ancestry.
        if len(output_plates) > 2:
            raise NotImplementedError
        if len(input_plates) != 1:
            raise IncompatiblePlatesError(
                "Require an input plate to match all but one of the output plates")
        if output_plates[0] == input_plates[0]:
            # Found a match, so the new plate is the other one.
            output_plate = output_plates[1]
        else:
            if output_plates[1].plate_id != input_plates[0].plate_id:
                raise IncompatiblePlatesError(
                    "Require an input plate to match all but one of the output plates")
            output_plate = output_plates[0]
            # Swap them round so the new plate is the last plate - this is
            # required by the factor.
            output_plates[1], output_plates[0] = output_plates[0], output_plates[1]
        if not output_plate.is_root:
            # Walk up the input plate's ancestry looking for the output
            # plate's parent.
            match = False
            ancestor = input_plates[0].parent
            while ancestor is not None:
                if ancestor.plate_id == output_plate.parent.plate_id:
                    match = True
                    break
                ancestor = ancestor.parent
            if not match:
                raise IncompatiblePlatesError(
                    "Require an input plate to match all but one of the output plates")
        factor = MultiOutputFactor(
            tool=tool, source_node=source, sink_node=sink,
            splitting_node=splitting_node, input_plate=input_plates[0],
            output_plates=output_plates)
    self._add_factor(factor)
    return factor
def _handle ( self , event_handler , event_name , user_args , event_timeout , cond , cond_timeout ) :
"""Pop an event of specified type and calls its handler on it . If
condition is not None , block until condition is met or timeout .""" | if cond :
cond . wait ( cond_timeout )
event = self . pop_event ( event_name , event_timeout )
return event_handler ( event , * user_args ) |
def cli(obj, alert, severity, timeout, purge):
    """List heartbeats.

    With ``--purge``, deletes all heartbeats whose status is not 'ok'.
    With ``--alert``, sends one alert per heartbeat reflecting its
    status (fail / slow / ok).
    """
    client = obj['client']
    if obj['output'] == 'json':
        # Raw JSON listing straight from the API endpoint.
        r = client.http.get('/heartbeats')
        click.echo(json.dumps(r['heartbeats'], sort_keys=True, indent=4, ensure_ascii=False))
    else:
        timezone = obj['timezone']
        headers = {'id': 'ID', 'origin': 'ORIGIN', 'customer': 'CUSTOMER', 'tags': 'TAGS', 'createTime': 'CREATED', 'receiveTime': 'RECEIVED', 'latency': 'LATENCY', 'timeout': 'TIMEOUT', 'since': 'SINCE', 'status': 'STATUS'}
        heartbeats = client.get_heartbeats()
        # Tabular listing in the requested output format.
        click.echo(tabulate([h.tabular(timezone) for h in heartbeats], headers=headers, tablefmt=obj['output']))
        not_ok = [hb for hb in heartbeats if hb.status != 'ok']
        if purge:
            # Delete every heartbeat that is not currently OK.
            with click.progressbar(not_ok, label='Purging {} heartbeats'.format(len(not_ok))) as bar:
                for b in bar:
                    client.delete_heartbeat(b.id)
        elif alert:
            with click.progressbar(heartbeats, label='Alerting {} heartbeats'.format(len(heartbeats))) as bar:
                for b in bar:
                    # Heartbeat tags of the form "key:value" can override the
                    # alert's environment/group; all other tags pass through.
                    params = dict(filter(lambda a: len(a) == 2, map(lambda a: a.split(':'), b.tags)))
                    environment = params.get('environment', 'Production')
                    group = params.get('group', 'System')
                    tags = list(filter(lambda a: not a.startswith('environment:') and not a.startswith('group:'), b.tags))
                    if b.status == 'expired':  # aka. "stale"
                        client.send_alert(resource=b.origin, event='HeartbeatFail', correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'], group=group, environment=environment, service=['Alerta'], severity=severity, value='{}'.format(b.since), text='Heartbeat not received in {} seconds'.format(b.timeout), tags=tags, type='heartbeatAlert', timeout=timeout, customer=b.customer)
                    elif b.status == 'slow':
                        client.send_alert(resource=b.origin, event='HeartbeatSlow', correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'], group=group, environment=environment, service=['Alerta'], severity=severity, value='{}ms'.format(b.latency), text='Heartbeat took more than {}ms to be processed'.format(MAX_LATENCY), tags=tags, type='heartbeatAlert', timeout=timeout, customer=b.customer)
                    else:
                        # Recovered / healthy heartbeat: clear via a 'normal' severity alert.
                        client.send_alert(resource=b.origin, event='HeartbeatOK', correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'], group=group, environment=environment, service=['Alerta'], severity='normal', value='', text='Heartbeat OK', tags=tags, type='heartbeatAlert', customer=b.customer)
def get_biased_correlations(data, threshold=10):
    """Return the mean of the highest few correlations for each bit.

    For every column (bit) across the entirety of the data, the
    ``threshold`` largest correlation coefficients with other columns are
    collected (the self-correlation of 1.0 is skipped) and averaged.
    Meant to provide a comparison point for the pairwise correlations
    reported in the literature, which are typically between neighboring
    neurons tuned to the same inputs — i.e. among the most correlated in
    any region, so they are a biased estimate for arbitrary neuron pairs.
    """
    dense = data.toDense()
    corr = numpy.corrcoef(dense, rowvar=False)
    top_values = []
    for row in corr:
        # Index 0 of the descending sort is the bit's correlation with
        # itself (1.0); keep the next `threshold` entries.
        top_values.extend(sorted(row, reverse=True)[1:threshold + 1])
    return numpy.mean(top_values)
def write_tensorboard_text(self, key, input_dict):
    """Saves text to Tensorboard.

    Note: Only works on tensorflow r1.2 or above.

    :param key: The name of the text.
    :param input_dict: A dictionary that will be displayed in a table on Tensorboard.
    """
    try:
        with tf.Session() as sess:
            # Render the dict as a two-column (key, value) string tensor.
            s_op = tf.summary.text(key, tf.convert_to_tensor(([[str(x), str(input_dict[x])] for x in input_dict])))
            s = sess.run(s_op)
            self.summary_writer.add_summary(s, self.get_step)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed. tf.summary.text is unavailable before r1.2;
        # treat that failure as non-fatal.
        LOGGER.info("Cannot write text summary for Tensorboard. Tensorflow version must be r1.2 or above.")
def funk(p, x, y):
    """Return the RMS misfit of a best-fit tanh curve against data.

    The model is f(x) = alpha * tanh(beta * x), with alpha = p[0] and
    beta = p[1]; the misfit is sqrt(sum((y - f(x))**2) / len(y)).

    Parameters
    ----------
    p : sequence of two floats
        (alpha, beta) model parameters.
    x : sequence of floats
        Abscissa values.
    y : sequence of floats
        Ordinate values (same length as x).

    Returns
    -------
    float
        The RMS misfit.
    """
    alpha = p[0]
    beta = p[1]
    # Sum of squared residuals. The accumulator is a float, so plain
    # true division below is identical to the former old_div() call.
    dev = sum((yi - alpha * math.tanh(beta * xi)) ** 2 for xi, yi in zip(x, y))
    return math.sqrt(dev / float(len(y)))
def prepare_string(partname, thread, maxw):
    """Extract the content string for part `partname` from `thread`,
    limited to a maximal length of `maxw`."""
    # Map part names to (content extractor, optional custom shortener).
    extractors = {
        'mailcount': (prepare_mailcount_string, None),
        'date': (prepare_date_string, None),
        'authors': (prepare_authors_string, shorten_author_string),
        'subject': (prepare_subject_string, None),
        'content': (prepare_content_string, None),
    }
    text = ' '  # fallback value
    if thread:
        extract, shorten = extractors[partname]
        # Extract, then strip line breaks so the string fits on one line.
        text = extract(thread).replace('\n', ' ').replace('\r', '')
        # Shorten only when a max width is requested and actually exceeded;
        # prefer the part-specific shortener, otherwise hard-truncate.
        if maxw and len(text) > maxw:
            text = shorten(text, maxw) if shorten else text[:maxw]
    return text
def replace(self, nodes, node):
    """Replace `nodes` with `node`.

    Edges incoming to nodes[0] are reconnected to the new node, and
    edges outgoing from nodes[-1] become outgoing from the new node.
    """
    if not isinstance(nodes, list):
        nodes = [nodes]
    # "Collapsing" means the replacement node is itself a member of the
    # group being replaced (merge the group into one of its own nodes).
    collapse = self.id(node) in self.nodes
    if not collapse:
        self.add_node(node)
    # Rewire edges around the group.
    # TODO: check specifically for output_shape is not generic. Consider refactoring.
    for predecessor in self.incoming(nodes):
        self.add_edge(predecessor, node, getattr(predecessor, "output_shape", None))
    for successor in self.outgoing(nodes):
        self.add_edge(node, successor, getattr(node, "output_shape", None))
    # Drop the replaced nodes, keeping the survivor when collapsing.
    for old in nodes:
        if collapse and old == node:
            continue
        self.remove(old)
def p_redirection_heredoc(p):
    '''redirection : LESS_LESS WORD
                   | NUMBER LESS_LESS WORD
                   | REDIR_WORD LESS_LESS WORD
                   | LESS_LESS_MINUS WORD
                   | NUMBER LESS_LESS_MINUS WORD
                   | REDIR_WORD LESS_LESS_MINUS WORD'''
    # NOTE: the docstring above is the PLY grammar production — do not edit
    # it casually; PLY reads it to build the parser.
    parserobj = p.context
    assert isinstance(parserobj, _parser)
    # The heredoc delimiter word is always the last symbol of the production.
    output = ast.node(kind='word', word=p[len(p) - 1], parts=[], pos=p.lexspan(len(p) - 1))
    if len(p) == 3:
        # Two-symbol form ("<< WORD" / "<<- WORD"): no explicit input fd.
        p[0] = ast.node(kind='redirect', input=None, type=p[1], heredoc=None, output=output, pos=(p.lexpos(1), p.endlexpos(2)))
    else:
        # Leading NUMBER or REDIR_WORD supplies the redirect input.
        p[0] = ast.node(kind='redirect', input=p[1], type=p[2], heredoc=None, output=output, pos=(p.lexpos(1), p.endlexpos(3)))
    # Remember the pending heredoc. The boolean records whether it is the
    # "<<-" (strip leading tabs) form rather than plain "<<".
    if p.slice[len(p) - 2].ttype == tokenizer.tokentype.LESS_LESS:
        parserobj.redirstack.append((p[0], False))
    else:
        parserobj.redirstack.append((p[0], True))
def from_record(self, record):
    """Construct a sequenced item object from the given ORM record."""
    return self.sequenced_item_class(**self.get_field_kwargs(record))
def p_ExtendedAttributeNoArgs(p):
    """ExtendedAttributeNoArgs : IDENTIFIER"""
    # The docstring above is the PLY grammar production: a bare extended
    # attribute such as [Replaceable] — a single identifier, no arguments.
    p[0] = model.ExtendedAttribute(value=model.ExtendedAttributeValue(name=p[1]))
def _connect(self):
    """Connects to the cloud web services.

    If this is the first authentication, a web browser will be started
    to authenticate against Google and provide access to elasticluster.

    :return: A Resource object with methods for interacting with the
        service.
    """
    # Serialize connection setup; reuse an existing connection if present.
    with GoogleCloudProvider.__gce_lock:
        if self._gce:
            return self._gce
        flow = OAuth2WebServerFlow(self._client_id, self._client_secret, GCE_SCOPE)
        # The `Storage` object holds the credentials that your
        # application needs to authorize access to the user's data.
        # The named file is created if it does not exist. It can only
        # hold credentials for a single user; it stores the access
        # privileges for the application, so a user only has to grant
        # access through the web interface once.
        storage_path = os.path.join(self._storage_path, self._client_id + '.oauth.dat')
        storage = Storage(storage_path)
        credentials = storage.get()
        if credentials is None or credentials.invalid:
            args = argparser.parse_args([])
            args.noauth_local_webserver = self._noauth_local_webserver
            # Try to start a browser to have the user authenticate with
            # Google. TODO: what kind of exception is raised if the
            # browser cannot be started?
            try:
                credentials = run_flow(flow, storage, flags=args)
            except Exception:
                # Narrowed from a bare `except:` and converted from the
                # Python-2 print statement; the error is still re-raised.
                import sys
                print("Unexpected error: {0}".format(sys.exc_info()[0]))
                raise
        http = httplib2.Http()
        self._auth_http = credentials.authorize(http)
        self._gce = build(GCE_API_NAME, GCE_API_VERSION, http=http)
        return self._gce
def plot_circ(fignum, pole, ang, col):
    """Draw a small circle on an equal-area projection plot.

    Parameters
    ----------
    fignum : matplotlib figure number
    pole : (dec, inc) of the center of the circle
    ang : angular radius of the circle
    col : color/style prefix used for the lower-hemisphere points
    """
    plt.figure(num=fignum)
    D_c, I_c = pmag.circ(pole[0], pole[1], ang)
    # Split the circle points into upper (inc < 0) and lower hemispheres.
    x_up, y_up = [], []
    x_down, y_down = [], []
    for dec, inc in zip(D_c, I_c):
        xy = pmag.dimap(dec, inc)
        if inc < 0:
            x_up.append(xy[0])
            y_up.append(xy[1])
        else:
            x_down.append(xy[0])
            y_down.append(xy[1])
    # Lower hemisphere uses the caller's color; upper is smaller, cyan.
    plt.plot(x_down, y_down, col + '.', ms=5)
    plt.plot(x_up, y_up, 'c.', ms=2)
def is_bad_headers(self):
    """Return True if any header contains a newline or carriage return
    (i.e. subject, sender, recipients or extra headers could be used
    for header injection)."""
    values = [self.subject, self.sender]
    values.extend(self.send_to)
    values.extend(dict(self.extra_headers).values())
    return any('\r' in value or '\n' in value for value in values)
def report_on_jobs(self):
    """Gathers information about jobs such as its child jobs and status.

    :returns jobStats: Pairings of a useful category and a list of jobs
        which fall into it. The 'childNumber' entry holds the successor
        count of the last job examined (0 when no jobs were reported).
    :rtype dict:
    """
    hasChildren = []
    readyToRun = []
    zombies = []
    hasLogFile = []
    hasServices = []
    services = []
    properties = set()
    # Successor count of the most recently examined job. Initialized so
    # the return statement cannot raise NameError on an empty job list
    # (previously it was only bound inside the loop).
    childNumber = 0
    for job in self.jobsToReport:
        if job.logJobStoreFileID is not None:
            hasLogFile.append(job)
        # Total number of successors across all stack levels. sum() replaces
        # reduce(lambda x, y: x + y, map(len, job.stack) + [0]), which breaks
        # on Python 3 (map object + list; reduce is not a builtin).
        childNumber = sum(map(len, job.stack))
        if childNumber > 0:  # Total number of successors > 0
            hasChildren.append(job)
            properties.add("HAS_CHILDREN")
        elif job.command is not None:
            # Job has no children and a command to run: it could be run.
            readyToRun.append(job)
            properties.add("READY_TO_RUN")
        else:
            # Job has no successors and no command, so is a zombie job.
            zombies.append(job)
            properties.add("IS_ZOMBIE")
        if job.services:
            hasServices.append(job)
            properties.add("HAS_SERVICES")
        if job.startJobStoreID or job.terminateJobStoreID or job.errorJobStoreID:
            # These attributes are only set in service jobs.
            services.append(job)
            properties.add("IS_SERVICE")
    return {'hasChildren': hasChildren,
            'readyToRun': readyToRun,
            'zombies': zombies,
            'hasServices': hasServices,
            'services': services,
            'hasLogFile': hasLogFile,
            'properties': properties,
            'childNumber': childNumber}
def ensure_routing_table_is_fresh(self, access_mode):
    """Update the routing table if stale.

    This method performs two freshness checks, before and after acquiring
    the refresh lock. If the routing table is already fresh on entry, the
    method exits immediately; otherwise, the refresh lock is acquired and
    the second freshness check that follows determines whether an update
    is still required.

    This method is thread-safe.

    :return: `True` if an update was required, `False` otherwise.
    """
    # Fast path: no locking needed when the table is already fresh.
    if self.routing_table.is_fresh(access_mode):
        return False
    with self.refresh_lock:
        # Another thread may have refreshed the table while we waited
        # for the lock; re-check before doing any work.
        if self.routing_table.is_fresh(access_mode):
            if access_mode == READ_ACCESS:
                # Reader is fresh but the writer may not be: record that
                # we are reading in the absence of a fresh writer.
                self.missing_writer = not self.routing_table.is_fresh(WRITE_ACCESS)
            return False
        self.update_routing_table()
        self.update_connection_pool()
        return True
def _validate_missing(self, value):
    """Validate missing values. Raise a :exc:`ValidationError` if
    ``value`` should be considered missing."""
    # A missing value only fails when the field declares itself required.
    if value is missing_ and getattr(self, 'required', False):
        self.fail('required')
    # None only fails when allow_none exists and is not exactly True.
    if value is None and getattr(self, 'allow_none', True) is not True:
        self.fail('null')
def transaction_retry(max_retries=1):
    """Decorator for methods doing database operations.

    If the database operation fails, it will retry the operation at most
    ``max_retries`` times (overridable per call via the
    ``exception_retry_count`` keyword argument).
    """
    def decorate(fun):
        @wraps(fun)
        def wrapper(*args, **kwargs):
            retry_budget = kwargs.pop('exception_retry_count', max_retries)
            attempt = 0
            while True:
                try:
                    return fun(*args, **kwargs)
                except Exception:  # pragma: no cover
                    # Depending on the database backend used we can see
                    # various exceptions. E.g. psycopg2 raises if some
                    # operation broke the transaction, so saving the task
                    # result is impossible until we roll back.
                    if attempt >= retry_budget:
                        raise
                    attempt += 1
                    try:
                        rollback_unless_managed()
                    except Exception:
                        pass
        return wrapper
    return decorate
def mu(X, Z, A):
    '''Mean molecular weight assuming full ionisation.
    (Kippenhahn & Weigert, Ch 13.1, Eq. 13.6)

    Parameters
    ----------
    X : float or array-like
        Mass fraction vector.
    Z : float or array-like
        Charge number vector.
    A : float or array-like
        Mass number vector.

    Returns
    -------
    float
        Mean molecular weight, 1 / sum(X * (1 + Z) / A).
    '''
    # Coerce inputs to arrays so the arithmetic below is element-wise.
    if not isinstance(Z, np.ndarray):
        Z = np.array(Z)
    if not isinstance(A, np.ndarray):
        A = np.array(A)
    if not isinstance(X, np.ndarray):
        X = np.array(X)
    try:
        # Numerator is a float, so plain `/` is true division — identical
        # to the former old_div() Python-2 compatibility call.
        return 1. / sum(X * (1. + Z) / A)
    except TypeError:
        # Scalar inputs produce 0-d arrays, which sum() cannot iterate;
        # wrap them as one-element vectors and retry.
        X = np.array([X])
        A = np.array([A])
        Z = np.array([Z])
        return 1. / sum(X * (1. + Z) / A)
def format_param_list(listed_params, output_name):
    '''Utility method for formatting lists of parameters for api consumption.
    Useful for email address lists, etc.

    Args:
        listed_params (list of values) - the list to format
        output_name (str) - the parameter name to prepend to each key

    Returns:
        dict mapping "<output_name>[<index>]" to each list item; empty
        when the list is empty or None.
    '''
    if not listed_params:
        return {}
    return {
        "{}[{}]".format(str(output_name), index): item
        for index, item in enumerate(listed_params)
    }
def _resolved_type(self):
    """Return the type for the column, and a flag indicating that the
    column has codes (stray binary values mixed into another type)."""
    import datetime
    # Fraction of counted values matching each type test (None for an empty
    # column). `tests` is a module-level list of (type, test_func) pairs;
    # the extra (None, None) entry adds a bucket for unmatched values.
    self.type_ratios = {test: (float(self.type_counts[test]) / float(self.count)) if self.count else None for test, testf in tests + [(None, None)]}
    # If it is more than 5% str, it's a str
    try:
        if self.type_ratios.get(text_type, 0) + self.type_ratios.get(binary_type, 0) > .05:
            if self.type_counts[text_type] > 0:
                return text_type, False
            elif self.type_counts[binary_type] > 0:
                return binary_type, False
    except TypeError as e:
        # This is probably the result of the type being unknown
        # (ratios of None cannot be added).
        pass
    # Otherwise pick the most specific type seen, preferring temporal
    # types, then numerics, then strings.
    if self.type_counts[datetime.datetime] > 0:
        num_type = datetime.datetime
    elif self.type_counts[datetime.date] > 0:
        num_type = datetime.date
    elif self.type_counts[datetime.time] > 0:
        num_type = datetime.time
    elif self.type_counts[float] > 0:
        num_type = float
    elif self.type_counts[int] > 0:
        num_type = int
    elif self.type_counts[text_type] > 0:
        num_type = text_type
    elif self.type_counts[binary_type] > 0:
        num_type = binary_type
    else:
        num_type = unknown
    # Binary values alongside a non-binary resolved type are treated as codes.
    if self.type_counts[binary_type] > 0 and num_type != binary_type:
        has_codes = True
    else:
        has_codes = False
    return num_type, has_codes
def types(self):
    """Tuple containing the types transformed by this transformer."""
    # Order is significant: bytes before str, matching the flag order.
    flags = ((bytes, self._transform_bytes), (str, self._transform_str))
    return tuple(kind for kind, enabled in flags if enabled)
def check_compression_gathering(self, ds):
    """Check CF "compression by gathering" (section 8.2).

    Simple packing may be achieved through the NUG-defined
    ``scale_factor``/``add_offset`` attributes; this check validates the
    complementary ``compress`` mechanism: every variable carrying a
    ``compress`` attribute must be a one-dimensional integer index
    variable, and each dimension name listed by the attribute must exist
    in the dataset.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    for compress_var in ds.get_variables_by_attributes(compress=lambda s: s is not None):
        passed = True
        reasoning = []
        # The set of dimension names the compress attribute references.
        referenced = set(compress_var.compress.split(' '))
        if compress_var.ndim != 1:
            passed = False
            reasoning.append("Compression variable {} may only have one dimension".format(compress_var.name))
        # A proper array index must be a signed or unsigned integer type.
        if compress_var.dtype.kind not in {'i', 'u'}:
            passed = False
            reasoning.append("Compression variable {} must be an integer type to form a proper array index".format(compress_var.name))
        # Every dimension named by the attribute must exist in the dataset.
        if not referenced.issubset(ds.dimensions):
            missing_dims = sorted(referenced.difference(ds.dimensions))
            passed = False
            reasoning.append("The following dimensions referenced by the compress attribute of variable {} do not exist: {}".format(compress_var.name, missing_dims))
        results.append(Result(BaseCheck.MEDIUM, passed, self.section_titles['8.2'], reasoning))
    return results
def _zoom_labels(self, zoom):
    """Scale the grid label font to the given zoom factor."""
    font = self.grid.GetLabelFont()
    base_size = get_default_font().GetPointSize()
    # Clamp to at least 1 pt so the label font never becomes invalid.
    font.SetPointSize(max(1, int(round(base_size * zoom))))
    self.grid.SetLabelFont(font)
def recv_msg(self):
    """Receive a single message from the stream.

    :return: A SLIP-decoded message
    :rtype: bytes
    :raises ProtocolError: when a SLIP protocol error has been encountered.
        A subsequent call to :meth:`recv_msg` (after handling the exception)
        will return the message from the next packet.
    """
    # First check if there are any pending messages
    if self._messages:
        return self._messages.popleft()
    # No pending messages left. If a ProtocolError has occurred
    # it must be re-raised here:
    if self._protocol_error:
        self._handle_pending_protocol_error()
    while not self._messages and not self._stream_closed:
        # As long as no messages are available, flush the internal
        # packet buffer, and try to read data.
        try:
            if self._flush_needed:
                self._flush_needed = False
                self._messages.extend(self.driver.flush())
            else:
                data = self.recv_bytes()
                if data == b'':
                    # An empty read marks end-of-stream.
                    self._stream_closed = True
                self._messages.extend(self.driver.receive(data))
        except ProtocolError as pe:
            # Salvage any messages decoded before the error, then stash the
            # error (and its traceback) to be raised on a later call once
            # the salvaged messages have been consumed.
            self._messages.extend(self.driver.messages)
            self._protocol_error = pe
            self._traceback = sys.exc_info()[2]
            break
    if self._messages:
        return self._messages.popleft()
    if self._protocol_error:
        self._handle_pending_protocol_error()
    else:
        # Stream closed cleanly with nothing buffered.
        return b''
def _as_json ( self , response ) :
"""Assuming this is not empty , return the content as JSON .
Result / exceptions is not determined if you call this method without testing _ is _ empty .
: raises : DeserializationError if response body contains invalid json data .""" | # Assume ClientResponse has " body " , and otherwise it ' s a requests . Response
content = response . text ( ) if hasattr ( response , "body" ) else response . text
try :
return json . loads ( content )
except ValueError :
raise DeserializationError ( "Error occurred in deserializing the response body." ) |
def raises(self):
    """Return the list of :raises meta entries."""
    keywords = {'raises', 'raise', 'except', 'exception'}
    return [DocstringRaises.from_meta(meta)
            for meta in self.meta
            if meta.args[0] in keywords]
def distinct():
    """Validates that all items in the given field list value are distinct,
    i.e. that the list contains no duplicates."""
    def validate(value):
        # Quadratic scan keeps support for unhashable items (dicts, lists),
        # which a set-based check would reject.
        has_duplicate = any(item in value[pos + 1:] for pos, item in enumerate(value))
        if has_duplicate:
            return e("{} is not a distinct set of values", value)
    return validate
def _JMS_to_Bern_II(C, udlnu):
    """From JMS to BernII basis for charged current process semileptonic
    operators. `udlnu` should be of the form 'udl_enu_tau', 'cbl_munu_e' etc.
    """
    # Decode the flavour indices from the operator label:
    #   char 0: up-type quark, char 1: down-type quark,
    #   chars 4..'n': charged lepton, after the second '_': neutrino flavour.
    u = uflav[udlnu[0]]
    d = dflav[udlnu[1]]
    l = lflav[udlnu[4:udlnu.find('n')]]
    lp = lflav[udlnu[udlnu.find('_', 5) + 1:len(udlnu)]]
    # Flavour suffix used in the Bern-basis coefficient keys, e.g. 'udetau'.
    ind = udlnu[0] + udlnu[1] + udlnu[4:udlnu.find('n')] + udlnu[udlnu.find('_', 5) + 1:len(udlnu)]
    # Map the JMS Wilson coefficients onto Bern II operators 1/5/1p/5p/7p
    # (complex-conjugated, indexed as (nu, l, d, u)).
    return {'1' + ind: C["VnueduLL"][lp, l, d, u].conj(),
            '5' + ind: C["SnueduRL"][lp, l, d, u].conj(),
            '1p' + ind: C["VnueduLR"][lp, l, d, u].conj(),
            '5p' + ind: C["SnueduRR"][lp, l, d, u].conj(),
            '7p' + ind: C["TnueduRR"][lp, l, d, u].conj()}
async def move_to(self, mount: top_types.Mount, abs_position: top_types.Point, speed: float = None, critical_point: CriticalPoint = None):
    """Move the critical point of the specified mount to a location
    relative to the deck, at the specified speed. 'speed' sets the speed
    of all robot axes to the given value. So, if multiple axes are to be
    moved, they will do so at the same speed.

    The critical point of the mount depends on the current status of
    the mount:
    - If the mount does not have anything attached, its critical point is
      the bottom of the mount attach bracket.
    - If the mount has a pipette attached and it is not known to have a
      pipette tip, the critical point is the end of the nozzle of a single
      pipette or the end of the backmost nozzle of a multipipette.
    - If the mount has a pipette attached and it is known to have a
      pipette tip, the critical point is the end of the pipette tip for
      a single pipette or the end of the tip of the backmost nozzle of a
      multipipette.

    :param mount: The mount to move
    :param abs_position: The target absolute position in
                         :ref:`protocol-api-deck-coords` to move the
                         critical point to
    :param speed: An overall head speed to use during the move
    :param critical_point: The critical point to move. In most situations
                           this is not needed. If not specified, the
                           current critical point will be moved. If
                           specified, the critical point must be one that
                           actually exists - that is, specifying
                           :py:attr:`.CriticalPoint.NOZZLE` when no pipette
                           is attached or :py:attr:`.CriticalPoint.TIP`
                           when no tip is applied will result in an error.
    """
    # Absolute targets are meaningless before the robot has been homed.
    if not self._current_position:
        raise MustHomeError
    await self._cache_and_maybe_retract_mount(mount)
    z_axis = Axis.by_mount(mount)
    # Only the left mount carries a configured offset relative to the deck.
    if mount == top_types.Mount.LEFT:
        offset = top_types.Point(*self.config.mount_offset)
    else:
        offset = top_types.Point(0, 0, 0)
    cp = self._critical_point_for(mount, critical_point)
    # Convert the requested deck position into per-axis targets by
    # removing the mount offset and the critical-point offset.
    target_position = OrderedDict(((Axis.X, abs_position.x - offset.x - cp.x),
                                   (Axis.Y, abs_position.y - offset.y - cp.y),
                                   (z_axis, abs_position.z - offset.z - cp.z)))
    await self._move(target_position, speed=speed)
def from_mongo ( cls , doc ) :
"""Convert data coming in from the MongoDB wire driver into a Document instance .""" | if doc is None : # To support simplified iterative use , None should return None .
return None
if isinstance ( doc , Document ) : # No need to perform processing on existing Document instances .
return doc
if cls . __type_store__ and cls . __type_store__ in doc : # Instantiate specific class mentioned in the data .
cls = load ( doc [ cls . __type_store__ ] , 'marrow.mongo.document' )
# Prepare a new instance in such a way that changes to the instance will be reflected in the originating doc .
instance = cls ( _prepare_defaults = False )
# Construct an instance , but delay default value processing .
instance . __data__ = doc
# I am Popeye of Borg ( pattern ) ; you will be askimilgrated .
instance . _prepare_defaults ( )
# pylint : disable = protected - access - - deferred default value processing .
return instance |
def _filter_scanline(self, filter_type, line, result):
    """Apply a scanline filter to a scanline.

    `filter_type` specifies the filter type (0 to 4);
    `line` specifies the current (unfiltered) scanline as a sequence
    of bytes; the filtered bytes are written into `result`.
    """
    assert 0 <= filter_type < 5
    if self.prev is None:
        # We're on the first line. Some of the filters can be reduced
        # to simpler cases which makes handling the line "off the top"
        # of the image simpler. "up" becomes "none"; "paeth" becomes
        # "left" (non-trivial, but true). "average" needs to be handled
        # specially.
        if filter_type == 2:  # "up"
            filter_type = 0
        elif filter_type == 3:
            # "average": synthesize an all-zero previous line instead.
            self.prev = newBarray(len(line))
        elif filter_type == 4:  # "paeth"
            filter_type = 1
    if filter_type == 1:
        self.__do_filter_sub(line, result)
    elif filter_type == 2:
        self.__do_filter_up(line, result)
    elif filter_type == 3:
        self.__do_filter_average(line, result)
    elif filter_type == 4:
        self.__do_filter_paeth(line, result)
    # NOTE(review): filter type 0 ("none") falls through without touching
    # `result` — presumably the caller pre-fills it with the raw scanline
    # bytes; confirm against the call site.
def get_collection(self, folderid, username="", offset=0, limit=10):
    """Fetch collection folder contents.

    :param folderid: UUID of the folder to list
    :param username: The user to list folders for, if omitted the authenticated user is used
    :param offset: the pagination offset
    :param limit: the pagination limit
    """
    endpoint = '/collections/{}'.format(folderid)
    if not username and self.standard_grant_type == "authorization_code":
        # Authenticated user: no username needed in the query.
        response = self._req(endpoint, {"offset": offset, "limit": limit})
    elif not username:
        raise DeviantartError("No username defined.")
    else:
        response = self._req(endpoint, {"username": username, "offset": offset, "limit": limit})
    deviations = []
    for item in response['results']:
        deviation = Deviation()
        deviation.from_dict(item)
        deviations.append(deviation)
    name = response['name'] if "name" in response else None
    return {"results": deviations, "name": name, "has_more": response['has_more'], "next_offset": response['next_offset']}
def _cleanSessions(self):
    """Delete persistent sessions that have not been used within the
    configured session lifetime, and record when the cleanup ran."""
    cutoff = extime.Time() - timedelta(seconds=PERSISTENT_SESSION_LIFETIME)
    expired = self.store.query(PersistentSession, PersistentSession.lastUsed < cutoff)
    expired.deleteFromStore()
    self._lastClean = self._clock.seconds()
def ver_dec_content(parts, sign_key=None, enc_key=None, sign_alg='SHA256'):
    """Verifies the value of a cookie.

    :param parts: The parts of the payload
    :param sign_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance
    :param enc_key: A :py:class:`cryptojwt.jwk.hmac.SYMKey` instance
    :param sign_alg: Which signing algorithm was used
    :return: A tuple with basic information and a timestamp, or None
    :raises VerificationError: when a 3-part (signed-only) payload fails
        HMAC verification
    """
    if parts is None:
        return None
    elif len(parts) == 3:
        # Signed-only cookie: verify the HMAC over payload + timestamp.
        timestamp, load, b64_mac = parts
        mac = base64.b64decode(b64_mac)
        verifier = HMACSigner(algorithm=sign_alg)
        if verifier.verify(load.encode('utf-8') + timestamp.encode('utf-8'), mac, sign_key.key):
            return load, timestamp
        else:
            raise VerificationError()
    elif len(parts) == 4:
        # Encrypted cookie: [timestamp, iv, ciphertext, tag], AES-GCM.
        b_timestamp = parts[0]
        iv = base64.b64decode(parts[1])
        ciphertext = base64.b64decode(parts[2])
        tag = base64.b64decode(parts[3])
        decrypter = AES_GCMEncrypter(key=enc_key.key)
        msg = decrypter.decrypt(ciphertext, iv, tag=tag)
        # The decrypted blob is length-value packed: [load, timestamp(, mac)].
        p = lv_unpack(msg.decode('utf-8'))
        load = p[0]
        timestamp = p[1]
        if len(p) == 3:
            # Sign-then-encrypt variant: additionally check the inner HMAC.
            verifier = HMACSigner(algorithm=sign_alg)
            if verifier.verify(load.encode('utf-8') + timestamp.encode('utf-8'), base64.b64decode(p[2]), sign_key.key):
                return load, timestamp
            # NOTE(review): a failed inner-MAC check falls through to the
            # final `return None` instead of raising — confirm intended.
        else:
            return load, timestamp
    return None
def _build_graph(self) -> nx.DiGraph:
    """Build the directed state-transition graph from the model,
    counting states and transitions along the way."""
    graph = nx.DiGraph()
    for state in self.model.all_states():
        self._number_of_states += 1
        for successor in self.model.available_state(state):
            self._number_of_transitions += 1
            graph.add_edge(self._transform_state_to_string(state),
                           self._transform_state_to_string(successor))
    return graph
def get_conv(bits, bin_point, signed=False, scaling=1.0):
    """Create a I{conversion structure} implemented as a dictionary
    containing all parameters needed to switch between number
    representations.

    @param bits: the number of bits (8, 16 or 32)
    @param bin_point: binary point position
    @param signed: True if Fix, False if UFix
    @param scaling: optional scaling to be applied after the conversion
    @return: a conversion structure that can be applied in both directions
        of conversion for the given specs.
    @raise ConversionError: if C{bits} is not 8, 16 or 32.
    """
    conversion_t = {
        "bits": bits,
        "bin_point": bin_point,
        "signed": signed,
        "scaling": scaling,
        # Value of one LSB below the binary point.
        "dec_step": 1.0 / (2 ** bin_point),
        # Bit mask covering the fractional part; equals 2**bin_point - 1
        # (formerly computed as sum of powers). dec_max = dec_mask * dec_step.
        "dec_mask": (1 << bin_point) - 1,
    }
    # struct format character for the matching unsigned integer width.
    formats = {8: "B", 16: "H", 32: "I"}
    if bits not in formats:
        # Typo fixed: "numer" -> "number".
        raise ConversionError("number of bits not supported: " + str(bits))
    conversion_t["fmt"] = formats[bits]
    if signed:
        _get_signed_params(conversion_t)
    else:
        _get_unsigned_params(conversion_t)
    return conversion_t
def p_or_p_expression(tok):
    """or_p_expression : xor_p_expression OP_OR_P or_p_expression
    | xor_p_expression"""
    # Three symbols on the right-hand side means a binary OR expression;
    # a single symbol is just passed through unchanged.
    if len(tok) != 4:
        tok[0] = tok[1]
    else:
        tok[0] = LogicalBinOpRule(tok[2], tok[1], tok[3])
def create_property_nod_worksheets(workbook, data_list, result_info_key, identifier_keys):
    """Split the property/nod payload across two worksheets, since the data
    doesn't come flat enough to make sense on a single sheet.

    Args:
        workbook: the main workbook to add the sheets to
        data_list: the main list of data
        result_info_key: the key in api_data dicts that contains the data results.
            Should always be 'address_info' for property/nod.
        identifier_keys: the list of keys used as requested identifiers
            (address, zipcode, city, state, etc)
    """
    details_rows = []
    history_rows = []
    for prop_data in data_list:
        nod_data = prop_data['property/nod']
        if nod_data is None:
            nod_data = {}
        # Default-history events go on their own sheet.
        history = nod_data.pop('default_history', [])
        _set_identifier_fields(nod_data, prop_data, result_info_key, identifier_keys)
        details_rows.append(nod_data)
        for event in history:
            _set_identifier_fields(event, prop_data, result_info_key, identifier_keys)
            history_rows.append(event)
    details_sheet = workbook.create_sheet(title='NOD Details')
    write_data(details_sheet, details_rows)
    history_sheet = workbook.create_sheet(title='NOD Default History')
    write_data(history_sheet, history_rows)
def generate_dynamodb_tables():
    """Create the Blockade DynamoDB tables.

    Tables that already exist are skipped; the raw ``create_table``
    responses for newly created tables are returned.
    """
    logger.debug("[#] Setting up DynamoDB tables")
    client = boto3.client('dynamodb', region_name=PRIMARY_REGION)
    existing_tables = client.list_tables()['TableNames']
    responses = []
    for label in DYNAMODB_TABLES:
        if label in existing_tables:
            logger.debug("[*] Table %s already exists" % label)
            continue
        # Base throughput settings plus the per-table schema definition.
        table_spec = {
            'TableName': label,
            'ProvisionedThroughput': {'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5},
        }
        table_spec.update(DYNAMODB_SCHEMAS[label])
        responses.append(client.create_table(**table_spec))
        logger.debug("[#] Successfully setup DynamoDB table %s" % label)
    logger.info("[#] Successfully setup DynamoDB tables")
    return responses
def nearest_neighbors(X, n_neighbors, metric, metric_kwds, angular, random_state, verbose=False):
    """Compute the ``n_neighbors`` nearest points for each data point in ``X``
    under ``metric``. This may be exact, but more likely is approximated via
    nearest neighbor descent.

    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        The input data to compute the k-neighbor graph of.
    n_neighbors: int
        The number of nearest neighbors to compute for each sample in ``X``.
    metric: string or callable
        The metric to use for the computation.
    metric_kwds: dict
        Any arguments to pass to the metric computation function.
    angular: bool
        Whether to use angular rp trees in NN approximation.
    random_state: np.random state
        The random state to use for approximate NN computations.
    verbose: bool
        Whether to print status data during the computation.

    Returns
    -------
    knn_indices: array of shape (n_samples, n_neighbors)
        The indices of the ``n_neighbors`` closest points in the dataset.
    knn_dists: array of shape (n_samples, n_neighbors)
        The distances to the ``n_neighbors`` closest points in the dataset.
    rp_forest: list
        The random projection forest used (empty for the precomputed metric).
    """
    if verbose:
        print(ts(), "Finding Nearest Neighbors")
    if metric == "precomputed":
        # Note that this does not support sparse distance matrices yet...
        # Compute indices of n nearest neighbors
        knn_indices = np.argsort(X)[:, :n_neighbors]
        # Compute the nearest neighbor distances
        # (equivalent to np.sort(X)[:, :n_neighbors])
        knn_dists = X[np.arange(X.shape[0])[:, None], knn_indices].copy()
        rp_forest = []
    else:
        if callable(metric):
            distance_func = metric
        elif metric in dist.named_distances:
            distance_func = dist.named_distances[metric]
        else:
            raise ValueError("Metric is neither callable, nor a recognised string")
        if metric in ("cosine", "correlation", "dice", "jaccard"):
            angular = True
        rng_state = random_state.randint(INT32_MIN, INT32_MAX, 3).astype(np.int64)
        if scipy.sparse.isspmatrix_csr(X):
            if metric in sparse.sparse_named_distances:
                distance_func = sparse.sparse_named_distances[metric]
                if metric in sparse.sparse_need_n_features:
                    metric_kwds["n_features"] = X.shape[1]
            else:
                # BUG FIX: previously written as
                #   "Metric {} not supported for sparse " + "data".format(metric)
                # so .format bound only to the "data" literal and the
                # placeholder was never filled.
                raise ValueError("Metric {} not supported for sparse data".format(metric))
            metric_nn_descent = sparse.make_sparse_nn_descent(distance_func, tuple(metric_kwds.values()))
            # TODO: Hacked values for now
            n_trees = 5 + int(round((X.shape[0]) ** 0.5 / 20.0))
            n_iters = max(5, int(round(np.log2(X.shape[0]))))
            if verbose:
                print(ts(), "Building RP forest with", str(n_trees), "trees")
            rp_forest = make_forest(X, n_neighbors, n_trees, rng_state, angular)
            leaf_array = rptree_leaf_array(rp_forest)
            if verbose:
                print(ts(), "NN descent for", str(n_iters), "iterations")
            knn_indices, knn_dists = metric_nn_descent(
                X.indices, X.indptr, X.data, X.shape[0], n_neighbors, rng_state,
                max_candidates=60, rp_tree_init=True, leaf_array=leaf_array,
                n_iters=n_iters, verbose=verbose,
            )
        else:
            metric_nn_descent = make_nn_descent(distance_func, tuple(metric_kwds.values()))
            # TODO: Hacked values for now
            n_trees = 5 + int(round((X.shape[0]) ** 0.5 / 20.0))
            n_iters = max(5, int(round(np.log2(X.shape[0]))))
            if verbose:
                print(ts(), "Building RP forest with", str(n_trees), "trees")
            rp_forest = make_forest(X, n_neighbors, n_trees, rng_state, angular)
            leaf_array = rptree_leaf_array(rp_forest)
            if verbose:
                print(ts(), "NN descent for", str(n_iters), "iterations")
            knn_indices, knn_dists = metric_nn_descent(
                X, n_neighbors, rng_state,
                max_candidates=60, rp_tree_init=True, leaf_array=leaf_array,
                n_iters=n_iters, verbose=verbose,
            )
    if np.any(knn_indices < 0):
        # BUG FIX: the adjacent string literals previously joined without
        # spaces ("...samples.Results...with" + "different...").
        warn(
            "Failed to correctly find n_neighbors for some samples. "
            "Results may be less than ideal. Try re-running with "
            "different parameters."
        )
    if verbose:
        print(ts(), "Finished Nearest Neighbor Search")
    return knn_indices, knn_dists, rp_forest
def _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, data):
    """Run validation of a caller against the truth set using bcbio.variation.

    Returns a dict of output files: the summary CSV, the grading YAML and
    the discordant/concordant comparison VCFs.  The comparison itself is
    only re-run when its outputs are missing.
    """
    val_config_file = _create_validate_config_file(vrn_file, rm_file, rm_interval_file, base_dir, data)
    work_dir = os.path.join(base_dir, "work")
    out = {"summary": os.path.join(work_dir, "validate-summary.csv"),
           "grading": os.path.join(work_dir, "validate-grading.yaml"),
           "discordant": os.path.join(work_dir, "%s-eval-ref-discordance-annotate.vcf" % sample)}
    if not utils.file_exists(out["discordant"]) or not utils.file_exists(out["grading"]):
        bcbio_variation_comparison(val_config_file, base_dir, data)
    # BUG FIX: filter() returns a lazy iterator on Python 3 and cannot be
    # indexed with [0]; materialize the existing concordance files first.
    candidates = [os.path.join(work_dir, "%s-%s-concordance.vcf" % (sample, x))
                  for x in ["eval-ref", "ref-eval"]]
    out["concordant"] = [f for f in candidates if os.path.exists(f)][0]
    return out
def has_delete_permission(self, request, obj=None):
    """Return True if *request* may delete the given Django model instance.

    The default implementation ignores ``obj``.  Subclasses may override it;
    in that case it should return True if *request* may delete ``obj``, and
    when ``obj`` is None, True if *request* may delete *any* object of the
    given type.
    """
    opts = self.opts
    codename = opts.get_delete_permission()
    return request.user.has_perm("%s.%s" % (opts.app_label, codename), obj)
def list_namespaced_limit_range(self, namespace, **kwargs):
    """List or watch objects of kind LimitRange in *namespace*.

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the decoded result::

        thread = api.list_namespaced_limit_range(namespace, async_req=True)
        result = thread.get()

    :param namespace: object name and auth scope, such as for teams and
        projects (required)
    :param kwargs: optional query parameters forwarded to the HTTP call
        (``pretty``, ``_continue``, ``field_selector``, ``label_selector``,
        ``limit``, ``resource_version``, ``timeout_seconds``, ``watch``).
    :return: V1LimitRangeList, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_namespaced_limit_range_with_http_info(namespace, **kwargs)
    response = self.list_namespaced_limit_range_with_http_info(namespace, **kwargs)
    return response
def add_callback(self, f):
    """Register *f* as the callback fired when a cached result is used.

    Magic method assigned to ``fun.callback``, which lets a decorated
    function declare its callback like so::

        @bucket
        def fun(...):
            ...

        @fun.callback
        def fun():
            ...
    """
    self.callback = f
    wrapped = self.fref()
    return self.decorate(wrapped)
def scan_process_filenames(self):
    """Refresh the filename of every process in the snapshot when possible.

    @note: Typically you don't need to call this method.  It's called
        automatically by L{scan} to resolve the full pathname for each
        process, since some scan methods only yield filenames without the
        path component.  If unsure, use L{scan} instead.
    @see: L{scan}, L{Process.get_filename}
    @rtype: bool
    @return: C{True} if all pathnames were retrieved, C{False} if the
        debugger lacks permission to scan some processes.  Either way, every
        accessible process ends up with a full pathname instead of just a
        filename.
    """
    complete = True
    for process in self.__processDict.values():
        try:
            fresh_name = None
            previous_name = process.fileName
            try:
                # Clear the cached name so get_filename() re-resolves it.
                process.fileName = None
                fresh_name = process.get_filename()
            finally:
                if not fresh_name:
                    # Resolution failed; restore whatever we had before.
                    process.fileName = previous_name
                    complete = False
        except Exception:
            complete = False
    return complete
def get_addr(iface='lo0', iface_type='link'):
    """Return the address of network interface *iface*.

    Uses ``psutil`` address families: ``'link'`` for the MAC address,
    ``'inet'``/``'inet6'`` for IP addresses.  Only Darwin and Linux are
    supported.

    :param iface: network interface name
    :type iface: str
    :param iface_type: one of ``'link'``, ``'inet'``, ``'inet6'``
    :return: the matching address, or None when the interface has no
        address of the requested family
    :rtype: str / None
    """
    if platform.system() not in ['Darwin', 'Linux']:
        raise SystemExit('Unsupported System. Only Mac/Linux Supported')
    if iface_type == 'link':
        family = psutil.AF_LINK
    elif iface_type in ['inet', 'inet6']:
        family = getattr(socket, 'AF_{}'.format(iface_type.upper()))
    else:
        raise SystemError('Not valid address FAMILY')
    for snic in psutil.net_if_addrs()[iface]:
        if snic.family == family:
            return snic.address
def create_api_pool(self):
    """Return an Api Pool services facade bound to this client's credentials."""
    pool = ApiPool(self.networkapi_url, self.user, self.password, self.user_ldap)
    return pool
def _extract_member ( self , member , targetpath , pwd ) :
"""Extract the ZipInfo object ' member ' to a physical
file on the path targetpath .""" | # build the destination pathname , replacing
# forward slashes to platform specific separators .
arcname = member . filename . replace ( '/' , os . path . sep )
if os . path . altsep :
arcname = arcname . replace ( os . path . altsep , os . path . sep )
# interpret absolute pathname as relative , remove drive letter or
# UNC path , redundant separators , " . " and " . . " components .
arcname = os . path . splitdrive ( arcname ) [ 1 ]
invalid_path_parts = ( '' , os . path . curdir , os . path . pardir )
arcname = os . path . sep . join ( x for x in arcname . split ( os . path . sep ) if x not in invalid_path_parts )
if os . path . sep == '\\' : # filter illegal characters on Windows
arcname = self . _sanitize_windows_name ( arcname , os . path . sep )
targetpath = os . path . join ( targetpath , arcname )
targetpath = os . path . normpath ( targetpath )
# Create all upper directories if necessary .
upperdirs = os . path . dirname ( targetpath )
if upperdirs and not os . path . exists ( upperdirs ) :
os . makedirs ( upperdirs )
if member . filename [ - 1 ] == '/' :
if not os . path . isdir ( targetpath ) :
os . mkdir ( targetpath )
return targetpath
with self . open ( member , pwd = pwd ) as source , open ( targetpath , "wb" ) as target :
shutil . copyfileobj ( source , target )
return targetpath |
def set_zonefile_present(self, zfhash, block_height, con=None, path=None):
    """Mark *zfhash* as present in the atlas DB.

    If the zonefile was previously absent and a storage callback is
    registered, notify the callback of the newly-arrived zonefile.
    """
    was_present = atlasdb_set_zonefile_present(zfhash, True, con=con, path=path)
    if was_present or not self.store_zonefile_cb:
        log.debug('{} was seen before, so not passing it along to zonefile storage watchers'.format(zfhash))
        return
    # Newly-arrived zonefile: hand it to anyone who cares.
    log.debug('{} was new, so passing it along to zonefile storage watchers...'.format(zfhash))
    self.store_zonefile_cb(zfhash, block_height)
def ae_latent_sample(latents_dense, inputs, ed, embed, iters, hparams):
    """Sample from the latent space in the autoencoder.

    Falls back to beam search (beam size 1) for greedy, non-blocked
    decoding; otherwise refines an initial prediction autoregressively,
    committing one more latent position on each of ``iters`` passes.

    Args:
      latents_dense: dense latent tensor used to prime the decoder.
      inputs: encoder output conditioning the decode.
      ed: encoder-decoder attention bias.
      embed: function mapping discrete latents back to dense embeddings.
      iters: number of refinement iterations.
      hparams: model hyperparameters.

    Returns:
      A tensor of discrete latent codes.
    """
    if hparams.num_decode_blocks < 2 and hparams.sampling_temp == 0.0:
        # TODO(lukaszkaiser): beam-search only works in non-blocked mode for now.
        tf.logging.info("Running beam-search for latents with beam size 1.")
        return ae_latent_sample_beam(latents_dense, inputs, ed, embed, hparams)
    # Initial prediction over all positions at once.
    latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, "extra")
    latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams)

    def next_bit(latents_discrete, i):
        # Re-decode from the current guess, then keep the already-committed
        # prefix (positions <= i) and take the fresh prediction for the rest.
        latents_discrete_prev = latents_discrete
        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
            latents_dense = embed(latents_discrete)
            latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, "extra")
            latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams)
            return tf.concat([latents_discrete_prev[:, :(i + 1), :], latents_discrete[:, (i + 1):, :]], axis=1)

    for i in range(iters):
        latents_discrete = next_bit(latents_discrete, i)
    return latents_discrete
def _detect_timezone_etc_localtime():
    """Detect the timezone based on the /etc/localtime file.

    Builds a tzinfo from /etc/localtime, then tries to find a named zone in
    the local tz database (or pytz's bundled one) whose attributes match it.
    Returns a pytz timezone on a unique or best match, the raw localtime
    tzinfo when nothing matches, or None when /etc/localtime is absent.
    """
    matches = []
    if os.path.exists("/etc/localtime"):
        f = open("/etc/localtime", "rb")
        localtime = pytz.tzfile.build_tzinfo("/etc/localtime", f)
        f.close()
        # We want to match against the local database because /etc/localtime
        # will be copied from that. Once we have found a name for
        # /etc/localtime, we can use the name to get the "same" timezone from
        # the inbuilt pytz database.
        tzdatabase = _load_local_tzinfo()
        if tzdatabase:
            tznames = tzdatabase.keys()
            tzvalues = tzdatabase.__getitem__
        else:
            tznames = pytz.all_timezones
            tzvalues = _tzinfome
        # See if we can find a "Human Name" for this..
        for tzname in tznames:
            tz = tzvalues(tzname)
            # Quick reject: candidate must expose the exact same attribute set.
            if dir(tz) != dir(localtime):
                continue
            for attrib in dir(tz):
                # Ignore functions and specials
                if callable(getattr(tz, attrib)) or attrib.startswith("__"):
                    continue
                # This will always be different
                if attrib == "zone" or attrib == "_tzinfos":
                    continue
                if getattr(tz, attrib) != getattr(localtime, attrib):
                    break
            # We get here iff break didn't happen, i.e. no meaningful
            # attributes differ between tz and localtime
            else:
                # Try and get a timezone from pytz which has the same name as
                # the zone which matches in the local database.
                if tzname not in pytz.all_timezones:
                    warnings.warn("Skipping %s because not in pytz database." % tzname)
                    continue
                matches.append(_tzinfome(tzname))
        matches.sort(key=lambda x: x.zone)
        if len(matches) == 1:
            return matches[0]
        if len(matches) > 1:
            # NOTE(review): "where" in this message looks like a typo for
            # "were"; left unchanged as it is a runtime string.
            warnings.warn("We detected multiple matches for your /etc/localtime. " "(Matches where %s)" % matches)
            return matches[0]
        else:
            warnings.warn("We detected no matches for your /etc/localtime.")
        # Register /etc/localtime as the timezone loaded.
        pytz._tzinfo_cache["/etc/localtime"] = localtime
        return localtime
def centre_of_mass(self):
    """Return the centre of mass of this AMPAL object.

    Notes
    -----
    All atoms are included in the calculation; call the module-level
    ``centre_of_mass`` manually if a different selection is required.

    Returns
    -------
    centre_of_mass : numpy.array
        3D coordinate for the centre of mass.
    """
    atoms = list(self.get_atoms())
    elements = {atom.element for atom in atoms}
    mass_of = {element: ELEMENT_DATA[element]['atomic mass'] for element in elements}
    points = [atom._vector for atom in atoms]
    masses = [mass_of[atom.element] for atom in atoms]
    return centre_of_mass(points=points, masses=masses)
def check_permissions(self, request):
    """Check if *request* should be permitted by every configured permission.

    ``permission_denied`` (which raises the appropriate exception) is
    invoked for any permission that refuses the request.

    :param request: Pyramid Request object.
    """
    for perm in self.get_permissions():
        if perm.has_permission(request, self):
            continue
        self.permission_denied(request, message=getattr(perm, 'message', None))
def uri_split_tree(uri):
    """Fully decompose *uri*.

    Returns ``(scheme, (user, passwd, host, port), path,
    ((k1, v1), (k2, v2), ...), fragment)`` by running ``basic_urisplit()``
    and then ``split_authority()`` / ``split_query()`` on the authority and
    query components.  Empty components come back as None.
    """
    scheme, authority, path, query, fragment = basic_urisplit(uri)
    if authority:
        authority = split_authority(authority)
    if query:
        query = split_query(query)
    # `x or None` normalizes every empty component to None.
    return (scheme or None, authority or None, path or None, query or None, fragment or None)
def is_element_present(driver, selector, by=By.CSS_SELECTOR):
    """Return whether *selector* locates an element on the current page.

    @Params
    driver - the webdriver object (required)
    selector - the locator that is used (required)
    by - the method to search for the locator (Default: By.CSS_SELECTOR)
    @Returns
    Boolean (is element present)
    """
    try:
        driver.find_element(by=by, value=selector)
    except Exception:
        return False
    return True
def add_attribute_group_items(attributegroupitems, **kwargs):
    """Populate attribute groups with items.

    ``attributegroupitems`` is a list of items, each of the form::

        'attr_id'   : X,
        'group_id'  : Y,
        'network_id': Z,

    Note that this approach supports the possibility of populating groups
    within multiple networks at the same time.

    When adding a group item, the function checks whether it can be added,
    based on the 'exclusivity' setup of the groups -- if a group is specified
    as being 'exclusive', then any attributes within that group cannot appear
    in any other group (within a network).

    :raises HydraError: if the input is not a list, on missing write
        permission, or when an exclusivity constraint would be violated.
    :returns: the list of newly created ``AttrGroupItem`` objects.
    """
    user_id = kwargs.get('user_id')
    if not isinstance(attributegroupitems, list):
        # NOTE(review): "Cannpt" typo is in the user-facing message; left
        # unchanged here, fix separately.
        raise HydraError("Cannpt add attribute group items. Attributegroupitems must be a list")
    new_agis_i = []      # AttrGroupItem objects created by this call
    group_lookup = {}    # group_id -> attribute group record
    # for each network, keep track of what attributes are contained in which groups it's in
    # structure: {NETWORK_ID: {ATTR_ID: [GROUP_ID]}}
    agi_lookup = {}
    network_lookup = {}  # network_id -> network record (fetched once each)
    # 'agi' = shorthand for 'attribute group item'
    for agi in attributegroupitems:
        network_i = network_lookup.get(agi.network_id)
        if network_i is None:
            network_i = _get_network(agi.network_id)
            network_lookup[agi.network_id] = network_i
        network_i.check_write_permission(user_id)
        # Get the group so we can check for exclusivity constraints
        # NOTE(review): group_i itself is never used afterwards; only the
        # group_lookup cache matters.
        group_i = group_lookup.get(agi.group_id)
        if group_i is None:
            group_lookup[agi.group_id] = _get_attr_group(agi.group_id)
        network_agis = agi_lookup
        # Create a map of all agis currently in the network
        # (only fetched from the DB the first time each network is seen).
        if agi_lookup.get(agi.network_id) is None:
            agi_lookup[agi.network_id] = {}
            network_agis = _get_attributegroupitems(agi.network_id)
            log.info(network_agis)
            for net_agi in network_agis:
                if net_agi.group_id not in group_lookup:
                    group_lookup[net_agi.group_id] = _get_attr_group(net_agi.group_id)
                # NOTE(review): this branch appears unreachable — items
                # fetched for agi.network_id share that network_id, whose
                # lookup entry was just set to {} (not None) above; and if it
                # did run, indexing agi_lookup[...] would raise KeyError.
                if agi_lookup.get(net_agi.network_id) is None:
                    agi_lookup[net_agi.network_id][net_agi.attr_id] = [net_agi.group_id]
                else:
                    if agi_lookup[net_agi.network_id].get(net_agi.attr_id) is None:
                        agi_lookup[net_agi.network_id][net_agi.attr_id] = [net_agi.group_id]
                    elif net_agi.group_id not in agi_lookup[net_agi.network_id][net_agi.attr_id]:
                        agi_lookup[net_agi.network_id][net_agi.attr_id].append(net_agi.group_id)
        # Does this agi exist anywhere else inside this network?
        # Go through all the groups that this attr is in and make sure it's not exclusive
        if agi_lookup[agi.network_id].get(agi.attr_id) is not None:
            for group_id in agi_lookup[agi.network_id][agi.attr_id]:
                group = group_lookup[group_id]
                # Another group has been found.
                if group.exclusive == 'Y':
                    # The other group is exclusive, so this attr can't be added
                    raise HydraError("Attribute %s is already in Group %s for network %s. This group is exclusive, so attr %s cannot exist in another group." % (agi.attr_id, group.id, agi.network_id, agi.attr_id))
            # Now check that if this group is exclusive, then the attr isn't in
            # any other groups
            if group_lookup[agi.group_id].exclusive == 'Y':
                if len(agi_lookup[agi.network_id][agi.attr_id]) > 0:
                    # The other group is exclusive, so this attr can't be added
                    raise HydraError("Cannot add attribute %s to group %s. This group is exclusive, but attr %s has been found in other groups (%s)" % (agi.attr_id, agi.group_id, agi.attr_id, agi_lookup[agi.network_id][agi.attr_id]))
        agi_i = AttrGroupItem()
        agi_i.network_id = agi.network_id
        agi_i.group_id = agi.group_id
        agi_i.attr_id = agi.attr_id
        # Update the lookup table in preparation for the next pass.
        if agi_lookup[agi.network_id].get(agi.attr_id) is None:
            agi_lookup[agi.network_id][agi.attr_id] = [agi.group_id]
        elif agi.group_id not in agi_lookup[agi.network_id][agi.attr_id]:
            agi_lookup[agi.network_id][agi.attr_id].append(agi.group_id)
        db.DBSession.add(agi_i)
        new_agis_i.append(agi_i)
    log.info(agi_lookup)
    db.DBSession.flush()
    return new_agis_i
def dump_p(self, filename):
    """Pickle the best-fit parameter values (``fit.p``) into *filename*.

    ``fit.dump_p(filename)`` saves the best-fit parameter values from a
    ``nonlinear_fit`` called ``fit``.  They are recovered with
    ``p = nonlinear_fit.load_parameters(filename)``, where ``p``'s layout
    is the same as that of ``fit.p``.

    Deprecated: use ``gvar.dump`` instead.
    """
    warnings.warn("nonlinear_fit.dump_p deprecated; use gvar.dump instead", DeprecationWarning)
    outfile = open(filename, "wb")
    try:
        pickle.dump(self.palt, outfile)
    finally:
        outfile.close()
def list_services(self):
    """List Services, wrapping each raw record in a FastlyService."""
    records = self._fetch("/service")
    # Keep the lazy map() so callers see the same (iterable) return type.
    return map(lambda record: FastlyService(self, record), records)
def update(self, buf):
    """Update this hash object's state with the provided string.

    Raises InvalidOperation once the hash has been finalised.
    """
    if self._final:
        raise InvalidOperation("Cannot update finalised tlsh")
    # Track how many bytes have been fed in, then delegate to the
    # underlying tlsh implementation.
    self._buf_len += len(buf)
    return self._tlsh.update(buf)
def next_frame_base():
    """Common HParams for next_frame models."""
    hparams = common_hparams.basic_params1()
    extra_hparams = [
        # Loss cutoff.
        ("video_modality_loss_cutoff", 0.01),
        # Additional resizing the frames before feeding them to model.
        ("preprocess_resize_frames", None),
        # How many data points to suffle. Ideally should be part of problem not model!
        ("shuffle_buffer_size", 128),
        # Tiny mode. For faster tests.
        ("tiny_mode", False),
        # In case a model supports smaller/faster version.
        ("small_mode", False),
        # In case a model has stochastic version.
        ("stochastic_model", False),
        # Internal loss for recurrent models.
        ("internal_loss", True),
        # choose from: concat, multiplicative, multi_additive
        ("action_injection", "multi_additive"),
        # Scheduled sampling method. Choose between
        # ground_truth_only, prediction_only, prob, count, prob_inverse_exp.
        ("scheduled_sampling_mode", "prediction_only"),
        ("scheduled_sampling_decay_steps", 10000),
        ("scheduled_sampling_max_prob", 1.0),
        ("scheduled_sampling_k", 900.0),
    ]
    for name, default in extra_hparams:
        hparams.add_hparam(name, default)
    return hparams
def bind_socket(address, port):
    """Return a listening socket bound on (address:port), or None when the
    bind fails (e.g. the port is already taken)."""
    assert address
    assert port
    listener = socket.socket()
    try:
        listener.bind((address, port))
    except socket.error:
        logger.error("Couldn't bind socket on %s:%s", address, port)
        return None
    logger.info('Listening on %s:%s', address, port)
    listener.listen(0)
    return listener
def search_app_root():
    """Search upward from the current directory for the Django app root.

    Walks up the directory tree (by changing the process working directory)
    until a directory containing ``apps.py`` is found.

    returns:
        - (String) Django application root path

    raises:
        - FileNotFoundError when no ancestor directory contains ``apps.py``.
    """
    while True:
        current = os.getcwd()
        if pathlib.Path("apps.py").is_file():
            return current
        # BUG FIX: the original compared pathlib.Path.cwd() == "/" (Path vs
        # str), which is always False and looped forever once the walk
        # reached the filesystem root.  A directory is the root iff it
        # equals its own parent (portable across platforms).
        if pathlib.Path(current) == pathlib.Path(current).parent:
            raise FileNotFoundError
        os.chdir("../")
def zadd(self, key, score, member, mode, client=None):
    """Like ZADD, but supports different score update modes, in case the
    member already exists in the ZSET:

    - "nx": Don't update the score
    - "xx": Only update elements that already exist. Never add elements.
    - "min": Use the smaller of the given and existing score
    - "max": Use the larger of the given and existing score
    """
    handlers = {
        'nx': self._zadd_noupdate,
        'xx': self._zadd_update_existing,
        'min': self._zadd_update_min,
        'max': self._zadd_update_max,
    }
    try:
        handler = handlers[mode]
    except KeyError:
        raise NotImplementedError('mode "%s" unsupported' % mode)
    return handler(keys=[key], args=[score, member], client=client)
def clear_state(self, activity, agent, registration=None):
    """Delete the state document(s) matching *activity* and *agent*.

    :param activity: Activity object of state(s) to be deleted
    :type activity: :class:`tincan.activity.Activity`
    :param agent: Agent object of state(s) to be deleted
    :type agent: :class:`tincan.agent.Agent`
    :param registration: registration UUID of state(s) to be deleted
    :type registration: str | unicode
    :return: LRS Response object
    :rtype: :class:`tincan.lrs_response.LRSResponse`
    """
    criteria = {'activity': activity, 'agent': agent, 'registration': registration}
    return self._delete_state(**criteria)
def exception(self):
    """Return an instance of the exception encoded in ``self.data``.

    ``self.data`` is expected to start with a code token followed by a
    space and the message text; the code is resolved to an exception
    class via ``self.find``.
    """
    code, _sep, message = self.data.partition(' ')
    exc_class = self.find(code)
    return exc_class(message)
def AddContract(self, contract):
    """Add a contract to the database.

    Args:
        contract (neo.SmartContract.Contract): a Contract instance.
    """
    super(UserWallet, self).AddContract(contract)

    # Drop any previously stored row for this script hash; a fresh row
    # is written below.
    try:
        Contract.get(ScriptHash=contract.ScriptHash.ToBytes()).delete_instance()
    except Exception:
        logger.debug("contract does not exist yet")

    script_hash = bytes(contract.ScriptHash.ToArray())
    address, _created = Address.get_or_create(ScriptHash=script_hash)
    address.IsWatchOnly = False
    address.save()

    new_contract = Contract.create(
        RawData=contract.ToArray(),
        ScriptHash=contract.ScriptHash.ToBytes(),
        PublicKeyHash=contract.PublicKeyHash.ToBytes(),
        Address=address,
        Account=self.__dbaccount,
    )
    logger.debug("Creating db contract %s " % new_contract)
    new_contract.save()
def get_class(kls):
    """Resolve a dotted path to a starter function or a class with a starter method.

    :param kls: string of fully identified starter function or starter method path,
        for instance:
        - workers.abstract_worker.AbstractWorker.start
        - workers.example_script_worker.main
    :return: tuple (type, object, starter), for instance:
        - (FunctionType, <function main>, None)
        - (type, <Class ...>, 'start')
    """
    parts = kls.split('.')
    try:
        # First, try to import module hosting starter function
        module = '.'.join(parts[:-1])
        m = __import__(module)
    except ImportError:
        # Alternatively, try to import module hosting Class with a starter method.
        # If this import fails too, the second ImportError propagates to the caller.
        module = '.'.join(parts[:-2])
        m = __import__(module)
    t = None
    starter = None
    # __import__ returns the TOP-LEVEL package, so the attribute walk
    # starts at parts[1] and descends one component per iteration.
    for i in range(1, len(parts)):
        comp = parts[i]
        # Provisionally record the remaining path components; overwritten
        # below once the walk reaches a class or a function.
        starter = parts[i:]
        m = getattr(m, comp)
        # NOTE(review): class_types is presumably six.class_types (old/new
        # style classes) — confirm against the file's imports.
        if isinstance(m, class_types):
            t = type
            # The component after the class (if any) names the starter method.
            starter = None if len(parts[i:]) == 1 else '.'.join(parts[i + 1:])
            break
        if isinstance(m, types.FunctionType):
            t = types.FunctionType
            starter = None
            break
    # NOTE(review): if the walk exhausts parts without hitting a class or
    # function, t stays None and starter is a *list* of the trailing
    # components — callers likely never rely on that case; verify.
    return t, m, starter
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.