signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def parse(self, rrstr):  # type: (bytes) -> None
    """Parse a Rock Ridge Sparse File (SF) record out of a string.

    Parameters:
     rrstr - The string to parse the record out of.
    Returns:
     Nothing.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('SF record already initialized!')

    # The caller is assumed to have already checked su_entry_version.
    (su_len, su_entry_version_unused) = struct.unpack_from('=BB', rrstr[:4], 2)

    if su_len == 12:
        # Rock Ridge 1.10 SF record (12 bytes): one 32-bit virtual file size
        # stored in both little- and big-endian form.
        size_le, size_be = struct.unpack_from('=LL', rrstr[:12], 4)
        if size_le != utils.swab_32bit(size_be):
            raise pycdlibexception.PyCdlibInvalidISO('Virtual file size little-endian does not match big-endian')
        self.virtual_file_size_low = size_le
    elif su_len == 21:
        # Rock Ridge 1.12 SF record (21 bytes): 64-bit size split into
        # high/low halves, plus a one-byte table depth.
        (high_le, high_be, low_le, low_be,
         self.table_depth) = struct.unpack_from('=LLLLB', rrstr[:21], 4)
        if high_le != utils.swab_32bit(high_be):
            raise pycdlibexception.PyCdlibInvalidISO('Virtual file size high little-endian does not match big-endian')
        if low_le != utils.swab_32bit(low_be):
            raise pycdlibexception.PyCdlibInvalidISO('Virtual file size low little-endian does not match big-endian')
        self.virtual_file_size_low = low_le
        self.virtual_file_size_high = high_le
    else:
        raise pycdlibexception.PyCdlibInvalidISO('Invalid length on Rock Ridge SF record (expected 12 or 21)')

    self._initialized = True
|
def assert_valid(self, instance, value=None):
    """Check validity; nested HasProperties values must also pass their own validation."""
    if not super(Instance, self).assert_valid(instance, value):
        return False
    if value is None:
        value = instance._get(self.name)
    # Cascade validation into nested HasProperties instances.
    if isinstance(value, HasProperties):
        value.validate()
    return True
|
def connect(self, address):
    """Initiate a new connection to a remote socket bound to an address.

    .. note:: this method will block until the connection has been made

    :param address:
        the address to which to initiate a connection, the format of which
        depends on the socket's type; for TCP sockets, this is a
        ``(host, port)`` two-tuple
    """
    # Resolve the address before attempting to connect.
    address = _dns_resolve(self, address)
    # Presumably registers this socket for write/error event readiness for
    # the duration of the attempt -- see _registered's implementation.
    with self._registered('we'):
        while 1:
            # connect_ex returns an errno instead of raising.
            err = self._sock.connect_ex(address)
            if not self._blocking or err not in _BLOCKING_OP:
                # Non-blocking socket, or a result that is not a
                # "still in progress" code: success or hard failure.
                if err not in (0, errno.EISCONN):
                    raise socket.error(err, errno.errorcode[err])
                return
            # NOTE(review): wait() returning truthy is treated as a timeout
            # here -- confirm against this event implementation's contract.
            if self._writable.wait(self.gettimeout()):
                raise socket.timeout("timed out")
            if scheduler.state.interrupted:
                raise IOError(errno.EINTR, "interrupted system call")
|
def make_command_line(predictions='/dev/stdout', quiet=True, save_resume=True, q_colon=None, **kwargs):
    """Construct a command line for VW, with each named argument corresponding
    to a VW option.

    Single character keys are mapped to single-dash options,
    e.g. 'b=20' yields '-b 20',
    while multiple character keys map to double-dash options:
    'quiet=True' yields '--quiet'.
    Boolean values are interpreted as flags: present if True, absent if False.
    All other values are treated as option arguments, as in the -b example above.
    If an option argument is a list, that option is repeated multiple times,
    e.g. "q=['ab', 'bc']" yields '-q ab -q bc'.
    q_colon is handled specially, mapping to '--q:'.
    Run 'vw -h' for a listing of most options.

    Defaults are well-suited for use with Wabbit Wappa:
    vw --predictions /dev/stdout --quiet --save_resume

    NOTE: This function makes no attempt to validate the inputs or
    ensure they are compatible with Wabbit Wappa.

    :return: the assembled command line string
    """
    try:
        # Python 2/3 compatibility for the string type check below.
        string_types = basestring
    except NameError:
        string_types = str
    args = ['vw']
    if q_colon:
        kwargs['q:'] = q_colon
    kwargs['predictions'] = predictions
    kwargs['quiet'] = quiet
    kwargs['save_resume'] = save_resume
    for key, value in kwargs.items():
        # Single-character keys become short options, others long options.
        option = '-{}'.format(key) if len(key) == 1 else '--{}'.format(key)
        if value is True:
            arg_list = [option]  # boolean flag: the option by itself
        elif value is False:
            # BUGFIX: the docstring promises flags are absent when False;
            # previously this fell through and emitted e.g. '--quiet False'.
            continue
        elif isinstance(value, string_types):
            arg_list = ['{} {}'.format(option, value)]
        elif hasattr(value, '__getitem__'):  # list-like value: repeat the option
            arg_list = ['{} {}'.format(option, subvalue) for subvalue in value]
        else:
            arg_list = ['{} {}'.format(option, value)]
        args.extend(arg_list)
    return ' '.join(args)
|
def on_init_strate(self):
    """Called after the strategy's configuration has been loaded."""
    # Register the needed symbols so quote data is received for them.
    self.symbol_pools.append(self.symbol_ref)
    if self.cta_call["enable"]:
        self.symbol_pools.append(self.cta_call["symbol"])
    if self.cta_put["enable"]:
        self.symbol_pools.append(self.cta_put["symbol"])
    # Position size and holding days for the call and put legs.
    self.cta_call['pos'] = 0
    self.cta_call['days'] = 0
    self.cta_put['pos'] = 0
    self.cta_put['days'] = 0
    # Each leg trades at most once per day; track whether it already traded.
    self.cta_call['done'] = False
    self.cta_put['done'] = False
    # Order id of the order placed today for each leg.
    self.cta_call['order_id'] = ''
    self.cta_put['order_id'] = ''
    # Validate configuration: price slippage index and order quantity.
    if self.trade_price_idx < 1 or self.trade_price_idx > 5:
        # BUGFIX: error message previously misspelled "config" as "conifg".
        raise Exception("config trade_price_idx error!")
    if self.trade_qty < 0:
        raise Exception("config trade_qty error!")
|
def end_timing(self, name, elapsed):
    """Finish a timing measurement and fold it into the named interval counter.

    :param name: a counter name
    :param elapsed: execution elapsed time in milliseconds to record
    """
    interval_counter = self.get(name, CounterType.Interval)
    self._calculate_stats(interval_counter, elapsed)
    self._update()
|
def make_multisig_info(m, pks, compressed=None):
    """Make a multisig address and redeem script.

    @m of the given @pks must sign.

    Return {'address': p2sh address, 'redeem_script': redeem script,
            'private_keys': private keys, 'segwit': False}
    * private keys will be hex-encoded
    * redeem_script will be hex-encoded
    """
    # Only force a compression mode when the caller explicitly chose one.
    force_compression = compressed in [True, False]
    pubs = []
    privkeys = []
    for pk in pks:
        if force_compression:
            priv = BitcoinPrivateKey(pk, compressed=compressed)
        else:
            priv = BitcoinPrivateKey(pk)
        privkeys.append(priv.to_hex())
        pubs.append(priv.public_key().to_hex())
    script = make_multisig_script(pubs, m)
    return {
        'address': btc_make_p2sh_address(script),
        'redeem_script': script,
        'private_keys': privkeys,
        'segwit': False,
    }
|
def add(self, varname, result, pointer=None):
    """Record a python-typed result, optionally with an Ftype pointer used
    when cleaning this object up.

    :arg result: a python-typed representation of the result.
    :arg pointer: an instance of Ftype with pointer information for
        deallocating the c-pointer.
    """
    self.result[varname] = result
    setattr(self, varname, result)
    if pointer is None:
        return
    # Keep the pointer around so the C-side allocation can be freed later.
    self._finalizers[varname] = pointer
|
def is_valid_optimize_same_id_sources(self):
    """The `optimize_same_id_sources` flag can be true only in the classical
    (or disaggregation) calculators.

    :return: True if the flag is unset or the calculation mode allows it
    """
    # When the flag is off there is nothing to validate.
    if not self.optimize_same_id_sources:
        return True
    # The original expression mixed `and`/`or` without parentheses, relying on
    # operator precedence; spelling the intent out removes the trap while
    # keeping the same truth table.
    return ('classical' in self.calculation_mode
            or 'disagg' in self.calculation_mode)
|
def is_in_list(self, plane_list):
    """Check whether this plane is identical to any Plane in ``plane_list``.

    :param plane_list: list of Planes to compare against
    :return: True if an identical plane is found, False otherwise
    """
    return any(self.is_same_plane_as(candidate) for candidate in plane_list)
|
def add_member_to_group(self, group_id, member_id):
    """AddMemberToGroup.
    [Preview API] Add a member to a Group.

    :param str group_id: Id of the Group.
    :param str member_id: Id of the member to add.
    """
    route_values = {}
    # Serialize each provided id into its URL route slot.
    for route_key, raw_value, arg_name in (('groupId', group_id, 'group_id'),
                                           ('memberId', member_id, 'member_id')):
        if raw_value is not None:
            route_values[route_key] = self._serialize.url(arg_name, raw_value, 'str')
    self._send(http_method='PUT',
               location_id='45a36e53-5286-4518-aa72-2d29f7acc5d8',
               version='5.0-preview.1',
               route_values=route_values)
|
def ylim(self, low, high):
    """Set the y-axis limits.

    Parameters
    ----------
    low : number
    high : number

    Returns
    -------
    Chart
    """
    axis = self.chart['yAxis'][0]
    axis['min'] = low
    axis['max'] = high
    return self
|
def wrap(function_to_trace=None, **trace_options):
    """Functions decorated with this will be traced.

    Use ``local=True`` to only trace local code, eg::

        @hunter.wrap(local=True)
        def my_function():

    Keyword arguments are allowed, eg::

        @hunter.wrap(action=hunter.CallPrinter)
        def my_function():

    Or, filters::

        @hunter.wrap(module='foobar')
        def my_function():
    """
    def tracing_decorator(func):
        @functools.wraps(func)
        def tracing_wrapper(*args, **kwargs):
            predicates = []
            # NOTE(review): pop() mutates the shared trace_options dict, so
            # 'local' is gone after the first call; the default of False
            # keeps subsequent calls consistent -- confirm this is intended.
            local = trace_options.pop('local', False)
            if local:
                # Restrict tracing to frames at most one call deep.
                predicates.append(Q(depth_lt=2))
            # Presumably stops the tracer once the wrapped call unwinds back
            # to depth 0 -- depends on When/Stop semantics.
            predicates.append(~When(Q(calls_gt=0, depth=0) & ~Q(kind='return'), Stop))
            local_tracer = trace(*predicates, **trace_options)
            try:
                return func(*args, **kwargs)
            finally:
                # Always stop the tracer, even if the call raised.
                local_tracer.stop()
        return tracing_wrapper
    # Support both bare @wrap and parameterized @wrap(...) usage.
    if function_to_trace is None:
        return tracing_decorator
    else:
        return tracing_decorator(function_to_trace)
|
def get(hdfs_path, local_path):
    """Get a file from hdfs.

    :param hdfs_path: Source path on HDFS (str)
    :param local_path: Local destination path (str)
    :raises: IOError: If unsuccessful
    """
    # NOTE(review): the original docstring labelled hdfs_path as the
    # destination; for 'hadoop fs -get' the HDFS path is the source.
    cmd = "hadoop fs -get %s %s" % (hdfs_path, local_path)
    # The helper presumably raises IOError on failure; the outputs are
    # intentionally unused here -- confirm against its implementation.
    rcode, stdout, stderr = _checked_hadoop_fs_command(cmd)
|
def htmlprint(*values, plain=None, **options):
    """Render each HTML value to VTML, then print them.

    Follows the same semantics as vtmlprint.
    """
    rendered = [htmlrender(value, plain=plain) for value in values]
    print(*rendered, **options)
|
def cmd_sync(self, low, timeout=None, full_return=False):
    '''Execute a runner function synchronously; eauth is respected.

    This function requires that :conf_master:`external_auth` is configured
    and the user is authorized to execute runner functions: (``@runner``).

    .. code-block:: python

        runner.eauth_sync({
            'fun': 'jobs.list_jobs',
            'username': 'saltdev',
            'password': 'saltdev',
            'eauth': 'pam',
    '''
    # Normalize the low data dict before delegating to the mixin.
    return mixins.SyncClientMixin.cmd_sync(
        self, self._reformat_low(low), timeout, full_return)
|
def format_help(self) -> str:
    """Copy of format_help() from argparse.ArgumentParser with tweaks to
    separately display required parameters."""
    formatter = self._get_formatter()
    # usage
    formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)
    # description
    formatter.add_text(self.description)
    # Begin cmd2 customization (separate required and optional arguments)
    # positionals, optionals and user-defined groups
    for action_group in self._action_groups:
        if action_group.title == 'optional arguments':
            # check if the arguments are required, group accordingly
            req_args = []
            opt_args = []
            for action in action_group._group_actions:
                if action.required:
                    req_args.append(action)
                else:
                    opt_args.append(action)
            # separately display required arguments
            formatter.start_section('required arguments')
            formatter.add_text(action_group.description)
            formatter.add_arguments(req_args)
            formatter.end_section()
            # now display truly optional arguments
            formatter.start_section(action_group.title)
            formatter.add_text(action_group.description)
            formatter.add_arguments(opt_args)
            formatter.end_section()
        else:
            # All other groups (positionals, user-defined) pass through
            # unchanged, mirroring stock argparse behavior.
            formatter.start_section(action_group.title)
            formatter.add_text(action_group.description)
            formatter.add_arguments(action_group._group_actions)
            formatter.end_section()
    # End cmd2 customization
    # epilog
    formatter.add_text(self.epilog)
    # determine help from format above
    return formatter.format_help()
|
def combine_sample_regions(*samples):
    """Create batch-level sets of callable regions for multi-sample calling.

    Intersects all non-callable (nblock) regions from all samples in a batch,
    producing a global set of callable regions.
    """
    samples = utils.unpack_worlds(samples)
    samples = cwlutils.unpack_tarballs(samples, samples[0])
    # back compatibility -- global file for entire sample set
    global_analysis_file = os.path.join(samples[0]["dirs"]["work"], "analysis_blocks.bed")
    if utils.file_exists(global_analysis_file) and not _needs_region_update(global_analysis_file, samples):
        global_no_analysis_file = os.path.join(os.path.dirname(global_analysis_file), "noanalysis_blocks.bed")
    else:
        # Stale or missing: force per-batch recalculation below.
        global_analysis_file = None
    out = []
    analysis_files = []
    batches = []
    with shared.bedtools_tmpdir(samples[0]):
        for batch, items in vmulti.group_by_batch(samples, require_bam=False).items():
            batches.append(items)
            if global_analysis_file:
                analysis_file, no_analysis_file = global_analysis_file, global_no_analysis_file
            else:
                analysis_file, no_analysis_file = _combine_sample_regions_batch(batch, items)
            for data in items:
                vr_file = dd.get_variant_regions(data)
                if analysis_file:
                    analysis_files.append(analysis_file)
                    data["config"]["algorithm"]["callable_regions"] = analysis_file
                    data["config"]["algorithm"]["non_callable_regions"] = no_analysis_file
                    data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(analysis_file).count()
                elif vr_file:
                    data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(vr_file).count()
                # attach a representative sample for calculating callable region
                if not data.get("work_bam"):
                    for x in items:
                        if x.get("work_bam"):
                            data["work_bam_callable"] = x["work_bam"]
                out.append([data])
    # Ensure output order matches input order, consistency for CWL-based runs
    assert len(out) == len(samples)
    sample_indexes = {dd.get_sample_name(d): i for i, d in enumerate(samples)}

    def by_input_index(xs):
        # Sort key: the original position of this sample in the input.
        return sample_indexes[dd.get_sample_name(xs[0])]

    out.sort(key=by_input_index)
    if len(analysis_files) > 0:
        # NOTE(review): stats are reported only for the first batch's
        # regions -- confirm this is intended for multi-batch runs.
        final_regions = pybedtools.BedTool(analysis_files[0])
        _analysis_block_stats(final_regions, batches[0])
    return out
|
def __get_eval_string(task_id, system_id, system_dir, system_filename, model_dir, model_filenames):
    """ROUGE can evaluate several system summaries for a given text
    against several model summaries, i.e. there is an m-to-n relation
    between system and model summaries. The system summaries are listed
    in the <PEERS> tag and the model summaries in the <MODELS> tag.
    pyrouge currently only supports one system summary per text, i.e. it
    assumes a 1-to-n relation between system and model summaries.
    """
    peer_elems = "<P ID=\"{id}\">{name}</P>".format(id=system_id, name=system_filename)
    model_parts = []
    for index, model_name in enumerate(model_filenames):
        # Model IDs are the letters A, B, C, ... in listing order.
        model_parts.append("<M ID=\"{id}\">{name}</M>".format(id=chr(65 + index), name=model_name))
    model_elems = "\n\t\t\t".join(model_parts)
    eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(task_id=task_id, model_root=model_dir, model_elems=model_elems, peer_root=system_dir, peer_elems=peer_elems)
    return eval_string
|
def delete_database(self, dbname):
    """Remove the named database remotely and locally.

    :param str dbname: Name of the database to delete.
    :raises CloudantClientException: if the database does not exist.
    """
    db = self._DATABASE_CLASS(self, dbname)
    if not db.exists():
        raise CloudantClientException(404, dbname)
    db.delete()
    # Drop the locally cached entry as well, if we hold one.
    if dbname in list(self.keys()):
        super(CouchDB, self).__delitem__(dbname)
|
def sub(a, b):
    """Subtract two values, treating None as an absent operand.

    Returns None when both are None, the (negated) other operand when one
    is None, and a - b otherwise.
    """
    if a is None and b is None:
        return None
    if a is None:
        return -1 * b
    if b is None:
        return a
    return a - b
|
def find_one(self, cls, id):
    """Required functionality: return the first match for ``id`` or None."""
    matches = self._find(cls, {"_id": id})
    return matches[0] if matches else None
|
def convert(self, value, view):
    """Check that the value is a string and matches the pattern."""
    if isinstance(value, BASESTRING):
        # Pattern check only applies when a pattern was configured.
        if self.pattern and not self.regex.match(value):
            self.fail(u"must match the pattern {0}".format(self.pattern), view)
        return value
    self.fail(u'must be a string', view, True)
|
def outpoint(tx_id, index, tree=None):
    '''hex_str, int, int -> Outpoint
    Accepts a block explorer txid string.
    '''
    # Block explorers display txids byte-reversed; flip to little-endian.
    little_endian_id = bytes.fromhex(tx_id)[::-1]
    return tb.make_outpoint(little_endian_id, index, tree)
|
def fix_config(self, options):
    """Fix the options, if necessary, i.e. add all required elements.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    options = super(DeleteFile, self).fix_config(options)
    opt = "regexp"
    # Default pattern matches every file.
    options.setdefault(opt, ".*")
    if opt not in self.help:
        self.help[opt] = "The regular expression that the files must match (string)."
    return options
|
def module_refresh(self):
    '''Refresh all the modules.'''
    log.debug('Refreshing modules...')
    # In case a package has been installed into the current python process'
    # site-packages, the 'site' module needs to be reloaded in order for the
    # newly installed package to be importable. (Skipped on MacOS.)
    if self.opts['grains'].get('os') != 'MacOS':
        try:
            reload_module(site)
        except (RuntimeError, TypeError):
            log.error('Error encountered during module reload. Modules were not reloaded.')
    self.load_modules()
    if not self.opts.get('local', False) and self.opts.get('multiprocessing', True):
        self.functions['saltutil.refresh_modules']()
|
def count_posts(self, tag=None, user_id=None, include_draft=False):
    """Returns the total number of posts for the given filter.

    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool

    :return: The number of posts for the given filter.
    """
    result = 0
    with self._engine.begin() as conn:
        try:
            count_statement = sqla.select([sqla.func.count()]).select_from(self._post_table)
            sql_filter = self._get_filter(tag, user_id, include_draft, conn)
            count_statement = count_statement.where(sql_filter)
            result = conn.execute(count_statement).scalar()
        except Exception as e:
            # Deliberate best-effort: log the failure and report zero posts
            # rather than propagating database errors to the caller.
            self._logger.exception(str(e))
            result = 0
    return result
|
def __copy_tree(src_dir, dest_dir):
    """Recursively copy src_dir into dest_dir.

    shutil.copytree() or distutils.dir_util.copy_tree() will happen to report
    "IOError: [Errno 2] No such file or directory: ..." if invoked again and
    again (at least in python 2.7.4), so we use our own copy_tree() instead.
    """
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    # Mirror the directory's stat info (permissions, timestamps).
    shutil.copystat(src_dir, dest_dir)
    for entry in os.listdir(src_dir):
        source = os.path.join(src_dir, entry)
        target = os.path.join(dest_dir, entry)
        if os.path.isdir(source):
            __copy_tree(source, target)
        else:
            shutil.copy2(source, target)
|
def override_language(self, language):
    """Context manager to temporarily override the instance language.

    The previous language is restored even if the managed block raises.
    """
    previous_language = self._linguist.language
    self._linguist.language = language
    try:
        yield
    finally:
        # BUGFIX: restore unconditionally so an exception inside the
        # with-block cannot leave the override permanently in place.
        self._linguist.language = previous_language
|
def process_stats(self, stats, prefix, metric_categories, nested_tags, tags, recursion_level=0):
    """Recursively walk Stat XML nodes, submitting metric values and adding
    one context-dependent tag per nesting level (the tag key for each level
    comes from the nested-tag list registered for the prefix).
    """
    for node in stats:
        if node.tag in metrics.METRIC_VALUE_FIELDS:
            self.submit_metrics(node, prefix, tags)
        elif node.tag in metrics.CATEGORY_FIELDS:
            level_key = nested_tags.get(prefix)[recursion_level]
            child_tags = tags + ["{}:{}".format(level_key, node.get('name'))]
            self.process_stats(node, prefix, metric_categories, nested_tags,
                               child_tags, recursion_level + 1)
|
def check_values_selection_field(cr, table_name, field_name, allowed_values):
    """Check if the selection field 'field_name' of the table 'table_name'
    has only the values 'allowed_values'.

    If not, return False and log an error.
    If yes, return True.

    .. versionadded:: 8.0
    """
    res = True
    # NOTE(review): table/field names are interpolated directly into the SQL.
    # They are identifiers (not parameterizable values) and appear intended to
    # come from trusted migration code -- confirm callers never pass user input.
    cr.execute("SELECT %s, count(*) FROM %s GROUP BY %s;" % (field_name, table_name, field_name))
    for row in cr.fetchall():
        if row[0] not in allowed_values:
            logger.error("Invalid value '%s' in the table '%s' "
                         "for the field '%s'. (%s rows).",
                         row[0], table_name, field_name, row[1])
            res = False
    return res
|
def req(self, method, params=()):
    """Send a JSON-RPC request to ppcoind.

    Returns the "error" member when the call failed, otherwise "result".
    """
    payload = json.dumps({"method": method, "params": params, "jsonrpc": "1.1"})
    reply = self.session.post(self.url, data=payload, ).json()
    if reply["error"] is not None:
        return reply["error"]
    return reply["result"]
|
def obj_to_string(obj, top=True):
    """Turn an arbitrary object into a unicode string; complex objects
    (dict/list/tuple) are json-encoded.
    """
    prepared = prepare_for_json_encoding(obj)
    # Exact type check (not isinstance) is preserved from the original.
    if type(prepared) == six.text_type:
        return prepared
    return json.dumps(prepared)
|
def _conglomerate_meshes(meshin, header):
    """Conglomerate meshes from several cores into one."""
    meshout = {}
    # Points per core along each axis; assumes header['nts'] and
    # header['ncs'] are array-like so the division is element-wise --
    # TODO confirm (likely numpy integer arrays).
    npc = header['nts'] // header['ncs']
    # Global output shape: one extra point per non-degenerate axis.
    shp = [val + 1 if val != 1 else 1 for val in header['nts']]
    x_p = int(shp[0] != 1)  # 1 when the x axis is non-degenerate, else 0
    y_p = int(shp[1] != 1)  # same for the y axis
    # Allocate one global array per coordinate found on the first core.
    for coord in meshin[0]:
        meshout[coord] = np.zeros(shp)
    for icore in range(np.prod(header['ncs'])):
        # Offsets of this core's sub-mesh within the global mesh, decoding
        # the flat core index into per-axis core coordinates.
        ifs = [icore // np.prod(header['ncs'][:i]) % header['ncs'][i] * npc[i] for i in range(3)]
        for coord, mesh in meshin[icore].items():
            # Paste this core's block (with its shared boundary points) into
            # the global array.
            meshout[coord][ifs[0]:ifs[0] + npc[0] + x_p, ifs[1]:ifs[1] + npc[1] + y_p, ifs[2]:ifs[2] + npc[2] + 1] = mesh
    return meshout
|
def create_widgets(self):
    """Build basic components of dialog."""
    # OK/Cancel button box; keep references to the individual buttons.
    self.bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
    self.idx_ok = self.bbox.button(QDialogButtonBox.Ok)
    self.idx_cancel = self.bbox.button(QDialogButtonBox.Cancel)
    # Menu of available group names.
    self.idx_group = FormMenu([gr['name'] for gr in self.groups])
    # Channel list (items presumably filled in elsewhere).
    chan_box = QListWidget()
    self.idx_chan = chan_box
    # Sleep-stage list with multi-selection enabled.
    stage_box = QListWidget()
    stage_box.addItems(STAGE_NAME)
    stage_box.setSelectionMode(QAbstractItemView.ExtendedSelection)
    self.idx_stage = stage_box
    # Cycle list with multi-selection enabled.
    cycle_box = QListWidget()
    cycle_box.setSelectionMode(QAbstractItemView.ExtendedSelection)
    self.idx_cycle = cycle_box
|
def getRecommendedRenderTargetSize(self):
    """Suggested size for the intermediate render target that the distortion
    pulls from.

    :return: (width, height) tuple of ints
    """
    width = c_uint32()
    height = c_uint32()
    # The native call fills both out-parameters in place.
    self.function_table.getRecommendedRenderTargetSize(byref(width), byref(height))
    return width.value, height.value
|
def _update_progress(self, percentage, **kwargs):
    """Update the progress: refresh the progressbar (when shown) and invoke
    the progress callback.

    :param float percentage: Percentage of the progressbar, from 0.0 to 100.0.
    :param kwargs: Other parameters passed through to the progress_callback.
    :return: None
    """
    if self._show_progressbar:
        if self._progressbar is None:
            # Lazily create the bar on first update.
            self._initialize_progressbar()
        # Scale to the progressbar's internal units (percentage * 10000).
        self._progressbar.update(percentage * 10000)
    callback = self._progress_callback
    if callback is not None:
        callback(percentage, **kwargs)
|
def get_box_loc(fig, ax, line_wave, arrow_tip, box_axes_space=0.06):
    """Compute text-box locations in data coordinates, offset vertically from
    the arrow tips by ``box_axes_space`` in figure-fraction units.

    Parameters
    ----------
    fig : matplotlib Figure artist
        Figure on which the boxes will be placed.
    ax : matplotlib Axes artist
        Axes on which the boxes will be placed.
    line_wave : sequence of floats
        X positions of the lines, in data coordinates.
    arrow_tip : list or array of floats
        Location of tip of arrow, in data coordinates.
    box_axes_space : float
        Vertical space between arrow tip and text box in figure
        coordinates. Default is 0.06.

    Returns
    -------
    box_loc : list of floats
        Box locations in data coordinates.

    Notes
    -----
    Only needed when box positions must be found automatically: each arrow
    tip is mapped data -> display -> figure fraction, shifted upward by
    ``box_axes_space``, then mapped back figure -> display -> data. Working
    in figure fraction keeps the spacing uniform regardless of the data
    y-range, so plots get a consistent appearance.
    """
    to_figure = fig.transFigure.inverted()
    to_data = ax.transData.inverted()
    box_loc = []
    for wave, tip in zip(line_wave, arrow_tip):
        # data -> display -> figure fraction
        fig_xy = to_figure.transform(ax.transData.transform((wave, tip)))
        # shift up by the requested figure-fraction gap
        fig_xy[1] += box_axes_space
        # figure fraction -> display -> data
        box_loc.append(to_data.transform(fig.transFigure.transform(fig_xy)))
    return box_loc
|
def plot_histogram(self, freq=None, figsize=(15, 5), title=None, bins=20, **kwargs):
    """Plots a histogram of returns given a return frequency.

    Args:
        * freq (str): Data frequency used for display purposes.
            This will dictate the type of returns
            (daily returns, monthly, ...)
            Refer to pandas docs for valid period strings.
        * figsize ((x, y)): figure size
        * title (str): Title if default not appropriate
        * bins (int): number of bins for the histogram
        * kwargs: passed to pandas' hist method
    """
    if title is None:
        title = self._get_default_plot_title(self.name, freq, 'Return Histogram')
    ser = self._get_series(freq).to_returns().dropna()
    plt.figure(figsize=figsize)
    # NOTE(review): 'normed' was deprecated and later removed from
    # matplotlib's hist in favour of 'density' -- confirm the pinned
    # matplotlib version still accepts it.
    ax = ser.hist(bins=bins, figsize=figsize, normed=True, **kwargs)
    ax.set_title(title)
    # Mark the zero-return line for reference.
    plt.axvline(0, linewidth=4)
    return ser.plot(kind='kde')
|
def trivialInput(symbol):
    """Create a new L{IRichInput} implementation for the given input symbol.

    This creates a new type object and is intended to be used at module scope
    to define rich input types. Generally, only one use per symbol should be
    required. For example::

        Apple = trivialInput(Fruit.apple)

    @param symbol: A symbol from some state machine's input alphabet.

    @return: A new type object usable as a rich input for the given symbol.
    @rtype: L{type}
    """
    class_name = symbol.name.title()
    attributes = {"symbol": _symbol(symbol), }
    rich_type = type(class_name, (FancyStrMixin, object), attributes)
    # Mark the generated type as providing IRichInput.
    return implementer(IRichInput)(rich_type)
|
def initiate_tasks(self):
    """Load all tasks using `TaskLoader` from the configured task paths."""
    task_paths = self.configuration[Configuration.ALGORITHM][Configuration.TASKS][Configuration.PATHS]
    self.tasks_classes = TaskLoader().load_tasks(paths=task_paths)
|
def attach(cls, transform_job_name, sagemaker_session=None):
    """Attach an existing transform job to a new Transformer instance.

    Args:
        transform_job_name (str): Name for the transform job to be attached.
        sagemaker_session (sagemaker.session.Session): Session object which
            manages interactions with Amazon SageMaker APIs and any other AWS
            services needed. If not specified, one will be created using the
            default AWS configuration chain.

    Returns:
        sagemaker.transformer.Transformer: The Transformer instance with the
        specified transform job attached.
    """
    session = sagemaker_session or Session()
    job_details = session.sagemaker_client.describe_transform_job(TransformJobName=transform_job_name)
    init_params = cls._prepare_init_params_from_job_description(job_details)
    transformer = cls(sagemaker_session=session, **init_params)
    transformer.latest_transform_job = _TransformJob(
        sagemaker_session=session,
        job_name=init_params['base_transform_job_name'])
    return transformer
|
def writefile(filename, content):
    """Write the given content into the file.

    :param filename: the filename (expanded via path_expand)
    :param content: the content to write
    :return:
    """
    expanded = path_expand(filename)
    with open(expanded, 'w') as outfile:
        outfile.write(content)
|
def get_present_elements(self, locator, params=None, timeout=None, visible=False, parent=None):
    """Get elements present in the DOM.

    If timeout is 0 (zero), return a WebElement instance or None; otherwise
    wait and retry for ``timeout`` and raise TimeoutException should the
    element not be found.

    :param locator: element identifier
    :param params: (optional) locator parameters
    :param timeout: (optional) time to wait for element (default: self._explicit_wait)
    :param visible: (optional) if the element should also be visible (default: False)
    :param parent: internal (see get_present_children)
    :return: WebElement instance
    """
    if visible:
        expected_condition = ec.visibility_of_all_elements_located
    else:
        expected_condition = ec.presence_of_all_elements_located
    error_msg = "Children were never present" if parent else "Elements were never present!"
    return self._get(locator, expected_condition, params, timeout, error_msg, parent)
|
def update_status(self, value, value_set):
    """Store the new status/status-set values and refresh the widget so the
    displayed icons are updated."""
    self._status = value
    self._status_set = value_set
    # Trigger both an immediate repaint and a queued update.
    self.repaint()
    self.update()
|
def get_time(time_to_convert=None):
    """Create a blink-compatible timestamp (defaults to the current time)."""
    when = time.time() if time_to_convert is None else time_to_convert
    return time.strftime(TIMESTAMP_FORMAT, time.localtime(when))
|
def getWidget(self):
    """Build and return the read-only text widget for this detector.

    Ideas for extending the widget:
    - Textual information (alert, license plate number)
    - Check boxes triggering e-mail/sms notifications on detection
    - Embedding the cv2.imshow window to watch the analyzer progress
    """

    editor = QtWidgets.QTextEdit()
    editor.setStyleSheet(style.detector_test)
    editor.setReadOnly(True)
    self.widget = editor
    # Route detected objects from the analyzer signals into this widget.
    self.signals.objects.connect(self.objects_slot)
    return self.widget
|
def _log_tc_proxy ( self ) :
"""Log the proxy settings ."""
|
if self . default_args . tc_proxy_tc :
self . log . info ( u'Proxy Server (TC): {}:{}.' . format ( self . default_args . tc_proxy_host , self . default_args . tc_proxy_port ) )
|
def push(self, cf):
    """Push the frame ``cf`` onto the stack and return the new stack top."""

    cf.next = self
    state = self.state
    if state is not None:
        # Keep the simulation state's view of the callstack in sync and
        # record the action in its history.
        state.register_plugin('callstack', cf)
        action = CallStackAction(hash(cf), len(cf), 'push', callframe=cf.copy({}, with_tail=False))
        state.history.recent_stack_actions.append(action)
    return cf
|
def find_orfs(fa, seqs):
    """Predict ORFs with Prodigal and attach overlapping ORFs to insertions.

    Runs Prodigal on ``fa`` (results are cached on disk and reused when the
    .faa output already exists), then for every predicted ORF that overlaps
    an insertion by at least 90%, appends the ORF record to that insertion.

    # seqs[id] = [gene, model, [[i-gene_pos, i-model_pos, i-length, iseq, [orfs], [introns]], ...]]

    :param fa: path to the input fasta file
    :param seqs: insertion bookkeeping dict (mutated in place, see layout above)
    :return: (seqs, faa) — the updated dict and the Prodigal protein fasta path
    """

    faa = '%s.prodigal.faa' % (fa)
    fna = '%s.prodigal.fna' % (fa)
    gbk = '%s.prodigal.gbk' % (fa)
    # Only run Prodigal when its protein output is not already present.
    if os.path.exists(faa) is False:
        p = subprocess.Popen('prodigal -q -i %s -a %s -d %s -c -f gbk -m -n -o %s -p meta' % (fa, faa, fna, gbk), shell=True)
        p.communicate()
    for orf in parse_fasta(faa):
        if orf[0] == []:
            continue
        # Prodigal headers look like ">seqid_orfnum # start # end # ...";
        # recover the parent sequence id and the sorted coordinates.
        id = orf[0].split('>')[1].split('_', 1)[0]
        pos = sorted([int(i) for i in orf[0].split()[2:5] if i != '#'])
        if id not in seqs:
            continue
        for i, ins in enumerate(seqs[id][2]):
            # check_overlap threshold 0.90: presumably fraction of overlap
            # required — TODO confirm against check_overlap's definition.
            if check_overlap(pos, ins, 0.90) is True:
                seqs[id][2][i][4].append(orf)
    return seqs, faa
|
def hex2ip(hex_ip, invert=False):
    '''Convert a hex string to an ip, if a failure occurs the original hex is
    returned. If 'invert=True' assume that ip from /proc/net/<proto>'''

    if len(hex_ip) == 32:  # ipv6
        groups = []
        for offset in range(0, 32, 8):
            chunk = hex_ip[offset:offset + 8]
            octet_pairs = [chunk[pos:pos + 2] for pos in range(0, 8, 2)]
            if invert:
                groups.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(octet_pairs))
            else:
                groups.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(octet_pairs))
        try:
            address = ipaddress.IPv6Address(":".join(groups))
        except ipaddress.AddressValueError as ex:
            log.error('hex2ip - ipv6 address error: %s', ex)
            return hex_ip
        # Prefer the embedded IPv4 form for v4-mapped addresses.
        if address.ipv4_mapped:
            return str(address.ipv4_mapped)
        return address.compressed
    try:
        as_int = int(hex_ip, 16)
    except ValueError:
        return hex_ip
    octets = [as_int >> shift & 255 for shift in (24, 16, 8, 0)]
    if invert:
        octets.reverse()
    return '.'.join(str(octet) for octet in octets)
|
def parity_even_p(state, marked_qubits):
    """Calculates the parity of elements at indexes in marked_qubits.

    Parity is relative to the binary representation of the integer state.

    :param state: The wavefunction index that corresponds to this state.
    :param marked_qubits: The indexes to be considered in the parity sum.
    :returns: True when the selected bits contain an even number of ones.
    """

    assert isinstance(state, int), f"{state} is not an integer. Must call parity_even_p with an integer state."
    selector = 0
    for qubit_index in marked_qubits:
        selector |= 1 << qubit_index
    return bin(selector & state).count("1") % 2 == 0
|
def CreateDynamicDisplayAdSettings(client, opener):
    """Creates dynamic display ad settings.

    Args:
      client: an AdWordsClient instance.
      opener: an OpenerDirector instance.

    Returns:
      A dict containing the dynamic display ad settings.
    """

    media_service = client.GetService('MediaService', 'v201809')
    # Upload the landscape logo image and reference it by media id.
    logo_media_id = _CreateImage(media_service, opener, 'https://goo.gl/dEvQeF')
    logo = {'xsi_type': 'Image', 'mediaId': logo_media_id}
    return {'landscapeLogoImage': logo, 'pricePrefix': 'as low as', 'promoText': 'Free shipping!'}
|
def _parse_child ( self , child ) :
"""Parse a single child element .
@ param child : The child C { etree . Element } to parse .
@ return : A tuple C { ( name , type , min _ occurs , max _ occurs ) } with the
details about the given child ."""
|
if set ( child . attrib ) - set ( [ "name" , "type" , "minOccurs" , "maxOccurs" ] ) :
raise RuntimeError ( "Unexpected attribute in child" )
name = child . attrib [ "name" ]
type = child . attrib [ "type" ] . split ( ":" ) [ 1 ]
min_occurs = child . attrib . get ( "minOccurs" )
max_occurs = child . attrib . get ( "maxOccurs" )
if min_occurs is None :
min_occurs = "1"
min_occurs = int ( min_occurs )
if max_occurs is None :
max_occurs = "1"
if max_occurs != "unbounded" :
max_occurs = int ( max_occurs )
return name , type , min_occurs , max_occurs
|
def find_obj(self, env, modname, classname, name, type_name, searchmode=0):
    """Find a Chapel object for "name", possibly with module or class/record
    name. Returns a list of (name, object entry) tuples.

    :arg int searchmode: If 1, search more specific names first. Otherwise,
        search built-ins first and then get more specific.
    """

    # Strip call parentheses so "foo()" resolves like "foo".
    if name[-2:] == '()':
        name = name[:-2]
    if not name:
        return []
    objects = self.data['objects']
    matches = []
    newname = None
    if searchmode == 1:
        # Restrict candidates to the object types valid for the role.
        if type_name is None:
            objtypes = list(self.object_types)
        else:
            objtypes = self.objtypes_for_role(type_name)
        if objtypes is not None:
            # Most specific first: module.class.name.
            if modname and classname:
                fullname = modname + '.' + classname + '.' + name
                if (fullname in objects and objects[fullname][1] in objtypes):
                    newname = fullname
            if not newname:
                # Then module.name, then the bare name.
                if (modname and modname + '.' + name in objects and objects[modname + '.' + name][1] in objtypes):
                    newname = modname + '.' + name
                elif name in objects and objects[name][1] in objtypes:
                    newname = name
                else:
                    # "Fuzzy" search mode: any object whose dotted name ends
                    # with ".name" and has a matching type.
                    searchname = '.' + name
                    matches = [(oname, objects[oname]) for oname in objects if oname.endswith(searchname) and objects[oname][1] in objtypes]
    else:
        # NOTE: Search for exact match, object type is not considered.
        if name in objects:
            newname = name
        elif type_name == 'mod':
            # Only exact matches allowed for modules.
            return []
        elif classname and classname + '.' + name in objects:
            newname = classname + '.' + name
        elif modname and modname + '.' + name in objects:
            newname = modname + '.' + name
        elif (modname and classname and modname + '.' + classname + '.' + name in objects):
            newname = modname + '.' + classname + '.' + name
    if newname is not None:
        matches.append((newname, objects[newname]))
    return matches
|
def _recursive_overwrite ( self , src , dest ) :
"""Copy src to dest , recursively and with file overwrite ."""
|
if os . path . isdir ( src ) :
if not os . path . isdir ( dest ) :
os . makedirs ( dest )
files = os . listdir ( src )
for f in files :
self . _recursive_overwrite ( os . path . join ( src , f ) , os . path . join ( dest , f ) )
else :
shutil . copyfile ( src , dest , follow_symlinks = False )
|
def tValueForPoint(self, point):
    """Get a t value for a given point.

    Required: the point must lie on the curve. For overlaps the point is an
    intersection point, which is always a point on the curve.
    """

    segment_type = self.segmentType
    if segment_type == "curve":
        # Cubic segment: previous on-curve + two off-curve + final on-curve.
        control_points = (
            self.previousOnCurve,
            self.points[0].coordinates,
            self.points[1].coordinates,
            self.points[2].coordinates,
        )
        return _tValueForPointOnCubicCurve(point, control_points)
    if segment_type == "line":
        return _tValueForPointOnLine(point, (self.previousOnCurve, self.points[0].coordinates))
    if segment_type == "qcurve":
        raise NotImplementedError
    raise NotImplementedError
|
def exit_on_error(self, message, exit_code=1):  # pylint: disable=no-self-use
    """Log generic message when getting an error and exit.

    :param exit_code: if not None, exit with the provided value as exit code
    :type exit_code: int
    :param message: message for the exit reason
    :type message: str
    :return: None
    """

    # NOTE(review): the `log` string assembled below is never written to any
    # logger or stream in this block — only the bare message is printed.
    # Presumably a logging call is missing; confirm against upstream intent.
    log = "I got an unrecoverable error. I have to exit."
    if message:
        log += "\n-----\nError message: %s" % message
        print("Error message: %s" % message)
    log += "-----\n"
    log += "You can get help at https://github.com/Alignak-monitoring/alignak\n"
    log += "If you think this is a bug, create a new issue including as much " "details as possible (version, configuration,...)"
    # Terminates the interpreter unless exit_code is explicitly None.
    if exit_code is not None:
        exit(exit_code)
|
def parse(xmlfile, element_names, element_attrs=None, attr_conversions=None, heterogeneous=False, warn=False):
    """Parses the given element_names from xmlfile and yield compound objects for
    their xml subtrees (no extra objects are returned if element_names appear in
    the subtree). The compound objects provide all element attributes of
    the root of the subtree as attributes unless attr_names are supplied. In this
    case attr_names maps element names to a list of attributes which are
    supplied. If attr_conversions is not empty it must map attribute names to
    callables which will be called upon the attribute value before storing under
    the attribute name.

    The compound objects give dictionary style access to lists of compound
    objects o for any children with the given element name:
    o['child_element_name'] = [osub0, osub1, ...]
    As a shorthand, attribute style access to the list of child elements is
    provided unless an attribute with the same name as the child elements
    exists (i.e. o.child_element_name = [osub0, osub1, ...])

    @Note: All elements with the same name must have the same type regardless of
        the subtree in which they occur (heterogeneous cases are presumably
        handled by setting heterogeneous=True, with reduced parsing speed —
        the upstream docstring said False here; confirm against
        _get_compound_object).
    @Note: Attribute names may be modified to avoid name clashes
        with python keywords. (set warn=True to receive renaming warnings)
    @Note: The element_names may be either a single string or a list of strings.
    @Example: parse('plain.edg.xml', ['edge'])
    """

    # Fix: the original used mutable default arguments ({}), which are shared
    # across calls; normalise the None sentinels to fresh dicts instead.
    if element_attrs is None:
        element_attrs = {}
    if attr_conversions is None:
        attr_conversions = {}
    if isinstance(element_names, str):
        element_names = [element_names]
    elementTypes = {}
    for _event, parsenode in ET.iterparse(xmlfile):
        if parsenode.tag in element_names:
            yield _get_compound_object(parsenode, elementTypes, parsenode.tag, element_attrs, attr_conversions, heterogeneous, warn)
            # Free the parsed subtree to keep memory bounded on large files.
            parsenode.clear()
|
def read_pid_from_pidfile(pidfile_path):
    """Read the PID recorded in the named PID file.

    Read and return the numeric PID recorded as text in the named
    PID file. If the PID file cannot be read, or if the content is
    not a valid PID, return ``None``.
    """

    try:
        pidfile = open(pidfile_path, 'r')
    except IOError:
        return None
    # According to the FHS 2.3 section on PID files in /var/run:
    #   The file must consist of the process identifier in
    #   ASCII-encoded decimal, followed by a newline character.
    #   Programs that read PID files should be somewhat flexible
    #   in what they accept; i.e., they should ignore extra
    #   whitespace, leading zeroes, absence of the trailing
    #   newline, or additional lines in the PID file.
    # Fix: use a context manager so the file is closed even if readline()
    # raises (the original leaked the handle on that path).
    with pidfile:
        line = pidfile.readline().strip()
    try:
        return int(line)
    except ValueError:
        return None
|
def graceful(cls):
    """A decorator to protect against message structure changes.

    Many of our processors expect messages to be in a certain format. If the
    format changes, they may start to fail and raise exceptions. This decorator
    is in place to catch and log those exceptions and to gracefully return
    default values (a fresh ``cls()`` instance).
    """

    def decorator(func):
        @functools.wraps(func)
        def safe_call(msg, **config):
            try:
                return func(msg, **config)
            except KeyError:
                log.exception("%r failed on %r" % (func, msg.get('msg_id')))
                return cls()
        return safe_call
    return decorator
|
def _CopyDateTimeFromStringISO8601(self, time_string):
    """Copies a date and time from an ISO 8601 date and time string.

    Args:
      time_string (str): time value formatted as:
          hh:mm:ss.######[+-]##:##
          Where # are numeric digits ranging from 0 to 9 and the seconds
          fraction can be either 3 or 6 digits. The fraction of second and
          time zone offset are optional.

    Returns:
      dict[str, int]: date and time values with keys 'year', 'month' and
          'day_of_month', plus 'hours', 'minutes', 'seconds' and optionally
          'microseconds' when a time of day is present.
          (NOTE(review): the upstream docstring claimed a tuple of ints;
          the code clearly builds and returns a dict.)

    Raises:
      ValueError: if the time string is invalid or not supported.
    """

    if not time_string:
        raise ValueError('Invalid time string.')
    time_string_length = len(time_string)
    year, month, day_of_month = self._CopyDateFromString(time_string)
    # A date-only string ('YYYY-MM-DD') is 10 characters or less.
    if time_string_length <= 10:
        return {'year': year, 'month': month, 'day_of_month': day_of_month}
    # If a time of day is specified the time string it should at least
    # contain 'YYYY-MM-DDThh'.
    if time_string[10] != 'T':
        raise ValueError('Invalid time string - missing as date and time separator.')
    hours, minutes, seconds, microseconds, time_zone_offset = (self._CopyTimeFromStringISO8601(time_string[11:]))
    # Normalise to UTC when a time zone offset is present.
    if time_zone_offset:
        year, month, day_of_month, hours, minutes = self._AdjustForTimeZoneOffset(year, month, day_of_month, hours, minutes, time_zone_offset)
    date_time_values = {'year': year, 'month': month, 'day_of_month': day_of_month, 'hours': hours, 'minutes': minutes, 'seconds': seconds}
    if microseconds is not None:
        date_time_values['microseconds'] = microseconds
    return date_time_values
|
def get_home_page(self):
    """Return the home page draft.

    Used for 'parent' in cms.api.create_page().
    Note: this queries publisher_is_draft=True, so the *draft* of the home
    page is returned, not the published copy.

    :raises RuntimeError: if no home page exists.
    """

    try:
        home_page_draft = Page.objects.get(is_home=True, publisher_is_draft=True)
    except Page.DoesNotExist as err:
        # Fix: grammar in the log message ("doesn't exists" -> "doesn't exist")
        # and chain the original exception for easier debugging.
        log.error('ERROR: "home page" doesn\'t exist!')
        raise RuntimeError('no home page') from err
    return home_page_draft
|
def load_timeseries(path, group=None):
    """Load a TimeSeries from a .hdf, .txt or .npy file. The
    default data types will be double precision floating point.

    Parameters
    ----------
    path : string
        Source file path. Must end with .npy, .txt or .hdf.
    group : string
        Additional name for internal storage use. Ex. hdf storage uses
        this as the key value.

    Raises
    ------
    ValueError
        If path does not end in .npy, .txt or .hdf, or if the loaded array
        does not have 2 (real) or 3 (complex) columns.
    """

    ext = _os.path.splitext(path)[1]
    if ext == '.npy':
        data = _numpy.load(path)
    elif ext == '.txt':
        data = _numpy.loadtxt(path)
    elif ext == '.hdf':
        # HDF stores delta_t/start_time as attributes on the dataset, so the
        # TimeSeries can be built directly without a time column.
        key = 'data' if group is None else group
        f = h5py.File(path)
        data = f[key][:]
        series = TimeSeries(data, delta_t=f[key].attrs['delta_t'], epoch=f[key].attrs['start_time'])
        # NOTE(review): the file is not closed if reading raises — presumably
        # a `with h5py.File(...)` block was intended; confirm before changing.
        f.close()
        return series
    else:
        raise ValueError('Path must end with .npy, .hdf, or .txt')
    # npy/txt layouts carry a leading time column: derive delta_t and the
    # epoch from the first column, remaining column(s) are the samples.
    if data.ndim == 2:
        delta_t = (data[-1][0] - data[0][0]) / (len(data) - 1)
        epoch = _lal.LIGOTimeGPS(data[0][0])
        return TimeSeries(data[:, 1], delta_t=delta_t, epoch=epoch)
    elif data.ndim == 3:
        # Two value columns: real and imaginary parts of a complex series.
        delta_t = (data[-1][0] - data[0][0]) / (len(data) - 1)
        epoch = _lal.LIGOTimeGPS(data[0][0])
        return TimeSeries(data[:, 1] + 1j * data[:, 2], delta_t=delta_t, epoch=epoch)
    else:
        raise ValueError('File has %s dimensions, cannot convert to Array, \
            must be 2 (real) or 3 (complex)' % data.ndim)
|
def advance_job_status(namespace: str, job: Job, duration: float, err: Optional[Exception]):
    """Advance the status of a job depending on its execution.

    This function is called after a job has been executed. It calculates its
    next status and calls the appropriate signals:
    - no error: mark SUCCEEDED;
    - error with retries left: reset status, schedule a retry (honoring an
      explicit RetryException retry time when given, otherwise exponential
      backoff) and send job_schedule_retry;
    - error with no retries left: mark FAILED and send job_failed.
    """

    duration = human_duration(duration)
    if not err:
        job.status = JobStatus.SUCCEEDED
        logger.info('Finished execution of %s in %s', job, duration)
        return
    if job.should_retry:
        job.status = JobStatus.NOT_SET
        job.retries += 1
        # A RetryException may carry an explicit retry time; otherwise back
        # off exponentially based on the retry count.
        if isinstance(err, RetryException) and err.at is not None:
            job.at = err.at
        else:
            job.at = (datetime.now(timezone.utc) + exponential_backoff(job.retries))
        signals.job_schedule_retry.send(namespace, job=job, err=err)
        log_args = (job.retries, job.max_retries + 1, job, duration, human_duration((job.at - datetime.now(tz=timezone.utc)).total_seconds()))
        # Requested retries are expected, so log at a lower severity.
        if isinstance(err, RetryException):
            logger.info('Retry requested during execution %d/%d of %s ' 'after %s, retry in %s', *log_args)
        else:
            logger.warning('Error during execution %d/%d of %s after %s, ' 'retry in %s', *log_args)
        return
    job.status = JobStatus.FAILED
    signals.job_failed.send(namespace, job=job, err=err)
    logger.error('Error during execution %d/%d of %s after %s', job.max_retries + 1, job.max_retries + 1, job, duration, exc_info=err)
|
def add_role(user, roles):
    """Map roles for user in database.

    Args:
        user (User): User to add roles to.
        roles ([Role]): List of roles to add.

    Returns:
        None
    """

    # Fix: the original used a list comprehension purely for its side
    # effects (and built a throwaway list); a plain loop is the idiomatic
    # form. Each mapping is still committed individually, matching the
    # original behavior (a failure mid-way keeps earlier mappings).
    for role in roles:
        user_role = UserRole()
        user_role.user_id = user.user_id
        user_role.role_id = role.role_id
        db.session.add(user_role)
        db.session.commit()
|
def PowersOf(logbase, count, lower=0, include_zero=True):
    """Returns a list of count powers of logbase (from logbase**lower),
    optionally prefixed with a literal 0."""

    powers = [logbase ** exponent for exponent in range(lower, lower + count)]
    return [0] + powers if include_zero else powers
|
def attach_container(self, path=None, save="all", mode="w", nbuffer=50, force=False):
    """Add a Container to the simulation, giving it some persistence.

    Parameters
    ----------
    path : str or None (default: None)
        path for the container. If None (the default), the data lives only
        in memory (and are available with `simulation.container`)
    mode : str, optional
        "a" or "w" (default "w")
    save : str, optional
        "all" will save every time-step,
        "last" will only get the last time step
    nbuffer : int, optional
        wait until nbuffer data in the Queue before save on disk.
    force : bool, optional (default False)
        if True, remove the target folder if not empty. if False, raise an
        error.
    """

    target = "%s/%s" % (path, self.id) if path else None
    container = TriflowContainer(target, save=save, mode=mode, metadata=self.parameters, force=force, nbuffer=nbuffer)
    self._container = container
    # Feed the simulation stream into the container.
    container.connect(self.stream)
    return self._container
|
def init(force):
    """Initialize registered aliases and mappings."""

    # With force, ignore "already exists" (HTTP 400) responses.
    ignore = [400] if force else None
    click.secho('Creating indexes...', fg='green', bold=True, file=sys.stderr)
    with click.progressbar(current_search.create(ignore=ignore), length=current_search.number_of_indexes) as bar:
        for name, response in bar:
            bar.label = name
    click.secho('Putting templates...', fg='green', bold=True, file=sys.stderr)
    with click.progressbar(current_search.put_templates(ignore=ignore), length=len(current_search.templates.keys())) as bar:
        for response in bar:
            bar.label = response
|
def str2actfunc(act_func):
    """Convert activation function name to tf function.

    Returns None for names other than 'sigmoid', 'tanh' and 'relu'.
    """

    if act_func in ('sigmoid', 'tanh', 'relu'):
        return getattr(tf.nn, act_func)
    return None
|
def get(self, request, *args, **kwargs):
    """Handler for HTTP GET requests.

    Builds the workflow context, attaches any per-step errors and renders
    the response.
    """

    try:
        context = self.get_context_data(**kwargs)
    except exceptions.NotAvailable:
        # NOTE(review): if exceptions.handle() returns instead of raising or
        # redirecting, `context` below is unbound (NameError). Presumably
        # handle() never returns normally here — confirm its contract.
        exceptions.handle(request)
    self.set_workflow_step_errors(context)
    return self.render_to_response(context)
|
def __select_nearest_ws(xmldata, latitude, longitude):
    """Select the nearest weatherstation."""

    log.debug("__select_nearest_ws: latitude: %s, longitude: %s", latitude, longitude)
    try:
        stations = xmldata[__BRWEERGEGEVENS][__BRACTUEELWEER][__BRWEERSTATIONS][__BRWEERSTATION]
    except (KeyError, TypeError):
        log.warning("Missing section in Buienradar xmldata (%s)." "Can happen 00:00-01:00 CE(S)T", __BRWEERSTATION)
        return None
    # Linear scan for the station with the smallest distance.
    best_dist = 0
    best_station = None
    for wstation in stations:
        candidate_dist = __get_ws_distance(wstation, latitude, longitude)
        if candidate_dist is None:
            continue
        if best_station is None or candidate_dist < best_dist:
            best_dist = candidate_dist
            best_station = wstation
    if best_station is None:
        log.warning("No weatherstation selected; aborting...")
        return None
    try:
        log.debug("Selected weatherstation: code='%s', " "name='%s', lat='%s', lon='%s'.", best_station[__BRSTATIONCODE], best_station[__BRSTATIONNAAM][__BRTEXT], best_station[__BRLAT], best_station[__BRLON])
    except KeyError:
        log.debug("Selected weatherstation")
    return best_station
|
async def check_start(self, pair):
    """Starts a connectivity check on the given candidate pair.

    Sends a STUN Binding request; on a 487 role conflict the agent switches
    its controlling role and retries, on any other failure the pair is
    marked FAILED. A response from an unexpected source address also fails
    the pair.
    """

    self.check_state(pair, CandidatePair.State.IN_PROGRESS)
    request = self.build_request(pair)
    try:
        response, addr = await pair.protocol.request(request, pair.remote_addr, integrity_key=self.remote_password.encode('utf8'))
    except exceptions.TransactionError as exc:
        # 7.1.3.1. Failure Cases
        # Error 487 = role conflict: flip the ICE role we advertised in the
        # request, then retry the whole check.
        if exc.response and exc.response.attributes.get('ERROR-CODE', (None, None))[0] == 487:
            if 'ICE-CONTROLLING' in request.attributes:
                self.switch_role(ice_controlling=False)
            elif 'ICE-CONTROLLED' in request.attributes:
                self.switch_role(ice_controlling=True)
            return await self.check_start(pair)
        else:
            self.check_state(pair, CandidatePair.State.FAILED)
            self.check_complete(pair)
            return
    # check remote address matches
    if addr != pair.remote_addr:
        self.__log_info('Check %s failed : source address mismatch', pair)
        self.check_state(pair, CandidatePair.State.FAILED)
        self.check_complete(pair)
        return
    # success
    self.check_state(pair, CandidatePair.State.SUCCEEDED)
    if self.ice_controlling or pair.remote_nominated:
        pair.nominated = True
    self.check_complete(pair)
|
def transform_attributes(attrs, cls):
    """Transform some attribute keys.

    Maps Diaspora XML attribute names onto the internal entity attribute
    names (id/guid/handle, target ids, image urls, tags, booleans, dates,
    integers); keys without a special rule pass through unchanged.

    :param attrs: Properties from the XML
    :type attrs: dict
    :param cls: Class of the entity
    :type cls: class
    """

    transformed = {}
    for key, value in attrs.items():
        # Normalise missing values to empty strings up front.
        if value is None:
            value = ""
        if key == "text":
            transformed["raw_content"] = value
        elif key == "author":
            if cls == DiasporaProfile:
                # Diaspora Profile XML message contains no GUID. We need the guid. Fetch it.
                profile = retrieve_and_parse_profile(value)
                transformed['id'] = value
                transformed["guid"] = profile.guid
            else:
                transformed["actor_id"] = value
                transformed["handle"] = value
        elif key == 'guid':
            # For profiles the guid was already filled in from the fetched
            # profile above, so only non-profile entities take it here.
            if cls != DiasporaProfile:
                transformed["id"] = value
                transformed["guid"] = value
        elif key in ("root_author", "recipient"):
            transformed["target_id"] = value
            transformed["target_handle"] = value
        elif key in ("target_guid", "root_guid", "parent_guid"):
            transformed["target_id"] = value
            transformed["target_guid"] = value
        elif key in ("first_name", "last_name"):
            # Combine whichever of the two name parts are present.
            values = [attrs.get('first_name'), attrs.get('last_name')]
            values = [v for v in values if v]
            transformed["name"] = " ".join(values)
        elif key == "image_url":
            if "image_urls" not in transformed:
                transformed["image_urls"] = {}
            transformed["image_urls"]["large"] = value
        elif key == "image_url_small":
            if "image_urls" not in transformed:
                transformed["image_urls"] = {}
            transformed["image_urls"]["small"] = value
        elif key == "image_url_medium":
            if "image_urls" not in transformed:
                transformed["image_urls"] = {}
            transformed["image_urls"]["medium"] = value
        elif key == "tag_string":
            if value:
                # "#foo #bar" -> ["foo", "bar"]
                transformed["tag_list"] = value.replace("#", "").split(" ")
        elif key == "bio":
            transformed["raw_content"] = value
        elif key == "searchable":
            transformed["public"] = True if value == "true" else False
        elif key in ["target_type"] and cls == DiasporaRetraction:
            transformed["entity_type"] = DiasporaRetraction.entity_type_from_remote(value)
        elif key == "remote_photo_path":
            transformed["remote_path"] = value
        elif key == "remote_photo_name":
            transformed["remote_name"] = value
        elif key == "status_message_guid":
            transformed["linked_guid"] = value
            transformed["linked_type"] = "Post"
        elif key == "author_signature":
            transformed["signature"] = value
        elif key in BOOLEAN_KEYS:
            transformed[key] = True if value == "true" else False
        elif key in DATETIME_KEYS:
            transformed[key] = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ")
        elif key in INTEGER_KEYS:
            transformed[key] = int(value)
        else:
            # Unrecognised keys are passed through untouched.
            transformed[key] = value
    return transformed
|
def get_single_value(value):
    """Get a single value out of the given value.

    This is meant to be used after a call to :func:`all_elements_equal` that
    returned True. With this function we return a single number from the
    input value.

    Args:
        value (ndarray or number): a numpy array or a single number.

    Returns:
        number: a single number from the input

    Raises:
        ValueError: if not all elements are equal
    """

    if not all_elements_equal(value):
        raise ValueError('Not all values are equal to each other.')
    return value if is_scalar(value) else value.item(0)
|
def SlotSentinel(*args):
    """Provides exception handling for all slots.

    Wraps a Qt slot so that any exception raised inside it is printed
    instead of propagating into the Qt event loop.
    """

    # (NOTE) davidlatwe
    # Thanks to this answer
    # https://stackoverflow.com/questions/18740884
    # When used as a bare decorator (args[0] is the function) or without
    # arguments, fall back to an argument-less pyqtSlot.
    if len(args) == 0 or isinstance(args[0], types.FunctionType):
        args = []

    @QtCore.pyqtSlot(*args)
    def slotdecorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # NOTE(review): kwargs are accepted but silently dropped and the
            # slot's return value is swallowed — presumably intentional for
            # Qt slots (Qt ignores slot returns); confirm before changing.
            try:
                func(*args)
            except Exception:
                traceback.print_exc()
        return wrapper

    return slotdecorator
|
def completed_prefetch(self, blocking_wait=False, max_yield=999):
    """Similar to completed but only returns once the object is local.

    Assumes obj_id only is one id.

    Newly completed (worker, obj_id) pairs are queued for fetching; pairs
    whose object has arrived in the local plasma store are yielded (at most
    max_yield per call), the rest stay queued for the next call.
    """

    for worker, obj_id in self.completed(blocking_wait=blocking_wait):
        # NOTE(review): this plasma_id is computed but unused here — the
        # fetch call below takes the raw obj_id. Presumably leftover code.
        plasma_id = ray.pyarrow.plasma.ObjectID(obj_id.binary())
        # Ask the raylet to pull the object to this node.
        (ray.worker.global_worker.raylet_client.fetch_or_reconstruct([obj_id], True))
        self._fetching.append((worker, obj_id))
    remaining = []
    num_yielded = 0
    for worker, obj_id in self._fetching:
        plasma_id = ray.pyarrow.plasma.ObjectID(obj_id.binary())
        if (num_yielded < max_yield and ray.worker.global_worker.plasma_client.contains(plasma_id)):
            yield (worker, obj_id)
            num_yielded += 1
        else:
            # Not local yet (or yield budget exhausted): keep for next call.
            remaining.append((worker, obj_id))
    self._fetching = remaining
|
def create_file(filename):
    """Creates a new empty file if the file name does not exist yet.

    :param filename: the name of the file; '~' and environment variables
        are expanded first.
    """

    expanded_filename = os.path.expanduser(os.path.expandvars(filename))
    if not os.path.exists(expanded_filename):
        # Append mode creates the file without touching existing content.
        with open(expanded_filename, "a"):
            pass
|
def document(self):
    """Render the error document."""

    resp = request.environ.get('pylons.original_response')
    prefix = request.environ.get('SCRIPT_NAME', '')
    code = request.params.get('code', resp.status_int)
    message = request.params.get('message', resp.body)
    return error_document_template % {'prefix': prefix, 'code': code, 'message': message}
|
def parse_opera(url_data):
    """Parse an opera bookmark file and register every bookmarked URL."""

    from ..bookmarks.opera import parse_bookmark_data
    content = url_data.get_content()
    for bookmark_url, bookmark_name, line_number in parse_bookmark_data(content):
        url_data.add_url(bookmark_url, line=line_number, name=bookmark_name)
|
def transform(self, code, *, name=None, filename=None):
    """Transform a codetransformer.Code object applying the transforms.

    Parameters
    ----------
    code : Code
        The code object to transform.
    name : str, optional
        The new name for this code object.
    filename : str, optional
        The new filename for this code object.

    Returns
    -------
    new_code : Code
        The transformed code object.
    """

    # reverse lookups from for constants and names.
    # Collect the instructions whose arguments are consts / names / varnames
    # so the transform_* hooks can rewrite them in bulk.
    reversed_consts = {}
    reversed_names = {}
    reversed_varnames = {}
    for instr in code:
        if isinstance(instr, LOAD_CONST):
            reversed_consts[instr] = instr.arg
        if instr.uses_name:
            reversed_names[instr] = instr.arg
        if isinstance(instr, (STORE_FAST, LOAD_FAST)):
            reversed_varnames[instr] = instr.arg
    # Apply each hook and write the transformed values back onto the
    # instructions in place. `or ((), ())` covers the empty-dict case.
    instrs, consts = tuple(zip(*reversed_consts.items())) or ((), ())
    for instr, const in zip(instrs, self.transform_consts(consts)):
        instr.arg = const
    instrs, names = tuple(zip(*reversed_names.items())) or ((), ())
    for instr, name_ in zip(instrs, self.transform_names(names)):
        instr.arg = name_
    instrs, varnames = tuple(zip(*reversed_varnames.items())) or ((), ())
    for instr, varname in zip(instrs, self.transform_varnames(varnames)):
        instr.arg = varname
    # Run the pattern dispatcher within a fresh transformation context and
    # rebuild the Code object around the transformed instruction stream.
    with self._new_context(code):
        post_transform = self.patterndispatcher(code)
        return Code(
            post_transform,
            code.argnames,
            cellvars=self.transform_cellvars(code.cellvars),
            freevars=self.transform_freevars(code.freevars),
            name=name if name is not None else code.name,
            filename=filename if filename is not None else code.filename,
            firstlineno=code.firstlineno,
            lnotab=_new_lnotab(post_transform, code.lnotab),
            flags=code.flags,
        )
|
def complete(self, config, prompt, session, context, current_arguments, current):
    # type: (CompletionInfo, str, ShellSession, BundleContext, List[str], str) -> List[str]
    """Returns the list of services IDs matching the current state.

    :param config: Configuration of the current completion
    :param prompt: Shell prompt (for re-display)
    :param session: Shell session (to display in shell)
    :param context: Bundle context of the Shell bundle
    :param current_arguments: Current arguments (without the command itself)
    :param current: Current word
    :return: A list of matches
    """

    with use_ipopo(context) as ipopo:
        try:
            # Find the factory name: first look for an explicit FACTORY
            # completer in the command signature.
            for idx, completer_id in enumerate(config.completers):
                if completer_id == FACTORY:
                    factory_name = current_arguments[idx]
                    break
            else:
                # No factory completer found in signature: derive the
                # factory from a COMPONENT argument instead.
                for idx, completer_id in enumerate(config.completers):
                    if completer_id == COMPONENT:
                        name = current_arguments[idx]
                        details = ipopo.get_instance_details(name)
                        factory_name = details["factory"]
                        break
                else:
                    # No factory name can be found
                    return []
            # Get the details about this factory
            details = ipopo.get_factory_details(factory_name)
            properties = details["properties"]
        except (IndexError, ValueError):
            # No/unknown factory name
            return []
        else:
            # Offer "key=" completions for properties matching the prefix.
            return ["{}=".format(key) for key in properties if key.startswith(current)]
|
def copy_from_dict(self, adict, parent=None):
    """Copy every value from an existing dict, converting nested dicts
    into PYConf objects recursively.

    :param adict: the dict to copy values from.
    :type adict: dict
    :param parent: the parent object to copy into;
        if None, copy into self.
    :type parent: rookout.PYConf
    """

    if not parent:
        parent = self
    for k, v in adict.items():
        if isinstance(v, dict):
            # Wrap nested dicts in PYConf and recurse into them.
            vDict = PYConf(v)
            self.copy_from_dict(v, vDict)
            parent[k] = vDict
        else:
            parent[k] = v
|
def verbose_comment(self, t, i):
    """Collect a verbose-mode comment token by token until end of line.

    Backslash escapes are tracked: an escaped character that is one of the
    newly supported references gets an extra backslash inserted before it.
    The trailing newline (when reached) is included in the result.
    """

    collected = []
    pending_escape = False
    try:
        while t != "\n":
            if not pending_escape and t == "\\":
                pending_escape = True
                collected.append(t)
            elif pending_escape:
                pending_escape = False
                if t in self._new_refs:
                    # Double-escape the new reference character.
                    collected.append("\\")
                collected.append(t)
            else:
                collected.append(t)
            t = next(i)
    except StopIteration:
        pass
    else:
        # Loop ended normally, so t is the newline: keep it.
        collected.append(t)
    return collected
|
def slugify_headline(line, remove_dashes=False):
    """Split a Markdown headline into its text, an anchor slug, and level.

    Given a headline such as ``'### some header lvl3'``, returns a
    3-element list: the '#'-stripped headline text, a slug usable in
    ``<a id=''></a>`` anchor tags, and the headline level as an integer,
    e.g. ``['some header lvl3', 'some-header-lvl3', 3]``.  When
    ``remove_dashes`` is true, all dashes are stripped from the slug.
    """
    without_trailing = line.rstrip('#')
    text = without_trailing.lstrip('#')
    # The number of leading '#' characters is the headline level.
    level = len(without_trailing) - len(text)
    text = text.strip()
    # Drop '.' and '/' entirely, then map any other invalid character to '-'.
    cleaned = text.replace('.', '').replace('/', '')
    slug = ''.join(c if c in VALIDS else '-' for c in cleaned).lower()
    # Collapse runs of dashes and strip dashes from both ends.
    slug = re.sub(r'(-)\1+', r'\1', slug).strip('-')
    # Exception: ' & ' renders as a double dash in GitHub anchors.
    slug = slug.replace('-&-', '--')
    if remove_dashes:
        slug = slug.replace('-', '')
    return [text, slug, level]
|
def set_font_size(self, size):
    """Set font size in points."""
    if self.font_size_pt == size:
        # Requested size is already active; nothing to do.
        return
    self.font_size_pt = size
    # Convert points to user units via the document scale factor.
    self.font_size = size / self.k
    if self.page > 0:
        # A page is open, so emit the font-selection operator now.
        self._out(sprintf('BT /F%d %.2f Tf ET',
                          self.current_font['i'], self.font_size_pt))
|
def MakeClass(descriptor):
    """Construct a class object for a protobuf described by descriptor.

    Composite descriptors are handled by recursively building a class for
    each nested descriptor and attaching it as an attribute of the parent
    class.  This is the dynamic equivalent to:

        class Parent(message.Message):
            __metaclass__ = GeneratedProtocolMessageType
            DESCRIPTOR = descriptor

            class Child(message.Message):
                __metaclass__ = GeneratedProtocolMessageType
                DESCRIPTOR = descriptor.nested_types[0]

    Sample usage:
        file_descriptor = descriptor_pb2.FileDescriptorProto()
        file_descriptor.ParseFromString(proto2_string)
        msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
        msg_class = reflection.MakeClass(msg_descriptor)
        msg = msg_class()

    Args:
      descriptor: A descriptor.Descriptor object describing the protobuf.

    Returns:
      The Message class object described by the descriptor.
    """
    try:
        # Memoized: the same descriptor always yields the same class.
        return MESSAGE_CLASS_CACHE[descriptor]
    except KeyError:
        pass
    # Build nested message classes first and expose them as attributes.
    attrs = {
        name: MakeClass(nested)
        for name, nested in descriptor.nested_types_by_name.items()
    }
    attrs[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor
    cls = GeneratedProtocolMessageType(
        str(descriptor.name), (message.Message,), attrs)
    MESSAGE_CLASS_CACHE[descriptor] = cls
    return cls
|
def get_logs(self, jobs, log_file=None):
    """Get log or log url of the jobs.

    Does nothing unless both ``jobs`` and ``self.log_url`` are set.
    With ``log_file`` given, each job's log is downloaded to that file;
    otherwise the job's log URL is written to the logger.
    """
    if not jobs or not self.log_url:
        return
    for job in jobs:
        job_url = "{}?jobId={}".format(self.log_url, job.get("id"))
        if log_file:
            # The '&download' query flag asks the server for a file download.
            self._download_log("{}&download".format(job_url), log_file)
        else:
            logger.info("Submit log for job %s: %s", job.get("id"), job_url)
|
def max_width(*args, **kwargs):
    """Returns formatted text or context manager for textui:puts.

    >>> from clint.textui import puts, max_width
    >>> max_width('123 5678', 8)
    '123 5678'
    >>> max_width('123 5678', 7)
    '123 \n5678'
    >>> with max_width(7):
    ...     puts('123 5678')
    '123 \n5678'
    """
    # Normalise positionals: pad any missing trailing arguments from the
    # keyword arguments (string, cols, separator, in that order).
    params = list(args)
    for key in ('string', 'cols', 'separator')[len(params):]:
        params.append(kwargs.get(key))
    string, cols, separator = params
    if separator is None:
        # Default separator.
        separator = '\n'
    if cols is None:
        # cols must always be provided; when it is missing the lone
        # positional was actually cols (string may arrive later via
        # textui:puts), so swap the two.
        string, cols = cols, string
    if string is None:
        # Context-manager form: remember the width for nested puts calls.
        MAX_WIDTHS.append((cols, separator))
        return _max_width_context()
    return _max_width_formatter(string, cols, separator)
|
def cluster_info(cpu, cfg):
    """Collects fact for each host.

    Collects the cpu and node configuration facts to be used by the rule.

    Arguments:
        cpu (CpuInfo): Parser object for the cpu info.
        cfg (NodeConfig): Parser object for the node configuration.

    Returns:
        dict: Dictionary of fact information including the keys
        ``cpu_count``, ``pods_per_core``, ``pods_per_core_customized``,
        ``max_pods``, and ``max_pods_customized``.
    """
    cpu_count = cpu.cpu_count
    per_core_setting = cfg.doc.find("pods-per-core")
    max_pods_setting = cfg.doc.find("max-pods")
    # Fall back to the module defaults when the node config has no override.
    per_core = int(per_core_setting.value) if per_core_setting else PODS_PER_CORE
    max_pods_cfg = int(max_pods_setting.value) if max_pods_setting else MAX_PODS
    return {
        "cpu_count": cpu_count,
        "pods_per_core": per_core,
        "pods_per_core_customized": bool(per_core_setting),
        # Effective limit is the smaller of the configured cap and the
        # per-core derived capacity.
        "max_pods": min(max_pods_cfg, cpu_count * per_core),
        "max_pods_customized": bool(max_pods_setting),
    }
|
def on_patch(resc, req, resp, rid):
    """Deserialize the payload & update the single item."""
    # Announce the request to generic and update-specific listeners.
    signals.pre_req.send(resc.model)
    signals.pre_req_update.send(resc.model)
    # Apply the deserialized payload to the stored record.
    payload = req.deserialize()
    record = find(resc.model, rid)
    from_rest(record, payload)
    goldman.sess.store.update(record)
    # Serialize the updated record back into the response.
    rest_doc = to_rest_model(record, includes=req.includes)
    resp.last_modified = record.updated
    resp.serialize(rest_doc)
    signals.post_req.send(resc.model)
    signals.post_req_update.send(resc.model)
|
def write_system_config(base_url, datadir, tooldir):
    """Write a bcbio_system.yaml configuration file with tool information.

    Fetches the template configuration from ``base_url`` and writes it into
    the galaxy directory under ``datadir``, rewriting program ``dir:``
    entries in the resources section to point at the installed java share
    directory when ``tooldir`` is provided, and commenting out galaxy
    settings.  Returns the path of the written configuration file.
    """
    out_file = os.path.join(datadir, "galaxy", os.path.basename(base_url))
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))
    if os.path.exists(out_file):  # if no tool directory and exists, do not overwrite
        if tooldir is None:
            return out_file
        else:
            # Keep a timestamped backup before rewriting an existing config.
            # Fixed: use %m (month); the original %M formatted the minute.
            bak_file = out_file + ".bak%s" % (datetime.datetime.now().strftime("%Y%m%d_%H%M"))
            shutil.copy(out_file, bak_file)
    if tooldir:
        java_basedir = os.path.join(tooldir, "share", "java")
    rewrite_ignore = ("log",)
    with contextlib.closing(urllib_request.urlopen(base_url)) as in_handle:
        with open(out_file, "w") as out_handle:
            in_resources = False
            in_prog = None
            for line in (l.decode("utf-8") for l in in_handle):
                if line[0] != " ":
                    # Top-level key: track whether we entered the resources section.
                    in_resources = line.startswith("resources")
                    in_prog = None
                elif (in_resources and line[:2] == "  " and line[2] != " "
                      and not line.strip().startswith(rewrite_ignore)):
                    # Two-space indented key inside resources: a program entry.
                    in_prog = line.split(":")[0].strip()
                # Update java directories to point to install directory, avoid special cases
                elif line.strip().startswith("dir:") and in_prog and in_prog not in ["log", "tmp"]:
                    final_dir = os.path.basename(line.split()[-1])
                    if tooldir:
                        line = "%s: %s\n" % (line.split(":")[0], os.path.join(java_basedir, final_dir))
                    in_prog = None
                elif line.startswith("galaxy"):
                    # Galaxy-specific settings are disabled in the local copy.
                    line = "# %s" % line
                out_handle.write(line)
    return out_file
|
def shard_filename(path, tag, shard_num, total_shards):
    """Create filename for data shard."""
    # Shard numbers are zero-padded to five digits for stable sorting.
    basename = "%s-%s-%s-%.5d-of-%.5d" % (
        _PREFIX, _ENCODE_TAG, tag, shard_num, total_shards)
    return os.path.join(path, basename)
|
def _parse_value(self, html_data, field):
    """Parse the HTML table to find the requested field's value.

    All of the values are passed in an HTML table row instead of as
    individual items, so the requested attribute is matched against the
    parsing scheme sports-reference uses to differentiate stats.

    Parameters
    ----------
    html_data : string
        A string containing all of the rows of stats for a given team.
        If multiple tables are being referenced, this will be comprised
        of multiple rows in a single string.
    field : string
        The name of the attribute to match.  Field must be a key in the
        PLAYER_SCHEME dictionary.

    Returns
    -------
    list
        A list of all values that match the requested field.  If no
        value could be found, returns None.
    """
    selector = PLAYER_SCHEME[field]
    values = [cell.text() for cell in html_data(selector).items()]
    # Stats can be added and removed on a yearly basis; an empty match
    # means the stat is absent and is reported as None.
    return values if values else None
|
def construct(args):
    '''Construct a queue-name from a set of arguments and a delimiter'''
    delimiter, encodeseq = delimiter_encodeseq(
        _c.FSQ_DELIMITER, _c.FSQ_ENCODE, _c.FSQ_CHARSET)
    if not args:
        return delimiter
    # Encode every component as unicode, then join; the resulting name
    # always starts with the delimiter.
    encoded = [encode(coerce_unicode(arg, _c.FSQ_CHARSET),
                      delimiter=delimiter, encodeseq=encodeseq)
               for arg in args]
    return delimiter + delimiter.join(encoded)
|
def insert_import_path_to_sys_modules(import_path):
    """Make ``import_path`` importable by prepending it to ``sys.path``.

    When importing a module, Python references the directories in
    sys.path, whose default value varies by system; starting Python with
    a script only inserts the script's own directory at sys.path[0].
    To import objects from an arbitrary script we therefore push its
    directory (or the path itself, if already a directory) onto the
    front of sys.path.
    """
    resolved = os.path.abspath(import_path)
    # A file path contributes its containing directory; a directory
    # path is used as-is.
    target = resolved if os.path.isdir(resolved) else os.path.dirname(resolved)
    sys.path.insert(0, target)
|
def _calc_cost ( self , * args , ** kwargs ) :
"""Calculate the cost
This method calculates the cost from each of the input operators
Returns
float cost"""
|
return np . sum ( [ op . cost ( * args , ** kwargs ) for op in self . _operators ] )
|
def get_starsep_RaDecDeg(ra1_deg, dec1_deg, ra2_deg, dec2_deg):
    """Return the separation of two positions as a sexagesimal string."""
    separation = deltaStarsRaDecDeg(ra1_deg, dec1_deg, ra2_deg, dec2_deg)
    _sgn, deg, mn, sec = degToDms(separation)
    # Omit the degrees field when it is zero.
    if deg != 0:
        return '%02d:%02d:%06.3f' % (deg, mn, sec)
    return '%02d:%06.3f' % (mn, sec)
|
def putSubHandler(self, prefix, handler):
    '''Add a sub :class:`RpcHandler` with prefix ``prefix``.

    :keyword prefix: a string defining the prefix of the subhandler
    :keyword handler: the sub-handler.
    '''
    # Give the handler a back-reference and register it under the prefix.
    handler._parent = self
    self.subHandlers[prefix] = handler
    # Return self so calls can be chained.
    return self
|
def _get_fields ( self , attr ) :
"""Get the hash / range fields of all joined constraints"""
|
ret = set ( )
if "OR" in self . pieces :
return ret
for i in range ( 0 , len ( self . pieces ) , 2 ) :
const = self . pieces [ i ]
field = getattr ( const , attr )
if field is not None :
ret . add ( field )
return ret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.