signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_sn(unit):
    """Count the number of sentences in a line of text.

    Args:
        unit: a line of text.

    Returns:
        int: the number of sentence-delimiter characters found, used as
        the sentence count.
    """
    # Every delimiter character matched marks the end of one sentence.
    delimiters_found = re.findall(str(sentence_delimiters), unit)
    if not delimiters_found:
        return 0
    return len(''.join(delimiters_found))
|
def _nutation(date, eop_correction=True, terms=106):
    """Model 1980 of nutation as described in Vallado p. 224

    Args:
        date (beyond.utils.date.Date)
        eop_correction (bool): set to ``True`` to include model correction
            from 'finals' files.
        terms (int): number of terms of the trigonometric series to use
    Return:
        tuple: 3-elements, all floats in degrees
            1. epsilon_bar (mean obliquity of the ecliptic)
            2. delta_psi (nutation in longitude)
            3. delta_eps (nutation in obliquity)
    Warning:
        The good version of the nutation model can be found in the **errata**
        of the 4th edition of *Fundamentals of Astrodynamics and Applications*
        by Vallado.
    """
    ttt = date.change_scale('TT').julian_century
    # One full revolution, in degrees
    r = 360.
    # Mean obliquity of the ecliptic, in arcseconds
    epsilon_bar = 84381.448 - 46.8150 * ttt - 5.9e-4 * ttt ** 2 + 1.813e-3 * ttt ** 3
    # Conversion to degrees
    epsilon_bar /= 3600.
    # Delaunay arguments, all in degrees:
    # mean anomaly of the moon
    m_m = 134.96298139 + (1325 * r + 198.8673981) * ttt + 0.0086972 * ttt ** 2 + 1.78e-5 * ttt ** 3
    # mean anomaly of the sun
    m_s = 357.52772333 + (99 * r + 359.0503400) * ttt - 0.0001603 * ttt ** 2 - 3.3e-6 * ttt ** 3
    # L - Omega (mean argument of latitude of the moon)
    u_m_m = 93.27191028 + (1342 * r + 82.0175381) * ttt - 0.0036825 * ttt ** 2 + 3.1e-6 * ttt ** 3
    # Mean elongation of the moon from the sun
    d_s = 297.85036306 + (1236 * r + 307.11148) * ttt - 0.0019142 * ttt ** 2 + 5.3e-6 * ttt ** 3
    # Mean longitude of the ascending node of the moon
    om_m = 125.04452222 - (5 * r + 134.1362608) * ttt + 0.0020708 * ttt ** 2 + 2.2e-6 * ttt ** 3
    delta_psi = 0.
    delta_eps = 0.
    # Accumulate the trigonometric series; _tab(terms) yields the tabulated
    # integer multipliers and real coefficients for each term.
    for integers, reals in _tab(terms):
        a1, a2, a3, a4, a5 = integers
        # Coefficients are tabulated in units of 0.1 mas; dividing by
        # 36,000,000 (= 3600 * 1e4) converts them to degrees.
        A, B, C, D = np.array(list(reals)) / 36000000.
        a_p = a1 * m_m + a2 * m_s + a3 * u_m_m + a4 * d_s + a5 * om_m
        # a_p %= 360.
        delta_psi += (A + B * ttt) * np.sin(np.deg2rad(a_p))
        delta_eps += (C + D * ttt) * np.cos(np.deg2rad(a_p))
    if eop_correction:
        # EOP corrections are given in milliarcseconds; convert to degrees.
        delta_eps += date.eop.deps / 3600000.
        delta_psi += date.eop.dpsi / 3600000.
    return epsilon_bar, delta_psi, delta_eps
|
def _get_assistive_access():
    '''Get a list of all of the assistive access applications installed,
    returns as a ternary showing whether each app is enabled or not.'''
    cmd = 'sqlite3 "/Library/Application Support/com.apple.TCC/TCC.db" "SELECT * FROM access"'
    result = __salt__['cmd.run_all'](cmd, output_loglevel='debug', python_shell=False)
    if result['retcode'] != 0:
        # Collect whatever diagnostic output is available, stderr first.
        pieces = [result[key] for key in ('stderr', 'stdout') if key in result]
        raise CommandExecutionError('Error: {0}'.format(''.join(pieces)))
    return re.findall(
        r'kTCCServiceAccessibility\|(.*)\|[0-9]{1}\|([0-9]{1})\|[0-9]{1}\|',
        result['stdout'],
        re.MULTILINE,
    )
|
def get_mark_css(aes_name, css_value):
    """Generate CSS class for <mark> tag.

    Parameters
    ----------
    aes_name : str
        The name of the class.
    css_value : str
        The value for the CSS property defined by aes_name.

    Returns
    -------
    list of str
        The CSS codeblocks.
    """
    prop = AES_CSS_MAP[aes_name]
    # A list of values means per-rule styling; a scalar means a single rule.
    if isinstance(css_value, list):
        return get_mark_css_for_rules(aes_name, prop, css_value)
    return get_mark_simple_css(aes_name, prop, css_value)
|
def find_by_external_tracker(self, url, id_):
    """Find a list of bugs by searching an external tracker URL and ID.

    :param url: ``str``, the external ticket URL, eg
        "http://tracker.ceph.com". (Note this is the base URL.)
    :param id_: ``str``, the external ticket ID, eg "18812".
    :returns: deferred that when fired returns a list of ``AttrDict``s
        representing these bugs.
    """
    # Bugzilla "boolean chart" query: f/o/v triplets filter on the
    # external tracker URL (substring) and the exact external bug id.
    search_terms = {
        'include_fields': ['id', 'summary', 'status'],
        'f1': 'external_bugzilla.url',
        'o1': 'substring',
        'v1': url,
        'f2': 'ext_bz_bug_map.ext_bz_bug_id',
        'o2': 'equals',
        'v2': id_,
    }
    deferred = self.call('Bug.search', search_terms)
    deferred.addCallback(self._parse_bugs_callback)
    return deferred
|
def lonlat_to_laea(lon, lat, lon0, lat0, f_e=0.0, f_n=0.0):
    """Converts vectors of longitude and latitude into Lambert Azimuthal
    Equal Area projection (km), with respect to an origin point

    :param numpy.ndarray lon:
        Longitudes
    :param numpy.ndarray lat:
        Latitudes
    :param float lon0:
        Central longitude
    :param float lat0:
        Central latitude
    :param float f_e:
        False easting (km)
    :param float f_n:
        False northing (km)
    :returns:
        * easting (km)
        * northing (km)
    """
    # Work in radians throughout
    lon = np.radians(lon)
    lat = np.radians(lat)
    lon0 = np.radians(lon0)
    lat0 = np.radians(lat0)
    # Authalic-latitude auxiliaries: TO_Q is presumably the "q" function of
    # the ellipsoidal LAEA formulation -- TODO confirm against its definition.
    q_0 = TO_Q(lat0)
    q_p = TO_Q(np.pi / 2.)
    q_val = TO_Q(lat)
    beta = np.arcsin(q_val / q_p)
    beta0 = np.arcsin(q_0 / q_p)
    # Radius of the authalic sphere
    r_q = WGS84["a"] * np.sqrt(q_p / 2.)
    # D and B intermediate terms of the projection
    dval = WGS84["a"] * (np.cos(lat0) / np.sqrt(1.0 - (WGS84["e2"] * (np.sin(lat0) ** 2.))) / (r_q * np.cos(beta0)))
    bval = r_q * np.sqrt(2. / (1.0 + (np.sin(beta0) * np.sin(beta)) + (np.cos(beta) * np.cos(beta0) * np.cos(lon - lon0))))
    # Planar coordinates, offset by the false easting/northing
    easting = f_e + ((bval * dval) * (np.cos(beta) * np.sin(lon - lon0)))
    northing = f_n + (bval / dval) * ((np.cos(beta0) * np.sin(beta)) - (np.sin(beta0) * np.cos(beta) * np.cos(lon - lon0)))
    return easting, northing
|
def _validate_anyof(self, definitions, field, value):
    """{'type': 'list', 'logical': 'anyof'}"""
    valid_count, _errors = self.__validate_logical('anyof', definitions, field, value)
    # 'anyof' succeeds as soon as a single definition validates.
    if valid_count < 1:
        self._error(field, errors.ANYOF, _errors, valid_count, len(definitions))
|
def list(self, prefix='', delimiter='', filter_function=None, max_results=1, previous_key=''):
    '''a method to list keys in the google drive collection

    :param prefix: string with prefix value to filter results
    :param delimiter: string with value which results must not contain (after prefix)
    :param filter_function: (positional arguments) function used to filter results
    :param max_results: integer with maximum number of results to return
    :param previous_key: string with key in collection to begin search after
    :return: list of key strings

    NOTE:   each key string can be divided into one or more segments
            based upon the / characters which occur in the key string as
            well as its file extension type. if the key string represents
            a file path, then each directory in the path, the file name
            and the file extension are all separate indexed values.
            eg. lab/unittests/1473719695.2165067.json is indexed:
            ['lab', 'unittests', '1473719695.2165067', '.json']
            it is possible to filter the records in the collection according
            to one or more of these path segments using a filter_function.

    NOTE:   the filter_function must be able to accept an array of positional
            arguments and return a value that can evaluate to true or false.
            while searching the records, list produces an array of strings
            which represent the directory structure in relative path of each
            key string. if a filter_function is provided, this list of strings
            is fed to the filter function. if the function evaluates this input
            and returns a true value the file will be included in the list
            results.'''
    title = '%s.list' % self.__class__.__name__
    # validate input arguments against the class field model
    input_fields = {'prefix': prefix, 'delimiter': delimiter, 'max_results': max_results, 'previous_key': previous_key}
    for key, value in input_fields.items():
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)
    # validate filter function by probing it with a representative segment list
    if filter_function:
        try:
            path_segments = ['lab', 'unittests', '1473719695.2165067', '.json']
            filter_function(*path_segments)
        except:
            err_msg = '%s(filter_function=%s)' % (title, filter_function.__class__.__name__)
            raise TypeError('%s must accept positional arguments.' % err_msg)
    # construct empty results list
    results_list = []
    # when previous_key is given, skip records until it is seen (pagination)
    check_key = True
    if previous_key:
        check_key = False
    # determine root path (directory part of the prefix)
    root_path = ''
    if prefix:
        from os import path
        root_path, file_name = path.split(prefix)
    # iterate over files in the collection
    for file_path, file_id in self._walk(root_path):
        path_segments = file_path.split(os.sep)
        record_key = os.path.join(*path_segments)
        record_key = record_key.replace('\\', '/')
        if record_key == previous_key:
            check_key = True
        # find starting point
        if not check_key:
            continue
        # apply prefix filter
        partial_key = record_key
        if prefix:
            if record_key.find(prefix) == 0:
                partial_key = record_key[len(prefix):]
            else:
                continue
        # apply delimiter filter
        if delimiter:
            if partial_key.find(delimiter) > -1:
                continue
        # apply filter function
        if filter_function:
            if filter_function(*path_segments):
                results_list.append(record_key)
        else:
            results_list.append(record_key)
        # stop early once the requested number of results is collected
        # NOTE(review): placement of this check inside the loop is
        # reconstructed from flattened source (indentation was lost);
        # confirm against the upstream repository.
        if len(results_list) == max_results:
            return results_list
    # return results list
    return results_list
|
def tag(context: click.Context, file_id: int, tags: List[str]):
    """Add tags to an existing file."""
    db = context.obj['db']
    file_obj = db.file_(file_id)
    if file_obj is None:
        print(click.style('unable to find file', fg='red'))
        context.abort()
    for tag_name in tags:
        tag_obj = db.tag(tag_name)
        if tag_obj is None:
            # Unknown tag: create it on the fly.
            tag_obj = db.new_tag(tag_name)
        elif tag_obj in file_obj.tags:
            print(click.style(f"{tag_name}: tag already added", fg='yellow'))
            continue
        file_obj.tags.append(tag_obj)
    db.commit()
    all_tags = (tag.name for tag in file_obj.tags)
    print(click.style(f"file tags: {', '.join(all_tags)}", fg='blue'))
|
def _set_datapath ( self , datapath ) :
"""Set a datapath ."""
|
if datapath :
self . _datapath = datapath . rstrip ( os . sep )
self . _fifo = int ( stat . S_ISFIFO ( os . stat ( self . datapath ) . st_mode ) )
else :
self . _datapath = None
self . _fifo = False
|
def segment_from_cnr(cnr_file, data, out_base):
    """Provide segmentation on a cnr file, used in external PureCN integration."""
    cns_file = _cnvkit_segment(
        cnr_file,
        dd.get_coverage_interval(data),
        data,
        [data],
        out_file="%s.cns" % out_base,
        detailed=True,
    )
    # Convert the segmented output into the seg-format file expected by PureCN.
    return _add_seg_to_output({"cns": cns_file}, data, enumerate_chroms=False)["seg"]
|
def next(self, timeout=None):
    """Return the next result value in the sequence. Raise
    StopIteration at the end. Can raise the exception raised by
    the Job."""
    try:
        result = self._collector._get_result(self._idx, timeout)
    except IndexError:
        # Past the last result: reset the cursor so iteration can restart.
        self._idx = 0
        raise StopIteration
    except:
        # Any other failure also resets the cursor before propagating.
        self._idx = 0
        raise
    else:
        self._idx += 1
        assert result.ready()
        return result.get(0)
|
def filter_belief(stmts_in, belief_cutoff, **kwargs):
    """Filter to statements with belief above a given cutoff.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    belief_cutoff : float
        Only statements with belief above the belief_cutoff will be returned.
        Here 0 < belief_cutoff < 1.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    dump_pkl = kwargs.get('save')
    logger.info('Filtering %d statements to above %f belief' % (len(stmts_in), belief_cutoff))
    stmts_out = []
    for stmt in stmts_in:
        # First round: drop low-belief statements from the top-level list.
        if stmt.belief < belief_cutoff:
            continue
        stmts_out.append(stmt)
        # Second round: prune supports/supported-by links that point to
        # statements below the cutoff.
        stmt.supports = [s for s in stmt.supports if s.belief >= belief_cutoff]
        stmt.supported_by = [s for s in stmt.supported_by if s.belief >= belief_cutoff]
    logger.info('%d statements after filter...' % len(stmts_out))
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
|
def get_resource_class_collection_attribute_iterator(rc):
    """Returns an iterator over all terminal attributes in the given registered
    resource."""
    # Only attributes whose kind is COLLECTION are yielded.
    return (attr
            for attr in itervalues_(rc.__everest_attributes__)
            if attr.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION)
|
def _get_template_list ( self ) :
"Get the hierarchy of templates belonging to the object / box _ type given ."
|
t_list = [ ]
if hasattr ( self . obj , 'category_id' ) and self . obj . category_id :
cat = self . obj . category
base_path = 'box/category/%s/content_type/%s/' % ( cat . path , self . name )
if hasattr ( self . obj , 'slug' ) :
t_list . append ( base_path + '%s/%s.html' % ( self . obj . slug , self . box_type , ) )
t_list . append ( base_path + '%s.html' % ( self . box_type , ) )
t_list . append ( base_path + 'box.html' )
base_path = 'box/content_type/%s/' % self . name
if hasattr ( self . obj , 'slug' ) :
t_list . append ( base_path + '%s/%s.html' % ( self . obj . slug , self . box_type , ) )
t_list . append ( base_path + '%s.html' % ( self . box_type , ) )
t_list . append ( base_path + 'box.html' )
t_list . append ( 'box/%s.html' % self . box_type )
t_list . append ( 'box/box.html' )
return t_list
|
def get_authorize_url(self, redirect_uri=None, **kw):
    '''return the authorization url that the user should be redirected to.'''
    # Explicit argument wins over the instance default.
    redirect = redirect_uri or self.redirect_uri
    if not redirect:
        raise APIError('21305', 'Parameter absent: redirect_uri', 'OAuth2 request')
    response_type = kw.pop('response_type', 'code')
    query = _encode_params(client_id=self.client_id, response_type=response_type, redirect_uri=redirect, **kw)
    return '%s%s?%s' % (self.auth_url, 'authorize', query)
|
def trips_process_text():
    """Process text with TRIPS and return INDRA Statements."""
    # CORS preflight: nothing to process.
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    tp = trips.process_text(body.get('text'))
    return _stmts_from_proc(tp)
|
def _read(self, directory, filename, session, path, name, extension, spatial=None, spatialReferenceID=None, replaceParamFile=None):
    """ProjectFileEvent Read from File Method.

    Loads event records from the YAML file at ``path`` and registers each one
    in the database session and on ``self.events``, skipping events whose
    subfolder does not exist and those that already have a similar entry.
    """
    yml_events = []
    with open(path) as fo:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and requires a Loader argument in PyYAML >= 6.
        # The file appears to rely on custom YAML object tags here
        # (yml_event.as_orm below) -- confirm before switching to safe_load.
        yml_events = yaml.load(fo)
    for yml_event in yml_events:
        # Only register events whose referenced subfolder actually exists.
        if os.path.exists(os.path.join(directory, yml_event.subfolder)):
            orm_event = yml_event.as_orm()
            if not self._similar_event_exists(orm_event.subfolder):
                session.add(orm_event)
                self.events.append(orm_event)
                session.commit()
|
def recommend_delete(self, num_iid, session):
    '''taobao.item.recommend.delete -- remove an item from showcase recommendation.

    Cancels the showcase-recommended status of the given item for the current
    user. The item's seller is derived from the supplied session, so session
    binding is required.'''
    req = TOPRequest('taobao.item.recommend.delete')
    req['num_iid'] = num_iid
    response = self.execute(req, session)
    self.create(response['item'])
    return self
|
def should_show_thanks_page_to(participant):
    """In the context of the /ad route, should the participant be shown
    the thanks.html page instead of ad.html?"""
    if participant is None:
        return False
    # Completed assignments always see the thanks page.
    if participant.status in ("submitted", "approved"):
        return True
    # Otherwise only participants who finished while overrecruited/working
    # are ready for external submission.
    marked_done = participant.end_time is not None
    return participant.status in ("overrecruited", "working") and marked_done
|
def decryption(self, ciphertext, key):
    """Builds a single cycle AES Decryption circuit.

    :param WireVector ciphertext: data to decrypt
    :param WireVector key: AES key to use to encrypt (AES is symmetric)
    :return: a WireVector containing the plaintext
    """
    if len(ciphertext) != self._key_len:
        raise pyrtl.PyrtlError("Ciphertext length is invalid")
    if len(key) != self._key_len:
        raise pyrtl.PyrtlError("key length is invalid")
    round_keys = self._key_gen(key)
    # Initial whitening with the final round key, then 10 inverse rounds
    # consuming the round keys in reverse order.
    state = self._add_round_key(ciphertext, round_keys[10])
    for rnd in range(1, 11):
        state = self._inv_shift_rows(state)
        state = self._sub_bytes(state, True)
        state = self._add_round_key(state, round_keys[10 - rnd])
        if rnd != 10:
            # The last round omits the inverse MixColumns step.
            state = self._mix_columns(state, True)
    return state
|
def _combine_msd_quan ( msd , quan ) :
"""Combine msd and quantiles in chain summary
Parameters
msd : array of shape ( num _ params , 2 , num _ chains )
mean and sd for chains
cquan : array of shape ( num _ params , num _ quan , num _ chains )
quantiles for chains
Returns
msdquan : array of shape ( num _ params , 2 + num _ quan , num _ chains )"""
|
dim1 = msd . shape
n_par , _ , n_chains = dim1
ll = [ ]
for i in range ( n_chains ) :
a1 = msd [ : , : , i ]
a2 = quan [ : , : , i ]
ll . append ( np . column_stack ( [ a1 , a2 ] ) )
msdquan = np . dstack ( ll )
return msdquan
|
def split_url(self, url):
    """Parse an IIIF API URL path into components.

    Will parse a URL or URL path that accords with either the
    parametrized or info API forms. Will raise an IIIFRequestError on
    failure.

    If self.identifier is set then url is assumed not to include the
    identifier.
    """
    # clear data first, remembering any preset identifier
    identifier = self.identifier
    self.clear()
    # url must start with baseurl if set (including slash)
    if (self.baseurl is not None):
        (path, num) = re.subn('^' + self.baseurl, '', url, 1)
        if (num != 1):
            raise IIIFRequestError(text="Request URL does not start with base URL")
        url = path
    # Break up by path segments, count to decide format
    segs = url.split('/')
    if (identifier is not None):
        segs.insert(0, identifier)
    elif (self.allow_slashes_in_identifier):
        segs = self._allow_slashes_in_identifier_munger(segs)
    # Now have segments with identifier as first:
    # 5 segments -> parametrized image request, 2 -> info request, 1 -> base URI
    if (len(segs) > 5):
        raise IIIFRequestPathError(text="Request URL (%s) has too many path segments" % url)
    elif (len(segs) == 5):
        # identifier/region/size/rotation/quality.format
        self.identifier = urlunquote(segs[0])
        self.region = urlunquote(segs[1])
        self.size = urlunquote(segs[2])
        self.rotation = urlunquote(segs[3])
        self.quality = self.strip_format(urlunquote(segs[4]))
        self.info = False
    elif (len(segs) == 2):
        # identifier/info.format -- Image Information request
        self.identifier = urlunquote(segs[0])
        info_name = self.strip_format(urlunquote(segs[1]))
        if (info_name != "info"):
            raise IIIFRequestError(text="Bad name for Image Information")
        if (self.api_version == '1.0'):
            # v1.0 allows both json and xml; later versions json only
            if (self.format not in ['json', 'xml']):
                raise IIIFRequestError(text="Invalid format for Image Information (json and xml allowed)")
        elif (self.format != 'json'):
            raise IIIFRequestError(text="Invalid format for Image Information (only json allowed)")
        self.info = True
    elif (len(segs) == 1):
        # Base URI only: record the identifier and signal a redirect to info
        self.identifier = urlunquote(segs[0])
        raise IIIFRequestBaseURI()
    else:
        raise IIIFRequestPathError(text="Bad number of path segments in request")
    return (self)
|
def args_match(m_args, m_kwargs, default, *args, **kwargs):
    """Check whether args/kwargs satisfy the given matchers.

    :param m_args: values to match args against
    :param m_kwargs: values to match kwargs against
    :param args: args to match
    :param kwargs: kwargs to match
    """
    if len(m_args) > len(args):
        return False
    for expected, actual in zip(m_args, args):
        result = arg_match(expected, actual, eq)
        if not result or result is InvalidArg:
            return False  # bail out on the first positional mismatch
    for name, expected in m_kwargs.items():
        # The matcher name may embed a comparator (e.g. 'x__gt').
        name, comparator = arg_comparitor(name)
        if not arg_match(expected, kwargs.get(name), comparator, default):
            return False  # bail out on the first keyword mismatch
    return True
|
def list_sensors(self, filter="", strategy=False, status="", use_python_identifiers=True, tuple=False, refresh=False):
    """List sensors available on this resource matching certain criteria.

    Parameters
    ----------
    filter : string, optional
        Filter each returned sensor's name against this regexp if specified.
        To ease the dichotomy between Python identifier names and actual
        sensor names, the default is to search on Python identifier names
        rather than KATCP sensor names, unless `use_python_identifiers`
        below is set to False. Note that the sensors of subordinate
        KATCPResource instances may have inconsistent names and Python
        identifiers, better to always search on Python identifiers in this
        case.
    strategy : {False, True}, optional
        Only list sensors with a set strategy if True
    status : string, optional
        Filter each returned sensor's status against this regexp if given
    use_python_identifiers : {True, False}, optional
        Match on python identifiers even if the KATCP name is available.
    tuple : {True, False}, optional, Default: False
        Return backwards compatible tuple instead of SensorResultTuples
    refresh : {True, False}, optional, Default: False
        If set the sensor values will be refreshed with get_value before
        returning the results.

    Returns
    -------
    sensors : list of SensorResultTuples, or list of tuples
        List of matching sensors presented as named tuples. The `object`
        field is the :class:`KATCPSensor` object associated with the sensor.
        Note that the name of the object may not match `name` if it
        originates from a subordinate device.
    """
    # NOTE(review): no implementation is present in this view of the source;
    # only the signature and docstring are visible here.
| |
def save(self, *args, **kwargs):
    """Extends save() method of Django models to check that the database name
    is not left blank.

    Note: 'blank=False' is only checked at a form-validation stage. A test
    using Fixtureless that tries to randomly create a CrossRefDB with an
    empty string name would unintentionally break the test.
    """
    # Guard clause: refuse to persist an empty name.
    if self.name == '':
        raise FieldError
    return super(CrossRefDB, self).save(*args, **kwargs)
|
def render_button(content, button_type=None, icon=None, button_class="btn-default", size="", href="", name=None, value=None, title=None, extra_classes="", id="",):
    """Render a button with content."""
    attrs = {}
    classes = add_css_class("btn", button_class)
    size = text_value(size).lower().strip()
    # Normalized size -> bootstrap class (None means no extra class).
    size_class_map = {
        "xs": "btn-xs",
        "sm": "btn-sm",
        "small": "btn-sm",
        "lg": "btn-lg",
        "large": "btn-lg",
        "md": None,
        "medium": None,
    }
    if size:
        if size not in size_class_map:
            raise BootstrapError('Parameter "size" should be "xs", "sm", "lg" or ' + 'empty ("{}" given).'.format(size))
        size_class = size_class_map[size]
        if size_class:
            classes = add_css_class(classes, size_class)
    if button_type:
        if button_type not in ("submit", "reset", "button", "link"):
            raise BootstrapError('Parameter "button_type" should be "submit", "reset", ' + '"button", "link" or empty ("{}" given).'.format(button_type))
        attrs["type"] = button_type
    classes = add_css_class(classes, extra_classes)
    attrs["class"] = classes
    icon_content = render_icon(icon) if icon else ""
    # An href turns the button into an anchor tag.
    if href:
        attrs["href"] = href
        tag = "a"
    else:
        tag = "button"
    for attr_name, attr_value in (("id", id), ("name", name), ("value", value), ("title", title)):
        if attr_value:
            attrs[attr_name] = attr_value
    return render_tag(tag, attrs=attrs, content=mark_safe(text_concat(icon_content, content, separator=" ")),)
|
def output_to_fd(self, fd):
    """Outputs the results of the scanner to a file descriptor (stdout counts:)

    :param fd: file
    :return: void
    """
    # One requirements-style line per discovered library.
    for lib in self.libraries_found:
        fd.write("%s==%s\n" % (lib.key, lib.version))
|
def line_is_continuation(line: str) -> bool:
    """Report whether a line is a Fortran-style continuation line.

    Args:
        line: the source line to inspect.

    Returns:
        True iff the first non-whitespace character is "&", else False.
    """
    return line.lstrip().startswith("&")
|
def attr(**context):
    """Decorator that adds attributes onto the decorated function.

    Added attributes can be accessed outside via the function's ``__dict__``
    (``func_dict`` in Python 2 terms).

    Example:
        @attr(category='io')
        def read(): ...
        read.category  # -> 'io'
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(*args, **kwargs):
            return func(*args, **kwargs)
        # Fix: the original returned `wraps(func)(decorator)`, which wrapped
        # the decorator itself instead of the call-through wrapper, so the
        # decorated function was never actually invoked correctly. Expose the
        # supplied attributes on the wrapper as the docstring promises.
        wrapped_func.__dict__.update(context)
        return wrapped_func
    return decorator
|
def update_order_by_id(cls, order_id, order, **kwargs):
    """Update Order.

    Update attributes of Order.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.update_order_by_id(order_id, order, async=True)
    >>> result = thread.get()

    :param async bool
    :param str order_id: ID of order to update. (required)
    :param Order order: Attributes of order to update. (required)
    :return: Order
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same helper; when the
    # 'async' flag is set the helper returns the request thread instead of
    # the unwrapped data.
    return cls._update_order_by_id_with_http_info(order_id, order, **kwargs)
|
def make_contiguous(im, keep_zeros=True):
    r"""Take an image with arbitrary greyscale values and adjust them to ensure
    all values fall in a contiguous range starting at 0.

    This function will handle negative numbers such that the most negative
    number will become 0, *unless* ``keep_zeros`` is ``True`` in which case it
    will become 1, and all 0's in the original image remain 0.

    Parameters
    ----------
    im : array_like
        An ND array containing greyscale values
    keep_zeros : Boolean
        If ``True`` (default) then 0 values remain 0, regardless of how the
        other numbers are adjusted. This is mostly relevant when the array
        contains negative numbers, and means that -1 will become +1, while
        0 values remain 0.

    Returns
    -------
    image : ND-array
        An ND-array the same size as ``im`` but with all values in contiguous
        order.

    Example
    -------
    >>> import numpy as np
    >>> im = np.array([[0, 2, 9], [6, 8, 3]])
    >>> im = make_contiguous(im)
    >>> print(im)
    [[0 1 5]
     [3 4 2]]
    """
    # Fix: the original used the scipy numpy-aliases (sp.copy, sp.unique, ...)
    # which have been removed from modern SciPy; use numpy directly.
    import numpy as np
    im = np.copy(im)
    if keep_zeros:
        # Temporarily push zeros below the current minimum so that after the
        # shift they land at 0 and everything else starts at 1.
        mask = (im == 0)
        im[mask] = im.min() - 1
    im = im - im.min()
    im_flat = im.flatten()
    im_vals = np.unique(im_flat)
    # Build a lookup table mapping each occurring value to its rank.
    im_map = np.zeros(shape=np.amax(im_flat) + 1)
    im_map[im_vals] = np.arange(0, im_vals.size)
    im_new = im_map[im_flat]
    im_new = np.reshape(im_new, newshape=np.shape(im))
    im_new = np.array(im_new, dtype=im_flat.dtype)
    return im_new
|
def minute_his(self, symbol='', datetime='20161209'):
    '''Historical minute-by-minute (intraday) data.

    :param symbol: stock symbol
    :param datetime: trading day, e.g. '20161209'
    :return: pd.DataFrame or None
    '''
    # The market (exchange) is derived from the symbol itself.
    market = get_stock_market(symbol)
    with self.client.connect(*self.bestip):
        raw = self.client.get_history_minute_time_data(int(market), symbol, datetime)
        return self.client.to_df(raw)
|
def set_status(self, status):
    """Updates the status text.

    Args:
        status (int): The offline/starting/online status of Modis
            0: offline, 1: starting, 2: online
    """
    # Map each status code to its label and background colour; anything
    # unrecognised falls back to a blank label on white.
    text, colour = {
        0: ("OFFLINE", "#EF9A9A"),
        1: ("STARTING", "#FFE082"),
        2: ("ONLINE", "#A5D6A7"),
    }.get(status, ("", "#FFFFFF"))
    self.status.set(text)
    self.statusbar.config(background=colour)
|
def _createFilenames(self, pixels=None):
    """Create a masked records array of all filenames for the given set of
    pixels and store the existence of those files in the mask values.

    Examples:
        f = getFilenames([1,2,3])
        # All possible catalog files
        f['catalog'].data
        # All existing catalog files
        f['catalog'][~f.mask['catalog']]
        # or
        f['catalog'].compressed()
        # All missing mask_1 files
        f['mask_1'][f.mask['mask_1']]
        # Pixels where all files exist
        f['pix'][~f.mask['pix']]

    Parameters:
        pixels : If pixels is None, grab all pixels of 'nside_catalog'.
    Returns:
        recarray : pixels and mask value
    """
    nside_catalog = self['coords']['nside_catalog']
    # Deprecated: ADW 2018-06-17
    # if nside_catalog is None:
    #     pixels = [None]
    if pixels is not None:
        # Accept either a scalar pixel index or a sequence of indices.
        pixels = [pixels] if np.isscalar(pixels) else pixels
    else:
        pixels = np.arange(hp.nside2npix(nside_catalog))
    npix = len(pixels)
    catalog_dir = self['catalog']['dirname']
    catalog_base = self['catalog']['basename']
    mask_dir = self['mask']['dirname']
    mask_base_1 = self['mask']['basename_1']
    mask_base_2 = self['mask']['basename_2']
    # data holds the filenames; mask flags files that do NOT exist on disk.
    data = np.ma.empty(npix, dtype=[('pix', int), ('catalog', object), ('mask_1', object), ('mask_2', object)])
    mask = np.ma.empty(npix, dtype=[('pix', bool), ('catalog', bool), ('mask_1', bool), ('mask_2', bool)])
    for ii, pix in enumerate(pixels):
        if pix is None:  # DEPRECATED: ADW 2018-06-17
            # This is not really being used anymore
            catalog = os.path.join(catalog_dir, catalog_base)
            mask_1 = os.path.join(mask_dir, mask_base_1)
            mask_2 = os.path.join(mask_dir, mask_base_2)
        else:
            # Basenames are %-templates taking the pixel index.
            catalog = os.path.join(catalog_dir, catalog_base % pix)
            mask_1 = os.path.join(mask_dir, mask_base_1 % pix)
            mask_2 = os.path.join(mask_dir, mask_base_2 % pix)
        data[ii]['pix'] = pix if pix is not None else -1
        data[ii]['catalog'] = catalog
        data[ii]['mask_1'] = mask_1
        data[ii]['mask_2'] = mask_2
        mask[ii]['catalog'] = not os.path.exists(catalog)
        mask[ii]['mask_1'] = not os.path.exists(mask_1)
        mask[ii]['mask_2'] = not os.path.exists(mask_2)
    # Warn when an entire category of files is missing.
    for name in ['catalog', 'mask_1', 'mask_2']:
        if np.all(mask[name]):
            logger.warn("All '%s' files masked" % name)
    # mask 'pix' if all files not present
    mask['pix'] = mask['catalog'] | mask['mask_1'] | mask['mask_2']
    if np.all(mask['pix']):
        logger.warn("All pixels masked")
    # return np.ma.mrecords.MaskedArray(data, mask, fill_value=[-1, None, None, None])
    # return np.ma.mrecords.MaskedArray(data, mask, fill_value=[-1, '', '', ''])
    return np.ma.MaskedArray(data, mask, fill_value=[-1, '', '', ''])
|
def process(source, target, rdfsonly, base=None, logger=logging):
    '''Prepare a statement into a triple ready for rdflib graph'''
    for link in source.match():
        s, p, o = link[:3]
        # Skip docheader statements
        if s == (base or '') + '@docheader':
            continue
        # Rewrite well-known resources to their mapped IRIs.
        if p in RESOURCE_MAPPING:
            p = RESOURCE_MAPPING[p]
        if o in RESOURCE_MAPPING:
            o = RESOURCE_MAPPING[o]
        if p == VERSA_BASEIRI + 'refines':
            # 'refines' becomes subClassOf or subPropertyOf depending on the
            # subject's declared type.
            tlinks = list(source.match(s, TYPE_REL))
            if tlinks:
                if tlinks[0][TARGET] == VERSA_BASEIRI + 'Resource':
                    p = I(RDFS_NAMESPACE + 'subClassOf')
                elif tlinks[0][TARGET] == VERSA_BASEIRI + 'Property':
                    p = I(RDFS_NAMESPACE + 'subPropertyOf')
        if p == VERSA_BASEIRI + 'properties':
            # 'properties' inverts into an rdfs:domain triple on the object.
            suri = I(iri.absolutize(s, base)) if base else s
            target.add((URIRef(o), URIRef(RDFS_NAMESPACE + 'domain'), URIRef(suri)))
            continue
        if p == VERSA_BASEIRI + 'value':
            # 'value' becomes rdfs:range, except for the Literal/IRI markers.
            if o not in ['Literal', 'IRI']:
                ouri = I(iri.absolutize(o, base)) if base else o
                target.add((URIRef(s), URIRef(RDFS_NAMESPACE + 'range'), URIRef(ouri)))
                # NOTE(review): placement of this continue (inner vs. outer
                # block) is reconstructed from flattened source -- confirm
                # against the upstream repository.
                continue
        s = URIRef(s)
        # Translate v:type to rdf:type
        p = RDF.type if p == TYPE_REL else URIRef(p)
        o = URIRef(o) if isinstance(o, I) else Literal(o)
        # In rdfs-only mode, emit only triples in the RDF/RDFS namespaces.
        if not rdfsonly or p.startswith(RDF_NAMESPACE) or p.startswith(RDFS_NAMESPACE):
            target.add((s, p, o))
    return
|
def get_from_sources(self, index, doc_type, document_id):
    """Return the locally stored source for a document, or ``{}``."""
    by_index = self.sources.get(index, {})
    by_type = by_index.get(doc_type, {})
    return by_type.get(document_id, {})
|
def linkify(self, hosts, timeperiods):
    """Create links between objects::

        * hostdependency -> host
        * hostdependency -> timeperiods

    :param hosts: hosts to link
    :type hosts: alignak.objects.host.Hosts
    :param timeperiods: timeperiods to link
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :return: None
    """
    # Resolve dependency -> host and dependency -> timeperiod references,
    # then build the reverse host -> dependency links.
    self.linkify_hd_by_h(hosts)
    self.linkify_hd_by_tp(timeperiods)
    self.linkify_h_by_hd(hosts)
|
def save_to_rst(prefix, data):
    """Save an RST file with the given prefix, prepending the GPL header."""
    target_path = find_full_name(prefix)
    with open(target_path, "w") as out:
        out.write(full_gpl_for_rst)
        out.write(data)
|
def close(self):
    '''Persist the cookie jar to disk when a save path was configured.'''
    if not self._save_filename:
        return
    self._cookie_jar.save(
        self._save_filename,
        ignore_discard=self._keep_session_cookies,
    )
|
def delete_dscp_marking_rule(self, rule, policy):
    """Delete the DSCP marking rule *rule* from QoS policy *policy*."""
    rule_path = self.qos_dscp_marking_rule_path % (policy, rule)
    return self.delete(rule_path)
|
def get_friends(self, **params):
    """Return a UserList of Redditors with whom the user is friends."""
    friends_url = self.config['friends']
    response = self.request_json(friends_url, params=params)
    return response[0]
|
def notify_workers(self, worker_ids, subject, message_text):
    """Send a text message to the given workers.

    :return: result of the NotifyWorkers API request
    """
    request_params = {'Subject': subject, 'MessageText': message_text}
    # Expand the worker id list into WorkerId.N request parameters.
    self.build_list_params(request_params, worker_ids, 'WorkerId')
    return self._process_request('NotifyWorkers', request_params)
|
def prefix(self, keys):
    """Set a new prefix key sequence and re-install its key binding.

    :param keys: tuple of key names forming the prefix
    :raises TypeError: if *keys* is not a tuple
    """
    # The original used ``assert isinstance(...)`` for input validation,
    # which is silently stripped under ``python -O``; raise explicitly.
    if not isinstance(keys, tuple):
        raise TypeError('prefix keys must be a tuple, got %r' % (keys,))
    self._prefix = keys
    self._load_prefix_binding()
|
def _query_api ( self , method , url , fields = None , extra_headers = None , req_body = None ) :
"""Abstracts http queries to the API"""
|
with self . auth . authenticate ( ) as token :
logging . debug ( 'PA Authentication returned token %s' , token )
headers = { 'Authorization' : 'Bearer %s' % ( token , ) , 'Realm' : self . auth_realm }
if extra_headers is not None :
headers . update ( extra_headers )
logging . info ( '[%s] %s' , method , url )
if req_body is not None :
response = self . http . request ( method , url , fields , headers , body = req_body )
else :
response = self . http . request ( method , url , fields , headers )
if response . status != 200 :
print ( response . data )
logging . warning ( 'Got non-200 HTTP status from API: %d' , response . status )
raise ApiQueryError ( "Failed to get API data" , response . status )
return json . loads ( response . data . decode ( ) )
|
def alias_tool(self, context_name, tool_name, tool_alias):
    """Register an alias for a specific tool.

    Note that a tool alias takes precedence over a context prefix/suffix.

    Args:
        context_name (str): Context containing the tool.
        tool_name (str): Name of tool to alias.
        tool_alias (str): Alias to give the tool.

    Raises:
        SuiteError: if the tool already has an alias in this context.
    """
    context_data = self._context(context_name)
    tool_aliases = context_data["tool_aliases"]
    if tool_name in tool_aliases:
        raise SuiteError("Tool %r in context %r is already aliased to %r"
                         % (tool_name, context_name, tool_aliases[tool_name]))
    self._validate_tool(context_name, tool_name)
    tool_aliases[tool_name] = tool_alias
    self._flush_tools()
|
def get_instance_attribute(self, instance_id, attribute):
    """Get a single attribute of an EC2 instance.

    :type instance_id: string
    :param instance_id: The Amazon id of the instance

    :type attribute: string
    :param attribute: The attribute to describe. Valid choices:
        instanceType | kernel | ramdisk | userData |
        disableApiTermination | instanceInitiatedShutdownBehavior |
        rootDeviceName | blockDeviceMapping

    :rtype: :class:`boto.ec2.image.InstanceAttribute`
    :return: An InstanceAttribute object for the requested attribute
    """
    request_params = {'InstanceId': instance_id}
    if attribute:
        request_params['Attribute'] = attribute
    return self.get_object('DescribeInstanceAttribute', request_params,
                           InstanceAttribute, verb='POST')
|
def is_valid(cls, arg):
    """Return True if *arg* is a valid value for the class.

    Any string is accepted here; a string that is wrong for the
    enumeration will still make the encoding fail later.
    """
    if isinstance(arg, basestring):
        return True
    return isinstance(arg, (int, long)) and arg >= 0
|
def get_or_create_modification(self, graph: BELGraph, node: BaseEntity) -> Optional[List[Modification]]:
    """Create (or fetch cached) :class:`Modification` objects for a node.

    Builds a list of modification dicts from the node's FUSION or VARIANTS
    data, then maps each through the in-memory object cache and database so
    repeated modifications share one ``Modification`` instance.

    :param graph: the BEL graph, used to resolve namespace URLs
    :param node: describes the given node and contains its variant information
    :return: a list of Modification objects belonging to the node, or
        ``None`` when a required namespace or entry is unavailable (the
        node should then be skipped)
    """
    modification_list = []
    if FUSION in node:
        mod_type = FUSION
        node = node[FUSION]
        # Resolve the 3' fusion partner's namespace URL and entry.
        p3_namespace_url = graph.namespace_url[node[PARTNER_3P][NAMESPACE]]
        if p3_namespace_url in graph.uncached_namespaces:
            log.warning('uncached namespace %s in fusion()', p3_namespace_url)
            return
        p3_name = node[PARTNER_3P][NAME]
        p3_namespace_entry = self.get_namespace_entry(p3_namespace_url, p3_name)
        if p3_namespace_entry is None:
            log.warning('Could not find namespace entry %s %s', p3_namespace_url, p3_name)
            return
        # FIXME raise?
        # Resolve the 5' fusion partner's namespace URL and entry.
        p5_namespace_url = graph.namespace_url[node[PARTNER_5P][NAMESPACE]]
        if p5_namespace_url in graph.uncached_namespaces:
            log.warning('uncached namespace %s in fusion()', p5_namespace_url)
            return
        p5_name = node[PARTNER_5P][NAME]
        p5_namespace_entry = self.get_namespace_entry(p5_namespace_url, p5_name)
        if p5_namespace_entry is None:
            log.warning('Could not find namespace entry %s %s', p5_namespace_url, p5_name)
            return
        # FIXME raise?
        fusion_dict = {
            'type': mod_type,
            'p3_partner': p3_namespace_entry,
            'p5_partner': p5_namespace_entry,
        }
        # Optional 3' range information (reference/start/stop).
        node_range_3p = node.get(RANGE_3P)
        if node_range_3p and FUSION_REFERENCE in node_range_3p:
            fusion_dict.update({
                'p3_reference': node_range_3p[FUSION_REFERENCE],
                'p3_start': node_range_3p[FUSION_START],
                'p3_stop': node_range_3p[FUSION_STOP],
            })
        # Optional 5' range information (reference/start/stop).
        node_range_5p = node.get(RANGE_5P)
        if node_range_5p and FUSION_REFERENCE in node_range_5p:
            fusion_dict.update({
                'p5_reference': node_range_5p[FUSION_REFERENCE],
                'p5_start': node_range_5p[FUSION_START],
                'p5_stop': node_range_5p[FUSION_STOP],
            })
        modification_list.append(fusion_dict)
    else:
        # Non-fusion nodes: build one dict per variant, keyed by its KIND.
        for variant in node[VARIANTS]:
            mod_type = variant[KIND].strip()
            if mod_type == HGVS:
                modification_list.append({'type': mod_type, 'variantString': variant[IDENTIFIER]})
            elif mod_type == FRAGMENT:
                if FRAGMENT_MISSING in variant:
                    modification_list.append({'type': mod_type, })
                else:
                    modification_list.append({'type': mod_type, 'p3_start': variant[FRAGMENT_START], 'p3_stop': variant[FRAGMENT_STOP]})
            elif mod_type in {GMOD, PMOD}:
                variant_identifier = variant[IDENTIFIER]
                namespace_url = _normalize_url(graph, variant_identifier[NAMESPACE])
                if namespace_url in graph.uncached_namespaces:
                    # NOTE(review): message says "fusion()" but this is the
                    # variant branch -- looks like copy/pasted log text.
                    log.warning('uncached namespace %s in fusion()', namespace_url)
                    return
                # NOTE(review): unlike the fusion branch above, mod_entry is
                # not checked for None here -- confirm this is intended.
                mod_entry = self.get_namespace_entry(namespace_url, variant_identifier[NAME])
                if mod_type == GMOD:
                    modification_list.append({'type': mod_type, 'identifier': mod_entry})
                if mod_type == PMOD:
                    modification_list.append({'type': mod_type, 'identifier': mod_entry, 'residue': variant[PMOD_CODE].strip() if PMOD_CODE in variant else None, 'position': variant[PMOD_POSITION] if PMOD_POSITION in variant else None})
    # De-duplicate via the in-memory cache, falling back to the database;
    # only brand-new modifications get a sha512 and a fresh Modification row.
    modifications = []
    for modification in modification_list:
        mod_hash = hash_dump(modification)
        mod = self.object_cache_modification.get(mod_hash)
        if mod is None:
            mod = self.get_modification_by_hash(mod_hash)
            if not mod:
                modification['sha512'] = mod_hash
                mod = Modification(**modification)
            self.object_cache_modification[mod_hash] = mod
        modifications.append(mod)
    return modifications
|
def start_event(self):
    """Called by the event loop when it is started.

    Creates the output frame pools (if used) and then calls
    :py:meth:`on_start`. Creating the pools at start time allows their
    size to be configured before the component runs; any exception from
    ``on_start`` is logged and converted into ``StopIteration``.
    """
    if self.with_outframe_pool:
        # One object pool per declared output, sized from the config.
        self.update_config()
        pool_len = self.config['outframe_pool_len']
        for output_name in self.outputs:
            self.outframe_pool[output_name] = ObjectPool(Frame, self.new_frame, pool_len)
    try:
        self.on_start()
    except Exception as err:
        self.logger.exception(err)
        raise StopIteration()
|
def _parse_arguments ( ) :
"""Constructs and parses the command line arguments for eg . Returns an args
object as returned by parser . parse _ args ( ) ."""
|
parser = argparse . ArgumentParser ( description = 'eg provides examples of common command usage.' )
parser . add_argument ( '-v' , '--version' , action = 'store_true' , help = 'Display version information about eg' )
parser . add_argument ( '-f' , '--config-file' , help = 'Path to the .egrc file, if it is not in the default location.' )
parser . add_argument ( '-e' , '--edit' , action = 'store_true' , help = """Edit the custom examples for the given command. If editor-cmd
is not set in your .egrc and $VISUAL and $EDITOR are not set, prints a
message and does nothing.""" )
parser . add_argument ( '--examples-dir' , help = 'The location to the examples/ dir that ships with eg' )
parser . add_argument ( '-c' , '--custom-dir' , help = 'Path to a directory containing user-defined examples.' )
parser . add_argument ( '-p' , '--pager-cmd' , help = 'String literal that will be invoked to page output.' )
parser . add_argument ( '-l' , '--list' , action = 'store_true' , help = 'Show all the programs with eg entries.' )
parser . add_argument ( '--color' , action = 'store_true' , dest = 'use_color' , default = None , help = 'Colorize output.' )
parser . add_argument ( '-s' , '--squeeze' , action = 'store_true' , default = None , help = 'Show fewer blank lines in output.' )
parser . add_argument ( '--no-color' , action = 'store_false' , dest = 'use_color' , help = 'Do not colorize output.' )
parser . add_argument ( 'program' , nargs = '?' , help = 'The program for which to display examples.' )
args = parser . parse_args ( )
if len ( sys . argv ) < 2 : # Too few arguments . We can ' t specify this using argparse alone , so we
# have to manually check .
parser . print_help ( )
parser . exit ( )
elif not args . version and not args . list and not args . program :
parser . error ( _MSG_BAD_ARGS )
else :
return args
|
def p_binary_operators(self, p):
    """conditional : conditional AND condition
    | conditional OR condition
    condition : condition LTE expression
    | condition GTE expression
    | condition LT expression
    | condition GT expression
    | condition EQ expression
    expression : expression ADD term
    | expression SUB term
    term : term MUL factor
    | term DIV factor
    | term POW factor
    | term MOD factor"""
    # NOTE: the docstring above is NOT documentation -- PLY (yacc) parses it
    # as the grammar specification for these productions; do not edit it.
    # Build the result: wrap the matched left/right operands in a deferred
    # Instruction whose operator is looked up from self.binary_operators by
    # the operator lexeme p[2].
    p[0] = Instruction("op(left, right)", context={'op': self.binary_operators[p[2]], 'left': p[1], 'right': p[3]})
|
def get_modifications_indirect(self):
    """Extract indirect Modification INDRA Statements.

    Finds ONT::INCREASE events and ONT::CAUSE causal connectives whose
    affected/outcome event is itself a modification event, then attaches
    the outer event's agent as the enzyme of the inner modification and
    marks the resulting evidence as indirect.
    """
    # Get all the specific mod types
    mod_event_types = list(ont_to_mod_type.keys())
    # Add ONT::PTMs as a special case
    mod_event_types += ['ONT::PTM']

    def get_increase_events(mod_event_types):
        # ONT::INCREASE events whose :AFFECTED argument resolves to a
        # modification event.
        mod_events = []
        events = self.tree.findall("EVENT/[type='ONT::INCREASE']")
        for event in events:
            affected = event.find(".//*[@role=':AFFECTED']")
            if affected is None:
                continue
            affected_id = affected.attrib.get('id')
            if not affected_id:
                continue
            pattern = "EVENT/[@id='%s']" % affected_id
            affected_event = self.tree.find(pattern)
            if affected_event is not None:
                affected_type = affected_event.find('type')
                if affected_type is not None and affected_type.text in mod_event_types:
                    mod_events.append(event)
        return mod_events

    def get_cause_events(mod_event_types):
        # ONT::CAUSE causal connectives whose :OUTCOME resolves to a
        # modification event.
        mod_events = []
        ccs = self.tree.findall("CC/[type='ONT::CAUSE']")
        for cc in ccs:
            outcome = cc.find(".//*[@role=':OUTCOME']")
            if outcome is None:
                continue
            outcome_id = outcome.attrib.get('id')
            if not outcome_id:
                continue
            pattern = "EVENT/[@id='%s']" % outcome_id
            outcome_event = self.tree.find(pattern)
            if outcome_event is not None:
                outcome_type = outcome_event.find('type')
                if outcome_type is not None and outcome_type.text in mod_event_types:
                    mod_events.append(cc)
        return mod_events

    mod_events = get_increase_events(mod_event_types)
    mod_events += get_cause_events(mod_event_types)
    # Iterate over all modification events
    for event in mod_events:
        event_id = event.attrib['id']
        if event_id in self._static_events:
            continue
        event_type = _get_type(event)
        # Get enzyme Agent: prefer :AGENT, fall back to :FACTOR.
        enzyme = event.find(".//*[@role=':AGENT']")
        if enzyme is None:
            enzyme = event.find(".//*[@role=':FACTOR']")
            if enzyme is None:
                # NOTE(review): this 'return' aborts the whole loop over the
                # remaining events; 'continue' may have been intended.
                return
        enzyme_id = enzyme.attrib.get('id')
        if enzyme_id is None:
            continue
        enzyme_agent = self._get_agent_by_id(enzyme_id, event_id)
        # Find the inner (affected/outcome) modification event.
        affected_event_tag = event.find(".//*[@role=':AFFECTED']")
        if affected_event_tag is None:
            affected_event_tag = event.find(".//*[@role=':OUTCOME']")
            if affected_event_tag is None:
                # NOTE(review): same early-return-vs-continue concern here.
                return
        affected_id = affected_event_tag.attrib.get('id')
        if not affected_id:
            return
        affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id)
        if affected_event is None:
            return
        # Iterate over all enzyme agents if there are multiple ones
        for enz_t in _agent_list_product((enzyme_agent, )):
            # enz_t comes out as a tuple so we need to take the first
            # element here
            enz = enz_t[0]
            # Note that we re-run the extraction code here potentially
            # multiple times. This is mainly to make sure each Statement
            # object created here is independent (i.e. has different UUIDs)
            # without having to manipulate it after creation.
            stmts = self._get_modification_event(affected_event)
            stmts_to_make = []
            if stmts:
                for stmt in stmts:
                    # The affected event should have no enzyme but should
                    # have a substrate
                    if stmt.enz is None and stmt.sub is not None:
                        stmts_to_make.append(stmt)
            for stmt in stmts_to_make:
                stmt.enz = enz
                for ev in stmt.evidence:
                    # Indirect extraction: mark the evidence accordingly.
                    ev.epistemics['direct'] = False
                self.statements.append(stmt)
        self._add_extracted(event_type, event.attrib['id'])
        self._add_extracted(affected_event.find('type').text, affected_id)
|
def shutdown(self):
    """Send the shutdown command and wait for the server process to exit.

    The command is attempted twice because the first try can fail while
    an election is in progress. Raises ServersError when the process is
    still alive after all attempts.
    """
    # Nothing to do if this server has already exited.
    if not process.proc_alive(self.proc):
        return
    logger.info("Attempting to connect to %s", self.hostname)
    client = self.connection
    attempts = 2
    for _attempt in range(attempts):
        logger.info("Attempting to send shutdown command to %s", self.hostname)
        try:
            client.admin.command("shutdown", force=True)
        except ConnectionFailure:
            # A successful shutdown closes the connection, but a
            # connection error alone does not prove the shutdown worked.
            pass
        try:
            # Wait for the server to exit, otherwise rerun the command.
            return process.wait_mprocess(self.proc, 5)
        except TimeoutError as exc:
            logger.info("Timed out waiting on process: %s", exc)
            continue
    raise ServersError("Server %s failed to shutdown after %s attempts" % (self.hostname, attempts))
|
def pr(self, script=False, expanded=False, verbose=False) -> str:
    """Represent a HistoryItem in a pretty fashion suitable for printing.

    When ``verbose`` is True, ``script`` and ``expanded`` are ignored.

    :return: pretty print string version of a HistoryItem
    """
    if verbose:
        formatted = self.listformat.format(self.idx, str(self).rstrip())
        if self != self.expanded:
            formatted += self.ex_listformat.format(self.idx, self.expanded.rstrip())
        return formatted

    show_expanded = expanded or self.statement.multiline_command
    if script:
        # display without entry numbers
        return self.expanded.rstrip() if show_expanded else str(self)
    # display a numbered list
    body = self.expanded.rstrip() if show_expanded else str(self).rstrip()
    return self.listformat.format(self.idx, body)
|
def xmlrpc_method(**kwargs):
    """Support multiple endpoints serving the same views by chaining calls
    to xmlrpc_method."""
    # Defaults shared by every XML-RPC endpoint registration.
    kwargs.update(
        require_csrf=False,
        require_methods=["POST"],
        decorator=(submit_xmlrpc_metrics(method=kwargs["method"]),),
        mapper=TypedMapplyViewMapper,
    )

    def decorator(view):
        # Apply pypi innermost, then pypi_slash, then RPC2 outermost --
        # the same composition order as chaining the three by hand.
        wrapped = view
        for endpoint in ("pypi", "pypi_slash", "RPC2"):
            wrapped = _xmlrpc_method(endpoint=endpoint, **kwargs)(wrapped)
        return wrapped

    return decorator
|
def in_notebook():
    """Check to see if we are in an IPython or Jupyter notebook.

    Returns
    -------
    in_notebook : bool
        True if we appear to be running inside a notebook.
    """
    try:
        # get_ipython() only exists when running under IPython; a plain
        # interpreter raises NameError, which means "not a notebook".
        ipy = get_ipython()  # NOQA
        # we only want to render rich output in notebooks;
        # in terminals we definitely do not want to output HTML
        name = str(ipy.__class__).lower()
        terminal = 'terminal' in name
        # spyder uses ZMQshell, and can appear to be a notebook
        spyder = '_' in os.environ and 'spyder' in os.environ['_']
        # assume a notebook when not in a terminal and not run by spyder
        return (not terminal) and (not spyder)
    except Exception:
        # Previously `except BaseException`, which would also swallow
        # KeyboardInterrupt/SystemExit; Exception covers the NameError /
        # AttributeError cases that actually occur here.
        return False
|
def _cast_to ( e , solution , cast_to ) :
"""Casts a solution for the given expression to type ` cast _ to ` .
: param e : The expression ` value ` is a solution for
: param value : The solution to be cast
: param cast _ to : The type ` value ` should be cast to . Must be one of the currently supported types ( bytes | int )
: raise ValueError : If cast _ to is a currently unsupported cast target .
: return : The value of ` solution ` cast to type ` cast _ to `"""
|
if cast_to is None :
return solution
if type ( solution ) is bool :
if cast_to is bytes :
return bytes ( [ int ( solution ) ] )
elif cast_to is int :
return int ( solution )
elif type ( solution ) is float :
solution = _concrete_value ( claripy . FPV ( solution , claripy . fp . FSort . from_size ( len ( e ) ) ) . raw_to_bv ( ) )
if cast_to is bytes :
if len ( e ) == 0 :
return b""
return binascii . unhexlify ( '{:x}' . format ( solution ) . zfill ( len ( e ) // 4 ) )
if cast_to is not int :
raise ValueError ( "cast_to parameter {!r} is not a valid cast target, currently supported are only int and bytes!" . format ( cast_to ) )
return solution
|
def fovray(inst, raydir, rframe, abcorr, observer, et):
    """Determine if a specified ray is within the field-of-view (FOV) of a
    specified instrument at a given time.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/fovray_c.html

    :param inst: Name or ID code string of the instrument.
    :type inst: str
    :param raydir: Ray's direction vector.
    :type raydir: 3-Element Array of floats
    :param rframe: Body-fixed, body-centered frame for target body.
    :type rframe: str
    :param abcorr: Aberration correction flag.
    :type abcorr: str
    :param observer: Name or ID code string of the observer.
    :type observer: str
    :param et: Time of the observation (seconds past J2000).
    :type et: float
    :return: Visibility flag
    :rtype: bool
    """
    # Marshal the Python arguments into CSPICE-compatible ctypes values.
    inst_p = stypes.stringToCharP(inst)
    dir_vec = stypes.toDoubleVector(raydir)
    rframe_p = stypes.stringToCharP(rframe)
    abcorr_p = stypes.stringToCharP(abcorr)
    observer_p = stypes.stringToCharP(observer)
    et_c = ctypes.c_double(et)
    visible_flag = ctypes.c_int()
    libspice.fovray_c(inst_p, dir_vec, rframe_p, abcorr_p, observer_p,
                      ctypes.byref(et_c), ctypes.byref(visible_flag))
    return bool(visible_flag.value)
|
def _get_pltdotstrs(self, hdrgos_usr, **kws):
    """Plot GO DAGs for each group found under a specified header GO.

    Returns the sorted, de-duplicated list of DOT graph strings and
    writes timing/summary information to stdout.
    """
    import datetime
    import timeit
    started = timeit.default_timer()
    dotstrs_all = []
    # Each group of GOs is formed under a single "header GO".
    hdrgo2usrgos, go2obj = self._get_plt_data(hdrgos_usr)
    for hdrgo, usrgos in hdrgo2usrgos.items():
        # Build a fresh args object per header so per-plot state is isolated.
        dotstrs_all.extend(self._get_dotgraphs(
            hdrgo, usrgos,
            pltargs=PltGroupedGosArgs(self.grprobj, **kws),
            go2parentids=get_go2parents_go2obj(go2obj)))
    elapsed = str(datetime.timedelta(seconds=(timeit.default_timer() - started)))
    sys.stdout.write("\nElapsed HMS: {HMS} to write ".format(HMS=elapsed))
    sys.stdout.write("{P:5,} GO DAG plots for {H:>5,} GO grouping headers\n".format(
        H=len(hdrgo2usrgos), P=len(dotstrs_all)))
    return sorted(set(dotstrs_all))
|
def analyze(fqdn, result, argl, argd):
    """Analyze the result from calling the method with the specified FQDN.

    Args:
        fqdn (str): fully-qualified name of the method that was called.
        result: result of calling the method with `fqdn`.
        argl (tuple): positional arguments passed to the method call.
        argd (dict): keyword arguments passed to the method call.
    """
    package = fqdn.split('.')[0]
    # Lazily load this package's analysis handlers on first use.
    if package not in _methods:
        _load_methods(package)
    handlers = _methods[package]
    if handlers is not None and fqdn in handlers:
        return handlers[fqdn](fqdn, result, *argl, **argd)
|
def seek(self, offset, from_what=os.SEEK_SET):
    '''Seek to *offset* bytes from a reference point in the file.

    :param offset: Position in the file to seek to
    :type offset: integer
    :param from_what: reference point: ``os.SEEK_SET`` (beginning,
        default), ``os.SEEK_CUR`` (current position) or ``os.SEEK_END``
        (end of file)

    This is a no-op if the file is open for writing. When the target
    position still lies inside the local read buffer, only the buffer
    cursor is moved; otherwise the buffer and any in-flight request
    iterators are reset.
    '''
    if from_what == os.SEEK_SET:
        reference_pos = 0
    elif from_what == os.SEEK_CUR:
        reference_pos = self._pos
    elif from_what == os.SEEK_END:
        if self._file_length is None:  # was `== None`; identity check is correct here
            # Lazily fetch and cache the remote file's size.
            desc = self.describe()
            self._file_length = int(desc["size"])
        reference_pos = self._file_length
    else:
        raise DXFileError("Invalid value supplied for from_what")

    orig_pos = self._pos
    self._pos = reference_pos + offset

    # NOTE(review): the buffer checks below compare the raw *offset* with
    # orig_pos, which only corresponds to an absolute position for
    # SEEK_SET -- confirm behavior for SEEK_CUR/SEEK_END is intended.
    in_buf = False
    orig_buf_pos = self._read_buf.tell()
    if offset < orig_pos:
        if orig_buf_pos > orig_pos - offset:
            # offset is less than original position but within the buffer
            in_buf = True
    else:
        buf_len = dxpy.utils.string_buffer_length(self._read_buf)
        if buf_len - orig_buf_pos > offset - orig_pos:
            # offset is greater than original position but within the buffer
            in_buf = True

    if in_buf:
        # offset is within the buffer (at least one byte following the
        # offset can be read directly out of the buffer)
        self._read_buf.seek(orig_buf_pos - orig_pos + offset)
    elif offset == orig_pos:
        # This seek is a no-op (the cursor is just past the end of the
        # read buffer and coincides with the desired seek position). We
        # don't have the data ready, but the request for the data
        # starting here is already in flight. Detecting this case helps
        # to optimize for sequential read access patterns.
        pass
    else:
        # offset is outside the buffer -- reset buffer and queues.
        # This is the failsafe behavior.
        self._read_buf = BytesIO()
        # TODO: if the offset is within the next response(s), don't throw out the queues
        self._request_iterator, self._response_iterator = None, None
|
def warn(self, cmd, desc=''):
    '''Render *cmd*/*desc* using the warning color style.'''
    return self._label_desc(cmd, desc, self.warn_color)
|
def load_related_model(self, name, load_only=None, dont_load=None):
    '''Load the :class:`ForeignKey` field ``name`` when it belongs to this
    model's fields and the related object is not already loaded. Used by
    the lazy loading mechanism of one-to-many relationships.

    :parameter name: the :attr:`Field.name` of the :class:`ForeignKey` to load.
    :parameter load_only: optional fields to load on the related object.
    :parameter dont_load: optional fields NOT to load on the related object.
    :return: the related :class:`StdModel` instance.
    '''
    field = self._meta.dfields.get(name)
    if not field:
        raise ValueError('Field "%s" not available' % name)
    if field.type != 'related object':
        raise ValueError('Field "%s" not a foreign key' % name)
    return self._load_related_model(field, load_only, dont_load)
|
def expect_column_values_to_match_regex(self, column, regex, mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None):
    """Expect column entries to be strings matching a given regular expression.

    Valid matches can be found anywhere in the string: "[at]+" matches
    "cat", "hat", "aa", "a" and "t", but not "fish" or "dog". This is a
    :func:`column_map_expectation
    <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`.

    Args:
        column (str): The column name.
        regex (str): The regular expression the column entries should match.

    Keyword Args:
        mostly (None or a float between 0 and 1): Return `"success": True`
            if at least mostly percent of values match. See :ref:`mostly`.

    Other Parameters:
        result_format (str or None): `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`,
            or `SUMMARY`. See :ref:`result_format <result_format>`.
        include_config (boolean): If True, include the expectation config
            in the result object. See :ref:`include_config`.
        catch_exceptions (boolean or None): If True, catch exceptions and
            include them in the result. See :ref:`catch_exceptions`.
        meta (dict or None): JSON-serializable dict included unmodified in
            the output. See :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object. Exact fields vary
        with :ref:`result_format <result_format>`, :ref:`include_config`,
        :ref:`catch_exceptions` and :ref:`meta`.

    See Also:
        expect_column_values_to_not_match_regex
        expect_column_values_to_match_regex_list
    """
    # Abstract expectation: concrete Dataset backends must override this.
    raise NotImplementedError
|
def train(ds, ii):
    """Run the training step on dataset *ds*.

    Fits a 2nd-order Cannon model, saves its coefficients, scatters,
    chi-squares and pivots to npz files indexed by *ii*, writes the
    leading-coefficients diagnostic plot, and returns the trained model.
    """
    print("Loading model")
    cannon = model.CannonModel(2)
    print("Training...")
    cannon.fit(ds)
    # Persist every fitted array, one npz file per attribute.
    for suffix, payload in (("coeffs", cannon.coeffs),
                            ("scatters", cannon.scatters),
                            ("chisqs", cannon.chisqs),
                            ("pivots", cannon.pivots)):
        np.savez("./ex%s_%s.npz" % (ii, suffix), payload)
    fig = cannon.diagnostics_leading_coeffs(ds)
    plt.savefig("ex%s_leading_coeffs.png" % ii)
    # m.diagnostics_leading_coeffs_triangle(ds)
    # m.diagnostics_plot_chisq(ds)
    return cannon
|
def getSpec(cls):
    """Return base spec for this region. See base class method for more info."""
    spec = {
        "description": CoordinateSensorRegion.__doc__,
        "singleNodeOnly": True,
        "inputs": {},  # input data is added to queue via "addDataToQueue" command
        "outputs": {
            "dataOut": {
                "description": "Encoded coordinate SDR.",
                "dataType": "Real32",
                "count": 0,
                "regionLevel": True,
                "isDefaultOutput": True,
            },
            "resetOut": {
                "description": "0/1 reset flag output.",
                "dataType": "UInt32",
                "count": 1,
                "regionLevel": True,
                "isDefaultOutput": False,
            },
            "sequenceIdOut": {
                "description": "Sequence ID",
                "dataType": "UInt32",
                "count": 1,
                "regionLevel": True,
                "isDefaultOutput": False,
            },
        },
        "parameters": {
            "activeBits": {
                "description": "The number of bits that are set to encode a single "
                               "coordinate value",
                "dataType": "uint",
                "accessMode": "ReadWrite",
                "count": 1,
                "defaultValue": 21
            },
            "outputWidth": {
                "description": "Size of output vector",
                "dataType": "UInt32",
                "accessMode": "ReadWrite",
                "count": 1,
                "defaultValue": 1000
            },
            "radius": {
                "description": "Radius around 'coordinate'",
                "dataType": "UInt32",
                "accessMode": "ReadWrite",
                "count": 1,
                "defaultValue": 2
            },
            "verbosity": {
                "description": "Verbosity level",
                "dataType": "UInt32",
                "accessMode": "ReadWrite",
                "count": 1
            },
        },
        # Commands expose their own docstrings as descriptions.
        "commands": {
            "addDataToQueue": {
                "description": CoordinateSensorRegion.addDataToQueue.__doc__,
            },
            "addResetToQueue": {
                "description": CoordinateSensorRegion.addResetToQueue.__doc__,
            }
        },
    }
    return spec
|
def _try_convert_data ( self , name , data , use_dtypes = True , convert_dates = True ) :
"""try to parse a ndarray like into a column by inferring dtype"""
|
# don ' t try to coerce , unless a force conversion
if use_dtypes :
if self . dtype is False :
return data , False
elif self . dtype is True :
pass
else : # dtype to force
dtype = ( self . dtype . get ( name ) if isinstance ( self . dtype , dict ) else self . dtype )
if dtype is not None :
try :
dtype = np . dtype ( dtype )
return data . astype ( dtype ) , True
except :
return data , False
if convert_dates :
new_data , result = self . _try_convert_to_date ( data )
if result :
return new_data , True
result = False
if data . dtype == 'object' : # try float
try :
data = data . astype ( 'float64' )
result = True
except :
pass
if data . dtype . kind == 'f' :
if data . dtype != 'float64' : # coerce floats to 64
try :
data = data . astype ( 'float64' )
result = True
except :
pass
# do ' t coerce 0 - len data
if len ( data ) and ( data . dtype == 'float' or data . dtype == 'object' ) : # coerce ints if we can
try :
new_data = data . astype ( 'int64' )
if ( new_data == data ) . all ( ) :
data = new_data
result = True
except :
pass
# coerce ints to 64
if data . dtype == 'int' : # coerce floats to 64
try :
data = data . astype ( 'int64' )
result = True
except :
pass
return data , result
|
def _get_csr_extensions(csr):
    '''Return an ordered mapping of the name -> value of every extension
    contained in the given CSR object (empty when the CSR declares none).'''
    ret = OrderedDict()

    # Round-trip the CSR through a temp file so openssl can parse it.
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(csr.as_pem())
    tmp.flush()
    csryaml = _parse_openssl_req(tmp.name)
    tmp.close()

    if not csryaml or 'Requested Extensions' not in csryaml['Certificate Request']['Data']:
        return ret
    csrexts = csryaml['Certificate Request']['Data']['Requested Extensions']
    if not csrexts:
        return ret
    # Re-key long openssl extension names to their short equivalents.
    for short_name, long_name in six.iteritems(EXT_NAME_MAPPINGS):
        if long_name in csrexts:
            csrexts[short_name] = csrexts.pop(long_name)
    return csrexts
|
def send_emote(self, room_id, text_content, timestamp=None):
    """Perform PUT /rooms/$room_id/send/m.room.message with m.emote msgtype

    Args:
        room_id (str): The room ID to send the event in.
        text_content (str): The m.emote body to send.
        timestamp (int): Set origin_server_ts (For application services only)
    """
    body = self.get_emote_body(text_content)
    return self.send_message_event(room_id, "m.room.message", body,
                                   timestamp=timestamp)
|
def load_lang_conf():
    """Load language setting from language config file if it exists, otherwise
    try to use the local settings if Spyder provides a translation, or
    return the default if no translation provided."""
    if not osp.isfile(LANG_FILE):
        # No stored preference yet: derive one and persist it.
        lang = get_interface_language()
        save_lang_conf(lang)
    else:
        with open(LANG_FILE, 'r') as f:
            lang = f.read()

    # If the stored language has since been disabled, fall back to the
    # default and persist that fallback.
    if lang.strip('\n') in DISABLED_LANGUAGES:
        lang = DEFAULT_LANGUAGE
        save_lang_conf(lang)
    return lang
|
def union(self, x, y):
    """Merges part that contain x and part containing y

    :returns: False if x, y are already in same part
    :complexity: O(inverse_ackerman(n))
    """
    root_x = self.find(x)
    root_y = self.find(y)
    if root_x == root_y:
        # nothing to do: both elements share a representative already
        return False
    # Union by rank: hang the shallower tree under the deeper one.
    if self.rank[root_x] > self.rank[root_y]:
        self.up[root_y] = root_x
    elif self.rank[root_x] < self.rank[root_y]:
        self.up[root_x] = root_y
    else:
        # Equal ranks: pick root_x as the new root and bump its rank.
        self.rank[root_x] += 1
        self.up[root_y] = root_x
    return True
|
def calculate_dates(self, dt):
    """Given a dt, find that day's close and period start (close - offset)."""
    session = self.cal.minute_to_session_label(dt)
    period_end = self.cal.open_and_close_for_session(session)[1]
    # Align the market close time here with the execution time used by the
    # simulation clock. This ensures that scheduled functions trigger at
    # the correct times.
    close = self.cal.execution_time_from_close(period_end)
    self._period_end = close
    self._period_start = close - self.offset
    self._period_close = close
|
def check_permission(permission, hidden=True):
    """Check if permission is allowed.

    If permission fails then the connection is aborted.

    :param permission: The permission to check.
    :param hidden: Determine if a 404 error (``True``) or 401/403 error
        (``False``) should be returned if the permission is rejected (i.e.
        hide or reveal the existence of a particular object).
    """
    # A None permission means "no restriction"; a passing check is also fine.
    if permission is None or permission.can():
        return
    if hidden:
        abort(404)
    elif current_user.is_authenticated:
        abort(403, 'You do not have a permission for this action')
    else:
        abort(401)
|
async def prompt(self, text=None):
    '''Prompt for user input from stdin.'''
    # Lazily create the session so history persists across prompts.
    if self.sess is None:
        history = FileHistory(s_common.getSynPath('cmdr_history'))
        self.sess = PromptSession(history=history)

    if text is None:
        text = self.cmdprompt

    with patch_stdout():
        return await self.sess.prompt(text, async_=True, vi_mode=self.vi_mode,
                                      enable_open_in_editor=True)
|
def visitName(self, ctx: jsgParser.NameContext):
    """name: ID | STRING"""
    raw = get_terminal(ctx)
    # Record the raw token mapped to its keyword-escaped form.
    self._names[raw] = esc_kw(raw)
|
def lookup(self, key):
    """Look up ``key``, first in the database and then by waiting on the
    futures created with :meth:`create_query_future` for that key.

    If the key is not in the database, :meth:`lookup` iterates as long as
    there are pending futures for the given ``key``. If there are no
    pending futures, :class:`KeyError` is raised. If a future raises a
    :class:`ValueError`, it is ignored. If the future returns a value, it
    is used as the result.
    """
    # Fast path: the database already has an answer.
    try:
        result = self.lookup_in_database(key)
    except KeyError:
        pass
    else:
        return result

    while True:
        # KeyError here propagates to the caller: no pending future exists.
        fut = self._lookup_cache[key]
        try:
            result = yield from fut
        except ValueError:
            # A failed future is ignored; retry with the next pending one.
            continue
        else:
            return result
|
def home_page(self, tld_type: Optional[TLDType] = None) -> str:
    """Generate a random home page.

    :param tld_type: TLD type.
    :return: Random home page.

    :Example:
        http://www.fontir.info
    """
    username = self.random.choice(USERNAMES)
    tld = self.top_level_domain(tld_type=tld_type)
    return 'http://www.{}{}'.format(username, tld)
|
def generateViewHierarchies(self):
    '''Wrapper method to create the view hierarchies. Currently it just calls
    :func:`~exhale.graph.ExhaleRoot.generateClassView` and
    :func:`~exhale.graph.ExhaleRoot.generateDirectoryView` --- if you want to
    implement additional hierarchies, implement the additional hierarchy
    method and call it from here. Then make sure to ``include`` it in
    :func:`~exhale.graph.ExhaleRoot.generateAPIRootBody`.
    '''
    # class hierarchy: gather the data, then write it out
    self.writeOutHierarchy(True, self.generateClassView())
    # file/directory hierarchy: gather the data, then write it out
    self.writeOutHierarchy(False, self.generateDirectoryView())
|
def load_conf(cfg_path):
    """Try to load the given conf file.

    On success the parsed JSON is stored into the module-level ``config``
    and ``True`` is returned; on any failure an error is reported and
    ``False`` is returned.

    :param cfg_path: path to a JSON configuration file
    :return: ``True`` if the file was read and parsed, ``False`` otherwise
    """
    global config
    try:
        # Context manager guarantees the handle is closed even if the
        # read itself fails (the original leaked the handle in that case).
        with open(cfg_path, 'r') as cfg:
            cfg_json = cfg.read()
    except Exception as ex:
        if verbose:
            print("Unable to open {0}".format(cfg_path))
            print(str(ex))
        return False
    # Try to parse the conf file into a Python structure
    try:
        config = json.loads(cfg_json)
    except Exception as ex:
        print("Unable to parse configuration file as JSON")
        print(str(ex))
        return False
    # This config was successfully loaded
    return True
|
def rpm_qf_args(tags=None, separator=';'):
    """Return the arguments to pass to rpm to list RPMs in the format expected
    by parse_rpm_output."""
    if tags is None:
        tags = image_component_rpm_tags
    # One %{TAG} query-format token per tag, joined by the separator.
    fmt = separator.join("%%{%s}" % tag for tag in tags)
    return r"-qa --qf '{0}\n'".format(fmt)
|
def set_default(feature, value):
    """Sets the default value of the given feature, overriding any previous default.

    feature: the name of the feature
    value:   the default value to assign
    """
    f = __all_features[feature]
    # Free and optional features cannot carry defaults; report the first
    # offending attribute (free takes precedence, as before).
    for attr in ('free', 'optional'):
        if getattr(f, attr):
            raise InvalidValue("%s property %s cannot have a default" % (attr, f.name))
    if value not in f.values:
        raise InvalidValue("The specified default value, '%s' is invalid.\n" % value + "allowed values are: %s" % f.values)
    f.set_default(value)
|
def exception_uuid(exc_type, exc_value, exc_traceback):
    """Calculate a 32-bit UUID for a given exception.

    Useful for looking for an existing ticket corresponding to a given
    exception. Takes into account the type of exception, and the name of
    every file and function in the traceback, along with the source code
    from those lines. This should remain consistent until a file or
    function is renamed, the source changed on the precise lines in the
    traceback, or the evaluation path changes.

    :return: Hex ``str`` of length 8.
    """
    # hashlib only accepts bytes on Python 3, so encode before hashing
    # (the original passed str and raised TypeError under Python 3).
    hasher = hashlib.sha256(str(exc_type).encode('utf-8'))
    for file_name, line_no, func_name, source in traceback.extract_tb(exc_traceback):
        part = '\n' + file_name + ':' + func_name + ':' + (source or '')
        hasher.update(part.encode('utf-8'))
    # 8 hex chars == 32 bits, per the docstring contract.
    return hasher.hexdigest()[:8]
|
def user_delete(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/users#delete-user"
    api_path = "/api/v2/users/{id}.json".format(id=id)
    return self.call(api_path, method="DELETE", **kwargs)
|
def get_default_config_help(self):
    """Returns the help text for the configuration options for this handler"""
    help_text = {
        'hostname': 'Hostname',
        'port': 'Port',
        'ssl': 'set to True to use HTTPS instead of http',
        'batch_size': 'How many metrics to store before sending to the'
                      ' influxdb server',
        'cache_size': 'How many values to store in cache in case of'
                      ' influxdb failure',
        'username': 'Username for connection',
        'password': 'Password for connection',
        'database': 'Database name',
        'time_precision': 'time precision in second(s), milisecond(ms) or '
                          'microsecond (u)',
    }
    config = super(InfluxdbHandler, self).get_default_config_help()
    config.update(help_text)
    return config
|
def from_euler(self, roll, pitch, yaw):
    '''fill the matrix from Euler angles in radians'''
    sr, cr = sin(roll), cos(roll)
    sp, cp = sin(pitch), cos(pitch)
    sy, cy = sin(yaw), cos(yaw)

    # row a
    self.a.x = cp * cy
    self.a.y = sr * sp * cy - cr * sy
    self.a.z = cr * sp * cy + sr * sy
    # row b
    self.b.x = cp * sy
    self.b.y = sr * sp * sy + cr * cy
    self.b.z = cr * sp * sy - sr * cy
    # row c
    self.c.x = -sp
    self.c.y = sr * cp
    self.c.z = cr * cp
|
def buy_avg_holding_price(self):
    """[float] average holding price on the buy (long) side"""
    if self.buy_quantity == 0:
        return 0
    return self._buy_holding_cost / self.buy_quantity / self.contract_multiplier
|
def write_catalog(detections, fname, format="QUAKEML"):
    """Write events contained within detections to a catalog file.

    :type detections: list
    :param detections: list of eqcorrscan.core.match_filter.Detection
    :type fname: str
    :param fname: Name of the file to write to
    :type format: str
    :param format: File format to use, see obspy.core.event.Catalog.write
        for supported formats.
    """
    get_catalog(detections).write(filename=fname, format=format)
|
def domains(self):
    """This method returns all of your current domains."""
    response = self.request('/domains', method='GET')
    status = response.get('status')
    if status != 'OK':
        # Surface the API error with its status code.
        message = response.get('message')
        raise DOPException('[%s]: %s' % (status, message))
    return [Domain.from_json(d) for d in response.get('domains', [])]
|
def _event_for(self, elts):
    """Creates an Event that is set when the bundle with elts is sent."""
    evt = Event()
    # Attach a canceller so a pending bundle can be aborted.
    evt.canceller = self._canceller_for(elts, evt)
    return evt
|
def deactivate_in_ec(self, ec_index):
    '''Deactivate this component in an execution context.

    @param ec_index The index of the execution context to deactivate in.
        This index is into the total array of contexts, that is both owned
        and participating contexts. If the value of ec_index is greater
        than the length of @ref owned_ecs, that length is subtracted from
        ec_index and the result used as an index into
        @ref participating_ecs.
    '''
    with self._mutex:
        owned_count = len(self.owned_ecs)
        if ec_index < owned_count:
            ec = self.owned_ecs[ec_index]
        else:
            # Index past the owned contexts: re-base into participating_ecs.
            part_index = ec_index - owned_count
            if part_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(part_index)
            ec = self.participating_ecs[part_index]
        ec.deactivate_component(self._obj)
|
def control_force(self, c):
    '''represents physical limitation of the control'''
    # Saturating response: g * (2/π) * atan((s/g) * c), bounded by ±g.
    gain = self.g
    slope = self.s
    return gain * (2 / π) * arctan((slope / gain) * c)
|
def incr(self, name, amount=1):
    """Increase the value at key ``name`` by ``amount``. If no key exists,
    the value will be initialized as ``amount``.

    Like **Redis.INCR**

    :param string name: the key name
    :param int amount: increments
    :return: the integer value at key ``name``
    :rtype: int

    >>> ssdb.incr('set_count', 3)
    13
    >>> ssdb.incr('set_count', 1)
    14
    >>> ssdb.incr('set_count', -2)
    12
    >>> ssdb.incr('temp_count', 42)
    42
    """
    # Validate/coerce the increment before handing it to the server.
    step = get_integer('amount', amount)
    return self.execute_command('incr', name, step)
|
def with_timeout(timeout, d, reactor=reactor):
    """Returns a `Deferred` that is in all respects equivalent to `d`, e.g. when
    `cancel()` is called on it `Deferred`, the wrapped `Deferred` will also be
    cancelled; however, a `Timeout` will be fired after the `timeout` number of
    seconds if `d` has not fired by that time.

    When a `Timeout` is raised, `d` will be cancelled. It is up to the caller
    to worry about how `d` handles cancellation, i.e. whether it has full/true
    support for cancelling, or does cancelling it just prevent its callbacks
    from being fired but doesn't cancel the underlying operation.
    """
    # Pass-through: no timeout requested, or `d` is not actually a Deferred.
    if timeout is None or not isinstance(d, Deferred):
        return d

    # Cancelling the returned Deferred cancels both the wrapped Deferred
    # and the timer (timeout_d is bound below; the lambda runs later).
    ret = Deferred(canceller=lambda _: (
        d.cancel(),
        timeout_d.cancel(),
    ))

    # Timer path: if it fires first, cancel `d` and error `ret` with
    # Timeout — unless `ret` already fired.
    timeout_d = sleep(timeout, reactor)
    timeout_d.addCallback(lambda _: (
        d.cancel(),
        ret.errback(Failure(Timeout())) if not ret.called else None,
    ))
    # Swallow the CancelledError raised when we cancel the timer ourselves.
    timeout_d.addErrback(lambda f: f.trap(CancelledError))

    # Success path: stop the timer and forward the result to `ret`.
    d.addCallback(lambda result: (
        timeout_d.cancel(),
        ret.callback(result),
    ))
    # Failure path: forward non-cancellation failures; a CancelledError
    # means the timeout (or the caller) already dealt with `ret`.
    d.addErrback(lambda f: (
        if_(not f.check(CancelledError), lambda: (
            timeout_d.cancel(),
            ret.errback(f),
        )),
    ))
    return ret
|
def process_attrib(element, msg):
    '''process_attrib

    High-level api: Strip every attribute except 'type', 'access' and
    'mandatory' from an ElementTree node and all of its descendants. Then,
    when msg is non-empty, a new attribute 'diff' is added to each node.

    Parameters
    ----------
    element : `Element`
        A node needs to be looked at.
    msg : `str`
        Message to be added in attribute 'diff'.

    Returns
    -------
    Element
        Argument 'element' is returned after processing.
    '''
    attrib_required = ['type', 'access', 'mandatory']
    for node in element.iter():
        # Iterate over a snapshot of the keys: deleting from node.attrib
        # while iterating its live .keys() view raises RuntimeError on
        # Python 3 ("dictionary changed size during iteration").
        for attrib in list(node.attrib):
            if attrib not in attrib_required:
                del node.attrib[attrib]
        if msg:
            node.attrib['diff'] = msg
    return element
|
def get_fmt_results(results, limit=5, sep='::', fmt=None):
    """Return a list of formatted strings representation on a result dictionary.

    The elements of the key are divided by a separator string. The result is
    appended after the key between parentheses. Apply a format transformation
    to odd elements of the key if a fmt parameter is passed.
    """
    formatted = []
    truncated = False
    # Highest counts first; stop early once `limit` entries are emitted and
    # the remaining counts drop to 1 or below.
    for key in sorted(results, key=results.get, reverse=True):
        if len(formatted) >= limit and results[key] <= 1:
            truncated = True
            break
        if fmt is None:
            label = sep.join(key)
        else:
            # Apply the format transformation to odd-indexed key elements.
            label = sep.join(fmt.format(part) if idx % 2 else part
                             for idx, part in enumerate(key))
        formatted.append(u'{0}({1})'.format(label, results[key]))

    if not truncated:
        return formatted
    skipped = u'[%d more skipped]' % (len(results) - len(formatted))
    formatted.append(fmt.format(skipped) if fmt is not None else skipped)
    return formatted
|
def in_git_clone():
    """Returns `True` if the current directory is a git repository

    Logic is 'borrowed' from :func:`git.repo.fun.is_git_dir`
    """
    gitdir = '.git'
    # A minimal git dir has an objects/ dir, a refs/ dir and a HEAD file.
    checks = (
        os.path.isdir(gitdir),
        os.path.isdir(os.path.join(gitdir, 'objects')),
        os.path.isdir(os.path.join(gitdir, 'refs')),
        os.path.exists(os.path.join(gitdir, 'HEAD')),
    )
    return all(checks)
|
def build_attrs(self, *args, **kwargs):
    """Disable automatic corrections and completions."""
    attrs = super(CaptchaAnswerInput, self).build_attrs(*args, **kwargs)
    # Browsers must not "help" the user answer the captcha.
    attrs.update({
        'autocapitalize': 'off',
        'autocomplete': 'off',
        'autocorrect': 'off',
        'spellcheck': 'false',
    })
    return attrs
|
def get_default_config(self):
    """Returns the default collector settings"""
    defaults = {
        'path': 'exim',
        'bin': '/usr/sbin/exim',
        'use_sudo': False,
        'sudo_cmd': '/usr/bin/sudo',
        'sudo_user': 'root',
    }
    config = super(EximCollector, self).get_default_config()
    config.update(defaults)
    return config
|
def _fw_delete(self, drvr_name, data):
    """Firewall Delete routine.

    This function calls routines to remove FW from fabric and device.
    It also updates its local cache.
    """
    fw_id = data.get('firewall_id')
    tenant_id = self.tenant_db.get_fw_tenant(fw_id)
    if tenant_id not in self.fwid_attr:
        LOG.error("Invalid tenant id for FW delete %s", tenant_id)
        return
    tenant_obj = self.fwid_attr[tenant_id]
    # Only drop the cached state once fabric/device removal succeeds.
    if self._check_delete_fw(tenant_id, drvr_name):
        tenant_obj.delete_fw(fw_id)
        self.tenant_db.del_fw_tenant(fw_id)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.