signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def addRule(self, doc, func, _preprocess=True):
    """Add grammar rules to ``self.rules``, ``self.rule2func``, and
    ``self.rule2name``.

    ``doc`` is a grammar fragment: comment lines (starting with ``#``) and
    blank lines are stripped.  A limited form of ``*``, ``+`` and ``?`` is
    allowed when the RHS has a single item, e.g. ``stmts ::= stmt+``.

    :param doc: grammar text; rules are separated by ``::=`` markers.
    :param func: the reduction function; its name (minus a 2-char prefix)
        becomes the rule name.
    :param _preprocess: when True, each rule is run through
        ``self.preprocess`` before being stored.
    """
    fn = func
    # Remove blank lines and comment lines, e.g. lines starting with "#".
    doc = os.linesep.join([s for s in doc.splitlines() if s and not re.match("^\s*#", s)])
    rules = doc.split()
    # Each '::=' token marks a rule boundary; the token before it is the LHS.
    index = []
    for i in range(len(rules)):
        if rules[i] == '::=':
            index.append(i - 1)
    index.append(len(rules))
    for i in range(len(index) - 1):
        lhs = rules[index[i]]
        # Skip the LHS and the '::=' token itself; everything up to the
        # next rule boundary is the RHS.
        rhs = rules[index[i] + 2:index[i + 1]]
        rule = (lhs, tuple(rhs))
        if _preprocess:
            rule, fn = self.preprocess(rule, func)
        # Handle a stripped-down form of *, + and ?:
        # allow only one nonterminal on the right-hand side.
        if len(rule[1]) == 1:
            if rule[1][0] == rule[0]:
                # 'x ::= x' would loop forever.
                raise TypeError("Complete recursive rule %s" % rule2str(rule))
            repeat = rule[1][-1][-1]
            if repeat in ('*', '+', '?'):
                nt = rule[1][-1][:-1]
                if repeat == '?':
                    # 'x ::= y?' expands to 'x ::= y' and 'x ::= <empty>'.
                    new_rule_pair = [rule[0], list((nt,))]
                    self.optional_nt.add(rule[0])
                else:
                    # 'x ::= y*' / 'x ::= y+' expand to the left-recursive
                    # pair 'x ::= x y' plus a base case added below.
                    self.list_like_nt.add(rule[0])
                    new_rule_pair = [rule[0], [rule[0]] + list((nt,))]
                new_rule = rule2str(new_rule_pair)
                self.addRule(new_rule, func, _preprocess)
                if repeat == '+':
                    # '+' requires at least one item as the base case.
                    second_rule_pair = (lhs, (nt,))
                else:
                    # '*' and '?' allow the empty expansion.
                    second_rule_pair = (lhs, tuple())
                new_rule = rule2str(second_rule_pair)
                self.addRule(new_rule, func, _preprocess)
                continue
        if lhs in self.rules:
            if rule in self.rules[lhs]:
                # Exact duplicate: optionally report, never store twice.
                if 'dups' in self.debug and self.debug['dups']:
                    self.duplicate_rule(rule)
                continue
            self.rules[lhs].append(rule)
        else:
            self.rules[lhs] = [rule]
        self.rule2func[rule] = fn
        self.rule2name[rule] = func.__name__[2:]
        self.ruleschanged = True
        # Note: in empty rules, i.e. len(rule[1]) == 0, we don't
        # call reductions explicitly.  Instead it is computed implicitly.
        if self.profile_info is not None and len(rule[1]) > 0:
            rule_str = self.reduce_string(rule)
            if rule_str not in self.profile_info:
                self.profile_info[rule_str] = 0
            pass
    return
def prior(self, samples):
    """Prior distribution over the sampled weight vectors.

    Parameters
    ----------
    samples : list
        A (NUM_OF_INSTANCE x NUM_OF_FUNCTIONS) matrix of weight samples,
        i.e. {{w11, ..., w1k}, {w21, ..., w2k}, ...}.

    Returns
    -------
    ndarray of float
        1 for each admissible sample, 0 for each rejected one.
    """
    ret = np.ones(NUM_OF_INSTANCE)
    for idx in range(NUM_OF_INSTANCE):
        row = samples[idx]
        # Reject samples with any non-positive weight.
        if any(not row[j] > 0 for j in range(self.effective_model_num)):
            ret[idx] = 0
        # Reject samples whose combined curve is non-increasing between
        # position 1 and the target position.
        if self.f_comb(1, row) >= self.f_comb(self.target_pos, row):
            ret[idx] = 0
    return ret
def list_images(img_dpath_, ignore_list=None, recursive=False, fullpath=False, full=None, sort=True):
    r"""Returns a list of images in a directory. By default returns relative paths.

    TODO: rename to ls_images
    TODO: Change all instances of fullpath to full

    Args:
        img_dpath_ (str): directory to search
        ignore_list (list): directory/file names to skip (default=None -> [])
        recursive (bool): descend into subdirectories (default=False)
        fullpath (bool): return absolute paths (default=False)
        full (None): alias for fullpath (default=None)
        sort (bool): sort the result (default=True)

    Returns:
        list: gname_list
    """
    # BUG FIX: ignore_list previously defaulted to a shared mutable [].
    if ignore_list is None:
        ignore_list = []
    if full is not None:
        fullpath = fullpath or full
    img_dpath_ = util_str.ensure_unicode(img_dpath_)
    img_dpath = realpath(img_dpath_)
    ignore_set = set(ignore_list)
    gname_list_ = []
    assertpath(img_dpath)
    # Walk the directory tree (possibly just the top level).
    true_imgpath = truepath(img_dpath)
    for root, dlist, flist in os.walk(true_imgpath):
        root = util_str.ensure_unicode(root)
        rel_dpath = relpath(root, img_dpath)
        # Skip any directory whose path contains an ignored component.
        if any(dname in ignore_set for dname in dirsplit(rel_dpath)):
            continue
        for fname in flist:
            fname = util_str.ensure_unicode(fname)
            gname = join(rel_dpath, fname).replace('\\', '/')
            if gname.startswith('./'):
                gname = gname[2:]
            if fpath_has_imgext(gname):  # Ignore non-image files
                if gname in ignore_set:
                    continue
                if fullpath:
                    gname_list_.append(join(img_dpath, gname))
                else:
                    gname_list_.append(gname)
        if not recursive:
            break
    # BUG FIX: with sort=False the original referenced an unbound name
    # (gname_list) and raised NameError; now the unsorted list is returned.
    return sorted(gname_list_) if sort else gname_list_
def data_files(self):
    """Returns a python list of all (sharded) data subset files.

    Returns:
        python list of all (sharded) data set files.
    Raises:
        SystemExit: via exit(-1) when no files match the subset.
    """
    # Shards are named like '<subset>-00000-of-00128' under the data dir.
    pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)
    matched = tf.gfile.Glob(pattern)
    if not matched:
        print('No files found for dataset %s/%s at %s' % (self.name, self.subset, FLAGS.data_dir))
        self.download_message()
        exit(-1)
    return matched
def _make_class_unpicklable ( cls ) :
"""Make the given class un - picklable .""" | def _break_on_call_reduce ( self ) :
raise TypeError ( '%r cannot be pickled' % self )
cls . __reduce__ = _break_on_call_reduce
cls . __module__ = '<unknown>' |
def list_permissions(self, group_name=None, resource=None, url_prefix=None, auth=None, session=None, send_opts=None):
    """List the permission sets for the logged in user.

    Optionally filter by resource or group.

    Args:
        group_name (string): Name of group to filter on.
        resource (intern.resource.boss.BossResource): Data model object to filter on.
        url_prefix (string): Protocol + host such as https://api.theboss.io
        auth (string): Token to send in the request header.
        session (requests.Session): HTTP session to use for the request.
        send_opts (dictionary): Additional arguments to pass to session.send().

    Returns:
        (list[dict]): List of dictionaries of permission sets.

    Raises:
        HTTPError: when the server does not answer with status 200.
    """
    filter_params = {}
    if group_name:
        filter_params["group"] = group_name
    if resource:
        filter_params.update(resource.get_dict_route())
    req = self.get_permission_request('GET', 'application/json', url_prefix, auth, query_params=filter_params)
    prep = session.prepare_request(req)
    resp = session.send(prep, **send_opts)
    if resp.status_code == 200:
        return resp.json()["permission-sets"]
    # Build a failure message that echoes the filters that were applied.
    msg = "Failed to get permission sets. "
    if group_name:
        msg = "{} Group: {}".format(msg, group_name)
    if resource:
        msg = "{} Resource: {}".format(msg, resource.name)
    msg = '{}, got HTTP response: ({}) - {}'.format(msg, resp.status_code, resp.text)
    raise HTTPError(msg, request=req, response=resp)
def Overlay_setInspectMode(self, mode, **kwargs):
    """Function path: Overlay.setInspectMode

    Enters 'inspect' mode: elements the user hovers over are highlighted,
    and the backend emits 'inspectNodeRequested' upon element selection.

    Required arguments:
        'mode' (type: InspectMode) -> the inspection mode to set.
    Optional keyword arguments:
        'highlightConfig' (type: HighlightConfig) -> descriptor for the
        highlight appearance of hovered-over nodes; may be omitted when
        enabled == false.
    No return value from the protocol; returns the command result object.
    """
    expected = ['highlightConfig']
    passed_keys = list(kwargs.keys())
    # Reject any keyword argument outside the protocol's optional set.
    only_expected = all([(key in expected) for key in passed_keys])
    assert only_expected, "Allowed kwargs are ['highlightConfig']. Passed kwargs: %s" % passed_keys
    return self.synchronous_command('Overlay.setInspectMode', mode=mode, **kwargs)
def read_blocking(self):
    """Same as read, except blocks until data is available to be read.

    Repeatedly polls ``self._read()`` until it yields a non-None value,
    then returns the parsed message.
    """
    while True:
        data = self._read()
        # Fixed: identity comparison with None ('is not') instead of '!='.
        if data is not None:
            break
    return self._parse_message(data)
def update_frame(self, key, ranges=None, plot=None, element=None):
    """Updates an existing plot with data corresponding to the key.

    :param key: frame key into the HoloMap/DynamicMap being plotted.
    :param ranges: optional precomputed ranges; recomputed below if None.
    :param plot: unused here; the plot handle is taken from self.handles.
    :param element: optional element to display; resolved from key if None.
    """
    # A DynamicMap frame may already have been resolved by an enclosing
    # overlaid/batched plot; avoid fetching it a second time.
    reused = isinstance(self.hmap, DynamicMap) and (self.overlaid or self.batched)
    if not reused and element is None:
        element = self._get_frame(key)
    elif element is not None:
        self.current_key = key
        self.current_frame = element
    renderer = self.handles.get('glyph_renderer', None)
    glyph = self.handles.get('glyph', None)
    # Toggle visibility rather than deleting the glyph when no data exists.
    visible = element is not None
    if hasattr(renderer, 'visible'):
        renderer.visible = visible
    if hasattr(glyph, 'visible'):
        glyph.visible = visible
    # Bail out when there is nothing to update: no element, a static plot,
    # or a stream whose data has not changed and is not currently firing.
    if ((self.batched and not element) or element is None or
            (not self.dynamic and self.static) or
            (self.streaming and self.streaming[0].data is self.current_frame.data
             and not self.streaming[0]._triggering)):
        return
    if self.batched:
        # Batched plots style from the last element; cycles do not apply.
        style_element = element.last
        max_cycles = None
    else:
        style_element = element
        max_cycles = self.style._max_cycles
    style = self.lookup_options(style_element, 'style')
    self.style = style.max_cycles(max_cycles) if max_cycles else style
    ranges = self.compute_ranges(self.hmap, key, ranges)
    self.param.set_param(**self.lookup_options(style_element, 'plot').options)
    ranges = util.match_spec(style_element, ranges)
    self.current_ranges = ranges
    plot = self.handles['plot']
    if not self.overlaid:
        # Overlaid plots share axes; only standalone plots update ranges.
        self._update_ranges(style_element, ranges)
    self._update_plot(key, plot, style_element)
    self._set_active_tools(plot)
    # Only refresh hover tools that this plot created itself.
    if 'hover' in self.handles and 'hv_created' in self.handles['hover'].tags:
        self._update_hover(element)
    self._update_glyphs(element, ranges, self.style[self.cyclic_index])
    self._execute_hooks(element)
def step_note_that(context, remark):
    """Generic step that records an additional remark/hint to improve
    readability/understanding without performing any check.

    .. code-block:: gherkin

        Given that today is "April 1st"
        But note that "April 1st is Fools day (and beware)"
    """
    logger = getattr(context, "log", None)
    if not logger:
        return
    logger.info(u"NOTE: %s;" % remark)
def wait_for_responses(self, client):
    """Waits for all responses to come back and resolves the eventual
    results.

    Each pending entry is a (command_name, options, promise) triple; the
    response is parsed off the connection and the promise resolved with it.
    """
    assert_open(self)
    if self.has_pending_requests:
        raise RuntimeError('Cannot wait for responses if there are pending requests outstanding. You need to wait for pending requests to be sent first.')
    # Swap out the pending list first so resolution cannot re-enter it.
    outstanding, self.pending_responses = self.pending_responses, []
    for command_name, options, promise in outstanding:
        promise.resolve(client.parse_response(self.connection, command_name, **options))
def WindowsSdkVersion(self):
    """Microsoft Windows SDK versions for the specified MSVC++ version.

    Returns a tuple of compatible SDK version strings, newest first,
    or None for MSVC versions with no mapping (e.g. 13.0).
    """
    ver = self.vc_ver
    if ver <= 9.0:
        return ('7.0', '6.1', '6.0a')
    if ver == 10.0:
        return ('7.1', '7.0a')
    if ver == 11.0:
        return ('8.0', '8.0a')
    if ver == 12.0:
        return ('8.1', '8.1a')
    if ver >= 14.0:
        return ('10.0', '8.1')
def qos_map_cos_traffic_class_cos4(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF config tree for qos/map/cos-traffic-class/cos4
    and hands it to the callback (``kwargs['callback']`` or the default
    ``self._callback``).  Consumes 'name' and 'cos4' from kwargs.
    """
    config = ET.Element("config")
    qos_el = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    map_el = ET.SubElement(qos_el, "map")
    ctc_el = ET.SubElement(map_el, "cos-traffic-class")
    ET.SubElement(ctc_el, "name").text = kwargs.pop('name')
    ET.SubElement(ctc_el, "cos4").text = kwargs.pop('cos4')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _schedule_pending_unlocked(self, state):
    """Consider the pending transfers for a stream, pumping new chunks
    while the unacknowledged byte count is below :attr:`window_size_bytes`.
    Must be called with the FileStreamState lock held.

    :param FileStreamState state:
        Stream to schedule chunks for.
    """
    while state.jobs and state.unacked < self.window_size_bytes:
        # Peek (don't pop) the head job; it stays queued until EOF.
        sender, fp = state.jobs[0]
        chunk = fp.read(self.IO_SIZE)
        if not chunk:
            # File is done.  Cause the target's receive loop to exit by
            # closing the sender, close the file, and remove the job entry.
            sender.close()
            fp.close()
            state.jobs.pop(0)
            continue
        state.unacked += len(chunk)
        sender.send(mitogen.core.Blob(chunk))
def search_url(self, searchterm):
    """Search for URLs.

    :type searchterm: str
    :rtype: list
    """
    # Delegates to the private generic search, restricting the attribute
    # types to the URL-like set returned by __mispurltypes().
    return self.__search(type_attribute=self.__mispurltypes(), value=searchterm)
def dots_to_empty_cells(config, tsv_fpath):
    """Put dots instead of empty cells in order to view TSV with column -t.

    An empty cell appears as two adjacent tabs; each is rewritten to
    tab-dot-tab.  The loop repeats because replacements can create new
    adjacent-tab pairs (runs of empty cells).
    """
    def _fill_line(line, _line_idx):
        while '\t\t' in line:
            line = line.replace('\t\t', '\t.\t')
        return line
    return iterate_file(config, tsv_fpath, _fill_line, suffix='dots')
def deprecate_and_include(parser, token):
    """Raises a deprecation warning about using the first argument; the
    remaining arguments are passed to an ``{% include %}`` tag.  Usage::

        {% deprecate_and_include "old_template.html" "new_template.html" %}

    In order to avoid re-implementing {% include %} so as to resolve
    variables, this tag currently only works with literal template path
    strings.
    """
    parts = token.split_contents()
    old_path = parts[1]
    new_path = parts[2]
    if settings.DEBUG:
        # Surface the warning every time in DEBUG so it is not deduplicated.
        warnings.simplefilter('always', DeprecationWarning)
        warnings.warn("The %s template is deprecated; Use %s instead." % (old_path, new_path), DeprecationWarning, stacklevel=2)
    # Rebuild the token without the deprecated path and delegate to include.
    rebuilt = [parts[0]] + parts[2:]
    include_token = Token(token.token_type, " ".join(rebuilt))
    return do_include(parser, include_token)
def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
    """Exchange a partition between two tables (Thrift client stub).

    Parameters:
     - partitionSpecs
     - source_db
     - source_table_name
     - dest_db
     - dest_table_name
    """
    # Standard Thrift send/recv pair: issue the RPC, then block on the reply.
    self.send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
    return self.recv_exchange_partition()
def phi_from_spinx_spiny(spinx, spiny):
    """Returns the angle between the x-component axis and the in-plane
    spin, wrapped into [0, 2*pi)."""
    two_pi = 2 * numpy.pi
    return numpy.arctan2(spiny, spinx) % two_pi
def list_cron_job_for_all_namespaces(self, **kwargs):
    """list or watch objects of kind CronJob.

    This method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, pass async_req=True::

        >>> thread = api.list_cron_job_for_all_namespaces(async_req=True)
        >>> result = thread.get()

    Accepted keyword arguments (all optional) mirror the Kubernetes list
    API: _continue, field_selector, label_selector, limit, pretty,
    resource_version, timeout_seconds, watch, async_req.

    :return: V2alpha1CronJobList, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the _with_http_info
    # variant; async returns the thread, sync returns the unwrapped data.
    if kwargs.get('async_req'):
        return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
    data = self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
    return data
def _remove_duplicates ( self ) :
"""Remove every maximal rectangle contained by another one .""" | contained = set ( )
for m1 , m2 in itertools . combinations ( self . _max_rects , 2 ) :
if m1 . contains ( m2 ) :
contained . add ( m2 )
elif m2 . contains ( m1 ) :
contained . add ( m1 )
# Remove from max _ rects
self . _max_rects = [ m for m in self . _max_rects if m not in contained ] |
def transform(self, X):
    """Return this basis applied to X.

    Parameters
    ----------
    X : ndarray
        of shape (N, d) of observations where N is the number of samples,
        and d is the dimensionality of X.

    Returns
    -------
    ndarray
        of shape (N, d + 1) with a leading ones column when ``self.onescol``
        is set, otherwise X unchanged (shape (N, d)).
    """
    n_samples, _ = X.shape
    if not self.onescol:
        return X
    bias = np.ones((n_samples, 1))
    return np.hstack((bias, X))
def project(self, projected_meta=None, new_attr_dict=None, all_but_meta=None, projected_regs=None, new_field_dict=None, all_but_regs=None):
    """*Wrapper of* ``PROJECT``

    The PROJECT operator creates, from an existing dataset, a new dataset
    with all the samples (with their regions and region values) in the
    input one, but keeping for each sample only the metadata and/or region
    attributes expressed in the parameter list.  Differently from SELECT,
    PROJECT allows to:

    * Remove existing metadata and/or region attributes from a dataset;
    * Create new metadata and/or region attributes in the result.

    :param projected_meta: list of metadata attributes to project on
    :param new_attr_dict: optional dict {'new_meta': value_or_function, ...}
        where each value computes the new metadata attribute
    :param all_but_meta: list of metadata attributes to exclude from the
        projection (mutually exclusive with projected_meta)
    :param projected_regs: list of the region fields to select
    :param new_field_dict: optional dict {'new_field': value_or_function, ...}
        where each value computes the new region field
    :param all_but_regs: list of region fields to exclude from the
        projection (mutually exclusive with projected_regs)
    :return: a new GMQLDataset
    """
    # --- metadata attributes to keep -----------------------------------
    projected_meta_exists = False
    if isinstance(projected_meta, list) and all([isinstance(x, str) for x in projected_meta]):
        projected_meta = Some(projected_meta)
        projected_meta_exists = True
    elif projected_meta is None:
        projected_meta = none()
    else:
        raise TypeError("projected_meta must be a list of strings or None." " {} was provided".format(type(projected_meta)))
    # --- new metadata attributes ---------------------------------------
    if isinstance(new_attr_dict, dict):
        meta_ext = []
        expBuild = self.pmg.getNewExpressionBuilder(self.__index)
        for k in new_attr_dict.keys():
            item = new_attr_dict[k]
            if isinstance(k, str):
                if isinstance(item, MetaField):
                    me = expBuild.createMetaExtension(k, item.getMetaExpression())
                elif isinstance(item, int):
                    me = expBuild.createMetaExtension(k, expBuild.getMEType("int", str(item)))
                elif isinstance(item, str):
                    me = expBuild.createMetaExtension(k, expBuild.getMEType("string", item))
                elif isinstance(item, float):
                    me = expBuild.createMetaExtension(k, expBuild.getMEType("float", str(item)))
                else:
                    raise TypeError("Type {} of item of new_attr_dict is not valid".format(type(item)))
                meta_ext.append(me)
            else:
                raise TypeError("The key of new_attr_dict must be a string. " "{} was provided".format(type(k)))
        meta_ext = Some(meta_ext)
    elif new_attr_dict is None:
        meta_ext = none()
    else:
        raise TypeError("new_attr_dict must be a dictionary." " {} was provided".format(type(new_attr_dict)))
    # --- metadata attributes to exclude --------------------------------
    if isinstance(all_but_meta, list) and all([isinstance(x, str) for x in all_but_meta]):
        if not projected_meta_exists:
            all_but_meta = Some(all_but_meta)
            all_but_value = True
        else:
            raise ValueError("all_but_meta and projected_meta are mutually exclusive")
    elif all_but_meta is None:
        all_but_meta = none()
        all_but_value = False
    else:
        raise TypeError("all_but_meta must be a list of strings." " {} was provided".format(type(all_but_meta)))
    # In "all but" mode the exclusion list is passed where the projection
    # list would go; all_but_value tells the engine how to interpret it.
    projected_meta = all_but_meta if all_but_value else projected_meta
    # --- region fields to keep -----------------------------------------
    projected_regs_exists = False
    if isinstance(projected_regs, list) and all([isinstance(x, str) for x in projected_regs]):
        projected_regs = Some(projected_regs)
        projected_regs_exists = True
    elif projected_regs is None:
        projected_regs = none()
    else:
        raise TypeError("projected_regs must be a list of strings or None." " {} was provided".format(type(projected_regs)))
    # --- new region fields ---------------------------------------------
    if isinstance(new_field_dict, dict):
        regs_ext = []
        expBuild = self.pmg.getNewExpressionBuilder(self.__index)
        for k in new_field_dict.keys():
            item = new_field_dict[k]
            if isinstance(k, str):
                if isinstance(item, RegField):
                    re = expBuild.createRegionExtension(k, item.getRegionExpression())
                elif isinstance(item, MetaField):
                    re = expBuild.createRegionExtension(k, item.reMetaNode)
                elif isinstance(item, int):
                    # NOTE(review): int constants are mapped to the "float"
                    # region type here (matches the original behavior) —
                    # confirm this is the engine's intended numeric model.
                    re = expBuild.createRegionExtension(k, expBuild.getREType("float", str(item)))
                elif isinstance(item, str):
                    re = expBuild.createRegionExtension(k, expBuild.getREType("string", item))
                elif isinstance(item, float):
                    re = expBuild.createRegionExtension(k, expBuild.getREType("float", str(item)))
                else:
                    raise TypeError("Type {} of item of new_field_dict is not valid".format(type(item)))
                regs_ext.append(re)
            else:
                raise TypeError("The key of new_field_dict must be a string. " "{} was provided".format(type(k)))
        regs_ext = Some(regs_ext)
    elif new_field_dict is None:
        regs_ext = none()
    else:
        raise TypeError("new_field_dict must be a dictionary." " {} was provided".format(type(new_field_dict)))
    # --- region fields to exclude --------------------------------------
    if isinstance(all_but_regs, list) and all([isinstance(x, str) for x in all_but_regs]):
        if not projected_regs_exists:
            all_but_regs = Some(all_but_regs)
        else:
            # BUG FIX: the original message wrongly referenced the metadata
            # parameters in this (region) branch.
            raise ValueError("all_but_regs and projected_regs are mutually exclusive")
    elif all_but_regs is None:
        all_but_regs = none()
    else:
        raise TypeError("all_but_regs must be a list of strings." " {} was provided".format(type(all_but_regs)))
    new_index = self.opmng.project(self.__index, projected_meta, meta_ext, all_but_value, projected_regs, all_but_regs, regs_ext)
    return GMQLDataset(index=new_index, location=self.location, local_sources=self._local_sources, remote_sources=self._remote_sources, meta_profile=self.meta_profile)
def cmd(self, endpoints, cmd):
    """Send *cmd* to every endpoint and concatenate the replies.

    ``endpoints`` is [(host1, port1), (host2, port), ...].  A failed
    endpoint is skipped when there are several; with a single endpoint
    the failure propagates.
    """
    single = len(endpoints) == 1
    replies = []
    for endpoint in endpoints:
        try:
            reply = self._cmd(endpoint, cmd)
        except self.CmdFailed as ex:
            # With only one endpoint, give up; with more, keep trying.
            if single:
                raise ex
        else:
            replies.append(reply)
    return "".join(replies)
def insert_from_segwizard(self, fileobj, instruments, name, version=None, comment=None):
    """Parse the contents of the file object fileobj as a
    segwizard-format segment list, and insert the result as a new list
    of "active" segments into this LigolwSegments object.

    A new entry will be created in the segment_definer table for the
    segment list, and instruments, name and comment are used to populate
    the entry's metadata.  Note that the "valid" segments are left
    empty, nominally indicating that there are no periods of validity.
    """
    # Parse with LIGOTimeGPS column type so boundaries keep GPS precision.
    self.add(LigolwSegmentList(active=segmentsUtils.fromsegwizard(fileobj, coltype=LIGOTimeGPS), instruments=instruments, name=name, version=version, comment=comment))
def move(self, i, lat, lng, change_time=True):
    '''Move rally point *i* (1-based) to (lat, lng) in degrees.

    Coordinates are stored as integer degrees * 1e7.  When *change_time*
    is True the last_change timestamp is refreshed.
    '''
    if i < 1 or i > self.rally_count():
        print("Invalid rally point number %u" % i)
        # BUG FIX: previously execution fell through after the message and
        # indexed rally_points out of range.
        return
    self.rally_points[i - 1].lat = int(lat * 1e7)
    self.rally_points[i - 1].lng = int(lng * 1e7)
    if change_time:
        self.last_change = time.time()
def help(self, print_output=True):
    """Calls the help RPC, which returns the list of RPC calls available.

    This RPC should normally be used in an interactive console environment
    where the output should be printed instead of returned.  Otherwise,
    newlines will be escaped, which will make the output difficult to read.

    Args:
        print_output: A bool for whether the output should be printed.

    Returns:
        A str containing the help output, otherwise None if print_output
        wasn't set.
    """
    text = self._rpc('help')
    if not print_output:
        return text
    print(text)
def detect_ts ( df , max_anoms = 0.10 , direction = 'pos' , alpha = 0.05 , only_last = None , threshold = None , e_value = False , longterm = False , piecewise_median_period_weeks = 2 , plot = False , y_log = False , xlabel = '' , ylabel = 'count' , title = None , verbose = False ) :
"""Anomaly Detection Using Seasonal Hybrid ESD Test
A technique for detecting anomalies in seasonal univariate time series where the input is a
series of < timestamp , value > pairs .
Args :
x : Time series as a two column data frame where the first column consists of the
timestamps and the second column consists of the observations .
max _ anoms : Maximum number of anomalies that S - H - ESD will detect as a percentage of the
data .
direction : Directionality of the anomalies to be detected . Options are : ( ' pos ' | ' neg ' | ' both ' ) .
alpha : The level of statistical significance with which to accept or reject anomalies .
only _ last : Find and report anomalies only within the last day or hr in the time series . Options : ( None | ' day ' | ' hr ' )
threshold : Only report positive going anoms above the threshold specified . Options are : ( None | ' med _ max ' | ' p95 ' | ' p99 ' )
e _ value : Add an additional column to the anoms output containing the expected value .
longterm : Increase anom detection efficacy for time series that are greater than a month .
See Details below .
piecewise _ median _ period _ weeks : The piecewise median time window as described in Vallis , Hochenbaum , and Kejariwal ( 2014 ) . Defaults to 2.
plot : ( Currently unsupported ) A flag indicating if a plot with both the time series and the estimated anoms ,
indicated by circles , should also be returned .
y _ log : Apply log scaling to the y - axis . This helps with viewing plots that have extremely
large positive anomalies relative to the rest of the data .
xlabel : X - axis label to be added to the output plot .
ylabel : Y - axis label to be added to the output plot .
Details
' longterm ' This option should be set when the input time series is longer than a month .
The option enables the approach described in Vallis , Hochenbaum , and Kejariwal ( 2014 ) .
' threshold ' Filter all negative anomalies and those anomalies whose magnitude is smaller
than one of the specified thresholds which include : the median
of the daily max values ( med _ max ) , the 95th percentile of the daily max values ( p95 ) , and the
99th percentile of the daily max values ( p99 ) .
' title ' Title for the output plot .
' verbose ' Enable debug messages
The returned value is a dictionary with the following components :
anoms : Data frame containing timestamps , values , and optionally expected values .
plot : A graphical object if plotting was requested by the user . The plot contains
the estimated anomalies annotated on the input time series""" | if not isinstance ( df , DataFrame ) :
raise ValueError ( "data must be a single data frame." )
else :
if len ( df . columns ) != 2 or not df . iloc [ : , 1 ] . map ( np . isreal ) . all ( ) :
raise ValueError ( ( "data must be a 2 column data.frame, with the" "first column being a set of timestamps, and " "the second coloumn being numeric values." ) )
if ( not ( df . dtypes [ 0 ] . type is np . datetime64 ) and not ( df . dtypes [ 0 ] . type is np . int64 ) ) :
df = format_timestamp ( df )
if list ( df . columns . values ) != [ "timestamp" , "value" ] :
df . columns = [ "timestamp" , "value" ]
# Sanity check all input parameters
if max_anoms > 0.49 :
length = len ( df . value )
raise ValueError ( ( "max_anoms must be less than 50% of " "the data points (max_anoms =%f data_points =%s)." ) % ( round ( max_anoms * length , 0 ) , length ) )
if not direction in [ 'pos' , 'neg' , 'both' ] :
raise ValueError ( "direction options are: pos | neg | both." )
if not ( 0.01 <= alpha or alpha <= 0.1 ) :
if verbose :
import warnings
warnings . warn ( ( "alpha is the statistical signifigance, " "and is usually between 0.01 and 0.1" ) )
if only_last and not only_last in [ 'day' , 'hr' ] :
raise ValueError ( "only_last must be either 'day' or 'hr'" )
if not threshold in [ None , 'med_max' , 'p95' , 'p99' ] :
raise ValueError ( "threshold options are: None | med_max | p95 | p99" )
if not isinstance ( e_value , bool ) :
raise ValueError ( "e_value must be a boolean" )
if not isinstance ( longterm , bool ) :
raise ValueError ( "longterm must be a boolean" )
if piecewise_median_period_weeks < 2 :
raise ValueError ( "piecewise_median_period_weeks must be at greater than 2 weeks" )
if not isinstance ( plot , bool ) :
raise ValueError ( "plot must be a boolean" )
if not isinstance ( y_log , bool ) :
raise ValueError ( "y_log must be a boolean" )
if not isinstance ( xlabel , string_types ) :
raise ValueError ( "xlabel must be a string" )
if not isinstance ( ylabel , string_types ) :
raise ValueError ( "ylabel must be a string" )
if title and not isinstance ( title , string_types ) :
raise ValueError ( "title must be a string" )
if not title :
title = ''
else :
title = title + " : "
gran = get_gran ( df )
if gran == "day" :
num_days_per_line = 7
if isinstance ( only_last , string_types ) and only_last == 'hr' :
only_last = 'day'
else :
num_days_per_line = 1
if gran == 'sec' :
df . timestamp = date_format ( df . timestamp , "%Y-%m-%d %H:%M:00" )
df = format_timestamp ( df . groupby ( 'timestamp' ) . aggregate ( np . sum ) )
# if the data is daily , then we need to bump
# the period to weekly to get multiple examples
gran_period = { 'min' : 1440 , 'hr' : 24 , 'day' : 7 }
period = gran_period . get ( gran )
if not period :
raise ValueError ( '%s granularity detected. This is currently not supported.' % gran )
num_obs = len ( df . value )
clamp = ( 1 / float ( num_obs ) )
if max_anoms < clamp :
max_anoms = clamp
if longterm :
if gran == "day" :
num_obs_in_period = period * piecewise_median_period_weeks + 1
num_days_in_period = 7 * piecewise_median_period_weeks + 1
else :
num_obs_in_period = period * 7 * piecewise_median_period_weeks
num_days_in_period = 7 * piecewise_median_period_weeks
last_date = df . timestamp . iloc [ - 1 ]
all_data = [ ]
for j in range ( 0 , len ( df . timestamp ) , num_obs_in_period ) :
start_date = df . timestamp . iloc [ j ]
end_date = min ( start_date + datetime . timedelta ( days = num_days_in_period ) , df . timestamp . iloc [ - 1 ] )
# if there is at least 14 days left , subset it ,
# otherwise subset last _ date - 14days
if ( end_date - start_date ) . days == num_days_in_period :
sub_df = df [ ( df . timestamp >= start_date ) & ( df . timestamp < end_date ) ]
else :
sub_df = df [ ( df . timestamp > ( last_date - datetime . timedelta ( days = num_days_in_period ) ) ) & ( df . timestamp <= last_date ) ]
all_data . append ( sub_df )
else :
all_data = [ df ]
all_anoms = DataFrame ( columns = [ 'timestamp' , 'value' ] )
seasonal_plus_trend = DataFrame ( columns = [ 'timestamp' , 'value' ] )
# Detect anomalies on all data ( either entire data in one - pass ,
# or in 2 week blocks if longterm = TRUE )
for i in range ( len ( all_data ) ) :
directions = { 'pos' : Direction ( True , True ) , 'neg' : Direction ( True , False ) , 'both' : Direction ( False , True ) }
anomaly_direction = directions [ direction ]
# detect _ anoms actually performs the anomaly detection and
# returns the results in a list containing the anomalies
# as well as the decomposed components of the time series
# for further analysis .
s_h_esd_timestamps = detect_anoms ( all_data [ i ] , k = max_anoms , alpha = alpha , num_obs_per_period = period , use_decomp = True , one_tail = anomaly_direction . one_tail , upper_tail = anomaly_direction . upper_tail , verbose = verbose )
# store decomposed components in local variable and overwrite
# s _ h _ esd _ timestamps to contain only the anom timestamps
data_decomp = s_h_esd_timestamps [ 'stl' ]
s_h_esd_timestamps = s_h_esd_timestamps [ 'anoms' ]
# - - Step 3 : Use detected anomaly timestamps to extract the actual
# anomalies ( timestamp and value ) from the data
if s_h_esd_timestamps :
anoms = all_data [ i ] [ all_data [ i ] . timestamp . isin ( s_h_esd_timestamps ) ]
else :
anoms = DataFrame ( columns = [ 'timestamp' , 'value' ] )
# Filter the anomalies using one of the thresholding functions if applicable
if threshold : # Calculate daily max values
periodic_maxes = df . groupby ( df . timestamp . map ( Timestamp . date ) ) . aggregate ( np . max ) . value
# Calculate the threshold set by the user
if threshold == 'med_max' :
thresh = periodic_maxes . median ( )
elif threshold == 'p95' :
thresh = periodic_maxes . quantile ( .95 )
elif threshold == 'p99' :
thresh = periodic_maxes . quantile ( .99 )
# Remove any anoms below the threshold
anoms = anoms [ anoms . value >= thresh ]
all_anoms = all_anoms . append ( anoms )
seasonal_plus_trend = seasonal_plus_trend . append ( data_decomp )
# Cleanup potential duplicates
try :
all_anoms . drop_duplicates ( subset = [ 'timestamp' ] , inplace = True )
seasonal_plus_trend . drop_duplicates ( subset = [ 'timestamp' ] , inplace = True )
except TypeError :
all_anoms . drop_duplicates ( cols = [ 'timestamp' ] , inplace = True )
seasonal_plus_trend . drop_duplicates ( cols = [ 'timestamp' ] , inplace = True )
# - - If only _ last was set by the user ,
# create subset of the data that represent the most recent day
if only_last :
start_date = df . timestamp . iloc [ - 1 ] - datetime . timedelta ( days = 7 )
start_anoms = df . timestamp . iloc [ - 1 ] - datetime . timedelta ( days = 1 )
if gran is "day" :
breaks = 3 * 12
num_days_per_line = 7
else :
if only_last == 'day' :
breaks = 12
else :
start_date = df . timestamp . iloc [ - 1 ] - datetime . timedelta ( days = 2 )
# truncate to days
start_date = datetime . date ( start_date . year , start_date . month , start_date . day )
start_anoms = ( df . timestamp . iloc [ - 1 ] - datetime . timedelta ( hours = 1 ) )
breaks = 3
# subset the last days worth of data
x_subset_single_day = df [ df . timestamp > start_anoms ]
# When plotting anoms for the last day only
# we only show the previous weeks data
x_subset_week = df [ ( df . timestamp <= start_anoms ) & ( df . timestamp > start_date ) ]
if len ( all_anoms ) > 0 :
all_anoms = all_anoms [ all_anoms . timestamp >= x_subset_single_day . timestamp . iloc [ 0 ] ]
num_obs = len ( x_subset_single_day . value )
# Calculate number of anomalies as a percentage
anom_pct = ( len ( df . value ) / float ( num_obs ) ) * 100
if anom_pct == 0 :
return { "anoms" : None , "plot" : None }
# The original R implementation handles plotting here .
# Plotting is currently not implemented in this version .
# if plot :
# plot _ something ( )
all_anoms . index = all_anoms . timestamp
if e_value :
d = { 'timestamp' : all_anoms . timestamp , 'anoms' : all_anoms . value , 'expected_value' : seasonal_plus_trend [ seasonal_plus_trend . timestamp . isin ( all_anoms . timestamp ) ] . value }
else :
d = { 'timestamp' : all_anoms . timestamp , 'anoms' : all_anoms . value }
anoms = DataFrame ( d , index = d [ 'timestamp' ] . index )
return { 'anoms' : anoms , 'plot' : None } |
def get_trees(self, data, showerrors=False):  # -> list:
    """Return a list of trees with valid guesses.

    :param data: iterable of input elements to parse.
    :param showerrors: forwarded to the recursive parser.
    :raises ValueError: if ``data`` contains an element outside the alphabet.
    """
    # Validate every element against the grammar alphabet up front.
    if not all(check(self._productionset.alphabet, [x]) for x in data):
        # BUG FIX: was ``self.productionset`` (attribute does not exist),
        # which raised AttributeError instead of the intended ValueError.
        raise ValueError("Unknown element in {}, alphabet:{}".format(
            str(data), self._productionset.alphabet))
    result = self.__recursive_parser(self._productionset.initialsymbol,
                                     data,
                                     self._productionset.main_production,
                                     showerrors)
    finalresult = []
    # Keep only parses spanning the whole input, without duplicates.
    for eresult in result:
        if eresult.left == 0 and eresult.right == len(data) and eresult not in finalresult:
            finalresult.append(eresult)
    return finalresult
def _getGlobals(self, **kwargs):
    """Return the globals dictionary for the formula calculation."""
    # Whitelisted builtins exposed to formulas.
    # NOTE: this targets Python 2 — cmp/long/xrange are Py2 builtins.
    safe_builtins = (all, any, bool, chr, cmp, complex, divmod, enumerate,
                     float, format, frozenset, hex, int, len, list, long,
                     max, min, oct, ord, pow, range, reversed, round, str,
                     sum, tuple, xrange)
    globs = {fn.__name__: fn for fn in safe_builtins}
    globs["__builtins__"] = None
    globs["math"] = math
    # Caller-supplied keyword arguments take precedence.
    globs.update(kwargs)
    # Expose additional user-configured Python library members.
    for imp in self.getPythonImports():
        mod, func = imp["module"], imp["function"]
        member = self._getModuleMember(mod, func)
        if member is None:
            raise ImportError("Could not find member {} of module {}".format(func, mod))
        globs[func] = member
    return globs
def rand_email():
    """Random email.

    Usage Example::

        >>> rand_email()
        Z4Lljcbdw7m@npa.net
    """
    # Local part starts with a letter, followed by 4-14 alphanumerics.
    first_char = random.choice(string.ascii_letters)
    local_part = first_char + rand_str(string.ascii_letters + string.digits,
                                       random.randint(4, 14))
    domain = rand_str(string.ascii_lowercase, random.randint(2, 10))
    suffix = random.choice(_all_email_kinds)
    return "%s@%s%s" % (local_part, domain, suffix)
def delete_collection_node(self, **kwargs):  # noqa: E501
    """delete_collection_node  # noqa: E501

    Delete a collection of Node.  Synchronous by default; pass
    ``async_req=True`` to make an asynchronous HTTP request instead.

    >>> thread = api.delete_collection_node(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param bool include_uninitialized: include partially initialized resources.
    :param str pretty: if 'true', the output is pretty printed.
    :param str _continue: continuation token from a previous list call.
    :param str field_selector: restrict returned objects by their fields.
    :param str label_selector: restrict returned objects by their labels.
    :param int limit: maximum number of responses for a list call.
    :param str resource_version: minimum resource version for the result.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream change notifications instead of listing.
    :return: V1Status, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_collection_node_with_http_info(**kwargs)  # noqa: E501
    (data) = self.delete_collection_node_with_http_info(**kwargs)  # noqa: E501
    return data
def getMapScale(self, latitude, level, dpi=96):
    '''returns the map scale on the dpi of the screen'''
    # 0.0254 m per inch -> dots per meter for the given screen dpi.
    meters_per_inch = 0.0254
    dots_per_meter = dpi / meters_per_inch
    return self.getGroundResolution(latitude, level) * dots_per_meter
def request_system_arm(blink, network):
    """Arm system.

    :param blink: Blink instance.
    :param network: Sync module network id.
    """
    base = blink.urls.base_url
    target = "{}/network/{}/arm".format(base, network)
    return http_post(blink, target)
def close(self):
    """Flush data, write 28 bytes BGZF EOF marker, and close BGZF file.

    samtools looks for a magic EOF marker (an empty 28-byte BGZF block)
    and warns the BAM file may be truncated if it is missing.  bgzip
    writes this block, and so does this implementation.
    """
    if self._buffer:
        self.flush()
    handle = self._handle
    handle.write(_bgzf_eof)
    handle.flush()
    handle.close()
def __display_top(self, stat_display, stats):
    """Display the second line in the Curses interface.

    <QUICKLOOK> + CPU|PERCPU + <GPU> + MEM + SWAP + LOAD
    """
    self.init_column()
    self.new_line()
    # Init quicklook
    stat_display['quicklook'] = {'msgdict': []}
    # Dict for plugins width
    plugin_widths = {}
    for p in self._top:
        # NOTE(review): hasattr() on an argparse namespace is usually always
        # True; getattr() may have been intended here — left unchanged.
        plugin_widths[p] = self.get_stats_display_width(stat_display.get(p, 0)) if hasattr(self.args, 'disable_' + p) else 0
    # Width of all plugins
    stats_width = sum(itervalues(plugin_widths))
    # Number of plugins except quicklook
    stats_number = sum([int(stat_display[p]['msgdict'] != []) for p in self._top if not getattr(self.args, 'disable_' + p)])
    if not self.args.disable_quicklook:
        # Quick look is in the place!
        if self.args.full_quicklook:
            quicklook_width = self.screen.getmaxyx()[1] - (stats_width + 8 + stats_number * self.space_between_column)
        else:
            quicklook_width = min(self.screen.getmaxyx()[1] - (stats_width + 8 + stats_number * self.space_between_column), self._quicklook_max_width - 5)
        try:
            stat_display["quicklook"] = stats.get_plugin('quicklook').get_stats_display(max_width=quicklook_width, args=self.args)
        except AttributeError as e:
            logger.debug("Quicklook plugin not available (%s)" % e)
        else:
            plugin_widths['quicklook'] = self.get_stats_display_width(stat_display["quicklook"])
            stats_width = sum(itervalues(plugin_widths)) + 1
        self.space_between_column = 1
        self.display_plugin(stat_display["quicklook"])
        self.new_column()
    # Compute spaces between plugins
    # Note: Only one space between Quicklook and others
    plugin_display_optional = {}
    for p in self._top:
        plugin_display_optional[p] = True
    if stats_number > 1:
        self.space_between_column = max(1, int((self.screen.getmaxyx()[1] - stats_width) / (stats_number - 1)))
        for p in ['mem', 'cpu']:
            # No space? Remove optional stats
            if self.space_between_column < 3:
                plugin_display_optional[p] = False
                plugin_widths[p] = self.get_stats_display_width(stat_display[p], without_option=True) if hasattr(self.args, 'disable_' + p) else 0
                stats_width = sum(itervalues(plugin_widths)) + 1
                self.space_between_column = max(1, int((self.screen.getmaxyx()[1] - stats_width) / (stats_number - 1)))
    else:
        self.space_between_column = 0
    # Display CPU, MEM, SWAP and LOAD
    for p in self._top:
        if p == 'quicklook':
            continue
        if p in stat_display:
            self.display_plugin(stat_display[p], display_optional=plugin_display_optional[p])
        # BUG FIX: was ``p is not 'load'`` — identity comparison with a str
        # literal (SyntaxWarning since Python 3.8, interning-dependent).
        if p != 'load':
            # Skip last column
            self.new_column()
    # Space between column
    self.space_between_column = 3
    # Backup line position
    self.saved_line = self.next_line
def signedDistance(actor, maxradius=0.5, bounds=(0, 1, 0, 1, 0, 1), dims=(10, 10, 10)):
    """``vtkSignedDistance`` filter.

    :param float maxradius: how far out to propagate distance calculation
    :param list bounds: volume bounds.
    """
    sdf = vtk.vtkSignedDistance()
    sdf.SetInputData(actor.polydata(True))
    sdf.SetRadius(maxradius)
    sdf.SetBounds(bounds)
    sdf.SetDimensions(dims)
    sdf.Update()
    return Volume(sdf.GetOutput())
def determine_intent(self, utterance, num_results=1, include_tags=False, context_manager=None):
    """Given an utterance, provide a valid intent.

    Args:
        utterance(str): an ascii or unicode string representing natural language speech
        include_tags(list): includes the parsed tags (including position and confidence)
            as part of result
        context_manager(list): a context manager to provide context to the utterance
        num_results(int): a maximum number of results to be returned.

    Returns: A generator that yields dictionaries.
    """
    parser = Parser(self.tokenizer, self.tagger)
    parser.on('tagged_entities', lambda result: self.emit("tagged_entities", result))
    context = context_manager.get_context() if context_manager else []
    for parse_result in parser.parse(utterance, N=num_results, context=context):
        self.emit("parse_result", parse_result)
        # Build a context that excludes entities already used in this result.
        leftover_context = self.__get_unused_context(parse_result, context)
        best_intent, tags = self.__best_intent(parse_result, leftover_context)
        if best_intent and best_intent.get('confidence', 0.0) > 0:
            if include_tags:
                best_intent['__tags__'] = tags
            yield best_intent
def checkMgtKeyInUse(self, CorpNum, MgtKeyType, MgtKey):
    """Check whether a partner management key is already in use.

    args
        CorpNum : member business registration number
        MgtKeyType : key type, one of ['SELL', 'BUY', 'TRUSTEE']
        MgtKey : partner management key
    return
        True/False — whether the key is in use
    raise
        PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
    try:
        response = self._httpget('/Taxinvoice/' + MgtKeyType + "/" + MgtKey, CorpNum)
        return response.itemKey is not None and response.itemKey != ""
    except PopbillException as PE:
        # Error code -11000005 means "not found", i.e. the key is unused.
        if PE.code == -11000005:
            return False
        raise PE
def get_version():
    "returns the libdbus library version as a tuple of integers (major, minor, micro)."
    # Three C ints filled in by reference by libdbus.
    parts = [ct.c_int(), ct.c_int(), ct.c_int()]
    dbus.dbus_get_version(ct.byref(parts[0]), ct.byref(parts[1]), ct.byref(parts[2]))
    return tuple(p.value for p in parts)
def _required_attr ( self , attr , key ) :
"""Wrapper for getting required attributes .""" | assert isinstance ( attr , dict )
if key not in attr :
raise AttributeError ( "Required attribute {} not found." . format ( key ) )
return attr [ key ] |
def default_partfactory(part_number=None, content_length=None, content_type=None, content_md5=None):
    """Get default part factory.

    :param part_number: The part number. (Default: ``None``)
    :param content_length: The content length. (Default: ``None``)
    :param content_type: The HTTP Content-Type. (Default: ``None``)
    :param content_md5: The content MD5. (Default: ``None``)
    :returns: A 6-tuple of content length, part number, the request stream,
        content type, content MD5, and ``None`` (no extra tags).
    """
    return (content_length, part_number, request.stream,
            content_type, content_md5, None)
def outputjson(self, obj):
    """Serialize `obj` with JSON and output to the client"""
    # Header is sent first, matching the original call order.
    self.header('Content-Type', 'application/json')
    payload = json.dumps(obj).encode('ascii')
    self.outputdata(payload)
def make_node_dict(outer_list, sort="zone"):
    """Convert node data from nested-list to sorted dict."""
    # Flatten the nested list into a 1-indexed dict.
    flat = {}
    idx = 1
    for inner_list in outer_list:
        for node in inner_list:
            flat[idx] = node
            idx += 1
    if sort == "name":
        # sort by provider-name
        key_fn = lambda item: (item[1].cloud, item[1].name.lower())
    else:
        # sort by provider-zone-name
        key_fn = lambda item: (item[1].cloud, item[1].zone, item[1].name.lower())
    ordered = OrderedDict(sorted(flat.items(), key=key_fn))
    # Re-key the sorted nodes 1..n.
    return {i: node for i, node in enumerate(ordered.values(), start=1)}
async def on_data_error(self, exception):
    """Handle error."""
    # Log with traceback-style exc_info (no traceback object available).
    exc_info = (type(exception), exception, None)
    self.logger.error('Encountered error on socket.', exc_info=exc_info)
    await self.disconnect(expected=False)
def initialise_arrays(group, f):
    """Create EArrays for calibrated hits"""
    integer_nodes = ('floor', 'du')
    for node in ('pos_x', 'pos_y', 'pos_z', 'dir_x', 'dir_y',
                 'dir_z', 'du', 'floor', 't0'):
        # du/floor are small unsigned ints; everything else is float32.
        atom = U1_ATOM if node in integer_nodes else F4_ATOM
        f.create_earray(group, node, atom, (0,), filters=FILTERS)
def parse_plunge_bearing(plunge, bearing):
    """Parse plunge/bearing strings into consistent float measurements.

    Plunge angles returned by this function are always between 0 and 90.

    If no direction letter(s) is present, the plunge is assumed to be
    measured from the end specified by the bearing.  If a direction
    letter(s) is present, the bearing is switched to the opposite
    (180 degrees) end when the specified direction corresponds to the
    opposite end of the bearing.

    Parameters
    ----------
    plunge : string
        A plunge measurement.
    bearing : string
        A bearing measurement. May be in azimuth or quadrant format.

    Returns
    -------
    plunge, bearing : floats
        The plunge and bearing following the conventions outlined above.

    Examples
    --------
    >>> parse_plunge_bearing("30NW", 160)
    ... (30, 340)
    """
    azimuth = parse_azimuth(bearing)
    angle, letters = split_trailing_letters(plunge)
    # A direction letter pointing at the opposite end flips the bearing.
    if letters is not None and opposite_end(azimuth, letters):
        azimuth += 180
    # Negative plunges point out the other end of the line.
    if angle < 0:
        azimuth += 180
        angle = -angle
    # Normalize plunges steeper than vertical.
    if angle > 90:
        azimuth += 180
        angle = 180 - angle
    if azimuth > 360:
        azimuth -= 360
    return angle, azimuth
def value(self, name):
    """get value of a track at the current time"""
    track = self.tracks.get(name)
    return track.row_value(self.controller.row)
def cdx_limit(cdx_iter, limit):
    """limit cdx to at most `limit`."""
    def _take():
        # Mirrors the zip-based original, including pulling one element
        # past the limit from the underlying iterator.
        for count, cdx in enumerate(cdx_iter):
            if count >= limit:
                return
            yield cdx
    return _take()
def calibrate_counts(array, attributes, index):
    """Calibration for counts channels."""
    # Subtract the per-channel offset, then apply the per-channel scale.
    offset = np.float32(attributes["corrected_counts_offsets"][index])
    scale = np.float32(attributes["corrected_counts_scales"][index])
    return (array - offset) * scale
def get_derived_metric_tags(self, id, **kwargs):  # noqa: E501
    """Get all tags associated with a specific derived metric definition  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to make an
    asynchronous HTTP request instead.

    >>> thread = api.get_derived_metric_tags(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerTagsResponse, or the request thread when
        called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_derived_metric_tags_with_http_info(id, **kwargs)  # noqa: E501
    (data) = self.get_derived_metric_tags_with_http_info(id, **kwargs)  # noqa: E501
    return data
def _format_local_file(self):
    """When args.local_file empty on Windows, tries to map args.entity to a
    unc path.

    Updates args.local_file in-place without returning anything.
    """
    # Only applies to file entities on Windows (checks in original order).
    if self.type != 'file' or not self.entity or not is_win:
        return
    if self._file_exists():
        return
    self.args.local_file = self._to_unc_path(self.entity)
def _check_file(self):
    """Checks watched file modification time and permission changes."""
    # The editor widget may already have been destroyed by Qt.
    try:
        self.editor.toPlainText()
    except RuntimeError:
        self._timer.stop()
        return
    if not (self.editor and self.editor.file.path):
        return
    path = self.editor.file.path
    if not os.path.exists(path) and self._mtime:
        self._notify_deleted_file()
    else:
        mtime = os.path.getmtime(path)
        if mtime > self._mtime:
            self._mtime = mtime
            self._notify_change()
        # check for permission change
        writeable = os.access(path, os.W_OK)
        self.editor.setReadOnly(not writeable)
def write_file(self, filename, cart_coords=False):
    """Write the input string into a file

    Option: see __str__ method
    """
    with zopen(filename, "w") as output_file:
        output_file.write(self.to_string(cart_coords))
def shape(self):
    """Returns (rowCount, valueCount)"""
    snapshot = self.copy()
    # Fetch the dataset status from the remote endpoint.
    status = requests.get(snapshot.dataset_url).json()['status']
    return (status['rowCount'], status['valueCount'])
def emitCurrentChanged(self):
    """Emits the current schema changed signal for this combobox, provided \
    the signals aren't blocked."""
    if self.signalsBlocked():
        return
    schema = self.currentSchema()
    self.currentSchemaChanged.emit(schema)
    # The table signal carries the schema's model, or None when unset.
    self.currentTableChanged.emit(schema.model() if schema else None)
def data(self, ctx=None):
    """Returns a copy of this parameter on one context. Must have been
    initialized on this context before.

    Parameters
    ----------
    ctx : Context
        Desired context.

    Returns
    -------
    NDArray on ctx
    """
    result = self._check_and_get(self._data, ctx)
    if not self._rate:
        return result
    # Apply dropout when a rate is configured.
    return nd.Dropout(result, self._rate, self._mode, self._axes)
def find_tags(self, tokens, **kwargs):
    """Annotates the given list of tokens with part-of-speech tags.
    Returns a list of tokens, where each token is now a [word, tag]-list.
    """
    # ["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]]
    # Explicit kwargs fall back to this tagger's configured defaults.
    options = {
        "lexicon": kwargs.get("lexicon", self.lexicon or {}),
        "model": kwargs.get("model", self.model),
        "morphology": kwargs.get("morphology", self.morphology),
        "context": kwargs.get("context", self.context),
        "entities": kwargs.get("entities", self.entities),
        "language": kwargs.get("language", self.language),
        "default": kwargs.get("default", self.default),
        "map": kwargs.get("map", None),
    }
    # Delegates to the module-level find_tags function.
    return find_tags(tokens, **options)
def process_infile(f, fileStore):
    """Takes an array of files or a single file and imports into the jobstore.

    This returns a tuple or an array of tuples replacing all previous path
    strings. Toil does not preserve a file's original name upon import and
    so the tuple keeps track of this with the format: '(filepath, preserveThisFilename)'

    :param f: String or an Array. The smallest element must be a string,
              so: an array of strings, an array of arrays of strings... etc.
    :param fileStore: The filestore object that is called to load files into the filestore.
    :return: A tuple or an array of tuples.
    """
    # check if this has already been processed
    if isinstance(f, tuple):
        return f
    elif isinstance(f, list):
        return process_array_infile(f, fileStore)
    elif isinstance(f, basestring):
        return process_single_infile(f, fileStore)
    else:
        # BUG FIX: the original format string had no '{}' placeholder, so
        # the offending value was silently dropped from the error message.
        raise RuntimeError('Error processing file: {}'.format(str(f)))
def _check_rev_dict ( tree , ebt ) :
"""Verifyies that ` ebt ` is the inverse of the ` edgeBySourceId ` data member of ` tree `""" | ebs = defaultdict ( dict )
for edge in ebt . values ( ) :
source_id = edge [ '@source' ]
edge_id = edge [ '@id' ]
ebs [ source_id ] [ edge_id ] = edge
assert ebs == tree [ 'edgeBySourceId' ] |
def create_container(self, conf, detach, tty):
    """Create a single container"""
    # Resolve the image reference, preferring an explicit tag when set.
    name = conf.name
    image_name = conf.image_name
    if conf.tag is not NotSpecified:
        image_name = conf.image_name_with_tag
    container_name = conf.container_name
    # All container options are gathered (and the container created)
    # under the configured assumed role.
    with conf.assumed_role():
        env = dict(e.pair for e in conf.env)
        binds = conf.volumes.binds
        command = conf.formatted_command
        volume_names = conf.volumes.volume_names
        volumes_from = list(conf.volumes.share_with_names)
        no_tty_option = conf.no_tty_option
        ports = [p.container_port.port_pair for p in conf.ports]
        port_bindings = self.exposed(conf.ports)
        # Ensure every host-side bind-mount directory exists; collect
        # failures so they can be reported together.
        # NOTE: this loop rebinds ``name`` — later log lines use the last
        # bind path rather than conf.name.
        uncreated = []
        for name in binds:
            if not os.path.exists(name):
                log.info("Making volume for mounting\tvolume=%s", name)
                try:
                    os.makedirs(name)
                except OSError as error:
                    uncreated.append((name, error))
        if uncreated:
            raise BadOption("Failed to create some volumes on the host", uncreated=uncreated)
        log.info("Creating container from %s\timage=%s\tcontainer_name=%s\ttty=%s", image_name, name, container_name, tty)
        if binds:
            log.info("\tUsing volumes\tvolumes=%s", volume_names)
        if env:
            log.info("\tUsing environment\tenv=%s", sorted(env.keys()))
        if ports:
            log.info("\tUsing ports\tports=%s", ports)
        if port_bindings:
            log.info("\tPort bindings: %s", port_bindings)
        if volumes_from:
            log.info("\tVolumes from: %s", volumes_from)
        # Host-level runtime configuration (mounts, networking, cgroups,
        # security options) passed to the Docker API.
        host_config = conf.harpoon.docker_api.create_host_config(binds=binds, volumes_from=volumes_from, port_bindings=port_bindings, devices=conf.devices, lxc_conf=conf.lxc_conf, privileged=conf.privileged, restart_policy=conf.restart_policy, dns=conf.network.dns, dns_search=conf.network.dns_search, extra_hosts=conf.network.extra_hosts, network_mode=conf.network.network_mode, publish_all_ports=conf.network.publish_all_ports, cap_add=conf.cpu.cap_add, cap_drop=conf.cpu.cap_drop, mem_limit=conf.cpu.mem_limit, cpu_shares=conf.cpu.cpu_shares, cpuset_cpus=conf.cpu.cpuset_cpus, cpuset_mems=conf.cpu.cpuset_mems, memswap_limit=conf.cpu.memswap_limit, ulimits=conf.ulimits, read_only=conf.read_only_rootfs, log_config=conf.log_config, security_opt=conf.security_opt, **conf.other_options.host_config)
        # Create the container; tty is suppressed when the image opts out.
        container_id = conf.harpoon.docker_api.create_container(image_name, name=container_name, detach=detach, command=command, volumes=volume_names, environment=env, tty=False if no_tty_option else tty, user=conf.user, ports=ports, stdin_open=tty, hostname=conf.network.hostname, domainname=conf.network.domainname, network_disabled=conf.network.disabled, host_config=host_config, **conf.other_options.create)
        # Older API versions return a dict; surface errors and unwrap the id.
        if isinstance(container_id, dict):
            if "errorDetail" in container_id:
                raise BadImage("Failed to create container", image=name, error=container_id["errorDetail"])
            container_id = container_id["Id"]
        return container_id
def _unweave(target, advices, pointcut, ctx, depth, depth_predicate):
    """Recursively remove ``advices`` from ``target`` and its members.

    :param target: object, class or routine to un-intercept.
    :param advices: advices to remove.
    :param pointcut: optional predicate deciding whether ``target`` itself
        is processed; ``None`` means "always process".
    :param ctx: weaving context, or ``None``.
    :param depth: number of member levels still to descend into.
    :param depth_predicate: ``getmembers`` predicate selecting which members
        are visited at the next level.
    """
    # Remove advices from the target itself when the pointcut matches and
    # the target is actually intercepted.
    matches = pointcut is None or pointcut(target)
    if matches and is_intercepted(target):
        _remove_advices(target=target, advices=advices, ctx=ctx)
    if depth <= 0:
        return
    # For an object or a class, recurse on selected members using the base
    # context of ``ctx``.
    parent_ctx = base_ctx(ctx) if ctx is not None else None
    for _, member in getmembers(target, depth_predicate):
        _unweave(
            target=member,
            advices=advices,
            pointcut=pointcut,
            depth=depth - 1,
            depth_predicate=depth_predicate,
            ctx=parent_ctx,
        )
def GetMessages(self, formatter_mediator, event):
    """Determines the formatted message strings for an event object.

    Args:
      formatter_mediator (FormatterMediator): mediates the interactions
          between formatters and other components, such as storage and
          Windows EventLog resources.
      event (EventObject): event.

    Returns:
      tuple(str, str): formatted message string and short message string.

    Raises:
      WrongFormatter: if the event object cannot be formatted by the
          formatter.
    """
    if self.DATA_TYPE != event.data_type:
        raise errors.WrongFormatter(
            'Invalid event object - unsupported data type: {0:s}'.format(
                event.data_type))

    event_values = event.CopyToDict()

    number_of_volumes = event_values.get('number_of_volumes', 0)
    volume_serial_numbers = event_values.get('volume_serial_numbers', None)
    volume_device_paths = event_values.get('volume_device_paths', None)

    volumes_strings = []
    for volume_index in range(0, number_of_volumes):
        if volume_serial_numbers:
            # Render the integer serial number as 0x-prefixed hex here.
            # The previous code applied the ``0x{1:08X}`` format spec to the
            # placeholder unconditionally, which raised ValueError whenever
            # the serial number list was missing and the string 'UNKNOWN'
            # was substituted (format code 'X' requires an integer).
            volume_serial_number = '0x{0:08X}'.format(
                volume_serial_numbers[volume_index])
        else:
            volume_serial_number = 'UNKNOWN'

        if volume_device_paths:
            volume_device_path = volume_device_paths[volume_index]
        else:
            volume_device_path = 'UNKNOWN'

        volumes_strings.append((
            'volume: {0:d} [serial number: {1:s}, device path: '
            '{2:s}]').format(
                volume_index + 1, volume_serial_number, volume_device_path))

    if volumes_strings:
        event_values['volumes_string'] = ', '.join(volumes_strings)

    return self._ConditionalFormatMessages(event_values)
def get_Generic_parameters(tp, generic_supertype):
    """Retrieve from ``tp`` the type values corresponding to the parameters
    declared by ``generic_supertype``.

    ``tp`` must be a subclass of ``generic_supertype``. For example,
    ``get_Generic_parameters(tp, typing.Mapping)`` is equivalent to
    ``get_Mapping_key_value(tp)`` except for the error message. Note that
    ``get_Generic_itemtype(tp)`` is not exactly equal to
    ``get_Generic_parameters(tp, typing.Container)``, as that method
    additionally contains treatment for ``typing.Tuple`` and
    ``typing.Iterable``.

    :raises TypeError: if no proper parameters can be determined.
    """
    try:
        params = _select_Generic_superclass_parameters(tp, generic_supertype)
    except TypeError:
        params = None
    if params is not None:
        return tuple(params)
    raise TypeError("%s has no proper parameters defined by %s." % (
        type_str(tp), type_str(generic_supertype)))
def print(*args, **kwargs):
    """Drop-in replacement for the builtin ``print`` with ANSI styling.

    Builtin usage::

        print(value1, value2, sep=' ', end='\\n', file=sys.stdout)

    Package usage::

        print(value1, value2, sep=' ', end='\\n', file=sys.stdout,
              color=None, bg_color=None, text_format=None, log_type=None)

    :param args: Values (str) to print
    :param kwargs: Builtin print keywords plus ``color``, ``bg_color``,
        ``text_format`` and ``log_type`` (ERROR, INFO, WARNING, DEBUG)
    :return: Colored text to stdout (Console)
    """
    # Styling keywords are popped so the remaining kwargs stay valid for
    # the builtin print.
    color_name = kwargs.pop('color', None)
    bg_color = kwargs.pop('bg_color', None)
    log_type = kwargs.pop('log_type', None)
    txt_formats = kwargs.pop('text_format', [])

    # On Python 2 a single format may arrive as str or unicode.
    str_type = str if sys.version_info[0] == 3 else basestring
    if isinstance(txt_formats, str_type):
        txt_formats = [txt_formats]

    file_name = kwargs.get('file', sys.stdout)

    if not (color_name or bg_color or log_type):
        # Nothing to style: defer entirely to the builtin.
        __builtin__.print(*args, **kwargs)
        return

    # Hold back the real line ending until after the ANSI reset code.
    end_ = kwargs.pop('end', "\n")
    kwargs['end'] = ""

    if log_type:
        if log_type not in log_types.keys():
            print('Log type not valid!', log_type='error')
            sys.exit(1)
        # One colored tag per known log type, followed by a reset.
        tags = {'info': '[INF] ', 'warn': '[WRN] ', 'error': '[ERR] ',
                'hint': '[HNT] ', 'debug': '[DBG] '}
        if log_type in tags:
            __builtin__.print(
                '\033[{}m{}'.format(foreground_colors[log_types[log_type]],
                                    tags[log_type]),
                file=file_name, end='')
            __builtin__.print('\033[0m', file=file_name, end='')

    if color_name:
        if color_name not in foreground_colors.keys():
            print('Invalid color code!', log_type='error')
            sys.exit(1)
        __builtin__.print('\033[{}m'.format(foreground_colors[color_name]),
                          file=file_name, end='')

    if bg_color:
        if bg_color not in background_colors.keys():
            print('Invalid background color code!', log_type='error')
            sys.exit(1)
        __builtin__.print('\033[{}m'.format(background_colors[bg_color]),
                          file=file_name, end='')

    for txt_format in txt_formats:
        __builtin__.print('\033[{}m'.format(text_formats[txt_format]),
                          file=file_name, end='')

    # Print the values, then reset the styling and emit the real ending.
    __builtin__.print(*args, **kwargs)
    __builtin__.print('\033[0m', file=file_name, end=end_)
def unproxy(possible_proxy):
    '''Return the object a proxy ultimately refers to.

    Very similar to :func:`get_reference`, but works for both proxies and
    regular objects: a plain object is returned unchanged, while a proxy is
    unwrapped with ``get_reference``. Because the referenced object may
    itself be a proxy, unwrapping repeats until a regular (non-proxy)
    object is found.

    possible_proxy:
        object that might or might not be a proxy.
    '''
    obj = possible_proxy
    # Proxies can nest; keep unwrapping until a regular object appears.
    while isinstance(obj, ThreadLocalProxy):
        obj = ThreadLocalProxy.get_reference(obj)
    return obj
def get_culture_pair(self, code):
    """Return the ``(language, country)`` pair for a culture code.

    :param code: culture code such as ``en-US`` or ``en_US``.
    :return: tuple of the related language and country records.
    :raises ValueError: if ``code`` contains neither ``-`` nor ``_``.
    """
    if '_' not in code and '-' not in code:
        raise ValueError("%s is not a valid culture code" % code)
    # Culture codes are stored dash-separated; normalise underscores.
    normalized = code.replace('_', '-')
    culture = CultureCode.objects.get(code=normalized)
    return culture.language, culture.country
def _reset(self, server, **kwargs):
    """Reset this server object with new values given as params.

    - server: a dict representing the server, e.g. the API response.
    - kwargs: any meta fields such as cloud_manager and populated.

    Note: storage_devices and ip_addresses may be given in ``server`` as
    dicts or in ``kwargs`` as lists containing Storage and IPAddress
    objects.
    """
    if server:
        # Convert storage, ip_address dicts and tags (if present) into
        # their object representations before assignment.
        Server._handle_server_subobjs(server, kwargs.get('cloud_manager'))
        for key, value in server.items():
            object.__setattr__(self, key, value)
    for key, value in kwargs.items():
        object.__setattr__(self, key, value)
def mean_absolute_error(data, ground_truth, mask=None, normalized=False,
                        force_lower_is_better=True):
    r"""Return L1-distance between ``data`` and ``ground_truth``.

    See also `this Wikipedia article
    <https://en.wikipedia.org/wiki/Mean_absolute_error>`_.

    Parameters
    ----------
    data : `Tensor` or `array-like`
        Input data to compare to the ground truth. If not a `Tensor`, an
        unweighted tensor space will be assumed.
    ground_truth : `array-like`
        Reference to which ``data`` should be compared.
    mask : `array-like`, optional
        If given, ``data * mask`` is compared to ``ground_truth * mask``.
    normalized : bool, optional
        If ``True``, the output values are mapped to the interval
        :math:`[0, 1]` (see ``Notes``), otherwise the original mean
        absolute error is returned.
    force_lower_is_better : bool, optional
        Present only for compatibility with other figures of merit; lower
        values already correspond to better matches for this FOM.

    Returns
    -------
    mae : float
        FOM value, where a lower value means a better match.

    Notes
    -----
    The FOM evaluates

    .. math::
        \mathrm{MAE}(f, g) = \frac{\| f - g \|_1}{\| 1 \|_1},

    where :math:`\| 1 \|_1` is the volume of the domain of definition of
    the functions (for :math:`\mathbb{R}^n` type spaces, the number of
    elements :math:`n`). The normalized form is

    .. math::
        \mathrm{MAE_N}(f, g) =
            \frac{\| f - g \|_1}{\| f \|_1 + \| g \|_1},

    which takes values in :math:`[0, 1]`.
    """
    if not hasattr(data, 'space'):
        # Raw array-like input: wrap it in an unweighted tensor space.
        data = odl.vector(data)
    space = data.space
    ground_truth = space.element(ground_truth)
    l1_norm = odl.solvers.L1Norm(space)
    if mask is not None:
        data = data * mask
        ground_truth = ground_truth * mask
    fom = l1_norm(data - ground_truth)
    if normalized:
        denominator = l1_norm(data) + l1_norm(ground_truth)
    else:
        denominator = l1_norm(space.one())
    # ``force_lower_is_better`` is intentionally ignored: lower MAE already
    # means a better match.
    return fom / denominator
def get_gradebook_columns_by_gradebooks(self, gradebook_ids):
    """Gets the list of gradebook columns corresponding to a list of
    ``Gradebooks``.

    arg:    gradebook_ids (osid.id.IdList): list of gradebook ``Ids``
    return: (osid.grading.GradebookColumnList) - list of gradebook
            columns
    raise:  NullArgument - ``gradebook_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resources_by_bins
    columns = []
    for gradebook_id in gradebook_ids:
        columns.extend(self.get_gradebook_columns_by_gradebook(gradebook_id))
    return objects.GradebookColumnList(columns)
def get_metadata(self, filename):
    '''Fetch all available metadata for ``filename`` from the bucket.'''
    obj = self.bucket.Object(filename)
    # The ETag comes quoted; strip the surrounding quotes for the MD5 hex.
    checksum = 'md5:{0}'.format(obj.e_tag[1:-1])
    # Keep only the media type, dropping any ';charset=...' parameters.
    if obj.content_type:
        mime = obj.content_type.split(';', 1)[0]
    else:
        mime = None
    return {
        'checksum': checksum,
        'size': obj.content_length,
        'mime': mime,
        'modified': obj.last_modified,
    }
def _get_selected_ids(self):
    """Return the ids of the currently selected rows.

    :return: tuple of the first path component of each selected row, or an
        empty tuple when nothing is selected.
    :raises AttributeError: if the tree view is not in multiple-selection
        mode.
    """
    selection = self.get_selection()
    if selection.get_mode() != gtk.SELECTION_MULTIPLE:
        raise AttributeError('selected_ids only valid for select_multiple')
    model, selected_paths = selection.get_selected_rows()
    if not selected_paths:
        return ()
    # ``zip(*selected_paths)[0]`` breaks on Python 3, where ``zip`` returns
    # an iterator; build the tuple of first path components explicitly.
    return tuple(path[0] for path in selected_paths)
def patched_function(self, *args, **kwargs):
    """Step 3. Call the wrapped function, validate its result, return it."""
    outcome = self.function(*args, **kwargs)
    # Validation may raise; only a validated result is returned.
    self.validate(outcome)
    return outcome
def filePath(self, index):
    """Gets the file path of the item at the specified ``index``.

    :param index: item index - QModelIndex
    :return: str
    """
    # Map the proxy index back to the source model before resolving it.
    source_index = self._fs_model_proxy.mapToSource(index)
    return self._fs_model_source.filePath(source_index)
def merge(self, recarray, columns=None):
    """Merge another recarray with the same columns into this table.

    :Arguments:
       recarray
          numpy record array that describes the layout and initializes the
          table
    :Returns:
       n  number of inserted rows
    :Raises:
       Raises an exception if duplicate and incompatible data exist
       in the main table and the new one.
    """
    len_before = len(self)
    # CREATE TEMP TABLE in database
    tmparray = SQLarray(self.tmp_table_name, records=recarray, columns=columns,
                        connection=self.connection, is_tmp=True)
    len_tmp = len(tmparray)
    # insert into main table
    # NOTE(review): OR ABORT makes SQLite abort the whole INSERT on a
    # constraint violation -- presumably this is the "incompatible data"
    # exception mentioned above; confirm against the schema's constraints.
    SQL = """INSERT OR ABORT INTO __self__ SELECT * FROM %s""" % self.tmp_table_name
    self.sql(SQL)
    len_after = len(self)
    n_inserted = len_after - len_before
    # Sanity check: everything staged in the temp table must have landed.
    assert len_tmp == n_inserted
    del tmparray
    # also drops the tmp table (keep it at end for debugging)
    return n_inserted
def dataflagatom(chans, pol, d, sig, mode, conv):
    """Wrapper function to get shared memory as numpy array into pool.

    Assumes ``data_mem`` is a global mps.Array shared with the pool worker
    processes; it is re-wrapped here as a complex64 numpy view before being
    handed to the flagging routine.
    """
    data = numpyview(data_mem, 'complex64', datashape(d))
    # data = n.ma.masked_array(data, data == 0j)
    # ^ disabled: this causes massive overflagging on 14sep03 data
    return rtlib.dataflag(data, chans, pol, d, sig, mode, conv)
def exp_cov(prices, span=180, frequency=252):
    """Estimate the exponentially-weighted covariance matrix, which gives
    greater weight to more recent data.

    :param prices: adjusted closing prices of the asset, each row is a date
                   and each column is a ticker/id.
    :type prices: pd.DataFrame
    :param span: the span of the exponential weighting function, defaults
                 to 180
    :type span: int, optional
    :param frequency: number of time periods in a year, defaults to 252
                      (the number of trading days in a year)
    :type frequency: int, optional
    :return: annualised estimate of exponential covariance matrix
    :rtype: pd.DataFrame
    """
    if not isinstance(prices, pd.DataFrame):
        warnings.warn("prices are not in a dataframe", RuntimeWarning)
        prices = pd.DataFrame(prices)
    assets = prices.columns
    daily_returns = daily_price_returns(prices)
    n_assets = len(assets)
    # The matrix is symmetric, so only the upper triangle is computed and
    # mirrored into the lower triangle.
    S = np.zeros((n_assets, n_assets))
    for i in range(n_assets):
        for j in range(i, n_assets):
            pair_cov = _pair_exp_cov(
                daily_returns.iloc[:, i], daily_returns.iloc[:, j], span)
            S[i, j] = pair_cov
            S[j, i] = pair_cov
    # Annualise by the number of periods per year.
    return pd.DataFrame(S * frequency, columns=assets, index=assets)
def addlayer(self, name, srs, geomType):
    """Add a layer to the vector object.

    Parameters
    ----------
    name: str
        the layer name
    srs: int, str or :osgeo:class:`osr.SpatialReference`
        the spatial reference system. See :func:`spatialist.auxil.crsConvert`
        for options.
    geomType: int
        an OGR well-known binary data type.
        See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_.

    Returns
    -------
    None
    """
    self.vector.CreateLayer(name, srs, geomType)
    # Refresh the object's layer bookkeeping so it points at the new layer.
    self.init_layer()
def get_source_link(thing, source_location):
    """Get a link to the line number a module/class/function is defined at.

    Parameters
    ----------
    thing : function or class
        Thing to get the link for
    source_location : str
        GitHub url of the source code

    Returns
    -------
    str
        String with link to the file & line number, or empty string if it
        couldn't be found
    """
    try:
        lineno = get_line(thing)
        try:
            owner_module = inspect.getmodule(thing)
            assert owner_module is not None
        except (TypeError, AssertionError):
            # Fall back to the module of the wrapped getter -- presumably
            # ``thing`` is a property here; confirm against callers.
            owner_module = inspect.getmodule(thing.fget)
        # Turn the dotted module path into a repository-relative file path.
        thing_file = "/".join(owner_module.__name__.split("."))
        if owner_module.__file__.endswith("__init__.py"):
            thing_file += "/__init__.py"
        else:
            thing_file += ".py"
        return (
            f"Source: [{escape(thing_file)}]({source_location}/{thing_file}#L{lineno})"
            + "\n\n"
        )
    except Exception as e:
        # Best effort: on any failure the link is silently omitted.
        # print("Failed to find source file.")
        # print(e)
        # print(lineno)
        # print(thing)
        # print(owner_module)
        # print(thing_file)
        # print(source_location)
        pass
    return ""
def _create_mapping(self, func, switch):
    """Internal function to create the mapping.

    Applies ``func`` to every GeoJSON feature and groups feature ids by the
    key derived from the result. When ``switch == 'style'``, any
    MacroElement values are first rendered and replaced by their Javascript
    variable names.
    """
    mapping = {}
    for feature in self.data['features']:
        content = func(feature)
        if switch == 'style':
            for key, value in content.items():
                if isinstance(value, MacroElement):
                    # Make sure objects are rendered:
                    if value._parent is None:
                        value._parent = self.geojson_obj
                        value.render()
                    # Replace objects with their Javascript var names:
                    content[key] = "{{'" + value.get_name() + "'}}"
        # Features that produce the same content share one mapping entry.
        key = self._to_key(content)
        mapping.setdefault(key, []).append(self.get_feature_id(feature))
    self._set_default_key(mapping)
    return mapping
def make_mutant_tuples(example_protos, original_feature, index_to_mutate,
                       viz_params):
    """Return a list of `MutantFeatureValue`s and a list of mutant Examples.

    Args:
      example_protos: The examples to mutate.
      original_feature: A `OriginalFeatureList` that encapsulates the
        feature to mutate.
      index_to_mutate: The index of the int64_list or float_list to mutate.
      viz_params: A `VizParams` object that contains the UI state of the
        request.

    Returns:
      A list of `MutantFeatureValue`s and a list of mutant examples.
    """
    mutant_features = make_mutant_features(
        original_feature, index_to_mutate, viz_params)

    mutant_examples = []
    for proto in example_protos:
        for mutant in mutant_features:
            mutated = copy.deepcopy(proto)
            feature_name = mutant.original_feature.feature_name
            try:
                values = proto_value_for_feature(mutated, feature_name)
                if index_to_mutate is None:
                    replacement = mutant.mutant_value
                else:
                    replacement = list(values)
                    replacement[index_to_mutate] = mutant.mutant_value
                del values[:]
                values.extend(replacement)
            except (ValueError, IndexError):
                # If the mutant value can't be set, the unchanged copy is
                # still appended below. This is necessary to allow for
                # computation of global PD plots when not all examples have
                # the same number of feature values for a feature.
                pass
            mutant_examples.append(mutated)
    return mutant_features, mutant_examples
def handle_endtag(self, tag):
    """Handles every end tag like e.g. </p>."""
    stroke = self.stroke_text
    if tag in self.stroke_after_elements:
        if self.text.endswith(stroke):
            # The element was empty: there was no content between the tags,
            # so remove the stroke emitted for the opening tag instead of
            # doubling it.
            self.text = self.text[:-len(stroke)]
        else:
            # Strokes always start on their own line.
            if not self.text.endswith('\n'):
                self.text += '\n'
            self.text += stroke
    if tag == 'a':
        # Links become footnotes; reference them by number.
        self.text += '[{}]'.format(len(self.links))
    elif tag == 'br' and self.text and not self.text.endswith('\n'):
        # A break must actually break the line if it hasn't already.
        self.text += '\n'
    # Reset lasttag, otherwise the parser can get confused if the next
    # element is not wrapped in a new tag.
    if tag == self.lasttag:
        self.lasttag = None
def get_beamarea_deg2(self, ra, dec):
    """Calculate the area of the synthesized beam in square degrees.

    Parameters
    ----------
    ra, dec : float
        The sky coordinates at which the calculation is made.

    Returns
    -------
    area : float
        The beam area in square degrees.
    """
    # Beam area (deg^2) at the reference coordinates.
    area = abs(self.beam.a * self.beam.b * np.pi)
    if self.lat is not None:
        # Scale by the declination offset from the reference latitude.
        area /= np.cos(np.radians(dec - self.lat))
    return area
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download setuptools from a specified location and return its filename.

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # NOTE(review): urllib2 exists only on Python 2 -- this bootstrap
    # script predates Python 3.
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version, sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""", version, download_base, delay, url);
                from time import sleep;
                sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto, "wb");
            dst.write(data)
        finally:
            # Close whichever endpoints were actually opened.
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def getSamplingStrategy(self, serviceName):
    """Parameters:
     - serviceName
    """
    # Allocate a fresh sequence id and register a future that the response
    # handler will resolve when the reply arrives.
    self._seqid += 1
    future = concurrent.Future()
    self._reqs[self._seqid] = future
    self.send_getSamplingStrategy(serviceName)
    return future
def save(self, path):
    '''Save source video to file.

    Args:
        path (str): Filename to save to.

    Notes: Saves entire source video to file, not just currently selected
        frames.
    '''
    # IMPORTANT WARNING: saves entire source video
    self.clip.write_videofile(path, audio_fps=self.clip.audio.fps)
def run(self):
    """Index the document. Since ids are predictable,
    we won't index anything twice.
    """
    with self.input().open() as handle:
        body = json.loads(handle.read())
    # NOTE(review): connects to the default localhost Elasticsearch node.
    es = elasticsearch.Elasticsearch()
    # Reusing the document's own '_id' makes re-runs overwrite rather than
    # duplicate.
    id = body.get('_id')
    es.index(index='frontpage', doc_type='html', id=id, body=body)
def on(self, event, f=None):
    """Registers the function ``f`` to the event name ``event``.

    If ``f`` isn't provided, this method returns a function that takes
    ``f`` as a callback; in other words, you can use this method as a
    decorator, like so::

        @ee.on('data')
        def data_handler(data):
            print(data)

    In both the decorated and undecorated forms, the event handler is
    returned. The upshot of this is that you can call decorated handlers
    directly, as well as use them in remove_listener calls.
    """
    def decorate(handler):
        self._add_event_handler(event, handler, handler)
        return handler

    # Bare call (ee.on('data', handler)) or decorator (@ee.on('data')).
    return decorate if f is None else decorate(f)
def run(self):
    """Include a file as part of the content of this reST file.

    Resolves the directive's path argument relative to the including
    document, reads the file (honouring encoding and start-line/end-line
    options) and re-emits it through the Pygments code-block directive.
    """
    if not self.state.document.settings.file_insertion_enabled:
        raise self.warning('"%s" directive disabled.' % self.name)
    # Resolve the included path relative to the file containing the
    # directive.
    source = self.state_machine.input_lines.source(
        self.lineno - self.state_machine.input_offset - 1)
    source_dir = os.path.dirname(os.path.abspath(source))
    path = directives.path(self.arguments[0])
    path = os.path.normpath(os.path.join(source_dir, path))
    path = utils.relative_path(None, path)
    path = nodes.reprunicode(path)
    encoding = self.options.get(
        'encoding', self.state.document.settings.input_encoding)
    e_handler = self.state.document.settings.input_encoding_error_handler
    tab_width = self.options.get(
        'tab-width', self.state.document.settings.tab_width)
    try:
        # Register the file as a dependency so rebuilds pick up changes.
        self.state.document.settings.record_dependencies.add(path)
        include_file = io.FileInput(source_path=path, encoding=encoding,
                                    error_handler=e_handler)
    except UnicodeEncodeError as error:
        raise self.severe('Problems with "%s" directive path:\n'
                          'Cannot encode input file path "%s" '
                          '(wrong locale?).' % (self.name, SafeString(path)))
    except IOError as error:
        raise self.severe('Problems with "%s" directive path:\n%s.' %
                          (self.name, ErrorString(error)))
    startline = self.options.get('start-line', None)
    endline = self.options.get('end-line', None)
    try:
        if startline or (endline is not None):
            lines = include_file.readlines()
            rawtext = ''.join(lines[startline:endline])
        else:
            rawtext = include_file.read()
    except UnicodeError as error:
        raise self.severe('Problem with "%s" directive:\n%s' %
                          (self.name, ErrorString(error)))
    include_lines = statemachine.string2lines(rawtext, tab_width,
                                              convert_whitespace=True)
    # default lexer to 'text'
    lexer = self.options.get('lexer', 'text')
    self.options['source'] = path
    # Delegate rendering to the Pygments code-block directive.
    codeblock = Pygments(self.name,
                         [lexer],  # arguments
                         {},  # no options for this directive
                         include_lines,  # content
                         self.lineno,
                         self.content_offset,
                         self.block_text,
                         self.state,
                         self.state_machine)
    return codeblock.run()
def run_LDA(df):
    """Run LinearDiscriminantAnalysis on input dataframe (df) and return
    transformed data, scalings and explained variance by discriminants.

    Returns "NA" for the explained variance when scikit-learn cannot
    compute it.
    """
    # Feature matrix: every column after the first; labels from "Condition".
    X = df.iloc[:, 1:df.shape[1]].values
    y = df["Condition"].values
    # Calculate LDA
    sklearn_lda = LDA()
    X_lda_sklearn = sklearn_lda.fit_transform(X, y)
    try:
        exp_var = sklearn_lda.explained_variance_ratio_
    except AttributeError as ae:
        print("\n{}: explained variance cannot be computed.\nPlease check this GitHub PR:"
              " https://github.com/scikit-learn/scikit-learn/pull/6027".format(ae))
        return X_lda_sklearn, y, "NA"
    return X_lda_sklearn, y, exp_var
def set_connection_ip_list(addresses=None, grant_by_default=False,
                           server=_DEFAULT_SERVER):
    '''Set the IPGrant list for the SMTP virtual server.

    :param str addresses: A dictionary of IP + subnet pairs.
    :param bool grant_by_default: Whether the addresses should be a
        blacklist or whitelist.
    :param str server: The SMTP server name.

    :return: A boolean representing whether the change succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}"
    '''
    setting = 'IPGrant'
    formatted_addresses = list()
    # It's okay to accept an empty list for set_connection_ip_list,
    # since an empty list may be desirable.
    if not addresses:
        addresses = dict()
        _LOG.debug('Empty %s specified.', setting)
    # Convert addresses to the 'ip_address, subnet' format used by
    # IIsIPSecuritySetting.
    for address in addresses:
        formatted_addresses.append('{0}, {1}'.format(address.strip(),
                                                     addresses[address].strip()))
    current_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
    # Order is not important, so compare to the current addresses as
    # unordered sets.
    if set(formatted_addresses) == set(current_addresses):
        _LOG.debug('%s already contains the provided addresses.', setting)
        return True
    # First we should check GrantByDefault, and change it if necessary.
    current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting',
                                               'GrantByDefault', server)
    if grant_by_default != current_grant_by_default:
        _LOG.debug('Setting GrantByDefault to: %s', grant_by_default)
        _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault',
                         grant_by_default, server)
    _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server)
    # Verify by reading the list back and comparing as unordered sets.
    new_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
    ret = set(formatted_addresses) == set(new_addresses)
    if ret:
        _LOG.debug('%s configured successfully: %s', setting, formatted_addresses)
        return ret
    _LOG.error('Unable to configure %s with value: %s', setting,
               formatted_addresses)
    return ret
def console_get_height_rect(con: tcod.console.Console, x: int, y: int,
                            w: int, h: int, fmt: str) -> int:
    """Return the height of this text once word-wrapped into this rectangle.

    Returns:
        int: The number of lines of text once word-wrapped.

    .. deprecated:: 8.5
        Use :any:`Console.get_height_rect` instead.
    """
    wrapped_height = lib.TCOD_console_get_height_rect_fmt(
        _console(con), x, y, w, h, _fmt(fmt))
    return int(wrapped_height)
def _set_rightMargin ( self , value ) :
"""value will be an int or float .
Subclasses may override this method .""" | bounds = self . bounds
if bounds is None :
self . width = value
else :
xMin , yMin , xMax , yMax = bounds
self . width = xMax + value |
def qAx(mt, x, q):
    """This function evaluates the APV of a geometrically increasing
    annual annuity-due.

    mt: actuarial table; x: age; q: annual geometric growth rate.
    """
    growth = float(q)
    # Equivalent valuation rate: j = (i - q) / (1 + q).
    adjusted_rate = (mt.i - growth) / (1 + growth)
    adjusted_table = Actuarial(nt=mt.nt, i=adjusted_rate)
    return Ax(adjusted_table, x)
def wrap_response(resp, api_call):
    """Wrap the requests response in an `ApiResponse` object.

    :param object resp: response object provided by the `requests` library.
    :param object api_call: callable used to fetch adjacent result pages.
    :return: An `ApiResponse` object that wraps the content of the
        response, or the raw response when the body cannot be processed.
    :rtype: object, list or dict
    """
    try:
        js_resp = resp.json()
        if resp.ok:
            if "items" in js_resp:
                r = ApiListResponse(js_resp["items"])
            else:
                r = ApiDictResponse(js_resp)
            if "paging" in js_resp:
                cursors = js_resp.get("paging", {}).get("cursors", {})
                if "after" in cursors:
                    r.next = api_call(after=cursors["after"])
                if "before" in cursors:
                    # NOTE(review): kept passing ``after=`` here to preserve
                    # behavior, but this looks like it should use the
                    # "before" cursor keyword -- confirm against the API.
                    r.previous = api_call(after=cursors["before"])
        else:
            r = ApiDictResponse(js_resp)
            if "error" in js_resp:
                r.error = js_resp['error']
            elif "message" in js_resp:
                r.error = js_resp['message']
        # common to all
        r.status_code = resp.status_code
        r.headers = resp.headers
        return r
    except Exception:
        # The original bare ``except:`` also trapped SystemExit and
        # KeyboardInterrupt; narrowed to Exception while keeping the
        # "fall back to the raw response" behavior for non-JSON bodies.
        return resp
def windows(self, window_size=60, context_len=90, step=10):
    '''Walk through the sequence of interest in windows of window_size,
    evaluate free (unbound) pair probabilities.

    :param window_size: Window size in base pairs.
    :type window_size: int
    :param context_len: The number of bases of context to use when
                        analyzing each window.
    :type context_len: int
    :param step: The number of base pairs to move for each new window.
    :type step: int
    '''
    walked = _context_walk(self.template, window_size, context_len, step)
    self.walked = walked
    # Unzip the (start, end, score) triples into parallel tuples.
    self.core_starts, self.core_ends, self.scores = zip(*walked)
    return walked
def reference_text(ref):
    '''Convert a single reference to plain text format.

    Parameters
    ----------
    ref : dict
        Information about a single reference.  Must contain a 'type' key
        with one of 'unpublished', 'article', 'incollection', 'techreport'
        or 'misc', plus the fields that type requires.

    Returns
    -------
    str
        The formatted, line-wrapped reference.

    Raises
    ------
    RuntimeError
        If ``ref['type']`` is not one of the handled types.
    '''
    ref_wrap = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 8)
    s = ''
    if ref['type'] == 'unpublished':
        s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
        s += ref_wrap.fill(ref['title']) + '\n'
        s += ref_wrap.fill(ref['note']) + '\n'
    elif ref['type'] == 'article':
        s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
        s += ref_wrap.fill(ref['title']) + '\n'
        s += '{}, {}, {} ({})'.format(ref['journal'], ref['volume'], ref['page'], ref['year'])
        s += '\n' + ref['doi']
    elif ref['type'] == 'incollection':
        # BUG FIX: a leading '\n' passed *inside* TextWrapper.fill() is
        # normalized away, so the original concatenated fields with no
        # separator at all.  Join with explicit newlines outside fill().
        s += ref_wrap.fill(', '.join(ref['authors']))
        s += '\n' + ref_wrap.fill(ref['title'])
        s += '\n' + ref_wrap.fill('in \'{}\''.format(ref['booktitle']))
        if 'editors' in ref:
            s += '\n' + ref_wrap.fill('ed. ' + ', '.join(ref['editors']))
        if 'series' in ref:
            s += '\n{}, {}, {} ({})'.format(ref['series'], ref['volume'], ref['page'], ref['year'])
        if 'doi' in ref:
            s += '\n' + ref['doi']
    elif ref['type'] == 'techreport':
        # Same separator fix as the 'incollection' branch above.
        s += ref_wrap.fill(', '.join(ref['authors']))
        s += '\n' + ref_wrap.fill(ref['title'])
        s += '\n\'{}\''.format(ref['institution'])
        s += '\nTechnical Report {}'.format(ref['number'])
        s += '\n{}'.format(ref['year'])
        if 'doi' in ref:
            s += '\n' + ref['doi']
    elif ref['type'] == 'misc':
        s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
        s += ref_wrap.fill(ref['title'])
        if 'note' in ref:
            s += '\n' + ref['note']
        if 'doi' in ref:
            s += '\n' + ref['doi']
    else:
        raise RuntimeError('Cannot handle reference type {}'.format(ref['type']))
    return s
def get_data(self, file_id):
    """Acquire the data from the table identified by the id.

    The file is read only once; consecutive calls to this method return
    the cached collection.

    :param file_id: identifier for the table
    :return: all the values from the table
    """
    try:
        return self._file_values[file_id]
    except KeyError:
        filename = 'cwr_%s.csv' % file_id
        values = self._reader.read_csv_file(filename)
        self._file_values[file_id] = values
        return values
def check_git():
    """Check that the ``git`` command is available on the current PATH.

    Runs ``git --version`` with all output discarded.

    Raises
    ------
    RuntimeError
        If ``git`` cannot be found or exits with a non-zero status.
    """
    try:
        with open(os.devnull, "wb") as devnull:
            subprocess.check_call(["git", "--version"], stdout=devnull, stderr=devnull)
    except (subprocess.CalledProcessError, OSError) as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; chain the original error for debuggability.
        raise RuntimeError("Please make sure git is installed and on your path.") from exc
def rot_rads_v2(vec_a, rads):
    """Rotate vector by angle in radians.

    :param vec_a: 2-D vector with ``x`` and ``y`` attributes.
    :param rads: rotation angle in radians.
    :return: a new ``Vec2`` holding the rotated coordinates.
    """
    cos_r = math.cos(rads)
    sin_r = math.sin(rads)
    rotated_x = vec_a.x * cos_r - vec_a.y * sin_r
    rotated_y = vec_a.x * sin_r + vec_a.y * cos_r
    return Vec2(rotated_x, rotated_y)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.