signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def announcement_view(request, announcement_pk):
    """Display a single manager announcement and handle its pin form."""
    announce = get_object_or_404(Announcement, pk=announcement_pk)
    profile = UserProfile.objects.get(user=request.user)
    # Bind the form only when the pin button was actually submitted.
    form_data = request.POST if "pin" in request.POST else None
    pin_form = PinForm(form_data, instance=announce)
    # Only the announcement's incumbent or a superuser may edit it.
    can_edit = announce.incumbent == profile or request.user.is_superuser
    if pin_form.is_valid():
        pin_form.save()
        return HttpResponseRedirect(
            reverse('managers:view_announcement',
                    kwargs={"announcement_pk": announcement_pk}),
        )
    return render_to_response(
        'view_announcement.html',
        {
            'page_name': "View Announcement",
            'pin_form': pin_form,
            'can_edit': can_edit,
            'announcement': announce,
        },
        context_instance=RequestContext(request),
    )
def to_global(s):
    """Format a variable name as a lower-camel-case global name.

    A leading ``GPSTime`` prefix is normalised to ``Gps``, snake_case
    segments are capitalised and joined, and the first character is
    lower-cased.

    :param s: variable name to convert.
    :return: the converted name; an empty string is returned unchanged.
    """
    if not s:
        # Guard: the original raised IndexError on an empty string.
        return s
    if s.startswith('GPSTime'):
        s = 'Gps' + s[3:]
    if '_' in s:
        s = "".join(part.capitalize() for part in s.split("_"))
    return s[0].lower() + s[1:]
def kwargs_to_string(kwargs):
    """Render kwargs as command-line option text.

    :param kwargs: kwargs from a function call (mapping of option name to
        value).
    :return: '' if no kwargs were given, otherwise a string containing
        ' -key value' for each item (note the leading space).
    """
    # str.join avoids the quadratic string concatenation of the
    # original ``outstr +=`` loop; iterating items() reads each value once.
    return ''.join(' -{} {}'.format(key, value)
                   for key, value in kwargs.items())
def rst_blocks(script_blocks, output_blocks, file_conf, gallery_conf):
    """Generate the rst string containing the script prose, code and output.

    Parameters
    ----------
    script_blocks : list
        (label, content, line_number) tuples, label being 'text' or 'code'.
    output_blocks : list
        List of strings where each element is the restructured text
        representation of the output of each block.
    file_conf : dict
        File-specific settings given in source file comments as:
        ``# sphinx_gallery_<name> = <value>``.
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery.

    Returns
    -------
    str
        rst notebook.
    """
    # A simple example has two blocks: one for the example
    # introduction/explanation and one for the code.
    notebook_like = len(script_blocks) > 2
    example_rst = u""  # there can be unicode content
    for (label, content, lineno), code_output in zip(script_blocks,
                                                     output_blocks):
        if label != 'code':
            # Text block: terminate with a blank-line separator.
            example_rst += content + ('\n' if content.endswith('\n')
                                      else '\n\n')
            continue
        show_lineno = file_conf.get(
            'line_numbers', gallery_conf.get('line_numbers', False))
        code_rst = codestr2rst(content,
                               lang=gallery_conf['lang'],
                               lineno=lineno if show_lineno else None) + '\n'
        if notebook_like:
            example_rst += code_rst + code_output
        else:
            example_rst += code_output
            if 'sphx-glr-script-out' in code_output:
                # Add some vertical space after output
                example_rst += "\n\n|\n\n"
            example_rst += code_rst
    return example_rst
def get_rna_counts(rna_file, transcript_name):
    """Get coverage for a given RNA BAM file, return read counts.

    :param rna_file: path to an RNA-Seq BAM file.
    :param transcript_name: transcript whose counts should be extracted.
    :return: dict mapping 1-based position -> read count.
    :raises OSError: if *rna_file* does not exist.
    :raises ribocore.RNACountsError: if bedtools fails.
    """
    # check if the RNA file exists
    if not os.path.exists(rna_file):
        msg = 'RNA-Seq BAM file "{}" does not exist'.format(rna_file)
        logging.error(msg)
        raise OSError(msg)
    rna_counts = {}
    cov_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        try:
            subprocess.check_call(
                ['bedtools', 'genomecov', '-ibam', rna_file, '-bg'],
                stdout=cov_file)
        except subprocess.CalledProcessError as e:  # needs testing
            raise ribocore.RNACountsError(
                'Could not generate coverage for RNA BAM file. \n{}\n'
                .format(e))
        # bedgraph output columns: chrom, start (0-based), end, count.
        # NOTE(review): only the start coordinate of each interval is
        # recorded, as in the original implementation.
        with open(cov_file.name) as coverage:
            for line in coverage:
                fields = line.split()
                if fields[0] == transcript_name:
                    # store as a 1-based position
                    rna_counts[int(fields[1]) + 1] = int(fields[3])
    finally:
        # Always remove the temp file (the original leaked it when
        # bedtools failed, and never closed the reading handle).
        cov_file.close()
        os.unlink(cov_file.name)
    return rna_counts
def disable_hidden_api_blacklist(self):
    """If necessary and possible, disables hidden api blacklist."""
    adb = self._ad.adb
    codename = adb.getprop('ro.build.version.codename')
    sdk = int(adb.getprop('ro.build.version.sdk'))
    # Check the codename in addition to sdk because P builds in
    # development report sdk 27 but still enforce the blacklist.
    runs_blacklist = sdk >= 28 or codename == 'P'
    if self._ad.is_rootable and runs_blacklist:
        adb.shell(
            'settings put global hidden_api_blacklist_exemptions "*"')
def rebuild_token_map(self, partitioner, token_map):
    """Rebuild our view of the topology from fresh rows from the
    system topology tables.

    For internal use only.
    """
    self.partitioner = partitioner
    # Select the token implementation matching the partitioner class name.
    if partitioner.endswith('RandomPartitioner'):
        token_class = MD5Token
    elif partitioner.endswith('Murmur3Partitioner'):
        token_class = Murmur3Token
    elif partitioner.endswith('ByteOrderedPartitioner'):
        token_class = BytesToken
    else:
        # Unknown partitioner: the tokens cannot be interpreted.
        self.token_map = None
        return
    owners = {}
    ring = []
    for host, token_strings in six.iteritems(token_map):
        for raw_token in token_strings:
            parsed = token_class.from_string(raw_token)
            ring.append(parsed)
            owners[parsed] = host
    self.token_map = TokenMap(token_class, owners, sorted(ring), self)
def check(ctx, repository, config):
    """Check commits."""
    # Attach the repository wrapper to the command context so that
    # subcommands can reach it (presumably a click context — verify).
    ctx.obj = Repo(
        repository=repository,
        config=config,
    )
def as_list(func):
    """A decorator used to return a JSON response of a list of model
    objects. It expects the decorated function to return a list of model
    instances. It then converts the instances to dicts and serializes them
    into a json response.

    Examples:
        >>> @app.route('/api')
        ... @as_list
        ... def list_customers():
        ...     return Customer.all()
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        # Anything already shaped as a response passes through untouched.
        if isinstance(result, Response):
            return result
        params = _serializable_params(request.args, check_groupby=True)
        return as_json_list(result, **params)
    return wrapper
def log(level, msg, *args, **kwargs):
    """Logs 'msg % args' at absl logging level 'level'.

    If no args are given just print msg, ignoring any interpolation
    specifiers.

    Args:
      level: int, the absl logging level at which to log the message
          (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose
          logging level constants are also supported, callers should prefer
          explicit logging.vlog() calls for such purpose.
      msg: str, the message to be logged.
      **args: The args to be substituted into the msg.
      **kwargs: May contain exc_info to add exception traceback to message.
    """
    if level > converter.ABSL_DEBUG:
        # Levels above ABSL_DEBUG act like vlog: verbosity 1 is equivalent
        # to DEBUG, higher verbosities map below it. (Users should prefer
        # logging.vlog for these.)
        standard_level = converter.STANDARD_DEBUG - (level - 1)
    else:
        # Clamp to the most severe absl level before converting.
        clamped = max(level, converter.ABSL_FATAL)
        standard_level = converter.absl_to_standard(clamped)
    _absl_logger.log(standard_level, msg, *args, **kwargs)
def envs(backend=None, sources=False):
    '''Return the available fileserver environments. If no backend is
    provided, then the environments for all configured backends will be
    returned.

    backend
        Narrow fileserver backends to a subset of the enabled ones.

        .. versionchanged:: 2015.5.0
            If all passed backends start with a minus sign (``-``), then
            these backends will be excluded from the enabled backends.
            However, if there is a mix of backends with and without a minus
            sign (ex: ``backend=-roots,git``) then the ones starting with a
            minus sign will be disregarded.

            Additionally, fileserver backends can now be passed as a
            comma-separated list. In earlier versions, they needed to be
            passed as a python list (ex: ``backend="['roots', 'git']"``)

    CLI Example:

    .. code-block:: bash

        salt-run fileserver.envs
        salt-run fileserver.envs backend=roots,git
        salt-run fileserver.envs git
    '''
    fileserver = salt.fileserver.Fileserver(__opts__)
    environments = fileserver.envs(back=backend, sources=sources)
    return sorted(environments)
def all(self):
    """Get all collaborators.

    Returns:
        List[str]: Collaborators.
    """
    # Anyone with an owner/user role, or a pending "add" share request,
    # counts as a collaborator.
    shared = (RoleValue.Owner, RoleValue.User, ShareRequestValue.Add)
    return [email
            for email, action in self._collaborators.items()
            if action in shared]
def to_dict(mapreduce_yaml):
    """Converts a MapReduceYaml file into a JSON-encodable dictionary.

    For use in user-visible UI and internal methods for interfacing with
    user code (like param validation).

    Args:
      mapreduce_yaml: The Python representation of the mapreduce.yaml
          document.

    Returns:
      A list of configuration dictionaries, one per mapreduce entry.
    """

    def _param_defaults(params):
        """Map each param's name to its default (or explicit value)."""
        return dict((param.name, param.default or param.value)
                    for param in params)

    all_configs = []
    for config in mapreduce_yaml.mapreduce:
        out = {
            "name": config.name,
            "mapper_input_reader": config.mapper.input_reader,
            "mapper_handler": config.mapper.handler,
        }
        if config.mapper.params_validator:
            out["mapper_params_validator"] = config.mapper.params_validator
        if config.mapper.params:
            # Previously this loop was duplicated for mapper and job params.
            out["mapper_params"] = _param_defaults(config.mapper.params)
        if config.params:
            out["params"] = _param_defaults(config.params)
        if config.mapper.output_writer:
            out["mapper_output_writer"] = config.mapper.output_writer
        all_configs.append(out)
    return all_configs
def create_function_from_response_pdu(resp_pdu, req_pdu=None):
    """Parse response PDU and return instance of :class:`ModbusFunction` or
    raise error.

    :param resp_pdu: PDU of response.
    :param req_pdu: Request PDU; some functions require more info than in
        response PDU in order to create instance. Default is None.
    :return: Number or list with response data.
    """
    function_code = pdu_to_function_code_or_raise_error(resp_pdu)
    function = function_code_to_function_map[function_code]
    # inspect.getargspec() was deprecated and removed in Python 3.11;
    # inspect.signature() provides the same "does this factory accept a
    # req_pdu argument?" check.
    accepts_req_pdu = 'req_pdu' in inspect.signature(
        function.create_from_response_pdu).parameters
    if req_pdu is not None and accepts_req_pdu:
        return function.create_from_response_pdu(resp_pdu, req_pdu)
    return function.create_from_response_pdu(resp_pdu)
def get_storage_key(self, *args):
    """Return the redis key where to store the index for the given
    "value" (``args``).

    For this index, we store all PKs having for a field in the same
    sorted-set. Key has this form:
        model-name:field-name:sub-field-name:index-key-name
    The ':sub-field-name' part is repeated for each entry in *args that
    is not the final value.

    Parameters
    ----------
    args: tuple
        All the "values" to take into account to get the storage key.
        The last entry, the final value, is not used.

    Returns
    -------
    str
        The redis key to use.
    """
    sub_fields = list(args)
    sub_fields.pop()  # final value, not needed for the storage key
    parts = [self.model._name, self.field.name, *sub_fields]
    if self.prefix:
        parts.append(self.prefix)
    if self.key:
        parts.append(self.key)
    return self.field.make_key(*parts)
def normalize_array(value):
    """5.1.1.4-c.5:
    Arrays shall be rendered by:
      1. generating the normalized values of its elements,
      2. joining these elements with single underscores (_) into a single
         character sequence, and
      3. using the first eight characters of the md5 checksum of this
         character sequence
    ...which can be semi-formalized as:
        subsequence(md5(join(normalized_values, '_'), 0, 8)
    """
    joined = "_".join(normalize(element) for element in value)
    digest = hashlib.md5(joined.encode('utf-8')).hexdigest()
    # Only the first eight hex characters are used.
    return digest[:8]
def remove_year(name):
    """Removes year from input.

    :param name: path to edit
    :return: inputs with no years
    """
    # Scan every 4-character window; when one is all digits, cut it out
    # and rescan the shortened string from the beginning (iterative form
    # of the original tail recursion).
    removed = True
    while removed:
        removed = False
        for start in range(len(name) - 3):
            if name[start:start + 4].isdigit():
                name = name[:start] + name[start + 4:]
                removed = True
                break
    return name
def set_const(const, val):
    '''Convenience wrapper to reliably set the value of a constant from
    outside of package scope'''
    # Look up the current value so the new value can be coerced to its type.
    try:
        cur = getattr(_c, const)
    except AttributeError:
        raise FSQEnvError(errno.ENOENT, u'no such constant:'
                          u' {0}'.format(const))
    except TypeError:
        # getattr raises TypeError when ``const`` is not a string name.
        raise TypeError(errno.EINVAL, u'const name must be a string or'
                        u' unicode object, not:'
                        u' {0}'.format(const.__class__.__name__))
    should_be = cur.__class__
    try:
        if not isinstance(val, should_be):
            # NOTE(review): ``unicode`` is the Python 2 builtin — this
            # block appears to be Python 2 code.
            if should_be is unicode or cur is None:
                val = coerce_unicode(val, _c.FSQ_CHARSET)
            elif should_be is int and const.endswith('MODE'):
                # *MODE constants are parsed as octal file modes.
                val = int(val, 8)
            elif isinstance(cur, numbers.Integral):
                val = int(val)
            else:
                # Validate by attempting conversion; result is discarded.
                should_be(val)
    except (TypeError, ValueError,):
        raise FSQEnvError(errno.EINVAL, u'invalid type for constant {0},'
                          u' should be {1}, not:'
                          u' {2}'.format(const, should_be.__name__,
                                         val.__class__.__name__))
    setattr(_c, const, val)
    return val
def read_dist_egginfo_json(dist, filename=DEFAULT_JSON):
    """Safely get a json within an egginfo from a distribution."""
    # Use the given package's distribution to acquire the json file.
    if not dist.has_metadata(filename):
        logger.debug("no '%s' for '%s'", filename, dist)
        return
    try:
        raw = dist.get_metadata(filename)
    except IOError:
        logger.error("I/O error on reading of '%s' for '%s'.",
                     filename, dist)
        return
    try:
        parsed = json.loads(raw)
    except (TypeError, ValueError):
        logger.error("the '%s' found in '%s' is not a valid json.",
                     filename, dist)
        return
    logger.debug("found '%s' for '%s'.", filename, dist)
    return parsed
def calc_qigz2_v1(self):
    """Aggregate the amount of the second interflow component released
    by all HRUs.

    Required control parameters: NHRU, FHRU
    Required flux sequence: QIB2
    Calculated state sequence: QIGZ2

    Basic equation:
        :math:`QIGZ2 = \\Sigma(FHRU \\cdot QIB2)`

    Example:
        >>> from hydpy.models.lland import *
        >>> parameterstep()
        >>> nhru(2)
        >>> fhru(0.75, 0.25)
        >>> fluxes.qib2 = 1.0, 5.0
        >>> model.calc_qigz2_v1()
        >>> states.qigz2
        qigz2(2.0)
    """
    control = self.parameters.control.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    states = self.sequences.states.fastaccess
    # Weighted sum of the HRU interflow contributions.
    states.qigz2 = 0.
    for k in range(control.nhru):
        states.qigz2 += control.fhru[k] * fluxes.qib2[k]
def _filter_satisfied ( self , update_setd = False ) :
"""This method extracts a model provided by the previous call to a SAT
oracle and iterates over all soft clauses checking if each of is
satisfied by the model . Satisfied clauses are marked accordingly
while the literals of the unsatisfied clauses are kept in a list
called ` ` setd ` ` , which is then used to refine the correction set
( see : func : ` _ compute ` , and : func : ` do _ cld _ check ` ) .
Optional Boolean parameter ` ` update _ setd ` ` enforces the method to
update variable ` ` self . setd ` ` . If this parameter is set to
` ` False ` ` , the method only updates the list of satisfied clauses ,
which is an under - approximation of a * maximal satisfiable subset *
( MSS ) .
: param update _ setd : whether or not to update setd
: type update _ setd : bool"""
|
model = self . oracle . get_model ( )
setd = set ( )
for i , cl in enumerate ( self . soft ) :
if not self . satc [ i ] :
if self . _satisfied ( cl , model ) :
self . satc [ i ] = True
self . ss_assumps . append ( self . sels [ i ] )
else :
setd = setd . union ( set ( cl ) )
if update_setd :
self . setd = list ( setd )
|
def make_hashcode(uuid, filepath, file_event):
    """Generate a SHA1 based on the given arguments.

    :param uuid: perceval uuid of the item
    :param filepath: path of the corresponding file
    :param file_event: commit file event
    :returns: a SHA1 hash code
    """
    # Join with ':' so the digest depends on argument order and boundaries.
    payload = ':'.join((uuid, filepath, file_event)).encode('utf-8')
    return hashlib.sha1(payload).hexdigest()
def get_fields_from_model(self, model, fields):
    """Iterate over given <field> names (in "orm query" notation) and find
    the actual field given the initial <model>.

    If <field> is a tuple of the format ('field_name', 'Verbose name'),
    overwrite the field's verbose name with the given name for display
    purposes.
    """
    model_fields = {}
    for entry in fields:
        if isinstance(entry, tuple) and len(entry) == 2:
            # Explicit ('name', 'Verbose name') pair: use it as given.
            name, verbose_name = entry
        else:
            name = entry
            try:
                resolved = get_fields_from_path(model, name)[-1]
                verbose_name = resolved.verbose_name
            except (FieldDoesNotExist, IndexError, TypeError) as e:
                logger.warn(
                    "AdvancedFilterForm: skip invalid field " "- %s", e)
                continue
        model_fields[name] = verbose_name
    return model_fields
def pivot(self, speed, durationS=-1.0):
    """pivot() controls the pivot speed of the RedBot. The values of the
    pivot function inputs range from -255:255, with -255 indicating a full
    speed counter-clockwise rotation. 255 indicates a full speed clockwise
    rotation.
    """
    magnitude = min(abs(speed), 255)  # clamp to the motor range
    if speed < 0:
        self.left_fwd(magnitude)
        self.right_rev(magnitude)
    else:
        self.left_rev(magnitude)
        self.right_fwd(magnitude)
    if durationS > 0:
        # Timed pivot: rotate for the given duration, then stop both
        # motors; a non-positive duration leaves the motors running.
        self.board.sleep(durationS)
        self.left_stop()
        self.right_stop()
def get_orientation_degrees(self):
    """Returns a dictionary object to represent the current orientation
    in degrees, 0 to 360, using the aircraft principal axes of
    pitch, roll and yaw.
    """
    orientation = self.get_orientation_radians()
    # math.degrees yields -180..+180; shift negatives into 0..360.
    # NOTE: the dict returned by get_orientation_radians is updated in
    # place, matching the original behaviour.
    for axis, radians_value in orientation.items():
        degrees_value = math.degrees(radians_value)
        if degrees_value < 0:
            degrees_value += 360
        orientation[axis] = degrees_value
    return orientation
def _set_port_channel(self, v, load=False):
    """Setter method for port_channel, mapped from YANG variable
    /interface/port_channel (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_port_channel is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_port_channel() directly.

    YANG Description: The list of port-channels in the managed device.
    Each entry represents a port-channel.
    """
    # NOTE(review): machine-generated pyangbind setter — do not hand-edit
    # the YANGDynClass arguments below.
    # Coerce values carrying a unified-type hint before validation.
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    try :
        # Wrap the value in the generated list type; a TypeError/ValueError
        # here means the value is not compatible with the YANG list.
        t = YANGDynClass ( v , base = YANGListType ( "name" , port_channel . port_channel , yang_name = "port-channel" , rest_name = "Port-channel" , parent = self , is_container = 'list' , user_ordered = True , path_helper = self . _path_helper , yang_keys = 'name' , extensions = { u'tailf-common' : { u'info' : u'The list of port-channels.' , u'cli-no-key-completion' : None , u'alt-name' : u'Port-channel' , u'sort-priority' : u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL' , u'cli-suppress-show-path' : None , u'cli-suppress-list-no' : None , u'cli-custom-range-actionpoint' : u'NsmRangeCliActionpoint' , u'cli-custom-range-enumerator' : u'NsmRangeCliActionpoint' , u'cli-suppress-key-abbreviation' : None , u'cli-no-match-completion' : None , u'cli-full-command' : None , u'callpoint' : u'interface_po' } } ) , is_container = 'list' , yang_name = "port-channel" , rest_name = "Port-channel" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'The list of port-channels.' , u'cli-no-key-completion' : None , u'alt-name' : u'Port-channel' , u'sort-priority' : u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL' , u'cli-suppress-show-path' : None , u'cli-suppress-list-no' : None , u'cli-custom-range-actionpoint' : u'NsmRangeCliActionpoint' , u'cli-custom-range-enumerator' : u'NsmRangeCliActionpoint' , u'cli-suppress-key-abbreviation' : None , u'cli-no-match-completion' : None , u'cli-full-command' : None , u'callpoint' : u'interface_po' } } , namespace = 'urn:brocade.com:mgmt:brocade-interface' , defining_module = 'brocade-interface' , yang_type = 'list' , is_config = True )
    except ( TypeError , ValueError ) :
        raise ValueError ( { 'error-string' : """port_channel must be of a type compatible with list""" , 'defined-type' : "list" , 'generated-type' : """YANGDynClass(base=YANGListType("name",port_channel.port_channel, yang_name="port-channel", rest_name="Port-channel", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of port-channels.', u'cli-no-key-completion': None, u'alt-name': u'Port-channel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_po'}}), is_container='list', yang_name="port-channel", rest_name="Port-channel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of port-channels.', u'cli-no-key-completion': None, u'alt-name': u'Port-channel', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PORT_CHANNEL', u'cli-suppress-show-path': None, u'cli-suppress-list-no': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_po'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""" , } )
    # Store the validated value and notify the framework, if supported.
    self . __port_channel = t
    if hasattr ( self , '_set' ) :
        self . _set ( )
def get_requests_session():
    """Set connection pool maxsize and block value to avoid
    `connection pool full` warnings.

    :return: requests session
    """
    session = requests.sessions.Session()
    # Mount one enlarged, blocking adapter instance per scheme.
    for scheme in ('http://', 'https://'):
        session.mount(scheme, HTTPAdapter(pool_connections=25,
                                          pool_maxsize=25,
                                          pool_block=True))
    return session
def fburl(parser, token):
    """Returns an absolute URL matching given view with its parameters.

    This is a way to define links that aren't tied to a particular URL
    configuration::

        {% url path.to.some_view arg1,arg2,name1=value1 %}

    The first argument is a path to a view. It can be an absolute python
    path or just ``app_name.view_name`` without the project name if the
    view is located inside the project. Other arguments are comma-separated
    values that will be filled in place of positional and keyword arguments
    in the URL. All arguments for the URL should be present.

    For example if you have a view ``app_name.client`` taking client's id
    and the corresponding line in a URLconf looks like this::

        ('^client/(\\d+)/$', 'app_name.client')

    and this app's URLconf is included into the project's URLconf under
    some path::

        ('^clients/', include('project_name.app_name.urls'))

    then in a template you can create a link for a certain client like
    this::

        {% url app_name.client client.id %}

    The URL will look like ``/clients/client/123/``.
    """
    bits = token.contents.split(' ')
    if len(bits) < 2:
        raise template.TemplateSyntaxError(
            "'%s' takes at least one argument" " (path to a view)" % bits[0])
    viewname = bits[1]
    args = []
    kwargs = {}
    asvar = None
    if len(bits) > 2:
        bits = iter(bits[2:])
        for bit in bits:
            if bit == 'as':
                # The next token names the context variable to assign to.
                # BUG FIX: ``bits.next()`` is Python-2-only; the ``next()``
                # builtin works on both Python 2.6+ and Python 3.
                asvar = next(bits)
                break
            else:
                for arg in bit.split(","):
                    if '=' in arg:
                        k, v = arg.split('=', 1)
                        k = k.strip()
                        kwargs[k] = parser.compile_filter(v)
                    elif arg:
                        args.append(parser.compile_filter(arg))
    return URLNode(viewname, args, kwargs, asvar)
def get_default_value(self):
    """Return the default value for the parameter. If there is no
    (non-blank) default value, return None."""
    try:
        default = self.attributes['default_value']
    except KeyError:
        return None
    # Blank/whitespace-only defaults count as "no default".
    return default if default.strip() else None
def are_in_interval(s, l, r, border='included'):
    """Checks whether all numbers in the sequence s lie inside the
    interval formed by l and r."""
    checks = [IntensityRangeStandardization.is_in_interval(x, l, r, border)
              for x in s]
    return numpy.all(checks)
def run(configObj, wcsmap=None):
    """Run the blot task based on parameters provided interactively by the
    user.

    Parameters
    ----------
    configObj : dict-like
        Task configuration, including 'data', 'reference', 'outdata' and
        the 'Data Scaling Parameters' / 'User WCS Parameters' sections.
    wcsmap : object, optional
        Custom WCS mapping object forwarded to ``do_blot``.
    """
    # Insure all output filenames specified have .fits extensions
    if not configObj['outdata'].endswith('.fits'):
        configObj['outdata'] += '.fits'
    scale_pars = configObj['Data Scaling Parameters']
    user_wcs_pars = configObj['User WCS Parameters']
    # PyFITS can be used here as it will always operate on
    # output from PyDrizzle (which will always be a FITS file)
    # Open the input (drizzled?) image
    _fname, _sciextn = fileutil.parseFilename(configObj['data'])
    _inimg = fileutil.openImage(_fname, memmap=False)
    _expin = fileutil.getKeyword(configObj['data'], scale_pars['expkey'],
                                 handle=_inimg)
    # Return the PyFITS HDU corresponding to the named extension
    _scihdu = fileutil.getExtn(_inimg, _sciextn)
    _insci = _scihdu.data.copy()
    _inexptime = 1.0
    if scale_pars['in_units'] == 'counts':
        if scale_pars['expkey'] in _inimg['PRIMARY'].header:
            _inexptime = _inimg['PRIMARY'].header[scale_pars['expkey']]
        elif 'DRIZEXPT' in _inimg['PRIMARY'].header:
            # Try keyword written out by new 'drizzle' if no valid
            # 'expkey' was given
            _inexptime = _inimg['PRIMARY'].header['DRIZEXPT']
        else:
            raise ValueError('No valid exposure time keyword could be found '
                             'for input %s' % configObj['data'])
    # Always convert input to 'cps' for the blot() algorithm.
    # BUG FIX: the original condition used ``or``, which is always true
    # and divided by zero when the exposure time was 0; skip the division
    # for exptimes of 0 and 1 (no-ops).
    if _inexptime != 0.0 and _inexptime != 1.0:
        np.divide(_insci, _inexptime, _insci)
    _inimg.close()
    del _inimg
    # read in WCS from source (drizzled) image
    source_wcs = stwcs.wcsutil.HSTWCS(configObj['data'])
    if source_wcs.wcs.is_unity():
        print("WARNING: No valid WCS found for input drizzled image: {}!"
              .format(configObj['data']))
    # define blot_wcs
    blot_wcs = None
    _refname, _refextn = fileutil.parseFilename(configObj['reference'])
    if os.path.exists(_refname):
        # read in WCS from pre-existing output image
        blot_wcs = stwcs.wcsutil.HSTWCS(configObj['reference'])
        if blot_wcs.wcs.is_unity():
            print("WARNING: No valid WCS found for output image: {} !"
                  .format(configObj['reference']))
    # define blot WCS based on input images or specified reference WCS
    # values
    if user_wcs_pars['user_wcs']:
        blot_wcs = wcs_functions.build_hstwcs(
            user_wcs_pars['raref'], user_wcs_pars['decref'],
            user_wcs_pars['xrefpix'], user_wcs_pars['yrefpix'],
            user_wcs_pars['outnx'], user_wcs_pars['outny'],
            user_wcs_pars['outscale'], user_wcs_pars['orient'])
        configObj['coeffs'] = None
    # If blot_wcs is still not defined at this point, we have a problem...
    if blot_wcs is None:
        blot_wcs = stwcs.distortion.utils.output_wcs([source_wcs],
                                                     undistort=False)
    out_wcs = blot_wcs.copy()
    # perform blotting operation now
    _outsci = do_blot(_insci, source_wcs, out_wcs, _expin,
                      coeffs=configObj['coeffs'],
                      interp=configObj['interpol'],
                      sinscl=configObj['sinscl'],
                      stepsize=configObj['stepsize'],
                      wcsmap=wcsmap)
    # create output with proper units and exptime-scaling
    if scale_pars['out_units'] == 'counts':
        if scale_pars['expout'] == 'input':
            _outscale = fileutil.getKeyword(configObj['reference'],
                                            scale_pars['expkey'])
        else:
            _outscale = float(scale_pars['expout'])
        print("Output blotted images scaled by exptime of {}"
              .format(_outscale))
        np.multiply(_outsci, _outscale, _outsci)
    # Add sky back in to the blotted image, as specified by the user
    if configObj['addsky']:
        skyval = _scihdu.header['MDRIZSKY']
    else:
        skyval = configObj['skyval']
    print("Added {} counts back in to blotted image as sky."
          .format(skyval))
    _outsci += skyval
    del _scihdu
    # Write output Numpy objects to a PyFITS file
    # Blotting only occurs from a drizzled SCI extension
    # to a blotted SCI extension...
    outputimage.writeSingleFITS(_outsci, blot_wcs, configObj['outdata'],
                                configObj['reference'])
def move_to(x, y):
    """Moves the brush to a particular position.

    Arguments:
        x - a number between -250 and 250.
        y - a number between -180 and 180.
    """
    # Tell the CNC hardware first, then mirror the move on the turtle.
    path = "coord/{0}/{1}".format(x, y)
    _make_cnc_request(path)
    state['turtle'].goto(x, y)
def make_logger(self, level="INFO"):
    """Convenience function which creates a logger for the module.

    INPUTS:
    level (default="INFO"): Minimum log level for logged/streamed messages.

    OUTPUTS:
    logger  Logger for the function. NOTE: Must be bound to a variable
            named logger.
    """
    threshold = getattr(logging, level.upper())
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # BUG FIX: drop handlers from any previous call so repeated
    # invocations do not duplicate every log record.
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
    formatter = logging.Formatter(
        '%(asctime)s [%(levelname)s]: %(message)s',
        datefmt='[%m/%d/%Y] [%I:%M:%S]')
    file_handler = logging.FileHandler('PyMORESANE.log', mode='w')
    file_handler.setLevel(threshold)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(threshold)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    return logger
def to_netcdf(self, filename, compress=True):
    """Write InferenceData to file using netcdf4.

    Parameters
    ----------
    filename : str
        Location to write to
    compress : bool
        Whether to compress result. Note this saves disk space, but may
        make saving and loading somewhat slower (default: True).

    Returns
    -------
    str
        Location of netcdf file
    """
    if not self._groups:
        # Empty InferenceData: still create a valid (empty) netcdf file.
        empty = nc.Dataset(filename, mode="w", format="NETCDF4")
        empty.close()
        return filename
    mode = "w"  # overwrite on the first group, then append
    for group in self._groups:
        data = getattr(self, group)
        kwargs = {}
        if compress:
            kwargs["encoding"] = {name: {"zlib": True}
                                  for name in data.variables}
        data.to_netcdf(filename, mode=mode, group=group, **kwargs)
        data.close()
        mode = "a"
    return filename
def compute_csets_dTRAM(connectivity, count_matrices, nn=None, callback=None):
    r"""Computes the largest connected sets for dTRAM data.

    Parameters
    ----------
    connectivity : string
        one of 'reversible_pathways', 'neighbors', 'summed_count_matrix' or None.
        Selects the algorithm for measuring overlap between thermodynamic
        and Markov states.

        * 'reversible_pathways': requires that every state in the connected set
          can be reached by following a pathway of reversible transitions. A
          reversible transition between two Markov states (within the same
          thermodynamic state k) is a pair of Markov states that belong to the
          same strongly connected component of the count matrix (from
          thermodynamic state k). A pathway of reversible transitions is a list
          of reversible transitions [(i_1, i_2), (i_2, i_3), ...,
          (i_(N-2), i_(N-1)), (i_(N-1), i_N)]. The thermodynamic state where the
          reversible transitions happen, is ignored in constructing the
          reversible pathways. This is equivalent to assuming that two ensembles
          overlap at some Markov state whenever there exist frames from both
          ensembles in that Markov state.
        * 'largest': alias for reversible_pathways
        * 'neighbors': similar to 'reversible_pathways' but with a more strict
          requirement for the overlap between thermodynamic states. It is
          required that every state in the connected set can be reached by
          following a pathway of reversible transitions or jumping between
          overlapping thermodynamic states while staying in the same Markov
          state. A reversible transition between two Markov states (within the
          same thermodynamic state k) is a pair of Markov states that belong to
          the same strongly connected component of the count matrix (from
          thermodynamic state k). It is assumed that the data comes from an
          Umbrella sampling simulation and the number of the thermodynamic state
          matches the position of the Umbrella along the order parameter. The
          overlap of thermodynamic states k and l within Markov state n is set
          according to the value of nn; if there are samples in both
          product-space states (k, n) and (l, n) and |l-n| <= nn, the states are
          overlapping.
        * 'summed_count_matrix': all thermodynamic states are assumed to
          overlap. The connected set is then computed by summing the count
          matrices over all thermodynamic states and taking its largest strongly
          connected set. Not recommended!
        * None: assume that everything is connected. For debugging.
    count_matrices : numpy.ndarray((T, M, M))
        Count matrices for all T thermodynamic states.
    nn : int or None, optional
        Number of neighbors that are assumed to overlap when
        connectivity='neighbors'
    callback : callable, optional
        Progress callback forwarded to the generic connected-set computation.

    Returns
    -------
    csets, projected_cset
    csets : list of numpy.ndarray((M_prime_k,), dtype=int)
        List indexed by thermodynamic state. Every element csets[k] is
        the largest connected set at thermodynamic state k.
    projected_cset : numpy.ndarray(M_prime, dtype=int)
        The overall connected set. This is the union of the individual
        connected sets of the thermodynamic states.

    Raises
    ------
    ValueError
        If a connectivity mode requiring bias energies is requested, which
        dTRAM input does not provide.
    """
    if connectivity == 'post_hoc_RE' or connectivity == 'BAR_variance':
        # ValueError instead of bare Exception so callers can catch the bad
        # argument specifically; still compatible with `except Exception`.
        raise ValueError('Connectivity type %s not supported for dTRAM data.' % connectivity)
    # A Markov state counts as sampled in ensemble k if it has either incoming
    # or outgoing transition counts there.
    state_counts = _np.maximum(count_matrices.sum(axis=1), count_matrices.sum(axis=2))
    return _compute_csets(
        connectivity, state_counts, count_matrices, None, None, None,
        nn=nn, callback=callback)
|
def tracked_array(array, dtype=None):
    """Properly subclass a numpy ndarray to track changes.

    Avoids some pitfalls of subclassing by forcing contiguous
    arrays, and does a view into a TrackedArray.

    Parameters
    ----------
    array : array-like object
        To be turned into a TrackedArray
    dtype : np.dtype
        Which dtype to use for the array

    Returns
    -------
    tracked : TrackedArray
        Contains input array data
    """
    # treat None as "no data" rather than erroring out
    source = [] if array is None else array
    # force a contiguous layout, then reinterpret as the tracking subclass
    view = np.ascontiguousarray(source, dtype=dtype).view(TrackedArray)
    # should always be contiguous at this point
    assert view.flags['C_CONTIGUOUS']
    return view
|
def glob_wildcards(pattern: str, files: Optional[List[str]] = None) -> Dict[str, Union[List[Any], List[str]]]:
    """Glob the values of the wildcards by matching the given pattern to the
    filesystem (or to an explicit list of files).

    Returns a dictionary mapping each wildcard name to the list of values it
    matched."""
    pattern = os.path.normpath(pattern)
    if sys.platform == 'win32':
        # path matching is always performed with forward slashes
        pattern = pattern.replace('\\', '/')
    # the literal directory prefix before the first '{wildcard}' can be walked
    wildcard_start = re.search("{[^{]", pattern)
    prefix = pattern[:wildcard_start.start()] if wildcard_start else pattern
    dirname = os.path.dirname(prefix) or "."
    matches = {m.group('name'): [] for m in SOS_WILDCARD.finditer(pattern)}
    compiled = re.compile(regex(pattern))
    if files is None:
        # walk the filesystem; both files and directories are candidates
        files = (os.path.join(dirpath, entry) if dirpath != "." else entry
                 for dirpath, dirnames, filenames in os.walk(dirname)
                 for entry in chain(filenames, dirnames))
    for candidate in files:
        hit = compiled.match(str(candidate).replace('\\', '/'))
        if hit:
            for name, value in hit.groupdict().items():
                matches[name].append(value)
    return matches
|
def _recv_robust(self, sock, size):
    """Receive up to ``size`` bytes from ``sock``, retrying if the recv()
    call was interrupted by a signal.

    (The explicit EINTR retry is only required for Python 2 compatibility.)
    """
    while True:
        try:
            return sock.recv(size)
        except socket.error as exc:
            # only an interrupted syscall is retried; anything else propagates
            if exc.errno != errno.EINTR:
                raise
|
def get_register_context(self):
    """Return a hexadecimal dump of the core registers as expected by GDB.

    Registers are emitted in the order of ``self._register_list`` (the order
    GDB expects); each value is little-endian hex encoded: 16 hex digits for
    64-bit registers, 8 digits otherwise.
    """
    logging.debug("GDB getting register context")
    resp = b''
    # read all registers in a single batched call
    reg_num_list = [reg.reg_num for reg in self._register_list]
    vals = self._context.read_core_registers_raw(reg_num_list)
    # print("Vals: %s" % vals)
    for reg, regValue in zip(self._register_list, vals):
        if reg.bitsize == 64:
            resp += six.b(conversion.u64_to_hex16le(regValue))
        else:
            # assumes any non-64-bit register is 32 bits wide -- TODO confirm
            resp += six.b(conversion.u32_to_hex8le(regValue))
        logging.debug("GDB reg: %s = 0x%X", reg.name, regValue)
    return resp
|
def drop_message(self, message):
    """Remove ``message`` from whichever message list contains it.

    Returns True if the message was found (and removed), otherwise False.
    The per-severity signing tally is kept in sync with the removal.
    """
    for bucket_name in ('errors', 'warnings', 'notices'):
        bucket = getattr(self, bucket_name)
        if message not in bucket:
            continue
        bucket.remove(message)
        if 'signing_severity' in message:
            # keep the signing summary consistent with the lists
            self.signing_summary[message['signing_severity']] -= 1
        return True
    return False
|
def get_xml_root(xml_path):
    """Fetch an XML document over HTTP and return its parsed root element.

    :param xml_path: URL to a xml file
    :type xml_path: str
    :return: xml root
    """
    # NOTE(review): no status-code check -- an HTML error page would raise a
    # parse error here rather than a clear HTTP error
    response = requests.get(xml_path)
    return ET.fromstring(response.content)
|
def wcs_to_coords(w, shape):
    """Generate an N x D list of pixel center coordinates where N is
    the number of pixels and D is the dimensionality of the map.

    Parameters
    ----------
    w : WCS-like object exposing ``naxis`` (must be 2 or 3).
    shape : tuple
        Pixel-grid shape; presumably broadcast-compatible with the axis
        arrays returned by ``wcs_to_axes`` -- TODO confirm expected axis order.

    Returns
    -------
    numpy.ndarray
        Stacked coordinate array, one row per dimension.

    Raises
    ------
    Exception
        If ``w.naxis`` is neither 2 nor 3.
    """
    if w.naxis == 2:
        y, x = wcs_to_axes(w, shape)
    elif w.naxis == 3:
        z, y, x = wcs_to_axes(w, shape)
    else:
        raise Exception("Wrong number of WCS axes %i" % w.naxis)
    # the axis arrays are bin edges; midpoints give the pixel centers
    x = 0.5 * (x[1:] + x[:-1])
    y = 0.5 * (y[1:] + y[:-1])
    if w.naxis == 2:
        # broadcast each 1-D center array over the full grid, then flatten
        x = np.ravel(np.ones(shape) * x[:, np.newaxis])
        y = np.ravel(np.ones(shape) * y[np.newaxis, :])
        return np.vstack((x, y))
    z = 0.5 * (z[1:] + z[:-1])
    x = np.ravel(np.ones(shape) * x[:, np.newaxis, np.newaxis])
    y = np.ravel(np.ones(shape) * y[np.newaxis, :, np.newaxis])
    z = np.ravel(np.ones(shape) * z[np.newaxis, np.newaxis, :])
    return np.vstack((x, y, z))
|
def build_catalog(site, datasets, format=None):
    '''Build the DCAT catalog for this site.

    :param site: the site object; provides the catalog title
    :param datasets: datasets to expose; when given a ``Paginable`` the
        catalog is published as a paginated ``hydra:Collection``
        (``format`` is then mandatory to build the page URLs)
    :param format: serialization format used in the pagination URLs
    :return: the catalog RDF resource
    '''
    site_url = url_for('site.home_redirect', _external=True)
    catalog_url = url_for('site.rdf_catalog', _external=True)
    graph = Graph(namespace_manager=namespace_manager)
    catalog = graph.resource(URIRef(catalog_url))
    # catalog-level metadata
    catalog.set(RDF.type, DCAT.Catalog)
    catalog.set(DCT.title, Literal(site.title))
    catalog.set(DCT.language, Literal(current_app.config['DEFAULT_LANGUAGE']))
    catalog.set(FOAF.homepage, URIRef(site_url))
    # the publisher is a blank node describing the site author organization
    publisher = graph.resource(BNode())
    publisher.set(RDF.type, FOAF.Organization)
    publisher.set(FOAF.name, Literal(current_app.config['SITE_AUTHOR']))
    catalog.set(DCT.publisher, publisher)
    for dataset in datasets:
        catalog.add(DCAT.dataset, dataset_to_rdf(dataset, graph))
    if isinstance(datasets, Paginable):
        if not format:
            raise ValueError('Pagination requires format')
        # expose pagination through the Hydra vocabulary
        catalog.add(RDF.type, HYDRA.Collection)
        catalog.set(HYDRA.totalItems, Literal(datasets.total))
        kwargs = {
            'format': format,
            'page_size': datasets.page_size,
            '_external': True,
        }
        first_url = url_for('site.rdf_catalog_format', page=1, **kwargs)
        page_url = url_for('site.rdf_catalog_format', page=datasets.page, **kwargs)
        last_url = url_for('site.rdf_catalog_format', page=datasets.pages, **kwargs)
        pagination = graph.resource(URIRef(page_url))
        pagination.set(RDF.type, HYDRA.PartialCollectionView)
        pagination.set(HYDRA.first, URIRef(first_url))
        pagination.set(HYDRA.last, URIRef(last_url))
        # next/previous links only when such pages exist
        if datasets.has_next:
            next_url = url_for('site.rdf_catalog_format', page=datasets.page + 1, **kwargs)
            pagination.set(HYDRA.next, URIRef(next_url))
        if datasets.has_prev:
            prev_url = url_for('site.rdf_catalog_format', page=datasets.page - 1, **kwargs)
            pagination.set(HYDRA.previous, URIRef(prev_url))
        catalog.set(HYDRA.view, pagination)
    return catalog
|
def can_import(self, file_uris, current_doc=None):
    """Check that the specified file looks like an image supported by PIL"""
    if not file_uris:
        return False
    # every candidate URI must pass the file-type check
    return all(
        self.check_file_type(self.fs.safe(uri))
        for uri in file_uris
    )
|
def add_instruction(self, target, data):
    """Add a processing-instruction node to this element.

    :param string target: the instruction's target name.
    :param string data: the instruction's content.
    """
    self._add_instruction(self.impl_node, target, data)
|
def com_google_fonts_check_name_description_max_length(ttFont):
    """Description strings in the name table must not exceed 200 characters."""
    # any DESCRIPTION record longer than 200 decoded characters fails the check
    too_long = any(
        record.nameID == NameID.DESCRIPTION
        and len(record.string.decode(record.getEncoding())) > 200
        for record in ttFont['name'].names
    )
    if too_long:
        yield WARN, ("A few name table entries with ID={} (NameID.DESCRIPTION)"
                     " are longer than 200 characters."
                     " Please check whether those entries are copyright notices"
                     " mistakenly stored in the description string entries by"
                     " a bug in an old FontLab version."
                     " If that's the case, then such copyright notices must be"
                     " removed from these entries."
                     "").format(NameID.DESCRIPTION)
    else:
        yield PASS, "All description name records have reasonably small lengths."
|
def read_sbml(filename):
    """Read the model from a SBML file.

    :param filename: SBML filename to read the model from
    :return: A tuple, consisting of :class:`~means.core.model.Model` instance,
             set of parameter values, and set of initial conditions variables.
    :raises IOError: if the file does not exist
    :raises ValueError: if the file cannot be parsed as an SBML model
    """
    import libsbml

    if not os.path.exists(filename):
        raise IOError('File {0!r} does not exist'.format(filename))

    reader = libsbml.SBMLReader()
    document = reader.readSBML(filename)
    sbml_model = document.getModel()
    if not sbml_model:
        raise ValueError('Cannot parse SBML model from {0!r}'.format(filename))

    species = sympy.symbols([s.getId() for s in sbml_model.getListOfSpecies()])
    initial_conditions = [s.getInitialConcentration() for s in sbml_model.getListOfSpecies()]
    compartments = sympy.symbols([s.getId() for s in sbml_model.getListOfCompartments()])
    compartment_sizes = [s.getSize() for s in sbml_model.getListOfCompartments()]

    # Materialize as a list: len() and repeated iteration below would fail on
    # the lazy map object returned under Python 3.
    reactions = list(map(_parse_reaction, sbml_model.getListOfReactions()))

    # getListOfParameters is an attribute of the model for SBML Level 1&2
    parameters_with_values = [(sympy.Symbol(p.getId()), p.getValue())
                              for p in sbml_model.getListOfParameters()]
    parameter_values = dict(parameters_with_values)
    # A list (not a Python 3 map object, which is always truthy) so the
    # emptiness test below is meaningful.
    parameters = [symbol for symbol, _ in parameters_with_values]
    if not parameters:
        # no global parameters: collect local (per-reaction) ones instead
        track_local_parameters = True
        parameters = set()
        parameter_values = {}
    else:
        track_local_parameters = False

    stoichiometry_matrix = np.zeros((len(species), len(reactions)), dtype=int)
    propensities = []
    for reaction_index, reaction in enumerate(reactions):
        if track_local_parameters:
            for param, value in reaction.parameters:
                parameters.add(param)
                parameter_values[param] = value
        reactants = reaction.reactants
        products = reaction.products
        propensities.append(reaction.propensity)
        for species_index, species_id in enumerate(species):
            # net effect of the reaction on each species
            net_stoichiometry = products.get(species_id, 0) - reactants.get(species_id, 0)
            stoichiometry_matrix[species_index, reaction_index] = net_stoichiometry

    if track_local_parameters:
        # sympy does not allow sorting its parameter lists by default,
        # explicitly tell to sort by str representation
        sorted_parameters = sorted(parameters, key=str)
    else:
        sorted_parameters = parameters
    parameter_values_list = [parameter_values[p] for p in sorted_parameters]

    # We need to concatenate compartment names and parameters as in our
    # framework we cannot differentiate the two
    compartments_and_parameters = list(compartments) + list(sorted_parameters)
    parameter_values_list = compartment_sizes + parameter_values_list

    model = Model(species, compartments_and_parameters, propensities, stoichiometry_matrix)
    return model, parameter_values_list, initial_conditions
|
def _split(rule):
    """Split a rule whose len(rhs) > 2 into an equivalent chain of binary rules.

    Yields the original lhs rewritten to its first rhs symbol plus a fresh
    nonterminal, a cascade of intermediate nonterminals, and a final rule
    covering the last two rhs symbols. Only the first rule keeps the original
    weight/alias; helper rules get weight 0 and alias 'Split'.
    """
    rule_str = str(rule.lhs) + '__' + '_'.join(str(x) for x in rule.rhs)
    rule_name = '__SP_%s' % (rule_str) + '_%d'
    yield Rule(rule.lhs, [rule.rhs[0], NT(rule_name % 1)], weight=rule.weight, alias=rule.alias)
    # range() rather than xrange(): Python 3 compatible (xrange is Py2-only)
    for i in range(1, len(rule.rhs) - 2):
        yield Rule(NT(rule_name % i), [rule.rhs[i], NT(rule_name % (i + 1))], weight=0, alias='Split')
    yield Rule(NT(rule_name % (len(rule.rhs) - 2)), rule.rhs[-2:], weight=0, alias='Split')
|
def goBack(self):
    """Go up one level if possible and return the url at the new level.

    Returns an empty string when there is nothing to go back to.

    :return: <str>
    """
    if not self.canGoBack():
        return ''
    # suppress stack-change notifications while the index is adjusted
    self._blockStack = True
    self._index -= 1
    self.emitCurrentChanged()
    self._blockStack = False
    return self.currentUrl()
|
def delete_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False):
    """Delete the DCNM OUT partition and update the result."""
    tenant_name = fw_dict.get('tenant_name')
    status = fw_const.DCNM_OUT_PART_DEL_SUCCESS
    succeeded = True
    try:
        self._delete_partition(tenant_id, tenant_name)
    except Exception as exc:
        # a failed delete is recorded in the DB result rather than raised
        LOG.error("deletion of Out Partition failed for tenant "
                  "%(tenant)s, Exception %(exc)s",
                  {'tenant': tenant_id, 'exc': str(exc)})
        status = fw_const.DCNM_OUT_PART_DEL_FAIL
        succeeded = False
    self.update_fw_db_result(tenant_id, dcnm_status=status)
    LOG.info("Out partition deleted")
    return succeeded
|
def _ack(self, sender, uid, level, payload=None):
    """Send a JSON-encoded reply for message ``uid`` back to ``sender``."""
    reply = {'reply-to': uid, 'reply-level': level, 'payload': payload}
    self.__client.send_message(sender, json.dumps(reply))
|
def DbPutAttributeAlias(self, argin):
    """Define an alias for an attribute.

    :param argin: Str[0] = attribute name
                  Str[1] = attribute alias
    :type: tango.DevVarStringArray
    :return:
    :rtype: tango.DevVoid
    """
    self._log.debug("In DbPutAttributeAlias()")
    if len(argin) < 2:
        self.warn_stream("DataBase::DbPutAttributeAlias(): insufficient number of arguments ")
        # NOTE(review): presumably th_exc raises and aborts here; if it only
        # builds the exception object, the argin[0]/argin[1] lookups below
        # would IndexError -- confirm th_exc semantics
        th_exc(DB_IncorrectArguments,
               "insufficient number of arguments to put attribute alias",
               "DataBase::DbPutAttributeAlias()")
    attribute_name = argin[0]
    attribute_alias = argin[1]
    self.db.put_attribute_alias(attribute_name, attribute_alias)
|
def visitEqualityExpression(self, ctx):
    """expression: expression (EQ | NEQ) expression"""
    left = self.visit(ctx.expression(0))
    right = self.visit(ctx.expression(1))
    # coerce both operands to a common type before comparing
    arg1, arg2 = conversions.to_same(left, right, self._eval_context)
    if isinstance(arg1, str):
        # string equality is case-insensitive
        result = arg1.lower() == arg2.lower()
    else:
        result = arg1 == arg2
    # NEQ is simply the negated comparison
    return result if ctx.EQ() is not None else not result
|
def _node_type(st):
    """Return a human-readable name for the special file type described by
    stat buffer ``st`` (block device, character device, named pipe, socket),
    or None for any other kind of file."""
    mode = st.st_mode
    for predicate, label in ((stat.S_ISBLK, "block device"),
                             (stat.S_ISCHR, "character device"),
                             (stat.S_ISFIFO, "named pipe"),
                             (stat.S_ISSOCK, "socket")):
        if predicate(mode):
            return label
|
def is_dir(self, follow_symlinks=True):
    """Return True if this entry is a directory or a symbolic link pointing to
    a directory; return False if the entry is or points to any other kind
    of file, or if it doesn't exist anymore.

    The result is cached on the os.DirEntry object.

    Args:
        follow_symlinks (bool): Follow symlinks.
            Not supported on cloud storage objects.

    Returns:
        bool: True if directory exists.
    """
    try:
        return (self._system.isdir(
            path=self._path, client_kwargs=self._client_kwargs,
            virtual_dir=False) or
            # Some directories only exists virtually in object path and
            # don't have headers.
            bool(S_ISDIR(self.stat().st_mode)))
    except ObjectPermissionError:
        # The directory was listed, but unable to head it or access to its
        # content: treat it as an existing directory.
        return True
|
def version(self):
    '''Return the adb version as [full, major, minor, patch] strings.

    Raises EnvironmentError with the raw output when `adb version` cannot
    be parsed (instead of the obscure AttributeError the unguarded
    ``match.group`` used to produce).
    '''
    output = self.raw_cmd("version").communicate()[0].decode("utf-8")
    match = re.search(r"(\d+)\.(\d+)\.(\d+)", output)
    if match is None:
        raise EnvironmentError("Cannot parse adb version from: %r" % output)
    # group(0) is the full "x.y.z" string, groups 1-3 its components
    return [match.group(i) for i in range(4)]
|
def get_center(self, element):
    """Get center coordinates of an element

    :param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
    :returns: dict with center coordinates
    """
    target = self.get_web_element(element)
    top_left = target.location
    dimensions = target.size
    # center = top-left corner plus half the width/height
    return {
        'x': top_left['x'] + (dimensions['width'] / 2),
        'y': top_left['y'] + (dimensions['height'] / 2),
    }
|
def set_file(path, saltenv='base', **kwargs):
    '''Set answers to debconf questions from a file.

    CLI Example:

    .. code-block:: bash

        salt '*' debconf.set_file salt://pathto/pkg.selections
    '''
    # "env" is not supported; use "saltenv" -- drop the legacy key silently
    kwargs.pop('__env__', None)
    cached = __salt__['cp.cache_file'](path, saltenv)
    if not cached:
        return False
    _set_file(cached)
    return True
|
def update_tag(self, tag_name, description=None, custom_properties=None, **kwargs):
    """update a tag by name

    Args:
        tag_name (string): name of tag to update
        description (optional[string]): a description
        custom_properties (optional[dict]): dictionary of custom properties
    """
    payload = {
        'description': description or '',
        'customProperties': custom_properties or {},
    }
    response = self._put(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name), data=payload, **kwargs)
    # surface HTTP errors before decoding the body
    response.raise_for_status()
    return response.json()
|
def create_time_subscription(self, instance, on_data=None, timeout=60):
    """Create a new subscription for receiving time updates of an instance.

    Time updates are emitted at 1Hz.

    This method returns a future, then returns immediately. Stop the
    subscription by canceling the future.

    :param str instance: A Yamcs instance name
    :param on_data: Function that gets called with
                    :class:`~datetime.datetime` updates.
    :type on_data: Optional[Callable[~datetime.datetime])
    :param timeout: The amount of seconds to wait for the request to
                    complete.
    :type timeout: Optional[float]
    :return: Future that can be used to manage the background websocket
             subscription.
    :rtype: .TimeSubscription
    """
    manager = WebSocketSubscriptionManager(self, resource='time')
    # Represent subscription as a future
    subscription = TimeSubscription(manager)
    # parse raw time-info messages into datetimes before invoking on_data
    wrapped_callback = functools.partial(_wrap_callback_parse_time_info, subscription, on_data)
    manager.open(wrapped_callback, instance)
    # Wait until a reply or exception is received
    subscription.reply(timeout=timeout)
    return subscription
|
def make_search_body(index, query, params=None):
    """Generates a dictionary suitable for encoding as the search body

    :param index: The index name to query
    :param query: The query itself
    :param params: Modifiers for the query
    :type params: :class:`couchbase.fulltext.Params`
    :return: A dictionary suitable for serialization
    """
    # bare strings are promoted to a query-string query
    if not isinstance(query, Query):
        query = QueryStringQuery(query)
    body = {'query': query.encodable}
    if params:
        body.update(params.as_encodable(index))
    body['indexName'] = index
    return body
|
def read_connections(self, connections):
    """Reads connections from configuration parameters.
    Each section represents an individual ConnectionParams.

    :param connections: configuration parameters to be read
    """
    # rebuild the discovery list in place
    del self._items[:]
    for key in connections.get_key_names():
        entry = DiscoveryItem()
        entry.key = key
        raw_value = connections.get_as_nullable_string(key)
        entry.connection = ConnectionParams.from_string(raw_value)
        self._items.append(entry)
|
def validate(self):
    """validate: Makes sure question is valid

    Args: None
    Returns: boolean indicating if question is valid
    """
    # NOTE: assert-based validation is stripped when running under `python -O`
    assert self.id is not None, "Assumption Failed: Question must have an id"
    assert isinstance(self.question, str) or self.question is None, "Assumption Failed: Question must be a string"
    assert isinstance(self.question_type, str), "Assumption Failed: Question type must be a string"
    assert isinstance(self.answers, list), "Assumption Failed: Answers must be a list"
    assert isinstance(self.hints, list), "Assumption Failed: Hints must be a list"
    for answer in self.answers:
        assert isinstance(answer, dict), "Assumption Failed: Answer in answer list is not a dict"
    for hint in self.hints:
        assert isinstance(hint, str), "Assumption Failed: Hint in hints list is not a string"
    return True
|
def on_notify_load_status(self, webkitView, *args, **kwargs):
    """Callback invoked when the page load status changes.

    FYI, this function will be called after $(document).ready()
    in jQuery.
    """
    status = webkitView.get_load_status()
    # NOTE(review): status.FINISHED reads the enum constant via the instance;
    # presumably a GObject enum -- confirm against the webkit bindings.
    if status == status.FINISHED:
        if self.debug:
            # print() function form: the original `print 'Load finished'`
            # statement is a SyntaxError under Python 3
            print('Load finished')
|
def _internal_verify_cas(ticket, service, suffix):
    """Verifies CAS 2.0 and 3.0 XML-based authentication ticket.

    Builds the validation URL from ``settings.CAS_SERVER_URL`` plus ``suffix``,
    fetches it, and parses the XML response. On success the username is
    extracted; when the response carries a proxy-granting ticket, the stored
    Tgt record for that user is created or updated.

    Returns username on success and None on failure.
    """
    params = {'ticket': ticket, 'service': service}
    if settings.CAS_PROXY_CALLBACK:
        params['pgtUrl'] = settings.CAS_PROXY_CALLBACK
    url = (urljoin(settings.CAS_SERVER_URL, suffix) + '?' + urlencode(params))
    page = urlopen(url)
    username = None
    try:
        response = page.read()
        # the response is parsed twice: ElementTree for the success check,
        # minidom for the namespaced proxy-ticket lookups
        tree = ElementTree.fromstring(response)
        document = minidom.parseString(response)
        if tree[0].tag.endswith('authenticationSuccess'):
            if settings.CAS_RESPONSE_CALLBACKS:
                cas_response_callbacks(tree)
            username = tree[0][0].text
            pgt_el = document.getElementsByTagName('cas:proxyGrantingTicket')
            if pgt_el:
                # proxy authentication: persist the TGT for this user
                pgt = pgt_el[0].firstChild.nodeValue
                try:
                    pgtIou = _get_pgtiou(pgt)
                    tgt = Tgt.objects.get(username=username)
                    tgt.tgt = pgtIou.tgt
                    tgt.save()
                    pgtIou.delete()
                except Tgt.DoesNotExist:
                    Tgt.objects.create(username=username, tgt=pgtIou.tgt)
                    logger.info('Creating TGT ticket for {user}'.format(user=username))
                    pgtIou.delete()
                except Exception as e:
                    # proxy failures do not invalidate the primary authentication
                    logger.warning('Failed to do proxy authentication. {message}'.format(message=e))
        else:
            failure = document.getElementsByTagName('cas:authenticationFailure')
            if failure:
                # NOTE(review): logger.warn is a deprecated alias of logger.warning
                logger.warn('Authentication failed from CAS server: %s', failure[0].firstChild.nodeValue)
    except Exception as e:
        logger.error('Failed to verify CAS authentication: {message}'.format(message=e))
    finally:
        page.close()
    return username
|
def add_note(self, player, text, label=None, update=None):
    """Add a note to the xml. If update param is None, it will be the current time."""
    if label is not None and label not in self.label_names:
        raise LabelNotFoundError('Invalid label: {}'.format(label))
    if update is None:
        update = datetime.utcnow()
    # unix-timestamp string, rounded to whole seconds
    # NOTE(review): strftime('%s') is a platform-specific extension (absent on Windows)
    timestamp = update.strftime('%s')
    note = etree.Element('note', player=player,
                         label=self._get_label_id(label), update=timestamp)
    note.text = text
    self.root.append(note)
|
def parse_plays_stream(self):
    """Generate and yield a stream of parsed plays. Useful for per play processing."""
    document = self.html_doc()
    if document is None:
        # no document, nothing to yield
        return
    builder = PlayParser(self.game_key.season, self.game_key.game_type)
    for row in document.xpath('//tr[@class = "evenColor"]'):
        play = builder.build_play(row)
        # keep a full record while also streaming each play to the caller
        self.plays.append(play)
        yield play
|
def _cancel(self, msgId):
    """Cancel outstanding REQ, drop reply silently.

    @param msgId: message ID to cancel
    @type msgId: C{str}
    """
    # unknown ids are ignored; inactive cancellers need no cancel()
    entry = self._requests.pop(msgId, None)
    if entry is None:
        return
    canceller = entry[1]
    if canceller is not None and canceller.active():
        canceller.cancel()
|
def _seek_to_extent(self, extent):
    # type: (int) -> None
    '''An internal method to seek to a particular extent on the input ISO.

    Parameters:
     extent - The extent to seek to.
    Returns:
     Nothing.
    '''
    # extents are fixed-size logical blocks: byte offset = extent * block size
    block_size = self.pvd.logical_block_size()
    self._cdfp.seek(extent * block_size)
|
def _symlink_or_copy_grabix(in_file, out_file, data):
    """We cannot symlink in CWL, but may be able to use inputs or copy"""
    if not cwlutils.is_cwl_run(data):
        # outside CWL a symlink is cheapest
        utils.symlink_plus(in_file, out_file)
        return out_file
    # CWL run: reuse the input directly when its grabix index already exists
    if utils.file_exists(in_file + ".gbi"):
        return in_file
    utils.copy_plus(in_file, out_file)
    return out_file
|
def generate_chunks(data, chunk_size=DEFAULT_CHUNK_SIZE):
    """Yield 'chunk_size' items from 'data' at a time."""
    values = iter(repeated.getvalues(data))
    while True:
        # islice consumes up to chunk_size items per pass
        batch = list(itertools.islice(values, chunk_size))
        if not batch:
            break
        yield batch
|
def get_string():
    """A better str(_get_keycodes()) method"""
    keycodes = _get_keycodes()
    first, rest = keycodes[0], keycodes[1:]
    # escape the leading key when it is not printable
    if first == 27:
        head = '\\e'
    elif ascii.isgraph(chr(first)):
        head = chr(first)
    else:
        head = '\\x%x' % first
    return head + ''.join(chr(c) for c in rest)
|
def _post(self, url, **kw):
    '''Makes a POST request, setting Authorization
    header by default'''
    # caller-supplied Authorization wins over the default
    headers = kw.pop('headers', {})
    headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)
    kw['headers'] = headers
    response = self.session.post(url, **kw)
    self._raise_for_status(response)
    return response
|
def get_password(self, service, username):
    """Get password of the username for the service"""
    matches = self._find_passwords(service, username)
    if not matches:
        return None
    secret = matches[0].secret
    # normalize bytes to text; already-text secrets pass through
    if isinstance(secret, six.text_type):
        return secret
    return secret.decode('utf-8')
|
def show_header(**header):
    "Display a HTTP-style header on the command-line."
    # fixed field order, ending with a blank separator line
    fields = [('Now', header['now']),
              ('Stop-Name', header['name']),
              ('Stop-ID', header.get('id', None))]
    for field, value in fields:
        print('%s: %s' % (field, value))
    print('')
|
def handle_set_command(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
    """Handle a ``SET X = "Y"`` statement."""
    key = tokens['key']
    value = tokens['value']
    # reject invalid values before mutating annotation state
    self.raise_for_invalid_annotation_value(line, position, key, value)
    self.annotations[key] = value
    return tokens
|
def onContinue(self, event, grid, next_dia=None):
    """Save grid data in the data object, validate it, and move on.

    Shows an overridable warning when required data are missing; on success
    either opens ``next_dia`` or finalizes by propagating lithology columns.

    :param event: wx event (not used directly)
    :param grid: the grid whose contents are saved
    :param next_dia: optional callable opening the next dialog
    :return: False when the user aborts at the validation warning, else None
    """
    # deselect column, including remove 'EDIT ALL' label
    if self.grid_frame.drop_down_menu:
        self.grid_frame.drop_down_menu.clean_up()
    # remove '**' and '^^' from col names
    grid.remove_starred_labels()
    # locks in value in cell currently edited
    grid.SaveEditControlValue()
    grid_name = str(grid.GetName())
    # save all changes to data object and write to file
    self.grid_frame.grid_builder.save_grid_data()
    # check that all required data are present
    validation_errors = self.validate(grid)
    if validation_errors:
        # build one combined warning message for all error categories
        warn_string = ""
        for error_name, error_cols in list(validation_errors.items()):
            if error_cols:
                warn_string += "You have {}: {}.\n\n".format(error_name, ", ".join(error_cols))
        warn_string += "Are you sure you want to continue?"
        result = pw.warning_with_override(warn_string)
        if result == wx.ID_YES:
            pass
        else:
            # user chose not to continue past the validation warnings
            return False
    else:
        wx.MessageBox('Saved!', 'Info', style=wx.OK | wx.ICON_INFORMATION)
    self.panel.Destroy()
    if next_dia:
        next_dia()
    else:
        # propagate any type/lithology/class data from sites to samples table
        # will only overwrite if sample values are blank or "Not Specified"
        self.contribution.propagate_lithology_cols()
        wx.MessageBox('Done!', 'Info', style=wx.OK | wx.ICON_INFORMATION)
|
def _compose_range(pattern, rule, fill=2):
    """oc._compose_range('Week', 'Week04-Week09', fill=2) - expand a range.

    Expands a comma-separated list of single values and 'start-end' ranges
    into the list of all covered values, zero-padded to ``fill`` digits.
    The ``pattern`` prefix is stripped wherever present; both 'Week00-15'
    and 'Week00-Week15' are accepted.
    """
    mask = len(pattern)
    fmt = "%%0%ii" % fill
    expanded = []
    for piece in str.split(rule, ","):
        if '-' not in piece:
            # single value: strip the prefix when present
            expanded.append(piece[mask:] if piece[:mask] == pattern else piece)
            continue
        start_text, end_text = str.split(piece, '-')
        start = int(start_text[mask:]) if piece[:mask] == pattern else int(start_text)
        # Since both "Week00-15" and "Week00-Week15" are allowed, the end
        # may carry its own prefix.
        end = int(end_text[mask:]) if end_text[0:mask] == pattern else int(end_text)
        expanded.extend(fmt % i for i in range(start, end + 1))
    return expanded
|
def find_tags_from_xml(self, xml_name, tag_name, **attribute_filter):
    """Return a list of all the matched tags in a specific xml

    :param str xml_name: specify from which xml to pick the tag from
    :param str tag_name: specify the tag name
    """
    root = self.xml[xml_name]
    if root is None:
        return []
    if root.tag == tag_name:
        # the root itself is the requested tag: match it or nothing
        # (note the original passes the tag *name* to the matcher here)
        return [root] if self.is_tag_matched(root.tag, **attribute_filter) else []
    found = root.findall(".//" + tag_name)
    return [element for element in found
            if self.is_tag_matched(element, **attribute_filter)]
|
def set_metadata(self, obj, metadata, clear=False, prefix=None):
    """Accepts a dictionary of metadata key/value pairs and updates the
    specified object metadata with them.

    If 'clear' is True, any existing metadata is deleted and only the
    passed metadata is retained. Otherwise, the values passed here update
    the object's metadata.

    By default, the standard object metadata prefix ('X-Object-Meta-') is
    prepended to the header name if it isn't present. For non-standard
    headers, you must include a non-None prefix, such as an empty string.
    """
    if prefix is None:
        prefix = OBJECT_META_PREFIX
    massaged = _massage_metakeys(metadata, prefix)
    cname = utils.get_name(self.container)
    oname = utils.get_name(obj)
    # Object POST replaces *all* current metadata (unlike container POST,
    # where values are blanked individually), so start from the existing
    # metadata unless the caller asked for a clean slate.
    new_meta = {}
    if not clear:
        existing = self.get_metadata(obj, prefix=prefix)
        new_meta = _massage_metakeys(existing, prefix)
    utils.case_insensitive_update(new_meta, massaged)
    # Drop empty values, since the object metadata API would store them.
    new_meta = {key: val for key, val in six.iteritems(new_meta) if val}
    uri = "/%s/%s" % (cname, oname)
    resp, resp_body = self.api.method_post(uri, headers=new_meta)
|
def dump(self, indentation=0):
    """Returns a string representation of the structure.

    Walks every field declared in ``__keys__`` and renders one line per
    field: file offset, structure-relative offset, field name, and a
    formatted value.  Integer fields are shown in hex; byte fields are
    shown as hex pairs (signatures) or printable text with escapes.

    NOTE(review): uses the ``long`` builtin and py2-style strings, so this
    block appears to target Python 2 — confirm before porting.
    """
    dump = []
    dump.append('[{0}]'.format(self.name))
    # Precompute the set of byte values that can be shown literally.
    printable_bytes = [ord(i) for i in string.printable if i not in string.whitespace]
    # Refer to the __set_format__ method for an explanation
    # of the following construct.
    for keys in self.__keys__:
        for key in keys:
            val = getattr(self, key)
            if isinstance(val, (int, long)):
                if key.startswith('Signature_'):
                    # Signatures are rendered without the 0x prefix.
                    val_str = '%-8X' % (val)
                else:
                    val_str = '0x%-8X' % (val)
                if key == 'TimeDateStamp' or key == 'dwTimeStamp':
                    try:
                        # Append a human-readable UTC timestamp when valid.
                        val_str += ' [%s UTC]' % time.asctime(time.gmtime(val))
                    except ValueError as e:
                        val_str += ' [INVALID TIME]'
            else:
                val_str = bytearray(val)
                if key.startswith('Signature'):
                    # Byte signatures: hex pairs, trailing NULs stripped.
                    val_str = ''.join(['{:02X}'.format(i) for i in val_str.rstrip(b'\x00')])
                else:
                    # Other byte fields: printable chars as-is, the rest escaped.
                    val_str = ''.join([chr(i) if (i in printable_bytes) else '\\x{0:02x}'.format(i) for i in val_str.rstrip(b'\x00')])
            dump.append('0x%-8X 0x%-3X %-30s %s' % (self.__field_offsets__[key] + self.__file_offset__, self.__field_offsets__[key], key + ':', val_str))
    return dump
|
def respond(self, result):
    """Create a response to this request.

    When processing the request completed successfully this method can be
    used to create a response object.

    :param result: The result of the invoked method.
    :type result: Anything that can be encoded by JSON.
    :returns: A response object that can be serialized and sent to the
        client, or ``None`` for one-way requests / notifications.
    :rtype: :py:class:`JSONRPCSuccessResponse`
    """
    # Notifications (no id) and one-way requests never get a response.
    if self.one_way or self.unique_id is None:
        return None
    resp = JSONRPCSuccessResponse()
    resp.result = result
    resp.unique_id = self.unique_id
    return resp
|
def wrap(query, container, namespace):
    """NORMALIZE QUERY SO IT CAN STILL BE JSON

    Converts a raw (dict-like) query into a QueryOp with normalized
    select / edges / groupby / where / window / sort clauses resolved
    against the container's table schema.
    """
    # Already-normalized queries (or missing ones) pass through untouched.
    # `== None` rather than `is None` appears deliberate in this codebase's
    # wrapped-null style -- do not "fix" without checking mo-dots semantics.
    if is_op(query, QueryOp) or query == None:
        return query
    # NOTE(review): this self-call looks like it should be the mo_dots.wrap
    # dict-wrapper, shadowed by this function's name when the module was
    # flattened; as written it would re-enter this function with missing
    # arguments. Confirm against the original module before relying on it.
    query = wrap(query)
    table = container.get_table(query['from'])
    schema = table.schema
    # Limit is clamped to MAX_LIMIT, falling back to DEFAULT_LIMIT.
    output = QueryOp(frum=table, format=query.format, limit=mo_math.min(MAX_LIMIT, coalesce(query.limit, DEFAULT_LIMIT)))
    if query.select or isinstance(query.select, (Mapping, list)):
        output.select = _normalize_selects(query.select, query.frum, schema=schema)
    else:
        # No explicit select: aggregations default to DEFAULT_SELECT,
        # plain queries select the whole record (".").
        if query.edges or query.groupby:
            output.select = DEFAULT_SELECT
        else:
            output.select = _normalize_selects(".", query.frum)
    # edges and groupby are mutually exclusive clauses.
    if query.groupby and query.edges:
        Log.error("You can not use both the `groupby` and `edges` clauses in the same query!")
    elif query.edges:
        output.edges = _normalize_edges(query.edges, limit=output.limit, schema=schema)
        output.groupby = Null
    elif query.groupby:
        output.edges = Null
        output.groupby = _normalize_groupby(query.groupby, limit=output.limit, schema=schema)
    else:
        output.edges = Null
        output.groupby = Null
    output.where = _normalize_where(query.where, schema=schema)
    output.window = [_normalize_window(w) for w in listwrap(query.window)]
    output.having = None
    output.sort = _normalize_sort(query.sort)
    if not mo_math.is_integer(output.limit) or output.limit < 0:
        Log.error("Expecting limit >= 0")
    output.isLean = query.isLean
    return output
|
def merge(d1, d2):
    """Merge two raw datasets into one.

    Neither input dictionary is modified; a new dataset dict is returned.

    Parameters
    ----------
    d1 : dict
    d2 : dict

    Returns
    -------
    dict
        With the combined ``formula_id2latex`` mapping and the
        concatenated ``handwriting_datasets`` lists.
    """
    if d1['formula_id2latex'] is None:
        formula_id2latex = {}
    else:
        formula_id2latex = d1['formula_id2latex'].copy()
    formula_id2latex.update(d2['formula_id2latex'])
    # Copy d1's list before extending: the original appended d2's datasets
    # directly onto d1's list, mutating the caller's input.
    handwriting_datasets = list(d1['handwriting_datasets'])
    handwriting_datasets.extend(d2['handwriting_datasets'])
    return {'formula_id2latex': formula_id2latex,
            'handwriting_datasets': handwriting_datasets}
|
def base_uri ( relative_path = '' ) :
"""Get absolute path to resource , works for dev and for PyInstaller"""
|
try : # PyInstaller creates a temp folder and stores path in _ MEIPASS
base_path = sys . _MEIPASS
except Exception :
if 'pytest' in sys . modules :
for arg in reversed ( sys . argv ) :
path = os . path . realpath ( arg )
if os . path . exists ( path ) :
base_path = path if os . path . isdir ( path ) else os . path . dirname ( path )
break
else :
base_path = os . path . dirname ( os . path . realpath ( sys . argv [ 0 ] ) )
if not os . path . exists ( base_path ) :
raise ValueError ( 'Path %s does not exist' % base_path )
return 'file://%s' % os . path . join ( base_path , relative_path )
|
def gauss_fltr_opencv(dem, size=3, sigma=1):
    """OpenCV Gaussian filter.

    NaN values still propagate through the blur; the masked result is
    rebuilt from them afterwards.
    """
    import cv2
    masked = malib.checkma(dem)
    # Fill masked cells with NaN so the blur marks contaminated pixels.
    blurred = cv2.GaussianBlur(masked.filled(np.nan), (size, size), sigma)
    result = np.ma.fix_invalid(blurred)
    result.set_fill_value(masked.fill_value)
    return result
|
def load_layer(name, globals_dict=None, symb_list=None):
    """Loads a Scapy layer module to make variables, objects and functions
    available globally.

    The layer name is resolved through LAYER_ALIASES first, so short or
    legacy names map onto the real module name.
    """
    module_name = "scapy.layers.%s" % LAYER_ALIASES.get(name, name)
    _load(module_name, globals_dict=globals_dict, symb_list=symb_list)
|
def get_og_image_url(self):
    """:return: URL of the image to use in OG shares, or None (implicitly)
        when no list image is available."""
    li = self.get_list_image()
    if li:
        # Imported lazily so easy_thumbnails is only required when used.
        from easy_thumbnails.files import get_thumbnailer
        # 'og_image' is presumably a named thumbnail alias in settings --
        # TODO(review): confirm it is configured.
        thumb_url = get_thumbnailer(li)['og_image'].url
        # TODO: looks like this may fail if SITE_DOMAIN = "acmi.lvh.me"
        return urljoin(settings.SITE_DOMAIN, thumb_url)
|
def read(stream):
    """Read PLY data from a readable file-like object or filename.

    If a filename was given, the stream opened here is closed again
    before returning; caller-supplied streams are left open.
    """
    must_close, stream = _open_stream(stream, 'read')
    try:
        data = PlyData._parse_header(stream)
        # Read each element's payload in declaration order.
        for element in data:
            element._read(stream, data.text, data.byte_order)
    finally:
        if must_close:
            stream.close()
    return data
|
def run(self):
    """Build sdist/wheel distributions, upload them via twine, then tag
    and push the release; exits the process when done.

    shell=True is required here: `dist/*` relies on shell globbing.
    """
    subprocess.check_call("python setup.py sdist", shell=True)
    subprocess.check_call("python setup.py bdist_wheel", shell=True)
    # OLD way:
    # os.system("python setup.py sdist bdist_wheel upload")
    # NEW way:
    # Ref: https://packaging.python.org/distributing/
    subprocess.check_call("twine upload dist/*", shell=True)
    # Tag the release with the package version and publish the tag.
    subprocess.check_call("git tag -a {0} -m 'version {0}'".format(__version__), shell=True)
    subprocess.check_call("git push --tags", shell=True)
    # Stop setup.py from continuing with a normal build after the upload.
    sys.exit()
|
def label_by_time(self, time_signals, label_names=None, time_units='ms', time_dimension=0, copy=True, backup_original_spike_times_to=None, **kwargs):
    """creates a labeled spike data structure

    `time_signals` is a list of lists (or matrix), containing a timestamp in
    the first column (or first element of each element) and indizes that are
    to be applied to the data in the remaining columns/elements.

    This function will not add or remove spikes, but only shift spikes
    according to the preceding time signals.  If you want spikes relative to
    a time signal with fixed limits, use `label_peri_signals`, which will
    leave out and duplicate spikes, but can manage overlapping time signals.

    To get the absolute spike times back, `.absolute_spike_times_from_labels`
    can be used on the resulting SpikeContainer.  However, the order and
    length of the timing signals might not be correct if e.g. the intervals
    between time signals vary in length.

    If `backup_original_spike_times_to` is set to a string, the original
    spike times will be saved as this dimension as the new (relative) spike
    times replace the old time dimension.

    :param label_names: names for the new label dimensions (defaults to []);
        the original signature used a mutable default argument, replaced
        here with the None sentinel.
    :param copy: if True return a new SpikeContainer, else modify in place.
    """
    if label_names is None:
        label_names = []
    if self.data_format == 'empty':
        return SpikeContainer(None, units=self.units, copy_from=self)
    # this is read only
    spike_times = self.spike_times.get_converted(time_dimension, units=time_units)[1].copy()
    time_signals = np.array(time_signals)
    # this will be modified
    re_zeroed_spike_times = spike_times.copy()
    indizes = np.zeros((len(spike_times), time_signals.shape[0] - 1))
    maximal_time_gap = np.max(np.diff(time_signals[0]))
    for t in range(len(time_signals[0])):
        if t + 1 < len(time_signals[0]):
            # Spikes between this signal and the next get this signal's
            # labels and are re-zeroed to it.
            spike_range = (spike_times > time_signals[0][t]) * (spike_times <= time_signals[0][t + 1])
            indizes[spike_range, :] = [time_signals[_i][t] for _i in range(1, time_signals.shape[0])]
            re_zeroed_spike_times[spike_range] = (spike_times[spike_range] - time_signals[0][t])
        else:
            # we are past the last time signal: move all spikes in the future
            # back by this time signal (this will overwrite the spike times
            # multiple times)
            indizes[spike_times > time_signals[0][t], :] = [time_signals[_i][t] for _i in range(1, time_signals.shape[0])]
            re_zeroed_spike_times[spike_times > time_signals[0][t]] = (spike_times[spike_times > time_signals[0][t]] - time_signals[0][t])
    new_spike_times = LabeledMatrix(self.spike_times.matrix, self.spike_times.labels)
    new_spike_times.add_label_dimension(label_names, indizes)
    new_spike_times.labels[0].units = time_units
    new_spike_times.matrix[:, 0] = re_zeroed_spike_times
    new_spike_times.labels[0].min = 0
    new_spike_times.labels[0].max = maximal_time_gap
    if kwargs.get('recalculate_time_extent', False):
        new_spike_times.labels[0].min = np.min(re_zeroed_spike_times)
        new_spike_times.labels[0].max = np.max(re_zeroed_spike_times)
    if backup_original_spike_times_to is not None:
        new_spike_times.add_label_dimension(backup_original_spike_times_to, self[time_dimension])
    if copy:
        return SpikeContainer(new_spike_times, copy_from=self)
    else:
        self.set_spike_times(new_spike_times)
        return self
|
def remove_acl(path):
    """Remove the ACL of the file or folder located on the given path.

    Also remove the ACL of any file and folder below the given one,
    recursively.

    Args:
        path (str): Path to the file or folder to remove the ACL for,
            recursively.
    """
    system = platform.system()
    # Some files have ACLs, let's remove them recursively.
    if system == constants.PLATFORM_DARWIN and os.path.isfile('/bin/chmod'):
        # macOS: chmod -N strips ACLs, -R recurses.
        subprocess.call(['/bin/chmod', '-R', '-N', path])
    elif system == constants.PLATFORM_LINUX and os.path.isfile('/bin/setfacl'):
        # Linux: setfacl -b removes all ACL entries, -R recurses.
        subprocess.call(['/bin/setfacl', '-R', '-b', path])
|
def _get_buffer ( self , index ) :
"""Shared bounds checking and buffer creation ."""
|
if not 0 <= index < self . count :
raise IndexError ( )
size = struct . calcsize ( self . format )
# We create the buffer every time instead of keeping the buffer ( which is 32 bytes at least )
# around forever .
buf = bytearray ( size + 1 )
buf [ 0 ] = self . first_register + size * index
return buf
|
def create_repo_from_pip_url(pip_url, **kwargs):
    """Return an object representation of a VCS repository via pip-style url.

    The url scheme prefix selects the backend: ``git+`` -> GitRepo,
    ``hg+`` -> MercurialRepo, ``svn+`` -> SubversionRepo.

    :param pip_url: pip-style repository url, e.g.
        ``'git+https://www.github.com/you/myrepo'``.
    :param kwargs: forwarded to the backend's ``from_pip_url``.
    :returns: instance of a repository object.
    :rtype: :class:`libvcs.svn.SubversionRepo`, :class:`libvcs.git.GitRepo`
        or :class:`libvcs.hg.MercurialRepo`.
    :raises InvalidPipURL: if no recognized VCS prefix is present.
    """
    handlers = (
        ('git+', GitRepo),
        ('hg+', MercurialRepo),
        ('svn+', SubversionRepo),
    )
    for prefix, repo_class in handlers:
        if pip_url.startswith(prefix):
            return repo_class.from_pip_url(pip_url, **kwargs)
    raise InvalidPipURL(pip_url)
|
def post(interface, method, version=1, apihost=DEFAULT_PARAMS['apihost'], https=DEFAULT_PARAMS['https'], caller=None, session=None, params=None):
    """Send POST request to an API endpoint

    .. versionadded:: 0.8.3

    :param interface: interface name
    :type interface: str
    :param method: method name
    :type method: str
    :param version: method version
    :type version: int
    :param apihost: API hostname
    :type apihost: str
    :param https: whether to use HTTPS
    :type https: bool
    :param params: parameters for endpoint
    :type params: dict
    :return: endpoint response
    :rtype: :class:`dict`, :class:`lxml.etree.Element`, :class:`str`
    """
    scheme = 'https' if https else 'http'
    url = "{0}://{1}/{2}/{3}/v{4}/".format(scheme, apihost, interface, method, version)
    return webapi_request(url, 'POST', caller=caller, session=session, params=params)
|
def get_inventory(self, context):
    """Will locate vm in vcenter and fill its uuid

    Connects to the vCenter referenced by the resource attributes, resolves
    the VM's UUID by name, and (when available) updates the resource
    address with the VM's IP before building the autoload response.

    :type context: cloudshell.shell.core.context.ResourceCommandContext
    """
    vcenter_vm_name = context.resource.attributes['vCenter VM']
    # Normalize Windows-style path separators in the VM path.
    vcenter_vm_name = vcenter_vm_name.replace('\\', '/')
    vcenter_name = context.resource.attributes['vCenter Name']
    self.logger.info('start autoloading vm_path: {0} on vcenter: {1}'.format(vcenter_vm_name, vcenter_name))
    with CloudShellSessionContext(context) as cloudshell_session:
        session = cloudshell_session
    vcenter_api_res = session.GetResourceDetails(vcenter_name)
    vcenter_resource = self.model_parser.convert_to_vcenter_model(vcenter_api_res)
    si = None
    try:
        self.logger.info('connecting to vcenter ({0})'.format(vcenter_api_res.Address))
        si = self._get_connection_to_vcenter(self.pv_service, session, vcenter_resource, vcenter_api_res.Address)
        self.logger.info('loading vm uuid')
        vm_loader = VMLoader(self.pv_service)
        uuid = vm_loader.load_vm_uuid_by_name(si, vcenter_resource, vcenter_vm_name)
        self.logger.info('vm uuid: {0}'.format(uuid))
        self.logger.info('loading the ip of the vm')
        ip = self._try_get_ip(self.pv_service, si, uuid, vcenter_resource)
        if ip:
            session.UpdateResourceAddress(context.resource.name, ip)
    except Exception:
        self.logger.exception("Get inventory command failed")
        raise
    finally:
        # Always release the vCenter connection, success or failure.
        if si:
            self.pv_service.disconnect(si)
    # Only reached on success (the except clause always re-raises), so
    # `uuid` is guaranteed to be bound here.
    return self._get_auto_load_response(uuid, vcenter_name, context.resource)
|
def plot_energy(data, kind="kde", bfmi=True, figsize=None, legend=True, fill_alpha=(1, 0.75), fill_color=("C0", "C5"), bw=4.5, textsize=None, fill_kwargs=None, plot_kwargs=None, ax=None,):
    """Plot energy transition distribution and marginal energy distribution in HMC algorithms.

    This may help to diagnose poor exploration by gradient-based algorithms
    like HMC or NUTS.

    Parameters
    ----------
    data : xarray dataset, or object that can be converted (must represent
        `sample_stats` and have an `energy` variable)
    kind : str
        Type of plot to display (kde or histogram)
    bfmi : bool
        If True add to the plot the value of the estimated Bayesian fraction
        of missing information
    figsize : tuple
        Figure size. If None it will be defined automatically.
    legend : bool
        Flag for plotting legend (defaults to True)
    fill_alpha : tuple of floats
        Alpha blending value for the shaded area under the curve, between 0
        (no shade) and 1 (opaque). Defaults to (1, .75)
    fill_color : tuple of valid matplotlib color
        Color for Marginal energy distribution and Energy transition
        distribution. Defaults to ('C0', 'C5')
    bw : float
        Bandwidth scaling factor for the KDE. Should be larger than 0. The
        higher this number the smoother the KDE will be. Defaults to 4.5
        which is essentially the same as the Scott's rule of thumb (the
        default rule used by SciPy). Only works if `kind='kde'`
    textsize : float
        Text size scaling factor for labels, titles and lines. If None it
        will be autoscaled based on figsize.
    fill_kwargs : dicts, optional
        Additional keywords passed to `arviz.plot_kde` (to control the shade)
    plot_kwargs : dicts, optional
        Additional keywords passed to `arviz.plot_kde` or `plt.hist`
        (if type='hist')
    ax : axes
        Matplotlib axes.

    Returns
    -------
    ax : matplotlib axes

    Examples
    --------
    Plot a default energy plot

    .. plot::
        :context: close-figs

        >>> import arviz as az
        >>> data = az.load_arviz_data('centered_eight')
        >>> az.plot_energy(data)

    Represent energy plot via histograms

    .. plot::
        :context: close-figs

        >>> az.plot_energy(data, kind='hist')
    """
    energy = convert_to_dataset(data, group="sample_stats").energy.values
    if ax is None:
        _, ax = plt.subplots(figsize=figsize, constrained_layout=True)
    if fill_kwargs is None:
        fill_kwargs = {}
    if plot_kwargs is None:
        plot_kwargs = {}
    figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)
    # Two series: centered marginal energy, and the energy transitions
    # (first differences), each with its own alpha/color/label.
    series = zip(fill_alpha, fill_color, ("Marginal Energy", "Energy transition"), (energy - energy.mean(), np.diff(energy)),)
    if kind == "kde":
        for alpha, color, label, value in series:
            fill_kwargs["alpha"] = alpha
            fill_kwargs["color"] = color
            # setdefault: caller-provided plot_kwargs win over these.
            plot_kwargs.setdefault("color", color)
            plot_kwargs.setdefault("alpha", 0)
            plot_kwargs.setdefault("linewidth", linewidth)
            plot_kde(value, bw=bw, label=label, textsize=xt_labelsize, plot_kwargs=plot_kwargs, fill_kwargs=fill_kwargs, ax=ax,)
    elif kind == "hist":
        for alpha, color, label, value in series:
            ax.hist(value.flatten(), bins="auto", density=True, alpha=alpha, label=label, color=color, **plot_kwargs)
    else:
        raise ValueError("Plot type {} not recognized.".format(kind))
    if bfmi:
        # Invisible plot entries: used only to surface per-chain BFMI
        # values in the legend.
        for idx, val in enumerate(e_bfmi(energy)):
            ax.plot([], label="chain {:>2} BFMI = {:.2f}".format(idx, val), alpha=0)
    ax.set_xticks([])
    ax.set_yticks([])
    if legend:
        ax.legend()
    return ax
|
def readline(self):
    """Get the next line from the input buffer.

    Advances the line counter and returns the corresponding line, or the
    empty string once the buffer is exhausted.
    """
    self.line_number += 1
    current = self.line_number - 1
    if current >= len(self.lines):
        return ''
    return self.lines[current]
|
def _create_environment(config, outdir):
    """Constructor for an instance of the environment.

    Args:
      config: Object providing configurations via attributes.
      outdir: Directory to store videos in.

    Raises:
      NotImplementedError: For action spaces other than Box and Discrete.

    Returns:
      Wrapped OpenAI Gym environment.
    """
    env = gym.make(config.env) if isinstance(config.env, str) else config.env()
    # The monitor wrapper expects the environment to expose a `spec`
    # attribute, even if it is None.
    if not hasattr(env, 'spec'):
        env.spec = None
    if config.max_length:
        env = tools.wrappers.LimitDuration(env, config.max_length)
    env = gym.wrappers.Monitor(env, outdir, lambda unused_episode_number: True)
    action_space = env.action_space
    if isinstance(action_space, gym.spaces.Box):
        env = tools.wrappers.RangeNormalize(env)
        env = tools.wrappers.ClipAction(env)
    elif isinstance(action_space, gym.spaces.Discrete):
        env = tools.wrappers.RangeNormalize(env, action=False)
    else:
        raise NotImplementedError(
            "Unsupported action space '{}'".format(type(action_space)))
    env = tools.wrappers.ConvertTo32Bit(env)
    env = tools.wrappers.CacheSpaces(env)
    return env
|
def two_way_difference(self, b, extra_add=(), extra_remove=()):
    """Return (self - b, b - self)

    Computes the symmetric difference between two diff-like collections,
    with optional extra elements folded into each side.  Identity (`is`)
    comparisons against shared bases are used as fast paths that avoid
    materializing sets.
    """
    # Same object: nothing differs.
    if self is b:
        return ((), ())
    if isinstance(b, DiffRef_):
        # Unwrap the reference: its additions become removals on our side.
        extra_remove = extra_remove + b.add
        b = b.origin
    if extra_add == extra_remove:
        # Equal extras cancel each other out.
        extra_add = extra_remove = ()
    if isinstance(b, Diff_):
        if self.base is b.base:
            # Shared base: combine the deltas directly.
            first = self.add + b.remove
            second = self.remove + b.add
        elif self.base is b:
            # b is our base: our own delta is the whole difference.
            first = self.add
            second = self.remove
        elif b.base is self:
            first = b.remove
            second = b.add
        else:
            # Unrelated bases: fall back to full-collection comparison.
            first = self
            second = b
    else:
        first = self
        second = b
    # Cheap exits when one side is empty; otherwise do real set algebra.
    if not first and not extra_add:
        return ((), tuple(second) + tuple(extra_remove))
    elif not second and not extra_remove:
        return (tuple(first) + tuple(extra_add), ())
    else:
        first = set(first)
        first.update(extra_add)
        second = set(second)
        second.update(extra_remove)
        return tuple(first.difference(second)), tuple(second.difference(first))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.