signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _CheckFieldMaskMessage ( message ) :
"""Raises ValueError if message is not a FieldMask ."""
|
message_descriptor = message . DESCRIPTOR
if ( message_descriptor . name != 'FieldMask' or message_descriptor . file . name != 'google/protobuf/field_mask.proto' ) :
raise ValueError ( 'Message {0} is not a FieldMask.' . format ( message_descriptor . full_name ) )
|
def add_choice(self, choice, inline_region, identifier=None, name=''):
    """stub"""
    region_map = self.my_osid_object_form._my_map['choices']
    if inline_region not in region_map:
        raise IllegalState('that inline region does not exist. Please call add_inline_region first')
    if identifier is None:
        identifier = str(ObjectId())
    existing_ids = [entry['id'] for entry in region_map[inline_region]]
    if identifier in existing_ids:
        # Identifier already present: merge the new text into the
        # existing choice and hand that choice back.
        for existing in region_map[inline_region]:
            if existing['id'] == identifier:
                self.add_or_replace_value('texts', choice, dictionary=existing)
                choice = existing
    else:
        # Fresh identifier: build a new choice record and append it.
        choice = {
            'id': identifier,
            'texts': [self._dict_display_text(choice)],
            'name': name,
        }
        region_map[inline_region].append(choice)
    return choice
|
def composite_decorator(func):
    """Decorator for wrapping functions that calculate a weighted sum"""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # Call the same-named method on each isochrone and accumulate the
        # weighted results, summing element-wise along the first axis.
        method_name = func.__name__
        contributions = [
            weight * getattr(iso, method_name)(*args, **kwargs)
            for weight, iso in zip(self.weights, self.isochrones)
        ]
        return np.sum(contributions, axis=0)
    return wrapper
|
def add_params(param_list_left, param_list_right):
    """Add two lists of parameters one by one

    :param param_list_left: list of numpy arrays
    :param param_list_right: list of numpy arrays
    :return: list of numpy arrays"""
    return [left + right
            for left, right in zip(param_list_left, param_list_right)]
|
def delete_processing_block(processing_block_id):
    """Delete Processing Block with the specified ID"""
    scheduling_block_id = processing_block_id.split(':')[0]
    config = get_scheduling_block(scheduling_block_id)
    blocks = config.get('processing_blocks')
    # First block whose id matches; raises IndexError if absent,
    # matching the original behaviour.
    target = [block for block in blocks
              if block.get('id') == processing_block_id][0]
    config['processing_blocks'].remove(target)
    DB.set('scheduling_block/{}'.format(config['id']), json.dumps(config))
    # Push a "deleted" event onto the processing block event list so
    # listeners are notified of the removal.
    DB.rpush('processing_block_events',
             json.dumps(dict(type="deleted", id=processing_block_id)))
|
def SetServerInformation(self, server, port):
    """Set the server information.

    Args:
        server (str): IP address or hostname of the server.
        port (int): Port number of the server.
    """
    self._host = server
    self._port = port
    logger.debug(
        'Elasticsearch server: {0!s} port: {1:d}'.format(server, port))
|
def status(resources, *args, **kwargs):
    """Print status report for zero or more resources."""
    template = '{:<50}{:>10}'
    client = redis.Redis(decode_responses=True, **kwargs)
    # Detailed report, one section per requested resource.
    for position, resource in enumerate(resources):
        if position:  # blank line between resources
            print()
        # strings needed
        keys = Keys(resource)
        wildcard = keys.key('*')
        # header
        template = '{:<50}{:>10}'
        indicator = client.get(keys.indicator)
        if indicator is None:
            continue
        print(template.format(resource, indicator))
        print(SEPARATOR)
        # body
        numbers = sorted(keys.number(key)
                         for key in client.scan_iter(wildcard))
        for number in numbers:
            label = client.get(keys.key(number))
            print(template.format(label, number))
    if resources:
        return
    # No explicit resources given: show a more general status report
    # covering every available queue.
    resources = find_resources(client)
    if resources:
        dispensers = (Keys.DISPENSER.format(r) for r in resources)
        indicators = (Keys.INDICATOR.format(r) for r in resources)
        combinations = zip(client.mget(dispensers), client.mget(indicators))
        sizes = (int(dispenser) - int(indicator) + 1
                 for dispenser, indicator in combinations)
        # print sorted results, largest queue first
        print(template.format('Resource', 'Queue size'))
        print(SEPARATOR)
        for size, resource in sorted(zip(sizes, resources), reverse=True):
            print(template.format(resource, size))
|
def save(self):
    """Creates a new user and account. Returns the newly created user."""
    data = self.cleaned_data
    activation_required = defaults.ACCOUNTS_ACTIVATION_REQUIRED
    # The user starts active only when activation is NOT required.
    return get_user_model().objects.create_user(
        data['username'],
        data['email'],
        data['password1'],
        not activation_required,
        activation_required,
    )
|
def order_upgrades(self, upgrades, history=None):
    """Order upgrades according to their dependencies.

    (topological sort using Kahn's algorithm -
    http://en.wikipedia.org/wiki/Topological_sorting).

    :param upgrades: Dict of upgrades
    :param history: Dict of applied upgrades
    """
    history = history or {}
    graph_incoming, graph_outgoing = self._create_graph(upgrades, history)
    # Removed already applied upgrades (assumes all dependencies prior to
    # this upgrade has been applied).
    for node_id in six.iterkeys(history):
        start_nodes = [node_id, ]
        while start_nodes:
            node = start_nodes.pop()
            # Remove from direct dependents
            try:
                for d in graph_outgoing[node]:
                    graph_incoming[d] = [x for x in graph_incoming[d]
                                         if x != node]
            except KeyError:
                # Node appears in history but not in the graph.
                warnings.warn("Ghost upgrade %s detected" % node)
            # Remove all prior dependencies
            if node in graph_incoming:
                # Get dependencies, remove node, and recursively
                # remove all dependencies.
                depends_on = graph_incoming[node]
                # Add dependencies to check
                for d in depends_on:
                    graph_outgoing[d] = [x for x in graph_outgoing[d]
                                         if x != node]
                    start_nodes.append(d)
                del graph_incoming[node]
    # Check for missing dependencies
    for node_id, depends_on in six.iteritems(graph_incoming):
        for d in depends_on:
            if d not in graph_incoming:
                raise RuntimeError("Upgrade %s depends on an unknown"
                                   " upgrade %s" % (node_id, d))
    # Nodes with no incoming edges
    start_nodes = [x for x in six.iterkeys(graph_incoming)
                   if len(graph_incoming[x]) == 0]
    topo_order = []
    while start_nodes:
        # Append node_n to list (it has no incoming edges)
        node_n = start_nodes.pop()
        topo_order.append(node_n)
        # For each node m with an edge from n to m
        for node_m in graph_outgoing[node_n]:
            # Remove the edge n to m
            graph_incoming[node_m] = [x for x in graph_incoming[node_m]
                                      if x != node_n]
            # If m has no incoming edges, add it to start_nodes.
            if not graph_incoming[node_m]:
                start_nodes.append(node_m)
    # Any node left with incoming edges is part of a cycle.
    for node, edges in six.iteritems(graph_incoming):
        if edges:
            raise RuntimeError("The upgrades have at least one cyclic "
                               "dependency involving %s." % node)
    # NOTE(review): on Python 3 this returns a lazy map iterator, not a
    # list — callers that index the result would break; verify intent.
    return map(lambda x: upgrades[x], topo_order)
|
def oplot(self, x, y, **kw):
    """generic plotting method, overplotting any existing plot"""
    # Delegate straight to the underlying panel.
    target_panel = self.panel
    target_panel.oplot(x, y, **kw)
|
def compose(layers, bbox=None, layer_filter=None, color=None, **kwargs):
    """Compose layers to a single :py:class:`PIL.Image`.
    If the layers do not have visible pixels, the function returns `None`.

    Example::

        image = compose([layer1, layer2])

    In order to skip some layers, pass `layer_filter` function which
    should take `layer` as an argument and return `True` to keep the layer
    or return `False` to skip::

        image = compose(
            layers,
            layer_filter=lambda x: x.is_visible() and x.kind == 'type')

    By default, visible layers are composed.

    .. note:: This function is experimental and does not guarantee
        Photoshop-quality rendering.

        Currently the following are ignored:

        - Adjustments layers
        - Layer effects
        - Blending mode (all blending modes become normal)

        Shape drawing is inaccurate if the PSD file is not saved with
        maximum compatibility.

    :param layers: a layer, or an iterable of layers.
    :param bbox: (left, top, bottom, right) tuple that specifies a region to
        compose. By default, all the visible area is composed. The origin
        is at the top-left corner of the PSD document.
    :param layer_filter: a callable that takes a layer and returns `bool`.
    :param color: background color in `int` or `tuple`.
    :return: :py:class:`PIL.Image` or `None`.
    """
    from PIL import Image
    # Accept a single layer as well as an iterable of layers.
    if not hasattr(layers, '__iter__'):
        layers = [layers]

    def _default_filter(layer):
        return layer.is_visible()

    layer_filter = layer_filter or _default_filter
    valid_layers = [x for x in layers if layer_filter(x)]
    if len(valid_layers) == 0:
        return None
    if bbox is None:
        bbox = extract_bbox(valid_layers)
        # Empty bounding box means nothing visible to compose.
        if bbox == (0, 0, 0, 0):
            return None
    # Alpha must be forced to correctly blend.
    mode = get_pil_mode(valid_layers[0]._psd.color_mode, True)
    result = Image.new(
        mode,
        (bbox[2] - bbox[0], bbox[3] - bbox[1]),
        color=color if color is not None else 'white',
    )
    result.putalpha(0)
    for layer in valid_layers:
        # Skip layers entirely outside the requested region.
        if intersect(layer.bbox, bbox) == (0, 0, 0, 0):
            continue
        image = layer.compose(**kwargs)
        if image is None:
            continue
        logger.debug('Composing %s' % layer)
        # Position of the layer relative to the composed region's origin.
        offset = (layer.left - bbox[0], layer.top - bbox[1])
        result = _blend(result, image, offset)
    return result
|
def _validate_virtualbox(self):
    '''a method to validate that virtualbox is running on Win 7/8 machines

    :return: boolean indicating whether virtualbox is running'''
    # validate operating system: only Windows releases below 10 use the
    # docker-machine + VirtualBox toolchain checked here.
    if self.localhost.os.sysname != 'Windows':
        return False
    win_release = float(self.localhost.os.release)
    if win_release >= 10.0:
        return False
    # validate docker-machine installation by invoking its help screen
    from os import devnull
    from subprocess import call, check_output, STDOUT
    sys_command = 'docker-machine --help'
    try:
        check_output(sys_command, shell=True, stderr=STDOUT).decode('utf-8')
    except Exception as err:
        raise Exception('Docker requires docker-machine to run on Win7/8. GoTo: https://www.docker.com')
    # validate virtualbox is running by querying the machine's status;
    # stderr is discarded so the CLI's own error text is not shown.
    sys_command = 'docker-machine status %s' % self.vbox
    try:
        vbox_status = check_output(sys_command, shell=True, stderr=open(devnull, 'wb')).decode('utf-8').replace('\n', '')
    except Exception as err:
        # Tailor the error to how the machine name was configured.
        if not self.vbox:
            raise Exception('Docker requires VirtualBox to run on Win7/8. GoTo: https://www.virtualbox.org')
        elif self.vbox == "default":
            raise Exception('Virtualbox "default" not found. Container will not start without a valid virtualbox.')
        else:
            raise Exception('Virtualbox "%s" not found. Try using "default" instead.' % self.vbox)
    if 'Stopped' in vbox_status:
        raise Exception('Virtualbox "%s" is stopped. Try first running: docker-machine start %s' % (self.vbox, self.vbox))
    return True
|
def _get_format_from_filename(file, mode):
    """Return a format string obtained from file (or file.name).

    If file already exists (= read mode), an empty string is returned on
    error. If not, an exception is raised.
    The return type will always be str or unicode (even if
    file/file.name is a bytes object)."""
    extension = ''
    file = getattr(file, 'name', file)
    try:
        # This raises an exception if file is not a (Unicode/byte) string:
        extension = _os.path.splitext(file)[-1][1:]
        # Convert bytes to unicode (raises AttributeError on Python 3 str):
        extension = extension.decode('utf-8', 'replace')
    except Exception:
        pass
    if extension.upper() not in _formats and 'r' not in mode:
        raise TypeError("No format specified and unable to get format from "
                        "file extension: {0!r}".format(file))
    return extension
|
def json_dict_copy(json_object, property_list, defaultValue=None):
    """property_list = [
        {"name": "name", "alternateName": ["name", "title"]},
        {"name": "birthDate", "alternateName": ["dob", "dateOfBirth"]},
        {"name": "description"}"""
    result = {}
    for prop in property_list:
        target = prop["name"]
        # First non-None alias value wins.
        for alias in prop.get("alternateName", []):
            value = json_object.get(alias)
            if value is not None:
                result[target] = value
                break
        if target not in result:
            # Fall back to the canonical key (even if its value is None),
            # then to the caller-supplied default.
            if target in json_object:
                result[target] = json_object[target]
            elif defaultValue is not None:
                result[target] = defaultValue
    return result
|
def on_cursor_shape_changed(self, combo):
    """Changes the value of cursor_shape in dconf"""
    selected_index = combo.get_active()
    self.settings.style.set_int('cursor-shape', selected_index)
|
def load_arguments(self, argv, base=None):
    '''Process given argument list based on registered arguments and given
    optional base :class:`argparse.ArgumentParser` instance.

    This method saves processed arguments on itself, and this state won't
    be lost after :meth:`clean` calls.
    Processed argument state will be available via :meth:`get_argument`
    method.

    :param argv: command-line arguments (without command itself)
    :type argv: iterable of str
    :param base: optional base :class:`argparse.ArgumentParser` instance.
    :type base: argparse.ArgumentParser or None
    :returns: argparse.Namespace instance with processed arguments as
        given by :meth:`argparse.ArgumentParser.parse_args`.
    :rtype: argparse.Namespace'''
    # A throwaway parser just to collect --plugin options.
    plugin_parser = argparse.ArgumentParser(add_help=False)
    plugin_parser.add_argument('--plugin', action='append', default=[])
    parent = base or plugin_parser
    parser = argparse.ArgumentParser(
        parents=(parent,), add_help=False,
        **getattr(parent, 'defaults', {}))
    # Each --plugin value may itself be a comma-separated list; flatten.
    raw_plugins = plugin_parser.parse_known_args(argv)[0].plugin
    plugins = [name for chunk in raw_plugins for name in chunk.split(',')]
    # Deduplicate while preserving first-seen order.
    for plugin in sorted(set(plugins), key=plugins.index):
        arguments = self.extract_plugin_arguments(plugin)
        if arguments:
            group = parser.add_argument_group('%s arguments' % plugin)
            for argargs, argkwargs in arguments:
                group.add_argument(*argargs, **argkwargs)
    self._argparse_arguments = parser.parse_args(argv)
    return self._argparse_arguments
|
def parse_parameters(cls, parameters, possible_fields=None):
    """Parses a list of parameters to get the list of fields needed in
    order to evaluate those parameters.

    Parameters
    ----------
    parameters : (list of) strings
        The list of desired parameters. These can be (functions of) fields
        or virtual fields.
    possible_fields : {None, dict}
        Specify the list of possible fields. Must be a dictionary given
        the names, and dtype of each possible field. If None, will use this
        class's `_staticfields`.

    Returns
    -------
    list :
        The list of names of the fields that are needed in order to
        evaluate the given parameters."""
    if possible_fields is not None:
        # make sure field names are strings and not unicode
        possible_fields = dict(
            [[fname, fdtype] for fname, fdtype in possible_fields.items()])

        # Swap in a subclass whose static fields are the caller's set.
        class ModifiedArray(cls):
            _staticfields = possible_fields
        cls = ModifiedArray
    return cls(1, names=parameters).fieldnames
|
def delete_ip_scope(network_address, auth, url):
    '''Function to delete an entire IP segment from the IMC IP Address management under terminal access

    :param network_address: CIDR string of the scope to delete
    :param auth: requests auth object (IMC credentials)
    :param url: base URL of the IMC server

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
    >>> delete_scope = delete_ip_scope('10.50.0.0/24', auth.creds, auth.url)'''
    scope_id = get_scope_id(network_address, auth, url)
    delete_ip_address_url = '''/imcrs/res/access/assignedIpScope/''' + str(scope_id)
    f_url = url + delete_ip_address_url
    try:
        # BUG FIX: the request was previously issued BEFORE the try block,
        # so the RequestException handler below could never fire; and an
        # unconditional `return r` made the 204 status check unreachable.
        r = requests.delete(f_url, auth=auth, headers=HEADERS)
        if r.status_code == 204:  # IP Segment successfully deleted
            return r.status_code
        # Any other status: hand the raw response back for inspection.
        return r
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " delete_ip_scope: An Error has occured"
|
def qtrim_back(self, name, size=1):
    """Remove up to ``size`` elements from the back (tail) of the queue.

    NOTE(review): the previous docstring ("Sets the list element at
    ``index`` to ``value``...") described a different command and did not
    match the ``qtrim_back`` command issued below; rewritten accordingly.

    :param string name: the queue name
    :param int size: the max length of removed elements
    :return: the length of removed elements
    :rtype: int"""
    # Validates size is a positive integer before sending the command.
    size = get_positive_integer("size", size)
    return self.execute_command('qtrim_back', name, size)
|
def _SkipFieldContents(tokenizer):
    """Skips over contents (value or message) of a field.

    Args:
        tokenizer: A tokenizer to parse the field name and values.
    """
    # Try to guess the type of this field.
    # If this field is not a message, there should be a ":" between the
    # field name and the field value and also the field value should not
    # start with "{" or "<" which indicates the beginning of a message body.
    # If there is no ":" or there is a "{" or "<" after ":", this field has
    # to be a message or the input is ill-formed.
    saw_colon = tokenizer.TryConsume(':')
    if saw_colon and not (tokenizer.LookingAt('{') or tokenizer.LookingAt('<')):
        _SkipFieldValue(tokenizer)
    else:
        _SkipFieldMessage(tokenizer)
|
def smiles_to_compound(smiles, assign_descriptors=True):
    """Convert SMILES text to compound object

    Raises:
        ValueError: SMILES with unsupported format"""
    mol = molecule()
    try:
        # Feed the SMILES string to the state machine one token at a time,
        # then flush with None to obtain the finished structure.
        for token in iter(smiles):
            mol(token)
        result, _ = mol(None)
    except KeyError as err:
        raise ValueError("Unsupported Symbol: {}".format(err))
    # Node 0 is a parser sentinel, not an atom.
    result.graph.remove_node(0)
    logger.debug(result)
    if assign_descriptors:
        molutil.assign_descriptors(result)
    return result
|
def name_strip(self, name, is_policy=False, prefix=True):
    """Transforms name to AWS valid characters and adds prefix and type

    :param name: Name of the role/policy
    :param is_policy: True if policy should be added as suffix
    :param prefix: True if prefix should be added
    :return: Transformed and joined name"""
    # FIX: the local was previously named ``str``, shadowing the builtin.
    built = self.name_build(name, is_policy, prefix)
    # Title-case each word, then drop dashes to produce CamelCase.
    return built.title().replace('-', '')
|
def psnr(prediction, ground_truth, maxp=None, name='psnr'):
    """`Peek Signal to Noise Ratio <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.

    .. math::

        PSNR = 20 \\cdot \\log_{10}(MAX_p) - 10 \\cdot \\log_{10}(MSE)

    Args:
        prediction: a :class:`tf.Tensor` representing the prediction signal.
        ground_truth: another :class:`tf.Tensor` with the same shape.
        maxp: maximum possible pixel value of the image (255 in 8bit images).
            If None, the MAX_p term is omitted.

    Returns:
        A scalar tensor representing the PSNR
    """
    # BUG FIX: float(maxp) was previously called unconditionally, so the
    # documented default maxp=None raised TypeError before ever reaching
    # the `maxp is None` branch below.
    if maxp is not None:
        maxp = float(maxp)

    def log10(x):
        with tf.name_scope("log10"):
            numerator = tf.log(x)
            denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
            return numerator / denominator

    mse = tf.reduce_mean(tf.square(prediction - ground_truth))
    if maxp is None:
        # Without a peak value, report only the -10*log10(MSE) term.
        psnr = tf.multiply(log10(mse), -10., name=name)
    else:
        psnr = tf.multiply(log10(mse), -10.)
        psnr = tf.add(tf.multiply(20., log10(maxp)), psnr, name=name)
    return psnr
|
def clear_cache(modeladmin, request, queryset=None):
    """Clears media cache files such as thumbnails."""
    # 'execute' is set when the confirmation form has been submitted.
    execute = request.POST.get('execute')
    files_in_storage = []
    storage = get_media_storage()
    cache_files_choices = []
    # Build (storage_name, link) choices for every cache file found.
    for storage_name in get_cache_files():
        link = mark_safe('<a href="%s">%s</a>' % (storage.url(storage_name), storage_name))
        cache_files_choices.append((storage_name, link))
    if not len(cache_files_choices):
        messages.warning(request, message=_('There are no cache files.'))
        return HttpResponseRedirect('')
    if execute:
        form = DeleteCacheFilesForm(queryset, cache_files_choices, request.POST)
        if form.is_valid():
            form.save()
            node = FileNode.get_top_node()
            message = ungettext('Deleted %i cache file.', 'Deleted %i cache files.', len(form.success_files)) % len(form.success_files)
            if len(form.success_files) == len(cache_files_choices):
                message = '%s %s' % (_('The cache was cleared.'), message)
            messages.success(request, message=message)
            if form.error_files:
                messages.error(request, message=_('The following files could not be deleted:') + ' ' + repr(form.error_files))
            return HttpResponseRedirect(node.get_admin_url())
    # First visit (no 'execute'): render the confirmation form.
    if not execute:
        if len(cache_files_choices) > 0:
            form = DeleteCacheFilesForm(queryset, cache_files_choices)
        else:
            form = None
        c = get_actions_context(modeladmin)
        c.update({'title': _('Clear cache'), 'submit_label': _('Delete selected files'), 'form': form, 'select_all': 'selected_files', })
        return render_to_response('admin/media_tree/filenode/actions_form.html', c, context_instance=RequestContext(request))
    # Submitted but invalid form falls through to a bare redirect.
    return HttpResponseRedirect('')
|
def lag_calc(self, stream, pre_processed, shift_len=0.2, min_cc=0.4, horizontal_chans=['E', 'N', '1', '2'], vertical_chans=['Z'], cores=1, interpolate=False, plot=False, parallel=True, process_cores=None, debug=0):
    """Compute picks based on cross-correlation alignment.

    :type stream: obspy.core.stream.Stream
    :param stream:
        All the data needed to cut from - can be a gappy Stream.
    :type pre_processed: bool
    :param pre_processed:
        Whether the stream has been pre-processed or not to match the
        templates. See note below.
    :type shift_len: float
    :param shift_len:
        Shift length allowed for the pick in seconds, will be
        plus/minus this amount - default=0.2
    :type min_cc: float
    :param min_cc:
        Minimum cross-correlation value to be considered a pick,
        default=0.4.
    :type horizontal_chans: list
    :param horizontal_chans:
        List of channel endings for horizontal-channels, on which
        S-picks will be made.
    :type vertical_chans: list
    :param vertical_chans:
        List of channel endings for vertical-channels, on which P-picks
        will be made.
    :type cores: int
    :param cores:
        Number of cores to use in parallel processing, defaults to one.
    :type interpolate: bool
    :param interpolate:
        Interpolate the correlation function to achieve sub-sample
        precision.
    :type plot: bool
    :param plot:
        To generate a plot for every detection or not, defaults to False
    :type parallel: bool
    :param parallel: Turn parallel processing on or off.
    :type process_cores: int
    :param process_cores:
        Number of processes to use for pre-processing (if different to
        `cores`).
    :type debug: int
    :param debug: Debug output level, 0-5 with 5 being the most output.
    :returns:
        Catalog of events with picks. No origin information is included.
        These events can then be written out via
        :func:`obspy.core.event.Catalog.write`, or to Nordic Sfiles using
        :func:`eqcorrscan.utils.sfile_util.eventtosfile` and located
        externally.
    :rtype: obspy.core.event.Catalog

    .. Note::
        Note on pre-processing: You can provide a pre-processed stream,
        which may be beneficial for detections over large time periods
        (the stream can have gaps, which reduces memory usage). However,
        in this case the processing steps are not checked, so you must
        ensure that all the template in the Party have the same sampling
        rate and filtering as the stream.
        If pre-processing has not be done then the data will be processed
        according to the parameters in the templates, in this case
        templates will be grouped by processing parameters and run with
        similarly processed data. In this case, all templates do not have
        to have the same processing parameters.

    .. Note::
        Picks are corrected for the template pre-pick time.
    """
    # Delegate to Party.lag_calc by wrapping this single family in a Party.
    return Party(families=[self]).lag_calc(stream=stream, pre_processed=pre_processed, shift_len=shift_len, min_cc=min_cc, horizontal_chans=horizontal_chans, vertical_chans=vertical_chans, cores=cores, interpolate=interpolate, plot=plot, parallel=parallel, process_cores=process_cores, debug=debug)
|
def _delete_port_profile_from_ucsm(self, handle, port_profile, ucsm_ip):
    """Deletes Port Profile from UCS Manager."""
    profile_dn = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
                  port_profile)
    handle.StartTransaction()
    # Find port profile on the UCS Manager
    vnic_profile_cls = self.ucsmsdk.VnicProfile
    found_profile = handle.GetManagedObject(
        None,
        vnic_profile_cls.ClassId(),
        {vnic_profile_cls.NAME: port_profile,
         vnic_profile_cls.DN: profile_dn})
    if found_profile:
        handle.RemoveManagedObject(found_profile)
    else:
        LOG.warning('UCS Manager network driver did not find '
                    'Port Profile %s to delete.', port_profile)
    handle.CompleteTransaction()
|
def string_to_element(element_as_string, include_namespaces=False):
    """:return: an element parsed from a string value, or the element as is if already parsed"""
    # Already-parsed inputs pass straight through.
    if element_as_string is None:
        return None
    if isinstance(element_as_string, ElementTree):
        return element_as_string.getroot()
    if isinstance(element_as_string, ElementType):
        return element_as_string
    element_as_string = _xml_content_to_string(element_as_string)
    if not isinstance(element_as_string, string_types):
        # Let cElementTree handle the error
        return fromstring(element_as_string)
    if not strip_xml_declaration(element_as_string):
        # Same as ElementTree().getroot()
        return None
    if include_namespaces:
        return fromstring(element_as_string)
    return fromstring(strip_namespaces(element_as_string))
|
def _get_model ( vehicle ) :
"""Clean the model field . Best guess ."""
|
model = vehicle [ 'model' ]
model = model . replace ( vehicle [ 'year' ] , '' )
model = model . replace ( vehicle [ 'make' ] , '' )
return model . strip ( ) . split ( ' ' ) [ 0 ]
|
def update(self, friendly_name=values.unset, default_service_role_sid=values.unset, default_channel_role_sid=values.unset, default_channel_creator_role_sid=values.unset, read_status_enabled=values.unset, reachability_enabled=values.unset, typing_indicator_timeout=values.unset, consumption_report_interval=values.unset, notifications_new_message_enabled=values.unset, notifications_new_message_template=values.unset, notifications_new_message_sound=values.unset, notifications_new_message_badge_count_enabled=values.unset, notifications_added_to_channel_enabled=values.unset, notifications_added_to_channel_template=values.unset, notifications_added_to_channel_sound=values.unset, notifications_removed_from_channel_enabled=values.unset, notifications_removed_from_channel_template=values.unset, notifications_removed_from_channel_sound=values.unset, notifications_invited_to_channel_enabled=values.unset, notifications_invited_to_channel_template=values.unset, notifications_invited_to_channel_sound=values.unset, pre_webhook_url=values.unset, post_webhook_url=values.unset, webhook_method=values.unset, webhook_filters=values.unset, limits_channel_members=values.unset, limits_user_channels=values.unset, media_compatibility_message=values.unset, pre_webhook_retry_count=values.unset, post_webhook_retry_count=values.unset, notifications_log_enabled=values.unset):
    """Update the ServiceInstance

    :param unicode friendly_name: A string to describe the resource
    :param unicode default_service_role_sid: The service role assigned to users when they are added to the service
    :param unicode default_channel_role_sid: The channel role assigned to users when they are added to a channel
    :param unicode default_channel_creator_role_sid: The channel role assigned to a channel creator when they join a new channel
    :param bool read_status_enabled: Whether to enable the Message Consumption Horizon feature
    :param bool reachability_enabled: Whether to enable the Reachability Indicator feature for this Service instance
    :param unicode typing_indicator_timeout: How long in seconds to wait before assuming the user is no longer typing
    :param unicode consumption_report_interval: DEPRECATED
    :param bool notifications_new_message_enabled: Whether to send a notification when a new message is added to a channel
    :param unicode notifications_new_message_template: The template to use to create the notification text displayed when a new message is added to a channel
    :param unicode notifications_new_message_sound: The name of the sound to play when a new message is added to a channel
    :param bool notifications_new_message_badge_count_enabled: Whether the new message badge is enabled
    :param bool notifications_added_to_channel_enabled: Whether to send a notification when a member is added to a channel
    :param unicode notifications_added_to_channel_template: The template to use to create the notification text displayed when a member is added to a channel
    :param unicode notifications_added_to_channel_sound: The name of the sound to play when a member is added to a channel
    :param bool notifications_removed_from_channel_enabled: Whether to send a notification to a user when they are removed from a channel
    :param unicode notifications_removed_from_channel_template: The template to use to create the notification text displayed to a user when they are removed
    :param unicode notifications_removed_from_channel_sound: The name of the sound to play to a user when they are removed from a channel
    :param bool notifications_invited_to_channel_enabled: Whether to send a notification when a user is invited to a channel
    :param unicode notifications_invited_to_channel_template: The template to use to create the notification text displayed when a user is invited to a channel
    :param unicode notifications_invited_to_channel_sound: The name of the sound to play when a user is invited to a channel
    :param unicode pre_webhook_url: The webhook URL for pre-event webhooks
    :param unicode post_webhook_url: The URL for post-event webhooks
    :param unicode webhook_method: The HTTP method to use for both PRE and POST webhooks
    :param unicode webhook_filters: The list of WebHook events that are enabled for this Service instance
    :param unicode limits_channel_members: The maximum number of Members that can be added to Channels within this Service
    :param unicode limits_user_channels: The maximum number of Channels Users can be a Member of within this Service
    :param unicode media_compatibility_message: The message to send when a media message has no text
    :param unicode pre_webhook_retry_count: Count of times webhook will be retried in case of timeout or 429/503/504 HTTP responses
    :param unicode post_webhook_retry_count: The number of times calls to the `post_webhook_url` will be retried
    :param bool notifications_log_enabled: Whether to log notifications

    :returns: Updated ServiceInstance
    :rtype: twilio.rest.chat.v2.service.ServiceInstance"""
    # Map keyword arguments onto the REST API's dotted parameter names;
    # values.of() drops entries that are still values.unset, so only the
    # parameters the caller actually supplied are sent.
    data = values.of({
        'FriendlyName': friendly_name,
        'DefaultServiceRoleSid': default_service_role_sid,
        'DefaultChannelRoleSid': default_channel_role_sid,
        'DefaultChannelCreatorRoleSid': default_channel_creator_role_sid,
        'ReadStatusEnabled': read_status_enabled,
        'ReachabilityEnabled': reachability_enabled,
        'TypingIndicatorTimeout': typing_indicator_timeout,
        'ConsumptionReportInterval': consumption_report_interval,
        'Notifications.NewMessage.Enabled': notifications_new_message_enabled,
        'Notifications.NewMessage.Template': notifications_new_message_template,
        'Notifications.NewMessage.Sound': notifications_new_message_sound,
        'Notifications.NewMessage.BadgeCountEnabled': notifications_new_message_badge_count_enabled,
        'Notifications.AddedToChannel.Enabled': notifications_added_to_channel_enabled,
        'Notifications.AddedToChannel.Template': notifications_added_to_channel_template,
        'Notifications.AddedToChannel.Sound': notifications_added_to_channel_sound,
        'Notifications.RemovedFromChannel.Enabled': notifications_removed_from_channel_enabled,
        'Notifications.RemovedFromChannel.Template': notifications_removed_from_channel_template,
        'Notifications.RemovedFromChannel.Sound': notifications_removed_from_channel_sound,
        'Notifications.InvitedToChannel.Enabled': notifications_invited_to_channel_enabled,
        'Notifications.InvitedToChannel.Template': notifications_invited_to_channel_template,
        'Notifications.InvitedToChannel.Sound': notifications_invited_to_channel_sound,
        'PreWebhookUrl': pre_webhook_url,
        'PostWebhookUrl': post_webhook_url,
        'WebhookMethod': webhook_method,
        # serialize.map applies the identity per element (normalizes a
        # possibly-unset list for transport).
        'WebhookFilters': serialize.map(webhook_filters, lambda e: e),
        'Limits.ChannelMembers': limits_channel_members,
        'Limits.UserChannels': limits_user_channels,
        'Media.CompatibilityMessage': media_compatibility_message,
        'PreWebhookRetryCount': pre_webhook_retry_count,
        'PostWebhookRetryCount': post_webhook_retry_count,
        'Notifications.LogEnabled': notifications_log_enabled,
    })
    payload = self._version.update('POST', self._uri, data=data, )
    return ServiceInstance(self._version, payload, sid=self._solution['sid'], )
|
def p_var_decl_at(p):
    """ var_decl : DIM idlist typedef AT expr
    """
    # NOTE: the docstring above is the PLY grammar rule -- yacc reads it to
    # build the parser tables, so it must not be reworded.
    p[0] = None
    # Declaring several comma-separated variables AT one address would be
    # ambiguous, so only a single identifier is accepted per statement.
    if len(p[2]) != 1:
        syntax_error(p.lineno(1), 'Only one variable at a time can be declared this way')
        return
    idlist = p[2][0]
    entry = SYMBOL_TABLE.declare_variable(idlist[0], idlist[1], p[3])
    if entry is None:
        # Declaration failed (error already reported by the symbol table).
        return
    if p[5].token == 'CONST':
        tmp = p[5].expr
        if tmp.token == 'UNARY' and tmp.operator == 'ADDRESS':  # Must be an ID
            if tmp.operand.token == 'VAR':
                # DIM a AT @b : "a" becomes an alias of variable "b".
                entry.make_alias(tmp.operand)
            elif tmp.operand.token == 'ARRAYACCESS':
                # DIM a AT @arr(i, j) : the subscripts must be compile-time
                # constants so the offset is already known here.
                if tmp.operand.offset is None:
                    syntax_error(p.lineno(4), 'Address is not constant. Only constant subscripts are allowed')
                    return
                entry.make_alias(tmp.operand)
                entry.offset = tmp.operand.offset
            else:
                syntax_error(p.lineno(4), 'Only address of identifiers are allowed')
                return
    elif not is_number(p[5]):
        syntax_error(p.lineno(4), 'Address must be a numeric constant expression')
        return
    else:
        # A plain numeric constant: store it (cast to the index/address type)
        # as the variable's fixed memory address.
        entry.addr = str(make_typecast(_TYPE(gl.STR_INDEX_TYPE), p[5], p.lineno(4)).value)
        entry.accessed = True
        if entry.scope == SCOPE.local:
            # A local pinned to an absolute address cannot live on the stack;
            # force it into static storage.
            SYMBOL_TABLE.make_static(entry.name)
|
def replace_built(self, built_packages):
    """Return a copy of this resolvable set but with built packages.

    :param dict built_packages: A mapping from a resolved package to its locally built package.
    :returns: A new resolvable set with built package replacements made.
    """
    def substituted(entry):
        # Swap each package for its locally built counterpart when one
        # exists, preserving the original ordering via OrderedSet.
        swapped = OrderedSet(built_packages.get(pkg, pkg) for pkg in entry.packages)
        return _ResolvedPackages(entry.resolvable, swapped, entry.parent, entry.constraint_only)

    return _ResolvableSet([substituted(entry) for entry in self.__tuples])
|
def makePhe(segID, N, CA, C, O, geo):
    '''Creates a Phenylalanine residue'''
    ## R-Group
    # Each side-chain atom is positioned from three already-placed reference
    # atoms plus a bond length, bond angle and dihedral taken from ``geo``.
    placed = {'N': N, 'CA': CA, 'C': C}
    ring_spec = [
        ('CB', ('N', 'C', 'CA'), geo.CA_CB_length, geo.C_CA_CB_angle, geo.N_C_CA_CB_diangle),
        ('CG', ('N', 'CA', 'CB'), geo.CB_CG_length, geo.CA_CB_CG_angle, geo.N_CA_CB_CG_diangle),
        ('CD1', ('CA', 'CB', 'CG'), geo.CG_CD1_length, geo.CB_CG_CD1_angle, geo.CA_CB_CG_CD1_diangle),
        ('CD2', ('CA', 'CB', 'CG'), geo.CG_CD2_length, geo.CB_CG_CD2_angle, geo.CA_CB_CG_CD2_diangle),
        ('CE1', ('CB', 'CG', 'CD1'), geo.CD1_CE1_length, geo.CG_CD1_CE1_angle, geo.CB_CG_CD1_CE1_diangle),
        ('CE2', ('CB', 'CG', 'CD2'), geo.CD2_CE2_length, geo.CG_CD2_CE2_angle, geo.CB_CG_CD2_CE2_diangle),
        ('CZ', ('CG', 'CD1', 'CE1'), geo.CE1_CZ_length, geo.CD1_CE1_CZ_angle, geo.CG_CD1_CE1_CZ_diangle),
    ]
    for name, (ref1, ref2, ref3), length, angle, diangle in ring_spec:
        coords = calculateCoordinates(placed[ref1], placed[ref2], placed[ref3],
                                      length, angle, diangle)
        placed[name] = Atom(name, coords, 0.0, 1.0, " ", " " + name, 0, "C")
    ## Create Residue Data Structures
    res = Residue((' ', segID, ' '), "PHE", ' ')
    # Backbone first, then the side chain in the same order as before.
    for atom in (N, CA, C, O, placed['CB'], placed['CG'], placed['CD1'],
                 placed['CE1'], placed['CD2'], placed['CE2'], placed['CZ']):
        res.add(atom)
    return res
|
def _update_console(self, value=None):
    """Update the progress bar to the given value (out of the total
    given to the constructor).
    """
    # NOTE(review): ``value`` defaults to None but is used in arithmetic and
    # comparisons below; calling without an argument would raise unless
    # _total == 0 -- confirm all callers pass a value.
    if self._total == 0:
        frac = 1.0
    else:
        frac = float(value) / float(self._total)
    file = self._file
    write = file.write
    # Clamp the filled portion so an over-run (value > total) cannot
    # overflow the bar width.
    if frac > 1:
        bar_fill = int(self._bar_length)
    else:
        bar_fill = int(float(self._bar_length) * frac)
    # '\r' rewinds to line start so the bar redraws in place.
    write('\r|')
    color_print('=' * bar_fill, 'blue', file=file, end='')
    if bar_fill < self._bar_length:
        # Arrow head plus dashes pad the unfilled remainder.
        color_print('>', 'green', file=file, end='')
        write('-' * (self._bar_length - bar_fill - 1))
    write('|')
    if value >= self._total:
        # Finished: show total elapsed time.
        t = time.time() - self._start_time
        prefix = '    '
    elif value <= 0:
        # Nothing done yet: no time estimate possible.
        t = None
        prefix = ''
    else:
        # Estimate time remaining by scaling elapsed time by (left / done).
        t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
        prefix = ' ETA '
    write(' {0:>4s}/{1:>4s}'.format(human_file_size(value), self._human_total))
    write(' ({0:>6s}%)'.format('{0:.2f}'.format(frac * 100.0)))
    write(prefix)
    if t is not None:
        write(human_time(t))
    self._file.flush()
|
def __gen_hierarchy_file(self, layer):
    """Hierarchical structures (<structList> elements) are used to create
    hierarchically nested annotation graphs (e.g. to express consists-of
    relationships or dominance-edges in syntax trees, RST).

    A <struct> element will be created for each hierarchical node
    (e.g. an NP) with edges (<rel> elements) to each dominated element
    (e.g. tokens, other <struct> elements).

    NOTE: The types/labels of these newly created hierarchical nodes and
    edges aren't stored in this file, but in feat/multiFeat files
    referencing this one! See: __gen_struct_anno_files() and
    __gen_rel_anno_file()).

    There will be one hierarchy file for each top level layer.

    TODO: check, if we can omit hierarchy files for layers that don't
    contain dominance edges
    """
    paula_id = '{0}.{1}.{2}_{3}'.format(layer, self.corpus_name, self.name, layer)
    self.paulamap['hierarchy'][layer] = paula_id
    E, tree = gen_paula_etree(paula_id)
    # Collect the layer's dominance and spanning edges from the docgraph.
    dominance_edges = select_edges_by(self.dg, layer=layer, edge_type=EdgeTypes.dominance_relation, data=True)
    span_edges = select_edges_by(self.dg, layer=layer, edge_type=EdgeTypes.spanning_relation, data=True)
    # source node ID -> {target node ID -> edge attribute dict}
    dominance_dict = defaultdict(lambda: defaultdict(str))
    for source_id, target_id, edge_attrs in dominance_edges:
        # The artificial root node is not part of the exported hierarchy.
        if source_id != layer + ':root_node':
            dominance_dict[source_id][target_id] = edge_attrs
    # in PAULA XML, token spans are also part of the hierarchy
    for source_id, target_id, edge_attrs in span_edges:
        if istoken(self.dg, target_id):
            dominance_dict[source_id][target_id] = edge_attrs
    # NOTE: we don't add a base file here, because the nodes could be
    # tokens or structural nodes
    slist = E('structList', {'type': layer})
    for source_id in dominance_dict:
        struct = E('struct', {'id': str(source_id)})
        if self.human_readable:
            # Embed the node label as an XML comment for easier debugging.
            struct.append(Comment(self.dg.node[source_id].get('label')))
        for target_id in dominance_dict[source_id]:
            # Token targets point into the tokenization file; structural
            # targets are document-internal references.
            if istoken(self.dg, target_id):
                href = '{0}.xml#{1}'.format(self.paulamap['tokenization'], target_id)
            else:
                href = '#{0}'.format(target_id)
            rel = E('rel', {'id': 'rel_{0}_{1}'.format(source_id, target_id), 'type': dominance_dict[source_id][target_id]['edge_type'], XLINKHREF: href})
            struct.append(rel)
            if self.human_readable:
                struct.append(Comment(self.dg.node[target_id].get('label')))
        slist.append(struct)
    tree.append(slist)
    self.files[paula_id] = tree
    self.file2dtd[paula_id] = PaulaDTDs.struct
    return paula_id
|
def setUpMethods(self, port):
    '''set up all methods representing the port operations.

    Parameters:
        port -- Port that defines the operations.
    '''
    # NOTE(review): this assert message format string has no %s placeholder,
    # so a failing assertion would itself raise TypeError -- confirm upstream.
    assert isinstance(port, WSDLTools.Port), 'expecting WSDLTools.Port not: ' % type(port)
    sd = self._services.get(port.getService().name)
    assert sd is not None, 'failed to initialize.'
    binding = port.getBinding()
    portType = port.getPortType()
    action_in = ''
    # Generate one Python handler method per bound operation.
    for bop in binding.operations:
        try:
            op = portType.operations[bop.name]
        except KeyError, ex:
            raise WsdlGeneratorError, 'Port(%s) PortType(%s) missing operation(%s) defined in Binding(%s)' % (port.name, portType.name, bop.name, binding.name)
        # Find the SOAP action for this operation (for-else: warn if absent).
        for ext in bop.extensions:
            if isinstance(ext, WSDLTools.SoapOperationBinding):
                action_in = ext.soapAction
                break
        else:
            warnings.warn('Port(%s) operation(%s) defined in Binding(%s) missing soapAction' % (port.name, op.name, binding.name))
        msgin = op.getInputMessage()
        # NOTE(review): msgin.name is dereferenced here before the
        # ``msgin is not None`` check below, so the None branch looks
        # unreachable -- confirm whether getInputMessage() can return None.
        msgin_name = TextProtect(msgin.name)
        method_name = self.getMethodName(op.name)
        # Emit the method body as generated source code into the service stub.
        m = sd.newMethod()
        print >>m, '%sdef %s(self, ps, **kw):' % (self.getIndent(level=1), method_name)
        if msgin is not None:
            print >>m, '%srequest = ps.Parse(%s.typecode)' % (self.getIndent(level=2), msgin_name)
        else:
            print >>m, '%s# NO input' % self.getIndent(level=2)
        msgout = op.getOutputMessage()
        if msgout is not None:
            msgout_name = TextProtect(msgout.name)
            print >>m, '%sreturn request,%s()' % (self.getIndent(level=2), msgout_name)
        else:
            print >>m, '%s# NO output' % self.getIndent(level=2)
            print >>m, '%sreturn request,None' % self.getIndent(level=2)
        print >>m, ''
        # Register the generated method in the SOAP dispatch tables.
        print >>m, '%ssoapAction[\'%s\'] = \'%s\'' % (self.getIndent(level=1), action_in, method_name)
        print >>m, '%sroot[(%s.typecode.nspname,%s.typecode.pname)] = \'%s\'' % (self.getIndent(level=1), msgin_name, msgin_name, method_name)
    return
|
def resolve(hostname, family=AF_UNSPEC):
    """Resolve hostname to one or more IP addresses through the operating system.

    Resolution is carried out for the given address family. If no address
    family is specified, only IPv4 and IPv6 addresses are returned. If
    multiple IP addresses are found, all are returned.

    :param family: AF_INET or AF_INET6 or AF_UNSPEC (default)
    :return: tuple of unique IP addresses
    """
    supported = (AF_INET, AF_INET6)
    if family != AF_UNSPEC and family not in supported:
        raise ValueError("Invalid family '%s'" % family)
    try:
        entries = socket.getaddrinfo(hostname, None, family)
    except socket.gaierror as exc:
        # EAI_NODATA and EAI_NONAME are expected if this name is not (yet)
        # present in DNS; anything else is worth a debug trace.
        if exc.errno not in (socket.EAI_NODATA, socket.EAI_NONAME):
            LOG.debug("socket.getaddrinfo() raised an exception", exc_info=exc)
        return ()
    if family == AF_UNSPEC:
        # Restrict the unspecified lookup to IPv4/IPv6 results only.
        unique = {entry[4][0] for entry in entries if entry[0] in supported}
    else:
        unique = {entry[4][0] for entry in entries}
    return tuple(unique)
|
def astuple(self):
    """Create a tuple ``{fieldvalue1, ...}`` of the Message object.

    :return: A tuple representation of the message.
    :rtype: Tuple[Any]
    """
    # Collect the declared field names, then read each value off the instance.
    field_names = [field.name for field in fields(self)]
    return tuple(getattr(self, name) for name in field_names)
|
def structure_from_string(data):
    """Parses a rndstr.in or lat.in file into pymatgen's Structure format.

    :param data: contents of a rndstr.in or lat.in file
    :return: Structure object
    """
    data = data.splitlines()
    data = [x.split() for x in data if x]  # remove empty lines
    # following specification/terminology given in manual
    if len(data[0]) == 6:  # lattice parameters given as a b c alpha beta gamma
        a, b, c, alpha, beta, gamma = map(float, data[0])
        coord_system = Lattice.from_parameters(a, b, c, alpha, beta, gamma).matrix
        lattice_vecs = np.array(
            [[data[1][0], data[1][1], data[1][2]],
             [data[2][0], data[2][1], data[2][2]],
             [data[3][0], data[3][1], data[3][2]]], dtype=float)
        first_species_line = 4
    else:  # coordinate system given as an explicit 3x3 matrix
        coord_system = np.array(
            [[data[0][0], data[0][1], data[0][2]],
             [data[1][0], data[1][1], data[1][2]],
             [data[2][0], data[2][1], data[2][2]]], dtype=float)
        lattice_vecs = np.array(
            [[data[3][0], data[3][1], data[3][2]],
             [data[4][0], data[4][1], data[4][2]],
             [data[5][0], data[5][1], data[5][2]]], dtype=float)
        first_species_line = 6
    scaled_matrix = np.matmul(coord_system, lattice_vecs)
    lattice = Lattice(scaled_matrix)
    all_coords = []
    all_species = []
    for line in data[first_species_line:]:
        all_coords.append(np.array([line[0], line[1], line[2]], dtype=float))
        # join multiple strings back together, trim whitespace, then split
        # the comma-delimited "El=occupancy" entries
        species_strs = "".join(line[3:]).replace(" ", "").split(",")
        species = {}
        for species_str in species_strs:
            species_str = species_str.split('=')
            if len(species_str) == 1:
                # no occupancy given: assume occupancy is 1.0
                species_str = [species_str[0], 1.0]
            try:
                species[Specie(species_str[0])] = float(species_str[1])
            except Exception:
                # Bug fix: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit. An unknown element symbol
                # falls back to a DummySpecie; a malformed occupancy still
                # raises from float() below.
                species[DummySpecie(species_str[0])] = float(species_str[1])
        all_species.append(species)
    return Structure(lattice, all_species, all_coords)
|
def import_table ( self , source , table_name ) :
"""Copy a table from another SQLite database to this one ."""
|
query = "SELECT * FROM `%s`" % table_name . lower ( )
df = pandas . read_sql ( query , source . connection )
df . to_sql ( table_name , con = self . own_connection )
|
def timeline(self, uri):
    '''Get the domain tagging timeline for a given uri.
    Could be a domain, ip, or url.
    For details, see https://docs.umbrella.com/investigate-api/docs/timeline
    '''
    # Fill the "timeline" endpoint template with the target, then GET+parse.
    endpoint = self._uris["timeline"].format(uri)
    return self.get_parse(endpoint)
|
def p_simple_indirect_reference(p):
    '''simple_indirect_reference : DOLLAR simple_indirect_reference
                                 | reference_variable'''
    # NOTE: the docstring above is the PLY grammar rule -- yacc parses it to
    # build the parser tables, so it must not be reworded.
    if len(p) == 3:
        # "$$name": wrap the inner reference in another Variable node
        # (variable-variable), keeping the "$" token's line number.
        p[0] = ast.Variable(p[2], lineno=p.lineno(1))
    else:
        # Plain reference_variable: pass the node through unchanged.
        p[0] = p[1]
|
def get_all_spot_instance_requests(self, request_ids=None, filters=None):
    """Retrieve all the spot instance requests associated with your account.

    :type request_ids: list
    :param request_ids: A list of strings of spot instance request IDs

    :type filters: dict
    :param filters: Optional filters that can be used to limit the results
        returned, given as a dictionary of filter name to filter value.
        The set of allowable filter names/values depends on the request
        being performed; check the EC2 API guide for details.

    :rtype: list
    :return: A list of
        :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
    """
    params = {}
    if request_ids:
        self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
    if filters:
        if 'launch.group-id' in filters:
            lgid = filters.get('launch.group-id')
            # Warn when a legacy group *name* is supplied instead of sg-xxxxxxxx.
            looks_like_sg_id = lgid.startswith('sg-') and len(lgid) == 11
            if not looks_like_sg_id:
                warnings.warn("The 'launch.group-id' filter now requires a security "
                              "group id (sg-*) and no longer supports filtering by "
                              "group name. Please update your filters accordingly.",
                              UserWarning)
        self.build_filter_params(params, filters)
    return self.get_list('DescribeSpotInstanceRequests', params,
                         [('item', SpotInstanceRequest)], verb='POST')
|
def get_file_sample(self, numLines=10):
    """retrieve a sample of the file"""
    # Accumulate up to numLines lines, each prefixed by a zero-padded index.
    sample = ''
    try:
        with open(self.fullname, 'r') as fin:
            for idx, text in enumerate(fin):
                sample += str(idx).zfill(5) + ' ' + text
                if idx >= numLines - 1:
                    break
        return sample
    except Exception as ex:
        # Best effort: report the problem and return whatever was read.
        print('cant get_file_sample in "', self.fullname, '":', str(ex))
        return sample
|
def get_resources(self, collections):
    """Get resources that correspond to values from :collections:.

    :param collections: Collection names for which resources should be
        gathered
    :type collections: list of str
    :return: Gathered resources
    :rtype: set of Resource instances
    """
    registered = self.request.registry._model_collections
    # First restrict to the requested collections, then drop falsy entries.
    matching = [res for res in registered.values()
                if res.collection_name in collections]
    return {res for res in matching if res}
|
def new_digraph(self, name, data=None, **attr):
    """Return a new instance of type DiGraph, initialized with the given
    data if provided.

    :arg name: a name for the graph
    :arg data: dictionary or NetworkX graph object providing initial state
    """
    # Register the name first, then create and remember the graph object.
    self._init_graph(name, 'DiGraph')
    graph = DiGraph(self, name, data, **attr)
    self._graph_objs[name] = graph
    return graph
|
def to_index(self, ordered_dims=None):
    """Convert all index coordinates into a :py:class:`pandas.Index`.

    Parameters
    ----------
    ordered_dims : sequence, optional
        Possibly reordered version of this object's dimensions indicating
        the order in which dimensions should appear on the result.

    Returns
    -------
    pandas.Index
        Index subclass corresponding to the outer-product of all dimension
        coordinates. This will be a MultiIndex if this object has more
        than one dimension.
    """
    if ordered_dims is None:
        ordered_dims = self.dims
    elif set(ordered_dims) != set(self.dims):
        raise ValueError('ordered_dims must match dims, but does not: '
                         '{} vs {}'.format(ordered_dims, self.dims))

    num_dims = len(ordered_dims)
    if num_dims == 0:
        raise ValueError('no valid index for a 0-dimensional object')
    if num_dims == 1:
        # Single dimension: hand back its index directly.
        (only_dim,) = ordered_dims
        return self._data.get_index(only_dim)
    # Several dimensions: outer product of the per-dimension indexes.
    per_dim = [self._data.get_index(k) for k in ordered_dims]
    return pd.MultiIndex.from_product(per_dim, names=list(ordered_dims))
|
def sg_query_streamer(self, index):
    """Query the current status of a streamer."""
    status = self.sensor_graph.query_streamer(index)
    if status is None:
        # Unknown streamer index: return the packed error code instead.
        error = _pack_sgerror(SensorGraphError.STREAMER_NOT_ALLOCATED)
        return [struct.pack("<L", error)]
    return [struct.pack("<LLLLBBBx", *status)]
|
def getStats(self):
    """Returns the GA4GH protocol representation of this read group's
    ReadStats.
    """
    readStats = protocol.ReadStats()
    readStats.aligned_read_count = self.getNumAlignedReads()
    readStats.unaligned_read_count = self.getNumUnalignedReads()
    # TODO base_count requires iterating through all reads
    return readStats
|
async def kickChatMember(self, chat_id, user_id, until_date=None):
    """See: https://core.telegram.org/bots/api#kickchatmember"""
    # ``locals()`` captures the current local names as the request payload
    # (presumably _strip removes ``self`` -- confirm against the helper).
    # Do not introduce extra locals above this line or they would leak into
    # the request parameters.
    p = _strip(locals())
    return await self._api_request('kickChatMember', _rectify(p))
|
def find_best_matching_node(self, new, old_nodes):
    """Find the node that best matches the new node given the old nodes. If no
    good match exists return `None`.
    """
    wanted_type = new.__class__.__name__
    #: TODO: We should pick the BEST one from this list
    #: based on some "matching" criteria (such as matching ref name or params)
    candidates = [old for old in old_nodes
                  if old.__class__.__name__ == wanted_type]
    if self.debug:
        print("Found matches for {}: {} ".format(new, candidates))
    return candidates[0] if candidates else None
|
def pygmentify(value, **kwargs):
    """Return a highlighted code block with Pygments.

    ``value`` is an HTML fragment; every ``<pre>`` element whose language can
    be determined (from its class list, or by guessing) is replaced with a
    Pygments-highlighted version.  ``kwargs`` are forwarded to
    ``HtmlFormatter`` and must include ``cssclass``.
    """
    soup = BeautifulSoup(value, 'html.parser')
    for pre in soup.find_all('pre'):
        # Get code
        code = ''.join([to_string(item) for item in pre.contents])
        # Undo HTML entity escaping. '&amp;' is deliberately replaced last so
        # entities produced by the earlier replacements are not unescaped twice.
        code = code.replace('&lt;', '<')
        code = code.replace('&gt;', '>')
        code = code.replace('&#39;', "'")
        code = code.replace('&quot;', '"')
        code = code.replace('&amp;', '&')
        # Get lexer by language
        class_list = pre.get('class', [])
        lexers = []
        options = {'stripall': True}
        # Collect all found lexers
        for c in class_list:
            try:
                lexers.append(get_lexer_by_name(c, **options))
            except ClassNotFound:
                pass
        # Get first lexer match or none
        try:
            lexer = lexers[0]
        except IndexError:
            lexer = None
        # If no lexer, try guessing
        if lexer is None:
            try:
                lexer = guess_lexer(pre.text, **options)
                # Record the guessed language's aliases as CSS classes too.
                class_list += [alias for alias in lexer.aliases]
            except ClassNotFound:
                pass
        if lexer is not None:
            # Get formatter
            formatter = HtmlFormatter(**kwargs)
            # Highlight code
            highlighted = highlight(code, lexer, formatter)
            # Re-attach the original class names to the generated <pre>.
            class_string = ' '.join([c for c in class_list])
            highlighted = highlighted.replace('<div class="%s"><pre>' % kwargs['cssclass'], '<div class="%s"><pre class="%s">' % (kwargs['cssclass'], class_string))
            pre.replace_with(highlighted)
    return soup.decode(formatter=None).strip()
|
def check_spelling(spelling_lang, txt):
    """Check the spelling in the text, and compute a score. The score is the
    number of words correctly (or almost correctly) spelled, minus the number
    of misspelled words. Words "almost" correct remain neutral (-> are not
    included in the score)

    Returns:
        A tuple: (fixed text, score)
    """
    if os.name == "nt":
        # python-enchant is not supported by this code path on Windows.
        assert (not "check_spelling() not available on Windows")
        return
    with _ENCHANT_LOCK:
        # Maximum distance from the first suggestion from python-enchant
        words_dict = enchant.request_dict(spelling_lang)
        try:
            tknzr = enchant.tokenize.get_tokenizer(spelling_lang)
        except enchant.tokenize.TokenizerNotFoundError:
            # Fall back to default tokenization if no match for 'lang'
            tknzr = enchant.tokenize.get_tokenizer()
        score = 0
        # Net length change from replacements made so far; maps tokenizer
        # positions (relative to the original text) onto the edited text.
        offset = 0
        for (word, word_pos) in tknzr(txt):
            if len(word) < _MIN_WORD_LEN:
                continue
            if words_dict.check(word):
                # immediately correct words are a really good hint for
                # orientation
                score += 100
                continue
            suggestions = words_dict.suggest(word)
            if (len(suggestions) <= 0):
                # this word is useless. It may even indicate a bad orientation
                score -= 10
                continue
            main_suggestion = suggestions[0]
            lv_dist = Levenshtein.distance(word, main_suggestion)
            if (lv_dist > _MAX_LEVENSHTEIN_DISTANCE):
                # hm, this word looks like it's in a bad shape
                continue
            logger.debug("Spell checking: Replacing: %s -> %s"
                         % (word, main_suggestion))
            # let's replace the word by its suggestion
            pre_txt = txt[:word_pos + offset]
            post_txt = txt[word_pos + len(word) + offset:]
            txt = pre_txt + main_suggestion + post_txt
            offset += (len(main_suggestion) - len(word))
            # fixed words may be a good hint for orientation
            score += 5
        return (txt, score)
|
def direct2dDistance(self, point):
    """consider the distance between two mapPoints, ignoring all terrain, pathing issues"""
    if isinstance(point, MapPoint):
        # Plain Euclidean distance in the XY plane.
        dx = self.x - point.x
        dy = self.y - point.y
        return (dx ** 2 + dy ** 2) ** 0.5
    # Not a MapPoint: no meaningful distance.
    return 0.0
|
def stop_script(self, script_id):
    """Stops a running script.

    script_id:= id of stored script.

    ...

    status = pi.stop_script(sid)
    ...
    """
    # Issue the "stop script" command to the pigpio daemon; the trailing 0 is
    # the command's second parameter as required by the wire protocol.
    res = yield from self._pigpio_aio_command(_PI_CMD_PROCS, script_id, 0)
    # Convert the daemon's unsigned reply into a signed status/error code.
    return _u2i(res)
|
def register(cls, instance_class, name=None):
    """Register a class with the factory.

    :param instance_class: the class to register with the factory (not a
        string)
    :param name: the name to use as the key for instance class lookups;
        defaults to the name of the class
    """
    key = instance_class.__name__ if name is None else name
    cls.INSTANCE_CLASSES[key] = instance_class
|
def delete_role_config_group(self, name):
    """Delete a role config group by name.

    @param name: Role config group name.
    @return: The deleted ApiRoleConfigGroup object.
    @since: API v3
    """
    # Delegate to the role_config_groups module with this service's context.
    resource_root = self._get_resource_root()
    cluster_name = self._get_cluster_name()
    return role_config_groups.delete_role_config_group(
        resource_root, self.name, name, cluster_name)
|
def render_field_errors(field):
    """Render field errors as html."""
    if not field.errors:
        # Nothing to render.
        return None
    joined = '. '.join(field.errors)
    html = """<p class="help-block">Error: {errors}</p>""".format(errors=joined)
    return HTMLString(html)
|
def build(self):
    """Builds the `HelicalHelix`.

    One single-residue minor helix is generated per segment of the curve
    primitive; each is rotated about its own axis to follow the desired
    phi_c_alpha phasing, and the fragments are joined into one Polypeptide.
    """
    helical_helix = Polypeptide()
    primitive_coords = self.curve_primitive.coordinates
    # One aa=1 minor helix per consecutive pair of primitive points.
    helices = [Helix.from_start_and_end(start=primitive_coords[i],
                                        end=primitive_coords[i + 1],
                                        helix_type=self.minor_helix_type,
                                        aa=1)
               for i in range(len(primitive_coords) - 1)]
    residues_per_turn = self.minor_residues_per_turn(minor_repeat=self.minor_repeat)
    if residues_per_turn == 0:
        # Fall back to the canonical value for this minor helix type.
        residues_per_turn = _helix_parameters[self.minor_helix_type][0]
    if self.minor_handedness == 'l':
        # Left-handed minor helix: rotate the opposite way each step.
        residues_per_turn *= -1
    # initial phi_c_alpha value calculated using the first Helix in helices.
    if self.orientation != -1:
        initial_angle = dihedral(numpy.array([0, 0, 0]), primitive_coords[0],
                                 primitive_coords[1], helices[0][0]['CA'])
    else:
        initial_angle = dihedral(numpy.array([0, 0, primitive_coords[0][2]]),
                                 primitive_coords[0],
                                 numpy.array([primitive_coords[0][0],
                                              primitive_coords[0][1],
                                              primitive_coords[1][2]]),
                                 helices[0][0]['CA'])
    # angle required to achieve desired phi_c_alpha value of self.phi_c_alpha.
    addition_angle = self.phi_c_alpha - initial_angle
    for i, h in enumerate(helices):
        # Advance the rotation by one residue step per segment.
        angle = (i * (360.0 / residues_per_turn)) + addition_angle
        h.rotate(angle=angle, axis=h.axis.unit_tangent, point=h.helix_start)
        helical_helix.extend(h)
    helical_helix.relabel_all()
    # Adopt the assembled monomers and re-parent them to this object.
    self._monomers = helical_helix._monomers[:]
    for monomer in self._monomers:
        monomer.ampal_parent = self
    return
|
def singlediode(self):
    """Deprecated"""
    # Compute the five De Soto single-diode model parameters from the
    # effective irradiance and the cell temperature.
    (photocurrent, saturation_current, resistance_series,
     resistance_shunt, nNsVth) = (
        self.system.calcparams_desoto(self.effective_irradiance,
                                      self.temps['temp_cell']))
    self.desoto = (photocurrent, saturation_current, resistance_series,
                   resistance_shunt, nNsVth)
    # Solve the single-diode equation for the DC characteristics, scale to
    # the array's series/parallel configuration, and zero-fill NaNs.
    self.dc = self.system.singlediode(photocurrent, saturation_current,
                                      resistance_series, resistance_shunt,
                                      nNsVth)
    self.dc = self.system.scale_voltage_current_power(self.dc).fillna(0)
    return self
|
def get_all_for_project(self, name, **kwargs):
    """Gets the Build Records produced from the BuildConfiguration by name.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, define a `callback` function to be invoked
    when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.get_all_for_project(name, callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str name: BuildConfiguration name (required)
    :param int page_index: Page index
    :param int page_size: Pagination size
    :param str sort: Sorting RSQL
    :param str q: RSQL query
    :return: BuildRecordPage
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both paths delegate to the *_with_http_info variant; when a callback is
    # supplied that call is asynchronous and returns the request thread,
    # otherwise it returns the data directly.
    return self.get_all_for_project_with_http_info(name, **kwargs)
|
def os_packages(metadata):
    """Installs operating system dependent development packages.

    :param metadata: sequence whose first two items are the distro family
        and release strings
    :return: True when a supported distro was detected and the install
        commands were issued, False otherwise
    """
    family = metadata[0]
    release = metadata[1]
    # Shared command set for all yum-based distros.
    yum_commands = ['sudo yum -y update',
                    'sudo yum -y groupinstall "Development tools"']
    if 'Amazon' in family and '2' not in release:
        stdout_message('Identified Amazon Linux 1 os distro')
        for cmd in yum_commands:
            stdout_message(subprocess.getoutput(cmd))
        return True
    elif 'Amazon' in family and '2' in release:
        stdout_message('Identified Amazon Linux 2 os distro')
        for cmd in yum_commands:
            stdout_message(subprocess.getoutput(cmd))
        return True
    elif 'Redhat' in family:
        stdout_message('Identified Redhat Enterprise Linux os distro')
        for cmd in yum_commands:
            stdout_message(subprocess.getoutput(cmd))
        # Bug fix: this branch previously fell through without signalling
        # success, returning False even though the commands ran.
        return True
    elif 'Ubuntu' in family or 'Mint' in family:
        # Bug fix: the condition was ``'Ubuntu' or 'Mint' in family``, which
        # is always truthy, so every unrecognised distro took this branch.
        stdout_message('Identified Ubuntu Linux os distro')
        # Bug fix: the old branch ran a yum groupinstall on an apt-based
        # system; build-essential is the Debian/Ubuntu equivalent.
        commands = ['sudo apt -y update',
                    'sudo apt -y upgrade',
                    'sudo apt -y install build-essential']
        for cmd in commands:
            stdout_message(subprocess.getoutput(cmd))
        return True
    return False
|
def solve_select(expr, vars):
    """Use IAssociative.select to get key (rhs) from the data (lhs).

    This operation supports both scalars and repeated values on the LHS -
    selecting from a repeated value implies a map-like operation and returns a
    new repeated value.
    """
    data, _ = __solve_for_repeated(expr.lhs, vars)
    key = solve(expr.rhs, vars).value
    try:
        # Map the selection over every value in the (possibly repeated) LHS.
        results = [associative.select(d, key) for d in repeated.getvalues(data)]
    except (KeyError, AttributeError):
        # Raise a better exception for accessing a non-existent key.
        raise errors.EfilterKeyError(root=expr, key=key, query=expr.source)
    except (TypeError, ValueError):
        # Raise a better exception for what is probably a null pointer error.
        if vars.locals is None:
            raise errors.EfilterNoneError(
                root=expr, query=expr.source,
                message="Cannot select key %r from a null." % key)
        else:
            # Not a null-scope problem: let the original error propagate.
            raise
    except NotImplementedError:
        raise errors.EfilterError(
            root=expr, query=expr.source,
            message="Cannot select keys from a non-associative value.")
    return Result(repeated.meld(*results), ())
|
def check_finished(self, max_plugins_output_length):  # pylint: disable=too-many-branches
    """Handle action if it is finished (get stdout, stderr, exit code...)

    Polls the child process; if it is still running, grows the poll back-off
    and enforces the timeout. Once finished, collects outputs, normalises the
    exit status and records user/system CPU times.

    :param max_plugins_output_length: max plugin data length
    :type max_plugins_output_length: int
    :return: None
    """
    self.last_poll = time.time()

    # Snapshot children CPU times so we can compute the delta at the end.
    _, _, child_utime, child_stime, _ = os.times()

    # Not yet finished...
    if self.process.poll() is None:
        # We must wait, but checks are variable in time so we do not wait the same
        # for a little check or a long ping. So we do like TCP: slow start with a very
        # shot time (0.0001 s) increased *2 but do not wait more than 0.5 s.
        self.wait_time = min(self.wait_time * 2, 0.5)
        now = time.time()
        # This log is really spamming... uncomment if you really need this information :)
        # logger.debug("%s - Process pid=%d is still alive", now, self.process.pid)

        # Get standard outputs in non blocking mode from the process streams
        stdout = no_block_read(self.process.stdout)
        stderr = no_block_read(self.process.stderr)
        try:
            self.stdoutdata += stdout.decode("utf-8")
            self.stderrdata += stderr.decode("utf-8")
        except AttributeError:
            # Reads may return str already (no .decode) depending on platform.
            pass

        # Timeout exceeded: kill the process and report a level-3 (UNKNOWN) status.
        if (now - self.check_time) > self.timeout:
            logger.warning("Process pid=%d spent too much time: %.2f seconds",
                           self.process.pid, now - self.check_time)
            self._in_timeout = True
            self._kill()
            self.status = ACT_STATUS_TIMEOUT
            self.execution_time = now - self.check_time
            self.exit_status = 3

            # NOTE(review): assumes ALIGNAK_LOG_ACTIONS is set in the environment
            # whenever self.log_actions is truthy -- confirm against caller setup.
            if self.log_actions:
                if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
                    logger.warning("Action '%s' exited on timeout (%d s)",
                                   self.command, self.timeout)
                else:
                    logger.info("Action '%s' exited on timeout (%d s)",
                                self.command, self.timeout)

            # Do not keep the process objcet
            del self.process

            # Replace stdout with stderr if stdout is empty
            self.stdoutdata = self.stdoutdata.strip()
            if not self.stdoutdata:
                self.stdoutdata = self.stderrdata

            # Now grep what we want in the output
            self.get_outputs(self.stdoutdata, max_plugins_output_length)
            # We can clean the useless properties now
            del self.stdoutdata
            del self.stderrdata

            # Get the user and system time
            _, _, n_child_utime, n_child_stime, _ = os.times()
            self.u_time = n_child_utime - child_utime
            self.s_time = n_child_stime - child_stime
            return
        return

    logger.debug("Process pid=%d exited with %d",
                 self.process.pid, self.process.returncode)

    if fcntl:
        # Get standard outputs in non blocking mode from the process streams
        stdout = no_block_read(self.process.stdout)
        stderr = no_block_read(self.process.stderr)
    else:
        # Get standard outputs from the communicate function
        (stdout, stderr) = self.process.communicate()

    try:
        self.stdoutdata += stdout.decode("utf-8")
    except (UnicodeDecodeError, AttributeError):
        self.stdoutdata += stdout
    try:
        self.stderrdata += stderr.decode("utf-8")
    except (UnicodeDecodeError, AttributeError):
        self.stderrdata += stderr

    self.exit_status = self.process.returncode
    if self.log_actions:
        if os.environ['ALIGNAK_LOG_ACTIONS'] == 'WARNING':
            logger.warning("Action '%s' exited with code %d",
                           self.command, self.exit_status)
        else:
            logger.info("Action '%s' exited with code %d",
                        self.command, self.exit_status)

    # We do not need the process now
    del self.process

    # check for bad syntax in command line:
    if (self.stderrdata.find('sh: -c: line 0: unexpected EOF') >= 0 or
            (self.stderrdata.find('sh: -c: ') >= 0 and
             self.stderrdata.find(': Syntax') >= 0 or
             self.stderrdata.find('Syntax error: Unterminated quoted string') >= 0)):
        logger.warning("Bad syntax in command line!")
        # Very, very ugly. But subprocess._handle_exitstatus does
        # not see a difference between a regular "exit 1" and a
        # bailing out shell. Strange, because strace clearly shows
        # a difference. (exit_group(1) vs. exit_group(257))
        self.stdoutdata = self.stdoutdata + self.stderrdata
        self.exit_status = 3

    # Make sure that exit code is a valid exit code
    if self.exit_status not in VALID_EXIT_STATUS:
        self.exit_status = 3

    # Replace stdout with stderr if stdout is empty
    self.stdoutdata = self.stdoutdata.strip()
    if not self.stdoutdata:
        self.stdoutdata = self.stderrdata

    # Now grep what we want in the output
    self.get_outputs(self.stdoutdata, max_plugins_output_length)
    # We can clean the useless properties now
    del self.stdoutdata
    del self.stderrdata

    self.status = ACT_STATUS_DONE
    self.execution_time = time.time() - self.check_time

    # Also get the system and user times
    _, _, n_child_utime, n_child_stime, _ = os.times()
    self.u_time = n_child_utime - child_utime
    self.s_time = n_child_stime - child_stime
|
def _parse_category ( fname , categories ) :
"""Parse unicode category tables ."""
|
version , date , values = None , None , [ ]
print ( "parsing {} .." . format ( fname ) )
for line in open ( fname , 'rb' ) :
uline = line . decode ( 'utf-8' )
if version is None :
version = uline . split ( None , 1 ) [ 1 ] . rstrip ( )
continue
elif date is None :
date = uline . split ( ':' , 1 ) [ 1 ] . rstrip ( )
continue
if uline . startswith ( '#' ) or not uline . lstrip ( ) :
continue
addrs , details = uline . split ( ';' , 1 )
addrs , details = addrs . rstrip ( ) , details . lstrip ( )
if any ( details . startswith ( '{} #' . format ( value ) ) for value in categories ) :
start , stop = addrs , addrs
if '..' in addrs :
start , stop = addrs . split ( '..' )
values . extend ( range ( int ( start , 16 ) , int ( stop , 16 ) + 1 ) )
return version , date , sorted ( values )
|
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in support

    :arg str treeType: the name of the tree type required (case-insensitive).
        Supported values are:

        * "dom": The xml.dom.minidom DOM implementation
        * "etree": A generic walker for tree implementations exposing an
          elementtree-like interface (known to work with ElementTree,
          cElementTree and lxml.etree).
        * "lxml": Optimized walker for lxml.etree
        * "genshi": a Genshi stream

    :arg implementation: A module implementing the tree type e.g.
        xml.etree.ElementTree or cElementTree (Currently applies to the
        "etree" tree type only).

    :arg kwargs: keyword arguments passed to the etree walker--for other
        walkers, this has no effect

    :returns: a TreeWalker class
    """
    kind = treeType.lower()
    if kind not in treeWalkerCache:
        if kind == "dom":
            from . import dom
            treeWalkerCache[kind] = dom.TreeWalker
        elif kind == "genshi":
            from . import genshi
            treeWalkerCache[kind] = genshi.TreeWalker
        elif kind == "lxml":
            from . import etree_lxml
            treeWalkerCache[kind] = etree_lxml.TreeWalker
        elif kind == "etree":
            from . import etree
            module = implementation if implementation is not None else default_etree
            # XXX: NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(module, **kwargs).TreeWalker
    return treeWalkerCache.get(kind)
|
def draw(self, painter, options, widget):
    """Handle the draw event for the widget.

    Delegates all painting to the declaration object.
    """
    declaration = self.declaration
    declaration.draw(painter, options, widget)
|
def loadFromURL(self, url, schema=None):
    """Return an XMLSchema instance loaded from the given url.

    url -- URL to dereference
    schema -- Optional XMLSchema instance.
    """
    # Resolve the URL against the configured base URL, if one is set.
    if self.__base_url:
        url = basejoin(self.__base_url, url)

    reader = self.__readerClass()
    reader.loadFromURL(url)

    # Reuse the supplied schema instance, or create a fresh one.
    if not schema:
        schema = XMLSchema()
    schema.setBaseUrl(url)
    schema.load(reader)

    self.__setIncludes(schema)
    self.__setImports(schema)
    return schema
|
def project_versions(self, project):
    """Get a list of version Resources present on a project.

    :param project: ID or key of the project to get versions from
    :type project: str
    :rtype: List[Version]
    """
    raw_versions = self._get_json('project/' + project + '/versions')
    return [Version(self._options, self._session, raw) for raw in raw_versions]
|
def run(self):
    """Run the job and immediately reschedule it.

    :return: The return value returned by the `job_func`
    """
    logger.info('Running job %s', self)
    result = self.job_func()
    # Record when we ran, then book the next occurrence.
    self.last_run = datetime.datetime.now()
    self._schedule_next_run()
    return result
|
def _convert_oauth2_credentials(credentials):
    """Converts to :class:`google.oauth2.credentials.Credentials`.

    Args:
        credentials (Union[oauth2client.client.OAuth2Credentials,
            oauth2client.client.GoogleCredentials]): The credentials to
            convert.

    Returns:
        google.oauth2.credentials.Credentials: The converted credentials.
    """
    converted = google.oauth2.credentials.Credentials(
        token=credentials.access_token,
        refresh_token=credentials.refresh_token,
        token_uri=credentials.token_uri,
        client_id=credentials.client_id,
        client_secret=credentials.client_secret,
        scopes=credentials.scopes,
    )
    # The constructor does not accept an expiry; set the private field directly.
    converted._expires = credentials.token_expiry
    return converted
|
def get_docker_io(self, container_id, all_stats):
    """Return the container IO usage using the Docker API (v1.0 or higher).

    Input: id is the full container id
    Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
    with:
        time_since_update: number of seconds elapsed between the latest grab
        ior: Number of byte readed
        iow: Number of byte written

    Returns an empty dict when the stats cannot be read. On the first call
    for a container the previous counters equal the current ones, so the
    reported deltas are zero.
    """
    # Init the returned dict
    io_new = {}

    # Read the ior/iow stats (in bytes)
    try:
        iocounters = all_stats["blkio_stats"]
    except KeyError as e:
        # all_stats do not have io information
        logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
        logger.debug(all_stats)
        # No fallback available...
        return io_new

    # Previous io interface stats are stored in the io_old variable
    if not hasattr(self, 'iocounters_old'):
        # First call, we init the io_old var
        self.iocounters_old = {}
        try:
            self.iocounters_old[container_id] = iocounters
        except (IOError, UnboundLocalError):
            pass

    if container_id not in self.iocounters_old:
        # First sighting of this container: just remember its counters.
        try:
            self.iocounters_old[container_id] = iocounters
        except (IOError, UnboundLocalError):
            pass
    else:
        # By storing time data we enable IoR/s and IoW/s calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
        try:
            # Read IOR and IOW value in the structure list of dict
            ior = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
            iow = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
            ior_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
            iow_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
        except (TypeError, IndexError, KeyError) as e:
            # all_stats do not have io information
            logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
        else:
            # Deltas since the previous grab, plus cumulative totals.
            io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id))
            io_new['ior'] = ior - ior_old
            io_new['iow'] = iow - iow_old
            io_new['cumulative_ior'] = ior
            io_new['cumulative_iow'] = iow
            # Save stats to compute next bitrate
            self.iocounters_old[container_id] = iocounters

    # Return the stats
    return io_new
|
def set_filters(self, filters):
    """Sets the filters for the server.

    :Parameters:
        filters
            List of filters to set on this server, or None to remove all filters.
            Elements in list should subclass Filter
    """
    # Use an identity test for None (PEP 8): `== None` can be fooled by
    # objects that override __eq__. A single filter is wrapped in a list.
    if filters is None or isinstance(filters, (tuple, list)):
        self.filters = filters
    else:
        self.filters = [filters]
|
def getZoom(self, resolution):
    """Return the zoom level for a given resolution."""
    resolutions = self.RESOLUTIONS
    # The resolution must be one of the configured levels.
    assert resolution in resolutions
    return resolutions.index(resolution)
|
def pages_sub_menu(context, page, url='/'):
    """Get the root page of the given page and
    render a nested list of all root's children pages.
    Good for rendering a secondary menu.

    :param page: the page where to start the menu from.
    :param url: not used anymore.
    """
    language = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
    page = get_page_from_string_or_id(page, language)
    if page:
        root_page = page.get_root()
        context.update({
            'children': root_page.get_children_for_frontend(),
            'page': page,
        })
    return context
|
def relaxNGValidateDoc(self, doc):
    """Validate a document tree in memory."""
    # Unwrap the underlying C object, tolerating a None document.
    doc__o = doc._o if doc is not None else None
    return libxml2mod.xmlRelaxNGValidateDoc(self._o, doc__o)
|
def apply_sql(self, ex, values, lockref):
    """call the stmt in tree with values subbed on the tables in t_d.

    ex is a parsed statement returned by parse_expression.
    values is the tuple of %s replacements.
    lockref can be anything as long as it stays the same; it's used for assigning tranaction ownership.
    (safest is to make it a pgmock_dbapi2.Connection, because that will rollback on close)
    """
    # Substitute the %s placeholders with the provided values, in-place.
    sqex.depth_first_sub(ex, values)
    # Hold the DB lock for the whole statement; StartX marks a transaction begin.
    with self.lock_db(lockref, isinstance(ex, sqparse2.StartX)):
        sqex.replace_subqueries(ex, self, table.Table)
        # Dispatch on the parsed statement type. Only SELECT/INSERT/UPDATE/DELETE
        # return a value; DDL and transaction statements return None.
        if isinstance(ex, sqparse2.SelectX):
            return sqex.run_select(ex, self, table.Table)
        elif isinstance(ex, sqparse2.InsertX):
            return self[ex.table].insert(ex.cols, ex.values, ex.ret, self)
        elif isinstance(ex, sqparse2.UpdateX):
            if len(ex.tables) != 1:
                raise NotImplementedError('multi-table update')
            return self[ex.tables[0]].update(ex.assigns, ex.where, ex.ret, self)
        elif isinstance(ex, sqparse2.CreateX):
            self.create(ex)
        elif isinstance(ex, sqparse2.IndexX):
            # Index creation is accepted but intentionally a no-op.
            pass
        elif isinstance(ex, sqparse2.DeleteX):
            return self[ex.table].delete(ex.where, self)
        elif isinstance(ex, sqparse2.StartX):
            self.trans_start(lockref)
        elif isinstance(ex, sqparse2.CommitX):
            self.trans_commit()
        elif isinstance(ex, sqparse2.RollbackX):
            self.trans_rollback()
        elif isinstance(ex, sqparse2.DropX):
            self.drop(ex)
        else:
            # Unknown statement type: surface it loudly.
            raise TypeError(type(ex))
|
def partition_block(block):
    """If a block is not partitionable, returns a list with the same block.
    Otherwise, returns a list with the resulting blocks, recursively.

    NOTE(review): mutates the module-level LABELS and JUMP_LABELS registries
    as a side effect -- confirm callers expect that.
    """
    result = [block]
    if not block.is_partitionable:
        return result

    EDP = END_PROGRAM_LABEL + ':'
    for i in range(len(block) - 1):
        # END_PROGRAM label always starts a basic block
        if i and block.asm[i] == EDP:
            block, new_block = block_partition(block, i - 1)
            LABELS[END_PROGRAM_LABEL].basic_block = new_block
            result.extend(partition_block(new_block))
            return result

        # Instructions that end a basic block (jumps/returns): split after them
        # and record any label operands as jump targets.
        if block.mem[i].is_ender:
            block, new_block = block_partition(block, i)
            result.extend(partition_block(new_block))
            op = block.mem[i].opers
            for l in op:
                if l in LABELS.keys():
                    JUMP_LABELS.add(l)
                    block.label_goes += [l]
            return result

        # Inline ASM chunks get their own basic block.
        if block.asm[i] in arch.zx48k.backend.ASMS:
            if i > 0:
                block, new_block = block_partition(block, i - 1)
                result.extend(partition_block(new_block))
                return result
            block, new_block = block_partition(block, i)
            result.extend(partition_block(new_block))
            return result

    # A jump target in the middle of this block forces a split just before
    # the label so the label starts its own basic block.
    for label in JUMP_LABELS:
        must_partition = False
        if LABELS[label].basic_block is block:
            for i in range(len(block)):
                cell = block.mem[i]
                if cell.inst == label:
                    break
                if cell.is_label:
                    continue
                if cell.is_ender:
                    continue
                must_partition = True
            if must_partition:
                block, new_block = block_partition(block, i - 1)
                LABELS[label].basic_block = new_block
                result.extend(partition_block(new_block))
                return result

    return result
|
def fluoview_description_metadata(description, ignoresections=None):
    """Return metatata from FluoView image description as dict.

    The FluoView image description format is unspecified. Expect failures.

    >>> descr = ('[Intensity Mapping]\\nMap Ch0: Range=00000 to 02047\\n'
    ...          '[Intensity Mapping End]')
    >>> fluoview_description_metadata(descr)
    {'Intensity Mapping': {'Map Ch0: Range': '00000 to 02047'}}
    """
    if not description.startswith('['):
        raise ValueError('invalid FluoView image description')
    if ignoresections is None:
        # Sections kept verbatim as comment text instead of being parsed.
        ignoresections = {'Region Info (Fields)', 'Protocol Description'}

    result = {}
    # Stack of open sections; the top is where new entries are written.
    sections = [result]
    comment = False
    for line in description.splitlines():
        if not comment:
            line = line.strip()
        if not line:
            continue
        if line[0] == '[':
            if line[-5:] == ' End]':
                # close section
                del sections[-1]
                section = sections[-1]
                name = line[1:-5]
                if comment:
                    # Join collected comment lines into a single string.
                    # NOTE(review): `comment` is not reset here; it is only
                    # cleared when the next section opens -- confirm intended.
                    section[name] = '\n'.join(section[name])
                if name[:4] == 'LUT ':
                    # LUT sections become a (n, 3) uint8 color table.
                    a = numpy.array(section[name], dtype='uint8')
                    a.shape = -1, 3
                    section[name] = a
                continue
            # new section
            comment = False
            name = line[1:-1]
            if name[:4] == 'LUT ':
                section = []
            elif name in ignoresections:
                section = []
                comment = True
            else:
                section = {}
            sections.append(section)
            result[name] = section
            continue
        # add entry
        if comment:
            section.append(line)
            continue
        line = line.split('=', 1)
        if len(line) == 1:
            # Entry without a value.
            section[line[0].strip()] = None
            continue
        key, value = line
        if key[:4] == 'RGB ':
            # RGB rows are appended to the current LUT list.
            section.extend(int(rgb) for rgb in value.split())
        else:
            section[key.strip()] = astype(value.strip())
    return result
|
def get_frontend_node(self):
    """Returns the first node of the class specified in the
    configuration file as `ssh_to`, or the first node of
    the first class in alphabetic order.

    :return: :py:class:`Node`
    :raise: :py:class:`elasticluster.exceptions.NodeNotFound` if no
            valid frontend node is found
    """
    if self.ssh_to:
        if self.ssh_to not in self.nodes:
            raise NodeNotFound("Invalid ssh_to `%s`. Please check your "
                               "configuration file." % self.ssh_to)
        preferred = self.nodes[self.ssh_to]
        if preferred:
            return preferred[0]
        log.warning("preferred `ssh_to` `%s` is empty: unable to "
                    "get the choosen frontend node from that class.",
                    self.ssh_to)

    # If we reach this point, the preferred class was empty. Pick
    # one using the default logic.
    for node_class in sorted(self.nodes.keys()):
        if self.nodes[node_class]:
            return self.nodes[node_class][0]

    # Uh-oh, no nodes in this cluster.
    raise NodeNotFound("Unable to find a valid frontend: "
                       "cluster has no nodes!")
|
def add_grad(left, right):
    """Recursively add the gradient of two objects.

    Args:
        left: The left value to add. Can be either an array, a number, list or
            dictionary.
        right: The right value. Must be of the same type (recursively) as the left.

    Returns:
        The sum of the two gradients, which will of the same type.
    """
    # We assume that initial gradients are always identity WRT add_grad.
    # We also assume that only init_grad could have created None values.
    assert left is not None and right is not None

    # A ZeroGradient on either side is the additive identity.
    if type(left) is ZeroGradient:
        return right
    if type(right) is ZeroGradient:
        return left
    return grad_adders[(type(left), type(right))](left, right)
|
def variables(self):
    """Returns :class:`Variables` instance."""
    entries = []
    for key, value, sl in self._nodes_to_values():
        entries.append((key, self._unescape(key, value), sl))
    return Variables(entries)
|
def get_score_metadata(self):
    """Gets the metadata for a score.

    return: (osid.Metadata) - metadata for the score
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
    metadata = dict(self._mdata['score'])
    metadata['existing_decimal_values'] = self._my_map['score']
    return Metadata(**metadata)
|
def rearrange_nums(nums):
    """Rearranges an array of integers, sorting the values at odd indices in
    non-increasing order and the values at even indices in non-decreasing order.

    Args:
        nums (List[int]): A 0-indexed integer array.

    Returns:
        List[int]: A new array formed after rearranging the values of nums.
            Values at odd indices are sorted in non-increasing order,
            and values at even indices are sorted in non-decreasing order.

    Example:
        >>> rearrange_nums([4, 1, 2, 3])
        [2, 3, 4, 1]
    """
    # Extended-slice assignment interleaves the two sorted halves directly,
    # replacing the manual index loop and the redundant list()/reversed()
    # wrappers (sorted() already returns a list; use reverse=True).
    result = [None] * len(nums)
    result[::2] = sorted(nums[::2])
    result[1::2] = sorted(nums[1::2], reverse=True)
    return result
|
def _load_assembly_mapping_data ( filename ) :
"""Load assembly mapping data .
Parameters
filename : str
path to compressed archive with assembly mapping data
Returns
assembly _ mapping _ data : dict
dict of assembly maps if loading was successful , else None
Notes
Keys of returned dict are chromosomes and values are the corresponding assembly map ."""
|
try :
assembly_mapping_data = { }
with tarfile . open ( filename , "r" ) as tar : # http : / / stackoverflow . com / a / 2018576
for member in tar . getmembers ( ) :
if ".json" in member . name :
with tar . extractfile ( member ) as tar_file :
tar_bytes = tar_file . read ( )
# https : / / stackoverflow . com / a / 42683509/4727627
assembly_mapping_data [ member . name . split ( "." ) [ 0 ] ] = json . loads ( tar_bytes . decode ( "utf-8" ) )
return assembly_mapping_data
except Exception as err :
print ( err )
return None
|
def validate(options):
    """Validates the application of this backend to a given metadata"""
    backends = options.backends
    try:
        instance_pos = backends.index('modelinstance')
        model_pos = backends.index('model')
    except ValueError:
        # One of the two backends is missing entirely.
        raise Exception("Metadata backend 'modelinstance' must be installed in order to use 'model' backend")
    if instance_pos > model_pos:
        raise Exception("Metadata backend 'modelinstance' must come before 'model' backend")
|
def run(self):
    """Run import.

    Fetches tracks from the configured importer and creates or updates the
    corresponding Track objects, advancing their last_played timestamps.
    """
    # NOTE: this module uses Python 2 print statements.
    # Find the most recently played existing track (None if there are none).
    latest_track = Track.objects.all().order_by('-last_played')
    latest_track = latest_track[0] if latest_track else None
    importer = self.get_importer()
    tracks = importer.run()

    # Create/update Django Track objects for importer tracks.
    for track in tracks:
        # Only create/update if tracks with start times greater than what already exists are imported.
        if not latest_track or not latest_track.last_played or track.start_time > latest_track.last_played:
            obj = self.lookup_track(track)
            # Don't update importing track that is regarded as the latest. This prevents start times from constantly incrementing.
            if latest_track and obj == latest_track:
                print "[%s-%s]: Start time not updated as it is the latest track." % (track.title, track.artist)
                continue
            # If no existing track object could be resolved, create it.
            if not obj:
                print "[%s-%s]: Created." % (track.title, track.artist)
                obj = Track.objects.create(title=track.title)
                obj.length = track.length
                # Credit the artist with the highest-priority role (default 1).
                roles = MusicCreditOption.objects.all().order_by('role_priority')
                role = roles[0].role_priority if roles else 1
                obj.create_credit(track.artist, role)
            else:
                print "[%s-%s]: Not created as it already exists." % (track.title, track.artist)
            # Update last played time to start time.
            obj.last_played = track.start_time
            obj.save()
            print "[%s-%s]: Start time updated to %s." % (track.title, track.artist, track.start_time)
        else:
            print "[%s-%s]: Not created as it has a past start time of %s (latest %s). " % (track.title, track.artist, track.start_time, latest_track.last_played)
|
def _call_cli ( self , command , cwd = None , universal_newlines = False , redirect_stderr = False ) :
"""Executes the given command , internally using Popen . The output of
stdout and stderr are returned as a tuple . The returned tuple looks
like : ( stdout , stderr , returncode )
Parameters
command : string
The command to execute .
cwd : string
Change the working directory of the program to the specified path .
universal _ newlines : boolean
Enable the universal _ newlines feature of Popen .
redirect _ stderr : boolean
If True , redirect stderr into stdout"""
|
command = str ( command . encode ( "utf-8" ) . decode ( "ascii" , "ignore" ) )
env = os . environ . copy ( )
env . update ( self . envvars )
stderr = STDOUT if redirect_stderr else PIPE
proc = Popen ( shlex . split ( command ) , stdout = PIPE , stderr = stderr , cwd = cwd , universal_newlines = universal_newlines , env = env )
stdout , stderr = proc . communicate ( )
return ( stdout , stderr , proc . returncode )
|
def to_csv(self, path, iamc_index=False, **kwargs):
    """Write timeseries data to a csv file

    Parameters
    ----------
    path : string
        file path
    iamc_index : bool, default False
        if True, use `['model', 'scenario', 'region', 'variable', 'unit']`;
        else, use all `data` columns
    """
    frame = self._to_file_format(iamc_index)
    frame.to_csv(path, index=False, **kwargs)
|
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_limited_stream` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    read_chunk = make_chunk_iter_func(stream, limit, buffer_size)
    # Capture the separator in the split so it appears as its own item.
    split_on_sep = re.compile(r'(%s)' % re.escape(separator)).split

    pending = []
    while True:
        data = read_chunk()
        if not data:
            break
        collected = []
        for piece in chain(pending, split_on_sep(data)):
            if piece == separator:
                yield ''.join(collected)
                collected = []
            else:
                collected.append(piece)
        # Carry the incomplete tail over to the next read.
        pending = collected
    if pending:
        yield ''.join(pending)
|
def changeLane(self, vehID, laneIndex, duration):
    """changeLane(string, int, int) -> None

    Forces a lane change to the lane with the given index; if successful,
    the lane will be chosen for the given amount of time (in ms).
    """
    # Payload: compound of 2 items -> byte lane index + integer duration.
    payload_length = 1 + 4 + 1 + 1 + 1 + 4
    self._connection._beginMessage(
        tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_CHANGELANE, vehID, payload_length)
    self._connection._string += struct.pack(
        "!BiBBBi", tc.TYPE_COMPOUND, 2, tc.TYPE_BYTE, laneIndex,
        tc.TYPE_INTEGER, duration)
    self._connection._sendExact()
|
def inodeusage(args=None):
    '''
    Return inode usage information for volumes mounted on this minion

    CLI Example:

    .. code-block:: bash

        salt '*' disk.inodeusage
    '''
    flags = _clean_flags(args, 'disk.inodeusage')
    # AIX's df does not support -P; other kernels get POSIX output.
    if __grains__['kernel'] == 'AIX':
        cmd = 'df -i'
    else:
        cmd = 'df -iP'
    if flags:
        cmd += ' -{0}'.format(flags)
    ret = {}
    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    for line in out:
        # Skip the header line.
        if line.startswith('Filesystem'):
            continue
        comps = line.split()
        # Don't choke on empty lines
        if not comps:
            continue
        try:
            # Column layout differs per kernel; key is the mount point.
            if __grains__['kernel'] == 'OpenBSD':
                ret[comps[8]] = {'inodes': int(comps[5]) + int(comps[6]), 'used': comps[5], 'free': comps[6], 'use': comps[7], 'filesystem': comps[0], }
            elif __grains__['kernel'] == 'AIX':
                ret[comps[6]] = {'inodes': comps[4], 'used': comps[5], 'free': comps[2], 'use': comps[5], 'filesystem': comps[0], }
            else:
                ret[comps[5]] = {'inodes': comps[1], 'used': comps[2], 'free': comps[3], 'use': comps[4], 'filesystem': comps[0], }
        except (IndexError, ValueError):
            # A single malformed line invalidates the whole result.
            log.error('Problem parsing inode usage information')
            ret = {}
    return ret
|
def maybe_render_markdown(string: str) -> Any:
    """Render a string as Markdown only if in an IPython interpreter."""
    if not is_ipython_interpreter():
        return string
    # Only reachable inside IPython.  # pragma: no cover
    from IPython.display import Markdown  # type: ignore  # noqa: E501
    return Markdown(string)
|
def url_to_path(url):
    # type: (str) -> str
    """Convert a file: URL to a path."""
    assert url.startswith('file:'), (
        "You can only turn file: urls into filenames (not %r)" % url)
    _, netloc, path, _, _ = urllib_parse.urlsplit(url)
    # if we have a UNC path, prepend UNC share notation
    if netloc:
        return urllib_request.url2pathname('\\\\' + netloc + path)
    return urllib_request.url2pathname(path)
|
def gps_status_encode(self, satellites_visible, satellite_prn, satellite_used,
                      satellite_elevation, satellite_azimuth, satellite_snr):
    '''
    The positioning status, as reported by GPS. This message is intended
    to display status information about each satellite
    visible to the receiver. See message GLOBAL_POSITION
    for the global position estimate. This message can
    contain information for up to 20 satellites.

    satellites_visible        : Number of satellites visible (uint8_t)
    satellite_prn             : Global satellite ID (uint8_t)
    satellite_used            : 0: Satellite not used, 1: used for localization (uint8_t)
    satellite_elevation       : Elevation (0: right on top of receiver, 90: on the horizon) of satellite (uint8_t)
    satellite_azimuth         : Direction of satellite, 0: 0 deg, 255: 360 deg. (uint8_t)
    satellite_snr             : Signal to noise ratio of satellite (uint8_t)
    '''
    fields = (satellites_visible, satellite_prn, satellite_used,
              satellite_elevation, satellite_azimuth, satellite_snr)
    return MAVLink_gps_status_message(*fields)
|
def addfunctions(abunch):
    """Attach computed-property helper functions to an EpBunch.

    Which helpers are added depends on the object's key (its first field):
    surface objects get geometry helpers (area, azimuth, ...), material and
    construction objects get thermal helpers (rvalue, ufactor, ...), fans get
    power helpers, and zones/zone-surfaces get reference-lookup helpers.
    Returns the same ``abunch`` with its ``__functions`` mapping updated.
    """
    # The IDF object key selects which helper set applies; upper-cased so the
    # membership tests below are case-insensitive.
    key = abunch.obj[0].upper()
    # TODO : alternate strategy to avoid listing the objkeys in snames
    # check if epbunch has field "Zone_Name" or "Building_Surface_Name"
    # and is in group u'Thermal Zones and Surfaces'
    # then it is likely to be a surface.
    # of course we need to recode for surfaces that do not have coordinates :-(
    # or we can filter those out since they do not have
    # the field "Number_of_Vertices"
    # Surface-like objects that carry vertex coordinates.
    snames = [
        "BuildingSurface:Detailed",
        "Wall:Detailed",
        "RoofCeiling:Detailed",
        "Floor:Detailed",
        "FenestrationSurface:Detailed",
        "Shading:Site:Detailed",
        "Shading:Building:Detailed",
        "Shading:Zone:Detailed",
    ]
    snames = [sname.upper() for sname in snames]
    if key in snames:
        # Geometry helpers computed from the surface's vertex coordinates.
        func_dict = {
            'area': fh.area,
            'height': fh.height,  # not working correctly
            'width': fh.width,  # not working correctly
            'azimuth': fh.azimuth,
            'tilt': fh.tilt,
            'coords': fh.getcoords,  # needed for debugging
        }
        abunch.__functions.update(func_dict)
    # print(abunch.getfieldidd)
    # Material/construction objects that have thermal properties.
    names = [
        "CONSTRUCTION",
        "MATERIAL",
        "MATERIAL:AIRGAP",
        "MATERIAL:INFRAREDTRANSPARENT",
        "MATERIAL:NOMASS",
        "MATERIAL:ROOFVEGETATION",
        "WINDOWMATERIAL:BLIND",
        "WINDOWMATERIAL:GLAZING",
        "WINDOWMATERIAL:GLAZING:REFRACTIONEXTINCTIONMETHOD",
        "WINDOWMATERIAL:GAP",
        "WINDOWMATERIAL:GAS",
        "WINDOWMATERIAL:GASMIXTURE",
        "WINDOWMATERIAL:GLAZINGGROUP:THERMOCHROMIC",
        "WINDOWMATERIAL:SCREEN",
        "WINDOWMATERIAL:SHADE",
        "WINDOWMATERIAL:SIMPLEGLAZINGSYSTEM",
    ]
    if key in names:
        func_dict = {
            'rvalue': fh.rvalue,
            'ufactor': fh.ufactor,
            'rvalue_ip': fh.rvalue_ip,  # quick fix for Santosh. Needs to thought thru
            'ufactor_ip': fh.ufactor_ip,  # quick fix for Santosh. Needs to thought thru
            'heatcapacity': fh.heatcapacity,
        }
        abunch.__functions.update(func_dict)
    # Fan objects get power/flow helpers.
    names = [
        'FAN:CONSTANTVOLUME',
        'FAN:VARIABLEVOLUME',
        'FAN:ONOFF',
        'FAN:ZONEEXHAUST',
        'FANPERFORMANCE:NIGHTVENTILATION',
    ]
    if key in names:
        func_dict = {
            'f_fanpower_bhp': fh.fanpower_bhp,
            'f_fanpower_watts': fh.fanpower_watts,
            'f_fan_maxcfm': fh.fan_maxcfm,
        }
        abunch.__functions.update(func_dict)
    # code for references
    # add function zonesurfaces
    if key == 'ZONE':
        func_dict = {'zonesurfaces': fh.zonesurfaces}
        abunch.__functions.update(func_dict)
    # add function subsurfaces
    # going to cheat here a bit
    # check if epbunch has field "Zone_Name"
    # and is in group u'Thermal Zones and Surfaces'
    # then it is likely to be a surface attached to a zone
    fields = abunch.fieldnames
    try:
        group = abunch.getfieldidd('key')['group']
    except KeyError as e:  # some pytests don't have group
        group = None
    if group == u'Thermal Zones and Surfaces':
        if "Zone_Name" in fields:
            func_dict = {'subsurfaces': fh.subsurfaces}
            abunch.__functions.update(func_dict)
    return abunch
|
def validateArchiveList(archiveList):
    """Validate an archiveList.

    An ArchiveList must:

    1. Have at least one archive config. Example: (60, 86400)
    2. No archive may be a duplicate of another.
    3. Higher precision archives' precision must evenly divide all lower
       precision archives' precision.
    4. Lower precision archives must cover larger time intervals than
       higher precision archives.
    5. Each archive must have at least enough points to consolidate to
       the next archive.

    Returns None if the list is valid; raises InvalidConfiguration
    otherwise.  (The original docstring claimed a True/False return,
    which the code never did.)
    """
    if not archiveList:
        raise InvalidConfiguration("You must specify at least one archive configuration!")
    # Sort by precision (secondsPerPoint) so adjacent pairs can be compared.
    archiveList = sorted(archiveList, key=lambda a: a[0])
    for i, (archive, nextArchive) in enumerate(zip(archiveList, archiveList[1:])):
        # Rule 2: after sorting, equal precisions would sit adjacent.
        # (Message fixed: was "may not configured having" with a missing
        # space between the concatenated literals.)
        if not archive[0] < nextArchive[0]:
            raise InvalidConfiguration(
                "A Whisper database may not be configured having "
                "two archives with the same precision (archive%d: %s, archive%d: %s)"
                % (i, archive, i + 1, nextArchive))
        # Rule 3: precisions must divide evenly for consolidation to work.
        if nextArchive[0] % archive[0] != 0:
            raise InvalidConfiguration(
                "Higher precision archives' precision "
                "must evenly divide all lower precision archives' precision "
                "(archive%d: %s, archive%d: %s)"
                % (i, archive[0], i + 1, nextArchive[0]))
        # Rule 4: retention = secondsPerPoint * points must strictly grow.
        retention = archive[0] * archive[1]
        nextRetention = nextArchive[0] * nextArchive[1]
        if not nextRetention > retention:
            raise InvalidConfiguration(
                "Lower precision archives must cover "
                "larger time intervals than higher precision archives "
                "(archive%d: %s seconds, archive%d: %s seconds)"
                % (i, retention, i + 1, nextRetention))
        # Rule 5: enough points to build one point of the next archive.
        archivePoints = archive[1]
        pointsPerConsolidation = nextArchive[0] // archive[0]
        if not archivePoints >= pointsPerConsolidation:
            raise InvalidConfiguration(
                "Each archive must have at least enough points "
                "to consolidate to the next archive (archive%d consolidates %d of "
                "archive%d's points but it has only %d total points)"
                % (i + 1, pointsPerConsolidation, i, archivePoints))
|
def attributes(self):
    """Return a dict of this sync module's identifying attributes."""
    return {
        'name': self.name,
        'id': self.sync_id,
        'network_id': self.network_id,
        'serial': self.serial,
        'status': self.status,
        'region': self.region,
        'region_id': self.region_id,
    }
|
def metric_arun_2010(topic_word_distrib, doc_topic_distrib, doc_lengths):
    """Arun 2010 metric for choosing the number of LDA topics.

    Rajkumar Arun, V. Suresh, C. E. Veni Madhavan, and M. N. Narasimha
    Murthy. 2010. On finding the natural number of topics with latent
    dirichlet allocation: Some observations. In Advances in knowledge
    discovery and data mining, Springer Berlin Heidelberg, 391-402.
    http://doi.org/10.1007/978-3-642-13657-3_43

    Note: fails when the vocabulary has fewer words than topics (unusual).
    Fix vs. original: replaced deprecated ``np.matrix`` with an equivalent
    ``np.dot`` + ``np.ravel``; results are numerically identical.
    """
    # CM1 = singular values of the topic-word matrix (no normalization:
    # the paper says nothing about normalizing, so leave as-is).
    cm1 = np.linalg.svd(topic_word_distrib, compute_uv=False)
    # CM2 = L * M2 / norm2(L): document lengths projected through the
    # doc-topic distribution. Transpose so the product is a row vector
    # (no-op for 1-D input).
    doc_lengths = np.asarray(doc_lengths)
    if doc_lengths.shape[0] != 1:
        doc_lengths = doc_lengths.T
    cm2 = np.ravel(np.dot(doc_lengths, doc_topic_distrib))
    cm2 /= np.linalg.norm(doc_lengths, 2)
    # Symmetric Kullback-Leibler divergence KL(cm1||cm2) + KL(cm2||cm1),
    # computed directly: scipy's entropy() would renormalize the vectors
    # to sum to 1, which the paper does not do.
    return np.sum(cm1 * np.log(cm1 / cm2)) + np.sum(cm2 * np.log(cm2 / cm1))
|
def pull(image, insecure_registry=False, api_response=False, client_timeout=salt.utils.docker.CLIENT_TIMEOUT):
    '''
    .. versionchanged:: 2018.3.0
        If no tag is specified in the ``image`` argument, all tags for the
        image will be pulled. For this reason is it recommended to pass
        ``image`` using the ``repo:tag`` notation.

    Pulls an image from a Docker registry

    image
        Image to be pulled

    insecure_registry : False
        If ``True``, the Docker client will permit the use of insecure
        (non-HTTPS) registries.

    api_response : False
        If ``True``, an ``API_Response`` key will be present in the return
        data, containing the raw output from the Docker API.

        .. note::
            This may result in a **lot** of additional return data,
            especially for larger images.

    client_timeout
        Timeout in seconds for the Docker client. This is not a timeout for
        this function, but for receiving a response from the API.

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``Layers`` - A dictionary containing one or more of the following keys:
        - ``Already_Pulled`` - Layers that were already present on the Minion
        - ``Pulled`` - Layers that were pulled
    - ``Status`` - A string containing a summary of the pull action (usually
      a message saying that an image was downloaded, or that it was up to
      date).
    - ``Time_Elapsed`` - Time in seconds taken to perform the pull

    CLI Example:

    .. code-block:: bash

        salt myminion docker.pull centos
        salt myminion docker.pull centos:6
    '''
    # Module-level pre-pull hook (defined elsewhere in this file).
    _prep_pull()
    kwargs = {'stream': True, 'client_timeout': client_timeout}
    if insecure_registry:
        kwargs['insecure_registry'] = insecure_registry
    time_started = time.time()
    # Streamed pull: `response` yields one JSON event per line from the API.
    response = _client_wrapper('pull', image, **kwargs)
    ret = {'Time_Elapsed': time.time() - time_started, 'retcode': 0}
    # Pulling may have changed the image list; drop any cached context.
    _clear_context()
    if not response:
        raise CommandExecutionError('Pull failed for {0}, no response returned from Docker API'.format(image))
    elif api_response:
        ret['API_Response'] = response
    errors = []
    # Iterate through API response and collect information
    for event in response:
        log.debug('pull event: %s', event)
        try:
            event = salt.utils.json.loads(event)
        except Exception as exc:
            raise CommandExecutionError('Unable to interpret API event: \'{0}\''.format(event), info={'Error': exc.__str__()})
        try:
            # Each event dict is keyed by its type ('status', 'errorDetail',
            # ...); look at the first key to dispatch.
            event_type = next(iter(event))
        except StopIteration:
            # Empty event dict — nothing to process.
            continue
        if event_type == 'status':
            _pull_status(ret, event)
        elif event_type == 'errorDetail':
            _error_detail(errors, event)
    if errors:
        # Partial failure: report the errors and flag a non-zero retcode.
        ret['Errors'] = errors
        ret['retcode'] = 1
    return ret
|
def isnat(obj):
    """Check if a value is np.NaT.

    Parameters
    ----------
    obj : np.ndarray
        Array with a datetime64 ('M') or timedelta64 ('m') dtype.

    Returns
    -------
    np.ndarray[bool]
        Elementwise mask that is True where ``obj`` is NaT.

    Raises
    ------
    ValueError
        If ``obj`` is not a numpy datetime or timedelta array.
    """
    if obj.dtype.kind not in ('m', 'M'):
        # Bug fix: the original passed the raw format string without ever
        # interpolating the %s placeholder.
        raise ValueError("%s is not a numpy datetime or timedelta" % obj.dtype)
    # NaT shares its bit pattern with iNaT when viewed as int64.
    return obj.view(int64_dtype) == iNaT
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.