signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _load_modeling_extent(self):
    """Determine the LSM grid subset that covers the GSSHA model domain.

    Reprojects the GSSHA grid into the LSM coordinate system, takes its
    bounds, and records the matching index range on the LSM grid.
    """
    # STEP 1: bounds of the GSSHA grid expressed in LSM coordinates.
    x_min, x_max, y_min, y_max = self.gssha_grid.bounds(
        as_projection=self.xd.lsm.projection)
    # STEP 2: remember which slice of the LSM grid overlaps that extent.
    self._set_subset_indices(y_min, y_max, x_min, x_max)
def list_all_payment_transactions(cls, **kwargs):
    """List PaymentTransactions.

    Return a page of PaymentTransactions.  Synchronous by default; pass
    ``async=True`` to get back the request thread instead:

        >>> thread = api.list_all_payment_transactions(async=True)
        >>> result = thread.get()

    :param async bool: perform the request asynchronously
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[PaymentTransaction], or the request thread when called
        asynchronously.
    """
    # Callers always want the payload, never the full HTTP response triple.
    kwargs['_return_http_data_only'] = True
    # When 'async' is set, the helper returns the request thread; otherwise
    # it returns the payload directly.  Either way, hand its result back
    # unchanged (the original had two branches doing exactly the same call).
    return cls._list_all_payment_transactions_with_http_info(**kwargs)
def keplerian_sheared_field_locations(ax, kbos, date, ras, decs, names,
                                      elongation=False, plot=False):
    """Shift fields from the discovery set to the requested date by the
    average motion of L7 KBOs in the discovery field.

    :param ax: axes to draw on; returned unchanged
    :param kbos: precomputed at the discovery date for that block
        (e.g. Oct new moon for 13B); each must expose .ra, .dec, .compute(date)
    :param date: target date the fields are sheared to
    :param ras: field centre RAs at discovery
    :param decs: field centre Decs at discovery
    :param names: field names, parallel to ras/decs
    :param elongation: if True, annotate each field with its elongation
    :param plot: if True, draw the sheared field footprints on ``ax``
    :return: ax
    """
    # Accumulate, then average, the apparent motion of the KBO sample
    # between the discovery epoch and `date`.
    seps = {'dra': 0., 'ddec': 0.}
    for kbo in kbos:
        ra0 = kbo.ra
        dec0 = kbo.dec
        kbo.compute(date)
        seps['dra'] += kbo.ra - ra0
        seps['ddec'] += kbo.dec - dec0
    n_kbos = float(len(kbos))
    seps['dra'] /= n_kbos
    seps['ddec'] /= n_kbos
    # BUG FIX: this was a Python 2 print statement (a syntax error under
    # Python 3); use the print() function.
    print(date, seps, len(kbos))
    for idx in range(len(ras)):
        name = names[idx]
        ra = ras[idx] + seps['dra']
        dec = decs[idx] + seps['ddec']
        if plot:
            ax.add_artist(Rectangle(
                xy=(math.degrees(ra) - camera_dimen / 2.0,
                    math.degrees(dec) - camera_dimen / 2.0),
                height=camera_dimen, width=camera_dimen,
                edgecolor='b', facecolor='b', lw=0.5, fill=True, alpha=0.2))
        if elongation:
            # For each field centre, plot the elongation onto the field at
            # that date.
            elong = field_elongation(ephem.degrees(ra), ephem.degrees(dec), date)
            ax.annotate(name,
                        (math.degrees(ra) + camera_dimen / 2., math.degrees(dec)),
                        size=3)
            ax.annotate("%0.1f" % elong,
                        (math.degrees(ra) + camera_dimen / 4.,
                         math.degrees(dec) - camera_dimen / 4.),
                        size=5)
    return ax
def build(self, builder):
    """Build XML by appending an ItemRef element (and its children) to builder."""
    attrs = {
        "ItemOID": self.oid,
        "Mandatory": bool_to_yes_no(self.mandatory),
    }
    # Optional attributes are only emitted when explicitly set; the first
    # two are stringified, the rest are used verbatim.
    optional = [
        ("OrderNumber", self.order_number, str),
        ("KeySequence", self.key_sequence, str),
        ("ImputationMethodOID", self.imputation_method_oid, None),
        ("Role", self.role, None),
        ("RoleCodeListOID", self.role_codelist_oid, None),
    ]
    for attr_name, value, convert in optional:
        if value is not None:
            attrs[attr_name] = convert(value) if convert else value
    builder.start("ItemRef", attrs)
    for attribute in self.attributes:
        attribute.build(builder)
    builder.end("ItemRef")
def push_progress(self, status, object_id, progress):
    """Print progress information.

    :param status: Status text.
    :type status: unicode
    :param object_id: Object that the progress is reported on.
    :type object_id: unicode
    :param progress: Progress bar.
    :type progress: unicode
    """
    line = progress_fmt(status, object_id, progress)
    fastprint(line, end='\n')
def memory_warning(config, context):
    """Send a warning (via the configured raven client) when memory usage
    nears the context's limit.

    While usage is below the configured threshold, re-schedules itself to
    check again in half a second; once a warning has been sent, no further
    checks are scheduled.
    """
    # Resident set size of this process, converted from bytes to MiB.
    used_mb = psutil.Process(os.getpid()).memory_info().rss / 1048576
    limit_mb = float(context.memory_limit_in_mb)
    usage_fraction = used_mb / limit_mb
    # NOTE(review): assumes 'memory_warning_threshold' is present in config;
    # a missing key would make the comparison below fail -- confirm upstream.
    threshold = config.get('memory_warning_threshold')
    if usage_fraction >= threshold:
        config['raven_client'].captureMessage(
            'Memory Usage Warning',
            level='warning',
            extra={
                'MemoryLimitInMB': context.memory_limit_in_mb,
                'MemoryUsedInMB': math.floor(used_mb),
            })
    else:
        # Nothing to do yet -- check back later.
        Timer(.5, memory_warning, (config, context)).start()
def build_multi_point_source_node(multi_point_source):
    """Parse a multi-point source to a Node class.

    :param multi_point_source:
        MultiPoint source as instance of :class:
        `openquake.hazardlib.source.point.MultiPointSource`
    :returns:
        Instance of :class:`openquake.baselib.node.Node`
    """
    # Geometry: flatten the mesh into an x0 y0 x1 y1 ... position list.
    coords = []
    for point in multi_point_source.mesh:
        coords.extend((point.x, point.y))
    geometry_children = [
        Node('gml:posList', text=coords),
        Node("upperSeismoDepth", text=multi_point_source.upper_seismogenic_depth),
        Node("lowerSeismoDepth", text=multi_point_source.lower_seismogenic_depth),
    ]
    source_nodes = [Node("multiPointGeometry", nodes=geometry_children)]
    # Sub-nodes shared by distributed-seismicity sources.
    source_nodes.extend(get_distributed_seismicity_source_nodes(multi_point_source))
    return Node("multiPointSource",
                get_source_attributes(multi_point_source),
                nodes=source_nodes)
def create_diskgroup(service_instance, vsan_disk_mgmt_system, host_ref,
                     cache_disk, capacity_disks):
    '''Creates a disk group

    service_instance
        Service instance to the host or vCenter

    vsan_disk_mgmt_system
        vim.VimClusterVsanVcDiskManagemenetSystem representing the vSan disk
        management system retrieved from the vsan endpoint.

    host_ref
        vim.HostSystem object representing the target host the disk group
        will be created on

    cache_disk
        The vim.HostScsidisk to be used as a cache disk. It must be an ssd
        disk.

    capacity_disks
        List of vim.HostScsiDisk objects representing of disks to be used as
        capacity disks. Can be either ssd or non-ssd. There must be a minimum
        of 1 capacity disk in the list.
    '''
    hostname = salt.utils.vmware.get_managed_object_name(host_ref)
    cache_disk_id = cache_disk.canonicalName
    log.debug('Creating a new disk group with cache disk \'%s\' on host \'%s\'',
              cache_disk_id, hostname)
    log.trace('capacity_disk_ids = %s', [c.canonicalName for c in capacity_disks])
    spec = vim.VimVsanHostDiskMappingCreationSpec()
    spec.cacheDisks = [cache_disk]
    spec.capacityDisks = capacity_disks
    # All capacity disks must be either ssd or non-ssd (mixed disks are not
    # supported); the first disk's ssd flag therefore decides the type.
    spec.creationType = 'allFlash' if getattr(capacity_disks[0], 'ssd') else 'hybrid'
    spec.host = host_ref
    try:
        task = vsan_disk_mgmt_system.InitializeDiskMappings(spec)
    # Each vSphere fault type is translated to the matching salt-side error
    # so callers catch a uniform exception hierarchy.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise VMwareApiError('Not enough permissions. Required privilege: '
                             '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise VMwareApiError(exc.msg)
    except vmodl.fault.MethodNotFound as exc:
        log.exception(exc)
        raise VMwareRuntimeError('Method \'{0}\' not found'.format(exc.method))
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise VMwareRuntimeError(exc.msg)
    # InitializeDiskMappings returns a task; block until it completes so the
    # disk group exists (or an error surfaces) before reporting success.
    _wait_for_tasks([task], service_instance)
    return True
def write_gif_to_file(self, fp, images, durations, loops, xys, disposes):
    """write_gif_to_file(fp, images, durations, loops, xys, disposes)

    Given a set of images, write the animated-GIF bytes to the given stream.
    Returns the number of frames written.
    """
    # Obtain palette for all images and count each occurrence.
    palettes, occur = [], []
    for im in images:
        palettes.append(getheader(im)[1])
    for palette in palettes:
        occur.append(palettes.count(palette))
    # Select most-used palette as the global one (or first in case no max).
    # BUG FIX: this was a Python 2 print statement (a syntax error under
    # Python 3); use the print() function.
    print(palettes, occur)
    global_palette = palettes[occur.index(max(occur))]
    # Init
    frames = 0
    first_frame = True
    for im, palette in zip(images, palettes):
        if first_frame:
            # Write header: logical screen descriptor, the global palette
            # and the application extension carrying the loop count.
            header = self.get_header_anim(im)
            appext = self.get_app_ext(loops)
            fp.write(header)
            fp.write(global_palette)
            fp.write(appext)
            # Next frame is not the first.
            first_frame = False
        # Write palette and image data for this frame.
        # (The original wrapped this in a no-op `if True:` block.)
        data = getdata(im)
        imdes, data = data[0], data[1:]
        transparent_flag = 1 if self.transparency else 0
        graphext = self.get_graphics_control_ext(
            durations[frames], disposes[frames],
            transparent_flag=transparent_flag, transparency_index=255)
        # Make image descriptor suitable for using 256 local color palette.
        lid = self.get_image_descriptor(im, xys[frames])
        if (palette != global_palette) or (disposes[frames] != 2):
            # Use local color palette.
            fp.write(graphext)
            fp.write(lid)      # write suitable image descriptor
            fp.write(palette)  # write local color table
            fp.write('\x08')   # LZW minimum size code
        else:
            # Use global color palette.
            fp.write(graphext)
            fp.write(imdes)    # write suitable image descriptor
        # Write image data.
        for d in data:
            fp.write(d)
        # Prepare for next round.
        frames = frames + 1
    fp.write(";")  # end gif
    return frames
def is_stalemate(self) -> bool:
    """Checks if the current position is a stalemate."""
    # Stalemate requires: not in check, the game not already ended by
    # variant rules, and no legal move available.
    if self.is_check() or self.is_variant_end():
        return False
    return not any(self.generate_legal_moves())
def ISIs(self, time_dimension=0, units=None, min_t=None, max_t=None):
    """Return the Inter-Spike Intervals.

    `time_dimension`: which dimension contains the spike times (by default
    the first).
    `units`, `min_t`, `max_t`: define the units of the output and the range
    of spikes that should be considered.
    """
    units = self._default_units(units)
    converted_dimension, spike_times = self.spike_times.get_converted(
        time_dimension, units)
    # Fall back to the full converted range when no bounds are given.
    lo = converted_dimension.min if min_t is None else min_t
    hi = converted_dimension.max if max_t is None else max_t
    # Keep strictly-interior spikes, sort them, and difference consecutive
    # spike times.
    selected = spike_times[(spike_times > lo) * (spike_times < hi)]
    return np.diff(sorted(selected))
def _parse_residue(self, residue):
    """Extract residue name, number, chain, model and atoms.

    I/O: xml object <response> / dictionary
    """
    # Keep only element nodes (drops whitespace text nodes etc.).
    elements = [node for node in residue.childNodes
                if node.nodeType == node.ELEMENT_NODE]
    # Child order is positional: 0 number, 1 name, 3 icode, 4 chain,
    # 5 model, 6+ atoms.  NOTE(review): child 2 is skipped -- presumably
    # fixed by the response schema; confirm against the service's XML.
    return {
        'name': elements[1].firstChild.data.strip(),
        'number': int(elements[0].firstChild.data.strip()),
        'icode': elements[3].firstChild.data,
        'chain': elements[4].firstChild.data.strip(),
        'model': int(elements[5].firstChild.data.strip()),
        'atoms': elements[6:],
    }
def makeMarkovApproxToNormalByMonteCarlo(x_grid, mu, sigma, N_draws=10000):
    '''Creates an approximation to a normal distribution with mean mu and
    standard deviation sigma, by Monte Carlo.

    Returns a stochastic vector called p_vec, corresponding to values in
    x_grid.  If a RV is distributed x ~ N(mu, sigma), then the expectation
    of a continuous function f() is E[f(x)] = numpy.dot(p_vec, f(x_grid)).

    Parameters
    ----------
    x_grid : numpy.array
        A sorted 1D array of floats representing discrete values that a
        normally distributed RV could take on.
    mu : float
        Mean of the normal distribution to be approximated.
    sigma : float
        Standard deviation of the normal distribution to be approximated.
    N_draws : int
        Number of draws to use in Monte Carlo.

    Returns
    -------
    p_vec : numpy.array
        A stochastic vector with probability weights for each x in x_grid.
    '''
    # Take random draws from the desired normal distribution.
    random_draws = np.random.normal(loc=mu, scale=sigma, size=N_draws)
    # Distance between each grid point (rows) and each draw (columns).
    distance = np.abs(x_grid[:, np.newaxis] - random_draws[np.newaxis, :])
    # Index of the grid point closest to each draw.
    distance_minimizing_index = np.argmin(distance, axis=0)
    # The approximate probability of a grid point is the fraction of Monte
    # Carlo draws that land closest to that point.
    p_vec = np.zeros_like(x_grid)
    for p_index in range(p_vec.size):
        p_vec[p_index] = np.sum(distance_minimizing_index == p_index) / N_draws
    # Check for obvious errors, and return p_vec.
    # BUG FIX: the original called np.isclose(np.sum(p_vec)) with no second
    # argument (a TypeError at runtime) and left ", 1." outside the call,
    # which would have made the final term an always-truthy tuple.
    assert (np.all(p_vec >= 0.) and np.all(p_vec <= 1.)
            and np.isclose(np.sum(p_vec), 1.))
    return p_vec
def logs(id, url, follow, sleep_duration=1):
    """View the logs of a job.

    To follow along a job in real time, use the --follow flag.
    """
    instance_log_id = get_log_id(id)
    # --url: just print where the raw log resource lives and stop.
    if url:
        log_url = "{}/api/v1/resources/{}?content=true".format(
            floyd.floyd_host, instance_log_id)
        floyd_logger.info(log_url)
        return
    # --follow: stream the log until the job finishes.
    if follow:
        floyd_logger.info("Launching job ...")
        follow_logs(instance_log_id, sleep_duration)
        return
    # Default: one-shot fetch of whatever has been logged so far.
    log_file_contents = ResourceClient().get_content(instance_log_id)
    if len(log_file_contents.strip()):
        floyd_logger.info(log_file_contents.rstrip())
    else:
        floyd_logger.info("Launching job now. Try after a few seconds.")
def _map_purchase_request_to_func(self, purchase_request_type):
    """Provides appropriate parameters to the on_purchase functions.

    Looks up the view function registered for ``purchase_request_type``
    and returns a partial with its arguments already bound.

    :raises NotImplementedError: when no view is registered for the type.
    """
    if purchase_request_type in self._intent_view_funcs:
        view_func = self._intent_view_funcs[purchase_request_type]
    else:
        raise NotImplementedError(
            'Request type "{}" not found and no default view specified.'.format(
                purchase_request_type))
    # BUG FIX: inspect.getargspec was removed in Python 3.11;
    # getfullargspec is the drop-in replacement for .args introspection.
    argspec = inspect.getfullargspec(view_func)
    arg_names = argspec.args
    arg_values = self._map_params_to_view_args(purchase_request_type, arg_names)
    # (Removed a leftover debug print of the resolved view and arguments.)
    return partial(view_func, *arg_values)
def _mic_required(target_info):
    """Check the MsvAvFlags field of the supplied TargetInfo structure to
    determine if the MIC flag is set.

    :param target_info: The TargetInfo structure to check
    :return: True when the MIC flag is set, otherwise False
    """
    if target_info is not None and target_info[TargetInfo.NTLMSSP_AV_FLAGS] is not None:
        flags = struct.unpack('<I', target_info[TargetInfo.NTLMSSP_AV_FLAGS][1])[0]
        # Bit 0x00000002 of MsvAvFlags signals that a MIC is present.
        return bool(flags & 0x00000002)
    # BUG FIX: previously fell off the end and returned None; the documented
    # contract is a boolean.
    return False
def help_center_article_translation_create(self, article_id, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/translations#create-translation"
    # POST the translation payload to the per-article translations endpoint.
    endpoint = "/api/v2/help_center/articles/{article_id}/translations.json".format(
        article_id=article_id)
    return self.call(endpoint, method="POST", data=data, **kwargs)
def charts_get(self, **kwargs):
    """Charts

    Returns a list of Charts, ordered by creation date (newest first).
    A Chart is chosen by Pollster editors; one example is "Obama job
    approval - Democrats".  It is always based upon a single Question.
    Users should strongly consider basing their analysis on Questions
    instead: Charts are derived data, and Pollster editors publish and
    change them as editorial priorities change.

    This method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, please define a `callback` function to be
    invoked when receiving the response:

        >>> def callback_function(response):
        >>>     pprint(response)
        >>> thread = api.charts_get(callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str cursor: Special string to index into the Array
    :param str tags: Comma-separated list of tag slugs. Only Charts with
        one or more of these tags and Charts based on Questions with one
        or more of these tags will be returned.
    :param date election_date: Date of an election, in YYYY-MM-DD format.
        Only Charts based on Questions pertaining to an election on this
        date will be returned.
    :return: InlineResponse200, or the request thread when called
        asynchronously.
    """
    # Only the deserialized payload is wanted, not the full HTTP response.
    kwargs['_return_http_data_only'] = True
    # With a callback, the helper returns the request thread; otherwise the
    # payload itself.  Both are returned unchanged (the original had two
    # branches performing the identical call).
    return self.charts_get_with_http_info(**kwargs)
def validate_abi(abi):
    """Helper function for validating an ABI.

    Raises ValueError when ``abi`` is not a list of dictionaries, or when
    two functions share the same 4-byte selector.
    """
    if not is_list_like(abi):
        raise ValueError("'abi' is not a list")
    if not all(is_dict(entry) for entry in abi):
        raise ValueError("'abi' is not a list of dictionaries")
    # Group the function entries by their hex-encoded 4-byte selector;
    # any group with more than one member is a collision.
    functions = filter_by_type('function', abi)
    selectors = groupby(compose(encode_hex, function_abi_to_4byte_selector), functions)
    duplicates = valfilter(lambda funcs: len(funcs) > 1, selectors)
    if duplicates:
        raise ValueError(
            'Abi contains functions with colliding selectors. '
            'Functions {0}'.format(_prepare_selector_collision_msg(duplicates)))
def _validate_for_numeric_unaryop(self, op, opstr):
    """Validate if we can perform a numeric unary operation."""
    # Non-numeric dtypes cannot take numeric unary ops; report the concrete
    # subclass name in the error.
    if self._is_numeric_dtype:
        return
    raise TypeError(
        "cannot evaluate a numeric op "
        "{opstr} for type: {typ}".format(opstr=opstr, typ=type(self).__name__))
def _filter_gte(self, term, field_name, field_type, is_not):
    """Private method that returns a xapian.Query that searches for any term
    that is greater than `term` in a specified `field`.
    """
    vrp = XHValueRangeProcessor(self.backend)
    # '<field>:<value>' .. '*' encodes an open-ended range starting at term.
    pos, begin, end = vrp(
        '%s:%s' % (field_name, _term_to_xapian_value(term, field_type)), '*')
    range_query = xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
    if is_not:
        # Negate by subtracting the range from the set of all documents.
        return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), range_query)
    return range_query
def load_modules(self, args):
    """Wrapper to load plugins and export modules."""
    # Active plugins dictionary.
    self._plugins = collections.defaultdict(dict)
    self.load_plugins(args=args)
    # Active exporters dictionary.
    self._exports = collections.defaultdict(dict)
    # All available exporters dictionary.
    self._exports_all = collections.defaultdict(dict)
    self.load_exports(args=args)
    # Restore the interpreter search path mutated while loading modules.
    # NOTE(review): `sys_path` is expected to be a module-level snapshot of
    # sys.path taken before loading -- confirm it exists in this module.
    sys.path = sys_path
def do_setup(self, arg, arguments):
    """Usage:
        setup init [--force]

    Copies a cmd3.yaml file into ~/.cloudmesh/cmd3.yaml
    """
    if not arguments["init"]:
        return
    Console.ok("Initialize cmd3.yaml file")
    # Imported lazily so the yaml machinery is only loaded when needed.
    from cmd3.yaml_setup import create_cmd3_yaml_file
    create_cmd3_yaml_file(force=arguments["--force"])
def find_relations(chunked):
    """The input is a list of [token, tag, chunk]-items.
    The output is a list of [token, tag, chunk, relation]-items.

    A noun phrase preceding a verb phrase is perceived as sentence subject.
    A noun phrase following a verb phrase is perceived as sentence object.
    """
    # Chunk tag without the B-/I- prefix, e.g. "B-NP" => "NP".
    tag = lambda token: token[2].split("-")[-1]
    # Group successive tokens with the same chunk tag; every token gets a
    # fourth slot initialised to "O" (no relation).
    chunks = []
    for token in chunked:
        if len(chunks) == 0 or token[2].startswith("B-") or tag(token) != tag(chunks[-1][-1]):
            chunks.append([])
        chunks[-1].append(token + ["O"])
    # If a VP is preceded by a NP, the NP is tagged as NP-SBJ-(id).
    # If a VP is followed by a NP, the NP is tagged as NP-OBJ-(id).
    # Chunks that are not part of a relation keep an O-tag.
    rel_id = 0  # renamed from `id`, which shadowed the builtin
    for i, chunk in enumerate(chunks):
        if tag(chunk[-1]) == "VP" and i > 0 and tag(chunks[i - 1][-1]) == "NP":
            if chunk[-1][-1] == "O":
                rel_id += 1
            for token in chunk:
                token[-1] = "VP-" + str(rel_id)
            for token in chunks[i - 1]:
                token[-1] += "*NP-SBJ-" + str(rel_id)
                token[-1] = token[-1].lstrip("O-*")
        if tag(chunk[-1]) == "VP" and i < len(chunks) - 1 and tag(chunks[i + 1][-1]) == "NP":
            if chunk[-1][-1] == "O":
                rel_id += 1
            for token in chunk:
                token[-1] = "VP-" + str(rel_id)
            for token in chunks[i + 1]:
                token[-1] = "*NP-OBJ-" + str(rel_id)
                token[-1] = token[-1].lstrip("O-*")
    # This is more a proof-of-concept than useful in practice:
    # PP-LOC = be + in|at + the|my
    # PP-DIR = go + to|towards + the|my
    for i, chunk in enumerate(chunks):
        if 0 < i < len(chunks) - 1 and len(chunk) == 1 and chunk[-1][-1] == "O":
            # previous / current / next
            t0, t1, t2 = chunks[i - 1][-1], chunks[i][0], chunks[i + 1][0]
            if tag(t1) == "PP" and t2[1] in ("DT", "PR", "PRP$"):
                if t0[0] in BE and t1[0] in ("in", "at"):
                    t1[-1] = "PP-LOC"
                if t0[0] in GO and t1[0] in ("to", "towards"):
                    t1[-1] = "PP-DIR"
    # IDIOM FIX: flatten with a plain loop instead of the original
    # side-effecting list comprehension.
    related = []
    for chunk in chunks:
        related.extend(chunk)
    return related
def create_tuple(input_list, input_string):
    """Construct a new tuple from a list and a string.

    Args:
        input_list: A list of strings.
        input_string: A string.

    Returns:
        A tuple combining the elements of the input list and the input
        string.

    Examples:
        >>> create_tuple(['WEB', 'is'], 'best')
        ('WEB', 'is', 'best')
        >>> create_tuple(['We', 'are'], 'Developers')
        ('We', 'are', 'Developers')
        >>> create_tuple(['Part', 'is'], 'Wrong')
        ('Part', 'is', 'Wrong')
    """
    # Append the string to a copy of the list, then freeze into a tuple.
    combined = input_list + [input_string]
    return tuple(combined)
def manage(self, dateTimeString):
    """Return a Python datetime object based on the dateTimeString.

    Handles date times in the following formats (with either / or - as the
    date separator):

        YYYY/MM/DD HH:MM:SS      2014/11/05 21:47:28, 2014/11/5 21:47:28
        MM/DD/YYYY [HH:MM[:SS]]  11/05/2014, 11/5/2014,
                                 11/05/2014 16:28:00, 11/5/2014 16:28
    """
    normalized = dateTimeString.replace('-', '/')
    parts = normalized.split(' ')  # [0] = date, [1] = time (if present)
    date_part = parts[0]
    time_part = parts[1] if len(parts) > 1 else '00:00:00'  # default
    slash_at = normalized.find('/')
    if slash_at == 4:
        # YYYY/MM/DD ...
        fields = date_part.split('/') + time_part.split(':')
    elif 1 <= slash_at <= 2:
        # MM/DD/YYYY or M/D?/YYYY -- reorder to year, month, day.
        d = date_part.split('/')
        fields = [d[2], d[0], d[1]] + time_part.split(':')
    else:
        raise ValueError("unable to manage unsupported string format: %s" % (normalized))
    return datetime(*[int(field) for field in fields])
def config(self):
    """Implements Munin Plugin Graph Configuration.

    Prints out configuration for graphs.

    Use as-is.  Not required to be overwritten in child classes.  The
    plugin will work correctly as long as the Munin Graph objects have
    been populated.
    """
    # BUG FIX: modernized Python 2 print statements and dict.iteritems()
    # (syntax/attribute errors under Python 3) to Python 3 equivalents.
    for parent_name in self._graphNames:
        graph = self._graphDict[parent_name]
        if self.isMultigraph:
            print("multigraph %s" % self._getMultigraphID(parent_name))
        print(self._formatConfig(graph.getConfig()))
        print()
    if (self.isMultigraph and self._nestedGraphs
            and self._subgraphDict and self._subgraphNames):
        for parent_name, subgraph_names in self._subgraphNames.items():
            for graph_name in subgraph_names:
                graph = self._subgraphDict[parent_name][graph_name]
                # NOTE(review): the original called self.getMultigraphID
                # here but self._getMultigraphID above -- confirm which
                # name exists on the class.
                print("multigraph %s" % self.getMultigraphID(parent_name, graph_name))
                print(self._formatConfig(graph.getConfig()))
                print()
    return True
def _query_k(k, i, P, oracle, query, trn, state_cache, dist_cache,
             smooth=False, D=None, weight=0.5):
    """A helper function for the query-matching function's iteration over
    observations.

    Args:
        k: index of the candidate path
        i: index of the frames of the observations
        P: the path matrix of size K x N, K the number of paths initiated,
            N the frame number of observations
        oracle: an encoded oracle
        query: observations matrix (numpy array) of dimension N x D,
            D the dimension of the observation
        trn: function handle of forward-links vector gathering
        state_cache: a list storing the states visited during the loop
            over k
        dist_cache: stores, per oracle state, the distance calculated
            between the current observation and that state
        smooth: whether to enforce a preference on continuation or not
        D: self-similarity matrix, required if smooth is set to True
        weight: the weight between continuation or jumps (1.0 for certain
            continuation)

    Returns:
        Tuple of the reachable state with minimum distance to the current
        observation, and that minimum distance.
    """
    # Forward links reachable from path k's previous state.
    _trn = trn(oracle, P[i - 1][k])
    # All oracle states latent-equivalent to any of those links.
    t = list(itertools.chain.from_iterable(
        [oracle.latent[oracle.data[j]] for j in _trn]))
    # Only compute distances for links not already visited for this frame.
    _trn_unseen = [_t for _t in _trn if _t not in state_cache]
    state_cache.extend(_trn_unseen)
    if _trn_unseen:
        t_unseen = list(itertools.chain.from_iterable(
            [oracle.latent[oracle.data[j]] for j in _trn_unseen]))
        # NOTE(review): the fancy indexing below implies dist_cache is a
        # numpy array rather than a plain Python list -- confirm with the
        # caller that allocates it.
        dist_cache[t_unseen] = _dist_obs_oracle(oracle, query[i], t_unseen)
    dvec = dist_cache[t]
    if smooth and P[i - 1][k] < oracle.n_states - 1:
        # Blend observation distance with self-similarity so continuation
        # is preferred over jumps according to `weight`.
        dvec = dvec * (1.0 - weight) + weight * np.array(
            [D[P[i - 1][k]][_t - 1] for _t in t])
    # Pick the reachable state closest to the current observation.
    _m = np.argmin(dvec)
    return t[_m], dvec[_m]
def handle_options():
    '''Handle options.'''
    parser = OptionParser()
    parser.set_defaults(cmaglin=False, single=False, alpha_cov=False)
    parser.add_option('-x', '--xmin', dest='xmin', help='Minium X range', type='float')
    parser.add_option('-X', '--xmax', dest='xmax', help='Maximum X range', type='float')
    parser.add_option('-z', '--zmin', dest='zmin', help='Minium Z range', type='float')
    parser.add_option('-Z', '--zmax', dest='zmax', help='Maximum Z range', type='float')
    parser.add_option('-c', '--column', dest='column',
                      help='column to plot of input file', type='int', default=2)
    parser.add_option('-u', '--unit', dest='xunit',
                      help='Unit of length scale, typically meters (m) '
                           'or centimeters (cm)',
                      metavar='UNIT', type='str', default='m')
    parser.add_option('--alpha_cov', action='store_true', dest='alpha_cov',
                      help='use coverage for transparency')
    parser.add_option('--cbtiks', dest='cbtiks', help='Number of CB tiks',
                      type=int, metavar='INT', default=3)
    parser.add_option('--cmaglin', action='store_true', dest='cmaglin',
                      help='linear colorbar for magnitude')
    # The vmin/vmax pair is identical for every colorbar flavor.
    for prefix in ('mag', 'pha', 'real', 'imag'):
        parser.add_option('--%s_vmin' % prefix, dest='%s_vmin' % prefix,
                          help='Minium of colorbar', type='float')
        parser.add_option('--%s_vmax' % prefix, dest='%s_vmax' % prefix,
                          help='Maximum of colorbar', type='float')
    options, _args = parser.parse_args()
    return options
def startup(request):
    """This view provides initial data to the client, such as available
    skills and causes.
    """
    # Serialize everything under the locale requested by the client.
    with translation.override(translation.get_language_from_request(request)):
        payload = {
            "skills": serializers.SkillSerializer(
                models.Skill.objects.all(), many=True).data,
            "causes": serializers.CauseSerializer(
                models.Cause.objects.all(), many=True).data,
            "cities": serializers.GoogleAddressCityStateSerializer(
                models.GoogleAddress.objects.all(), many=True).data,
        }
        return response.Response(payload)
def bInit(self, vrModel, vrDiffuseTexture):
    "Purpose: Allocates and populates the GL resources for a render model"
    # Create and bind a VAO to hold state for this model.
    self.m_glVertArray = glGenVertexArrays(1)
    glBindVertexArray(self.m_glVertArray)
    # Populate a vertex buffer with the model's interleaved vertex data.
    self.m_glVertBuffer = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, self.m_glVertBuffer)
    glBufferData(GL_ARRAY_BUFFER, sizeof(openvr.RenderModel_Vertex_t) * vrModel.unVertexCount, vrModel.rVertexData, GL_STATIC_DRAW)
    # Identify the components in the vertex buffer:
    # attribute 0 = position (3 floats), 1 = normal (3 floats),
    # 2 = texture coordinates (2 floats), with the struct size as stride.
    glEnableVertexAttribArray(0)
    glVertexAttribPointer(0, 3, GL_FLOAT, False, sizeof(openvr.RenderModel_Vertex_t), openvr.RenderModel_Vertex_t.vPosition.offset)
    glEnableVertexAttribArray(1)
    glVertexAttribPointer(1, 3, GL_FLOAT, False, sizeof(openvr.RenderModel_Vertex_t), openvr.RenderModel_Vertex_t.vNormal.offset)
    glEnableVertexAttribArray(2)
    glVertexAttribPointer(2, 2, GL_FLOAT, False, sizeof(openvr.RenderModel_Vertex_t), openvr.RenderModel_Vertex_t.rfTextureCoord.offset)
    # Create and populate the index buffer (3 uint16 indices per triangle).
    self.m_glIndexBuffer = glGenBuffers(1)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.m_glIndexBuffer)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ctypes.c_uint16) * vrModel.unTriangleCount * 3, vrModel.rIndexData, GL_STATIC_DRAW)
    glBindVertexArray(0)
    # Create and populate the diffuse texture from the render model's map.
    self.m_glTexture = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, self.m_glTexture)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, vrDiffuseTexture.unWidth, vrDiffuseTexture.unHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, vrDiffuseTexture.rubTextureMapData)
    # If this renders black ask McJohn what's wrong.
    glGenerateMipmap(GL_TEXTURE_2D)
    # Clamp at the edges, trilinear minification filtering.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
    # Use the maximum anisotropic filtering level the driver supports.
    fLargest = glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, fLargest)
    glBindTexture(GL_TEXTURE_2D, 0)
    # Cache the index count used at draw time.
    self.m_unVertexCount = vrModel.unTriangleCount * 3
    return True
def _get_command_args(self, command, args):
    '''Work out the command arguments for a given command.'''
    # Positional parameter names taken straight from the function's code
    # object.
    arg_names = command.__code__.co_varnames[:command.__code__.co_argcount]
    command_args = {}
    for name in arg_names:
        if name == 'args':
            # The parsed-arguments namespace itself.
            command_args['args'] = args
        elif name in self.provide_args:
            # Values injected by the framework.
            command_args[name] = self.provide_args[name]
        else:
            # Everything else is looked up on the parsed arguments.
            command_args[name] = getattr(args, name)
    return command_args
def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):
    """Processes a path specification.

    Args:
        extraction_worker (worker.ExtractionWorker): extraction worker.
        parser_mediator (ParserMediator): parser mediator.
        path_spec (dfvfs.PathSpec): path specification.
    """
    display_name = parser_mediator.GetDisplayNameForPathSpec(path_spec)
    self._current_display_name = display_name
    try:
        extraction_worker.ProcessPathSpec(parser_mediator, path_spec)
    except dfvfs_errors.CacheFullError:
        # TODO: signal engine of failure.
        self._abort = True
        logger.error(
            'ABORT: detected cache full error while processing path spec: '
            '{0:s}'.format(display_name))
    except Exception as exception:  # pylint: disable=broad-except
        # Any other failure is reported as a warning so processing can go on.
        parser_mediator.ProduceExtractionWarning(
            'unable to process path specification with error: '
            '{0!s}'.format(exception), path_spec=path_spec)
        if self._processing_configuration.debug_output:
            logger.warning(
                'Unhandled exception while processing path specification: '
                '{0:s}.'.format(display_name))
            logger.exception(exception)
def urlencode2(query, doseq=0, safe="", querydelimiter="&"):
    """Encode a sequence of two-element tuples or dictionary into a URL query string.

    If any values in the query arg are sequences and ``doseq`` is true, each
    sequence element is converted to a separate parameter.  If the query arg
    is a sequence of two-element tuples, the order of the parameters in the
    output will match the order of parameters in the input.

    Args:
        query: mapping or sequence of 2-tuples to encode.
        doseq: when true, expand sequence values into one ``key=elt`` pair
            per element.
        safe: characters that should not be quoted (passed to ``quote_plus``).
        querydelimiter: separator placed between encoded pairs.

    Returns:
        str: the URL-encoded query string.

    Raises:
        TypeError: if ``query`` is neither a mapping nor a sequence of
            two-element tuples.
    """
    if hasattr(query, "items"):
        # mapping objects
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences...
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # Zero-length sequences of all types will get here and succeed,
            # but that's a minor nit -- since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency.
        except TypeError:
            ty, va, tb = sys.exc_info()
            # BUG FIX: the previous code concatenated the traceback object
            # directly onto the message string, which itself raised a
            # confusing TypeError.  Re-raise with the intended message and
            # attach the original traceback instead.
            raise TypeError(
                "not a valid non-string sequence or mapping object"
            ).with_traceback(tb)
    l = []
    if not doseq:
        # preserve old behavior
        for k, v in query:
            k = quote_plus(str(k), safe=safe)
            v = quote_plus(str(v), safe=safe)
            l.append(k + '=' + v)
    else:
        for k, v in query:
            k = quote_plus(str(k), safe=safe)
            if isinstance(v, str):
                v = quote_plus(v, safe=safe)
                l.append(k + '=' + v)
            elif _is_unicode(v):
                # is there a reasonable way to convert to ASCII?
                # encode generates a string, but "replace" or "ignore"
                # lose information and "strict" can raise UnicodeError
                v = quote_plus(v.encode("ASCII", "replace"))
                l.append(k + '=' + v)
            else:
                try:
                    # is this a sufficient test for sequence-ness?
                    len(v)
                except TypeError:
                    # not a sequence
                    v = quote_plus(str(v), safe=safe)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        l.append(k + '=' + quote_plus(str(elt)))
    return querydelimiter.join(l)
def cmd_output_sysid(self, args):
    '''add new output for a specific MAVLink sysID'''
    target_sysid = int(args[0])
    device = args[1]
    print("Adding output %s for sysid %u" % (device, target_sysid))
    try:
        conn = mavutil.mavlink_connection(
            device, input=False,
            source_system=self.settings.source_system)
        conn.mav.srcComponent = self.settings.source_component
    except Exception:
        print("Failed to connect to %s" % device)
        return
    try:
        # Register the connection fd so child processes can inherit it;
        # not every connection type exposes a file descriptor.
        mp_util.child_fd_list_add(conn.port.fileno())
    except Exception:
        pass
    # Replace any existing output for this sysid, closing the old one first.
    previous = self.mpstate.sysid_outputs.get(target_sysid)
    if previous is not None:
        previous.close()
    self.mpstate.sysid_outputs[target_sysid] = conn
def do_jump(self, arg):
    """j(ump) lineno

    Set the next line that will be executed.  Only available in the
    bottom-most frame.  This lets you jump back and execute code again,
    or jump forward to skip code that you don't want to run.

    It should be noted that not all jumps are allowed -- for instance it
    is not possible to jump into the middle of a for loop or out of a
    finally clause.
    """
    # Jumping is only meaningful in the newest (bottom-most) frame.
    if self.curindex + 1 != len(self.stack):
        self.error('You can only jump within the bottom frame')
        return
    try:
        lineno = int(arg)
    except ValueError:
        self.error("The 'jump' command requires a line number")
        return
    try:
        # Do the jump, fix up our copy of the stack, and display the
        # new position.
        self.curframe.f_lineno = lineno
        frame = self.stack[self.curindex][0]
        self.stack[self.curindex] = frame, lineno
        self.print_stack_entry(self.stack[self.curindex])
    except ValueError as e:
        # The interpreter rejects illegal jump targets with ValueError.
        self.error('Jump failed: %s' % e)
def root_parent(self, category=None):
    """Returns the topmost parent of the current category."""
    # ``category`` is accepted for API compatibility but is not used.
    # Raises StopIteration when no root exists, matching the original
    # next(filter(...)) behavior.
    return next(c for c in self.hierarchy() if c.is_root)
async def send_animation(self,
                         chat_id: typing.Union[base.Integer, base.String],
                         animation: typing.Union[base.InputFile, base.String],
                         duration: typing.Union[base.Integer, None] = None,
                         width: typing.Union[base.Integer, None] = None,
                         height: typing.Union[base.Integer, None] = None,
                         thumb: typing.Union[typing.Union[base.InputFile, base.String], None] = None,
                         caption: typing.Union[base.String, None] = None,
                         parse_mode: typing.Union[base.String, None] = None,
                         disable_notification: typing.Union[base.Boolean, None] = None,
                         reply_to_message_id: typing.Union[base.Integer, None] = None,
                         reply_markup: typing.Union[typing.Union[types.InlineKeyboardMarkup,
                                                                 types.ReplyKeyboardMarkup,
                                                                 types.ReplyKeyboardRemove,
                                                                 types.ForceReply], None] = None) -> types.Message:
    """Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound).

    On success, the sent Message is returned.
    Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future.

    Source: https://core.telegram.org/bots/api#sendanimation

    :param chat_id: Unique identifier for the target chat or username of the target channel
        (in the format @channelusername)
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :param animation: Animation to send. Pass a file_id as String to send an animation that exists
        on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation
        from the Internet, or upload a new animation using multipart/form-data
    :type animation: :obj:`typing.Union[base.InputFile, base.String]`
    :param duration: Duration of sent animation in seconds
    :type duration: :obj:`typing.Union[base.Integer, None]`
    :param width: Animation width
    :type width: :obj:`typing.Union[base.Integer, None]`
    :param height: Animation height
    :type height: :obj:`typing.Union[base.Integer, None]`
    :param thumb: Thumbnail of the file sent. The thumbnail should be in JPEG format and less than 200 kB in size.
        A thumbnail's width and height should not exceed 90.
    :type thumb: :obj:`typing.Union[typing.Union[base.InputFile, base.String], None]`
    :param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters
    :type caption: :obj:`typing.Union[base.String, None]`
    :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
        fixed-width text or inline URLs in the media caption
    :type parse_mode: :obj:`typing.Union[base.String, None]`
    :param disable_notification: Sends the message silently. Users will receive a notification with no sound
    :type disable_notification: :obj:`typing.Union[base.Boolean, None]`
    :param reply_to_message_id: If the message is a reply, ID of the original message
    :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard,
        custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user
    :type reply_markup: :obj:`typing.Union[typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup,
        types.ReplyKeyboardRemove, types.ForceReply], None]`
    :return: On success, the sent Message is returned
    :rtype: :obj:`types.Message`
    """
    # NOTE: generate_payload() introspects this function's locals(), so do not
    # introduce or rename local variables above the payload call without
    # updating its ``exclude`` list accordingly.
    reply_markup = prepare_arg(reply_markup)
    payload = generate_payload(**locals(), exclude=["animation", "thumb"])
    files = {}
    # ``animation`` and ``thumb`` may be local file uploads; attach them as
    # multipart files instead of plain payload fields.
    prepare_file(payload, files, 'animation', animation)
    prepare_attachment(payload, files, 'thumb', thumb)
    result = await self.request(api.Methods.SEND_ANIMATION, payload, files)
    return types.Message(**result)
def system_monitor_mail_interface_enable(self, **kwargs):
    """Auto Generated Code"""
    # Build the request document:
    # <config><system-monitor-mail><interface><enable/></interface></system-monitor-mail></config>
    config = ET.Element("config")
    mail = ET.SubElement(
        config, "system-monitor-mail",
        xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
    iface = ET.SubElement(mail, "interface")
    ET.SubElement(iface, "enable")
    # Dispatch via the caller-supplied callback, defaulting to the
    # instance's configured one.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def alltoall_pointtwise(xs, devices, split_axis, concat_axis):
    """MPI alltoall operation.

    Implementation of alltoall using pointwise communication.

    Args:
        xs: a list of n tf.Tensors
        devices: a list of n strings
        split_axis: an integer
        concat_axis: an integer

    Returns:
        a list of n Tensors
    """
    n = len(xs)
    if n == 1:
        # Single participant: nothing to exchange.
        return xs
    # Split each tensor into n pieces along split_axis (one piece per target
    # device), then transpose the list-of-lists so that
    # parts[target][source] is the piece moving from `source` to `target`.
    # [target, source]
    parts = mtf.transpose_list_of_lists(
        mtf.parallel(devices, tf.split, xs, [n] * n, axis=[split_axis] * n))
    # On each device, concatenate the received pieces along concat_axis.
    return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n)
def euclidean3d(v1, v2):
    """Faster implementation of euclidean distance for the 3D case.

    Args:
        v1, v2: three-element coordinate sequences.

    Returns:
        float or None: the Euclidean distance, or None (with a warning
        printed) when either vector is not three-dimensional.
    """
    # BUG FIX: the original condition `not len(v1) == 3 and len(v2) == 3`
    # parsed as `(not len(v1) == 3) and (len(v2) == 3)`, so a 3-element v1
    # with a short v2 slipped through and crashed on indexing below.
    if not (len(v1) == 3 and len(v2) == 3):
        print("Vectors are not in 3D space. Returning None.")
        return None
    return np.sqrt((v1[0] - v2[0]) ** 2
                   + (v1[1] - v2[1]) ** 2
                   + (v1[2] - v2[2]) ** 2)
def paxos_instance(self):
    """Returns instance of PaxosInstance (protocol implementation)."""
    # Construct the protocol object from the constant attributes.
    instance = PaxosInstance(self.network_uid, self.quorum_size)
    # Copy over whichever variable attributes the aggregate has recorded.
    for name in self.paxos_variables:
        value = getattr(self, name, None)
        if value is None:
            continue
        # Deep-copy containers so mutations on the protocol instance
        # cannot leak back into the aggregate's state.
        if isinstance(value, (set, list, dict, tuple)):
            value = deepcopy(value)
        setattr(instance, name, value)
    return instance
def _notification_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Handle notification statement.

    Creates a fresh NotificationNode and attaches it as a child node for
    the given statement in the provided schema context.
    """
    self._handle_child(NotificationNode(), stmt, sctx)
def get_error(self):
    """Retrieve error data.

    Returns:
        tuple: ``(exception_name, message)`` where the message shows the
        offending expression, a caret marker at the error column when it
        is known, and the stored error text.
    """
    # AST nodes may lack position information; default to "unknown".
    col_offset = -1
    if self.node is not None:
        col_offset = getattr(self.node, 'col_offset', -1)
    try:
        exc_name = self.exc.__name__
    except AttributeError:
        # Exception instances (rather than classes) have no __name__.
        exc_name = str(self.exc)
    if exc_name in (None, 'None'):
        exc_name = 'UnknownError'
    lines = [" %s" % self.expr]
    if col_offset > 0:
        lines.append(" %s^^^" % ((col_offset) * ' '))
    lines.append(str(self.msg))
    return (exc_name, '\n'.join(lines))
def s3_upload(source, destination, profile_name=None):
    """Copy a file from a local source to an S3 destination.

    Parameters
    ----------
    source : str
        Path of the local file to upload.
    destination : str
        Path starting with s3://, e.g. 's3://bucket-name/key/foo.bar'
    profile_name : str, optional
        AWS profile
    """
    bucket_name, key = _s3_path_split(destination)
    session = boto3.Session(profile_name=profile_name)
    bucket = session.resource('s3').Bucket(bucket_name)
    # Stream the file handle straight to S3; the context manager
    # guarantees the file is closed afterwards.
    with open(source, 'rb') as data:
        bucket.put_object(Key=key, Body=data)
def patch_jwt_settings():
    """Patch rest_framework_jwt authentication settings from allauth."""
    jwt_defaults = api_settings.defaults
    jwt_defaults['JWT_PAYLOAD_GET_USER_ID_HANDLER'] = (
        __name__ + '.get_user_id_from_payload_handler')
    if 'allauth.socialaccount' not in settings.INSTALLED_APPS:
        # Social accounts are not enabled; nothing more to patch.
        return
    # Imported lazily so the module loads even without allauth installed.
    from allauth.socialaccount.models import SocialApp
    try:
        app = SocialApp.objects.get(provider='helsinki')
    except SocialApp.DoesNotExist:
        # No provider app configured yet; leave remaining defaults alone.
        return
    jwt_defaults['JWT_SECRET_KEY'] = app.secret
    jwt_defaults['JWT_AUDIENCE'] = app.client_id
def get_all_users(**kwargs):
    """Get the username & ID of all users.

    Use the filter if it has been provided.  ``filter_type`` may be 'id'
    or 'username'; ``filter_value`` may be a single value, a list of
    values, or a comma-separated string.

    Raises:
        Exception: if ``filter_type`` is neither 'id' nor 'username'.
    """
    import ast  # local import: only needed when parsing id filter strings

    users_qry = db.DBSession.query(User)
    filter_type = kwargs.get('filter_type')
    filter_value = kwargs.get('filter_value')
    if filter_type is not None:
        # Filtering the search of users
        if filter_type == "id":
            if isinstance(filter_value, str):
                # Trying to read a csv string.
                log.info("[HB.users] Getting user by Filter ID : %s", filter_value)
                # SECURITY FIX: the previous code used eval() on the incoming
                # string, allowing arbitrary code execution.  ast.literal_eval
                # accepts the same literal forms ("5", "1,2", "[1, 2]")
                # without executing code.
                filter_value = ast.literal_eval(filter_value)
            if type(filter_value) is int:
                users_qry = users_qry.filter(User.id == filter_value)
            else:
                users_qry = users_qry.filter(User.id.in_(filter_value))
        elif filter_type == "username":
            if isinstance(filter_value, str):
                # Trying to read a csv string
                log.info("[HB.users] Getting user by Filter Username : %s", filter_value)
                filter_value = filter_value.split(",")
                for i, em in enumerate(filter_value):
                    log.info("[HB.users] >>> Getting user by single Username : %s", em)
                    filter_value[i] = em.strip()
            if isinstance(filter_value, str):
                users_qry = users_qry.filter(User.username == filter_value)
            else:
                users_qry = users_qry.filter(User.username.in_(filter_value))
        else:
            raise Exception("Filter type '{}' not allowed".format(filter_type))
    else:
        log.info('[HB.users] Getting All Users')
    rs = users_qry.all()
    return rs
def change_capacity_percent(self, group_name, scaling_adjustment):
    """http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html

    If PercentChangeInCapacity returns a value between 0 and 1, Auto
    Scaling will round it off to 1.  If the PercentChangeInCapacity
    returns a value greater than 1, Auto Scaling will round it off to
    the lower value.  For example, if PercentChangeInCapacity returns
    12.5, then Auto Scaling will round it off to 12.
    """
    group = self.autoscaling_groups[group_name]
    current = group.desired_capacity
    scaled = current * (1 + scaling_adjustment / 100.0)
    if current < scaled < current + 1:
        # Fractional increase below one whole instance: round up to one.
        new_capacity = current + 1
    else:
        # Otherwise truncate, per the AWS documented behavior.
        new_capacity = int(scaled)
    self.set_desired_capacity(group_name, new_capacity)
def _init ( self , parser ) :
"""Initialize / Build the ` ` argparse . ArgumentParser ` ` and subparsers .
This internal version of ` ` init ` ` is used to ensure that all
subcommands have a properly initialized parser .
Args
parser : argparse . ArgumentParser
The parser for this command .""" | assert isinstance ( parser , argparse . ArgumentParser )
self . _init_parser ( parser )
self . _attach_arguments ( )
self . _attach_subcommands ( )
self . initialized = True |
def parse(cls, data: bytes) -> 'MessageContent':
    """Parse the bytestring into message content.

    Args:
        data: The bytestring to parse.
    """
    # Locate line boundaries first, then hand a zero-copy view of the
    # payload to the internal parser.
    line_spans = cls._find_lines(data)
    return cls._parse(data, memoryview(data), line_spans)
def fit(self, choosers, alternatives, current_choice):
    """Fit and save model parameters based on given data.

    Parameters
    ----------
    choosers : pandas.DataFrame
        Table describing the agents making choices, e.g. households.
    alternatives : pandas.DataFrame
        Table describing the things from which agents are choosing,
        e.g. buildings.
    current_choice : pandas.Series or any
        A Series describing the `alternatives` currently chosen
        by the `choosers`. Should have an index matching `choosers`
        and values matching the index of `alternatives`.
        If a non-Series is given it should be a column in `choosers`.

    Returns
    -------
    log_likelihoods : dict
        Dict of log-liklihood values describing the quality of the
        model fit. Will have keys 'null', 'convergence', and 'ratio'.
    """
    logger.debug('start: fit LCM model {}'.format(self.name))
    if not isinstance(current_choice, pd.Series):
        current_choice = choosers[current_choice]
    choosers, alternatives = self.apply_fit_filters(choosers, alternatives)
    if self.estimation_sample_size:
        # Draw a random subsample of choosers to keep estimation tractable.
        choosers = choosers.loc[np.random.choice(
            choosers.index,
            min(self.estimation_sample_size, len(choosers)),
            replace=False)]
    current_choice = current_choice.loc[choosers.index]
    _, merged, chosen = interaction.mnl_interaction_dataset(
        choosers, alternatives, self.sample_size, current_choice)
    model_design = dmatrix(
        self.str_model_expression, data=merged, return_type='dataframe')
    # COMPAT FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and
    # removed in 1.0; .values is the long-supported equivalent.
    design_matrix = model_design.values
    if len(merged) != design_matrix.shape[0]:
        raise ModelEvaluationError(
            'Estimated data does not have the same length as input. '
            'This suggests there are null values in one or more of '
            'the input columns.')
    self.log_likelihoods, self.fit_parameters = mnl.mnl_estimate(
        design_matrix, chosen, self.sample_size)
    self.fit_parameters.index = model_design.columns
    logger.debug('finish: fit LCM model {}'.format(self.name))
    return self.log_likelihoods
def start_discovery(add_callback=None, remove_callback=None):
    """Start discovering chromecasts on the network.

    Discovery runs on a separate thread.  When a chromecast is discovered,
    the callback will be called with the discovered chromecast's zeroconf
    name -- the dictionary key to find the chromecast metadata in
    ``listener.services``.

    Returns the CastListener object and the zeroconf ServiceBrowser object
    (or ``False`` when the browser could not be started).  To stop
    discovery, call ``stop_discovery`` with the ServiceBrowser object.
    """
    listener = CastListener(add_callback, remove_callback)
    browser = False
    try:
        browser = zeroconf.ServiceBrowser(
            zeroconf.Zeroconf(), "_googlecast._tcp.local.", listener)
    except (zeroconf.BadTypeInNameException,
            NotImplementedError,
            OSError,
            socket.error,
            zeroconf.NonUniqueNameException):
        # Discovery could not be started; the caller can detect this by
        # the ``False`` browser value.
        pass
    return listener, browser
def subvolume_show(path):
    '''
    Show information of a given subvolume

    path
        Mount point for the filesystem

    CLI Example:

    .. code-block:: bash

        salt '*' btrfs.subvolume_show /var/volumes/tmp
    '''
    res = __salt__['cmd.run_all'](['btrfs', 'subvolume', 'show', path])
    salt.utils.fsutils._verify_run(res)
    # The real name is the first line; later there is a table of
    # values separated with a colon.
    lines = res['stdout'].splitlines()
    name = lines.pop(0)
    table = {}
    for entry in lines:
        key, value = entry.split(':', 1)
        table[key.lower().strip()] = value.strip()
    return {name.strip(): table}
def p0(self):
    """A dictionary of the initial position of the walkers.

    This is set by using ``set_p0``. If not set yet, a ``ValueError`` is
    raised when the attribute is accessed.
    """
    if self._p0 is None:
        raise ValueError("initial positions not set; run set_p0")
    # Map each sampling parameter name to its slice of the position array.
    return {param: self._p0[..., k]
            for k, param in enumerate(self.sampling_params)}
def wordify(self):
    """Constructs string of all documents.

    :return: document representation of the dataset, one line per document
    :rtype: str
    """
    # Each line is "!<class> <document>", with the document's pieces
    # joined without separators.
    lines = [
        "!" + str(klass) + " " + ''.join(document)
        for klass, document in zip(self.resulting_classes,
                                   self.resulting_documents)
    ]
    return '\n'.join(lines)
def _set_infra(self, v, load=False):
    """Setter method for infra, mapped from YANG variable /show/infra (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_infra is considered as a private method.  Backends looking to
    populate this variable should do so via calling thisObj._set_infra()
    directly.

    YANG Description: Show system info
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG container type; rejects
        # values that are not compatible with the container schema.
        t = YANGDynClass(v, base=infra.infra, is_container='container', presence=False, yang_name="infra", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'action': u'chassis', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the structured error payload expected by pyangbind.
        raise ValueError({
            'error-string': """infra must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=infra.infra, is_container='container', presence=False, yang_name="infra", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'action': u'chassis', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='container', is_config=True)""",
        })
    self.__infra = t
    # Notify the parent object of the change when supported.
    if hasattr(self, '_set'):
        self._set()
def brightness(self, x, y, radius, medv, data):
    """Return the brightness value found in a region (radius) pixels away
    from (x, y) in (data).
    """
    _, _, region = self.cut_region(x, y, radius, data)
    # Use the 80th-percentile pixel value so isolated hot pixels do not
    # dominate, then subtract the supplied median level.
    ordered = np.sort(region.flat)
    pick = ordered[int(len(ordered) * 0.8)]
    return float(pick - medv)
def decode(cls, bytes, cmddict):
    """Decodes a sequence command from an array of bytes, according to the
    given command dictionary, and returns a new SeqCmd.
    """
    # Layout: byte 0 holds the command attributes, bytes 1-3 the relative
    # time delay, and the remainder is the encoded command itself.
    attrs = SeqCmdAttrs.decode(bytes[0:1])
    delay = SeqDelay.decode(bytes[1:4])
    command = cmddict.decode(bytes[4:])
    return cls(command, delay, attrs)
def encode_single(typ, arg):  # pylint: disable=too-many-return-statements,too-many-branches,too-many-statements,too-many-locals
    """Encode `arg` as `typ`.

    `arg` will be encoded in a best effort manner, were necessary the function
    will try to correctly define the underlying binary representation (ie.
    decoding a hex-encoded address/hash).

    Args:
        typ (Tuple[(str, int, list)]): A 3-tuple defining the `arg` type.

            The first element defines the type name.
            The second element defines the type length in bits.
            The third element defines if it's an array type.

            Together the first and second defines the elementary type, the
            third element must be present but is ignored.

            Valid type names are:
            - uint
            - int
            - bool
            - ufixed
            - fixed
            - string
            - bytes
            - hash
            - address

        arg (object): The object to be encoded, it must be a python object
            compatible with the `typ`.

    Raises:
        ValueError: when an invalid `typ` is supplied.
        ValueOutOfBounds: when `arg` cannot be encoded as `typ` because of the
            binary contraints.

    Note:
        This function don't work with array types, for that use the `enc`
        function.
    """
    base, sub, _ = typ

    if base == 'uint':
        sub = int(sub)
        if not (0 < sub <= 256 and sub % 8 == 0):
            raise ValueError('invalid unsigned integer bit length {}'.format(sub))
        try:
            i = decint(arg, signed=False)
        except EncodingError:
            # arg is larger than 2**256
            raise ValueOutOfBounds(repr(arg))
        if not 0 <= i < 2 ** sub:
            raise ValueOutOfBounds(repr(arg))
        value_encoded = int_to_big_endian(i)
        return zpad(value_encoded, 32)

    if base == 'int':
        sub = int(sub)
        bits = sub - 1
        if not (0 < sub <= 256 and sub % 8 == 0):
            raise ValueError('invalid integer bit length {}'.format(sub))
        try:
            i = decint(arg, signed=True)
        except EncodingError:
            # arg is larger than 2**255
            raise ValueOutOfBounds(repr(arg))
        if not -2 ** bits <= i < 2 ** bits:
            raise ValueOutOfBounds(repr(arg))
        # convert negative to "equivalent" positive (two's complement)
        value = i % 2 ** 256
        value_encoded = int_to_big_endian(value)
        return zpad(value_encoded, 32)

    if base == 'bool':
        if arg is True:
            value_encoded = int_to_big_endian(1)
        elif arg is False:
            value_encoded = int_to_big_endian(0)
        else:
            raise ValueError('%r is not bool' % arg)
        return zpad(value_encoded, 32)

    if base == 'ufixed':
        sub = str(sub)
        # pylint: disable=redefined-variable-type
        high_str, low_str = sub.split('x')
        high = int(high_str)
        low = int(low_str)
        if not (0 < high + low <= 256 and high % 8 == 0 and low % 8 == 0):
            raise ValueError('invalid unsigned fixed length {}'.format(sub))
        if not 0 <= arg < 2 ** high:
            raise ValueOutOfBounds(repr(arg))
        float_point = arg * 2 ** low
        fixed_point = int(float_point)
        return zpad(int_to_big_endian(fixed_point), 32)

    if base == 'fixed':
        sub = str(sub)
        # pylint: disable=redefined-variable-type
        high_str, low_str = sub.split('x')
        high = int(high_str)
        low = int(low_str)
        bits = high - 1
        if not (0 < high + low <= 256 and high % 8 == 0 and low % 8 == 0):
            raise ValueError('invalid unsigned fixed length {}'.format(sub))
        if not -2 ** bits <= arg < 2 ** bits:
            raise ValueOutOfBounds(repr(arg))
        float_point = arg * 2 ** low
        fixed_point = int(float_point)
        value = fixed_point % 2 ** 256
        return zpad(int_to_big_endian(value), 32)

    # Decimals
    if base == 'decimal':
        val_to_encode = int(arg * 10 ** int(sub))
        return zpad(encode_int(val_to_encode % 2 ** 256), 32)

    if base == 'string':
        if isinstance(arg, utils.unicode):
            arg = arg.encode('utf8')
        else:
            try:
                arg.decode('utf8')
            except UnicodeDecodeError:
                raise ValueError('string must be utf8 encoded')
        if len(sub):  # fixed length
            if not 0 <= len(arg) <= int(sub):
                raise ValueError('invalid string length {}'.format(sub))
            if not 0 <= int(sub) <= 32:
                raise ValueError('invalid string length {}'.format(sub))
            return rzpad(arg, 32)
        if not 0 <= len(arg) < TT256:
            raise Exception('Integer invalid or out of range: %r' % arg)
        length_encoded = zpad(int_to_big_endian(len(arg)), 32)
        value_encoded = rzpad(arg, utils.ceil32(len(arg)))
        return length_encoded + value_encoded

    if base == 'bytes':
        if not is_string(arg):
            if isinstance(arg, str):
                arg = bytes(arg, 'utf8')
            else:
                raise EncodingError('Expecting string: %r' % arg)
        # py2: force unicode into str
        arg = utils.to_string(arg)
        if len(sub):  # fixed length
            # BUG FIX: these two length checks previously raised the
            # unrelated message 'string must be utf8 encoded' (copied from
            # the string branch); report the actual length problem.
            if not 0 <= len(arg) <= int(sub):
                raise ValueError('invalid bytes length {}'.format(sub))
            if not 0 <= int(sub) <= 32:
                raise ValueError('invalid bytes length {}'.format(sub))
            return rzpad(arg, 32)
        if not 0 <= len(arg) < TT256:
            raise Exception('Integer invalid or out of range: %r' % arg)
        length_encoded = zpad(int_to_big_endian(len(arg)), 32)
        value_encoded = rzpad(arg, utils.ceil32(len(arg)))
        return length_encoded + value_encoded

    if base == 'hash':
        if not (int(sub) and int(sub) <= 32):
            raise EncodingError('too long: %r' % arg)
        if is_numeric(arg):
            return zpad(encode_int(arg), 32)
        if len(arg) == int(sub):
            # raw binary digest
            return zpad(arg, 32)
        if len(arg) == int(sub) * 2:
            # hex-encoded digest
            return zpad(decode_hex(arg), 32)
        raise EncodingError('Could not parse hash: %r' % arg)

    if base == 'address':
        assert sub == ''
        if is_numeric(arg):
            return zpad(encode_int(arg), 32)
        if len(arg) == 20:
            # raw binary address
            return zpad(arg, 32)
        if len(arg) == 40:
            # hex-encoded address
            return zpad(decode_hex(arg), 32)
        if len(arg) == 42 and arg[:2] == '0x':
            # 0x-prefixed hex-encoded address
            return zpad(decode_hex(arg[2:]), 32)
        raise EncodingError('Could not parse address: %r' % arg)

    raise EncodingError('Unhandled type: %r %r' % (base, sub))
def anonymize(remote_addr):
    """Anonymize IPv4 and IPv6 :param remote_addr: to /24 (zero'd)
    and /48 (zero'd).
    """
    # Py2 compat: decode byte strings before parsing.
    if not isinstance(remote_addr, text_type) and isinstance(remote_addr, str):
        remote_addr = remote_addr.decode('ascii', 'ignore')
    try:
        v4 = ipaddress.IPv4Address(remote_addr)
    except ipaddress.AddressValueError:
        pass
    else:
        # Keep the first three octets and zero the host byte.
        return u''.join(v4.exploded.rsplit('.', 1)[0]) + '.' + '0'
    try:
        v6 = ipaddress.IPv6Address(remote_addr)
    except ipaddress.AddressValueError:
        # Not a valid address at all; return a harmless placeholder.
        return u'0.0.0.0'
    if v6.ipv4_mapped is not None:
        # Anonymize the embedded IPv4 address instead.
        return anonymize(text_type(v6.ipv4_mapped))
    # Keep the first three hextets (/48) and zero the rest.
    return u'' + v6.exploded.rsplit(':', 5)[0] + ':' + ':'.join(['0000'] * 5)
def load_text_data(dataset, directory, centre=True):
    """Load in a data set of marker points from the Ohio State University C3D
    motion capture files (http://accad.osu.edu/research/mocap/mocap_data.htm).

    Args:
        dataset: base name of the .txt file to load (without extension).
        directory: directory containing the data and connections files.
        centre: when True, subtract each coordinate's per-frame mean.

    Returns:
        tuple: (Y, connect) where Y concatenates the X, Y and Z marker
        tracks (scaled by 1/400) and connect is the marker connectivity.
    """
    points, point_names = parse_text(
        os.path.join(directory, dataset + '.txt'))[0:2]
    # Remove markers where there is a NaN in any of the three coordinate
    # tracks.  BUG FIX: the original tested points[0] three times instead
    # of points[0], points[1] and points[2], so NaNs in the Y or Z tracks
    # slipped through.
    present_index = [
        i for i in range(points[0].shape[1])
        if not (np.any(np.isnan(points[0][:, i]))
                or np.any(np.isnan(points[1][:, i]))
                or np.any(np.isnan(points[2][:, i])))
    ]
    point_names = point_names[present_index]
    for i in range(3):
        points[i] = points[i][:, present_index]
        if centre:
            points[i] = (points[i].T - points[i].mean(axis=1)).T
    # Concatanate the X, Y and Z markers together
    Y = np.concatenate((points[0], points[1], points[2]), axis=1)
    Y = Y / 400.
    connect = read_connections(
        os.path.join(directory, 'connections.txt'), point_names)
    return Y, connect
def enable_disable_on_bot_select_deselect(self):
    """Disables the botconfig groupbox and minus buttons when no bot is selected

    :return:
    """
    nothing_selected = (not self.blue_listwidget.selectedItems()
                        and not self.orange_listwidget.selectedItems())
    if nothing_selected:
        # No bot is highlighted in either team list: lock the editing UI.
        self.bot_config_groupbox.setDisabled(True)
        self.blue_minus_toolbutton.setDisabled(True)
        self.orange_minus_toolbutton.setDisabled(True)
    else:
        self.bot_config_groupbox.setDisabled(False)
def add_var_arg(self, arg):
    """Add a variable (or macro) argument to the condor job.

    The argument is added to the submit file and a different value of the
    argument can be set for each node in the DAG.

    @param arg: name of option to add.
    """
    # Record the argument locally and register its index with the job,
    # then advance the index for the next variable argument.
    self.__args.append(arg)
    self.__job.add_var_arg(self.__arg_index)
    self.__arg_index += 1
def newsnr_sgveto(snr, bchisq, sgchisq):
    """Combined SNR derived from NewSNR and Sine-Gaussian Chisq"""
    nsnr = numpy.array(newsnr(snr, bchisq), ndmin=1)
    sgchisq = numpy.array(sgchisq, ndmin=1)
    # Down-weight triggers whose sine-Gaussian chisq exceeds 4.
    mask = numpy.array(sgchisq > 4, ndmin=1)
    if len(mask):
        nsnr[mask] = nsnr[mask] / (sgchisq[mask] / 4.0) ** 0.5
    # If snr input is float, return a float. Otherwise return numpy array.
    return nsnr if hasattr(snr, '__len__') else nsnr[0]
def check_integrity ( models ) :
    '''Apply validation and integrity checks to a collection of Bokeh models.

    Args:
        models (seq[Model]) : a collection of Models to test

    Returns:
        None

    This function will emit log warning and error messages for all error or
    warning conditions that are detected. For example, layouts without any
    children will trigger a warning:

    .. code-block:: python

        >>> empty_row = Row
        >>> check_integrity([empty_row])
        W-1002 (EMPTY_LAYOUT): Layout has no children: Row(id='2404a029-c69b-4e30-9b7d-4b7b6cdaad5b', ...)
    '''
    messages = dict ( error = [ ] , warning = [ ] )
    # Run every method named _check* that carries a validator_type marker,
    # collecting the issues it reports under that type.
    for model in models :
        for attr_name in dir ( model ) :
            if not attr_name . startswith ( "_check" ) :
                continue
            candidate = getattr ( model , attr_name )
            if getattr ( candidate , "validator_type" , None ) :
                messages [ candidate . validator_type ] . extend ( candidate ( ) )
    for msg in sorted ( messages [ 'error' ] ) :
        log . error ( "E-%d (%s): %s: %s" % msg )
    for msg in sorted ( messages [ 'warning' ] ) :
        code , name , desc , obj = msg
        # Individual warning codes can be silenced via __silencers__.
        if code not in __silencers__ :
            log . warning ( "W-%d (%s): %s: %s" % msg )
def preprocess ( * _unused , ** processors ) :
    """Decorator that applies pre-processors to the arguments of a function
    before calling the function.

    Parameters
    ----------
    **processors : dict
        Map from argument name -> processor function.

        A processor function takes three arguments: (func, argname, argvalue).
        `func` is the function for which we're processing args.
        `argname` is the name of the argument we're processing.
        `argvalue` is the value of the argument we're processing.

    Examples
    --------
    >>> def _ensure_tuple(func, argname, arg):
    ...     if isinstance(arg, tuple):
    ...         return arg
    ...     try:
    ...         return tuple(arg)
    ...     except TypeError:
    ...         raise TypeError(
    ...             "%s() expected argument '%s' to "
    ...             "be iterable, but got %s instead." % (
    ...                 func.__name__, argname, arg,
    ...             )
    ...         )
    >>> @preprocess(arg=_ensure_tuple)
    ... def foo(arg):
    ...     return arg
    >>> foo([1, 2, 3])
    (1, 2, 3)
    >>> foo(2)
    Traceback (most recent call last):
    ...
    TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.
    """
    if _unused :
        raise TypeError ( "preprocess() doesn't accept positional arguments" )
    def _decorator ( f ) :
        args , varargs , varkw , defaults = argspec = getargspec ( f )
        if defaults is None :
            defaults = ( )
        # Pad the positional args that lack defaults with a sentinel so that
        # names and defaults can be zipped positionally.
        no_defaults = ( NO_DEFAULT , ) * ( len ( args ) - len ( defaults ) )
        args_defaults = list ( zip ( args , no_defaults + defaults ) )
        if varargs :
            args_defaults . append ( ( varargs , NO_DEFAULT ) )
        if varkw :
            args_defaults . append ( ( varkw , NO_DEFAULT ) )
        # varargs/varkw are None when absent; drop None from the name set.
        argset = set ( args ) | { varargs , varkw } - { None }
        # Arguments can be declared as tuples in Python 2.
        if not all ( isinstance ( arg , str ) for arg in args ) :
            raise TypeError ( "Can't validate functions using tuple unpacking: %s" % ( argspec , ) )
        # Ensure that all processors map to valid names .
        bad_names = viewkeys ( processors ) - argset
        if bad_names :
            raise TypeError ( "Got processors for unknown arguments: %s." % bad_names )
        return _build_preprocessed_function ( f , processors , args_defaults , varargs , varkw , )
    return _decorator
def ensure_ndarray ( A , shape = None , uniform = None , ndim = None , size = None , dtype = None , kind = None ) :
    r"""Ensures A is an ndarray and does an assert_array with the given parameters.

    All keyword parameters are forwarded unchanged to ``assert_array``.

    Returns
    -------
    A : ndarray
        If A is already an ndarray, it is just returned. Otherwise this is an
        independent copy as an ndarray.

    Raises
    ------
    AssertionError
        If ``A`` cannot be converted to an ndarray, or if ``assert_array``
        rejects it.
    """
    if not isinstance ( A , np . ndarray ) :
        try :
            A = np . array ( A )
        except Exception :
            # Was a bare ``except:``, which would also swallow
            # KeyboardInterrupt/SystemExit; catch only real conversion errors.
            raise AssertionError ( 'Given argument cannot be converted to an ndarray:\n' + str ( A ) )
    assert_array ( A , shape = shape , uniform = uniform , ndim = ndim , size = size , dtype = dtype , kind = kind )
    return A
async def send_http_response ( writer , http_code : int , headers : List [ Tuple [ str , str ] ] , content : bytes , http_status : str = None ) -> None :
    """Generate an HTTP/1.1 response payload and send it to ``writer``."""
    # Fall back to the standard reason phrase when none was supplied.
    if not http_status :
        http_status = STATUS_CODES . get ( http_code , 'Unknown' )
    # Assemble status line, header lines, blank separator and body, then
    # join everything into a single buffer for one write call.
    parts = [ f'HTTP/1.1 {http_code} {http_status}\r\n' . encode ( ) ]
    parts . extend ( f'{k}: {v}\r\n' . encode ( ) for k , v in headers )
    parts . append ( b'\r\n' )
    parts . append ( content )
    response : bytes = b'' . join ( parts )
    # send payload
    writer . write ( response )
    await writer . drain ( )
def _download_images ( label_path : PathOrStr , img_tuples : list , max_workers : int = defaults . cpus , timeout : int = 4 ) -> FilePathList :
    """Downloads images in `img_tuples` to `label_path`.

    If the directory doesn't exist, it'll be created automatically.
    Uses `parallel` to speed things up in `max_workers` when the system has
    enough CPU cores. If something doesn't work, try setting up
    `max_workers=0` to debug.
    """
    # Make sure the destination directory exists before workers start writing.
    os . makedirs ( Path ( label_path ) , exist_ok = True )
    downloader = partial ( _download_single_image , label_path , timeout = timeout )
    parallel ( downloader , img_tuples , max_workers = max_workers )
    return get_image_files ( label_path )
def update_network ( self , network , name ) :
    '''Updates a network'''
    # Resolve the identifier first, then push the new name to neutron.
    net_id = self . _find_network_id ( network )
    body = { 'network' : { 'name' : name } }
    return self . network_conn . update_network ( network = net_id , body = body )
def scan_processes_fast ( self ) :
    """Populates the snapshot with running processes.

    Only the PID is retrieved for each process. Dead processes are removed.
    Threads and modules of living processes are ignored.

    Typically you don't need to call this method directly; if unsure use
    L{scan} instead.

    @note: This method uses the PSAPI. It may be faster for scanning,
        but some information may be missing, outdated or slower to obtain.
        This could be a good tradeoff under some circumstances.
    """
    # Current system pids vs. pids already present in the snapshot.
    new_pids = set ( win32 . EnumProcesses ( ) )
    old_pids = set ( compat . iterkeys ( self . __processDict ) )
    # Never track our own process.
    our_pid = win32 . GetCurrentProcessId ( )
    new_pids . discard ( our_pid )
    old_pids . discard ( our_pid )
    # Register processes that appeared since the last scan...
    for pid in new_pids . difference ( old_pids ) :
        self . _add_process ( Process ( pid ) )
    # ...and drop the ones that went away.
    for pid in old_pids . difference ( new_pids ) :
        self . _del_process ( pid )
def default ( value ) :
    """Default encoder for JSON.

    Handles Decimal (collapsed to int when integral), set (as list) and
    Binary (base64-encoded); anything else raises TypeError.
    """
    if isinstance ( value , Decimal ) :
        as_float = float ( value )
        # Integral decimals become ints so JSON doesn't render "2.0".
        return int ( as_float ) if int ( as_float ) == as_float else as_float
    if isinstance ( value , set ) :
        return list ( value )
    if isinstance ( value , Binary ) :
        return b64encode ( value . value )
    raise TypeError ( "Cannot encode %s value %r" % ( type ( value ) , value ) )
def modify_cache_parameter_group ( name , region = None , key = None , keyid = None , profile = None , ** args ) :
    '''Update a cache parameter group in place.

    Note that due to a design limitation in AWS, this function is not atomic -- a maximum of 20
    params may be modified in one underlying boto call. This means that if more than 20 params
    need to be changed, the update is performed in blocks of 20, which in turns means that if a
    later sub-call fails after an earlier one has succeeded, the overall update will be left
    partially applied.

    CacheParameterGroupName
        The name of the cache parameter group to modify.

    ParameterNameValues
        A [list] of {dicts}, each composed of a parameter name and a value, for the parameter
        update. At least one parameter/value pair is required.

    .. code-block:: yaml

        ParameterNameValues:
        - ParameterName: timeout
          # Amazon requires ALL VALUES to be strings...
          ParameterValue: "30"
        - ParameterName: appendonly
          # The YAML parser will turn a bare `yes` into a bool, which Amazon will then throw on...
          ParameterValue: "yes"

    Example:

    .. code-block:: bash

        salt myminion boto3_elasticache.modify_cache_parameter_group CacheParameterGroupName=myParamGroup ParameterNameValues='[{ParameterName: timeout, ParameterValue: "30"}, {ParameterName: appendonly, ParameterValue: "yes"}]'
    '''
    # Strip private (underscore-prefixed) kwargs that salt may inject.
    args = dict ( [ ( k , v ) for k , v in args . items ( ) if not k . startswith ( '_' ) ] )
    try :
        Params = args [ 'ParameterNameValues' ]
    except KeyError :
        # BUGFIX: a missing dict key raises KeyError, not ValueError -- the
        # original ``except ValueError`` could never fire, so the intended
        # SaltInvocationError was never raised.
        raise SaltInvocationError ( 'Invalid `ParameterNameValues` structure passed.' )
    # AWS limits each ModifyCacheParameterGroup call to 20 parameters, so
    # chunk the update (non-atomic; see docstring).
    while Params :
        args . update ( { 'ParameterNameValues' : Params [ : 20 ] } )
        Params = Params [ 20 : ]
        if not _modify_resource ( name , name_param = 'CacheParameterGroupName' , desc = 'cache parameter group' , res_type = 'cache_parameter_group' , region = region , key = key , keyid = keyid , profile = profile , ** args ) :
            return False
    return True
def update_ip_info ( self , since_days = 10 , save = False , force = False ) :
    """Update the IP info.

    Args:
        since_days (int): if checked less than this number of days ago,
            don't check again (default to 10 days).
        save (bool): whether to save anyway or not.
        force (bool): whether to update ip_info to last checked one.

    Returns:
        bool: check was run. IPInfo might not have been updated.
    """
    # If ip already checked
    try :
        last_check = IPInfoCheck . objects . get ( ip_address = self . client_ip_address )
        # If checked less than since_days ago, don't check again: reuse the
        # cached IPInfo from the previous check instead.
        since_last = datetime . date . today ( ) - last_check . date
        if since_last <= datetime . timedelta ( days = since_days ) :
            if not self . ip_info or ( self . ip_info != last_check . ip_info and force ) :
                self . ip_info = last_check . ip_info
                self . save ( )
                return True
            elif save :
                self . save ( )
            return False
        # Get or create ip_info object (the previous check is stale).
        ip_info , created = IPInfo . get_or_create_from_ip ( self . client_ip_address )
        # Update check time
        last_check . date = datetime . date . today ( )
        last_check . save ( )
        # Maybe data changed since the last check.
        if created :
            last_check . ip_info = ip_info
            self . ip_info = ip_info
            self . save ( )
            return True
        elif save :
            self . save ( )
        return False
    except IPInfoCheck . DoesNotExist :
        # Else if ip never checked, check it and set ip_info.
        self . ip_info = IPInfoCheck . check_ip ( self . client_ip_address )
        self . save ( )
        return True
def getRemoteObject ( self , busName , objectPath , interfaces = None , replaceKnownInterfaces = False ) :
    """Creates a L{objects.RemoteDBusObject} instance to represent the
    specified DBus object. If explicit interfaces are not supplied, DBus
    object introspection will be used to obtain them automatically.

    @param interfaces: May be None, a single value, or a list of string
        interface names and/or instances of L{interface.DBusInterface}.
        If None or any of the specified interface names are unknown, full
        introspection will be attempted. If interfaces consists solely of
        L{interface.DBusInterface} instances and/or known interface names,
        no introspection will be performed.

    @rtype: L{twisted.internet.defer.Deferred}
    @returns: A deferred to a L{objects.RemoteDBusObject} instance
        representing the remote object
    """
    # Delegate entirely to the object handler, which decides whether
    # introspection is needed.
    return self . objHandler . getRemoteObject (
        busName ,
        objectPath ,
        interfaces ,
        replaceKnownInterfaces ,
    )
def stop ( self ) :
    """Close websocket connection."""
    # Mark ourselves stopped before tearing the transport down.
    self . state = STATE_STOPPED
    transport = self . transport
    if transport :
        transport . close ( )
def classname ( ob ) :
    """Get the object's class's name as package.module.Class"""
    import inspect
    # Classes are named directly; instances go through their type.
    cls = ob if inspect . isclass ( ob ) else ob . __class__
    return '{0}.{1}' . format ( cls . __module__ , cls . __name__ )
def ___replace_adjective_maybe ( sentence , counts ) :
    """Find and replace all instances of #ADJECTIVE_MAYBE.

    A single coin flip decides the fate of *every* placeholder in the
    sentence: either all are replaced with adjectives, or all are removed.

    :param sentence: sentence possibly containing the placeholder (may be None)
    :param counts:
    """
    # NOTE: the flip happens before the None check, matching the original
    # call order of random.randint.
    insert_adjective = random . randint ( 0 , 1 ) % 2 == 0
    if sentence is None :
        return sentence
    while '#ADJECTIVE_MAYBE' in sentence :
        if insert_adjective :
            sentence = sentence . replace ( '#ADJECTIVE_MAYBE' , ' ' + str ( __get_adjective ( counts ) ) , 1 )
        else :
            sentence = sentence . replace ( '#ADJECTIVE_MAYBE' , '' , 1 )
    return sentence
async def raw_command ( self , service : str , method : str , params : Any ) :
    """Call an arbitrary method with given parameters.

    This is useful for debugging and trying out commands before
    implementing them properly.

    :param service: Service, use list(self.services) to get a list of availables.
    :param method: Method to call.
    :param params: Parameters as a python object (e.g., dict, list)
    :return: Raw JSON response from the device.
    """
    _LOGGER . info ( "Calling %s.%s(%s)" , service , method , params )
    # Look up the bound method on the service and await it.
    handler = self . services [ service ] [ method ]
    return await handler ( params )
def _pre_md5_skip_on_check ( self , lpath , rfile ) :
    # type: (Downloader, pathlib.Path,
    #        blobxfer.models.azure.StorageEntity) -> None
    """Queue a remote file for an offloaded pre-download MD5 comparison.

    :param Downloader self: this
    :param pathlib.Path lpath: local path
    :param blobxfer.models.azure.StorageEntity rfile: remote file
    """
    md5 = blobxfer . models . metadata . get_md5_from_metadata ( rfile )
    key = blobxfer . operations . download . Downloader . create_unique_transfer_operation_id ( rfile )
    with self . _md5_meta_lock :
        self . _md5_map [ key ] = rfile
    slpath = str ( lpath )
    if rfile . vectored_io is not None :
        # Vectored IO: the MD5 must be checked against a temporary
        # descriptor view of the final (merged) file.
        view , _ = blobxfer . models . download . Descriptor . generate_view ( rfile )
        fpath = str ( blobxfer . models . download . Descriptor . convert_vectored_io_slice_to_final_path_name ( lpath , rfile ) )
    else :
        view = None
        fpath = slpath
    self . _md5_offload . add_localfile_for_md5_check ( key , slpath , fpath , md5 , rfile . mode , view )
def generate_pipeline_code ( pipeline_tree , operators ) :
    """Generate code specific to the construction of the sklearn Pipeline.

    Parameters
    ----------
    pipeline_tree: list
        List of operators in the current optimized pipeline

    Returns
    -------
    Source code for the sklearn pipeline
    """
    # Render each operator as a step, then indent them inside make_pipeline().
    steps = _process_operator ( pipeline_tree , operators )
    indented_steps = _indent ( ",\n" . join ( steps ) , 4 )
    return "make_pipeline(\n{STEPS}\n)" . format ( STEPS = indented_steps )
def create ( self , model_name ) :
    """Create a model.

    Args:
        model_name: the short name of the model, such as "iris".

    Returns:
        If successful, returns information of the model, such as
        {u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}

    Raises:
        If the model creation failed.
    """
    parent = 'projects/' + self . _project_id
    request = self . _api . projects ( ) . models ( ) . create ( body = { 'name' : model_name } , parent = parent )
    # Model creation is instant; any failure surfaces as an exception
    # raised by execute().
    return request . execute ( )
def update_fixed_order ( self ) :
    "after pruning fixed order needs update to match new nnodes/ntips."
    # Tips order may be fixed for multi-tree plotting (default None).
    fixed_order = self . ttree . _fixed_order
    self . ttree_fixed_order = None
    self . ttree_fixed_idx = list ( range ( self . ttree . ntips ) )
    if fixed_order :
        # Keep only labels that survived pruning, then re-apply the order.
        surviving = self . ttree . get_tip_labels ( )
        self . ttree . _set_fixed_order ( [ i for i in fixed_order if i in surviving ] )
    else :
        self . ttree . _fixed_idx = list ( range ( self . ttree . ntips ) )
def __view_to_selected_graphics ( self , data_and_metadata : DataAndMetadata . DataAndMetadata ) -> None :
    """Change the view to encompass the selected graphic intervals."""
    # Collect the intervals of every selected IntervalGraphic.
    selection = self . __graphic_selection
    intervals = [
        graphic . interval
        for graphic_index , graphic in enumerate ( self . __graphics )
        if selection . contains ( graphic_index ) and isinstance ( graphic , Graphics . IntervalGraphic )
    ]
    self . __view_to_intervals ( data_and_metadata , intervals )
def overall_CEN_calc ( classes , TP , TOP , P , CEN_dict , modified = False ) :
    """Calculate Overall_CEN (Overall confusion entropy).

    :param classes: classes
    :type classes: list
    :param TP: true positive dict for all classes
    :type TP: dict
    :param TOP: test outcome positive
    :type TOP: dict
    :param P: condition positive
    :type P: dict
    :param CEN_dict: CEN dictionary for each class
    :type CEN_dict: dict
    :param modified: modified mode flag
    :type modified: bool
    :return: Overall_CEN (MCEN) as float
    """
    try :
        # Weighted sum of per-class CEN, weighted by the convex combination.
        return sum (
            convex_combination ( classes , TP , TOP , P , class_name , modified ) * CEN_dict [ class_name ]
            for class_name in classes
        )
    except Exception :
        # Keep the library's convention of returning the string "None" on
        # any computation failure.
        return "None"
def update_stats ( self , stats , value , _type , sample_rate = 1 ) :
    """Pipeline function that formats data, samples it and passes to send()

    >>> client = StatsdClient()
    >>> client.update_stats('example.update_stats', 73, "c", 0.9)
    """
    # format -> sample -> send, as separate named stages.
    formatted = self . format ( stats , value , _type , self . prefix )
    sampled = self . sample ( formatted , sample_rate )
    self . send ( sampled , self . addr )
def as_array ( self , include_missing = False , weighted = True , include_transforms_for_dims = None , prune = False , ) :
    """Return `ndarray` representing cube values.

    Returns the tabular representation of the crunch cube. The returned
    array has the same number of dimensions as the cube. E.g. for a
    cross-tab of a categorical and numerical variable, the resulting cube
    has two dimensions.

    *include_missing* (bool): Include rows/cols for missing values.

    Example 1 (Categorical x Categorical)::

        >>> cube = CrunchCube(response)
        >>> cube.as_array()
        np.array([
            [5, 2],
            [5, 3],
        ])

    Example 2 (Categorical x Categorical, include missing values)::

        >>> cube = CrunchCube(response)
        >>> cube.as_array(include_missing=True)
        np.array([
            [5, 3, 2, 0],
            [5, 2, 3, 0],
            [0, 0, 0, 0],
        ])
    """
    array = self . _as_array (
        include_missing = include_missing ,
        weighted = weighted ,
        include_transforms_for_dims = include_transforms_for_dims ,
    )
    # --- prune array if pruning was requested ---
    if prune :
        array = self . _prune_body ( array , transforms = include_transforms_for_dims )
    # Drop multiple-response categorical dimensions before returning.
    return self . _drop_mr_cat_dims ( array )
def _interfaces_ifconfig ( out ) :
    """Uses ifconfig to return a dictionary of interfaces with various information
    about each (up/down state, ip address, netmask, and hwaddr)

    out: raw text output of ``ifconfig`` (GNU or BSD flavor).
    Returns: dict mapping interface name -> data dict with optional
    'hwaddr', 'inet', 'inet6' keys and a boolean 'up' flag.
    """
    ret = dict ( )
    # Patterns covering both Linux (HWaddr/inet addr:/Mask:) and BSD/macOS
    # (ether/inet/netmask) ifconfig output formats.
    piface = re . compile ( r'^([^\s:]+)' )
    pmac = re . compile ( '.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)' )
    pip = re . compile ( r'.*?(?:inet addr:|inet )(.*?)\s' )
    pip6 = re . compile ( '.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)' )
    pmask = re . compile ( r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))' )
    pmask6 = re . compile ( r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*' )
    pupdown = re . compile ( 'UP' )
    pbcast = re . compile ( r'.*?(?:Bcast:|broadcast )([\d\.]+)' )
    # Each interface block starts at column 0; continuation lines are indented.
    groups = re . compile ( '\r?\n(?=\\S)' ) . split ( out )
    for group in groups :
        data = dict ( )
        iface = ''
        updown = False
        for line in group . splitlines ( ) :
            miface = piface . match ( line )
            mmac = pmac . match ( line )
            mip = pip . match ( line )
            mip6 = pip6 . match ( line )
            mupdown = pupdown . search ( line )
            if miface :
                iface = miface . group ( 1 )
            if mmac :
                data [ 'hwaddr' ] = mmac . group ( 1 )
            if mip :
                if 'inet' not in data :
                    data [ 'inet' ] = list ( )
                addr_obj = dict ( )
                addr_obj [ 'address' ] = mip . group ( 1 )
                mmask = pmask . match ( line )
                if mmask :
                    if mmask . group ( 1 ) :
                        # Hex netmask (e.g. 0xffffff00): convert to dotted quad.
                        mmask = _number_of_set_bits_to_ipv4_netmask ( int ( mmask . group ( 1 ) , 16 ) )
                    else :
                        mmask = mmask . group ( 2 )
                    addr_obj [ 'netmask' ] = mmask
                mbcast = pbcast . match ( line )
                if mbcast :
                    addr_obj [ 'broadcast' ] = mbcast . group ( 1 )
                data [ 'inet' ] . append ( addr_obj )
            if mupdown :
                updown = True
            if mip6 :
                if 'inet6' not in data :
                    data [ 'inet6' ] = list ( )
                addr_obj = dict ( )
                # Either of the two alternation groups may have matched.
                addr_obj [ 'address' ] = mip6 . group ( 1 ) or mip6 . group ( 2 )
                mmask6 = pmask6 . match ( line )
                if mmask6 :
                    addr_obj [ 'prefixlen' ] = mmask6 . group ( 1 ) or mmask6 . group ( 2 )
                data [ 'inet6' ] . append ( addr_obj )
        data [ 'up' ] = updown
        ret [ iface ] = data
        del data
    return ret
def _dicts_to_columns ( dicts ) :
    """Given a List of Dictionaries with uniform keys, returns a single
    Dictionary with keys holding a List of values matching the key in the
    original List.

        [{'name': 'Field Museum', 'location': 'Chicago'},
         {'name': 'Epcot', 'location': 'Orlando'}]
        ->
        {'name': ['Field Museum', 'Epcot'],
         'location': ['Chicago', 'Orlando']}
    """
    # Seed the result with one empty column per key of the first record.
    result = { key : [ ] for key in dicts [ 0 ] . keys ( ) }
    for record in dicts :
        for key , value in record . items ( ) :
            result [ key ] . append ( value )
    return result
def resolve_dns ( opts , fallback = True ) :
    '''Resolves the master_ip and master_uri options

    opts: minion configuration dict.
    fallback: when True and retries are disabled, fall back to 127.0.0.1
        if the master address cannot be resolved.
    Returns a dict containing master_ip, master_uri and, when configured,
    source_ip / source_ret_port / source_publish_port.
    '''
    ret = { }
    check_dns = True
    # A purely-local file client never contacts a master; skip DNS entirely.
    if ( opts . get ( 'file_client' , 'remote' ) == 'local' and not opts . get ( 'use_master_when_local' , False ) ) :
        check_dns = False
    # Since salt.log is imported below, salt.utils.network needs to be imported here as well
    import salt . utils . network
    if check_dns is True :
        try :
            if opts [ 'master' ] == '' :
                raise SaltSystemExit
            ret [ 'master_ip' ] = salt . utils . network . dns_check ( opts [ 'master' ] , int ( opts [ 'master_port' ] ) , True , opts [ 'ipv6' ] , attempt_connect = False )
        except SaltClientError :
            # Resolution failed: either retry (forever, or retry_dns_count
            # times) or fall back to localhost.
            retry_dns_count = opts . get ( 'retry_dns_count' , None )
            if opts [ 'retry_dns' ] :
                while True :
                    if retry_dns_count is not None :
                        if retry_dns_count == 0 :
                            raise SaltMasterUnresolvableError
                        retry_dns_count -= 1
                    import salt . log
                    msg = ( 'Master hostname: \'{0}\' not found or not responsive. ' 'Retrying in {1} seconds' ) . format ( opts [ 'master' ] , opts [ 'retry_dns' ] )
                    if salt . log . setup . is_console_configured ( ) :
                        log . error ( msg )
                    else :
                        print ( 'WARNING: {0}' . format ( msg ) )
                    time . sleep ( opts [ 'retry_dns' ] )
                    try :
                        ret [ 'master_ip' ] = salt . utils . network . dns_check ( opts [ 'master' ] , int ( opts [ 'master_port' ] ) , True , opts [ 'ipv6' ] , attempt_connect = False )
                        break
                    except SaltClientError :
                        pass
            else :
                if fallback :
                    ret [ 'master_ip' ] = '127.0.0.1'
                else :
                    raise
        except SaltSystemExit :
            # An empty/unset master address is a hard configuration error.
            unknown_str = 'unknown address'
            master = opts . get ( 'master' , unknown_str )
            if master == '' :
                master = unknown_str
            if opts . get ( '__role' ) == 'syndic' :
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' 'Set \'syndic_master\' value in minion config.' . format ( master )
            else :
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' 'Set \'master\' value in minion config.' . format ( master )
            log . error ( err )
            raise SaltSystemExit ( code = 42 , msg = err )
    else :
        ret [ 'master_ip' ] = '127.0.0.1'
    if 'master_ip' in ret and 'master_ip' in opts :
        if ret [ 'master_ip' ] != opts [ 'master_ip' ] :
            log . warning ( 'Master ip address changed from %s to %s' , opts [ 'master_ip' ] , ret [ 'master_ip' ] )
    # Optionally pin the outgoing connection to a named interface or an
    # explicit source address.
    if opts [ 'source_interface_name' ] :
        log . trace ( 'Custom source interface required: %s' , opts [ 'source_interface_name' ] )
        interfaces = salt . utils . network . interfaces ( )
        log . trace ( 'The following interfaces are available on this Minion:' )
        log . trace ( interfaces )
        if opts [ 'source_interface_name' ] in interfaces :
            if interfaces [ opts [ 'source_interface_name' ] ] [ 'up' ] :
                addrs = interfaces [ opts [ 'source_interface_name' ] ] [ 'inet' ] if not opts [ 'ipv6' ] else interfaces [ opts [ 'source_interface_name' ] ] [ 'inet6' ]
                ret [ 'source_ip' ] = addrs [ 0 ] [ 'address' ]
                log . debug ( 'Using %s as source IP address' , ret [ 'source_ip' ] )
            else :
                log . warning ( 'The interface %s is down so it cannot be used as source to connect to the Master' , opts [ 'source_interface_name' ] )
        else :
            log . warning ( '%s is not a valid interface. Ignoring.' , opts [ 'source_interface_name' ] )
    elif opts [ 'source_address' ] :
        ret [ 'source_ip' ] = salt . utils . network . dns_check ( opts [ 'source_address' ] , int ( opts [ 'source_ret_port' ] ) , True , opts [ 'ipv6' ] , attempt_connect = False )
        log . debug ( 'Using %s as source IP address' , ret [ 'source_ip' ] )
    if opts [ 'source_ret_port' ] :
        ret [ 'source_ret_port' ] = int ( opts [ 'source_ret_port' ] )
        log . debug ( 'Using %d as source port for the ret server' , ret [ 'source_ret_port' ] )
    if opts [ 'source_publish_port' ] :
        ret [ 'source_publish_port' ] = int ( opts [ 'source_publish_port' ] )
        log . debug ( 'Using %d as source port for the master pub' , ret [ 'source_publish_port' ] )
    ret [ 'master_uri' ] = 'tcp://{ip}:{port}' . format ( ip = ret [ 'master_ip' ] , port = opts [ 'master_port' ] )
    log . debug ( 'Master URI: %s' , ret [ 'master_uri' ] )
    return ret
def _ReadFileEntries ( self , file_object ) :
    """Reads the file entries from the cpio archive.

    Populates self._file_entries keyed by entry path; stops at the archive
    trailer entry.

    Args:
        file_object (FileIO): file-like object.
    """
    self . _file_entries = { }
    file_offset = 0
    # A _file_size of 0 means the size is unknown; in that case rely on the
    # trailer entry to terminate the loop.
    while file_offset < self . _file_size or self . _file_size == 0 :
        file_entry = self . _ReadFileEntry ( file_object , file_offset )
        file_offset += file_entry . size
        # 'TRAILER!!!' marks the end of a cpio archive.
        if file_entry . path == 'TRAILER!!!' :
            break
        if file_entry . path in self . _file_entries :
            # TODO: alert on file entries with duplicate paths?
            continue
        self . _file_entries [ file_entry . path ] = file_entry
def cmd ( send , msg , args ) :
    """Converts text into NATO form.

    Syntax: {command} <text>
    """
    if not msg :
        send ( "NATO what?" )
        return
    nato = gen_nato ( msg )
    # Refuse to spam the channel with overly long output.
    if len ( nato ) <= 100 :
        send ( nato )
    else :
        send ( "Your NATO is too long. Have you considered letters?" )
def export_olx ( self , tarball , root_path ) :
    """if sequestered, only export the assets

    Otherwise export this composition as an OLX xml file under
    ``root_path/<genus tag>/`` along with its children and assets, and
    return the path of the written xml file (None when sequestered).
    """
    def append_asset_to_soup_and_export ( asset_ ) :
        # Items export a single file; other assets may export one file per
        # asset content. Either way, add a matching child tag to the soup.
        if isinstance ( asset_ , Item ) :
            try :
                unique_url = asset_ . export_olx ( tarball , root_path )
            except AttributeError :
                pass
            else :
                unique_name = get_file_name_without_extension ( unique_url )
                asset_type = asset_ . genus_type . identifier
                asset_tag = my_soup . new_tag ( asset_type )
                asset_tag [ 'url_name' ] = unique_name
                getattr ( my_soup , my_tag ) . append ( asset_tag )
        else :
            try :
                unique_urls = asset_ . export_olx ( tarball , root_path )
            except AttributeError :
                pass
            else :
                for index , ac in enumerate ( asset_ . get_asset_contents ( ) ) :
                    asset_type = ac . genus_type . identifier
                    unique_url = unique_urls [ index ]
                    unique_name = get_file_name_without_extension ( unique_url )
                    asset_tag = my_soup . new_tag ( asset_type )
                    asset_tag [ 'url_name' ] = unique_name
                    getattr ( my_soup , my_tag ) . append ( asset_tag )
    def get_file_name_without_extension ( filepath ) :
        # 'foo/bar/baz.xml' -> 'baz'
        return filepath . split ( '/' ) [ - 1 ] . replace ( '.xml' , '' )
    my_path = None
    if self . my_osid_object . is_sequestered ( ) : # just export assets
        for asset in self . assets :
            try :
                asset . export_olx ( tarball , root_path )
            except AttributeError :
                pass
    else : # also add to the /<tag>/ folder
        my_tag = self . my_osid_object . genus_type . identifier
        expected_name = self . get_unique_name ( tarball , self . url , my_tag , root_path )
        my_path = '{0}{1}/{2}.xml' . format ( root_path , my_tag , expected_name )
        my_soup = BeautifulSoup ( '<' + my_tag + '/>' , 'xml' )
        getattr ( my_soup , my_tag ) [ 'display_name' ] = self . my_osid_object . display_name . text
        if my_tag == 'split_test' :
            getattr ( my_soup , my_tag ) [ 'group_id_to_child' ] = self . my_osid_object . group_id_to_child
            getattr ( my_soup , my_tag ) [ 'user_partition_id' ] = self . my_osid_object . user_partition_id . text
        rm = self . my_osid_object . _get_provider_manager ( 'REPOSITORY' )
        if self . my_osid_object . _proxy is None :
            cls = rm . get_composition_lookup_session ( )
        else :
            cls = rm . get_composition_lookup_session ( proxy = self . my_osid_object . _proxy )
        # Need the unsequestered view to see sequestered child compositions.
        cls . use_federated_repository_view ( )
        cls . use_unsequestered_composition_view ( )
        for child_id in self . my_osid_object . get_child_ids ( ) :
            child = cls . get_composition ( child_id )
            if child . is_sequestered ( ) : # append its assets here
                for asset in child . assets :
                    append_asset_to_soup_and_export ( asset )
            else :
                child_type = child . genus_type . identifier
                child_tag = my_soup . new_tag ( child_type )
                child_path = child . export_olx ( tarball , root_path )
                if child_path is not None :
                    child_tag [ 'url_name' ] = get_file_name_without_extension ( child_path )
                getattr ( my_soup , my_tag ) . append ( child_tag )
        for asset in self . assets :
            append_asset_to_soup_and_export ( asset )
        self . write_to_tarfile ( tarball , my_path , my_soup )
    return my_path
def get_error ( time , x , sets , err_type = 'block' , tool = 'gmx analyze' ) :
    """To estimate error using block averaging method

    .. warning::
        To calculate errors by using ``error='acf'`` or ``error='block'``,
        GROMACS tool ``g_analyze`` or ``gmx analyze`` should be present in ``$PATH``.

    Parameters
    ----------
    time : 1D list or array
        :attr:`DNA.time`
    x : 2D list or array
        Shape of (nset, nframe); where *nset* is number of set and *nframe* is
        total number of frames. *nframe* should be equal to length of time
        list/array
    sets : int
        Number of sets (*nset*)
    err_type : str
        Error estimation by autocorrelation method ``err_type='acf'`` or
        block averaging method ``err_type='block'``
    tool : str
        GROMACS tool to calculate error. In older versions it is ``g_analyze``
        while in newer versions (above 2016) it is ``gmx analyze``.

    Returns
    -------
    error : 1D array
        Of length = number of sets (*nset*)

    Raises
    ------
    ValueError
        If the number of frames in ``time`` does not match a row of ``x``.
    """
    # Validate that every data set has one value per time frame.
    for i in range ( sets ) :
        if ( len ( time ) != len ( x [ i ] ) ) :
            raise ValueError ( '\nError: number of frame in time {0} mismatched with {1} of x[{2}]!!\n' . format ( len ( time ) , len ( x [ i ] ) , i ) )
    if not ( ( err_type == 'block' ) or ( err_type == 'acf' ) ) :
        print ( '\nWarning: Method {0} is not implemented. Switching to \'acf\'.\n' . format ( err_type ) )
        err_type = 'acf'
    error = [ ]
    # Random file names avoid collisions when several analyses run in the
    # same directory. (An unused ``char_set`` local was removed here.)
    name = '' . join ( random . sample ( string . ascii_lowercase , 10 ) )
    filename = name + '.xvg'
    eefile = 'ee_' + name + '.xvg'
    acfile = 'acf_' + name + '.xvg'
    # Write time and all data sets as columns of an .xvg file for GROMACS.
    fout = open ( filename , 'w' )
    for i in range ( len ( time ) ) :
        fout . write ( '{0}' . format ( time [ i ] ) )
        for j in range ( sets ) :
            fout . write ( ' {0}' . format ( x [ j ] [ i ] ) )
        fout . write ( "\n" )
    fout . close ( )
    command = '{0} -f {1} -ee {2} -ac {3} -fitfn exp' . format ( tool , filename , eefile , acfile )
    try :
        p = sub . Popen ( command . split ( ) , stdout = sub . PIPE , stderr = sub . PIPE , universal_newlines = True )
        out , outputerror = p . communicate ( )
    except Exception as e :
        # Clean up the input file before propagating the failure.
        os . remove ( filename )
        raise e
    lines = out . split ( '\n' )
    if ( err_type == 'block' ) :
        # Block averaging: parse the per-set error estimate ("Set ...") lines.
        for line in lines :
            if ( re . match ( 'Set' , line ) ) :
                temp = line . split ( )
                error . append ( float ( temp [ 3 ] ) )
    if ( err_type == 'acf' ) :
        # Autocorrelation: derive per-set errors from the correlation times.
        acf_time = [ ]
        for line in lines :
            if re . match ( 'COR: Correlation time' , line ) is not None :
                temp = line . split ( '=' )
                acf_time . append ( abs ( float ( temp [ 1 ] . split ( ) [ 0 ] ) ) )
        total_time = float ( time [ - 1 ] ) - float ( time [ 0 ] )
        dt = total_time / len ( time )
        for i in range ( sets ) :
            if ( acf_time [ i ] >= dt ) :
                # Effective number of independent samples from the ACF time.
                n_indp = total_time / acf_time [ i ]
                tmp_err = np . std ( x [ i ] ) / np . sqrt ( n_indp )
            else :
                tmp_err = np . std ( x [ i ] ) / np . sqrt ( len ( time ) )
            error . append ( tmp_err )
    # Remove the temporary GROMACS input/output files.
    os . remove ( filename )
    os . remove ( eefile )
    os . remove ( acfile )
    if os . path . isfile ( 'fitlog.log' ) :
        os . remove ( 'fitlog.log' )
    return np . array ( error )
def _qr_code(self, instance):
    """Render HTML with an "otpauth://..." provisioning link and its QR code."""
    # An unsaved instance has no related user yet -> nothing to render.
    try:
        related_user = instance.user
    except ObjectDoesNotExist:
        return _("Please save first!")

    # FIXME
    site = get_current_site(self.request)
    account = related_user.username
    # Shared secret must be base32 for the otpauth URI format.
    b32_secret = six.text_type(base64.b32encode(instance.bin_key), encoding="ASCII")
    uri_params = {
        "site_name": urlquote(site.name),
        "username": urlquote(account),
        "secret": b32_secret,
        # NOTE(review): issuer is set to the username, not the site name -- confirm intended.
        "issuer": urlquote(account),
    }
    key_uri = ("otpauth://totp/secure-login:%(site_name)s-%(username)s?secret=%(secret)s&issuer=%(issuer)s") % uri_params
    return render_to_string("secure_js_login/qr_info.html", {"key_uri": key_uri})
def _relative_uris ( self , uri_list ) :
"""if uris in list are relative , re - relate them to our basedir""" | return [ u for u in ( self . _relative ( uri ) for uri in uri_list ) if u ] |
def make_data(n, prob):
    """make_data: prepare data for a random graph

    Build a random graph on vertices 1..n where each of the n*(n-1)/2
    possible edges is included independently with probability ``prob``.

    Parameters:
        - n: number of vertices
        - prob: probability of existence of an edge, for each pair of vertices

    Returns a tuple (V, E) with a list of vertices and a list of edges;
    each edge is a pair (i, j) with i < j.
    """
    # Materialize as a list so the return value matches the documented
    # contract ("a list of vertices") -- Python 3 `range` is a lazy
    # sequence, not a list.
    V = list(range(1, n + 1))
    # i < j ensures each unordered pair is considered exactly once.
    E = [(i, j) for i in V for j in V if i < j and random.random() < prob]
    return V, E
def count_seven_in_multiples(n: int) -> int:
    """Count occurrences of the digit 7 in integers below ``n`` that are
    divisible by 11 or 13.

    Args:
        n (int): Exclusive upper limit of the range to scan.

    Returns:
        int: Total number of '7' digits found.
    """
    # Sum the per-number digit counts over the qualifying values in [0, n).
    return sum(
        str(value).count("7")
        for value in range(n)
        if value % 11 == 0 or value % 13 == 0
    )
def prepare(self, variables):
    """Initialize all steps in this recipe using their parameters.

    Args:
        variables (dict): A dictionary of global variable definitions
            that may be used to replace or augment the parameters given
            to each step.

    Returns:
        list of RecipeActionObject like instances: The list of instantiated
        steps that can be used to execute this recipe.
    """
    # Explicit None check: a caller-supplied (possibly empty) mapping is
    # used as-is.
    env = {} if variables is None else variables
    # Each entry of self.steps is (step class, params, resources, files);
    # only the first two matter for instantiation.
    return [
        step_cls(_complete_parameters(step_params, env))
        for step_cls, step_params, _resources, _files in self.steps
    ]
def distance(lons1, lats1, depths1, lons2, lats2, depths2):
    """Calculate a distance between two points (or collections of points)
    considering points' depth.

    Calls :func:`geodetic_distance`, finds the "vertical" distance between
    points by subtracting one depth from another and combines both using
    the Pythagorean theorem.

    :returns:
        Distance in km: the square root of the sum of squares of the
        :func:`geodetic <geodetic_distance>` distance and the vertical
        distance (the difference between depths).
    """
    horizontal = geodetic_distance(lons1, lats1, lons2, lats2)
    vertical = depths1 - depths2
    # Pythagoras: treat the surface distance and the depth difference as legs.
    return numpy.sqrt(horizontal ** 2 + vertical ** 2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.