signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def adjustSize(self):
    """Adjust the minimum size of this node so its contents fit.

    Computes a width from the rendered display name (plus icon, if any),
    snaps it toward the scene's grid cell size, clamps the result by the
    node's maximum bounds (when set), stores the result as the minimum
    size, and rebuilds the node.
    """
    # Grid cell used for snapping: twice the scene's cell width.
    cell = self.scene().cellWidth() * 2
    minheight = cell
    minwidth = 2 * cell
    # fit to the grid size: measure the display name with the app font
    metrics = QFontMetrics(QApplication.font())
    width = metrics.width(self.displayName()) + 20
    # NOTE(review): `(cell % width)` looks suspicious -- grid snapping
    # would conventionally be written in terms of `width % cell`.
    # Preserved as-is; confirm the intended rounding behavior.
    width = ((width / cell) * cell) + (cell % width)
    height = self.rect().height()
    # adjust for the icon (extra width, and at least the icon's height)
    icon = self.icon()
    if icon and not icon.isNull():
        width += self.iconSize().width() + 2
        height = max(height, self.iconSize().height() + 2)
    w = max(width, minwidth)
    h = max(height, minheight)
    # Respect explicit maximum bounds when they are set.
    max_w = self.maximumWidth()
    max_h = self.maximumHeight()
    if max_w is not None:
        w = min(w, max_w)
    if max_h is not None:
        h = min(h, max_h)
    self.setMinimumWidth(w)
    self.setMinimumHeight(h)
    self.rebuild()
def get_all_children(self, include_self=False):
    """Return a queryset of every direct and indirect subsidiary.

    Recursively unions the subsidiary trees of each direct child.  When
    ``include_self`` is exactly ``True``, this company is included in the
    result as well.
    """
    direct_links = Ownership.objects.filter(parent=self)
    result = Company.objects.filter(child__in=direct_links)
    # Fold each direct child's own subtree into the running union.
    for child in result:
        result = result | child.get_all_children()
    if include_self is True:
        result = result | Company.objects.filter(id=self.id)
    return result
def hide_busy(self):
    """Restore the UI after processing: unlock buttons and cursor.

    Companion to the "busy" state -- hides the progress bar, re-enables
    the Next/Back/Cancel wizard buttons, repaints the parent widget and
    restores the normal cursor.
    """
    self.progress_bar.hide()
    parent = self.parent
    for button in (parent.pbnNext, parent.pbnBack, parent.pbnCancel):
        button.setEnabled(True)
    parent.repaint()
    disable_busy_cursor()
def spectral_embedding(geom, n_components=8, eigen_solver='auto', random_state=None, drop_first=True, diffusion_maps=False, diffusion_time=0, solver_kwds=None):
    """Project the sample on the first eigenvectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose principal eigenvectors (associated with the smallest eigenvalues)
    represent the embedding coordinates of the data.  The Laplacian must be
    symmetric so that the eigenvector decomposition works as expected; this
    is ensured by the default settings (see geometry.py).

    The data and geometric parameters are passed via a Geometry object,
    which also computes the Laplacian.  By default the 'geometric'
    ("debiased"/"renormalized", alpha=1) Laplacian of [Coifman and Lafon,
    2006] is used.  With diffusion_maps=False this is a modification of the
    Laplacian Eigenmaps algorithm of [Belkin and Niyogi, 2002]; with
    laplacian_method='symmetricnormalized' it is exactly Laplacian
    Eigenmaps; with diffusion_maps=True and diffusion_time > 0 it is the
    Diffusion Maps algorithm of [Coifman and Lafon, 2006].

    Parameters
    ----------
    geom : a Geometry object from megaman.embedding.geometry
    n_components : integer, optional
        The dimension of the projection subspace.
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto'   : choose the best method for the input data.
        'dense'  : standard dense eigenvalue decomposition; avoid for
                   large problems.
        'arpack' : Arnoldi iteration in shift-invert mode; can be unstable
                   for some problems -- try several random seeds.
        'lobpcg' : Locally Optimal Block Preconditioned Conjugate Gradient;
                   for large symmetric positive definite problems.
        'amg'    : requires pyamg; fast on very large sparse problems but
                   may be unstable.
    random_state : int seed, RandomState instance, or None (default)
        Pseudo random number generator used to initialize the lobpcg
        eigenvector decomposition when eigen_solver == 'amg'.
    drop_first : bool, optional, default=True
        Whether to drop the first (constant) eigenvector.  Should be True
        for spectral embedding on a connected graph, False for spectral
        clustering.
    diffusion_maps : boolean, optional
        Whether to rescale the embedding coordinates by the eigenvalues to
        the power diffusion_time (diffusion-map scaling).
    diffusion_time : numeric, optional
        If diffusion_maps is True, eigenvectors are rescaled by
        (1 - lambda)^diffusion_time; acts as a scale parameter.
        diffusion_time = 0 empirically provides a reasonable balance from a
        clustering perspective.
    solver_kwds : dict, optional
        Additional keyword arguments passed to the selected eigen_solver.

    Returns
    -------
    embedding : array, shape (n_samples, n_components)
        The reduced samples.
    eigenvalues : array
        The (sorted) eigenvalues corresponding to the embedding.
    eigenvectors : array
        The eigenvectors on the original Laplacian space.

    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component.  If the graph has many components, the first few
    eigenvectors will simply uncover those components.

    References
    ----------
    * http://en.wikipedia.org/wiki/LOBPCG
    * Knyazev, "Toward the Optimal Preconditioned Eigensolver: Locally
      Optimal Block Preconditioned Conjugate Gradient Method",
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    random_state = check_random_state(random_state)
    # Lazily build the affinity matrix if the Geometry has not done so yet.
    if geom.affinity_matrix is None:
        geom.compute_affinity_matrix()
    if not _graph_is_connected(geom.affinity_matrix):
        warnings.warn("Graph is not fully connected: "
                      "spectral embedding may not work as expected.")
    if geom.laplacian_matrix is None:
        laplacian = geom.compute_laplacian_matrix(copy=False, return_lapsym=True)
    else:
        laplacian = geom.laplacian_matrix
    n_nodes = laplacian.shape[0]
    lapl_type = geom.laplacian_method
    eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
                                                   size=laplacian.shape[0],
                                                   nvec=n_components + 1)
    re_normalize = False
    PD_solver = False
    if eigen_solver in ['amg', 'lobpcg']:
        # these methods require a symmetric positive definite matrix!
        epsilon = 2
        PD_solver = True
        if lapl_type not in ['symmetricnormalized', 'unnormalized']:
            re_normalize = True
            # If lobpcg (or amg with lobpcg) is chosen and the Laplacian is
            # non-symmetric then we need to extract:
            #   the w (weight) vector from geometry, and
            #   the symmetric Laplacian = S.
            # The actual Laplacian is L = W^{-1} S (W = diag(w)), which has
            # the same spectrum as L* = W^{-1/2} S W^{-1/2}, symmetric.
            # We compute the eigen-decomposition of L*: [D, V] and then use
            # W^{-1/2} V to recover the eigenvectors of L.
            # (Handbook for Cluster Analysis, Chapter 2, Proposition 1.)
            # Since the affinity matrix A is censored at a radius it is not
            # guaranteed positive definite; but L = W^{-1} S has maximum
            # eigenvalue 1 (stochastic matrix), and so does L*.  Therefore
            # I - L* has smallest eigenvalue 0 (positive semi-definite) with
            # lambda(I - L*) = 1 - lambda(L*).  To get strict positive
            # definiteness we use (1 + epsilon)*I instead of I, making the
            # smallest eigenvalue epsilon.
            if geom.laplacian_weights is None:
                # a laplacian existed but it wasn't called with return_lapsym=True
                geom.compute_laplacian_matrix(copy=False, return_lapsym=True)
            w = np.array(geom.laplacian_weights)
            symmetrized_laplacian = geom.laplacian_symmetric.copy()
            if sparse.isspmatrix(symmetrized_laplacian):
                symmetrized_laplacian.data /= np.sqrt(w[symmetrized_laplacian.row])
                symmetrized_laplacian.data /= np.sqrt(w[symmetrized_laplacian.col])
                symmetrized_laplacian = (1 + epsilon) * sparse.identity(n_nodes) - symmetrized_laplacian
            else:
                symmetrized_laplacian /= np.sqrt(w)
                symmetrized_laplacian /= np.sqrt(w[:, np.newaxis])
                symmetrized_laplacian = (1 + epsilon) * np.identity(n_nodes) - symmetrized_laplacian
        else:
            # using a symmetric laplacian but adjust to avoid positive definite errors
            symmetrized_laplacian = geom.laplacian_matrix.copy()
            if sparse.isspmatrix(symmetrized_laplacian):
                symmetrized_laplacian = (1 + epsilon) * sparse.identity(n_nodes) - symmetrized_laplacian
            else:
                symmetrized_laplacian = (1 + epsilon) * np.identity(n_nodes) - symmetrized_laplacian
    if PD_solver:
        # then eI - L was used, fix the eigenvalues afterwards
        lambdas, diffusion_map = eigen_decomposition(symmetrized_laplacian, n_components + 1,
                                                     eigen_solver=eigen_solver,
                                                     random_state=random_state,
                                                     drop_first=drop_first, largest=False,
                                                     solver_kwds=solver_kwds)
        lambdas = -lambdas + epsilon
    else:
        lambdas, diffusion_map = eigen_decomposition(laplacian, n_components + 1,
                                                     eigen_solver=eigen_solver,
                                                     random_state=random_state,
                                                     drop_first=drop_first, largest=True,
                                                     solver_kwds=solver_kwds)
    if re_normalize:
        # put back on original Laplacian space
        diffusion_map /= np.sqrt(w[:, np.newaxis])
        # norm 1 vectors
        diffusion_map /= np.linalg.norm(diffusion_map, axis=0)
    # sort the eigenvalues in decreasing order
    ind = np.argsort(lambdas);
    ind = ind[::-1]
    lambdas = lambdas[ind];
    lambdas[0] = 0
    diffusion_map = diffusion_map[:, ind]
    eigenvalues = lambdas.copy()
    eigenvectors = diffusion_map.copy()
    if diffusion_maps:
        diffusion_map = compute_diffusion_maps(lapl_type, diffusion_map, lambdas, diffusion_time)
    if drop_first:
        embedding = diffusion_map[:, 1:(n_components + 1)]
        eigenvectors = eigenvectors[:, 1:(n_components + 1)]
        eigenvalues = eigenvalues[1:(n_components + 1)]
    else:
        embedding = diffusion_map[:, :n_components]
        eigenvectors = eigenvectors[:, :(n_components)]
        eigenvalues = eigenvalues[:(n_components)]
    return embedding, eigenvalues, eigenvectors
def get_hypertree_from_predecessors(H, Pv, source_node, node_weights=None, attr_name="weight"):
    """Build the hypertree induced by a predecessor vector Pv from source_node.

    The hypertree is the subhypergraph formed from the union of the set of
    paths produced by, e.g., the SBT algorithm.  Node weights (if provided)
    are attached to the nodes under ``attr_name``.

    :note: The IDs of the hyperedges in the returned subhypergraph may
        differ from those of the original hypergraph (even though the tail
        and head sets are identical).
    :param H: the hypergraph the path algorithm was executed on.
    :param Pv: dict mapping each node to the ID of the hyperedge that
        preceded it in the path (or None).
    :param source_node: the root of the executed path algorithm.
    :param node_weights: [optional] dict mapping each node to a weight.
    :param attr_name: attribute key used for the node weights.
    :returns: DirectedHypergraph -- subhypergraph induced by Pv.
    :raises: TypeError -- Algorithm only applicable to directed hypergraphs
    """
    if not isinstance(H, DirectedHypergraph):
        raise TypeError("Algorithm only applicable to directed hypergraphs")
    tree = DirectedHypergraph()
    # Nodes that actually appear on some path (have a predecessor edge).
    reached = [node for node in Pv.keys() if Pv[node] is not None]
    if node_weights is None:
        nodes = reached + [source_node]
    else:
        # Pair each node with its weight attribute dictionary.
        nodes = [(node, {attr_name: node_weights[node]}) for node in reached]
        nodes.append((source_node, {attr_name: node_weights[source_node]}))
    tree.add_nodes(nodes)
    # Re-create every hyperedge referenced by Pv in the new hypergraph.
    hyperedges = []
    for hyperedge_id in Pv.values():
        if hyperedge_id is None:
            continue
        hyperedges.append((H.get_hyperedge_tail(hyperedge_id),
                           H.get_hyperedge_head(hyperedge_id),
                           H.get_hyperedge_attributes(hyperedge_id)))
    tree.add_hyperedges(hyperedges)
    return tree
def findbeam_radialpeak(data, orig_initial, mask, rmin, rmax, maxiter=100,
                        drive_by='amplitude', extent=10, callback=None):
    """Find the beam by optimizing a peak in the radial average.

    Inputs:
        data: scattering matrix
        orig_initial: first guess for the origin
        mask: mask matrix; nonzero is non-masked
        rmin, rmax: distance range (in pixels) of the peak from the origin
        drive_by: 'hwhm' to minimize the peak's half width at half maximum,
            or 'amplitude' to maximize the peak amplitude
        extent: approximate distance between the current and the real
            origin, in pixels.  Too large makes the fit unstable; too small
            prevents moving away from the initial origin.
        callback: callback function (expects no arguments)

    Outputs: the beam coordinates.

    Notes: a Gaussian is fitted to the radial curve.
    """
    orig_initial = np.array(orig_initial)
    # Internal convention: nonzero means masked-out.
    mask = 1 - mask.astype(np.uint8)
    data = data.astype(np.double)
    pix = np.arange(rmin * 1.0, rmax * 1.0, 1)
    mode = drive_by.lower()
    if mode == 'hwhm':
        def targetfunc(shift, data, mask, base_origin, callback):
            # Radial intensity at the shifted origin; minimize peak HWHM.
            I = radintpix(data, None, shift[0] + base_origin[0],
                          shift[1] + base_origin[1], mask, pix)[1]
            hwhm = float(misc.findpeak_single(pix, I)[1])
            if callback is not None:
                callback()
            return abs(hwhm)
    elif mode == 'amplitude':
        def targetfunc(shift, data, mask, base_origin, callback):
            # Radial intensity at the shifted origin; maximize peak height
            # (negated for the minimizer).
            I = radintpix(data, None, shift[0] + base_origin[0],
                          shift[1] + base_origin[1], mask, pix)[1]
            fp = misc.findpeak_single(pix, I)
            height = -float(fp[2] + fp[3])
            if callback is not None:
                callback()
            return height
    else:
        raise ValueError('Invalid argument for drive_by %s' % drive_by)
    best_shift = scipy.optimize.fmin(targetfunc, np.array([extent, extent]),
                                     args=(data, mask, orig_initial - extent, callback),
                                     maxiter=maxiter, disp=0)
    return np.array(orig_initial) - extent + best_shift
def convert(self, request, response, data):
    """Perform the desired conversion.

    Looks up ``self.modifier.param`` in the request's ``bark.notes``
    dictionary (falling back to '-') and returns the escaped value.

    :param request: The webob Request object describing the request.
    :param response: The webob Response object describing the response.
    :param data: The data dictionary returned by the prepare() method.
    :returns: A string, the result of the conversion.
    """
    notes = request.environ.get('bark.notes', {})
    value = notes.get(self.modifier.param, '-')
    return self.escape(value)
def get_top_stories(self):
    """Return the item numbers for the current top stories.

    Raises requests.HTTPError (after logging it) if the endpoint returns a
    non-200 response.

    :return: A list with the top story item numbers.
    """
    endpoint = "v0/topstories.json"
    try:
        return self._make_request(endpoint)
    except requests.HTTPError as e:
        hn_logger.exception('Faulted on getting top stories, with status {}'.format(e.errno))
        raise e
def _geom_type(self, source):
    """Return the dominant geometry type of a layer: 'point', 'line' or 'polygon'.

    ``source`` may be an AbstractLayer (its original query is inspected) or a
    table name.  Runs a SQL aggregation grouping non-null geometries by type
    and returns the most common one.  Warns when the layer mixes types;
    raises ValueError when the layer has no geometries at all.
    """
    if isinstance(source, AbstractLayer):
        query = source.orig_query
    else:
        # Plain table name: select everything from it.
        query = 'SELECT * FROM "{table}"'.format(table=source)
    # Count rows per coarse geometry class, most frequent first.
    resp = self.sql_client.send(utils.minify_sql(('SELECT', ' CASE WHEN ST_GeometryType(the_geom)', ' in (\'ST_Point\', \'ST_MultiPoint\')', ' THEN \'point\'', ' WHEN ST_GeometryType(the_geom)', ' in (\'ST_LineString\', \'ST_MultiLineString\')', ' THEN \'line\'', ' WHEN ST_GeometryType(the_geom)', ' in (\'ST_Polygon\', \'ST_MultiPolygon\')', ' THEN \'polygon\'', ' ELSE null END AS geom_type,', ' count(*) as cnt', 'FROM ({query}) AS _wrap', 'WHERE the_geom IS NOT NULL', 'GROUP BY 1', 'ORDER BY 2 DESC', )).format(query=query), **DEFAULT_SQL_ARGS)
    if resp['total_rows'] > 1:
        warn('There are multiple geometry types in {query}: ' '{geoms}. Styling by `{common_geom}`, the most common'.format(query=query, geoms=','.join(g['geom_type'] for g in resp['rows']), common_geom=resp['rows'][0]['geom_type']))
    elif resp['total_rows'] == 0:
        raise ValueError('No geometry for layer. Check all layer tables ' 'and queries to ensure there are geometries.')
    # Rows are ordered by count desc, so the first row is the dominant type.
    return resp['rows'][0]['geom_type']
def step_through(self, msg='', shutit_pexpect_child=None, level=1, print_input=True, value=True):
    """Implement a step-through function using pause_point.

    No-op (returns True immediately) when the session is not interactive
    enough for the requested level.
    """
    glob = shutit_global.shutit_global_object
    glob.yield_to_draw()
    child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    session = self.get_shutit_pexpect_session_from_child(child)
    interactive_enough = (glob.determine_interactive()
                          and glob.interactive
                          and glob.interactive >= level)
    if not interactive_enough:
        return True
    self.build['step_through'] = value
    session.pause_point(msg, print_input=print_input, level=level)
    return True
def _horizontal_segment ( old_offs , new_offs , spacing , diameter ) :
'''Vertices of a horizontal rectangle''' | return np . array ( ( ( old_offs [ 0 ] , old_offs [ 1 ] + spacing [ 1 ] ) , ( new_offs [ 0 ] , old_offs [ 1 ] + spacing [ 1 ] ) , ( new_offs [ 0 ] , old_offs [ 1 ] + spacing [ 1 ] - diameter ) , ( old_offs [ 0 ] , old_offs [ 1 ] + spacing [ 1 ] - diameter ) ) ) |
def findRoleID(self, name):
    """Return the ID of the role whose name matches ``name`` (case-insensitive).

    Iterates the roles contained in this collection and returns the 'id'
    field of the first match, or None when no role matches.
    """
    # Hoist the lowercasing of the needle out of the loop.
    target = name.lower()
    for role in self:
        if role['name'].lower() == target:
            return role['id']
    return None
def dynamodb_connection_factory():
    """Lazily create and cache a per-worker DynamoDB resource.

    SessionStore is called for every single page view, so establishing a
    new connection each time would hurt performance badly.  The boto3
    session and DynamoDB resource are therefore memoized in module-level
    globals.  boto3 resource objects are stateless (aside from security
    tokens), so sharing one per worker is not a thread-safety concern.
    """
    global _DYNAMODB_CONN
    global _BOTO_SESSION
    if _DYNAMODB_CONN:
        return _DYNAMODB_CONN
    logger.debug("Creating a DynamoDB connection.")
    if not _BOTO_SESSION:
        _BOTO_SESSION = Boto3Session(
            aws_access_key_id=AWS_ACCESS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            region_name=AWS_REGION_NAME)
    _DYNAMODB_CONN = _BOTO_SESSION.resource('dynamodb')
    return _DYNAMODB_CONN
def xml_to_json(element, definition, required=False):
    """Convert XML (ElementTree) to JSON-like data from a definition schema.

    The definition can be:
    * a non-empty string -- an XPath, or '@attribute' for direct extraction;
    * a tuple -- complex definition, delegated to _parse_tuple;
    * a dict ``{key: 'xpath or @attribute', ...}`` -- delegated to _parse_dict;
    * a list ``[xpath, [definition]]`` -- delegated to _parse_list;
    * a callable -- applied directly to the element;
    * anything else -- the element's own stripped text.

    :param element: ElementTree element
    :param definition: schema for the json
    :param required: when True, a missing string-path raises
        NotCompleteXmlException instead of returning None
    :return: parsed xml
    :rtype: Union[dict, str, list]
    """
    if isinstance(definition, str) and len(definition) > 0:
        # attribute extraction
        if definition.startswith('@'):
            return element.get(definition[1:])
        # XPath lookup, returning the matched element's stripped text
        found = element.find(definition)
        if found is None:
            if required:
                raise NotCompleteXmlException(
                    'Expecting {0} in element {1}'.format(definition, element.tag))
            return None
        return found.text.strip() if found.text else None
    if isinstance(definition, tuple):
        return _parse_tuple(element, definition, required)
    if isinstance(definition, dict):
        return _parse_dict(element, definition)
    if isinstance(definition, list):
        return _parse_list(element, definition)
    if hasattr(definition, '__call__'):
        return definition(element)
    # default: the element's own text
    return element.text.strip() if element.text else None
def add_point_region(self, y: float, x: float) -> Graphic:
    """Add a point graphic to the data item.

    :param y: The y coordinate, in relative units [0.0, 1.0]
    :param x: The x coordinate, in relative units [0.0, 1.0]
    :return: The :py:class:`nion.swift.Facade.Graphic` object that was added.

    .. versionadded:: 1.0

    Scriptable: Yes
    """
    point = Graphics.PointGraphic()
    point.position = Geometry.FloatPoint(y, x)
    self.__display_item.add_graphic(point)
    return Graphic(point)
def _get_optimal_threshold(arr, quantized_dtype, num_bins=8001, num_quantized_bins=255):
    """Given a dataset, find the optimal threshold for quantizing it.

    The reference distribution is `q`, and the candidate distribution is `p`.
    `q` is a truncated version of the original distribution.  For each
    candidate truncation threshold the KL divergence between `p` and `q` is
    computed; the threshold with minimum divergence wins.

    Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf

    :param arr: NDArray, np.ndarray, or list of either.
    :param quantized_dtype: target dtype ('auto', 'uint8', 'int8', ...).
    :param num_bins: number of histogram bins over [-th, th].
    :param num_quantized_bins: number of bins in the quantized distribution.
    :return: (min_val, max_val, min_divergence, opt_th)
    :raises TypeError: on unsupported input types.
    """
    # Normalize the input to a single flat np.ndarray.
    if isinstance(arr, NDArray):
        arr = arr.asnumpy()
    elif isinstance(arr, list):
        assert len(arr) != 0
        for i, nd in enumerate(arr):
            if isinstance(nd, NDArray):
                arr[i] = nd.asnumpy()
            elif not isinstance(nd, np.ndarray):
                raise TypeError('get_optimal_threshold only supports input type of NDArray,'
                                ' list of np.ndarrays or NDArrays, and np.ndarray,'
                                ' while received type=%s' % (str(type(nd))))
        arr = np.concatenate(arr)
    elif not isinstance(arr, np.ndarray):
        raise TypeError('get_optimal_threshold only supports input type of NDArray,'
                        ' list of NDArrays and np.ndarray,'
                        ' while received type=%s' % (str(type(arr))))
    min_val = np.min(arr)
    max_val = np.max(arr)
    th = max(abs(min_val), abs(max_val))
    if min_val >= 0 and quantized_dtype in ['auto', 'uint8']:
        # We need to move negative bins to positive bins to fit uint8 range.
        num_quantized_bins = num_quantized_bins * 2 + 1
    hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-th, th))
    zero_bin_idx = num_bins // 2
    num_half_quantized_bins = num_quantized_bins // 2
    thresholds = np.zeros(num_bins // 2 + 1 - num_quantized_bins // 2)
    divergence = np.zeros_like(thresholds)
    quantized_bins = np.zeros(num_quantized_bins, dtype=np.int32)
    # i means the number of bins on half axis excluding the zero bin.
    for i in range(num_quantized_bins // 2, num_bins // 2 + 1):
        p_bin_idx_start = zero_bin_idx - i
        p_bin_idx_stop = zero_bin_idx + i + 1
        thresholds[i - num_half_quantized_bins] = hist_edges[p_bin_idx_stop]
        sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop]
        # generate reference distribution p
        p = sliced_nd_hist.copy()
        assert p.size % 2 == 1
        assert p.size >= num_quantized_bins
        # put left outlier count in p[0]
        left_outlier_count = np.sum(hist[0:p_bin_idx_start])
        p[0] += left_outlier_count
        # put right outlier count in p[-1]
        right_outlier_count = np.sum(hist[p_bin_idx_stop:])
        p[-1] += right_outlier_count
        # is_nonzeros[k] indicates whether hist[k] is nonzero
        is_nonzeros = (p != 0).astype(np.int32)
        # calculate how many bins should be merged to generate quantized distribution q
        num_merged_bins = sliced_nd_hist.size // num_quantized_bins
        # merge hist into num_quantized_bins bins
        for j in range(num_quantized_bins):
            start = j * num_merged_bins
            stop = start + num_merged_bins
            quantized_bins[j] = sliced_nd_hist[start:stop].sum()
        quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum()
        # expand quantized_bins into p.size bins
        q = np.zeros(sliced_nd_hist.size, dtype=np.float32)
        for j in range(num_quantized_bins):
            start = j * num_merged_bins
            if j == num_quantized_bins - 1:
                stop = len(is_nonzeros)
            else:
                stop = start + num_merged_bins
            norm = is_nonzeros[start:stop].sum()
            if norm != 0:
                q[start:stop] = float(quantized_bins[j]) / float(norm)
        q[p == 0] = 0
        p = _smooth_distribution(p)
        # There is a chance that q is an invalid probability distribution.
        try:
            q = _smooth_distribution(q)
        except ValueError:
            # BUG FIX: previously the inf assignment was immediately
            # overwritten by the unconditional stats.entropy() call below,
            # letting an invalid q be scored anyway.  Skip this candidate.
            divergence[i - num_half_quantized_bins] = float("inf")
            continue
        divergence[i - num_half_quantized_bins] = stats.entropy(p, q)
    min_divergence_idx = np.argmin(divergence)
    min_divergence = divergence[min_divergence_idx]
    opt_th = thresholds[min_divergence_idx]
    return min_val, max_val, min_divergence, opt_th
def update_filters(self, filters):
    """Modify the filters list, dropping filters that are not active.

    String values are stripped of surrounding newlines/spaces; other values
    are coerced to int.  A filter whose value list is empty, or whose first
    value is 0 or '', is considered inactive and dropped.  When the
    resulting mapping differs from the current one it is stored and, for
    already-created links, pushed to the controller and persisted.
    """
    active = {}
    for name, raw_values in filters.items():
        cleaned = [v.strip("\n ") if isinstance(v, str) else int(v)
                   for v in raw_values]
        if len(cleaned) != 0 and cleaned[0] != 0 and cleaned[0] != '':
            active[name] = cleaned
    if active != self.filters:
        self._filters = active
        if self._created:
            yield from self.update()
            self._project.controller.notification.emit("link.updated", self.__json__())
            self._project.dump()
def is_string_type(type_):
    """Check whether the given type is a string type.

    :param type_: The type to check
    :return: True if the type is a string type, otherwise False
    :rtype: bool
    """
    string_types = _get_types(Types.STRING)
    if not is_typing_type(type_):
        return type_ in string_types
    # typing-style types also count when they are regex types.
    return type_ in string_types or is_regex_type(type_)
def _get_gcloud_sdk_credentials():
    """Return (credentials, project_id) from the Cloud SDK, or (None, None).

    Falls back to the SDK's configured project when the credentials file
    does not supply a project ID.
    """
    from google.auth import _cloud_sdk

    # Bail out early when no application default credentials file exists.
    adc_path = _cloud_sdk.get_application_default_credentials_path()
    if not os.path.isfile(adc_path):
        return None, None
    credentials, project_id = _load_credentials_from_file(adc_path)
    if not project_id:
        project_id = _cloud_sdk.get_project_id()
    return credentials, project_id
def targets_w_bins(cnv_file, access_file, target_anti_fn, work_dir, data):
    """Calculate CNVkit target and anti-target BED files with pre-determined bins.

    :param cnv_file: BED of target regions.
    :param access_file: BED of accessible genome regions.
    :param target_anti_fn: callable returning (target_bin, anti_bin) sizes.
    :param work_dir: directory for the output files.
    :param data: sample data dictionary (bcbio conventions).
    :return: (target_file, anti_file) paths; existing outputs are reused.
    """
    target_file = os.path.join(work_dir, "%s-target.bed" % dd.get_sample_name(data))
    anti_file = os.path.join(work_dir, "%s-antitarget.bed" % dd.get_sample_name(data))
    if not utils.file_exists(target_file):
        target_bin, _ = target_anti_fn()
        # Write via a transactional temp file so partial outputs are not kept.
        with file_transaction(data, target_file) as tx_out_file:
            cmd = [_get_cmd(), "target", cnv_file, "--split", "-o", tx_out_file, "--avg-size", str(target_bin)]
            do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target")
    if not os.path.exists(anti_file):
        _, anti_bin = target_anti_fn()
        with file_transaction(data, anti_file) as tx_out_file:
            # Create access file without targets to avoid overlap
            # antitarget in cnvkit is meant to do this but appears to not always happen
            # after chromosome 1
            tx_access_file = os.path.join(os.path.dirname(tx_out_file), os.path.basename(access_file))
            pybedtools.BedTool(access_file).subtract(cnv_file).saveas(tx_access_file)
            cmd = [_get_cmd(), "antitarget", "-g", tx_access_file, cnv_file, "-o", tx_out_file, "--avg-size", str(anti_bin)]
            do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget")
    return target_file, anti_file
def draw_header(self, stream, header):
    """Write ``header`` framed by '=' rules, followed by an empty line."""
    rule = '=' * (len(header) + 4)
    stream.writeln(rule)
    stream.writeln('| ' + header + ' |')
    stream.writeln(rule)
    stream.writeln()
def configure_plugin(self, name, options):
    """Configure a plugin.

    Args:
        name (string): The name of the plugin. The ``:latest`` tag is
            optional, and is the default if omitted.
        options (dict): A key-value mapping of options

    Returns:
        ``True`` if successful
    """
    url = self._url('/plugins/{0}/set', name)
    payload = options
    if isinstance(payload, dict):
        # The engine expects a list of "key=value" strings.
        payload = ['{0}={1}'.format(k, v) for k, v in six.iteritems(payload)]
    res = self._post_json(url, data=payload)
    self._raise_for_status(res)
    return True
def traverse_ingredients(self):
    """Recursively traverse this ingredient and its sub-ingredients.

    Yields
    ------
    ingredient : sacred.Ingredient
        The ingredient as traversed in preorder.
    depth : int
        The depth of the ingredient starting from 0.

    Raises
    ------
    CircularDependencyError
        If a circular structure among ingredients was detected.
    """
    # Re-entering an ingredient that is still mid-traversal means we
    # looped back to it -> circular dependency.
    if self._is_traversing:
        raise CircularDependencyError(ingredients=[self])
    else:
        self._is_traversing = True
    yield self, 0
    # track() extends the error's ingredient chain when a cycle is
    # detected deeper in the recursion.
    with CircularDependencyError.track(self):
        for ingredient in self.ingredients:
            for ingred, depth in ingredient.traverse_ingredients():
                yield ingred, depth + 1
    self._is_traversing = False
def add(self, index, value):
    """Buffer a value and flush the buffered mean every ``interval`` steps.

    Args:
        index (int): Index (e.g. iteration number).
        value (float): Value to record.
    """
    self.buf.append(value)
    # Not yet time to flush: just accumulate.
    if (index - self.flush_at) < self.interval:
        return
    mean_value = np.mean(self.buf)
    if self.verbose:
        logger.info("iter={} {{{}}}={}".format(index, self.name, mean_value))
    if self.fd is not None:
        print("{} {:g}".format(index, mean_value), file=self.fd)
    self.flush_at = index
    self.buf = []
def _remote_chown(self, paths, user, sudoable=False):
    """Issue an asynchronous os.chown() call for every path in `paths`, then
    format the resulting return value list with fake_shell().

    :param paths: iterable of remote path strings to chown
    :param user: target owner name, resolved remotely via pwd.getpwnam
    :param sudoable: logged only; not otherwise used in this implementation
    """
    LOG.debug('_remote_chown(%r, user=%r, sudoable=%r)', paths, user, sudoable)
    # Resolve the user entry on the remote side to obtain its uid/gid.
    ent = self._connection.get_chain().call(pwd.getpwnam, user)
    # Fire one async chown per path and wait for all of them; fake_shell()
    # wraps the combined result into the shell-style return format.
    return self.fake_shell(lambda: mitogen.select.Select.all(self._connection.get_chain().call_async(os.chown, path, ent.pw_uid, ent.pw_gid) for path in paths))
def container_fs_limit_bytes(self, metric, scraper_config):
    """Number of bytes that can be consumed by the container on this filesystem.

    Helper used by ``container_fs_usage_bytes``; it doesn't report any
    metric itself, it only feeds the limit into the usage-percentage gauge.
    """
    usage_pct_name = scraper_config['namespace'] + '.filesystem.usage_pct'
    if metric.type not in METRIC_TYPES:
        self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
        return
    self._process_limit_metric('', metric, self.fs_usage_bytes, scraper_config, usage_pct_name)
def set_input_score_end_range(self, score):
    """Sets the input score end range.

    arg:    score (decimal): the new end range
    raise:  InvalidArgument - ``score`` is invalid
    raise:  NoAccess - ``range`` cannot be modified
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score
    if self.get_input_score_end_range_metadata().is_read_only():
        raise errors.NoAccess()
    try:
        # Coerce to float; anything unconvertible is an invalid argument.
        score = float(score)
    except ValueError:
        raise errors.InvalidArgument()
    if not self._is_valid_decimal(score, self.get_input_score_end_range_metadata()):
        raise errors.InvalidArgument()
    self._my_map['inputScoreEndRange'] = score
def get(self, sid):
    """Constructs a KeyContext

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.api.v2010.account.key.KeyContext
    :rtype: twilio.rest.api.v2010.account.key.KeyContext
    """
    account_sid = self._solution['account_sid']
    return KeyContext(self._version, account_sid=account_sid, sid=sid)
def remove(self, *members):
    """Removes @members from the set

    -> #int the number of members that were removed from the set
    """
    items = members
    if self.serialized:
        # Serialize each member before handing it to redis.
        items = [self._dumps(m) for m in members]
    return self._client.srem(self.key_prefix, *items)
def create_key(policy=None, description=None, key_usage=None, region=None, key=None, keyid=None, profile=None):
    '''Creates a master key.

    The policy is JSON-serialized before being sent to AWS. Returns a dict
    with either a ``key_metadata`` entry (the ``KeyMetadata`` mapping from
    the AWS response) or an ``error`` entry on failure.

    CLI example::

        salt myminion boto_kms.create_key '{"Statement":...}' "My master key"
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    r = {}
    _policy = salt.serializers.json.serialize(policy)
    try:
        key_metadata = conn.create_key(_policy, description=description, key_usage=key_usage)
        r['key_metadata'] = key_metadata['KeyMetadata']
    except boto.exception.BotoServerError as e:
        # Normalize the boto error into salt's standard error structure.
        r['error'] = __utils__['boto.get_error'](e)
    return r
def Aitken(s):
    """Accelerate the convergence of a series
    using Aitken's delta-squared process (SICP calls it Euler).
    """
    def accel():
        # Pull the first three terms from the stream `s`.
        # NOTE(review): `s >> item[:3]` looks like a stream-slicing DSL
        # from this project -- confirm its semantics before editing.
        s0, s1, s2 = s >> item[:3]
        while 1:
            # Aitken extrapolation: s2 - (delta s)^2 / (delta^2 s)
            yield s2 - (s2 - s1) ** 2 / (s0 - 2 * s1 + s2)
            # Slide the three-term window forward by one term.
            s0, s1, s2 = s1, s2, next(s)
    return accel()
def write(self, offset, data):
    """Write C{data} into this file at position C{offset}.  Extending the
    file past its original end is expected.  Unlike python's normal
    C{write()} methods, this method cannot do a partial write: it must
    write all of C{data} or else return an error.

    The default implementation checks for an attribute on C{self} named
    C{writefile}, and if present, performs the write operation on the
    python file-like object found there.  The attribute is named
    differently from C{readfile} to make it easy to implement read-only
    (or write-only) files, but if both attributes are present, they should
    refer to the same file.

    @param offset: position in the file to start writing at.
    @type offset: int or long
    @param data: data to write into the file.
    @type data: str
    @return: an SFTP error code like L{SFTP_OK}.
    """
    writefile = getattr(self, 'writefile', None)
    if writefile is None:
        return SFTP_OP_UNSUPPORTED
    try:
        # in append mode, don't care about seeking
        if (self.__flags & os.O_APPEND) == 0:
            if self.__tell is None:
                self.__tell = writefile.tell()
            if offset != self.__tell:
                writefile.seek(offset)
                self.__tell = offset
        writefile.write(data)
        writefile.flush()
    # FIX: `except IOError, e` is Python-2-only syntax (SyntaxError on
    # Python 3); `except ... as e` works on Python 2.6+ and Python 3.
    except IOError as e:
        # Position is now unknown; force a re-tell on the next write.
        self.__tell = None
        return SFTPServer.convert_errno(e.errno)
    if self.__tell is not None:
        self.__tell += len(data)
    return SFTP_OK
def _render_template_block_nodelist(nodelist, block_name, context):
    """Recursively iterate over a nodelist to find and render the wanted block.

    Raises BlockNotFound when no block named `block_name` exists anywhere
    in `nodelist` or its descendants.
    """
    # Attempt to find the wanted block in the current template.
    for node in nodelist:
        # If the wanted block was found, return it.
        if isinstance(node, BlockNode):
            # No matter what, add this block to the rendering context so
            # block inheritance resolves against it.
            context.render_context[BLOCK_CONTEXT_KEY].push(node.name, node)
            # If the name matches, you're all set and we found the block!
            if node.name == block_name:
                return node.render(context)
        # If a node has children, recurse into them.  Based on
        # django.template.base.Node.get_nodes_by_type.
        for attr in node.child_nodelists:
            try:
                new_nodelist = getattr(node, attr)
            except AttributeError:
                continue
            # Try to find the block recursively; BlockNotFound only means
            # this particular subtree lacks it, so keep scanning siblings.
            try:
                return _render_template_block_nodelist(new_nodelist, block_name, context)
            except BlockNotFound:
                continue
    # The wanted block_name was not found.
    raise BlockNotFound("block with name '%s' does not exist" % block_name)
def yindex(self):
    """Positions of the data on the y-axis

    :type: `~astropy.units.Quantity` array
    """
    try:
        return self._yindex
    except AttributeError:
        pass
    # Lazily build and cache the index from y0/dy and the y-extent.
    self._yindex = Index.define(self.y0, self.dy, self.shape[1])
    return self._yindex
def get_pkg_list(self):
    """Returns a dictionary of packages in the following
    format::

        {'package_name': {'name': 'package_name',
                          'version': 'major.minor.version'}}

    Note: ``version`` is actually stored as the list produced by splitting
    the version string on '.', and a ``release`` key (possibly None) is
    added when the query emits a third field.
    """
    if self.query_command:
        cmd = self.query_command
        # One package record per line; fields are '|'-separated:
        #   name|version            (2 fields)
        #   name|version|release    (3 fields)
        pkg_list = shell_out(cmd, timeout=0, chroot=self.chroot).splitlines()
        for pkg in pkg_list:
            if '|' not in pkg:
                # Not a package record (e.g. noise in the command output).
                continue
            elif pkg.count("|") == 1:
                name, version = pkg.split("|")
                release = None
            elif pkg.count("|") == 2:
                name, version, release = pkg.split("|")
            # NOTE(review): a line containing 3+ '|' falls through with
            # stale name/version/release from the previous line (or a
            # NameError on the very first line) -- confirm whether such
            # input can occur for the configured query_command.
            self.packages[name] = {'name': name, 'version': version.split(".")}
            # Normalize an empty release string to None.
            release = release if release else None
            self.packages[name]['release'] = release
    return self.packages
def visit(self, node):
    """Dispatch *node* to its registered visitor, falling back to
    ``generic_visit`` when none is registered."""
    handler = self.get_visitor(node)
    if handler is None:
        return self.generic_visit(node)
    return handler(node)
def transform_streams_for_comparison(outputs):
    """Makes failure output for streams better by having key be the stream name"""
    transformed = []
    for out in outputs:
        if out.output_type != 'stream':
            transformed.append(out)
            continue
        # Re-key the stream text on its stream name (stdout/stderr).
        transformed.append({'output_type': 'stream', out.name: out.text})
    return transformed
def get_routertypes(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False):
    """Lists defined router types.

    Stub implementation: accepts the standard list-call parameters
    (filters/fields/sorts/pagination) and does nothing.
    """
    pass
def parse_database_url(url):
    """Parses a database URL into a Django-style settings dict.

    Returns a dict with NAME/USER/PASSWORD/HOST/PORT entries and, when the
    URL scheme is present in DATABASE_SCHEMES, an ENGINE entry as well.
    """
    if url == 'sqlite://:memory:':
        # this is a special case, because if we pass this URL into
        # urlparse, urlparse will choke trying to interpret "memory"
        # as a port number
        return {'ENGINE': DATABASE_SCHEMES['sqlite'], 'NAME': ':memory:'}
        # note: no other settings are required for sqlite
    # otherwise parse the url as normal
    config = {}
    url = urlparse.urlparse(url)
    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]
    # if we are using sqlite and we have no path, then assume we
    # want an in-memory database (this is the behaviour of sqlalchemy)
    if url.scheme == 'sqlite' and path == '':
        path = ':memory:'
    # Update with environment configuration.
    config.update({'NAME': path or '', 'USER': url.username or '', 'PASSWORD': url.password or '', 'HOST': url.hostname or '', 'PORT': url.port or ''})
    if url.scheme in DATABASE_SCHEMES:
        config['ENGINE'] = DATABASE_SCHEMES[url.scheme]
    return config
def get_plugin_font(self, rich_text=False):
    """Return plugin font option.

    All plugins in Spyder use a global font. This is a convenience method
    in case some plugins will have a delta size based on the default size.
    """
    if rich_text:
        option, delta = 'rich_font', self.RICH_FONT_SIZE_DELTA
    else:
        option, delta = 'font', self.FONT_SIZE_DELTA
    return get_font(option=option, font_size_delta=delta)
def namespaces(self, psuedo=True):
    """Fetch this wiki's namespaces as a dict of namespace ID -> name.

    Results are cached after the first API call. If *psuedo* is ``True``,
    the returned dict also includes the psuedo-namespaces ("Special:" and
    "Media:"), which have negative IDs and no content of their own.

    :param psuedo: boolean to determine inclusion of psuedo-namespaces
    :returns: dictionary of namespace IDs and names
    """
    if self._namespaces is None:
        result = self.call({'action': 'query', 'meta': 'siteinfo', 'siprop': 'namespaces'})
        self._namespaces = {}
        self._psuedo_namespaces = {}
        ns_info = result['query']['namespaces']
        for nsid in ns_info:
            # Negative IDs mark psuedo-namespaces; keep them separate.
            target = self._namespaces if int(nsid) >= 0 else self._psuedo_namespaces
            target[int(nsid)] = ns_info[nsid]['*']
    if not psuedo:
        return self._namespaces
    merged = {}
    merged.update(self._namespaces)
    merged.update(self._psuedo_namespaces)
    return merged
def enabled(name, runas=None):
    '''Ensure the RabbitMQ plugin is enabled.

    name
        The name of the plugin
    runas
        The user to run the rabbitmq-plugin command as
    '''
    # Standard salt state return structure.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    try:
        plugin_enabled = __salt__['rabbitmq.plugin_is_enabled'](name, runas=runas)
    except CommandExecutionError as err:
        ret['result'] = False
        ret['comment'] = 'Error: {0}'.format(err)
        return ret
    if plugin_enabled:
        # Already in the desired state; nothing to do.
        ret['comment'] = 'Plugin \'{0}\' is already enabled.'.format(name)
        return ret
    if not __opts__['test']:
        try:
            __salt__['rabbitmq.enable_plugin'](name, runas=runas)
        except CommandExecutionError as err:
            ret['result'] = False
            ret['comment'] = 'Error: {0}'.format(err)
            return ret
    # Record the (applied or predicted) change either way; in test mode
    # the plugin is not actually enabled.
    ret['changes'].update({'old': '', 'new': name})
    if __opts__['test'] and ret['changes']:
        # result=None signals "would change" in salt test runs.
        ret['result'] = None
        ret['comment'] = 'Plugin \'{0}\' is set to be enabled.'.format(name)
        return ret
    ret['comment'] = 'Plugin \'{0}\' was enabled.'.format(name)
    return ret
def xmlchars(text):
    """Not all of UTF-8 is considered valid character data in XML...
    Thus, this function can be used to remove illegal characters from ``text``.

    Everything below 0x20 is stripped except tab (0x9), LF (0xA) and CR (0xD).
    """
    bad_codes = list(range(0x9)) + [0xb, 0xc] + list(range(0xe, 0x20))
    pattern = '|'.join('\\x%0.2X' % code for code in bad_codes)
    return re.sub(pattern, '', text)
def register(self, collector):
    """Register *collector* under its name.

    Raises TypeError for non-Collector arguments and ValueError when the
    name is already taken.
    """
    if not isinstance(collector, Collector):
        raise TypeError("Can't register instance, not a valid type of collector")
    key = collector.name
    if key in self.collectors:
        raise ValueError("Collector already exists or name colision")
    # Guard the shared registry against concurrent registration.
    with mutex:
        self.collectors[key] = collector
def lbol_from_spt_dist_mag(sptnum, dist_pc, jmag, kmag, format='cgs'):
    """Estimate a UCD's bolometric luminosity given some basic parameters.

    sptnum: the spectral type as a number; 8 -> M8; 10 -> L0; 20 -> T0
        Valid values range between 0 and 30, ie M0 to Y0.
    dist_pc: distance to the object in parsecs
    jmag: object's J-band magnitude or NaN (*not* None) if unavailable
    kmag: same with K-band magnitude
    format: either 'cgs', 'logcgs', or 'logsun', defining the form of the
        outputs. Logarithmic quantities are base 10.

    This routine can be used with vectors of measurements. The result will be
    NaN if a value cannot be computed. This routine implements the method
    documented in the Appendix of Williams et al., 2014ApJ...785....9W
    (doi:10.1088/0004-637X/785/1/9).
    """
    bcj = bcj_from_spt(sptnum)
    bck = bck_from_spt(sptnum)
    # `n` counts how many bands contributed per element.
    # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement dtype.
    n = np.zeros(sptnum.shape, dtype=int)
    app_mbol = np.zeros(sptnum.shape)
    # Accumulate the J-derived apparent bolometric magnitude where usable.
    w = np.isfinite(bcj) & np.isfinite(jmag)
    app_mbol[w] += jmag[w] + bcj[w]
    n[w] += 1
    # Same for the K band.
    w = np.isfinite(bck) & np.isfinite(kmag)
    app_mbol[w] += kmag[w] + bck[w]
    n[w] += 1
    # Elements with at least one usable band.
    w = (n != 0)
    # Average the contributing bands, then apply the distance modulus.
    abs_mbol = (app_mbol[w] / n[w]) - 5 * (np.log10(dist_pc[w]) - 1)
    # note: abs_mbol is filtered by `w`
    lbol = np.empty(sptnum.shape)
    lbol.fill(np.nan)
    lbol[w] = lbol_from_mbol(abs_mbol, format=format)
    return lbol
def func_str(func, args=[], kwargs={}, type_aliases=[], packed=False, packkw=None, truncate=False):
    """string representation of function definition

    Returns:
        str: a representation of func with args, kwargs, and type_aliases

    Args:
        func (function):
        args (list): argument values (default = [])
        kwargs (dict): kwargs values (default = {})
        type_aliases (list): (default = [])
        packed (bool): wrap the result with packstr (default = False)
        packkw (dict or None): overrides for packstr options (default = None)
        truncate (bool): truncate long item representations (default = False)

    Returns:
        str: func_str

    CommandLine:
        python -m utool.util_str --exec-func_str

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_str import *  # NOQA
        >>> func = byte_str
        >>> args = [1024, 'MB']
        >>> kwargs = dict(precision=2)
        >>> type_aliases = []
        >>> packed = False
        >>> packkw = None
        >>> _str = func_str(func, args, kwargs, type_aliases, packed, packkw)
        >>> result = _str
        >>> print(result)
        byte_str(1024, 'MB', precision=2)
    """
    import utool as ut
    truncatekw = {}
    argrepr_list = ([] if args is None else ut.get_itemstr_list(args, nl=False, truncate=truncate, truncatekw=truncatekw))
    kwrepr_list = ([] if kwargs is None else ut.dict_itemstr_list(kwargs, explicit=True, nl=False, truncate=truncate, truncatekw=truncatekw))
    repr_list = argrepr_list + kwrepr_list
    argskwargs_str = ', '.join(repr_list)
    _str = '%s(%s)' % (meta_util_six.get_funcname(func), argskwargs_str)
    if packed:
        packkw_ = dict(textwidth=80, nlprefix=' ', break_words=False)
        if packkw is not None:
            # FIX: previously `packkw_.update(packkw_)` updated the dict
            # with itself, silently discarding the caller-supplied packkw.
            packkw_.update(packkw)
        _str = packstr(_str, **packkw_)
    return _str
def unregister_all(self):
    """Safely unregisters all active instances.

    Errors occurred here will be recorded but not raised.
    """
    # Snapshot the keys first: unregister() mutates the underlying mapping.
    for alias in list(self._service_objects):
        self.unregister(alias)
def list_limit_range_for_all_namespaces(self, **kwargs):
    """list or watch objects of kind LimitRange

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass ``async_req=True``; the request thread
    is then returned instead of the data:

        >>> thread = api.list_limit_range_for_all_namespaces(async_req=True)
        >>> result = thread.get()

    Supported optional keyword arguments -- ``_continue``,
    ``field_selector``, ``label_selector``, ``limit``, ``pretty``,
    ``resource_version``, ``timeout_seconds``, ``watch`` -- follow the
    standard Kubernetes list/watch API semantics.

    :return: V1LimitRangeList (or the request thread when async).
    """
    # Only the deserialized data is wanted, not the full HTTP response.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_limit_range_for_all_namespaces_with_http_info(**kwargs)
    data = self.list_limit_range_for_all_namespaces_with_http_info(**kwargs)
    return data
def _get_union_type_name ( type_names_to_union ) :
"""Construct a unique union type name based on the type names being unioned .""" | if not type_names_to_union :
raise AssertionError ( u'Expected a non-empty list of type names to union, received: ' u'{}' . format ( type_names_to_union ) )
return u'Union__' + u'__' . join ( sorted ( type_names_to_union ) ) |
def mkdir(path):
    """Make a directory and its parents.

    Idempotent: an already-existing directory is treated as success.

    Args:
        path (str): path to create

    Returns:
        None

    Raises:
        OSError: if the directory cannot be created.
    """
    import errno
    try:
        os.makedirs(path)
        # sanity check
        if not os.path.isdir(path):  # pragma: no cover
            raise IOError('path is not a directory')
    except OSError as e:
        # FIX: use the named constant instead of the magic number 17;
        # "already exists as a directory" counts as success.
        if e.errno == errno.EEXIST and os.path.isdir(path):
            return
        raise
def _check_limit ( self ) :
"""Intenal method : check if current cache size exceeds maximum cache
size and pop the oldest item in this case""" | # First compress
self . _compress ( )
# Then check the max size
if len ( self . _store ) >= self . _max_size :
self . _store . popitem ( last = False ) |
def replace_many(expression: Expression, replacements: Sequence[Tuple[Sequence[int], Replacement]]) -> Replacement:
    r"""Replaces the subexpressions of *expression* at the given positions with the given replacements.

    The original *expression* itself is not modified, but a modified copy is returned. If the replacement
    is a sequence of expressions, it will be expanded into the list of operands of the respective operation.

    This function works the same as `replace`, but allows multiple positions to be replaced at the same time.
    Compared to replacing each position individually with `replace`, it stays correct when positions shift
    because an earlier position was replaced with a sequence:

    >>> expr = f(a, b)
    >>> expected_result = replace_many(expr, [((0, ), [c, c]), ((1, ), a)])
    >>> print(expected_result)
    f(c, c, a)

    With just a single replacement pair, this is equivalent to using `replace`:

    >>> replace(a, (), b) == replace_many(a, [((), b)])
    True

    Parameters:
        expression:
            An :class:`Expression` where a (sub)expression is to be replaced.
        replacements:
            A collection of tuples consisting of a position in the expression and a replacement for that position.

    Returns:
        The resulting expression from the replacements.

    Raises:
        IndexError: If a position is invalid or out of range or if you try to replace a subterm of a term you are
            already replacing.
    """
    if len(replacements) == 0:
        return expression
    # Sort by position so replacements touching the same top-level operand
    # are adjacent and can be grouped below.
    replacements = sorted(replacements)
    if len(replacements[0][0]) == 0:
        # A replacement at the root: no other replacement may coexist,
        # since it would target a subterm of the replaced root.
        if len(replacements) > 1:
            raise IndexError("Cannot replace child positions for expression {}, got {!r}".format(expression, replacements[1:]))
        return replacements[0][1]
    if len(replacements) == 1:
        # Single deep replacement: delegate to the simple version.
        return replace(expression, replacements[0][0], replacements[0][1])
    if not isinstance(expression, Operation):
        # Multiple non-root positions require operands to descend into.
        raise IndexError("Invalid replacements {!r} for expression {!s}".format(replacements, expression))
    operands = list(op_iter(expression))
    new_operands = []
    last_index = 0
    # Group replacements by the index of the top-level operand they touch.
    for index, group in itertools.groupby(replacements, lambda r: r[0][0]):
        # Copy the untouched operands preceding this index verbatim.
        new_operands.extend(operands[last_index:index])
        # Strip the leading position component and recurse into operand.
        replacements = [(pos[1:], r) for pos, r in group]
        if len(replacements) == 1:
            replacement = replace(operands[index], replacements[0][0], replacements[0][1])
        else:
            replacement = replace_many(operands[index], replacements)
        # Sequence replacements are spliced into the operand list.
        if isinstance(replacement, (list, tuple, Multiset)):
            new_operands.extend(replacement)
        else:
            new_operands.append(replacement)
        last_index = index + 1
    # Copy any remaining untouched operands after the last replacement.
    new_operands.extend(operands[last_index:len(operands)])
    return create_operation_expression(expression, new_operands)
def origin_west_asia(origin):
    """Returns if the origin is located in Western Asia.

    Holds true for the following countries:
    * Armenia
    * Azerbaijan
    * Bahrain
    * Cyprus
    * Georgia
    * Iraq
    * Israel
    * Jordan
    * Kuwait
    * Lebanon
    * Oman
    * Qatar
    * Saudi Arabia
    * Syria
    * Turkey
    * United Arab Emirates
    * Yemen

    `origin`
        The origin to check.
    """
    # FIX: the original or-chain called origin_georgia twice; a tuple of
    # predicates with any() keeps the same short-circuit behavior without
    # the duplication.
    checks = (
        origin_armenia, origin_azerbaijan, origin_bahrain, origin_cyprus,
        origin_georgia, origin_iraq, origin_israel, origin_jordan,
        origin_kuwait, origin_lebanon, origin_oman, origin_qatar,
        origin_saudi_arabia, origin_syria, origin_turkey,
        origin_united_arab_emirates, origin_yemen,
    )
    return any(check(origin) for check in checks)
def occupations(self, site_label):
    """Number of these atoms occupying a specific site type.

    Args:
        site_label (Str): Label for the site type being considered.

    Returns:
        (Int): Number of atoms occupying sites of type `site_label`.
    """
    count = 0
    for atom in self.atoms:
        if atom.site.label == site_label:
            count += 1
    return count
def is_dataset(ds):
    """Whether ds is a Dataset. Compatible across TF versions."""
    import tensorflow as tf
    from tensorflow_datasets.core.utils import py_utils
    # Collect every Dataset class this TF build exposes: the top-level one
    # plus the compat.v1 / compat.v2 variants when present.
    dataset_types = [tf.data.Dataset]
    v1_ds = py_utils.rgetattr(tf, "compat.v1.data.Dataset", None)
    v2_ds = py_utils.rgetattr(tf, "compat.v2.data.Dataset", None)
    if v1_ds is not None:
        dataset_types.append(v1_ds)
    if v2_ds is not None:
        dataset_types.append(v2_ds)
    return isinstance(ds, tuple(dataset_types))
def create(path, saltenv=None):
    '''join `path` and `saltenv` into a 'salt://' URL.'''
    if salt.utils.platform.is_windows():
        path = salt.utils.path.sanitize_win_path(path)
    path = salt.utils.data.decode(path)
    # Carry the salt environment as a query string, when given.
    query = 'saltenv={0}'.format(saltenv) if saltenv else ''
    # Build a file:// URL, then swap the scheme for salt:// by slicing
    # off the 'file:///' prefix.
    url = salt.utils.data.decode(urlunparse(('file', '', path, '', query, '')))
    return 'salt://{0}'.format(url[len('file:///'):])
def send(self, **req_kwargs):
    """Send an authenticated request to a Google API.

    Automatically refreshes the access token and retries on 401 responses,
    up to ``self.RETRY_CNT`` times.

    Args:
        **req_kwargs: Arbitrary keyword arguments to pass to Requests.

    Return:
        dict: The parsed JSON response.

    Raises:
        APIException: If the server returns an error.
        LoginException: If :py:meth:`login` has not been called.
    """
    attempts = 0
    while True:
        response = self._send(**req_kwargs).json()
        if 'error' not in response:
            return response
        error = response['error']
        # Anything but an auth failure is fatal immediately.
        if error['code'] != 401:
            raise exception.APIException(error['code'], error)
        if attempts >= self.RETRY_CNT:
            raise exception.APIException(error['code'], error)
        logger.info('Refreshing access token')
        self._auth.refresh()
        attempts += 1
def get_dom(self) -> str:
    """Retrieve the current DOM for the step, caching it once the step has
    finished running."""
    if self.is_running:
        # A running step changes constantly; never cache its DOM.
        return self.dumps()
    if self.dom is not None:
        return self.dom
    self.dom = self.dumps()
    return self.dom
def FromData(cls, stream, json_data, http=None, auto_transfer=None, **kwds):
    """Create a new Download object from a stream and serialized data.

    Raises InvalidDataError when the serialized data lacks any of the
    required keys.
    """
    info = json.loads(json_data)
    missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys())
    if missing_keys:
        raise exceptions.InvalidDataError('Invalid serialization data, missing keys: %s' % (', '.join(missing_keys)))
    download = cls.FromStream(stream, **kwds)
    # An explicit auto_transfer argument wins over the serialized value.
    if auto_transfer is not None:
        download.auto_transfer = auto_transfer
    else:
        download.auto_transfer = info['auto_transfer']
    # Restore private state via the name-mangled attribute names.
    setattr(download, '_Download__progress', info['progress'])
    setattr(download, '_Download__total_size', info['total_size'])
    download._Initialize(  # pylint: disable=protected-access
        http, info['url'])
    return download
def fetchThreads(self, thread_location, before=None, after=None, limit=None):
    """Get all threads in thread_location.

    Threads will be sorted from newest to oldest.

    :param thread_location: models.ThreadLocation: INBOX, PENDING, ARCHIVED or OTHER
    :param before: Fetch only thread before this epoch (in ms) (default all threads)
    :param after: Fetch only thread after this epoch (in ms) (default all threads)
    :param limit: The max. amount of threads to fetch (default all threads)
    :return: :class:`models.Thread` objects
    :rtype: list
    :raises: FBchatException if request failed
    """
    threads = []
    last_thread_timestamp = None
    while True:
        # break if limit is exceeded
        if limit and len(threads) >= limit:
            break
        # fetchThreadList returns at max 20 threads before last_thread_timestamp (included)
        candidates = self.fetchThreadList(before=last_thread_timestamp, thread_location=thread_location)
        if len(candidates) > 1:
            threads += candidates[1:]
        else:
            # End of threads
            break
        last_thread_timestamp = threads[-1].last_message_timestamp
        # FB returns a sorted list of threads; once past the window, stop.
        if (before is not None and int(last_thread_timestamp) > before) or (after is not None and int(last_thread_timestamp) < after):
            break
    # Return only threads between before and after (if set).
    # FIX: the previous implementation removed items from `threads` while
    # iterating over it, which skips the element following each removal;
    # build a filtered list instead.
    if before is not None or after is not None:
        kept = []
        for t in threads:
            last_message_timestamp = int(t.last_message_timestamp)
            if (before is not None and last_message_timestamp > before) or (after is not None and last_message_timestamp < after):
                continue
            kept.append(t)
        threads = kept
    if limit and len(threads) > limit:
        return threads[:limit]
    return threads
def _type_digest ( self , config : bool ) -> Dict [ str , Any ] :
"""Return receiver ' s type digest .
Args :
config : Specifies whether the type is on a configuration node .""" | res = { "base" : self . yang_type ( ) }
if self . name is not None :
res [ "derived" ] = self . name
return res |
def hasField(self, name):
    """Returns true if field with field_name exists.

    @param name: Field Name
    @return: Boolean
    """
    if self._autoFixNames:
        name = self._fixName(name)
    # FIX: dict.has_key() was removed in Python 3; the `in` operator is
    # the equivalent (and Python-2-compatible) membership test.
    return name in self._fieldAttrDict
def symmetrize(self, max_n=10, tolerance=0.3, epsilon=1e-3):
    """Returns a symmetrized molecule

    The equivalent atoms obtained via
    :meth:`~Cartesian.get_equivalent_atoms`
    are rotated, mirrored... unto one position.
    Then the average position is calculated.
    The average position is rotated, mirrored... back with the inverse
    of the previous symmetry operations, which gives the
    symmetrized molecule.
    This operation is repeated iteratively ``max_n`` times at maximum
    until the difference between subsequently symmetrized structures is
    smaller than ``epsilon``.

    Args:
        max_n (int): Maximum number of iterations.
        tolerance (float): Tolerance for detecting symmetry.
            Gets passed as Argument into
            :class:`~pymatgen.analyzer.symmetry.PointGroupAnalyzer`.
        epsilon (float): If the elementwise absolute difference of two
            subsequently symmetrized structures is smaller epsilon,
            the iteration stops before ``max_n`` is reached.

    Returns:
        dict: The returned dictionary has three possible keys:

        ``sym_mol``:
            A symmetrized molecule :class:`~Cartesian`
        ``eq_sets``:
            A dictionary of indices mapping to sets of indices,
            each key maps to indices of all equivalent atoms.
            The keys are guaranteed to be not symmetry-equivalent.
        ``sym_ops``:
            Twofold nested dictionary.
            ``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` unto ``j``.
    """
    # Convert to the pymatgen representation and delegate the iterative
    # symmetrization to the helper.
    mg_mol = self.get_pymatgen_molecule()
    eq = iterative_symmetrize(mg_mol, max_n=max_n, tolerance=tolerance, epsilon=epsilon)
    # Convert the equivalence data back into this class's representation.
    self._convert_eq(eq)
    return eq
def run_interactive(source_x, source_o, timeout=None, memlimit=None, cgroup=None, cgroup_path=None):
    """Challenge source_x vs source_o under time/memory constraints.

    Runs the two player programs inside a Lua referee (``run.lua --server``),
    optionally wrapped in ``timeout`` and a cgroup for resource limits, and
    yields one tuple per move: ``(xo, result, log)`` where ``xo`` is 'x'/'o',
    ``result`` is either ``('error', message)`` or
    ``['state_coords', [state, coords]]``, and ``log`` is the move's log text.

    :param source_x: source code for player x (presumably Lua — confirm)
    :param source_o: source code for player o
    :param timeout: wall-clock limit in seconds for the whole contest
    :param memlimit: memory limit in bytes (for the Lua interpreter and
        everything under it)
    :param cgroup: existing cgroup where to put this contest
    :param cgroup_path: filesystem path of the cgroup hierarchy
    """
    msg = msgpack.packb([source_x, source_o])
    run_lua = os.path.join(os.path.dirname(__file__), 'run.lua')
    server = [os.getenv("LUA", "lua"), run_lua, '--server']
    args = dict(bufsize=0xffff, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    if timeout is not None:
        # Prepend GNU `timeout`; it exits with code 124 on expiry.
        server = ['timeout', '%.3f' % timeout] + server
    with limit_ram(memlimit, cgroup, cgroup_path) as cg:
        if cg is not None:
            server = ['cgexec', '-g', cg] + server
        with subprocess.Popen(server, **args) as f:
            send_payload(f.stdin, msg)
            xo, stop = 'o', False
            while not stop:
                msg = get_payload(f.stdout)
                if msg == b'':
                    # Empty payload: the server died mid-move, so the player
                    # whose turn it was NOT is the one at fault.
                    xo = flip(xo)
                    # because bad thing happened during next exec
                    f.wait()
                    # it has to shutdown properly first
                    if f.returncode == 124:
                        yield xo, ('error', "timeout"), ""
                    else:
                        errmsg = "probably OOM (%d)" % f.returncode
                        yield xo, ('error', errmsg), ""
                    stop = True
                else:
                    # msgpack gives back bytes; decode the text fields.
                    [xo, moveresult, log] = msgpack.unpackb(msg)
                    xo, log = xo.decode('utf8'), log.decode('utf8')
                    if moveresult[0] == b'error':
                        yield xo, ('error', moveresult[1].decode('utf8')), ""
                        stop = True
                    elif moveresult[0] == b'state_coords':
                        state = moveresult[1][0].decode('utf8')
                        coords = moveresult[1][1]
                        # Terminal states end the contest after this yield.
                        if state == 'draw' or state == 'x' or state == 'o':
                            stop = True
                        yield xo, ['state_coords', [state, coords]], ""
def accuracy_study(tdm=None, u=None, s=None, vt=None, verbosity=0, **kwargs):
    """Reconstruct the term-document matrix and measure error as SVD dims are truncated.

    Args:
        tdm (pd.DataFrame): original term-document matrix.
        u (pd.DataFrame): left singular vectors (terms x dims).
        s (array-like): singular values.
        vt (pd.DataFrame): right singular vectors (dims x documents).
        verbosity (int): if truthy, print intermediate matrices and errors.

    Returns:
        list[float]: RMSE of the reconstruction, first using all dimensions,
        then after zeroing out trailing dimensions one at a time
        (dims len(s)-1 down to 1, cumulatively).
    """
    def _rmse(reconstruction):
        # Root-mean-square elementwise reconstruction error.
        # np.prod replaces np.product, which was removed in NumPy 2.0.
        return np.sqrt(((reconstruction - tdm).values.flatten() ** 2).sum()
                       / np.prod(tdm.shape))

    smat = np.zeros((len(u), len(vt)))
    np.fill_diagonal(smat, s)
    smat = pd.DataFrame(smat, columns=vt.index, index=u.index)
    if verbosity:
        print()
        print('Sigma:')
        print(smat.round(2))
        print()
        print('Sigma without zeroing any dim:')
        print(np.diag(smat.round(2)))
    tdm_prime = u.values.dot(smat.values).dot(vt.values)
    if verbosity:
        print()
        print('Reconstructed Term-Document Matrix')
        print(tdm_prime.round(2))
    err = [_rmse(tdm_prime)]
    if verbosity:
        print()
        print('Error without reducing dimensions:')
        print(err[-1])
    smat2 = smat.copy()
    # Zero out singular values from the last dimension inwards; the zeroing
    # is cumulative, so each step reconstructs with one fewer dimension.
    for numdim in range(len(s) - 1, 0, -1):
        smat2.iloc[numdim, numdim] = 0
        if verbosity:
            print('Sigma after zeroing out dim {}'.format(numdim))
            print(np.diag(smat2.round(2)))
        tdm_prime2 = u.values.dot(smat2.values).dot(vt.values)
        err += [_rmse(tdm_prime2)]
        if verbosity:
            print('Error after zeroing out dim {}'.format(numdim))
            print(err[-1])
    return err
def normalize(trainingset):
    """Build an encoder that morphs the input signal to a mean of 0 and
    scales the signal strength by dividing with the standard deviation
    (rather than forcing a [0, 1] range).

    Statistics are computed once from *trainingset*; the returned encoder can
    then be applied to any dataset of instances carrying a ``features`` array.

    :param trainingset: iterable of instances with a ``features`` ndarray.
    :return: ``encoder(dataset)`` which transforms each instance's
        ``features`` in place and returns the dataset.
    """
    training_data = np.array([instance.features for instance in trainingset])
    means = training_data.mean(axis=0)
    stds = training_data.std(axis=0)
    # Constant features (std == 0) are only centered, never divided,
    # to avoid division by zero.
    nonzero = stds != 0

    def encoder(dataset):
        for instance in dataset:
            # BUG FIX: the original never subtracted the mean, despite the
            # documented "mean of 0" contract.
            features = instance.features - means
            features[nonzero] = features[nonzero] / stds[nonzero]
            instance.features = features
        return dataset
    # end

    return encoder
def buffer_typechecks(self, call_id, payload):
    """Collect typecheck notes from *payload* into the internal buffer.

    Notes are only recorded while typecheck buffering is active.
    """
    if not self.currently_buffering_typechecks:
        return
    self.buffered_notes.extend(payload['notes'])
def find(self, searchText, layers, contains=True, searchFields="", sr="", layerDefs="", returnGeometry=True, maxAllowableOffset="", geometryPrecision="", dynamicLayers="", returnZ=False, returnM=False, gdbVersion=""):
    """Perform the map service `find` operation.

    Issues a GET against ``<service url>/find`` with the supplied search
    parameters and returns the matches wrapped as :class:`Feature` objects.
    """
    as_bool = self._convert_boolean
    params = {
        "f": "json",
        "searchText": searchText,
        "contains": as_bool(contains),
        "searchFields": searchFields,
        "sr": sr,
        "layerDefs": layerDefs,
        "returnGeometry": as_bool(returnGeometry),
        "maxAllowableOffset": maxAllowableOffset,
        "geometryPrecision": geometryPrecision,
        "dynamicLayers": dynamicLayers,
        "returnZ": as_bool(returnZ),
        "returnM": as_bool(returnM),
        "gdbVersion": gdbVersion,
        "layers": layers,
    }
    res = self._get(self._url + "/find", params,
                    securityHandler=self._securityHandler,
                    proxy_url=self._proxy_url,
                    proxy_port=self._proxy_port)
    return [Feature(r) for r in res['results']]
def _mine_send(self, tag, data):
    '''Send mine data to the master'''
    channel = salt.transport.client.ReqChannel.factory(self.opts)
    # Attach this minion's auth token before sending.
    data['tok'] = self.tok
    try:
        return channel.send(data)
    except SaltReqTimeoutError:
        log.warning('Unable to send mine data to master.')
        return None
    finally:
        # Always release the channel, whether the send succeeded or not.
        channel.close()
def follows(self, uri):
    """Follow a remote user by uri (username@domain).

    Returns a `user dict`_.
    """
    # NOTE: __generate_params builds the request body from this function's
    # local variables via locals() (here: just `uri`), so no new locals may
    # be introduced before this call.
    params = self.__generate_params(locals())
    return self.__api_request('POST', '/api/v1/follows', params)
def getUsers(context, roles, allow_empty=True):
    """Present a DisplayList containing users in the specified list of roles.

    :param context: context used to acquire the portal_membership tool.
    :param roles: roles to search members for.
    :param allow_empty: when True, prepend an empty ('', '') choice.
    :return: DisplayList of (user id, fullname) pairs sorted by fullname;
        the user id is used when no fullname is set.
    """
    mtool = getToolByName(context, 'portal_membership')
    # Replaces the fragile `cond and a or b` idiom with a real conditional.
    pairs = [['', '']] if allow_empty else []
    users = mtool.searchForMembers(roles=roles)
    for user in users:
        uid = user.getId()
        fullname = user.getProperty('fullname')
        if not fullname:
            fullname = uid
        pairs.append((uid, fullname))
    # Key-based sort replaces the Python-2-only cmp-function form, which
    # raises TypeError on Python 3.
    pairs.sort(key=lambda pair: pair[1])
    return DisplayList(pairs)
def _handle_parens(self, children, start, formats):
    """Changes `children` and returns new start.

    Consumes the closing parens required by *formats* from the source
    cursor, appends the covered span to *children*, then walks backwards
    from *start* to find the matching opening parens and prepends that
    span. Returns the (possibly moved) start offset.
    """
    opens, closes = self._count_needed_parens(formats)
    # Remember where the source cursor was before consuming ')' tokens.
    old_end = self.source.offset
    new_end = None
    for i in range(closes):
        # consume() advances the cursor; [1] is presumably the end offset
        # of the consumed token — TODO confirm against Source.consume.
        new_end = self.source.consume(')')[1]
    if new_end is not None:
        if self.children:
            children.append(self.source[old_end:new_end])
    # Scan backwards once per needed opening paren.
    new_start = start
    for i in range(opens):
        new_start = self.source.rfind_token('(', 0, new_start)
    if new_start != start:
        if self.children:
            # children is deque-like: the opening span goes in front.
            children.appendleft(self.source[new_start:start])
        start = new_start
    return start
def clear_request(name=None):
    '''.. versionadded:: 2017.7.3

    Clear out the state execution request without executing it

    :param name: when given, remove only that named request and rewrite the
        request file; when None, delete the whole request file.
    :return: True on success (or when there was nothing to clear); False
        when *name* was given but no such request exists.

    CLI Example:

    .. code-block:: bash

        salt '*' state.clear_request
    '''
    notify_path = os.path.join(__opts__['cachedir'], 'req_state.p')
    serial = salt.payload.Serial(__opts__)
    if not os.path.isfile(notify_path):
        # Nothing pending; treat as already cleared.
        return True
    if not name:
        try:
            os.remove(notify_path)
        except (IOError, OSError):
            # Best effort: a vanished/unremovable file still counts as cleared.
            pass
    else:
        req = check_request()
        if name in req:
            req.pop(name)
        else:
            return False
        # Restrict the rewritten request file to the owner (umask 0o077).
        with salt.utils.files.set_umask(0o077):
            try:
                if salt.utils.platform.is_windows():
                    # Make sure cache file isn't read-only
                    __salt__['cmd.run']('attrib -R "{0}"'.format(notify_path))
                with salt.utils.files.fopen(notify_path, 'w+b') as fp_:
                    serial.dump(req, fp_)
            except (IOError, OSError):
                log.error('Unable to write state request file %s. Check permission.', notify_path)
    return True
def contains_point(self, p):
    """Return whether this node or one of its children overlaps point *p*."""
    # Check the intervals stored at this node first.
    if any(interval.contains_point(p) for interval in self.s_center):
        return True
    # Descend into the right child when p is past the center, else the left.
    branch = self[p > self.x_center]
    return branch and branch.contains_point(p)
def instance_variables(self):
    """Returns all instance variables in the class, sorted
    alphabetically as a list of `pydoc.Variable`. Instance variables
    are attributes of `self` defined in a class's `__init__` method.
    """
    # A list comprehension replaces filter(); on Python 3 filter() returns
    # a lazy iterator, breaking the documented "list" contract.
    return [o for o in self.doc_init.values()
            if isinstance(o, Variable) and self.module._docfilter(o)]
def get_rules(self, description=None):
    '''Return the extraction rules that match a given description.

    @description - The description to match against (case-insensitive).

    Returns the matching extraction rules, or all rules when no
    description is provided.
    '''
    if not description:
        return self.extract_rules
    needle = description.lower()
    return [rule for rule in self.extract_rules if rule['regex'].search(needle)]
def _sign(self, params):
    '''Generate API sign code.

    Normalizes every value in *params* to a UTF-8 byte string in place,
    then returns the uppercase MD5 hex digest of APP_SECRET followed by
    the sorted "<key><value>" concatenation.

    NOTE: Python 2 only (iteritems, unicode, md5 over a str).
    '''
    # Mutating values (not keys) while iterating is safe in Python 2.
    for k, v in params.iteritems():
        # Canonicalize each supported type to its string form.
        if type(v) == int:
            v = str(v)
        elif type(v) == float:
            # Floats are fixed to two decimals for a stable signature.
            v = '%.2f' % v
        elif type(v) in (list, set):
            v = ','.join([str(i) for i in v])
        elif type(v) == bool:
            v = 'true' if v else 'false'
        elif type(v) == datetime.datetime:
            v = v.strftime('%Y-%m-%d %X')
        if type(v) == unicode:
            params[k] = v.encode('utf-8')
        else:
            params[k] = v
    # Sort by key so the signature is order-independent.
    src = self.APP_SECRET + ''.join(["%s%s" % (k, v) for k, v in sorted(params.iteritems())])
    return md5(src).hexdigest().upper()
def print_to_file(object_name):
    """Write *object_name* to ``pyoutput.txt`` in the current working directory.

    Lists and dicts are serialized as pretty-printed JSON; strings are
    written verbatim.

    :param object_name: object of type str, list, or dict.
    :raises TypeError: if *object_name* is not a str, list, or dict.
    :return: None. Output is saved to ./pyoutput.txt.
    """
    if isinstance(object_name, str):
        output = object_name
    elif isinstance(object_name, (list, dict)):
        # BUG FIX: the original called json.dumps(object) — the `object`
        # builtin — instead of the argument `object_name`.
        output = json.dumps(object_name, indent=4)
    else:
        # Fail loudly instead of writing None (which raised an opaque
        # TypeError from file.write in the original).
        raise TypeError('print_to_file expects str, list, or dict, got %s'
                        % type(object_name).__name__)
    with open('pyoutput.txt', 'w') as filehandler:
        filehandler.write(output)
def pid(self):
    """Return the pool ID used for connection pooling.

    The ID is the MD5 hex digest of ``"<ClassName>:<uri>"``.

    :rtype: str
    """
    key = ':'.join([self.__class__.__name__, self._uri])
    return hashlib.md5(key.encode('utf-8')).hexdigest()
def visit_Variable(self, node):
    """Visitor for `Variable` AST node.

    Looks the variable up in the symbol table and raises when the
    lookup yields no symbol.
    """
    var_name = node.identifier.name
    if self.table[var_name] is None:
        raise SementicError(f"Variable `{var_name}` is not declared.")
def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
    """Produce additional matches for predictions with only low-quality matches.

    For each ground-truth, find the prediction(s) with maximum overlap
    (including ties); for each such prediction, restore the match it had in
    ``all_matches`` — even if that quality is low. Mutates *matches* in place.
    """
    # Best overlap each ground-truth achieves with any prediction.
    best_per_gt, _ = match_quality_matrix.max(dim=1)
    # Boolean mask of every (gt, pred) pair attaining that best value,
    # so ties are kept.
    tie_mask = match_quality_matrix == best_per_gt[:, None]
    best_pairs = torch.nonzero(tie_mask)
    # Column 1 of each (gt index, prediction index) row holds the
    # prediction index; restore its original (possibly low-quality) match.
    pred_inds_to_update = best_pairs[:, 1]
    matches[pred_inds_to_update] = all_matches[pred_inds_to_update]
def writef(notebook, nb_file, fmt=None):
    """Write *notebook* to the file named *nb_file*.

    The special name ``'-'`` writes to standard output instead; otherwise
    the format is derived from the file extension and any missing parent
    directories are created before writing UTF-8 text.
    """
    if nb_file == '-':
        # '-' is the conventional alias for stdout.
        write(notebook, sys.stdout, fmt)
        return
    extension = os.path.splitext(nb_file)[1]
    fmt = long_form_one_format(copy(fmt or {}), update={'extension': extension})
    create_prefix_dir(nb_file, fmt)
    with io.open(nb_file, 'w', encoding='utf-8') as stream:
        write(notebook, stream, fmt)
def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path, stdin=None, stdout=None, stderr=None, exec_args=None):
    """Returns a (multi-line) String representing the queue script, e.g. PBS script.

    Uses the template_file along with internal parameters to create the script.

    Args:
        job_name: Name of the job.
        launch_dir: (str) The directory the job will be launched in.
        executable: String with the name of the executable to be executed
            or list of commands.
        qout_path: Path of the Queue manager output file.
        qerr_path: Path of the Queue manager error file.
        stdin: Optional stdin redirection passed to the MPI runner.
        stdout: Optional stdout redirection passed to the MPI runner.
        stderr: Optional stderr redirection passed to the MPI runner.
        exec_args: List of arguments passed to executable (used only if
            executable is a string, default: empty).
    """
    # PbsPro does not accept job_names longer than 15 chars.
    if len(job_name) > 14 and isinstance(self, PbsProAdapter):
        job_name = job_name[:14]
    # Construct the header for the Queue Manager.
    qheader = self._make_qheader(job_name, qout_path, qerr_path)
    # Add the bash section.
    se = ScriptEditor()
    # Cd to launch_dir immediately.
    se.add_line("cd " + os.path.abspath(launch_dir))
    if self.setup:
        se.add_comment("Setup section")
        se.add_lines(self.setup)
        se.add_emptyline()
    if self.modules:
        # stderr is redirected to mods.err file: module load 2>> mods.err
        se.add_comment("Load Modules")
        se.add_line("module purge")
        se.load_modules(self.modules)
        se.add_emptyline()
    se.add_comment("OpenMp Environment")
    if self.has_omp:
        se.declare_vars(self.omp_env)
        se.add_emptyline()
    else:
        # Explicitly pin to one thread when OpenMP is not in use.
        se.declare_vars({"OMP_NUM_THREADS": 1})
    if self.shell_env:
        se.add_comment("Shell Environment")
        se.declare_vars(self.shell_env)
        se.add_emptyline()
    if self.pre_run:
        se.add_comment("Commands before execution")
        se.add_lines(self.pre_run)
        se.add_emptyline()
    # Construct the string to run the executable with MPI and mpi_procs.
    if is_string(executable):
        line = self.mpi_runner.string_to_run(self, executable, stdin=stdin, stdout=stdout, stderr=stderr, exec_args=exec_args)
        se.add_line(line)
    else:
        # A list/tuple of shell commands is emitted verbatim.
        assert isinstance(executable, (list, tuple))
        se.add_lines(executable)
    if self.post_run:
        se.add_emptyline()
        se.add_comment("Commands after execution")
        se.add_lines(self.post_run)
    return qheader + se.get_script_str() + "\n"
def configure(self, config):
    """Configure the component by passing configuration parameters.

    :param config: configuration parameters to be set.
    """
    self._connections.extend(ConnectionParams.many_from_config(config))
def countStewards(self) -> int:
    """Count the stewards recorded in the pool transaction store.

    Note: iterates the entire ledger, which is inefficient; a production
    use case should rely on an efficient storage mechanism instead.
    """
    # THIS SHOULD NOT BE DONE FOR PRODUCTION
    count = 0
    for _, txn in self.ledger.getAllTxn():
        if get_type(txn) == NYM and get_payload_data(txn).get(ROLE) == STEWARD:
            count += 1
    return count
def get_soundcloud_api_playlist_data(playlist_id):
    """Fetch a playlist from the SoundCloud API and return the parsed JSON."""
    endpoint = ("https://api.soundcloud.com/playlists/%s"
                "?representation=full"
                "&client_id=02gUJC0hH2ct1EGOcYXQIzRFU91c72Ea"
                "&app_version=1467724310") % (playlist_id)
    return requests.get(endpoint).json()
def _is_data_from_today ( self , data_point ) :
"""Takes a DataPoint from SESConnection . get _ send _ statistics ( ) and returns
True if it is talking about the current date , False if not .
: param dict data _ point : The data point to consider .
: rtype : bool
: returns : True if this data _ point is for today , False if not ( probably
yesterday ) .""" | today = datetime . date . today ( )
raw_timestr = data_point [ 'Timestamp' ]
dtime = datetime . datetime . strptime ( raw_timestr , '%Y-%m-%dT%H:%M:%SZ' )
return today . day == dtime . day |
def display_weyl(decomps):
    """Construct and display a 3D plot of canonical coordinates.

    :param decomps: iterable of (tx, ty, tz) canonical-coordinate triples;
        each triple is plotted as one point inside the Weyl chamber.
    """
    tx, ty, tz = list(zip(*decomps))
    # Configure LaTeX-rendered serif labels for the figure.
    rcParams['axes.labelsize'] = 24
    rcParams['font.family'] = 'serif'
    rcParams['font.serif'] = ['Computer Modern Roman']
    rcParams['text.usetex'] = True
    fig = pyplot.figure()
    ax = Axes3D(fig)
    # The decomposition points themselves.
    ax.scatter(tx, ty, tz)
    ax.plot((1,), (1,), (1,))
    # Wireframe edges of the chamber.
    ax.plot((0, 1, 1 / 2, 0, 1 / 2, 1, 1 / 2, 1 / 2), (0, 0, 1 / 2, 0, 1 / 2, 0, 1 / 2, 1 / 2), (0, 0, 0, 0, 1 / 2, 0, 0, 1 / 2))
    ax.plot((0, 1 / 2, 1, 1 / 2, 1 / 2), (0, 1 / 4, 0, 1 / 4, 1 / 2), (0, 1 / 4, 0, 1 / 4, 0))
    # Landmark gate coordinates inside the chamber.
    points = [(0, 0, 0), (1 / 4, 0, 0), (1 / 2, 0, 0), (3 / 4, 0, 0), (1, 0, 0), (1 / 4, 1 / 4, 0), (1 / 2, 1 / 4, 0), (3 / 4, 1 / 4, 0), (1 / 2, 1 / 2, 0), (1 / 4, 1 / 4, 1 / 4), (1 / 2, 1 / 4, 1 / 4), (3 / 4, 1 / 4, 1 / 4), (1 / 2, 1 / 2, 1 / 4), (1 / 2, 1 / 2, 1 / 2)]
    ax.scatter(*zip(*points))
    # Small offset so labels do not overlap their markers.
    eps = 0.04
    ax.text(0, 0, 0 - 2 * eps, 'I', ha='center')
    ax.text(1, 0, 0 - 2 * eps, 'I', ha='center')
    ax.text(1 / 2, 1 / 2, 0 - 2 * eps, 'iSWAP', ha='center')
    ax.text(1 / 2, 1 / 2, 1 / 2 + eps, 'SWAP', ha='center')
    ax.text(1 / 2, 0, 0 - 2 * eps, 'CNOT', ha='center')
    # More coordinate labels
    # ax.text(1/4 - eps, 1/4, 1/4, '$\\sqrt{SWAP}$', ha='right')
    # ax.text(3/4 + eps, 1/4, 1/4, '$\\sqrt{SWAP}^\\dagger$', ha='left')
    # ax.text(1/4, 0, 0-2*eps, '$\\sqrt{{CNOT}}$', ha='center')
    # ax.text(3/4, 0, 0-2*eps, '$\\sqrt{{CNOT}}$', ha='center')
    # ax.text(1/2, 1/4, 0-2*eps, 'B', ha='center')
    # ax.text(1/2, 1/4, 1/4 + eps, 'ECP', ha='center')
    # ax.text(1/4, 1/4, 0-2*eps, '$\\sqrt{iSWAP}$', ha='center')
    # ax.text(3/4, 1/4, 0-2*eps, '$\\sqrt{iSWAP}$', ha='center')
    # ax.text(1/2, 1/2 + eps, 1/4, 'PSWAP(1/2)', ha='left')
    ax.set_xlim(0, 1)
    ax.set_ylim(-1 / 4, 3 / 4)
    ax.set_zlim(-1 / 4, 3 / 4)
    # Get rid of the panes
    ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    # Get rid of the spines
    ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
    ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
    ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
    # Get rid of the ticks
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_zticks([])
    pyplot.show()
def active_thresholds_value_maps(keywords, exposure_key):
    """Helper to retrieve active value maps or thresholds for an exposure.

    :param keywords: Hazard layer keywords.
    :type keywords: dict

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :returns: Active thresholds or value maps, or None when nothing is
        defined / active for the exposure.
    :rtype: dict
    """
    continuous = keywords['layer_mode'] == layer_mode_continuous['key']
    # Old-style keywords carry a flat 'classification' entry.
    if 'classification' in keywords:
        return keywords['thresholds'] if continuous else keywords['value_map']
    source = keywords['thresholds'] if continuous else keywords['value_maps']
    classifications = source.get(exposure_key)
    if classifications is None:
        return None
    # Only one classification can be active at a time.
    for value in classifications.values():
        if value['active']:
            return value['classes']
    return None
def normalize_example_nlp(task, example, is_infer, vocab_type, vocab_offset, max_input_length, max_target_length, fixed_train_length):
    """Normalize the examples from different tasks so they can be merged.

    This function is specific to NLP tasks and normalizes them so that in the
    end the example only has "targets" and "task_id". For tasks that originally
    have inputs, this is done by appending task_id to the inputs and prepending
    targets, so normalized_targets = inputs task_id targets. For classification
    tasks, targets are constructed by spelling out the class.

    Args:
        task: the Problem class of the task we are normalizing.
        example: a dictionary of tensors, the example to normalize.
        is_infer: bool, whether we are performing inference or not.
        vocab_type: the type of vocabulary in use.
        vocab_offset: integer, offset index for subword vocabularies.
        max_input_length: maximum length to cut inputs to.
        max_target_length: maximum length to cut targets to.
        fixed_train_length: set length to this size if > 0.

    Returns:
        a dictionary of tensors, like example, after normalizing, which in this
        case means that it only has "targets" and "task_id" as feature.
    """
    if task.has_inputs:
        # remove EOS token
        example["inputs"] = example["inputs"][:-1]
    if hasattr(task, "class_labels"):
        # Classification task: encode the class label itself as the target.
        if vocab_type == text_problems.VocabType.CHARACTER:
            # TODO(urvashik): handle the case where num_labels > 9
            # Spell the label as a digit character (+50 shifts into the
            # character vocabulary range — TODO confirm offset).
            example["targets"] = tf.cast(discretization.int_to_bit(example["targets"], 1, base=10) + 50, tf.int64)
            example["targets"] = tf.squeeze(example["targets"], axis=[-1])
        elif vocab_type == text_problems.VocabType.SUBWORD:
            example["targets"] = vocab_offset + example["targets"]
    else:
        # sequence with inputs and targets eg: summarization
        if task.has_inputs:
            if max_input_length > 0:
                example["inputs"] = example["inputs"][:max_input_length]
            # Do not truncate targets during inference with beam decoding.
            if max_target_length > 0 and not is_infer:
                example["targets"] = example["targets"][:max_target_length]

    def make_constant_shape(x, size):
        # Truncate or zero-pad x to exactly `size` elements.
        x = x[:size]
        xlen = tf.shape(x)[0]
        x = tf.pad(x, [[0, size - xlen]])
        return tf.reshape(x, [size])

    if task.has_inputs:
        if is_infer:
            # At inference only the inputs are fed; mark them with task_id.
            concat_list = [example["inputs"], [task.task_id]]
            example["inputs"] = tf.concat(concat_list, axis=0)
        else:
            # Training: fold inputs + task_id marker into the targets.
            inputs = example.pop("inputs")
            concat_list = [inputs, [task.task_id], example["targets"]]
            example["targets"] = tf.concat(concat_list, axis=0)
            if fixed_train_length > 0:
                example["targets"] = make_constant_shape(example["targets"], fixed_train_length)
    else:
        # No inputs: just prepend the task_id marker to the targets.
        concat_list = [[task.task_id], example["targets"]]
        example["targets"] = tf.concat(concat_list, axis=0)
        if not is_infer and fixed_train_length > 0:
            example["targets"] = make_constant_shape(example["targets"], fixed_train_length)
    example["task_id"] = tf.constant([task.task_id], dtype=tf.int64)
    return example
def load_env(file):
    """Generate the environment used for the 'org.restore' method.

    Reads the YAML env file, ensures each organization has an
    ``applications`` list, expands starter-kit / meta application lists,
    and resolves relative application file paths against the env file's
    directory.

    :param file: path to the env file.
    :return: the loaded (and augmented) env dict.
    """
    # Context manager closes the handle (the original leaked it), and
    # safe_load avoids executing arbitrary YAML tags from the file.
    with open(file) as stream:
        env = yaml.safe_load(stream)
    for org in env.get('organizations', []):
        if not org.get('applications'):
            org['applications'] = []
        if org.get('starter-kit'):
            kit_meta = get_starter_kit_meta(org.get('starter-kit'))
            for meta_app in get_applications_from_metadata(kit_meta):
                org['applications'].append(meta_app)
        if org.get('meta'):
            for meta_app in get_applications_from_metadata(org.get('meta')):
                org['applications'].append(meta_app)
        for app in org.get('applications', []):
            if app.get('file'):
                # Application paths are relative to the env file location.
                app['file'] = os.path.realpath(os.path.join(os.path.dirname(file), app['file']))
    return env
def smooth_1D(arr, n=10, smooth_type="flat") -> np.ndarray:
    """Smooth 1D data using a window function.

    Edge effects will be present.

    Parameters
    ----------
    arr : array_like
        Input array, 1D.
    n : int (optional)
        Window length.
    smooth_type : {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'} (optional)
        Type of window function to convolve data with.
        'flat' window will produce a moving average smoothing.

    Returns
    -------
    array_like
        Smoothed 1D array.
    """
    # validate input
    if arr.ndim != 1:
        raise wt_exceptions.DimensionalityError(1, arr.ndim)
    if arr.size < n:
        raise wt_exceptions.ValueError("Input array size must be larger than window size.")
    if n < 3:
        # Windows shorter than 3 points cannot smooth anything.
        return arr
    # build the window via a constructor dispatch table
    named_windows = {
        "hanning": np.hanning,
        "hamming": np.hamming,
        "bartlett": np.bartlett,
        "blackman": np.blackman,
    }
    if smooth_type == "flat":
        window = np.ones(n, dtype=arr.dtype)
    elif smooth_type in named_windows:
        window = named_windows[smooth_type](n)
    else:
        raise wt_exceptions.ValueError("Given smooth_type, {0}, not available.".format(str(smooth_type)))
    # convolve with the normalized window; 'same' keeps the input length
    return np.convolve(window / window.sum(), arr, mode="same")
def tags(self):
    """Collect the tags of all contained items.

    # Returns

    `set[str]`

    > The union of all keys across the contained items
    """
    found = set()
    for item in self:
        found.update(item.keys())
    return found
def _filter_queryset(self, queryset):
    """Filter queryset by entity, label and position.

    Due to a bug in django-filter these filters have to be applied
    manually:

    https://github.com/carltongibson/django-filter/issues/883

    When given, `label` and `position` lists must be the same length as
    `entities`; each triple is AND-ed into the queryset as one filter.
    """
    entities = self.request.query_params.getlist('entity')
    labels = self.request.query_params.getlist('label')
    positions = self.request.query_params.getlist('position')
    if labels and len(labels) != len(entities):
        raise exceptions.ParseError('If `labels` query parameter is given, also `entities` ' 'must be given and they must be of the same length.')
    if positions and len(positions) != len(entities):
        raise exceptions.ParseError('If `positions` query parameter is given, also `entities` ' 'must be given and they must be of the same length.')
    if entities:
        # zip_longest pads missing labels/positions with None so a bare
        # entity filter is still applied.
        for entity, label, position in zip_longest(entities, labels, positions):
            filter_params = {'entities__pk': entity}
            if label:
                filter_params['relationpartition__label'] = label
            if position:
                filter_params['relationpartition__position'] = position
            # Successive .filter calls AND the per-entity conditions.
            queryset = queryset.filter(**filter_params)
    return queryset
def get_user_info(self, save_to_config=True):
    """Get user info and settings from Filemail.

    :param save_to_config: Whether or not to save settings to config file
    :type save_to_config: ``bool``
    :rtype: ``dict`` containing user information and default settings.
    """
    method, url = get_URL('user_get')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
    }
    res = getattr(self.session, method)(url, params=payload)
    if res.status_code != 200:
        # Delegate non-200 responses to the error handler.
        hellraiser(res)
        return None
    settings = res.json()['user']
    if save_to_config:
        self.config.update(settings)
    return settings
def _domain_event_watchdog_cb(conn, domain, action, opaque):
    '''Domain watchdog events handler.

    Forwards the libvirt watchdog event to the Salt event bus, translating
    the numeric `action` into its VIR_DOMAIN_EVENT_WATCHDOG_* name.

    :param conn: libvirt connection the event came from
    :param domain: domain the watchdog fired for
    :param action: numeric libvirt watchdog action code
    :param opaque: dict carrying at least the 'event' tag to publish under
    '''
    _salt_send_domain_event(opaque, conn, domain, opaque['event'], {'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_WATCHDOG_', action)})
def sigmaT2(self, R, z, nsigma=None, mc=False, nmc=10000, gl=True, ngl=_DEFAULTNGL, **kwargs):
    """NAME:

       sigmaT2

    PURPOSE:

       calculate sigma_T^2 = <vT^2> - <vT>^2 by marginalizing over velocity

    INPUT:

       R - radius at which to calculate this (can be Quantity)

       z - height at which to calculate this (can be Quantity)

    OPTIONAL INPUT:

       nsigma - number of sigma to integrate the velocities over

       scipy.integrate.tplquad kwargs epsabs and epsrel

       mc= if True, calculate using Monte Carlo integration

       nmc= if mc, use nmc samples

       gl= if True, calculate using Gauss-Legendre integration

       ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension

    OUTPUT:

       sigma_T^2

    HISTORY:

       2012-07-30 - Written - Bovy (IAS@MPIA)
    """
    if mc:
        # Draw one sample set and reuse it for all three moments.
        surfmass, vrs, vts, vzs = self._vmomentdensity(R, z, 0., 0., 0., nsigma=nsigma, mc=mc, nmc=nmc, _returnmc=True, **kwargs)
        mvt = self._vmomentdensity(R, z, 0., 1., 0., nsigma=nsigma, mc=mc, nmc=nmc, _returnmc=False, _vrs=vrs, _vts=vts, _vzs=vzs, **kwargs) / surfmass
        return self._vmomentdensity(R, z, 0., 2., 0., nsigma=nsigma, mc=mc, nmc=nmc, _returnmc=False, _vrs=vrs, _vts=vts, _vzs=vzs, **kwargs) / surfmass - mvt ** 2.
    elif gl:
        # Reuse the Gauss-Legendre evaluation grid across the moments.
        surfmass, glqeval = self._vmomentdensity(R, z, 0., 0., 0., gl=gl, ngl=ngl, _returngl=True, **kwargs)
        mvt = self._vmomentdensity(R, z, 0., 1., 0., ngl=ngl, gl=gl, _glqeval=glqeval, **kwargs) / surfmass
        return self._vmomentdensity(R, z, 0., 2., 0., ngl=ngl, gl=gl, _glqeval=glqeval, **kwargs) / surfmass - mvt ** 2.
    else:  # pragma: no cover because this is too slow; a warning is shown
        surfmass = self._vmomentdensity(R, z, 0., 0., 0., nsigma=nsigma, mc=mc, nmc=nmc, **kwargs)
        # BUG FIX: the subtracted mean must use the FIRST moment (order 1.),
        # matching the mc/gl branches; the original squared <vT^2>/surfmass
        # instead of <vT>/surfmass.
        mvt = self._vmomentdensity(R, z, 0., 1., 0., nsigma=nsigma, mc=mc, nmc=nmc, **kwargs) / surfmass
        return self._vmomentdensity(R, z, 0., 2., 0., nsigma=nsigma, mc=mc, nmc=nmc, **kwargs) / surfmass - mvt ** 2.
def listen(self, listenip="", listenport=DEF_TFTP_PORT, timeout=SOCK_TIMEOUT):
    """Start a server listening on the supplied interface and port.

    This defaults to INADDR_ANY (all interfaces) and UDP port 69. You can
    also supply a different socket timeout value, if desired.

    Blocks in a select() loop, dispatching datagrams to per-client
    TftpContextServer sessions keyed by "ip:port", until either
    self.shutdown_immediately or self.shutdown_gracefully is set.
    """
    # NOTE(review): tftp_factory appears unused in this method; kept to
    # avoid changing side effects of construction — TODO confirm and drop.
    tftp_factory = TftpPacketFactory()
    # Don't use new 2.5 ternary operator yet
    # listenip = listenip if listenip else '0.0.0.0'
    if not listenip:
        listenip = '0.0.0.0'
    log.info("Server requested on ip %s, port %s" % (listenip, listenport))
    try:
        # FIXME - sockets should be non-blocking
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind((listenip, listenport))
        # Record the actual bound port (relevant when listenport was 0).
        _, self.listenport = self.sock.getsockname()
    except socket.error as err:
        # Reraise it for now.
        raise err
    self.is_running.set()
    log.info("Starting receive loop...")
    while True:
        log.debug("shutdown_immediately is %s" % self.shutdown_immediately)
        log.debug("shutdown_gracefully is %s" % self.shutdown_gracefully)
        if self.shutdown_immediately:
            log.warning("Shutting down now. Session count: %d" % len(self.sessions))
            self.sock.close()
            for key in self.sessions:
                self.sessions[key].end()
            # BUG FIX: sessions is a dict (indexed by "ip:port" keys
            # throughout this method); resetting it to a list changed its
            # type. Reset to an empty dict instead.
            self.sessions = {}
            break
        elif self.shutdown_gracefully:
            if not self.sessions:
                log.warning("In graceful shutdown mode and all " "sessions complete.")
                self.sock.close()
                break
        # Build the inputlist array of sockets to select() on: the main
        # listening socket plus one per active transfer session.
        inputlist = []
        inputlist.append(self.sock)
        for key in self.sessions:
            inputlist.append(self.sessions[key].sock)
        # Block until some socket has input on it.
        log.debug("Performing select on this inputlist: %s", inputlist)
        try:
            readyinput, readyoutput, readyspecial = select.select(inputlist, [], [], SOCK_TIMEOUT)
        except select.error as err:
            # BUG FIX: Python 3 exceptions are not subscriptable; err[0]
            # raised TypeError instead of detecting EINTR. Use err.args[0].
            # (Python >= 3.5 retries interrupted syscalls itself, PEP 475,
            # so this branch mainly matters on older interpreters.)
            if err.args[0] == EINTR:
                # Interrupted system call
                log.debug("Interrupted syscall, retrying")
                continue
            else:
                raise
        deletion_list = []
        # Handle the available data, if any. Maybe we timed-out.
        for readysock in readyinput:
            # Is the traffic on the main server socket? ie. new session?
            if readysock == self.sock:
                log.debug("Data ready on our main socket")
                buffer, (raddress, rport) = self.sock.recvfrom(MAX_BLKSIZE)
                log.debug("Read %d bytes", len(buffer))
                if self.shutdown_gracefully:
                    log.warning("Discarding data on main port, " "in graceful shutdown mode")
                    continue
                # Forge a session key based on the client's IP and port,
                # which should safely work through NAT.
                key = "%s:%s" % (raddress, rport)
                if key not in self.sessions:
                    log.debug("Creating new server context for " "session key = %s" % key)
                    self.sessions[key] = TftpContextServer(raddress, rport, timeout, self.root, self.dyn_file_func, self.upload_open)
                    try:
                        self.sessions[key].start(buffer)
                    except TftpException as err:
                        deletion_list.append(key)
                        log.error("Fatal exception thrown from " "session %s: %s" % (key, str(err)))
                else:
                    log.warning("received traffic on main socket for " "existing session??")
                log.info("Currently handling these sessions:")
                for session_key, session in list(self.sessions.items()):
                    log.info(" %s" % session)
            else:
                # Must find the owner of this traffic.
                for key in self.sessions:
                    if readysock == self.sessions[key].sock:
                        log.debug("Matched input to session key %s" % key)
                        try:
                            self.sessions[key].cycle()
                            # A None state marks a finished transfer.
                            if self.sessions[key].state is None:
                                log.info("Successful transfer.")
                                deletion_list.append(key)
                        except TftpException as err:
                            deletion_list.append(key)
                            log.error("Fatal exception thrown from " "session %s: %s" % (key, str(err)))
                        # Break out of for loop since we found the correct
                        # session.
                        break
                else:
                    log.error("Can't find the owner for this packet. " "Discarding.")
        log.debug("Looping on all sessions to check for timeouts")
        now = time.time()
        for key in self.sessions:
            try:
                self.sessions[key].checkTimeout(now)
            except TftpTimeout as err:
                log.error(str(err))
                self.sessions[key].retry_count += 1
                if self.sessions[key].retry_count >= TIMEOUT_RETRIES:
                    log.debug("hit max retries on %s, giving up" % self.sessions[key])
                    deletion_list.append(key)
                else:
                    log.debug("resending on session %s" % self.sessions[key])
                    self.sessions[key].state.resendLast()
        log.debug("Iterating deletion list.")
        for key in deletion_list:
            log.info('')
            log.info("Session %s complete" % key)
            if key in self.sessions:
                log.debug("Gathering up metrics from session before deleting")
                self.sessions[key].end()
                metrics = self.sessions[key].metrics
                if metrics.duration == 0:
                    log.info("Duration too short, rate undetermined")
                else:
                    log.info("Transferred %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration))
                    log.info("Average rate: %.2f kbps" % metrics.kbps)
                log.info("%.2f bytes in resent data" % metrics.resent_bytes)
                log.info("%d duplicate packets" % metrics.dupcount)
                log.debug("Deleting session %s" % key)
                del self.sessions[key]
                log.debug("Session list is now %s" % self.sessions)
            else:
                log.warning("Strange, session %s is not on the deletion list" % key)
    self.is_running.clear()
    log.debug("server returning from while loop")
    self.shutdown_gracefully = self.shutdown_immediately = False
def create_policy(self, account, client, document, name, arn=None):
    """Create a new IAM policy or add a version to an existing one.

    If *arn* is given, the policy is updated in place: a new default
    version is uploaded, and when the policy already holds the maximum of
    five versions, the oldest non-default version is deleted first to
    make room. Without *arn*, a brand new policy named *name* is created.

    Args:
        account (:obj:`Account`): Account to create the policy on
        client (:obj:`boto3.client`): A boto3 client object
        document (`str`): Policy document
        name (`str`): Name of the policy to create/update
        arn (`str`): Optional ARN for the policy to update

    Returns:
        `dict`
    """
    if not arn and not name:
        raise ValueError('create_policy must be called with either arn or name in the argument list')

    if not arn:
        # Fresh policy: a single create call, audited as such.
        created = client.create_policy(PolicyName=name, PolicyDocument=document)
        auditlog(
            event='iam.check_roles.create_policy',
            actor=self.ns,
            data={'account': account.account_name, 'policyName': name, 'policyArn': arn}
        )
        return created

    # Existing policy: IAM stores at most 5 versions, so drop the oldest
    # non-default version first when the limit has been reached.
    version_info = client.list_policy_versions(PolicyArn=arn)
    if len(version_info['Versions']) >= 5:
        candidates = sorted(
            (v for v in version_info['Versions'] if not v['IsDefaultVersion']),
            key=lambda v: v['CreateDate']
        )
        oldest = candidates[0]
        self.log.info('Deleting oldest IAM Policy version {}/{}'.format(arn, oldest['VersionId']))
        client.delete_policy_version(PolicyArn=arn, VersionId=oldest['VersionId'])
        auditlog(
            event='iam.check_roles.delete_policy_version',
            actor=self.ns,
            data={'account': account.account_name, 'policyName': name, 'policyArn': arn, 'versionId': oldest['VersionId']}
        )
    return client.create_policy_version(PolicyArn=arn, PolicyDocument=document, SetAsDefault=True)
def get_starred(self):
    """
    :calls: `GET /user/starred <http://developer.github.com/v3/activity/starring>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
    """
    # Lazily page through the authenticated user's starred repositories.
    endpoint = "/user/starred"
    url_parameters = None
    return github.PaginatedList.PaginatedList(
        github.Repository.Repository,
        self._requester,
        endpoint,
        url_parameters,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.