signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def sync_deleted_attachments(self, api_post):
    """Remove Posts with post_type=attachment that have been removed from the
    given Post on the WordPress side.

    Logic:
        - get the list of Posts with post_type=attachment whose parent_id =
          this post_id
        - get the corresponding list from WP API
        - perform set difference
        - delete extra local attachments if any

    :param api_post: the API data for the Post
    :return: None
    """
    existing_IDs = set(
        Post.objects.filter(site_id=self.site_id,
                            post_type="attachment",
                            parent__icontains='"ID":{}'.format(api_post["ID"]))
        .values_list("wp_id", flat=True)
    )
    # can't delete what we don't have
    if existing_IDs:
        api_IDs = set()
        # call the API again to get the full list of attachment posts whose
        # parent is this post's wp_id
        path = "sites/{}/posts/".format(self.site_id)
        params = {"type": "attachment", "parent_id": api_post["ID"],
                  "fields": "ID", "number": 100}
        page = 1
        response = self.get(path, params)
        if not response.ok:
            logger.warning("Response NOT OK! status_code=%s\n%s",
                           response.status_code, response.text)
            # BUGFIX: bail out on a failed initial request. Previously the
            # code fell through with api_IDs empty, so the set difference
            # below wrongly deleted every local attachment.
            return
        # loop around since there may be more than 100 attachments
        # (example: really large slideshows); hard cap of 10 pages
        while response.ok and response.text and page < 10:
            api_json = response.json()
            api_attachments = api_json.get("posts", [])
            # iteratively extend the set to include this page's IDs
            api_IDs |= set(a["ID"] for a in api_attachments)
            # get next page
            page += 1
            next_page_handle = api_json.get("meta", {}).get("next_page")
            if next_page_handle:
                params["page_handle"] = next_page_handle
            else:
                # no more pages left
                break
            response = self.get(path, params)
            if not response.ok:
                logger.warning("Response NOT OK! status_code=%s\n%s",
                               response.status_code, response.text)
                return
        # perform set difference
        to_remove = existing_IDs - api_IDs
        # purge the extras
        if to_remove:
            Post.objects.filter(site_id=self.site_id,
                                post_type="attachment",
                                parent__icontains='"ID":{}'.format(api_post["ID"]),
                                wp_id__in=list(to_remove)).delete()
|
def process_readme():
    """Split README.md into INTRO.md and INSTALL.md for the documentation build."""
    with open('../../README.md', 'r') as readme_file:
        text = readme_file.read()

    # retitle and fix relative links/emphasis markers for the docs build
    text = text.replace('# eo-learn', '# Introduction').replace('docs/source/', '')
    text = text.replace('**`', '**').replace('`**', '**')

    # split into chapters at every '## ' heading, dropping inline images
    sections = [[]]
    for raw_line in text.split('\n'):
        if raw_line.strip().startswith('## '):
            sections.append([])
        sections[-1].append('<p></p>' if raw_line.startswith('<img') else raw_line)

    joined = ['\n'.join(section) for section in sections]
    intro_parts = [part for part in joined
                   if not (part.startswith('## Install') or part.startswith('## Documentation'))]
    install_parts = [part for part in joined if part.startswith('## Install')]

    with open(os.path.join(MARKDOWNS_FOLDER, 'INTRO.md'), 'w') as out_file:
        out_file.write('\n'.join(intro_parts))
    with open(os.path.join(MARKDOWNS_FOLDER, 'INSTALL.md'), 'w') as out_file:
        out_file.write('\n'.join(install_parts))
|
def bwasw(args, opts):
    """%prog bwasw database.fasta long_read.fastq

    Wrapper for `bwa bwasw`. Output will be long_read.sam.
    """
    dbfile, readfile = args
    dbfile = check_index(dbfile)
    samfile, _, unmapped = get_samfile(readfile, dbfile,
                                       bam=opts.bam, unmapped=opts.unmapped)
    # skip when the output already exists and is newer than the index
    if not need_update(dbfile, samfile):
        logging.error("`{0}` exists. `bwa bwasw` already run.".format(samfile))
        return "", samfile

    cmd = " ".join(["bwa", "bwasw"] + list(args))
    cmd = "{0} -t {1} {2}".format(cmd, opts.cpus, opts.extra)
    return cmd, samfile
|
def get_opt(self, opt):
    """Return the value associated with the given command line option.

    Returns None if the option does not exist in the options list.

    @param opt: command line option
    """
    # dict.get() already yields None for missing keys; this also replaces
    # the Python-2-only dict.has_key() call.
    return self.__options.get(opt)
|
def _writeDict(self, o):
    """Write C{dict} to the data stream.

    @param o: The C{dict} data to be encoded to the AMF0 data stream.
    """
    for name, value in o.iteritems():
        # AMF0 keys must be strings; coerce integer keys
        if type(name) in python.int_types:
            name = str(name)
        self.serialiseString(name)
        self.writeElement(value)
|
def _iter_grouped_shortcut ( self ) :
"""Fast version of ` _ iter _ grouped ` that yields Variables without
metadata"""
|
var = self . _obj . variable
for indices in self . _group_indices :
yield var [ { self . _group_dim : indices } ]
|
def load(self):
    """Get the list of all objects.

    @return RETURN: A ForemanItem list
    """
    grouped = self.api.list(self.objName, limit=self.searchLimit).values()
    # flatten the grouped API results into a single list of records
    flattened = [entry for group in grouped for entry in group]
    return {
        entry[self.index]: ItemPuppetClass(self.api, entry['id'], self.objName,
                                           self.payloadObj, entry)
        for entry in flattened
    }
|
def all_ends_of_turn(self, root):
    """Simulate the root and continue generating ends of turn until
    everything has reached mana drain.

    Warning on random fill:
        If random fill is used together with this method, it will generate
        basically forever due to the huge number of possibilities it
        introduces.

    Arguments:
        root: a start state with no parent

    Note on mana drain:
        Generates but does not continue simulation of mana drains.

    Note on run time:
        This simulates a complete turn for each eot provided, rather than
        just one branch at a time. The method will only stop generating
        when all possibilities have been simulated or filtered.
    """
    # simple confirmation that the root is actually a root.
    # otherwise it may seem to work but would be totally out of spec
    if root.parent:
        raise ValueError('Unexpectedly received a node with a parent for'
                         ' root:\n{}'.format(root))
    # run a single turn for each eot from a stack
    jobs = [root]
    while jobs:
        # pop a random pending end-of-turn rather than strict LIFO order
        random_job_index = random.randint(0, len(jobs) - 1)
        start_eot = jobs.pop(random_job_index)
        # special case: handle the root once
        if start_eot is root:
            kw_root = {'root': start_eot}
        else:
            kw_root = {'root_eot': start_eot}
        for eot in self.ends_of_one_state(**kw_root):
            # only continue simulating non-mana drains
            if not eot.is_mana_drain:
                jobs.append(eot)
            yield eot
|
def resource(
    url_prefix_or_resource_cls: Union[str, Type[Resource]],
    resource_cls: Optional[Type[Resource]] = None,
    *,
    member_param: Optional[str] = None,
    unique_member_param: Optional[str] = None,
    rules: Optional[Iterable[Union[Route, RouteGenerator]]] = None,
    subresources: Optional[Iterable[RouteGenerator]] = None,
) -> RouteGenerator:
    """This function is used to register a :class:`Resource`'s routes.

    Example usage::

        routes = lambda: [
            prefix('/api/v1', [
                resource('/products', ProductResource),
            ]),
        ]

    Or with the optional prefix argument::

        routes = lambda: [
            resource('/products', ProductResource),
        ]

    Specify ``rules`` to only include those routes from the resource::

        routes = lambda: [
            resource('/users', UserResource, rules=[
                get('/', UserResource.list),
                get('/<int:id>', UserResource.get),
            ]),
        ]

    Specify ``subresources`` to nest resource routes::

        routes = lambda: [
            resource('/users', UserResource, subresources=[
                resource('/roles', RoleResource),
            ]),
        ]

    Subresources can be nested as deeply as you want, however it's not
    recommended to go more than two or three levels deep at the most,
    otherwise your URLs will become unwieldy.

    :param url_prefix_or_resource_cls: The resource class, or a url prefix for
        all of the rules from the resource class passed as the second argument.
    :param resource_cls: If a url prefix was given as the first argument, then
        the resource class must be passed as the second argument.
    :param member_param: Optionally override the controller's member_param
        attribute.
    :param unique_member_param: Optionally override the controller's
        unique_member_param attribute.
    :param rules: An optional list of rules to limit/customize the routes
        included from the resource.
    :param subresources: An optional list of subresources.
    """
    url_prefix, resource_cls = _normalize_args(
        url_prefix_or_resource_cls, resource_cls, _is_resource_cls)
    # fall back on the resource's Meta for any option not given explicitly
    member_param = member_param or resource_cls.Meta.member_param
    unique_member_param = unique_member_param or resource_cls.Meta.unique_member_param
    url_prefix = url_prefix or resource_cls.Meta.url_prefix
    routes = getattr(resource_cls, CONTROLLER_ROUTES_ATTR)
    if rules is not None:
        # keep only the standard resource methods, then overlay the explicit rules
        routes = {method_name: method_routes
                  for method_name, method_routes in routes.items()
                  if method_name in resource_cls.resource_methods}
        for route in rules:
            routes[route.method_name] = route
    yield from _normalize_controller_routes(routes.values(), resource_cls,
                                            url_prefix=url_prefix,
                                            member_param=member_param,
                                            unique_member_param=unique_member_param)
    for subroute in _reduce_routes(subresources):
        # tag the subroute with its parent resource info, then rewrite the
        # rule on a copy so the nested URL includes the parent member param
        subroute._parent_resource_cls = resource_cls
        subroute._parent_member_param = member_param
        subroute._unique_member_param = unique_member_param
        subroute = subroute.copy()
        subroute.rule = rename_parent_resource_param_name(
            subroute,
            rule=join(url_prefix, member_param, subroute.rule,
                      trailing_slash=subroute.rule.endswith('/')))
        yield subroute
|
def add_node(self, node, weight=1):
    """Add *node* (with optional *weight*) to the circle and rebuild it."""
    self._weights[node] = weight
    self._nodes.add(node)
    self._rebuild_circle()
|
def auth_string(self):
    """Get the auth string, fetching a fresh token when needed.

    If the token is expired and auto refresh is enabled, a new token
    is fetched.

    :return: the auth string
    :rtype: str
    :raises TokenExpired: token is expired and auto refresh is disabled
    """
    if not self._token:
        self.execute()
    if self._token.expired:
        if not self.auto_refresh:
            raise TokenExpired()
        self.execute()
    return 'Bearer {}'.format(self._token.access_token)
|
def login(self):
    """Logon to the server.

    :return: True when the client is ready to run, False when the
        connection attempt failed.
    """
    if self.args.snmp_force:
        # Force SNMP instead of Glances server
        self.client_mode = 'snmp'
    else:
        # First of all, trying to connect to a Glances server
        if not self._login_glances():
            return False
    # Try SNMP mode
    # NOTE(review): this also covers the case where _login_glances switched
    # client_mode to 'snmp' as a fallback — confirm in _login_glances
    if self.client_mode == 'snmp':
        if not self._login_snmp():
            return False
    # Load limits from the configuration file
    # Each client can choose its owns limits
    logger.debug("Load limits from the client configuration file")
    self.stats.load_limits(self.config)
    # Init screen
    if self.quiet:
        # In quiet mode, nothing is displayed
        logger.info("Quiet mode is ON: Nothing will be displayed")
    else:
        self.screen = GlancesCursesClient(config=self.config, args=self.args)
    # Return True: OK
    return True
|
def get_transition(self, input_symbol, state):
    """Return (action, next state) for an input_symbol and state.

    This does not modify the FSM state, so calling this method has no side
    effects. Normally you do not call this method directly. It is called by
    process().

    The lookup goes from the most specific to the least specific:

    1. Check state_transitions[] that match exactly the tuple
       (input_symbol, state).
    2. Check state_transitions_any[] that match (state) — i.e. a specific
       state and ANY input_symbol.
    3. Check if the default_transition is defined. This catches any
       input_symbol and any state (handler for errors, undefined states,
       or defaults).
    4. No transition was defined: raise an exception.
    """
    lookup_key = (input_symbol, state)
    if lookup_key in self.state_transitions:
        return self.state_transitions[lookup_key]
    if state in self.state_transitions_any:
        return self.state_transitions_any[state]
    if self.default_transition is not None:
        return self.default_transition
    raise ExceptionFSM('Transition is undefined: (%s, %s).' %
                       (str(input_symbol), str(state)))
|
def summary(self, h):
    """Summarize the results for each model for h steps of the algorithm.

    Parameters
    ----------
    h : int
        How many steps to run the aggregating algorithm on

    Returns
    -------
    pd.DataFrame of losses for each model
    """
    _, model_losses, _ = self.run(h=h)
    frame = pd.DataFrame(model_losses)
    # first row is the ensemble, the rest follow the model order
    frame.index = ['Ensemble'] + self.model_names
    frame.columns = [self.loss_name]
    return frame
|
def is_header(line):
    """A line with data only in its first column is a Section/Subsection header.

    :param line: Line to check
    :return: boolean - If line is header
    """
    if len(line) == 1:
        return True
    # header iff every column after the first is empty/falsy
    return not any(value for position, value in enumerate(line) if position > 0)
|
def update(self, table, data_list, matched_field=None, return_cols='id'):
    """Bulk-update rows by batching one mogrified UPDATE statement per row.

    Much faster (~2x in tests with 10k & 100k rows and 4 cols) than
    executemany().

    TODO: Is there a limit of length the query can be? If so handle it.

    :param table: name of the table to update
    :param data_list: dict or list of dicts of column values; each dict must
        contain matched_field, which selects the row to update
    :param matched_field: column used in the WHERE clause; defaults to 'id'
    :param return_cols: column name or list of names for a RETURNING clause
    :return: rows from cur.fetchall(), None when there is nothing to fetch,
        or [] when data_list is empty
    """
    # Create deepcopy so the original list does not get modified
    data_list = copy.deepcopy(data_list)
    if matched_field is None:
        # Assume the id field
        logger.info("Matched field not defined, assuming the `id` field")
        matched_field = 'id'
    # Make sure that `data_list` is a list
    if not isinstance(data_list, list):
        data_list = [data_list]
    if len(data_list) == 0:
        # No need to continue
        return []
    # Make sure return_cols is a list; '' disables the RETURNING clause
    if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
        return_cols = ''
    elif not isinstance(return_cols, list):
        return_cols = [return_cols]
    if len(return_cols) > 0:
        return_cols = 'RETURNING ' + ','.join(return_cols)
    # Data in the list must be dicts (just check the first one)
    if not isinstance(data_list[0], dict):
        logger.critical("Data must be a list of dicts")
        # Do not return here, let the exception handle the error that will be
        # thrown when the query runs
    try:
        with self.getcursor() as cur:
            query_list = []
            # TODO: change to return data from the database, not just what
            # you passed in
            return_list = []
            for row in data_list:
                if row.get(matched_field) is None:
                    # skip rows that cannot be matched to a database row
                    logger.debug("Cannot update row. Missing field {field} in data {data}"
                                 .format(field=matched_field, data=row))
                    logger.error("Cannot update row. Missing field {field} in data"
                                 .format(field=matched_field))
                    continue
                # Pull matched_value from data to be updated and remove that key
                matched_value = row.get(matched_field)
                del row[matched_field]
                query = "UPDATE {table} SET {data} WHERE {matched_field}=%s {return_cols}"\
                    .format(table=table,
                            data=','.join("%s=%%s" % u for u in row.keys()),
                            matched_field=matched_field,
                            return_cols=return_cols)
                values = list(row.values())
                values.append(matched_value)
                values = _check_values(values)
                # mogrify interpolates the values client-side so all the
                # statements can be joined into one batch below
                query = cur.mogrify(query, values)
                query_list.append(query)
                return_list.append(matched_value)
            finial_query = b';'.join(query_list)
            cur.execute(finial_query)
            try:
                return cur.fetchall()
            except Exception:
                # the batch produced no result set (e.g. no RETURNING clause)
                return None
    except Exception as e:
        logger.exception("Error updating data")
        logger.debug("Error updating data: {data}".format(data=data_list))
        raise e.with_traceback(sys.exc_info()[2])
|
def make_residmap_plots(self, maps, roi=None, **kwargs):
    """Make plots from the output of
    `~fermipy.gtanalysis.GTAnalysis.residmap`.

    Parameters
    ----------
    maps : dict
        Output dictionary of `~fermipy.gtanalysis.GTAnalysis.residmap`.
    roi : `~fermipy.roi_model.ROIModel`
        ROI Model object. Generate markers at the positions of the sources
        in this ROI.
    zoom : float
        Crop the image by this factor. If None then no crop is applied.
    """
    fmt = kwargs.get('format', self.config['format'])
    figsize = kwargs.get('figsize', self.config['figsize'])
    workdir = kwargs.pop('workdir', self.config['fileio']['workdir'])
    use_weights = kwargs.pop('use_weights', False)
    # FIXME, how to set this:
    no_contour = False
    zoom = kwargs.get('zoom', None)
    kwargs.setdefault('graticule_radii', self.config['graticule_radii'])
    kwargs.setdefault('label_ts_threshold', self.config['label_ts_threshold'])
    cmap = kwargs.setdefault('cmap', self.config['cmap'])
    cmap_resid = kwargs.pop('cmap_resid', self.config['cmap_resid'])
    kwargs.setdefault('catalogs', self.config['catalogs'])
    if no_contour:
        sigma_levels = None
    else:
        # fixed sigma contour levels plus a log-spaced tail
        sigma_levels = [-5, -3, 3, 5, 7] + list(np.logspace(1, 3, 17))
    load_bluered_cmap()
    prefix = maps['name']
    mask = maps['mask']
    if use_weights:
        # histogram only the unmasked pixels, and zero out masked pixels
        # in every map before plotting
        sigma_hist_data = maps['sigma'].data[maps['mask'].data.astype(bool)]
        maps['sigma'].data *= maps['mask'].data
        maps['data'].data *= maps['mask'].data
        maps['model'].data *= maps['mask'].data
        maps['excess'].data *= maps['mask'].data
    else:
        sigma_hist_data = maps['sigma'].data
    # significance map
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['sigma'], roi=roi, **kwargs)
    p.plot(vmin=-5, vmax=5, levels=sigma_levels,
           cb_label='Significance [$\sigma$]', interpolation='bicubic',
           cmap=cmap_resid, zoom=zoom)
    plt.savefig(utils.format_filename(workdir, 'residmap_sigma',
                                      prefix=[prefix], extension=fmt))
    plt.close(fig)
    # make and draw histogram
    fig, ax = plt.subplots(figsize=figsize)
    nBins = np.linspace(-6, 6, 121)
    data = np.nan_to_num(sigma_hist_data)
    # find best fit parameters
    mu, sigma = norm.fit(data.flatten())
    # make and draw the histogram (clip values to the histogram range first)
    data[data > 6.0] = 6.0
    data[data < -6.0] = -6.0
    n, bins, patches = ax.hist(data.flatten(), nBins, density=True,
                               histtype='stepfilled', facecolor='green',
                               alpha=0.75)
    # make and draw best fit line
    y = norm.pdf(bins, mu, sigma)
    ax.plot(bins, y, 'r--', linewidth=2)
    # reference standard normal curve for comparison
    y = norm.pdf(bins, 0.0, 1.0)
    ax.plot(bins, y, 'k', linewidth=1)
    # labels and such
    ax.set_xlabel(r'Significance ($\sigma$)')
    ax.set_ylabel('Probability')
    paramtext = 'Gaussian fit:\n'
    paramtext += '$\\mu=%.2f$\n' % mu
    paramtext += '$\\sigma=%.2f$' % sigma
    ax.text(0.05, 0.95, paramtext, verticalalignment='top',
            horizontalalignment='left', transform=ax.transAxes)
    plt.savefig(utils.format_filename(workdir, 'residmap_sigma_hist',
                                      prefix=[prefix], extension=fmt))
    plt.close(fig)
    # shared color scale for the data and model count maps
    vmax = max(np.max(maps['data'].data), np.max(maps['model'].data))
    vmin = min(np.min(maps['data'].data), np.min(maps['model'].data))
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['data'], roi=roi, **kwargs)
    p.plot(cb_label='Counts', interpolation='bicubic', cmap=cmap,
           zscale='sqrt', vmin=vmin, vmax=vmax)
    plt.savefig(utils.format_filename(workdir, 'residmap_data',
                                      prefix=[prefix], extension=fmt))
    plt.close(fig)
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['model'], roi=roi, **kwargs)
    p.plot(cb_label='Counts', interpolation='bicubic', cmap=cmap,
           zscale='sqrt', vmin=vmin, vmax=vmax)
    plt.savefig(utils.format_filename(workdir, 'residmap_model',
                                      prefix=[prefix], extension=fmt))
    plt.close(fig)
    # excess map with the residual colormap
    fig = plt.figure(figsize=figsize)
    p = ROIPlotter(maps['excess'], roi=roi, **kwargs)
    p.plot(cb_label='Counts', interpolation='bicubic', cmap=cmap_resid)
    plt.savefig(utils.format_filename(workdir, 'residmap_excess',
                                      prefix=[prefix], extension=fmt))
    plt.close(fig)
|
def validate_milestones(self):
    """Review the arc element tree to ensure milestones appear in order.

    :return: the first out-of-order milestone, or None if all are in order
    """
    milestones = self.arc_root_node.get_children().filter(
        arc_element_type__contains='mile')
    highest_seen = 0
    for milestone in milestones:
        if milestone.milestone_seq < highest_seen:
            return milestone
        highest_seen = milestone.milestone_seq
    return None
|
def check_perm(self, request, resource):
    """Check permission.

    @param request: the HTTP request
    @param resource: the requested resource
    @raise Forbidden: if the user doesn't have access to the resource
    """
    required = self.get_perm_name(resource, request.method)
    if self._has_perm(request.user, required):
        return
    raise errors.Forbidden()
|
def pdf_doc_info(instance):
    """Ensure the keys of the 'document_info_dict' property of the pdf-ext
    extension of file objects are only valid PDF Document Information
    Dictionary Keys.

    Yields a JSONError for every invalid key found.
    """
    for object_id, obj in instance['objects'].items():
        if obj.get('type') != 'file':
            continue
        try:
            info_dict = obj['extensions']['pdf-ext']['document_info_dict']
        except KeyError:
            # no pdf-ext document info on this file object
            continue
        for info_key in info_dict:
            if info_key not in enums.PDF_DID:
                yield JSONError("The 'document_info_dict' property of "
                                "object '%s' contains a key ('%s') that is"
                                " not a valid PDF Document Information "
                                "Dictionary key." % (object_id, info_key),
                                instance['id'], 'pdf-doc-info')
|
def upload(self, engine, timeout=5, wait_for_finish=False, **kw):
    """Upload policy to specific device. Using wait_for_finish returns a
    poller thread for monitoring progress::

        policy = FirewallPolicy('_NSX_Master_Default')
        poller = policy.upload('myfirewall', wait_for_finish=True)
        while not poller.done():
            poller.wait(3)
            print(poller.task.progress)
        print("Task finished: %s" % poller.message())

    :param str engine: name of device to upload policy to
    :raises: TaskRunFailed
    :return: TaskOperationPoller
    """
    task_params = {'filter': engine}
    return Task.execute(self, 'upload', params=task_params,
                        timeout=timeout, wait_for_finish=wait_for_finish, **kw)
|
def convenience_calc_fisher_approx(self, params):
    """Calculate the BHHH approximation of the Fisher Information Matrix for
    this model/dataset.

    Note that this function name is INCORRECT with regard to the actual
    actions performed. The Mixed Logit model uses a placeholder for the BHHH
    approximation of the Fisher Information Matrix because the BHHH
    approximation is already being used to approximate the hessian.

    This placeholder allows calculation of a value for the 'robust' standard
    errors, even though such a value is not useful since it is not correct...
    """
    _, _, betas = self.convenience_split_params(params)
    # placeholder: a negative identity matrix sized by the number of betas
    return -1 * np.identity(betas.shape[0])
|
def _fix(node):
    """Fix the naive construction of the adjoint.

    See `fixes.py` for details.

    This function also returns the result of reaching definitions analysis so
    that `split` mode can use this to carry over the state from primal to
    adjoint.

    Args:
        node: A module with the primal and adjoint function definitions as
            returned by `reverse_ad`.

    Returns:
        node: A module with the primal and adjoint function with additional
            variable definitions and such added so that pushes onto the stack
            and gradient accumulations are all valid.
        defined: The variables defined at the end of the primal.
        reaching: The variable definitions that reach the end of the primal.
    """
    # Do reaching definitions analysis on primal and adjoint
    pri_cfg = cfg.CFG.build_cfg(node.body[0])
    defined = cfg.Defined()
    defined.visit(pri_cfg.entry)
    reaching = cfg.ReachingDefinitions()
    reaching.visit(pri_cfg.entry)
    # run the same analyses over the adjoint (node.body[1]); presumably
    # cfg.forward annotates the AST in place — confirm in cfg.forward
    cfg.forward(node.body[1], cfg.Defined())
    cfg.forward(node.body[1], cfg.ReachingDefinitions())
    # Remove pushes of variables that were never defined
    fixes.CleanStack().visit(node)
    fixes.FixStack().visit(node.body[0])
    # Change accumulation into definition if possible
    fixes.CleanGrad().visit(node.body[1])
    # Define gradients that might or might not be defined
    fixes.FixGrad().visit(node.body[1])
    return node, defined.exit, reaching.exit
|
def set_person(self, what, rep):
    """Set a person substitution.

    Equivalent to ``!person`` in RiveScript code.

    :param str what: The original text to replace.
    :param str rep: The text to replace it with.
        Set this to ``None`` to delete the substitution.
    """
    if rep is None:
        # Unset the variable. BUGFIX: the old code deleted the key and then
        # fell through, re-inserting ``None`` under the key.
        self._person.pop(what, None)
        return
    self._person[what] = rep
|
def xml_filter(self, content):
    r"""Filter and preprocess xml content.

    :param content: xml content
    :rtype: str
    """
    # aggressive whitespace stripping or a plain trim, per the 'strip' option
    content = utils.strip_whitespace(content, True) if self.__options['strip'] else content.strip()
    if not self.__options['encoding']:
        # no encoding configured: sniff it from the document, else use default
        encoding = self.guess_xml_encoding(content) or self.__encoding
        self.set_options(encoding=encoding)
    if self.__options['encoding'].lower() != self.__encoding:
        # convert the encoding and strip the xml declaration header
        content = self.strip_xml_header(
            content.decode(self.__options['encoding'],
                           errors=self.__options['errors']))
    if self.__options['unescape']:
        content = utils.html_entity_decode(content)
    return content
|
def put(self, endpoint, d, *args, **kwargs):
    """**put**

    Make a PUT call to a remote endpoint.

    Input:
        * An absolute endpoint
        * A data stream

    Output:
        * A :py:mod:`pygett.request.GettResponse` object
    """
    response = self._make_request(endpoint, type='PUT', data=d)
    return response
|
def _update_internal_column_state ( self , column_names ) :
"""Update the internal state with some ( possibly ) new columns
: param column _ names : an iterable which contains new column names"""
|
for k in column_names :
if k not in self . _column_name_idx :
self . _column_name_idx [ k ] = len ( self . _column_name_list )
self . _column_name_list . append ( k )
|
def get_mv_detail(self, mvid):
    """Get mv detail.

    :param mvid: mv id
    :return: the result of ``self.request``
    """
    endpoint = '{}/mv/detail?id={}'.format(uri, mvid)
    return self.request('GET', endpoint)
|
def position_at_end(self, block):
    """Position at the end of the basic *block*."""
    self._anchor = len(block.instructions)
    self._block = block
|
def reset(self):
    """Reset the sampler to its initial state.

    Note
    ----
    This will destroy the label cache, instrumental distribution and
    history of estimates.
    """
    super(OASISSampler, self).reset()
    self.strata.reset()
    self._BB_model.reset()
    # Array to record history of instrumental distributions
    if self.record_inst_hist:
        # one pmf column per iteration
        self._inst_pmf = np.zeros([self.strata.n_strata_, self._max_iter],
                                  dtype=float)
    else:
        # keep only the current pmf
        self._inst_pmf = np.zeros(self.strata.n_strata_, dtype=float)
|
def rs(data, n, unbiased=True):
    """Calculate an individual R/S value in the rescaled range approach for
    a given n.

    Note: This is just a helper function for hurst_rs and should not be
    called directly.

    Args:
        data (array-like of float):
            time series
        n (float):
            size of the subseries in which data should be split
    Kwargs:
        unbiased (boolean):
            if True, the standard deviation based on the unbiased variance
            (1/(N-1) instead of 1/N) will be used. This should be the default
            choice, since the true mean of the sequences is not known. This
            parameter should only be changed to recreate results of other
            implementations.
    Returns:
        float:
            (R/S)_n
    """
    series = np.asarray(data)
    total = len(series)
    n_subseqs = total // n
    # drop trailing values so the series divides evenly into subsequences
    series = series[: total - (total % n)]
    subseqs = np.reshape(series, (n_subseqs, n))
    # deviations from each subsequence mean
    centered = subseqs - np.mean(subseqs, axis=1).reshape((n_subseqs, 1))
    # cumulative deviate series per subsequence
    cumdev = np.cumsum(centered, axis=1)
    # range (max - min) of each cumulative deviate series
    ranges = np.ptp(cumdev, axis=1)
    # standard deviation of the raw subsequences; unbiased estimator by
    # default since the true mean is unknown
    stds = np.std(subseqs, axis=1, ddof=1 if unbiased else 0)
    # subsequences with zero range carry no information and must be excluded
    keep = ranges != 0
    ranges = ranges[keep]
    stds = stds[keep]
    if ranges.size == 0:
        # all values in data are equal => every range is zero
        return np.nan
    # mean of r/s along the subsequence index
    return np.mean(ranges / stds)
|
def foldOneLine(outbuf, input, lineLength=75):
    """Folding line procedure that ensures multi-byte utf-8 sequences are not
    broken across lines.

    Writes *input* to *outbuf*, inserting "\\r\\n " folds so no output line
    exceeds *lineLength* bytes, and terminating the output with "\\r\\n".

    TO-DO: This all seems odd. Is it still needed, especially in python3?
    """
    if len(input) < lineLength:
        # Optimize for unfolded line case
        try:
            outbuf.write(bytes(input, 'UTF-8'))
        except Exception:
            # fall back on py2 syntax
            outbuf.write(str_(input))
    else:
        # Look for valid utf8 range and write that out
        start = 0
        written = 0
        counter = 0  # counts line size in bytes
        decoded = to_unicode(input)
        length = len(to_basestring(input))
        while written < length:
            s = decoded[start]  # take one char
            size = len(to_basestring(s))  # calculate it's size in bytes
            if counter + size > lineLength:
                # fold before this char would overflow the byte budget
                try:
                    outbuf.write(bytes("\r\n ", 'UTF-8'))
                except Exception:
                    # fall back on py2 syntax
                    outbuf.write("\r\n ")
                counter = 1  # one for space
            if str is unicode_type:
                outbuf.write(to_unicode(s))
            else:
                # fall back on py2 syntax
                outbuf.write(s.encode('utf-8'))
            written += size
            counter += size
            start += 1
    try:
        outbuf.write(bytes("\r\n", 'UTF-8'))
    except Exception:
        # fall back on py2 syntax
        outbuf.write("\r\n")
|
def download_from_search(query_str, folder, do_extract_text=True, max_results=None):
    """Save raw text files based on a search for papers on ScienceDirect.

    This performs a search to get PIIs, downloads the XML corresponding to
    the PII, extracts the raw text and then saves the text into a file
    in the designated folder.

    Parameters
    ----------
    query_str : str
        The query string to search with
    folder : str
        The local path to an existing folder in which the text files
        will be dumped
    do_extract_text : bool
        Choose whether to extract text from the xml, or simply save the raw
        xml files. Default is True, so text is extracted.
    max_results : int or None
        Default is None. If specified, limit the number of results to the
        given maximum.
    """
    for pii in get_piis(query_str)[:max_results]:
        txt_path = os.path.join(folder, '%s.txt' % pii)
        # skip papers we already saved
        if os.path.exists(txt_path):
            continue
        logger.info('Downloading %s' % pii)
        xml = download_article(pii, 'pii')
        # be polite to the API between downloads
        sleep(1)
        if not do_extract_text:
            with open(os.path.join(folder, '%s.xml' % pii), 'wb') as fh:
                fh.write(xml.encode('utf-8'))
            continue
        txt = extract_text(xml)
        if not txt:
            continue
        with open(txt_path, 'wb') as fh:
            fh.write(txt.encode('utf-8'))
    return
|
def _get_mirror_urls ( self , mirrors = None , main_mirror_url = None ) :
"""Retrieves a list of URLs from the main mirror DNS entry
unless a list of mirror URLs are passed ."""
|
if not mirrors :
mirrors = get_mirrors ( main_mirror_url )
# Should this be made " less random " ? E . g . netselect like ?
random . shuffle ( mirrors )
mirror_urls = set ( )
for mirror_url in mirrors : # Make sure we have a valid URL
if not ( "http://" or "https://" or "file://" ) in mirror_url :
mirror_url = "http://%s" % mirror_url
if not mirror_url . endswith ( "/simple" ) :
mirror_url = "%s/simple/" % mirror_url
mirror_urls . add ( mirror_url )
return list ( mirror_urls )
|
def dropEvent(self, event):
    """Handles a drop event."""
    dropped = event.mimeData().urls()[0]
    url_path = nativestring(dropped.toString())
    # local files can be used directly once the scheme is stripped
    if url_path.startswith('file:'):
        self.setFilepath(url_path.replace('file://', ''))
        return
    # otherwise download an icon from the web into the temp directory
    filename = os.path.basename(url_path)
    temp_path = os.path.join(nativestring(QDir.tempPath()), filename)
    try:
        urllib.urlretrieve(url_path, temp_path)
    except IOError:
        # download failed: leave the current filepath untouched
        return
    self.setFilepath(temp_path)
|
def _create_deployment_object(self, job_name, job_image, deployment_name, port=80, replicas=1, cmd_string=None, engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json', engine_dir='.'):
    """Create a kubernetes deployment for the job.

    Args:
        - job_name (string): Name of the job and deployment
        - job_image (string): Docker image to launch

    KWargs:
        - port (integer): Container port
        - replicas: Number of replica containers to maintain
        - cmd_string: Shell command run in the container before /app/deploy.sh
        - engine_json_file / engine_dir: accepted but not used by this method

    Returns:
        - The ExtensionsV1beta1Deployment object to launch
    """
    # sorry, quick hack that doesn't pass this stuff through to test it works.
    # TODO it also doesn't only add what is set :(
    security_context = None
    if 'security' in self.config['execution']:
        security_context = client.V1SecurityContext(run_as_group=self.group_id, run_as_user=self.user_id, run_as_non_root=self.run_as_non_root)
    # Create the environment variables and command to initiate IPP
    environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")
    launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
    # (removed leftover debug print of launch_args)
    # Configure the Pod template container. Only pass security_context when
    # one was built above; passing security_context=None explicitly is not
    # necessarily equivalent to omitting the argument.
    if security_context:
        container = client.V1Container(name=job_name, image=job_image, ports=[client.V1ContainerPort(container_port=port)], command=['/bin/bash'], args=launch_args, env=[environment_vars], security_context=security_context)
    else:
        container = client.V1Container(name=job_name, image=job_image, ports=[client.V1ContainerPort(container_port=port)], command=['/bin/bash'], args=launch_args, env=[environment_vars])
    # Create a secret to enable pulling images from secure repositories
    secret = None
    if self.secret:
        secret = client.V1LocalObjectReference(name=self.secret)
    # Create and configure a spec section
    template = client.V1PodTemplateSpec(metadata=client.V1ObjectMeta(labels={"app": job_name}), spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret]))
    # Create the specification of deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas, template=template)
    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(api_version="extensions/v1beta1", kind="Deployment", metadata=client.V1ObjectMeta(name=deployment_name), spec=spec)
    return deployment
|
def get_access_control_function():
    """Return a predicate for determining if a user can access the
    Rosetta views."""
    fn_path = getattr(settings, 'ROSETTA_ACCESS_CONTROL_FUNCTION', None)
    if fn_path is None:
        # no custom function configured: use the built-in default policy
        return is_superuser_staff_or_in_translators_group
    # Dynamically load the configured permissions function
    module_name, func_name = fn_path.rsplit('.', 1)
    return getattr(importlib.import_module(module_name), func_name)
|
def _fetch_option(cfg, ret_config, virtualname, attr_name):
    """Fetch a given option value from the config.

    ``cfg`` may be either a plain dict of options or a callable
    (``config.option``-style) taking ``(key, default)``; both paths are
    handled below.

    :param cfg: dict of options, or a callable config accessor
    :param ret_config: optional name of an override configuration set
    :param virtualname: returner's virtual name, used to build config keys
    :param attr_name: name of the option to fetch
    @see: :func:`get_returner_options`
    """
    # c_cfg is a dictionary returned from config.option for
    # any options configured for this returner.
    if isinstance(cfg, dict):
        c_cfg = cfg
    else:
        c_cfg = cfg('{0}'.format(virtualname), {})
    # e.g. "<virtualname>.<attr_name>"
    default_cfg_key = '{0}.{1}'.format(virtualname, attr_name)
    if not ret_config:
        # Using the default configuration key
        if isinstance(cfg, dict):
            if default_cfg_key in cfg:
                return cfg[default_cfg_key]
            else:
                return c_cfg.get(attr_name)
        else:
            return c_cfg.get(attr_name, cfg(default_cfg_key))
    # Using ret_config to override the default configuration key,
    # i.e. "<ret_config>.<virtualname>[.<attr_name>]"
    ret_cfg = cfg('{0}.{1}'.format(ret_config, virtualname), {})
    override_default_cfg_key = '{0}.{1}.{2}'.format(ret_config, virtualname, attr_name, )
    override_cfg_default = cfg(override_default_cfg_key)
    # Look for the configuration item in the override location
    ret_override_cfg = ret_cfg.get(attr_name, override_cfg_default)
    if ret_override_cfg:
        return ret_override_cfg
    # if not configuration item found, fall back to the default location.
    return c_cfg.get(attr_name, cfg(default_cfg_key))
|
def _native_size ( self ) :
"""A ( width , height ) 2 - tuple representing the native dimensions of the
image in EMU , calculated based on the image DPI value , if present ,
assuming 72 dpi as a default ."""
|
EMU_PER_INCH = 914400
horz_dpi , vert_dpi = self . _dpi
width_px , height_px = self . _px_size
width = EMU_PER_INCH * width_px / horz_dpi
height = EMU_PER_INCH * height_px / vert_dpi
return width , height
|
def add_exec_permission_to(target_file):
    """Add owner-execute permission to the file.

    :param target_file: the target file whose permission is to be changed
    """
    current_mode = os.stat(target_file).st_mode
    # preserve all existing mode bits, just OR in user-execute
    os.chmod(target_file, current_mode | stat.S_IXUSR)
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'pdf' ) and self . pdf is not None :
_dict [ 'pdf' ] = self . pdf . _to_dict ( )
if hasattr ( self , 'word' ) and self . word is not None :
_dict [ 'word' ] = self . word . _to_dict ( )
if hasattr ( self , 'html' ) and self . html is not None :
_dict [ 'html' ] = self . html . _to_dict ( )
if hasattr ( self , 'segment' ) and self . segment is not None :
_dict [ 'segment' ] = self . segment . _to_dict ( )
if hasattr ( self , 'json_normalizations' ) and self . json_normalizations is not None :
_dict [ 'json_normalizations' ] = [ x . _to_dict ( ) for x in self . json_normalizations ]
return _dict
|
def merge(blocks):
    """Merge the given blocks into a contiguous block of compressed data.

    Starting from the lowest-numbered block, scans each block's data for a
    SPECIAL_BYTE marker that either switches to another block or signals
    end-of-file, copying the data preceding the marker, until EOF is hit.

    :param blocks: dict of blocks, keyed by block number
    :rtype: a list of compressed bytes
    """
    # start with the lowest-numbered block
    current_block = blocks[sorted(blocks.keys())[0]]
    compressed_data = []
    eof = False
    while not eof:
        data_size_to_append = None
        next_block = None
        i = 0
        # Scan for the block-switch/EOF marker, stepping over escaped byte
        # pairs so marker bytes occurring inside data are not misread.
        while i < len(current_block.data) - 1:
            current_byte = current_block.data[i]
            next_byte = current_block.data[i + 1]
            if current_byte == RLE_BYTE:
                if next_byte == RLE_BYTE:
                    # escaped literal RLE byte: 2 bytes
                    i += 2
                else:
                    # RLE run: marker + value + count, 3 bytes
                    i += 3
            elif current_byte == SPECIAL_BYTE:
                if next_byte in SPECIAL_DEFAULTS:
                    # special default sequence: 3 bytes
                    i += 3
                elif next_byte == SPECIAL_BYTE:
                    # escaped literal special byte: 2 bytes
                    i += 2
                else:
                    # block switch or EOF marker: copy data up to here
                    data_size_to_append = i
                    # hit end of file
                    if next_byte == EOF_BYTE:
                        eof = True
                    else:
                        next_block = blocks[next_byte]
                    break
            else:
                i += 1
        assert data_size_to_append is not None, "Ran off the end of a " "block without encountering a block switch or EOF"
        compressed_data.extend(current_block.data[0:data_size_to_append])
        if not eof:
            assert next_block is not None, "Switched blocks, but did " "not provide the next block to switch to"
            current_block = next_block
    return compressed_data
|
def reboot(name, call=None):
    '''reboot a server by name
    :param name: name given to the machine
    :param call: call value in this case is 'action'
    :return: true if successful
    CLI Example:
    .. code-block:: bash
    salt-cloud -a reboot vm_name'''
    connection = get_conn()
    # look the node up by name, then ask the provider to reboot it
    node = get_node(connection, name)
    connection.modify_server_status(server_id=node['id'], action='REBOOT')
    return True
|
def transform_kwargs(self, **kwargs):
    """Transforms Python style kwargs into git command line options.

    Single-letter keys become short options (``-k`` or ``-kVALUE``);
    longer keys become long options (``--key`` or ``--key=VALUE``).
    A value of True produces a bare flag; False drops the option.
    """
    args = []
    for name, value in kwargs.items():
        is_flag = value is True
        if len(name) == 1:
            if is_flag:
                args.append("-%s" % name)
            elif type(value) is not bool:
                args.append("-%s%s" % (name, value))
        else:
            if is_flag:
                args.append("--%s" % dashify(name))
            elif type(value) is not bool:
                args.append("--%s=%s" % (dashify(name), value))
    return args
|
def options(self, request, *args, **kwargs):
    """Handles responding to requests for the OPTIONS HTTP verb."""
    reply = Response()
    # advertise the supported verbs; an OPTIONS response carries no body
    reply.headers['Allow'] = ', '.join(self.allowed_methods)
    reply.headers['Content-Length'] = '0'
    return reply
|
def write(path):
    """Writes the current process id to the given pidfile.

    The file is exclusively flock()ed for the duration of the write so two
    daemons cannot write the same pidfile concurrently.

    :type path: str
    :param path: The name of the pidfile.
    :raises IOError/OSError: if the file cannot be opened or locked.
    """
    pid = os.getpid()
    logging.info("Writing PID %s to '%s'", pid, path)
    # Text mode: the old version opened 'wb' and then wrote str(pid),
    # which raises TypeError on Python 3. ``with`` replaces the manual
    # try/finally close dance and still closes on any error.
    with open(path, 'w') as pidfile:
        # get a non-blocking exclusive lock
        fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        # clear out the file
        pidfile.seek(0)
        pidfile.truncate(0)
        # write the pid
        pidfile.write(str(pid))
|
def compute_path(self, start_x, start_y, dest_x, dest_y, diagonal_cost=_math.sqrt(2)):
    """Get the shortest path between two points.

    Args:
        start_x (int): Starting x-position.
        start_y (int): Starting y-position.
        dest_x (int): Destination x-position.
        dest_y (int): Destination y-position.
        diagonal_cost (float): Multiplier for diagonal movement.
            Can be set to zero to disable diagonal movement entirely.

    Returns:
        List[Tuple[int, int]]: The shortest list of points to the
            destination position from the starting position.
            The start point is not included in this list.
    """
    # delegate to tcod's A* pathfinder over this map
    pathfinder = tcod.path.AStar(self, diagonal_cost)
    return pathfinder.get_path(start_x, start_y, dest_x, dest_y)
|
def beautify_date(inasafe_time, feature, parent):
    """Given an InaSAFE analysis time, convert it to a date with
    year-month-date format.

    For instance: beautify_date(@start_datetime) will convert the datetime
    provided by qgis_variable.
    """
    # feature/parent are required by the QGIS expression signature but unused
    _ = feature, parent  # NOQA
    return parse(inasafe_time).strftime('%Y-%m-%d')
|
def get_module_defined_tf_var(terraform_version_opts, env_name):
    """Return version of Terraform requested in module options."""
    # a plain string applies to every environment
    if isinstance(terraform_version_opts, six.string_types):
        return terraform_version_opts
    # per-environment entry wins over the '*' wildcard; None if neither
    for key in (env_name, '*'):
        version = terraform_version_opts.get(key)
        if version:
            return version
    return None
|
def update_credit_note(self, credit_note_id, credit_note_dict):
    """Updates a credit note.

    :param credit_note_id: the credit note id
    :param credit_note_dict: dict of fields to update
    :return: dict response from the API
    """
    return self._create_put_request(
        resource=CREDIT_NOTES,
        billomat_id=credit_note_id,
        send_data=credit_note_dict,
    )
|
def post(self, value, addend, unit):
    """A date adder endpoint.

    Adds ``addend`` minutes or days to ``value`` (defaulting to the
    current UTC time) and returns the result in ISO format.
    """
    base = value or dt.datetime.utcnow()
    offset = (
        dt.timedelta(minutes=addend)
        if unit == "minutes"
        else dt.timedelta(days=addend)
    )
    return {"result": (base + offset).isoformat()}
|
def choices_from_enum(source: Enum) -> Tuple[Tuple[Any, str], ...]:
    """Makes tuple to use in Django's Fields ``choices`` attribute.

    Member values become the stored values; title-cased member names
    become the human-readable labels.

    :param source: Enum to process.
    :return: Tuple to put into ``choices``
    """
    return tuple((member.value, member.name.title()) for member in source)
|
def enforce_required_fields(self, attrs):
    """The `UniqueTogetherValidator` always forces an implied 'required'
    state on the fields it applies to.

    Raises ValidationError listing every missing field; does nothing on
    updates (when ``self.instance`` is set).
    """
    # updates are exempt from the implied-required check
    if self.instance is not None:
        return
    absent = [name for name in self.fields if name not in attrs]
    if absent:
        raise ValidationError({name: self.missing_message for name in absent})
|
def do_help(self, arg):
    """h(elp)
    Without argument, print the list of available commands.
    With a command name as argument, print help about that command.
    "help pdb" shows the full pdb documentation.
    "help exec" gives help on the ! command.
    """
    if not arg:
        # no topic given: fall back to cmd.Cmd's command listing
        return cmd.Cmd.do_help(self, arg)
    try:
        try:
            # a dedicated help_<topic> method takes precedence
            topic = getattr(self, 'help_' + arg)
            return topic()
        except AttributeError:
            # otherwise show the command method's own docstring
            command = getattr(self, 'do_' + arg)
    except AttributeError:
        # neither help_<arg> nor do_<arg> exists
        self.error('No help for %r' % arg)
    else:
        if sys.flags.optimize >= 2:
            # -OO strips docstrings, so there is nothing to display
            self.error('No help for %r; please do not run Python with -OO ' 'if you need command help' % arg)
            return
        self.message(command.__doc__.rstrip())
|
def _validate_params(self, req):
    '''Validate parameters of a jsonrpc-request.

    Checks that the method exists, that an 'id' is present exactly when
    the method is not a notification, and that the supplied params match
    the method's required arguments.

    req - request as a jsonrpc-dict
    raises SLOJSONRPCError on validation error
    '''
    # does the method exist?
    method = req['method']
    if not method in self._methods:
        raise SLOJSONRPCError(-32601)
    fct = self._methods[method]['fct']
    # 'id' is only needed for none SLOJSONRPCNotification's
    try:
        # getattr raises AttributeError when the marker is absent,
        # i.e. for regular (non-notification) methods
        getattr(fct, '__SLOJSONRPCNotification__')
        if 'id' in req:
            logging.debug('JSONRPC: Fmt Error: no id for SLOJSONRPCNotifications')
            raise SLOJSONRPCError(-32602)
    except AttributeError:
        if not 'id' in req:
            logging.debug('JSONRPC: Fmt Error: Need an id for non SLOJSONRPCNotifications')
            raise SLOJSONRPCError(-32602)
    # get arguments and defaults for the python-function representing
    # the method
    argspec = self._methods[method]['argspec']
    args, defaults = list(argspec.args), list(argspec.defaults if argspec.defaults else [])
    # ignore self and session
    if 'self' in args:
        args.remove('self')
    # NOTE(review): unconditional remove — raises ValueError if the method
    # has no 'session' argument; presumably every registered method takes
    # one. Confirm against the registration code.
    args.remove('session')
    # create required arguments. delete the ones with defaults
    required = list(args)
    if defaults:
        for default in defaults:
            required.pop()
    # check if we need paremeters and there are none, then error
    if len(required) > 0 and 'params' not in req:
        logging.debug('JSONRPC: Parameter Error: More than zero params required')
        raise SLOJSONRPCError(-32602)
    if 'params' in req:
        # parameters must be a dict if there is more then one
        if not isinstance(req['params'], dict) and len(required) > 1:
            logging.debug('JSONRPC: Parameter Error: "params" must be a dictionary')
            raise SLOJSONRPCError(-32602)
        if isinstance(req['params'], dict):
            # check if required parameters are there
            for key in required:
                if not key in req['params']:
                    logging.debug('JSONRPC: Parameter Error: Required key "%s" is missing' % key)
                    raise SLOJSONRPCError(-32602)
            # check if parameters are given that do not exist in the method
            for key in req['params']:
                if not key in required:
                    logging.debug('JSONRPC: Parameter Error: Key is not allowed "%s"' % key)
                    raise SLOJSONRPCError(-32602)
|
def boto_client(self, service, *args, **kwargs):
    """A wrapper to apply configuration options to boto clients."""
    # merge per-service configuration into the caller-supplied kwargs
    method_kwargs = self.configure_boto_session_method_kwargs(service, kwargs)
    return self.boto_session.client(service, *args, **method_kwargs)
|
def timesteps(self):
    """Time-step at which this intervention occurs, starting from 0, the
    first intervention-period time-step.

    https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#-deploy-1

    rtype: list
    """
    timed = self.et.find("timed")
    if timed is None:
        # no <timed> element: nothing is deployed
        return []
    return [deploy.attrib["time"] for deploy in timed.findall("deploy")]
|
def _read_config(self):
    """Read the configuration file."""
    config = configparser.ConfigParser()
    config.read(self.path)
    # every name listed under distutils/index-servers is a repository section
    if config.has_section('distutils'):
        server_names = config.get('distutils', 'index-servers')
        stripped = (name.strip() for name in server_names.split('\n'))
        for server in (name for name in stripped if name):
            repo_config = RepositoryConfig(server)
            repo_config.fill(config, server)
            self.repositories.append(repo_config)
    # legacy single-server section
    # NOTE(review): this 'default' config is filled but never appended to
    # self.repositories — looks deliberate (legacy 'server-login' parsing),
    # but worth confirming.
    repo_config = RepositoryConfig('default')
    repo_config.fill(config, 'server-login')
|
def exit_actor():
    """Intentionally exit the current actor.

    This function is used to disconnect an actor and exit the worker.

    Raises:
        Exception: An exception is raised if this is a driver or this
            worker is not an actor.
    """
    worker = ray.worker.global_worker
    # only a worker-mode process with a non-nil actor id is an actor
    if worker.mode == ray.WORKER_MODE and not worker.actor_id.is_nil():
        # Disconnect the worker from the raylet. The point of
        # this is so that when the worker kills itself below, the
        # raylet won't push an error message to the driver.
        worker.raylet_client.disconnect()
        ray.disconnect()
        # Disconnect global state from GCS.
        ray.global_state.disconnect()
        sys.exit(0)
        # unreachable unless sys.exit somehow returned
        assert False, "This process should have terminated."
    else:
        raise Exception("exit_actor called on a non-actor worker.")
|
def keyphrases_table(keyphrases, texts, similarity_measure=None, synonimizer=None, language=consts.Language.ENGLISH):
    """Constructs the keyphrases table, containing their matching scores in a set of texts.

    The resulting table is stored as a dictionary of dictionaries,
    where the entry table["keyphrase"]["text"] corresponds
    to the matching score (0 <= score <= 1) of keyphrase "keyphrase"
    in the text named "text".

    :param keyphrases: list of strings
    :param texts: dictionary of form {text_name: text}
    :param similarity_measure: similarity measure to use
    :param synonimizer: SynonymExtractor object to be used
    :param language: Language of the text collection / keyphrases
    :returns: dictionary of dictionaries, having keyphrases on its first level and texts
        on the second level.
    """
    similarity_measure = similarity_measure or relevance.ASTRelevanceMeasure()
    # Materialize the dict views: on Python 3 dict.keys()/values() are not
    # subscriptable, and text_titles is indexed with [j] below.
    text_titles = list(texts.keys())
    text_collection = list(texts.values())
    similarity_measure.set_text_collection(text_collection, language)
    i = 0
    keyphrases_prepared = {keyphrase: utils.prepare_text(keyphrase) for keyphrase in keyphrases}
    total_keyphrases = len(keyphrases)
    total_scores = len(text_collection) * total_keyphrases
    res = {}
    for keyphrase in keyphrases:
        if not keyphrase:
            continue
        res[keyphrase] = {}
        # range, not the Python 2-only xrange
        for j in range(len(text_collection)):
            i += 1
            logging.progress("Calculating matching scores", i, total_scores)
            res[keyphrase][text_titles[j]] = similarity_measure.relevance(keyphrases_prepared[keyphrase], text=j, synonimizer=synonimizer)
    logging.clear()
    return res
|
def init_controller(url):
    """Initialize a controller.

    Provides a single global controller for applications that can't do this
    themselves. Returns ``[controller, created]`` where *created* reports
    whether a new controller was built (and started) on this call.
    """
    # pylint: disable=global-statement
    global _VERA_CONTROLLER
    if _VERA_CONTROLLER is not None:
        return [_VERA_CONTROLLER, False]
    _VERA_CONTROLLER = VeraController(url)
    _VERA_CONTROLLER.start()
    return [_VERA_CONTROLLER, True]
|
def add_source(self, url, *, note=''):
    """Add a source URL from which data was collected.

    :param url: the source URL
    :param note: optional free-form annotation for the source
    """
    self.sources.append({'url': url, 'note': note})
|
def output(self):
    """Rank 3 array representing output time series. Axis 0 is time,
    axis 1 ranges across output variables of a single simulation,
    axis 2 ranges across different simulation instances.
    """
    subts = [s.output for s in self.sims]
    sub_ndim = subts[0].ndim
    # Use ==, not "is": identity comparison with an int literal relies on
    # CPython small-int caching and is a SyntaxWarning on Python 3.8+.
    if sub_ndim == 1:
        # promote 1-D series to 2-D so every sub-series has a variable axis
        subts = [distob.expand_dims(ts, 1) for ts in subts]
        sub_ndim += 1
    nodeaxis = sub_ndim
    # add a trailing axis and concatenate the simulations along it
    subts = [distob.expand_dims(ts, nodeaxis) for ts in subts]
    ts = subts[0].concatenate(subts[1:], axis=nodeaxis)
    ts.labels[nodeaxis] = self._node_labels()
    return ts
|
def _compare_list ( new_list , old_list , change_list = None , root = None ) :
'''a method for recursively listing changes made to a list
: param new _ list : list with new value
: param old _ list : list with old values
: param change _ list : list of differences between old and new
: param root : string with record of path to the root of the main object
: return : list of differences between old and new'''
|
from copy import deepcopy
if len ( old_list ) > len ( new_list ) :
same_len = len ( new_list )
for i in reversed ( range ( len ( new_list ) , len ( old_list ) ) ) :
new_path = deepcopy ( root )
new_path . append ( i )
change_list . append ( { 'action' : 'REMOVE' , 'value' : None , 'path' : new_path } )
elif len ( new_list ) > len ( old_list ) :
same_len = len ( old_list )
append_list = [ ]
path = deepcopy ( root )
for i in range ( len ( old_list ) , len ( new_list ) ) :
append_list . append ( new_list [ i ] )
change_list . append ( { 'action' : 'APPEND' , 'value' : append_list , 'path' : path } )
else :
same_len = len ( new_list )
for i in range ( 0 , same_len ) :
new_path = deepcopy ( root )
new_path . append ( i )
if new_list [ i ] . __class__ != old_list [ i ] . __class__ :
change_list . append ( { 'action' : 'UPDATE' , 'value' : new_list [ i ] , 'path' : new_path } )
elif isinstance ( new_list [ i ] , dict ) :
_compare_dict ( new_list [ i ] , old_list [ i ] , change_list , new_path )
elif isinstance ( new_list [ i ] , list ) :
_compare_list ( new_list [ i ] , old_list [ i ] , change_list , new_path )
elif isinstance ( new_list [ i ] , set ) :
_compare_set ( new_list [ i ] , old_list [ i ] , change_list , new_path )
elif new_list [ i ] != old_list [ i ] :
change_list . append ( { 'action' : 'UPDATE' , 'value' : new_list [ i ] , 'path' : new_path } )
return change_list
|
def save_parsed_data_to_csv(self, output_filename='output.csv'):
    """Outputs a csv file in accordance with parse_rectlabel_app_output
    method. This csv file is meant to accompany a set of picture files in
    the creation of an Object Detection dataset.

    :param output_filename: string, default makes sense, but for your
        convenience.
    """
    result = self.parse_rectlabel_app_output()
    # ``with`` guarantees the file is closed even if a write raises
    with open(output_filename, 'w', encoding='utf8') as ff:
        for line in result:
            ff.write(line + '\n')
|
def _load_dataframe(self, resource_name):
    """Build pandas.DataFrame from resource data.

    Lazy load any optional dependencies in order to allow users to
    use package without installing pandas if so they wish.

    :param resource_name: name of the tabular resource to load
    :raises RuntimeError: if pandas is not installed
    """
    try:
        import pandas
    except ImportError:
        raise RuntimeError('To enable dataframe support, ' 'run \'pip install datadotworld[pandas]\'')
    tabular_resource = self.__tabular_resources[resource_name]
    # map the resource schema onto pandas dtypes / date columns so
    # read_csv can type the frame up front
    field_dtypes = fields_to_dtypes(tabular_resource.descriptor['schema'])
    try:
        return pandas.read_csv(path.join(self.__base_path, tabular_resource.descriptor['path']), dtype=field_dtypes['other'], parse_dates=list(field_dtypes['dates'].keys()), infer_datetime_format=True)
    except ValueError as e:
        # schema dtypes did not fit the actual data: warn and fall back to
        # letting pandas infer everything
        warnings.warn('Unable to set data frame dtypes automatically using {} ' 'schema. Data types may need to be adjusted manually. ' 'Error: {}'.format(resource_name, e))
        return pandas.read_csv(path.join(self.__base_path, tabular_resource.descriptor['path']))
|
def use_http_form_post(message, destination, relay_state, typ="SAMLRequest"):
    """Return a form that will automagically execute and POST the message
    to the recipient.

    :param message: the message; coerced to text if it is not already a string
    :param destination: where the form POSTs to
    :param relay_state: opaque relay state echoed back by the recipient
    :param typ: Whether a Request, Response or Artifact
    :return: dictionary
    """
    if not isinstance(message, six.string_types):
        # stringify non-text payloads before building the form
        message = "%s" % (message,)
    return http_form_post_message(message, destination, relay_state, typ)
|
def drag(start_x, start_y, end_x, end_y, absolute=True, duration=0):
    """Holds the left mouse button, moving from start to end position, then
    releases. `absolute` and `duration` are parameters regarding the mouse
    movement."""
    # make sure the button starts in a released state
    if is_pressed():
        release()
    # jump to the start point instantly, then drag with the requested timing
    move(start_x, start_y, absolute, 0)
    press()
    move(end_x, end_y, absolute, duration)
    release()
|
def dump_dict_of_nested_lists_to_h5(fname, data):
    """Take nested list structure and dump it in hdf5 file.

    Parameters
    ----------
    fname : str
        Filename
    data : dict(list(numpy.ndarray))
        Dict of nested lists with variable len arrays.

    Returns
    -------
    None
    """
    # Open file
    print('writing to file: %s' % fname)
    # Explicit mode ('a' matches h5py's historical default; modern h5py
    # requires one) and a ``with`` block so the file is closed even when a
    # write raises.
    with h5py.File(fname, 'a') as f:
        # Iterate over values: one group per dict key, nested groups per
        # list level, one dataset per innermost array.
        for i, ivalue in list(data.items()):
            igrp = f.create_group(str(i))
            for j, jvalue in enumerate(ivalue):
                jgrp = igrp.create_group(str(j))
                for k, kvalue in enumerate(jvalue):
                    if kvalue.size > 0:
                        jgrp.create_dataset(str(k), data=kvalue, compression='gzip')
                    else:
                        # empty arrays need an explicit resizable shape
                        jgrp.create_dataset(str(k), data=kvalue, maxshape=(None,), compression='gzip')
|
def calculate_row_format(columns, keys=None):
    """Calculate row format.

    Args:
        columns (dict): the keys are the column name and the value the max length.
        keys (list): optional list of keys to order columns as well as to filter for them.

    Returns:
        str: format for table row, e.g. ``|%(name)-10s|%(age)-3s|``
    """
    if keys is None:
        selected = list(columns.keys())
    else:
        # keep only known columns, in the caller's order
        selected = [key for key in keys if key in columns]
    cells = ["%%(%s)-%ds" % (key, columns[key]) for key in selected]
    return '|' + "|".join(cells) + '|'
|
def parse_keqv_list(l):
    """Parse list of key=value strings where keys are not duplicated.

    Values wrapped in double quotes are unquoted. Empty values (``"k="``)
    are allowed; the previous version raised IndexError on them.

    :param l: iterable of ``key=value`` strings
    :return: dict mapping each key to its (unquoted) value
    """
    parsed = {}
    for elt in l:
        k, v = elt.split('=', 1)
        # Only strip quotes from a properly quoted value of length >= 2;
        # this guards against '' (IndexError) and the lone char '"'.
        if len(v) >= 2 and v[0] == '"' and v[-1] == '"':
            v = v[1:-1]
        parsed[k] = v
    return parsed
|
def write_how_many(self, file):
    """Writes component numbers to a table.

    Renders a two-column ("Object" / "Quantity") reST-style table of the
    case's component counts to the given writable *file*.
    """
    report = CaseReport(self.case)
    # Map component labels to attribute names
    components = [("Bus", "n_buses"), ("Generator", "n_generators"), ("Committed Generator", "n_online_generators"), ("Load", "n_loads"), ("Fixed Load", "n_fixed_loads"), ("Despatchable Load", "n_online_vloads"), ("Shunt", "n_shunts"), ("Branch", "n_branches"), ("Transformer", "n_transformers"), ("Inter-tie", "n_interties"), ("Area", "n_areas")]
    # Column 1 width
    longest = max(len(c[0]) for c in components)
    col1_header = "Object"
    col1_width = longest
    col2_header = "Quantity"
    col2_width = len(col2_header)
    # Row separator
    sep = "=" * col1_width + " " + "=" * col2_width + "\n"
    # Row headers
    file.write(sep)
    file.write(col1_header.center(col1_width))
    file.write(" ")
    file.write("%s\n" % col2_header.center(col2_width))
    file.write(sep)
    # Rows
    for label, attr in components:
        col2_value = str(getattr(report, attr))
        file.write("%s %s\n" % (label.ljust(col1_width), col2_value.rjust(col2_width)))
    # Closing separator. This used to live in a ``for/else`` — the else of
    # a loop runs whenever the loop is not broken, so it always executed;
    # plain statements express the same behavior without the misleading
    # conditional look.
    file.write(sep)
    file.write("\n")
    del report
|
def create_job(db, datadir):
    """Create job for the given user, return it.

    :param db:
        a :class:`openquake.server.dbapi.Db` instance
    :param datadir:
        Data directory of the user who owns/started this job.
    :returns:
        the job ID
    """
    calc_id = get_calc_id(db, datadir) + 1
    # seed row for a freshly created, still-running job
    job_row = dict(
        id=calc_id,
        is_running=1,
        description='just created',
        user_name='openquake',
        calculation_mode='to be set',
        ds_calc_dir=os.path.join('%s/calc_%s' % (datadir, calc_id)),
    )
    return db('INSERT INTO job (?S) VALUES (?X)', job_row.keys(), job_row.values()).lastrowid
|
def format_doc(*args, **kwargs):
    """Replaces the docstring of the decorated object and then formats it.

    Modeled after astropy.utils.decorators.format_doc. The object's own
    docstring serves as the template; ``{__doc__}`` always expands to ''
    because the doc is deliberately cleared before formatting.
    """
    def set_docstring(obj):
        # None means: use the object's __doc__
        template = obj.__doc__
        # Delete documentation in this case so we don't end up with
        # awkwardly self-inserted docs.
        obj.__doc__ = None
        # If the original has a not-empty docstring append it to the
        # format kwargs.
        kwargs['__doc__'] = obj.__doc__ or ''
        obj.__doc__ = template.format(*args, **kwargs)
        return obj
    return set_docstring
|
def from_df(cls, path: PathOrStr, df: pd.DataFrame, folder: PathOrStr = None, label_delim: str = None, valid_pct: float = 0.2, fn_col: IntsOrStrs = 0, label_col: IntsOrStrs = 1, suffix: str = '', **kwargs: Any) -> 'ImageDataBunch':
    "Create from a `DataFrame` `df`."
    # build the item list, then split and label it in separate steps
    items = ImageList.from_df(df, path=path, folder=folder, suffix=suffix, cols=fn_col)
    split_items = items.split_by_rand_pct(valid_pct)
    src = split_items.label_from_df(label_delim=label_delim, cols=label_col)
    return cls.create_from_ll(src, **kwargs)
|
def QA_util_random_with_topic(topic='Acc', lens=8):
    """Generate a random account-style identifier.

    Returns ``"<topic>_<suffix>"`` where the suffix consists of ``lens``
    distinct characters sampled from A-Z, a-z and 0-9.
    (Original docstring, translated from Chinese: "generate a random
    account value: Acc + digit id + random upper/lower-case characters".)
    """
    alphabet = (
        [chr(code) for code in range(65, 91)]      # A-Z
        + [chr(code) for code in range(97, 123)]   # a-z
        + [str(digit) for digit in range(10)]      # 0-9
    )
    suffix = random.sample(alphabet, lens)
    return '{}_{}'.format(topic, ''.join(suffix))
|
def update_hit_tally(self):
    '''Tally hits.'''
    # quiet mode skips the (potentially slow) tally entirely
    if self.quiet:
        return
    num_hits = self.amt_services_wrapper.tally_hits()
    if self.sandbox:
        self.sandbox_hits = num_hits
    else:
        self.live_hits = num_hits
|
def Readdir(self, path, fh=None):
    """Updates the directory listing from the client.

    Args:
        path: The path to the directory to update. Client is inferred from this.
        fh: A file handler. Not used.

    Returns:
        A list of filenames.
    """
    # refresh the cached view from the client first if it is stale
    if self.DataRefreshRequired(path):
        self._RunAndWaitForVFSFileUpdate(path)
    # NOTE(review): a literal None is forwarded rather than the given fh;
    # harmless since fh is documented as unused, but worth confirming.
    return super(GRRFuse, self).Readdir(path, fh=None)
|
def island_itergen(catalog):
    """Iterate over a catalog of sources, and return an island worth of sources at a time.

    Yields a list of components, one island at a time.

    Parameters
    ----------
    catalog : iterable
        A list or iterable of :class:`AegeanTools.models.OutputSource` objects.

    Yields
    ------
    group : list
        A list of all sources within an island, one island at a time.
    """
    # reverse sort so that we can pop the last elements and get an increasing island number
    catalog = sorted(catalog)
    catalog.reverse()
    group = []
    # using pop and keeping track of the list length ourselves is faster than
    # constantly asking for len(catalog)
    src = catalog.pop()
    c_len = len(catalog)
    isle_num = src.island
    while c_len >= 0:
        if src.island == isle_num:
            # src belongs to the current island: accumulate it
            group.append(src)
            c_len -= 1
            if c_len < 0:
                # we have just added the last item from the catalog
                # and there are no more to pop
                yield group
            else:
                src = catalog.pop()
        else:
            # src belongs to a later island: advance the island counter and
            # flush the group collected so far (src is re-examined next pass)
            isle_num += 1
            # maybe there are no sources in this island so skip it
            if group == []:
                continue
            yield group
            group = []
    return
|
def add_listener_policy(self, json_data):
    """Attaches listener policies to an ELB.

    Args:
        json_data (json): return data from ELB upsert
    """
    env = boto3.session.Session(profile_name=self.env, region_name=self.region)
    elbclient = env.client('elb')
    # create stickiness policy if set in configs
    stickiness = {}
    elb_settings = self.properties['elb']
    if elb_settings.get('ports'):
        ports = elb_settings['ports']
        for listener in ports:
            if listener.get("stickiness"):
                # one stickiness configuration applies to the whole ELB, so
                # stop scanning at the first listener that declares it
                stickiness = self.add_stickiness()
                LOG.info('Stickiness Found: %s', stickiness)
                break
    # Attach policies to created ELB
    for job in json.loads(json_data)['job']:
        for listener in job['listeners']:
            policies = []
            ext_port = listener['externalPort']
            if listener['listenerPolicies']:
                policies.extend(listener['listenerPolicies'])
            # add the per-port stickiness policy, if one was created above
            if stickiness.get(ext_port):
                policies.append(stickiness.get(ext_port))
            if policies:
                LOG.info('Adding listener policies: %s', policies)
                elbclient.set_load_balancer_policies_of_listener(LoadBalancerName=self.app, LoadBalancerPort=ext_port, PolicyNames=policies)
|
def _sensoryComputeInferenceMode(self, anchorInput):
    """Infer the location from sensory input. Activate any cells with enough active
    synapses to this sensory input. Deactivate all other cells.

    @param anchorInput (numpy array)
    A sensory input. This will often come from a feature-location pair layer.
    """
    if len(anchorInput) == 0:
        return
    # Overlap score of every segment with the sensory input.
    overlaps = self.connections.computeActivity(anchorInput, self.connectedPermanence)
    activeSegments = np.where(overlaps >= self.activationThreshold)[0]
    # Cells that are supported by at least one sufficiently-active segment.
    sensorySupportedCells = np.unique(self.connections.mapSegmentsToCells(activeSegments))
    # Currently-active cells that lost sensory support get deactivated:
    # remove their phase entries (rows of self.activePhases).
    inactivated = np.setdiff1d(self.activeCells, sensorySupportedCells)
    inactivatedIndices = np.in1d(self.cellsForActivePhases, inactivated).nonzero()[0]
    if inactivatedIndices.size > 0:
        self.activePhases = np.delete(self.activePhases, inactivatedIndices, axis=0)
    # Cells that gained sensory support but were not active before.
    activated = np.setdiff1d(sensorySupportedCells, self.activeCells)
    # Find centers of point clouds
    if "corners" in self.anchoringMethod:
        # "corners" mode rebuilds coordinates for ALL sensory-supported cells,
        # not just the newly activated ones.
        activatedCoordsBase = np.transpose(np.unravel_index(sensorySupportedCells, self.cellDimensions)).astype('float')
    else:
        activatedCoordsBase = np.transpose(np.unravel_index(activated, self.cellDimensions)).astype('float')
    # Generate points to add: one point per (iOffset, jOffset) sub-cell offset.
    activatedCoords = np.concatenate([activatedCoordsBase + [iOffset, jOffset] for iOffset in self.cellCoordinateOffsets for jOffset in self.cellCoordinateOffsets])
    if "corners" in self.anchoringMethod:
        # "corners" anchoring replaces the phase set wholesale.
        self.activePhases = activatedCoords / self.cellDimensions
    else:
        if activatedCoords.size > 0:
            # Otherwise append the new phases to the surviving ones.
            self.activePhases = np.append(self.activePhases, activatedCoords / self.cellDimensions, axis=0)
    # Recompute active cells from the updated phases, then publish state.
    self._computeActiveCells()
    self.activeSegments = activeSegments
    self.sensoryAssociatedCells = sensorySupportedCells
|
def dicom_diff(file1, file2):
    """Shows the fields that differ between two DICOM images.

    Inspired by https://code.google.com/p/pydicom/source/browse/source/dicom/examples/DicomDiff.py
    """
    # Dump each dataset (file meta + main dataset) as a list of lines,
    # re-appending the newline that split() removed.
    rep = []
    for path in (file1, file2):
        dataset = compressed_dicom.read_file(path)
        dump = str(dataset.file_meta) + "\n" + str(dataset)
        rep.append([ln + '\n' for ln in dump.split('\n')])
    # Emit only added/removed lines from the diff.
    for line in difflib.Differ().compare(rep[0], rep[1]):
        if line.startswith(('+', '-')):
            sys.stdout.write(line)
|
def print_graph(self, format=None, output=sys.stdout, depth=0, **kwargs):
    """Print the graph for self's nodes.

    Args:
        format (str): output format (csv, json or text).
        output (file): file descriptor on which to write.
        depth (int): depth of the graph.
    """
    self.as_graph(depth=depth).print(format=format, output=output, **kwargs)
|
def import_as(module, name):
    """Import the specified module (from our local directory) as the
    specified name, returning the loaded module object.

    :param module: file name (without extension) of the module, looked up
        next to this file only.
    :param name: name under which the module is loaded and registered.
    :return: the loaded module object.
    """
    # Renamed local from ``dir`` so the builtin is not shadowed.
    # NOTE(review): ``imp`` is deprecated and removed in Python 3.12;
    # migrating to importlib would subtly change load semantics, so it is
    # deliberately kept here.
    module_dir = os.path.split(__file__)[0]
    return imp.load_module(name, *imp.find_module(module, [module_dir]))
|
def list2html(lst):
    """Convert a list to HTML using table formatting.

    Each element becomes a table row; a string fills one cell, a nested
    list is rendered as comma-separated items in one cell, and anything
    else is str()-converted into a cell.
    """
    parts = ['<TABLE width=100% border=0>']
    for row in lst:
        parts.append('<TR>\n')
        if type(row) is str:
            parts.append('<TD>' + row + '</TD>\n')
        elif type(row) is list:
            parts.append('<TD>')
            for cell in row:
                parts.append(cell + ', ')
            parts.append('</TD>')
        else:
            parts.append('<TD>' + str(row) + '</TD>\n')
        parts.append('</TR>\n')
    parts.append('</TABLE><BR>\n')
    return ''.join(parts)
|
def flatFieldFromFit(self):
    '''Calculate flatField from 2d-polynomial fit, filling all
    high-gradient areas within the averaged fit-image.

    Returns: flatField, average background level, fitted image,
    valid indices mask.
    '''
    fitimg, mask = self._prepare()
    out = fitimg.copy()
    # Iteratively refit until the set of high-gradient pixels stabilizes
    # (capped at 10 passes).
    prev_masked = 0
    for _ in range(10):
        out = polyfit2dGrid(out, mask, 2)
        mask = highGrad(out)
        n_masked = mask.sum()
        if n_masked == prev_masked:
            break
        prev_masked = n_masked
    # Clamp the flat field into a sane range and restore original resolution.
    out = np.clip(out, 0.1, 1)
    out = resize(out, self._orig_shape, mode='reflect')
    return out, self.bglevel / self._n, fitimg, mask
|
def get_inventory(self):
    """Retrieve inventory of system.

    Retrieve inventory of the targeted system. This frequently includes
    serial numbers, sometimes hardware addresses, sometimes memory modules.
    This function will retrieve whatever the underlying platform provides
    and apply some structure. Iterating over the return yields tuples
    of a name for the inventoried item and dictionary of descriptions
    or None for items not present.
    """
    self.oem_init()
    yield ("System", self._get_zero_fru())
    self.init_sdr()
    # Walk FRU records in id order, letting the OEM layer post-process each.
    for fruid in sorted(self._sdr.fru):
        sdr_entry = self._sdr.fru[fruid]
        fruinf = fru.FRU(ipmicmd=self, fruid=fruid, sdr=sdr_entry).info
        if fruinf is not None:
            fruinf = self._oem.process_fru(fruinf, sdr_entry.fru_name)
        yield (sdr_entry.fru_name, fruinf)
    # Finally, anything only the OEM layer knows about.
    for componentpair in self._oem.get_oem_inventory():
        yield componentpair
|
def __construct_really(project, name, target_type, prop_set, sources):
    """Attempts to construct target by finding viable generators, running them
    and selecting the dependency graph.

    Raises an error through the manager if more than one viable generator
    succeeds (the transformation is ambiguous); otherwise returns the
    result of the single successful generator (or [] if none succeeded).
    """
    if __debug__:
        from .targets import ProjectTarget
        assert isinstance(project, ProjectTarget)
        assert isinstance(name, basestring) or name is None
        assert isinstance(target_type, basestring)
        assert isinstance(prop_set, property_set.PropertySet)
        assert is_iterable_typed(sources, virtual_target.VirtualTarget)
    viable_generators = find_viable_generators(target_type, prop_set)
    result = []
    dout(" *** %d viable generators" % len(viable_generators))
    generators_that_succeeded = []
    for g in viable_generators:
        # Mark this generator active for the duration of the attempt so
        # recursive construction can see it; pop it afterwards.
        __active_generators.append(g)
        r = try_one_generator(project, name, g, target_type, prop_set, sources)
        del __active_generators[-1]
        if r:
            generators_that_succeeded.append(g)
            if result:
                # A previous generator already produced a result: ambiguity.
                # Report both dependency graphs and abort via the manager.
                output = cStringIO.StringIO()
                print >> output, "ambiguity found when searching for best transformation"
                print >> output, "Trying to produce type '%s' from: " % (target_type)
                for s in sources:
                    print >> output, " - " + s . str ( )
                print >> output, "Generators that succeeded:"
                for g in generators_that_succeeded:
                    print >> output, " - " + g . id ( )
                print >> output, "First generator produced: "
                # r and result are [usage-requirements, target1, ...]; skip
                # the first element when listing produced targets.
                for t in result [ 1 : ] :
                    print >> output, " - " + str ( t )
                print >> output, "Second generator produced:"
                for t in r [ 1 : ] :
                    print >> output, " - " + str ( t )
                get_manager ( ) . errors ( ) ( output . getvalue ( ) )
            else :
                # First success: keep it.
                result = r ;
    return result ;
|
def check_datetime(method, dictionary, fields, label=None):
    """Check that the specified fields are formatted correctly if they have a value.

    A field is acceptable if it is a ``datetime.datetime``/``datetime.date``
    instance (subclasses included) or a string matching ``ISO_8601_REGEX``.
    Missing or ``None`` fields are ignored.

    :param string method: Name of the calling method (used in the error message).
    :param dict dictionary: Dictionary to check.
    :param tuple fields: Fields to check.
    :param string label: Dictionary name.
    :raises PyCronofyValidationError: If any field is improperly formatted.
    """
    improperly_formatted = []
    values = []
    for field in fields:
        value = dictionary.get(field)
        if value is None:
            continue
        # isinstance (not an exact type() check) so datetime/date subclasses
        # such as pandas.Timestamp are accepted too.
        if not isinstance(value, (datetime.datetime, datetime.date)) and not ISO_8601_REGEX.match(value):
            improperly_formatted.append(field)
            # Only record offending values so the error message lines up
            # with the list of bad fields (previously all values were kept).
            values.append(value)
    if improperly_formatted:
        error_label = ' for "%s"' % label if label else ''
        raise PyCronofyValidationError('Method: %s. Improperly formatted datetime/date field(s)%s: %s\n%s' % (method, error_label, improperly_formatted, values), method, improperly_formatted, values)
|
def allow_capability(self, ctx, ops):
    '''Checks that the user is allowed to perform all the given operations.

    If not, a discharge error will be raised. On success, returns a list of
    first party caveat conditions that must be applied to any macaroon
    granting capability to execute the operations. Those caveat conditions
    will not include any declarations contained in login macaroons - the
    caller must be careful not to mint a macaroon associated with the
    LOGIN_OP operation unless they add the expected declaration caveat too -
    in general, clients should not create capabilities that grant LOGIN_OP
    rights.

    The operations must include at least one non-LOGIN_OP operation.
    '''
    non_login_count = sum(1 for op in ops if op != LOGIN_OP)
    if non_login_count == 0:
        raise ValueError('no non-login operations required in capability')
    # Collect the caveat conditions of every macaroon that was actually
    # used to authorize the operations, de-duplicating via the squasher.
    _, used = self._allow_any(ctx, ops)
    squasher = _CaveatSquasher()
    for idx, was_used in enumerate(used):
        if not was_used:
            continue
        for cond in self._conditions[idx]:
            squasher.add(cond)
    return squasher.final()
|
def __setupViews(self):
    """Creates the UI widgets."""
    # The collector is created first; the repo widget below depends on it.
    self._collector = Collector(self.windowNumber)
    self.configWidget = ConfigWidget(self._configTreeModel)
    self.repoWidget = RepoWidget(self.argosApplication.repo, self.collector)
    # self._configTreeModel.insertItem(self.repoWidget.repoTreeView.config) # No configurable items yet
    # Define a central widget that will be the parent of the inspector widget.
    # We don't set the inspector directly as the central widget to retain the size when the
    # inspector is changed.
    widget = QtWidgets.QWidget()
    layout = QtWidgets.QVBoxLayout(widget)
    layout.setContentsMargins(CENTRAL_MARGIN, CENTRAL_MARGIN, CENTRAL_MARGIN, CENTRAL_MARGIN)
    layout.setSpacing(CENTRAL_SPACING)
    self.setCentralWidget(widget)
    # Must be after setInspector since that already draws the inspector
    self.collector.sigContentsChanged.connect(self.collectorContentsChanged)
    self._configTreeModel.sigItemChanged.connect(self.configContentsChanged)
|
def add_events(self, **kwargs):
    """Add a failure-recovery event into the queue.

    Kwargs:
        event_queue: queue-like object with a ``put((priority, timestamp,
            data))`` interface.
        priority: priority value for the queued event.

    Silently returns when either kwarg is missing; re-raises any error
    from the queue after logging it.
    """
    event_q = kwargs.get('event_queue')
    pri = kwargs.get('priority')
    # Explicit None checks: a falsy-but-valid priority (e.g. 0) must not
    # be silently dropped, as the previous truthiness test did.
    if event_q is None or pri is None:
        return
    event_type = 'server.failure.recovery'
    try:
        payload = {}
        timestamp = time.ctime()
        data = (event_type, payload)
        event_q.put((pri, timestamp, data))
        LOG.debug('Added failure recovery event to the queue.')
    except Exception as exc:
        LOG.exception('Error: %(exc)s for event %(event)s', {'exc': str(exc), 'event': event_type})
        # Bare raise preserves the original traceback.
        raise
|
def get_most_recent_release(self, group, artifact, remote=False):
    """Get the version number of the most recent release (non-integration
    version) of a particular group and artifact combination.

    :param str group: Group of the artifact to get the version of
    :param str artifact: Name of the artifact to get the version of
    :param bool remote: Should remote repositories be searched to find the
        latest version? Note this can make the request much slower.
        Default is false.
    :return: Version number of the most recent release
    :rtype: str
    :raises requests.exceptions.HTTPError: For any non-success HTTP
        responses from the Artifactory API.
    """
    url = self._base_url + '/api/search/latestVersion'
    # The API expects 'remote' as 0/1, not a boolean.
    params = dict(g=group, a=artifact, repos=self._repo, remote=int(remote))
    self._logger.debug("Using latest version API at %s - params %s", url, params)
    resp = self._session.get(url, params=params)
    resp.raise_for_status()
    return resp.text.strip()
|
def install_global_objects(self):
    """Process [GLOBAL_OBJECTS]: inject every configured object into the
    ``uliweb`` module so user code can import it straight from uliweb.
    """
    import uliweb
    for attr_name, dotted_path in settings.GLOBAL_OBJECTS.items():
        setattr(uliweb, attr_name, import_attr(dotted_path))
|
def add_colorbar(self, **kwargs):
    """Draw a colorbar for the most recently added mappable.

    Keyword arguments are forwarded to ``fig.colorbar``; ``extend`` and
    ``label`` get defaults when not supplied. Returns self for chaining.
    """
    opts = dict(kwargs)
    if self._cmap_extend is not None:
        opts.setdefault('extend', self._cmap_extend)
    if 'label' not in opts:
        # Only compute the default label when the caller did not supply one.
        opts['label'] = label_from_attrs(self.data)
    self.cbar = self.fig.colorbar(self._mappables[-1], ax=list(self.axes.flat), **opts)
    return self
|
def guest_unpause(self, userid):
    """Unpause a virtual machine.

    :param str userid: the id of the virtual machine to be unpaused
    :returns: None
    """
    # Any SDK base error is logged and re-raised with this action context.
    with zvmutils.log_and_reraise_sdkbase_error("unpause guest '%s'" % userid):
        self._vmops.guest_unpause(userid)
|
def upgrade_cdh(self, deploy_client_config=True, start_all_services=True, cdh_parcel_version=None, cdh_package_version=None, rolling_restart=False, slave_batch_size=None, sleep_seconds=None, slave_fail_count_threshold=None):
    """Perform CDH upgrade to the next major version. In v9+, also supports
    minor CDH 5 upgrades (5.a.b to 5.x.y where x > a) and maintenance
    release changes (a.b.x to a.b.y).

    If using packages, CDH packages on all hosts of the cluster must be
    manually upgraded before this command is issued. The command upgrades
    the services and their configuration to the requested version. All
    running services are stopped first unless a rolling restart is
    requested and available.

    @param deploy_client_config: Deploy client configurations after the
        upgrade (default True; always done in v9+).
    @param start_all_services: Start all services after the upgrade
        (default True; always done in v9+).
    @param cdh_parcel_version: Full version of an already distributed
        parcel for the next CDH version (e.g. '5.0.0-1.cdh5.0.0.p0.11').
    @param cdh_package_version: Full version of an already installed
        package for the next CDH version (e.g. '5.2.0'). Since v9.
    @param rolling_restart: Perform a rolling restart (default False).
        Since v9.
    @param slave_batch_size: Rolling restart slave batch size (only with
        rolling_restart).
    @param sleep_seconds: Seconds to sleep between rolling restart batches
        (only with rolling_restart).
    @param slave_fail_count_threshold: Tolerated slave restart failures in
        a rolling restart (only with rolling_restart).
    @return: Reference to the submitted command.
    @since: API v6 for major upgrades only, v9 for maintenance and CDH 5
        minor releases.
    """
    args = {
        'deployClientConfig': deploy_client_config,
        'startAllServices': start_all_services,
    }
    # Optional arguments are only sent when explicitly provided.
    if cdh_parcel_version:
        args['cdhParcelVersion'] = cdh_parcel_version
    if cdh_package_version:
        args['cdhPackageVersion'] = cdh_package_version
    if rolling_restart:
        args['rollingRestartArgs'] = {
            'slaveBatchSize': slave_batch_size,
            'sleepSeconds': sleep_seconds,
            'slaveFailCountThreshold': slave_fail_count_threshold,
        }
    return self._cmd('upgradeCdh', data=args, api_version=6)
|
def split_cl_function(cl_str):
    """Split an CL function into a return type, function name, parameters list and the body.

    Args:
        cl_str (str): the CL code to parse and split into components

    Returns:
        tuple: string elements for the return type, function name, parameter list and the body
    """
    class Semantics:
        # Semantic-action object handed to the (module-level) CL grammar
        # parser; each method below is invoked when the same-named grammar
        # rule reduces, accumulating the pieces of the function.
        def __init__(self):
            self._return_type = ''
            self._function_name = ''
            self._parameter_list = []
            self._cl_body = ''

        def result(self, ast):
            # Top-level rule: package everything collected so far as the
            # final parse result.
            return self._return_type, self._function_name, self._parameter_list, self._cl_body

        def address_space(self, ast):
            # Address-space qualifier prefixes the return type (note the
            # trailing space separating it from the data type).
            self._return_type = ast.strip() + ' '
            return ast

        def data_type(self, ast):
            # Appended after any address-space qualifier.
            self._return_type += ''.join(ast).strip()
            return ast

        def function_name(self, ast):
            self._function_name = ast.strip()
            return ast

        def arglist(self, ast):
            # '()' means no parameters; keep the default empty list then.
            if ast != '()':
                self._parameter_list = ast
            return ast

        def body(self, ast):
            # Flatten the (possibly nested) token lists into one string.
            def join(items):
                result = ''
                for item in items:
                    if isinstance(item, str):
                        result += item
                    else:
                        result += join(item)
                return result
            # [1:-1] strips the enclosing braces from the body text.
            self._cl_body = join(ast).strip()[1:-1]
            return ast

    return _split_cl_function_parser.parse(cl_str, semantics=Semantics())
|
def _handle_sub_value(self, sub_value, handler_method):
    """Generic method to handle value to Fn::Sub key. We are interested in
    parsing the ${} syntaxes inside the string portion of the value.

    :param sub_value: Value of the Sub function
    :param handler_method: Method to be called on every occurrence of
        `${LogicalId}` structure within the string. Implementation could
        resolve and replace this structure with whatever they seem fit
    :return: Resolved value of the Sub dictionary
    """
    # Just handle known references within the string to be substituted and
    # return the whole value, because that's the best we can do here.
    # String form, e.g. {"Fn::Sub": "some string"}:
    if isinstance(sub_value, string_types):
        return self._sub_all_refs(sub_value, handler_method)
    # List form, e.g. {"Fn::Sub": ["some string", {a: b}]} - only the first
    # element (the template string) is substituted, in place:
    if isinstance(sub_value, list) and sub_value and isinstance(sub_value[0], string_types):
        sub_value[0] = self._sub_all_refs(sub_value[0], handler_method)
    return sub_value
|
def make_shape(self):
    """Make shape object"""
    if self.region_type == 'ellipse':
        # Double the size parameters (presumably converting CRTF radii to
        # full axis lengths - TODO confirm against the Shape API).
        self.coord[2:] = [x * 2 for x in self.coord[2:]]
        if len(self.coord) % 2 == 1:  # This checks if the angle is present.
            # The angle (last element) was doubled along with the sizes
            # above; halve it back to its original value.
            self.coord[-1] /= 2
    if self.region_type == 'box':
        # Convert the two corner points into center (x, y) plus absolute
        # width/height Quantities.
        x = (self.coord[0] + self.coord[2]) / 2
        y = (self.coord[1] + self.coord[3]) / 2
        w = u.Quantity(self.coord[0] - self.coord[2])
        h = u.Quantity(self.coord[1] - self.coord[3])
        self.coord = [x, y, abs(w), abs(h)]
    # Remove any 'coord' entry from meta - presumably so it does not
    # conflict with the coord list built above; verify against callers.
    self.meta.pop('coord', None)
    self.shape = Shape(coordsys=self.coordsys, region_type=reg_mapping['CRTF'][self.region_type], coord=self.coord, meta=self.meta, composite=False, include=self.include)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.