signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def create_return_line_item(cls, return_line_item, **kwargs):
    """Create a new ReturnLineItem.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass ``async=True``::

        >>> thread = api.create_return_line_item(return_line_item, async=True)
        >>> result = thread.get()

    :param async bool: run the request in a background thread
    :param ReturnLineItem return_line_item: attributes of the
        ReturnLineItem to create (required)
    :return: ReturnLineItem, or the request thread when called
        asynchronously.
    """
    # Callers of this convenience wrapper only ever want the payload,
    # never the full (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_return_line_item_with_http_info(return_line_item, **kwargs)
    data = cls._create_return_line_item_with_http_info(return_line_item, **kwargs)
    return data
|
async def xgroup_set_id(self, name: str, group: str, stream_id: str) -> bool:
    """[NOTICE] Not officially released yet.

    Set the id from which the consumer group resumes reading.

    :param name: name of the stream
    :param group: name of the consumer group
    :param stream_id: if ``$`` is provided, only new messages arriving in
        the stream from now on will be delivered to the group's consumers;
        if ``0``, the group will consume the whole stream history first.
        Any other valid ID may also be given.
    """
    reply = await self.execute_command('XGROUP SETID', name, group, stream_id)
    return reply
|
def get(self, queue_name, task_id):
    """Pop a specific task off the queue by identifier.

    :param queue_name: The name of the queue. Usually handled by the
        ``Gator`` instance.
    :type queue_name: string
    :param task_id: The identifier of the task.
    :type task_id: string
    :returns: The data for the task, or ``None`` when no such job exists.
    :rtype: string
    """
    self._only_watch_from(queue_name)
    job = self.conn.peek(task_id)
    if not job:
        return None
    # Remove the job from the queue before handing back its payload.
    job.delete()
    return job.body
|
def session(request):
    """Get information about the current session or modify it.

    GET parameters:
        html: turn on the HTML version of the API

    POST parameters (JSON body):
        locale: client's locale
        time_zone: client's time zone
        display_width: width of the client's display
        display_height: height of the client's display
    """
    if request.user.id is None:  # Google Bot
        return render_json(
            request,
            {'error': _('There is no user available to create a session.'),
             'error_type': 'user_undefined'},
            status=400, template='user_json.html')

    if request.method == 'GET':
        return render_json(
            request, Session.objects.get_current_session(),
            template='user_session.html', help_text=session.__doc__)
    elif request.method == 'POST':
        current_session = Session.objects.get_current_session()
        if current_session is None:
            return HttpResponseBadRequest("there is no current session to modify")
        data = json_body(request.body.decode("utf-8"))
        locale = data.get('locale', None)
        time_zone = data.get('time_zone', None)
        display_width = data.get('display_width', None)
        display_height = data.get('display_height', None)
        # Only overwrite the fields the client actually sent.
        if locale:
            current_session.locale = locale
        if time_zone:
            current_session.time_zone = TimeZone.objects.from_content(time_zone)
        if display_width:
            current_session.display_width = display_width
        if display_height:
            current_session.display_height = display_height
        current_session.save()
        return HttpResponse('ok', status=202)
    else:
        # BUG FIX: the original mixed a %-style placeholder with str.format(),
        # so the response was the literal text "method %s is not allowed".
        return HttpResponseBadRequest("method {} is not allowed".format(request.method))
|
def backward_delete_char(self, e):  # (Rubout)
    u"""Delete the character behind the cursor.

    A numeric argument means to kill the characters instead of deleting
    them.
    """
    # Delegate the actual edit to the line buffer, passing the (and
    # thereby resetting the) numeric argument, then refresh state.
    line_buffer = self.l_buffer
    line_buffer.backward_delete_char(self.argument_reset)
    self.finalize()
|
def clear_if_finalized(
        iteration: TransitionResult,
        channelidentifiers_to_channels: ChannelMap,
) -> TransitionResult[MediatorTransferState]:
    """Clear the mediator task if all the locks have been finalized.

    A lock is considered finalized if it has been removed from the merkle
    tree offchain, either because the transfer was unlocked or expired, or
    because the channel was settled on chain and therefore the channel is
    removed.

    :param iteration: transition result whose ``new_state`` is inspected.
    :param channelidentifiers_to_channels: mapping from channel identifier
        to channel state, used to look up payer/payee/waiting channels.
    :return: ``iteration`` unchanged while any lock is still pending,
        otherwise a ``TransitionResult`` with ``None`` state (the task is
        cleared) that preserves ``iteration.events``.
    """
    state = cast(MediatorTransferState, iteration.new_state)
    # Task already cleared upstream; nothing to do.
    if state is None:
        return iteration
    # Only clear the task if all channels have the lock cleared.
    secrethash = state.secrethash
    for pair in state.transfers_pair:
        # A pending lock on either side of any mediated pair keeps the
        # task alive.
        payer_channel = get_payer_channel(channelidentifiers_to_channels, pair)
        if payer_channel and channel.is_lock_pending(payer_channel.partner_state, secrethash):
            return iteration
        payee_channel = get_payee_channel(channelidentifiers_to_channels, pair)
        if payee_channel and channel.is_lock_pending(payee_channel.our_state, secrethash):
            return iteration
    if state.waiting_transfer:
        # A transfer waiting for routing also holds a lock in the payer's
        # channel; check it the same way.
        waiting_transfer = state.waiting_transfer.transfer
        waiting_channel_identifier = waiting_transfer.balance_proof.channel_identifier
        waiting_channel = channelidentifiers_to_channels.get(waiting_channel_identifier)
        if waiting_channel and channel.is_lock_pending(waiting_channel.partner_state, secrethash):
            return iteration
    # Every lock is finalized: drop the mediator state but keep the events.
    return TransitionResult(None, iteration.events)
|
def put_conf(self, configuration, test=False):
    """Send the configuration to the satellite.

    HTTP request to the satellite (POST /push_configuration).
    If ``test`` is True, store the configuration internally instead of
    sending it.

    :param configuration: the conf to send (data depend on the satellite)
    :type configuration:
    :return: True in test mode, otherwise the satellite's response
    """
    logger.debug("Sending configuration to %s, %s %s", self.name, self.alive, self.reachable)
    if not test:
        return self.con.post('_push_configuration', {'conf': configuration}, wait=True)
    # Unit-test mode: keep the configuration on the object for inspection.
    self.unit_test_pushed_configuration = configuration
    # print("***unit tests - sent configuration %s: %s" % (self.name, configuration))
    return True
|
def has_export_permission(self, request):
    """Return whether ``request`` has export permission.

    When the ``IMPORT_EXPORT_EXPORT_PERMISSION_CODE`` setting is not
    defined, export is allowed for everyone.
    """
    EXPORT_PERMISSION_CODE = getattr(settings, 'IMPORT_EXPORT_EXPORT_PERMISSION_CODE', None)
    if EXPORT_PERMISSION_CODE is None:
        return True
    opts = self.opts
    codename = get_permission_codename(EXPORT_PERMISSION_CODE, opts)
    # Django permission strings take the form "<app_label>.<codename>".
    return request.user.has_perm("%s.%s" % (opts.app_label, codename))
|
def customize(func):
    """Decorator to set plotting context and axes style during function call.

    The wrapped function gains a ``set_context`` keyword argument
    (default True). When True, the call runs inside seaborn's plotting
    context, axes style and 'colorblind' color palette, with the left
    spine removed; when False, the function is called unchanged.
    """
    @wraps(func)
    def call_w_context(*args, **kwargs):
        # Pop so the wrapped function never sees this wrapper-only kwarg.
        set_context = kwargs.pop('set_context', True)
        if set_context:
            # seaborn palettes act as context managers, activating the
            # palette only for the duration of the call.
            color_palette = sns.color_palette('colorblind')
            with plotting_context(), axes_style(), color_palette:
                sns.despine(left=True)
                return func(*args, **kwargs)
        else:
            return func(*args, **kwargs)
    return call_w_context
|
def _map_arguments(self, args):
    """Map from the top-level arguments to the arguments provided to the
    individual links.

    :param args: dict of top-level arguments; keys used here are
        ``comp`` (component yaml file), ``data`` (data file for the name
        factory), ``do_ltsum`` (bool) and ``nfiles`` (default 96).
    """
    comp_file = args.get('comp', None)
    datafile = args.get('data', None)
    do_ltsum = args.get('do_ltsum', False)
    NAME_FACTORY.update_base_dict(datafile)
    outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')
    num_files = args.get('nfiles', 96)
    self.comp_dict = yaml.safe_load(open(comp_file))
    coordsys = self.comp_dict.pop('coordsys')
    for key_e, comp_e in sorted(self.comp_dict.items()):
        # Optional per-component mktime filters; default to a single no-op.
        if 'mktimefilters' in comp_e:
            mktimelist = comp_e['mktimefilters']
        else:
            mktimelist = ['none']
        if 'evtclasses' in comp_e:
            evtclasslist_vals = comp_e['evtclasses']
        else:
            evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
        for mktimekey in mktimelist:
            zcut = "zmax%i" % comp_e['zmax']
            kwargs_mktime = dict(zcut=zcut, ebin=key_e, psftype='ALL',
                                 coordsys=coordsys, mktime=mktimekey)
            if do_ltsum:
                ltsum_listfile = 'ltsumlist_%s_%s' % (key_e, mktimekey)
                ltsum_outfile = 'ltsum_%s_%s' % (key_e, mktimekey)
                linkname = 'ltsum_%s_%s' % (key_e, mktimekey)
                # BUG FIX: the original passed the undefined name `likname`
                # here (a NameError at runtime); `linkname` was intended.
                self._set_link(linkname, Gtlink_ltsum,
                               infile1=ltsum_listfile, infile2=None,
                               outfile=ltsum_outfile,
                               logfile=os.path.join(outdir_base, "%s.log" % linkname))
            for evtclassval in evtclasslist_vals:
                for psf_type in sorted(comp_e['psf_types'].keys()):
                    fullkey = "%s_%s_%s_%s" % (key_e, mktimekey, evtclassval, psf_type)
                    linkname = 'coadd_%s' % (fullkey)
                    kwargs_bin = kwargs_mktime.copy()
                    kwargs_bin['psftype'] = psf_type
                    kwargs_bin['evclass'] = evtclassval
                    ccube_name = os.path.basename(NAME_FACTORY.ccube(**kwargs_bin))
                    outputfile = os.path.join(outdir_base, ccube_name)
                    # Use a dedicated local instead of clobbering the `args`
                    # parameter, which the original silently overwrote.
                    link_args = _make_input_file_list(outputfile, num_files)
                    self._set_link(linkname, Link_FermipyCoadd,
                                   args=link_args, output=outputfile,
                                   logfile=os.path.join(outdir_base, "%s.log" % linkname))
|
def quantile_gaussianize(x):
    """Normalize a sequence of values via rank and Normal c.d.f.

    Args:
        x (array_like): sequence of values.

    Returns:
        Gaussian-normalized values.

    Example:
        .. doctest::

            >>> from scipy_sugar.stats import quantile_gaussianize
            >>> print(quantile_gaussianize([-1, 0, 2]))
            [-0.67448975  0.          0.67448975]
    """
    from scipy.stats import norm, rankdata

    values = asarray(x, float).copy()
    finite = isfinite(values)
    # Negate the finite entries so that ranking followed by the inverse
    # survival function preserves the original ordering.
    values[finite] *= -1
    result = empty_like(values)
    result[finite] = rankdata(values[finite])
    result[finite] = norm.isf(result[finite] / (finite.sum() + 1))
    # Non-finite entries (nan/inf) pass through untouched.
    result[~finite] = values[~finite]
    return result
|
def fit(self, y, exogenous=None, **fit_args):
    """Fit the auto-arima estimator.

    Fit an AutoARIMA to a vector, ``y``, of observations with an optional
    matrix of ``exogenous`` variables.

    Parameters
    ----------
    y : array-like or iterable, shape=(n_samples,)
        The time-series to which to fit the ``ARIMA`` estimator. This may
        either be a Pandas ``Series`` object (statsmodels can internally
        use the dates in the index), or a numpy array. This should be a
        one-dimensional array of floats, and should not contain any
        ``np.nan`` or ``np.inf`` values.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If provided, these
        variables are used as additional features in the regression
        operation. This should not include a constant or trend. Note that
        if an ``ARIMA`` is fit on exogenous features, it must be provided
        exogenous features for making predictions.

    **fit_args : dict or kwargs
        Any keyword arguments to pass to the auto-arima function.
    """
    # Delegate the entire model search to auto_arima, forwarding every
    # hyper-parameter stored on this estimator instance.
    self.model_ = auto_arima(
        y, exogenous=exogenous, start_p=self.start_p, d=self.d,
        start_q=self.start_q, max_p=self.max_p, max_d=self.max_d,
        max_q=self.max_q, start_P=self.start_P, D=self.D,
        start_Q=self.start_Q, max_P=self.max_P, max_D=self.max_D,
        max_Q=self.max_Q, max_order=self.max_order, m=self.m,
        seasonal=self.seasonal, stationary=self.stationary,
        information_criterion=self.information_criterion, alpha=self.alpha,
        test=self.test, seasonal_test=self.seasonal_test,
        stepwise=self.stepwise, n_jobs=self.n_jobs,
        start_params=self.start_params, trend=self.trend,
        method=self.method, transparams=self.transparams,
        solver=self.solver, maxiter=self.maxiter, disp=self.disp,
        callback=self.callback, offset_test_args=self.offset_test_args,
        seasonal_test_args=self.seasonal_test_args,
        suppress_warnings=self.suppress_warnings,
        error_action=self.error_action, trace=self.trace,
        random=self.random, random_state=self.random_state,
        n_fits=self.n_fits, return_valid_fits=False,  # only return ONE
        out_of_sample_size=self.out_of_sample_size, scoring=self.scoring,
        scoring_args=self.scoring_args, with_intercept=self.with_intercept,
        **fit_args)
    # Return self to allow sklearn-style chaining.
    return self
|
def train(self, ftrain):
    '''Train the polynomial expansion.

    :param numpy.ndarray/function ftrain: output values corresponding to
        the quadrature points given by the getQuadraturePoints method to
        which the expansion should be trained, or a function that should
        be evaluated at the quadrature points to give these output values.

    *Sample Usage*::

        >>> thePC = PolySurrogate(dimensions=2)
        >>> thePC.train(myFunc)
        >>> predicted_q = thePC.predict([0, 1])

        >>> thePC = PolySurrogate(dimensions=2)
        >>> U = thePC.getQuadraturePoints()
        >>> Q = [myFunc(u) for u in U]
        >>> thePC.train(Q)
        >>> predicted_q = thePC.predict([0, 1])
    '''
    # Reset all coefficients while keeping the array's shape and dtype.
    self.coeffs = 0 * self.coeffs
    upoints, wpoints = self.getQuadraturePointsAndWeights()
    # EAFP: try ftrain as a callable first; on TypeError treat it as the
    # precomputed output values themselves.
    try:
        fpoints = [ftrain(u) for u in upoints]
    except TypeError:
        fpoints = ftrain
    for ipoly in np.arange(self.N_poly):
        inds = tuple(self.index_polys[ipoly])
        # Project f onto each basis polynomial by quadrature:
        # coeff_i = sum_k P_i(u_k) * f(u_k) * prod(w_k)
        coeff = 0.0
        for (u, q, w) in zip(upoints, fpoints, wpoints):
            coeff += eval_poly(u, inds, self.J_list) * q * np.prod(w)
        self.coeffs[inds] = coeff
    return None
|
def log(args, number=None, oneline=False, quiet=False):
    """Run a "git log ..." command, and return stdout.

    ``args`` is anything which can be added after a normal "git log ...";
    it can be blank.
    ``number``, if true-ish, will be added as a "-n" option.
    ``oneline``, if true-ish, will add the "--oneline" option.
    Unknown revisions yield an empty string instead of raising.
    """
    number_opt = '-n %s' % number if number else ''
    oneline_opt = '--oneline' if oneline else ''
    options = ' '.join([number_opt, oneline_opt])
    try:
        return run('log %s %s' % (options, args), quiet=quiet)
    except UnknownRevision:
        return ''
|
def sample(self):
    """This is the core sampling method. Samples a state from a
    demonstration, in accordance with the configuration.

    A sampling scheme is drawn at random, weighted by
    ``self.scheme_ratios``, and the bound method named in
    ``self.sample_method_dict`` for that scheme is invoked.
    """
    # chooses a sampling scheme randomly based on the mixing ratios
    seed = random.uniform(0, 1)
    ratio = np.cumsum(self.scheme_ratios)
    ratio = ratio > seed
    # Select the first scheme whose cumulative ratio exceeds the draw.
    # NOTE(review): if no entry is True (ratios not summing to 1?), the
    # last scheme is silently used -- confirm ratios are normalized.
    for i, v in enumerate(ratio):
        if v:
            break
    sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
    return sample_method()
|
def run_dssp(pdb, path=True, outfile=None):
    """Uses DSSP to find helices and extracts helices from a pdb file or
    string.

    Parameters
    ----------
    pdb : str
        Path to pdb file, or the pdb file contents as a string.
    path : bool, optional
        Indicates if pdb is a path (True) or file contents (False).
    outfile : str, optional
        Filepath for storing the dssp output.

    Returns
    -------
    dssp_out : str
        Std out from DSSP.
    """
    if not path:
        if type(pdb) == str:
            pdb = pdb.encode()
        # Write the contents to a named temp file so the external DSSP
        # binary can open it by name; delete=False because it is removed
        # explicitly in the finally block below.
        try:
            temp_pdb = tempfile.NamedTemporaryFile(delete=False)
            temp_pdb.write(pdb)
            temp_pdb.seek(0)
            dssp_out = subprocess.check_output([global_settings['dssp']['path'], temp_pdb.name])
            temp_pdb.close()
        finally:
            os.remove(temp_pdb.name)
    else:
        dssp_out = subprocess.check_output([global_settings['dssp']['path'], pdb])
    # Python 3 string formatting.
    dssp_out = dssp_out.decode()
    if outfile:
        with open(outfile, 'w') as outf:
            outf.write(dssp_out)
    return dssp_out
|
def find_handlers(event_name, registry=HANDLER_REGISTRY):
    """Small helper to find all handlers associated to a given event.

    If the event can't be found, an empty list will be returned, since
    this is an internal function and all validation against the event
    name and its existence was already performed.
    """
    handlers = []
    # event_name can be a BaseEvent or the string representation
    if isinstance(event_name, basestring):  # NOTE(review): Python 2 only name
        # A string name may match several glob-style patterns registered
        # as keys; collect the handlers of every matching pattern.
        matched_events = [event for event in registry.keys() if fnmatch.fnmatchcase(event_name, event)]
        for matched_event in matched_events:
            handlers.extend(registry.get(matched_event))
    else:
        # Event objects are resolved to their canonical key first.
        handlers = registry.get(find_event(event_name), [])
    return handlers
|
def get_edu_text(text_subtree):
    """Return the text of the given EDU subtree.

    The subtree must be labeled as a text node; its leaf tokens (UTF-8
    encoded bytes) are decoded and joined with single spaces.
    """
    assert text_subtree.label() == SubtreeType.text
    return u' '.join(word.decode('utf-8') for word in text_subtree.leaves())
|
def _to_unit_base ( self , base_unit , values , unit , from_unit ) :
"""Return values in a given unit given the input from _ unit ."""
|
self . _is_numeric ( values )
namespace = { 'self' : self , 'values' : values }
if not from_unit == base_unit :
self . is_unit_acceptable ( from_unit , True )
statement = '[self._{}_to_{}(val) for val in values]' . format ( self . _clean ( from_unit ) , self . _clean ( base_unit ) )
values = eval ( statement , namespace )
namespace [ 'values' ] = values
if not unit == base_unit :
self . is_unit_acceptable ( unit , True )
statement = '[self._{}_to_{}(val) for val in values]' . format ( self . _clean ( base_unit ) , self . _clean ( unit ) )
values = eval ( statement , namespace )
return values
|
def parse_eggs_list(path):
    """Parse the eggs list from the script at the given path.

    The eggs are the lines between the ``sys.path[0:0]`` assignment and
    the closing ``]`` bracket; the extracted slice is cleaned up by
    ``tidy_eggs_list``.
    """
    with open(path, 'r') as script:
        lines = script.readlines()
    first = 0
    last = 0
    for idx, line in enumerate(lines):
        # First line after the sys.path insertion opens the eggs list.
        if not first and 'sys.path[0:0]' in line:
            first = idx + 1
        # The closing bracket on a later line ends it.
        if idx >= first and not last and ']' in line:
            last = idx
    return tidy_eggs_list(lines[first:last])
|
def select_cell(self, row, col, add_to_selected=False):
    """Selects a single cell.

    :param row: row index of the cell
    :param col: column index of the cell
    :param add_to_selected: when True, add the cell to the existing
        selection instead of replacing it.
    """
    # A 1x1 block selection is how the wx grid selects an individual cell.
    self.grid.SelectBlock(row, col, row, col, addToSelected=add_to_selected)
|
def checkmagic(self):
    """Overridable.

    Check to see if the file object ``self.lib`` actually has a file we
    understand: the archive magic bytes at offset ``self.start``, then the
    Python-version magic, then 4 bytes that are read and discarded.

    :raises ArchiveReadError: when either magic check fails.
    """
    self.lib.seek(self.start)  # default - magic is at start of file
    if self.lib.read(len(self.MAGIC)) != self.MAGIC:
        raise ArchiveReadError("%s is not a valid %s archive file" % (self.path, self.__class__.__name__))
    if self.lib.read(len(self.pymagic)) != self.pymagic:
        raise ArchiveReadError("%s has version mismatch to dll" % (self.path))
    # Skip 4 bytes following the magics (presumably a header field such
    # as a timestamp) -- TODO confirm against the archive writer.
    self.lib.read(4)
|
def get_list(self, question, splitter=",", at_least=0, at_most=float("inf")):
    """Ask a question and parse the answer into a list.

    :param question: question to ask user
    :param splitter: split list elements with this char
    :param at_least: list must have at least this amount of elements
    :param at_most: list must have at most this amount of elements
    :return: user answer as a list of stripped strings
    """
    try:
        user_answer = self.get_answer(question)  # ask question
        # split on the separator and strip each item
        items = [str(item).strip() for item in user_answer.split(splitter)]
        # BUG FIX: the original used strict comparisons
        # (at_least < len < at_most), rejecting answers with exactly
        # `at_least` or `at_most` elements, contradicting the documented
        # "at least"/"at most" semantics.
        if at_least <= len(items) <= at_most:
            return items
        exc = "List is not correct. "
        exc += "There must be at least " + str(at_least) + " items, "
        exc += "and at most " + str(at_most) + ". "
        exc += "Use '" + str(splitter) + "' to separate items"
        raise Exception(exc)
    except Exception as exc:
        print(str(exc))
        # BUG FIX: re-ask with the same splitter; the original silently
        # reverted to the default "," on retry.
        return self.get_list(self.last_question, splitter=splitter,
                             at_least=at_least, at_most=at_most)
|
def split_matrix(M, contigs):
    """Split multiple chromosome matrix.

    Split a labeled matrix with multiple chromosomes into unlabeled
    single-chromosome matrices. Inter-chromosomal contacts are discarded.

    Parameters
    ----------
    M : array_like
        The multiple chromosome matrix to be split
    contigs : list or array_like
        The list of contig labels, one per bin (row/column) of M

    Yields
    ------
    The successive square diagonal blocks of M, one per run of equal
    contig labels.
    """
    index = 0
    # BUG FIX: the original called itertools.groubpy (typo -> AttributeError)
    # and took len() of the group iterator (TypeError); materialize the
    # group to count its members.
    for _, chunk in itertools.groupby(contigs):
        size = len(list(chunk))
        yield M[index:index + size, index:index + size]
        index += size
|
def save_webdriver_logs_by_type(self, log_type, test_name):
    """Get webdriver logs of the specified type and write them to a log
    file.

    :param log_type: browser, client, driver, performance, server, syslog,
        crashlog or logcat
    :param test_name: test that has generated these logs
    """
    try:
        logs = self.driver_wrapper.driver.get_log(log_type)
    except Exception:
        # Best-effort: some drivers do not support this log type at all.
        return
    if len(logs) > 0:
        # One file per test and log type, appended to across runs.
        log_file_name = '{}_{}.txt'.format(get_valid_filename(test_name), log_type)
        log_file_name = os.path.join(DriverWrappersPool.logs_directory, log_file_name)
        with open(log_file_name, 'a+', encoding='utf-8') as log_file:
            driver_type = self.driver_wrapper.config.get('Driver', 'type')
            log_file.write(u"\n{} '{}' test logs with driver = {}\n\n".format(datetime.now(), test_name, driver_type))
            for entry in logs:
                # Webdriver timestamps are epoch milliseconds.
                timestamp = datetime.fromtimestamp(float(entry['timestamp']) / 1000.).strftime('%Y-%m-%d %H:%M:%S.%f')
                log_file.write(u'{}\t{}\t{}\n'.format(timestamp, entry['level'], entry['message'].rstrip()))
|
def peek(self, size):
    """Nondestructively retrieve a given number of characters.

    The next :meth:`read` operation behaves as though this method was
    never called, because the characters read here are pushed back onto
    the internal buffer.

    :param size: The number of characters to retrieve.
    :type size: ``integer``
    """
    chunk = self.read(size)
    # Push the consumed characters back so a later read() sees them again.
    self._buffer = self._buffer + chunk
    return chunk
|
def unpack_to_nibbles(bindata):
    """Unpack packed binary data to nibbles.

    :param bindata: binary packed from nibbles
    :return: nibbles sequence, may have a terminator
    """
    o = bin_to_nibbles(bindata)
    # The first nibble carries the packing flags.
    flags = o[0]
    # Flag bit 2: the original sequence ended with a terminator nibble.
    if flags & 2:
        o.append(NIBBLE_TERMINATOR)
    # Flag bit 1 set: only the flag nibble was prepended (odd payload);
    # otherwise a padding nibble follows the flag nibble as well.
    if flags & 1 == 1:
        o = o[1:]
    else:
        o = o[2:]
    return o
|
def sort(self, *, key: Optional[Callable[[Any], Any]] = None, reverse: bool = False) -> None:
    """Sort _WeakList in place.

    :param key: Key by which to sort, default None; it is wrapped by
        ``self._sort_key`` so comparison operates on the dereferenced
        objects rather than the weak references themselves.
    :param reverse: True to sort in descending order, False by default.
    """
    # list.sort() sorts in place and returns None, matching the
    # annotated return type.
    return list.sort(self, key=self._sort_key(key), reverse=reverse)
|
def remove(path: str, max_retries: int = 3) -> bool:
    """Remove the specified path from the local filesystem if it exists.

    Directories will be removed along with all files and folders within
    them, as well as plain files.

    :param path:
        The location of the file or folder to remove.
    :param max_retries:
        The number of times to retry before giving up.
    :return:
        A boolean indicating whether or not the removal was successful.
    """
    if not path:
        return False
    if not os.path.exists(path):
        # Nothing to do: already gone counts as success.
        return True

    delete = os.remove if os.path.isfile(path) else shutil.rmtree
    attempts = 0
    while attempts < max_retries:
        try:
            delete(path)
            return True
        except Exception:
            # Pause briefly in case there's a race condition on a lock
            # for the target.
            time.sleep(0.02)
            attempts += 1
    return False
|
def register_intent_parser(self, intent_parser):
    """"Enforce" the intent parser interface at registration time.

    Args:
        intent_parser (intent): Intent to be registered.

    Raises:
        ValueError: on invalid intent
    """
    # Duck-type check: anything exposing a callable validate() qualifies.
    validate = getattr(intent_parser, 'validate', None)
    if not callable(validate):
        raise ValueError("%s is not an intent parser" % str(intent_parser))
    self.intent_parsers.append(intent_parser)
|
def add_request_handler_chain(self, request_handler_chain):
    # type: (GenericRequestHandlerChain) -> None
    """Checks the type before adding it to the
    request_handler_chains instance variable.

    :param request_handler_chain: Request Handler Chain instance.
    :type request_handler_chain: RequestHandlerChain
    :raises: :py:class:`ask_sdk_runtime.exceptions.DispatchException`
        if a null input is provided or if the input is of invalid type
    """
    # Reject both None and wrong types with the same dispatch error.
    if request_handler_chain is None or not isinstance(request_handler_chain, GenericRequestHandlerChain):
        raise DispatchException("Request Handler Chain is not a GenericRequestHandlerChain " "instance")
    self._request_handler_chains.append(request_handler_chain)
|
def open_python(self, message, namespace):
    """Open interactive python console.

    :param message: banner text shown when the console starts.
    :param namespace: dict used as the console's local namespace and as
        the source for tab completion.
    """
    # Importing readline will in some cases print weird escape
    # characters to stdout. To avoid this we only import readline
    # and related packages at this point when we are certain
    # they are needed.
    from code import InteractiveConsole
    import readline
    import rlcompleter
    # Wire tab completion to the provided namespace.
    readline.set_completer(rlcompleter.Completer(namespace).complete)
    readline.parse_and_bind('tab: complete')
    console = InteractiveConsole(namespace)
    # Blocks until the user exits the interactive session.
    console.interact(message)
|
def get_column(self, column_name):
    """Returns a column as a Series.

    Parameters
    ----------
    column_name : str

    Returns
    -------
    column : pandas.Series
    """
    with log_start_finish('getting single column {!r} from table {!r}'.format(column_name, self.name), logger):
        # Registered (computed) columns take precedence over the local
        # DataFrame's own columns.
        extra_cols = _columns_for_table(self.name)
        if column_name in extra_cols:
            with log_start_finish('computing column {!r} for table {!r}'.format(column_name, self.name), logger):
                column = extra_cols[column_name]()
        else:
            column = self.local[column_name]
        # copy_col guards callers against mutating the cached column.
        if self.copy_col:
            return column.copy()
        else:
            return column
|
def tostr(s, encoding='ascii'):
    """Convert string-like-thing ``s`` to the 'str' type, in all Pythons,
    even back before Python 2.6. What 'str' means varies by PY3K or not.
    In Pythons before 3.0, str and bytes are the same type.
    In Python 3+, this may require a decoding step.

    :param s: a str/bytes/unicode value to normalize.
    :param encoding: codec used for the decode/encode step, default ascii.
    """
    if PY3K:
        if isinstance(s, str):  # str == unicode in PY3K
            return s
        else:  # s is type bytes
            return s.decode(encoding)
    else:  # for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes
        # but handle if unicode is passed
        if isinstance(s, unicode):
            return s.encode(encoding)
        else:
            return s
|
def getAllNodeUids(self):
    '''getAllNodeUids - Gets all the internal uids of all nodes, their
    children, and all their children, and so on.

    @return set<uuid.UUID>
    '''
    # Recursively merge each child's uid set into one flat set.
    collected = set()
    for child in self:
        collected |= child.getAllNodeUids()
    return collected
|
def _addToBuffers(self, logname, data):
    """Add data to the buffer for logname.

    Start a timer to send the buffers if BUFFER_TIMEOUT elapses.
    If adding data causes the buffer size to grow beyond BUFFER_SIZE,
    then the buffers will be sent immediately.
    """
    n = len(data)
    # Track total buffered size across all lognames.
    self.buflen += n
    self.buffered.append((logname, data))
    if self.buflen > self.BUFFER_SIZE:
        # Over the size threshold: flush right away.
        self._sendBuffers()
    elif not self.sendBuffersTimer:
        # Otherwise ensure exactly one pending flush timer exists.
        self.sendBuffersTimer = self._reactor.callLater(self.BUFFER_TIMEOUT, self._bufferTimeout)
|
def aliased_as(self, name):
    """Create an alias of this stream.

    Returns an alias of this stream with name `name`. When invocation of
    an SPL operator requires an :py:class:`~streamsx.spl.op.Expression`
    against an input port, this can be used to ensure the expression
    matches the input port alias regardless of the name of the actual
    stream.

    Example use where the filter expression for a ``Filter`` SPL operator
    uses ``IN`` to access input tuple attribute ``seq``::

        s = s.aliased_as('IN')
        params = {'filter': op.Expression.expression('IN.seq % 4ul == 0ul')}
        f = op.Map('spl.relational::Filter', stream, params=params)

    Args:
        name(str): Name for returned stream.

    Returns:
        Stream: Alias of this stream with ``name`` equal to `name`.

    .. versionadded:: 1.9
    """
    # Shallow-copy so the alias shares topology state but carries its
    # own alias name; the original stream is left untouched.
    alias = copy.copy(self)
    alias._alias = name
    return alias
|
def add_mesh ( self , mesh , color = None , style = None , scalars = None , rng = None , stitle = None , show_edges = None , point_size = 5.0 , opacity = 1.0 , line_width = None , flip_scalars = False , lighting = None , n_colors = 256 , interpolate_before_map = False , cmap = None , label = None , reset_camera = None , scalar_bar_args = None , multi_colors = False , name = None , texture = None , render_points_as_spheres = None , render_lines_as_tubes = False , edge_color = 'black' , ambient = 0.0 , show_scalar_bar = None , nan_color = None , nan_opacity = 1.0 , loc = None , backface_culling = False , rgb = False , categories = False , ** kwargs ) :
"""Adds a unstructured , structured , or surface mesh to the
plotting object .
Also accepts a 3D numpy . ndarray
Parameters
mesh : vtk unstructured , structured , polymesh , or 3D numpy . ndarray
A vtk unstructured , structured , or polymesh to plot .
color : string or 3 item list , optional , defaults to white
Either a string , rgb list , or hex color string . For example :
color = ' white '
color = ' w '
color = [ 1 , 1 , 1]
color = ' # FFFFF '
Color will be overridden when scalars are input .
style : string , optional
Visualization style of the vtk mesh . One for the following :
style = ' surface '
style = ' wireframe '
style = ' points '
Defaults to ' surface '
scalars : numpy array , optional
Scalars used to " color " the mesh . Accepts an array equal
to the number of cells or the number of points in the
mesh . Array should be sized as a single vector . If both
color and scalars are None , then the active scalars are
used
rng : 2 item list , optional
Range of mapper for scalars . Defaults to minimum and
maximum of scalars array . Example : ` ` [ - 1 , 2 ] ` ` . ` ` clim ` `
is also an accepted alias for this .
stitle : string , optional
Scalar title . By default there is no scalar legend bar .
Setting this creates the legend bar and adds a title to
it . To create a bar with no title , use an empty string
( i . e . ' ' ) .
show _ edges : bool , optional
Shows the edges of a mesh . Does not apply to a wireframe
representation .
point _ size : float , optional
Point size . Applicable when style = ' points ' . Default 5.0
opacity : float , optional
Opacity of mesh . Should be between 0 and 1 . Default 1.0.
A string option can also be specified to map the scalar range
to the opacity . Options are : linear , linear _ r , geom , geom _ r
line _ width : float , optional
Thickness of lines . Only valid for wireframe and surface
representations . Default None .
flip _ scalars : bool , optional
Flip direction of cmap .
lighting : bool , optional
Enable or disable view direction lighting . Default False .
n _ colors : int , optional
Number of colors to use when displaying scalars . Default
256.
interpolate _ before _ map : bool , optional
Enabling makes for a smoother scalar display . Default
False
cmap : str , optional
cmap string . See available matplotlib cmaps . Only
applicable for when displaying scalars . Defaults None
( rainbow ) . Requires matplotlib .
multi _ colors : bool , optional
If a ` ` MultiBlock ` ` dataset is given this will color each
block by a solid color using matplotlib ' s color cycler .
name : str , optional
The name for the added mesh / actor so that it can be easily
updated . If an actor of this name already exists in the
rendering window , it will be replaced by the new actor .
texture : vtk . vtkTexture or np . ndarray or boolean , optional
A texture to apply if the input mesh has texture
coordinates . This will not work with MultiBlock
datasets . If set to ` ` True ` ` , the first avaialble texture
on the object will be used . If a string name is given , it
will pull a texture with that name associated to the input
mesh .
ambient : float , optional
When lighting is enabled , this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer . Default 0.2.
nan _ color : string or 3 item list , optional , defaults to gray
The color to use for all NaN values in the plotted scalar
array .
nan _ opacity : float , optional
Opacity of NaN values . Should be between 0 and 1.
Default 1.0
backface _ culling : bool optional
Does not render faces that should not be visible to the
plotter . This can be helpful for dense surface meshes ,
especially when edges are visible , but can cause flat
meshes to be partially displayed . Default False .
rgb : bool , optional
If an 2 dimensional array is passed as the scalars , plot those
values as RGB + A colors ! ` ` rgba ` ` is also accepted alias for this .
categories : bool , optional
If fetching a colormap from matplotlib , this is the number of
categories to use in that colormap . If set to ` ` True ` ` , then
the number of unique values in the scalar array will be used .
Returns
actor : vtk . vtkActor
VTK actor of the mesh ."""
|
# fixes lighting issue when using precalculated normals
if isinstance ( mesh , vtk . vtkPolyData ) :
if mesh . GetPointData ( ) . HasArray ( 'Normals' ) :
mesh . point_arrays [ 'Normals' ] = mesh . point_arrays . pop ( 'Normals' )
if scalar_bar_args is None :
scalar_bar_args = { }
if isinstance ( mesh , np . ndarray ) :
mesh = vtki . PolyData ( mesh )
style = 'points'
# Convert the VTK data object to a vtki wrapped object if neccessary
if not is_vtki_obj ( mesh ) :
mesh = wrap ( mesh )
if show_edges is None :
show_edges = rcParams [ 'show_edges' ]
if show_scalar_bar is None :
show_scalar_bar = rcParams [ 'show_scalar_bar' ]
if lighting is None :
lighting = rcParams [ 'lighting' ]
if rng is None :
rng = kwargs . get ( 'clim' , None )
if render_points_as_spheres is None :
render_points_as_spheres = rcParams [ 'render_points_as_spheres' ]
if name is None :
name = '{}({})' . format ( type ( mesh ) . __name__ , str ( hex ( id ( mesh ) ) ) )
if isinstance ( mesh , vtki . MultiBlock ) :
self . remove_actor ( name , reset_camera = reset_camera )
# frist check the scalars
if rng is None and scalars is not None : # Get the data range across the array for all blocks
# if scalar specified
if isinstance ( scalars , str ) :
rng = mesh . get_data_range ( scalars )
else : # TODO : an array was given . . . how do we deal with
# that ? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block ? This could get complicated real
# quick .
raise RuntimeError ( 'Scalar array must be given as a string name for multiblock datasets.' )
if multi_colors : # Compute unique colors for each index of the block
try :
import matplotlib as mpl
from itertools import cycle
cycler = mpl . rcParams [ 'axes.prop_cycle' ]
colors = cycle ( cycler )
except ImportError :
multi_colors = False
logging . warning ( 'Please install matplotlib for color cycles' )
# Now iteratively plot each element of the multiblock dataset
actors = [ ]
for idx in range ( mesh . GetNumberOfBlocks ( ) ) :
if mesh [ idx ] is None :
continue
# Get a good name to use
next_name = '{}-{}' . format ( name , idx )
# Get the data object
if not is_vtki_obj ( mesh [ idx ] ) :
data = wrap ( mesh . GetBlock ( idx ) )
if not is_vtki_obj ( mesh [ idx ] ) :
continue
# move on if we can ' t plot it
else :
data = mesh . GetBlock ( idx )
if data is None : # Note that a block can exist but be None type
continue
# Now check that scalars is available for this dataset
if isinstance ( data , vtk . vtkMultiBlockDataSet ) or get_scalar ( data , scalars ) is None :
ts = None
else :
ts = scalars
if multi_colors :
color = next ( colors ) [ 'color' ]
a = self . add_mesh ( data , color = color , style = style , scalars = ts , rng = rng , stitle = stitle , show_edges = show_edges , point_size = point_size , opacity = opacity , line_width = line_width , flip_scalars = flip_scalars , lighting = lighting , n_colors = n_colors , interpolate_before_map = interpolate_before_map , cmap = cmap , label = label , scalar_bar_args = scalar_bar_args , reset_camera = reset_camera , name = next_name , texture = None , render_points_as_spheres = render_points_as_spheres , render_lines_as_tubes = render_lines_as_tubes , edge_color = edge_color , show_scalar_bar = show_scalar_bar , nan_color = nan_color , nan_opacity = nan_opacity , loc = loc , rgb = rgb , ** kwargs )
actors . append ( a )
if ( reset_camera is None and not self . camera_set ) or reset_camera :
cpos = self . get_default_cam_pos ( )
self . camera_position = cpos
self . camera_set = False
self . reset_camera ( )
return actors
if nan_color is None :
nan_color = rcParams [ 'nan_color' ]
nanr , nanb , nang = parse_color ( nan_color )
nan_color = nanr , nanb , nang , nan_opacity
if color is True :
color = rcParams [ 'color' ]
if mesh . n_points < 1 :
raise RuntimeError ( 'Empty meshes cannot be plotted. Input mesh has zero points.' )
# set main values
self . mesh = mesh
self . mapper = vtk . vtkDataSetMapper ( )
self . mapper . SetInputData ( self . mesh )
if isinstance ( scalars , str ) :
self . mapper . SetArrayName ( scalars )
actor , prop = self . add_actor ( self . mapper , reset_camera = reset_camera , name = name , loc = loc , culling = backface_culling )
# Try to plot something if no preference given
if scalars is None and color is None and texture is None : # Prefer texture first
if len ( list ( mesh . textures . keys ( ) ) ) > 0 :
texture = True
# If no texture , plot any active scalar
else : # Make sure scalar components are not vectors / tuples
scalars = mesh . active_scalar
if scalars is None : # or scalars . ndim ! = 1:
scalars = None
else :
if stitle is None :
stitle = mesh . active_scalar_info [ 1 ]
if texture == True or isinstance ( texture , ( str , int ) ) :
texture = mesh . _activate_texture ( texture )
if texture :
if isinstance ( texture , np . ndarray ) :
texture = numpy_to_texture ( texture )
if not isinstance ( texture , ( vtk . vtkTexture , vtk . vtkOpenGLTexture ) ) :
raise TypeError ( 'Invalid texture type ({})' . format ( type ( texture ) ) )
if mesh . GetPointData ( ) . GetTCoords ( ) is None :
raise AssertionError ( 'Input mesh does not have texture coordinates to support the texture.' )
actor . SetTexture ( texture )
# Set color to white by default when using a texture
if color is None :
color = 'white'
if scalars is None :
show_scalar_bar = False
self . mapper . SetScalarModeToUsePointFieldData ( )
# Scalar formatting = = = = =
if cmap is None : # grab alias for cmaps : colormap
cmap = kwargs . get ( 'colormap' , None )
if cmap is None : # Set default map if matplotlib is avaialble
try :
import matplotlib
cmap = rcParams [ 'cmap' ]
except ImportError :
pass
title = 'Data' if stitle is None else stitle
if scalars is not None : # if scalars is a string , then get the first array found with that name
append_scalars = True
if isinstance ( scalars , str ) :
title = scalars
scalars = get_scalar ( mesh , scalars , preference = kwargs . get ( 'preference' , 'cell' ) , err = True )
if stitle is None :
stitle = title
# append _ scalars = False
if not isinstance ( scalars , np . ndarray ) :
scalars = np . asarray ( scalars )
if rgb is False or rgb is None :
rgb = kwargs . get ( 'rgba' , False )
if rgb :
if scalars . ndim != 2 or scalars . shape [ 1 ] < 3 or scalars . shape [ 1 ] > 4 :
raise ValueError ( 'RGB array must be n_points/n_cells by 3/4 in shape.' )
if scalars . ndim != 1 :
if rgb :
pass
elif scalars . ndim == 2 and ( scalars . shape [ 0 ] == mesh . n_points or scalars . shape [ 0 ] == mesh . n_cells ) :
scalars = np . linalg . norm ( scalars . copy ( ) , axis = 1 )
title = '{}-normed' . format ( title )
else :
scalars = scalars . ravel ( )
if scalars . dtype == np . bool :
scalars = scalars . astype ( np . float )
# Scalar interpolation approach
if scalars . shape [ 0 ] == mesh . n_points :
self . mesh . _add_point_scalar ( scalars , title , append_scalars )
self . mapper . SetScalarModeToUsePointData ( )
self . mapper . GetLookupTable ( ) . SetNumberOfTableValues ( n_colors )
if interpolate_before_map :
self . mapper . InterpolateScalarsBeforeMappingOn ( )
elif scalars . shape [ 0 ] == mesh . n_cells :
self . mesh . _add_cell_scalar ( scalars , title , append_scalars )
self . mapper . SetScalarModeToUseCellData ( )
self . mapper . GetLookupTable ( ) . SetNumberOfTableValues ( n_colors )
if interpolate_before_map :
self . mapper . InterpolateScalarsBeforeMappingOn ( )
else :
_raise_not_matching ( scalars , mesh )
# Set scalar range
if rng is None :
rng = [ np . nanmin ( scalars ) , np . nanmax ( scalars ) ]
elif isinstance ( rng , float ) or isinstance ( rng , int ) :
rng = [ - rng , rng ]
if np . any ( rng ) and not rgb :
self . mapper . SetScalarRange ( rng [ 0 ] , rng [ 1 ] )
# Flip if requested
table = self . mapper . GetLookupTable ( )
table . SetNanColor ( nan_color )
if cmap is not None :
try :
from matplotlib . cm import get_cmap
except ImportError :
cmap = None
logging . warning ( 'Please install matplotlib for color maps.' )
if cmap is not None :
try :
from matplotlib . cm import get_cmap
except ImportError :
raise Exception ( 'cmap requires matplotlib' )
if isinstance ( cmap , str ) :
if categories :
if categories is True :
categories = len ( np . unique ( scalars ) )
cmap = get_cmap ( cmap , categories )
else :
cmap = get_cmap ( cmap )
# ELSE : assume cmap is callable
ctable = cmap ( np . linspace ( 0 , 1 , n_colors ) ) * 255
ctable = ctable . astype ( np . uint8 )
# Set opactities
if isinstance ( opacity , str ) :
ctable [ : , - 1 ] = opacity_transfer_function ( opacity , n_colors )
if flip_scalars :
ctable = np . ascontiguousarray ( ctable [ : : - 1 ] )
table . SetTable ( VN . numpy_to_vtk ( ctable ) )
else : # no cmap specified
if flip_scalars :
table . SetHueRange ( 0.0 , 0.66667 )
else :
table . SetHueRange ( 0.66667 , 0.0 )
else :
self . mapper . SetScalarModeToUseFieldData ( )
# select view style
if not style :
style = 'surface'
style = style . lower ( )
if style == 'wireframe' :
prop . SetRepresentationToWireframe ( )
if color is None :
color = rcParams [ 'outline_color' ]
elif style == 'points' :
prop . SetRepresentationToPoints ( )
elif style == 'surface' :
prop . SetRepresentationToSurface ( )
else :
raise Exception ( 'Invalid style. Must be one of the following:\n' + '\t"surface"\n' + '\t"wireframe"\n' + '\t"points"\n' )
prop . SetPointSize ( point_size )
prop . SetAmbient ( ambient )
# edge display style
if show_edges :
prop . EdgeVisibilityOn ( )
rgb_color = parse_color ( color )
prop . SetColor ( rgb_color )
if isinstance ( opacity , ( float , int ) ) :
prop . SetOpacity ( opacity )
prop . SetEdgeColor ( parse_color ( edge_color ) )
if render_points_as_spheres :
prop . SetRenderPointsAsSpheres ( render_points_as_spheres )
if render_lines_as_tubes :
prop . SetRenderLinesAsTubes ( render_lines_as_tubes )
# legend label
if label :
if not isinstance ( label , str ) :
raise AssertionError ( 'Label must be a string' )
geom = single_triangle ( )
if scalars is not None :
geom = vtki . Box ( )
rgb_color = parse_color ( 'black' )
self . _labels . append ( [ geom , label , rgb_color ] )
# lighting display style
if not lighting :
prop . LightingOff ( )
# set line thickness
if line_width :
prop . SetLineWidth ( line_width )
# Add scalar bar if available
if stitle is not None and show_scalar_bar and not rgb :
self . add_scalar_bar ( stitle , ** scalar_bar_args )
return actor
|
def convert_dict(obj, ids, parent, attr_type, item_func, cdata):
    """Converts a dict into an XML string.

    :param obj: dictionary to convert.
    :param ids: if truthy, attach a unique "id" attribute to each element.
    :param parent: name of the enclosing element (used for ids/item names).
    :param attr_type: if truthy, add a "type" attribute to each element.
    :param item_func: callable mapping a parent name to the element name
        used for list items (forwarded to convert_list).
    :param cdata: whether to wrap text content in CDATA sections.
    :return: the concatenated XML fragments as a single string.
    """
    LOG.info('Inside convert_dict(): obj type is: "%s", obj="%s"' % (type(obj).__name__, unicode_me(obj)))
    output = []
    addline = output.append
    # Kept for its (potential) side effect in item_func; the value itself is
    # only consumed by convert_list for nested iterables.
    item_name = item_func(parent)
    for key, val in obj.items():
        LOG.info('Looping inside convert_dict(): key="%s", val="%s", type(val)="%s"' % (unicode_me(key), unicode_me(val), type(val).__name__))
        attr = {} if not ids else {'id': '%s' % (get_unique_id(parent))}
        key, attr = make_valid_xml_name(key, attr)
        # BUG FIX: bool is a subclass of numbers.Number, so the bool branch
        # must be tested *before* the generic number/string branch.
        # Previously it was unreachable and booleans were serialized as
        # numbers via convert_kv.
        if type(val) == bool:
            addline(convert_bool(key, val, attr_type, attr, cdata))
        elif isinstance(val, numbers.Number) or type(val) in (str, unicode):
            addline(convert_kv(key, val, attr_type, attr, cdata))
        elif hasattr(val, 'isoformat'):  # datetime
            addline(convert_kv(key, val.isoformat(), attr_type, attr, cdata))
        elif isinstance(val, dict):
            if attr_type:
                attr['type'] = get_xml_type(val)
            addline('<%s%s>%s</%s>' % (key, make_attrstring(attr), convert_dict(val, ids, key, attr_type, item_func, cdata), key))
        elif isinstance(val, collections.Iterable):
            if attr_type:
                attr['type'] = get_xml_type(val)
            addline('<%s%s>%s</%s>' % (key, make_attrstring(attr), convert_list(val, ids, key, attr_type, item_func, cdata), key))
        elif val is None:
            addline(convert_none(key, val, attr_type, attr, cdata))
        else:
            raise TypeError('Unsupported data type: %s (%s)' % (val, type(val).__name__))
    return ''.join(output)
|
def load_entry_point_group(self, entry_point_group):
    """Register every action advertised under an entry point group.

    :param entry_point_group: The entrypoint group name to load plugins.
    """
    entry_points = pkg_resources.iter_entry_points(group=entry_point_group)
    for entry_point in entry_points:
        loaded_action = entry_point.load()
        self.register_scope(loaded_action)
|
def sqlmany(self, stringname, *args):
    """Execute a named query once per argument tuple on my connection.

    ``stringname`` is either a key in the precompiled JSON strings or a
    method name on ``allegedb.alchemy.Alchemist``; the remaining arguments
    are tuples of parameters, one tuple per execution.
    """
    try:
        alchemist = self.alchemist
    except AttributeError:
        # No SQLAlchemy backend available; fall back to raw SQL strings.
        pass
    else:
        return getattr(alchemist.many, stringname)(*args)
    query = self.strings[stringname]
    cursor = self.connection.cursor()
    return cursor.executemany(query, args)
|
def get_compression_filter(byte_counts):
    """Decide whether (and how) to compress an array stored in HDF5.

    Compression reduces the HDF5 file size and also helps improving I/O
    efficiency for large datasets.  It is only enabled when twice the data
    size would exceed the available free memory.

    Parameters
    ----------
    byte_counts : int
        Size of the data in bytes; must be positive.

    Returns
    -------
    FILTERS : tables.Filters or None
        Filter settings for PyTables, or ``None`` when no compression is
        warranted.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
    # memory()['free'] appears to be in kB, hence the factor of 1000 --
    # TODO confirm units against the memory() helper.
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # BUG FIX: the PyTables class is ``Filters`` (capitalised);
            # ``tables.filters`` is a module and calling it raises TypeError.
            FILTERS = tables.Filters(complevel=5, complib='blosc', shuffle=True, least_significant_digit=6)
        except tables.FiltersWarning:
            # Fall back to LZO when blosc is unavailable.  NOTE(review):
            # FiltersWarning is a warning class and only surfaces as an
            # exception if the warnings filter escalates it -- confirm this
            # fallback actually triggers in practice.
            FILTERS = tables.Filters(complevel=5, complib='lzo', shuffle=True, least_significant_digit=6)
    else:
        FILTERS = None
    return FILTERS
|
def detect_unused_return_values(self, f):
    """Return the nodes where the return value of a call is unused

    Args:
        f (Function)
    Returns:
        list(Node)
    """
    pending_values = []
    origin_by_value = {}
    for node in f.nodes:
        for operation in node.irs:
            if isinstance(operation, HighLevelCall):
                # A return value written straight into a state variable
                # counts as used, so only track other lvalues.
                lvalue = operation.lvalue
                if lvalue and not isinstance(lvalue, StateVariable):
                    pending_values.append(lvalue)
                    origin_by_value[lvalue] = operation
            # Any subsequent read of a tracked value marks it as used.
            for read_value in operation.read:
                if read_value in pending_values:
                    pending_values.remove(read_value)
    return [origin_by_value[value].node for value in pending_values]
|
def _tokenize(self, text):
    """Tokenizes a piece of text."""
    text = self._clean_text(text)
    # Added November 1st, 2018 for the multilingual and Chinese models and
    # also applied to the English models.  Harmless for English: those
    # models were not trained on Chinese data and contain essentially no
    # Chinese text (Chinese characters appear in the vocabulary only
    # because English Wikipedia includes some Chinese words).
    text = self._tokenize_chinese_chars(text)
    split_tokens = []
    for raw_token in self._whitespace_tokenize(text):
        if self.lower:
            raw_token = raw_token.lower()
            raw_token = self._run_strip_accents(raw_token)
        for piece in self._run_split_on_punc(raw_token):
            split_tokens.append(piece)
    return self._whitespace_tokenize(' '.join(split_tokens))
|
def load(self, dtype_out_time, dtype_out_vert=False, region=False, plot_units=False, mask_unphysical=False):
    """Load the data from the object if possible or from disk.

    Lookup order: the in-memory cache (``self.data_out``), then scratch on
    disk, then the tar archive.  Whatever is loaded gets cached on the
    object for future calls.

    :param dtype_out_time: time-reduction flavour of the saved output.
    :param dtype_out_vert: vertical-reduction flavour (False for none).
    :param region: region the data was reduced over (False for none).
    :param plot_units: if True, convert values to the Var's plotting units.
    :param mask_unphysical: if True, mask values the Var deems unphysical.
    """
    msg = ("Loading data from disk for object={0}, dtype_out_time={1}, "
           "dtype_out_vert={2}, and region="
           "{3}".format(self, dtype_out_time, dtype_out_vert, region))
    logging.info(msg + ' ({})'.format(ctime()))
    # Grab from the object if its there.
    try:
        data = self.data_out[dtype_out_time]
    except (AttributeError, KeyError):
        # Otherwise get from disk.  Try scratch first, then archive.
        try:
            data = self._load_from_disk(dtype_out_time, dtype_out_vert, region=region)
        except IOError:
            data = self._load_from_tar(dtype_out_time, dtype_out_vert)
    # Copy the array to self.data_out for ease of future access.
    self._update_data_out(data, dtype_out_time)
    # Apply desired plotting/cleanup methods.
    if mask_unphysical:
        data = self.var.mask_unphysical(data)
    if plot_units:
        data = self.var.to_plot_units(data, dtype_vert=dtype_out_vert)
    return data
|
def parametrized_function(decorator):
    '''Decorator used to create decorators with arguments.

    Should be used with a function returning another function that will be
    called with the original function as the first parameter.  No
    distinction is made between methods and functions, so the wrapper
    function has to know whether its first argument is an instance (self).

    Note that when using reflect or annotate module functions, depth
    should be incremented by one.

    Example::

        @decorator.parametrized_function
        def mydecorator(function_original, decorator, arguments):
            def wrapper(call, arguments):
                # processing
                return function_original(call, arguments)
            return wrapper

        @mydecorator(decorator, arguments)
        def myfunction():
            pass
    '''
    def capture_arguments(*decorator_args, **decorator_kwargs):
        # Defer the real work to _NormalMetaDecorator, which later receives
        # the function being decorated.
        return _NormalMetaDecorator(decorator, decorator_args, decorator_kwargs)
    return capture_arguments
|
def is_github_ip(ip_str):
    """Verify that an IP address is owned by GitHub."""
    if isinstance(ip_str, bytes):
        ip_str = ip_str.decode()
    addr = ipaddress.ip_address(ip_str)
    # Normalise IPv4-mapped IPv6 addresses (::ffff:a.b.c.d) to plain IPv4
    # so they compare against GitHub's IPv4 blocks.
    if addr.version == 6 and addr.ipv4_mapped:
        addr = addr.ipv4_mapped
    return any(addr in ipaddress.ip_network(block) for block in load_github_hooks())
|
def to_dict(self):
    """Convert self to a dict object for serialization."""
    serialized_children = [child.to_dict() for child in self.children]
    return {
        'level': self.level,
        'id': self.id,
        'text': self.text,
        'inner_html': self.inner_html,
        'children': serialized_children,
    }
|
def refresh_state_in_ec(self, ec_index):
    '''Get the up-to-date state of the component in an execution context.

    This function will update the state, rather than using the cached
    value.  This may take time, if the component is executing on a remote
    node.

    @param ec_index The index of the execution context to check the state
                    in.  This index is into the total array of contexts,
                    that is both owned and participating contexts.  If the
                    value of ec_index is greater than the length of @ref
                    owned_ecs, that length is subtracted from ec_index and
                    the result used as an index into @ref
                    participating_ecs.
    '''
    with self._mutex:
        if ec_index >= len(self.owned_ecs):
            # Index falls past the owned contexts: re-base it into the
            # participating-contexts array.
            ec_index -= len(self.owned_ecs)
            if ec_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(ec_index)
            state = self._get_ec_state(self.participating_ecs[ec_index])
            # Refresh the cached state for this participating context.
            self.participating_ec_states[ec_index] = state
        else:
            state = self._get_ec_state(self.owned_ecs[ec_index])
            # Refresh the cached state for this owned context.
            self.owned_ec_states[ec_index] = state
    return state
|
def timeit_grid(stmt_list, setup='', iterations=10000, input_sizes=None, verbose=True, show=False):
    """Time each statement in ``stmt_list`` over a grid of input sizes.

    Each entry of ``stmt_list`` must name a callable defined by ``setup``;
    it is invoked as ``stmt(size)`` for every size in ``input_sizes`` and
    timed with ``timeit`` over ``iterations`` runs.

    Example::

        import utool as ut
        setup = ut.codeblock(
            '''
            import utool as ut
            from six.moves import range, zip
            import time

            def time_append(size):
                start_time = time.time()
                last_time = start_time
                list2 = []
                for x in range(size):
                    now_time = time.time()
                    between = now_time - last_time
                    last_time = now_time
                    list2.append(between)

            def time_assign(size):
                start_time = time.time()
                last_time = start_time
                list1 = ut.alloc_nones(size)
                for x in range(size):
                    now_time = time.time()
                    between = now_time - last_time
                    last_time = now_time
                    list1[x] = between

            def time_baseline(size):
                start_time = time.time()
                last_time = start_time
                for x in range(size):
                    now_time = time.time()
                    between = now_time - last_time
                    last_time = now_time

            def time_null(size):
                for x in range(size):
                    pass
            ''')
        stmt_list = ['time_assign', 'time_append', 'time_baseline', 'time_null']
        input_sizes = [100, 1000, 10000]
        ut.timeit_grid(stmt_list, setup, input_sizes=input_sizes, show=True)

    Returns:
        timings indexed by [size][statement]; an ``np.ndarray`` when
        ``show`` is True, otherwise a nested list.
    """
    import timeit
    # iterations = timeit.default_number
    if input_sizes is None:
        # Default grid: powers of two from 128 to 8192.
        input_sizes = [2 ** count for count in range(7, 14)]
    time_grid = []
    for size in input_sizes:
        time_list = []
        for stmt in stmt_list:
            # Build the call expression, e.g. "time_append(128)".
            stmt_ = stmt + '(' + str(size) + ')'
            if verbose:
                print('running stmt_=%r' % (stmt_,))
            time = timeit.timeit(stmt_, setup=setup, number=iterations)
            if verbose:
                print('... took %r seconds' % (time,))
            time_list.append(time)
        time_grid.append(time_list)
    if show:
        # Optional timing-vs-size plot (requires the plottool package).
        time_grid = np.array(time_grid)
        import plottool as pt
        color_list = pt.distinct_colors(len(stmt_list))
        for count, (stmt, color) in enumerate(zip(stmt_list, color_list)):
            pt.plot(input_sizes, time_grid.T[count], 'x-', color=color, label=stmt)
        pt.dark_background()
        pt.legend()
        pt.show_if_requested()
    return time_grid
|
def get_design(self, design_name):
    """Return a dict representation of the design document with the
    matching name.

    :param design_name: name of the design document.
    :raises requests.RequestException: if the HTTP request fails.
    """
    # The previous ``try/except: raise`` wrapper was a no-op (a bare
    # ``except`` that immediately re-raised); exceptions now propagate to
    # the caller unchanged.
    r = requests.request(
        "GET",
        "%s/%s/_design/%s" % (self.host, self.database_name, design_name),
        auth=self.auth,
    )
    return self.result(r.text)
|
def adjust_all_to_360(dictionary):
    """Take a dictionary and check each key/value pair.

    If a key is of type declination/longitude/azimuth/direction, adjust its
    value in place to lie within 0-360 as required by the MagIC data model.
    Returns the same (mutated) dictionary.
    """
    for key, value in dictionary.items():
        dictionary[key] = adjust_to_360(value, key)
    return dictionary
|
def p_array(self, t):
    """expression : '{' commalist '}'
                  | kw_array '[' commalist ']'"""
    # NOTE: the docstring above is a functional PLY grammar rule -- do not
    # edit it without updating the parser grammar.
    if len(t) == 4:
        # '{' commalist '}' form: commalist is t[2].
        t[0] = ArrayLit(t[2].children)
    elif len(t) == 5:
        # kw_array '[' commalist ']' form: commalist is t[3].
        t[0] = ArrayLit(t[3].children)
    else:
        raise NotImplementedError('unk_len', len(t))
        # pragma : no cover
|
def text_to_speech(self, text, file, voice_name=None, language=None):
    """Save synthesized audio for given text into ``file``, via the
    'CreateSpeech' endpoint.

    Docs:
        http://developer.ivona.com/en/speechcloud/actions.html#CreateSpeech

    :param text: text to synthesize
    :type text: str
    :param file: file that will be used to save the audio
    :type file: file
    :param voice_name: voice name
    :type voice_name: str
    :param language: voice language
    :type language: str
    """
    endpoint = 'CreateSpeech'
    # Fall back to the instance-level voice/language when not overridden.
    payload = {
        'Input': {'Data': text},
        'OutputFormat': {'Codec': self.codec.upper()},
        'Parameters': {
            'Rate': self.rate,
            'Volume': self.volume,
            'SentenceBreak': self.sentence_break,
            'ParagraphBreak': self.paragraph_break,
        },
        'Voice': {
            'Name': voice_name or self.voice_name,
            'Language': language or self.language,
        },
    }
    response = self._get_response('post', endpoint, payload)
    file.write(response.content)
|
def _PreParse ( self , key , value ) :
"""Executed against each field of each row read from index table ."""
|
if key == "Command" :
return re . sub ( r"(\[\[.+?\]\])" , self . _Completion , value )
else :
return value
|
def c2r(self):
    """Get real matrix from complex one suitable for solving complex linear system with real solver.

    For matrix :math:`M(i_1,j_1,\\ldots,i_d,j_d) = \\Re M + i\\Im M` returns
    (d+1)-dimensional matrix
    :math:`\\tilde{M}(i_1,j_1,\\ldots,i_d,j_d,i_{d+1},j_{d+1})` of form
    :math:`\\begin{bmatrix}\\Re M & -\\Im M \\\\ \\Im M & \\Re M\\end{bmatrix}`.
    This function is useful for solving complex linear system
    :math:`\\mathcal{A}X = B` with real solver by transforming it into

    .. math::
       \\begin{bmatrix}\\Re\\mathcal{A} & -\\Im\\mathcal{A} \\\\
       \\Im\\mathcal{A} & \\Re\\mathcal{A}\\end{bmatrix}
       \\begin{bmatrix}\\Re X \\\\ \\Im X\\end{bmatrix} =
       \\begin{bmatrix}\\Re B \\\\ \\Im B\\end{bmatrix}.
    """
    # Append a trailing mode of size 2x2 carrying the real/imaginary block
    # structure; the underlying TT tensor builds the complex-to-real
    # operator in 'M'atrix mode.
    # NOTE(review): ``self.tt.__complex_op`` is subject to name mangling
    # against the *enclosing* class -- confirm it resolves to the intended
    # private method on the tt object.
    return matrix(a=self.tt.__complex_op('M'), n=_np.concatenate((self.n, [2])), m=_np.concatenate((self.m, [2])))
|
def concat_expr(operator, conditions):
    """Concatenate `conditions` with `operator` and wrap the result in ().

    Returns a single-element list containing the wrapped expression, or an
    empty list when the joined expression is empty.
    """
    joined = " {0} ".format(operator).join(conditions)
    if not joined:
        return []
    return ["({0})".format(joined)]
|
def remove_edge_fun(graph):
    """Returns a function that removes an edge from the `graph`.

    .. note:: The out node is removed if it becomes isolated.

    :param graph:
        A directed graph.
    :type graph: networkx.classes.digraph.DiGraph

    :return:
        A function that removes an edge from the `graph`.
    :rtype: callable
    """
    from networkx import is_isolate
    # Namespace shortcuts for speed.
    rm_edge, rm_node = graph.remove_edge, graph.remove_node

    def remove_edge(u, v):
        # Remove the edge, then drop the target node if it is now isolated.
        rm_edge(u, v)
        if is_isolate(graph, v):
            rm_node(v)

    return remove_edge
|
def get1Dcut(cam: List[Cam], odir: Path = None, verbose: bool = False) -> List[Cam]:
    """Geographically register two cameras to pixel indices.

    i.   get az/el of each pixel (rotated/transposed as appropriate)
    ii.  get cartesian ECEF of each pixel end, a point outside the grid
         (to create rays to check intersections with grid)
    iii. put cameras in same frame, getting az/el to each other's pixel ends
    iv.  find the indices corresponding to those angles

    Now the cameras are geographically registered to pixel indices.
    """
    # %% determine slant range between other camera and magnetic zenith to evaluate at
    # 4.5 had zero discards for hst0; 6.8 didn't quite get to zenith
    srpts = logspace(4.3, 6.9, 25)
    # %% (i) load az/el data from Astrometry.net
    for C in cam:
        if C.usecam:
            C.doorient()
            C.toecef(srpts)
    # optional: plot ECEF of points between each camera and magnetic zenith
    # (lying at az, el relative to each camera)
    if verbose:
        plotLOSecef(cam, odir)
    # %% (2) get az, el of these points from camera to the other camera's points
    cam[0].az2pts, cam[0].el2pts, cam[0].r2pts = ecef2aer(cam[1].x2mz, cam[1].y2mz, cam[1].z2mz, cam[0].lat, cam[0].lon, cam[0].alt_m)
    cam[1].az2pts, cam[1].el2pts, cam[1].r2pts = ecef2aer(cam[0].x2mz, cam[0].y2mz, cam[0].z2mz, cam[1].lat, cam[1].lon, cam[1].alt_m)
    # %% (3) find indices corresponding to these az, el in each image
    # and least-squares fit a line to the nearest points found in step 3
    for C in cam:
        if C.usecam:
            C.findClosestAzel(odir)
    if verbose and odir:
        dbgfn = odir / 'debugLSQ.h5'
        print('writing', dbgfn)
        with h5py.File(dbgfn, 'w') as f:
            for C in cam:
                f[f'/cam{C.name}/cutrow'] = C.cutrow
                f[f'/cam{C.name}/cutcol'] = C.cutcol
                # BUG FIX: this key was missing its f-string prefix, so every
                # camera wrote to the literal key '/cam{C.name}/xpix',
                # clobbering the previous camera's data.
                f[f'/cam{C.name}/xpix'] = C.xpix
    return cam
|
def get_field_mappings(self, field):
    """Converts ES field mappings to .kibana field mappings"""
    converted = {'indexed': False, 'analyzed': False}
    for (key, val) in iteritems(field):
        if key not in self.mappings:
            continue
        # Collapse all ES numeric types into kibana's single "number" type.
        if key == 'type' and val in ("long", "integer", "double", "float"):
            val = "number"
        converted[key] = val
        if key == 'index' and val != "no":
            converted['indexed'] = True
            if val == "analyzed":
                converted['analyzed'] = True
    return converted
|
def click_and_hold(self, on_element=None):
    """Holds down the left mouse button on an element.

    :Args:
     - on_element: The element to mouse down.
       If None, clicks on current mouse position.
    """
    if on_element:
        self.move_to_element(on_element)
    if not self._driver.w3c:
        # Legacy JSON-wire protocol: queue the raw mouse-down command.
        self._actions.append(lambda: self._driver.execute(Command.MOUSE_DOWN, {}))
    else:
        self.w3c_actions.pointer_action.click_and_hold()
        self.w3c_actions.key_action.pause()
    return self
|
def holidays(self, start=None, end=None, return_name=False):
    """Returns a curve with holidays between start_date and end_date

    Parameters
    ----------
    start : starting date, datetime-like, optional
    end : ending date, datetime-like, optional
    return_name : bool, optional
        If True, return a series that has dates and holiday names.
        False will only return a DatetimeIndex of dates.

    Returns
    -------
    DatetimeIndex of holidays
    """
    if self.rules is None:
        raise Exception('Holiday Calendar {name} does not have any '
                        'rules specified'.format(name=self.name))
    if start is None:
        start = AbstractHolidayCalendar.start_date
    if end is None:
        end = AbstractHolidayCalendar.end_date
    start = Timestamp(start)
    end = Timestamp(end)
    holidays = None
    # If we don't have a cache or the dates are outside the prior cache, we
    # get them again
    if (self._cache is None or start < self._cache[0] or end > self._cache[1]):
        for rule in self.rules:
            # Always fetch names here; trimmed down to bare dates on return
            # when the caller did not ask for names.
            rule_holidays = rule.dates(start, end, return_name=True)
            if holidays is None:
                holidays = rule_holidays
            else:
                holidays = holidays.append(rule_holidays)
        # Cache the window plus the sorted holiday Series for reuse.
        # NOTE(review): assumes self.rules is non-empty; with an empty
        # rules list `holidays` stays None and sort_index() would fail.
        self._cache = (start, end, holidays.sort_index())
    holidays = self._cache[2]
    holidays = holidays[start:end]
    if return_name:
        return holidays
    else:
        return holidays.index
|
def _python3_record_factory(*args, **kwargs):
    """Python 3 approach to custom logging, via the log-record factory.

    Inspired by:
    https://docs.python.org/3/howto/logging-cookbook.html#customizing-logrecord

    :return: A log record augmented with the values required by LOG_FORMAT,
             as per `_update_record(...)`.
    """
    log_record = _python_record_factory(*args, **kwargs)
    # Mutates the record in place with the extra LOG_FORMAT fields.
    _update_record(log_record)
    return log_record
|
def coerce(cls, key, value):
    """Convert plain dictionary to MutationDict"""
    # Coerce every value so nested mutations are tracked too.
    coerced_items = {k: MutationObj.coerce(key, v) for (k, v) in value.items()}
    tracked = MutationDict(coerced_items)
    tracked._key = key
    return tracked
|
def extract_nodes(soup, nodename, attr=None, value=None):
    """Return a list of tags (nodes) from the given soup matching nodename.

    If both an attribute name and a value are given, only tags whose
    ``attr`` equals ``value`` are returned.

    :param soup: BeautifulSoup object (anything exposing ``find_all``).
    :param nodename: tag name to search for.
    :param attr: optional attribute name to filter on.
    :param value: required value of ``attr`` when filtering.
    :return: list of matching tags.
    """
    tags = soup.find_all(nodename)
    # Idiom fixes: compare against None with ``is not`` rather than ``!=``,
    # and use a list comprehension instead of filter+lambda.
    if attr is not None and value is not None:
        return [tag for tag in tags if tag.get(attr) == value]
    return list(tags)
|
def model(self, data, ind_col=None, dep_col=None, project_ind_col=None, baseline_period=None, projection_period=None, exclude_time_period=None, alphas=np.logspace(-4, 1, 30), cv=3, plot=True, figsize=None, custom_model_func=None):
    """Split data into baseline and projection periods, run models on them
    and display metrics & plots.

    Parameters
    ----------
    data : pd.DataFrame()
        Dataframe to model.
    ind_col : list(str)
        Independent column(s) of dataframe. Defaults to all columns except the last.
    dep_col : str
        Dependent column of dataframe.
    project_ind_col : list(str)
        Independent column(s) to use for projection. If none, use ind_col.
    baseline_period : list(str)
        List of time periods to split the data into baseline periods. It
        needs to have a start and an end date. Defaults to [None, None].
    projection_period : list(str)
        List of time periods to split the data into projection periods. It needs to have a start and an end date.
    exclude_time_period : list(str)
        List of time periods to exclude for modeling.
    alphas : list(int)
        List of alphas to run regression on.
    cv : int
        Number of folds for cross-validation.
    plot : bool
        Specifies whether to save plots or not.
    figsize : tuple
        Size of the plots.
    custom_model_func : function
        Model with specific hyper-parameters provided by user.

    Returns
    -------
    dict
        Metrics of the optimal/best model.
    """
    # BUG FIX: the default for baseline_period used to be the mutable
    # literal [None, None], shared across all calls; use a None sentinel.
    if baseline_period is None:
        baseline_period = [None, None]
    # Check to ensure data is a pandas dataframe
    if not isinstance(data, pd.DataFrame):
        raise SystemError('data has to be a pandas dataframe.')
    # Create instance
    model_data_obj = Model_Data(data, ind_col, dep_col, alphas, cv, exclude_time_period, baseline_period, projection_period)
    # Split data into baseline and projection
    model_data_obj.split_data()
    # Logging
    self.result['Model'] = {
        'Independent Col': ind_col,
        'Dependent Col': dep_col,
        'Projection Independent Col': project_ind_col,
        'Baseline Period': baseline_period,
        'Projection Period': projection_period,
        'Exclude Time Period': exclude_time_period,
        'Alphas': list(alphas),
        'CV': cv,
        'Plot': plot,
        'Fig Size': figsize
    }
    # Runs all models on the data and returns optimal model
    all_metrics = model_data_obj.run_models()
    self.result['Model']['All Model\'s Metrics'] = all_metrics
    # CHECK: Define custom model's parameter and return types in documentation.
    if custom_model_func:
        self.result['Model']['Custom Model\'s Metrics'] = model_data_obj.custom_model(custom_model_func)
    # Fit optimal model to data
    self.result['Model']['Optimal Model\'s Metrics'] = model_data_obj.best_model_fit()
    if plot:
        # Use project_ind_col if projecting into the future (no input data
        # other than weather data)
        input_col = model_data_obj.input_col if not project_ind_col else project_ind_col
        fig, y_true, y_pred = self.plot_data_obj.baseline_projection_plot(model_data_obj.y_true, model_data_obj.y_pred, model_data_obj.baseline_period, model_data_obj.projection_period, model_data_obj.best_model_name, model_data_obj.best_metrics['adj_r2'], model_data_obj.original_data, input_col, model_data_obj.output_col, model_data_obj.best_model, self.result['Site'])
        fig.savefig(self.results_folder_name + '/baseline_projection_plot-' + str(self.get_global_count()) + '.png')
        if not y_true.empty and not y_pred.empty:
            saving_absolute = (y_pred - y_true).sum()
            saving_perc = (saving_absolute / y_pred.sum()) * 100
            self.result['Energy Savings (%)'] = float(saving_perc)
            self.result['Energy Savings (absolute)'] = saving_absolute
            # Temporary
            self.project_df['true'] = y_true
            self.project_df['pred'] = y_pred
            # Calculate uncertainity of savings
            self.result['Uncertainity'] = self.uncertainity_equation(model_data_obj, y_true, y_pred, 0.9)
        else:
            print('y_true: ', y_true)
            print('y_pred: ', y_pred)
            print('Error: y_true and y_pred are empty. Default to -1.0 savings.')
            self.result['Energy Savings (%)'] = float(-1.0)
            self.result['Energy Savings (absolute)'] = float(-1.0)
    # NOTE(review): this returns self.best_metrics, while metrics appear to
    # live on model_data_obj.best_metrics -- confirm the attribute exists
    # on self or whether model_data_obj.best_metrics was intended.
    return self.best_metrics
|
def _validate_parameters(self):
    """Validate the connection parameters, raising on the first bad one.

    :raises AMQPInvalidArgument: if any parameter has the wrong type
    :return: None
    """
    # Table of (parameter name, type predicate, expected-type description),
    # checked in the same order as before so the first failure wins.
    checks = (
        ('hostname', compatibility.is_string, 'a string'),
        ('port', compatibility.is_integer, 'an integer'),
        ('username', compatibility.is_string, 'a string'),
        ('password', compatibility.is_string, 'a string'),
        ('virtual_host', compatibility.is_string, 'a string'),
        ('timeout', lambda value: isinstance(value, (int, float)),
         'an integer or float'),
        ('heartbeat', compatibility.is_integer, 'an integer'),
    )
    for parameter, predicate, expected in checks:
        if not predicate(self.parameters[parameter]):
            raise AMQPInvalidArgument('%s should be %s' % (parameter, expected))
|
def middleMouseClickEvent(argosPgPlotItem, axisNumber, mouseClickEvent):
    """Emits sigAxisReset when the middle mouse button is clicked on an axis of the plot item."""
    # Ignore anything but the middle button.
    if mouseClickEvent.button() != QtCore.Qt.MiddleButton:
        return
    mouseClickEvent.accept()
    argosPgPlotItem.emitResetAxisSignal(axisNumber)
|
def feed(self, url_template, keyword, offset, max_num, page_step):
    """Feed search URLs onto the output queue once.

    Args:
        url_template: A format string whose two ``{}`` slots receive the
            keyword and the page offset, in that order.
        keyword: A string indicating the searching keyword.
        offset: An integer indicating the starting index.
        max_num: An integer indicating the max number of images to be crawled.
        page_step: An integer added to offset after each iteration.
    """
    for page_offset in range(offset, offset + max_num, page_step):
        feed_url = url_template.format(keyword, page_offset)
        self.out_queue.put(feed_url)
        self.logger.debug('put url to url_queue: {}'.format(feed_url))
|
def fmt_tag(cur_namespace, tag, val):
    """Processes a documentation reference.

    :param cur_namespace: namespace object (with a ``name`` attribute) the
        doc appears in, or ``None``.
    :param tag: doc-ref tag: ``type``, ``route``, ``link``, ``val`` or ``field``.
    :param val: the tag's value text.
    :raises RuntimeError: for an unknown tag.
    """
    if tag == 'type':
        # Fully qualify bare type names with the current namespace.
        if '.' not in val and cur_namespace is not None:
            return cur_namespace.name + '.' + val
        return val
    if tag == 'route':
        # Routes may carry an explicit version suffix after a colon.
        if ':' in val:
            val, version_text = val.split(':', 1)
            version = int(version_text)
        else:
            version = 1
        return fmt_func(val, version) + "()"
    if tag == 'link':
        anchor, link = val.rsplit(' ', 1)
        # There's no way to have links in TSDoc, so simply use JSDoc's
        # formatting. It's entirely possible some editors support this.
        return '[%s]{@link %s}' % (anchor, link)
    if tag in ('val', 'field'):
        # Value types seem to match JavaScript (true, false, null);
        # field names pass through unchanged.
        return val
    raise RuntimeError('Unknown doc ref tag %r' % tag)
|
def updateProgress(self, time, state='stopped'):
    """Set the watched progress for this video.

    Note that setting the time to 0 will not work. Use `markWatched` or
    `markUnwatched` to achieve that goal.

    Parameters:
        time (int): milliseconds watched
        state (string): state of the video, default 'stopped'
    """
    progress_key = ('/:/progress?key=%s&identifier=com.plexapp.plugins.library'
                    '&time=%d&state=%s' % (self.ratingKey, time, state))
    self._server.query(progress_key)
    # Refresh local state so it reflects the new progress.
    self.reload()
|
def setup_owner(self, name, new_owner=default, transact=None):
    """Set the owner of the supplied name to `new_owner`.

    For typical scenarios, you'll never need to call this method directly,
    simply call :meth:`setup_name` or :meth:`setup_address`. This method
    does *not* set up the name to point to an address.

    If `new_owner` is not supplied, then this will assume you want the same
    owner as the parent domain.

    If the caller owns ``parentname.eth`` with no subdomains and calls this
    method with ``sub.parentname.eth``, then ``sub`` will be created as part
    of this call.

    :param str name: ENS name to set up
    :param new_owner: account that will own `name`. If ``None``, set owner
        to empty addr. If not specified, name will point to the parent
        domain owner's address.
    :param dict transact: the transaction configuration, like in
        :meth:`~web3.eth.Eth.sendTransaction`
    :raises InvalidName: if `name` has invalid syntax
    :raises UnauthorizedError: if ``'from'`` in `transact` does not own `name`
    :returns: the new owner's address
    """
    # Fix: the previous mutable default argument (transact={}) was shared
    # across calls; use None and create a fresh dict per call instead.
    if transact is None:
        transact = {}
    (super_owner, unowned, owned) = self._first_owner(name)
    if new_owner is default:
        # Not specified: inherit the owner of the closest owned ancestor.
        new_owner = super_owner
    elif not new_owner:
        new_owner = EMPTY_ADDR_HEX
    else:
        new_owner = to_checksum_address(new_owner)
    current_owner = self.owner(name)
    if new_owner == EMPTY_ADDR_HEX and not current_owner:
        # Nothing to clear: the name is already unowned.
        return None
    elif current_owner == new_owner:
        # Already owned by the requested account; no transaction needed.
        return current_owner
    else:
        self._assert_control(super_owner, name, owned)
        self._claim_ownership(new_owner, unowned, owned, super_owner, transact=transact)
        return new_owner
|
def create_items(portal_type=None, uid=None, endpoint=None, **kw):
    """Create one content item per record in the request payload.

    Target container resolution:

    1. If the uid is given, get the object and create the content in there
       (assumed that it is folderish)
    2. If the uid is 0, the target folder is assumed the portal.
    3. If there is no uid given, the payload is checked for either a key

       - `parent_uid`  specifies the *uid* of the target folder
       - `parent_path` specifies the *physical path* of the target folder

    :param portal_type: portal type to create; if None it is taken from
        the request records
    :param uid: UID of the target container (or falsy to resolve per record)
    :param endpoint: endpoint name used when rendering the created items
    :return: list of rendered items for the created objects
    """
    # disable CSRF
    req.disable_csrf_protection()
    # destination where to create the content
    container = uid and get_object_by_uid(uid) or None
    # extract the data from the request
    records = req.get_request_data()
    results = []
    for record in records:
        # get the portal_type
        if portal_type is None:
            # try to fetch the portal type out of the request data
            # NOTE(review): once taken from the first record, this value is
            # reused for every subsequent record -- confirm that payloads
            # mixing portal types are not expected.
            portal_type = record.pop("portal_type", None)
        # check if it is allowed to create the portal_type
        if not is_creation_allowed(portal_type):
            fail(401, "Creation of '{}' is not allowed".format(portal_type))
        if container is None:
            # find the container for content creation
            # NOTE(review): the container resolved for the first record is
            # also reused for subsequent records -- confirm this is intended.
            container = find_target_container(portal_type, record)
        # Check if we have a container and a portal_type
        if not all([container, portal_type]):
            fail(400, "Please provide a container path/uid and portal_type")
        # create the object and pass in the record data
        obj = create_object(container, portal_type, **record)
        results.append(obj)
    if not results:
        fail(400, "No Objects could be created")
    return make_items_for(results, endpoint=endpoint)
|
def add_primary_key(conn, schema, table, pk_col):
    r"""Adds primary key to database table

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    pk_col : str
        Column that primary key is applied to
    """
    # NOTE(review): identifiers are interpolated directly into the SQL; the
    # caller must ensure schema/table/pk_col come from trusted sources.
    statement = 'alter table {schema}.{table} add primary key ({col})'.format(
        schema=schema, table=table, col=pk_col)
    conn.execute(statement)
|
def stream_data(self, host=HOST, port=GPSD_PORT, enable=True, gpsd_protocol=PROTOCOL, devicepath=None):
    """Connect and command, point and shoot, flail and bail.

    Connects the underlying socket to the gpsd daemon and issues a watch
    request for the selected protocol.

    :param host: gpsd host to connect to (module default HOST)
    :param port: gpsd port to connect to (module default GPSD_PORT)
    :param enable: whether to enable the watch stream
    :param gpsd_protocol: reporting protocol (module default PROTOCOL)
    :param devicepath: optional device path to watch; None watches all devices
    """
    self.socket.connect(host, port)
    self.socket.watch(enable, gpsd_protocol, devicepath)
|
def broadcast_dimension_size(variables: List[Variable], ) -> 'OrderedDict[Any, int]':
    """Extract dimension sizes from a dictionary of variables.

    Raises ValueError if any dimensions have different sizes.
    """
    sizes = OrderedDict()  # type: OrderedDict[Any, int]
    for variable in variables:
        # Each variable pairs dimension names with its shape entries.
        for name, length in zip(variable.dims, variable.shape):
            if name in sizes and length != sizes[name]:
                raise ValueError('index %r not aligned' % name)
            sizes[name] = length
    return sizes
|
def md5(self):
    """MD5 of scene which will change when meshes or transforms are changed

    Returns
    ----------
    hashed: str, MD5 hash of scene
    """
    # Seed with the transform-graph hash, then fold in every geometry.
    pieces = [self.graph.md5()]
    for geometry in self.geometry.values():
        if hasattr(geometry, 'md5'):
            pieces.append(geometry.md5())
        elif hasattr(geometry, 'tostring'):
            pieces.append(str(hash(geometry.tostring())))
        else:
            # Fall back to hashing the object directly; this may raise
            # for unhashable geometry.
            pieces.append(str(hash(geometry)))
    return util.md5_object(''.join(pieces))
|
def trigger(queue, user=None, group=None, mode=None, trigger=_c.FSQ_TRIGGER):
    '''Installs a trigger (a named fifo) for the specified queue.

    Fix: the Python-2-only ``except (...), e`` syntax is replaced with
    ``except (...) as e``, which is valid on Python 2.6+ and Python 3.

    :param queue: queue name to install the trigger for
    :param user: fifo owner (defaulted via _dflts when None)
    :param group: fifo group (defaulted via _dflts when None)
    :param mode: fifo permission bits (defaulted via _dflts when None)
    :param trigger: trigger file name within the queue
    '''
    # default our owners and mode
    user, group, mode = _dflts(user, group, mode)
    trigger_path = fsq_path.trigger(queue, trigger=trigger)
    created = False
    try:
        # mkfifo is incapable of taking unicode, coerce back to str
        try:
            os.mkfifo(trigger_path.encode(_c.FSQ_CHARSET), mode)
            created = True
        except (OSError, IOError) as e:
            # if failure not due to existence, rm and bail
            if e.errno != errno.EEXIST:
                raise e
        # don't open and fchown here, as opening WRONLY without an open
        # reading fd will hang, opening RDONLY will zombie if we don't
        # flush, and intercepts triggers meant to go elsewheres
        os.chmod(trigger_path, mode)
        if user is not None or group is not None:
            os.chown(trigger_path, *uid_gid(user, group, path=trigger_path))
    except (OSError, IOError) as e:
        # only rm if we created and failed, otherwise leave it and fail
        if created:
            _cleanup(trigger_path, e)
        _raise(trigger_path, e)
|
def generate_data(method, args):
    """Assign arguments to body or URL of an HTTP request.

    Parameters
        method (str)
            HTTP Method. (e.g. 'POST')
        args (dict)
            Dictionary of data to attach to each Request.
            e.g. {'latitude': 37.561, 'longitude': -122.742}

    Returns
        (str or dict, dict or str)
            Tuple of (data, params): for body-carrying methods, data is the
            JSON-formatted argument string and params is empty; otherwise
            params holds the arguments and data is empty.
    """
    if method in http.BODY_METHODS:
        # Body-carrying methods (e.g. POST) send the args as JSON.
        return dumps(args), {}
    # Everything else passes args as URL query parameters.
    return {}, args
|
def start_of_day(dtime_at=None):
    """Returns the local (user timezone) start of day, that's,
    time 00:00:00 for a given datetime"""
    moment = datetime_or_now(dtime_at)
    # NOTE(review): the date components are used as-is and merely tagged
    # with the local timezone; no timezone conversion is performed first --
    # confirm that callers always pass local datetimes.
    return datetime.datetime(moment.year, moment.month, moment.day, tzinfo=tzlocal())
|
def get_process_work_item_type(self, process_id, wit_ref_name, expand=None):
    """GetProcessWorkItemType.

    [Preview API] Returns a single work item type in a process.

    :param str process_id: The ID of the process
    :param str wit_ref_name: The reference name of the work item type
    :param str expand: Flag to determine what properties of work item type to return
    :rtype: :class:`<ProcessWorkItemType> <azure.devops.v5_0.work_item_tracking_process.models.ProcessWorkItemType>`
    """
    # Serialize only the route arguments that were actually supplied.
    route_values = {
        route_key: self._serialize.url(arg_name, arg_value, 'str')
        for route_key, arg_name, arg_value in (
            ('processId', 'process_id', process_id),
            ('witRefName', 'wit_ref_name', wit_ref_name),
        )
        if arg_value is not None
    }
    query_parameters = {}
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
    response = self._send(http_method='GET',
                          location_id='e2e9d1a6-432d-4062-8870-bfcb8c324ad7',
                          version='5.0-preview.2',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('ProcessWorkItemType', response)
|
def get_remote_mgmt_addr(self, tlv_data):
    """Returns Remote Mgmt Addr from the TLV."""
    ok, parsed_val = self._check_common_tlv_format(
        tlv_data, "IPv4:", "Management Address TLV")
    if not ok:
        return None
    # parsed_val[1] holds the text after the "IPv4:" marker; the address
    # is the first line of it.
    address = parsed_val[1].split('\n')[0].strip()
    return 'IPv4:' + address
|
def InitLocCheck(self):
    """Build the Step 5 panel: an interactive grid in which users can edit
    location names and fill in missing location metadata.

    If er_magic_data has no locations, shows a message dialog and skips
    straight ahead to InitAgeCheck.
    """
    self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
    text = """Step 5:
Check that locations are correctly named.
Fill in any blank cells using controlled vocabularies.
(See Help button for details)
** Denotes controlled vocabulary"""
    label = wx.StaticText(self.panel, label=text)
    # self.Data_hierarchy = self.ErMagic.Data_hierarchy
    self.locations = self.er_magic_data.locations
    if not self.er_magic_data.locations:
        # No location data at all: inform the user, tear down this panel
        # and continue with the next step.
        msg = "You have no data in er_locations, so we are skipping step 5.\n Note that location names must be entered at the measurements level,so you may need to re-import your data, or you can add a location in step 3"
        dlg = wx.MessageDialog(None, caption="Message:", message=msg, style=wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()
        self.panel.Destroy()
        self.InitAgeCheck()
        return
    # Build the editable location grid.
    self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'location', self.er_magic_data.headers, self.panel)
    self.loc_grid = self.grid_builder.make_grid(incl_pmag=False)
    self.loc_grid.InitUI()
    self.grid_builder.add_data_to_grid(self.loc_grid, 'location', incl_pmag=False)
    self.grid = self.loc_grid
    # initialize all needed drop-down menus
    self.drop_down_menu = drop_down_menus.Menus("location", self, self.loc_grid, None)
    # need to find max/min lat/lon here IF they were added in the previous grid
    sites = self.er_magic_data.sites
    location_lat_lon = self.er_magic_data.get_min_max_lat_lon(self.er_magic_data.locations)
    col_names = ('location_begin_lat', 'location_end_lat', 'location_begin_lon', 'location_end_lon')
    col_inds = [self.grid.col_labels.index(name) for name in col_names]
    col_info = list(zip(col_names, col_inds))
    # Pre-fill the computed lat/lon bounds for each location row.
    for loc in self.er_magic_data.locations:
        row_ind = self.grid.row_labels.index(loc.name)
        for col_name, col_ind in col_info:
            info = location_lat_lon[loc.name][col_name]
            self.grid.SetCellValue(row_ind, col_ind, str(info))
    ### Create Buttons ###
    hbox_one = wx.BoxSizer(wx.HORIZONTAL)
    self.helpButton = wx.Button(self.panel, label="Help")
    self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicLocationHelp.html"), self.helpButton)
    hbox_one.Add(self.helpButton)
    hboxok = wx.BoxSizer(wx.HORIZONTAL)
    self.saveButton = wx.Button(self.panel, id=-1, label='Save')
    self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.loc_grid), self.saveButton)
    self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
    self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
    self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
    self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.loc_grid, next_dia=self.InitAgeCheck), self.continueButton)
    self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
    previous_dia = self.InitSampCheck
    self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia, current_dia=self.InitLocCheck), self.backButton)
    hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
    hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
    hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
    hboxok.Add(self.backButton)
    hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'location', self.grid)
    self.deleteRowButton = hboxgrid.deleteRowButton
    self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
    ### Make Containers ###
    vbox = wx.BoxSizer(wx.VERTICAL)
    vbox.Add(label, flag=wx.ALIGN_CENTER | wx.TOP | wx.BOTTOM, border=20)
    vbox.Add(hbox_one, flag=wx.BOTTOM | wx.ALIGN_LEFT, border=10)
    vbox.Add(hboxok, flag=wx.BOTTOM | wx.ALIGN_LEFT, border=10)
    vbox.Add(hboxgrid, flag=wx.BOTTOM | wx.ALIGN_LEFT, border=10)
    vbox.Add(self.loc_grid, flag=wx.TOP | wx.BOTTOM, border=10)
    vbox.AddSpacer(20)
    self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
    self.hbox_all.AddSpacer(20)
    self.hbox_all.Add(vbox)
    self.hbox_all.AddSpacer(20)
    self.panel.SetSizer(self.hbox_all)
    # if sys.platform in ['win32', 'win64']:
    #     self.panel.SetScrollbars(20, 20, 50, 50)
    self.hbox_all.Fit(self)
    self.Centre()
    self.Show()
    # Hide/Show cycle forces a proper re-layout on some platforms.
    self.Hide()
    self.Show()
|
def substitute_url_with_ref(self, txt):
    """In the string `txt`, replace links to online docs with
    corresponding sphinx cross-references.

    :param txt: text possibly containing markdown links ``[label](url)``
    :return: the text with each resolvable link replaced by a sphinx ref
    """
    # Find markdown-style links: [label](url); the label may itself contain
    # one bracketed segment. (The previous ``if mi:`` guard was dead code:
    # re.finditer always returns an iterator object, which is truthy.)
    for mo in re.finditer(r'\[([^\]]+|\[[^\]]+\])\]\(([^\)]+)\)', txt):
        mtxt = mo.group(0)  # full matching text
        lbl = mo.group(1)   # link label
        url = mo.group(2)   # link url
        # Try to look up the current link url. Issue a warning if the
        # lookup fails, and do the substitution if it succeeds.
        try:
            ref = self.get_sphinx_ref(url, lbl)
        except KeyError as ex:
            print('Warning: %s' % ex.args[0])
        else:
            # Fix: use str.replace instead of re.sub -- ``ref`` is a
            # literal string, and re.sub would interpret backslashes in it
            # as replacement-template escapes.
            txt = txt.replace(mtxt, ref)
    return txt
|
def fetch(self, sql, *args, **kwargs):
    """Executes an SQL SELECT query and returns the first row or `None`.

    :param sql: statement to execute
    :param args: parameters iterable
    :param kwargs: parameters iterable
    :return: the first row or `None`
    """
    # Hold the connection lock only for the duration of the query.
    with self.locked() as connection:
        result = connection.query(sql, *args, **kwargs)
        return result.fetch()
|
def get_dev_asset_details(ipaddress, auth, url):
    """Takes in ipaddress as input to fetch device asset details from HP IMC RESTFUL API

    :param ipaddress: IP address of the device you wish to gather the asset details
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: object of type list containing the device asset details, with each asset contained in a dictionary
    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.netassets import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> single_asset = get_dev_asset_details('10.101.0.1', auth.creds, auth.url)
    >>> assert type(single_asset) is list
    >>> assert 'name' in single_asset[0]
    """
    f_url = url + "/imcrs/netasset/asset?assetDevice.ip=" + str(ipaddress)
    try:
        # Fix: issue the GET inside the try block so connection failures are
        # actually caught by the RequestException handler below (previously
        # the request happened before the try and the handler was dead code).
        r = requests.get(f_url, auth=auth, headers=HEADERS)
        if r.status_code == 200:
            dev_asset_info = json.loads(r.text)
            if len(dev_asset_info) > 0:
                dev_asset_info = dev_asset_info['netAsset']
            if isinstance(dev_asset_info, dict):
                # Single asset: normalize to a one-element list.
                dev_asset_info = [dev_asset_info]
            if isinstance(dev_asset_info, list):
                # Keep only assets matching the requested device IP.
                dev_asset_info[:] = [dev for dev in dev_asset_info
                                     if dev.get('deviceIp') == ipaddress]
            return dev_asset_info
        # NOTE(review): non-200 responses fall through and return None,
        # matching the original behaviour.
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + ' get_dev_asset_details: An Error has occured'
|
def on_mismatch(self, pair):
    """Called for pairs that don't match `match` and `exclude` filters.

    If --delete-unmatched is on, remove the remote resource.
    """
    remote_entry = pair.remote
    if not (self.options.get("delete_unmatched") and remote_entry):
        # Nothing to delete: either the option is off or there is no
        # remote counterpart.
        self._log_action("skip", "unmatched", "-", pair.any_entry, min_level=4)
        return
    self._log_action("delete", "unmatched", ">", remote_entry)
    remover = self._remove_dir if remote_entry.is_dir() else self._remove_file
    remover(remote_entry)
|
def resolution_profile(self, graph, partition_type, resolution_range, weights=None, bisect_func=lambda p: p.bisect_value(), min_diff_bisect_value=1, min_diff_resolution=1e-3, linear_bisection=False, number_iterations=1, **kwargs):
    """Use bisectioning on the resolution parameter in order to construct a
    resolution profile.

    Fix: the Python-2-only ``dict.iteritems()`` and ``dict.has_key()`` calls
    are replaced with ``dict.items()`` and the ``in`` operator, which behave
    identically on Python 2 and also work on Python 3.

    Parameters
    ----------
    graph
        The graph for which to construct a resolution profile.
    partition_type
        The type of :class:`~VertexPartition.MutableVertexPartition` used
        to find a partition (must support resolution parameters obviously).
    resolution_range
        The range of resolution values that we would like to scan.
    weights
        If provided, indicates the edge attribute to use as a weight.

    Returns
    -------
    list of :class:`~VertexPartition.MutableVertexPartition`
        A list of partitions for different resolutions.

    Other Parameters
    ----------------
    bisect_func
        The function used for bisectioning. For the methods currently
        implemented, this should usually not be altered.
    min_diff_bisect_value
        The difference in the value returned by the bisect_func below which
        the bisectioning stops (i.e. by default, a difference of a single
        edge does not trigger further bisectioning).
    min_diff_resolution
        The difference in resolution below which the bisectioning stops. For
        positive differences, the logarithmic difference is used by default,
        i.e. ``diff = log(res_1) - log(res_2) = log(res_1/res_2)``, for which
        ``diff > min_diff_resolution`` to continue bisectioning. Set
        linear_bisection to true in order to use only linear bisectioning
        (in the case of negative resolution parameters for example, which
        can happen with negative weights).
    linear_bisection
        Whether the bisectioning will be done on a linear or on a
        logarithmic basis (if possible).
    number_iterations
        Indicates the number of iterations of the algorithm to run. If
        negative (or zero) the algorithm is run until a stable iteration.

    Examples
    --------
    >>> G = ig.Graph.Famous('Zachary')
    >>> optimiser = la.Optimiser()
    >>> profile = optimiser.resolution_profile(G, la.CPMVertexPartition,
    ...                                        resolution_range=(0,1))
    """
    # Helper function for cleaning values to be a stepwise function
    def clean_stepwise(bisect_values):
        # Check best partition for each resolution parameter
        for res, bisect in bisect_values.items():
            best_bisect = bisect
            best_quality = bisect.partition.quality(res)
            for res2, bisect2 in bisect_values.items():
                if bisect2.partition.quality(res) > best_quality:
                    best_bisect = bisect2
                    best_quality = bisect2.partition.quality(res)
            if best_bisect != bisect:
                bisect_values[res] = best_bisect
        # We only need to keep the changes in the bisection values
        bisect_list = sorted([(res, part.bisect_value)
                              for res, part in bisect_values.items()],
                             key=lambda x: x[0])
        for (res1, v1), (res2, v2) in zip(bisect_list, bisect_list[1:]):
            # If two consecutive bisection values are the same, remove the
            # second resolution parameter
            if v1 == v2:
                del bisect_values[res2]
        for res, bisect in bisect_values.items():
            bisect.partition.resolution_parameter = res

    # We assume here that the bisection values are monotonically decreasing
    # with increasing resolution parameter values.
    def ensure_monotonicity(bisect_values, new_res):
        # First check if this partition improves on any other partition
        for res, bisect_part in bisect_values.items():
            if bisect_values[new_res].partition.quality(res) > bisect_part.partition.quality(res):
                bisect_values[res] = bisect_values[new_res]
        # Then check what is best partition for the new_res
        current_quality = bisect_values[new_res].partition.quality(new_res)
        best_res = new_res
        for res, bisect_part in bisect_values.items():
            if bisect_part.partition.quality(new_res) > current_quality:
                # NOTE(review): this assigns new_res rather than res, which
                # makes the assignment below a no-op; possibly it should be
                # ``best_res = res``. Preserved as-is to avoid changing
                # behaviour.
                best_res = new_res
        bisect_values[new_res] = bisect_values[best_res]

    def find_partition(self, graph, partition_type, weights=None, **kwargs):
        # Run the optimiser until a stable iteration (or the iteration cap).
        partition = partition_type(graph, weights=weights, **kwargs)
        n_itr = 0
        while self.optimise_partition(partition) > 0 and (n_itr < number_iterations or number_iterations <= 0):
            n_itr += 1
        return partition

    assert issubclass(partition_type, LinearResolutionParameterVertexPartition), "Bisectioning only works on partitions with a linear resolution parameter."
    # Start actual bisectioning
    bisect_values = {}
    stack_res_range = []
    # Push first range onto the stack
    stack_res_range.append(resolution_range)
    # Make sure the bisection values are calculated for both endpoints.
    # The namedtuple we will use in the bisection function
    BisectPartition = namedtuple('BisectPartition', ['partition', 'bisect_value'])
    partition = find_partition(self, graph=graph, partition_type=partition_type, weights=weights, resolution_parameter=resolution_range[0], **kwargs)
    bisect_values[resolution_range[0]] = BisectPartition(partition=partition, bisect_value=bisect_func(partition))
    partition = find_partition(self, graph=graph, partition_type=partition_type, weights=weights, resolution_parameter=resolution_range[1], **kwargs)
    bisect_values[resolution_range[1]] = BisectPartition(partition=partition, bisect_value=bisect_func(partition))
    # While stack of ranges not yet empty
    while stack_res_range:
        # Get the current range from the stack
        current_range = stack_res_range.pop()
        # Get the difference in bisection values
        diff_bisect_value = abs(bisect_values[current_range[0]].bisect_value -
                                bisect_values[current_range[1]].bisect_value)
        # Get the difference in resolution parameter (in log space if 0 is
        # not in the interval, assuming only non-negative resolution
        # parameters).
        if current_range[0] > 0 and current_range[1] > 0 and not linear_bisection:
            diff_resolution = log(current_range[1] / current_range[0])
        else:
            diff_resolution = abs(current_range[1] - current_range[0])
        # Check if we still want to scan a smaller interval
        if diff_bisect_value > min_diff_bisect_value and diff_resolution > min_diff_resolution:
            # Determine new resolution value (geometric mean in log space,
            # arithmetic mean otherwise)
            if current_range[0] > 0 and current_range[1] > 0 and not linear_bisection:
                new_res = sqrt(current_range[1] * current_range[0])
            else:
                new_res = sum(current_range) / 2.0
            # Bisect left (push on stack)
            stack_res_range.append((current_range[0], new_res))
            # Bisect right (push on stack)
            stack_res_range.append((new_res, current_range[1]))
            # If we haven't scanned this resolution value yet, do so now
            if new_res not in bisect_values:
                partition = find_partition(self, graph, partition_type=partition_type, weights=weights, resolution_parameter=new_res, **kwargs)
                bisect_values[new_res] = BisectPartition(partition=partition, bisect_value=bisect_func(partition))
            # Because of stochastic differences in different runs, the
            # monotonicity of the bisection values might be violated, so
            # check for any inconsistencies
            ensure_monotonicity(bisect_values, new_res)
    # Ensure we only keep those resolution values for which the bisection
    # values actually changed, instead of all of them
    clean_stepwise(bisect_values)
    # Return the partitions in increasing order of resolution value.
    return sorted((bisect.partition for res, bisect in bisect_values.items()),
                  key=lambda x: x.resolution_parameter)
|
def get_recipes_in_node(node):
    """Gets the name of all recipes present in the run_list of a node."""
    # run_list entries look like "recipe[name]" or "role[name]"; keep only
    # the bracketed name of the recipe entries.
    return [
        entry.split('[')[1].split(']')[0]
        for entry in node.get('run_list', [])
        if entry.startswith("recipe")
    ]
|
def new(cls, num=None, *args, **kwargs):
    """Create a new main project and make it the current one.

    Parameters
    ----------
    num : int
        The number of the project
    %(Project.parameters.no_num)s

    Returns
    -------
    Project
        The project with the given `num` (if it does not already exist,
        it is created)

    See Also
    --------
    scp: Sets the current project
    gcp: Returns the current project
    """
    created = cls(*args, num=num, **kwargs)
    # Register the freshly created project as the current one.
    scp(created)
    return created
|
def _parse_perfdata ( self , s ) :
"""Parse performance data from a perfdata string"""
|
metrics = [ ]
counters = re . findall ( self . TOKENIZER_RE , s )
if counters is None :
self . log . warning ( "Failed to parse performance data: {s}" . format ( s = s ) )
return metrics
for ( key , value , uom , warn , crit , min , max ) in counters :
try :
norm_value = self . _normalize_to_unit ( float ( value ) , uom )
metrics . append ( ( key , norm_value ) )
except ValueError :
self . log . warning ( "Couldn't convert value '{value}' to float" . format ( value = value ) )
return metrics
|
def update(self, alert_condition_id, policy_id, type=None, condition_scope=None, name=None, entities=None, metric=None, runbook_url=None, terms=None, user_defined=None, enabled=None):
    """Updates any of the optional parameters of the alert condition.

    Parameters left as ``None`` keep the values currently stored on the
    server for the target condition.

    :type alert_condition_id: int
    :param alert_condition_id: Alerts condition id to update
    :type policy_id: int
    :param policy_id: Alert policy id where target alert condition belongs to
    :type type: str
    :param type: The type of the condition, can be apm_app_metric,
        apm_kt_metric, servers_metric, browser_metric, mobile_metric
    :type condition_scope: str
    :param condition_scope: The scope of the condition, can be instance or application
    :type name: str
    :param name: The name of the server
    :type entities: list[str]
    :param entities: entity ids to which the alert condition is applied
    :type metric: str
    :param metric: The target metric
    :type runbook_url: str
    :param runbook_url: The url of the runbook
    :type terms: list[hash]
    :param terms: list of hashes containing threshold config for the alert
    :type user_defined: hash
    :param user_defined: hash containing threshold user_defined for the
        alert, required if metric is set to user_defined
    :type enabled: bool
    :param enabled: Whether to enable that alert condition
    :rtype: dict
    :return: The JSON response of the API, shaped like::

        {
            "condition": {
                "id": "integer",
                "type": "string",
                "condition_scope": "string",
                "name": "string",
                "enabled": "boolean",
                "entities": ["integer"],
                "metric": "string",
                "runbook_url": "string",
                "terms": [
                    {
                        "duration": "string",
                        "operator": "string",
                        "priority": "string",
                        "threshold": "string",
                        "time_function": "string"
                    }
                ],
                "user_defined": {
                    "metric": "string",
                    "value_function": "string"
                }
            }
        }

    :raises: This will raise a
        :class:`NewRelicAPIServerException<newrelic_api.exceptions.NoEntityException>`
        if target alert condition is not included in target policy
    :raises: This will raise a
        :class:`ConfigurationException<newrelic_api.exceptions.ConfigurationException>`
        if metric is set as user_defined but user_defined config is not passed
    """
    # Fetch the policy's conditions and locate the one to update.
    conditions_dict = self.list(policy_id)
    target_condition = None
    for condition in conditions_dict['conditions']:
        if int(condition['id']) == alert_condition_id:
            target_condition = condition
            break
    if target_condition is None:
        # NOTE(review): the two adjacent string literals concatenate without
        # a separating space, producing "...policy.policy_id: ..." -- likely
        # a missing space in the message.
        raise NoEntityException('Target alert condition is not included in that policy.' 'policy_id: {}, alert_condition_id {}'.format(policy_id, alert_condition_id))
    # Merge caller-supplied values over the currently stored condition.
    data = {'condition': {'type': type or target_condition['type'], 'name': name or target_condition['name'], 'entities': entities or target_condition['entities'], 'condition_scope': condition_scope or target_condition['condition_scope'], 'terms': terms or target_condition['terms'], 'metric': metric or target_condition['metric'], 'runbook_url': runbook_url or target_condition['runbook_url'], }}
    if enabled is not None:
        # The API expects "true"/"false" strings rather than booleans.
        data['condition']['enabled'] = str(enabled).lower()
    if data['condition']['metric'] == 'user_defined':
        # user_defined metrics require an accompanying config, either given
        # by the caller or carried over from the existing condition.
        if user_defined:
            data['condition']['user_defined'] = user_defined
        elif 'user_defined' in target_condition:
            data['condition']['user_defined'] = target_condition['user_defined']
        else:
            raise ConfigurationException('Metric is set as user_defined but no user_defined config specified')
    return self._put(url='{0}alerts_conditions/{1}.json'.format(self.URL, alert_condition_id), headers=self.headers, data=data)
|
def removePhenotypeAssociationSet(phenotypeAssociationSet):
    """Remove a phenotype association set from the repo"""
    # Delete the single row whose primary key matches this set's id.
    models.Phenotypeassociationset.delete().where(
        models.Phenotypeassociationset.id == phenotypeAssociationSet.getId()
    ).execute()
|
def view_vector(self, vector, viewup=None):
    """Point the camera in the direction of the given vector"""
    focal_point = self.center
    if viewup is None:
        # Fall back to the globally configured default view-up direction.
        viewup = rcParams['camera']['viewup']
    # Camera sits at focal point + vector, looking back at the focal point
    # with the requested up direction.
    self.camera_position = [vector + np.array(focal_point), focal_point, viewup]
    return self.reset_camera()
|
def coinc(self, s0, s1, slide, step):  # pylint:disable=unused-argument
    """Calculate the final coinc ranking statistic.

    Approximates the log likelihood ratio as the sum of the single-ifo
    negative log noise likelihoods, adds each ifo's threshold stat via the
    idealized Gaussian formula, and converts back to a coinc-SNR-like
    statistic using loglr ~ rho_c**2 / 2.
    """
    # Sum of the single-ifo negative log noise likelihoods.
    loglr = -(s0 + s1)
    # Idealized Gaussian contribution: thresh**2 / 2 per ifo.
    for ifo in self.ifos:
        thresh = self.fits_by_tid[ifo]['thresh']
        loglr += thresh * thresh / 2.
    # rho_c = sqrt(2 * loglr).
    return (2. * loglr) ** 0.5
|
def threshold_monitor_hidden_threshold_monitor_Memory_high_limit(self, **kwargs):
    """Auto Generated Code"""
    # Build config/threshold-monitor-hidden/threshold-monitor/Memory/high-limit.
    config = ET.Element("config")
    hidden = ET.SubElement(
        config, "threshold-monitor-hidden",
        xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
    memory = ET.SubElement(ET.SubElement(hidden, "threshold-monitor"), "Memory")
    ET.SubElement(memory, "high-limit").text = kwargs.pop('high_limit')
    # Caller may override the dispatch callback; default to the instance's.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def cbc_mac(key, data):
    """AES-256-CBC-MAC on the data used in ProcessData.
    Does not use padding (data has to be already padded).
    :param key:
    :param data:
    :return:
    """
    # CBC mode with an all-zero IV; the MAC is the final 16-byte
    # ciphertext block, which chains over every block of the input.
    cipher = AES.new(key, AES.MODE_CBC, get_zero_vector(16))
    return cipher.encrypt(data)[-16:]
|
def parse_grasp_gwas(fn):
    """Read GRASP database and filter for unique hits.

    Parameters
    ----------
    fn : str
        Path to (subset of) GRASP database.

    Returns
    -------
    df : pandas.DataFrame
        Pandas dataframe with de-duplicated, significant SNPs. The index is of
        the form chrom:pos where pos is the one-based position of the SNP. The
        columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
        not actually an RSID. chrom, start, end make a zero-based bed file with
        the SNP coordinates.
    """
    df = pd.read_table(fn, low_memory=False)
    # Keep only suggestive genome-wide hits.
    df = df[df.Pvalue < 1e-5]
    # Sort so the most significant record at each position comes first, then
    # keep only that record.  DataFrame.sort(columns=...) was removed from
    # pandas; sort_values(by=...) has the same ordering semantics.
    df = df.sort_values(by=['chr(hg19)', 'pos(hg19)', 'Pvalue'])
    df = df.drop_duplicates(subset=['chr(hg19)', 'pos(hg19)'])
    df['chrom'] = 'chr' + df['chr(hg19)'].astype(str)
    # end is the one-based SNP position; start = end - 1 yields zero-based
    # bed-style coordinates.
    df['end'] = df['pos(hg19)']
    df['start'] = df.end - 1
    df['rsid'] = df['SNPid(in paper)']
    df['pvalue'] = df['Pvalue']
    df = df[['chrom', 'start', 'end', 'rsid', 'pvalue']]
    df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
    return df
|
def filterStack(self, filters):
    """Filter the ObjectGraph in-place by removing all edges to nodes that
    do not match every filter in the given filter list.

    Returns a tuple containing the number of:
    (nodes_visited, nodes_removed, nodes_orphaned)
    """
    visited, removed, orphaned = filter_stack(self.graph, self, filters)
    # Re-attach each orphaned tail to its last surviving ancestor so the
    # graph stays connected.
    for ancestor, tail in orphaned:
        self.graph.add_edge(ancestor, tail, edge_data='orphan')
    # Hide (rather than delete) every filtered-out node.
    for node in removed:
        self.graph.hide_node(node)
    # visited includes the start node itself, hence the -1.
    return len(visited) - 1, len(removed), len(orphaned)
|
def _is_path ( instance , attribute , s , exists = True ) :
"Validator for path - yness"
|
if not s : # allow False as a default
return
if exists :
if os . path . exists ( s ) :
return
else :
raise OSError ( "path does not exist" )
else : # how do we tell if it ' s a path if it doesn ' t exist ?
raise TypeError ( "Not a path?" )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.