signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def write(self, data, offset=0, write_through=False, unbuffered=False, wait=True, send=True):
    """Write data to an opened file.

    Supports out-of-band sending: call this function with send=False to
    return a tuple of (SMBWriteRequest, receive_func) instead of sending
    the request and waiting for the response. The receive_func can be used
    to get the response from the server by passing in the request that was
    sent out of band.

    :param data: The bytes data to write.
    :param offset: The offset in the file to write the bytes at.
    :param write_through: Whether written data is persisted to the
        underlying storage; not valid for SMB 2.0.2.
    :param unbuffered: Whether the server should not cache the write data
        at intermediate layers; only valid for SMB 3.0.2 or newer.
    :param wait: If send=True, whether to wait for a response if
        STATUS_PENDING was received from the server or fail.
    :param send: Whether to send the request in the same call or return the
        message to the caller and the unpack function.
    :return: The number of bytes written.
    """
    data_len = len(data)
    if data_len > self.connection.max_write_size:
        raise SMBException("The requested write length %d is greater than "
                           "the maximum negotiated write size %d"
                           % (data_len, self.connection.max_write_size))

    write = SMB2WriteRequest()
    write['length'] = data_len  # reuse the length computed above
    write['offset'] = offset
    write['file_id'] = self.file_id
    write['buffer'] = data

    if write_through:
        # Write-through is not supported on SMB 2.0.2 dialects.
        if self.connection.dialect < Dialects.SMB_2_1_0:
            raise SMBUnsupportedFeature(self.connection.dialect, Dialects.SMB_2_1_0,
                                        "SMB2_WRITEFLAG_WRITE_THROUGH", True)
        write['flags'].set_flag(WriteFlags.SMB2_WRITEFLAG_WRITE_THROUGH)

    if unbuffered:
        # Unbuffered writes require SMB 3.0.2 or newer.
        if self.connection.dialect < Dialects.SMB_3_0_2:
            raise SMBUnsupportedFeature(self.connection.dialect, Dialects.SMB_3_0_2,
                                        "SMB2_WRITEFLAG_WRITE_UNBUFFERED", True)
        write['flags'].set_flag(WriteFlags.SMB2_WRITEFLAG_WRITE_UNBUFFERED)

    if not send:
        return write, self._write_response

    log.info("Session: %s, Tree Connect: %s - sending SMB2 Write Request "
             "for file %s" % (self.tree_connect.session.username,
                              self.tree_connect.share_name, self.file_name))
    log.debug(str(write))
    request = self.connection.send(write, self.tree_connect.session.session_id,
                                   self.tree_connect.tree_connect_id)
    return self._write_response(request, wait)
|
def decode_body(cls, header, f):
    """Decode a `MqttUnsuback` packet body described by *header*.

    This method asserts that ``header.packet_type`` is ``unsuback``.

    Parameters
    ----------
    header : MqttFixedHeader
    f : file
        Object with a read method.

    Raises
    ------
    DecodeError
        When there are extra bytes at the end of the packet.

    Returns
    -------
    int
        Number of bytes consumed from ``f``.
    MqttUnsuback
        Object extracted from ``f``.
    """
    assert header.packet_type == MqttControlPacketType.unsuback

    # Bound the reader to the declared remaining length so over-reads fail.
    limited = mqtt_io.LimitReader(f, header.remaining_len)
    reader = mqtt_io.FileDecoder(limited)
    (pid,) = reader.unpack(mqtt_io.FIELD_PACKET_ID)

    consumed = reader.num_bytes_consumed
    if consumed != header.remaining_len:
        raise DecodeError('Extra bytes at end of packet.')
    return consumed, MqttUnsuback(pid)
|
def need_update(a, b):
    """Decide whether target file(s) ``b`` must be rebuilt from source(s) ``a``.

    True when any target is missing, when every target is empty, or when
    any source is newer than any target. Either argument may be a single
    path or a list of paths.
    """
    sources = listify(a)
    targets = listify(b)

    if any(not op.exists(t) for t in targets):
        return True
    if all(os.stat(t).st_size == 0 for t in targets):
        return True
    return any(is_newer_file(s, t) for s in sources for t in targets)
|
def get_urls(self):
    """Prepend the move-node url (and jsi18n catalog) to the admin urls."""
    base_urls = super(TreeAdmin, self).get_urls()

    if django.VERSION < (1, 10):
        from django.views.i18n import javascript_catalog
        jsi18n_url = url(r'^jsi18n/$', javascript_catalog,
                         {'packages': ('treebeard',)})
    else:
        from django.views.i18n import JavaScriptCatalog
        jsi18n_url = url(r'^jsi18n/$',
                         JavaScriptCatalog.as_view(packages=['treebeard']),
                         name='javascript-catalog')

    extra = [
        url('^move/$', self.admin_site.admin_view(self.move_node)),
        jsi18n_url,
    ]
    return extra + base_urls
|
def _load_build(self):
    """Handle pickle's BUILD opcode; see `pickle.py` in Python's source code.

    Intercepts unpickling of `Ref` objects so the current node is attached
    to their state, then marks refs that point back at this node as local.
    """
    # if the ctor. function (penultimate on the stack) is the `Ref` class...
    if isinstance(self.stack[-2], Ref):
        # Ref.__setstate__ will know it's a remote ref if the state is a tuple
        self.stack[-1] = (self.stack[-1], self.node)
        # continue with the default implementation
        self.load_build()

        # detect our own refs sent back to us
        ref = self.stack[-1]
        if ref.uri.node == self.node.nid:
            ref.is_local = True
            ref._cell = self.node.guardian.lookup_cell(ref.uri)
            # dbg(("dead" if not ref._cell else "") + " local ref detected")
            # local refs never need access to the node
            del ref.node
    else:  # pragma: no cover
        self.load_build()
|
def _bind(self, _descriptor):
    """Bind a ResponseObject to a given action descriptor.

    Picks up the default HTTP response code declared on the method and
    selects the appropriate content type and serializer for the response.
    """
    # Default response code; fall back to 200 if the method declares none
    self._defcode = getattr(_descriptor.method, '_wsgi_code', 200)

    # Content negotiation: the descriptor chooses based on the request
    self.content_type, self.serializer = _descriptor.serializer(self.req)
|
def search_debit():
    """Get one to ten debit(s) for a single User.
    parameters:
      - name: searchcd
        in: body
        description: The Debit(s) you'd like to get.
        required: false
        schema:
          $ref: '#/definitions/SearchCD'
    responses:
      '200':
        description: the User's debit(s)
        schema:
          items:
            $ref: '#/definitions/Debit'
          type: array
      default:
        description: unexpected error
        schema:
          $ref: '#/definitions/errorModel'
    security:
      - kid: []
      - typ: []
      - alg: []
    operationId: searchDebits
    """
    # Optional search criteria carried in the signed JWS payload
    sid = request.jws_payload['data'].get('id')
    address = request.jws_payload['data'].get('address')
    currency = request.jws_payload['data'].get('currency')
    network = request.jws_payload['data'].get('network')
    # reference = request.jws_payload['data'].get('reference')
    ref_id = request.jws_payload['data'].get('ref_id')
    page = request.jws_payload['data'].get('page') or 0

    # Base query: only the current user's debits
    debsq = ses.query(wm.Debit).filter(wm.Debit.user_id == current_user.id)
    # NOTE(review): a SQLAlchemy Query object is presumably always truthy,
    # so this guard likely never fires — confirm intent.
    if not debsq:
        return None
    # Narrow the query with each criterion that was provided
    if sid:
        debsq = debsq.filter(wm.Debit.id == sid)
    if address:
        debsq = debsq.filter(wm.Debit.address == address)
    if currency:
        debsq = debsq.filter(wm.Debit.currency == currency)
    if network:
        debsq = debsq.filter(wm.Debit.network == network)
    # if reference:
    #     debsq = debsq.filter(wm.Debit.reference == reference)
    if ref_id:
        debsq = debsq.filter(wm.Debit.ref_id == ref_id)
    # Newest first, fixed page size of 10
    debsq = debsq.order_by(wm.Debit.time.desc()).limit(10)
    if page and isinstance(page, int):
        debsq = debsq.offset(page * 10)
    # Iterating the query executes it; serialize each Debit row
    debits = [json.loads(jsonify2(d, 'Debit')) for d in debsq]
    response = current_app.bitjws.create_response(debits)
    # NOTE(review): ses.close() is skipped on the early `return None` path
    # and on exceptions — consider wrapping in try/finally.
    ses.close()
    return response
|
def cds_length_of_associated_transcript(effect):
    """Length of the coding sequence of the transcript associated with
    `effect`, or 0 when there is no transcript (or no complete coding
    sequence)."""
    def _cds_len(transcript):
        # Only complete transcripts that carry a coding sequence count
        if transcript.complete and transcript.coding_sequence:
            return len(transcript.coding_sequence)
        return 0

    return apply_to_transcript_if_exists(effect=effect, fn=_cds_len, default=0)
|
def add_loaded_callback(self, callback):
    """Register `callback` to run when the ALDB load is complete.

    Registering the same callback twice has no effect.
    """
    already_registered = callback in self._cb_aldb_loaded
    if not already_registered:
        self._cb_aldb_loaded.append(callback)
|
def autofit(ts, maxp=5, maxd=2, maxq=5, sc=None):
    """Fit an automatically selected ARIMA model based on approximate
    Akaike Information Criterion (AIC) values.

    The model search is based on the heuristic developed by Hyndman and
    Khandakar (2008), described in http://www.jstatsoft.org/v27/i03/paper.
    In contrast to the algorithm in the paper, we use an approximation to
    the AIC, rather than an exact value. Note that if the maximum
    differencing order provided does not suffice to induce stationarity,
    the function returns a failure, with the appropriate message.
    Additionally, note that the heuristic only considers models that have
    parameters satisfying the stationarity/invertibility constraints.
    Finally, note that our algorithm is slightly more lenient than the
    original heuristic: the original rejects models with parameters "close"
    to violating stationarity/invertibility; we only reject those that
    actually violate it.

    This functionality is even less mature than some of the other model
    fitting functions here, so use it with caution.

    Parameters
    ----------
    ts :
        time series to which to automatically fit an ARIMA model as a
        Numpy array
    maxp :
        limit for the AR order
    maxd :
        limit for differencing order
    maxq :
        limit for the MA order
    sc :
        The SparkContext, required.

    returns an ARIMAModel
    """
    # `sc` is keyword-optional for API symmetry but must be supplied.
    # Use identity comparison with None (PEP 8) rather than `!=`.
    assert sc is not None, "Missing SparkContext"

    jmodel = sc._jvm.com.cloudera.sparkts.models.ARIMA.autoFit(
        _py2java(sc, Vectors.dense(ts)), maxp, maxd, maxq)
    return ARIMAModel(jmodel=jmodel, sc=sc)
|
def _read_para_nat_traversal_mode(self, code, cbit, clen, *, desc, length, version):
    """Read HIP NAT_TRAVERSAL_MODE parameter.

    Structure of HIP NAT_TRAVERSAL_MODE parameter [RFC 5770]:

         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        |             Type              |             Length            |
        |           Reserved            |            Mode ID #1         |
        |           Mode ID #2          |            Mode ID #3         |
        |           Mode ID #n          |             Padding           |

        Octets      Bits        Name                        Description
          0           0     nat_traversal_mode.type      Parameter Type
          1          15     nat_traversal_mode.critical  Critical Bit
          2          16     nat_traversal_mode.length    Length of Contents
          4          32     -                            Reserved
          6          48     nat_traversal_mode.id        Mode ID
          ?           ?     -                            Padding
    """
    # Contents are a 2-byte reserved field plus 2-byte mode IDs, so the
    # content length must be even.
    if clen % 2 != 0:
        raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')

    _resv = self._read_fileng(2)
    mode_ids = [
        _MODE_ID.get(self._read_unpack(2), 'Unassigned')
        for _ in range((clen - 2) // 2)
    ]

    nat_traversal_mode = dict(
        type=desc,
        critical=cbit,
        length=clen,
        id=mode_ids,
    )

    # Consume any trailing padding up to the declared parameter length.
    padding = length - clen
    if padding:
        self._read_fileng(padding)
    return nat_traversal_mode
|
def setup(self, **kwargs):
    '''This is called during production de-trending, prior to calling the
    :py:obj:`Detrender.run()` method.

    :param tuple cdpp_range: If :py:obj:`parent_model` is set, neighbors are \
        selected only if their de-trended CDPPs fall within this range. \
        Default `None`
    :param tuple mag_range: Only select neighbors whose magnitudes are \
        within this range. Default (11., 13.)
    :param int neighbors: The number of neighboring stars to use in the \
        de-trending. The higher this number, the more signals there are and \
        hence the more de-trending information there is. However, the \
        neighboring star signals are regularized together with the target's \
        signals, so adding too many neighbors will inevitably reduce the \
        contribution of the target's own signals, which may reduce \
        performance. Default `10`
    :param str parent_model: By default, :py:class:`nPLD` is run in \
        stand-alone mode. The neighbor signals are computed directly from \
        their TPFs, so there is no need to have run *PLD* on them \
        beforehand. However, if :py:obj:`parent_model` is set, \
        :py:class:`nPLD` will use information from the \
        :py:obj:`parent_model` model of each neighboring star when \
        de-trending. This is particularly useful for identifying outliers \
        in the neighbor signals and preventing them from polluting the \
        current target. Setting :py:obj:`parent_model` to :py:class:`rPLD`, \
        for instance, will use the outlier information in the \
        :py:class:`rPLD` model of the neighbors (this must have been run \
        ahead of time). Note, however, that tests with *K2* data show that \
        including outliers in the neighbor signals actually *improves* the \
        performance, since many of these outliers are associated with \
        events such as thruster firings and are present in all light \
        curves, and therefore *help* in the de-trending. Default `None`

    .. note:: Optionally, the :py:obj:`neighbors` may be specified directly \
        as a list of target IDs to use. In this case, users may also \
        provide a list of :py:class:`everest.utils.DataContainer` instances \
        corresponding to each of the neighbors in the \
        :py:obj:`neighbors_data` kwarg.
    '''
    # Get neighbors
    self.parent_model = kwargs.get('parent_model', None)
    neighbors = kwargs.get('neighbors', 10)
    neighbors_data = kwargs.get('neighbors_data', None)
    if hasattr(neighbors, '__len__'):
        # `neighbors` was given directly as a list of target IDs.
        self.neighbors = neighbors
    else:
        # `neighbors` is a count: ask the mission module for nearby targets.
        num_neighbors = neighbors
        self.neighbors = self._mission.GetNeighbors(
            self.ID, season=self.season, cadence=self.cadence,
            model=self.parent_model, neighbors=num_neighbors,
            mag_range=kwargs.get('mag_range', (11., 13.)),
            cdpp_range=kwargs.get('cdpp_range', None),
            aperture_name=self.aperture_name)
        if len(self.neighbors):
            if len(self.neighbors) < num_neighbors:
                log.warn("%d neighbors requested, but only %d found."
                         % (num_neighbors, len(self.neighbors)))
        elif num_neighbors > 0:
            log.warn("No neighbors found! Running standard PLD...")
    for n, neighbor in enumerate(self.neighbors):
        log.info("Loading data for neighboring target %d..." % neighbor)
        if neighbors_data is not None:
            # Caller supplied a DataContainer for each neighbor directly.
            data = neighbors_data[n]
            data.mask = np.array(list(set(np.concatenate(
                [data.badmask, data.nanmask]))), dtype=int)
            data.fraw = np.sum(data.fpix, axis=1)
        elif self.parent_model is not None and self.cadence == 'lc':
            # We load the `parent` model. The advantage here is that
            # outliers have properly been identified and masked. I haven't
            # tested this on short cadence data, so I'm going to just
            # forbid it...
            # NOTE(review): `eval` of the model name — safe only while
            # `parent_model` is a trusted internal string; confirm it is
            # never user-supplied.
            data = eval(self.parent_model)(
                neighbor, mission=self.mission, is_parent=True)
        else:
            # We load the data straight from the TPF. Much quicker, since
            # no model must be run in advance. Downside is we don't know
            # where the outliers are. But based on tests with K2 data, the
            # de-trending is actually *better* if the outliers are
            # included! These are mostly thruster fire events and other
            # artifacts common to all the stars, so it makes sense that we
            # might want to keep them in the design matrix.
            data = self._mission.GetData(
                neighbor, season=self.season, clobber=self.clobber_tpf,
                cadence=self.cadence, aperture_name=self.aperture_name,
                saturated_aperture_name=self.saturated_aperture_name,
                max_pixels=self.max_pixels,
                saturation_tolerance=self.saturation_tolerance,
                get_hires=False, get_nearby=False)
            if data is None:
                raise Exception(
                    "Unable to retrieve data for neighboring target.")
            data.mask = np.array(list(set(np.concatenate(
                [data.badmask, data.nanmask]))), dtype=int)
            data.fraw = np.sum(data.fpix, axis=1)
        # Compute the linear PLD vectors and interpolate over
        # outliers, NaNs and bad timestamps
        X1 = data.fpix / data.fraw.reshape(-1, 1)
        X1 = Interpolate(data.time, data.mask, X1)
        if self.X1N is None:
            self.X1N = np.array(X1)
        else:
            self.X1N = np.hstack([self.X1N, X1])
        del X1
        del data
|
def delete_all_volumes(self):
    """Remove every volume known to this node.

    Volumes can only be deleted on swarm manager nodes; calling this on a
    non-manager raises ``RuntimeError``.
    """
    if not self._manager:
        # Only manager nodes may administer volumes
        raise RuntimeError('Volumes can only be deleted '
                           'on swarm manager nodes')
    for volume in self.get_volume_list():
        self._api_client.remove_volume(volume, force=True)
|
def getargspec(method):
    """Drill through layers of decorators attempting to locate the actual
    argspec for a method."""
    argspec = _getargspec(method)
    args = argspec[0]
    # If 'self' is already the first argument we are looking at the real
    # (undecorated) callable, so stop here.
    if args and args[0] == 'self':
        return argspec
    # Unwrap bound/unbound method wrappers before inspecting the closure.
    if hasattr(method, '__func__'):
        method = method.__func__
    func_closure = six.get_function_closure(method)
    # NOTE(sileht): if the closure is None we cannot look deeper,
    # so return actual argspec, this occurs when the method
    # is static for example.
    if not func_closure:
        return argspec
    closure = None
    # In the case of deeply nested decorators (with arguments), it's possible
    # that there are several callables in scope; Take a best guess and go
    # with the one that looks most like a pecan controller function
    # (has a __code__ object, and 'self' is the first argument)
    func_closure = filter(lambda c: (six.callable(c.cell_contents)
                                     and hasattr(c.cell_contents, '__code__')),
                          func_closure)
    # Stable sort: cells whose code mentions 'self' come first.
    func_closure = sorted(func_closure,
                          key=lambda c: 'self' in c.cell_contents.__code__.co_varnames,
                          reverse=True)
    closure = func_closure[0]
    method = closure.cell_contents
    # Recurse into the wrapped callable until the real method is reached.
    return getargspec(method)
|
def post(self, endpoint, body=None, timeout=None, allow_redirects=None, validate=True, headers=None):
    """*Sends a POST request to the endpoint.*

    The endpoint is joined with the URL given on library init (if any).
    If endpoint starts with ``http://`` or ``https://``, it is assumed
    an URL outside the tested API (which may affect logging).

    *Options*

    ``body``: Request body parameters as a JSON object, file or a dictionary.

    ``timeout``: A number of seconds to wait for the response before failing
    the keyword.

    ``allow_redirects``: If false, do not follow any redirects.

    ``validate``: If false, skips any request and response validations set
    by expectation keywords and a spec given on library init.

    ``headers``: Headers as a JSON object to add or override for the request.

    *Examples*

    | `POST` | /users | { "id": 11, "name": "Gil Alexander" } |
    | `POST` | /users | ${CURDIR}/new_user.json |
    """
    endpoint = self._input_string(endpoint)

    request = deepcopy(self.request)
    request["method"] = "POST"
    request["body"] = self.input(body)

    # Apply optional per-call overrides
    if allow_redirects is not None:
        request["allowRedirects"] = self._input_boolean(allow_redirects)
    if timeout is not None:
        request["timeout"] = self._input_timeout(timeout)
    if headers:
        request["headers"].update(self._input_object(headers))

    validate = self._input_boolean(validate)
    return self._request(endpoint, request, validate)["response"]
|
def get_model_info(self):
    """:return: dictionary of information about this model"""
    return {
        'model_name': self.name,
        # Human-readable pipeline: stage reprs joined with arrows
        'stages': '->'.join(repr(stage) for stage in self._stages),
        'sequence': {'index': self._current_index},
    }
|
def metrics(ref_time, ref_freqs, est_time, est_freqs, **kwargs):
    """Compute multipitch metrics. All metrics are computed at the 'macro'
    level such that the frame true positive/false positive/false negative
    rates are summed across time and the metrics are computed on the
    combined values.

    Examples
    --------
    >>> ref_time, ref_freqs = mir_eval.io.load_ragged_time_series(
    ...     'reference.txt')
    >>> est_time, est_freqs = mir_eval.io.load_ragged_time_series(
    ...     'estimated.txt')
    >>> metris_tuple = mir_eval.multipitch.metrics(
    ...     ref_time, ref_freqs, est_time, est_freqs)

    Parameters
    ----------
    ref_time : np.ndarray
        Time of each reference frequency value
    ref_freqs : list of np.ndarray
        List of np.ndarrays of reference frequency values
    est_time : np.ndarray
        Time of each estimated frequency value
    est_freqs : list of np.ndarray
        List of np.ndarrays of estimate frequency values
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.

    Returns
    -------
    precision : float
        Precision (TP/(TP + FP))
    recall : float
        Recall (TP/(TP + FN))
    accuracy : float
        Accuracy (TP/(TP + FP + FN))
    e_sub : float
        Substitution error
    e_miss : float
        Miss error
    e_fa : float
        False alarm error
    e_tot : float
        Total error
    precision_chroma : float
        Chroma precision
    recall_chroma : float
        Chroma recall
    accuracy_chroma : float
        Chroma accuracy
    e_sub_chroma : float
        Chroma substitution error
    e_miss_chroma : float
        Chroma miss error
    e_fa_chroma : float
        Chroma false alarm error
    e_tot_chroma : float
        Chroma total error
    """
    validate(ref_time, ref_freqs, est_time, est_freqs)
    # resample est_freqs if est_times is different from ref_times
    if est_time.size != ref_time.size or not np.allclose(est_time, ref_time):
        warnings.warn("Estimate times not equal to reference times. "
                      "Resampling to common time base.")
        est_freqs = resample_multipitch(est_time, est_freqs, ref_time)
    # convert frequencies from Hz to continuous midi note number
    ref_freqs_midi = frequencies_to_midi(ref_freqs)
    est_freqs_midi = frequencies_to_midi(est_freqs)
    # compute chroma wrapped midi number
    ref_freqs_chroma = midi_to_chroma(ref_freqs_midi)
    est_freqs_chroma = midi_to_chroma(est_freqs_midi)
    # count number of occurences
    n_ref = compute_num_freqs(ref_freqs_midi)
    n_est = compute_num_freqs(est_freqs_midi)
    # compute the number of true positives
    true_positives = util.filter_kwargs(
        compute_num_true_positives, ref_freqs_midi, est_freqs_midi, **kwargs)
    # compute the number of true positives ignoring octave mistakes
    true_positives_chroma = util.filter_kwargs(
        compute_num_true_positives, ref_freqs_chroma, est_freqs_chroma,
        chroma=True, **kwargs)
    # compute accuracy metrics
    precision, recall, accuracy = compute_accuracy(
        true_positives, n_ref, n_est)
    # compute error metrics
    e_sub, e_miss, e_fa, e_tot = compute_err_score(
        true_positives, n_ref, n_est)
    # compute accuracy metrics ignoring octave mistakes
    precision_chroma, recall_chroma, accuracy_chroma = compute_accuracy(
        true_positives_chroma, n_ref, n_est)
    # compute error metrics ignoring octave mistakes
    e_sub_chroma, e_miss_chroma, e_fa_chroma, e_tot_chroma = compute_err_score(
        true_positives_chroma, n_ref, n_est)
    return (precision, recall, accuracy, e_sub, e_miss, e_fa, e_tot,
            precision_chroma, recall_chroma, accuracy_chroma,
            e_sub_chroma, e_miss_chroma, e_fa_chroma, e_tot_chroma)
|
def mase(simulated_array, observed_array, m=1, replace_nan=None, replace_inf=None, remove_neg=False, remove_zero=False):
    """Compute the mean absolute scaled error between the simulated and
    observed data.

    .. image:: /pictures/MASE.png

    Parameters
    ----------
    simulated_array: one dimensional ndarray
        An array of simulated data from the time series.
    observed_array: one dimensional ndarray
        An array of observed data from the time series.
    m: int
        If given, indicates the seasonal period m. If not given, the
        default is 1.
    replace_nan: float, optional
        If given, indicates which value to replace NaN values with in the
        two arrays. If None, when a NaN value is found at the i-th position
        in the observed OR simulated array, the i-th value of the observed
        and simulated array are removed before the computation.
    replace_inf: float, optional
        If given, indicates which value to replace Inf values with in the
        two arrays. If None, when an inf value is found at the i-th
        position in the observed OR simulated array, the i-th value of the
        observed and simulated array are removed before the computation.
    remove_neg: boolean, optional
        If True, when a negative value is found at the i-th position in the
        observed OR simulated array, the i-th value of the observed AND
        simulated array are removed before the computation.
    remove_zero: boolean, optional
        If true, when a zero value is found at the i-th position in the
        observed OR simulated array, the i-th value of the observed AND
        simulated array are removed before the computation.

    Returns
    -------
    float
        The mean absolute scaled error.

    Examples
    --------
    >>> import HydroErr as he
    >>> import numpy as np
    >>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
    >>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
    >>> he.mase(sim, obs)
    0.17341040462427745

    References
    ----------
    - Hyndman, R.J., Koehler, A.B., 2006. Another look at measures of
      forecast accuracy. International Journal of Forecasting 22(4) 679-688.
    """
    # Checking and cleaning the data
    simulated_array, observed_array = treat_values(
        simulated_array, observed_array,
        replace_nan=replace_nan, replace_inf=replace_inf,
        remove_neg=remove_neg, remove_zero=remove_zero,
    )

    n_scaled = simulated_array.size - m
    # Mean absolute error of the simulation
    mae = np.mean(np.abs(simulated_array - observed_array))
    # Mean absolute error of an m-step naive forecast of the observations
    naive_errors = np.abs(observed_array[m:] - observed_array[:n_scaled])
    return mae / (np.sum(naive_errors) / n_scaled)
|
def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None):
    """Search for series that belong to a category id. Returns information
    about matching series in a DataFrame.

    Parameters
    ----------
    category_id : int
        category id, e.g., 32145
    limit : int, optional
        limit the number of results to this value. If limit is 0, it means
        fetching all results without limit.
    order_by : str, optional
        order the results by a criterion. Valid options are 'search_rank',
        'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment',
        'realtime_start', 'realtime_end', 'last_updated',
        'observation_start', 'observation_end', 'popularity'
    sort_order : str, optional
        sort the results by ascending or descending order. Valid options
        are 'asc' or 'desc'
    filter : tuple, optional
        filters the results. Expects a tuple like
        (filter_variable, filter_value). Valid filter_variable values are
        'frequency', 'units', and 'seasonal_adjustment'

    Returns
    -------
    info : DataFrame
        a DataFrame containing information about the matching Fred series
    """
    # NOTE: `filter` shadows the builtin; name kept for API compatibility.
    endpoint = "%s/category/series?category_id=%d&" % (self.root_url, category_id)
    results = self.__get_search_results(endpoint, limit, order_by, sort_order, filter)
    if results is None:
        raise ValueError('No series exists for category id: ' + str(category_id))
    return results
|
def execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=False):
    """Execute several cleanup tasks as part of the cleanup.

    REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.

    :param ctx: Context object for the tasks.
    :param cleanup_tasks: Collection of cleanup tasks (as Collection).
    :param dry_run: Indicates dry-run mode (bool)
    """
    # pylint: disable=redefined-outer-name
    executor = Executor(cleanup_tasks, ctx.config)
    for task in cleanup_tasks.tasks:
        print("CLEANUP TASK: %s" % task)
        executor.execute((task, dict(dry_run=dry_run)))
|
def set_stim_by_index(self, index):
    """Set the stimulus to generate to the one referenced by `index`.

    :param index: index number of stimulus to set from this class's
        internal list of stimuli
    :type index: int
    """
    # Drop any components currently attached to the stimulus, then
    # install the selected one.
    self.stimulus.clearComponents()
    self.stimulus.insertComponent(self.stim_components[index])
|
def Indentation(logical_line, previous_logical, indent_level, previous_indent_level):
    """Use two spaces per indentation level."""
    # Comment-only lines have an empty logical line; tag their messages.
    comment = ' (comment)' if not logical_line else ''

    # Indent width must be a multiple of two spaces.
    if indent_level % 2:
        code = 'YCM114' if not logical_line else 'YCM111'
        yield 0, code + ' indentation is not a multiple of two spaces' + comment

    # A line ending in ':' must be followed by exactly one extra level.
    opens_block = previous_logical.endswith(':')
    if opens_block and indent_level - previous_indent_level != 2:
        code = 'YCM115' if not logical_line else 'YCM112'
        message = ' expected an indented block of {} spaces{}'.format(
            previous_indent_level + 2, comment)
        yield 0, code + message
|
def write_bed_with_trackline(bed, out, trackline, add_chr=False):
    """Copy a bed file, prepending a UCSC trackline.

    A simple trackline example:
    'track type=bed name="cool" description="A cool track."'

    Parameters
    ----------
    bed : str
        Input bed file name.
    out : str
        Output bed file name.
    trackline : str
        UCSC trackline.
    add_chr : boolean
        Add 'chr' to the chromosomes in the input file. Necessary for
        UCSC genome browser if not present.
    """
    df = pd.read_table(bed, index_col=None, header=None)
    # Re-serialize each row as a tab-separated line
    rows = df.apply(lambda row: '\t'.join(row.astype(str)), axis=1)
    bt = pbt.BedTool('\n'.join(rows) + '\n', from_string=True)
    if add_chr:
        bt = add_chr_to_contig(bt)
    bt = bt.saveas(out, trackline=trackline)
|
def results_cache_path(self) -> str:
    """Location where the step report is cached between sessions to
    prevent loss of display data between runs."""
    if not self.project:
        return ''
    filename = '{}.json'.format(self.id)
    return os.path.join(self.project.results_path, '.cache', 'steps', filename)
|
def match(self, pattern, context=None):
    """Return the (possibly empty) list of choices matching ``pattern``.

    You can also provide a ``context`` as described above. All possible
    choices are obtained via ``choices`` and then filtered by performing a
    regular expression search on each one using the supplied ``pattern``.
    """
    # A bare '*' is treated as "match anything".
    regex = re.compile('.*' if pattern == '*' else pattern)
    return [choice for choice in self.choices(context) if regex.search(choice)]
|
def rescaleX(self):
    """Rescale the horizontal axis to make the length scales equal."""
    width, height = self.figure.get_size_inches()
    self.ratio = width / float(height)
    # Horizontal limits scale with the aspect ratio; vertical stays [-1, 1].
    self.axes.set_xlim(-self.ratio, self.ratio)
    self.axes.set_ylim(-1, 1)
|
def stopObserver(self):
    """Stop this region's observer loop.

    If this is running in a subprocess, the subprocess will end
    automatically.
    """
    observer = self._observer
    observer.isStopped = True
    observer.isRunning = False
|
def get_vsan_enabled(host, username, password, protocol=None, port=None, host_names=None):
    '''Get the VSAN enabled status for a given host or a list of host_names.
    Returns ``True`` if VSAN is enabled, ``False`` if it is not enabled,
    and ``None`` if a VSAN Host Config is unset, per host.

    host
        The location of the host.

    username
        The username used to login to the host, such as ``root``.

    password
        The password used to login to the host.

    protocol
        Optionally set to alternate protocol if the host is not using the
        default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the host is not using the
        default port. Default port is ``443``.

    host_names
        List of ESXi host names. When the host, username, and password
        credentials are provided for a vCenter Server, the host_names
        argument is required to tell vCenter which hosts to check if VSAN
        enabled.

        If host_names is not provided, the VSAN status will be retrieved
        for the ``host`` location instead. This is useful for when service
        instance connection information is used for a single ESXi host.

    CLI Example:

    .. code-block:: bash

        # Used for single ESXi host connection information
        salt '*' vsphere.get_vsan_enabled my.esxi.host root bad-password

        # Used for connecting to a vCenter Server
        salt '*' vsphere.get_vsan_enabled my.vcenter.location root bad-password host_names='[esxi-1.host.com, esxi-2.host.com]'
    '''
    service_instance = salt.utils.vmware.get_service_instance(
        host=host, username=username, password=password,
        protocol=protocol, port=port)
    host_names = _check_hosts(service_instance, host, host_names)

    ret = {}
    for host_name in host_names:
        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        vsan_config = host_ref.config.vsanHostConfig
        if vsan_config is None:
            # Without a VSAN Config we cannot report the VSAN state.
            msg = 'VSAN System Config Manager is unset for host \'{0}\'.'.format(host_name)
            log.debug(msg)
            ret[host_name] = {'Error': msg}
        else:
            ret[host_name] = {'VSAN Enabled': vsan_config.enabled}
    return ret
|
def _get_result_paths(self, data):
    """Build the dict of result filepaths keyed by database-file suffix."""
    # The indexed database path follows the comma in the --ref parameter,
    # e.g. "/path/to/refseqs.fasta,/path/to/refseqs.idx".
    db_name = self.Parameters['--ref'].Value.split(',')[1]
    result = {}
    # Index files come in several flavors, some of them numbered,
    # e.g. nr.bursttrie_0.dat, nr.bursttrie_1.dat, and nr.stats.
    for suffix in ('bursttrie', 'kmer', 'pos', 'stats'):
        for match in glob("%s.%s*" % (db_name, suffix)):
            key = match.split(db_name + '.')[1]
            result[key] = ResultPath(Path=match, IsWritten=True)
    return result
|
async def _handle_local_charms(self, bundle):
    """Search for references to local charms (i.e. filesystem paths)
    in the bundle. Upload the local charms to the model, and replace
    the filesystem paths with appropriate 'local:' paths in the bundle.

    Return the modified bundle.

    :param dict bundle: Bundle dictionary
    :return: Modified bundle dictionary
    """
    apps, args = [], []

    default_series = bundle.get('series')
    # Older bundle formats keep applications under a 'services' key.
    apps_dict = bundle.get('applications', bundle.get('services', {}))
    for app_name in self.applications:
        app_dict = apps_dict[app_name]
        charm_dir = os.path.abspath(os.path.expanduser(app_dict['charm']))
        # A charm entry that is not an existing directory is assumed to be
        # a charm-store URL and needs no upload.
        if not os.path.isdir(charm_dir):
            continue
        # Series resolution order: app entry, bundle default, charm metadata.
        series = (app_dict.get('series') or default_series or get_charm_series(charm_dir))
        if not series:
            raise JujuError("Couldn't determine series for charm at {}. "
                            "Add a 'series' key to the bundle.".format(charm_dir))
        # Keep track of what we need to update. We keep a list of apps
        # that need to be updated, and a corresponding list of args
        # needed to update those apps.
        apps.append(app_name)
        args.append((charm_dir, series))
    if apps:
        # If we have apps to update, spawn all the coroutines concurrently
        # and wait for them to finish.
        charm_urls = await asyncio.gather(*[self.model.add_local_charm_dir(*params) for params in args], loop=self.model.loop)
        # Update the 'charm:' entry for each app with the new 'local:' url.
        for app_name, charm_url in zip(apps, charm_urls):
            apps_dict[app_name]['charm'] = charm_url
    return bundle
|
def toDict(self):
    """To Dict

    Returns the Hashed Node as a dictionary in the same format as is used
    in constructing it

    Returns:
        dict
    """
    # Seed the result with the hash key, then fold in the parent's data
    # followed by the wrapped node's data.
    dResult = {'__hash__': self._key.toDict()}
    dResult.update(super(HashNode, self).toDict())
    dResult.update(self._node.toDict())
    return dResult
|
def create_jar_builder(self, jar):
    """Creates a ``JarTask.JarBuilder`` ready for use.

    This method should be called during in `execute` context and only after ensuring
    `JarTask.JarBuilder.prepare` has already been called in `prepare` context.

    :param jar: An opened ``pants.backend.jvm.tasks.jar_task.Jar`.
    """
    builder = self.JarBuilder(self.context, jar)
    # Hand the builder to the caller; once the caller's block finishes,
    # commit any manifest changes the builder accumulated.
    yield builder
    builder.commit_manifest(jar)
|
def has_documented_fields(self, include_inherited_fields=False):
    """Returns whether at least one field is documented.

    :param include_inherited_fields: When True, consider inherited fields
        (``self.all_fields``) in addition to the fields declared directly
        on this object (``self.fields``).
    :return: True if any field has a truthy ``doc`` attribute.
    """
    fields = self.all_fields if include_inherited_fields else self.fields
    # any() short-circuits on the first documented field, replacing the
    # manual loop-and-return-True pattern.
    return any(field.doc for field in fields)
|
def value_from_person(self, array, role, default=0):
    """Get the value of ``array`` for the person with the unique role ``role``.

    ``array`` must have the dimension of the number of persons in the simulation

    If such a person does not exist, return ``default`` instead

    The result is a vector which dimension is the number of entities
    """
    self.entity.check_role_validity(role)
    # Only roles with at most one holder per entity are meaningful here.
    if role.max != 1:
        raise Exception('You can only use value_from_person with a role that is unique in {}. Role {} is not unique.'.format(self.key, role.key))
    self.members.check_array_compatible_with_entity(array)
    members_map = self.ordered_members_map
    # Start from a per-entity vector filled with the default value.
    result = self.filled_array(default, dtype=array.dtype)
    # Preserve the enum wrapper so the output keeps its possible values.
    if isinstance(array, EnumArray):
        result = EnumArray(result, array.possible_values)
    role_filter = self.members.has_role(role)
    # Entities that contain at least one member holding the role.
    entity_filter = self.any(role_filter)
    # For those entities, pick the (unique) role-holder's value out of the
    # person-ordered array.
    result[entity_filter] = array[members_map][role_filter[members_map]]
    return result
|
def get_det_id(self, det_oid):
    """Convert detector string representation (OID) to serialnumber"""
    matches = self.detectors[self.detectors.OID == det_oid]
    try:
        return matches.SERIALNUMBER.iloc[0]
    except IndexError:
        # No row matched the OID: report it and signal failure with None.
        log.critical("No det ID found for OID '{}'".format(det_oid))
        return None
|
def extrair_logs(self):
    """Overrides :meth:`~satcfe.base.FuncoesSAT.extrair_logs`.

    :return: A SAT response specialized for ``ExtrairLogs``.
    :rtype: satcfe.resposta.extrairlogs.RespostaExtrairLogs
    """
    # Delegate to the base implementation and wrap the raw return value
    # in the specialized response parser.
    retorno = super(ClienteSATLocal, self).extrair_logs()
    return RespostaExtrairLogs.analisar(retorno)
|
def credential_update(self, cred_id, **options):
    """credential_update cred_id **options

    Updates the specified values of the credential ID specified.
    """
    # Fields carried over for every credential type.
    base_fields = ('type', 'name', 'description', 'visibility', 'group', 'users')
    # Extra fields that must be carried over per credential type.
    # NOTE: 'priviledgeEscalation' is the (misspelled) key the API expects.
    type_fields = {
        'kerberos': ('ip', 'port', 'protocol', 'realm'),
        'snmp': ('communityString',),
        'ssh': ('username', 'publickey', 'privatekey',
                'priviledgeEscalation', 'escalationUsername'),
        'windows': ('username', 'domain'),
    }
    payload = None
    # First we pull the credentials and populate the payload if we find a match.
    for cred in self.credentials()['credentials']:
        if cred['id'] != str(cred_id):
            continue
        payload = {'id': cred_id}
        for field in base_fields:
            payload[field] = cred[field]
        for field in type_fields.get(cred['type'], ()):
            payload[field] = cred[field]
    if payload is None:
        raise APIError(13, 'cred_id %s does not exist' % cred_id)
    # Apply caller-supplied overrides on top of the existing values.
    payload.update(options)
    return self.raw_query('credential', 'edit', data=payload)
|
def log_every_n_seconds(level, msg, n_seconds, *args):
    """Logs 'msg % args' at level 'level' iff 'n_seconds' elapsed since last call.

    Logs the first call, logs subsequent calls if 'n' seconds have elapsed since
    the last logging call from the same call site (file + line). Not thread-safe.

    Args:
      level: int, the absl logging level at which to log.
      msg: str, the message to be logged.
      n_seconds: float or int, seconds which should elapse before logging again.
      *args: The args to be substitued into the msg.
    """
    # The caller's (file, line) from findCaller() identifies the call site
    # used for rate limiting.
    should_log = _seconds_have_elapsed(get_absl_logger().findCaller(), n_seconds)
    log_if(level, msg, should_log, *args)
|
def parse(stream):
    """Creates a ``ValidationDefinition`` from a provided stream containing XML.

    The XML typically will look like this:

    ``<items>``
    ``  <server_host>myHost</server_host>``
    ``  <server_uri>https://127.0.0.1:8089</server_uri>``
    ``  <session_key>123102983109283019283</session_key>``
    ``  <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>``
    ``  <item name="myScheme">``
    ``    <param name="param1">value1</param>``
    ``    <param_list name="param2">``
    ``      <value>value2</value>``
    ``      <value>value3</value>``
    ``      <value>value4</value>``
    ``    </param_list>``
    ``  </item>``
    ``</items>``

    :param stream: ``Stream`` containing XML to parse.
    :return definition: A ``ValidationDefinition`` object.
    """
    definition = ValidationDefinition()
    # Parse XML from the stream, then walk the children of the root node.
    root = ET.parse(stream).getroot()
    for child in root:
        if child.tag == "item":
            # The lone <item> node carries the scheme name and its params.
            definition.metadata["name"] = child.get("name")
            definition.parameters = parse_xml_data(child, "")
        else:
            # Everything else (server_host, session_key, ...) is metadata.
            definition.metadata[child.tag] = child.text
    return definition
|
def data(self, index, role=Qt.DisplayRole):
    """Cell content"""
    if not index.isValid():
        return to_qvariant()
    value = self.get_value(index)
    # Decode byte strings for display; keep the raw value if the bytes are
    # not valid UTF-8.
    if is_binary_string(value):
        try:
            value = to_text_string(value, 'utf8')
        except:
            pass
    if role == Qt.DisplayRole:
        if value is np.ma.masked:
            return ''
        else:
            try:
                return to_qvariant(self._format % value)
            except TypeError:
                # The format string does not apply to this value (e.g.
                # object dtype): fall back to repr() and lock editing.
                self.readonly = True
                return repr(value)
    elif role == Qt.TextAlignmentRole:
        return to_qvariant(int(Qt.AlignCenter | Qt.AlignVCenter))
    elif role == Qt.BackgroundColorRole and self.bgcolor_enabled and value is not np.ma.masked:
        try:
            # Map the value into [vmin, vmax] to derive a hue for the
            # background color gradient.
            hue = (self.hue0 + self.dhue * (float(self.vmax) - self.color_func(value)) / (float(self.vmax) - self.vmin))
            hue = float(np.abs(hue))
            color = QColor.fromHsvF(hue, self.sat, self.val, self.alp)
            return to_qvariant(color)
        except TypeError:
            # Non-numeric cell: no background color.
            return to_qvariant()
    elif role == Qt.FontRole:
        return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
    return to_qvariant()
|
def build(path, query=None, fragment=''):
    """
    Generates a URL based on the inputted path and given query options and
    fragment.  The query should be a dictionary of terms that will be
    generated into the URL, while the fragment is the anchor point within the
    target path that will be navigated to.  If there are any wildcards within
    the path that are found within the query, they will be inserted into the
    path itself and removed from the query string.

    :example    |>>> import skyline.gui
                |>>> skyline.gui.build_url('sky://projects/%(project)s',
                |                          {'project': 'Test', 'asset': 'Bob'})
                |'sky://projects/Test/?asset=Bob'

    :param      path     | <str>
                query    | <dict> || None
                fragment | <str> || None

    :return     <str> | url
    """
    url = nstr(path)
    # replace the optional arguments in the url
    keys = projex.text.findkeys(path)
    if keys:
        if query is None:
            query = {}
        opts = {}
        # Wildcards found in the query are substituted into the path and
        # removed from the query; missing ones are re-emitted as "%(key)s".
        for key in keys:
            opts[key] = query.pop(key, '%({})s'.format(key))
        url %= opts
    # add the query
    if query:
        if type(query) is dict:
            mapped_query = {}
            # Coerce keys and values to strings before urlencoding.
            for key, value in query.items():
                mapped_query[nstr(key)] = nstr(value)
            query_str = urllib.urlencode(mapped_query)  # NOTE: Python 2 API
        else:
            # Non-dict queries are assumed to be pre-encoded strings.
            query_str = nstr(query)
        url += '?' + query_str
    # include the fragment
    if fragment:
        url += '#' + fragment
    return url
|
def from_xyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF):
    """Create a new instance based on the specifed CIE-XYZ values.

    Parameters:
      :x:
        The X component value [0...1]
      :y:
        The Y component value [0...1]
      :z:
        The Z component value [0...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> Color.from_xyz(0.488941, 0.365682, 0.0448137)
    Color(1.0, 0.5, 0.0, 1.0)
    >>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5)
    Color(1.0, 0.5, 0.0, 0.5)
    """
    # Convert XYZ to RGB first; Color stores the result in 'rgb' mode.
    return Color(xyz_to_rgb(x, y, z), 'rgb', alpha, wref)
|
def p_identifier(self, p):
    'identifier : ID'
    # The docstring above is the PLY grammar production for this rule --
    # it is read by the parser generator and must not be edited.
    # Build the AST node and propagate the ID token's line number onto
    # both the node and the production's result slot.
    p[0] = Identifier(p[1], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
|
def create_loadbalancer(call=None, kwargs=None):
    '''
    Creates a loadbalancer within the datacenter from the provider config.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_loadbalancer profitbricks name=mylb
    '''
    if call != 'function':
        # Bug fix: the error message previously named 'create_address'
        # (copy-paste from a sibling function).
        raise SaltCloudSystemExit(
            'The create_loadbalancer function must be called with -f or --function.')
    if kwargs is None:
        kwargs = {}
    conn = get_conn()
    datacenter_id = get_datacenter_id()
    # Build the loadbalancer from the caller-supplied options.
    loadbalancer = LoadBalancer(name=kwargs.get('name'),
                                ip=kwargs.get('ip'),
                                dhcp=kwargs.get('dhcp'))
    response = conn.create_loadbalancer(datacenter_id, loadbalancer)
    # Block (up to 60 poll intervals) until the provider reports completion.
    _wait_for_completion(conn, response, 60, 'loadbalancer')
    return response
|
def get_anonymous_request(leonardo_page):
    """returns inicialized request"""
    request_factory = RequestFactory()
    request = request_factory.get(leonardo_page.get_absolute_url(), data={})
    # Make the synthetic request look like it targets this page for both
    # the FeinCMS and Leonardo machinery.
    request.feincms_page = request.leonardo_page = leonardo_page
    request.frontend_editing = False
    request.user = AnonymousUser()
    if not hasattr(request, '_feincms_extra_context'):
        request._feincms_extra_context = {}
    request.path = leonardo_page.get_absolute_url()
    request.frontend_editing = False
    leonardo_page.run_request_processors(request)
    request.LEONARDO_CONFIG = ContextConfig(request)
    handler = BaseHandler()
    handler.load_middleware()
    # Apply request middleware
    for middleware_method in handler._request_middleware:
        try:
            middleware_method(request)
        except:
            # Deliberate best-effort: a failing middleware must not prevent
            # building the anonymous request.
            pass
    # call processors
    for fn in reversed(list(leonardo_page.request_processors.values())):
        fn(leonardo_page, request)
    return request
|
def AssertType(value, expected_type):
    """Ensures that given value has certain type.

    Args:
      value: A value to assert the type for.
      expected_type: An expected type for the given value.

    Raises:
      TypeError: If given value does not have the expected type.
    """
    if isinstance(value, expected_type):
        return
    raise TypeError("Expected type `%r`, but got value `%r` of type `%s`"
                    % (expected_type, value, type(value)))
|
def _get_chain_by_pid(pid):
    """Find chain by pid.

    Return None if not found.
    """
    try:
        return d1_gmn.app.models.ChainMember.objects.get(pid__did=pid).chain
    except d1_gmn.app.models.ChainMember.DoesNotExist:
        # No matching chain member: fall through and implicitly return None.
        pass
|
def _choose_node ( self , nodes = None ) :
"""Chooses a random node from the list of nodes in the client ,
taking into account each node ' s recent error rate .
: rtype RiakNode"""
|
if not nodes :
nodes = self . nodes
# Prefer nodes which have gone a reasonable time without
# errors
def _error_rate ( node ) :
return node . error_rate . value ( )
good = [ n for n in nodes if _error_rate ( n ) < 0.1 ]
if len ( good ) is 0 : # Fall back to a minimally broken node
return min ( nodes , key = _error_rate )
else :
return random . choice ( good )
|
def dot(self, vector, theta=None):
    """Return the dot product of two vectors.

    If theta is given then the dot product is computed as
    v1*v2 = |v1||v2| cos(theta). Argument theta is measured in degrees.
    """
    if theta is not None:
        # Bug fix: theta is given in degrees, so it must be converted to
        # radians *before* taking the cosine. The original wrapped the
        # cosine in math.degrees(), scaling the result by 180/pi.
        return (self.magnitude() * vector.magnitude()
                * math.cos(math.radians(theta)))
    # Bug fix: the original evaluated `self.to_list()()` (calling a list)
    # and unpacked plain elements as (i, x) pairs, raising TypeError.
    # Enumerate the components and sum the pairwise products instead.
    return sum(x * vector.vector[i] for i, x in enumerate(self.to_list()))
|
def set_runtime_value_float(self, ihcid: int, value: float) -> bool:
    """Set a float runtime value, re-authenticating and retrying once
    when the first attempt fails (e.g. because the session expired)."""
    first_attempt = self.client.set_runtime_value_float(ihcid, value)
    if first_attempt:
        return True
    # First attempt failed: log in again and retry exactly once.
    self.re_authenticate()
    return self.client.set_runtime_value_float(ihcid, value)
|
def write_pkg_to_file(self, name, objects, path='.', filename=None):
    """Write a list of related objs to file"""
    # Kibana expects an array of docs rather than a dict of docs, so
    # flatten the mapping's values and sort them by id for stable output.
    docs = [obj for _, obj in iteritems(objects)]
    docs.sort(key=lambda doc: doc['_id'])
    output = self.json_dumps(docs) + '\n'
    if filename is None:
        filename = self.safe_filename('Pkg', name)
    target = os.path.join(path, filename)
    self.pr_inf("Writing to file: " + target)
    with open(target, 'w') as f:
        f.write(output)
    return target
|
def enable(commanddict, modulename):
    """
    <Purpose>
      Enables a module and imports its commands into the seash commanddict.
    <Arguments>
      modulename: The module to import.
    <Side Effects>
      All commands inside the specified module will be inserted into the seash
      commanddict if possible.
      The file modulename.disabled will be removed from /modules/ indicating that
      this module has been enabled.
    <Exceptions>
      Exceptions raised by merge_commanddict()
    <Returns>
      None
    """
    # Is this an installed module?
    if not modulename in module_data:
        raise seash_exceptions.UserError("Error, module '" + modulename + "' is not installed")
    if _is_module_enabled(modulename):
        raise seash_exceptions.UserError("Module is already enabled.")
    merge_commanddict(commanddict, module_data[modulename]['command_dict'])
    try:
        # We mark this module as enabled by deleting the modulename.disabled file
        os.remove(MODULES_FOLDER_PATH + os.sep + modulename + ".disabled")
    except OSError, e:
        # If the file was deleted before we were able to delete it, it should not
        # be a problem.
        if not "cannot find the file" in str(e):
            raise
    try:
        initialize(modulename)
    except seash_exceptions.InitializeError, e:
        # NOTE(review): re-wrapping discards the original traceback --
        # presumably intentional so callers see a clean InitializeError.
        raise seash_exceptions.InitializeError(e)
|
def calculate_legacy_pad_amount(H_in, pad_h, k_h, s_h):
    '''
    This function calculate padding amount along H-axis. It can be applied to
    other axes. It should be only used with pooling conversion.

    :param H_in: input dimension along H-axis
    :param pad_h: padding amount at H-axis
    :param k_h: kernel's H-axis dimension
    :param s_h: stride along H-axis
    :return: (top_padding_amount, bottom_padding_amount)
    '''
    # Sub-expression shared by both output-shape formulas.
    span = H_in + 2 * pad_h - k_h
    # Output sizes under CoreML's IncludeLastPixel mode vs. valid padding.
    out_include_last = math.ceil(span / s_h) + 1
    out_valid = math.floor(span / s_h) + 1
    # Top padding is simply the requested amount. (For max pooling the pad
    # value is "-inf", for average pooling zeros -- handled by the caller.)
    pad_top = pad_h
    # Pad extra pixels at the bottom when IncludeLastPixel mode yields a
    # larger output than valid padding would.
    if out_include_last > out_valid:
        pad_bottom = pad_h + (s_h - span % s_h)
    else:
        pad_bottom = pad_h
    # Total extent after padding both sides of the original input.
    padded_size = H_in + pad_top + pad_bottom
    # Shrink the padded extent if the original pooling cuts off the last
    # output pixel.
    if (out_include_last - 1) * s_h >= H_in + pad_h:
        remainder = padded_size % s_h
        padded_size -= s_h if remainder == 0 else remainder
    return (pad_top, padded_size - pad_top - H_in)
|
def get_unicodedata(version, output=HOME, no_zip=False):
    """Ensure we have Unicode data to generate Unicode tables."""
    unicode_dir = os.path.join(output, 'unicodedata', version)
    zip_file = os.path.join(output, 'unicodedata', '%s.zip' % version)
    # Reuse a previously downloaded zip archive when the extracted
    # directory is missing.
    if os.path.exists(zip_file) and not os.path.exists(unicode_dir):
        unzip_unicode(output, version)
    # Download any files still missing, zipping afterwards if requested.
    download_unicodedata(version, output, no_zip)
|
def reducejson(j):
    """Walk the GraphQL query result and collect the distinct authors."""
    repo = j["data"]["repository"]
    authors = []
    # Commit-comment authors.
    for edge in repo["commitComments"]["edges"]:
        authors.append(edge["node"]["author"])
    # Issue authors plus the authors of their comments.
    for issue in repo["issues"]["nodes"]:
        authors.append(issue["author"])
        for comment in issue["comments"]["nodes"]:
            authors.append(comment["author"])
    # Pull-request authors plus the authors of their comments.
    for edge in repo["pullRequests"]["edges"]:
        authors.append(edge["node"]["author"])
        for comment in edge["node"]["comments"]["nodes"]:
            authors.append(comment["author"])
    # Deduplicate by login, dropping deleted (None) authors.
    return list({a['login']: a for a in authors if a is not None}.values())
|
def writePlist(dataObject, filepath):
    '''Write 'rootObject' as a plist to filepath.'''
    # Serialize to XML plist data via the Cocoa bridge; on failure the call
    # returns (None, <NSString error description>).
    plistData, error = (NSPropertyListSerialization.dataFromPropertyList_format_errorDescription_(dataObject, NSPropertyListXMLFormat_v1_0, None))
    if plistData is None:
        if error:
            # NSString -> ASCII bytes so the exception message is plain text.
            error = error.encode('ascii', 'ignore')
        else:
            error = "Unknown error"
        raise NSPropertyListSerializationException(error)
    else:
        # Atomic write: the file appears fully written or not at all.
        if plistData.writeToFile_atomically_(filepath, True):
            return
        else:
            raise NSPropertyListWriteException("Failed to write plist data to %s" % filepath)
|
def _import_protobuf_from_file(grpc_pyfile, method_name, service_name=None):
    """helper function which try to import method from the given _pb2_grpc.py file

    service_name should be provided only in case of name conflict

    return (False, None) in case of failure
    return (True, (stub_class, request_class, response_class)) in case of success
    """
    # Strip the trailing "_pb2_grpc.py" (12 characters) to get the module
    # name prefix shared by the generated pb2 / pb2_grpc modules.
    prefix = grpc_pyfile[:-12]
    pb2 = __import__("%s_pb2" % prefix)
    pb2_grpc = __import__("%s_pb2_grpc" % prefix)
    # we take all objects from pb2_grpc module which endswith "Stub", and we remove this postfix to get service_name
    all_service_names = [stub_name[:-4] for stub_name in dir(pb2_grpc) if stub_name.endswith("Stub")]
    # if service_name was specified we take only this service_name
    if (service_name):
        if (service_name not in all_service_names):
            return False, None
        all_service_names = [service_name]
    found_services = []
    for service_name in all_service_names:
        service_descriptor = getattr(pb2, "DESCRIPTOR").services_by_name[service_name]
        for method in service_descriptor.methods:
            if (method.name == method_name):
                # NOTE(review): these locals are read after the loop; they end
                # up holding the classes of the *last* matching service, which
                # is safe because more than one match raises below.
                request_class = method.input_type._concrete_class
                response_class = method.output_type._concrete_class
                stub_class = getattr(pb2_grpc, "%sStub" % service_name)
                found_services.append(service_name)
    if (len(found_services) == 0):
        return False, None
    if (len(found_services) > 1):
        raise Exception("Error while loading protobuf. We found methods %s in multiply services [%s]." " You should specify service_name." % (method_name, ", ".join(found_services)))
    return True, (stub_class, request_class, response_class)
|
def delete_project(project_id):
    """Delete Project."""
    project = get_data_or_404('project', project_id)
    # Only the project owner may delete it.
    if get_current_user_id() != project['owner_id']:
        return jsonify(message='forbidden'), 403
    delete_instance('project', project_id)
    return jsonify({})
|
def getIfConfig(self):
    """Return dictionary of Interface Configuration (ifconfig).

    @return: Dictionary of if configurations keyed by if name.
    """
    conf = {}
    try:
        out = subprocess.Popen([ipCmd, "addr", "show"],
                               stdout=subprocess.PIPE).communicate()[0]
    except:
        # NOTE(review): deliberately broad -- any failure to launch `ip`
        # is reported as one uniform error; kept to preserve behavior.
        raise Exception('Execution of command %s failed.' % ipCmd)
    for line in out.splitlines():
        # Interface header, e.g. "2: eth0: <BROADCAST,UP> mtu 1500 ..."
        # (raw strings fix the invalid-escape-sequence warnings).
        mobj = re.match(r'^\d+: (\S+):\s+<(\S*)>\s+(\S.*\S)\s*$', line)
        if mobj:
            iface = mobj.group(1)
            conf[iface] = {}
            continue
        # Link line, e.g. "    link/ether aa:bb:cc:dd:ee:ff brd ..."
        mobj = re.match(r'^\s{4}link\/(.*\S)\s*$', line)
        if mobj:
            arr = mobj.group(1).split()
            if len(arr) > 0:
                conf[iface]['type'] = arr[0]
            if len(arr) > 1:
                conf[iface]['hwaddr'] = arr[1]
            continue
        # Address line for IPv4/IPv6, e.g. "    inet 10.0.0.1/24 brd ..."
        mobj = re.match(r'^\s+(inet|inet6)\s+([\d\.\:A-Za-z]+)\/(\d+)($|\s+.*\S)\s*$', line)
        if mobj:
            proto = mobj.group(1)
            # Replaces the Python2-only dict.has_key() call; `not in`
            # behaves identically on both Python 2 and 3.
            if proto not in conf[iface]:
                conf[iface][proto] = []
            addrinfo = {}
            addrinfo['addr'] = mobj.group(2).lower()
            addrinfo['mask'] = int(mobj.group(3))
            arr = mobj.group(4).split()
            if len(arr) > 0 and arr[0] == 'brd':
                addrinfo['brd'] = arr[1]
            conf[iface][proto].append(addrinfo)
            continue
    return conf
|
def assert_credentials_match(self, verifier, authc_token, account):
    """
    :type verifier: authc_abcs.CredentialsVerifier
    :type authc_token: authc_abcs.AuthenticationToken
    :type account: account_abcs.Account
    :returns: account_abcs.Account
    :raises IncorrectCredentialsException: when authentication fails,
                                           including unix epoch timestamps
                                           of recently failed attempts
    """
    cred_type = authc_token.token_info['cred_type']
    try:
        verifier.verify_credentials(authc_token, account['authc_info'])
    except IncorrectCredentialsException:
        # Record the failure, then re-raise with the account's failure
        # history attached (e.g. for lockout policies).
        updated_account = self.update_failed_attempt(authc_token, account)
        failed_attempts = updated_account['authc_info'][cred_type].get('failed_attempts', [])
        raise IncorrectCredentialsException(failed_attempts)
    except ConsumedTOTPToken:
        # TOTP tokens are single-use: remember the consumed token on the
        # cached account so it cannot be replayed.
        account['authc_info'][cred_type]['consumed_token'] = authc_token.credentials
        self.cache_handler.set(domain='authentication:' + self.name, identifier=authc_token.identifier, value=account)
|
def compiler(self, target):
    """Returns the thrift compiler to use for the given target.

    :param target: The target to extract the thrift compiler from.
    :type target: :class:`pants.backend.codegen.thrift.java.java_thrift_library.JavaThriftLibrary`
    :returns: The thrift compiler to use.
    :rtype: string
    """
    self._check_target(target)
    # Fall back to the task-level default when the target does not pin one.
    if target.compiler:
        return target.compiler
    return self._default_compiler
|
def _get_next_events(self, material):
    """Get next events from Genie ROOT file

    Looks over the generator
    """
    f = ROOT.TFile(self.filenames[material])
    try:
        t = f.Get('gst')
        n = t.GetEntries()
    except:
        self.log.critical('Could not open the ROOT file with Genie events')
        raise
    for i in range(n):
        t.GetEntry(i)
        next_events = []
        # All particles of this event share the cached interaction vertex.
        position = convert_3vector_to_dict([self.particle['position']['x'].get_cache(), self.particle['position']['y'].get_cache(), self.particle['position']['z'].get_cache()])
        lepton_event = {}
        # If the outgoing lepton is (nearly) massless it is still a
        # neutrino (NC or elastic scattering); otherwise it is the charged
        # CC partner of the incoming neutrino.
        if t.El ** 2 - (t.pxl ** 2 + t.pyl ** 2 + t.pzl ** 2) < 1e-7:
            lepton_event['pid'] = self.particle['pid'].get()
            # Either NC or ES
        else:
            lepton_event['pid'] = lookup_cc_partner(self.particle['pid'].get())
        # units: GeV -> MeV
        momentum_vector = [1000 * x for x in [t.pxl, t.pyl, t.pzl]]
        lepton_event['momentum'] = convert_3vector_to_dict(momentum_vector)
        lepton_event['position'] = position
        next_events.append(lepton_event)
        for j in range(t.nf):  # nf, number final hadronic states
            hadron_event = {}
            hadron_event['pid'] = t.pdgf[j]
            hadron_event['position'] = position
            # units: GeV -> MeV
            momentum_vector = [1000 * x for x in [t.pxf[j], t.pyf[j], t.pzf[j]]]
            hadron_event['momentum'] = convert_3vector_to_dict(momentum_vector)
            next_events.append(hadron_event)
        event_type = {}
        event_type['vertex'] = position
        to_save = {}
        # maps our names to Genie gst names
        to_save['incoming_neutrino'] = 'neu'
        to_save['neutrino_energy'] = 'Ev'
        to_save['target_material'] = 'tgt'
        for key, value in to_save.iteritems():
            self.log.info('%s : %s' % (key, str(t.__getattr__(value))))
            event_type[key] = t.__getattr__(value)
        self.log.debug('Event type:')
        # Copy the interaction-type flags (quasi-elastic, resonance, ...),
        # logging only the ones that are set.
        for my_type in ['qel', 'res', 'dis', 'coh', 'dfr', 'imd', 'nuel', 'em']:
            if t.__getattr__(my_type) == 1:
                self.log.debug('\t%s', my_type)
            event_type[my_type] = t.__getattr__(my_type)
        self.log.debug('Propogator:')
        # Copy the current-type flags (neutral / charged current).
        for prop in ['nc', 'cc']:
            if t.__getattr__(prop) == 1:
                self.log.debug('\t%s', prop)
            event_type[prop] = t.__getattr__(prop)
        yield next_events, event_type
    f.Close()
    # The input file is consumed: remove it once fully iterated.
    os.remove(self.filenames[material])
|
def graphs(self):
    """Sorry for the black magic. The result is an object whose attributes
    are all the graphs found in graphs.py initialized with this instance as
    only argument."""
    container = Dummy()
    # Instantiate every exported graph class with this object as its sole
    # argument and expose it under the class's short_name.
    for name in graphs.__all__:
        graph_cls = getattr(graphs, name)
        setattr(container, graph_cls.short_name, graph_cls(self))
    return container
|
def from_api_repr(cls, api_repr):
    """Return a ``SchemaField`` object deserialized from a dictionary.

    Args:
        api_repr (Mapping[str, str]): The serialized representation
            of the SchemaField, such as what is output by
            :meth:`to_api_repr`.

    Returns:
        google.cloud.biquery.schema.SchemaField:
            The ``SchemaField`` object.
    """
    # Optional properties fall back to their documented defaults.
    mode = api_repr.get("mode", "NULLABLE")
    description = api_repr.get("description")
    # Sub-fields are deserialized recursively.
    subfields = [cls.from_api_repr(item) for item in api_repr.get("fields", ())]
    return cls(
        field_type=api_repr["type"].upper(),
        fields=subfields,
        mode=mode.upper(),
        description=description,
        name=api_repr["name"],
    )
|
def _srcRect_x ( self , attr_name ) :
"""Value of ` p : blipFill / a : srcRect / @ { attr _ name } ` or 0.0 if not present ."""
|
srcRect = self . blipFill . srcRect
if srcRect is None :
return 0.0
return getattr ( srcRect , attr_name )
|
def concat(bed_files, catted=None):
    """recursively concat a set of BED files, returning a
    sorted bedtools object of the result"""
    # Drop empty/None entries before doing anything else.
    bed_files = [x for x in bed_files if x]
    if len(bed_files) == 0:
        if catted:
            # move to a .bed extension for downstream tools if not already
            sorted_bed = catted.sort()
            if not sorted_bed.fn.endswith(".bed"):
                return sorted_bed.moveto(sorted_bed.fn + ".bed")
            else:
                return sorted_bed
        else:
            # Nothing was ever accumulated: returns None.
            return catted
    if not catted:
        # First call: seed the accumulator with one file.
        bed_files = list(bed_files)
        catted = bt.BedTool(bed_files.pop())
    else:
        # Append the next file verbatim (no merging, no column truncation).
        catted = catted.cat(bed_files.pop(), postmerge=False, force_truncate=False)
    # Recurse on the remaining files with the updated accumulator.
    return concat(bed_files, catted)
|
def register_to_random_name(grad_f):
    """Register a gradient function to a random string.

    In order to use a custom gradient in TensorFlow, it must be registered to a
    string. This is both a hassle, and -- because only one function can every be
    registered to a string -- annoying to iterate on in an interactive
    environemnt.

    This function registers a function to a unique random string of the form:

      {FUNCTION_NAME}_{RANDOM_SALT}

    And then returns the random string. This is a helper in creating more
    convenient gradient overrides.

    Args:
      grad_f: gradient function to register. Should map (op, grad) -> grad(s)

    Returns:
      String that gradient function was registered to.
    """
    # A UUID salt guarantees a fresh, collision-free registration name.
    registered_name = "%s_%s" % (grad_f.__name__, uuid.uuid4())
    tf.RegisterGradient(registered_name)(grad_f)
    return registered_name
|
def get_temp_url_key(self, cached=True):
    """Returns the current TempURL key, or None if it has not been set.

    By default the value returned is cached. To force an API call to get
    the current value on the server, pass `cached=False`.
    """
    meta = self._cached_temp_url_key
    if meta and cached:
        return meta
    # Cache miss or forced refresh: ask the server and remember the answer.
    meta = self.get_account_metadata().get("temp_url_key")
    self._cached_temp_url_key = meta
    return meta
|
def setdiff(left, *rights, **kwargs):
    """Exclude data from a collection, like `except` clause in SQL. All collections involved should
    have same schema.

    :param left: collection to drop data from
    :param rights: collection or list of collections
    :param distinct: whether to preserve duplicate entries
    :return: collection

    :Examples:

    >>> import pandas as pd
    >>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]}))
    >>> df2 = DataFrame(pd.DataFrame({'a': [1, 3], 'b': [1, 3]}))
    >>> df1.setdiff(df2)
       a  b
    0  2  2
    1  3  3
    2  3  3
    >>> df1.setdiff(df2, distinct=True)
       a  b
    0  2  2
    """
    import time
    from ..utils import output

    distinct = kwargs.get('distinct', False)
    # Accept both setdiff(left, r1, r2, ...) and setdiff(left, [r1, r2, ...]).
    if isinstance(rights[0], list):
        rights = rights[0]
    cols = [n for n in left.schema.names]
    types = [n for n in left.schema.types]
    # Tag every row with +1 (from left) or -1 (from a right) in a helper
    # column whose name is unlikely to collide with a real column.
    counter_col_name = 'exc_counter_%d' % int(time.time())
    left = left[left, Scalar(1).rename(counter_col_name)]
    rights = [r[r, Scalar(-1).rename(counter_col_name)] for r in rights]
    unioned = left
    for r in rights:
        unioned = unioned.union(r)
    if distinct:
        # A distinct row survives only if its minimum counter is 1, i.e.
        # it never appeared in any right-hand collection.
        aggregated = unioned.groupby(*cols).agg(**{counter_col_name: unioned[counter_col_name].min()})
        return aggregated.filter(aggregated[counter_col_name] == 1).select(*cols)
    else:
        # Multiset difference: the summed counter is the surviving
        # multiplicity (left occurrences minus right occurrences).
        aggregated = unioned.groupby(*cols).agg(**{counter_col_name: unioned[counter_col_name].sum()})

        @output(cols, types)
        def exploder(row):
            import sys
            # xrange avoids materializing a list on Python 2.
            irange = xrange if sys.version_info[0] < 3 else range
            # Re-emit the row (without the helper counter column) once per
            # surviving multiplicity; non-positive counts yield nothing.
            for _ in irange(getattr(row, counter_col_name)):
                yield row[:-1]

        return aggregated.map_reduce(mapper=exploder).select(*cols)
|
def _make_pull_imethod_resp ( objs , eos , context_id ) :
"""Create the correct imethod response for the open and pull methods"""
|
eos_tup = ( u'EndOfSequence' , None , eos )
enum_ctxt_tup = ( u'EnumerationContext' , None , context_id )
return [ ( "IRETURNVALUE" , { } , objs ) , enum_ctxt_tup , eos_tup ]
|
def check(text):
    """Flag offensive words based on the GLAAD reference guide.

    :param text: the text to scan
    :return: result of ``existence_check`` over the offensive-term list
    """
    err = "glaad.offensive_terms"
    msg = "Offensive term. Remove it or consider the context."
    # "homo" may create false positives without additional context.
    # FIXME use topic detector to decide whether "homo" is offensive
    offensive_terms = [
        "fag",
        "faggot",
        "dyke",
        "sodomite",
        "homosexual agenda",
        "gay agenda",
        "transvestite",
        "homosexual lifestyle",
        "gay lifestyle",
    ]
    return existence_check(text, offensive_terms, err, msg, join=True, ignore_case=False)
|
def serialize_args(self):
    """Returns (args, kwargs) to be used when deserializing this parameter.

    The parent's serialized args are extended by prepending a list of
    ``[type_id, type_serialize_args]`` pairs, one per contained type.
    """
    args, kwargs = super(MultiParameter, self).serialize_args()
    type_specs = [[t.id, t.serialize_args()] for t in self.types]
    args.insert(0, type_specs)
    return args, kwargs
|
def get_router_id(self, tenant_id, tenant_name):
    """Retrieve the router ID for a tenant.

    The local tenant cache is consulted first; on a miss, OpenStack is
    queried for a router named ``FW_RTR_<tenant_name>``.
    """
    router_id = None
    if tenant_id in self.tenant_dict:
        router_id = self.tenant_dict[tenant_id].get('router_id')
    if not router_id:
        matches = self.os_helper.get_rtr_by_name('FW_RTR_' + tenant_name)
        if matches:
            router_id = matches[0].get('id')
    return router_id
|
def parse_solver_setting(s):
    """Parse a ``key=value`` solver setting string into ``(key, value)``.

    A bare key is treated as ``key=yes``.  Known keys are coerced: the
    boolean flags accept 1/yes/true/on, ``threads`` becomes an int and the
    tolerance settings become floats; all other values stay strings.
    """
    key, sep, value = s.partition('=')
    if not sep:
        value = 'yes'
    if key in ('rational', 'integer', 'quadratic'):
        value = value.lower() in ('1', 'yes', 'true', 'on')
    elif key == 'threads':
        value = int(value)
    elif key in ('feasibility_tolerance', 'optimality_tolerance',
                 'integrality_tolerance'):
        value = float(value)
    return key, value
|
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
    """A context manager for convenience in writing a file or directory in an atomic way. Set up
    a temporary name, then rename it after the operation is done, optionally making a backup of
    the previous file or directory, if present.

    :param dest_path: final destination path; ``os.devnull`` is yielded unchanged
    :param make_parents: if True, create any missing parent directories of the temp path
    :param backup_suffix: if set, move a pre-existing ``dest_path`` aside with this suffix
    :param suffix: temp-name pattern; ``%s`` is filled with a unique id from ``new_uid()``
    :raises IOError: if the caller never created the temporary file/directory
    """
    if dest_path == os.devnull:
        # Handle the (probably rare) case of writing to /dev/null.
        yield dest_path
    else:
        tmp_path = ("%s" + suffix) % (dest_path, new_uid())
        if make_parents:
            make_parent_dirs(tmp_path)
        yield tmp_path
        # Note this is not in a finally block, so that result won't be renamed to final location
        # in case of abnormal exit.
        if not os.path.exists(tmp_path):
            raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
        if backup_suffix:
            move_to_backup(dest_path, backup_suffix=backup_suffix)
        # If the target already exists, and is a directory, it has to be removed.
        if os.path.isdir(dest_path):
            shutil.rmtree(dest_path)
        shutil.move(tmp_path, dest_path)
|
def validate(self, value):
    """Performs validation of the value.

    :param value: value to validate
    :raise ValidationError: if the value is invalid
    """
    if not self.choices:
        return
    if isinstance(self.choices[0], (list, tuple)):
        # Choices are (key, label) pairs; only the keys are valid values.
        option_keys = [k for k, _unused in self.choices]
        if value not in option_keys:
            self.raise_error(
                'Value {0} is not listed among valid choices {1}'.format(
                    value, option_keys))
    elif value not in self.choices:
        self.raise_error(
            'Value {0} is not listed among valid choices {1}'.format(
                value, self.choices))
|
def gather_hinting(config, rules, valid_paths):
    """Construct hint arguments for datanommer from a list of rules.

    :param config: configuration passed through to callable hints
    :param rules: iterable of rule objects carrying ``code_path``,
        ``negated`` and ``arguments`` attributes
    :param valid_paths: mapping of ``root -> name -> info dict`` describing
        each rule's hints
    :return: defaultdict(list) mapping hint keys to lists of hint values
    """
    hinting = collections.defaultdict(list)
    for rule in rules:
        root, name = rule.code_path.split(':', 1)
        info = valid_paths[root][name]

        # If the rule is inverted but its hints are not invertible, there is
        # no hinting we can provide for it at all.  Checking this up front
        # (the original checked it per-branch) also avoids invoking the
        # hints-callable just to discard its result.
        if rule.negated and not info['hints-invertible']:
            continue

        if info['hints-callable']:
            # Call the callable hint to get its values.
            result = info['hints-callable'](config=config, **rule.arguments)
            for key, values in result.items():
                # Negate the hint key if necessary.
                key = 'not_' + key if rule.negated else key
                hinting[key].extend(values)

        # Then, finish off with all the other ordinary, non-callable hints.
        for key, value in info['datanommer-hints'].items():
            key = 'not_' + key if rule.negated else key
            hinting[key].extend(value)

    log.debug('gathered hinting %r', hinting)
    return hinting
|
def absolute_address(self):
    """Get the absolute byte address of this node.

    The address is the node's own offset plus the absolute address of its
    parent, accumulated up to (but excluding) the root node.  Indexes of
    all arrays in the node's lineage must be known.

    Raises
    ------
    ValueError
        If this property is referenced on a node whose array lineage is
        not fully defined.
    """
    parent = self.parent
    if not parent or isinstance(parent, RootNode):
        return self.address_offset
    return parent.absolute_address + self.address_offset
|
def length_degrees(self):
    '''Computes the length of the arc in degrees.

    The length computation corresponds to what you would expect if you
    would draw the arc using matplotlib taking direction into account.

    >>> Arc((0,0), 1, 0, 0, True).length_degrees()
    0.0
    >>> Arc((0,0), 2, 0, 0, False).length_degrees()
    0.0
    >>> Arc((0,0), 3, 0, 1, True).length_degrees()
    1.0
    >>> Arc((0,0), 4, 0, 1, False).length_degrees()
    359.0
    >>> Arc((0,0), 5, 0, 360, True).length_degrees()
    360.0
    >>> Arc((0,0), 6, 0, 360, False).length_degrees()
    0.0
    >>> Arc((0,0), 7, 0, 361, True).length_degrees()
    360.0
    >>> Arc((0,0), 8, 0, 361, False).length_degrees()
    359.0
    >>> Arc((0,0), 9, 10, -10, True).length_degrees()
    340.0
    >>> Arc((0,0), 10, 10, -10, False).length_degrees()
    20.0
    >>> Arc((0,0), 1, 10, 5, True).length_degrees()
    355.0
    >>> Arc((0,0), 1, -10, -5, False).length_degrees()
    355.0
    >>> Arc((0,0), 1, 180, -180, True).length_degrees()
    0.0
    >>> Arc((0,0), 1, 180, -180, False).length_degrees()
    360.0
    >>> Arc((0,0), 1, -180, 180, True).length_degrees()
    360.0
    >>> Arc((0,0), 1, -180, 180, False).length_degrees()
    0.0
    >>> Arc((0,0), 1, 175, -175, True).length_degrees()
    10.0
    >>> Arc((0,0), 1, 175, -175, False).length_degrees()
    350.0
    '''
    # Signed sweep from start to end; the sign encodes drawing direction.
    span = self.sign * (self.to_angle - self.from_angle)
    if span > 360:
        # More than a full turn collapses to exactly one revolution.
        return 360.0
    if span < 0:
        # A negative sweep wraps around the other way.
        return span % 360.0
    return abs(span)
|
def create_settings(sender, **kwargs):
    """Signal handler: create user notification settings on user creation.

    Only acts when the post-save signal reports a newly created instance.
    """
    if not kwargs['created']:
        return
    new_user = kwargs['instance']
    UserWebNotificationSettings.objects.create(user=new_user)
    UserEmailNotificationSettings.objects.create(user=new_user)
|
def expect_types(__funcname=_qualified_name, **named):
    """Preprocessing decorator that verifies inputs have expected types.

    Examples
    --------
    >>> @expect_types(x=int, y=str)
    ... def foo(x, y):
    ...     return x, y
    >>> foo(2, '3')
    (2, '3')
    >>> foo(2.0, '3')  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
    TypeError: ...foo() expected a value of type int for argument 'x',
    but got float instead.

    Notes
    -----
    A special argument, __funcname, can be provided as a string to override the
    function name shown in error messages. This is most often used on __init__
    or __new__ methods to make errors refer to the class name instead of the
    function name.
    """
    # Fail fast at decoration time on invalid specs rather than at call time.
    for name, type_ in iteritems(named):
        if not isinstance(type_, (type, tuple)):
            raise TypeError(
                "expect_types() expected a type or tuple of types for "
                "argument '{name}', but got {type_} instead.".format(
                    name=name, type_=type_,
                )
            )

    def _expect_type(type_):
        # Slightly different messages for type and tuple of types.
        _template = (
            "%(funcname)s() expected a value of type {type_or_types} "
            "for argument '%(argname)s', but got %(actual)s instead."
        )
        if isinstance(type_, tuple):
            template = _template.format(
                type_or_types=' or '.join(map(_qualified_name, type_))
            )
        else:
            template = _template.format(type_or_types=_qualified_name(type_))

        # pred returns True for *invalid* values; make_check raises exc_type
        # with the template filled in, using `actual` to render the value.
        return make_check(
            exc_type=TypeError,
            template=template,
            pred=lambda v: not isinstance(v, type_),
            actual=compose(_qualified_name, type),
            funcname=__funcname,
        )

    return preprocess(**valmap(_expect_type, named))
|
def _initialize_operation_name_to_id ( self ) :
"""Initializer for _ operation _ name _ to _ id .
Returns :
a { string : int } , mapping operation names to their index in _ operations ."""
|
operation_name_to_id = { }
for i , operation in enumerate ( self . _operations ) :
operation_name_to_id [ operation . name ] = i
return operation_name_to_id
|
def do_start_cluster(self, cluster):
    """Start the cluster

    Usage:
        > start_cluster <cluster>
    """
    try:
        target = api.get_cluster(cluster)
        target.start()
        print("Starting Cluster")
    except ApiException:
        print("Cluster not found")
        return None
|
def factorize(number):
    """Get the prime factors of an integer except for 1.

    Parameters
    ----------
    number : int

    Returns
    -------
    primes : list of int
        Prime factors in non-decreasing order; ``-1`` is prepended for
        negative input.  1 is a unit with no prime factors, so an empty
        list is returned for it.

    Raises
    ------
    ValueError
        If ``number`` is not an int, or is 0 (every prime divides 0).

    Examples
    --------
    >>> factorize(-17)
    [-1, 17]
    >>> factorize(8)
    [2, 2, 2]
    >>> factorize(3**25)
    [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
    >>> factorize(1)
    []
    """
    if not isinstance(number, int):
        raise ValueError('integer expected, but type(number)={}'.format(type(number)))
    if number < 0:
        return [-1] + factorize(-number)
    if number == 0:
        raise ValueError('All primes are prime factors of 0.')
    if number == 1:
        # 1 is a unit, not a prime: it has no prime factorization.
        # (The previous version wrongly returned [1] here.)
        return []
    # Trial division up to sqrt(number): the smallest divisor found is prime.
    for candidate in range(2, int(math_stl.ceil(number ** 0.5)) + 1):
        if number % candidate == 0:
            # Use integer floor division; int(number / candidate) goes
            # through float and loses precision for large integers.
            return [candidate] + factorize(number // candidate)
    return [number]
|
def view_user(self, user):
    """View the given user on the user page

    :param user: the user to view
    :type user: :class:`jukeboxcore.djadapter.models.User`
    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug('Viewing user %s', user.username)
    self.cur_user = None
    # Switch the tab widget to the user page before filling it in.
    self.pages_tabw.setCurrentIndex(9)
    self.user_username_le.setText(user.username)
    self.user_first_le.setText(user.first_name)
    self.user_last_le.setText(user.last_name)
    self.user_email_le.setText(user.email)
    # Build the project table model: one row per project of this user.
    prjrootdata = treemodel.ListItemData(['Name', 'Short', 'Path', 'Created', 'Semester', 'Status', 'Resolution', 'FPS', 'Scale'])
    prjrootitem = treemodel.TreeItem(prjrootdata)
    prjs = djadapter.projects.filter(users=user)
    for prj in prjs:
        prjdata = djitemdata.ProjectItemData(prj)
        treemodel.TreeItem(prjdata, prjrootitem)
    self.user_prj_model = treemodel.TreeModel(prjrootitem)
    self.user_prj_tablev.setModel(self.user_prj_model)
    # Build the task tree model:
    # project -> "Asset"/"Shot" section -> atype/sequence -> element -> task.
    taskrootdata = treemodel.ListItemData(['Name'])
    taskrootitem = treemodel.TreeItem(taskrootdata)
    self.user_task_model = treemodel.TreeModel(taskrootitem)
    self.user_task_treev.setModel(self.user_task_model)
    tasks = djadapter.tasks.filter(users=user)
    # Caches mapping db objects to their (single) tree item so the tree is
    # only ever built with one node per asset/shot/atype/sequence/project.
    assets = {}
    shots = {}
    atypes = {}
    seqs = {}
    prjs = {}
    for t in tasks:
        tdata = djitemdata.TaskItemData(t)
        titem = treemodel.TreeItem(tdata)
        e = t.element
        if isinstance(e, djadapter.models.Asset):
            eitem = assets.get(e)
            if not eitem:
                edata = djitemdata.AssetItemData(e)
                eitem = treemodel.TreeItem(edata)
                assets[e] = eitem
            # Assets are grouped by their asset type.
            egrp = e.atype
            egrpitem = atypes.get(egrp)
            if not egrpitem:
                egrpdata = djitemdata.AtypeItemData(egrp)
                egrpitem = treemodel.TreeItem(egrpdata)
                atypes[egrp] = egrpitem
        else:
            # Any non-Asset element is treated as a Shot.
            eitem = shots.get(e)
            if not eitem:
                edata = djitemdata.ShotItemData(e)
                eitem = treemodel.TreeItem(edata)
                shots[e] = eitem
            # Shots are grouped by their sequence.
            egrp = e.sequence
            egrpitem = seqs.get(egrp)
            if not egrpitem:
                egrpdata = djitemdata.SequenceItemData(egrp)
                egrpitem = treemodel.TreeItem(egrpdata)
                seqs[egrp] = egrpitem
        # Attach the element to its group only once.
        if eitem not in egrpitem.childItems:
            eitem.set_parent(egrpitem)
        prj = egrp.project
        prjitem = prjs.get(prj)
        if not prjitem:
            prjdata = djitemdata.ProjectItemData(prj)
            prjitem = treemodel.TreeItem(prjdata, taskrootitem)
            prjs[prj] = prjitem
            # Every project node gets fixed "Asset" and "Shot" section
            # children, created once and looked up by index afterwards.
            assetdata = treemodel.ListItemData(["Asset"])
            assetitem = treemodel.TreeItem(assetdata, prjitem)
            shotdata = treemodel.ListItemData(["Shot"])
            shotitem = treemodel.TreeItem(shotdata, prjitem)
        else:
            assetitem = prjitem.child(0)
            shotitem = prjitem.child(1)
        # Hang the group under the matching section, only once.
        if isinstance(egrp, djadapter.models.Atype) and egrpitem not in assetitem.childItems:
            egrpitem.set_parent(assetitem)
        elif isinstance(egrp, djadapter.models.Sequence) and egrpitem not in shotitem.childItems:
            egrpitem.set_parent(shotitem)
        titem.set_parent(eitem)
    self.cur_user = user
|
def CreateAdGroup(client, campaign_id):
    """Creates an ad group for a dynamic remarketing campaign.

    Args:
      client: an AdWordsClient instance.
      campaign_id: an int campaign ID.
    Returns:
      The ad group that was successfully created.
    """
    service = client.GetService('AdGroupService', 'v201809')
    operation = {
        'operator': 'ADD',
        'operand': {
            'name': 'Dynamic remarketing ad group',
            'campaignId': campaign_id,
            'status': 'ENABLED',
        },
    }
    return service.mutate([operation])['value'][0]
|
def add_device(self, resource_name, device):
    """Bind *device* to *resource_name* and index it in the registry.

    :raise ValueError: if the device is already bound to a resource name.
    """
    if device.resource_name is not None:
        msg = 'The device %r is already assigned to %s'
        raise ValueError(msg % (device, device.resource_name))
    device.resource_name = resource_name
    self._internal[resource_name] = device
|
def finished_or_stopped(self):
    """Condition check on finished or stopped status.

    Returns a value equivalent to the state machine not being 'active'.

    :return: True when the execution mode is STOPPED or FINISHED
    :rtype: bool
    """
    mode = self._status.execution_mode
    return (mode is StateMachineExecutionStatus.STOPPED
            or mode is StateMachineExecutionStatus.FINISHED)
|
def query_api(app, client_id, imgur_id, is_album):
    """Query the Imgur API.

    :raise APIError: When Imgur responds with errors or unexpected data.

    :param sphinx.application.Sphinx app: Sphinx application object.
    :param str client_id: Imgur API client ID to use. https://api.imgur.com/oauth2
    :param str imgur_id: The Imgur ID to query.
    :param bool is_album: If this ID is an album instead of an image.

    :return: Parsed JSON.
    :rtype: dict
    """
    url = API_URL.format(type='album' if is_album else 'image', id=imgur_id)
    request_headers = {'Authorization': 'Client-ID {}'.format(client_id)}
    app.info('querying {}'.format(url))
    # Query.
    try:
        response = requests.get(url, headers=request_headers, timeout=5)
    except (requests.exceptions.ConnectTimeout,
            requests.exceptions.ReadTimeout,
            requests.Timeout) as exc:
        raise APIError('timed out waiting for reply from {}: {}'.format(url, str(exc)), app)
    except requests.ConnectionError as exc:
        raise APIError('unable to connect to {}: {}'.format(url, str(exc)), app)
    app.debug2('Imgur API responded with: %s', response.text)
    # Parse JSON.
    try:
        parsed = response.json()
    except ValueError:
        raise APIError('failed to parse JSON from {}'.format(url), app)
    # Verify data.
    if parsed.get('success'):
        return parsed
    # Dig out the most specific error message the payload offers.
    if 'data' not in parsed:
        message = 'no "data" key in JSON'
    elif 'error' not in parsed['data']:
        message = 'no "error" key in JSON'
    else:
        message = parsed['data']['error']
    raise APIError('query unsuccessful from {}: {}'.format(url, message), app)
|
def _compute_magnitude ( self , rup , C ) :
"""Compute the first term of the equation described on p . 199:
` ` b1 + b2 * M + b3 * M * * 2 ` `"""
|
return C [ 'b1' ] + ( C [ 'b2' ] * rup . mag ) + ( C [ 'b3' ] * ( rup . mag ** 2 ) )
|
def state(self):
    """Reading returns a list of state flags. Possible flags are
    `running`, `ramping`, `holding`, `overloaded` and `stalled`."""
    cached, current = self.get_attr_set(self._state, 'state')
    self._state = cached
    return current
|
def register_members(self):
    """Collect the Group-typed class members and convert them to object
    members by initializing each descriptor on this instance."""
    members = {}
    for attr_name, attr in iteritems(type(self).__dict__):
        if isinstance(attr, Group):
            members[attr_name] = attr
    self._members = members
    for attr_name, member in iteritems(self._members):
        member.init_descriptor(attr_name, self)
|
def disconnect(self, close=True):
    """Disconnect the TCP transport and shut down the socket listener
    thread.

    :param close: when True, also close all sessions in the connection
        (which closes each session's tree connects as well).
    """
    if close:
        # Copy the values: session.disconnect mutates the session table.
        for sess in list(self.session_table.values()):
            sess.disconnect(True)
    log.info("Disconnecting transport connection")
    self.transport.disconnect()
|
def read_attributes(self, attributes=None):
    '''Collect read attributes across reads in this PileupCollection into a
    pandas.DataFrame.

    Valid attributes are the following properties of a pysam.AlignedSegment
    instance. See:
        http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment
    for the meaning of these attributes.

        * cigarstring
        * flag
        * inferred_length
        * is_duplicate
        * is_paired
        * is_proper_pair
        * is_qcfail
        * is_read1
        * is_read2
        * is_reverse
        * is_secondary
        * is_unmapped
        * mapping_quality
        * mate_is_reverse
        * mate_is_unmapped
        * next_reference_id
        * next_reference_start
        * query_alignment_end
        * query_alignment_length
        * query_alignment_qualities
        * query_alignment_sequence
        * query_alignment_start
        * query_length
        * query_name
        * reference_end
        * reference_id
        * reference_length
        * reference_start
        * template_length

    (Note: the above list is parsed into the _READ_ATTRIBUTE_NAMES class
    variable, so be careful when modifying it.)

    Additionally, for alignment "tags" (arbitrary key values associated
    with an alignment), a column of the form "TAG_{tag name}" is
    included.

    Finally, the column "pysam_alignment_record" gives the underlying
    `pysam.AlignedSegment` instances.

    Parameters
    ----------
    attributes (optional): list of strings
        List of columns to include. If unspecified, all columns are
        included in the result.

    Returns
    ----------
    pandas.DataFrame of read attributes.
    '''
    def include(attribute):
        # True when the caller asked for this column, or asked for all.
        return attributes is None or attribute in attributes

    reads = self.reads()
    # Track every column we *could* have produced, for the error message.
    possible_column_names = list(PileupCollection._READ_ATTRIBUTE_NAMES)
    # OrderedDict keeps the DataFrame columns in declaration order.
    result = OrderedDict(
        (name, [getattr(read, name) for read in reads])
        for name in PileupCollection._READ_ATTRIBUTE_NAMES
        if include(name))

    # Add tag columns.
    if reads:
        tag_dicts = [dict(x.get_tags()) for x in reads]
        # Union of tag keys across all reads; reads missing a tag get None.
        tag_keys = set.union(*[set(item.keys()) for item in tag_dicts])
        for tag_key in sorted(tag_keys):
            column_name = "TAG_%s" % tag_key
            possible_column_names.append(column_name)
            if include(column_name):
                result[column_name] = [d.get(tag_key) for d in tag_dicts]

    # Lastly, we include the underlying pysam alignment record.
    possible_column_names.append("pysam_alignment_record")
    if include("pysam_alignment_record"):
        result["pysam_alignment_record"] = reads

    # If particular attributes were requested, check that they're here.
    if attributes is not None:
        for attribute in attributes:
            if attribute not in result:
                raise ValueError(
                    "No such attribute: %s. Valid attributes are: %s"
                    % (attribute, " ".join(possible_column_names)))
        assert set(attributes) == set(result)
    return pandas.DataFrame(result)
|
def model_import(self):
    """Import and instantiate the non-JIT models and the JIT models.

    Models defined in ``jits`` and ``non_jits`` in ``models/__init__.py``
    will be imported and instantiated accordingly.

    Returns
    -------
    None
    """
    # Non-JIT models: import eagerly, instantiate, and register with the
    # group and device managers.
    for module_name, mapping in non_jits.items():
        for class_name, instance_name in mapping.items():
            module = importlib.import_module('andes.models.' + module_name)
            model_class = getattr(module, class_name)
            self.__dict__[instance_name] = model_class(self, instance_name)
            group = self.__dict__[instance_name]._group
            self.group_add(group)
            self.__dict__[group].register_model(instance_name)
            self.devman.register_device(instance_name)
    # JIT models: wrapped in JIT proxies (presumably imported lazily --
    # see the JIT class for the actual import semantics).
    for module_name, mapping in jits.items():
        for class_name, instance_name in mapping.items():
            self.__dict__[instance_name] = JIT(self, module_name, class_name, instance_name)
|
def dateparser(self, dformat='%d/%m/%Y'):
    """Returns a date parser for pandas.

    The returned callable maps an iterable of date strings to a list of
    ``datetime`` objects parsed with *dformat*.

    :param dformat: strptime format string applied to each date.
    """
    # pd.datetime was simply an alias of datetime.datetime; it was
    # deprecated in pandas 1.0 and removed in 2.0, so use the real
    # class directly -- behavior is identical.
    from datetime import datetime

    def dateparse(dates):
        return [datetime.strptime(d, dformat) for d in dates]
    return dateparse
|
def dropcols(df, start=None, end=None):
    """Drop columns that contain NaN within [start, end] inclusive.

    A wrapper around DataFrame.dropna() that builds an easier *subset*
    syntax for tseries-indexed DataFrames.

    Parameters
    ----------
    df : DataFrame
    start : str or datetime, default None
        start cutoff date, inclusive
    end : str or datetime, default None
        end cutoff date, inclusive

    Example
    -------
    df = DataFrame(np.random.randn(10, 3),
                   index=pd.date_range('2017', periods=10))
    # Drop in some NaN
    df.set_value('2017-01-04', 0, np.nan)
    df.set_value('2017-01-02', 2, np.nan)
    df.loc['2017-01-05':, 1] = np.nan
    # only col2 will be kept--its NaN value falls before `start`
    print(dropcols(df, start='2017-01-03'))
                      2
    2017-01-01  0.12939
    2017-01-02      NaN
    2017-01-03  0.16596
    2017-01-04  1.06442
    2017-01-05 -1.87040
    2017-01-06 -0.17160
    2017-01-07  0.94588
    2017-01-08  1.49246
    2017-01-09  0.02042
    2017-01-10  0.75094
    """
    if isinstance(df, Series):
        raise ValueError("func only applies to `pd.DataFrame`")
    lo = df.index[0] if start is None else start
    hi = df.index[-1] if end is None else end
    # Rows inside the window; a column is dropped if it has NaN there.
    window = df.index[(df.index >= lo) & (df.index <= hi)]
    return df.dropna(axis=1, subset=window)
|
def adapter(self, adapter):
    """Sets the adapter instance under the "_adapter" property in use by this
    class, and propagates it to all implemented classes in this category.

    :param adapter: New adapter instance to set for this class and all
        implemented classes under this category.
    :type adapter: hvac.adapters.Adapter
    """
    self._adapter = adapter
    for implemented_class in self.implemented_classes:
        attr_name = self.get_private_attr_name(implemented_class.__name__.lower())
        getattr(self, attr_name).adapter = adapter
|
def detect_encoding(data, encoding=None, fallback='latin1', is_html=False):
    '''Detect the character encoding of the data.

    Returns:
        str: The name of the codec

    Raises:
        ValueError: The codec could not be detected. This error can only
            occur if fallback is not a "lossless" codec.
    '''
    if encoding:
        encoding = normalize_codec_name(encoding)
    detector = EncodingDetector(
        data,
        override_encodings=(encoding,) if encoding else (),
        is_html=is_html,
    )
    # Try each detected candidate, then the fallback, keeping the first
    # codec that both normalizes and successfully decodes the data.
    for raw_candidate in itertools.chain(detector.encodings, (fallback,)):
        if not raw_candidate:
            continue
        candidate = normalize_codec_name(raw_candidate)
        if not candidate:
            continue
        if candidate == 'ascii' and fallback != 'ascii':
            # it's never ascii :) -- falling back on UTF-8/CP-1252/Latin-1
            # reduces the chance of failure
            continue
        if try_decoding(data, candidate):
            return candidate
    raise ValueError('Unable to detect encoding.')
|
def params(self):
    """A combined :class:`MultiDict` with values from :attr:`forms` and
    :attr:`GET`. File-uploads are not included."""
    combined = MultiDict(self.GET)
    # Form fields override query-string values of the same name.
    for field, field_value in self.forms.iterallitems():
        combined[field] = field_value
    return combined
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.