signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _get_auth_from_keyring(self):
    """Try to get credentials using `keyring <https://github.com/jaraco/keyring>`_."""
    if not keyring:
        return None
    # Prefer the user from the URL when available, else the OS login name.
    candidate_user = self.user or getpass.getuser()
    password = self._get_password_from_keyring(candidate_user)
    if password is None:
        return None
    self.user = candidate_user
    self.password = password
    return 'keyring'
def includeme(configurator):
    """Add yaml configuration utilities.

    :param pyramid.config.Configurator configurator: pyramid's app configurator
    """
    settings = configurator.registry.settings
    # Default the yaml location to the current working directory.
    yaml_locations = settings.get(
        'yaml.location', settings.get('yml.location', os.getcwd()))
    configurator.add_directive('config_defaults', config_defaults)
    configurator.config_defaults(yaml_locations)
    # Process the yaml configuration, if one was read.
    config = configurator.registry['config']
    if config:
        log.debug('Yaml config created')
        # Extend the settings object with values from the config.
        if 'configurator' in config and config.configurator:
            _extend_settings(settings, config.configurator)
        # Run nested includes declared in the config.
        if 'include' in config:
            _run_includemes(configurator, config.include)
    # Convenience property: request.config -> registry['config'].
    configurator.add_request_method(
        lambda request: request.registry['config'], name='config', property=True)
def instance_of(target_type: Optional[type] = None, raise_ex: bool = False,
                summary: bool = True, **items: Any) -> ValidationReturn:
    """Tests if the given key-value pairs (items) are instances of the given ``target_type``.

    Per default this function yields ``True`` or ``False`` depending on whether all
    items withstand the validation or not. Per default the validation/evaluation is
    short-circuit and will return as soon as an item evaluates to ``False``.
    When ``raise_ex`` is set to ``True`` the function will raise a meaningful error
    after the first item that evaluates to ``False`` (short-circuit).
    When ``summary`` is set to ``False`` a dictionary is returned containing the individual
    evaluation result of each item (non short-circuit).

    Examples:
        >>> Validator.instance_of(my_str='str', my_str2='str2', target_type=str)
        True
        >>> Validator.instance_of(my_str1='str', target_type=int)
        False
        >>> Validator.instance_of(my_str=None, raise_ex=True, target_type=str)
        Traceback (most recent call last):
        ValueError: 'my_str' (NoneType) is not an instance of type 'str'

    Args:
        raise_ex (bool, optional): If set to ``True`` an exception is raised if at least one
            item is validated to ``False`` (short-circuit).
        summary (bool, optional): If set to ``False`` the validation returns a dictionary
            containing the individual evaluation result of each item.
        target_type (type): The target type to test the values against.

    Returns:
        (boolean or dictionary): ``True`` when the value was successfully validated;
        ``False`` otherwise. If ``summary`` is ``False`` a per-item result dictionary is
        returned. If ``raise_ex`` is ``True`` a meaningful error is raised instead of
        returning ``False``.
    """
    if not target_type:
        raise ValueError("Argument 'target_type' is None")

    def _check(_, val):
        # cast() is for the type checker only; target_type is known non-None here.
        return isinstance(val, cast(type, target_type))

    def _message(name, val):
        return "'{varname}' ({actual}) is not an instance of type '{ttype}'".format(
            varname=name,
            actual=type(val).__name__,
            ttype=cast(type, target_type).__name__,
        )

    return Validator.__test_all(condition=_check, formatter=_message,
                                raise_ex=raise_ex, summary=summary, **items)
def has_file_changed(directory, checksums, filetype='genbank'):
    """Check if the checksum of a given file has changed."""
    pattern = NgdConfig.get_fileending(filetype)
    filename, expected_checksum = get_name_and_checksum(checksums, pattern)
    full_filename = os.path.join(directory, filename)
    # A file that doesn't exist yet counts as changed.
    if not os.path.isfile(full_filename):
        return True
    return md5sum(full_filename) != expected_checksum
def __setup():
    """Will be executed the first time someone calls classes_*().

    Imports every collaborator package listed in ``f311.COLLABORATORS_C`` and
    either lets the package register its own file types (``_setup_filetypes``)
    or collects its classes via ``_collect_classes``.
    """
    global __collaborators, __flag_first
    import f311
    __flag_first = False
    for pkgname in f311.COLLABORATORS_C:
        try:
            pkg = importlib.import_module(pkgname)
            a99.get_python_logger().info(
                "Imported collaborator package '{}'".format(pkgname))
            try:
                if hasattr(pkg, "_setup_filetypes"):
                    pkg._setup_filetypes()
                else:
                    _collect_classes(pkg)
                __collaborators[pkgname] = pkg
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; the error is logged and re-raised to the outer
            # handler, matching the original control flow.
            except Exception:
                a99.get_python_logger().exception(
                    "Actually, package '{}' gave error".format(pkgname))
                raise
        except Exception:
            # BUG FIX: the original format string was missing its closing
            # quote ("Failed to import package '{}").
            a99.get_python_logger().warning(
                "Failed to import package '{}'".format(pkgname))
def main_loop(self):
    """Runs the main game loop."""
    # Forever: dispatch pending pygame events, advance the game one step,
    # then sleep briefly so we don't busy-wait.
    while True:
        for event in pygame.event.get():
            self.handle_event(event)
        self.step()
        pygame.time.wait(5)
def sigma_points(self, x, P):
    """Computes the simplex sigma points for an unscented Kalman filter
    given the mean (x) and covariance (P) of the filter.
    Returns tuple of the sigma points and weights.

    Works with both scalar and array inputs:
        sigma_points(5, 9, 2)  # mean 5, covariance 9
        sigma_points([5, 2], 9*eye(2), 2)  # means 5 and 2, covariance 9I

    Parameters
    ----------
    x : An array-like object of the means of length n
        Can be a scalar if 1D.  examples: 1, [1, 2], np.array([1, 2])
    P : scalar, or np.array
        Covariance of the filter. If scalar, is treated as eye(n)*P.

    Returns
    -------
    sigmas : np.array, of size (n, n+1)
        Two dimensional array of sigma points. Each column contains all of
        the sigmas for one dimension in the problem space.
        Ordered by Xi_0, Xi_{1..n}
    """
    if self.n != np.size(x):
        raise ValueError("expected size(x) {}, but size is {}".format(self.n, np.size(x)))
    n = self.n
    if np.isscalar(x):
        x = np.asarray([x])
    x = x.reshape(-1, 1)
    # A scalar P is interpreted as an isotropic covariance eye(n)*P.
    P = np.eye(n) * P if np.isscalar(P) else np.atleast_2d(P)
    U = self.sqrt(P)
    lambda_ = n / (n + 1)
    # Build the simplex matrix Istar row by row, growing one column per row.
    Istar = np.array([[-1 / np.sqrt(2 * lambda_), 1 / np.sqrt(2 * lambda_)]])
    for d in range(2, n + 1):
        scale = 1.0 / np.sqrt(lambda_ * d * (d + 1))
        row = np.full((1, Istar.shape[1] + 1), scale)
        row[0, -1] = -d * scale
        Istar = np.r_[np.c_[Istar, np.zeros((Istar.shape[0]))], row]
    I = np.sqrt(n) * Istar
    scaled_unitary = U.dot(I)
    sigmas = self.subtract(x, -scaled_unitary)
    return sigmas.T
def init_file_mapping_store():
    """init_file_mapping_store: creates log to keep track of downloaded files

    Args: None
    Returns: None
    """
    # Make storage directory for restore files if it doesn't already exist.
    # exist_ok=True avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` guard.
    path = os.path.join(RESTORE_DIRECTORY, FILE_STORE_LOCATION)
    os.makedirs(path, exist_ok=True)
def incoordination_score(self, data_frame):
    """This method calculates the variance of the time interval in msec between taps.

    :param data_frame: the data frame
    :type data_frame: pandas.DataFrame
    :return is: incoordination score
    :rtype is: float
    """
    # Successive tap-time differences.
    # NOTE(review): [1:-1] - [0:-2] drops the final interval; confirm this is
    # intentional rather than an off-by-one ([1:] - [:-1]).
    diff = data_frame.td[1:-1].values - data_frame.td[0:-2].values
    # Variance of every second interval, scaled to milliseconds.
    inc_s = np.var(diff[np.arange(1, len(diff), 2)], dtype=np.float64) * 1000.0
    # NOTE(review): td[-1] is a label-based Series lookup; it presumes the
    # index contains -1 (or behaves positionally) — verify against callers.
    duration = math.ceil(data_frame.td[-1])
    # Despite the docstring, a (score, duration) tuple is returned.
    return inc_s, duration
def add_scope(self, scope_type, scope_name, scope_start, is_method=False):
    """We identified a scope and add it to positions."""
    # Close the previously open scope just before the new one begins.
    if self._curr is not None:
        self._curr['end'] = scope_start - 1
    new_scope = {
        'type': scope_type,
        'name': scope_name,
        'start': scope_start,
        'end': scope_start,
    }
    self._curr = new_scope
    if is_method and self._positions:
        # Methods are attached to the most recently recorded scope.
        enclosing = self._positions[-1]
        enclosing.setdefault('methods', []).append(new_scope)
    else:
        self._positions.append(new_scope)
def last_requestline(sent_data):
    """Find the last line in sent_data that can be parsed with parse_requestline."""
    # Scan backwards; the first parseable line from the end wins.
    for line in reversed(sent_data):
        try:
            parse_requestline(decode_utf8(line))
        except ValueError:
            continue
        return line
def on_raise(self, node):  # ('type', 'inst', 'tback')
    """Raise statement: note difference for python 2 and 3."""
    # Python 3 AST exposes .exc/.cause, Python 2 exposes .type/.inst.
    py3 = version_info[0] == 3
    excnode = node.exc if py3 else node.type
    msgnode = node.cause if py3 else node.inst
    out = self.run(excnode)
    msg = ' '.join(out.args)
    msg2 = self.run(msgnode)
    if msg2 not in (None, 'None'):
        msg = "%s: %s" % (msg, msg2)
    self.raise_exception(None, exc=out.__class__, msg=msg, expr='')
def get_position(self, user_id, track=False, confidence=False):
    '''a method to retrieve the latest position of a user

    :param user_id: string with id of user
    :param track: [optional] boolean to add user to self.positions
    :param confidence: [optional] boolean to include the data model confidence scores
    :return: dictionaries with position details

    NOTE: if user does not exist, then location and time are null values
        'time': 0.0,
        'location': 'location.id',
        'id': 'user_id',
        bayes: {},  # if confidence=True
        svm: None,  # if confidence=True
        rf: {}  # if confidence=True
    '''
    title = '%s.get_position' % self.__class__.__name__

    # validate inputs
    input_fields = {'user_id': user_id}
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # construct empty response
    position_details = {'location': '', 'time': 0.0, 'id': user_id}

    # construct empty position history
    # NOTE(review): position_history is never used below — dead local?
    position_history = []

    # compose request (only the single latest entry is requested: n=1)
    import requests
    url = self.endpoint + '/location'
    params = {'group': self.group_name, 'user': user_id, 'n': 1}
    response = requests.get(url, params=params)

    # ingest response
    response_details = response.json()
    from labpack.records.time import labDT
    for key in response_details['users'].keys():
        if key == user_id:
            for entry in response_details['users'][key]:
                if 'time' in entry.keys() and 'location' in entry.keys():
                    # normalize Go-style timestamp ("... +0000 UTC") to ISO
                    time_string = entry['time']
                    time_string = time_string.replace(' +0000 UTC', 'Z')
                    time_string = time_string.replace(' ', 'T')
                    time_dt = labDT.fromISO(time_string).epoch()
                    if confidence:
                        # NOTE(review): this inner loop rebinds the outer loop
                        # variable `key`; harmless today because of the break
                        # below, but fragile — worth renaming.
                        for key, value in entry.items():
                            position_details[key] = value
                    position_details['time'] = time_dt
                    position_details['location'] = entry['location']
                    break

    # optionally remember the position for this user
    if track:
        stored_position = {'location': position_details['location'], 'time': position_details['time']}
        self.positions[user_id] = stored_position

    return position_details
def d_day(self, time):
    '''Check a specific date.

    :param datetime time: the date to check
    :rtype: bool
    :returns: True when the market is open, False when it is closed
    '''
    # NOTE(review): these branches compare *types*, not values — i.e. "is
    # `time` the same type as TWTime().now (datetime) or TWTime().date
    # (date)?". They then store a fresh TWTime value, not `time` itself —
    # confirm this is intended.
    if type(time) == type(TWTime().now):
        self.twtime = TWTime().now
    elif type(time) == type(TWTime().date):
        self.twtime = TWTime().date
    else:
        # Any other type leaves self.twtime untouched.
        pass
    return self.caldata(time)
def sim(adata, tmax_realization=None, as_heatmap=False, shuffle=False, show=None, save=None):
    """Plot results of simulation.

    Parameters
    ----------
    as_heatmap : bool (default: False)
        Plot the timeseries as heatmap.
    tmax_realization : int or None (default: False)
        Number of observations in one realization of the time series. The data matrix
        adata.X consists in concatenated realizations.
    shuffle : bool, optional (default: False)
        Shuffle the data.
    save : `bool` or `str`, optional (default: `None`)
        If `True` or a `str`, save the figure. A string is appended to the
        default filename. Infer the filetype if ending on {{'.pdf', '.png', '.svg'}}.
    show : bool, optional (default: `None`)
        Show the plot, do not return axis.
    """
    from ... import utils as sc_utils
    # Resolve the realization length: explicit argument wins, then the value
    # stored on the AnnData object, then the full observation count.
    if tmax_realization is not None:
        tmax = tmax_realization
    elif 'tmax_write' in adata.uns:
        tmax = adata.uns['tmax_write']
    else:
        tmax = adata.n_obs
    n_realizations = adata.n_obs / tmax
    if not shuffle:
        if not as_heatmap:
            timeseries(adata.X, var_names=adata.var_names, xlim=[0, 1.25 * adata.n_obs], highlightsX=np.arange(tmax, n_realizations * tmax, tmax), xlabel='realizations')
        else:
            # plot time series as heatmap, as in Haghverdi et al. (2016), Fig. 1d
            timeseries_as_heatmap(adata.X, var_names=adata.var_names, highlightsX=np.arange(tmax, n_realizations * tmax, tmax))
        # Label the x axis with 1-based realization numbers.
        pl.xticks(np.arange(0, n_realizations * tmax, tmax), np.arange(n_realizations).astype(int) + 1)
        utils.savefig_or_show('sim', save=save, show=show)
    else:
        # shuffled data: subsample with a fixed seed for reproducibility
        X = adata.X
        X, rows = sc_utils.subsample(X, seed=1)
        timeseries(X, var_names=adata.var_names, xlim=[0, 1.25 * adata.n_obs], highlightsX=np.arange(tmax, n_realizations * tmax, tmax), xlabel='index (arbitrary order)')
        utils.savefig_or_show('sim_shuffled', save=save, show=show)
def readme_for(self, subpath):
    """Returns the full path for the README file for the specified
    subpath, or the root filename if subpath is None.

    Raises ReadmeNotFoundError if a README for the specified subpath
    does not exist.

    Raises werkzeug.exceptions.NotFound if the resulting path
    would fall out of the root directory.
    """
    if subpath is None:
        return self.root_filename
    # safe_join guards against escaping the root directory; normpath yields
    # a normalized OS-specific path.
    target = os.path.normpath(safe_join(self.root_directory, subpath))
    if not os.path.exists(target):
        raise ReadmeNotFoundError(target)
    # A directory resolves to the README file inside it.
    return self._find_file(target) if os.path.isdir(target) else target
def convert(cls, record):
    """Converts a single dictionary or list of dictionaries into converted list of dictionaries."""
    # Normalize to a list so both shapes share one conversion path.
    records = record if isinstance(record, list) else [record]
    return [cls._convert(r) for r in records]
def random_get_int(rnd: Optional[tcod.random.Random], mi: int, ma: int) -> int:
    """Return a random integer in the range: ``mi`` <= n <= ``ma``.

    The result is affected by calls to :any:`random_set_distribution`.

    Args:
        rnd (Optional[Random]): A Random instance, or None to use the default.
        mi (int): The lower bound of the random range, inclusive.
        ma (int): The upper bound of the random range, inclusive.

    Returns:
        int: A random integer in the range ``mi`` <= n <= ``ma``.
    """
    # Fall back to the default generator (NULL) when no Random is given.
    rng = rnd.random_c if rnd else ffi.NULL
    return int(lib.TCOD_random_get_int(rng, mi, ma))
def array_to_csv(array_like):
    # type: (np.array or Iterable or int or float) -> str
    """Convert an array like object to CSV.

    To understand better what an array like object is see:
    https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays

    Args:
        array_like (np.array or Iterable or int or float): array like object to be converted to CSV.

    Returns:
        (str): object serialized to CSV
    """
    # Serialize through an in-memory text buffer instead of a file on disk.
    buffer = StringIO()
    np.savetxt(buffer, array_like, delimiter=',', fmt='%s')
    return buffer.getvalue()
def WriteVarBytes(self, value, endian="<"):
    """Write an integer value in a space saving way to the stream.

    Read more about variable size encoding here:
    http://docs.neo.org/en-us/node/network-protocol.html#convention

    Args:
        value (bytes):
        endian (str): specify the endianness. (Default) Little endian ('<'). Use '>' for big endian.

    Returns:
        int: the number of bytes written.
    """
    # Length prefix first, then the raw payload bytes.
    self.WriteVarInt(len(value), endian)
    return self.WriteBytes(value, unhex=False)
def register(self, params, target):
    """Append a point and its target value to the known data.

    Parameters
    ----------
    params : ndarray
        a single point, with len(params) == self.dim
    target : float
        target function value

    Raises
    ------
    KeyError:
        if the point is not unique

    Notes
    -----
    runs in amortized constant time
    """
    x = self._as_array(params)
    if x in self:
        raise KeyError('Data point {} is not unique'.format(x))
    # Insert data into unique dictionary
    self._cache[_hashable(x.ravel())] = target
    # Grow the parameter matrix and target vector in lockstep.
    self._params = np.vstack((self._params, x))
    self._target = np.append(self._target, target)
def scale_edges(self, multiplier):
    '''Multiply all edges in this ``Tree`` by ``multiplier``'''
    if not isinstance(multiplier, (int, float)):
        raise TypeError("multiplier must be an int or float")
    # Nodes without an edge length (e.g. the root) are left untouched.
    for node in self.traverse_preorder():
        if node.edge_length is not None:
            node.edge_length *= multiplier
def _subscribe_resp(self, data):
    """Handle a subscribe response.

    :param data: Payload.
    :returns: State (ON/OFF)
    """
    # Ignore anything that is not a subscribe response.
    if not _is_subscribe_response(data):
        return None
    # The state byte lives at offset 23 of the payload.
    status = bytes([data[23]])
    _LOGGER.debug("Successfully subscribed to %s, state: %s", self.host, ord(status))
    return status
def base64_decode(data):
    """Base 64 decoder"""
    # Strip the padding character (the 65th entry of the alphabet).
    data = data.replace(__enc64__[64], '')
    decoded = []
    for pos, ch in enumerate(data):
        phase = pos % 4
        cur = __enc64__.index(ch)
        if phase == 0:
            # First character of each quartet only primes the next step.
            continue
        prev = __enc64__.index(data[pos - 1])
        if phase == 1:
            decoded.append(chr(prev << 2 | cur >> 4))
        elif phase == 2:
            decoded.append(chr((prev & 0x0f) << 4 | cur >> 2))
        else:  # phase == 3
            decoded.append(chr((prev & 3) << 6 | cur))
    return "".join(decoded)
def register_field(mongo_field_cls, marshmallow_field_cls, available_params=()):
    """Bind a marshmallow field to its corresponding mongoengine field

    :param mongo_field_cls: Mongoengine Field
    :param marshmallow_field_cls: Marshmallow Field
    :param available_params: List of :class marshmallow_mongoengine.conversion.params.MetaParam:
        instances to import the mongoengine field config to marshmallow
    """
    # Build the builder subclass dynamically instead of via a class statement.
    builder = type('Builder', (MetaFieldBuilder,), {
        'AVAILABLE_PARAMS': available_params,
        'MARSHMALLOW_FIELD_CLS': marshmallow_field_cls,
    })
    register_field_builder(mongo_field_cls, builder)
def substitute_str_in_file(i):
    """Input:  {
                 filename - file
                 string1  - string to be replaced
                 string2  - replace string
               }

       Output: {
                 return  - return code = 0, if successful
                                       = 16, if file not found
                                       > 0, if error
                 (error) - error text if return > 0
               }
    """
    fn = i['filename']
    # Load text file (unicode); propagate any loader error verbatim.
    r = load_text_file({'text_file': fn})
    if r['return'] > 0:
        return r
    # Perform the replacement and write the result back.
    updated = r['string'].replace(i['string1'], i['string2'])
    r = save_text_file({'text_file': fn, 'string': updated})
    if r['return'] > 0:
        return r
    return {'return': 0}
def choose_colour(self, title="Select Colour", **kwargs):
    """Show a Colour Chooser dialog

    Usage: C{dialog.choose_colour(title="Select Colour")}

    @param title: window title for the dialog
    @return: a tuple containing the exit code and colour
    @rtype: C{DialogData(int, str)}
    """
    result = self._run_kdialog(title, ["--getcolor"], kwargs)
    # On failure the colour payload is None; the exit code is kept either way.
    colour = ColourData.from_html(result.data) if result.successful else None
    return DialogData(result.return_code, colour)
def ColMap(season):
    """Returns a dictionary mapping the type of information in the RTSS play row to the
    appropriate column number.

    :param season: int for the season number
    :returns: mapping of RTSS column to info type
    :rtype: dict, keys are ``'play_num', 'per', 'str', 'time', 'event', 'desc', 'vis', 'home'``
    """
    # Guard clause: reject seasons outside the supported range.
    if not (c.MIN_SEASON <= season <= c.MAX_SEASON):
        raise ValueError("RTSSCol.MAP(season): Invalid season " + str(season))
    return {"play_num": 0, "per": 1, "str": 2, "time": 3,
            "event": 4, "desc": 5, "vis": 6, "home": 7}
def new_set(set=None, set_type=None, family='ipv4', comment=False, **kwargs):
    '''.. versionadded:: 2014.7.0

    Create new custom set

    CLI Example:

    .. code-block:: bash

        salt '*' ipset.new_set custom_set list:set
        salt '*' ipset.new_set custom_set list:set comment=True

    IPv6:
        salt '*' ipset.new_set custom_set list:set family=ipv6
    '''
    ipset_family = _IPSET_FAMILIES[family]
    # Guard clauses: validate the mandatory arguments up front.
    if not set:
        return 'Error: Set needs to be specified'
    if not set_type:
        return 'Error: Set Type needs to be specified'
    if set_type not in _IPSET_SET_TYPES:
        return 'Error: Set Type is invalid'
    # Check for required arguments of this set type.
    for item in _CREATE_OPTIONS_REQUIRED[set_type]:
        if item not in kwargs:
            return 'Error: {0} is a required argument'.format(item)
    # Assemble the ipset create command, appending supported options.
    cmd = '{0} create {1} {2}'.format(_ipset_cmd(), set, set_type)
    for item in _CREATE_OPTIONS[set_type]:
        if item not in kwargs:
            continue
        if item in _CREATE_OPTIONS_WITHOUT_VALUE:
            cmd = '{0} {1} '.format(cmd, item)
        else:
            cmd = '{0} {1} {2} '.format(cmd, item, kwargs[item])
    # Family only valid for certain set types.
    if 'family' in _CREATE_OPTIONS[set_type]:
        cmd = '{0} family {1}'.format(cmd, ipset_family)
    if comment:
        cmd = '{0} comment'.format(cmd)
    out = __salt__['cmd.run'](cmd, python_shell=False)
    # An empty command output means success.
    return out if out else True
def mean(data, n=3, **kwargs):
    """The mean forecast for the next point is the mean value of the previous ``n`` points in
    the series.

    Args:
        data (np.array): Observed data, presumed to be ordered in time.
        n (int): period over which to calculate the mean

    Returns:
        float: a single-valued forecast for the next value in the series.
    """
    window = data[-n:]
    # Don't start averaging until we've seen n points.
    if len(window) < n:
        return np.nan
    # nb: the forecast stays a float
    return np.mean(window)
def draw_circuit_canvas(circuit, hunit=HUNIT, vunit=VUNIT, rhmargin=RHMARGIN, rvmargin=RVMARGIN, rpermutation_length=RPLENGTH, draw_boxes=True, permutation_arrows=False):
    """Generate a PyX graphical representation of a circuit expression object.

    :param circuit: The circuit expression
    :type circuit: ca.Circuit
    :param hunit: The horizontal length unit, default = ``HUNIT``
    :type hunit: float
    :param vunit: The vertical length unit, default = ``VUNIT``
    :type vunit: float
    :param rhmargin: relative horizontal margin, default = ``RHMARGIN``
    :type rhmargin: float
    :param rvmargin: relative vertical margin, default = ``RVMARGIN``
    :type rvmargin: float
    :param rpermutation_length: the relative length of a permutation circuit, default = ``RPLENGTH``
    :type rpermutation_length: float
    :param draw_boxes: Whether to draw indicator boxes to denote subexpressions (Concatenation, SeriesProduct, etc.), default = ``True``
    :type draw_boxes: bool
    :param permutation_arrows: Whether to draw arrows within the permutation visualization, default = ``False``
    :type permutation_arrows: bool
    :return: A PyX canvas object that can be further manipulated or printed to an output image.
    :rtype: pyx.canvas.canvas
    """
    if not isinstance(circuit, ca.Circuit):
        raise ValueError()
    nc = circuit.cdim
    c = pyx.canvas.canvas()
    # Every branch returns (canvas, (width, height), in_port_ys, out_port_ys);
    # port y-coordinates are in units of vunit.
    if circuit is ca.CIdentity:
        # simply create a line going through
        c.stroke(pyx.path.line(0, vunit / 2, hunit, vunit / 2))
        return c, (1, 1), (.5,), (.5,)
    elif isinstance(circuit, (ca.CircuitSymbol, ca.SeriesInverse, ca.SLH, Component)):
        # draw box
        b = pyx.path.rect(rhmargin * hunit, rvmargin * vunit, hunit - 2 * rhmargin * hunit, nc * vunit - 2 * rvmargin * vunit)
        c.stroke(b)
        texstr = "${}$".format(tex(circuit) if not isinstance(circuit, ca.SLH) else r"{{\rm SLH}}_{{{}}}".format(tex(circuit.space)))
        # draw symbol name
        c.text(hunit / 2., nc * vunit / 2., texstr, [pyx.text.halign.boxcenter, pyx.text.valign.middle])
        # draw connectors at half-unit positions
        connector_positions = tuple((.5 + k) for k in range(nc))
        for y in connector_positions:
            c.stroke(pyx.path.line(0, y * vunit, rhmargin * hunit, y * vunit), [pyx.deco.earrow()])
            c.stroke(pyx.path.line(hunit * (1 - rhmargin), y * vunit, hunit, y * vunit))
        return c, (1, nc), connector_positions, connector_positions
    elif isinstance(circuit, ca.CPermutation):
        permutation = circuit.permutation
        connector_positions = tuple((k + 0.5) for k in range(nc))
        target_positions = [connector_positions[permutation[k]] for k in range(nc)]
        # draw curves connecting each input port to its permuted output port
        for y1, y2 in zip(connector_positions, target_positions):
            if permutation_arrows:
                c.stroke(_curve(0, y1, rpermutation_length, y2, hunit=hunit, vunit=vunit), [pyx.deco.earrow()])
            else:
                c.stroke(_curve(0, y1, rpermutation_length, y2, hunit=hunit, vunit=vunit))
        if draw_boxes:
            b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, rpermutation_length * hunit - rhmargin * hunit, nc * vunit - rvmargin * vunit)
            c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.green])
        return c, (rpermutation_length, nc), connector_positions, connector_positions
    elif isinstance(circuit, ca.SeriesProduct):
        assert len(circuit.operands) > 1
        # generate graphics of operand subsystems; operands are reversed so the
        # rightmost factor (applied first) is drawn leftmost
        sub_graphics = [draw_circuit_canvas(op, hunit=hunit, vunit=vunit, rhmargin=rhmargin, rvmargin=rvmargin, rpermutation_length=rpermutation_length, draw_boxes=draw_boxes, permutation_arrows=permutation_arrows) for op in reversed(circuit.operands)]
        # set up first one
        previous_csub, previous_dims, previous_c_in, previous_c_out = sub_graphics[0]
        hoffset = 0
        c.insert(previous_csub)
        hoffset += previous_dims[0]
        max_height = previous_dims[1]
        # this will later become the full series in-port coordinate tuple
        first_c_in = previous_c_in
        # now add all other operand subsystems
        for csub, dims, c_in, c_out in sub_graphics[1:]:
            assert dims[1] >= 0
            max_height = max(dims[1], max_height)
            if previous_c_out != c_in:
                # vertical port locations don't agree, map signals correspondingly
                x1 = hoffset
                x2 = hoffset + rpermutation_length
                # draw connection curves
                for y1, y2 in zip(previous_c_out, c_in):
                    c.stroke(_curve(x1, y1, x2, y2, hunit=hunit, vunit=vunit))
                hoffset += rpermutation_length
            previous_c_in, previous_c_out = c_in, c_out
            # now insert current system
            c.insert(csub, [pyx.trafo.translate(hunit * hoffset, 0)])
            hoffset += dims[0]
        if draw_boxes:
            b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, hoffset * hunit - 1. * rhmargin * hunit, max_height * vunit + rvmargin * vunit)
            c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.red])
        return c, (hoffset, max_height), first_c_in, c_out
    elif isinstance(circuit, ca.Concatenation):
        voffset = 0
        total_cin, total_cout = (), ()
        widths = []
        # stores the component width for each channel(!)
        # generate all operand subsystem graphics and stack them vertically
        for op in circuit.operands:
            csub, dims, c_in, c_out = draw_circuit_canvas(op, hunit=hunit, vunit=vunit, rhmargin=rhmargin, rvmargin=rvmargin, rpermutation_length=rpermutation_length, draw_boxes=draw_boxes, permutation_arrows=permutation_arrows)
            # add appropriate offsets to vertical port coordinates
            total_cin += tuple(y + voffset for y in c_in)
            total_cout += tuple(y + voffset for y in c_out)
            c.insert(csub, [pyx.trafo.translate(0, vunit * voffset)])
            # keep track of width in all channels for this subsystem
            widths += [dims[0]] * op.cdim
            voffset += dims[1]
        max_width = max(widths)
        if max_width > min(widths):
            # components differ in width => we must extend the narrow component output lines
            for x, y in zip(widths, total_cout):
                if x == max_width:
                    continue
                ax, ax_to = x * hunit, max_width * hunit
                ay = y * vunit
                c.stroke(pyx.path.line(ax, ay, ax_to, ay))
        if draw_boxes:
            b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, max_width * hunit - 1. * rhmargin * hunit, voffset * vunit - rvmargin * vunit)
            c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.blue])
        return c, (max_width, voffset), total_cin, total_cout
    elif isinstance(circuit, ca.Feedback):
        # generate and insert graphics of subsystem
        csub, dims, c_in, c_out = draw_circuit_canvas(circuit.operand, hunit=hunit, vunit=vunit, rhmargin=rhmargin, rvmargin=rvmargin, rpermutation_length=rpermutation_length, draw_boxes=draw_boxes, permutation_arrows=permutation_arrows)
        c.insert(csub, [pyx.trafo.translate(hunit * .5 * rhmargin, 0)])
        width, height = dims
        # create feedback loop: route the fed-back output over the top and
        # back into the fed-back input
        fb_out, fb_in = circuit.out_in_pair
        out_coords = (width + .5 * rhmargin) * hunit, c_out[fb_out] * vunit
        in_coords = .5 * rhmargin * hunit, c_in[fb_in] * vunit
        upper_y = (height) * vunit
        feedback_line = pyx.path.path(pyx.path.moveto(*out_coords), pyx.path.lineto(out_coords[0], upper_y), pyx.path.lineto(in_coords[0], upper_y), pyx.path.lineto(*in_coords))
        c.stroke(feedback_line)
        # remove feedback port coordinates
        new_c_in = c_in[:fb_in] + c_in[fb_in + 1:]
        new_c_out = c_out[:fb_out] + c_out[fb_out + 1:]
        # extend port connectors a little bit outward,
        # such that the feedback loop is not at the edge anymore
        for y in new_c_in:
            c.stroke(pyx.path.line(0, y * vunit, .5 * rhmargin * hunit, y * vunit))
        for y in new_c_out:
            c.stroke(pyx.path.line((width + .5 * rhmargin) * hunit, y * vunit, (width + rhmargin) * hunit, y * vunit))
        return c, (width + rhmargin, height + rvmargin), new_c_in, new_c_out
    raise Exception('Visualization not implemented for type %s' % type(circuit))
def detect_arbitrary_send(self, contract):
    """Detect functions of *contract* that perform an arbitrary send.

    Args:
        contract (Contract): the contract to analyse.
    Returns:
        list of ``(Function, list(Node))`` pairs, one per offending function.
    """
    findings = []
    # Only inspect functions declared directly on this contract,
    # not inherited ones.
    own_functions = (fn for fn in contract.functions if fn.contract == contract)
    for fn in own_functions:
        flagged_nodes = self.arbitrary_send(fn)
        if flagged_nodes:
            findings.append((fn, flagged_nodes))
    return findings
def show(self, progress, msg=None):
    """Show the progress bar and set it to `progress`.

    Args:
        progress (tuple/int/float): either a ``(done, len(all))`` pair or
            the direct percentage value as int/float.
        msg (str, default None): alternative background description.
    """
    # Un-hide the whole widget on first use.
    if self.whole_tag.style.display == "none":
        self.whole_tag.style.display = "block"

    # Allow either a direct percentage value or a (done, len(all)) pair.
    if isinstance(progress, (int, float)):
        percentage = progress
    else:
        percentage = self.__class__._compute_percentage(progress)

    # Striped/animated bar while in progress; at 100 % the label is forced
    # to "Hotovo" ("Done"), overriding any caller-supplied `msg`.
    self.tag.class_name = "progress-bar"
    if percentage < 100:
        self.tag.class_name += " progress-bar-striped active"
    else:
        msg = "Hotovo"

    # Reflect the percentage in the progress bar itself.
    self.tag.aria_valuemin = percentage
    self.tag.style.width = "{}%".format(percentage)

    if msg:
        self.tag.text = msg
def check_var_coverage_content_type(self, ds):
    '''Check coverage content type against valid ISO-19115-1 codes.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :returns: list of Result objects, one per geophysical variable
    '''
    results = []
    for variable in cfutil.get_geophysical_variables(ds):
        msgs = []
        ctype = getattr(ds.variables[variable], 'coverage_content_type', None)
        check = ctype is not None
        if not check:
            # Attribute missing entirely: report and move on.
            msgs.append("coverage_content_type")
            results.append(Result(BaseCheck.HIGH, check,
                                  self._var_header.format(variable), msgs))
            continue
        # ISO 19115-1 codes
        valid_ctypes = {'image', 'thematicClassification', 'physicalMeasurement',
                        'auxiliaryInformation', 'qualityInformation',
                        'referenceInformation', 'modelResult', 'coordinate'}
        check = ctype in valid_ctypes
        if not check:
            # BUG FIX: the original format string had a single %s placeholder
            # but was given a two-element tuple, which raises TypeError at
            # runtime; it also reported the check as passing (check stayed
            # True) even for an invalid code.
            msgs.append("coverage_content_type in \"%s\" not in %s"
                        % (variable, sorted(valid_ctypes)))
        results.append(Result(BaseCheck.HIGH, check,  # append to list
                              self._var_header.format(variable), msgs))
    return results
def _request ( self , method , identifier , key = None , value = None ) :
"""Perform request with identifier .""" | params = { 'id' : identifier }
if key is not None and value is not None :
params [ key ] = value
result = yield from self . _transact ( method , params )
return result . get ( key ) |
def setComponent(self, component):
    """Re-implemented from :meth:`AbstractComponentWidget<sparkle.gui.stim.abstract_component_editor.AbstractComponentWidget.setComponent>`

    Pushes each auto-detail field's current state value into its input
    widget, then stores the component.
    """
    state = component.stateDict()
    for field_name in component.auto_details():
        self.inputWidgets[field_name].setValue(state[field_name])
    self._component = component
def log(self):
    """Return recent log entries as a string (plain text, no HTML)."""
    return self.system.request_service('LogStoreService').lastlog(html=False)
def module(self):
    """The module in which the Function is defined.

    Python equivalent of the CLIPS deffunction-module command.
    """
    # Resolve the defmodule by name via the CLIPS C API, then wrap it.
    name = ffi.string(lib.EnvDeffunctionModule(self._env, self._fnc))
    return Module(self._env, lib.EnvFindDefmodule(self._env, name))
def cart2spher(x, y, z):
    """Cartesian to Spherical coordinate conversion.

    Returns ``(r, theta, phi)`` where ``theta`` is the elevation angle
    measured from the x-y plane and ``phi`` the azimuth.
    """
    horizontal = np.hypot(x, y)
    return (
        np.hypot(horizontal, z),   # radial distance
        np.arctan2(z, horizontal), # elevation above the x-y plane
        np.arctan2(y, x),          # azimuth
    )
def process_resource(self, req, resp, resource, uri_kwargs=None):
    """Process resource after routing to it.

    This is basic falcon middleware handler.

    Args:
        req (falcon.Request): request object
        resp (falcon.Response): response object
        resource (object): resource object matched by falcon router
        uri_kwargs (dict): additional keyword argument from uri template.
            For ``falcon<1.0.0`` this is always ``None``
    """
    # Another middleware already authenticated this request — nothing to do.
    if 'user' in req.context:
        return

    identifier = self.identify(req, resp, resource, uri_kwargs)
    user = self.try_storage(identifier, req, resp, resource, uri_kwargs)

    if user is not None:
        req.context['user'] = user
    elif self.challenge is not None:
        # Authentication did not succeed: advertise this backend's
        # challenge so a later stage can build the response.
        req.context.setdefault('challenges', list()).append(self.challenge)
def connect_patch_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs):  # noqa: E501
    """connect_patch_namespaced_pod_proxy_with_path  # noqa: E501

    connect PATCH requests to proxy of Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_patch_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to pod.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    delegate = self.connect_patch_namespaced_pod_proxy_with_path_with_http_info
    if kwargs.get('async_req'):
        # Async: hand back the request thread directly.
        return delegate(name, namespace, path, **kwargs)  # noqa: E501
    (data) = delegate(name, namespace, path, **kwargs)  # noqa: E501
    return data
def _copy_jsonsafe(value):
    """Deep-copy a value into JSON-safe types."""
    # Scalars pass through untouched. Strings must be handled before the
    # Iterable branch below, since strings are themselves iterable.
    if isinstance(value, six.string_types + (numbers.Number,)):
        return value
    if isinstance(value, collections_abc.Mapping):
        # Keys are coerced to text; values converted recursively.
        return {six.text_type(key): _copy_jsonsafe(val)
                for key, val in value.items()}
    if isinstance(value, collections_abc.Iterable):
        return [_copy_jsonsafe(item) for item in value]
    if value is None:  # This doesn't happen often for us.
        return None
    # Anything else is stringified as a last resort.
    return six.text_type(value)
def POP(self, params):
    """POP {RPopList}

    Pop from the stack into the list of registers.
    List must contain only low registers or PC.
    """
    # TODO verify pop order
    # TODO pop list is comma separate, right?
    # TODO what registeres are allowed to POP to? Low Registers and PC
    # TODO need to support ranges, ie {R2, R5-R7}
    # TODO PUSH should reverse the list, not POP
    raw_names = self.get_one_parameter(r'\s*{(.*)}(.*)', params).split(',')
    raw_names.reverse()
    pop_list = [name.strip() for name in raw_names]

    def POP_func():
        for register in pop_list:
            # Assemble 4 little-endian bytes from the stack top.
            value = 0
            for i in range(4):  # TODO use memory width instead of constants
                value |= self.memory[self.register['SP'] + i] << (8 * i)
            self.register[register] = value
            self.register['SP'] += 4
    return POP_func
def _get_longest_hit_at_ref_start ( self , nucmer_hits , hits_to_exclude = None ) :
'''Input : list of nucmer hits to the same reference . Returns the longest hit to the start of the reference , or None if there is no such hit''' | if hits_to_exclude is None :
hits_to_exclude = set ( )
hits_at_start = [ hit for hit in nucmer_hits if self . _is_at_ref_start ( hit ) and hit not in hits_to_exclude ]
return self . _get_longest_hit_by_ref_length ( hits_at_start ) |
def draw_texture(tex):
    """Draw a 2D texture to the current viewport

    Parameters
    ----------
    tex : instance of Texture2D
        The texture to draw.
    """
    from .program import Program
    # Full-viewport quad in clip coordinates, rendered as a triangle strip.
    # Texture coordinates are flipped vertically to match image orientation.
    prog = Program(vert_draw, frag_draw)
    prog['u_texture'] = tex
    prog['a_position'] = [[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]]
    prog['a_texcoord'] = [[0., 1.], [0., 0.], [1., 1.], [1., 0.]]
    prog.draw('triangle_strip')
def select_delim(self, delim):
    '''Select desired delimeter

    Args:
        delim: The delimeter character you want.
    Returns:
        None
    Raises:
        RuntimeError: Delimeter too long.'''
    size = len(delim)
    if size > 20:
        raise RuntimeError('Delimeter too long')
    # BUG FIX: use integer division. On Python 3 ``size / 10`` yields a
    # float and chr() then raises TypeError; the device expects the size
    # encoded as a tens digit and a units digit.
    tens = size // 10
    units = size % 10
    self.send('^SS' + chr(tens) + chr(units))
def components(self, obj, fmt=None, comm=True, **kwargs):
    """Returns data and metadata dictionaries containing HTML and JS
    components to include render in app, notebook, or standalone
    document. Depending on the backend the fmt defines the format
    embedded in the HTML, e.g. png or svg. If comm is enabled the
    JS code will set up a Websocket comm channel using the
    currently defined CommManager.
    """
    # Accept either an already-built Plot/NdWidget or a raw object that
    # still needs validation (which may also resolve the format).
    if isinstance(obj, (Plot, NdWidget)):
        plot = obj
    else:
        plot, fmt = self._validate(obj, fmt)
    widget_id = None
    data, metadata = {}, {}
    if isinstance(plot, NdWidget):
        # Widgets render themselves and carry their own ids.
        js, html = plot(as_script=True)
        plot_id = plot.plot_id
        widget_id = plot.id
    else:
        html, js = self._figure_data(plot, fmt, as_script=True, **kwargs)
        plot_id = plot.id
        if comm and plot.comm is not None and self.comm_msg_handler:
            # Wire up the comm channel: wrap the HTML in the comm template
            # and append the comm-specific JS to the figure JS.
            msg_handler = self.comm_msg_handler.format(plot_id=plot_id)
            html = plot.comm.html_template.format(init_frame=html, plot_id=plot_id)
            comm_js = plot.comm.js_template.format(msg_handler=msg_handler, comm_id=plot.comm.id, plot_id=plot_id)
            js = '\n'.join([js, comm_js])
        html = "<div id='%s' style='display: table; margin: 0 auto;'>%s</div>" % (plot_id, html)
    # HV_DOC_HTML suppresses the embed wrapper (presumably used when
    # building the docs — TODO confirm).
    if not os.environ.get('HV_DOC_HTML', False) and js is not None:
        js = embed_js.format(widget_id=widget_id, plot_id=plot_id, html=html) + js
    data['text/html'] = html
    if js:
        data[MIME_TYPES['js']] = js
        data[MIME_TYPES['jlab-hv-exec']] = ''
        metadata['id'] = plot_id
        # Keep the plot alive so later comm messages can reach it.
        self._plots[plot_id] = plot
    return (data, {MIME_TYPES['jlab-hv-exec']: metadata})
def _prepare_aggregate(cls, *args, **kw):
    """Generate and execute an aggregate query pipline using combined plain
    and parametric query generation.

    Additionally, performs argument case normalization, refer to the
    `_prepare_query` method's docstring.

    This provides a find-like interface for generating aggregate pipelines
    with a few shortcuts that make aggregates behave more like "find,
    optionally with more steps". Positional arguments that are not Filter
    instances are assumed to be aggregate pipeline stages.

    https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.aggregate
    """
    # Split the positional arguments into filter fragments and raw stages,
    # preserving their relative order within each group.
    fragments = [arg for arg in args if isinstance(arg, Filter)]
    raw_stages = [arg for arg in args if not isinstance(arg, Filter)]
    cls, collection, query, options = cls._prepare_query(
        cls.AGGREGATE_MAPPING, cls.AGGREGATE_OPTIONS, *fragments, **kw)
    pipeline = []
    if query:
        pipeline.append({'$match': query})
    pipeline.extend(raw_stages)
    # Convert the find-like options to stages with the correct semantics.
    if 'sort' in options:
        pipeline.append({'$sort': odict(options.pop('sort'))})
    if 'skip' in options:
        # Note: sort + limit memory optimization invalidated when skipping.
        pipeline.append({'$skip': options.pop('skip')})
    if 'limit' in options:
        pipeline.append({'$limit': options.pop('limit')})
    if 'projection' in options:
        pipeline.append({'$project': options.pop('projection')})
    return cls, collection, pipeline, options
def register(self, slug, bundle, order=1, title=None):
    """Registers the bundle for a certain slug.

    If a slug is already registered, this will raise AlreadyRegistered.

    :param slug: The slug to register.
    :param bundle: The bundle instance being registered.
    :param order: An integer that controls where this bundle's dashboard
        links appear in relation to others.
    :param title: Optional display title recorded for the slug.
    """
    if slug in self._registry:
        raise AlreadyRegistered('The url %s is already registered' % slug)
    # Instantiate the admin class to save in the registry.
    self._registry[slug] = bundle
    self._order[slug] = order
    if title:
        self._titles[slug] = title
    bundle.set_admin_site(self)
def close(self):
    """Destructor for this audio interface. Waits the threads to finish their
    streams, if desired.
    """
    with self.halting:  # Avoid simultaneous "close" threads
        if not self.finished:  # Ignore all "close" calls, but the first,
            self.finished = True  # and any call to play would raise ThreadError
            # Closes all playing AudioThread instances
            while True:
                with self.lock:  # Ensure there's no other thread messing around
                    try:
                        # Peek (never pop) the head of the list; each thread
                        # removes itself when it terminates.
                        thread = self._threads[0]  # Needless to say: pop = deadlock
                    except IndexError:  # Empty list
                        break  # No more threads
                # NOTE(review): when self.wait is set the thread is allowed
                # to drain naturally; otherwise it is stopped first.
                if not self.wait:
                    thread.stop()
                thread.join()
            # Closes all recording RecStream instances
            while self._recordings:
                recst = self._recordings[-1]
                recst.stop()
                recst.take(inf)  # Ensure it'll be closed
            # Finishes
            assert not self._pa._streams  # No stream should survive
            self._pa.terminate()
def status(self, job_id):
    """Gets the status of a previously-submitted job.

    Returns a ``(state, directory, result)`` tuple; ``result`` is only
    filled in for finished jobs.
    """
    check_jobid(job_id)
    queue = self._get_queue()
    if queue is None:
        raise QueueDoesntExist
    command = '%s %s' % (shell_escape(queue / 'commands/status'), job_id)
    ret, output = self._call(command, True)
    if ret == 0:
        # Done: the remote script prints the job directory then the result.
        directory, result = output.splitlines()
        return RemoteQueue.JOB_DONE, PosixPath(directory), result.decode('utf-8')
    if ret == 2:
        # Still running: only the directory is printed.
        directory = output.splitlines()[0]
        return RemoteQueue.JOB_RUNNING, PosixPath(directory), None
    if ret == 3:
        raise JobNotFound
    raise RemoteCommandFailure(command="commands/status", ret=ret)
def selected_purpose(self):
    """Obtain the layer purpose selected by user.

    :returns: Metadata of the selected layer purpose.
    :rtype: dict, None
    """
    selected = self.lstCategories.currentItem()
    try:
        return definition(selected.data(QtCore.Qt.UserRole))
    except (AttributeError, NameError):
        # No current item (selected is None) triggers AttributeError.
        return None
def search(self):
    """Execute solr search query"""
    params = self.solr_params()
    logging.info("PARAMS=" + str(params))
    docs = self.solr.search(**params)
    logging.info("Docs found: {}".format(docs.hits))
    return self._process_search_results(docs)
def find_file_paths(self):
    """Recursively collect all files in the upload directory.

    Returns a list of ``(relative_path, joined_path)`` tuples, where
    ``relative_path`` is relative to ``self.directory``.

    (DOC FIX: the original docstring described this as a generator
    function, but it has always built and returned a list.)
    """
    paths = []
    for root, dirs, files in os.walk(self.directory, topdown=True):
        rel_path = os.path.relpath(root, self.directory)
        for name in files:
            # Files directly under the root keep just their name.
            if rel_path == '.':
                paths.append((name, os.path.join(root, name)))
            else:
                paths.append((os.path.join(rel_path, name),
                              os.path.join(root, name)))
    return paths
def page(self, beta=values.unset, friendly_name=values.unset, phone_number=values.unset, origin=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset):
    """Retrieve a single page of LocalInstance records from the API.
    Request is executed immediately

    :param bool beta: Whether to include new phone numbers
    :param unicode friendly_name: A string that identifies the resources to read
    :param unicode phone_number: The phone numbers of the resources to read
    :param unicode origin: Include phone numbers based on their origin. By default, phone numbers of all origin are included.
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50
    :returns: Page of LocalInstance
    :rtype: twilio.rest.api.v2010.account.incoming_phone_number.local.LocalPage
    """
    # values.of() strips unset entries before the request is made.
    query = values.of({
        'Beta': beta,
        'FriendlyName': friendly_name,
        'PhoneNumber': phone_number,
        'Origin': origin,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page('GET', self._uri, params=query)
    return LocalPage(self._version, response, self._solution)
def get_entity_text_for_relation(self, node, relation):
    """Looks for an edge from node to some other node, such that the edge is
    annotated with the given relation. If there exists such an edge, and
    the node at the other edge is an entity, return that entity's text.
    Otherwise, returns None.
    """
    neighbour = self.get_related_node(node, relation)
    if neighbour is None:
        return None
    attrs = self.G.node[neighbour]
    # Only entity (non-event) nodes carry usable text.
    return None if attrs['is_event'] else attrs['text']
def delete(self):
    """Deletes the bucket.

    Raises:
        Exception if there was an error deleting the bucket.
    """
    # The original wrapped the call in ``except Exception as e: raise e``,
    # a no-op wrapper that only muddied the traceback; letting the API
    # exception propagate directly is equivalent.
    if self.exists():
        self._api.buckets_delete(self._name)
def django_table_names(self, only_existing=False, **kwargs):
    """Returns a list of all table names that have associated cqlengine
    models and are present in settings.INSTALLED_APPS.

    NOTE(review): ``only_existing`` and ``kwargs`` are accepted for API
    compatibility but are not consulted here.
    """
    return [
        model.column_family_name(include_keyspace=False)
        for models in self.cql_models.values()
        for model in models
    ]
def construct_survival_curves(hazard_rates, timelines):
    """Given hazard rates, reconstruct the survival curves

    Parameters
    ----------
    hazard_rates : (n, t) array
    timelines : (t,) the observational times

    Returns
    -------
    t : survial curves, (n, t) array
    """
    # S(t) = exp(-H(t)), where H is the cumulative hazard.
    cumulative = cumulative_integral(hazard_rates.values, timelines)
    return pd.DataFrame(np.exp(-cumulative), index=timelines)
def submit(self, service_id: str, data: dict = None):
    """Submit a task to the starter service.

    :param service_id: service ID, e.g. "meta.docs_generate"
    :param data: task payload
    :return: dict
    """
    # Stub mode: log instead of actually posting the task.
    if self.__app.starter_api_url == 'http://STUB_URL':
        self.log.info('STARTER DEV. Задача условно поставлена', {"service_id": service_id, "data": data, })
        return
    task = {"serviceId": service_id, "data": data}
    url = self.__app.starter_api_url + '/services/' + service_id + '/tasks'
    last_error = None
    for _attempt in range(self.max_retries):
        try:
            resp = requests.post(url=url, data=json.dumps(task), headers=self.headers, timeout=15)
            try:
                return json.loads(resp.text)
            except Exception:
                raise IOError("Starter response read error: " + resp.text)
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err:
            # Retry after a short pause on connection problems.
            last_error = err
            sleep(3)
    raise last_error
def get(self, typ="", only_active=True):
    """Return a list of keys. Either all keys or only keys of a specific type

    :param typ: Type of key (rsa, ec, oct, ..)
    :param only_active: Skip keys that carry an ``inactive_since`` mark.
    :return: If typ is undefined all the keys as a dictionary
        otherwise the appropriate keys in a list
    """
    self._uptodate()
    if typ:
        # Match the kty field case-insensitively (both spellings occur).
        wanted = (typ.lower(), typ.upper())
        keys = [key for key in self._keys if key.kty in wanted]
    else:
        keys = self._keys
    if not only_active:
        return keys
    return [key for key in keys if not key.inactive_since]
def start_daemon_thread(*args, **kwargs):
    """Create, mark as daemon, and start a ``threading.Thread``.

    All arguments are forwarded to the ``threading.Thread`` constructor;
    the already-started thread object is returned.
    """
    worker = threading.Thread(*args, **kwargs)
    worker.daemon = True
    worker.start()
    return worker
def get_default_config_help(self):
    """Returns the help text for the configuration options for this handler"""
    config = super(HostedGraphiteHandler, self).get_default_config_help()
    # Extend the inherited help with this handler's own options.
    config.update({
        'apikey': 'Api key to use',
        'host': 'Hostname',
        'port': 'Port',
        'proto': 'udp or tcp',
        'timeout': '',
        'batch': 'How many to store before sending to the graphite server',
        'max_backlog_multiplier': 'how many batches to store before trimming',  # NOQA
        'trim_backlog_multiplier': 'Trim down how many batches',
    })
    return config
def iter_setup_packages(srcdir, packages):
    """A generator that finds and imports all of the ``setup_package.py``
    modules in the source packages.

    Returns
    -------
    modgen : generator
        A generator that yields (modname, mod), where `mod` is the module and
        `modname` is the module name for the ``setup_package.py`` modules.
    """
    for packagename in packages:
        # Turn the dotted package name into a filesystem path under srcdir.
        package_path = os.path.join(srcdir, *packagename.split('.'))
        setup_package = os.path.relpath(
            os.path.join(package_path, 'setup_package.py'))
        if os.path.isfile(setup_package):
            yield import_file(setup_package,
                              name=packagename + '.setup_package')
def rows(self, csv=False):
    """Returns each row based on the selected criteria.

    Yields one list per form entry. When ``csv`` is False the entry ID is
    prepended to each row and file values are rendered as HTML anchors.
    """
    # Store the index of each field against its ID for building each
    # entry row with columns in the correct order. Also store the IDs of
    # fields with a type of FileField or Date-like for special handling of
    # their values.
    field_indexes = {}
    file_field_ids = []
    date_field_ids = []
    for field in self.form_fields:
        if self.cleaned_data["field_%s_export" % field.id]:
            field_indexes[field.id] = len(field_indexes)
            if field.is_a(fields.FILE):
                file_field_ids.append(field.id)
            elif field.is_a(*fields.DATES):
                date_field_ids.append(field.id)
    num_columns = len(field_indexes)
    include_entry_time = self.cleaned_data["field_0_export"]
    if include_entry_time:
        # Extra trailing column reserved for the entry timestamp.
        num_columns += 1
    # Get the field entries for the given form and filter by entry_time
    # if specified.
    field_entries = FieldEntry.objects.filter(entry__form=self.form).order_by("-entry__id").select_related("entry")
    if self.cleaned_data["field_0_filter"] == FILTER_CHOICE_BETWEEN:
        time_from = self.cleaned_data["field_0_from"]
        time_to = self.cleaned_data["field_0_to"]
        if time_from and time_to:
            field_entries = field_entries.filter(entry__entry_time__range=(time_from, time_to))
    # Loop through each field value ordered by entry, building up each
    # entry as a row. Use the ``valid_row`` flag for marking a row as
    # invalid if it fails one of the filtering criteria specified.
    current_entry = None
    current_row = None
    valid_row = True
    for field_entry in field_entries:
        if field_entry.entry_id != current_entry:
            # New entry, write out the current row and start a new one.
            if valid_row and current_row is not None:
                if not csv:
                    current_row.insert(0, current_entry)
                yield current_row
            current_entry = field_entry.entry_id
            current_row = [""] * num_columns
            valid_row = True
            if include_entry_time:
                current_row[-1] = field_entry.entry.entry_time
        field_value = field_entry.value or ""
        # Check for filter.
        field_id = field_entry.field_id
        filter_type = self.cleaned_data.get("field_%s_filter" % field_id)
        filter_args = None
        if filter_type:
            if filter_type == FILTER_CHOICE_BETWEEN:
                f, t = "field_%s_from" % field_id, "field_%s_to" % field_id
                filter_args = [self.cleaned_data[f], self.cleaned_data[t]]
            else:
                field_name = "field_%s_contains" % field_id
                filter_args = self.cleaned_data[field_name]
                if filter_args:
                    filter_args = [filter_args]
        if filter_args:
            # Convert dates before checking filter.
            if field_id in date_field_ids:
                y, m, d = field_value.split(" ")[0].split("-")
                dte = date(int(y), int(m), int(d))
                filter_args.append(dte)
            else:
                filter_args.append(field_value)
            filter_func = FILTER_FUNCS[filter_type]
            if not filter_func(*filter_args):
                valid_row = False
        # Create download URL for file fields.
        if field_entry.value and field_id in file_field_ids:
            url = reverse("admin:form_file", args=(field_entry.id,))
            field_value = self.request.build_absolute_uri(url)
            if not csv:
                parts = (field_value, split(field_entry.value)[1])
                field_value = mark_safe("<a href=\"%s\">%s</a>" % parts)
        # Only use values for fields that were selected.
        try:
            current_row[field_indexes[field_id]] = field_value
        except KeyError:
            pass
    # Output the final row.
    if valid_row and current_row is not None:
        if not csv:
            current_row.insert(0, current_entry)
        yield current_row
def topk(arg, k, by=None):
    """Build a TopK filter expression over ``arg``.

    Returns
    -------
    topk : TopK filter expression
    """
    return ops.TopK(arg, k, by=by).to_expr()
def create_signed_entity_descriptor(entity_descriptor, security_context, valid_for=None):
    """:param entity_descriptor: the entity descriptor to sign
    :param security_context: security context for the signature
    :param valid_for: number of hours the metadata should be valid
    :return: the signed XML document

    :type entity_descriptor: saml2.md.EntityDescriptor]
    :type security_context: saml2.sigver.SecurityContext
    :type valid_for: Optional[int]
    """
    # Stamp an expiry on the descriptor before signing, if requested.
    if valid_for:
        entity_descriptor.valid_until = in_a_while(hours=valid_for)
    signed_desc, xmldoc = sign_entity_descriptor(entity_descriptor, None, security_context)
    if not valid_instance(signed_desc):
        raise ValueError("Could not construct valid EntityDescriptor tag")
    return xmldoc
def codec_param(self):
    """string"""
    # Codec parameter string: objectTypeIndication in hex, optionally
    # followed by the audio object type (presumably for MIME codec
    # parameters — TODO confirm against spec).
    parts = [u".%X" % self.objectTypeIndication]
    info = self.decSpecificInfo
    if info is not None:
        parts.append(u".%d" % info.audioObjectType)
    return u"".join(parts)
def _R2deriv ( self , R , z , phi = 0. , t = 0. ) :
"""NAME :
_ Rderiv
PURPOSE :
evaluate the second radial derivative for this potential
INPUT :
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT :
the second radial derivative
HISTORY :
2013-09-08 - Written - Bovy ( IAS )""" | r2 = R ** 2. + z ** 2.
rb = nu . sqrt ( r2 + self . b2 )
return - ( - self . b ** 3. - self . b * z ** 2. + ( 2. * R ** 2. - z ** 2. - self . b ** 2. ) * rb ) / rb ** 3. / ( self . b + rb ) ** 3. |
def base_query(cls, db_session=None):
    """returns base query for specific service

    :param db_session: optional session; resolved via ``get_db_session``
    :return: query
    """
    return get_db_session(db_session).query(cls.model)
def make_script(python_script, target_path='', target_name='', user=False,
                make_link=False, force=False, remove=False,
                no_check_shebang=False, no_check_path=False):
    """v{VERSION}
    This script makes a command line script out of a python script.
    For example, 'clingon script.py' will copy or symlink script.py
    (without the .py extension) to:
    - <python-exe-path>/script (default),
    - <target-path>/script if --target-path is specfied,
    - ~/bin/script if --user is specified,
    and then set the copy/symlink as executable.
    See https://github.com/francois-vincent/clingon
    """
    # --user and an explicit target path are mutually exclusive.
    if user and target_path:
        raise RunnerErrorWithUsage("You cannot specify --path and --user at the same time")
    source = os.path.abspath(python_script)
    # Destination directory priority: ~/bin (user), explicit target_path,
    # else the directory of the running Python executable.
    dest_dir = os.path.normpath(os.path.expanduser('~/bin' if user else target_path or os.path.dirname(sys.executable)))
    # Target name defaults to the source file name without its extension.
    target = os.path.join(dest_dir, target_name if target_name else os.path.splitext(os.path.basename(source))[0])
    target_exists = os.path.exists(target)
    if remove:
        if target_exists:
            os.unlink(target)
            print("Script '%s' removed" % target)
        else:
            print("Script '%s' not found, nothing to do" % target)
        return
    if not os.path.exists(source):
        raise RunnerError("Could not find source '%s', aborting" % source)
    if DEBUG:
        print('Source, target:', source, target)
    if target_exists:
        def same_file_same_type(source, target):
            # A target matches only when it is the same kind of artifact
            # (symlink vs copy) as the one we are about to create.
            if os.path.islink(target):
                return make_link and os.path.samefile(source, target)
            else:
                return not make_link and (open(source, 'rb').read() == open(target, 'rb').read())
        if same_file_same_type(source, target):
            print("Target '%s' already created, nothing to do" % target)
            return
        elif not force:
            raise RunnerError("Target '%s' already exists, aborting" % target)
    if not os.path.isdir(dest_dir):
        # Create directory
        try:
            os.makedirs(dest_dir)
        except OSError:
            raise RunnerError("Target folder '%s' does not exist, and cannot create it, aborting" % dest_dir)
    if not no_check_shebang:
        # Check that file starts with python shebang (#!/usr/bin/env python)
        first_line = open(source).readline()
        if not ('#!' in first_line and 'python' in first_line):
            raise RunnerError("Your script's first line should start with '#!' and contain 'python', aborting")
    # Now it's time to copy or symlink file
    if target_exists:
        os.unlink(target)
    import stat
    # Execute bits for owner and group; world-execute only when run as root.
    perms = stat.S_IXUSR | stat.S_IXGRP
    if os.getuid() == 0:
        perms |= stat.S_IXOTH
    if make_link:
        st = os.stat(source)
        if st.st_mode & perms != perms:
            # The symlink's permissions follow its target, so the source
            # itself must be made executable.
            os.chmod(source, st.st_mode | perms)
        os.symlink(source, target)
        st = os.stat(target)
        os.chmod(target, st.st_mode | perms)
        print('Script %s has been symlinked to %s' % (source, target))
    else:
        import shutil
        shutil.copyfile(source, target)
        st = os.stat(target)
        os.chmod(target, st.st_mode | perms)
        print('Script %s has been copied to %s' % (source, target))
    # check PATH and advise user to update it if relevant
    path_env = os.environ.get('PATH')
    if not no_check_path and (not path_env or dest_dir not in path_env):
        print("Please add your local bin path [%s] to your environment PATH" % dest_dir)
def addProxyObject(self, obj, proxied):
    """Stores a reference to the unproxied and proxied versions of C{obj} for
    later retrieval.

    @since: 0.6
    """
    # Map both directions so either version can be looked up by identity.
    mapping = self.proxied_objects
    mapping[id(obj)] = proxied
    mapping[id(proxied)] = obj
def ampliconclear(self):
    """Clear previously created amplicon files to prepare for the appending
    of data to fresh files"""
    for sample in self.metadata:
        # Set the name of the amplicon FASTA file
        analysis = sample[self.analysistype]
        analysis.ampliconfile = os.path.join(
            analysis.outputdir, '{sn}_amplicons.fa'.format(sn=sample.name))
        try:
            os.remove(analysis.ampliconfile)
        except IOError:
            # A missing file is fine; we only need a clean slate.
            pass
def load(self, fname):
    """Open the HDF5 repository file ``fname`` and attach it to this object.

    ..todo:: REPO.load docstring

    :param fname: path to the HDF5 file (must be a str)
    :raises RepoError: if a repository is already open
    :raises TypeError: if ``fname`` is not a string
    """
    # Imports
    import h5py as h5
    from ..error import RepoError
    # BUG FIX: identity comparison instead of ``not self._repo == None`` —
    # equality against None can misfire when the stored object overloads
    # ``__eq__``, and ``is not None`` is the correct idiom anyway.
    if self._repo is not None:
        raise RepoError(RepoError.STATUS, "Repository already open",
                        "File: {0}".format(self.fname))
    # If string passed, try opening h5.File; otherwise complain
    if isinstance(fname, str):
        self.fname = fname
        self._repo = h5.File(fname)
    else:
        raise TypeError("Invalid filename type: {0}".format(type(fname)))
def get_api_versions(self, **kwargs):  # noqa: E501
    """get_api_versions  # noqa: E501

    get available API versions  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_api_versions(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :return: V1APIGroupList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async: hand back the request thread directly.
        return self.get_api_versions_with_http_info(**kwargs)  # noqa: E501
    (data) = self.get_api_versions_with_http_info(**kwargs)  # noqa: E501
    return data
def get_radius_normal(lat_radians: float, ell: Ellipsoid = None) -> float:
    """Compute the normal (prime-vertical) radius of a planetary body.

    Parameters
    ----------
    lat_radians : float
        latitude in radians
    ell : Ellipsoid, optional
        reference ellipsoid; defaults to ``Ellipsoid()``

    Returns
    -------
    radius : float
        normal radius (meters)
    """
    ell = Ellipsoid() if ell is None else ell
    cos_term = (ell.a * cos(lat_radians)) ** 2
    sin_term = (ell.b * sin(lat_radians)) ** 2
    return ell.a ** 2 / sqrt(cos_term + sin_term)
def use_plenary_sequence_rule_enabler_rule_view(self):
    """Pass through to provider SequenceRuleEnablerRuleLookupSession.use_plenary_sequence_rule_enabler_rule_view"""
    # Record the requested view locally, then forward it to every provider
    # session that understands it; sessions lacking the method are skipped.
    self._object_views['sequence_rule_enabler_rule'] = PLENARY
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_plenary_sequence_rule_enabler_rule_view()
        except AttributeError:
            pass
def __view_add_actions(self):
    """Sets the View actions.

    Registers the projects-explorer context-menu actions on the view,
    grouped with separator actions: project add/remove, file/directory
    creation, rename, delete, find-in-files, and path output.
    """
    # Project management group.
    self.__view.addAction(self.__engine.actions_manager.register_action("Actions|Umbra|Components|addons.projects_explorer|Add Project ...", slot=self.__view_add_project_action__triggered))
    self.__view.addAction(self.__engine.actions_manager.register_action("Actions|Umbra|Components|addons.projects_explorer|Remove Project", slot=self.__view_remove_project_action__triggered))
    separator_action = QAction(self.__view)
    separator_action.setSeparator(True)
    self.__view.addAction(separator_action)
    # File-system creation group.
    self.__view.addAction(self.__engine.actions_manager.register_action("Actions|Umbra|Components|addons.projects_explorer|Add New File ...", slot=self.__view_add_new_file_action__triggered))
    self.__view.addAction(self.__engine.actions_manager.register_action("Actions|Umbra|Components|addons.projects_explorer|Add New Directory ...", slot=self.__view_add_new_directory_action__triggered))
    separator_action = QAction(self.__view)
    separator_action.setSeparator(True)
    self.__view.addAction(separator_action)
    # Rename group; Copy/Move actions below are intentionally disabled.
    self.__view.addAction(self.__engine.actions_manager.register_action("Actions|Umbra|Components|addons.projects_explorer|Rename ...", slot=self.__view_rename_action__triggered))
    # self.__view.addAction(self.__engine.actions_manager.register_action(
    # "Actions|Umbra|Components|addons.projects_explorer|Copy ...",
    # slot=self.__view_copy_action__triggered))
    # self.__view.addAction(self.__engine.actions_manager.register_action(
    # "Actions|Umbra|Components|addons.projects_explorer|Move ...",
    # slot=self.__view_move_action__triggered))
    separator_action = QAction(self.__view)
    separator_action.setSeparator(True)
    self.__view.addAction(separator_action)
    # Deletion group.
    self.__view.addAction(self.__engine.actions_manager.register_action("Actions|Umbra|Components|addons.projects_explorer|Delete ...", slot=self.__view_delete_action__triggered))
    separator_action = QAction(self.__view)
    separator_action.setSeparator(True)
    self.__view.addAction(separator_action)
    # Search group.
    self.__view.addAction(self.__engine.actions_manager.register_action("Actions|Umbra|Components|addons.projects_explorer|Find In Files ...", slot=self.__view_find_in_files_action__triggered))
    separator_action = QAction(self.__view)
    separator_action.setSeparator(True)
    self.__view.addAction(separator_action)
    # Miscellaneous group.
    self.__view.addAction(self.__engine.actions_manager.register_action("Actions|Umbra|Components|addons.projects_explorer|Output Selected Path", slot=self.__view_output_selected_path_action__triggered))
def _assignRootname ( self , chip ) :
"""Assign a unique rootname for the image based in the expname .""" | extname = self . _image [ self . scienceExt , chip ] . header [ "EXTNAME" ] . lower ( )
extver = self . _image [ self . scienceExt , chip ] . header [ "EXTVER" ]
expname = self . _rootname
# record extension - based name to reflect what extension a mask file corresponds to
self . _image [ self . scienceExt , chip ] . rootname = expname + "_" + extname + str ( extver )
self . _image [ self . scienceExt , chip ] . sciname = self . _filename + "[" + extname + "," + str ( extver ) + "]"
self . _image [ self . scienceExt , chip ] . dqrootname = self . _rootname + "_" + extname + str ( extver )
# Needed to keep EXPNAMEs associated properly ( 1 EXPNAME for all chips )
self . _image [ self . scienceExt , chip ] . _expname = expname
self . _image [ self . scienceExt , chip ] . _chip = chip |
def count_group(group_id, failures=False, cached=Conf.CACHED):
    """Count the results in a group.

    :param str group_id: the group id
    :param bool failures: returns the failure count instead when True
    :param bool cached: run this against the cache backend
    :return: the number of tasks/results in a group
    :rtype: int
    """
    # Dispatch to the cache backend or the ORM, which share a signature.
    counter = count_group_cached if cached else Task.get_group_count
    return counter(group_id, failures)
def mkdir(self, children, mode=0o0755, return_node=True):
    """Creates child entities in directory.
    Raises exception if the object is a file.

    :param children: A relative path (string) or a list of them to create.
    :param mode: Permission bits passed to ``os.makedirs``.
    :param return_node: When True and a single child is given, wrap the
        created path in a ``Node``.
    :return: The child object, if one child is provided. None, otherwise.
    """
    if isinstance(children, (str, unicode)):
        # Single path: must be relative to this node.
        if os.path.isabs(children):
            raise BadValueError('Cannot mkdir an absolute path: {path}'.format(path=self._pyerarchy_path))
        child_path = os.path.join(self._pyerarchy_path, children)
        os.makedirs(child_path, mode)
        return Node(child_path) if return_node else None
    # Sequence of paths: create each one, never returning nodes.
    for child in children:
        self.mkdir(child, mode, False)
    return None
def check_all_local(self):
    """Check or uncheck all local event parameters."""
    # Mirror the global "all local" checkbox state onto each local
    # (checkbox, companion-widget) pair, enabling the companion only when
    # its checkbox ends up checked.
    target_state = self.event['global']['all_local'].isChecked()
    for pair in self.event['local'].values():
        pair[0].setChecked(target_state)
        pair[1].setEnabled(pair[0].isChecked())
def _call ( self , dx ) :
"""Return ` ` self ( x ) ` ` .""" | x = self . point
dx_norm = dx . norm ( )
if dx_norm == 0 :
return 0
scaled_dx = dx * ( self . step / dx_norm )
if self . method == 'backward' :
dAdx = self . operator ( x ) - self . operator ( x - scaled_dx )
elif self . method == 'forward' :
dAdx = self . operator ( x + scaled_dx ) - self . operator ( x )
elif self . method == 'central' :
dAdx = ( self . operator ( x + scaled_dx / 2 ) - self . operator ( x - scaled_dx / 2 ) )
else :
raise RuntimeError ( 'unknown method' )
return dAdx * ( dx_norm / self . step ) |
def decode(self, obj, restype, raw_ptr=False):
    """Converts Weld object to Python object.

    Args:
        obj: Result of Weld computation that needs to be decoded
        restype: Type of Weld computation result
        raw_ptr: Boolean indicating whether obj needs to be extracted
            from WeldValue or not
    Returns:
        Python object representing result of the Weld computation
    """
    # Resolve the raw data pointer: either obj already is one, or it must
    # be unwrapped from a WeldValue handle.
    if raw_ptr:
        data = obj
    else:
        data = cweld.WeldValue(obj).data()
    # Interpret the pointer as the expected ctypes structure. Only the
    # vector branches below use this; scalar branches re-extract instead.
    result = ctypes.cast(data, ctypes.POINTER(restype.ctype_class)).contents
    # Scalar results: cast to the matching ctypes scalar and return a plain
    # Python value.
    # NOTE(review): each scalar branch calls cweld.WeldValue(obj).data()
    # unconditionally, ignoring raw_ptr -- when reached via the struct
    # recursion below (which passes raw_ptr=True), obj is already a raw
    # pointer. Confirm whether these branches should use ``data`` instead.
    if restype == WeldInt16():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_int16)).contents.value
        return result
    elif restype == WeldInt():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_int)).contents.value
        return result
    elif restype == WeldLong():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_long)).contents.value
        return result
    elif restype == WeldFloat():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_float)).contents.value
        return np.float32(result)
    elif restype == WeldDouble():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_double)).contents.value
        return float(result)
    elif restype == WeldBit():
        data = cweld.WeldValue(obj).data()
        result = ctypes.cast(data, ctypes.POINTER(c_bool)).contents.value
        return bool(result)
    # Obj is a WeldVec(WeldInt()).ctype_class, which is a subclass of
    # ctypes._structure
    # Vector results: select the C helper that converts this Weld vector
    # struct into a NumPy array of the matching dtype/nesting depth.
    if restype == WeldVec(WeldBit()):
        weld_to_numpy = self.utils.weld_to_numpy_bool_arr
    elif restype == WeldVec(WeldInt16()):
        weld_to_numpy = self.utils.weld_to_numpy_int16_arr
    elif restype == WeldVec(WeldInt()):
        weld_to_numpy = self.utils.weld_to_numpy_int_arr
    elif restype == WeldVec(WeldLong()):
        weld_to_numpy = self.utils.weld_to_numpy_long_arr
    elif restype == WeldVec(WeldFloat()):
        weld_to_numpy = self.utils.weld_to_numpy_float_arr
    elif restype == WeldVec(WeldDouble()):
        weld_to_numpy = self.utils.weld_to_numpy_double_arr
    elif restype == WeldVec(WeldVec(WeldChar())):
        weld_to_numpy = self.utils.weld_to_numpy_char_arr_arr
    elif restype == WeldVec(WeldVec(WeldInt16())):
        weld_to_numpy = self.utils.weld_to_numpy_int16_arr_arr
    elif restype == WeldVec(WeldVec(WeldInt())):
        weld_to_numpy = self.utils.weld_to_numpy_int_arr_arr
    elif restype == WeldVec(WeldVec(WeldLong())):
        weld_to_numpy = self.utils.weld_to_numpy_long_arr_arr
    elif restype == WeldVec(WeldVec(WeldFloat())):
        weld_to_numpy = self.utils.weld_to_numpy_float_arr_arr
    elif restype == WeldVec(WeldVec(WeldDouble())):
        weld_to_numpy = self.utils.weld_to_numpy_double_arr_arr
    elif restype == WeldVec(WeldVec(WeldBit())):
        weld_to_numpy = self.utils.weld_to_numpy_bool_arr_arr
    elif isinstance(restype, WeldStruct):
        ret_vecs = []
        # Iterate through all fields in the struct, and recursively call
        # decode.
        for field_type in restype.field_types:
            ret_vec = self.decode(data, field_type, raw_ptr=True)
            # Advance the pointer past this field to the next one.
            data += sizeof(field_type.ctype_class())
            ret_vecs.append(ret_vec)
        return tuple(ret_vecs)
    else:
        raise Exception("Unable to decode; invalid return type")
    # Configure and invoke the selected conversion helper.
    weld_to_numpy.restype = py_object
    weld_to_numpy.argtypes = [restype.ctype_class]
    ret_vec = weld_to_numpy(result)
    return ret_vec
def valuefrompostdata(self, postdata):
    """This parameter method searches the POST data and retrieves the values it needs. It does not set the value yet though, but simply returns it. Needs to be explicitly passed to parameter.set()"""
    if not self.multi:
        # Single-valued parameter: present verbatim or absent.
        return postdata[self.id] if self.id in postdata else None
    # Multi parameters can be passed either as parameterid=choiceid1,choiceid2
    # or by setting parameterid[choiceid]=1 (or any other non-zero value).
    if self.id in postdata:
        passed = postdata[self.id].split(',')
        return [choice[0] for choice in self.choices if choice[0] in passed]
    found = False
    selected = []
    for choicekey in (choice[0] for choice in self.choices):
        field = self.id + '[' + choicekey + ']'
        if field in postdata:
            found = True
            # Only non-zero/non-empty values mark the choice as selected.
            if postdata[field]:
                selected.append(choicekey)
    return selected if found else None
def _create_blank_page(self):
    """Create html page to show while the kernel is starting"""
    # Substitute the stylesheet path into the blank-page template.
    return Template(BLANK).substitute(css_path=self.css_path)
def summarizePosition(self, index):
    """Compute residue counts at a specific sequence index.
    @param index: an C{int} index into the sequence.
    @return: A C{dict} with the count of too-short (excluded) sequences,
        and a Counter instance giving the residue counts.
    """
    residueCounts = Counter()
    tooShortCount = 0
    for read in self:
        try:
            residueCounts[read.sequence[index]] += 1
        except IndexError:
            # Sequence does not reach ``index``; exclude it from the counts.
            tooShortCount += 1
    return {'excludedCount': tooShortCount, 'countAtPosition': residueCounts}
def truncate(self, percentage):
    """Truncate ``percentage``/2 [%] of the whole duration from each of the
    first and last time.

    :param float percentage: Percentage of the duration to discard in total.
    :raises ValueError: if ``percentage`` is negative.

    :Sample Code:
        .. code:: python
            from datetimerange import DateTimeRange
            time_range = DateTimeRange(
                "2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900")
            time_range.is_output_elapse = True
            print(time_range)
            time_range.truncate(10)
            print(time_range)
    :Output:
        .. parsed-literal::
            2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900 (0:10:00)
            2015-03-22T10:00:30+0900 - 2015-03-22T10:09:30+0900 (0:09:00)
    """
    self.validate_time_inversion()
    if percentage == 0:
        return
    if percentage < 0:
        raise ValueError("discard_percent must be greater or equal to zero: " + str(percentage))
    # One percent of the span, multiplied by half the requested percentage,
    # shaved off each end.
    trim = self.timedelta // int(100) * int(percentage / 2)
    self.__start_datetime += trim
    self.__end_datetime -= trim
def normalize_geojson_featurecollection(obj):
    """Takes a geojson-like mapping representing
    geometry, Feature or FeatureCollection (or a sequence of such objects)
    and returns a FeatureCollection-like dict"""
    items = obj if isinstance(obj, Sequence) else [obj]
    collected = []
    for item in items:
        if not isinstance(item, Mapping) or 'type' not in item:
            raise ValueError("Expecting a geojson-like mapping or sequence of them")
        if 'features' in item:
            # Already a FeatureCollection: merge its features.
            collected.extend(item['features'])
        elif 'geometry' in item:
            # A Feature: take it as-is.
            collected.append(item)
        elif 'coordinates' in item:
            # A bare geometry: wrap it in a Feature.
            collected.append({'type': 'Feature', 'properties': {}, 'geometry': item})
        else:
            raise ValueError("Expecting a geojson-like mapping or sequence of them")
    return {'type': 'FeatureCollection', 'features': collected}
def from_independent_strains(cls, strains, stresses, eq_stress=None, vasp=False, tol=1e-10):
    """Constructs the elastic tensor least-squares fit of independent strains.

    Args:
        strains (list of Strains): list of strain objects to fit
        stresses (list of Stresses): list of stress objects to use in fit
            corresponding to the list of strains
        eq_stress (Stress): equilibrium stress to use in fitting
        vasp (boolean): flag for whether the stress tensor should be
            converted based on vasp units/convention for stress
        tol (float): tolerance for removing near-zero elements of the
            resulting tensor
    """
    expected_states = [tuple(row) for row in np.eye(6)]
    ss_dict = get_strain_state_dict(strains, stresses, eq_stress=eq_stress)
    missing = set(expected_states) - set(ss_dict)
    if missing:
        raise ValueError("Missing independent strain states: {}".format(missing))
    if set(ss_dict) - set(expected_states):
        warnings.warn("Extra strain states in strain-stress pairs are neglected in independent strain fitting")
    c_ij = np.zeros((6, 6))
    for i, state in enumerate(expected_states):
        state_strains = ss_dict[state]["strains"]
        state_stresses = ss_dict[state]["stresses"]
        # Linear fit of stress vs strain: the slope is the stiffness entry.
        for j in range(6):
            c_ij[i, j] = np.polyfit(state_strains[:, i], state_stresses[:, j], 1)[0]
    if vasp:
        # Convert units/sign convention of vasp stress tensor.
        c_ij *= -0.1
    return cls.from_voigt(c_ij).zeroed(tol)
def has_next_assessment_part(self, assessment_part_id):
    """This supports the basic simple sequence case. Can be overriden in a record for other cases"""
    if not self.supports_child_ordering or not self.supports_simple_child_sequencing:
        # Only available through a record extension.
        raise AttributeError()
    part_id_str = str(assessment_part_id)
    if 'childIds' in self._my_map and part_id_str in self._my_map['childIds']:
        # There is a next part exactly when this one is not the last child.
        return self._my_map['childIds'][-1] != part_id_str
    raise errors.NotFound('the Part with Id ' + part_id_str + ' is not a child of this Part')
def query(self, where="1=1", out_fields="*", timeFilter=None, geometryFilter=None, returnGeometry=True, returnCountOnly=False, returnIDsOnly=False, returnFeatureClass=False, returnDistinctValues=False, returnExtentOnly=False, groupByFieldsForStatistics=None, statisticFilter=None, resultOffset=None, resultRecordCount=None, out_fc=None, objectIds=None, distance=None, units=None, maxAllowableOffset=None, outSR=None, geometryPrecision=None, gdbVersion=None, orderByFields=None, outStatistics=None, returnZ=False, returnM=False, multipatchOption=None, quanitizationParameters=None, returnCentroid=False, as_json=False, **kwargs):
    """Queries a feature service layer based on a SQL statement, with
    optional temporal and spatial filters.

    Inputs:
        where - the selection SQL statement (default "1=1", i.e. all rows).
        out_fields - comma-separated attribute fields to return ("*" = all).
        timeFilter - a TimeFilter object (or dict of raw REST parameters)
            limiting results to a time window; values should be UTC
            timestamps in milliseconds and are not validated here.
        geometryFilter - a GeometryFilter object (or dict) to restrict the
            query spatially with another dataset.
        returnGeometry / returnZ / returnM / returnCentroid - toggles for
            what geometry information is included with each feature.
        returnCountOnly / returnIDsOnly / returnExtentOnly /
            returnDistinctValues - alternate result forms, per the ArcGIS
            REST Query operation semantics.
        objectIds, distance, units, maxAllowableOffset, outSR,
            geometryPrecision, gdbVersion, orderByFields,
            groupByFieldsForStatistics, outStatistics, statisticFilter,
            resultOffset, resultRecordCount, multipatchOption,
            quanitizationParameters - forwarded to the REST Query endpoint
            only when provided; see the ArcGIS REST API Query documentation
            for their meanings.
        returnFeatureClass - if True (and arcpy is available), save the
            result to a feature class and return its path.
        out_fc - output path used with returnFeatureClass; when None a
            scratch geodatabase name is generated.
        as_json - if True, return the raw JSON response.
        kwargs - any additional Query REST parameters, passed through as-is.

    Output:
        A list of Feature objects wrapped in a FeatureSet (default), the raw
        JSON dict (as_json, count-only or ids-only queries), or the output
        feature class path (returnFeatureClass).
    """
    url = self._url + "/query"
    # Always-sent parameters.
    params = {"f": "json"}
    params['where'] = where
    params['outFields'] = out_fields
    params['returnGeometry'] = returnGeometry
    params['returnDistinctValues'] = returnDistinctValues
    params['returnCentroid'] = returnCentroid
    params['returnCountOnly'] = returnCountOnly
    params['returnExtentOnly'] = returnExtentOnly
    params['returnIdsOnly'] = returnIDsOnly
    params['returnZ'] = returnZ
    params['returnM'] = returnM
    # Optional parameters: only sent when truthy.
    if resultRecordCount:
        params['resultRecordCount'] = resultRecordCount
    if resultOffset:
        params['resultOffset'] = resultOffset
    if quanitizationParameters:
        params['quanitizationParameters'] = quanitizationParameters
    if multipatchOption:
        params['multipatchOption'] = multipatchOption
    if orderByFields:
        params['orderByFields'] = orderByFields
    if groupByFieldsForStatistics:
        params['groupByFieldsForStatistics'] = groupByFieldsForStatistics
    # NOTE: outStatistics, when given, overrides a statisticFilter's output.
    if statisticFilter and isinstance(statisticFilter, filters.StatisticFilter):
        params['outStatistics'] = statisticFilter.filter
    if outStatistics:
        params['outStatistics'] = outStatistics
    if outSR:
        params['outSR'] = outSR
    if maxAllowableOffset:
        params['maxAllowableOffset'] = maxAllowableOffset
    if gdbVersion:
        params['gdbVersion'] = gdbVersion
    if geometryPrecision:
        params['geometryPrecision'] = geometryPrecision
    if objectIds:
        params['objectIds'] = objectIds
    if distance:
        params['distance'] = distance
    if units:
        params['units'] = units
    # Filters may be given as helper objects or as plain dicts; either way
    # their key/value pairs are merged into the request parameters.
    if timeFilter and isinstance(timeFilter, TimeFilter):
        for k, v in timeFilter.filter.items():
            params[k] = v
    elif isinstance(timeFilter, dict):
        for k, v in timeFilter.items():
            params[k] = v
    if geometryFilter and isinstance(geometryFilter, GeometryFilter):
        for k, v in geometryFilter.filter.items():
            params[k] = v
    elif geometryFilter and isinstance(geometryFilter, dict):
        for k, v in geometryFilter.items():
            params[k] = v
    # Pass through any extra REST parameters verbatim.
    if len(kwargs) > 0:
        for k, v in kwargs.items():
            params[k] = v
        del k, v
    result = self._post(url=url, securityHandler=self._securityHandler, param_dict=params, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
    if 'error' in result:
        raise ValueError(result)
    if as_json or returnCountOnly == True or returnIDsOnly == True:
        # Raw JSON requested, or the response is only a count/id list.
        return result
    elif returnFeatureClass and not returnCountOnly and not returnIDsOnly:
        # Round-trip through a scratch JSON file to build a feature class.
        uid = create_uid()
        if out_fc is None:
            out_fc = os.path.join(scratchGDB(), "a{fid}".format(fid=uid))
        text = json.dumps(result)
        temp = scratchFolder() + os.sep + uid + ".json"
        with open(temp, 'wb') as writer:
            if six.PY3:
                text = bytes(text, 'UTF-8')
            writer.write(text)
            writer.flush()
        del writer
        fc = json_to_featureclass(json_file=temp, out_fc=out_fc)
        os.remove(temp)
        return fc
    else:
        return FeatureSet.fromJSON(jsonValue=json.dumps(result))
    # Unreachable: every branch above returns.
    return result
def filename(self):
    """Filename of the attachment, without the full 'attachment' path."""
    # Both the cached value and a non-empty JSON 'value' entry are required
    # before a filename can be derived.
    if not self.value:
        return None
    if 'value' not in self._json_data or not self._json_data['value']:
        return None
    return self._json_data['value'].split('/')[-1]
def get_top(self):
    '''Returns the high data derived from the top file.

    :return: a ``(merged_tops, errors)`` tuple; ``merged_tops`` is an empty
        OrderedDict and an error message is appended when merging fails.
    '''
    tops, errors = self.get_tops()
    try:
        merged_tops = self.merge_tops(tops)
    except TypeError:
        # The exception object was previously bound but never used; merging
        # can fail when a top file rendered to a non-mergeable value.
        merged_tops = OrderedDict()
        errors.append('Error encountered while rendering pillar top file.')
    return merged_tops, errors
def listdir(path, include=r'.', exclude=r'\.pyc$|^\.', show_all=False, folders_only=False):
    """List files and directories"""
    file_names = []
    # The parent-directory entry is always listed first among directories.
    dir_names = [to_text_string(osp.pardir)]
    for entry in os.listdir(to_text_string(path)):
        # Excluded entries are skipped unless everything is requested.
        if not show_all and re.search(exclude, entry):
            continue
        if osp.isdir(osp.join(path, entry)):
            dir_names.append(entry)
        elif not folders_only and (show_all or re.search(include, entry)):
            file_names.append(entry)
    return sorted(dir_names, key=str_lower) + sorted(file_names, key=str_lower)
def get_random_word(dictionary, min_word_length=3, max_word_length=8):
    """Returns a random word from the dictionary.

    :param dictionary: sequence of candidate words
    :param int min_word_length: minimum accepted word length (inclusive)
    :param int max_word_length: maximum accepted word length (inclusive)
    :return: a uniformly-chosen word whose length is within the bounds
    :raises ValueError: if no word in the dictionary fits the length bounds
    """
    # Filtering first keeps the selection uniform over valid words and,
    # unlike the previous rejection-sampling loop, cannot spin forever when
    # no candidate exists.
    candidates = [word for word in dictionary if min_word_length <= len(word) <= max_word_length]
    if not candidates:
        raise ValueError("no word in the dictionary satisfies the length bounds")
    return choice(candidates)
def deactivate(self):
    """Deactivates the Component.

    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Deactivating '{0}' Component." .format(self.__class__.__name__))
    # Drop every injected dependency and flag the component inactive.
    self.__engine = self.__settings = self.__settings_section = self.__script_editor = None
    self.activated = False
    return True
def parse(self, file=None, string=None):
    """SAX parse XML text.
    @param file: Parse a python I{file-like} object.
    @type file: I{file-like} object.
    @param string: Parse string XML.
    @type string: str"""
    timer = metrics.Timer()
    timer.start()
    sax, content_handler = self.saxparser()
    if file is not None:
        # File input: hand the stream straight to the SAX parser.
        sax.parse(file)
        timer.stop()
        metrics.log.debug('sax (%s) duration: %s', file, timer)
        return content_handler.nodes[0]
    if string is not None:
        # String input: parseString requires bytes.
        if isinstance(string, str):
            string = string.encode()
        parseString(string, content_handler)
        timer.stop()
        metrics.log.debug('%s\nsax duration: %s', string, timer)
        return content_handler.nodes[0]
    # Neither input given: nothing to parse.
    return None
def process_result(self, context, result_body, exc, content_type):
    """Given a result body and an exception object,
    return the appropriate result object,
    or raise an exception."""
    # Delegates to the module-level ``process_result`` helper (the bare name
    # below resolves to the module global, not this method), passing this
    # instance along as the helper's first argument.
    return process_result(self, context, result_body, exc, content_type)
def source(self):
    """The source element this document element was created from."""
    # Prefer an explicit source, then the parent's source, and finally fall
    # back to a Location built from this element itself.
    if self._source is not None:
        return self._source
    if self.parent is not None:
        return self.parent.source
    return Location(self)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.