signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def del_unused_keyframes(self):
    """Drop every entry of ``self.dct['keys']`` whose key does not appear
    in ``self.key_frame_list.sorted_key_list()``."""
    wanted = set(self.key_frame_list.sorted_key_list())
    for key in list(self.dct['keys']):
        if key not in wanted:
            del self.dct['keys'][key]
def get(self, key, default=None):
    """Return the configuration value stored under a dotted key.

    Example:
        value = self.get('system.database.name')

    :param key: parameter name, with nesting levels separated by dots
    :param default: value returned when the lookup yields nothing truthy
    :return: mixed
    """
    node = self.data
    for segment in key.split('.'):
        # Mirrors the original reduce semantics: any falsy intermediate
        # value collapses to None.
        node = node and node.get(segment) or None
    return node or default
def fetchRootJob(self):
    """Fetch the root job from the jobStore that provides context for all
    other jobs.

    Exactly the same as ``jobStore.loadRootJob()``, but with a different
    exit message if the root job is not found (indicating the workflow ran
    successfully to completion and certain stats cannot be gathered from it
    meaningfully, such as which jobs are left to run).

    :raises JobException: if the root job does not exist.
    """
    try:
        root = self.jobStore.loadRootJob()
    except JobException:
        print('Root job is absent. The workflow may have completed successfully.', file=sys.stderr)
        raise
    else:
        return root
def from_barset(cls, barset, name=None, delay=None, use_wrapper=True, wrapper=None):
    """Copy a BarSet's frames to create a new FrameSet.

    Arguments:
        barset: An existing BarSet object to copy frames from.
        name: A name for the new FrameSet.
        delay: Delay for the animation.
        use_wrapper: Whether to use the old barset's wrapper in the frames.
        wrapper: A new wrapper pair to use for each frame.
            This overrides the `use_wrapper` option.
    """
    if wrapper:
        frames = tuple(barset.wrap_str(frame, wrapper=wrapper) for frame in barset)
    elif use_wrapper:
        frames = tuple(barset.wrap_str(frame) for frame in barset)
    else:
        frames = barset.data
    return cls(frames, name=name, delay=delay)
def tag(collector, image, artifact, **kwargs):
    """Tag an image!"""
    if artifact in (None, "", NotSpecified):
        raise BadOption("Please specify a tag using the artifact option")
    if image.image_index in (None, "", NotSpecified):
        raise BadOption("Please specify an image with an image_index option")

    configured_tag = collector.configuration["harpoon"].tag
    if configured_tag is not NotSpecified:
        tag = "{0}:{1}".format(image.image_name, configured_tag)
    else:
        tag = "{0}:latest".format(image.image_name)

    images = image.harpoon.docker_api.images()
    current_tags = chain.from_iterable(
        image_conf["RepoTags"] for image_conf in images if image_conf["RepoTags"] is not None
    )
    if tag not in current_tags:
        raise BadOption("Please build or pull the image down to your local cache before tagging it")

    # The membership check above guarantees some entry carries `tag`.
    for image_conf in images:
        repo_tags = image_conf["RepoTags"]
        if repo_tags is not None and tag in repo_tags:
            image_id = image_conf["Id"]
            break

    log.info("Tagging {0} ({1}) as {2}".format(image_id, image.image_name, artifact))
    image.harpoon.docker_api.tag(image_id, repository=image.image_name, tag=artifact, force=True)
    image.tag = artifact
    Syncer().push(image)
def _query(self, path: str, method: str, data: Optional[Dict[str, Any]] = None, expected_status: int = 200) -> Union[List[Dict[str, Any]], Dict[str, Any], None]:
    """Make an HTTP request against the Discord API.

    Args:
        path: the URI path (not including the base url; start with the
            first uri segment, like 'users/...')
        method: the HTTP method to use (GET, POST, PATCH)
        data: the data to send as JSON data (POST/PATCH only)
        expected_status: expected HTTP status; other statuses received
            will raise a ValueError

    Returns:
        Data from the endpoint's response when ``expected_status`` is 200,
        otherwise None.

    Raises:
        ValueError: on an unknown HTTP method or an unexpected status code.
    """
    url = Pycord.url_base + path
    self.logger.debug(f'Making {method} request to "{url}"')
    if method == 'GET':
        r = requests.get(url, headers=self._build_headers())
    elif method == 'POST':
        # Bug fix: a stray follow-up GET here used to overwrite the POST
        # response, so the status/body checks below inspected the wrong
        # request's result.
        r = requests.post(url, headers=self._build_headers(), json=data)
    elif method == 'PATCH':
        r = requests.patch(url, headers=self._build_headers(), json=data)
    else:
        raise ValueError(f'Unknown HTTP method {method}')
    self.logger.debug(f'{method} response from "{url}" was "{r.status_code}"')
    if r.status_code != expected_status:
        raise ValueError(f'Non-{expected_status} {method} response from Discord API ({r.status_code}): {r.text}')
    if expected_status == 200:
        return r.json()
    return None
def serv(args):
    """Serve a rueckenwind application"""
    if not args.no_debug:
        tornado.autoreload.start()

    extra_configs = []
    if sys.stdout.isatty():
        # set terminal title
        sys.stdout.write('\x1b]2;rw: {}\x07'.format(' '.join(sys.argv[2:])))
    if args.cfg:
        extra_configs.append(os.path.abspath(args.cfg))

    listen = (int(args.port), args.address)
    ioloop = tornado.ioloop.IOLoop.instance()
    setup_app(app=args.MODULE, extra_configs=extra_configs, ioloop=ioloop, listen=listen)
    ioloop.start()
def point_at(self, horizontal_distance, vertical_increment, azimuth):
    """Compute the point with given horizontal and vertical distances
    and azimuth from this point.

    :param horizontal_distance:
        Horizontal distance, in km.
    :type horizontal_distance:
        float
    :param vertical_increment:
        Vertical increment, in km. When positive, the new point has a
        greater depth; when negative, a smaller depth.
    :type vertical_increment:
        float
    :param azimuth:
        Azimuth, in decimal degrees.
    :type azimuth:
        float
    :returns:
        The point at the given distances.
    :rtype:
        Instance of :class:`Point`
    """
    longitude, latitude = geodetic.point_at(self.longitude, self.latitude, azimuth, horizontal_distance)
    return Point(longitude, latitude, self.depth + vertical_increment)
def _get_selected_library_state(self):
    """Return the LibraryState that is currently selected in the LibraryTree.

    :return: selected state in TreeView, or None when nothing is selected
    :rtype: LibraryState
    """
    library_os_path, library_path, library_name, item_key = self.extract_library_properties_from_selected_row()
    if library_path is None:
        return None
    tree_path = self.convert_if_human_readable(str(library_path)) + "/" + str(item_key)
    logger.debug("Link library state '{0}' (with library tree path: {2} and file system path: {1}) into state machine.".format(str(item_key), library_os_path, tree_path))
    # The library name is derived from the last segment of the file system path.
    library_name = library_os_path.split(os.path.sep)[-1]
    return LibraryState(library_path, library_name, "0.1", format_folder_name_human_readable(library_name))
def reverse_dict(dict_obj):
    """Reverse a dict, so each value in it maps to a sorted list of its keys.

    Parameters
    ----------
    dict_obj : dict
        A key-value dict.

    Returns
    -------
    dict
        A dict where each value maps to a sorted list of all the unique
        keys that mapped to it.

    Example
    -------
    >>> dicti = {'a': 1, 'b': 3, 'c': 1}
    >>> reverse_dict(dicti)
    {1: ['a', 'c'], 3: ['b']}
    """
    # Group keys by value; a set keeps the grouped keys unique, matching
    # the documented "unique keys" contract.
    grouped = {}
    for key, value in dict_obj.items():
        grouped.setdefault(value, set()).add(key)
    return {value: sorted(keys) for value, keys in grouped.items()}
def find_file(name, path=None, deep=False, partial=False):
    """Search for a file and return its path upon finding it.

    Searches for a file with the given `name` in the list of directories
    mentioned in `path`. It also supports `deep` search (recursive) and
    `partial` search (matching files whose names only partially match the
    query) through the respective boolean arguments.

    Parameters
    ----------
    name : str
        The name of the file to search for.
    path : str or list of str, optional
        The directories to be searched. It can be either a single str
        with the directory paths separated by ``os.pathsep``, or a list
        of directory path strings. Defaults to the current working
        directory, evaluated at call time.
    deep : bool, optional
        Enables deep-searching, i.e. recursively looking for the file in
        the sub-directories of the mentioned directories.
    partial : bool, optional
        Whether to look for files having a filename partially matching
        the query `name`.

    Returns
    -------
    str or None
        The path of the file, or None when no match is found. In case of
        multiple hits, only the first one is returned.
    """
    # Bug fix: the old default `path=os.getcwd()` was evaluated once at
    # import time, so later chdir() calls were silently ignored. Resolve
    # the default lazily instead.
    if path is None:
        path = os.getcwd()
    paths = path.split(os.pathsep) if isinstance(path, str) else path
    for directory in paths:
        result = __find_file(name, directory, deep, partial)
        if result:
            return result
    return None
def get_default_config(self):
    """Returns the default collector settings"""
    defaults = super(ElbCollector, self).get_default_config()
    defaults.update({
        'path': 'elb',
        'regions': ['us-west-1'],
        'interval': 60,
        'format': '$zone.$elb_name.$metric_name',
    })
    return defaults
def analysis(self):
    """The list of analysis of ``words`` layer elements."""
    # Lazily tag the text before collecting per-word analyses.
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return [token[ANALYSIS] for token in self.words]
def detunings_code(Neu, Nl, pairs, omega_levelu, iu0, ju0):
    r"""Get the code to calculate the simplified detunings.

    For each field ``l`` and each coupled pair ``(iu, ju)`` in
    ``pairs[l]``, one assignment line is produced expressing the detuning
    in terms of ``detuning_knob[l]``, plus a constant correction derived
    from the level frequencies when the pair differs from the reference
    pair ``(iu0[l], ju0[l])``.
    """
    lines = []
    for l in range(Nl):
        for iu, ju in pairs[l]:
            assignment = " delta{0}_{1}_{2} = detuning_knob[{3}]".format(l + 1, iu + 1, ju + 1, l)
            # Keep the original accumulation order so float results (and
            # their str() representation) are bit-identical.
            corr = -omega_levelu[iu] + omega_levelu[iu0[l]]
            corr = -omega_levelu[ju0[l]] + omega_levelu[ju] + corr
            if corr != 0:
                assignment += " + (" + str(corr) + ")"
            lines.append(assignment + "\n")
    return "".join(lines)
def _handle_response(self, response):
    """Handle the HTTP response by memoing the headers and then delivering
    bytes."""
    self.client.status = response.code
    self.response_headers = response.headers
    # XXX Workaround (which needs to be improved) for a possible bug in
    # Twisted with the new client:
    # http://twistedmatrix.com/trac/ticket/5476
    if self._method.upper() == 'HEAD' or response.code == NO_CONTENT:
        return succeed('')
    receiver = self.receiver_factory()
    deferred = Deferred()
    receiver.finished = deferred
    receiver.content_length = response.length
    response.deliverBody(receiver)
    if response.code >= 400:
        deferred.addCallback(self._fail_response, response)
    return deferred
def register_retinotopy(hemi, model='benson17', model_hemi=Ellipsis,
                        polar_angle=None, eccentricity=None, weight=None, pRF_radius=None,
                        weight_min=0.1, eccentricity_range=None, partial_voluming_correction=False,
                        radius_weight=1, field_sign_weight=1, invert_rh_field_sign=False,
                        scale=20.0, sigma=Ellipsis, select='close', prior=None, resample=Ellipsis,
                        radius=np.pi/3, max_steps=2000, max_step_size=0.05, method='random',
                        yield_imap=False):
    '''register_retinotopy(hemi) registers the given hemisphere object, hemi, to a model of V1,
    V2, and V3 retinotopy, and yields a copy of hemi that is identical but additionally contains
    the registration 'retinotopy', whose coordinates are aligned with the model.

    The registration (1) extracts the polar angle, eccentricity, and weight data from the
    hemisphere, optionally applying partial-voluming correction, a prior registration, and
    resampling to a uniform mesh; it then (2) minimizes a potential function that balances mesh
    deformation (edge, angle, and perimeter terms) against the functional fit of the retinotopy
    model anchors.

    Options:
      * model, model_hemi: the retinotopy model instance/name and its hemisphere (Ellipsis
        auto-detects; use None for fsaverage_sym models).
      * polar_angle, eccentricity, weight, pRF_radius: property names or value arrays used as
        the empirical retinotopy data; unity weight is assumed when weight is absent.
      * weight_min, eccentricity_range: thresholds below/outside which vertices are ignored.
      * partial_voluming_correction: if True, weights are scaled by
        (1 - hemi.partial_volume_factor).
      * field_sign_weight, radius_weight, invert_rh_field_sign: relative weights (0-1) for the
        field-sign- and pRF-radius-based anchor strengths.
      * scale, sigma, select, radius: parameters forwarded to the anchor/potential construction;
        anatomical constraints are always held at strength 1.0.
      * prior, resample: prior registration name and optional resampling target ('fsaverage',
        'fsaverage_sym', None, or Ellipsis for auto-detection).
      * max_steps, max_step_size, method: minimization parameters passed to mesh_register;
        max_steps may be (max_steps, stride) to record a 'trajectory' meta-datum.
      * yield_imap: if True, return the lazy pimms imap of registration calculations instead of
        the 'predicted_mesh' result.
    '''
    # Build the lazily-evaluated registration map; every option except
    # yield_imap is forwarded unchanged.
    imap = retinotopy_registration(
        cortex=hemi, model_argument=model, model_hemi=model_hemi,
        polar_angle=polar_angle, eccentricity=eccentricity, weight=weight,
        pRF_radius=pRF_radius, weight_min=weight_min, eccentricity_range=eccentricity_range,
        partial_voluming_correction=partial_voluming_correction,
        radius_weight=radius_weight, field_sign_weight=field_sign_weight,
        invert_rh_field_sign=invert_rh_field_sign, scale=scale, sigma=sigma,
        select=select, prior=prior, resample=resample, radius=radius,
        max_steps=max_steps, max_step_size=max_step_size, method=method)
    if yield_imap:
        return imap
    return imap['predicted_mesh']
def evaluate_barycentric(self, lambda1, lambda2, lambda3, _verify=True):
    r"""Compute a point on the surface.

    Evaluates :math:`B\left(\lambda_1, \lambda_2, \lambda_3\right)` for the
    given barycentric coordinates. With ``_verify=False``, "invalid"
    inputs (negative weights or weights not summing to one) are accepted
    and evaluated anyway.

    Args:
        lambda1 (float): Parameter along the reference triangle.
        lambda2 (float): Parameter along the reference triangle.
        lambda3 (float): Parameter along the reference triangle.
        _verify (Optional[bool]): Indicates if the barycentric coordinates
            should be verified as summing to one and all non-negative
            (i.e. verified as barycentric). Can either be used to evaluate
            at points outside the domain, or to save time when the caller
            already knows the input is verified. Defaults to
            :data:`True`.

    Returns:
        numpy.ndarray: The point on the surface (as a two dimensional
        NumPy array with a single column).

    Raises:
        ValueError: If the weights are not valid barycentric coordinates,
            i.e. they don't sum to ``1``. (Won't raise if
            ``_verify=False``.)
        ValueError: If some weights are negative. (Won't raise if
            ``_verify=False``.)
    """
    weights = (lambda1, lambda2, lambda3)
    if _verify:
        self._verify_barycentric(*weights)
    return _surface_helpers.evaluate_barycentric(self._nodes, self._degree, *weights)
def fbank(signal, samplerate=16000, winlen=0.025, winstep=0.01, nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97, winfunc=lambda x: numpy.ones((x,))):
    """Compute Mel-filterbank energy features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the sample rate of the signal we are working with, in Hz.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :param winfunc: the analysis window to apply to each frame. By default no window is applied. You can use numpy window functions here e.g. winfunc=numpy.hamming
    :returns: 2 values. The first is a numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector. The
        second return value is the energy in each frame (total energy, unwindowed)
    """
    highfreq = highfreq or samplerate / 2
    emphasized = sigproc.preemphasis(signal, preemph)
    frames = sigproc.framesig(emphasized, winlen * samplerate, winstep * samplerate, winfunc)
    pspec = sigproc.powspec(frames, nfft)

    # Total (unwindowed) energy per frame; clamp zeros to eps so later
    # log operations stay finite.
    energy = numpy.sum(pspec, 1)
    energy = numpy.where(energy == 0, numpy.finfo(float).eps, energy)

    # Filterbank energies, with the same zero-clamping for log safety.
    banks = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)
    feat = numpy.dot(pspec, banks.T)
    feat = numpy.where(feat == 0, numpy.finfo(float).eps, feat)
    return feat, energy
def get_args(tp, evaluate=None):
    """Get type arguments with all substitutions performed. For unions,
    basic simplifications used by the Union constructor are performed.

    On versions prior to 3.7, if `evaluate` is False (default), the result
    is reported as a nested tuple, matching the internal representation of
    types. If `evaluate` is True (or if the Python version is 3.7 or
    greater), then all type parameters are applied (this could be time and
    memory expensive).

    Examples::

        get_args(int) == ()
        get_args(Union[int, Union[T, int], str][int]) == (int, str)
        get_args(Union[int, Tuple[T, int]][str]) == (int, (Tuple, str, int))
        get_args(Union[int, Tuple[T, int]][str], evaluate=True) == (int, Tuple[str, int])
        get_args(Dict[int, Tuple[T, T]][Optional[int]], evaluate=True) == (int, Tuple[Optional[int], Optional[int]])
        get_args(Callable[[], T][int], evaluate=True) == ([], int,)
    """
    if NEW_TYPING:
        # Python 3.7+: args are always fully evaluated, so evaluate=False
        # cannot be honored.
        if evaluate is not None and not evaluate:
            raise ValueError('evaluate can only be True in Python 3.7')
        if isinstance(tp, _GenericAlias):
            res = tp.__args__
            # Normalize Callable args to ([arg_types], return_type), unless
            # the signature is the Ellipsis form Callable[..., R].
            if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
                res = (list(res[:-1]), res[-1])
            return res
        return ()
    if is_classvar(tp):
        return (tp.__type__,)
    if (is_generic_type(tp) or is_union_type(tp) or is_callable_type(tp) or is_tuple_type(tp)):
        # Pre-3.7: reconstruct arguments from the substitution tree.
        tree = tp._subs_tree()
        if isinstance(tree, tuple) and len(tree) > 1:
            if not evaluate:
                return tree[1:]
            res = _eval_args(tree[1:])
            # Same Callable normalization as above for the evaluated form.
            if get_origin(tp) is Callable and res[0] is not Ellipsis:
                res = (list(res[:-1]), res[-1])
            return res
    return ()
def estimateDeltaStaeckel(pot, R, z, no_median=False):
    """NAME:
       estimateDeltaStaeckel
    PURPOSE:
       Estimate a good value for delta using eqn. (9) in Sanders (2012)
    INPUT:
       pot - Potential instance or list thereof
       R, z - coordinates (if these are arrays, the median estimated delta is returned, i.e., if this is an orbit)
       no_median - (False) if True, and input is array, return all calculated values of delta (useful for quickly
                   estimating delta for many phase space points)
    OUTPUT:
       delta
    HISTORY:
       2013-08-28 - Written - Bovy (IAS)
       2016-02-20 - Changed input order to allow physical conversions - Bovy (UofT)
    """
    if isinstance(R, nu.ndarray):
        # Array input: evaluate one delta^2 estimate per (R, z) pair.
        delta2 = nu.array([(z[ii]**2. - R[ii]**2.  # eqn. (9) has a sign error
                            + (3. * R[ii] * _evaluatezforces(pot, R[ii], z[ii])
                               - 3. * z[ii] * _evaluateRforces(pot, R[ii], z[ii])
                               + R[ii] * z[ii] * (evaluateR2derivs(pot, R[ii], z[ii], use_physical=False)
                                                  - evaluatez2derivs(pot, R[ii], z[ii], use_physical=False)))
                            / evaluateRzderivs(pot, R[ii], z[ii], use_physical=False))
                           for ii in range(len(R))])
        # Clamp tiny negative values (numerical noise) to zero so the
        # final sqrt stays real.
        indx = (delta2 < 0.) * (delta2 > -10.**-10.)
        delta2[indx] = 0.
        if not no_median:
            # `True ^ isnan` inverts the boolean mask, keeping only the
            # non-NaN estimates for the median.
            delta2 = nu.median(delta2[True ^ nu.isnan(delta2)])
    else:
        # Scalar input: same eqn. (9) estimate for a single point.
        delta2 = (z**2. - R**2.  # eqn. (9) has a sign error
                  + (3. * R * _evaluatezforces(pot, R, z)
                     - 3. * z * _evaluateRforces(pot, R, z)
                     + R * z * (evaluateR2derivs(pot, R, z, use_physical=False)
                                - evaluatez2derivs(pot, R, z, use_physical=False)))
                  / evaluateRzderivs(pot, R, z, use_physical=False))
        # Same numerical-noise clamp as the array branch.
        if delta2 < 0. and delta2 > -10.**-10.:
            delta2 = 0.
    return nu.sqrt(delta2)
def encode(self, data: mx.sym.Symbol, data_length: Optional[mx.sym.Symbol], seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
    """Encodes data given sequence lengths of individual examples and maximum sequence length.

    Looks up token embeddings and, for source data, optional per-factor
    embeddings, which are combined with the token embeddings.

    :param data: Input data.
    :param data_length: Vector with sequence lengths.
    :param seq_len: Maximum sequence length.
    :return: Encoded versions of input data (data, data_length, seq_len).
    """
    factor_embeddings = []  # type: List[mx.sym.Symbol]
    if self.is_source:
        # Source input stacks num_factors id streams along axis 2; split the
        # primary token ids from the factor ids.
        data, *data_factors = mx.sym.split(data=data, num_outputs=self.config.num_factors, axis=2, squeeze_axis=True, name=self.prefix + "factor_split")
        if self.config.factor_configs is not None:
            # One embedding table per factor stream.
            for i, (factor_data, factor_config, factor_weight) in enumerate(zip(data_factors, self.config.factor_configs, self.embed_factor_weights)):
                factor_embeddings.append(mx.sym.Embedding(data=factor_data, input_dim=factor_config.vocab_size, weight=factor_weight, output_dim=factor_config.num_embed, name=self.prefix + "factor%d_embed" % i))
    embedding = mx.sym.Embedding(data=data, input_dim=self.config.vocab_size, weight=self.embed_weight, output_dim=self.config.num_embed, name=self.prefix + "embed")
    if self.config.factor_configs is not None:
        # Combine token and factor embeddings by concatenation or by sum.
        if self.config.source_factors_combine == C.SOURCE_FACTORS_COMBINE_CONCAT:
            embedding = mx.sym.concat(embedding, *factor_embeddings, dim=2, name=self.prefix + "embed_plus_factors")
        else:
            embedding = mx.sym.add_n(embedding, *factor_embeddings, name=self.prefix + "embed_plus_factors")
    if self.config.dropout > 0:
        embedding = mx.sym.Dropout(data=embedding, p=self.config.dropout, name="source_embed_dropout")
    return embedding, data_length, seq_len
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Write to disk (in the HDF5 file whose handle/name is the first
    argument) a stack of matrices, one per clustering run, describing how
    that run's cluster IDs overlap each cluster ID in 'consensus_labels'
    (the labels obtained by ensemble clustering).

    Also returns the adjacency matrix for consensus clustering and a vector
    of mutual informations between each clustering of the ensemble and the
    consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    consensus_labels : array of shape (n_samples,)
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    cluster_dims_list : list
        Per-run cluster counts (first entry 0).
    mutual_info_list : list
        Mutual information between each run and the consensus labelling.
    consensus_adjacency : scipy.sparse.csr_matrix
        Cluster-by-sample membership matrix of the consensus labelling.
    """
    # A single flat label vector is treated as one run.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build the consensus membership matrix in raw CSR form: row k holds
    # ones at the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype=np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype=np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype=int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency), shape=(N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    # Extendable array: one (n_ids x N_consensus_labels) slab appended per run.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix', tables.Float32Atom(), (0, N_consensus_labels), "Matrix of overlaps between each run and " "the consensus labellings", filters=FILTERS, expectedrows=N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Keep only finite, non-negative labels (negative = unassigned).
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique=True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype=float)
        indices = np.empty(0, dtype=int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k + 1]]
                # NOTE(review): `indices` is cumulative over the clusters
                # processed so far, so the union for cluster c also includes
                # samples of earlier clusters -- confirm this is intended.
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype=int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape=(n_ids, N_samples))
        # Pairwise intersection sizes via a sparse matrix product.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
def theo1(data, rate=1.0, data_type="phase", taus=None):
    """PRELIMINARY - REQUIRES FURTHER TESTING.

    Theo1 is a two-sample variance with improved confidence and
    extended averaging factor range.

    .. math::

        \\sigma^2_{THEO1}(m\\tau_0) = {1 \\over (m\\tau_0)^2 (N-m)}
        \\sum_{i=1}^{N-m} \\sum_{\\delta=0}^{m/2-1}
        {1 \\over m/2-\\delta} \\lbrace
        ({x}_{i} - x_{i-\\delta+m/2}) +
        (x_{i+m} - x_{i+\\delta+m/2}) \\rbrace^2

    Where :math:`10<=m<=N-1` is even.

    FIXME: bias correction

    NIST SP 1065 eq (30) page 29

    Parameters
    ----------
    data: np.array
        Input data. Provide either phase or frequency (fractional,
        adimensional).
    rate: float
        The sampling rate for data, in Hz. Defaults to 1.0
    data_type: {'phase', 'freq'}
        Data type, i.e. phase or frequency. Defaults to "phase".
    taus: np.array
        Array of tau values, in seconds, for which to compute statistic.
        Optionally set taus=["all"|"octave"|"decade"] for automatic
        tau-list generation.

    Returns
    -------
    (taus_used, devs, deverrs, ns) with entries that had too few samples
    removed.
    """
    phase = input_to_phase(data, rate, data_type)
    tau0 = 1.0 / rate
    # even=True: the Theo1 averaging factor m must be even.
    (phase, ms, taus_used) = tau_generator(phase, rate, taus, even=True)
    devs = np.zeros_like(taus_used)
    deverrs = np.zeros_like(taus_used)
    ns = np.zeros_like(taus_used)
    N = len(phase)
    for idx, m in enumerate(ms):
        m = int(m)
        # to avoid: VisibleDeprecationWarning: using a
        # non-integer number instead of an integer will
        # result in an error in the future
        assert m % 2 == 0  # m must be even
        dev = 0
        n = 0
        for i in range(int(N - m)):
            s = 0
            for d in range(int(m / 2)):  # inner sum
                pre = 1.0 / (float(m) / 2 - float(d))
                s += pre * pow(phase[i] - phase[i - d + int(m / 2)] + phase[i + m] - phase[i + d + int(m / 2)], 2)
                n = n + 1
            dev += s
        assert n == (N - m) * m / 2  # N-m outer sums, m/2 inner sums
        dev = dev / (0.75 * (N - m) * pow(m * tau0, 2))
        # factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf
        # but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29
        devs[idx] = np.sqrt(dev)
        deverrs[idx] = devs[idx] / np.sqrt(N - m)
        ns[idx] = n
    return remove_small_ns(taus_used, devs, deverrs, ns)
def index_search(right_eigenvectors):
    """Find simplex structure in eigenvectors to begin PCCA+.

    Selects, one at a time, the microstate rows that are most linearly
    independent (Gram-Schmidt-style orthogonalization); these rows serve
    as the simplex vertices.

    Parameters
    ----------
    right_eigenvectors : ndarray
        Right eigenvectors of transition matrix

    Returns
    -------
    index : ndarray
        Indices of simplex
    """
    num_micro, num_eigen = right_eigenvectors.shape
    index = np.zeros(num_eigen, 'int')
    # first vertex: row with largest norm
    index[0] = np.argmax([norm(right_eigenvectors[i]) for i in range(num_micro)])
    # Shift all rows so that the first vertex sits at the origin.
    ortho_sys = right_eigenvectors - np.outer(np.ones(num_micro), right_eigenvectors[index[0]])
    for j in range(1, num_eigen):
        temp = ortho_sys[index[j - 1]].copy()
        # Project out the component along the previously selected vertex...
        for l in range(num_micro):
            ortho_sys[l] -= temp * dot(ortho_sys[l], temp)
        # ...then pick the row farthest from the span selected so far.
        dist_list = np.array([norm(ortho_sys[l]) for l in range(num_micro)])
        index[j] = np.argmax(dist_list)
        # Rescale for numerical stability.
        ortho_sys /= dist_list.max()
    return index
def get_chunks(source, chunk_len):
    """Return an iterator over successive ``chunk_len``-sized slices of ``source``."""
    starts = range(0, len(source), chunk_len)
    return map(lambda start: source[start:start + chunk_len], starts)
def releases(self, owner, module):
    """Yield pages of release metadata for the given module."""
    query = {
        self.PMODULE: owner + '-' + module,
        self.PLIMIT: self.max_items,
        self.PSHOW_DELETED: 'true',
        self.PSORT_BY: self.VRELEASE_DATE,
    }
    yield from self._fetch(self.RRELEASES, query)
def _handle_posix(self, i, result, end_range):
    """Translate a POSIX character class at the current position.

    Returns True when a POSIX class was consumed, else False.
    """
    m = i.match(RE_POSIX)
    if m is None:
        return False
    # A POSIX class cannot take part in a character range, so if the
    # previous token looked like the end of a range, escape its `-`.
    if end_range and i.index - 1 >= end_range:
        result[-1] = '\\' + result[-1]
    table = uniprops.POSIX_BYTES if self.is_bytes else uniprops.POSIX
    result.append(uniprops.get_posix_property(m.group(1), table))
    return True
def put(self, src, dst):
    '''Upload a file to HDFS.

    ``src`` is resolved relative to the ``testfiles_path`` supplied in the
    constructor.'''
    local_path = '%s%s' % (self._testfiles_path, src)
    command = [self._hadoop_cmd, 'fs', '-put', local_path, self._full_hdfs_path(dst)]
    return self._getStdOutCmd(command, True)
def init():
    """Run the environment bootstrap tasks for all components (virtualenv, pip)."""
    banner = yellow("# Setting up environment...\n", True)
    print(banner)
    virtualenv.init()
    virtualenv.update_requirements()
    print(green("\n# DONE.", True))
    hint = green("Type ") + green("activate", True) + green(" to enable your virtual environment.")
    print(hint)
def get_savepath(self, url, savepath=None):
    """Evaluate the savepath template with the help of the given url.

    Every ``re_*`` placeholder in the template is substituted with the
    corresponding value derived from ``url`` (domain, subdomain, url
    directory, file name, timestamps, optionally md5-shortened).

    :param str url: url to evaluate the savepath with
    :param savepath: optional template; defaults to the configured
        ``cfg_savepath``
    :return str: the evaluated savepath for the given url
    """
    timestamp = int(time.time())
    if not savepath:
        savepath = self.cfg_savepath
    # lambda is used for lazy evaluation
    savepath = re.sub(re_working_path, lambda match: self.working_path, savepath)
    # Time placeholders.
    savepath = re.sub(re_time_dl, lambda match: SavepathParser.time_replacer(match, timestamp), savepath)
    savepath = re.sub(re_timstamp_dl, str(timestamp), savepath)
    # Domain placeholders (truncated, md5-appended or md5-replaced).
    savepath = re.sub(re_domain, lambda match: UrlExtractor.get_allowed_domain(url, False)[:int(match.group(1))], savepath)
    savepath = re.sub(re_appendmd5_domain, lambda match: SavepathParser.append_md5_if_too_long(UrlExtractor.get_allowed_domain(url, False), int(match.group(1))), savepath)
    savepath = re.sub(re_md5_domain, lambda match: hashlib.md5(UrlExtractor.get_allowed_domain(url, False).encode('utf-8')).hexdigest()[:int(match.group(1))], savepath)
    # Full-domain placeholders.
    savepath = re.sub(re_full_domain, lambda match: UrlExtractor.get_allowed_domain(url)[:int(match.group(1))], savepath)
    savepath = re.sub(re_appendmd5_full_domain, lambda match: SavepathParser.append_md5_if_too_long(UrlExtractor.get_allowed_domain(url), int(match.group(1))), savepath)
    savepath = re.sub(re_md5_full_domain, lambda match: hashlib.md5(UrlExtractor.get_allowed_domain(url).encode('utf-8')).hexdigest()[:int(match.group(1))], savepath)
    # Subdomain placeholders.
    savepath = re.sub(re_subdomains, lambda match: UrlExtractor.get_subdomain(url)[:int(match.group(1))], savepath)
    savepath = re.sub(re_appendmd5_subdomains, lambda match: SavepathParser.append_md5_if_too_long(UrlExtractor.get_subdomain(url), int(match.group(1))), savepath)
    savepath = re.sub(re_md5_subdomains, lambda match: hashlib.md5(UrlExtractor.get_subdomain(url).encode('utf-8')).hexdigest()[:int(match.group(1))], savepath)
    # Url-directory placeholders.
    savepath = re.sub(re_url_dir, lambda match: UrlExtractor.get_url_directory_string(url)[:int(match.group(1))], savepath)
    savepath = re.sub(re_appendmd5_url_dir, lambda match: SavepathParser.append_md5_if_too_long(UrlExtractor.get_url_directory_string(url), int(match.group(1))), savepath)
    savepath = re.sub(re_md5_url_dir, lambda match: hashlib.md5(UrlExtractor.get_url_directory_string(url).encode('utf-8')).hexdigest()[:int(match.group(1))], savepath)
    # Url-file-name placeholders.
    savepath = re.sub(re_url_file, lambda match: UrlExtractor.get_url_file_name(url)[:int(match.group(1))], savepath)
    savepath = re.sub(re_md5_url_file, lambda match: hashlib.md5(UrlExtractor.get_url_file_name(url).encode('utf-8')).hexdigest()[:int(match.group(1))], savepath)
    # The max-length placeholders need the absolute path evaluated so far
    # to know how many characters remain for the file name.
    abs_savepath = self.get_abs_path(savepath)
    savepath = re.sub(re_max_url_file, lambda match: UrlExtractor.get_url_file_name(url)[:SavepathParser.get_max_url_file_name_length(abs_savepath)], savepath)
    savepath = re.sub(re_appendmd5_max_url_file, lambda match: SavepathParser.append_md5_if_too_long(UrlExtractor.get_url_file_name(url), SavepathParser.get_max_url_file_name_length(abs_savepath)), savepath)
    # ensure the savepath doesn't contain any invalid characters
    return SavepathParser.remove_not_allowed_chars(savepath)
def get_contacts_of_client_per_page(self, client_id, per_page=1000, page=1):
    """Fetch a single page of the contacts attached to a client.

    :param client_id: the client id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    filter_params = {'client_id': client_id}
    return self._get_resource_per_page(resource=CONTACTS, per_page=per_page, page=page, params=filter_params)
def get_client_history(self, client):
    """Return the recorded history for the given client as decoded JSON."""
    endpoint = '/clients/{}/history'.format(client)
    return self._request('GET', endpoint).json()
def load(self, filename=None):
    """Load the file and register its name as the ``filename`` attribute.

    An instance may be loaded only once.  When ``filename`` is omitted,
    the class's ``default_filename`` is used instead.

    :raises RuntimeError: if the file is empty.
    """
    assert not self.__flag_loaded, "File can be loaded only once"
    if filename is None:
        filename = self.default_filename
        assert filename is not None, "{0!s} class has no default filename".format(self.__class__.__name__)
    # Convention: trying to open empty file is an error,
    # because it could be of (almost) any type.
    size = os.path.getsize(filename)
    if size == 0:
        raise RuntimeError("Empty file: '{0!s}'".format(filename))
    self._test_magic(filename)  # verify the file signature before parsing
    self._do_load(filename)  # subclass-specific parsing
    self.filename = filename
    self.__flag_loaded = True
def dump_json(d: dict) -> str:
    """Serialize ``d`` to a JSON string, stringifying non-string keys.

    JSON object keys must be strings, so dictionary keys such as tuples
    are converted with ``str()`` before dumping.

    :param d: the dictionary to serialize
    :return: the JSON text, indented with 4 spaces
    """
    import json
    # str() every key; values are passed through unchanged.
    return json.dumps({str(key): value for key, value in d.items()}, indent=4)
def get_stored_procs(db_connection):
    """Return the names of all stored procedures visible on the connection.

    :param db_connection: an open database connection
    :return: list of procedure names (second column of SHOW PROCEDURE STATUS)
    """
    rows = execute_sql("SHOW PROCEDURE STATUS;", db_connection)
    return [row[1] for row in rows]
def ncoef_fmap(order):
    """Expected number of coefficients in a 2D transformation of a given order.

    A full 2D polynomial of degree ``order`` has one coefficient per
    monomial with exponents 0 <= j <= i <= order, i.e. the triangular
    number (order + 1) * (order + 2) / 2.

    Parameters
    ----------
    order : int
        Order of the 2D polynomial transformation.

    Returns
    -------
    ncoef : int
        Expected number of coefficients.
    """
    # Closed form of summing (i + 1) for i in 0..order.
    return (order + 1) * (order + 2) // 2
def create(self, weight, priority, enabled, friendly_name, sip_url):
    """Create a new OriginationUrlInstance.

    :param unicode weight: The value that determines the relative load the URI should receive compared to others with the same priority
    :param unicode priority: The relative importance of the URI
    :param bool enabled: Whether the URL is enabled
    :param unicode friendly_name: A string to describe the resource
    :param unicode sip_url: The SIP address you want Twilio to route your Origination calls to
    :returns: Newly created OriginationUrlInstance
    :rtype: twilio.rest.trunking.v1.trunk.origination_url.OriginationUrlInstance
    """
    request_data = values.of({
        'Weight': weight,
        'Priority': priority,
        'Enabled': enabled,
        'FriendlyName': friendly_name,
        'SipUrl': sip_url,
    })
    payload = self._version.create('POST', self._uri, data=request_data)
    return OriginationUrlInstance(self._version, payload, trunk_sid=self._solution['trunk_sid'])
def append_change_trust_op(self, asset_code, asset_issuer, limit=None, source=None):
    """Append a :class:`ChangeTrust <stellar_base.operation.ChangeTrust>`
    operation to the list of operations.

    :param str asset_code: The asset code for the asset.
    :param str asset_issuer: The issuer address for the asset.
    :param str limit: The limit of the new trustline.
    :param str source: The source address to add the trustline to.
    :return: This builder instance.
    """
    trust_op = operation.ChangeTrust(Asset(asset_code, asset_issuer), limit, source)
    return self.append_op(trust_op)
def _convert_py_number(value):
    """Convert a Python integer into the equivalent C object.

    Attempts the smallest possible conversion first: int, then long,
    falling back to double.
    """
    for factory in (c_uamqp.int_value, c_uamqp.long_value):
        try:
            return factory(value)
        except OverflowError:
            continue
    return c_uamqp.double_value(value)
def Registry(address='https://index.docker.io', **kwargs):
    """Build a client for the Docker registry at ``address``.

    Tries the V1 API first; if the V1 ping fails with a RegistryException,
    falls back to the V2 API.

    :return: a pinged V1 or V2 registry client; may be unverified (or None
        if construction itself failed) when certificate validation raised
        an OSError.
    """
    registry = None
    try:
        try:
            registry = V1(address, **kwargs)
            registry.ping()
        except RegistryException:
            # Server does not speak V1; try the V2 protocol instead.
            registry = V2(address, **kwargs)
            registry.ping()
    except OSError:
        # Most likely a TLS certificate problem during ping; return the
        # client anyway and let the caller decide how to proceed.
        logger.warning('Was unable to verify certs for a registry @ {0}. ' 'Will not be able to interact with it for any operations until the certs can be validated.'.format(address))
    return registry
def _get_timeout(self, timeout):
    """Normalize ``timeout`` into a :class:`urllib3.util.Timeout` instance."""
    if timeout is _Default:
        return self.timeout.clone()
    if isinstance(timeout, Timeout):
        return timeout.clone()
    # A bare int/float was passed; kept for backwards compatibility and
    # may be removed later.
    return Timeout.from_float(timeout)
def get_default_config_help(self):
    """Return the config help mapping, extended with this handler's options."""
    config = super(DatadogHandler, self).get_default_config_help()
    config['api_key'] = 'Datadog API key'
    config['queue_size'] = 'Number of metrics to queue before send'
    return config
def uni_to_beta(text):
    """Convert unicode text to a betacode equivalent.

    This method can handle tónos or oxeîa characters in the input.

    Args:
        text: The text to convert to betacode. This text does not have to
            all be Greek polytonic text, and only Greek characters will be
            converted. Note that in this case, you cannot convert to beta
            and then back to unicode.

    Returns:
        The betacode equivalent of the inputted text where applicable.
    """
    mapping = _UNICODE_MAP
    # Characters without a betacode mapping pass through unchanged.
    return ''.join(mapping.get(ch, ch) for ch in text)
def enable_cloud_integration(self, id, **kwargs):  # noqa: E501
    """Enable a specific cloud integration  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.enable_cloud_integration(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerCloudIntegration
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info
    # variant; async_req is forwarded through kwargs.
    return self.enable_cloud_integration_with_http_info(id, **kwargs)  # noqa: E501
def wrap_parser_error(self, data, renderer_context):
    """Convert parser errors to the JSON API Error format.

    Parser errors have a status code of 400, like field errors, but have
    the same native format as generic errors. Also, the detail message is
    often specific to the input, so the error is listed as a 'detail'
    rather than a 'title'.

    :raises WrapperNotApplicable: when the response is not a plain
        400 ``{'detail': ...}`` parser error.
    """
    response = renderer_context.get("response", None)
    status_code = response and response.status_code
    if status_code != 400:
        raise WrapperNotApplicable('Status code must be 400.')
    if list(data.keys()) != ['detail']:
        raise WrapperNotApplicable('Data must only have "detail" key.')
    # Probably a parser error, unless `detail` is a valid field
    view = renderer_context.get("view", None)
    model = self.model_from_obj(view)
    # NOTE(review): Model._meta.get_all_field_names() was removed in newer
    # Django versions -- confirm the supported Django range.
    if 'detail' in model._meta.get_all_field_names():
        raise WrapperNotApplicable()
    return self.wrap_error(data, renderer_context, keys_are_fields=False, issue_is_title=False)
def get_next_valid_day(self, timestamp):
    """Get next valid day for timerange.

    :param timestamp: time we compute from
    :type timestamp: int
    :return: timestamp of the next valid day (midnight) in LOCAL time,
        or None when neither today nor the computed start day is valid
    :rtype: int | None
    """
    if self.get_next_future_timerange_valid(timestamp) is None:
        # this day is finished, we check for next period
        (start_time, _) = self.get_start_and_end_time(get_day(timestamp) + 86400)
    else:
        (start_time, _) = self.get_start_and_end_time(timestamp)
    if timestamp <= start_time:
        # The period starts in the future: its first day is the answer.
        return get_day(start_time)
    if self.is_time_day_valid(timestamp):
        # We are already inside a valid day.
        return get_day(timestamp)
    return None
def _init_setup_console_data(self, order: str = "C") -> None:
    """Setup numpy arrays over libtcod data buffers.

    :param order: memory order passed through to ``verify_order``.
    """
    global _root_console
    self._key_color = None
    if self.console_c == ffi.NULL:
        # A NULL C pointer means this object wraps the root console.
        _root_console = self
        self._console_data = lib.TCOD_ctx.root
    else:
        self._console_data = ffi.cast("struct TCOD_Console*", self.console_c)
    # Zero-copy numpy view over the console's tile buffer.
    self._tiles = np.frombuffer(ffi.buffer(self._console_data.tiles[0:self.width * self.height]), dtype=self.DTYPE,).reshape((self.height, self.width))
    self._order = tcod._internal.verify_order(order)
def upload(self):
    '''Plain (single-request) upload mode.

    An upload performed this way cannot be interrupted mid-transfer, but
    since this mode is only used for small files, the practical impact is
    minor.'''
    info = pcs.upload(self.cookie, self.row[SOURCEPATH_COL], self.row[PATH_COL], self.upload_mode)
    if info:
        self.emit('uploaded', self.row[FID_COL])
    else:
        self.emit('network-error', self.row[FID_COL])
def _valid_choices(cla55: type) -> Dict[str, str]:
    """Return a mapping {registered_name -> fully qualified subclass name}
    for the registered subclasses of ``cla55``."""
    if cla55 not in Registrable._registry:
        raise ValueError(f"{cla55} is not a known Registrable class")
    choices: Dict[str, str] = {}
    for registered_name, subclass in Registrable._registry[cla55].items():
        # The seq2seq/seq2vec wrapper classes hide the real module class;
        # unwrap them before resolving the full name.
        if isinstance(subclass, (_Seq2SeqWrapper, _Seq2VecWrapper)):
            subclass = subclass._module_class
        choices[registered_name] = full_name(subclass)
    return choices
def _reset(self):
    """Reset all registry state back to its pristine form.

    This method is for use by tests only!
    """
    self.bundles = AttrDict()
    self._bundles = _DeferredBundleFunctionsStore()
    self.babel_bundle = None
    self.env = None
    self.extensions = AttrDict()
    self.services = AttrDict()
    self._deferred_functions = []
    # Re-arm the one-time initialization flags.
    self._initialized = False
    self._models_initialized = False
    self._services_initialized = False
    self._services_registry = {}
    self._shell_ctx = {}
def scoped(self, func):
    """Decorator that runs ``func`` inside the scope named by its ``scope``
    keyword argument, falling back to the factory's default scope."""
    @wraps(func)
    def _scoped_call(*args, **kwargs):
        target_scope = kwargs.get("scope", self.__factory__.default_scope)
        with self.scoped_to(target_scope):
            return func(*args, **kwargs)
    return _scoped_call
def _operator_norms ( L ) :
"""Get operator norms if needed .
Parameters
L : sequence of ` Operator ` or float
The operators or the norms of the operators that are used in the
` douglas _ rachford _ pd ` method . For ` Operator ` entries , the norm
is computed with ` ` Operator . norm ( estimate = True ) ` ` .""" | L_norms = [ ]
for Li in L :
if np . isscalar ( Li ) :
L_norms . append ( float ( Li ) )
elif isinstance ( Li , Operator ) :
L_norms . append ( Li . norm ( estimate = True ) )
else :
raise TypeError ( 'invalid entry {!r} in `L`' . format ( Li ) )
return L_norms |
def display(self):
    """Whether this object should be displayed in documentation.

    This attribute depends on the configuration options given in
    :confval:`autoapi_options`.

    :type: bool
    """
    member_filters = (
        (self.is_undoc_member, "undoc-members"),
        (self.is_private_member, "private-members"),
        (self.is_special_member, "special-members"),
    )
    # Hidden when any member category applies whose option is not enabled.
    return not any(flag and option not in self.options
                   for flag, option in member_filters)
def auth(self):
    """Tuple of (username, password).

    When ``use_keyring`` is enabled, the password is looked up in (and, if
    missing, stored into) the local keyring instead of being read from the
    configuration file.
    """
    username = self._settings["username"]
    if not username:
        raise ValueError("Username was not configured in %s" % CONFIG_FILE)
    if not self._settings["use_keyring"]:
        return self._settings["username"], self._settings["password"]
    password = self.keyring_get_password(username)
    if not password:
        # First use: store the password, then read it back.
        self.keyring_set_password(username)
        password = self.keyring_get_password(username)
    return self._settings["username"], password
def discover(service="ssdp:all", timeout=1, retries=2, ipAddress="239.255.255.250", port=1900):
    """Discovers UPnP devices in the local network.

    Try to discover all devices in the local network which do support UPnP.
    The discovery process can fail for various reasons and it is recommended
    to do at least two discoveries, which you can specify with the
    ``retries`` parameter.

    The default ``service`` parameter tries to address all devices; also if
    you know which kind of service type you are looking for you should set
    it, as some devices do not respond, or respond differently, otherwise.

    :param service: the service type or list of service types of devices you look for
    :type service: str or list[str]
    :param float timeout: the socket timeout for each try
    :param int retries: how often should be a discovery request send
    :param str ipAddress: the multicast ip address to use
    :param int port: the port to use
    :return: a list of DiscoveryResponse objects or empty if no device was found
    :rtype: list[DiscoveryResponse]

    Example::

        results = discover()
        for result in results:
            print("Host: " + result.locationHost + " Port: " + result.locationPort +
                  " Device definitions: " + result.location)

    .. seealso::

        :class:`~simpletr64.DiscoveryResponse`, :meth:`~simpletr64.Discover.discoverParticularHost`
    """
    # NOTE: this changes the process-wide default timeout for new sockets.
    socket.setdefaulttimeout(timeout)
    messages = []
    if isinstance(service, str):
        services = [service]
    elif isinstance(service, list):
        services = service
    # NOTE(review): any other `service` type leaves `services` unbound and
    # raises NameError below -- confirm callers pass only str or list.
    for service in services:
        # One M-SEARCH multicast request per requested service type.
        message = 'M-SEARCH * HTTP/1.1\r\nMX: 5\r\nMAN: "ssdp:discover"\r\nHOST: ' + ipAddress + ':' + str(port) + '\r\n'
        message += "ST: " + service + "\r\n\r\n"
        messages.append(message)
    responses = {}
    for _ in range(retries):
        # setup the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
        # noinspection PyAssignmentToLoopOrWithParameter
        for _ in range(2):
            # send the messages with different service types
            for message in messages:
                # send message more often to make sure all devices will get it
                sock.sendto(message.encode('utf-8'), (ipAddress, port))
        while True:
            try:
                # read the message until timeout
                data = sock.recv(1024)
            except socket.timeout:
                break
            else:
                # no time out, read the response data and create response object
                response = DiscoveryResponse(data)
                # filter duplicated responses
                responses[response.location] = response
    # return a list of all responses
    return list(responses.values())
def _format_ase2clusgeo ( obj , all_atomtypes = None ) :
"""Takes an ase Atoms object and returns numpy arrays and integers
which are read by the internal clusgeo . Apos is currently a flattened
out numpy array
Args :
obj ( ) :
all _ atomtypes ( ) :
sort ( ) :""" | # atoms metadata
totalAN = len ( obj )
if all_atomtypes is not None :
atomtype_set = set ( all_atomtypes )
else :
atomtype_set = set ( obj . get_atomic_numbers ( ) )
atomtype_lst = np . sort ( list ( atomtype_set ) )
n_atoms_per_type_lst = [ ]
pos_lst = [ ]
for atomtype in atomtype_lst :
condition = obj . get_atomic_numbers ( ) == atomtype
pos_onetype = obj . get_positions ( ) [ condition ]
n_onetype = pos_onetype . shape [ 0 ]
# store data in lists
pos_lst . append ( pos_onetype )
n_atoms_per_type_lst . append ( n_onetype )
typeNs = n_atoms_per_type_lst
Ntypes = len ( n_atoms_per_type_lst )
atomtype_lst
Apos = np . concatenate ( pos_lst ) . ravel ( )
return Apos , typeNs , Ntypes , atomtype_lst , totalAN |
def load_file_or_hdu(filename):
    """Return an HDUList for ``filename``.

    If ``filename`` is already an HDUList it is returned unchanged;
    otherwise the file is opened from disk.

    Parameters
    ----------
    filename : str or HDUList
        File or HDU to be loaded

    Returns
    -------
    hdulist : HDUList
    """
    if isinstance(filename, fits.HDUList):
        return filename
    return fits.open(filename, ignore_missing_end=True)
def placeOrder(self, contract: Contract, order: Order) -> Trade:
    """Place a new order or modify an existing order.

    Returns a Trade that is kept live updated with
    status changes, fills, etc.

    Args:
        contract: Contract to use for order.
        order: The order to be placed.
    """
    # Reuse the order's id when modifying; request a fresh one otherwise.
    orderId = order.orderId or self.client.getReqId()
    self.client.placeOrder(orderId, contract, order)
    now = datetime.datetime.now(datetime.timezone.utc)
    key = self.wrapper.orderKey(self.wrapper.clientId, orderId, order.permId)
    trade = self.wrapper.trades.get(key)
    if trade:
        # this is a modification of an existing order
        assert trade.orderStatus.status not in OrderStatus.DoneStates
        logEntry = TradeLogEntry(now, trade.orderStatus.status, 'Modify')
        trade.log.append(logEntry)
        self._logger.info(f'placeOrder: Modify order {trade}')
        trade.modifyEvent.emit(trade)
        self.orderModifyEvent.emit(trade)
    else:
        # this is a new order
        order.clientId = self.wrapper.clientId
        order.orderId = orderId
        orderStatus = OrderStatus(status=OrderStatus.PendingSubmit)
        logEntry = TradeLogEntry(now, orderStatus.status, '')
        trade = Trade(contract, order, orderStatus, [], [logEntry])
        self.wrapper.trades[key] = trade
        self._logger.info(f'placeOrder: New order {trade}')
        self.newOrderEvent.emit(trade)
    return trade
def GetAttributes(self, urns, age=NEWEST_TIME):
    """Retrieves all the attributes for all the urns."""
    unique_urns = {utils.SmartUnicode(u) for u in urns}
    # Urns not present in the cache we need to get from the database.
    to_read = {}
    for urn in unique_urns:
        to_read[urn] = self._MakeCacheInvariant(urn, age)
    if not to_read:
        return
    results = data_store.DB.MultiResolvePrefix(
        to_read, AFF4_PREFIXES,
        timestamp=self.ParseAgeSpecification(age), limit=None)
    for subject, values in results:
        # Ensure the values are sorted, newest timestamp first.
        values.sort(key=lambda item: item[-1], reverse=True)
        yield utils.SmartUnicode(subject), values
def swipe_right(self, width: int = 1080, length: int = 1920) -> None:
    '''Swipe right: drag from 20% to 80% of the width at mid height.'''
    x_start = 0.2 * width
    x_end = 0.8 * width
    y_mid = 0.5 * length
    self.swipe(x_start, y_mid, x_end, y_mid)
def compare_csv_timeseries_files(file1, file2, header=True):
    """Compare two timeseries CSV files for equality.

    Delegates to the decimal comparison helper with timeseries mode on.
    """
    result = compare_csv_decimal_files(file1, file2, header, True)
    return result
def _prepare_sets ( self , sets ) :
"""Return all sets in self . _ lazy _ collection [ ' sets ' ] to be ready to be used
to intersect them . Called by _ get _ final _ set , to use in subclasses .
Must return a tuple with a set of redis set keys , and another with
new temporary keys to drop at the end of _ get _ final _ set""" | final_sets = set ( )
tmp_keys = set ( )
for set_ in sets :
if isinstance ( set_ , str ) :
final_sets . add ( set_ )
elif isinstance ( set_ , ParsedFilter ) :
for index_key , key_type , is_tmp in set_ . index . get_filtered_keys ( set_ . suffix , accepted_key_types = self . _accepted_key_types , * ( set_ . extra_field_parts + [ set_ . value ] ) ) :
if key_type not in self . _accepted_key_types :
raise ValueError ( 'The index key returned by the index %s is not valid' % ( set_ . index . __class__ . __name__ ) )
final_sets . add ( index_key )
if is_tmp :
tmp_keys . add ( index_key )
else :
raise ValueError ( 'Invalid filter type' )
return final_sets , tmp_keys |
def from_json(json_data):
    """Build an instance from a JSON string produced by ``to_json()``
    (or from an already-decoded dict).
    """
    # If we have a string, decode it; otherwise assume it's already decoded.
    if isinstance(json_data, str):
        data = json.loads(json_data)
    else:
        data = json_data
    # NOTE(review): this constructs a Cache although the docstring speaks
    # of OAuth2 -- presumably intentional; verify against the caller.
    return Cache(cache_dir=data.get('cache_dir', None),
                 max_age=data.get('max_age', None))
def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str] = None) -> Vertex:
    """Takes the inverse sin of a vertex, Arcsin(vertex)

    :param input_vertex: the vertex
    """
    casted = cast_to_double_vertex(input_vertex)
    return Double(context.jvm_view().ArcSinVertex, label, casted)
def read_persistent_volume(self, name, **kwargs):
    """read the specified PersistentVolume

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_persistent_volume(name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PersistentVolume (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Deprecated.
    :param bool export: Should this value be exported. Deprecated.
    :return: V1PersistentVolume
        If the method is called asynchronously, returns the request thread.
    """
    # Always request data only; the *_with_http_info variant returns the
    # request thread itself when async_req=True, so a single call covers
    # both the synchronous and asynchronous cases.
    kwargs['_return_http_data_only'] = True
    return self.read_persistent_volume_with_http_info(name, **kwargs)
def document(self, result):
    """Build dict for MongoDB, expanding result keys as we go."""
    self._add_meta(result)
    expander = JsonWalker(JsonWalker.value_json, JsonWalker.dict_expand)
    return expander.walk(result)
def print_locale_info(out=stderr):
    """Print locale info."""
    # Report the relevant locale environment variables, then the
    # locale actually in effect.
    keys = ("LANGUAGE", "LC_ALL", "LC_CTYPE", "LANG")
    for key in keys:
        print_env_info(key, out=out)
    print(_("Default locale:"), i18n.get_locale(), file=out)
def jdbc_connection_pool(self, name, res_type, ds_classname, props):
    """Domain JDBC connection pool.

    :param str name: Resource name.
    :param str res_type: Resource type.
    :param str ds_classname: Data source class name.
    :param dict props: Connection pool properties.
    :rtype: JDBCConnectionPool
    """
    pool = JDBCConnectionPool(self.__endpoint, name, res_type,
                              ds_classname, props)
    return pool
def set_membind(nodemask):
    """Sets the memory allocation mask.

    The thread will only allocate memory from the nodes set in nodemask.

    @param nodemask: node mask
    @type nodemask: C{set}
    """
    numa_mask = set_to_numa_nodemask(nodemask)
    bm = bitmask_t()
    bm.maskp = cast(byref(numa_mask), POINTER(c_ulong))
    # The bitmask size is expressed in bits, hence the * 8.
    bm.size = sizeof(nodemask_t) * 8
    libnuma.numa_set_membind(byref(bm))
def get_os_dist_info():
    """Return ``(dist_name, dist_version)`` for the current OS
    distribution, with the name lower-cased, or ``(None, None)`` when
    the information is unavailable.
    """
    dist_name = None
    dist_version_str = None
    try:
        # platform.dist() was deprecated in Python 3.5 and REMOVED in 3.8;
        # calling it on a modern interpreter raises AttributeError.
        distribution = platform.dist()
        dist_name = distribution[0].lower()
        dist_version_str = distribution[1]
    except AttributeError:
        try:
            # Python 3.10+ replacement: parse /etc/os-release.
            info = platform.freedesktop_os_release()
            dist_name = info.get('ID', '').lower()
            dist_version_str = info.get('VERSION_ID', '')
        except (AttributeError, OSError):
            # No os-release file (non-Linux) or Python 3.8/3.9.
            pass
    if dist_name and dist_version_str:
        return dist_name, dist_version_str
    return None, None
def det_refpoint(self, angle):
    """Return the detector reference point position at ``angle``.

    For an angle ``phi``, the detector position is given by::

        det_ref(phi) = translation +
                       rot_matrix(phi) * (det_rad * src_to_det_init) +
                       (offset_along_axis + pitch * phi) * axis

    where ``src_to_det_init`` is the initial unit vector pointing
    from source to detector.

    Parameters
    ----------
    angle : float or `array-like`
        Angle(s) in radians describing the counter-clockwise
        rotation of the detector.

    Returns
    -------
    refpt : `numpy.ndarray`
        Vector(s) pointing from the origin to the detector reference
        point. If ``angle`` is a single parameter, the returned array
        has shape ``(3,)``, otherwise ``angle.shape + (3,)``.

    See Also
    --------
    src_position

    Examples
    --------
    With default arguments, the detector starts at ``det_rad * e_y``
    and rotates to ``det_rad * (-e_x) + pitch / 4 * e_z`` at
    90 degrees:

    >>> apart = odl.uniform_partition(0, 4 * np.pi, 10)
    >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
    >>> geom = ConeFlatGeometry(
    ...     apart, dpart, src_radius=5, det_radius=10, pitch=2)
    >>> geom.det_refpoint(0)
    array([  0.,  10.,   0.])
    >>> np.allclose(geom.det_refpoint(np.pi / 2), [-10, 0, 0.5])
    True

    The method is vectorized, i.e., it can be called with multiple
    angles at once (or an n-dimensional array of angles):

    >>> points = geom.det_refpoint([0, np.pi / 2])
    >>> np.allclose(points[0], [0, 10, 0])
    True
    >>> np.allclose(points[1], [-10, 0, 0.5])
    True
    >>> geom.det_refpoint(np.zeros((4, 5))).shape
    (4, 5, 3)
    """
    # Remember whether the input was scalar so the extra leading axis
    # introduced by ndmin=1 can be squeezed away at the end.
    squeeze_out = (np.shape(angle) == ())
    angle = np.array(angle, dtype=float, copy=False, ndmin=1)
    rot_matrix = self.rotation_matrix(angle)
    extra_dims = angle.ndim
    # Initial vector from center of rotation to detector.
    # It can be computed this way since source and detector are at
    # maximum distance, i.e. the connecting line passes the center.
    center_to_det_init = self.det_radius * self.src_to_det_init
    # `circle_component` has shape (a, ndim)
    circle_component = rot_matrix.dot(center_to_det_init)
    # Increment along the rotation axis according to pitch and
    # offset_along_axis
    # `shift_along_axis` has shape angles.shape
    shift_along_axis = (self.offset_along_axis + self.pitch * angle / (2 * np.pi))
    # Create outer product of `shift_along_axis` and `axis`, resulting
    # in shape (a, ndim)
    pitch_component = np.multiply.outer(shift_along_axis, self.axis)
    # Broadcast translation along extra dimensions so it adds cleanly
    # to the (angles.shape + (3,))-shaped components.
    transl_slc = (None,) * extra_dims + (slice(None),)
    refpt = (self.translation[transl_slc] + circle_component + pitch_component)
    if squeeze_out:
        refpt = refpt.squeeze()
    return refpt
def log(self, msg, error=False):
    """Log message helper.

    Writes ``msg`` plus a newline to stdout, or to stderr when
    ``error`` is true.
    """
    stream = self.stderr if error else self.stdout
    stream.write(msg)
    stream.write('\n')
def load_models(*chain, **kwargs):
    """Decorator to load a chain of models from the given parameters. This works just like
    :func:`load_model` and accepts the same parameters, with some small differences.

    :param chain: The chain is a list of tuples of (``model``, ``attributes``, ``parameter``).
        Lists and tuples can be used interchangeably. All retrieved instances are passed as
        parameters to the decorated function
    :param permission: Same as in :func:`load_model`, except
        :meth:`~coaster.sqlalchemy.PermissionMixin.permissions` is called on every instance
        in the chain and the retrieved permissions are passed as the second parameter to the
        next instance in the chain. This allows later instances to revoke permissions granted
        by earlier instances. As an example, if a URL represents a hierarchy such as
        ``/<page>/<comment>``, the ``page`` can assign ``edit`` and ``delete`` permissions,
        while the ``comment`` can revoke ``edit`` and retain ``delete`` if the current user
        owns the page but not the comment

    In the following example, load_models loads a Folder with a name matching the name in the
    URL, then loads a Page with a matching name and with the just-loaded Folder as parent.
    If the Page provides a 'view' permission to the current user, the decorated
    function is called::

        @app.route('/<folder_name>/<page_name>')
        @load_models(
            (Folder, {'name': 'folder_name'}, 'folder'),
            (Page, {'name': 'page_name', 'parent': 'folder'}, 'page'),
            permission='view')
        def show_page(folder, page):
            return render_template('page.html', folder=folder, page=page)
    """
    def inner(f):
        @wraps(f)
        def decorated_function(*args, **kw):
            permissions = None
            permission_required = kwargs.get('permission')
            url_check_attributes = kwargs.get('urlcheck', [])
            # Normalize the permission spec to a set (or None).
            if isinstance(permission_required, six.string_types):
                permission_required = set([permission_required])
            elif permission_required is not None:
                permission_required = set(permission_required)
            result = {}
            for models, attributes, parameter in chain:
                if not isinstance(models, (list, tuple)):
                    models = (models,)
                item = None
                for model in models:
                    query = model.query
                    url_check = False
                    url_check_paramvalues = {}
                    for k, v in attributes.items():
                        if callable(v):
                            # Callable attribute: computed from already-loaded
                            # instances and the view kwargs.
                            query = query.filter_by(**{k: v(result, kw)})
                        else:
                            if '.' in v:
                                # Dotted lookup into a previously loaded instance.
                                first, attrs = v.split('.', 1)
                                val = result.get(first)
                                for attr in attrs.split('.'):
                                    val = getattr(val, attr)
                            else:
                                # Prefer previously loaded instances over view kwargs.
                                val = result.get(v, kw.get(v))
                            query = query.filter_by(**{k: val})
                        if k in url_check_attributes:
                            url_check = True
                            url_check_paramvalues[k] = (v, val)
                    item = query.first()
                    if item is not None:  # We found it, so don't look in additional models
                        break
                if item is None:
                    abort(404)
                if hasattr(item, 'redirect_view_args'):  # This item is a redirect object. Redirect to destination
                    view_args = dict(request.view_args)
                    view_args.update(item.redirect_view_args())
                    location = url_for(request.endpoint, **view_args)
                    if request.query_string:
                        location = location + u'?' + request.query_string.decode()
                    return redirect(location, code=307)
                if permission_required:
                    # Permissions are inherited down the chain so later items
                    # can revoke what earlier items granted.
                    permissions = item.permissions(current_auth.actor, inherited=permissions)
                    addlperms = kwargs.get('addlperms') or []
                    if callable(addlperms):
                        addlperms = addlperms() or []
                    permissions.update(addlperms)
                if g:  # XXX: Deprecated
                    g.permissions = permissions
                if request:
                    add_auth_attribute('permissions', permissions)
                if url_check and request.method == 'GET':  # Only do urlcheck redirects on GET requests
                    url_redirect = False
                    view_args = None
                    for k, v in url_check_paramvalues.items():
                        uparam, uvalue = v
                        if getattr(item, k) != uvalue:
                            # Canonicalize the URL to the item's current value.
                            url_redirect = True
                            if view_args is None:
                                view_args = dict(request.view_args)
                            view_args[uparam] = getattr(item, k)
                    if url_redirect:
                        location = url_for(request.endpoint, **view_args)
                        if request.query_string:
                            location = location + u'?' + request.query_string.decode()
                        return redirect(location, code=302)
                if parameter.startswith('g.'):
                    # 'g.name' also stores the instance on flask.g.
                    parameter = parameter[2:]
                    setattr(g, parameter, item)
                result[parameter] = item
            if permission_required and not permission_required & permissions:
                abort(403)
            if kwargs.get('kwargs'):
                return f(*args, kwargs=kw, **result)
            else:
                return f(*args, **result)
        return decorated_function
    return inner
def get_snapshot_by_version(obj, version=0):
    """Get a snapshot by version.

    Snapshot versions begin with `0`, because this is the first index of
    the storage, which is a list.

    :param obj: Content object
    :param version: The index position of the snapshot in the storage
    :returns: Snapshot at the given index position, or None if the
        version is negative or out of range
    """
    if version < 0:
        return None
    snapshots = get_snapshots(obj)
    if version >= len(snapshots):
        return None
    return snapshots[version]
def finish(self):
    """Parse the buffered response body, rewrite its URLs, write the
    result to the wrapped request, and finish the wrapped request."""
    buffered = ''.join(self._buffer)
    css = CSSParser().parseString(buffered)
    replaceUrls(css, self._replace)
    self.request.write(css.cssText)
    return self.request.finish()
def key_tuple_value_nested_generator(dict_obj):
    """Recursively iterate over key-tuple-value pairs of nested dictionaries.

    Parameters
    ----------
    dict_obj : dict
        The outer-most dict to iterate on.

    Returns
    -------
    generator
        A generator over key-tuple-value pairs in all nested dictionaries.

    Example
    -------
    >>> dicti = {'a': 1, 'b': {'c': 3, 'd': 4}}
    >>> print(sorted(list(key_tuple_value_nested_generator(dicti))))
    [(('a',), 1), (('b', 'c'), 3), (('b', 'd'), 4)]
    """
    for key, val in dict_obj.items():
        if not isinstance(val, dict):
            yield (key,), val
        else:
            # Prepend this key to every key-tuple from the nested dict.
            for sub_keys, sub_val in key_tuple_value_nested_generator(val):
                yield (key,) + sub_keys, sub_val
def get_short_status(self, hosts, services):
    """Get the short status of this host.

    :return: "O", "W", "C", "U", or "n/a" based on service state_id or
        business_rule state
    :rtype: str
    """
    mapping = {0: "O", 1: "W", 2: "C", 3: "U", 4: "N"}
    if self.got_business_rule:
        state = self.business_rule.get_state(hosts, services)
    else:
        state = self.state_id
    return mapping.get(state, "n/a")
def load_rf(freq="M"):
    """Build a risk-free rate return series using 3-month US T-bill yields.

    The 3-Month Treasury Bill: Secondary Market Rate from the Federal
    Reserve (a yield) is converted to a total return. See 'Methodology'
    for details.

    The time series should closely mimic returns of the BofA Merrill Lynch
    US Treasury Bill (3M) (Local Total Return) index.

    Parameters
    ----------
    freq : str
        A single-character frequency code from "DWMQA" (case-insensitive);
        the returned series is resampled to this frequency.
        NOTE(review): the original docstring also described sequence/set
        inputs returning a dict of DataFrames, but the implementation below
        only accepts a single character -- confirm intended contract.

    Methodology
    -----------
    The Federal Reserve publishes a daily chart of Selected Interest Rates
    (release H.15; www.federalreserve.gov/releases/h15/). As with a yield
    curve, some yields are interpolated from recent issues because Treasury
    auctions do not occur daily.

    While the de-annualized ex-ante yield itself is a fairly good tracker
    of the day's total return, it is not perfect and can exhibit
    non-negligible error in periods of volatile short rates. The purpose of
    this function is to convert yields to total returns for 3-month
    T-bills. It is a straightforward process given that these are discount
    (zero-coupon) securities. It consists of buying a 3-month bond at the
    beginning of each month, then amortizing that bond throughout the month
    to back into the price of a <3-month tenor bond.

    The source data (pulled from fred.stlouisfed.org) is quoted on a
    discount basis. (See footnote 4 from release H.15.) This is converted
    to a bond-equivalent yield (BEY) and then translated to a hypothetical
    daily total return.

    The process largely follows Morningstar's published Return Calculation
    of U.S. Treasury Constant Maturity Indices:

    - At the beginning of each month a bill is purchased at the prior
      month-end price, and daily returns in the month reflect the change in
      daily valuation of this bill
    - If t is not a business day, its yield is the yield of the prior
      business day.
    - At each day during the month, the price of a 3-month bill purchased
      on the final calendar day of the previous month is computed.
    - Month-end pricing is unique. At each month-end date, there are
      effectively two bonds and two prices. The first is the bond
      hypothetically purchased on the final day of the prior month with 2m
      remaining to maturity, and the second is a new-issue bond purchased
      that day with 3m to maturity. The former is used as the numerator to
      compute that day's total return, while the latter is used as the
      denominator to compute the next day's (1st day of next month) total
      return.

    See also
    --------
    FRED: 3-Month Treasury Bill: Secondary Market Rate (DTB3)
        https://fred.stlouisfed.org/series/DTB3
    McGraw-Hill/Irwin, Interest Rates, 2008.
        https://people.ucsc.edu/~lbaum/econ80h/LS-Chap009.pdf
    Morningstar, Return Calculation of U.S. Treasury Constant Maturity
    Indices, September 2008.
    """
    freqs = "DWMQA"
    freq = freq.upper()
    if freq not in freqs:
        raise ValueError("`freq` must be either a single element or subset" " from %s, case-insensitive" % freqs)
    # Load daily 3-Month Treasury Bill: Secondary Market Rate.
    # Note that this is on discount basis and will be converted to BEY.
    # Periodicity is daily.
    rates = (pdr.DataReader("DTB3", "fred", DSTART).mul(0.01).asfreq("D", method="ffill").fillna(method="ffill").squeeze())
    # Algebra doesn't 'work' on DateOffsets, don't simplify here!
    minus_one_month = offsets.MonthEnd(-1)
    plus_three_months = offsets.MonthEnd(3)
    trigger = rates.index.is_month_end
    # Days to maturity for the bill bought at the prior month end (old)
    # and the bill bought today (new, only relevant on month ends).
    dtm_old = rates.index + minus_one_month + plus_three_months - rates.index
    dtm_new = (rates.index.where(trigger, rates.index + minus_one_month) + plus_three_months - rates.index)
    # This does 2 things in one step:
    # (1) convert discount yield to BEY
    # (2) get the price at that BEY and days to maturity
    # The two equations are simplified
    # See https://people.ucsc.edu/~lbaum/econ80h/LS-Chap009.pdf
    p_old = (100 / 360) * (360 - rates * dtm_old.days)
    p_new = (100 / 360) * (360 - rates * dtm_new.days)
    # Month-end days use the new-issue price change; others use the old.
    res = p_old.pct_change().where(trigger, p_new.pct_change()).dropna()
    # TODO: For purpose of using in TSeries, we should drop upsampled
    # periods where we don't have the full period constituents.
    return res.add(1.0).resample(freq).prod().sub(1.0)
def touch(self, mode=0o666, exist_ok=True):
    """Create this file with the given access mode, if it doesn't exist.

    Based on:
    https://github.com/python/cpython/blob/master/Lib/pathlib.py)
    """
    if exist_ok:
        # First try to bump the modification time, like GNU touch does
        # (via the UTIME_NOW option of the utimensat()/futimens() calls).
        try:
            os.utime(self, None)
        except OSError:
            # File missing (or not touchable): fall through and create it.
            pass
        else:
            return
    open_flags = os.O_CREAT | os.O_WRONLY
    if not exist_ok:
        open_flags |= os.O_EXCL
    os.close(os.open(self, open_flags, mode))
def ast2expr(ast):
    """Convert an abstract syntax tree to an Expression."""
    tag = ast[0]
    if tag == 'const':
        return _CONSTS[ast[1]]
    if tag == 'var':
        return exprvar(ast[1], ast[2])
    # Operator node: recursively convert the operands.
    operands = [ast2expr(sub) for sub in ast[1:]]
    return ASTOPS[tag](*operands, simplify=False)
def discover_package_doc_dir(initial_dir):
    """Discover the ``doc/`` dir of a package given an initial directory.

    Parameters
    ----------
    initial_dir : `str`
        The initial directory to search from. In practice, this is often
        the directory that the user is running the package-docs CLI from.
        This directory needs to be somewhere inside the package's
        repository.

    Returns
    -------
    root_dir : `str`
        The root documentation directory (``doc/``), containing ``conf.py``.

    Raises
    ------
    FileNotFoundError
        Raised if a ``conf.py`` file is not found in the initial directory,
        or any parents, or in a ``doc/`` subdirectory.
    """
    # Create an absolute Path to work with
    initial_dir = pathlib.Path(initial_dir).resolve()

    # Check if this is the doc/ dir already with a conf.py
    if _has_conf_py(initial_dir):
        return str(initial_dir)

    # Search for a doc/ directory in cwd (this covers the case of running
    # the CLI from the root of a repository). is_dir() implies existence.
    doc_dir = initial_dir / 'doc'
    if doc_dir.is_dir() and _has_conf_py(doc_dir):
        return str(doc_dir)

    # Search upwards until a conf.py is found. _search_parents raises
    # FileNotFoundError itself, so there is no need to catch and re-raise.
    return str(_search_parents(initial_dir))
def list_upgrades(refresh=True, **kwargs):
    '''Check whether or not an upgrade is available for all packages

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_upgrades
    '''
    # sample output of 'xbps-install -un':
    #   fuse-2.9.4_4 update i686 http://repo.voidlinux.eu/current 298133 91688
    #   xtools-0.34_1 update noarch http://repo.voidlinux.eu/current 21424 10752
    # Refresh repo index before checking for latest version available
    if salt.utils.data.is_true(refresh):
        refresh_db()
    upgrades = {}
    # retrieve list of updatable packages
    out = __salt__['cmd.run']('xbps-install -un', output_loglevel='trace')
    for line in out.splitlines():
        if not line:
            continue
        try:
            # 'name-version' -> split on the last hyphen only.
            pkg, ver = line.split()[0].rsplit('-', 1)
        except (ValueError, IndexError):
            log.error('xbps-query: Unexpected formatting in line: "%s"', line)
            continue
        log.trace('pkg=%s version=%s', pkg, ver)
        upgrades[pkg] = ver
    return upgrades
def tile_status(self):
    """Get the current status of this tile."""
    flags = self.status()['status']
    # FIXME: This needs to stay in sync with lib_common: cdb_status.h
    return {
        'debug_mode': bool(flags & (1 << 3)),
        'configured': bool(flags & (1 << 1)),
        'app_running': bool(flags & (1 << 0)),
        'trapped': bool(flags & (1 << 2)),
    }
def status(self):
    """Get the status of Alerting Service.

    :return: Status object
    """
    info = self._get(self._service_url('status'))
    # Normalize the Java-style header keys to snake_case attributes.
    info['implementation_version'] = info.pop('Implementation-Version')
    info['built_from_git_sha1'] = info.pop('Built-From-Git-SHA1')
    return Status(info)
def _makeUniqueFilename ( taken_names , name ) :
"""Helper function . Checks if name is in the set ' taken _ names ' .
If so , attepts to form up an untaken name ( by adding numbered suffixes ) .
Adds name to taken _ names .""" | if name in taken_names : # try to form up new name
basename , ext = os . path . splitext ( name )
num = 1
name = "%s-%d%s" % ( basename , num , ext )
while name in taken_names :
num += 1
name = "%s-%d%s" % ( basename , num , ext )
# finally , enter name into set
taken_names . add ( name )
return name |
def inferTM(self, bottomUp, externalInput):
    """Run inference and return the set of predicted cells."""
    self.reset()
    # Inference only: external input drives the basal dendrites,
    # learning is disabled.
    self.tm.compute(bottomUp, basalInput=externalInput, learn=False)
    return self.tm.getPredictiveCells()
def name(self):
    # type: () -> bytes
    '''Generate a string that contains all components of the symlink.

    Parameters:
     None
    Returns:
     String containing all components of the symlink.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!')
    outlist = []  # type: List[bytes]
    continued = False
    for comp in self.symlink_components:
        name = comp.name()
        if name == b'/':
            # A root component restarts the path: discard everything
            # collected so far and begin with an empty first segment so
            # the final join produces a leading '/'.
            outlist = []
            continued = False
            name = b''
        if not continued:
            outlist.append(name)
        else:
            # Previous component had the 'continued' flag set, so this
            # name is part of the same path segment: append in place.
            outlist[-1] += name
        continued = comp.is_continued()
    return b'/'.join(outlist)
def _decorate_axes ( ax , freq , kwargs ) :
"""Initialize axes for time - series plotting""" | if not hasattr ( ax , '_plot_data' ) :
ax . _plot_data = [ ]
ax . freq = freq
xaxis = ax . get_xaxis ( )
xaxis . freq = freq
if not hasattr ( ax , 'legendlabels' ) :
ax . legendlabels = [ kwargs . get ( 'label' , None ) ]
else :
ax . legendlabels . append ( kwargs . get ( 'label' , None ) )
ax . view_interval = None
ax . date_axis_info = None |
def merge_bibtex_collections(citednames, maindict, extradicts, allow_missing=False):
    """Merge BibTeX record collections, returning records for every
    cited name.

    There must be a way to be efficient and stream output instead of loading
    everything into memory at once, but, meh.

    Note that we augment `citednames` with all of the names in `maindict`.
    The intention is that if we've gone to the effort of getting good data
    for some record, we don't want to trash it if the citation is
    temporarily removed. Seems better to err on the side of preservation.
    """
    from collections import OrderedDict
    from itertools import chain

    # Later updates win: maindict entries override anything in extradicts.
    allrecords = {}
    for extra in extradicts:
        allrecords.update(extra)
    allrecords.update(maindict)

    records = OrderedDict()
    missing = []
    for name in sorted(chain(citednames, six.viewkeys(maindict))):
        rec = allrecords.get(name)
        if rec is None:
            missing.append(name)
        else:
            records[name] = rec

    if missing and not allow_missing:
        # TODO: custom exception so caller can actually see what's missing;
        # could conceivably stub out missing records or something.
        raise PKError('missing BibTeX records: %s', ' '.join(missing))
    return records
def get_enricher(config, metrics, **kwargs):
    """Get a GCEEnricher client.

    A factory function that validates configuration and returns an
    enricher client (:interface:`gordon.interfaces.IMessageHandler`)
    provider.

    Args:
        config (dict): Google Compute Engine API related configuration.
        metrics (obj): :interface:`IMetricRelay` implementation.
        kwargs (dict): Additional keyword arguments to pass to the
            enricher.
    Returns:
        A :class:`GCEEnricher` instance.
    """
    return enricher.GCEEnricherBuilder(config, metrics, **kwargs).build_enricher()
def write_json(self, **kwargs):
    """Write a JSON object on a single line.

    The keyword arguments are interpreted as a single JSON object.
    It's not possible with this method to write non-objects.
    """
    line = json.dumps(kwargs)
    self.stdout.write(line + "\n")
    self.stdout.flush()
def get_files(self):
    """stub"""
    result = {}
    try:
        # Prefer the URL-based maps.
        result['choices'] = self.get_choices_file_urls_map()
        try:
            result.update(self.get_file_urls_map())
        except IllegalState:
            pass
    except Exception:
        # Fall back to the raw file maps on any failure above.
        result['choices'] = self.get_choices_files_map()
        try:
            result.update(self.get_files_map())
        except IllegalState:
            pass
    return result
def xdg_config_dir():
    '''Check xdg locations for config files'''
    # Honor $XDG_CONFIG_HOME, defaulting to ~/.config per the XDG spec.
    base = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
    return os.path.join(base, 'salt')
def collect_results(rule, max_results=500, result_stream_args=None):
    """Utility function to quickly get a list of tweets from a
    ``ResultStream`` without keeping the object around. Requires your
    args to be configured prior to using.

    Args:
        rule (str): valid powertrack rule for your account, preferably
            generated by the `gen_rule_payload` function.
        max_results (int): maximum number of tweets or counts to return
            from the API / underlying ``ResultStream`` object.
        result_stream_args (dict): configuration dict that has connection
            information for a ``ResultStream`` object.

    Returns:
        list of results

    Example:
        >>> from searchtweets import collect_results
        >>> tweets = collect_results(rule,
                                     max_results=500,
                                     result_stream_args=search_args)
    """
    if result_stream_args is None:
        logger.error("This function requires a configuration dict for the "
                     "inner ResultStream object.")
        raise KeyError
    stream = ResultStream(rule_payload=rule, max_results=max_results,
                          **result_stream_args)
    return [item for item in stream.stream()]
def create_keypair(kwargs=None, call=None):
    '''Create an SSH keypair'''
    if call != 'function':
        log.error('The create_keypair function must be called with -f or --function.')
        return False
    kwargs = kwargs or {}
    if 'keyname' not in kwargs:
        log.error('A keyname is required.')
        return False
    params = {'Action': 'CreateKeyPair',
              'KeyName': kwargs['keyname']}
    data = aws.query(params, return_url=True, return_root=True,
                     location=get_location(), provider=get_provider(),
                     opts=__opts__, sigver='4')
    # Strip the requestId entry from the response root.
    keys = [item for item in data[0] if 'requestId' not in item]
    return (keys, data[1])
def prep(link):
    '''Prepare a statement into a triple ready for rdflib'''
    subj, pred, obj = link[:3]
    # Subject and predicate are always IRIs; the object is an IRI only
    # when it is an I instance, otherwise a literal.
    rdf_obj = URIRef(obj) if isinstance(obj, I) else Literal(obj)
    return URIRef(subj), URIRef(pred), rdf_obj
def _getLocalWhen(self, date_from, num_days=1):
    """Returns a string describing when the event occurs (in the local time zone).

    :param date_from: the (timezone-naive, event-local) start date.
    :param num_days: how many days the occurrence spans.
    :return: a localized, human-readable description string.
    """
    dateFrom, timeFrom = getLocalDateAndTime(date_from, self.time_from,
                                             self.tz, dt.time.min)
    if num_days > 1 or self.time_to is not None:
        # BUG FIX: previously read `self.num_days` here although the branch
        # is gated on the `num_days` parameter; use the parameter so the end
        # date matches the span this call was asked about.
        daysDelta = dt.timedelta(days=num_days - 1)
        dateTo, timeTo = getLocalDateAndTime(date_from + daysDelta,
                                             self.time_to, self.tz)
    else:
        dateTo = dateFrom
        timeTo = None
    if dateFrom == dateTo:
        retval = _("{date} {atTime}").format(
            date=dateFormat(dateFrom),
            atTime=timeFormat(timeFrom, timeTo, gettext("at ")))
    else:
        # e.g. "Friday the 10th of April for 3 days at 1pm to 10am" — the
        # local-time span may differ from num_days after tz conversion.
        localNumDays = (dateTo - dateFrom).days + 1
        retval = _("{date} for {n} days {startTime}").format(
            date=dateFormat(dateFrom),
            n=localNumDays,
            startTime=timeFormat(timeFrom, prefix=gettext("starting at ")))
        retval = _("{dateAndStartTime} {finishTime}").format(
            dateAndStartTime=retval.strip(),
            finishTime=timeFormat(timeTo, prefix=gettext("finishing at ")))
    return retval.strip()
def _load_lib():
    """Load the shared library by searching the possible paths."""
    candidates = _find_lib_path()
    # Use the first candidate path found.
    lib = ctypes.cdll.LoadLibrary(candidates[0])
    # Declare the return type so error strings come back as C char pointers.
    lib.MXGetLastError.restype = ctypes.c_char_p
    return lib
def parse_octal(self, text, i):
    """Parse an octal escape value and emit it into ``self.result``.

    ``text`` holds the octal digits; ``i`` is the current position in the
    pattern (forwarded to ``handle_format``). Any active case-conversion
    state (span stack / single-char modifier) is applied before emitting.
    Raises ``ValueError`` for byte patterns whose value exceeds 0o377.
    """
    value = int(text, 8)
    if value > 0xFF and self.is_bytes:  # Re fails on octal greater than `0o377` or `0xFF`
        raise ValueError("octal escape value outside of range 0-0o377!")
    else:
        # Pending single-character case modifier, if one is queued.
        single = self.get_single_stack()
        if self.span_stack:
            # Inside a case-conversion span: apply the span's case first,
            # then layer the single-char modifier on top when present.
            text = self.convert_case(chr(value), self.span_stack[-1])
            value = ord(self.convert_case(text, single)) if single is not None else ord(text)
        elif single:
            # Only a single-char case modifier is active.
            value = ord(self.convert_case(chr(value), single))
        if self.use_format and value in _CURLY_BRACKETS_ORD:
            # `{`/`}` need special handling when emitting a format template.
            self.handle_format(chr(value), i)
        elif value <= 0xFF:
            # Re-emit small values as an octal escape for the regex engine.
            self.result.append('\\%03o' % value)
        else:
            # Case conversion can push the value past 0xFF; emit the raw char.
            self.result.append(chr(value))
def summary():
    """Summarize the participants' status codes.

    Builds a JSON status payload with the experiment log summary, completion
    flag, unfilled-network counts and remaining/required node tallies, and
    re-broadcasts the waiting-room quorum message so late clients catch up.

    Returns:
        flask.Response: ``application/json`` response with the state dict.
    """
    exp = Experiment(session)
    state = {
        "status": "success",
        "summary": exp.log_summary(),
        "completed": exp.is_complete(),
    }
    unfilled_nets = (
        models.Network.query.filter(models.Network.full != true())
        .with_entities(models.Network.id, models.Network.max_size)
        .all()
    )
    working = (
        models.Participant.query.filter_by(status="working")
        .with_entities(func.count(models.Participant.id))
        .scalar()
    )
    state["unfilled_networks"] = len(unfilled_nets)
    nodes_remaining = 0
    required_nodes = 0
    if state["unfilled_networks"] == 0:
        # All networks full: if nobody is still working and the experiment
        # doesn't define its own completion check, call it done.
        if working == 0 and state["completed"] is None:
            state["completed"] = True
    else:
        for net in unfilled_nets:
            node_count = (
                models.Node.query.filter_by(network_id=net.id, failed=False)
                .with_entities(func.count(models.Node.id))
                .scalar()
            )
            net_size = net.max_size
            required_nodes += net_size
            nodes_remaining += net_size - node_count
    state["nodes_remaining"] = nodes_remaining
    state["required_nodes"] = required_nodes
    if state["completed"] is None:
        state["completed"] = False
    # Regenerate a waiting room message when checking status
    # to counter missed messages at the end of the waiting room
    nonfailed_count = models.Participant.query.filter(
        (models.Participant.status == "working")
        | (models.Participant.status == "overrecruited")
        | (models.Participant.status == "submitted")
        | (models.Participant.status == "approved")
    ).count()
    # Reuse the Experiment constructed above (the original rebuilt a second,
    # identical Experiment(session) here for no reason).
    overrecruited = exp.is_overrecruited(nonfailed_count)
    if exp.quorum:
        quorum = {"q": exp.quorum, "n": nonfailed_count, "overrecruited": overrecruited}
        db.queue_message(WAITING_ROOM_CHANNEL, dumps(quorum))
    return Response(dumps(state), status=200, mimetype="application/json")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.